[med-svn] [cwltool] 01/05: Imported Upstream version 1.0.20160316204054

Michael Crusoe misterc-guest at moszumanska.debian.org
Fri Dec 9 09:14:48 UTC 2016


This is an automated email from the git hooks/post-receive script.

misterc-guest pushed a commit to branch master
in repository cwltool.

commit 596404540f5ac0cb5fb9313462c77283ea05a163
Author: Michael R. Crusoe <crusoe at ucdavis.edu>
Date:   Fri Mar 18 06:34:23 2016 -0700

    Imported Upstream version 1.0.20160316204054
---
 MANIFEST.in                           |   1 +
 PKG-INFO                              |   2 +-
 cwltool.egg-info/PKG-INFO             |   2 +-
 cwltool.egg-info/SOURCES.txt          |   3 +
 cwltool.egg-info/requires.txt         |   2 +-
 cwltool/builder.py                    |   3 +-
 cwltool/cwlrdf.py                     | 136 ++++++++++++--
 cwltool/cwltest.py                    |  50 +++--
 cwltool/draft2tool.py                 |  81 +++++++--
 cwltool/expression.py                 |   6 +-
 cwltool/job.py                        |  11 +-
 cwltool/main.py                       | 109 ++++++-----
 cwltool/pathmapper.py                 |   1 +
 cwltool/process.py                    |  47 +++++
 cwltool/sandboxjs.py                  |  59 +++---
 cwltool/schemas/draft-3/Process.yml   |   1 +
 cwltool/schemas/draft-3/contrib.md    |   3 +
 cwltool/schemas/draft-3/invocation.md |  10 +-
 cwltool/workflow.py                   |  55 +++---
 ez_setup.py                           | 332 ++++++++++++++++++++++++++++++++++
 gittaggers.py                         |  23 +++
 setup.cfg                             |   2 +-
 setup.py                              |   7 +-
 23 files changed, 782 insertions(+), 164 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
new file mode 100644
index 0000000..624b1d1
--- /dev/null
+++ b/MANIFEST.in
@@ -0,0 +1 @@
+include gittaggers.py ez_setup.py
diff --git a/PKG-INFO b/PKG-INFO
index cdf478d..626843b 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cwltool
-Version: 1.0.20160209222805
+Version: 1.0.20160316204054
 Summary: Common workflow language reference implementation
 Home-page: https://github.com/common-workflow-language/common-workflow-language
 Author: Common workflow language working group
diff --git a/cwltool.egg-info/PKG-INFO b/cwltool.egg-info/PKG-INFO
index cdf478d..626843b 100644
--- a/cwltool.egg-info/PKG-INFO
+++ b/cwltool.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 1.1
 Name: cwltool
-Version: 1.0.20160209222805
+Version: 1.0.20160316204054
 Summary: Common workflow language reference implementation
 Home-page: https://github.com/common-workflow-language/common-workflow-language
 Author: Common workflow language working group
diff --git a/cwltool.egg-info/SOURCES.txt b/cwltool.egg-info/SOURCES.txt
index b542e5f..be6d829 100644
--- a/cwltool.egg-info/SOURCES.txt
+++ b/cwltool.egg-info/SOURCES.txt
@@ -1,4 +1,7 @@
+MANIFEST.in
 README.rst
+ez_setup.py
+gittaggers.py
 setup.py
 cwltool/__init__.py
 cwltool/__main__.py
diff --git a/cwltool.egg-info/requires.txt b/cwltool.egg-info/requires.txt
index fc82bc5..4e6c69d 100644
--- a/cwltool.egg-info/requires.txt
+++ b/cwltool.egg-info/requires.txt
@@ -3,4 +3,4 @@ PyYAML
 rdflib >= 4.2.0
 rdflib-jsonld >= 0.3.0
 shellescape
-schema_salad == 1.6.20160202222448
+schema_salad == 1.7.20160316203940
diff --git a/cwltool/builder.py b/cwltool/builder.py
index c6b3463..1394616 100644
--- a/cwltool/builder.py
+++ b/cwltool/builder.py
@@ -161,4 +161,5 @@ class Builder(object):
         return expression.do_eval(ex, self.job, self.requirements,
                                   self.outdir, self.tmpdir,
                                   self.resources,
-                                  context=context, pull_image=pull_image)
+                                  context=context, pull_image=pull_image,
+                                  timeout=self.timeout)
diff --git a/cwltool/cwlrdf.py b/cwltool/cwlrdf.py
index 02b42bf..f8a6e41 100644
--- a/cwltool/cwlrdf.py
+++ b/cwltool/cwlrdf.py
@@ -1,40 +1,56 @@
 import json
-from rdflib import Graph, plugin
+import urlparse
+from rdflib import Graph, plugin, URIRef
 from rdflib.serializer import Serializer
 
-def printrdf(workflow, wf, ctx, sr):
-    wf["@context"] = ctx
-    g = Graph().parse(data=json.dumps(wf), format='json-ld', location=workflow)
-    print(g.serialize(format=sr))
+def makerdf(workflow, wf, ctx):
+    prefixes = {}
+    for k,v in ctx.iteritems():
+        if isinstance(v, dict):
+            v = v["@id"]
+        doc_url, frg = urlparse.urldefrag(v)
+        if "/" in frg:
+            p, _ = frg.split("/")
+            prefixes[p] = "%s#%s/" % (doc_url, p)
 
-def printdot(workflow, wf, ctx, sr):
     wf["@context"] = ctx
     g = Graph().parse(data=json.dumps(wf), format='json-ld', location=workflow)
 
-    print "digraph {"
+    # A bug in the json-ld loader causes @id fields to be added to the graph
+    for s,p,o in g.triples((None, URIRef("@id"), None)):
+        g.remove((s, p, o))
 
-    #g.namespace_manager.qname(predicate)
+    for k,v in prefixes.iteritems():
+        g.namespace_manager.bind(k, v)
+
+    return g
+
+def printrdf(workflow, wf, ctx, sr):
+    print(makerdf(workflow, wf, ctx).serialize(format=sr))
 
-    def lastpart(uri):
-        uri = str(uri)
-        if "/" in uri:
-            return uri[uri.rindex("/")+1:]
-        else:
-            return uri
+def lastpart(uri):
+    uri = str(uri)
+    if "/" in uri:
+        return uri[uri.rindex("/")+1:]
+    else:
+        return uri
 
+
+def dot_with_parameters(g):
     qres = g.query(
-        """SELECT ?step ?run
+        """SELECT ?step ?run ?runtype
            WHERE {
               ?step cwl:run ?run .
+              ?run rdf:type ?runtype .
            }""")
 
-    for step, run in qres:
+    for step, run, runtype in qres:
         print '"%s" [label="%s"]' % (lastpart(step), "%s (%s)" % (lastpart(step), lastpart(run)))
 
     qres = g.query(
         """SELECT ?step ?inp ?source
            WHERE {
-              ?wf cwl:steps ?step .
+              ?wf Workflow:steps ?step .
               ?step cwl:inputs ?inp .
               ?inp cwl:source ?source .
            }""")
@@ -47,7 +63,7 @@ def printdot(workflow, wf, ctx, sr):
     qres = g.query(
         """SELECT ?step ?out
            WHERE {
-              ?wf cwl:steps ?step .
+              ?wf Workflow:steps ?step .
               ?step cwl:outputs ?out .
            }""")
 
@@ -76,5 +92,89 @@ def printdot(workflow, wf, ctx, sr):
     for (inp,) in qres:
         print '"%s" [shape=octagon]' % (lastpart(inp))
 
+def dot_without_parameters(g):
+    dotname = {}
+    clusternode = {}
+
+    print "compound=true"
+
+    subworkflows = set()
+    qres = g.query(
+        """SELECT ?run
+           WHERE {
+              ?wf rdf:type cwl:Workflow .
+              ?wf Workflow:steps ?step .
+              ?step cwl:run ?run .
+              ?run rdf:type cwl:Workflow .
+           } ORDER BY ?wf""")
+    for (run,) in qres:
+        subworkflows.add(run)
+
+    qres = g.query(
+        """SELECT ?wf ?step ?run ?runtype
+           WHERE {
+              ?wf rdf:type cwl:Workflow .
+              ?wf Workflow:steps ?step .
+              ?step cwl:run ?run .
+              ?run rdf:type ?runtype .
+           } ORDER BY ?wf""")
+
+    currentwf = None
+    for wf, step, run, runtype in qres:
+        if step not in dotname:
+            dotname[step] = lastpart(step)
+
+        if wf != currentwf:
+            if currentwf is not None:
+                print "}"
+            if wf in subworkflows:
+                if wf not in dotname:
+                    dotname[wf] = "cluster_" + lastpart(wf)
+                print 'subgraph "%s" { label="%s"' % (dotname[wf], lastpart(wf))
+                currentwf = wf
+                clusternode[wf] = step
+            else:
+                currentwf = None
+
+        if str(runtype) != "https://w3id.org/cwl/cwl#Workflow":
+            print '"%s" [label="%s"]' % (dotname[step], urlparse.urldefrag(str(step))[1])
+
+    if currentwf is not None:
+        print "}\n"
+
+    qres = g.query(
+        """SELECT DISTINCT ?src ?sink ?srcrun ?sinkrun
+           WHERE {
+              ?wf1 Workflow:steps ?src .
+              ?wf2 Workflow:steps ?sink .
+              ?src cwl:outputs ?out .
+              ?inp cwl:source ?out .
+              ?sink cwl:inputs ?inp .
+              ?src cwl:run ?srcrun .
+              ?sink cwl:run ?sinkrun .
+           }""")
+
+    for src, sink, srcrun, sinkrun in qres:
+        attr = ""
+        if srcrun in clusternode:
+            attr += 'ltail="%s"' % dotname[srcrun]
+            src = clusternode[srcrun]
+        if sinkrun in clusternode:
+            attr += ' lhead="%s"' % dotname[sinkrun]
+            sink = clusternode[sinkrun]
+        print '"%s" -> "%s" [%s]' % (dotname[src], dotname[sink], attr)
+
+
+def printdot(workflow, wf, ctx, include_parameters=False):
+    g = makerdf(workflow, wf, ctx)
+
+    print "digraph {"
+
+    #g.namespace_manager.qname(predicate)
+
+    if include_parameters:
+        dot_with_parameters(g)
+    else:
+        dot_without_parameters(g)
 
     print "}"
diff --git a/cwltool/cwltest.py b/cwltool/cwltest.py
index 4fa10c4..31afadb 100755
--- a/cwltool/cwltest.py
+++ b/cwltool/cwltest.py
@@ -10,8 +10,9 @@ import tempfile
 import yaml
 import pipes
 import logging
+import schema_salad.ref_resolver
 
-_logger = logging.getLogger("cwltool")
+_logger = logging.getLogger("cwltest")
 _logger.addHandler(logging.StreamHandler())
 _logger.setLevel(logging.INFO)
 
@@ -131,12 +132,15 @@ def run_test(args, i, t):
 
 
 def main():
-    parser = argparse.ArgumentParser()
+    parser = argparse.ArgumentParser(description='Compliance tests for cwltool')
     parser.add_argument("--test", type=str, help="YAML file describing test cases", required=True)
     parser.add_argument("--basedir", type=str, help="Basedir to use for tests", default=".")
-    parser.add_argument("-n", type=int, default=None, help="Run a specific test")
+    parser.add_argument("-l", action="store_true", help="List tests then exit")
+    parser.add_argument("-n", type=str, default=None, help="Run a specific tests, format is 1,3-6,9")
     parser.add_argument("--tool", type=str, default="cwl-runner",
                         help="CWL runner executable to use (default 'cwl-runner'")
+    parser.add_argument("--only-tools", action="store_true", help="Only test tools")
+
     args = parser.parse_args()
 
     if not args.test:
@@ -149,22 +153,40 @@ def main():
     failures = 0
     unsupported = 0
 
+    if args.only_tools:
+        alltests = tests
+        tests = []
+        for t in alltests:
+            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
+            cwl, _ = loader.resolve_ref(t["tool"])
+            if cwl["class"] == "CommandLineTool":
+                tests.append(t)
+
+    if args.l:
+        for i, t in enumerate(tests):
+            print "[%i] %s" % (i+1, t["doc"].strip())
+        return 0
+
     if args.n is not None:
-        sys.stderr.write("\rTest [%i/%i] " % (args.n, len(tests)))
-        rt = run_test(args, args.n-1, tests[args.n-1])
+        ntest = []
+        for s in args.n.split(","):
+            sp = s.split("-")
+            if len(sp) == 2:
+                ntest.extend(range(int(sp[0])-1, int(sp[1])))
+            else:
+                ntest.append(int(s)-1)
+    else:
+        ntest = range(0, len(tests))
+
+    for i in ntest:
+        t = tests[i]
+        sys.stderr.write("\rTest [%i/%i] " % (i+1, len(tests)))
+        sys.stderr.flush()
+        rt = run_test(args, i, t)
         if rt == 1:
             failures += 1
         elif rt == UNSUPPORTED_FEATURE:
             unsupported += 1
-    else:
-        for i, t in enumerate(tests):
-            sys.stderr.write("\rTest [%i/%i] " % (i+1, len(tests)))
-            sys.stderr.flush()
-            rt = run_test(args, i, t)
-            if rt == 1:
-                failures += 1
-            elif rt == UNSUPPORTED_FEATURE:
-                unsupported += 1
 
     if failures == 0 and unsupported == 0:
          _logger.info("All tests passed")
diff --git a/cwltool/draft2tool.py b/cwltool/draft2tool.py
index f5e50d3..b506b5f 100644
--- a/cwltool/draft2tool.py
+++ b/cwltool/draft2tool.py
@@ -11,7 +11,7 @@ import glob
 import logging
 import hashlib
 import random
-from process import Process, shortname, uniquename
+from process import Process, shortname, uniquename, adjustFileObjs
 from errors import WorkflowException
 import schema_salad.validate as validate
 from aslist import aslist
@@ -51,6 +51,32 @@ class ExpressionTool(Process):
 
         yield j
 
+def remove_hostfs(f):
+    if "hostfs" in f:
+        del f["hostfs"]
+
+def revmap_file(builder, outdir, f):
+    """Remap a file back to original path. For Docker, this is outside the container.
+
+    Uses either files in the pathmapper or remaps internal output directories
+    to the external directory.
+    """
+
+    if f.get("hostfs"):
+        return
+
+    revmap_f = builder.pathmapper.reversemap(f["path"])
+    if revmap_f:
+        f["path"] = revmap_f[1]
+        f["hostfs"] = True
+        return f
+    elif f["path"].startswith(builder.outdir):
+        f["path"] = os.path.join(outdir, f["path"][len(builder.outdir)+1:])
+        f["hostfs"] = True
+        return f
+    else:
+        raise WorkflowException("Output file path %s must be within designated output directory (%s) or an input file pass through." % (f["path"], builder.outdir))
+
 
 class CommandLineTool(Process):
     def __init__(self, toolpath_object, **kwargs):
@@ -102,6 +128,7 @@ class CommandLineTool(Process):
         reffiles = set((f["path"] for f in builder.files))
 
         j = self.makeJobRunner()
+        j.builder = builder
         j.joborder = builder.job
         j.stdin = None
         j.stdout = None
@@ -135,8 +162,16 @@ class CommandLineTool(Process):
         builder.pathmapper = self.makePathMapper(reffiles, input_basedir, **kwargs)
         builder.requirements = j.requirements
 
-        for f in builder.files:
-            f["path"] = builder.pathmapper.mapper(f["path"])[1]
+        # Map files to their assigned paths inside the container. We also need to
+        # explicitly walk the input, as implicit reassignment doesn't reach everything in builder.bindings.
+        def _check_adjust(f):
+            if not f.get("containerfs"):
+                f["path"] = builder.pathmapper.mapper(f["path"])[1]
+                f["containerfs"] = True
+            return f
+
+        adjustFileObjs(builder.files, _check_adjust)
+        adjustFileObjs(builder.bindings, _check_adjust)
 
         _logger.debug("[job %s] command line bindings is %s", j.name, json.dumps(builder.bindings, indent=4))
         _logger.debug("[job %s] path mappings is %s", j.name, json.dumps({p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()}, indent=4))
@@ -183,17 +218,23 @@ class CommandLineTool(Process):
 
     def collect_output_ports(self, ports, builder, outdir):
         try:
+            ret = {}
             custom_output = os.path.join(outdir, "cwl.output.json")
             if builder.fs_access.exists(custom_output):
-                outputdoc = yaml.load(custom_output)
-                validate.validate_ex(self.names.get_name("outputs_record_schema", ""), outputdoc)
-                return outputdoc
-
-            ret = {}
+                with builder.fs_access.open(custom_output, "r") as f:
+                    ret = yaml.load(f)
+                _logger.debug("Raw output from %s: %s", custom_output, json.dumps(ret, indent=4))
+                adjustFileObjs(ret, remove_hostfs)
+                adjustFileObjs(ret, functools.partial(revmap_file, builder, outdir))
+                adjustFileObjs(ret, remove_hostfs)
+                validate.validate_ex(self.names.get_name("outputs_record_schema", ""), ret)
+                return ret
 
             for port in ports:
                 fragment = shortname(port["id"])
                 ret[fragment] = self.collect_output(port, builder, outdir)
+            if ret:
+                adjustFileObjs(ret, remove_hostfs)
             validate.validate_ex(self.names.get_name("outputs_record_schema", ""), ret)
             return ret if ret is not None else {}
         except validate.ValidationException as e:
@@ -204,16 +245,25 @@ class CommandLineTool(Process):
         if "outputBinding" in schema:
             binding = schema["outputBinding"]
             globpatterns = []
+
+            revmap = functools.partial(revmap_file, builder, outdir)
+
             if "glob" in binding:
                 r = []
                 for gb in aslist(binding["glob"]):
+                    gb = builder.do_eval(gb)
+                    if gb:
+                        globpatterns.extend(aslist(gb))
+
+                for gb in globpatterns:
+                    if gb.startswith("/"):
+                        raise WorkflowException("glob patterns must not start with '/'")
                     try:
-                        gb = builder.do_eval(gb)
-                        globpatterns.append(gb)
-                        if gb:
-                            r.extend([{"path": g, "class": "File"} for g in builder.fs_access.glob(os.path.join(outdir, gb))])
+                        r.extend([{"path": g, "class": "File", "hostfs": True}
+                                  for g in builder.fs_access.glob(os.path.join(outdir, gb))])
                     except (OSError, IOError) as e:
                         _logger.warn(str(e))
+
                 for files in r:
                     checksum = hashlib.sha1()
                     with builder.fs_access.open(files["path"], "rb") as f:
@@ -262,6 +312,9 @@ class CommandLineTool(Process):
                     else:
                         r = r[0]
 
+            # Ensure files point to local references outside of the run environment
+            adjustFileObjs(r, revmap)
+
             if "secondaryFiles" in schema:
                 for primary in aslist(r):
                     if isinstance(primary, dict):
@@ -270,9 +323,9 @@ class CommandLineTool(Process):
                             if isinstance(sf, dict) or "$(" in sf or "${" in sf:
                                 sfpath = builder.do_eval(sf, context=r)
                                 if isinstance(sfpath, basestring):
-                                    sfpath = {"path": sfpath, "class": "File"}
+                                    sfpath = revmap({"path": sfpath, "class": "File"})
                             else:
-                                sfpath = {"path": substitute(primary["path"], sf), "class": "File"}
+                                sfpath = {"path": substitute(primary["path"], sf), "class": "File", "hostfs": True}
 
                             for sfitem in aslist(sfpath):
                                 if builder.fs_access.exists(sfitem["path"]):
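
To make the hostfs/containerfs bookkeeping above concrete, here is a self-contained sketch of the revmap idea: a path produced inside the container is mapped back to the host side, either through the path mapper or by rebasing it from the container output directory onto the host output directory. FakeMapper and all paths are illustrative assumptions, not cwltool API:

    import os

    class FakeMapper(object):
        def __init__(self, table):          # {host_path: container_path}
            self.table = table
        def reversemap(self, target):
            for host, cont in self.table.items():
                if cont == target:
                    return (cont, host)
            return None

    def revmap(mapper, container_outdir, host_outdir, f):
        rm = mapper.reversemap(f["path"])
        if rm:                              # an input passed straight through
            f["path"] = rm[1]
        elif f["path"].startswith(container_outdir):
            f["path"] = os.path.join(host_outdir,
                                     f["path"][len(container_outdir) + 1:])
        f["hostfs"] = True
        return f

    f = {"class": "File", "path": "/var/spool/cwl/out.txt"}
    revmap(FakeMapper({}), "/var/spool/cwl", "/home/user/run1", f)
    # f["path"] is now "/home/user/run1/out.txt", flagged hostfs=True
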
diff --git a/cwltool/expression.py b/cwltool/expression.py
index 03714fe..3e9f62c 100644
--- a/cwltool/expression.py
+++ b/cwltool/expression.py
@@ -116,7 +116,8 @@ def param_interpolate(ex, obj, strip=True):
         return ex
 
 
-def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources, context=None, pull_image=True):
+def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources,
+            context=None, pull_image=True, timeout=None):
     runtime = resources.copy()
     runtime["tmpdir"] = tmpdir
     runtime["outdir"] = outdir
@@ -132,6 +133,7 @@ def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources, context=None,
     if isinstance(ex, basestring):
         for r in requirements:
             if r["class"] == "InlineJavascriptRequirement":
-                return sandboxjs.interpolate(ex, jshead(r.get("expressionLib", []), rootvars))
+                return sandboxjs.interpolate(ex, jshead(r.get("expressionLib", []), rootvars),
+                                             timeout=timeout)
         return param_interpolate(ex, rootvars)
     return ex
diff --git a/cwltool/job.py b/cwltool/job.py
index 6321745..6e9b0dc 100644
--- a/cwltool/job.py
+++ b/cwltool/job.py
@@ -47,8 +47,8 @@ class CommandLineJob(object):
         (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
 
         for f in self.pathmapper.files():
-            if not os.path.exists(self.pathmapper.mapper(f)[0]):
-                raise WorkflowException("Required input file %s not found" % self.pathmapper.mapper(f)[0])
+            if not os.path.isfile(self.pathmapper.mapper(f)[0]):
+                raise WorkflowException("Required input file %s not found or is not a regular file." % self.pathmapper.mapper(f)[0])
 
         img_id = None
         if docker_req and kwargs.get("use_container") is not False:
@@ -67,7 +67,12 @@ class CommandLineJob(object):
             runtime.append("--volume=%s:%s:rw" % (os.path.abspath(self.tmpdir), "/tmp"))
             runtime.append("--workdir=%s" % ("/var/spool/cwl"))
             runtime.append("--read-only=true")
-            runtime.append("--net=none")
+            if kwargs.get("enable_net") is not True:
+                runtime.append("--net=none")
+
+            if self.stdout:
+                runtime.append("--log-driver=none")
+
             euid = docker_vm_uid() or os.geteuid()
             runtime.append("--user=%s" % (euid))
 
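
The two conditional flags above can be summarized in isolation; a minimal sketch (docker_output_flags is a hypothetical helper, not cwltool API):

    def docker_output_flags(enable_net, capture_stdout):
        # Networking is opt-in; when stdout is redirected, Docker's log
        # driver is switched off, presumably so the container's output is
        # not also buffered into Docker's own logs.
        runtime = []
        if not enable_net:
            runtime.append("--net=none")
        if capture_stdout:
            runtime.append("--log-driver=none")
        return runtime

    docker_output_flags(False, True)  # -> ['--net=none', '--log-driver=none']
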
diff --git a/cwltool/main.py b/cwltool/main.py
index 832002b..cd27bfc 100755
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -21,6 +21,7 @@ import pkg_resources  # part of setuptools
 import update
 from process import shortname
 import rdflib
+from aslist import aslist
 
 _logger = logging.getLogger("cwltool")
 
@@ -29,7 +30,7 @@ _logger.addHandler(defaultStreamHandler)
 _logger.setLevel(logging.INFO)
 
 def arg_parser():
-    parser = argparse.ArgumentParser()
+    parser = argparse.ArgumentParser(description='Reference executor for Common Workflow Language')
     parser.add_argument("--conformance-test", action="store_true")
     parser.add_argument("--basedir", type=str)
     parser.add_argument("--outdir", type=str, default=os.path.abspath('.'),
@@ -94,6 +95,10 @@ def arg_parser():
                         help="Output RDF serialization format used by --print-rdf (one of turtle (default), n3, nt, xml)",
                         default="turtle")
 
+    parser.add_argument("--eval-timeout",
+                        help="Time to wait for a Javascript expression to evaluate before giving an error.",
+                        type=float)
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--print-rdf", action="store_true",
                         help="Print corresponding RDF graph for workflow and exit")
@@ -116,6 +121,8 @@ def arg_parser():
 
     parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
 
+    parser.add_argument("--enable-net", action="store_true", help="Use docker's default network for container, default to disable network")
+
     parser.add_argument("workflow", type=str, nargs="?", default=None)
     parser.add_argument("job_order", nargs=argparse.REMAINDER)
 
@@ -276,23 +283,24 @@ def load_tool(argsworkflow, updateonly, strict, makeTool, debug,
             workflowobj = {"cwlVersion": "https://w3id.org/cwl/cwl#draft-2",
                            "id": fileuri,
                            "@graph": workflowobj}
-
-        if "cwl:tool" in workflowobj:
-            jobobj = workflowobj
-            workflowobj = document_loader.fetch(urlparse.urljoin(uri, workflowobj["cwl:tool"]))
-
-        workflowobj = update.update(workflowobj, document_loader, fileuri)
-        document_loader.idx.clear()
-
-        if updateonly:
-            print json.dumps(workflowobj, indent=4)
-            return 0
     elif isinstance(argsworkflow, dict):
         workflowobj = argsworkflow
         uri = urifrag
+        fileuri = ""
     else:
         raise schema_salad.validate.ValidationException("Must be URI or dict")
 
+    if "cwl:tool" in workflowobj:
+        jobobj = workflowobj
+        workflowobj = document_loader.fetch(urlparse.urljoin(uri, workflowobj["cwl:tool"]))
+
+    workflowobj = update.update(workflowobj, document_loader, fileuri)
+    document_loader.idx.clear()
+
+    if updateonly:
+        print json.dumps(workflowobj, indent=4)
+        return 0
+
     try:
         processobj, metadata = schema_salad.schema.load_and_validate(document_loader, avsc_names, workflowobj, strict)
     except (schema_salad.validate.ValidationException, RuntimeError) as e:
@@ -308,16 +316,19 @@ def load_tool(argsworkflow, updateonly, strict, makeTool, debug,
         return 0
 
     if print_dot:
-        printdot(argsworkflow, processobj, document_loader.ctx, rdf_serializer)
+        printdot(argsworkflow, processobj, document_loader.ctx)
         return 0
 
     if urifrag:
         processobj, _ = document_loader.resolve_ref(uri)
     elif isinstance(processobj, list):
-        _logger.error("Tool file contains graph of multiple objects, must specify one of #%s",
-                      ", #".join(urlparse.urldefrag(i["id"])[1]
-                                 for i in processobj if "id" in i))
-        return 1
+        if 1 == len(processobj):
+            processobj = processobj[0]
+        else:
+            _logger.error("Tool file contains graph of multiple objects, must specify one of #%s",
+                          ", #".join(urlparse.urldefrag(i["id"])[1]
+                                     for i in processobj if "id" in i))
+            return 1
 
     try:
         t = makeTool(processobj, strict=strict, makeTool=makeTool, loader=document_loader, avsc_names=avsc_names)
@@ -340,7 +351,7 @@ def load_tool(argsworkflow, updateonly, strict, makeTool, debug,
 
     return t
 
-def load_job_order(args, t, parser):
+def load_job_order(args, t, parser, stdin):
 
     job_order_object = None
 
@@ -412,42 +423,16 @@ def load_job_order(args, t, parser):
 
     return (job_order_object, input_basedir)
 
-
-def scandeps(base, doc):
-    r = []
-    if isinstance(doc, dict):
-        if "$import" in doc:
-            p = os.path.join(base, doc["$import"])
-            with open(p) as f:
-                r.append({
-                    "class": "File",
-                    "path": p,
-                    "secondaryFiles": scandeps(os.path.dirname(p), yaml.load(f))
-                })
-        elif "$include" in doc:
-            p = os.path.join(base, doc["$include"])
-            r.append({
-                "class": "File",
-                "path": p
-            })
-        elif "$schemas" in doc:
-            for s in doc["$schemas"]:
-                p = os.path.join(base, s)
-                r.append({
-                    "class": "File",
-                    "path": p
-                })
-        else:
-            for d in doc.itervalues():
-                r.extend(scandeps(base, d))
-    elif isinstance(doc, list):
-        for d in doc:
-            r.extend(scandeps(base, d))
-    return r
-
 def print_deps(fn):
     with open(fn) as f:
-        print json.dumps(scandeps(os.path.dirname(fn), yaml.load(f)), indent=4)
+        deps = {"class": "File",
+                "path": fn}
+        sf = process.scandeps(os.path.dirname(fn), yaml.load(f),
+                              set(("$import", "run")),
+                              set(("$include", "$schemas", "path")))
+        if sf:
+            deps["secondaryFiles"] = sf
+        print json.dumps(deps, indent=4)
 
 def main(args=None,
          executor=single_job_executor,
@@ -499,7 +484,7 @@ def main(args=None,
                       print_dot=args.print_dot,
                       rdf_serializer=args.rdf_serializer)
     except Exception as e:
-        _logger.error("I'm sorry, I couldn't load this CWL file.\n%s", e, exc_info=(e if args.debug else False))
+        _logger.error("I'm sorry, I couldn't load this CWL file, try again with --debug for more information.\n%s\n", e, exc_info=(e if args.debug else False))
         return 1
 
     if type(t) == int:
@@ -519,7 +504,7 @@ def main(args=None,
             _logger.error("Temporary directory prefix doesn't exist.")
             return 1
 
-    job_order_object = load_job_order(args, t, parser)
+    job_order_object = load_job_order(args, t, parser, stdin)
 
     if type(job_order_object) == int:
         return job_order_object
@@ -536,19 +521,27 @@ def main(args=None,
                        pull_image=args.enable_pull,
                        rm_container=args.rm_container,
                        tmpdir_prefix=args.tmpdir_prefix,
+                       enable_net=args.enable_net,
                        rm_tmpdir=args.rm_tmpdir,
                        makeTool=makeTool,
                        move_outputs=args.move_outputs,
-                       select_resources=selectResources
+                       select_resources=selectResources,
+                       eval_timeout=args.eval_timeout
                        )
         # This is the workflow output, it needs to be written
-        stdout.write(json.dumps(out, indent=4))
-        stdout.flush()
+        if out is not None:
+            stdout.write(json.dumps(out, indent=4))
+            stdout.flush()
+        else:
+            return 1
     except (validate.ValidationException) as e:
         _logger.error("Input object failed validation:\n%s", e, exc_info=(e if args.debug else False))
         return 1
     except workflow.WorkflowException as e:
-        _logger.error("Workflow error:\n  %s", e, exc_info=(e if args.debug else False))
+        _logger.error("Workflow error, try again with --debug for more information:\n  %s", e, exc_info=(e if args.debug else False))
+        return 1
+    except Exception as e:
+        _logger.error("Unhandled error, try again with --debug for more information:\n  %s", e, exc_info=(e if args.debug else False))
         return 1
 
     return 0
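
For reference, the two new options wired in above can be combined on a single invocation, e.g. "cwltool --eval-timeout 60 --enable-net workflow.cwl job.json" (the workflow and job order file names here are placeholders).
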
diff --git a/cwltool/pathmapper.py b/cwltool/pathmapper.py
index f7aab43..68bcb56 100644
--- a/cwltool/pathmapper.py
+++ b/cwltool/pathmapper.py
@@ -32,6 +32,7 @@ class PathMapper(object):
         for k,v in self._pathmap.items():
             if v[1] == target:
                 return (k, v[0])
+        return None
 
 class DockerPathMapper(PathMapper):
     def __init__(self, referenced_files, basedir):
diff --git a/cwltool/process.py b/cwltool/process.py
index a74b452..952291a 100644
--- a/cwltool/process.py
+++ b/cwltool/process.py
@@ -141,6 +141,18 @@ def adjustFiles(rec, op):
         for d in rec:
             adjustFiles(d, op)
 
+def adjustFileObjs(rec, op):
+    """Apply an update function to each File object in the object `rec`."""
+
+    if isinstance(rec, dict):
+        if rec.get("class") == "File":
+            op(rec)
+        for d in rec:
+            adjustFileObjs(rec[d], op)
+    if isinstance(rec, list):
+        for d in rec:
+            adjustFileObjs(d, op)
+
 def formatSubclassOf(fmt, cls, ontology, visited):
     """Determine if `fmt` is a subclass of `cls`."""
 
@@ -191,6 +203,8 @@ class Process(object):
         self.hints = kwargs.get("hints", []) + self.tool.get("hints", [])
         if "loader" in kwargs:
             self.formatgraph = kwargs["loader"].graph
+        else:
+            self.formatgraph = None
 
         self.validate_hints(self.tool.get("hints", []), strict=kwargs.get("strict"))
 
@@ -267,6 +281,7 @@ class Process(object):
         builder.names = self.names
         builder.requirements = self.requirements
         builder.resources = {}
+        builder.timeout = kwargs.get("eval_timeout")
 
         dockerReq, _ = self.get_requirement("DockerRequirement")
         if dockerReq and kwargs.get("use_container"):
@@ -370,3 +385,35 @@ def uniquename(stem):
         u = "%s_%s" % (stem, c)
     _names.add(u)
     return u
+
+def scandeps(base, doc, reffields, urlfields):
+    r = []
+    if isinstance(doc, dict):
+        for k, v in doc.iteritems():
+            if k in reffields:
+                for u in aslist(v):
+                    if not isinstance(u, basestring):
+                        continue
+                    p = os.path.join(base, u)
+                    with open(p) as f:
+                        deps = {
+                            "class": "File",
+                            "path": p
+                        }
+                        sf = scandeps(os.path.dirname(p), yaml.load(f), reffields, urlfields)
+                        if sf:
+                            deps["secondaryFiles"] = sf
+                        r.append(deps)
+            elif k in urlfields:
+                for u in aslist(v):
+                    p = os.path.join(base, u)
+                    r.append({
+                        "class": "File",
+                        "path": p
+                    })
+            else:
+                r.extend(scandeps(base, v, reffields, urlfields))
+    elif isinstance(doc, list):
+        for d in doc:
+            r.extend(scandeps(base, d, reffields, urlfields))
+    return r
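
A quick illustration of adjustFileObjs, which walks any nested dict/list structure and applies the update function to every {"class": "File"} object it finds; the sample output object is made up:

    from cwltool.process import adjustFileObjs

    outputs = {
        "result": {"class": "File", "path": "/tmp/a", "hostfs": True},
        "extra": [{"class": "File", "path": "/tmp/b", "hostfs": True}],
    }

    def remove_hostfs(f):
        if "hostfs" in f:
            del f["hostfs"]

    adjustFileObjs(outputs, remove_hostfs)
    # both File objects have now had their "hostfs" flag stripped
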
diff --git a/cwltool/sandboxjs.py b/cwltool/sandboxjs.py
index 9708e0a..3cb3cde 100644
--- a/cwltool/sandboxjs.py
+++ b/cwltool/sandboxjs.py
@@ -1,49 +1,62 @@
 import subprocess
 import json
 import threading
+import errno
 
 class JavascriptException(Exception):
     pass
 
-def execjs(js, jslib):
-    try:
-        nodejs = subprocess.Popen(["nodejs"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    except OSError as e:
-        if e.errno == 2:
-            nodejs = subprocess.Popen(["docker", "run",
-                                       "--attach=STDIN", "--attach=STDOUT", "--attach=STDERR",
-                                       "--interactive",
-                                       "--rm",
-                                       "commonworkflowlanguage/nodejs-engine", "nodejs"],
-                                      stdin=subprocess.PIPE,
-                                      stdout=subprocess.PIPE,
-                                      stderr=subprocess.PIPE)
-        else:
-            raise
+def execjs(js, jslib, timeout=None):
+    nodejs = None
+    trynodes = (["nodejs"], ["node"], ["docker", "run",
+                                        "--attach=STDIN", "--attach=STDOUT", "--attach=STDERR",
+                                        "--sig-proxy=true",
+                                        "--interactive",
+                                        "--rm",
+                                        "node:slim"])
+    for n in trynodes:
+        try:
+            nodejs = subprocess.Popen(n, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            break
+        except OSError as e:
+            if e.errno == errno.ENOENT:
+                pass
+            else:
+                raise
+
+    if nodejs is None:
+        raise JavascriptException("cwltool requires Node.js engine to evaluate Javascript expressions, but couldn't find it.  Tried %s" % (trynodes,))
 
     fn = "\"use strict\";%s\n(function()%s)()" % (jslib, js if isinstance(js, basestring) and len(js) > 1 and js[0] == '{' else ("{return (%s);}" % js))
     script = "console.log(JSON.stringify(require(\"vm\").runInNewContext(%s, {})));\n" % json.dumps(fn)
 
+    killed = []
     def term():
         try:
-            nodejs.terminate()
+            nodejs.kill()
+            killed.append(True)
         except OSError:
             pass
 
-    # Time out after 5 seconds
-    tm = threading.Timer(5, term)
+    if timeout is None:
+        timeout = 20
+
+    tm = threading.Timer(timeout, term)
     tm.start()
 
     stdoutdata, stderrdata = nodejs.communicate(script)
     tm.cancel()
 
+    if killed:
+        raise JavascriptException("Long-running script killed after %s seconds.\nscript was: %s\n" % (timeout, fn))
+
     if nodejs.returncode != 0:
-        raise JavascriptException("Returncode was: %s\nscript was: %s\nstdout was: '%s'\nstderr was: '%s'\n" % (nodejs.returncode, script, stdoutdata, stderrdata))
+        raise JavascriptException("Returncode was: %s\nscript was: %s\nstdout was: '%s'\nstderr was: '%s'\n" % (nodejs.returncode, fn, stdoutdata, stderrdata))
     else:
         try:
             return json.loads(stdoutdata)
-        except ValueError:
-            raise JavascriptException("Returncode was: %s\nscript was: %s\nstdout was: '%s'\nstderr was: '%s'\n" % (nodejs.returncode, script, stdoutdata, stderrdata))
+        except ValueError as e:
+            raise JavascriptException("%s\nscript was: %s\nstdout was: '%s'\nstderr was: '%s'\n" % (e, fn, stdoutdata, stderrdata))
 
 class SubstitutionError(Exception):
     pass
@@ -120,7 +133,7 @@ def scanner(scan):
         return None
 
 
-def interpolate(scan, jslib):
+def interpolate(scan, jslib, timeout=None):
     scan = scan.strip()
     parts = []
     w = scanner(scan)
@@ -128,7 +141,7 @@ def interpolate(scan, jslib):
         parts.append(scan[0:w[0]])
 
         if scan[w[0]] == '$':
-            e = execjs(scan[w[0]+1:w[1]], jslib)
+            e = execjs(scan[w[0]+1:w[1]], jslib, timeout=timeout)
             if w[0] == 0 and w[1] == len(scan):
                 return e
             leaf = json.dumps(e, sort_keys=True)
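
Usage sketch for the revised expression engine; both calls need a nodejs/node binary (or Docker) on PATH per the fallback chain above, and the expressions themselves are illustrative:

    from cwltool.sandboxjs import execjs, interpolate

    execjs("1 + 1", "")                       # -> 2
    execjs("{return 6 * 7;}", "", timeout=5)  # -> 42
    interpolate("$(3 * 7)", "")               # -> 21
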
diff --git a/cwltool/schemas/draft-3/Process.yml b/cwltool/schemas/draft-3/Process.yml
index a080616..38b217c 100644
--- a/cwltool/schemas/draft-3/Process.yml
+++ b/cwltool/schemas/draft-3/Process.yml
@@ -62,6 +62,7 @@ $graph:
       type: string
       doc: The path to the file.
       jsonldPredicate:
+        "_id": "cwl:path"
         "_type": "@id"
     - name: checksum
       type: ["null", string]
diff --git a/cwltool/schemas/draft-3/contrib.md b/cwltool/schemas/draft-3/contrib.md
index cce32d1..0d9473c 100644
--- a/cwltool/schemas/draft-3/contrib.md
+++ b/cwltool/schemas/draft-3/contrib.md
@@ -5,8 +5,11 @@ Authors:
 
 Contributors:
 
+* Brad Chapman <bchapman at hsph.harvard.edu>, Harvard Chan School of Public Health
 * John Chilton <jmchilton at gmail.com>, Galaxy Project, Pennsylvania State University
 * Michael R. Crusoe <crusoe at ucdavis.edu>, University of California, Davis
+* Andrey Kartashov <Andrey.Kartashov at cchmc.org>, Cincinnati Children's Hospital
+* Dan Leehr <dan.leehr at duke.edu>, Duke University
 * Hervé Ménager <herve.menager at gmail.com>, Institut Pasteur
 * Stian Soiland-Reyes [soiland-reyes at cs.manchester.ac.uk](mailto:soiland-reyes at cs.manchester.ac.uk), University of Manchester
 * Luka Stojanovic <luka.stojanovic at sbgenomics.com>, Seven Bridges Genomics
diff --git a/cwltool/schemas/draft-3/invocation.md b/cwltool/schemas/draft-3/invocation.md
index 1342711..1d1791a 100644
--- a/cwltool/schemas/draft-3/invocation.md
+++ b/cwltool/schemas/draft-3/invocation.md
@@ -90,8 +90,14 @@ An implementation may forbid the tool from writing to any location in the
 runtime environment file system other than the designated temporary directory,
 system temporary directory, and designated output directory.  An implementation
 may provide read-only input files, and disallow in-place update of input files.
-The designated temporary directory and designated output directory may reside
-on different mount points on different file systems.
+The designated temporary directory, system temporary directory, and designated
+output directory may each reside on different mount points on different file
+systems.
+
+An implementation may forbid the tool from directly accessing network
+resources.  Correct tools must not assume any network access.  Future versions
+of the specification may incorporate optional process requirements that
+describe the networking needs of a tool.
 
 The `runtime` section available in [parameter references](#Parameter_references)
 and [expressions](#Expressions) contains the following fields.  As noted
diff --git a/cwltool/workflow.py b/cwltool/workflow.py
index 84fb584..6ba693e 100644
--- a/cwltool/workflow.py
+++ b/cwltool/workflow.py
@@ -136,6 +136,7 @@ class WorkflowJobStep(object):
         self.id = step.id
         self.submitted = False
         self.completed = False
+        self.iterable = None
         self.name = uniquename("step %s" % shortname(self.id))
 
     def job(self, joborder, basedir, output_callback, **kwargs):
@@ -158,12 +159,11 @@ class WorkflowJob(object):
             # tmp_outdir_prefix defaults to tmp, so this is unlikely to be used
             self.outdir = tempfile.mkdtemp()
 
-        self.name = uniquename(kwargs.get("name", shortname(self.workflow.tool["id"])))
+        self.name = uniquename("workflow %s" % kwargs.get("name", shortname(self.workflow.tool["id"])))
 
-        _logger.debug("[workflow %s] initialized from %s", self.name, self.tool["id"])
+        _logger.debug("[%s] initialized step from %s", self.name, self.tool["id"])
 
     def receive_output(self, step, outputparms, jobout, processStatus):
-        _logger.debug("[workflow %s] step %s completed", self.name, id(step))
         for i in outputparms:
             if "id" in i:
                 if i["id"] in jobout:
@@ -172,14 +172,15 @@ class WorkflowJob(object):
                     _logger.error("Output is missing expected field %s" % i["id"])
                     processStatus = "permanentFail"
 
+        _logger.debug("[%s] produced output %s", step.name, json.dumps(jobout, indent=4))
+
         if processStatus != "success":
             if self.processStatus != "permanentFail":
                 self.processStatus = processStatus
 
-            if processStatus == "success":
-                _logger.info("Workflow step %s completion status is %s", step.id, processStatus)
-            else:
-                _logger.warn("Workflow step %s completion status is %s", step.id, processStatus)
+            _logger.warn("[%s] completion status is %s", step.name, processStatus)
+        else:
+            _logger.info("[%s] completion status is %s", step.name, processStatus)
 
         step.completed = True
 
@@ -192,14 +193,14 @@ class WorkflowJob(object):
         try:
             inputobj = object_from_state(self.state, inputparms, False, supportsMultipleInput)
             if inputobj is None:
-                _logger.debug("[workflow %s] job step %s not ready", self.name, step.id)
+                _logger.debug("[%s] job step %s not ready", self.name, step.id)
                 return
 
-            _logger.debug("[step %s] starting job step %s of workflow %s", id(step), step.id, id(self))
-
             if step.submitted:
                 return
 
+            _logger.debug("[%s] starting %s", self.name, step.name)
+
             callback = functools.partial(self.receive_output, step, outputparms)
 
             valueFrom = {i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i}
@@ -232,9 +233,9 @@ class WorkflowJob(object):
                     jobs = flat_crossproduct_scatter(step, inputobj, basedir,
                                                      scatter, callback, 0, **kwargs)
             else:
-                _logger.debug("[workflow %s] Job is input %s", self.name, json.dumps(inputobj, indent=4))
+                _logger.debug("[job %s] job input %s", step.name, json.dumps(inputobj, indent=4))
                 inputobj = {k: valueFromFunc(k, v) for k,v in inputobj.items()}
-                _logger.debug("[workflow %s] Evaluated job input to %s", self.name, json.dumps(inputobj, indent=4))
+                _logger.debug("[job %s] evaluated job input to %s", step.name, json.dumps(inputobj, indent=4))
                 jobs = step.job(inputobj, basedir, callback, **kwargs)
 
             step.submitted = True
@@ -249,7 +250,7 @@ class WorkflowJob(object):
             step.completed = True
 
     def run(self, **kwargs):
-        _logger.debug("[workflow %s] starting", self.name)
+        _logger.debug("[%s] workflow starting", self.name)
 
     def job(self, joborder, basedir, output_callback, move_outputs=True, **kwargs):
         self.state = {}
@@ -274,19 +275,26 @@ class WorkflowJob(object):
         output_dirs = set()
 
         completed = 0
+        iterables = []
         while completed < len(self.steps) and self.processStatus == "success":
             made_progress = False
-            completed = 0
+
             for step in self.steps:
-                if step.completed:
-                    completed += 1
-                else:
-                    for newjob in self.try_make_job(step, basedir, **kwargs):
+                if not step.submitted:
+                    step.iterable = self.try_make_job(step, basedir, **kwargs)
+
+                if step.iterable:
+                    for newjob in step.iterable:
                         if newjob:
                             made_progress = True
                             if newjob.outdir:
                                 output_dirs.add(newjob.outdir)
-                        yield newjob
+                            yield newjob
+                        else:
+                            break
+
+            completed = sum(1 for s in self.steps if s.completed)
+
             if not made_progress and completed < len(self.steps):
                 yield None
 
@@ -324,17 +332,17 @@ class WorkflowJob(object):
                         dirname = os.path.dirname(dst)
                         if not os.path.exists(dirname):
                             os.makedirs(dirname)
-                        _logger.debug("[workflow %s] Moving '%s' to '%s'", self.name, src, dst)
+                        _logger.debug("[%s] Moving '%s' to '%s'", self.name, src, dst)
                         shutil.move(src, dst)
                         f["path"] = dst
 
             for a in output_dirs:
                 if os.path.exists(a) and empty_subtree(a):
                     if kwargs.get("rm_tmpdir", True):
-                        _logger.debug("[workflow %s] Removing intermediate output directory %s", self.name, a)
+                        _logger.debug("[%s] Removing intermediate output directory %s", self.name, a)
                         shutil.rmtree(a, True)
 
-        _logger.info("[workflow %s] outdir is %s", self.name, self.outdir)
+        _logger.info("[%s] outdir is %s", self.name, self.outdir)
 
         output_callback(wo, self.processStatus)
 
@@ -422,7 +430,7 @@ class WorkflowStep(Process):
             inp_map = {i["id"]: i for i in inputparms}
             for s in scatter:
                 if s not in inp_map:
-                    raise WorkflowException("Invalid Scatter parameter '%s'" % s)
+                    raise WorkflowException("Scatter parameter '%s' does not correspond to an input parameter of this step, inputs are %s" % (s, inp_map.keys()))
 
                 inp_map[s]["type"] = {"type": "array", "items": inp_map[s]["type"]}
 
@@ -464,6 +472,7 @@ class WorkflowStep(Process):
                                             **kwargs):
                 yield t
         except WorkflowException:
+            _logger.error("Exception on step '%s'", kwargs.get("name"))
             raise
         except Exception as e:
             _logger.exception("Unexpected exception")
diff --git a/ez_setup.py b/ez_setup.py
new file mode 100644
index 0000000..f5fa741
--- /dev/null
+++ b/ez_setup.py
@@ -0,0 +1,332 @@
+#!/usr/bin/env python
+"""Bootstrap setuptools installation
+
+To use setuptools in your package's setup.py, include this
+file in the same directory and add this to the top of your setup.py::
+
+    from ez_setup import use_setuptools
+    use_setuptools()
+
+To require a specific version of setuptools, set a download
+mirror, or use an alternate download directory, simply supply
+the appropriate options to ``use_setuptools()``.
+
+This file can also be run as a script to install or upgrade setuptools.
+"""
+import os
+import shutil
+import sys
+import tempfile
+import zipfile
+import optparse
+import subprocess
+import platform
+import textwrap
+import contextlib
+
+from distutils import log
+
+try:
+    from site import USER_SITE
+except ImportError:
+    USER_SITE = None
+
+DEFAULT_VERSION = "3.4.1"
+DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
+
+def _python_cmd(*args):
+    """
+    Return True if the command succeeded.
+    """
+    args = (sys.executable,) + args
+    return subprocess.call(args) == 0
+
+
+def _install(archive_filename, install_args=()):
+    with archive_context(archive_filename):
+        # installing
+        log.warn('Installing Setuptools')
+        if not _python_cmd('setup.py', 'install', *install_args):
+            log.warn('Something went wrong during the installation.')
+            log.warn('See the error message above.')
+            # exitcode will be 2
+            return 2
+
+
+def _build_egg(egg, archive_filename, to_dir):
+    with archive_context(archive_filename):
+        # building an egg
+        log.warn('Building a Setuptools egg in %s', to_dir)
+        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
+    # returning the result
+    log.warn(egg)
+    if not os.path.exists(egg):
+        raise IOError('Could not build the egg.')
+
+
+def get_zip_class():
+    """
+    Supplement ZipFile class to support context manager for Python 2.6
+    """
+    class ContextualZipFile(zipfile.ZipFile):
+        def __enter__(self):
+            return self
+        def __exit__(self, type, value, traceback):
+            self.close()
+    return zipfile.ZipFile if hasattr(zipfile.ZipFile, '__exit__') else \
+        ContextualZipFile
+
+
+@contextlib.contextmanager
+def archive_context(filename):
+    # extracting the archive
+    tmpdir = tempfile.mkdtemp()
+    log.warn('Extracting in %s', tmpdir)
+    old_wd = os.getcwd()
+    try:
+        os.chdir(tmpdir)
+        with get_zip_class()(filename) as archive:
+            archive.extractall()
+
+        # going in the directory
+        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
+        os.chdir(subdir)
+        log.warn('Now working in %s', subdir)
+        yield
+
+    finally:
+        os.chdir(old_wd)
+        shutil.rmtree(tmpdir)
+
+
+def _do_download(version, download_base, to_dir, download_delay):
+    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
+                       % (version, sys.version_info[0], sys.version_info[1]))
+    if not os.path.exists(egg):
+        archive = download_setuptools(version, download_base,
+                                      to_dir, download_delay)
+        _build_egg(egg, archive, to_dir)
+    sys.path.insert(0, egg)
+
+    # Remove previously-imported pkg_resources if present (see
+    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
+    if 'pkg_resources' in sys.modules:
+        del sys.modules['pkg_resources']
+
+    import setuptools
+    setuptools.bootstrap_install_from = egg
+
+
+def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+        to_dir=os.curdir, download_delay=15):
+    to_dir = os.path.abspath(to_dir)
+    rep_modules = 'pkg_resources', 'setuptools'
+    imported = set(sys.modules).intersection(rep_modules)
+    try:
+        import pkg_resources
+    except ImportError:
+        return _do_download(version, download_base, to_dir, download_delay)
+    try:
+        pkg_resources.require("setuptools>=" + version)
+        return
+    except pkg_resources.DistributionNotFound:
+        return _do_download(version, download_base, to_dir, download_delay)
+    except pkg_resources.VersionConflict as VC_err:
+        if imported:
+            msg = textwrap.dedent("""
+                The required version of setuptools (>={version}) is not available,
+                and can't be installed while this script is running. Please
+                install a more recent version first, using
+                'easy_install -U setuptools'.
+
+                (Currently using {VC_err.args[0]!r})
+                """).format(VC_err=VC_err, version=version)
+            sys.stderr.write(msg)
+            sys.exit(2)
+
+        # otherwise, reload ok
+        del pkg_resources, sys.modules['pkg_resources']
+        return _do_download(version, download_base, to_dir, download_delay)
+
+def _clean_check(cmd, target):
+    """
+    Run the command to download target. If the command fails, clean up before
+    re-raising the error.
+    """
+    try:
+        subprocess.check_call(cmd)
+    except subprocess.CalledProcessError:
+        if os.access(target, os.F_OK):
+            os.unlink(target)
+        raise
+
+def download_file_powershell(url, target):
+    """
+    Download the file at url to target using Powershell (which will validate
+    trust). Raise an exception if the command cannot complete.
+    """
+    target = os.path.abspath(target)
+    cmd = [
+        'powershell',
+        '-Command',
+        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
+    ]
+    _clean_check(cmd, target)
+
+def has_powershell():
+    if platform.system() != 'Windows':
+        return False
+    cmd = ['powershell', '-Command', 'echo test']
+    devnull = open(os.path.devnull, 'wb')
+    try:
+        try:
+            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+        except Exception:
+            return False
+    finally:
+        devnull.close()
+    return True
+
+download_file_powershell.viable = has_powershell
+
+def download_file_curl(url, target):
+    cmd = ['curl', url, '--silent', '--output', target]
+    _clean_check(cmd, target)
+
+def has_curl():
+    cmd = ['curl', '--version']
+    devnull = open(os.path.devnull, 'wb')
+    try:
+        try:
+            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+        except Exception:
+            return False
+    finally:
+        devnull.close()
+    return True
+
+download_file_curl.viable = has_curl
+
+def download_file_wget(url, target):
+    cmd = ['wget', url, '--quiet', '--output-document', target]
+    _clean_check(cmd, target)
+
+def has_wget():
+    cmd = ['wget', '--version']
+    devnull = open(os.path.devnull, 'wb')
+    try:
+        try:
+            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
+        except Exception:
+            return False
+    finally:
+        devnull.close()
+    return True
+
+download_file_wget.viable = has_wget
+
+def download_file_insecure(url, target):
+    """
+    Use Python to download the file, even though it cannot authenticate the
+    connection.
+    """
+    try:
+        from urllib.request import urlopen
+    except ImportError:
+        from urllib2 import urlopen
+    src = dst = None
+    try:
+        src = urlopen(url)
+        # Read/write all in one block, so we don't create a corrupt file
+        # if the download is interrupted.
+        data = src.read()
+        dst = open(target, "wb")
+        dst.write(data)
+    finally:
+        if src:
+            src.close()
+        if dst:
+            dst.close()
+
+download_file_insecure.viable = lambda: True
+
+def get_best_downloader():
+    downloaders = [
+        download_file_powershell,
+        download_file_curl,
+        download_file_wget,
+        download_file_insecure,
+    ]
+
+    for dl in downloaders:
+        if dl.viable():
+            return dl
+
+def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+        to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
+    """
+    Download setuptools from a specified location and return its filename
+
+    `version` should be a valid setuptools version number that is available
+    as an egg for download under the `download_base` URL (which should end
+    with a '/'). `to_dir` is the directory where the egg will be downloaded.
+    `delay` is the number of seconds to pause before an actual download
+    attempt.
+
+    ``downloader_factory`` should be a function taking no arguments and
+    returning a function for downloading a URL to a target.
+    """
+    # making sure we use the absolute path
+    to_dir = os.path.abspath(to_dir)
+    zip_name = "setuptools-%s.zip" % version
+    url = download_base + zip_name
+    saveto = os.path.join(to_dir, zip_name)
+    if not os.path.exists(saveto):  # Avoid repeated downloads
+        log.warn("Downloading %s", url)
+        downloader = downloader_factory()
+        downloader(url, saveto)
+    return os.path.realpath(saveto)
+
+def _build_install_args(options):
+    """
+    Build the arguments to 'python setup.py install' on the setuptools package
+    """
+    return ['--user'] if options.user_install else []
+
+def _parse_args():
+    """
+    Parse the command line for options
+    """
+    parser = optparse.OptionParser()
+    parser.add_option(
+        '--user', dest='user_install', action='store_true', default=False,
+        help='install in user site package (requires Python 2.6 or later)')
+    parser.add_option(
+        '--download-base', dest='download_base', metavar="URL",
+        default=DEFAULT_URL,
+        help='alternative URL from where to download the setuptools package')
+    parser.add_option(
+        '--insecure', dest='downloader_factory', action='store_const',
+        const=lambda: download_file_insecure, default=get_best_downloader,
+        help='Use internal, non-validating downloader'
+    )
+    parser.add_option(
+        '--version', help="Specify which version to download",
+        default=DEFAULT_VERSION,
+    )
+    options, args = parser.parse_args()
+    # positional arguments are ignored
+    return options
+
+def main():
+    """Install or upgrade setuptools and EasyInstall"""
+    options = _parse_args()
+    archive = download_setuptools(
+        version=options.version,
+        download_base=options.download_base,
+        downloader_factory=options.downloader_factory,
+    )
+    return _install(archive, _build_install_args(options))
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/gittaggers.py b/gittaggers.py
new file mode 100644
index 0000000..05ce123
--- /dev/null
+++ b/gittaggers.py
@@ -0,0 +1,23 @@
+from setuptools.command.egg_info import egg_info
+import subprocess
+import time
+
+class EggInfoFromGit(egg_info):
+    """Tag the build with git commit timestamp.
+
+    If a build tag has already been set (e.g., "egg_info -b", building
+    from source package), leave it alone.
+    """
+    def git_timestamp_tag(self):
+        gitinfo = subprocess.check_output(
+            ['git', 'log', '--first-parent', '--max-count=1',
+             '--format=format:%ct', '.']).strip()
+        return time.strftime('.%Y%m%d%H%M%S', time.gmtime(int(gitinfo)))
+
+    def tags(self):
+        if self.tag_build is None:
+            try:
+                self.tag_build = self.git_timestamp_tag()
+            except subprocess.CalledProcessError:
+                pass
+        return egg_info.tags(self)
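
As a worked example of the timestamp tag: the commit time that produced this release's version suffix is 1458160854 seconds after the epoch, i.e. 2016-03-16 20:40:54 UTC:

    import time
    time.strftime('.%Y%m%d%H%M%S', time.gmtime(1458160854))
    # -> '.20160316204054', matching the tag_build value in setup.cfg below
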
diff --git a/setup.cfg b/setup.cfg
index c70c7be..bd7adb4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,5 +1,5 @@
 [egg_info]
-tag_build = .20160209222805
+tag_build = .20160316204054
 tag_date = 0
 tag_svn_revision = 0
 
diff --git a/setup.py b/setup.py
index dd5e8fe..6529439 100644
--- a/setup.py
+++ b/setup.py
@@ -2,9 +2,12 @@
 
 import os
 import sys
-import setuptools.command.egg_info as egg_info_cmd
 import shutil
 
+import ez_setup
+ez_setup.use_setuptools()
+
+import setuptools.command.egg_info as egg_info_cmd
 from setuptools import setup, find_packages
 
 SETUP_DIR = os.path.dirname(__file__)
@@ -36,7 +39,7 @@ setup(name='cwltool',
           'rdflib >= 4.2.0',
           'rdflib-jsonld >= 0.3.0',
           'shellescape',
-          'schema_salad == 1.6.20160202222448'
+          'schema_salad == 1.7.20160316203940'
         ],
       test_suite='tests',
       tests_require=[],

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/cwltool.git
