[med-svn] [cwltool] 01/01: New upstream version 1.0.20170803160545

Michael Crusoe misterc-guest at moszumanska.debian.org
Fri Aug 11 12:07:29 UTC 2017


This is an automated email from the git hooks/post-receive script.

misterc-guest pushed a commit to annotated tag upstream/1.0.20170803160545
in repository cwltool.

commit cd43a05acb2b9385a1d520cada534843140e921f
Author: Michael R. Crusoe <michael.crusoe at gmail.com>
Date:   Fri Aug 4 05:02:56 2017 -0700

    New upstream version 1.0.20170803160545
---
 MANIFEST.in                                        |  23 +-
 Makefile                                           |  51 +-
 PKG-INFO                                           | 448 ++++++++++++++-
 README.rst                                         | 415 +++++++++++++-
 cwltool.egg-info/PKG-INFO                          | 448 ++++++++++++++-
 cwltool.egg-info/SOURCES.txt                       |  77 ++-
 cwltool.egg-info/requires.txt                      |  16 +-
 cwltool.py                                         |   2 +
 cwltool/__init__.py                                |   1 +
 cwltool/__main__.py                                |   4 +-
 cwltool/builder.py                                 |  68 ++-
 cwltool/cwlNodeEngine.js                           |   6 +-
 cwltool/cwlrdf.py                                  |  26 +-
 cwltool/docker.py                                  |  40 +-
 cwltool/{docker_uid.py => docker_id.py}            |  39 +-
 cwltool/draft2tool.py                              | 392 ++++++++-----
 cwltool/errors.py                                  |   1 +
 cwltool/expression.py                              |  74 ++-
 cwltool/extensions.yml                             |  36 ++
 cwltool/factory.py                                 |  25 +-
 cwltool/flatten.py                                 |   1 +
 cwltool/job.py                                     | 381 ++++++++-----
 cwltool/load_tool.py                               | 198 ++++---
 cwltool/main.py                                    | 619 ++++++++++++++-------
 cwltool/mutation.py                                |  69 +++
 cwltool/pack.py                                    | 144 +++--
 cwltool/pathmapper.py                              | 179 +++---
 cwltool/process.py                                 | 412 +++++++++-----
 cwltool/resolver.py                                |  32 +-
 cwltool/sandboxjs.py                               | 249 +++++++--
 cwltool/schemas/draft-3/Workflow.yml               |   1 +
 cwltool/schemas/v1.0/CommandLineTool.yml           |  50 +-
 cwltool/schemas/v1.0/Process.yml                   | 190 ++++++-
 cwltool/schemas/v1.0/UserGuide.yml                 | 148 ++++-
 cwltool/schemas/v1.0/Workflow.yml                  |  68 ++-
 cwltool/schemas/v1.0/concepts.md                   |   4 +-
 .../v1.0/salad/schema_salad/metaschema/map_res.yml |  36 ++
 .../salad/schema_salad/metaschema/map_res_proc.yml |  12 +
 .../schema_salad/metaschema/map_res_schema.yml     |  30 +
 .../salad/schema_salad/metaschema/map_res_src.yml  |   8 +
 .../salad/schema_salad/metaschema/metaschema.yml   |  13 +
 .../schema_salad/metaschema/metaschema_base.yml    |   7 +
 .../v1.0/salad/schema_salad/metaschema/salad.md    |  19 +-
 .../salad/schema_salad/metaschema/typedsl_res.yml  |  33 ++
 .../schema_salad/metaschema/typedsl_res_proc.yml   |  26 +
 .../schema_salad/metaschema/typedsl_res_schema.yml |  17 +
 .../schema_salad/metaschema/typedsl_res_src.yml    |   9 +
 cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml    |  10 +-
 cwltool/schemas/v1.1.0-dev1/Process.yml            |  18 +-
 cwltool/schemas/v1.1.0-dev1/UserGuide.yml          |  28 +-
 cwltool/schemas/v1.1.0-dev1/Workflow.yml           |  21 +-
 cwltool/schemas/v1.1.0-dev1/concepts.md            |   4 +-
 cwltool/software_requirements.py                   | 122 ++++
 cwltool/stdfsaccess.py                             |  45 +-
 cwltool/update.py                                  |  70 ++-
 cwltool/utils.py                                   | 158 +++++-
 cwltool/workflow.py                                | 506 ++++++++++++-----
 ez_setup.py                                        | 391 -------------
 gittaggers.py                                      |   4 +-
 setup.cfg                                          |  13 +-
 setup.py                                           |  71 ++-
 tests/2.fasta                                      |  11 +
 tests/2.fastq                                      |  12 +
 tests/__init__.py                                  |   0
 tests/echo-cwlrun-job.yaml                         |   6 +
 tests/echo-job.yaml                                |   5 +
 tests/echo.cwl                                     |  15 +
 tests/listing-job.yml                              |   3 +
 tests/random_lines.cwl                             |  29 +
 tests/random_lines_job.json                        |   8 +
 tests/random_lines_mapping.cwl                     |  29 +
 tests/seqtk_seq.cwl                                |  24 +
 tests/seqtk_seq_job.json                           |   6 +
 tests/seqtk_seq_with_docker.cwl                    |  26 +
 tests/seqtk_seq_wrong_name.cwl                     |  27 +
 tests/test_bad_outputs_wf.cwl                      |  33 ++
 tests/test_check.py                                |  22 +
 tests/test_cwl_version.py                          |  14 +
 tests/test_default_path.py                         |  15 +
 tests/test_deps_env_resolvers_conf.yml             |   3 +
 tests/test_deps_env_resolvers_conf_rewrite.yml     |   3 +
 tests/test_deps_mapping.yml                        |   6 +
 tests/test_docker_warning.py                       |  25 +
 tests/test_examples.py                             | 520 +++++++++++++++++
 tests/test_ext.py                                  | 149 +++++
 tests/test_fetch.py                                |  54 ++
 tests/test_js_sandbox.py                           |  35 ++
 tests/test_pack.py                                 |  35 ++
 tests/test_pathmapper.py                           |  56 ++
 tests/test_relax_path_checks.py                    |  46 ++
 tests/test_toolargparse.py                         | 117 ++++
 tests/tmp1/tmp2/tmp3/.gitkeep                      |   0
 tests/util.py                                      |  19 +
 tests/wf/badout1.cwl                               |  13 +
 tests/wf/badout2.cwl                               |  13 +
 tests/wf/badout3.cwl                               |  13 +
 tests/wf/cat.cwl                                   |   6 +
 tests/wf/default_path.cwl                          |  11 +
 tests/wf/echo.cwl                                  |  24 +
 tests/wf/empty.ttl                                 |   0
 tests/wf/expect_packed.cwl                         | 130 +++++
 tests/wf/hello.txt                                 |   6 +
 tests/wf/listing_deep.cwl                          |  12 +
 tests/wf/listing_none.cwl                          |  12 +
 tests/wf/listing_shallow.cwl                       |  12 +
 tests/wf/listing_v1_0.cwl                          |   7 +
 tests/wf/missing_cwlVersion.cwl                    |  31 ++
 tests/wf/mut.cwl                                   |  16 +
 tests/wf/mut2.cwl                                  |  19 +
 tests/wf/mut3.cwl                                  |  21 +
 tests/wf/revsort-job.json                          |   6 +
 tests/wf/revsort.cwl                               |  68 +++
 tests/wf/revtool.cwl                               |  39 ++
 tests/wf/scatterfail.cwl                           |  39 ++
 tests/wf/sorttool.cwl                              |  35 ++
 tests/wf/updatedir.cwl                             |  16 +
 tests/wf/updatedir_inplace.cwl                     |  20 +
 tests/wf/updateval.cwl                             |  20 +
 tests/wf/updateval.py                              |   6 +
 tests/wf/updateval_inplace.cwl                     |  24 +
 tests/wf/wffail.cwl                                |  38 ++
 tests/wf/wrong_cwlVersion.cwl                      |  32 ++
 122 files changed, 6945 insertions(+), 1705 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 051df48..2808c92 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1,22 @@
-include gittaggers.py ez_setup.py Makefile cwltool.py
+include gittaggers.py Makefile cwltool.py
+include tests/*
+include tests/tmp1/tmp2/tmp3/.gitkeep
+include tests/wf/*
+include cwltool/schemas/v1.0/*.yml
+include cwltool/schemas/draft-2/*.yml
+include cwltool/schemas/draft-3/*.yml
+include cwltool/schemas/draft-3/*.md
+include cwltool/schemas/draft-3/salad/schema_salad/metaschema/*.yml
+include cwltool/schemas/draft-3/salad/schema_salad/metaschema/*.md
+include cwltool/schemas/v1.0/*.yml
+include cwltool/schemas/v1.0/*.md
+include cwltool/schemas/v1.0/salad/schema_salad/metaschema/*.yml
+include cwltool/schemas/v1.0/salad/schema_salad/metaschema/*.md
+include cwltool/schemas/v1.1.0-dev1/*.yml
+include cwltool/schemas/v1.1.0-dev1/*.md
+include cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml
+include cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md
+include cwltool/cwlNodeEngine.js
+include cwltool/extensions.yml
+global-exclude *~
+global-exclude *.pyc
diff --git a/Makefile b/Makefile
index 6544feb..2b6a380 100644
--- a/Makefile
+++ b/Makefile
@@ -26,8 +26,8 @@ MODULE=cwltool
 # `SHELL=bash` doesn't work for some, so don't use BASH-isms like
 # `[[` conditional expressions.
 PYSOURCES=$(wildcard ${MODULE}/**.py tests/*.py) setup.py
-DEVPKGS=pep8 diff_cover autopep8 pylint coverage pep257 flake8
-DEBDEVPKGS=pep8 python-autopep8 pylint python-coverage pep257 sloccount python-flake8
+DEVPKGS=pep8 diff_cover autopep8 pylint coverage pydocstyle flake8 pytest isort mock
+DEBDEVPKGS=pep8 python-autopep8 pylint python-coverage pydocstyle sloccount python-flake8 python-mock
 VERSION=1.0.$(shell date +%Y%m%d%H%M%S --date=`git log --first-parent \
 	--max-count=1 --format=format:%cI`)
 mkfile_dir := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
@@ -50,7 +50,7 @@ install-deb-dep:
 
 ## install     : install the ${MODULE} module and schema-salad-tool
 install: FORCE
-	./setup.py build install
+	pip install .
 
 ## dist        : create a module package for distribution
 dist: dist/${MODULE}-$(VERSION).tar.gz
@@ -65,6 +65,11 @@ clean: FORCE
 	rm -Rf .coverage
 	rm -f diff-cover.html
 
+# Linting and code style related targets
+## sort_imports : sort imports using isort (https://github.com/timothycrosley/isort)
+sort_imports:
+	isort ${MODULE}/*.py tests/*.py setup.py
+
 ## pep8        : check Python code style
 pep8: $(PYSOURCES)
 	pep8 --exclude=_version.py  --show-source --show-pep8 $^ || true
@@ -75,15 +80,16 @@ pep8_report.txt: $(PYSOURCES)
 diff_pep8_report: pep8_report.txt
 	diff-quality --violations=pep8 pep8_report.txt
 
-## pep257      : check Python code style
-pep257: $(PYSOURCES)
-	pep257 --ignore=D100,D101,D102,D103 $^ || true
+pep257: pydocstyle
+## pydocstyle      : check Python code style
+pydocstyle: $(PYSOURCES)
+	pydocstyle --ignore=D100,D101,D102,D103 $^ || true
 
-pep257_report.txt: $(PYSOURCES)
-	pep257 setup.py $^ > pep257_report.txt 2>&1 || true
+pydocstyle_report.txt: $(PYSOURCES)
+	pydocstyle setup.py $^ > pydocstyle_report.txt 2>&1 || true
 
-diff_pep257_report: pep257_report.txt
-	diff-quality --violations=pep8 pep257_report.txt
+diff_pydocstyle_report: pydocstyle_report.txt
+	diff-quality --violations=pep8 $^
 
 ## autopep8    : fix most Python code indentation and formatting
 autopep8: $(PYSOURCES)
@@ -147,15 +153,26 @@ list-author-emails:
 	@git log --format='%aN,%aE' | sort -u | grep -v 'root'
 
 
-mypy: ${PYSOURCES}
-	rm -Rf typeshed/2.7/ruamel/yaml
+mypy2: ${PYSOURCES}
+	rm -Rf typeshed/2and3/ruamel/yaml
 	ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
-		typeshed/2.7/ruamel/yaml
-	rm -Rf typeshed/2.7/schema_salad
+		typeshed/2and3/ruamel/yaml
+	rm -Rf typeshed/2and3/schema_salad
 	ln -s $(shell python -c 'from __future__ import print_function; import schema_salad; import os.path; print(os.path.dirname(schema_salad.__file__))') \
-		typeshed/2.7/schema_salad
-	MYPYPATH=typeshed/2.7 mypy --py2 --disallow-untyped-calls \
-		 --warn-redundant-casts --warn-unused-ignores --fast-parser \
+		typeshed/2and3/schema_salad
+	MYPYPATH=$$MYPYPATH:typeshed/2.7:typeshed/2and3 mypy --py2 --disallow-untyped-calls \
+		 --warn-redundant-casts \
+		 cwltool
+
+mypy3: ${PYSOURCES}
+	rm -Rf typeshed/2and3/ruamel/yaml
+	ln -s $(shell python3 -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
+		typeshed/2and3/ruamel/yaml
+	rm -Rf typeshed/2and3/schema_salad
+	ln -s $(shell python3 -c 'from __future__ import print_function; import schema_salad; import os.path; print(os.path.dirname(schema_salad.__file__))') \
+		typeshed/2and3/schema_salad
+	MYPYPATH=$$MYPYPATH:typeshed/3:typeshed/2and3 mypy --disallow-untyped-calls \
+		 --warn-redundant-casts \
 		 cwltool
 
 FORCE:
diff --git a/PKG-INFO b/PKG-INFO
index eea3cfe..e5d73f5 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,11 +1,11 @@
 Metadata-Version: 1.1
 Name: cwltool
-Version: 1.0.20170114120503
+Version: 1.0.20170803160545
 Summary: Common workflow language reference implementation
 Home-page: https://github.com/common-workflow-language/cwltool
 Author: Common workflow language working group
 Author-email: common-workflow-language at googlegroups.com
-License: Apache 2.0
+License: UNKNOWN
 Download-URL: https://github.com/common-workflow-language/cwltool
 Description: ==================================================================
         Common workflow language tool description reference implementation
@@ -13,6 +13,11 @@ Description: ==================================================================
         
         CWL Conformance test: |Build Status|
         
+        Travis: |Unix Build Status|
+        
+        .. |Unix Build Status| image:: https://img.shields.io/travis/common-workflow-language/cwltool/master.svg?label=unix%20build
+           :target: https://travis-ci.org/common-workflow-language/cwltool
+        
         This is the reference implementation of the Common Workflow Language.  It is
         intended to be feature complete and provide comprehensive validation of CWL
         files as well as provide other tools related to working with CWL.
@@ -35,7 +40,7 @@ Description: ==================================================================
         
           pip install cwlref-runner
         
-        If installling alongside another CWL implementation then::
+        If installing alongside another CWL implementation then::
         
           pip install cwltool
         
@@ -47,7 +52,24 @@ Description: ==================================================================
         
         Remember, if co-installing multiple CWL implementations then you need to
         maintain which implementation ``cwl-runner`` points to via a symbolic file
-        system link or [another facility](https://wiki.debian.org/DebianAlternatives).
+        system link or `another facility <https://wiki.debian.org/DebianAlternatives>`_.
+        
+        Running tests locally
+        ---------------------
+        
+        -  Running basic tests (in ``/tests``):
+        
+        .. code:: bash
+        
+            python setup.py test
+        
+        -  Running the entire suite of CWL conformance tests:
+        
+        The GitHub repository for the CWL specifications contains a script that tests a CWL
+        implementation against a wide array of valid CWL files using the `cwltest <https://github.com/common-workflow-language/cwltest>`_
+        program.
+        
+        Instructions for running these tests can be found in the Common Workflow Language Specification repository at https://github.com/common-workflow-language/common-workflow-language/blob/master/CONFORMANCE_TESTS.md
         
         Run on the command line
         -----------------------
@@ -61,19 +83,10 @@ Description: ==================================================================
         
           cwltool [tool-or-workflow-description] [input-job-settings]
         
-        Import as a module
-        ----------------
-        
-        Add::
-        
-          import cwltool
-        
-        to your script.
-        
         Use with boot2docker
         --------------------
         boot2docker is running docker inside a virtual machine and it only mounts ``Users``
-        on it. The default behavoir of CWL is to create temporary directories under e.g.
+        on it. The default behavior of CWL is to create temporary directories under e.g.
         ``/Var`` which is not accessible to Docker containers.
         
         To run CWL successfully with boot2docker you need to set the ``--tmpdir-prefix``
@@ -96,4 +109,411 @@ Description: ==================================================================
         following locations will be searched:
         http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
         
+        
+        Use with GA4GH Tool Registry API
+        --------------------------------
+        
+        Cwltool can launch tools directly from `GA4GH Tool Registry API`_ endpoints.
+        
+        By default, cwltool searches https://dockstore.org/ .  Use ``--add-tool-registry`` to add other registries to the search path.
+        
+        For example ::
+        
+          cwltool --non-strict quay.io/collaboratory/dockstore-tool-bamstats:master test.json
+        
+        and (defaults to latest when a version is not specified) ::
+        
+          cwltool --non-strict quay.io/collaboratory/dockstore-tool-bamstats test.json
+        
+        For this example, grab the test.json (and input file) from https://github.com/CancerCollaboratory/dockstore-tool-bamstats
+        
+        .. _`GA4GH Tool Registry API`: https://github.com/ga4gh/tool-registry-schemas
+        
+        Import as a module
+        ------------------
+        
+        Add::
+        
+          import cwltool
+        
+        to your script.
+        
+        The easiest way to use cwltool to run a tool or workflow from Python is to use a Factory::
+        
+          import cwltool.factory
+          fac = cwltool.factory.Factory()
+        
+          echo = fac.make("echo.cwl")
+          result = echo(inp="foo")
+        
+          # result["out"] == "foo"
+        
+        Leveraging SoftwareRequirements (Beta)
+        --------------------------------------
+        
+        CWL tools may be decorated with ``SoftwareRequirement`` hints that cwltool
+        may in turn use to resolve to packages in various package managers or
+        dependency management systems such as `Environment Modules
+        <http://modules.sourceforge.net/>`__.
+        
+        Utilizing ``SoftwareRequirement`` hints with cwltool requires an optional
+        dependency; for this reason, be sure to specify the ``deps`` modifier when
+        installing cwltool. For instance::
+        
+          $ pip install 'cwltool[deps]'
+        
+        Installing cwltool in this fashion enables several new command line options.
+        The most general of these options is ``--beta-dependency-resolvers-configuration``.
+        This option allows one to specify a dependency resolvers configuration file.
+        This file may be specified as either XML or YAML and very simply describes various
+        plugins to enable to "resolve" ``SoftwareRequirement`` dependencies.
+        
+        To discuss some of these plugins and how to configure them, first consider the
+        following ``hint`` definition for an example CWL tool.
+        
+        .. code:: yaml
+        
+          SoftwareRequirement:
+            packages:
+            - package: seqtk
+              version:
+              - r93
+        
+        Now imagine deploying cwltool on a cluster with Software Modules installed
+        and that a ``seqtk`` module is available at version ``r93``. This means cluster
+        users likely won't have the ``seqtk`` binary on their ``PATH`` by default, but after
+        sourcing this module with the command ``modulecmd sh load seqtk/r93``, ``seqtk`` is
+        available on the ``PATH``. A simple dependency resolvers configuration file, called
+        ``dependency-resolvers-conf.yml`` for instance, that would enable cwltool to source
+        the correct module environment before executing the above tool would simply be:
+        
+        .. code:: yaml
+        
+          - type: module
+        
+        The outer list indicates that one plugin is being enabled; the plugin parameters are
+        defined as a dictionary for this one list item. There is only one required parameter
+        for the plugin above: ``type``, which defines the plugin type. This parameter
+        is required for all plugins. The available plugins and the parameters
+        available for each are documented (incompletely) `here
+        <https://docs.galaxyproject.org/en/latest/admin/dependency_resolvers.html>`__.
+        Unfortunately, this documentation is in the context of Galaxy tool ``requirement`` s instead of CWL ``SoftwareRequirement`` s, but the concepts map fairly directly.
+        
+        cwltool is distributed with an example of such a seqtk tool and a sample corresponding
+        job. It can be executed from the cwltool root using a dependency resolvers
+        configuration file such as the one above with the command::
+        
+          cwltool --beta-dependency-resolvers-configuration /path/to/dependency-resolvers-conf.yml \
+              tests/seqtk_seq.cwl \
+              tests/seqtk_seq_job.json
+        
+        This example demonstrates that cwltool can both leverage
+        existing software installations and handle workflows with dependencies
+        on different versions of the same software and libraries. However, the above
+        example does require an existing module setup, so it cannot be tested
+        "out of the box" with cwltool. For a more isolated test that demonstrates all
+        the same concepts, the resolver plugin type ``galaxy_packages`` can be used.
+        
+        "Galaxy packages" are a lighter weight alternative to Environment Modules that are
+        really just defined by a way to lay out directories into packages and versions
+        to find little scripts that are sourced to modify the environment. They have
+        been used for years in Galaxy community to adapt Galaxy tools to cluster 
+        environments but require neither knowledge of Galaxy nor any special tools to 
+        setup. These should work just fine for CWL tools.
+        
+        The cwltool source code repository's test directory is set up with a very simple
+        directory that defines a set of "Galaxy packages" (really just one
+        package named ``random-lines``). The directory layout is simply::
+        
+          tests/test_deps_env/
+            random-lines/
+              1.0/
+                env.sh
+        
+        Suppose the ``galaxy_packages`` plugin is enabled and pointed at the
+        ``tests/test_deps_env`` directory in cwltool's root, and a ``SoftwareRequirement``
+        such as the following is encountered:
+        
+        .. code:: yaml
+        
+          hints:
+            SoftwareRequirement:
+              packages:
+              - package: 'random-lines'
+                version:
+                - '1.0'
+        
+        Then cwltool will simply find that ``env.sh`` file and source it before executing
+        the corresponding tool. That ``env.sh`` script is only responsible for modifying
+        the job's ``PATH`` to add the required binaries.
+        
+        This is a full example that works since resolving "Galaxy packages" has no 
+        external requirements. Try it out by executing the following command from cwltool's
+        root directory::
+        
+          cwltool --beta-dependency-resolvers-configuration tests/test_deps_env_resolvers_conf.yml \
+              tests/random_lines.cwl \
+              tests/random_lines_job.json
+        
+        The resolvers configuration file in the above example was simply:
+        
+        .. code:: yaml
+        
+          - type: galaxy_packages
+            base_path: ./tests/test_deps_env
+        
+        It is possible that the ``SoftwareRequirement`` s in a given CWL tool will not
+        match the module names for a given cluster. Such requirements can be re-mapped
+        to specific deployed packages and/or versions via another file, specified with
+        the resolver plugin parameter ``mapping_files``. We will
+        demonstrate this using ``galaxy_packages``, but the concepts apply equally well
+        to Environment Modules or Conda packages (described below), for instance.
+        
+        So consider the resolvers configuration file
+        (``tests/test_deps_env_resolvers_conf_rewrite.yml``):
+        
+        .. code:: yaml
+        
+          - type: galaxy_packages
+            base_path: ./tests/test_deps_env
+            mapping_files: ./tests/test_deps_mapping.yml
+        
+        And the corresponding mapping configuration file (``tests/test_deps_mapping.yml``):
+        
+        .. code:: yaml
+        
+          - from:
+              name: randomLines
+              version: 1.0.0-rc1
+            to:
+              name: random-lines
+              version: '1.0'
+        
+        This says that if cwltool encounters a requirement of ``randomLines`` at version
+        ``1.0.0-rc1`` in a tool, it should rewrite it for our specific plugin as ``random-lines`` at
+        version ``1.0``. cwltool has such a test tool called ``random_lines_mapping.cwl``
+        that contains such a source ``SoftwareRequirement``. To try out this example with
+        mapping, execute the following command from the cwltool root directory::
+        
+          cwltool --beta-dependency-resolvers-configuration tests/test_deps_env_resolvers_conf_rewrite.yml \
+              tests/random_lines_mapping.cwl \
+              tests/random_lines_job.json
+        
+        The previous examples demonstrated leveraging existing infrastructure to
+        provide requirements for CWL tools. If instead a real package manager is used,
+        cwltool has the opportunity to install requirements as needed. While initial
+        support for Homebrew/Linuxbrew plugins is available, the most developed such
+        plugin is for the `Conda <https://conda.io/docs/#>`__ package manager. Conda has the nice properties
+        of allowing multiple versions of a package to be installed simultaneously,
+        not requiring elevated permissions to install Conda itself or packages using
+        Conda, and being cross-platform. For these reasons, cwltool may run as a normal
+        user, install its own Conda environment and manage multiple versions of Conda packages
+        on both Linux and Mac OS X.
+        
+        The Conda plugin can be endlessly configured, but a sensible set of defaults
+        that has proven to be a powerful stack for dependency management within the Galaxy tool
+        development ecosystem can be enabled simply by passing cwltool the
+        ``--beta-conda-dependencies`` flag.
+        
+        With this, we can use the seqtk example above without Docker and without
+        any externally managed services; cwltool should install everything it needs
+        and create an environment for the tool. Try it out with the following command::
+        
+          cwltool --beta-conda-dependencies tests/seqtk_seq.cwl tests/seqtk_seq_job.json
+        
+        The CWL specification allows URIs to be attached to ``SoftwareRequirement`` s
+        to disambiguate package names. If the mapping files described above
+        allow deployers to adapt tools to their infrastructure, this mechanism allows
+        tools to adapt their requirements to multiple package managers. To demonstrate
+        this within the context of the seqtk example, we can deliberately break the package name we
+        use and then specify a specific Conda package as follows:
+        
+        .. code:: yaml
+        
+          hints:
+            SoftwareRequirement:
+              packages:
+              - package: seqtk_seq
+                version:
+                - '1.2'
+                specs:
+                - https://anaconda.org/bioconda/seqtk
+                - https://packages.debian.org/sid/seqtk
+        
+        The example can be executed using the command::
+        
+          cwltool --beta-conda-dependencies tests/seqtk_seq_wrong_name.cwl tests/seqtk_seq_job.json
+        
+        The plugin framework for managing resolution of these software requirements
+        is maintained as part of `galaxy-lib <https://github.com/galaxyproject/galaxy-lib>`__, a small, portable subset of the Galaxy
+        project. More information on configuration and implementation can be found
+        at the following links:
+        
+        - `Dependency Resolvers in Galaxy <https://docs.galaxyproject.org/en/latest/admin/dependency_resolvers.html>`__
+        - `Conda for [Galaxy] Tool Dependencies <https://docs.galaxyproject.org/en/latest/admin/conda_faq.html>`__
+        - `Mapping Files - Implementation <https://github.com/galaxyproject/galaxy/commit/495802d229967771df5b64a2f79b88a0eaf00edb>`__
+        - `Specifications - Implementation <https://github.com/galaxyproject/galaxy/commit/81d71d2e740ee07754785306e4448f8425f890bc>`__
+        - `Initial cwltool Integration Pull Request <https://github.com/common-workflow-language/cwltool/pull/214>`__
+        
+        Cwltool control flow
+        --------------------
+        
+        Technical outline of how cwltool works internally, for maintainers.
+        
+        #. Use CWL `load_tool()` to load the document.
+        
+           #. Fetches the document from file or URL
+           #. Applies preprocessing (syntax/identifier expansion and normalization)
+           #. Validates the document based on cwlVersion
+           #. If necessary, updates the document to the latest spec
+           #. Constructs a Process object using `make_tool()` callback.  This yields a
+              CommandLineTool, Workflow, or ExpressionTool.  For workflows, this
+              recursively constructs each workflow step.
+           #. To construct custom types for CommandLineTool, Workflow, or
+              ExpressionTool, provide a custom `make_tool()`.
+        
+        #. Iterate on the `job()` method of the Process object to get back runnable jobs.
+        
+           #. `job()` is a generator method (uses the Python iterator protocol)
+           #. Each time the `job()` method is invoked in an iteration, it returns one
+              of: a runnable item (an object with a `run()` method), `None` (indicating
+              there is currently no work ready to run) or end of iteration (indicating
+              the process is complete).
+           #. Invoke the runnable item by calling `run()`.  This runs the tool and gets output.
+           #. Output of a process is reported by an output callback.
+           #. `job()` may be iterated over multiple times.  It will yield all the work
+              that is currently ready to run and then yield None.
+        
+        #. "Workflow" objects create a corresponding "WorkflowJob" and "WorkflowJobStep" objects to hold the workflow state for the duration of the job invocation.
+        
+           #. The WorkflowJob iterates over each WorkflowJobStep and determines if the
+              inputs to the step are ready.
+           #. When a step is ready, it constructs an input object for that step and
+              iterates on the `job()` method of the workflow job step.
+           #. Each runnable item is yielded back up to the top-level run loop.
+           #. When a step job completes and receives an output callback, the
+              job outputs are assigned to the output of the workflow step.
+           #. When all steps are complete, the intermediate files are moved to a final
+              workflow output, intermediate directories are deleted, and the output
+              callback for the workflow is called.
+        
+        #. "CommandLineTool" job() objects yield a single runnable object.
+        
+           #. The CommandLineTool `job()` method calls `makeJobRunner()` to create a
+              `CommandLineJob` object.
+           #. The job method configures the CommandLineJob object by setting public
+              attributes.
+           #. The job method iterates over the file and directory inputs to the
+              CommandLineTool and creates a "path map".
+           #. Files are mapped from their "resolved" location to a "target" path where
+              they will appear at tool invocation (for example, a location inside a
+              Docker container). The target paths are used on the command line.
+           #. Files are staged to target paths using either Docker volume binds (when
+              using containers) or symlinks (if not).  This staging step enables files
+              to be logically rearranged or renamed independent of their source layout.
+           #. The run() method of CommandLineJob executes the command line tool or
+              Docker container, waits for it to complete, collects output, and makes
+              the output callback.
+        
+        
+        Extension points
+        ----------------
+        
+        The following functions can be provided to main(), to load_tool(), or to the
+        executor to override or augment the listed behaviors.
+        
+        executor
+          ::
+        
+            executor(tool, job_order_object, **kwargs)
+              (Process, Dict[Text, Any], **Any) -> Tuple[Dict[Text, Any], Text]
+        
+          A top-level workflow execution loop; it should synchronously execute a process
+          object and return an output object.
+        
+        makeTool
+          ::
+        
+            makeTool(toolpath_object, **kwargs)
+              (Dict[Text, Any], **Any) -> Process
+        
+          Construct a Process object from a document.
+        
+        selectResources
+          ::
+        
+            selectResources(request)
+              (Dict[Text, int]) -> Dict[Text, int]
+        
+          Take a resource request and turn it into a concrete resource assignment.
+        
+        versionfunc
+          ::
+        
+            ()
+              () -> Text
+        
+          Return version string.
+        
+        make_fs_access
+          ::
+        
+            make_fs_access(basedir)
+              (Text) -> StdFsAccess
+        
+          Return a file system access object.
+        
+        fetcher_constructor
+          ::
+        
+            fetcher_constructor(cache, session)
+              (Dict[unicode, unicode], requests.sessions.Session) -> Fetcher
+        
+          Construct a Fetcher object with the supplied cache and HTTP session.
+        
+        resolver
+          ::
+        
+            resolver(document_loader, document)
+              (Loader, Union[Text, dict[Text, Any]]) -> Text
+        
+          Resolve a relative document identifier to an absolute one which can be fetched.
+        
+        logger_handler
+          ::
+        
+            logger_handler
+              logging.Handler
+        
+          Handler object for logging.
+        
 Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Healthcare Industry
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Natural Language :: English
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: OS Independent
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: Microsoft :: Windows :: Windows 10
+Classifier: Operating System :: Microsoft :: Windows :: Windows 8.1
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Topic :: Scientific/Engineering :: Atmospheric Science
+Classifier: Topic :: Scientific/Engineering :: Information Analysis
+Classifier: Topic :: Scientific/Engineering :: Medical Science Apps.
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: Utilities
diff --git a/README.rst b/README.rst
index b351bc5..369504e 100644
--- a/README.rst
+++ b/README.rst
@@ -4,6 +4,11 @@ Common workflow language tool description reference implementation
 
 CWL Conformance test: |Build Status|
 
+Travis: |Unix Build Status|
+
+.. |Unix Build Status| image:: https://img.shields.io/travis/common-workflow-language/cwltool/master.svg?label=unix%20build
+   :target: https://travis-ci.org/common-workflow-language/cwltool
+
 This is the reference implementation of the Common Workflow Language.  It is
 intended to be feature complete and provide comprehensive validation of CWL
 files as well as provide other tools related to working with CWL.
@@ -26,7 +31,7 @@ well)::
 
   pip install cwlref-runner
 
-If installling alongside another CWL implementation then::
+If installing alongside another CWL implementation then::
 
   pip install cwltool
 
@@ -38,7 +43,24 @@ To install from source::
 
 Remember, if co-installing multiple CWL implementations then you need to
 maintain which implementation ``cwl-runner`` points to via a symbolic file
-system link or [another facility](https://wiki.debian.org/DebianAlternatives).
+system link or `another facility <https://wiki.debian.org/DebianAlternatives>`_.
+
+Running tests locally
+---------------------
+
+-  Running basic tests (in ``/tests``):
+
+.. code:: bash
+
+    python setup.py test
+
+-  Running the entire suite of CWL conformance tests:
+
+The GitHub repository for the CWL specifications contains a script that tests a CWL
+implementation against a wide array of valid CWL files using the `cwltest <https://github.com/common-workflow-language/cwltest>`_
+program.
+
+Instructions for running these tests can be found in the Common Workflow Language Specification repository at https://github.com/common-workflow-language/common-workflow-language/blob/master/CONFORMANCE_TESTS.md
 
 Run on the command line
 -----------------------
@@ -52,19 +74,10 @@ the default cwl-runner use::
 
   cwltool [tool-or-workflow-description] [input-job-settings]
 
-Import as a module
-----------------
-
-Add::
-
-  import cwltool
-
-to your script.
-
 Use with boot2docker
 --------------------
 boot2docker is running docker inside a virtual machine and it only mounts ``Users``
-on it. The default behavoir of CWL is to create temporary directories under e.g.
+on it. The default behavior of CWL is to create temporary directories under e.g.
 ``/Var`` which is not accessible to Docker containers.
 
 To run CWL successfully with boot2docker you need to set the ``--tmpdir-prefix``
@@ -86,3 +99,381 @@ documents using absolute or relative local filesytem paths. If a relative path
 is referenced and that document isn't found in the current directory then the
 following locations will be searched:
 http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
+
+
+Use with GA4GH Tool Registry API
+--------------------------------
+
+Cwltool can launch tools directly from `GA4GH Tool Registry API`_ endpoints.
+
+By default, cwltool searches https://dockstore.org/ .  Use ``--add-tool-registry`` to add other registries to the search path.
+
+For example ::
+
+  cwltool --non-strict quay.io/collaboratory/dockstore-tool-bamstats:master test.json
+
+and (defaults to latest when a version is not specified) ::
+
+  cwltool --non-strict quay.io/collaboratory/dockstore-tool-bamstats test.json
+
+For this example, grab the test.json (and input file) from https://github.com/CancerCollaboratory/dockstore-tool-bamstats
+
+.. _`GA4GH Tool Registry API`: https://github.com/ga4gh/tool-registry-schemas
+
+Import as a module
+------------------
+
+Add::
+
+  import cwltool
+
+to your script.
+
+The easiest way to use cwltool to run a tool or workflow from Python is to use a Factory::
+
+  import cwltool.factory
+  fac = cwltool.factory.Factory()
+
+  echo = fac.make("echo.cwl")
+  result = echo(inp="foo")
+
+  # result["out"] == "foo"
+
+Leveraging SoftwareRequirements (Beta)
+--------------------------------------
+
+CWL tools may be decorated with ``SoftwareRequirement`` hints that cwltool
+may in turn use to resolve to packages in various package managers or
+dependency management systems such as `Environment Modules
+<http://modules.sourceforge.net/>`__.
+
+Utilizing ``SoftwareRequirement`` hints with cwltool requires an optional
+dependency; for this reason, be sure to specify the ``deps`` modifier when
+installing cwltool. For instance::
+
+  $ pip install 'cwltool[deps]'
+
+Installing cwltool in this fashion enables several new command line options.
+The most general of these options is ``--beta-dependency-resolvers-configuration``.
+This option allows one to specify a dependency resolvers configuration file.
+This file may be specified as either XML or YAML and very simply describes various
+plugins to enable to "resolve" ``SoftwareRequirement`` dependencies.
+
+To discuss some of these plugins and how to configure them, first consider the
+following ``hint`` definition for an example CWL tool.
+
+.. code:: yaml
+
+  SoftwareRequirement:
+    packages:
+    - package: seqtk
+      version:
+      - r93
+
+Now imagine deploying cwltool on a cluster with Software Modules installed
+and that a ``seqtk`` module is available at version ``r93``. This means cluster
+users likely won't have the ``seqtk`` binary on their ``PATH`` by default, but after
+sourcing this module with the command ``modulecmd sh load seqtk/r93``, ``seqtk`` is
+available on the ``PATH``. A simple dependency resolvers configuration file, called
+``dependency-resolvers-conf.yml`` for instance, that would enable cwltool to source
+the correct module environment before executing the above tool would simply be:
+
+.. code:: yaml
+
+  - type: module
+
+The outer list indicates that one plugin is being enabled; the plugin parameters are
+defined as a dictionary for this one list item. There is only one required parameter
+for the plugin above: ``type``, which defines the plugin type. This parameter
+is required for all plugins. The available plugins and the parameters
+available for each are documented (incompletely) `here
+<https://docs.galaxyproject.org/en/latest/admin/dependency_resolvers.html>`__.
+Unfortunately, this documentation is in the context of Galaxy tool ``requirement`` s instead of CWL ``SoftwareRequirement`` s, but the concepts map fairly directly.
+
+cwltool is distributed with an example of such a seqtk tool and a sample corresponding
+job. It can be executed from the cwltool root using a dependency resolvers
+configuration file such as the one above with the command::
+
+  cwltool --beta-dependency-resolvers-configuration /path/to/dependency-resolvers-conf.yml \
+      tests/seqtk_seq.cwl \
+      tests/seqtk_seq_job.json
+
+This example demonstrates that cwltool can both leverage
+existing software installations and handle workflows with dependencies
+on different versions of the same software and libraries. However, the above
+example does require an existing module setup, so it cannot be tested
+"out of the box" with cwltool. For a more isolated test that demonstrates all
+the same concepts, the resolver plugin type ``galaxy_packages`` can be used.
+
+"Galaxy packages" are a lighter weight alternative to Environment Modules that are
+really just defined by a way to lay out directories into packages and versions
+to find little scripts that are sourced to modify the environment. They have
+been used for years in Galaxy community to adapt Galaxy tools to cluster 
+environments but require neither knowledge of Galaxy nor any special tools to 
+setup. These should work just fine for CWL tools.
+
+The cwltool source code repository's test directory is set up with a very simple
+directory that defines a set of "Galaxy packages" (really just one
+package named ``random-lines``). The directory layout is simply::
+
+  tests/test_deps_env/
+    random-lines/
+      1.0/
+        env.sh
+
+Suppose the ``galaxy_packages`` plugin is enabled and pointed at the
+``tests/test_deps_env`` directory in cwltool's root, and a ``SoftwareRequirement``
+such as the following is encountered:
+
+.. code:: yaml
+
+  hints:
+    SoftwareRequirement:
+      packages:
+      - package: 'random-lines'
+        version:
+        - '1.0'
+
+Then cwltool will simply find that ``env.sh`` file and source it before executing
+the corresponding tool. That ``env.sh`` script is only responsible for modifying
+the job's ``PATH`` to add the required binaries.
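+
+As a rough illustration (the install prefix below is hypothetical, not a file
+shipped with cwltool), such an ``env.sh`` script might contain nothing more
+than a ``PATH`` update::
+
+  # Sourced before the tool runs: prepend this package's binaries to PATH.
+  PATH="/opt/packages/random-lines/1.0/bin:$PATH"
+  export PATH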
+
+This is a full example that works since resolving "Galaxy packages" has no 
+external requirements. Try it out by executing the following command from cwltool's
+root directory::
+
+  cwltool --beta-dependency-resolvers-configuration tests/test_deps_env_resolvers_conf.yml \
+      tests/random_lines.cwl \
+      tests/random_lines_job.json
+
+The resolvers configuration file in the above example was simply:
+
+.. code:: yaml
+
+  - type: galaxy_packages
+    base_path: ./tests/test_deps_env
+
+It is possible that the ``SoftwareRequirement`` s in a given CWL tool will not
+match the module names for a given cluster. Such requirements can be re-mapped
+to specific deployed packages and/or versions via another file, specified with
+the resolver plugin parameter ``mapping_files``. We will
+demonstrate this using ``galaxy_packages``, but the concepts apply equally well
+to Environment Modules or Conda packages (described below), for instance.
+
+So consider the resolvers configuration file
+(``tests/test_deps_env_resolvers_conf_rewrite.yml``):
+
+.. code:: yaml
+
+  - type: galaxy_packages
+    base_path: ./tests/test_deps_env
+    mapping_files: ./tests/test_deps_mapping.yml
+
+And the corresponding mapping configuration file (``tests/test_deps_mapping.yml``):
+
+.. code:: yaml
+
+  - from:
+      name: randomLines
+      version: 1.0.0-rc1
+    to:
+      name: random-lines
+      version: '1.0'
+
+This says that if cwltool encounters a requirement of ``randomLines`` at version
+``1.0.0-rc1`` in a tool, it should rewrite it for our specific plugin as ``random-lines`` at
+version ``1.0``. cwltool has such a test tool called ``random_lines_mapping.cwl``
+that contains such a source ``SoftwareRequirement``. To try out this example with
+mapping, execute the following command from the cwltool root directory::
+
+  cwltool --beta-dependency-resolvers-configuration tests/test_deps_env_resolvers_conf_rewrite.yml \
+      tests/random_lines_mapping.cwl \
+      tests/random_lines_job.json
+
+The previous examples demonstrated leveraging existing infrastructure to
+provide requirements for CWL tools. If instead a real package manager is used,
+cwltool has the opportunity to install requirements as needed. While initial
+support for Homebrew/Linuxbrew plugins is available, the most developed such
+plugin is for the `Conda <https://conda.io/docs/#>`__ package manager. Conda has the nice properties
+of allowing multiple versions of a package to be installed simultaneously,
+not requiring elevated permissions to install Conda itself or packages using
+Conda, and being cross-platform. For these reasons, cwltool may run as a normal
+user, install its own Conda environment and manage multiple versions of Conda packages
+on both Linux and Mac OS X.
+
+The Conda plugin can be endlessly configured, but a sensible set of defaults
+that has proven to be a powerful stack for dependency management within the Galaxy tool
+development ecosystem can be enabled simply by passing cwltool the
+``--beta-conda-dependencies`` flag.
+
+With this, we can use the seqtk example above without Docker and without
+any externally managed services; cwltool should install everything it needs
+and create an environment for the tool. Try it out with the following command::
+
+  cwltool --beta-conda-dependencies tests/seqtk_seq.cwl tests/seqtk_seq_job.json
+
+The CWL specification allows URIs to be attached to ``SoftwareRequirement`` s
+to disambiguate package names. If the mapping files described above
+allow deployers to adapt tools to their infrastructure, this mechanism allows
+tools to adapt their requirements to multiple package managers. To demonstrate
+this within the context of the seqtk example, we can deliberately break the package name we
+use and then specify a specific Conda package as follows:
+
+.. code:: yaml
+
+  hints:
+    SoftwareRequirement:
+      packages:
+      - package: seqtk_seq
+        version:
+        - '1.2'
+        specs:
+        - https://anaconda.org/bioconda/seqtk
+        - https://packages.debian.org/sid/seqtk
+
+The example can be executed using the command::
+
+  cwltool --beta-conda-dependencies tests/seqtk_seq_wrong_name.cwl tests/seqtk_seq_job.json
+
+The plugin framework for managing resolution of these software requirements
+is maintained as part of `galaxy-lib <https://github.com/galaxyproject/galaxy-lib>`__, a small, portable subset of the Galaxy
+project. More information on configuration and implementation can be found
+at the following links:
+
+- `Dependency Resolvers in Galaxy <https://docs.galaxyproject.org/en/latest/admin/dependency_resolvers.html>`__
+- `Conda for [Galaxy] Tool Dependencies <https://docs.galaxyproject.org/en/latest/admin/conda_faq.html>`__
+- `Mapping Files - Implementation <https://github.com/galaxyproject/galaxy/commit/495802d229967771df5b64a2f79b88a0eaf00edb>`__
+- `Specifications - Implementation <https://github.com/galaxyproject/galaxy/commit/81d71d2e740ee07754785306e4448f8425f890bc>`__
+- `Initial cwltool Integration Pull Request <https://github.com/common-workflow-language/cwltool/pull/214>`__
+
+Cwltool control flow
+--------------------
+
+Technical outline of how cwltool works internally, for maintainers. A short
+sketch of the central run loop follows this outline.
+
+#. Use CWL `load_tool()` to load the document.
+
+   #. Fetches the document from file or URL
+   #. Applies preprocessing (syntax/identifier expansion and normalization)
+   #. Validates the document based on cwlVersion
+   #. If necessary, updates the document to the latest spec
+   #. Constructs a Process object using `make_tool()` callback.  This yields a
+      CommandLineTool, Workflow, or ExpressionTool.  For workflows, this
+      recursively constructs each workflow step.
+   #. To construct custom types for CommandLineTool, Workflow, or
+      ExpressionTool, provide a custom `make_tool()`.
+
+#. Iterate on the `job()` method of the Process object to get back runnable jobs.
+
+   #. `job()` is a generator method (uses the Python iterator protocol)
+   #. Each time the `job()` method is invoked in an iteration, it returns one
+      of: a runnable item (an object with a `run()` method), `None` (indicating
+      there is currently no work ready to run) or end of iteration (indicating
+      the process is complete).
+   #. Invoke the runnable item by calling `run()`.  This runs the tool and gets output.
+   #. Output of a process is reported by an output callback.
+   #. `job()` may be iterated over multiple times.  It will yield all the work
+      that is currently ready to run and then yield None.
+
+#. "Workflow" objects create a corresponding "WorkflowJob" and "WorkflowJobStep" objects to hold the workflow state for the duration of the job invocation.
+
+   #. The WorkflowJob iterates over each WorkflowJobStep and determines if the
+      inputs to the step are ready.
+   #. When a step is ready, it constructs an input object for that step and
+      iterates on the `job()` method of the workflow job step.
+   #. Each runnable item is yielded back up to the top-level run loop.
+   #. When a step job completes and receives an output callback, the
+      job outputs are assigned to the output of the workflow step.
+   #. When all steps are complete, the intermediate files are moved to a final
+      workflow output, intermediate directories are deleted, and the output
+      callback for the workflow is called.
+
+#. "CommandLineTool" job() objects yield a single runnable object.
+
+   #. The CommandLineTool `job()` method calls `makeJobRunner()` to create a
+      `CommandLineJob` object.
+   #. The job method configures the CommandLineJob object by setting public
+      attributes.
+   #. The job method iterates over the file and directory inputs to the
+      CommandLineTool and creates a "path map".
+   #. Files are mapped from their "resolved" location to a "target" path where
+      they will appear at tool invocation (for example, a location inside a
+      Docker container). The target paths are used on the command line.
+   #. Files are staged to target paths using either Docker volume binds (when
+      using containers) or symlinks (if not).  This staging step enables files
+      to be logically rearranged or renamed independent of their source layout.
+   #. The run() method of CommandLineJob executes the command line tool or
+      Docker container, waits for it to complete, collects output, and makes
+      the output callback.
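+
+The iteration protocol described in step 2 above can be condensed into a short
+sketch. This is a simplified illustration, not cwltool's actual executor;
+``process``, ``job_order``, and ``output_callback`` stand in for the objects
+introduced in the outline::
+
+  def run_loop(process, job_order, output_callback, **kwargs):
+      # job() is a generator: it yields runnable items (objects with a
+      # run() method), or None when no work is ready yet, and stops
+      # iterating once the process is complete.
+      for runnable in process.job(job_order, output_callback, **kwargs):
+          if runnable is not None:
+              # Running the item executes the tool; its output is
+              # reported through the output callback.
+              runnable.run(**kwargs)
+          # On None, a full executor would wait for pending jobs to
+          # finish before iterating again.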
+
+
+Extension points
+----------------
+
+The following functions can be provided to main(), to load_tool(), or to the
+executor to override or augment the listed behaviors.
+
+executor
+  ::
+
+    executor(tool, job_order_object, **kwargs)
+      (Process, Dict[Text, Any], **Any) -> Tuple[Dict[Text, Any], Text]
+
+  A top-level workflow execution loop; it should synchronously execute a process
+  object and return an output object.
+
+makeTool
+  ::
+
+    makeTool(toolpath_object, **kwargs)
+      (Dict[Text, Any], **Any) -> Process
+
+  Construct a Process object from a document.
+
+selectResources
+  ::
+
+    selectResources(request)
+      (Dict[Text, int]) -> Dict[Text, int]
+
+  Take a resource request and turn it into a concrete resource assignment.
+
+versionfunc
+  ::
+
+    ()
+      () -> Text
+
+  Return version string.
+
+make_fs_access
+  ::
+
+    make_fs_access(basedir)
+      (Text) -> StdFsAccess
+
+  Return a file system access object.
+
+fetcher_constructor
+  ::
+
+    fetcher_constructor(cache, session)
+      (Dict[unicode, unicode], requests.sessions.Session) -> Fetcher
+
+  Construct a Fetcher object with the supplied cache and HTTP session.
+
+resolver
+  ::
+
+    resolver(document_loader, document)
+      (Loader, Union[Text, dict[Text, Any]]) -> Text
+
+  Resolve a relative document identifier to an absolute one which can be fetched.
+
+logger_handler
+  ::
+
+    logger_handler
+      logging.Handler
+
+  Handler object for logging.
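+
+As a minimal sketch of wiring in one of these extension points, assuming this
+release's ``cwltool.main.single_job_executor`` as the default executor and the
+``executor`` keyword on ``main()`` described above (the tool and job file
+names below are hypothetical), a wrapping executor could log each run before
+delegating::
+
+  import logging
+
+  import cwltool.main
+
+  def logging_executor(tool, job_order_object, **kwargs):
+      # Log the process being run, then delegate to the default executor.
+      logging.info("running %s", tool.tool.get("id"))
+      return cwltool.main.single_job_executor(tool, job_order_object, **kwargs)
+
+  cwltool.main.main(["my-tool.cwl", "my-job.json"], executor=logging_executor)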
diff --git a/cwltool.egg-info/PKG-INFO b/cwltool.egg-info/PKG-INFO
index eea3cfe..e5d73f5 100644
--- a/cwltool.egg-info/PKG-INFO
+++ b/cwltool.egg-info/PKG-INFO
@@ -1,11 +1,11 @@
 Metadata-Version: 1.1
 Name: cwltool
-Version: 1.0.20170114120503
+Version: 1.0.20170803160545
 Summary: Common workflow language reference implementation
 Home-page: https://github.com/common-workflow-language/cwltool
 Author: Common workflow language working group
 Author-email: common-workflow-language at googlegroups.com
-License: Apache 2.0
+License: UNKNOWN
 Download-URL: https://github.com/common-workflow-language/cwltool
 Description: ==================================================================
         Common workflow language tool description reference implementation
@@ -13,6 +13,11 @@ Description: ==================================================================
         
         CWL Conformance test: |Build Status|
         
+        Travis: |Unix Build Status|
+        
+        .. |Unix Build Status| image:: https://img.shields.io/travis/common-workflow-language/cwltool/master.svg?label=unix%20build
+           :target: https://travis-ci.org/common-workflow-language/cwltool
+        
         This is the reference implementation of the Common Workflow Language.  It is
         intended to be feature complete and provide comprehensive validation of CWL
         files as well as provide other tools related to working with CWL.
@@ -35,7 +40,7 @@ Description: ==================================================================
         
           pip install cwlref-runner
         
-        If installling alongside another CWL implementation then::
+        If installing alongside another CWL implementation then::
         
           pip install cwltool
         
@@ -47,7 +52,24 @@ Description: ==================================================================
         
         Remember, if co-installing multiple CWL implementations then you need to
         maintain which implementation ``cwl-runner`` points to via a symbolic file
-        system link or [another facility](https://wiki.debian.org/DebianAlternatives).
+        system link or `another facility <https://wiki.debian.org/DebianAlternatives>`_.
+        
+        Running tests locally
+        ---------------------
+        
+        -  Running basic tests (``/tests``):
+        
+        .. code:: bash
+        
+            python setup.py test
+        
+        -  Running the entire suite of CWL conformance tests:
+        
+        The GitHub repository for the CWL specifications contains a script that tests a CWL
+        implementation against a wide array of valid CWL files using the `cwltest <https://github.com/common-workflow-language/cwltest>`_
+        program.
+        
+        Instructions for running these tests can be found in the Common Workflow Language Specification repository at https://github.com/common-workflow-language/common-workflow-language/blob/master/CONFORMANCE_TESTS.md
         
         Run on the command line
         -----------------------
@@ -61,19 +83,10 @@ Description: ==================================================================
         
           cwltool [tool-or-workflow-description] [input-job-settings]
         
-        Import as a module
-        ----------------
-        
-        Add::
-        
-          import cwltool
-        
-        to your script.
-        
         Use with boot2docker
         --------------------
         boot2docker is running docker inside a virtual machine and it only mounts ``Users``
-        on it. The default behavoir of CWL is to create temporary directories under e.g.
+        on it. The default behavior of CWL is to create temporary directories under e.g.
         ``/Var`` which is not accessible to Docker containers.
         
         To run CWL successfully with boot2docker you need to set the ``--tmpdir-prefix``
@@ -96,4 +109,411 @@ Description: ==================================================================
         following locations will be searched:
         http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
         
+        
+        Use with GA4GH Tool Registry API
+        --------------------------------
+        
+        Cwltool can launch tools directly from `GA4GH Tool Registry API`_ endpoints.
+        
+        By default, cwltool searches https://dockstore.org/ .  Use ``--add-tool-registry`` to add other registries to the search path.
+        
+        For example ::
+        
+          cwltool --non-strict quay.io/collaboratory/dockstore-tool-bamstats:master test.json
+        
+        and (defaults to latest when a version is not specified) ::
+        
+          cwltool --non-strict quay.io/collaboratory/dockstore-tool-bamstats test.json
+        
+        For this example, grab the test.json (and input file) from https://github.com/CancerCollaboratory/dockstore-tool-bamstats
+        
+        .. _`GA4GH Tool Registry API`: https://github.com/ga4gh/tool-registry-schemas
+        
+        Import as a module
+        ------------------
+        
+        Add::
+        
+          import cwltool
+        
+        to your script.
+        
+        The easiest way to use cwltool to run a tool or workflow from Python is to use a Factory::
+        
+          import cwltool.factory
+          fac = cwltool.factory.Factory()
+        
+          echo = fac.make("echo.cwl")
+          result = echo(inp="foo")
+        
+          # result["out"] == "foo"
+        
+        Leveraging SoftwareRequirements (Beta)
+        --------------------------------------
+        
+        CWL tools may be decorated with ``SoftwareRequirement`` hints that cwltool
+        may in turn use to resolve to packages in various package managers or
+        dependency management systems such as `Environment Modules
+        <http://modules.sourceforge.net/>`__.
+        
+        Utilizing ``SoftwareRequirement`` hints with cwltool requires an optional
+        dependency; for this reason, be sure to specify the ``deps`` extra when
+        installing cwltool. For instance::
+        
+          $ pip install 'cwltool[deps]'
+        
+        Installing cwltool in this fashion enables several new command line options.
+        The most general of these options is ``--beta-dependency-resolvers-configuration``.
+        This option allows one to specify a dependency resolvers configuration file.
+        This file may be written as either XML or YAML and simply describes the
+        plugins to enable for "resolving" ``SoftwareRequirement`` dependencies.
+        
+        To discuss some of these plugins and how to configure them, first consider the
+        following ``hint`` definition for an example CWL tool.
+        
+        .. code:: yaml
+        
+          SoftwareRequirement:
+            packages:
+            - package: seqtk
+              version:
+              - r93
+        
+        Now imagine deploying cwltool on a cluster with Software Modules installed
+        and that a ``seqtk`` module is available at version ``r93``. This means cluster
+        users likely won't have the ``seqtk`` binary on their ``PATH`` by default, but after
+        sourcing this module with the command ``modulecmd sh load seqtk/r93``, ``seqtk`` is
+        available on the ``PATH``. A simple dependency resolvers configuration file, called
+        ``dependency-resolvers-conf.yml`` for instance, that would enable cwltool to source
+        the correct module environment before executing the above tool would simply be:
+        
+        .. code:: yaml
+        
+          - type: module
+        
+        The outer list indicates that one plugin is being enabled; the plugin's
+        parameters are defined as a dictionary for that one list item. There is only
+        one required parameter for the plugin above: ``type``, which defines the
+        plugin type. This parameter is required for all plugins. The available
+        plugins and the parameters available for each are documented (incompletely)
+        `here <https://docs.galaxyproject.org/en/latest/admin/dependency_resolvers.html>`__.
+        Unfortunately, this documentation is in the context of Galaxy tool ``requirement`` s
+        instead of CWL ``SoftwareRequirement`` s, but the concepts map fairly directly.
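+        
+        Several plugins may be enabled in a single configuration file and are
+        consulted in order. For instance, the following sketch (which just combines
+        plugin types described in this section) would try "Galaxy packages" first
+        and fall back to Environment Modules:
+        
+        .. code:: yaml
+        
+          - type: galaxy_packages
+            base_path: ./tests/test_deps_env
+          - type: module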
+        
+        cwltool is distributed with an example of such a seqtk tool and a sample
+        corresponding job. It can be executed from the cwltool root using a dependency
+        resolvers configuration file such as the one above with the command::
+        
+          cwltool --beta-dependency-resolvers-configuration /path/to/dependency-resolvers-conf.yml \
+              tests/seqtk_seq.cwl \
+              tests/seqtk_seq_job.json
+        
+        This example demonstrates that cwltool can both leverage existing software
+        installations and handle workflows with dependencies on different versions
+        of the same software and libraries. However, the above example does require
+        an existing module setup, so it cannot be tested "out of the box" with
+        cwltool. For a more isolated test that demonstrates all the same concepts,
+        the resolver plugin type ``galaxy_packages`` can be used.
+        
+        "Galaxy packages" are a lighter weight alternative to Environment Modules that are
+        really just defined by a way to lay out directories into packages and versions
+        to find little scripts that are sourced to modify the environment. They have
+        been used for years in Galaxy community to adapt Galaxy tools to cluster 
+        environments but require neither knowledge of Galaxy nor any special tools to 
+        setup. These should work just fine for CWL tools.
+        
+        The cwltool source code repository's test directory is set up with a very simple
+        directory that defines a set of "Galaxy packages" (really just one
+        package, named ``random-lines``). The directory layout is simply::
+        
+          tests/test_deps_env/
+            random-lines/
+              1.0/
+                env.sh
+        
+        If the ``galaxy_packages`` plugin is enabled and pointed at the
+        ``tests/test_deps_env`` directory in cwltool's root, and a ``SoftwareRequirement``
+        such as the following is encountered:
+        
+        .. code:: yaml
+        
+          hints:
+            SoftwareRequirement:
+              packages:
+              - package: 'random-lines'
+                version:
+                - '1.0'
+        
+        then cwltool will simply find that ``env.sh`` file and source it before executing
+        the corresponding tool. That ``env.sh`` script is only responsible for modifying
+        the job's ``PATH`` to add the required binaries.
+        
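+        For illustration, such an ``env.sh`` could be as simple as the following
+        sketch (hypothetical content, not the actual file shipped in
+        ``tests/test_deps_env``):
+        
+        .. code:: bash
+        
+          export PATH="/opt/packages/random-lines/1.0/bin:$PATH"
+        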
+        This is a full example that works, since resolving "Galaxy packages" has no
+        external requirements. Try it out by executing the following command from cwltool's
+        root directory::
+        
+          cwltool --beta-dependency-resolvers-configuration tests/test_deps_env_resolvers_conf.yml \
+              tests/random_lines.cwl \
+              tests/random_lines_job.json
+        
+        The resolvers configuration file in the above example was simply:
+        
+        .. code:: yaml
+        
+          - type: galaxy_packages
+            base_path: ./tests/test_deps_env
+        
+        It is possible that the ``SoftwareRequirement`` s in a given CWL tool will not
+        match the module names for a given cluster. Such requirements can be re-mapped
+        to specific deployed packages and/or versions using another file specified via
+        the resolver plugin parameter ``mapping_files``. We will
+        demonstrate this using ``galaxy_packages``, but the concepts apply equally well
+        to Environment Modules or Conda packages (described below), for instance.
+        
+        So consider the resolvers configuration file
+        (``tests/test_deps_env_resolvers_conf_rewrite.yml``):
+        
+        .. code:: yaml
+        
+          - type: galaxy_packages
+            base_path: ./tests/test_deps_env
+            mapping_files: ./tests/test_deps_mapping.yml
+        
+        And the corresponding mapping configuration file (``tests/test_deps_mapping.yml``):
+        
+        .. code:: yaml
+        
+          - from:
+              name: randomLines
+              version: 1.0.0-rc1
+            to:
+              name: random-lines
+              version: '1.0'
+        
+        This says that if cwltool encounters a requirement of ``randomLines`` at version
+        ``1.0.0-rc1`` in a tool, it should rewrite it for our specific plugin as ``random-lines``
+        at version ``1.0``. cwltool has such a test tool, called ``random_lines_mapping.cwl``,
+        that contains such a source ``SoftwareRequirement``. To try out this example with
+        mapping, execute the following command from the cwltool root directory::
+        
+          cwltool --beta-dependency-resolvers-configuration tests/test_deps_env_resolvers_conf_rewrite.yml \
+              tests/random_lines_mapping.cwl \
+              tests/random_lines_job.json
+        
+        The previous examples demonstrated leveraging existing infrastructure to
+        provide requirements for CWL tools. If instead a real package manager is used,
+        cwltool has the opportunity to install requirements as needed. While initial
+        support for Homebrew/Linuxbrew plugins is available, the most developed such
+        plugin is for the `Conda <https://conda.io/docs/#>`__ package manager. Conda has the nice properties
+        of allowing multiple versions of a package to be installed simultaneously,
+        not requiring elevated permissions to install Conda itself or packages using
+        Conda, and being cross-platform. For these reasons, cwltool may run as a normal
+        user, install its own Conda environment and manage multiple versions of Conda packages
+        on both Linux and Mac OS X.
+        
+        The Conda plugin can be endlessly configured, but a sensible set of defaults
+        that has proven a powerful stack for dependency management within the Galaxy tool 
+        development ecosystem can be enabled by simply passing cwltool the
+        ``--beta-conda-dependencies`` flag.
+        
+        With this we can use the seqtk example above without Docker and without
+        any externally managed services; cwltool should install everything it needs
+        and create an environment for the tool. Try it out with the following command::
+        
+          cwltool --beta-conda-dependencies tests/seqtk_seq.cwl tests/seqtk_seq_job.json
+        
+        The CWL specification allows URIs to be attached to ``SoftwareRequirement`` s
+        to disambiguate package names. If the mapping files described above
+        allow deployers to adapt tools to their infrastructure, this mechanism allows
+        tools to adapt their requirements to multiple package managers. To demonstrate
+        this within the context of the seqtk example, we can simply break the package
+        name we use and then specify a specific Conda package as follows:
+        
+        .. code:: yaml
+        
+          hints:
+            SoftwareRequirement:
+              packages:
+              - package: seqtk_seq
+                version:
+                - '1.2'
+                specs:
+                - https://anaconda.org/bioconda/seqtk
+                - https://packages.debian.org/sid/seqtk
+        
+        The example can be executed using the command::
+        
+          cwltool --beta-conda-dependencies tests/seqtk_seq_wrong_name.cwl tests/seqtk_seq_job.json
+        
+        The plugin framework for managing the resolution of these software requirements
+        is maintained as part of `galaxy-lib <https://github.com/galaxyproject/galaxy-lib>`__, a small, portable subset of the Galaxy
+        project. More information on configuration and implementation can be found
+        at the following links:
+        
+        - `Dependency Resolvers in Galaxy <https://docs.galaxyproject.org/en/latest/admin/dependency_resolvers.html>`__
+        - `Conda for [Galaxy] Tool Dependencies <https://docs.galaxyproject.org/en/latest/admin/conda_faq.html>`__
+        - `Mapping Files - Implementation <https://github.com/galaxyproject/galaxy/commit/495802d229967771df5b64a2f79b88a0eaf00edb>`__
+        - `Specifications - Implementation <https://github.com/galaxyproject/galaxy/commit/81d71d2e740ee07754785306e4448f8425f890bc>`__
+        - `Initial cwltool Integration Pull Request <https://github.com/common-workflow-language/cwltool/pull/214>`__
+        
+        Cwltool control flow
+        --------------------
+        
+        Technical outline of how cwltool works internally, for maintainers.
+        
+        #. Use CWL `load_tool()` to load the document.
+        
+           #. Fetches the document from file or URL
+           #. Applies preprocessing (syntax/identifier expansion and normalization)
+           #. Validates the document based on cwlVersion
+           #. If necessary, updates the document to latest spec
+           #. Constructs a Process object using the `make_tool()` callback.  This yields a
+              CommandLineTool, Workflow, or ExpressionTool.  For workflows, this
+              recursively constructs each workflow step.
+           #. To construct custom types for CommandLineTool, Workflow, or
+              ExpressionTool, provide a custom `make_tool()`.
+        
+        #. Iterate on the `job()` method of the Process object to get back runnable jobs.
+        
+           #. `job()` is a generator method (it uses the Python iterator protocol).
+           #. Each time the `job()` method is invoked in an iteration, it returns one
+              of: a runnable item (an object with a `run()` method), `None` (indicating
+              there is currently no work ready to run), or end of iteration (indicating
+              the process is complete).
+           #. Invoke the runnable item by calling `run()`.  This runs the tool and gets output.
+           #. Output of a process is reported by an output callback.
+           #. `job()` may be iterated over multiple times.  It will yield all the work
+              that is currently ready to run and then yield None.
+        
+        #. "Workflow" objects create a corresponding "WorkflowJob" and "WorkflowJobStep" objects to hold the workflow state for the duration of the job invocation.
+        
+           #. The WorkflowJob iterates over each WorkflowJobStep and determines if the
+              inputs the step are ready.
+           #. When a step is ready, it constructs an input object for that step and
+              iterates on the `job()` method of the workflow job step.
+           #. Each runnable item is yielded back up to top level run loop
+           #. When a step job completes and receives an output callback, the
+              job outputs are assigned to the output of the workflow step.
+           #. When all steps are complete, the intermediate files are moved to a final
+              workflow output, intermediate directories are deleted, and the output
+              callback for the workflow is called.
+        
+        #. "CommandLineTool" job() objects yield a single runnable object.
+        
+           #. The CommandLineTool `job()` method calls `makeJobRunner()` to create a
+              `CommandLineJob` object.
+           #. The job method configures the CommandLineJob object by setting public
+              attributes.
+           #. The job method iterates over the File and Directory inputs to the
+              CommandLineTool and creates a "path map".
+           #. Files are mapped from their "resolved" location to a "target" path where
+              they will appear at tool invocation (for example, a location inside a
+              Docker container).  The target paths are used on the command line.
+           #. Files are staged to target paths using either Docker volume binds (when
+              using containers) or symlinks (if not).  This staging step enables files
+              to be logically rearranged or renamed independently of their source layout.
+           #. The run() method of CommandLineJob executes the command line tool or
+              Docker container, waits for it to complete, collects output, and makes
+              the output callback.
+        
+        
+        Extension points
+        ----------------
+        
+        The following functions can be provided to main(), to load_tool(), or to the
+        executor to override or augment the listed behaviors.
+        
+        executor
+          ::
+        
+            executor(tool, job_order_object, **kwargs)
+              (Process, Dict[Text, Any], **Any) -> Tuple[Dict[Text, Any], Text]
+        
+          A top-level workflow execution loop; it should synchronously execute a
+          process object and return an output object.
+        
+        makeTool
+          ::
+        
+            makeTool(toolpath_object, **kwargs)
+              (Dict[Text, Any], **Any) -> Process
+        
+          Construct a Process object from a document.
+        
+        selectResources
+          ::
+        
+            selectResources(request)
+              (Dict[Text, int]) -> Dict[Text, int]
+        
+          Take a resource request and turn it into a concrete resource assignment.
+        
+        versionfunc
+          ::
+        
+            ()
+              () -> Text
+        
+          Return a version string.
+        
+        make_fs_access
+          ::
+        
+            make_fs_access(basedir)
+              (Text) -> StdFsAccess
+        
+          Return a file system access object.
+        
+        fetcher_constructor
+          ::
+        
+            fetcher_constructor(cache, session)
+              (Dict[unicode, unicode], requests.sessions.Session) -> Fetcher
+        
+          Construct a Fetcher object with the supplied cache and HTTP session.
+        
+        resolver
+          ::
+        
+            resolver(document_loader, document)
+              (Loader, Union[Text, dict[Text, Any]]) -> Text
+        
+          Resolve a relative document identifier to an absolute one which can be fetched.
+        
+        logger_handler
+          ::
+        
+            logger_handler
+              logging.Handler
+        
+          Handler object for logging.
+        
 Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: Intended Audience :: Healthcare Industry
+Classifier: License :: OSI Approved :: Apache Software License
+Classifier: Natural Language :: English
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: POSIX :: Linux
+Classifier: Operating System :: OS Independent
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: Microsoft :: Windows :: Windows 10
+Classifier: Operating System :: Microsoft :: Windows :: Windows 8.1
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
+Classifier: Topic :: Scientific/Engineering :: Astronomy
+Classifier: Topic :: Scientific/Engineering :: Atmospheric Science
+Classifier: Topic :: Scientific/Engineering :: Information Analysis
+Classifier: Topic :: Scientific/Engineering :: Medical Science Apps.
+Classifier: Topic :: System :: Distributed Computing
+Classifier: Topic :: Utilities
diff --git a/cwltool.egg-info/SOURCES.txt b/cwltool.egg-info/SOURCES.txt
index 74f153e..0308f25 100644
--- a/cwltool.egg-info/SOURCES.txt
+++ b/cwltool.egg-info/SOURCES.txt
@@ -2,7 +2,6 @@ MANIFEST.in
 Makefile
 README.rst
 cwltool.py
-ez_setup.py
 gittaggers.py
 setup.cfg
 setup.py
@@ -12,20 +11,23 @@ cwltool/builder.py
 cwltool/cwlNodeEngine.js
 cwltool/cwlrdf.py
 cwltool/docker.py
-cwltool/docker_uid.py
+cwltool/docker_id.py
 cwltool/draft2tool.py
 cwltool/errors.py
 cwltool/expression.py
+cwltool/extensions.yml
 cwltool/factory.py
 cwltool/flatten.py
 cwltool/job.py
 cwltool/load_tool.py
 cwltool/main.py
+cwltool/mutation.py
 cwltool/pack.py
 cwltool/pathmapper.py
 cwltool/process.py
 cwltool/resolver.py
 cwltool/sandboxjs.py
+cwltool/software_requirements.py
 cwltool/stdfsaccess.py
 cwltool/update.py
 cwltool/utils.py
@@ -96,9 +98,17 @@ cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_proc.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_schema.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_src.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_proc.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_schema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_src.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_proc.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_schema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_src.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_proc.yml
 cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_schema.yml
@@ -134,4 +144,65 @@ cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/salad.md
 cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res.yml
 cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_proc.yml
 cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_schema.yml
-cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_src.yml
\ No newline at end of file
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_src.yml
+tests/2.fasta
+tests/2.fastq
+tests/__init__.py
+tests/echo-cwlrun-job.yaml
+tests/echo-job.yaml
+tests/echo.cwl
+tests/listing-job.yml
+tests/random_lines.cwl
+tests/random_lines_job.json
+tests/random_lines_mapping.cwl
+tests/seqtk_seq.cwl
+tests/seqtk_seq_job.json
+tests/seqtk_seq_with_docker.cwl
+tests/seqtk_seq_wrong_name.cwl
+tests/test_bad_outputs_wf.cwl
+tests/test_check.py
+tests/test_cwl_version.py
+tests/test_default_path.py
+tests/test_deps_env_resolvers_conf.yml
+tests/test_deps_env_resolvers_conf_rewrite.yml
+tests/test_deps_mapping.yml
+tests/test_docker_warning.py
+tests/test_examples.py
+tests/test_ext.py
+tests/test_fetch.py
+tests/test_js_sandbox.py
+tests/test_pack.py
+tests/test_pathmapper.py
+tests/test_relax_path_checks.py
+tests/test_toolargparse.py
+tests/util.py
+tests/tmp1/tmp2/tmp3/.gitkeep
+tests/wf/badout1.cwl
+tests/wf/badout2.cwl
+tests/wf/badout3.cwl
+tests/wf/cat.cwl
+tests/wf/default_path.cwl
+tests/wf/echo.cwl
+tests/wf/empty.ttl
+tests/wf/expect_packed.cwl
+tests/wf/hello.txt
+tests/wf/listing_deep.cwl
+tests/wf/listing_none.cwl
+tests/wf/listing_shallow.cwl
+tests/wf/listing_v1_0.cwl
+tests/wf/missing_cwlVersion.cwl
+tests/wf/mut.cwl
+tests/wf/mut2.cwl
+tests/wf/mut3.cwl
+tests/wf/revsort-job.json
+tests/wf/revsort.cwl
+tests/wf/revtool.cwl
+tests/wf/scatterfail.cwl
+tests/wf/sorttool.cwl
+tests/wf/updatedir.cwl
+tests/wf/updatedir_inplace.cwl
+tests/wf/updateval.cwl
+tests/wf/updateval.py
+tests/wf/updateval_inplace.cwl
+tests/wf/wffail.cwl
+tests/wf/wrong_cwlVersion.cwl
\ No newline at end of file
diff --git a/cwltool.egg-info/requires.txt b/cwltool.egg-info/requires.txt
index eaeae5f..5e81b5a 100644
--- a/cwltool.egg-info/requires.txt
+++ b/cwltool.egg-info/requires.txt
@@ -1,7 +1,11 @@
 setuptools
-requests >= 1.0
-ruamel.yaml >= 0.12.4
-rdflib >= 4.2.0, < 4.3.0
-shellescape >= 3.4.1, < 3.5
-schema-salad >= 2.2.20170111180227, < 3
-typing >= 3.5.2, < 3.6
+requests>=1.0
+ruamel.yaml<0.15,>=0.12.4
+rdflib<4.3.0,>=4.2.2
+shellescape<3.5,>=3.4.1
+schema-salad<3,>=2.6
+typing>=3.5.3
+six>=1.8.0
+
+[deps]
+galaxy-lib>=17.09.3
diff --git a/cwltool.py b/cwltool.py
index a3b9c69..45515b5 100755
--- a/cwltool.py
+++ b/cwltool.py
@@ -1,4 +1,5 @@
 #!/usr/bin/env python
+from __future__ import absolute_import
 """Convienance entry point for cwltool.
 
 This can be used instead of the recommended method of `./setup.py install`
@@ -6,6 +7,7 @@ or `./setup.py develop` and then using the generated `cwltool` executable.
 """
 
 import sys
+
 from cwltool import main
 
 if __name__ == "__main__":
diff --git a/cwltool/__init__.py b/cwltool/__init__.py
index 70d587a..d41b822 100644
--- a/cwltool/__init__.py
+++ b/cwltool/__init__.py
@@ -1 +1,2 @@
+from __future__ import absolute_import
 __author__ = 'peter.amstutz at curoverse.com'
diff --git a/cwltool/__main__.py b/cwltool/__main__.py
index 2b15c84..1420d69 100644
--- a/cwltool/__main__.py
+++ b/cwltool/__main__.py
@@ -1,4 +1,6 @@
-from . import main
+from __future__ import absolute_import
 import sys
 
+from . import main
+
 sys.exit(main.main())
diff --git a/cwltool/builder.py b/cwltool/builder.py
index 40cb789..c74e2e6 100644
--- a/cwltool/builder.py
+++ b/cwltool/builder.py
@@ -1,13 +1,24 @@
+from __future__ import absolute_import
 import copy
-from .utils import aslist
-from . import expression
+import os
+from typing import Any, Callable, Dict, List, Text, Type, Union
+
+import six
+from six import iteritems, string_types
+
 import avro
 import schema_salad.validate as validate
 from schema_salad.sourceline import SourceLine
-from typing import Any, Callable, Text, Type, Union
+
+from . import expression
 from .errors import WorkflowException
+from .mutation import MutationManager
+from .pathmapper import (PathMapper, get_listing, normalizeFilesDirs,
+                         visit_class)
 from .stdfsaccess import StdFsAccess
-from .pathmapper import PathMapper, adjustFileObjs, adjustDirObjs, normalizeFilesDirs
+from .utils import aslist, get_feature, docker_windows_path_adjust, onWindows
+
+AvroSchemaFromJSONData = avro.schema.make_avsc_object
 
 CONTENT_LIMIT = 64 * 1024
 
@@ -18,8 +29,8 @@ def substitute(value, replace):  # type: (Text, Text) -> Text
     else:
         return value + replace
 
-class Builder(object):
 
+class Builder(object):
     def __init__(self):  # type: () -> None
         self.names = None  # type: avro.schema.Names
         self.schemaDefs = None  # type: Dict[Text, Dict[Text, Any]]
@@ -36,11 +47,30 @@ class Builder(object):
         self.pathmapper = None  # type: PathMapper
         self.stagedir = None  # type: Text
         self.make_fs_access = None  # type: Type[StdFsAccess]
-        self.build_job_script = None  # type: Callable[[List[str]], Text]
         self.debug = False  # type: bool
+        self.mutation_manager = None  # type: MutationManager
+
+        # One of "no_listing", "shallow_listing", "deep_listing"
+        # Will be default "no_listing" for CWL v1.1
+        self.loadListing = "deep_listing"  # type: Union[None, str]
 
-    def bind_input(self, schema, datum, lead_pos=[], tail_pos=[]):
+        self.find_default_container = None  # type: Callable[[], Text]
+        self.job_script_provider = None  # type: Any
+
+    def build_job_script(self, commands):
+        # type: (List[bytes]) -> Text
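+        # Delegate to the configured job script provider, if any; returning
+        # None signals that no special job script is needed.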
+        build_job_script_method = getattr(self.job_script_provider, "build_job_script", None)  # type: Callable[[Builder, List[bytes]], Text]
+        if build_job_script_method:
+            return build_job_script_method(self, commands)
+        else:
+            return None
+
+    def bind_input(self, schema, datum, lead_pos=None, tail_pos=None):
         # type: (Dict[Text, Any], Any, Union[int, List[int]], List[int]) -> List[Dict[Text, Any]]
+        if tail_pos is None:
+            tail_pos = []
+        if lead_pos is None:
+            lead_pos = []
         bindings = []  # type: List[Dict[Text,Text]]
         binding = None  # type: Dict[Text,Any]
         if "inputBinding" in schema and isinstance(schema["inputBinding"], dict):
@@ -61,7 +91,7 @@ class Builder(object):
                 elif isinstance(t, dict) and "name" in t and self.names.has_name(t["name"], ""):
                     avsc = self.names.get_name(t["name"], "")
                 else:
-                    avsc = avro.schema.make_avsc_object(t, self.names)
+                    avsc = AvroSchemaFromJSONData(t, self.names)
                 if validate.validate(avsc, datum):
                     schema = copy.deepcopy(schema)
                     schema["type"] = t
@@ -105,9 +135,10 @@ class Builder(object):
 
             if schema["type"] == "File":
                 self.files.append(datum)
-                if binding and binding.get("loadContents"):
-                    with self.fs_access.open(datum["location"], "rb") as f:
-                        datum["contents"] = f.read(CONTENT_LIMIT)
+                if binding:
+                    if binding.get("loadContents"):
+                        with self.fs_access.open(datum["location"], "rb") as f:
+                            datum["contents"] = f.read(CONTENT_LIMIT)
 
                 if "secondaryFiles" in schema:
                     if "secondaryFiles" not in datum:
@@ -115,7 +146,7 @@ class Builder(object):
                     for sf in aslist(schema["secondaryFiles"]):
                         if isinstance(sf, dict) or "$(" in sf or "${" in sf:
                             secondary_eval = self.do_eval(sf, context=datum)
-                            if isinstance(secondary_eval, basestring):
+                            if isinstance(secondary_eval, string_types):
                                 sfpath = {"location": secondary_eval,
                                           "class": "File"}
                             else:
@@ -132,12 +163,14 @@ class Builder(object):
                     self.files.append(f)
                     return f
 
-                adjustFileObjs(datum.get("secondaryFiles", []), _capture_files)
+                visit_class(datum.get("secondaryFiles", []), ("File", "Directory"), _capture_files)
 
             if schema["type"] == "Directory":
+                ll = self.loadListing or (binding and binding.get("loadListing"))
+                if ll and ll != "no_listing":
+                    get_listing(self.fs_access, datum, (ll == "deep_listing"))
                 self.files.append(datum)
 
-
         # Position to front of the sort key
         if binding:
             for bi in bindings:
@@ -150,6 +183,11 @@ class Builder(object):
         if isinstance(value, dict) and value.get("class") in ("File", "Directory"):
             if "path" not in value:
                 raise WorkflowException(u"%s object missing \"path\": %s" % (value["class"], value))
+
+            # Adjust Windows file paths when passing them to Docker, which accepts Unix-style paths only
+            (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
+            if onWindows() and docker_req is not None:  # docker_req is None only when no DockerRequirement is listed in hints or requirements
+                return docker_windows_path_adjust(value["path"])
             return value["path"]
         else:
             return Text(value)
@@ -198,7 +236,7 @@ class Builder(object):
         # type: (Union[Dict[Text, Text], Text], Any, bool, bool) -> Any
         if recursive:
             if isinstance(ex, dict):
-                return {k: self.do_eval(v, context, pull_image, recursive) for k,v in ex.iteritems()}
+                return {k: self.do_eval(v, context, pull_image, recursive) for k, v in iteritems(ex)}
             if isinstance(ex, list):
                 return [self.do_eval(v, context, pull_image, recursive) for v in ex]
 
diff --git a/cwltool/cwlNodeEngine.js b/cwltool/cwlNodeEngine.js
index ca75f61..ce43516 100755
--- a/cwltool/cwlNodeEngine.js
+++ b/cwltool/cwlNodeEngine.js
@@ -1,7 +1,7 @@
 "use strict";
-process.stdin.setEncoding('utf8');
+process.stdin.setEncoding("utf8");
 var incoming = "";
-process.stdin.on('data', function(chunk) {
+process.stdin.on("data", function(chunk) {
   incoming += chunk;
   var i = incoming.indexOf("\n");
   if (i > -1) {
@@ -10,4 +10,4 @@ process.stdin.on('data', function(chunk) {
     process.stdout.write(JSON.stringify(require("vm").runInNewContext(fn, {})) + "\n");
   }
 });
-process.stdin.on('end', process.exit);
+process.stdin.on("end", process.exit);
diff --git a/cwltool/cwlrdf.py b/cwltool/cwlrdf.py
index 950357b..fc662db 100644
--- a/cwltool/cwlrdf.py
+++ b/cwltool/cwlrdf.py
@@ -1,11 +1,14 @@
-import json
-import urlparse
-from .process import Process
-from schema_salad.ref_resolver import Loader, ContextType
+from __future__ import absolute_import
+from typing import IO, Any, Dict, Text
+
+from rdflib import Graph
+
 from schema_salad.jsonld_context import makerdf
-from rdflib import Graph, plugin, URIRef
-from rdflib.serializer import Serializer
-from typing import Any, Dict, IO, Text, Union
+from schema_salad.ref_resolver import ContextType
+from six.moves import urllib
+
+from .process import Process
+
 
 def gather(tool, ctx):  # type: (Process, ContextType) -> Graph
     g = Graph()
@@ -16,14 +19,16 @@ def gather(tool, ctx):  # type: (Process, ContextType) -> Graph
     tool.visit(visitor)
     return g
 
+
 def printrdf(wf, ctx, sr, stdout):
     # type: (Process, ContextType, Text, IO[Any]) -> None
     stdout.write(gather(wf, ctx).serialize(format=sr))
 
+
 def lastpart(uri):  # type: (Any) -> Text
     uri = Text(uri)
     if "/" in uri:
-        return uri[uri.rindex("/")+1:]
+        return uri[uri.rindex("/") + 1:]
     else:
         return uri
 
@@ -84,6 +89,7 @@ def dot_with_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
     for (inp,) in qres:
         stdout.write(u'"%s" [shape=octagon]\n' % (lastpart(inp)))
 
+
 def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
     dotname = {}  # type: Dict[Text,Text]
     clusternode = {}
@@ -129,7 +135,7 @@ def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
                 currentwf = None
 
         if Text(runtype) != "https://w3id.org/cwl/cwl#Workflow":
-            stdout.write(u'"%s" [label="%s"]\n' % (dotname[step], urlparse.urldefrag(Text(step))[1]))
+            stdout.write(u'"%s" [label="%s"]\n' % (dotname[step], urllib.parse.urldefrag(Text(step))[1]))
 
     if currentwf is not None:
         stdout.write("}\n")
@@ -163,7 +169,7 @@ def printdot(wf, ctx, stdout, include_parameters=False):
 
     stdout.write("digraph {")
 
-    #g.namespace_manager.qname(predicate)
+    # g.namespace_manager.qname(predicate)
 
     if include_parameters:
         dot_with_parameters(g, stdout)
diff --git a/cwltool/docker.py b/cwltool/docker.py
index 812852f..9db4dc1 100644
--- a/cwltool/docker.py
+++ b/cwltool/docker.py
@@ -1,15 +1,20 @@
-import subprocess
+from __future__ import absolute_import
 import logging
-import sys
-import requests
 import os
-from .errors import WorkflowException
 import re
+import subprocess
+import sys
 import tempfile
-from typing import Any, Text, Union
+from io import open
+from typing import Dict, List, Text
+
+import requests
+
+from .errors import WorkflowException
 
 _logger = logging.getLogger("cwltool")
 
+
 def get_image(dockerRequirement, pull_image, dry_run=False):
     # type: (Dict[Text, Text], bool, bool) -> bool
     found = False
@@ -18,12 +23,23 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
         dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
 
     for ln in subprocess.check_output(
-            ["docker", "images", "--no-trunc", "--all"]).splitlines():
+            ["docker", "images", "--no-trunc", "--all"]).decode('utf-8').splitlines():
         try:
             m = re.match(r"^([^ ]+)\s+([^ ]+)\s+([^ ]+)", ln)
             sp = dockerRequirement["dockerImageId"].split(":")
             if len(sp) == 1:
                 sp.append("latest")
+            elif len(sp) == 2:
+                # if sp[1] doesn't match a valid tag name, it is part of the repository
+                if not re.match(r'[\w][\w.-]{0,127}', sp[1]):
+                    sp[0] = sp[0] + ":" + sp[1]
+                    sp[1] = "latest"
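+            # Three colon-separated parts occur when the image name includes
+            # a registry host with a port, e.g. "myregistry:5000/repo:tag".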
+            elif len(sp) == 3:
+                if re.match(r'[\w][\w.-]{0,127}', sp[2]):
+                    sp[0] = sp[0] + ":" + sp[1]
+                    sp[1] = sp[2]
+                    del sp[2]
+
             # check for repository:tag match or image id match
             if ((sp[0] == m.group(1) and sp[1] == m.group(2)) or dockerRequirement["dockerImageId"] == m.group(3)):
                 found = True
@@ -32,7 +48,7 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
             pass
 
     if not found and pull_image:
-        cmd = []  # type: List[str]
+        cmd = []  # type: List[Text]
         if "dockerPull" in dockerRequirement:
             cmd = ["docker", "pull", str(dockerRequirement["dockerPull"])]
             _logger.info(Text(cmd))
@@ -41,10 +57,10 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
                 found = True
         elif "dockerFile" in dockerRequirement:
             dockerfile_dir = str(tempfile.mkdtemp())
-            with open(os.path.join(dockerfile_dir, "Dockerfile"), "w") as df:
-                df.write(dockerRequirement["dockerFile"])
+            with open(os.path.join(dockerfile_dir, "Dockerfile"), "wb") as df:
+                df.write(dockerRequirement["dockerFile"].encode('utf-8'))
             cmd = ["docker", "build", "--tag=%s" %
-                str(dockerRequirement["dockerImageId"]), dockerfile_dir]
+                   str(dockerRequirement["dockerImageId"]), dockerfile_dir]
             _logger.info(Text(cmd))
             if not dry_run:
                 subprocess.check_call(cmd, stdout=sys.stderr)
@@ -62,7 +78,7 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
                     _logger.info(u"Sending GET request to %s", dockerRequirement["dockerLoad"])
                     req = requests.get(dockerRequirement["dockerLoad"], stream=True)
                     n = 0
-                    for chunk in req.iter_content(1024*1024):
+                    for chunk in req.iter_content(1024 * 1024):
                         n += len(chunk)
                         _logger.info("\r%i bytes" % (n))
                         loadproc.stdin.write(chunk)
@@ -73,7 +89,7 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
                 found = True
         elif "dockerImport" in dockerRequirement:
             cmd = ["docker", "import", str(dockerRequirement["dockerImport"]),
-                str(dockerRequirement["dockerImageId"])]
+                   str(dockerRequirement["dockerImageId"])]
             _logger.info(Text(cmd))
             if not dry_run:
                 subprocess.check_call(cmd, stdout=sys.stderr)
diff --git a/cwltool/docker_uid.py b/cwltool/docker_id.py
similarity index 72%
rename from cwltool/docker_uid.py
rename to cwltool/docker_id.py
index 11223c7..b570f93 100644
--- a/cwltool/docker_uid.py
+++ b/cwltool/docker_id.py
@@ -1,23 +1,26 @@
+from __future__ import print_function
+from __future__ import absolute_import
+
 import subprocess
-from typing import Text, Union
+from typing import List, Text, Tuple
 
 
-def docker_vm_uid():  # type: () -> int
+def docker_vm_id():  # type: () -> Tuple[int, int]
     """
-    Returns the UID of the default docker user inside the VM
+    Returns the User ID and Group ID of the default docker user inside the VM
 
     When a host is using boot2docker or docker-machine to run docker with
     boot2docker.iso (As on Mac OS X), the UID that mounts the shared filesystem
     inside the VirtualBox VM is likely different than the user's UID on the host.
-    :return: The numeric UID (as a string) of the docker account inside
+    :return: A tuple containing numeric User ID and Group ID of the docker account inside
     the boot2docker VM
     """
     if boot2docker_running():
-        return boot2docker_uid()
+        return boot2docker_id()
     elif docker_machine_running():
-        return docker_machine_uid()
+        return docker_machine_id()
     else:
-        return None
+        return (None, None)
 
 
 def check_output_and_strip(cmd):  # type: (List[Text]) -> Text
@@ -89,25 +92,29 @@ def cmd_output_to_int(cmd):  # type: (List[Text]) -> int
         except ValueError:
             # ValueError is raised if int conversion fails
             return None
+    return None
 
 
-def boot2docker_uid():  # type: () -> int
+def boot2docker_id():  # type: () -> Tuple[int, int]
     """
-    Gets the UID of the docker user inside a running boot2docker vm
-    :return: the UID, or None if error (e.g. boot2docker not present or stopped)
+    Gets the UID and GID of the docker user inside a running boot2docker vm
+    :return: Tuple (UID, GID), or (None, None) if error (e.g. boot2docker not present or stopped)
     """
-    return cmd_output_to_int(['boot2docker', 'ssh', 'id', '-u'])
-
+    uid = cmd_output_to_int(['boot2docker', 'ssh', 'id', '-u'])
+    gid = cmd_output_to_int(['boot2docker', 'ssh', 'id', '-g'])
+    return (uid, gid)
 
-def docker_machine_uid():  # type: () -> int
+def docker_machine_id():  # type: () -> Tuple[int, int]
     """
     Asks docker-machine for active machine and gets the UID of the docker user
     inside the vm
-    :return: the UID, or None if error (e.g. docker-machine not present or stopped)
+    :return: tuple (UID, GID), or (None, None) if error (e.g. docker-machine not present or stopped)
     """
     machine_name = docker_machine_name()
-    return cmd_output_to_int(['docker-machine', 'ssh', machine_name, "id -u"])
+    uid = cmd_output_to_int(['docker-machine', 'ssh', machine_name, "id -u"])
+    gid = cmd_output_to_int(['docker-machine', 'ssh', machine_name, "id -g"])
+    return (uid, gid)
 
 
 if __name__ == '__main__':
-    print docker_vm_uid()
+    print(docker_vm_id())
diff --git a/cwltool/draft2tool.py b/cwltool/draft2tool.py
index 9ca34d6..d2288f4 100644
--- a/cwltool/draft2tool.py
+++ b/cwltool/draft2tool.py
@@ -1,41 +1,55 @@
-import shutil
-from functools import partial
-import json
+from __future__ import absolute_import
 import copy
-import os
-import glob
-import logging
 import hashlib
+import json
+import logging
+import os
 import re
-import urlparse
+import shutil
 import tempfile
-import errno
+from functools import partial
+from typing import Any, Callable, Dict, Generator, List, Optional, Set, Text, Union, cast
+
+from six import string_types, u
 
-import avro.schema
 import schema_salad.validate as validate
-from schema_salad.ref_resolver import file_uri, uri_file_path
 import shellescape
-from typing import Any, Callable, cast, Generator, Text, Union
+from schema_salad.ref_resolver import file_uri, uri_file_path
+from schema_salad.sourceline import SourceLine, indent
+from six.moves import urllib
 
-from .process import Process, shortname, uniquename, getListing, normalizeFilesDirs, compute_checksums
+from .builder import CONTENT_LIMIT, Builder, substitute
 from .errors import WorkflowException
-from .utils import aslist
-from . import expression
-from .builder import CONTENT_LIMIT, substitute, Builder, adjustFileObjs, adjustDirObjs
-from .pathmapper import PathMapper
-from .job import CommandLineJob
+from .flatten import flatten
+from .job import CommandLineJob, DockerCommandLineJob, JobBase
+from .pathmapper import (PathMapper, adjustDirObjs, adjustFileObjs,
+                         get_listing, trim_listing, visit_class)
+from .process import (Process, UnsupportedRequirement,
+                      _logger_validation_warnings, compute_checksums,
+                      normalizeFilesDirs, shortname, uniquename)
 from .stdfsaccess import StdFsAccess
-
-from schema_salad.sourceline import SourceLine, indent
+from .utils import aslist, docker_windows_path_adjust, convert_pathsep_to_unix, windows_default_container_id, onWindows
+from six.moves import map
 
 ACCEPTLIST_EN_STRICT_RE = re.compile(r"^[a-zA-Z0-9._+-]+$")
-ACCEPTLIST_EN_RELAXED_RE = re.compile(r"^[ a-zA-Z0-9._+-]+$")  # with spaces
+ACCEPTLIST_EN_RELAXED_RE = re.compile(r".*")  # Accept anything
 ACCEPTLIST_RE = ACCEPTLIST_EN_STRICT_RE
+DEFAULT_CONTAINER_MSG="""We are on Microsoft Windows and not all components of this CWL description have a
+container specified. This means that these steps will be executed in the default container, 
+which is %s.
 
-from .flatten import flatten
+Note, this could affect portability if this CWL description relies on non-POSIX features
+or commands in this container. For best results add the following to your CWL
+description's hints section:
+
+hints:
+  DockerRequirement:
+    dockerPull: %s
+"""
 
 _logger = logging.getLogger("cwltool")
 
+
 class ExpressionTool(Process):
     def __init__(self, toolpath_object, **kwargs):
         # type: (Dict[Text, Any], **Any) -> None
@@ -59,18 +73,22 @@ class ExpressionTool(Process):
                 normalizeFilesDirs(ev)
                 self.output_callback(ev, "success")
             except Exception as e:
-                _logger.warn(u"Failed to evaluate expression:\n%s",
-                        e, exc_info=kwargs.get('debug'))
+                _logger.warning(u"Failed to evaluate expression:\n%s",
+                             e, exc_info=kwargs.get('debug'))
                 self.output_callback({}, "permanentFail")
 
-    def job(self, joborder, output_callback, **kwargs):
-        # type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator[ExpressionTool.ExpressionJob, None, None]
-        builder = self._init_job(joborder, **kwargs)
+    def job(self,
+            job_order,  # type: Dict[Text, Text]
+            output_callbacks,  # type: Callable[[Any, Any], Any]
+            **kwargs  # type: Any
+            ):
+        # type: (...) -> Generator[ExpressionTool.ExpressionJob, None, None]
+        builder = self._init_job(job_order, **kwargs)
 
         j = ExpressionTool.ExpressionJob()
         j.builder = builder
         j.script = self.tool["expression"]
-        j.output_callback = output_callback
+        j.output_callback = output_callbacks
         j.requirements = self.requirements
         j.hints = self.hints
         j.outdir = None
@@ -83,6 +101,7 @@ def remove_path(f):  # type: (Dict[Text, Any]) -> None
     if "path" in f:
         del f["path"]
 
+
 def revmap_file(builder, outdir, f):
     # type: (Builder, Text, Dict[Text, Any]) -> Union[Dict[Text, Any], None]
 
@@ -93,18 +112,21 @@ def revmap_file(builder, outdir, f):
     internal output directories to the external directory.
     """
 
-    split = urlparse.urlsplit(outdir)
+    split = urllib.parse.urlsplit(outdir)
     if not split.scheme:
         outdir = file_uri(str(outdir))
 
     if "location" in f:
         if f["location"].startswith("file://"):
-            path = uri_file_path(f["location"])
+            path = convert_pathsep_to_unix(uri_file_path(f["location"]))
             revmap_f = builder.pathmapper.reversemap(path)
-            if revmap_f:
-                f["location"] = revmap_f[1]
+            if revmap_f and not builder.pathmapper.mapper(revmap_f[0]).type.startswith("Writable"):
+                f["basename"] = os.path.basename(path)
+                f["location"] = revmap_f[0]
+            elif path == builder.outdir:
+                f["location"] = outdir
             elif path.startswith(builder.outdir):
-                f["location"] = builder.fs_access.join(outdir, path[len(builder.outdir)+1:])
+                f["location"] = builder.fs_access.join(outdir, path[len(builder.outdir) + 1:])
         return f
 
     if "path" in f:
@@ -115,10 +137,11 @@ def revmap_file(builder, outdir, f):
             f["location"] = revmap_f[1]
             return f
         elif path.startswith(builder.outdir):
-            f["location"] = builder.fs_access.join(outdir, path[len(builder.outdir)+1:])
+            f["location"] = builder.fs_access.join(outdir, path[len(builder.outdir) + 1:])
             return f
         else:
-            raise WorkflowException(u"Output file path %s must be within designated output directory (%s) or an input file pass through." % (path, builder.outdir))
+            raise WorkflowException(u"Output file path %s must be within designated output directory (%s) or an input "
+                                    u"file pass through." % (path, builder.outdir))
 
     raise WorkflowException(u"Output File object is missing both `location` and `path` fields: %s" % f)
 
@@ -139,11 +162,13 @@ class CallbackJob(object):
             self.outdir,
             kwargs.get("compute_checksum", True)), "success")
 
+
 # map files to assigned path inside a container. We need to also explicitly
 # walk over input as implicit reassignment doesn't reach everything in builder.bindings
 def check_adjust(builder, f):
     # type: (Builder, Dict[Text, Any]) -> Dict[Text, Any]
-    f["path"] = builder.pathmapper.mapper(f["location"])[1]
+
+    f["path"] = docker_windows_path_adjust(builder.pathmapper.mapper(f["location"])[1])
     f["dirname"], f["basename"] = os.path.split(f["path"])
     if f["class"] == "File":
         f["nameroot"], f["nameext"] = os.path.splitext(f["basename"])
@@ -151,21 +176,65 @@ def check_adjust(builder, f):
         raise WorkflowException("Invalid filename: '%s' contains illegal characters" % (f["basename"]))
     return f
 
+def check_valid_locations(fs_access, ob):
+    if ob["location"].startswith("_:"):
+        pass
+    if ob["class"] == "File" and not fs_access.isfile(ob["location"]):
+        raise validate.ValidationException("Does not exist or is not a File: '%s'" % ob["location"])
+    if ob["class"] == "Directory" and not fs_access.isdir(ob["location"]):
+        raise validate.ValidationException("Does not exist or is not a Directory: '%s'" % ob["location"])
+
 class CommandLineTool(Process):
     def __init__(self, toolpath_object, **kwargs):
         # type: (Dict[Text, Any], **Any) -> None
         super(CommandLineTool, self).__init__(toolpath_object, **kwargs)
+        self.find_default_container = kwargs.get("find_default_container", None)
 
-    def makeJobRunner(self):  # type: () -> CommandLineJob
-        return CommandLineJob()
+    def makeJobRunner(self, use_container=True):  # type: (Optional[bool]) -> JobBase
+        dockerReq, _ = self.get_requirement("DockerRequirement")
+        if not dockerReq and use_container:
+            if self.find_default_container:
+                default_container = self.find_default_container(self)
+                if default_container:
+                    self.requirements.insert(0, {
+                        "class": "DockerRequirement",
+                        "dockerPull": default_container
+                    })
+                    dockerReq = self.requirements[0]
+                    if default_container == windows_default_container_id and use_container and onWindows():
+                        _logger.warning(DEFAULT_CONTAINER_MSG%(windows_default_container_id, windows_default_container_id))
+
+        if dockerReq and use_container:
+            return DockerCommandLineJob()
+        else:
+            for t in reversed(self.requirements):
+                if t["class"] == "DockerRequirement":
+                    raise UnsupportedRequirement(
+                        "--no-container, but this CommandLineTool has "
+                        "DockerRequirement under 'requirements'.")
+            return CommandLineJob()
 
     def makePathMapper(self, reffiles, stagedir, **kwargs):
         # type: (List[Any], Text, **Any) -> PathMapper
-        dockerReq, _ = self.get_requirement("DockerRequirement")
         return PathMapper(reffiles, kwargs["basedir"], stagedir)
 
-    def job(self, joborder, output_callback, **kwargs):
-        # type: (Dict[Text, Text], Callable[..., Any], **Any) -> Generator[Union[CommandLineJob, CallbackJob], None, None]
+    def updatePathmap(self, outdir, pathmap, fn):
+        # type: (Text, PathMapper, Dict) -> None
+        if "location" in fn and fn["location"] in pathmap:
+            pathmap.update(fn["location"], pathmap.mapper(fn["location"]).resolved,
+                           os.path.join(outdir, fn["basename"]),
+                           ("Writable" if fn.get("writable") else "") + fn["class"], False)
+        for sf in fn.get("secondaryFiles", []):
+            self.updatePathmap(outdir, pathmap, sf)
+        for ls in fn.get("listing", []):
+            self.updatePathmap(os.path.join(outdir, fn["basename"]), pathmap, ls)
+
+    def job(self,
+            job_order,  # type: Dict[Text, Text]
+            output_callbacks,  # type: Callable[[Any, Any], Any]
+            **kwargs  # type: Any
+            ):
+        # type: (...) -> Generator[Union[JobBase, CallbackJob], None, None]
 
         jobname = uniquename(kwargs.get("name", shortname(self.tool.get("id", "job"))))
 
@@ -174,27 +243,37 @@ class CommandLineTool(Process):
             cacheargs["outdir"] = "/out"
             cacheargs["tmpdir"] = "/tmp"
             cacheargs["stagedir"] = "/stage"
-            cachebuilder = self._init_job(joborder, **cacheargs)
+            cachebuilder = self._init_job(job_order, **cacheargs)
             cachebuilder.pathmapper = PathMapper(cachebuilder.files,
                                                  kwargs["basedir"],
                                                  cachebuilder.stagedir,
                                                  separateDirs=False)
             _check_adjust = partial(check_adjust, cachebuilder)
-            adjustFileObjs(cachebuilder.files, _check_adjust)
-            adjustFileObjs(cachebuilder.bindings, _check_adjust)
-            adjustDirObjs(cachebuilder.files, _check_adjust)
-            adjustDirObjs(cachebuilder.bindings, _check_adjust)
-            cmdline = flatten(map(cachebuilder.generate_arg, cachebuilder.bindings))
+            visit_class([cachebuilder.files, cachebuilder.bindings],
+                        ("File", "Directory"), _check_adjust)
+
+            cmdline = flatten(list(map(cachebuilder.generate_arg, cachebuilder.bindings)))
             (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
-            if docker_req and kwargs.get("use_container") is not False:
+            dockerimg = None
+            if docker_req and kwargs.get("use_container"):
                 dockerimg = docker_req.get("dockerImageId") or docker_req.get("dockerPull")
+            elif kwargs.get("default_container", None) is not None and kwargs.get("use_container"):
+                dockerimg = kwargs.get("default_container")
+
+            if dockerimg:
                 cmdline = ["docker", "run", dockerimg] + cmdline
             keydict = {u"cmdline": cmdline}
 
-            for _,f in cachebuilder.pathmapper.items():
+            for location, f in cachebuilder.pathmapper.items():
                 if f.type == "File":
+                    checksum = next((e['checksum'] for e in cachebuilder.files
+                                     if 'location' in e and e['location'] == location
+                                     and 'checksum' in e
+                                     and e['checksum'] != 'sha1$hash'), None)
                     st = os.stat(f.resolved)
-                    keydict[f.resolved] = [st.st_size, int(st.st_mtime * 1000)]
+                    if checksum:
+                        keydict[f.resolved] = [st.st_size, checksum]
+                    else:
+                        keydict[f.resolved] = [st.st_size, int(st.st_mtime * 1000)]
 
             interesting = {"DockerRequirement",
                            "EnvVarRequirement",
@@ -205,23 +284,23 @@ class CommandLineTool(Process):
                     if r["class"] in interesting and r["class"] not in keydict:
                         keydict[r["class"]] = r
 
-            keydictstr = json.dumps(keydict, separators=(',',':'), sort_keys=True)
-            cachekey = hashlib.md5(keydictstr).hexdigest()
+            keydictstr = json.dumps(keydict, separators=(',', ':'), sort_keys=True)
+            cachekey = hashlib.md5(keydictstr.encode('utf-8')).hexdigest()
 
             _logger.debug("[job %s] keydictstr is %s -> %s", jobname,
-                    keydictstr, cachekey)
+                          keydictstr, cachekey)
 
             jobcache = os.path.join(kwargs["cachedir"], cachekey)
             jobcachepending = jobcache + ".pending"
 
             if os.path.isdir(jobcache) and not os.path.isfile(jobcachepending):
-                if docker_req and kwargs.get("use_container") is not False:
+                if docker_req and kwargs.get("use_container"):
                     cachebuilder.outdir = kwargs.get("docker_outdir") or "/var/spool/cwl"
                 else:
                     cachebuilder.outdir = jobcache
 
                 _logger.info("[job %s] Using cached output in %s", jobname, jobcache)
-                yield CallbackJob(self, output_callback, cachebuilder, jobcache)
+                yield CallbackJob(self, output_callbacks, cachebuilder, jobcache)
                 return
             else:
                 _logger.info("[job %s] Output of job will be cached in %s", jobname, jobcache)
@@ -230,22 +309,23 @@ class CommandLineTool(Process):
                 kwargs["outdir"] = jobcache
                 open(jobcachepending, "w").close()
 
-                def rm_pending_output_callback(output_callback, jobcachepending,
+                def rm_pending_output_callback(output_callbacks, jobcachepending,
                                                outputs, processStatus):
                     if processStatus == "success":
                         os.remove(jobcachepending)
-                    output_callback(outputs, processStatus)
-                output_callback = cast(
+                    output_callbacks(outputs, processStatus)
+
+                output_callbacks = cast(
                     Callable[..., Any],  # known bug in mypy
                     # https://github.com/python/mypy/issues/797
-                    partial(rm_pending_output_callback, output_callback,
-                        jobcachepending))
+                    partial(rm_pending_output_callback, output_callbacks,
+                            jobcachepending))
 
-        builder = self._init_job(joborder, **kwargs)
+        builder = self._init_job(job_order, **kwargs)
 
         reffiles = copy.deepcopy(builder.files)
 
-        j = self.makeJobRunner()
+        j = self.makeJobRunner(kwargs.get("use_container"))
         j.builder = builder
         j.joborder = builder.job
         j.stdin = None
@@ -260,11 +340,10 @@ class CommandLineTool(Process):
 
         if _logger.isEnabledFor(logging.DEBUG):
             _logger.debug(u"[job %s] initializing from %s%s",
-                         j.name,
-                         self.tool.get("id", ""),
-                         u" as part of %s" % kwargs["part_of"] if "part_of" in kwargs else "")
-            _logger.debug(u"[job %s] %s", j.name, json.dumps(joborder, indent=4))
-
+                          j.name,
+                          self.tool.get("id", ""),
+                          u" as part of %s" % kwargs["part_of"] if "part_of" in kwargs else "")
+            _logger.debug(u"[job %s] %s", j.name, json.dumps(job_order, indent=4))
 
         builder.pathmapper = None
         make_path_mapper_kwargs = kwargs
@@ -274,15 +353,52 @@ class CommandLineTool(Process):
         builder.pathmapper = self.makePathMapper(reffiles, builder.stagedir, **make_path_mapper_kwargs)
         builder.requirements = j.requirements
 
-        if _logger.isEnabledFor(logging.DEBUG):
-            _logger.debug(u"[job %s] path mappings is %s", j.name, json.dumps({p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()}, indent=4))
-
         _check_adjust = partial(check_adjust, builder)
 
-        adjustFileObjs(builder.files, _check_adjust)
-        adjustFileObjs(builder.bindings, _check_adjust)
-        adjustDirObjs(builder.files, _check_adjust)
-        adjustDirObjs(builder.bindings, _check_adjust)
+        visit_class([builder.files, builder.bindings], ("File", "Directory"), _check_adjust)
+
+        initialWorkdir = self.get_requirement("InitialWorkDirRequirement")[0]
+        j.generatefiles = {"class": "Directory", "listing": [], "basename": ""}
+        if initialWorkdir:
+            ls = []  # type: List[Dict[Text, Any]]
+            if isinstance(initialWorkdir["listing"], (str, Text)):
+                ls = builder.do_eval(initialWorkdir["listing"])
+            else:
+                for t in initialWorkdir["listing"]:
+                    if "entry" in t:
+                        et = {u"entry": builder.do_eval(t["entry"])}
+                        if "entryname" in t:
+                            et["entryname"] = builder.do_eval(t["entryname"])
+                        else:
+                            et["entryname"] = None
+                        et["writable"] = t.get("writable", False)
+                        ls.append(et)
+                    else:
+                        ls.append(builder.do_eval(t))
+            for i, t in enumerate(ls):
+                if "entry" in t:
+                    if isinstance(t["entry"], string_types):
+                        ls[i] = {
+                            "class": "File",
+                            "basename": t["entryname"],
+                            "contents": t["entry"],
+                            "writable": t.get("writable")
+                        }
+                    else:
+                        if t.get("entryname") or t.get("writable"):
+                            t = copy.deepcopy(t)
+                            if t.get("entryname"):
+                                t["entry"]["basename"] = t["entryname"]
+                            t["entry"]["writable"] = t.get("writable")
+                        ls[i] = t["entry"]
+            j.generatefiles[u"listing"] = ls
+            for l in ls:
+                self.updatePathmap(builder.outdir, builder.pathmapper, l)
+            visit_class([builder.files, builder.bindings], ("File", "Directory"), _check_adjust)
+
+        if _logger.isEnabledFor(logging.DEBUG):
+            _logger.debug(u"[job %s] path mappings is %s", j.name,
+                          json.dumps({p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()}, indent=4))
 
         if self.tool.get("stdin"):
             with SourceLine(self.tool, "stdin", validate.ValidationException):
@@ -316,43 +432,39 @@ class CommandLineTool(Process):
             j.tmpdir = builder.tmpdir
             j.stagedir = builder.stagedir
 
-        initialWorkdir = self.get_requirement("InitialWorkDirRequirement")[0]
-        j.generatefiles = {"class": "Directory", "listing": [], "basename": ""}
-        if initialWorkdir:
-            ls = []  # type: List[Dict[Text, Any]]
-            if isinstance(initialWorkdir["listing"], (str, Text)):
-                ls = builder.do_eval(initialWorkdir["listing"])
-            else:
-                for t in initialWorkdir["listing"]:
-                    if "entry" in t:
-                        et = {u"entry": builder.do_eval(t["entry"])}
-                        if "entryname" in t:
-                            et["entryname"] = builder.do_eval(t["entryname"])
-                        else:
-                            et["entryname"] = None
-                        et["writable"] = t.get("writable", False)
-                        ls.append(et)
-                    else:
-                        ls.append(builder.do_eval(t))
-            for i,t in enumerate(ls):
-                if "entry" in t:
-                    if isinstance(t["entry"], basestring):
-                        ls[i] = {
-                            "class": "File",
-                            "basename": t["entryname"],
-                            "contents": t["entry"],
-                            "writable": t.get("writable")
-                        }
-                    else:
-                        if t["entryname"]:
-                            t = copy.deepcopy(t)
-                            t["entry"]["basename"] = t["entryname"]
-                            t["entry"]["writable"] = t.get("writable")
-                        ls[i] = t["entry"]
-            j.generatefiles[u"listing"] = ls
+        inplaceUpdateReq = self.get_requirement("http://commonwl.org/cwltool#InplaceUpdateRequirement")[0]
 
+        if inplaceUpdateReq:
+            j.inplace_update = inplaceUpdateReq["inplaceUpdate"]
         normalizeFilesDirs(j.generatefiles)
 
+        readers = {}
+        muts = set()
+
+        if builder.mutation_manager:
+            def register_mut(f):
+                muts.add(f["location"])
+                builder.mutation_manager.register_mutation(j.name, f)
+
+            def register_reader(f):
+                if f["location"] not in muts:
+                    builder.mutation_manager.register_reader(j.name, f)
+                    readers[f["location"]] = f
+
+            for li in j.generatefiles["listing"]:
+                li = cast(Dict[Text, Any], li)
+                if li.get("writable") and j.inplace_update:
+                    adjustFileObjs(li, register_mut)
+                    adjustDirObjs(li, register_mut)
+                else:
+                    adjustFileObjs(li, register_reader)
+                    adjustDirObjs(li, register_reader)
+
+            adjustFileObjs(builder.files, register_reader)
+            adjustFileObjs(builder.bindings, register_reader)
+            adjustDirObjs(builder.files, register_reader)
+            adjustDirObjs(builder.bindings, register_reader)
+
         j.environment = {}
         evr = self.get_requirement("EnvVarRequirement")[0]
         if evr:
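A sketch of the reader/mutator bookkeeping above: writable entries of the
initial working directory are registered as mutations, everything else as
readers, so a file being updated in place is never also handed out for
reading (the locations below are made up):

    muts = set()
    readers = {}

    def register_mut(f):
        muts.add(f["location"])

    def register_reader(f):
        if f["location"] not in muts:
            readers[f["location"]] = f

    register_mut({"location": "file:///data/a.bam", "class": "File"})
    register_reader({"location": "file:///data/a.bam", "class": "File"})  # skipped: already a mutation
    register_reader({"location": "file:///data/b.bam", "class": "File"})
    print(sorted(muts), sorted(readers))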
@@ -369,20 +481,22 @@ class CommandLineTool(Process):
                 cmd.extend(aslist(arg))
             j.command_line = ["/bin/sh", "-c", " ".join(cmd)]
         else:
-            j.command_line = flatten(map(builder.generate_arg, builder.bindings))
+            j.command_line = flatten(list(map(builder.generate_arg, builder.bindings)))
 
         j.pathmapper = builder.pathmapper
         j.collect_outputs = partial(
             self.collect_output_ports, self.tool["outputs"], builder,
-            compute_checksum=kwargs.get("compute_checksum", True))
-        j.output_callback = output_callback
+            compute_checksum=kwargs.get("compute_checksum", True),
+            jobname=jobname,
+            readers=readers)
+        j.output_callback = output_callbacks
 
         yield j
 
-    def collect_output_ports(self, ports, builder, outdir, compute_checksum=True):
-        # type: (Set[Dict[Text, Any]], Builder, Text, bool) -> Dict[Text, Union[Text, List[Any], Dict[Text, Any]]]
+    def collect_output_ports(self, ports, builder, outdir, compute_checksum=True, jobname="", readers=None):
+        # type: (Set[Dict[Text, Any]], Builder, Text, bool, Text, Dict[Text, Any]) -> Dict[Text, Union[Text, List[Any], Dict[Text, Any]]]
+        ret = {}  # type: Dict[Text, Union[Text, List[Any], Dict[Text, Any]]]
         try:
-            ret = {}  # type: Dict[Text, Union[Text, List[Any], Dict[Text, Any]]]
             fs_access = builder.make_fs_access(outdir)
             custom_output = fs_access.join(outdir, "cwl.output.json")
             if fs_access.exists(custom_output):
@@ -395,30 +509,38 @@ class CommandLineTool(Process):
                     with SourceLine(ports, i, WorkflowException):
                         fragment = shortname(port["id"])
                         try:
-                            ret[fragment] = self.collect_output(port, builder, outdir, fs_access, compute_checksum=compute_checksum)
+                            ret[fragment] = self.collect_output(port, builder, outdir, fs_access,
+                                                                compute_checksum=compute_checksum)
                         except Exception as e:
                             _logger.debug(
                                 u"Error collecting output for parameter '%s'"
                                 % shortname(port["id"]), exc_info=True)
                             raise WorkflowException(
                                 u"Error collecting output for parameter '%s':\n%s"
-                                % (shortname(port["id"]), indent(unicode(e))))
+                                % (shortname(port["id"]), indent(u(str(e)))))
 
             if ret:
-                adjustFileObjs(ret,
-                        cast(Callable[[Any], Any],  # known bug in mypy
-                            # https://github.com/python/mypy/issues/797
-                            partial(revmap_file, builder, outdir)))
-                adjustFileObjs(ret, remove_path)
-                adjustDirObjs(ret, remove_path)
+                revmap = partial(revmap_file, builder, outdir)
+                adjustDirObjs(ret, trim_listing)
+                visit_class(ret, ("File", "Directory"), cast(Callable[[Any], Any], revmap))
+                visit_class(ret, ("File", "Directory"), remove_path)
                 normalizeFilesDirs(ret)
+                if builder.mutation_manager:
+                    adjustFileObjs(ret, builder.mutation_manager.set_generation)
+                visit_class(ret, ("File", "Directory"), partial(check_valid_locations, fs_access))
+
                 if compute_checksum:
                     adjustFileObjs(ret, partial(compute_checksums, fs_access))
 
-            validate.validate_ex(self.names.get_name("outputs_record_schema", ""), ret)
+            validate.validate_ex(self.names.get_name("outputs_record_schema", ""), ret,
+                                 strict=False, logger=_logger_validation_warnings)
             return ret if ret is not None else {}
         except validate.ValidationException as e:
-            raise WorkflowException("Error validating output record, " + Text(e) + "\n in " + json.dumps(ret, indent=4))
+            raise WorkflowException("Error validating output record. " + Text(e) + "\n in " + json.dumps(ret, indent=4))
+        finally:
+            if builder.mutation_manager and readers:
+                for r in readers.values():
+                    builder.mutation_manager.release_reader(jobname, r)
 
     def collect_output(self, schema, builder, outdir, fs_access, compute_checksum=True):
         # type: (Dict[Text, Any], Builder, Text, StdFsAccess, bool) -> Union[Dict[Text, Any], List[Union[Dict[Text, Any], Text]]]
@@ -438,7 +560,7 @@ class CommandLineTool(Process):
 
                     for gb in globpatterns:
                         if gb.startswith(outdir):
-                            gb = gb[len(outdir)+1:]
+                            gb = gb[len(outdir) + 1:]
                         elif gb == ".":
                             gb = outdir
                         elif gb.startswith("/"):
@@ -448,23 +570,28 @@ class CommandLineTool(Process):
                                        "class": "File" if fs_access.isfile(g) else "Directory"}
                                       for g in fs_access.glob(fs_access.join(outdir, gb))])
                         except (OSError, IOError) as e:
-                            _logger.warn(Text(e))
+                            _logger.warning(Text(e))
+                        except Exception:
+                            _logger.error("Unexpected error from fs_access", exc_info=True)
+                            raise
 
                 for files in r:
-                    if files["class"] == "Directory" and "listing" not in files:
-                        getListing(fs_access, files)
+                    if files["class"] == "Directory":
+                        ll = builder.loadListing or (binding and binding.get("loadListing"))
+                        if ll and ll != "no_listing":
+                            get_listing(fs_access, files, (ll == "deep_listing"))
                     else:
                         with fs_access.open(files["location"], "rb") as f:
-                            contents = ""
+                            contents = b""
                             if binding.get("loadContents") or compute_checksum:
                                 contents = f.read(CONTENT_LIMIT)
                             if binding.get("loadContents"):
                                 files["contents"] = contents
                             if compute_checksum:
                                 checksum = hashlib.sha1()
-                                while contents != "":
+                                while contents != b"":
                                     checksum.update(contents)
-                                    contents = f.read(1024*1024)
+                                    contents = f.read(1024 * 1024)
                                 files["checksum"] = "sha1$%s" % checksum.hexdigest()
                             f.seek(0, 2)
                             filesize = f.tell()
@@ -511,7 +638,7 @@ class CommandLineTool(Process):
                             for sf in aslist(schema["secondaryFiles"]):
                                 if isinstance(sf, dict) or "$(" in sf or "${" in sf:
                                     sfpath = builder.do_eval(sf, context=primary)
-                                    if isinstance(sfpath, basestring):
+                                    if isinstance(sfpath, string_types):
                                         sfpath = revmap({"location": sfpath, "class": "File"})
                                 else:
                                     sfpath = {"location": substitute(primary["location"], sf), "class": "File"}
@@ -523,8 +650,7 @@ class CommandLineTool(Process):
             if not r and optional:
                 r = None
 
-        if (not r and isinstance(schema["type"], dict) and
-                schema["type"]["type"] == "record"):
+        if (not r and isinstance(schema["type"], dict) and schema["type"]["type"] == "record"):
             out = {}
             for f in schema["type"]["fields"]:
                 out[shortname(f["name"])] = self.collect_output(  # type: ignore
diff --git a/cwltool/errors.py b/cwltool/errors.py
index 6bf187c..9df1236 100644
--- a/cwltool/errors.py
+++ b/cwltool/errors.py
@@ -1,5 +1,6 @@
 class WorkflowException(Exception):
     pass
 
+
 class UnsupportedRequirement(WorkflowException):
     pass
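Callers can rely on this hierarchy: catching WorkflowException also catches
UnsupportedRequirement, or the two can be told apart. A sketch with a
stand-in run() function:

    from cwltool.errors import UnsupportedRequirement, WorkflowException

    def run():  # stand-in for invoking a tool or workflow
        raise UnsupportedRequirement("DockerRequirement, but --no-container was given")

    try:
        run()
    except UnsupportedRequirement as e:
        print("runner lacks a required feature:", e)
    except WorkflowException as e:
        print("workflow failed:", e)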
diff --git a/cwltool/expression.py b/cwltool/expression.py
index 96cbe99..777e9f9 100644
--- a/cwltool/expression.py
+++ b/cwltool/expression.py
@@ -1,39 +1,50 @@
-import subprocess
+from __future__ import absolute_import
+import copy
 import json
 import logging
-import os
 import re
-import copy
+from typing import Any, AnyStr, Dict, List, Text, Union
+from .utils import docker_windows_path_adjust
+import six
+from six import u
 
-from typing import Any, AnyStr, Union, Text, Dict, List
-import schema_salad.validate as validate
-import schema_salad.ref_resolver
-
-from .utils import aslist, get_feature
-from .errors import WorkflowException
 from . import sandboxjs
-from . import docker
+from .errors import WorkflowException
+from .utils import bytes2str_in_dicts
 
 _logger = logging.getLogger("cwltool")
 
+
 def jshead(engineConfig, rootvars):
     # type: (List[Text], Dict[Text, Any]) -> Text
+
+    # Make sure all byte strings in the `rootvars` dict
+    # are converted to str.
+    # TODO: ensure the `rootvars` dict contains no bytes
+    # in the first place.
+    if six.PY3:
+        rootvars = bytes2str_in_dicts(rootvars)  # type: ignore
+
     return u"\n".join(engineConfig + [u"var %s = %s;" % (k, json.dumps(v, indent=4)) for k, v in rootvars.items()])
 
 
+# decode all raw strings to unicode
 seg_symbol = r"""\w+"""
 seg_single = r"""\['([^']|\\')+'\]"""
 seg_double = r"""\["([^"]|\\")+"\]"""
 seg_index = r"""\[[0-9]+\]"""
 segments = r"(\.%s|%s|%s|%s)" % (seg_symbol, seg_single, seg_double, seg_index)
-segment_re = re.compile(segments, flags=re.UNICODE)
-param_re = re.compile(r"\((%s)%s*\)$" % (seg_symbol, segments), flags=re.UNICODE)
+segment_re = re.compile(u(segments), flags=re.UNICODE)
+param_str = r"\((%s)%s*\)$" % (seg_symbol, segments)
+param_re = re.compile(u(param_str), flags=re.UNICODE)
+
+JSON = Union[Dict[Any, Any], List[Any], Text, int, float, bool, None]
 
-JSON = Union[Dict[Any,Any], List[Any], Text, int, long, float, bool, None]
 
 class SubstitutionError(Exception):
     pass
 
+
 def scanner(scan):  # type: (Text) -> List[int]
     DEFAULT = 0
     DOLLAR = 1
@@ -58,13 +69,13 @@ def scanner(scan):  # type: (Text) -> List[int]
         elif state == BACKSLASH:
             stack.pop()
             if stack[-1] == DEFAULT:
-                return [i-1, i+1]
+                return [i - 1, i + 1]
         elif state == DOLLAR:
             if c == '(':
-                start = i-1
+                start = i - 1
                 stack.append(PAREN)
             elif c == '{':
-                start = i-1
+                start = i - 1
                 stack.append(BRACE)
             else:
                 stack.pop()
@@ -74,7 +85,7 @@ def scanner(scan):  # type: (Text) -> List[int]
             elif c == ')':
                 stack.pop()
                 if stack[-1] == DOLLAR:
-                    return [start, i+1]
+                    return [start, i + 1]
             elif c == "'":
                 stack.append(SINGLE_QUOTE)
             elif c == '"':
@@ -85,7 +96,7 @@ def scanner(scan):  # type: (Text) -> List[int]
             elif c == '}':
                 stack.pop()
                 if stack[-1] == DOLLAR:
-                    return [start, i+1]
+                    return [start, i + 1]
             elif c == "'":
                 stack.append(SINGLE_QUOTE)
             elif c == '"':
@@ -103,14 +114,16 @@ def scanner(scan):  # type: (Text) -> List[int]
         i += 1
 
     if len(stack) > 1:
-        raise SubstitutionError("Substitution error, unfinished block starting at position {}: {}".format(start, scan[start:]))
+        raise SubstitutionError(
+            "Substitution error, unfinished block starting at position {}: {}".format(start, scan[start:]))
     else:
         return None
 
+
 def next_seg(remain, obj):  # type: (Text, Any) -> Any
     if remain:
         m = segment_re.match(remain)
-        key = None  # type: Union[str, int]
+        key = None  # type: Union[Text, int]
         if m.group(0)[0] == '.':
             key = m.group(0)[1:]
         elif m.group(0)[1] in ("'", '"'):
@@ -127,7 +140,7 @@ def next_seg(remain, obj):  # type: (Text, Any) -> Any
             try:
                 key = int(m.group(0)[1:-1])
             except ValueError as v:
-                raise WorkflowException(unicode(v))
+                raise WorkflowException(u(str(v)))
             if not isinstance(obj, list):
                 raise WorkflowException(" is a %s, cannot index on int '%s'" % (type(obj).__name__, key))
             if key >= len(obj):
@@ -139,10 +152,13 @@ def next_seg(remain, obj):  # type: (Text, Any) -> Any
     else:
         return obj
 
+
 def evaluator(ex, jslib, obj, fullJS=False, timeout=None, debug=False):
     # type: (Text, Text, Dict[Text, Any], bool, int, bool) -> JSON
     m = param_re.match(ex)
     if m:
+        if m.end(1) + 1 == len(ex) and m.group(1) == "null":
+            return None
         try:
             return next_seg(m.group(0)[m.end(1) - m.start(0):-1], obj[m.group(1)])
         except Exception as w:
@@ -150,7 +166,10 @@ def evaluator(ex, jslib, obj, fullJS=False, timeout=None, debug=False):
     elif fullJS:
         return sandboxjs.execjs(ex, jslib, timeout=timeout, debug=debug)
     else:
-        raise sandboxjs.JavascriptException("Syntax error in parameter reference '%s' or used Javascript code without specifying InlineJavascriptRequirement.", ex)
+        raise sandboxjs.JavascriptException(
+            "Syntax error in parameter reference '%s' or used Javascript code without specifying InlineJavascriptRequirement.",
+            ex)
+
 
 def interpolate(scan, rootvars,
                 timeout=None, fullJS=None, jslib="", debug=False):
@@ -162,7 +181,7 @@ def interpolate(scan, rootvars,
         parts.append(scan[0:w[0]])
 
         if scan[w[0]] == '$':
-            e = evaluator(scan[w[0]+1:w[1]], jslib, rootvars, fullJS=fullJS,
+            e = evaluator(scan[w[0] + 1:w[1]], jslib, rootvars, fullJS=fullJS,
                           timeout=timeout, debug=debug)
             if w[0] == 0 and w[1] == len(scan):
                 return e
@@ -171,7 +190,7 @@ def interpolate(scan, rootvars,
                 leaf = leaf[1:-1]
             parts.append(leaf)
         elif scan[w[0]] == '\\':
-            e = scan[w[1]-1]
+            e = scan[w[1] - 1]
             parts.append(e)
 
         scan = scan[w[1]:]
@@ -179,18 +198,19 @@ def interpolate(scan, rootvars,
     parts.append(scan)
     return ''.join(parts)
 
+
 def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources,
             context=None, pull_image=True, timeout=None, debug=False):
     # type: (Union[dict, AnyStr], Dict[Text, Union[Dict, List, Text]], List[Dict[Text, Any]], Text, Text, Dict[Text, Union[int, Text]], Any, bool, int, bool) -> Any
 
     runtime = copy.copy(resources)
-    runtime["tmpdir"] = tmpdir
-    runtime["outdir"] = outdir
+    runtime["tmpdir"] = docker_windows_path_adjust(tmpdir)
+    runtime["outdir"] = docker_windows_path_adjust(outdir)
 
     rootvars = {
         u"inputs": jobinput,
         u"self": context,
-        u"runtime": runtime }
+        u"runtime": runtime}
 
     if isinstance(ex, (str, Text)):
         fullJS = False
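The parameter-reference grammar above can be exercised on its own; the
patterns below are copied from this file and the sample references are
illustrative:

    import re

    seg_symbol = r"""\w+"""
    seg_single = r"""\['([^']|\\')+'\]"""
    seg_double = r"""\["([^"]|\\")+"\]"""
    seg_index = r"""\[[0-9]+\]"""
    segments = r"(\.%s|%s|%s|%s)" % (seg_symbol, seg_single, seg_double, seg_index)
    param_re = re.compile(r"\((%s)%s*\)$" % (seg_symbol, segments), flags=re.UNICODE)

    for ref in ("(inputs.threads)", "(self[0].contents)", "(runtime['outdir'])"):
        print(ref, "->", bool(param_re.match(ref)))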
diff --git a/cwltool/extensions.yml b/cwltool/extensions.yml
new file mode 100644
index 0000000..2cfd1ea
--- /dev/null
+++ b/cwltool/extensions.yml
@@ -0,0 +1,36 @@
+$base: http://commonwl.org/cwltool#
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+$graph:
+- $import: https://w3id.org/cwl/CommonWorkflowLanguage.yml
+
+- name: LoadListingRequirement
+  type: record
+  extends: cwl:ProcessRequirement
+  inVocab: false
+  fields:
+    class:
+      type: string
+      doc: "Always 'LoadListingRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    loadListing:
+      type:
+        - type: enum
+          name: LoadListingEnum
+          symbols: [no_listing, shallow_listing, deep_listing]
+
+- name: InplaceUpdateRequirement
+  type: record
+  inVocab: false
+  extends: cwl:ProcessRequirement
+  fields:
+    class:
+      type: string
+      doc: "Always 'InplaceUpdateRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    inplaceUpdate:
+      type: boolean
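Hypothetically, a tool opting into both extensions would carry requirement
entries shaped like the records above (expressed here as the Python dicts
cwltool sees after loading; the tool itself is made up):

    tool_requirements = [
        {"class": "http://commonwl.org/cwltool#LoadListingRequirement",
         "loadListing": "shallow_listing"},
        {"class": "http://commonwl.org/cwltool#InplaceUpdateRequirement",
         "inplaceUpdate": True},
    ]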
diff --git a/cwltool/factory.py b/cwltool/factory.py
index be2468b..4ca2f33 100644
--- a/cwltool/factory.py
+++ b/cwltool/factory.py
@@ -1,11 +1,11 @@
-from . import main
-from . import load_tool
-from . import workflow
+from __future__ import absolute_import
 import os
-from .process import Process
-from typing import Any, Text, Union, Tuple
 from typing import Callable as tCallable
-import argparse
+from typing import Any, Dict, Text, Tuple, Union
+
+from . import load_tool, main, workflow
+from .process import Process
+
 
 class WorkflowStatus(Exception):
     def __init__(self, out, status):
@@ -14,6 +14,7 @@ class WorkflowStatus(Exception):
         self.out = out
         self.status = status
 
+
 class Callable(object):
     def __init__(self, t, factory):  # type: (Process, Factory) -> None
         self.t = t
@@ -29,11 +30,15 @@ class Callable(object):
         else:
             return out
 
+
 class Factory(object):
-    def __init__(self, makeTool=workflow.defaultMakeTool,
-                 executor=main.single_job_executor,
-                 **execkwargs):
-        # type: (tCallable[[Dict[Text, Any], Any], Process],tCallable[...,Tuple[Dict[Text,Any], Text]], **Any) -> None
+    def __init__(self,
+                 makeTool=workflow.defaultMakeTool,  # type: tCallable[[Any], Process]
+                 # should be tCallable[[Dict[Text, Any], Any], Process] ?
+                 executor=main.single_job_executor,  # type: tCallable[...,Tuple[Dict[Text,Any], Text]]
+                 **execkwargs  # type: Any
+                 ):
+        # type: (...) -> None
         self.makeTool = makeTool
         self.executor = executor
         self.execkwargs = execkwargs
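The factory gives a Python-callable view of a CWL document. A minimal
sketch, assuming an echo.cwl with an "inp" input exists in the current
directory:

    import cwltool.factory

    fac = cwltool.factory.Factory()
    echo = fac.make("echo.cwl")
    result = echo(inp="foo")  # runs the tool; raises WorkflowStatus on failure
    print(result)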
diff --git a/cwltool/flatten.py b/cwltool/flatten.py
index 477444a..e3c0345 100644
--- a/cwltool/flatten.py
+++ b/cwltool/flatten.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
 from typing import Any, Callable, List, cast
 
 # http://rightfootin.blogspot.com/2006/09/more-on-python-flatten.html
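For reference, flatten() recursively flattens nested lists and tuples:

    from cwltool.flatten import flatten

    print(flatten([1, [2, [3, 4]], 5]))  # -> [1, 2, 3, 4, 5]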
diff --git a/cwltool/job.py b/cwltool/job.py
index 8b331a8..7227e99 100644
--- a/cwltool/job.py
+++ b/cwltool/job.py
@@ -1,26 +1,30 @@
-import subprocess
+from __future__ import absolute_import
+import functools
 import io
-import os
-import tempfile
-import glob
 import json
 import logging
-import sys
-import requests
-from . import docker
-from .process import get_feature, empty_subtree, stageFiles
-from .errors import WorkflowException
+import os
+import re
 import shutil
 import stat
-import re
+import subprocess
+import sys
+import tempfile
+from io import open
+from typing import (IO, Any, Callable, Dict, Iterable, List, MutableMapping, Text,
+                    Tuple, Union, cast)
+
 import shellescape
-import string
-from .docker_uid import docker_vm_uid
+
+from .utils import copytree_with_merge, docker_windows_path_adjust, onWindows
+from . import docker
 from .builder import Builder
-from typing import (Any, Callable, Union, Iterable, Mapping, MutableMapping,
-        IO, cast, Text, Tuple)
+from .docker_id import docker_vm_id
+from .errors import WorkflowException
 from .pathmapper import PathMapper
-import functools
+from .process import (UnsupportedRequirement, empty_subtree, get_feature,
+                      stageFiles)
+from .utils import bytes2str_in_dicts
 
 _logger = logging.getLogger("cwltool")
 
@@ -34,6 +38,7 @@ python "run_job.py" "job.json"
 
 PYTHON_RUN_SCRIPT = """
 import json
+import os
 import sys
 import subprocess
 
@@ -42,6 +47,7 @@ with open(sys.argv[1], "r") as f:
     commands = popen_description["commands"]
     cwd = popen_description["cwd"]
     env = popen_description["env"]
+    env["PATH"] = os.environ.get("PATH")
     stdin_path = popen_description["stdin_path"]
     stdout_path = popen_description["stdout_path"]
     stderr_path = popen_description["stderr_path"]
@@ -68,7 +74,7 @@ with open(sys.argv[1], "r") as f:
     if sp.stdin:
         sp.stdin.close()
     rcode = sp.wait()
-    if isinstance(stdin, file):
+    if stdin is not subprocess.PIPE:
         stdin.close()
     if stdout is not sys.stderr:
         stdout.close()
@@ -83,6 +89,7 @@ def deref_links(outputs):  # type: (Any) -> None
         if outputs.get("class") == "File":
             st = os.lstat(outputs["path"])
             if stat.S_ISLNK(st.st_mode):
+                outputs["basename"] = os.path.basename(outputs["path"])
                 outputs["path"] = os.readlink(outputs["path"])
         else:
             for v in outputs.values():
@@ -91,8 +98,26 @@ def deref_links(outputs):  # type: (Any) -> None
         for v in outputs:
             deref_links(v)
 
-class CommandLineJob(object):
+def relink_initialworkdir(pathmapper, inplace_update=False):
+    # type: (PathMapper, bool) -> None
+    for src, vol in pathmapper.items():
+        if not vol.staged:
+            continue
+        if vol.type in ("File", "Directory") or (inplace_update and
+                                                 vol.type in ("WritableFile", "WritableDirectory")):
+            if os.path.islink(vol.target) or os.path.isfile(vol.target):
+                os.remove(vol.target)
+            elif os.path.isdir(vol.target):
+                shutil.rmtree(vol.target)
+            if onWindows():
+                if vol.type in ("File", "WritableFile"):
+                    shutil.copy(vol.resolved, vol.target)
+                elif vol.type in ("Directory", "WritableDirectory"):
+                    copytree_with_merge(vol.resolved, vol.target)
+            else:
+                os.symlink(vol.resolved, vol.target)
 
+class JobBase(object):
     def __init__(self):  # type: () -> None
         self.builder = None  # type: Builder
         self.joborder = None  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
@@ -107,6 +132,7 @@ class CommandLineJob(object):
         self.name = None  # type: Text
         self.command_line = None  # type: List[Text]
         self.pathmapper = None  # type: PathMapper
+        self.generatemapper = None  # type: PathMapper
         self.collect_outputs = None  # type: Union[Callable[[Any], Any], functools.partial[Any]]
         self.output_callback = None  # type: Callable[[Any, Any], Any]
         self.outdir = None  # type: Text
@@ -114,100 +140,31 @@ class CommandLineJob(object):
         self.environment = None  # type: MutableMapping[Text, Text]
         self.generatefiles = None  # type: Dict[Text, Union[List[Dict[Text, Text]], Dict[Text, Text], Text]]
         self.stagedir = None  # type: Text
+        self.inplace_update = None  # type: bool
 
-    def run(self, dry_run=False, pull_image=True, rm_container=True,
-            rm_tmpdir=True, move_outputs="move", **kwargs):
-        # type: (bool, bool, bool, bool, Text, **Any) -> Union[Tuple[Text, Dict[None, None]], None]
+    def _setup(self):  # type: () -> None
         if not os.path.exists(self.outdir):
             os.makedirs(self.outdir)
 
-        #with open(os.path.join(outdir, "cwl.input.json"), "w") as fp:
-        #    json.dump(self.joborder, fp)
-
-        runtime = []  # type: List[Text]
-
-        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
-
         for knownfile in self.pathmapper.files():
             p = self.pathmapper.mapper(knownfile)
-            if p.type == "File" and not os.path.isfile(p[0]):
+            if p.type == "File" and not os.path.isfile(p[0]) and p.staged:
                 raise WorkflowException(
                     u"Input file %s (at %s) not found or is not a regular "
                     "file." % (knownfile, self.pathmapper.mapper(knownfile)[0]))
 
-        img_id = None
-        env = None  # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
-        if docker_req and kwargs.get("use_container") is not False:
-            env = os.environ
-            img_id = docker.get_from_requirements(docker_req, docker_is_req, pull_image)
-        elif kwargs.get("default_container", None) is not None:
-            env = os.environ
-            img_id = kwargs.get("default_container")
-
-        if docker_is_req and img_id is None:
-            raise WorkflowException("Docker is required for running this tool.")
-
-        if img_id:
-            runtime = ["docker", "run", "-i"]
-            for src in self.pathmapper.files():
-                vol = self.pathmapper.mapper(src)
-                if vol.type == "File":
-                    runtime.append(u"--volume=%s:%s:ro" % (vol.resolved, vol.target))
-                if vol.type == "CreateFile":
-                    createtmp = os.path.join(self.stagedir, os.path.basename(vol.target))
-                    with open(createtmp, "w") as f:
-                        f.write(vol.resolved.encode("utf-8"))
-                    runtime.append(u"--volume=%s:%s:ro" % (createtmp, vol.target))
-            runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.outdir), self.builder.outdir))
-            runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.tmpdir), "/tmp"))
-            runtime.append(u"--workdir=%s" % (self.builder.outdir))
-            runtime.append("--read-only=true")
-
-            if kwargs.get("custom_net", None) is not None:
-                runtime.append("--net={0}".format(kwargs.get("custom_net")))
-            elif kwargs.get("disable_net", None):
-                runtime.append("--net=none")
+        if self.generatefiles["listing"]:
+            self.generatemapper = PathMapper(cast(List[Any], self.generatefiles["listing"]),
+                                             self.outdir, self.outdir, separateDirs=False)
+            _logger.debug(u"[job %s] initial work dir %s", self.name,
+                          json.dumps({p: self.generatemapper.mapper(p) for p in self.generatemapper.files()}, indent=4))
 
-            if self.stdout:
-                runtime.append("--log-driver=none")
-
-            euid = docker_vm_uid() or os.geteuid()
-
-            if kwargs.get("no_match_user",None) is False:
-                runtime.append(u"--user=%s" % (euid))
-
-            if rm_container:
-                runtime.append("--rm")
-
-            runtime.append("--env=TMPDIR=/tmp")
-
-            # spec currently says "HOME must be set to the designated output
-            # directory." but spec might change to designated temp directory.
-            # runtime.append("--env=HOME=/tmp")
-            runtime.append("--env=HOME=%s" % self.builder.outdir)
-
-            for t,v in self.environment.items():
-                runtime.append(u"--env=%s=%s" % (t, v))
-
-            runtime.append(img_id)
-        else:
-            env = self.environment
-            if not os.path.exists(self.tmpdir):
-                os.makedirs(self.tmpdir)
-            vars_to_preserve = kwargs.get("preserve_environment")
-            if kwargs.get("preserve_entire_environment"):
-                vars_to_preserve = os.environ
-            if vars_to_preserve is not None:
-                for key, value in os.environ.items():
-                    if key in vars_to_preserve and key not in env:
-                        env[key] = value
-            env["HOME"] = self.outdir
-            env["TMPDIR"] = self.tmpdir
-
-            stageFiles(self.pathmapper, os.symlink)
+    def _execute(self, runtime, env, rm_tmpdir=True, move_outputs="move"):
+        # type: (List[Text], MutableMapping[Text, Text], bool, Text) -> None
 
         scr, _ = get_feature(self, "ShellCommandRequirement")
 
+        shouldquote = None  # type: Callable[[Any], Any]
         if scr:
             shouldquote = lambda x: False
         else:
@@ -216,32 +173,15 @@ class CommandLineJob(object):
         _logger.info(u"[job %s] %s$ %s%s%s%s",
                      self.name,
                      self.outdir,
-                     " \\\n    ".join([shellescape.quote(Text(arg)) if shouldquote(Text(arg)) else Text(arg) for arg in (runtime + self.command_line)]),
+                     " \\\n    ".join([shellescape.quote(Text(arg)) if shouldquote(Text(arg)) else Text(arg) for arg in
+                                       (runtime + self.command_line)]),
                      u' < %s' % self.stdin if self.stdin else '',
                      u' > %s' % os.path.join(self.outdir, self.stdout) if self.stdout else '',
                      u' 2> %s' % os.path.join(self.outdir, self.stderr) if self.stderr else '')
 
-        if dry_run:
-            return (self.outdir, {})
-
         outputs = {}  # type: Dict[Text,Text]
 
         try:
-            if self.generatefiles["listing"]:
-                generatemapper = PathMapper([self.generatefiles], self.outdir,
-                                            self.outdir, separateDirs=False)
-                _logger.debug(u"[job %s] initial work dir %s", self.name,
-                              json.dumps({p: generatemapper.mapper(p) for p in generatemapper.files()}, indent=4))
-
-                def linkoutdir(src, tgt):
-                    # Need to make the link to the staged file (may be inside
-                    # the container)
-                    for _, item in self.pathmapper.items():
-                        if src == item.resolved:
-                            os.symlink(item.target, tgt)
-                            break
-                stageFiles(generatemapper, linkoutdir)
-
             stdin_path = None
             if self.stdin:
                 stdin_path = self.pathmapper.reversemap(self.stdin)[1]
@@ -262,15 +202,19 @@ class CommandLineJob(object):
                     os.makedirs(dn)
                 stdout_path = absout
 
-            build_job_script = self.builder.build_job_script  # type: Callable[[List[str]], Text]
+            commands = [Text(x).encode('utf-8') for x in runtime + self.command_line]
+            job_script_contents = None  # type: Text
+            builder = getattr(self, "builder", None)  # type: Builder
+            if builder is not None:
+                job_script_contents = builder.build_job_script(commands)
             rcode = _job_popen(
-                [Text(x).encode('utf-8') for x in runtime + self.command_line],
+                commands,
                 stdin_path=stdin_path,
                 stdout_path=stdout_path,
                 stderr_path=stderr_path,
                 env=env,
                 cwd=self.outdir,
-                build_job_script=build_job_script,
+                job_script_contents=job_script_contents,
             )
 
             if self.successCodes and rcode in self.successCodes:
@@ -285,15 +229,10 @@ class CommandLineJob(object):
                 processStatus = "permanentFail"
 
             if self.generatefiles["listing"]:
-                def linkoutdir(src, tgt):
-                    # Need to make the link to the staged file (may be inside
-                    # the container)
-                    if os.path.islink(tgt):
-                        os.remove(tgt)
-                        os.symlink(src, tgt)
-                stageFiles(generatemapper, linkoutdir, ignoreWritable=True)
+                relink_initialworkdir(self.generatemapper, inplace_update=self.inplace_update)
 
             outputs = self.collect_outputs(self.outdir)
+            outputs = bytes2str_in_dicts(outputs)  # type: ignore
 
         except OSError as e:
             if e.errno == 2:
@@ -312,7 +251,7 @@ class CommandLineJob(object):
             processStatus = "permanentFail"
 
         if processStatus != "success":
-            _logger.warn(u"[job %s] completed %s", self.name, processStatus)
+            _logger.warning(u"[job %s] completed %s", self.name, processStatus)
         else:
             _logger.info(u"[job %s] completed %s", self.name, processStatus)
 
@@ -329,27 +268,165 @@ class CommandLineJob(object):
             _logger.debug(u"[job %s] Removing temporary directory %s", self.name, self.tmpdir)
             shutil.rmtree(self.tmpdir, True)
 
-        if move_outputs == "move" and empty_subtree(self.outdir):
-            _logger.debug(u"[job %s] Removing empty output directory %s", self.name, self.outdir)
-            shutil.rmtree(self.outdir, True)
+
+class CommandLineJob(JobBase):
+
+    def run(self, pull_image=True, rm_container=True,
+            rm_tmpdir=True, move_outputs="move", **kwargs):
+        # type: (bool, bool, bool, Text, **Any) -> None
+
+        self._setup()
+
+        env = self.environment
+        if not os.path.exists(self.tmpdir):
+            os.makedirs(self.tmpdir)
+        vars_to_preserve = kwargs.get("preserve_environment")
+        if kwargs.get("preserve_entire_environment"):
+            vars_to_preserve = os.environ
+        if vars_to_preserve is not None:
+            for key, value in os.environ.items():
+                if key in vars_to_preserve and key not in env:
+                    # On Windows, subprocess env can't handle unicode.
+                    env[key] = str(value) if onWindows() else value
+        env["HOME"] = str(self.outdir) if onWindows() else self.outdir
+        env["TMPDIR"] = str(self.tmpdir) if onWindows() else self.tmpdir
+        if "PATH" not in env:
+            env["PATH"] = str(os.environ["PATH"]) if onWindows() else os.environ["PATH"]
+
+        stageFiles(self.pathmapper, ignoreWritable=True, symLink=True)
+        if self.generatemapper:
+            stageFiles(self.generatemapper, ignoreWritable=self.inplace_update, symLink=True)
+            relink_initialworkdir(self.generatemapper, inplace_update=self.inplace_update)
+
+        self._execute([], env, rm_tmpdir=rm_tmpdir, move_outputs=move_outputs)
+
+
+class DockerCommandLineJob(JobBase):
+
+    def add_volumes(self, pathmapper, runtime, stage_output):
+        # type: (PathMapper, List[Text], bool) -> None
+
+        host_outdir = self.outdir
+        container_outdir = self.builder.outdir
+        for src, vol in pathmapper.items():
+            if not vol.staged:
+                continue
+            if stage_output:
+                containertgt = container_outdir + vol.target[len(host_outdir):]
+            else:
+                containertgt = vol.target
+            if vol.type in ("File", "Directory"):
+                if not vol.resolved.startswith("_:"):
+                    runtime.append(u"--volume=%s:%s:ro" % (docker_windows_path_adjust(vol.resolved), docker_windows_path_adjust(containertgt)))
+            elif vol.type == "WritableFile":
+                if self.inplace_update:
+                    runtime.append(u"--volume=%s:%s:rw" % (docker_windows_path_adjust(vol.resolved), docker_windows_path_adjust(containertgt)))
+                else:
+                    shutil.copy(vol.resolved, vol.target)
+            elif vol.type == "WritableDirectory":
+                if vol.resolved.startswith("_:"):
+                    os.makedirs(vol.target, 0o0755)
+                else:
+                    if self.inplace_update:
+                        runtime.append(u"--volume=%s:%s:rw" % (docker_windows_path_adjust(vol.resolved), docker_windows_path_adjust(containertgt)))
+                    else:
+                        shutil.copytree(vol.resolved, vol.target)
+            elif vol.type == "CreateFile":
+                createtmp = os.path.join(host_outdir, os.path.basename(vol.target))
+                with open(createtmp, "wb") as f:
+                    f.write(vol.resolved.encode("utf-8"))
+                runtime.append(u"--volume=%s:%s:ro" % (docker_windows_path_adjust(createtmp), docker_windows_path_adjust(vol.target)))
+
+
+    def run(self, pull_image=True, rm_container=True,
+            rm_tmpdir=True, move_outputs="move", **kwargs):
+        # type: (bool, bool, bool, Text, **Any) -> None
+
+        (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
+
+        img_id = None
+        env = None  # type: MutableMapping[Text, Text]
+        try:
+            env = cast(MutableMapping[Text, Text], os.environ)
+            if docker_req and kwargs.get("use_container"):
+                img_id = docker.get_from_requirements(docker_req, True, pull_image)
+            if img_id is None:
+                if self.builder.find_default_container:
+                    default_container = self.builder.find_default_container()
+                    if default_container:
+                        img_id = default_container
+                        env = cast(MutableMapping[Text, Text], os.environ)
+
+            if docker_req and img_id is None and kwargs.get("use_container"):
+                raise Exception("Docker image not available")
+        except Exception as e:
+            _logger.debug("Docker error", exc_info=True)
+            if docker_is_req:
+                raise UnsupportedRequirement(
+                    "Docker is required to run this tool: %s" % e)
+            else:
+                raise WorkflowException(
+                    "Docker is not available for this tool, try --no-container"
+                    " to disable Docker: %s" % e)
+
+        self._setup()
+
+        runtime = [u"docker", u"run", u"-i"]
+
+        runtime.append(u"--volume=%s:%s:rw" % (docker_windows_path_adjust(os.path.realpath(self.outdir)), self.builder.outdir))
+        runtime.append(u"--volume=%s:%s:rw" % (docker_windows_path_adjust(os.path.realpath(self.tmpdir)), "/tmp"))
+
+        self.add_volumes(self.pathmapper, runtime, False)
+        if self.generatemapper:
+            self.add_volumes(self.generatemapper, runtime, True)
+
+        runtime.append(u"--workdir=%s" % (docker_windows_path_adjust(self.builder.outdir)))
+        runtime.append(u"--read-only=true")
+
+        if kwargs.get("custom_net", None) is not None:
+            runtime.append(u"--net={0}".format(kwargs.get("custom_net")))
+        elif kwargs.get("disable_net", None):
+            runtime.append(u"--net=none")
+
+        if self.stdout:
+            runtime.append("--log-driver=none")
+
+        euid, egid = docker_vm_id()
+        if not onWindows():  # MS Windows does not have getuid() or geteuid() functions
+            euid, egid = euid or os.geteuid(), egid or os.getgid()
+
+        if kwargs.get("no_match_user", None) is False and (euid, egid) != (None, None):
+            runtime.append(u"--user=%d:%d" % (euid, egid))
+
+        if rm_container:
+            runtime.append(u"--rm")
+
+        runtime.append(u"--env=TMPDIR=/tmp")
+
+        # spec currently says "HOME must be set to the designated output
+        # directory." but spec might change to designated temp directory.
+        # runtime.append("--env=HOME=/tmp")
+        runtime.append(u"--env=HOME=%s" % self.builder.outdir)
+
+        for t, v in self.environment.items():
+            runtime.append(u"--env=%s=%s" % (t, v))
+
+        runtime.append(img_id)
+
+        self._execute(runtime, env, rm_tmpdir=rm_tmpdir, move_outputs=move_outputs)
 
 
 def _job_popen(
-    commands,  # type: List[str]
-    stdin_path,  # type: Text
-    stdout_path,  # type: Text
-    stderr_path,  # type: Text
-    env,  # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
-    cwd,  # type: Text
-    job_dir=None,  # type: Text
-    build_job_script=None,  # type: Callable[[List[str]], Text]
+        commands,  # type: List[bytes]
+        stdin_path,  # type: Text
+        stdout_path,  # type: Text
+        stderr_path,  # type: Text
+        env,  # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
+        cwd,  # type: Text
+        job_dir=None,  # type: Text
+        job_script_contents=None,  # type: Text
 ):
     # type: (...) -> int
-
-    job_script_contents = None  # type: Text
-    if build_job_script:
-        job_script_contents = build_job_script(commands)
-
     if not job_script_contents and not FORCE_SHELLED_POPEN:
 
         stdin = None  # type: Union[IO[Any], int]
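Put together, the Docker invocation assembled in run() above looks roughly
like the following (image name, paths, and ids are illustrative):

    runtime = ["docker", "run", "-i",
               "--volume=/host/out:/var/spool/cwl:rw",
               "--volume=/host/tmp:/tmp:rw",
               "--workdir=/var/spool/cwl",
               "--read-only=true",
               "--user=1000:1000",
               "--rm",
               "--env=TMPDIR=/tmp",
               "--env=HOME=/var/spool/cwl",
               "ubuntu:16.04"]
    print(" ".join(runtime + ["echo", "hello"]))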
@@ -373,7 +450,7 @@ def _job_popen(
 
         sp = subprocess.Popen(commands,
                               shell=False,
-                              close_fds=True,
+                              close_fds=not onWindows(),
                               stdin=stdin,
                               stdout=stdout,
                               stderr=stderr,
@@ -385,7 +462,7 @@ def _job_popen(
 
         rcode = sp.wait()
 
-        if isinstance(stdin, file):
+        if isinstance(stdin, io.IOBase):
             stdin.close()
 
         if stdout is not sys.stderr:
@@ -403,8 +480,8 @@ def _job_popen(
             job_script_contents = SHELL_COMMAND_TEMPLATE
 
         env_copy = {}
+        key = None  # type: Any
         for key in env:
-            key = key.encode("utf-8")
             env_copy[key] = env[key]
 
         job_description = dict(
@@ -415,21 +492,21 @@ def _job_popen(
             stderr_path=stderr_path,
             stdin_path=stdin_path,
         )
-        with open(os.path.join(job_dir, "job.json"), "w") as f:
+        with open(os.path.join(job_dir, "job.json"), "wb") as f:
             json.dump(job_description, f)
         try:
             job_script = os.path.join(job_dir, "run_job.bash")
-            with open(job_script, "w") as f:
-                f.write(job_script_contents)
+            with open(job_script, "wb") as f:
+                f.write(job_script_contents.encode('utf-8'))
             job_run = os.path.join(job_dir, "run_job.py")
-            with open(job_run, "w") as f:
+            with open(job_run, "wb") as f:
+                f.write(PYTHON_RUN_SCRIPT.encode('utf-8'))
             sp = subprocess.Popen(
                 ["bash", job_script.encode("utf-8")],
                 shell=False,
                 cwd=job_dir,
-                stdout=subprocess.PIPE,
-                stderr=subprocess.PIPE,
+                stdout=sys.stderr,  # if the output needs capturing, the nested script prints the
+                stderr=sys.stderr,  # paths of the real files; otherwise everything goes to stderr
                 stdin=subprocess.PIPE,
             )
             if sp.stdin:
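The shelled-popen fallback above amounts to a small file protocol: the
parent serializes the command description to job.json, and the generated
bash/python pair replays it. A sketch of the producer side (values are
illustrative):

    import json
    import os
    import tempfile

    job_dir = tempfile.mkdtemp()
    job_description = dict(
        commands=["echo", "hello"],
        cwd=job_dir,
        env={"PATH": os.environ.get("PATH", "")},
        stdout_path=os.path.join(job_dir, "out.txt"),
        stderr_path=None,
        stdin_path=None,
    )
    with open(os.path.join(job_dir, "job.json"), "w") as f:
        json.dump(job_description, f)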
diff --git a/cwltool/load_tool.py b/cwltool/load_tool.py
index 3070f58..491fc3a 100644
--- a/cwltool/load_tool.py
+++ b/cwltool/load_tool.py
@@ -1,45 +1,55 @@
+from __future__ import absolute_import
 # pylint: disable=unused-import
 """Loads a CWL document."""
 
-import os
-import uuid
 import logging
+import os
 import re
-import urlparse
+import uuid
+from typing import Any, Callable, Dict, List, Text, Tuple, Union, cast
 
-from typing import Any, AnyStr, Callable, cast, Dict, Text, Tuple, Union
-from ruamel.yaml.comments import CommentedSeq, CommentedMap
-from avro.schema import Names
 import requests.sessions
+from six import itervalues, string_types
 
-from schema_salad.ref_resolver import Loader, Fetcher, file_uri
-import schema_salad.validate as validate
-from schema_salad.validate import ValidationException
 import schema_salad.schema as schema
+from avro.schema import Names
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
+from schema_salad.ref_resolver import Fetcher, Loader, file_uri
 from schema_salad.sourceline import cmap
+from schema_salad.validate import ValidationException
+from six.moves import urllib
 
-from . import update
-from . import process
-from .process import Process, shortname
+from . import process, update
 from .errors import WorkflowException
+from .process import Process, shortname
+from .update import ALLUPDATES
 
 _logger = logging.getLogger("cwltool")
 
-def fetch_document(argsworkflow,   # type: Union[Text, dict[Text, Any]]
-                   resolver=None,  # type: Callable[[Loader, Union[Text, dict[Text, Any]]], Text]
-                   fetcher_constructor=None  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+jobloaderctx = {
+    u"cwl": "https://w3id.org/cwl/cwl#",
+    u"path": {u"@type": u"@id"},
+    u"location": {u"@type": u"@id"},
+    u"format": {u"@type": u"@id"},
+    u"id": u"@id"
+}
+
+def fetch_document(argsworkflow,  # type: Union[Text, Dict[Text, Any]]
+                   resolver=None,  # type: Callable[[Loader, Union[Text, Dict[Text, Any]]], Text]
+                   fetcher_constructor=None
+                   # type: Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher]
                    ):
     # type: (...) -> Tuple[Loader, CommentedMap, Text]
     """Retrieve a CWL document."""
 
-    document_loader = Loader({"cwl": "https://w3id.org/cwl/cwl#", "id": "@id"},
-                             fetcher_constructor=fetcher_constructor)
+    document_loader = Loader(jobloaderctx, fetcher_constructor=fetcher_constructor)  # type: ignore
 
     uri = None  # type: Text
     workflowobj = None  # type: CommentedMap
-    if isinstance(argsworkflow, basestring):
-        split = urlparse.urlsplit(argsworkflow)
-        if split.scheme:
+    if isinstance(argsworkflow, string_types):
+        split = urllib.parse.urlsplit(argsworkflow)
+        # On Windows, urlsplit can misread a drive letter as a URI scheme, so only treat recognized schemes as URIs
+        if split.scheme and split.scheme in [u'http', u'https', u'file']:
             uri = argsworkflow
         elif os.path.exists(os.path.abspath(argsworkflow)):
             uri = file_uri(str(os.path.abspath(argsworkflow)))
@@ -52,7 +62,7 @@ def fetch_document(argsworkflow,   # type: Union[Text, dict[Text, Any]]
         if argsworkflow != uri:
             _logger.info("Resolved '%s' to '%s'", argsworkflow, uri)
 
-        fileuri = urlparse.urldefrag(uri)[0]
+        fileuri = urllib.parse.urldefrag(uri)[0]
         workflowobj = document_loader.fetch(fileuri)
     elif isinstance(argsworkflow, dict):
         uri = "#" + Text(id(argsworkflow))
@@ -62,57 +72,72 @@ def fetch_document(argsworkflow,   # type: Union[Text, dict[Text, Any]]
 
     return document_loader, workflowobj, uri
 
+
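
A usage sketch of fetch_document (the workflow paths here are hypothetical): local paths are expanded with os.path.abspath and converted via file_uri, while http/https/file URIs are fetched as-is; the scheme whitelist above keeps Windows drive letters such as C: from being mistaken for URI schemes.

    from cwltool.load_tool import fetch_document

    loader, workflowobj, uri = fetch_document("workflows/align.cwl")        # local file
    loader, workflowobj, uri = fetch_document("https://example.com/wf.cwl")  # remote
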
 def _convert_stdstreams_to_files(workflowobj):
     # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> None
 
     if isinstance(workflowobj, dict):
-        if ('class' in workflowobj
-                and workflowobj['class'] == 'CommandLineTool'):
-            if 'outputs' in workflowobj:
-                for out in workflowobj['outputs']:
-                    for streamtype in ['stdout', 'stderr']:
-                        if out['type'] == streamtype:
-                            if 'outputBinding' in out:
-                                raise ValidationException(
-                                    "Not allowed to specify outputBinding when"
-                                    " using %s shortcut." % streamtype)
-                            if streamtype in workflowobj:
-                                filename = workflowobj[streamtype]
-                            else:
-                                filename = Text(uuid.uuid4())
-                                workflowobj[streamtype] = filename
-                            out['type'] = 'File'
-                            out['outputBinding'] = {'glob': filename}
-            if 'inputs' in workflowobj:
-                for inp in workflowobj['inputs']:
-                    if inp['type'] == 'stdin':
-                        if 'inputBinding' in inp:
-                            raise ValidationException(
-                                "Not allowed to specify inputBinding when"
-                                " using stdin shortcut.")
-                        if 'stdin' in workflowobj:
+        if workflowobj.get('class') == 'CommandLineTool':
+            for out in workflowobj.get('outputs', []):
+                for streamtype in ['stdout', 'stderr']:
+                    if out.get('type') == streamtype:
+                        if 'outputBinding' in out:
                             raise ValidationException(
-                                "Not allowed to specify stdin path when"
-                                " using stdin type shortcut.")
+                                "Not allowed to specify outputBinding when"
+                                " using %s shortcut." % streamtype)
+                        if streamtype in workflowobj:
+                            filename = workflowobj[streamtype]
                         else:
-                            workflowobj['stdin'] = \
-                                "$(inputs.%s.path)" % \
-                                inp['id'].rpartition('#')[2]
-                            inp['type'] = 'File'
+                            filename = Text(uuid.uuid4())
+                            workflowobj[streamtype] = filename
+                        out['type'] = 'File'
+                        out['outputBinding'] = {'glob': filename}
+            for inp in workflowobj.get('inputs', []):
+                if inp.get('type') == 'stdin':
+                    if 'inputBinding' in inp:
+                        raise ValidationException(
+                            "Not allowed to specify inputBinding when"
+                            " using stdin shortcut.")
+                    if 'stdin' in workflowobj:
+                        raise ValidationException(
+                            "Not allowed to specify stdin path when"
+                            " using stdin type shortcut.")
+                    else:
+                        workflowobj['stdin'] = \
+                            "$(inputs.%s.path)" % \
+                            inp['id'].rpartition('#')[2]
+                        inp['type'] = 'File'
         else:
-            for entry in workflowobj.itervalues():
+            for entry in itervalues(workflowobj):
                 _convert_stdstreams_to_files(entry)
     if isinstance(workflowobj, list):
         for entry in workflowobj:
             _convert_stdstreams_to_files(entry)
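
The effect of the rewritten _convert_stdstreams_to_files, shown on a minimal tool dict (values illustrative):

    tool = {'class': 'CommandLineTool',
            'stdout': 'output.txt',
            'inputs': [],
            'outputs': [{'id': 'out', 'type': 'stdout'}]}
    _convert_stdstreams_to_files(tool)
    # tool['outputs'][0] == {'id': 'out', 'type': 'File',
    #                        'outputBinding': {'glob': 'output.txt'}}
    # Without an explicit 'stdout' entry, a uuid4-derived filename is generated.
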
 
-def validate_document(document_loader,   # type: Loader
-                      workflowobj,       # type: CommentedMap
-                      uri,               # type: Text
+def _add_blank_ids(workflowobj):
+    # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> None
+
+    if isinstance(workflowobj, dict):
+        if ("run" in workflowobj and
+            isinstance(workflowobj["run"], dict) and
+            "id" not in workflowobj["run"] and
+            "$import" not in workflowobj["run"]):
+            workflowobj["run"]["id"] = Text(uuid.uuid4())
+        for entry in itervalues(workflowobj):
+            _add_blank_ids(entry)
+    if isinstance(workflowobj, list):
+        for entry in workflowobj:
+            _add_blank_ids(entry)
+
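
The new _add_blank_ids helper gives every inline (non-$import) step process a generated id so later resolution has something to anchor to, e.g.:

    step = {'run': {'class': 'CommandLineTool', 'inputs': [], 'outputs': []}}
    _add_blank_ids(step)
    # step['run']['id'] is now a fresh uuid4 string; runs that already carry
    # an 'id' or use '$import' are left alone.
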
+def validate_document(document_loader,  # type: Loader
+                      workflowobj,  # type: CommentedMap
+                      uri,  # type: Text
                       enable_dev=False,  # type: bool
-                      strict=True,       # type: bool
-                      preprocess_only=False,    # type: bool
-                      fetcher_constructor=None  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+                      strict=True,  # type: bool
+                      preprocess_only=False,  # type: bool
+                      fetcher_constructor=None,
+                      # type: Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher]
+                      skip_schemas=None
                       ):
     # type: (...) -> Tuple[Loader, Names, Union[Dict[Text, Any], List[Dict[Text, Any]]], Dict[Text, Any], Text]
     """Validate a CWL document."""
@@ -123,26 +148,32 @@ def validate_document(document_loader,   # type: Loader
         }
 
     if not isinstance(workflowobj, dict):
-        raise ValueError("workflowjobj must be a dict")
+        raise ValueError("workflowjobj must be a dict, got '%s': %s" % (type(workflowobj), workflowobj))
 
     jobobj = None
     if "cwl:tool" in workflowobj:
         jobobj, _ = document_loader.resolve_all(workflowobj, uri)
-        uri = urlparse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"])
+        uri = urllib.parse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"])
         del cast(dict, jobobj)["https://w3id.org/cwl/cwl#tool"]
         workflowobj = fetch_document(uri, fetcher_constructor=fetcher_constructor)[1]
 
-    fileuri = urlparse.urldefrag(uri)[0]
+    fileuri = urllib.parse.urldefrag(uri)[0]
 
     if "cwlVersion" in workflowobj:
         if not isinstance(workflowobj["cwlVersion"], (str, Text)):
             raise Exception("'cwlVersion' must be a string, got %s" % type(workflowobj["cwlVersion"]))
+        # strip the "cwl:" or full-URI prefix from the version string
         workflowobj["cwlVersion"] = re.sub(
             r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "",
             workflowobj["cwlVersion"])
+        if workflowobj["cwlVersion"] not in list(ALLUPDATES):
+            # include all supported values of cwlVersion in the error message
+            versions = list(ALLUPDATES)  # ALLUPDATES is a dict keyed by version
+            versions.sort()
+            raise ValidationException("'cwlVersion' not valid. Supported CWL versions are:\n{}".format("\n".join(versions)))
     else:
-        _logger.warn("No cwlVersion found, treating this file as draft-2.")
-        workflowobj["cwlVersion"] = "draft-2"
+        raise ValidationException("No cwlVersion found."
+            "Use the following syntax in your CWL workflow to declare version: cwlVersion: <version>")
 
     if workflowobj["cwlVersion"] == "draft-2":
         workflowobj = cast(CommentedMap, cmap(update._draft2toDraft3dev1(
@@ -157,10 +188,12 @@ def validate_document(document_loader,   # type: Loader
     if isinstance(avsc_names, Exception):
         raise avsc_names
 
-    processobj = None  # type: Union[CommentedMap, CommentedSeq, unicode]
+    processobj = None  # type: Union[CommentedMap, CommentedSeq, Text]
     document_loader = Loader(sch_document_loader.ctx, schemagraph=sch_document_loader.graph,
-                  idx=document_loader.idx, cache=sch_document_loader.cache,
-                             fetcher_constructor=fetcher_constructor)
+                             idx=document_loader.idx, cache=sch_document_loader.cache,
+                             fetcher_constructor=fetcher_constructor, skip_schemas=skip_schemas)
+
+    _add_blank_ids(workflowobj)
 
     workflowobj["id"] = fileuri
     processobj, metadata = document_loader.resolve_all(workflowobj, fileuri)
@@ -171,9 +204,9 @@ def validate_document(document_loader,   # type: Loader
         if not isinstance(processobj, dict):
             raise ValidationException("Draft-2 workflows must be a dict.")
         metadata = cast(CommentedMap, cmap({"$namespaces": processobj.get("$namespaces", {}),
-                         "$schemas": processobj.get("$schemas", []),
-                         "cwlVersion": processobj["cwlVersion"]},
-                        fn=fileuri))
+                                            "$schemas": processobj.get("$schemas", []),
+                                            "cwlVersion": processobj["cwlVersion"]},
+                                           fn=fileuri))
 
     _convert_stdstreams_to_files(workflowobj)
 
@@ -193,11 +226,11 @@ def validate_document(document_loader,   # type: Loader
 
 
 def make_tool(document_loader,  # type: Loader
-              avsc_names,       # type: Names
-              metadata,         # type: Dict[Text, Any]
-              uri,              # type: Text
-              makeTool,         # type: Callable[..., Process]
-              kwargs            # type: dict
+              avsc_names,  # type: Names
+              metadata,  # type: Dict[Text, Any]
+              uri,  # type: Text
+              makeTool,  # type: Callable[..., Process]
+              kwargs  # type: dict
               ):
     # type: (...) -> Process
     """Make a Python CWL object."""
@@ -210,7 +243,7 @@ def make_tool(document_loader,  # type: Loader
             raise WorkflowException(
                 u"Tool file contains graph of multiple objects, must specify "
                 "one of #%s" % ", #".join(
-                    urlparse.urldefrag(i["id"])[1] for i in resolveduri
+                    urllib.parse.urldefrag(i["id"])[1] for i in resolveduri
                     if "id" in i))
     elif isinstance(resolveduri, dict):
         processobj = resolveduri
@@ -236,16 +269,17 @@ def make_tool(document_loader,  # type: Loader
 
 
 def load_tool(argsworkflow,  # type: Union[Text, Dict[Text, Any]]
-              makeTool,      # type: Callable[..., Process]
-              kwargs=None,   # type: dict
+              makeTool,  # type: Callable[..., Process]
+              kwargs=None,  # type: Dict
               enable_dev=False,  # type: bool
-              strict=True,       # type: bool
-              resolver=None,     # type: Callable[[Loader, Union[Text, dict[Text, Any]]], Text]
-              fetcher_constructor=None  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+              strict=True,  # type: bool
+              resolver=None,  # type: Callable[[Loader, Union[Text, Dict[Text, Any]]], Text]
+              fetcher_constructor=None  # type: Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher]
               ):
     # type: (...) -> Process
 
-    document_loader, workflowobj, uri = fetch_document(argsworkflow, resolver=resolver, fetcher_constructor=fetcher_constructor)
+    document_loader, workflowobj, uri = fetch_document(argsworkflow, resolver=resolver,
+                                                       fetcher_constructor=fetcher_constructor)
     document_loader, avsc_names, processobj, metadata, uri = validate_document(
         document_loader, workflowobj, uri, enable_dev=enable_dev,
         strict=strict, fetcher_constructor=fetcher_constructor)
diff --git a/cwltool/main.py b/cwltool/main.py
index 35c4f2e..9eaeab1 100755
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -1,39 +1,47 @@
 #!/usr/bin/env python
+from __future__ import print_function
+from __future__ import absolute_import
 
 import argparse
+import collections
+import functools
 import json
+import logging
 import os
 import sys
-import logging
-import copy
 import tempfile
-import ruamel.yaml as yaml
-import urlparse
-import hashlib
-import pkg_resources  # part of setuptools
-import functools
+from typing import (IO, Any, AnyStr, Callable, Dict, List, Sequence, Text, Tuple,
+                    Union, cast)
 
-import rdflib
+import pkg_resources  # part of setuptools
 import requests
-from typing import (Union, Any, AnyStr, cast, Callable, Dict, Sequence, Text,
-    Tuple, Type, IO)
+import six
+import string
 
-from schema_salad.ref_resolver import Loader, Fetcher, file_uri, uri_file_path
+import ruamel.yaml as yaml
 import schema_salad.validate as validate
-import schema_salad.jsonld_context
-import schema_salad.makedoc
+from schema_salad.ref_resolver import Fetcher, Loader, file_uri, uri_file_path
 from schema_salad.sourceline import strip_dup_lineno
 
-from . import workflow
-from .errors import WorkflowException, UnsupportedRequirement
-from .cwlrdf import printrdf, printdot
-from .process import shortname, Process, getListing, relocateOutputs, cleanIntermediate, scandeps, normalizeFilesDirs
-from .load_tool import fetch_document, validate_document, make_tool
-from . import draft2tool
-from .resolver import tool_resolver
-from .builder import adjustFileObjs, adjustDirObjs
-from .stdfsaccess import StdFsAccess
+from . import draft2tool, workflow
+from .builder import Builder
+from .cwlrdf import printdot, printrdf
+from .errors import UnsupportedRequirement, WorkflowException
+from .load_tool import fetch_document, make_tool, validate_document, jobloaderctx
+from .mutation import MutationManager
 from .pack import pack
+from .pathmapper import (adjustDirObjs, adjustFileObjs, get_listing,
+                         trim_listing, visit_class)
+from .process import (Process, cleanIntermediate, normalizeFilesDirs,
+                      relocateOutputs, scandeps, shortname, use_custom_schema,
+                      use_standard_schema)
+from .resolver import ga4gh_tool_registries, tool_resolver
+from .software_requirements import DependenciesConfiguration, get_container_from_software_requirements, SOFTWARE_REQUIREMENTS_ENABLED
+from .stdfsaccess import StdFsAccess
+from .update import ALLUPDATES, UPDATES
+from .utils import onWindows, windows_default_container_id
+from ruamel.yaml.comments import Comment, CommentedSeq, CommentedMap
+
 
 _logger = logging.getLogger("cwltool")
 
@@ -65,12 +73,12 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--rm-container", action="store_true", default=True,
-                        help="Delete Docker container used by jobs after they exit (default)",
-                        dest="rm_container")
+                         help="Delete Docker container used by jobs after they exit (default)",
+                         dest="rm_container")
 
     exgroup.add_argument("--leave-container", action="store_false",
-                        default=True, help="Do not delete Docker container used by jobs after they exit",
-                        dest="rm_container")
+                         default=True, help="Do not delete Docker container used by jobs after they exit",
+                         dest="rm_container")
 
     parser.add_argument("--tmpdir-prefix", type=Text,
                         help="Path prefix for temporary directories",
@@ -78,29 +86,29 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--tmp-outdir-prefix", type=Text,
-                        help="Path prefix for intermediate output directories",
-                        default="tmp")
+                         help="Path prefix for intermediate output directories",
+                         default="tmp")
 
     exgroup.add_argument("--cachedir", type=Text, default="",
-                        help="Directory to cache intermediate workflow outputs to avoid recomputing steps.")
+                         help="Directory to cache intermediate workflow outputs to avoid recomputing steps.")
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--rm-tmpdir", action="store_true", default=True,
-                        help="Delete intermediate temporary directories (default)",
-                        dest="rm_tmpdir")
+                         help="Delete intermediate temporary directories (default)",
+                         dest="rm_tmpdir")
 
     exgroup.add_argument("--leave-tmpdir", action="store_false",
-                        default=True, help="Do not delete intermediate temporary directories",
-                        dest="rm_tmpdir")
+                         default=True, help="Do not delete intermediate temporary directories",
+                         dest="rm_tmpdir")
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--move-outputs", action="store_const", const="move", default="move",
-                        help="Move output files to the workflow output directory and delete intermediate output directories (default).",
-                        dest="move_outputs")
+                         help="Move output files to the workflow output directory and delete intermediate output directories (default).",
+                         dest="move_outputs")
 
     exgroup.add_argument("--leave-outputs", action="store_const", const="leave", default="move",
-                        help="Leave output files in intermediate output directories.",
-                        dest="move_outputs")
+                         help="Leave output files in intermediate output directories.",
+                         dest="move_outputs")
 
     exgroup.add_argument("--copy-outputs", action="store_const", const="copy", default="move",
                          help="Copy output files to the workflow output directory, don't delete intermediate output directories.",
@@ -108,10 +116,10 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--enable-pull", default=True, action="store_true",
-                        help="Try to pull Docker images", dest="enable_pull")
+                         help="Try to pull Docker images", dest="enable_pull")
 
     exgroup.add_argument("--disable-pull", default=True, action="store_false",
-                        help="Do not try to pull Docker images", dest="enable_pull")
+                         help="Do not try to pull Docker images", dest="enable_pull")
 
     parser.add_argument("--rdf-serializer",
                         help="Output RDF serialization format used by --print-rdf (one of turtle (default), n3, nt, xml)",
@@ -124,35 +132,61 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--print-rdf", action="store_true",
-                        help="Print corresponding RDF graph for workflow and exit")
-    exgroup.add_argument("--print-dot", action="store_true", help="Print workflow visualization in graphviz format and exit")
+                         help="Print corresponding RDF graph for workflow and exit")
+    exgroup.add_argument("--print-dot", action="store_true",
+                         help="Print workflow visualization in graphviz format and exit")
     exgroup.add_argument("--print-pre", action="store_true", help="Print CWL document after preprocessing.")
     exgroup.add_argument("--print-deps", action="store_true", help="Print CWL document dependencies.")
     exgroup.add_argument("--print-input-deps", action="store_true", help="Print input object document dependencies.")
     exgroup.add_argument("--pack", action="store_true", help="Combine components into single document and print.")
     exgroup.add_argument("--version", action="store_true", help="Print version and exit")
     exgroup.add_argument("--validate", action="store_true", help="Validate CWL document only.")
+    exgroup.add_argument("--print-supported-versions", action="store_true", help="Print supported CWL specs.")
 
     exgroup = parser.add_mutually_exclusive_group()
-    exgroup.add_argument("--strict", action="store_true", help="Strict validation (unrecognized or out of place fields are error)",
+    exgroup.add_argument("--strict", action="store_true",
+                         help="Strict validation (unrecognized or out of place fields are error)",
                          default=True, dest="strict")
     exgroup.add_argument("--non-strict", action="store_false", help="Lenient validation (ignore unrecognized fields)",
                          default=True, dest="strict")
 
+    parser.add_argument("--skip-schemas", action="store_true",
+            help="Skip loading of schemas", default=True, dest="skip_schemas")
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--verbose", action="store_true", help="Default logging")
     exgroup.add_argument("--quiet", action="store_true", help="Only print warnings and errors.")
     exgroup.add_argument("--debug", action="store_true", help="Print even more logging")
 
+    dependency_resolvers_configuration_help = argparse.SUPPRESS
+    dependencies_directory_help = argparse.SUPPRESS
+    use_biocontainers_help = argparse.SUPPRESS
+    conda_dependencies = argparse.SUPPRESS
+
+    if SOFTWARE_REQUIREMENTS_ENABLED:
+        dependency_resolvers_configuration_help = "Dependency resolver configuration file describing how to adapt 'SoftwareRequirement' packages to current system."
+        dependencies_directory_help = "Defaut root directory used by dependency resolvers configuration."
+        use_biocontainers_help = "Use biocontainers for tools without an explicitly annotated Docker container."
+        conda_dependencies = "Short cut to use Conda to resolve 'SoftwareRequirement' packages."
+
+    parser.add_argument("--beta-dependency-resolvers-configuration", default=None, help=dependency_resolvers_configuration_help)
+    parser.add_argument("--beta-dependencies-directory", default=None, help=dependencies_directory_help)
+    parser.add_argument("--beta-use-biocontainers", default=None, help=use_biocontainers_help, action="store_true")
+    parser.add_argument("--beta-conda-dependencies", default=None, help=conda_dependencies, action="store_true")
+
     parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
 
     parser.add_argument("--relative-deps", choices=['primary', 'cwd'],
-        default="primary", help="When using --print-deps, print paths "
-        "relative to primary file or current working directory.")
+                        default="primary", help="When using --print-deps, print paths "
+                                                "relative to primary file or current working directory.")
 
     parser.add_argument("--enable-dev", action="store_true",
-                        help="Allow loading and running development versions "
-                        "of CWL spec.", default=False)
+                        help="Enable loading and running development versions "
+                             "of CWL spec.", default=False)
+
+    parser.add_argument("--enable-ext", action="store_true",
+                        help="Enable loading and running cwltool extensions "
+                             "to CWL spec.", default=False)
 
     parser.add_argument("--default-container",
                         help="Specify a default docker container that will be used if the workflow fails to specify one.")
@@ -160,34 +194,50 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         help="Disable passing the current uid to 'docker run --user`")
     parser.add_argument("--disable-net", action="store_true",
                         help="Use docker's default networking for containers;"
-                        " the default is to enable networking.")
+                             " the default is to enable networking.")
     parser.add_argument("--custom-net", type=Text,
                         help="Will be passed to `docker run` as the '--net' "
-                        "parameter. Implies '--enable-net'.")
+                             "parameter. Implies '--enable-net'.")
+
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--enable-ga4gh-tool-registry", action="store_true", help="Enable resolution using GA4GH tool registry API",
+                        dest="enable_ga4gh_tool_registry", default=True)
+    exgroup.add_argument("--disable-ga4gh-tool-registry", action="store_false", help="Disable resolution using GA4GH tool registry API",
+                        dest="enable_ga4gh_tool_registry", default=True)
 
-    parser.add_argument("--on-error", type=str,
+    parser.add_argument("--add-ga4gh-tool-registry", action="append", help="Add a GA4GH tool registry endpoint to use for resolution, default %s" % ga4gh_tool_registries,
+                        dest="ga4gh_tool_registries", default=[])
+
+    parser.add_argument("--on-error",
                         help="Desired workflow behavior when a step fails.  One of 'stop' or 'continue'. "
-                        "Default is 'stop'.", default="stop", choices=("stop", "continue"))
+                             "Default is 'stop'.", default="stop", choices=("stop", "continue"))
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--compute-checksum", action="store_true", default=True,
-                        help="Compute checksum of contents while collecting outputs",
-                        dest="compute_checksum")
+                         help="Compute checksum of contents while collecting outputs",
+                         dest="compute_checksum")
     exgroup.add_argument("--no-compute-checksum", action="store_false",
-                        help="Do not compute checksum of contents while collecting outputs",
-                        dest="compute_checksum")
+                         help="Do not compute checksum of contents while collecting outputs",
+                         dest="compute_checksum")
 
     parser.add_argument("--relax-path-checks", action="store_true",
-            default=False, help="Relax requirements on path names. Currently "
-            "allows spaces.", dest="relax_path_checks")
+                        default=False, help="Relax requirements on path names to permit "
+                        "spaces and hash characters.", dest="relax_path_checks")
+    exgroup.add_argument("--make-template", action="store_true",
+                         help="Generate a template input object")
+
+
     parser.add_argument("workflow", type=Text, nargs="?", default=None)
     parser.add_argument("job_order", nargs=argparse.REMAINDER)
 
     return parser
 
 
-def single_job_executor(t, job_order_object, **kwargs):
-    # type: (Process, Dict[Text, Any], **Any) -> Tuple[Dict[Text, Any], Text]
+def single_job_executor(t,  # type: Process
+                        job_order_object,  # type: Dict[Text, Any]
+                        **kwargs  # type: Any
+                        ):
+    # type: (...) -> Tuple[Dict[Text, Any], Text]
     final_output = []
     final_status = []
 
@@ -199,15 +249,16 @@ def single_job_executor(t, job_order_object, **kwargs):
         raise WorkflowException("Must provide 'basedir' in kwargs")
 
     output_dirs = set()
-    finaloutdir = kwargs.get("outdir")
-    kwargs["outdir"] = tempfile.mkdtemp(prefix=kwargs["tmp_outdir_prefix"]) if kwargs.get("tmp_outdir_prefix") else tempfile.mkdtemp()
+    finaloutdir = os.path.abspath(kwargs.get("outdir")) if kwargs.get("outdir") else None
+    kwargs["outdir"] = tempfile.mkdtemp(prefix=kwargs["tmp_outdir_prefix"]) if kwargs.get(
+        "tmp_outdir_prefix") else tempfile.mkdtemp()
     output_dirs.add(kwargs["outdir"])
+    kwargs["mutation_manager"] = MutationManager()
 
     jobReqs = None
     if "cwl:requirements" in job_order_object:
         jobReqs = job_order_object["cwl:requirements"]
-    elif ("cwl:defaults" in t.metadata and "cwl:requirements" in
-            t.metadata["cwl:defaults"]):
+    elif ("cwl:defaults" in t.metadata and "cwl:requirements" in t.metadata["cwl:defaults"]):
         jobReqs = t.metadata["cwl:defaults"]["cwl:requirements"]
     if jobReqs:
         for req in jobReqs:
@@ -220,6 +271,9 @@ def single_job_executor(t, job_order_object, **kwargs):
     try:
         for r in jobiter:
             if r:
+                builder = kwargs.get("builder", None)  # type: Builder
+                if builder is not None:
+                    r.builder = builder
                 if r.outdir:
                     output_dirs.add(r.outdir)
                 r.run(**kwargs)
@@ -234,7 +288,8 @@ def single_job_executor(t, job_order_object, **kwargs):
 
     if final_output and final_output[0] and finaloutdir:
         final_output[0] = relocateOutputs(final_output[0], finaloutdir,
-                                          output_dirs, kwargs.get("move_outputs"))
+                                          output_dirs, kwargs.get("move_outputs"),
+                                          kwargs["make_fs_access"](""))
 
     if kwargs.get("rm_tmpdir"):
         cleanIntermediate(output_dirs)
@@ -244,6 +299,7 @@ def single_job_executor(t, job_order_object, **kwargs):
     else:
         return (None, "permanentFail")
 
+
 class FSAction(argparse.Action):
     objclass = None  # type: Text
 
@@ -256,9 +312,10 @@ class FSAction(argparse.Action):
     def __call__(self, parser, namespace, values, option_string=None):
         # type: (argparse.ArgumentParser, argparse.Namespace, Union[AnyStr, Sequence[Any], None], AnyStr) -> None
         setattr(namespace,
-            self.dest,  # type: ignore
-            {"class": self.objclass,
-             "location": file_uri(str(os.path.abspath(cast(AnyStr, values))))})
+                self.dest,  # type: ignore
+                {"class": self.objclass,
+                 "location": file_uri(str(os.path.abspath(cast(AnyStr, values))))})
+
 
 class FSAppendAction(argparse.Action):
     objclass = None  # type: Text
@@ -283,90 +340,94 @@ class FSAppendAction(argparse.Action):
             {"class": self.objclass,
              "location": file_uri(str(os.path.abspath(cast(AnyStr, values))))})
 
+
 class FileAction(FSAction):
     objclass = "File"
 
+
 class DirectoryAction(FSAction):
     objclass = "Directory"
 
+
 class FileAppendAction(FSAppendAction):
     objclass = "File"
 
+
 class DirectoryAppendAction(FSAppendAction):
     objclass = "Directory"
 
 
 def add_argument(toolparser, name, inptype, records, description="",
-        default=None):
+                 default=None):
     # type: (argparse.ArgumentParser, Text, Any, List[Text], Text, Any) -> None
-        if len(name) == 1:
-            flag = "-"
-        else:
-            flag = "--"
-
-        required = True
-        if isinstance(inptype, list):
-            if inptype[0] == "null":
-                required = False
-                if len(inptype) == 2:
-                    inptype = inptype[1]
-                else:
-                    _logger.debug(u"Can't make command line argument from %s", inptype)
-                    return None
-
-        ahelp = description.replace("%", "%%")
-        action = None  # type: Union[argparse.Action, Text]
-        atype = None  # type: Any
-
-        if inptype == "File":
-            action = cast(argparse.Action, FileAction)
-        elif inptype == "Directory":
-            action = cast(argparse.Action, DirectoryAction)
-        elif isinstance(inptype, dict) and inptype["type"] == "array":
-            if inptype["items"] == "File":
-                action = cast(argparse.Action, FileAppendAction)
-            elif inptype["items"] == "Directory":
-                action = cast(argparse.Action, DirectoryAppendAction)
-            else:
-                action = "append"
-        elif isinstance(inptype, dict) and inptype["type"] == "enum":
-            atype = Text
-        elif isinstance(inptype, dict) and inptype["type"] == "record":
-            records.append(name)
-            for field in inptype['fields']:
-                fieldname = name+"."+shortname(field['name'])
-                fieldtype = field['type']
-                fielddescription = field.get("doc", "")
-                add_argument(
-                    toolparser, fieldname, fieldtype, records,
-                    fielddescription)
-            return
-        if inptype == "string":
-            atype = Text
-        elif inptype == "int":
-            atype = int
-        elif inptype == "double":
-            atype = float
-        elif inptype == "float":
-            atype = float
-        elif inptype == "boolean":
-            action = "store_true"
-
-        if default:
-            required = False
-
-        if not atype and not action:
-            _logger.debug(u"Can't make command line argument from %s", inptype)
-            return None
+    if len(name) == 1:
+        flag = "-"
+    else:
+        flag = "--"
 
-        if inptype != "boolean":
-            typekw = { 'type': atype }
+    required = True
+    if isinstance(inptype, list):
+        if inptype[0] == "null":
+            required = False
+            if len(inptype) == 2:
+                inptype = inptype[1]
+            else:
+                _logger.debug(u"Can't make command line argument from %s", inptype)
+                return None
+
+    ahelp = description.replace("%", "%%")
+    action = None  # type: Union[argparse.Action, Text]
+    atype = None  # type: Any
+
+    if inptype == "File":
+        action = cast(argparse.Action, FileAction)
+    elif inptype == "Directory":
+        action = cast(argparse.Action, DirectoryAction)
+    elif isinstance(inptype, dict) and inptype["type"] == "array":
+        if inptype["items"] == "File":
+            action = cast(argparse.Action, FileAppendAction)
+        elif inptype["items"] == "Directory":
+            action = cast(argparse.Action, DirectoryAppendAction)
         else:
-            typekw = {}
+            action = "append"
+    elif isinstance(inptype, dict) and inptype["type"] == "enum":
+        atype = Text
+    elif isinstance(inptype, dict) and inptype["type"] == "record":
+        records.append(name)
+        for field in inptype['fields']:
+            fieldname = name + "." + shortname(field['name'])
+            fieldtype = field['type']
+            fielddescription = field.get("doc", "")
+            add_argument(
+                toolparser, fieldname, fieldtype, records,
+                fielddescription)
+        return
+    if inptype == "string":
+        atype = Text
+    elif inptype == "int":
+        atype = int
+    elif inptype == "double":
+        atype = float
+    elif inptype == "float":
+        atype = float
+    elif inptype == "boolean":
+        action = "store_true"
+
+    if default:
+        required = False
+
+    if not atype and not action:
+        _logger.debug(u"Can't make command line argument from %s", inptype)
+        return None
+
+    if inptype != "boolean":
+        typekw = {'type': atype}
+    else:
+        typekw = {}
 
-        toolparser.add_argument(  # type: ignore
-            flag + name, required=required, help=ahelp, action=action,
-            default=default, **typekw)
+    toolparser.add_argument(  # type: ignore
+        flag + name, required=required, help=ahelp, action=action,
+        default=default, **typekw)
 
 
 def generate_parser(toolparser, tool, namemap, records):
@@ -384,26 +445,74 @@ def generate_parser(toolparser, tool, namemap, records):
 
     return toolparser
 
+def generate_example_input(inptype):
+    # type: (Union[Text, Dict[Text, Any]]) -> Any
+    defaults = { 'null': 'null',
+                 'Any': 'null',
+                 'boolean': False,
+                 'int': 0,
+                 'long': 0,
+                 'float': 0.1,
+                 'double': 0.1,
+                 'string': 'default_string',
+                 'File': { 'class': 'File',
+                           'path': 'default/file/path' },
+                 'Directory': { 'class': 'Directory',
+                                'path': 'default/directory/path' } }
+    if (not isinstance(inptype, str) and
+            not isinstance(inptype, collections.Mapping) and
+            isinstance(inptype, collections.MutableSequence)):
+        if len(inptype) == 2 and 'null' in inptype:
+            # TODO: indicate that this input is optional
+            inptype.remove('null')
+            return generate_example_input(inptype[0])
+        else:
+            raise Exception("multi-types other than optional not yet supported"
+                            " for generating example input objects: %s"
+                            % inptype)
+    if isinstance(inptype, collections.Mapping) and 'type' in inptype:
+        if inptype['type'] == 'array':
+            return [ generate_example_input(inptype['items']) ]
+        elif inptype['type'] == 'enum':
+            return 'valid_enum_value'
+            # TODO: list valid values in a comment
+        elif inptype['type'] == 'record':
+            record = {}
+            for field in inptype['fields']:
+                record[shortname(field['name'])] = generate_example_input(
+                    field['type'])
+            return record
+    elif isinstance(inptype, str):
+        return defaults.get(inptype, 'custom_type')
+        # TODO: support custom types, complex arrays
+
+
+def generate_input_template(tool):
+    # type: (Process) -> Dict[Text, Any]
+    template = {}
+    for inp in tool.tool["inputs"]:
+        name = shortname(inp["id"])
+        inptype = inp["type"]
+        template[name] = generate_example_input(inptype)
+    return template
+
+
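
Sample outputs of generate_example_input (assuming the union branch matches Python lists via the MutableSequence check above):

    generate_example_input('int')               # -> 0
    generate_example_input(['null', 'string'])  # -> 'default_string' (optional)
    generate_example_input({'type': 'array', 'items': 'File'})
    # -> [{'class': 'File', 'path': 'default/file/path'}]
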
 
 def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
                    stdout=sys.stdout, make_fs_access=None, fetcher_constructor=None):
-    # type: (argparse.Namespace, Process, IO[Any], bool, bool, IO[Any], Callable[[Text], StdFsAccess], Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]) -> Union[int, Tuple[Dict[Text, Any], Text]]
+    # type: (argparse.Namespace, Process, IO[Any], bool, bool, IO[Any], Callable[[Text], StdFsAccess], Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher]) -> Union[int, Tuple[Dict[Text, Any], Text]]
 
     job_order_object = None
 
-    jobloaderctx = {
-        u"path": {u"@type": u"@id"},
-        u"location": {u"@type": u"@id"},
-        u"format": {u"@type": u"@id"},
-        u"id": u"@id"}
-    jobloaderctx.update(t.metadata.get("$namespaces", {}))
-    loader = Loader(jobloaderctx, fetcher_constructor=fetcher_constructor)
+    _jobloaderctx = jobloaderctx.copy()
+    _jobloaderctx.update(t.metadata.get("$namespaces", {}))
+    loader = Loader(_jobloaderctx, fetcher_constructor=fetcher_constructor)  # type: ignore
 
     if len(args.job_order) == 1 and args.job_order[0][0] != "-":
         job_order_file = args.job_order[0]
     elif len(args.job_order) == 1 and args.job_order[0] == "-":
-        job_order_object = yaml.load(stdin)
-        job_order_object, _ = loader.resolve_all(job_order_object, "")
+        job_order_object = yaml.round_trip_load(stdin)
+        job_order_object, _ = loader.resolve_all(job_order_object, file_uri(os.getcwd()) + "/")
     else:
         job_order_file = None
 
@@ -431,16 +540,17 @@ def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
             for record_name in records:
                 record = {}
                 record_items = {
-                    k:v for k,v in cmd_line.iteritems()
+                    k: v for k, v in six.iteritems(cmd_line)
                     if k.startswith(record_name)}
-                for key, value in record_items.iteritems():
-                    record[key[len(record_name)+1:]] = value
+                for key, value in six.iteritems(record_items):
+                    record[key[len(record_name) + 1:]] = value
                     del cmd_line[key]
                 cmd_line[str(record_name)] = record
 
             if cmd_line["job_order"]:
                 try:
-                    input_basedir = args.basedir if args.basedir else os.path.abspath(os.path.dirname(cmd_line["job_order"]))
+                    input_basedir = args.basedir if args.basedir else os.path.abspath(
+                        os.path.dirname(cmd_line["job_order"]))
                     job_order_object = loader.resolve_ref(cmd_line["job_order"])
                 except Exception as e:
                     _logger.error(Text(e), exc_info=args.debug)
@@ -448,7 +558,9 @@ def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
             else:
                 job_order_object = {"id": args.workflow}
 
-            job_order_object.update({namemap[k]: v for k,v in cmd_line.items()})
+            del cmd_line["job_order"]
+
+            job_order_object.update({namemap[k]: v for k, v in cmd_line.items()})
 
             if _logger.isEnabledFor(logging.DEBUG):
                 _logger.debug(u"Parsed job order from command line: %s", json.dumps(job_order_object, indent=4))
@@ -463,7 +575,7 @@ def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
 
     if not job_order_object and len(t.tool["inputs"]) > 0:
         if toolparser:
-            print u"\nOptions for %s " % args.workflow
+            print(u"\nOptions for {} ".format(args.workflow))
             toolparser.print_help()
         _logger.error("")
         _logger.error("Input object required, use --help for details")
@@ -471,7 +583,7 @@ def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
 
     if print_input_deps:
         printdeps(job_order_object, loader, stdout, relative_deps, "",
-                  basedir=file_uri(input_basedir+"/"))
+                  basedir=file_uri(input_basedir + "/"))
         return 0
 
     def pathToLoc(p):
@@ -479,11 +591,21 @@ def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
             p["location"] = p["path"]
             del p["path"]
 
-    adjustDirObjs(job_order_object, pathToLoc)
-    adjustFileObjs(job_order_object, pathToLoc)
+    def addSizes(p):
+        if 'location' in p:
+            try:
+                p["size"] = os.stat(p["location"][7:]).st_size  # strip off file://
+            except OSError:
+                pass
+        elif 'contents' in p:
+                p["size"] = len(p['contents'])
+        else:
+            return  # best effort
+
+    visit_class(job_order_object, ("File", "Directory"), pathToLoc)
+    visit_class(job_order_object, ("File"), addSizes)
+    adjustDirObjs(job_order_object, trim_listing)
     normalizeFilesDirs(job_order_object)
-    adjustDirObjs(job_order_object, cast(Callable[..., Any],
-        functools.partial(getListing, make_fs_access(input_basedir))))
 
     if "cwl:tool" in job_order_object:
         del job_order_object["cwl:tool"]
@@ -492,6 +614,7 @@ def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
 
     return (job_order_object, input_basedir)
 
+
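
The normalization pass at the end of load_job_order, on a hypothetical input object: 'path' keys become 'location', after which addSizes records on-disk sizes on a best-effort basis.

    job = {'reads': {'class': 'File', 'path': '/data/reads.fq'}}
    visit_class(job, ("File", "Directory"), pathToLoc)
    # -> {'reads': {'class': 'File', 'location': '/data/reads.fq'}}
    # addSizes stats the location (minus any leading 'file://') and sets
    # p['size']; files it cannot stat are simply skipped.
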
 def makeRelative(base, ob):
     u = ob.get("location", ob.get("path"))
     if ":" in u.split("/")[0] and not u.startswith("file://"):
@@ -501,6 +624,7 @@ def makeRelative(base, ob):
             u = uri_file_path(u)
             ob["location"] = os.path.relpath(u, base)
 
+
 def printdeps(obj, document_loader, stdout, relative_deps, uri, basedir=None):
     # type: (Dict[Text, Any], Loader, IO[Any], bool, Text, Text) -> None
     deps = {"class": "File",
@@ -510,8 +634,8 @@ def printdeps(obj, document_loader, stdout, relative_deps, uri, basedir=None):
         return document_loader.fetch(document_loader.fetcher.urljoin(b, u))
 
     sf = scandeps(
-        basedir if basedir else uri, obj, set(("$import", "run")),
-        set(("$include", "$schemas", "location")), loadref)
+        basedir if basedir else uri, obj, {"$import", "run"},
+        {"$include", "$schemas", "location"}, loadref)
     if sf:
         deps["secondaryFiles"] = sf
 
@@ -523,19 +647,20 @@ def printdeps(obj, document_loader, stdout, relative_deps, uri, basedir=None):
         else:
             raise Exception(u"Unknown relative_deps %s" % relative_deps)
 
-        adjustFileObjs(deps, functools.partial(makeRelative, base))
-        adjustDirObjs(deps, functools.partial(makeRelative, base))
+        visit_class(deps, ("File", "Directory"), functools.partial(makeRelative, base))
 
     stdout.write(json.dumps(deps, indent=4))
 
+
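
Shape of the object printdeps writes to stdout (locations illustrative):

    deps = {"class": "File",
            "location": "wf.cwl",        # the primary document
            "secondaryFiles": [          # everything scandeps found via
                {"class": "File",        # $import/run/$include/$schemas/location
                 "location": "tool.cwl"}]}
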
 def print_pack(document_loader, processobj, uri, metadata):
-    # type: (Loader, Union[Dict[unicode, Any], List[Dict[unicode, Any]]], unicode, Dict[unicode, Any]) -> str
+    # type: (Loader, Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Dict[Text, Any]) -> str
     packed = pack(document_loader, processobj, uri, metadata)
     if len(packed["$graph"]) > 1:
         return json.dumps(packed, indent=4)
     else:
         return json.dumps(packed["$graph"][0], indent=4)
 
+
 def versionstring():
     # type: () -> Text
     pkg = pkg_resources.require("cwltool")
@@ -544,9 +669,18 @@ def versionstring():
     else:
         return u"%s %s" % (sys.argv[0], "unknown version")
 
+def supportedCWLversions(enable_dev):
+    # type: (bool) -> List[Text]
+    # ALLUPDATES and UPDATES are dicts
+    if enable_dev:
+        versions = list(ALLUPDATES)
+    else:
+        versions = list(UPDATES)
+    versions.sort()
+    return versions
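
So, for example (the exact lists depend on the schemas bundled with this release):

    supportedCWLversions(False)  # released specs only, sorted
    supportedCWLversions(True)   # adds development specs, e.g. 'v1.1.0-dev1'
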
 
 def main(argsl=None,  # type: List[str]
-         args=None,   # type: argparse.Namespace
+         args=None,  # type: argparse.Namespace
          executor=single_job_executor,  # type: Callable[..., Tuple[Dict[Text, Any], Text]]
          makeTool=workflow.defaultMakeTool,  # type: Callable[..., Process]
          selectResources=None,  # type: Callable[[Dict[Text, int]], Dict[Text, int]]
@@ -556,9 +690,10 @@ def main(argsl=None,  # type: List[str]
          versionfunc=versionstring,  # type: Callable[[], Text]
          job_order_object=None,  # type: Union[Tuple[Dict[Text, Any], Text], int]
          make_fs_access=StdFsAccess,  # type: Callable[[Text], StdFsAccess]
-         fetcher_constructor=None,  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+         fetcher_constructor=None,  # type: Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher]
          resolver=tool_resolver,
-         logger_handler=None
+         logger_handler=None,
+         custom_schema_callback=None  # type: Callable[[], None]
          ):
     # type: (...) -> int
 
@@ -574,32 +709,44 @@ def main(argsl=None,  # type: List[str]
                 argsl = sys.argv[1:]
             args = arg_parser().parse_args(argsl)
 
+        # On Windows, fall back to a default Docker container when the user has not explicitly provided one.
+        if onWindows() and not args.default_container:
+            # A minimal Alpine image with bash installed (about 6 MB); source: https://github.com/frol/docker-alpine-bash
+            args.default_container = windows_default_container_id
+
         # If caller provided custom arguments, it may be not every expected
         # option is set, so fill in no-op defaults to avoid crashing when
         # dereferencing them in args.
-        for k,v in {'print_deps': False,
-                    'print_pre': False,
-                    'print_rdf': False,
-                    'print_dot': False,
-                    'relative_deps': False,
-                    'tmp_outdir_prefix': 'tmp',
-                    'tmpdir_prefix': 'tmp',
-                    'print_input_deps': False,
-                    'cachedir': None,
-                    'quiet': False,
-                    'debug': False,
-                    'version': False,
-                    'enable_dev': False,
-                    'strict': True,
-                    'rdf_serializer': None,
-                    'basedir': None,
-                    'tool_help': False,
-                    'workflow': None,
-                    'job_order': None,
-                    'pack': False,
-                    'on_error': 'continue',
-                    'relax_path_checks': False,
-                    'validate': False}.iteritems():
+        for k, v in six.iteritems({'print_deps': False,
+                     'print_pre': False,
+                     'print_rdf': False,
+                     'print_dot': False,
+                     'relative_deps': False,
+                     'tmp_outdir_prefix': 'tmp',
+                     'tmpdir_prefix': 'tmp',
+                     'print_input_deps': False,
+                     'cachedir': None,
+                     'quiet': False,
+                     'debug': False,
+                     'version': False,
+                     'enable_dev': False,
+                     'enable_ext': False,
+                     'strict': True,
+                     'skip_schemas': False,
+                     'rdf_serializer': None,
+                     'basedir': None,
+                     'tool_help': False,
+                     'workflow': None,
+                     'job_order': None,
+                     'pack': False,
+                     'on_error': 'continue',
+                     'relax_path_checks': False,
+                     'validate': False,
+                     'enable_ga4gh_tool_registry': False,
+                     'ga4gh_tool_registries': [],
+                     'find_default_container': None,
+                     'make_template': False
+        }):
             if not hasattr(args, k):
                 setattr(args, k, v)
 
@@ -609,23 +756,43 @@ def main(argsl=None,  # type: List[str]
             _logger.setLevel(logging.DEBUG)
 
         if args.version:
-            print versionfunc()
+            print(versionfunc())
             return 0
         else:
             _logger.info(versionfunc())
 
+        if args.print_supported_versions:
+            print("\n".join(supportedCWLversions(args.enable_dev)))
+            return 0
+
         if not args.workflow:
             if os.path.isfile("CWLFile"):
                 setattr(args, "workflow", "CWLFile")
             else:
                 _logger.error("")
-                _logger.error("CWL document required, try --help for details")
+                _logger.error("CWL document required, no input file was provided")
+                arg_parser().print_help()
                 return 1
         if args.relax_path_checks:
             draft2tool.ACCEPTLIST_RE = draft2tool.ACCEPTLIST_EN_RELAXED_RE
 
+        if args.ga4gh_tool_registries:
+            ga4gh_tool_registries[:] = args.ga4gh_tool_registries
+        if not args.enable_ga4gh_tool_registry:
+            del ga4gh_tool_registries[:]
+
+        if custom_schema_callback:
+            custom_schema_callback()
+        elif args.enable_ext:
+            res = pkg_resources.resource_stream(__name__, 'extensions.yml')
+            use_custom_schema("v1.0", "http://commonwl.org/cwltool", res.read())
+            res.close()
+        else:
+            use_standard_schema("v1.0")
+
         try:
-            document_loader, workflowobj, uri = fetch_document(args.workflow, resolver=resolver, fetcher_constructor=fetcher_constructor)
+            document_loader, workflowobj, uri = fetch_document(args.workflow, resolver=resolver,
+                                                               fetcher_constructor=fetcher_constructor)
 
             if args.print_deps:
                 printdeps(workflowobj, document_loader, stdout, args.relative_deps, uri)
@@ -635,10 +802,8 @@ def main(argsl=None,  # type: List[str]
                 = validate_document(document_loader, workflowobj, uri,
                                     enable_dev=args.enable_dev, strict=args.strict,
                                     preprocess_only=args.print_pre or args.pack,
-                                    fetcher_constructor=fetcher_constructor)
-
-            if args.validate:
-                return 0
+                                    fetcher_constructor=fetcher_constructor,
+                                    skip_schemas=args.skip_schemas)
 
             if args.pack:
                 stdout.write(print_pack(document_loader, processobj, uri, metadata))
@@ -648,8 +813,28 @@ def main(argsl=None,  # type: List[str]
                 stdout.write(json.dumps(processobj, indent=4))
                 return 0
 
+            conf_file = getattr(args, "beta_dependency_resolvers_configuration", None)  # Text
+            use_conda_dependencies = getattr(args, "beta_conda_dependencies", None)  # Text
+
+            make_tool_kwds = vars(args)
+
+            job_script_provider = None  # type: Callable[[Any, List[str]], Text]
+            if conf_file or use_conda_dependencies:
+                dependencies_configuration = DependenciesConfiguration(args)  # type: DependenciesConfiguration
+                make_tool_kwds["job_script_provider"] = dependencies_configuration
+
+            make_tool_kwds["find_default_container"] = functools.partial(find_default_container, args)
+
             tool = make_tool(document_loader, avsc_names, metadata, uri,
-                    makeTool, vars(args))
+                             makeTool, make_tool_kwds)
+            if args.make_template:
+                yaml.safe_dump(generate_input_template(tool), sys.stdout,
+                               default_flow_style=False, indent=4,
+                               block_seq_indent=2)
+                return 0
+
+            if args.validate:
+                return 0
 
             if args.print_rdf:
                 printrdf(tool, document_loader.ctx, args.rdf_serializer, stdout)
@@ -682,7 +867,7 @@ def main(argsl=None,  # type: List[str]
             if getattr(args, dirprefix) and getattr(args, dirprefix) != 'tmp':
                 sl = "/" if getattr(args, dirprefix).endswith("/") or dirprefix == "cachedir" else ""
                 setattr(args, dirprefix,
-                        os.path.abspath(getattr(args, dirprefix))+sl)
+                        os.path.abspath(getattr(args, dirprefix)) + sl)
                 if not os.path.exists(os.path.dirname(getattr(args, dirprefix))):
                     try:
                         os.makedirs(os.path.dirname(getattr(args, dirprefix)))
@@ -695,13 +880,16 @@ def main(argsl=None,  # type: List[str]
                 setattr(args, 'move_outputs', "copy")
             setattr(args, "tmp_outdir_prefix", args.cachedir)
 
-        if job_order_object is None:
-            job_order_object = load_job_order(args, tool, stdin,
-                                              print_input_deps=args.print_input_deps,
-                                              relative_deps=args.relative_deps,
-                                              stdout=stdout,
-                                              make_fs_access=make_fs_access,
-                                              fetcher_constructor=fetcher_constructor)
+        try:
+            if job_order_object is None:
+                job_order_object = load_job_order(args, tool, stdin,
+                                                  print_input_deps=args.print_input_deps,
+                                                  relative_deps=args.relative_deps,
+                                                  stdout=stdout,
+                                                  make_fs_access=make_fs_access,
+                                                  fetcher_constructor=fetcher_constructor)
+        except SystemExit as e:
+            return e.code
 
         if isinstance(job_order_object, int):
             return job_order_object
@@ -711,21 +899,21 @@ def main(argsl=None,  # type: List[str]
             del args.workflow
             del args.job_order
             (out, status) = executor(tool, job_order_object[0],
-                           makeTool=makeTool,
-                           select_resources=selectResources,
-                           make_fs_access=make_fs_access,
-                           **vars(args))
+                                     makeTool=makeTool,
+                                     select_resources=selectResources,
+                                     make_fs_access=make_fs_access,
+                                     **vars(args))
 
             # This is the workflow output, it needs to be written
             if out is not None:
+
                 def locToPath(p):
                     if p["location"].startswith("file://"):
                         p["path"] = uri_file_path(p["location"])
 
-                adjustDirObjs(out, locToPath)
-                adjustFileObjs(out, locToPath)
+                visit_class(out, ("File", "Directory"), locToPath)
 
-                if isinstance(out, basestring):
+                if isinstance(out, six.string_types):
                     stdout.write(out)
                 else:
                     stdout.write(json.dumps(out, indent=4))
@@ -733,7 +921,7 @@ def main(argsl=None,  # type: List[str]
                 stdout.flush()
 
             if status != "success":
-                _logger.warn(u"Final process status is %s", status)
+                _logger.warning(u"Final process status is %s", status)
                 return 1
             else:
                 _logger.info(u"Final process status is %s", status)
@@ -741,7 +929,7 @@ def main(argsl=None,  # type: List[str]
 
         except (validate.ValidationException) as exc:
             _logger.error(u"Input object failed validation:\n%s", exc,
-                    exc_info=args.debug)
+                          exc_info=args.debug)
             return 1
         except UnsupportedRequirement as exc:
             _logger.error(
@@ -751,7 +939,7 @@ def main(argsl=None,  # type: List[str]
         except WorkflowException as exc:
             _logger.error(
                 u"Workflow error, try again with --debug for more "
-                "information:\n%s", strip_dup_lineno(unicode(exc)), exc_info=args.debug)
+                "information:\n%s", strip_dup_lineno(six.text_type(exc)), exc_info=args.debug)
             return 1
         except Exception as exc:
             _logger.error(
@@ -759,11 +947,20 @@ def main(argsl=None,  # type: List[str]
                 "  %s", exc, exc_info=args.debug)
             return 1
 
-        return 0
     finally:
         _logger.removeHandler(stderr_handler)
         _logger.addHandler(defaultStreamHandler)
 
 
+def find_default_container(args, builder):
+    default_container = None
+    if args.default_container:
+        default_container = args.default_container
+    elif args.beta_use_biocontainers:
+        default_container = get_container_from_software_requirements(args, builder)
+
+    return default_container
+
+
 if __name__ == "__main__":
     sys.exit(main(sys.argv[1:]))
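
main() can also be driven as a library function with an argv-style list; it
returns the process exit code rather than calling sys.exit() itself. A minimal
sketch, where the workflow filename is hypothetical:

    import sys
    from cwltool.main import main

    # Equivalent to `cwltool --validate echo.cwl` on the command line;
    # a zero exit code means the document validated.
    sys.exit(main(["--validate", "echo.cwl"]))
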
diff --git a/cwltool/mutation.py b/cwltool/mutation.py
new file mode 100644
index 0000000..d7f3dbe
--- /dev/null
+++ b/cwltool/mutation.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import
+from collections import namedtuple
+from typing import Any, Callable, Dict, Generator, Iterable, List, Text, Union, cast
+
+from .errors import WorkflowException
+
+MutationState = namedtuple("MutationState", ["generation", "readers", "stepname"])
+
+_generation = "http://commonwl.org/cwltool#generation"
+
+class MutationManager(object):
+    """Lock manager for checking correctness of in-place update of files.
+
+    Used to validate that in-place file updates happen sequentially, and that a
+    file which is registered for in-place update cannot be read or updated by
+    any other steps.
+
+    """
+
+    def __init__(self):
+        # type: () -> None
+        self.generations = {}  # type: Dict[Text, MutationState]
+
+    def register_reader(self, stepname, obj):
+        # type: (Text, Dict[Text, Any]) -> None
+        loc = obj["location"]
+        current = self.generations.get(loc, MutationState(0, [], ""))
+        obj_generation = obj.get(_generation, 0)
+
+        if obj_generation != current.generation:
+            raise WorkflowException("[job %s] wants to read %s from generation %i but current generation is %s (last updated by %s)" % (
+                                    stepname, loc, obj_generation, current.generation, current.stepname))
+
+        current.readers.append(stepname)
+        self.generations[loc] = current
+
+    def release_reader(self, stepname, obj):
+        # type: (Text, Dict[Text, Any]) -> None
+        loc = obj["location"]
+        current = self.generations.get(loc, MutationState(0, [], ""))
+        obj_generation = obj.get(_generation, 0)
+
+        if obj_generation != current.generation:
+            raise WorkflowException("[job %s] wants to release reader on %s from generation %i but current generation is %s (last updated by %s)" % (
+                                    stepname, loc, obj_generation, current.generation, current.stepname))
+
+        self.generations[loc].readers.remove(stepname)
+
+    def register_mutation(self, stepname, obj):
+        # type: (Text, Dict[Text, Any]) -> None
+        loc = obj["location"]
+        current = self.generations.get(loc, MutationState(0, [], ""))
+        obj_generation = obj.get(_generation, 0)
+
+        if len(current.readers) > 0:
+            raise WorkflowException("[job %s] wants to modify %s but has readers: %s" % (
+                stepname, loc, current.readers))
+
+        if obj_generation != current.generation:
+            raise WorkflowException("[job %s] wants to modify %s from generation %i but current generation is %s (last updated by %s)" % (
+                                    stepname, loc, obj_generation, current.generation, current.stepname))
+
+        self.generations[loc] = MutationState(current.generation+1, current.readers, stepname)
+
+    def set_generation(self, obj):
+        # type: (Dict) -> None
+        loc = obj["location"]
+        current = self.generations.get(loc, MutationState(0, [], ""))
+        obj[_generation] = current.generation
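
A sketch of the locking discipline MutationManager enforces (the File
location is hypothetical): a location may only be mutated when no readers are
registered and the object's recorded generation matches the tracker's.

    from cwltool.mutation import MutationManager

    mm = MutationManager()
    f = {"class": "File", "location": "file:///data/example.txt"}

    mm.set_generation(f)            # stamp the object with generation 0
    mm.register_reader("step1", f)  # step1 takes a read lock
    # mm.register_mutation("step2", f) would raise WorkflowException here,
    # because step1 still holds a reader on this location.
    mm.release_reader("step1", f)
    mm.register_mutation("step2", f)  # the location advances to generation 1
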
diff --git a/cwltool/pack.py b/cwltool/pack.py
index f58091a..b51d7ad 100644
--- a/cwltool/pack.py
+++ b/cwltool/pack.py
@@ -1,11 +1,13 @@
+from __future__ import absolute_import
 import copy
-import json
+from typing import Any, Callable, Dict, List, Set, Text, Union, cast
 
 from schema_salad.ref_resolver import Loader
+from six.moves import urllib
 
-from .process import scandeps, shortname, uniquename
+from .process import shortname, uniquename
+import six
 
-from typing import Union, Any, cast, Callable, Dict, Tuple, Type, IO, Text
 
 def flatten_deps(d, files):  # type: (Any, Set[Text]) -> None
     if isinstance(d, list):
@@ -19,68 +21,140 @@ def flatten_deps(d, files):  # type: (Any, Set[Text]) -> None
         if "listing" in d:
             flatten_deps(d["listing"], files)
 
-def find_run(d, runs):  # type: (Any, Set[Text]) -> None
+
+def find_run(d, loadref, runs):  # type: (Any, Callable[[Text, Text], Union[Dict, List, Text]], Set[Text]) -> None
+    if isinstance(d, list):
+        for s in d:
+            find_run(s, loadref, runs)
+    elif isinstance(d, dict):
+        if "run" in d and isinstance(d["run"], six.string_types):
+            if d["run"] not in runs:
+                runs.add(d["run"])
+                find_run(loadref(None, d["run"]), loadref, runs)
+        for s in d.values():
+            find_run(s, loadref, runs)
+
+
+def find_ids(d, ids):  # type: (Any, Set[Text]) -> None
     if isinstance(d, list):
         for s in d:
-            find_run(s, runs)
+            find_ids(s, ids)
     elif isinstance(d, dict):
-        if "run" in d and isinstance(d["run"], (str, unicode)):
-            runs.add(d["run"])
+        for i in ("id", "name"):
+            if i in d and isinstance(d[i], six.string_types):
+                ids.add(d[i])
         for s in d.values():
-            find_run(s, runs)
+            find_ids(s, ids)
+
 
 def replace_refs(d, rewrite, stem, newstem):
     # type: (Any, Dict[Text, Text], Text, Text) -> None
     if isinstance(d, list):
-        for s,v in enumerate(d):
-            if isinstance(v, (str, unicode)) and v.startswith(stem):
-                d[s] = newstem + v[len(stem):]
+        for s, v in enumerate(d):
+            if isinstance(v, six.string_types):
+                if v in rewrite:
+                    d[s] = rewrite[v]
+                elif v.startswith(stem):
+                    d[s] = newstem + v[len(stem):]
             else:
                 replace_refs(v, rewrite, stem, newstem)
     elif isinstance(d, dict):
-        if "run" in d and isinstance(d["run"], (str, unicode)):
-            d["run"] = rewrite[d["run"]]
-        for s,v in d.items():
-            if isinstance(v, (str, unicode)) and v.startswith(stem):
-                d[s] = newstem + v[len(stem):]
+        for s, v in d.items():
+            if isinstance(v, six.string_types):
+                if v in rewrite:
+                    d[s] = rewrite[v]
+                elif v.startswith(stem):
+                    d[s] = newstem + v[len(stem):]
             replace_refs(v, rewrite, stem, newstem)
 
+def import_embed(d, seen):
+    # type: (Any, Set[Text]) -> None
+    if isinstance(d, list):
+        for v in d:
+            import_embed(v, seen)
+    elif isinstance(d, dict):
+        for n in ("id", "name"):
+            if n in d:
+                if d[n] in seen:
+                    this = d[n]
+                    d.clear()
+                    d["$import"] = this
+                else:
+                    this = d[n]
+                    seen.add(this)
+                    break
+
+        for v in d.values():
+            import_embed(v, seen)
+
+
 def pack(document_loader, processobj, uri, metadata):
     # type: (Loader, Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Dict[Text, Text]) -> Dict[Text, Any]
     def loadref(b, u):
         # type: (Text, Text) -> Union[Dict, List, Text]
         return document_loader.resolve_ref(u, base_url=b)[0]
-    deps = scandeps(uri, processobj, set(("run",)), set(), loadref)
 
-    fdeps = set((uri,))
-    flatten_deps(deps, fdeps)
+    runs = {uri}
+    find_run(processobj, loadref, runs)
 
-    runs = set()  # type: Set[Text]
-    for f in fdeps:
-        find_run(document_loader.idx[f], runs)
+    ids = set()  # type: Set[Text]
+    for f in runs:
+        find_ids(document_loader.resolve_ref(f)[0], ids)
 
     names = set()  # type: Set[Text]
-    rewrite = {}
-    if isinstance(processobj, list):
-        for p in processobj:
-            rewrite[p["id"]] = "#" + uniquename(shortname(p["id"]), names)
-    else:
-        rewrite[uri] = "#main"
+    rewrite = {}  # type: Dict[Text, Text]
 
-    for r in sorted(runs):
-        rewrite[r] = "#" + uniquename(shortname(r), names)
+    mainpath, _ = urllib.parse.urldefrag(uri)
+
+    def rewrite_id(r, mainuri):
+        # type: (Text, Text) -> None
+        if r == mainuri:
+            rewrite[r] = "#main"
+        elif r.startswith(mainuri) and r[len(mainuri)] in ("#", "/"):
+            pass
+        else:
+            path, frag = urllib.parse.urldefrag(r)
+            if path == mainpath:
+                rewrite[r] = "#" + uniquename(frag, names)
+            else:
+                if path not in rewrite:
+                    rewrite[path] = "#" + uniquename(shortname(path), names)
+
+    sortedids = sorted(ids)
+
+    for r in sortedids:
+        if r in document_loader.idx:
+            rewrite_id(r, uri)
 
     packed = {"$graph": [], "cwlVersion": metadata["cwlVersion"]
-            }  # type: Dict[Text, Any]
+              }  # type: Dict[Text, Any]
 
-    for r in sorted(rewrite.keys()):
+    schemas = set()  # type: Set[Text]
+    for r in sorted(runs):
+        dcr, metadata = document_loader.resolve_ref(r)
+        if not isinstance(dcr, dict):
+            continue
+        for doc in (dcr, metadata):
+            if "$schemas" in doc:
+                for s in doc["$schemas"]:
+                    schemas.add(s)
+        if dcr.get("class") not in ("Workflow", "CommandLineTool", "ExpressionTool"):
+            continue
+        dc = cast(Dict[Text, Any], copy.deepcopy(dcr))
         v = rewrite[r]
-        dc = cast(Dict[Text, Any], copy.deepcopy(document_loader.idx[r]))
         dc["id"] = v
-        for n in ("name", "cwlVersion"):
+        for n in ("name", "cwlVersion", "$namespaces", "$schemas"):
             if n in dc:
                 del dc[n]
-        replace_refs(dc, rewrite, r+"/" if "#" in r else r+"#", v+"/")
         packed["$graph"].append(dc)
 
+    if schemas:
+        packed["$schemas"] = list(schemas)
+
+    for r in rewrite:
+        v = rewrite[r]
+        replace_refs(packed, rewrite, r + "/" if "#" in r else r + "#", v + "/")
+
+    import_embed(packed, set())
+
     return packed
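
A hedged sketch of how pack() is driven, mirroring the --pack branch of
main() above; the workflow filename is hypothetical and the five-element
return of validate_document is assumed to match its use in main():

    import json
    from cwltool.load_tool import fetch_document, validate_document
    from cwltool.pack import pack

    document_loader, workflowobj, uri = fetch_document("wf.cwl")
    document_loader, avsc_names, processobj, metadata, uri = validate_document(
        document_loader, workflowobj, uri)
    # Emit a single $graph document with the top-level process renamed "#main".
    print(json.dumps(pack(document_loader, processobj, uri, metadata), indent=4))
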
diff --git a/cwltool/pathmapper.py b/cwltool/pathmapper.py
index 3e1aa2e..6802a91 100644
--- a/cwltool/pathmapper.py
+++ b/cwltool/pathmapper.py
@@ -1,19 +1,25 @@
-import os
+from __future__ import absolute_import
+import collections
 import logging
+import os
 import stat
-import collections
 import uuid
-import urllib
-import urlparse
 from functools import partial
-from typing import Any, Callable, Set, Text, Tuple, Union
+from typing import Any, Callable, Dict, Iterable, List, Set, Text, Tuple, Union
+
 import schema_salad.validate as validate
-from schema_salad.sourceline import SourceLine
 from schema_salad.ref_resolver import uri_file_path
+from schema_salad.sourceline import SourceLine
+from six.moves import urllib
+
+from .utils import convert_pathsep_to_unix
+
+from .stdfsaccess import StdFsAccess, abspath
 
 _logger = logging.getLogger("cwltool")
 
-MapperEnt = collections.namedtuple("MapperEnt", ["resolved", "target", "type"])
+MapperEnt = collections.namedtuple("MapperEnt", ["resolved", "target", "type", "staged"])
+
 
 def adjustFiles(rec, op):  # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
     """Apply a mapping function to each File path in the object `rec`."""
@@ -27,30 +33,26 @@ def adjustFiles(rec, op):  # type: (Any, Union[Callable[..., Any], partial[Any]]
         for d in rec:
             adjustFiles(d, op)
 
-def adjustFileObjs(rec, op):  # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
-    """Apply an update function to each File object in the object `rec`."""
+def visit_class(rec, cls, op):  # type: (Any, Iterable, Union[Callable[..., Any], partial[Any]]) -> None
+    """Apply a function to with "class" in cls."""
 
     if isinstance(rec, dict):
-        if rec.get("class") == "File":
+        if "class" in rec and rec.get("class") in cls:
             op(rec)
         for d in rec:
-            adjustFileObjs(rec[d], op)
+            visit_class(rec[d], cls, op)
     if isinstance(rec, list):
         for d in rec:
-            adjustFileObjs(d, op)
+            visit_class(d, cls, op)
+
+def adjustFileObjs(rec, op):  # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
+    """Apply an update function to each File object in the object `rec`."""
+    visit_class(rec, ("File",), op)
 
 def adjustDirObjs(rec, op):
     # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
     """Apply an update function to each Directory object in the object `rec`."""
-
-    if isinstance(rec, dict):
-        if rec.get("class") == "Directory":
-            op(rec)
-        for key in rec:
-            adjustDirObjs(rec[key], op)
-    if isinstance(rec, list):
-        for d in rec:
-            adjustDirObjs(d, op)
+    visit_class(rec, ("Directory",), op)
 
 def normalizeFilesDirs(job):
     # type: (Union[List[Dict[Text, Any]], Dict[Text, Any]]) -> None
@@ -59,25 +61,30 @@ def normalizeFilesDirs(job):
             if d["class"] == "File" and ("contents" not in d):
                 raise validate.ValidationException("Anonymous file object must have 'contents' and 'basename' fields.")
             if d["class"] == "Directory" and ("listing" not in d or "basename" not in d):
-                raise validate.ValidationException("Anonymous directory object must have 'listing' and 'basename' fields.")
+                raise validate.ValidationException(
+                    "Anonymous directory object must have 'listing' and 'basename' fields.")
             d["location"] = "_:" + Text(uuid.uuid4())
             if "basename" not in d:
-                d["basename"] = Text(uuid.uuid4())
+                d["basename"] = d["location"][2:]
+
+        parse = urllib.parse.urlparse(d["location"])
+        path = parse.path
+        # strip trailing slash
+        if path.endswith("/"):
+            if d["class"] != "Directory":
+                raise validate.ValidationException(
+                    "location '%s' ends with '/' but is not a Directory" % d["location"])
+            path = path.rstrip("/")
+            d["location"] = urllib.parse.urlunparse((parse.scheme, parse.netloc, path, parse.params, parse.query, parse.fragment))
 
         if "basename" not in d:
-            parse = urlparse.urlparse(d["location"])
-            d["basename"] = os.path.basename(urllib.url2pathname(parse.path))
+            d["basename"] = os.path.basename(urllib.request.url2pathname(path))
 
-    adjustFileObjs(job, addLocation)
-    adjustDirObjs(job, addLocation)
+        if d["class"] == "File":
+            d["nameroot"], d["nameext"] = os.path.splitext(d["basename"])
 
+    visit_class(job, ("File", "Directory"), addLocation)
 
-def abspath(src, basedir):  # type: (Text, Text) -> Text
-    if src.startswith(u"file://"):
-        ab = unicode(uri_file_path(str(src)))
-    else:
-        ab = src if os.path.isabs(src) else os.path.join(basedir, src)
-    return ab
 
 def dedup(listing):  # type: (List[Any]) -> List[Any]
     marksub = set()
@@ -100,9 +107,40 @@ def dedup(listing):  # type: (List[Any]) -> List[Any]
 
     return dd
 
+def get_listing(fs_access, rec, recursive=True):
+    # type: (StdFsAccess, Dict[Text, Any], bool) -> None
+    if "listing" in rec:
+        return
+    listing = []
+    loc = rec["location"]
+    for ld in fs_access.listdir(loc):
+        parse = urllib.parse.urlparse(ld)
+        bn = os.path.basename(urllib.request.url2pathname(parse.path))
+        if fs_access.isdir(ld):
+            ent = {u"class": u"Directory",
+                   u"location": ld,
+                   u"basename": bn}
+            if recursive:
+                get_listing(fs_access, ent, recursive)
+            listing.append(ent)
+        else:
+            listing.append({"class": "File", "location": ld, "basename": bn})
+    rec["listing"] = listing
 
-class PathMapper(object):
+def trim_listing(obj):
+    """Remove 'listing' field from Directory objects that are file references.
+
+    It is redundant and potentially expensive to pass fully enumerated Directory
+    objects around if not explicitly needed, so delete the 'listing' field when
+    it is safe to do so.
+
+    """
+
+    if obj.get("location", "").startswith("file://") and "listing" in obj:
+        del obj["listing"]
 
+
+class PathMapper(object):
     """Mapping of files from relative path provided in the file to a tuple of
     (absolute local path, absolute container path)
 
@@ -143,44 +181,43 @@ class PathMapper(object):
         self.separateDirs = separateDirs
         self.setup(dedup(referenced_files), basedir)
 
-    def visitlisting(self, listing, stagedir, basedir):
-        # type: (List[Dict[Text, Any]], Text, Text) -> None
+    def visitlisting(self, listing, stagedir, basedir, copy=False, staged=False):
+        # type: (List[Dict[Text, Any]], Text, Text, bool, bool) -> None
         for ld in listing:
-            tgt = os.path.join(stagedir, ld["basename"])
-            if ld["class"] == "Directory":
-                self.visit(ld, stagedir, basedir, copy=ld.get("writable", False))
-            else:
-                self.visit(ld, stagedir, basedir, copy=ld.get("writable", False))
-
-    def visit(self, obj, stagedir, basedir, copy=False):
-        # type: (Dict[Text, Any], Text, Text, bool) -> None
-        tgt = os.path.join(stagedir, obj["basename"])
+            self.visit(ld, stagedir, basedir, copy=ld.get("writable", copy), staged=staged)
+
+    def visit(self, obj, stagedir, basedir, copy=False, staged=False):
+        # type: (Dict[Text, Any], Text, Text, bool, bool) -> None
+        tgt = convert_pathsep_to_unix(
+            os.path.join(stagedir, obj["basename"]))
+        if obj["location"] in self._pathmap:
+            return
         if obj["class"] == "Directory":
-            self._pathmap[obj["location"]] = MapperEnt(obj["location"], tgt, "Directory")
-            self.visitlisting(obj.get("listing", []), tgt, basedir)
+            if obj["location"].startswith("file://"):
+                resolved = uri_file_path(obj["location"])
+            else:
+                resolved = obj["location"]
+            self._pathmap[obj["location"]] = MapperEnt(resolved, tgt, "WritableDirectory" if copy else "Directory", staged)
+            if obj["location"].startswith("file://"):
+                staged = False
+            self.visitlisting(obj.get("listing", []), tgt, basedir, copy=copy, staged=staged)
         elif obj["class"] == "File":
             path = obj["location"]
-            if path in self._pathmap:
-                return
             ab = abspath(path, basedir)
             if "contents" in obj and obj["location"].startswith("_:"):
-                self._pathmap[obj["location"]] = MapperEnt(obj["contents"], tgt, "CreateFile")
+                self._pathmap[obj["location"]] = MapperEnt(obj["contents"], tgt, "CreateFile", staged)
             else:
-                if copy:
-                    self._pathmap[path] = MapperEnt(ab, tgt, "WritableFile")
-                else:
-                    with SourceLine(obj, "location", validate.ValidationException):
-                        # Dereference symbolic links
-                        deref = ab
+                with SourceLine(obj, "location", validate.ValidationException):
+                    # Dereference symbolic links
+                    deref = ab
+                    st = os.lstat(deref)
+                    while stat.S_ISLNK(st.st_mode):
+                        rl = os.readlink(deref)
+                        deref = rl if os.path.isabs(rl) else os.path.join(
+                            os.path.dirname(deref), rl)
                         st = os.lstat(deref)
-                        while stat.S_ISLNK(st.st_mode):
-                            rl = os.readlink(deref)
-                            deref = rl if os.path.isabs(rl) else os.path.join(
-                                os.path.dirname(deref), rl)
-                            st = os.lstat(deref)
-
-                    self._pathmap[path] = MapperEnt(deref, tgt, "File")
-                self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)
+                    self._pathmap[path] = MapperEnt(deref, tgt, "WritableFile" if copy else "File", staged)
+                    self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir, copy=copy, staged=staged)
 
     def setup(self, referenced_files, basedir):
         # type: (List[Any], Text) -> None
@@ -191,24 +228,30 @@ class PathMapper(object):
         for fob in referenced_files:
             if self.separateDirs:
                 stagedir = os.path.join(self.stagedir, "stg%s" % uuid.uuid4())
-            self.visit(fob, stagedir, basedir)
+            self.visit(fob, stagedir, basedir, copy=fob.get("writable"), staged=True)
 
     def mapper(self, src):  # type: (Text) -> MapperEnt
         if u"#" in src:
             i = src.index(u"#")
             p = self._pathmap[src[:i]]
-            return MapperEnt(p.resolved, p.target + src[i:], None)
+            return MapperEnt(p.resolved, p.target + src[i:], p.type, p.staged)
         else:
             return self._pathmap[src]
 
     def files(self):  # type: () -> List[Text]
-        return self._pathmap.keys()
+        return list(self._pathmap.keys())
 
     def items(self):  # type: () -> List[Tuple[Text, MapperEnt]]
-        return self._pathmap.items()
+        return list(self._pathmap.items())
 
     def reversemap(self, target):  # type: (Text) -> Tuple[Text, Text]
         for k, v in self._pathmap.items():
             if v[1] == target:
                 return (k, v[0])
         return None
+
+    def update(self, key, resolved, target, type, stage):  # type: (Text, Text, Text, Text, bool) -> None
+        self._pathmap[key] = MapperEnt(resolved, target, type, stage)
+
+    def __contains__(self, key):
+        return key in self._pathmap
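
visit_class is the generic walker that adjustFileObjs and adjustDirObjs above
now delegate to. A minimal sketch over a hypothetical output object:

    from cwltool.pathmapper import visit_class

    outputs = {
        "report": {"class": "File", "location": "file:///tmp/report.txt"},
        "logs": [{"class": "Directory", "location": "file:///tmp/logs"}],
    }

    def show(obj):
        # Called once for every dict whose "class" is File or Directory.
        print(obj["class"], obj["location"])

    visit_class(outputs, ("File", "Directory"), show)
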
diff --git a/cwltool/process.py b/cwltool/process.py
index ca0b195..30fdefb 100644
--- a/cwltool/process.py
+++ b/cwltool/process.py
@@ -1,41 +1,62 @@
-import os
-import json
+from __future__ import absolute_import
+
+import abc
 import copy
+import errno
+import functools
+import hashlib
+import json
 import logging
-import pprint
+import os
+import shutil
 import stat
 import tempfile
-import glob
-import urlparse
-from collections import Iterable
-import errno
-import shutil
 import uuid
-import hashlib
+from collections import Iterable
+from io import open
+from functools import cmp_to_key
+from typing import (Any, Callable, Dict, Generator, List, Set, Text,
+                    Tuple, Union, cast)
 
-import abc
-import schema_salad.validate as validate
+import avro.schema
 import schema_salad.schema
+import schema_salad.validate as validate
+import six
+from pkg_resources import resource_stream
+from rdflib import Graph, URIRef
+from rdflib.namespace import OWL, RDFS
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
 from schema_salad.ref_resolver import Loader, file_uri
 from schema_salad.sourceline import SourceLine
-import avro.schema
-from typing import (Any, AnyStr, Callable, cast, Dict, List, Generator, IO, Text,
-        Tuple, Union)
-from rdflib import URIRef
-from rdflib.namespace import RDFS, OWL
-from rdflib import Graph
-from pkg_resources import resource_stream
+from six.moves import urllib
+
+from .utils import cmp_like_py2
+from .builder import Builder
+from .errors import UnsupportedRequirement, WorkflowException
+from .pathmapper import (PathMapper, adjustDirObjs, get_listing,
+                         normalizeFilesDirs, visit_class, trim_listing)
+from .stdfsaccess import StdFsAccess
+from .utils import aslist, get_feature, copytree_with_merge, onWindows
 
+# Note: the avro-python3 package exposes this helper as
+# avro.schema.SchemaFromJSONData; the Python 2 avro package names it
+# make_avsc_object.
+AvroSchemaFromJSONData = avro.schema.make_avsc_object
 
-from ruamel.yaml.comments import CommentedSeq, CommentedMap
+class LogAsDebugFilter(logging.Filter):
+    def __init__(self, name, parent):  # type: (Text, logging.Logger) -> None
+        name = str(name)
+        super(LogAsDebugFilter, self).__init__(name)
+        self.parent = parent
+
+    def filter(self, record):
+        return self.parent.isEnabledFor(logging.DEBUG)
 
-from .utils import aslist, get_feature
-from .stdfsaccess import StdFsAccess
-from .builder import Builder, adjustFileObjs, adjustDirObjs
-from .errors import WorkflowException, UnsupportedRequirement
-from .pathmapper import PathMapper, abspath, normalizeFilesDirs
 
 _logger = logging.getLogger("cwltool")
+_logger_validation_warnings = logging.getLogger("cwltool.validation_warnings")
+_logger_validation_warnings.setLevel(_logger.getEffectiveLevel())
+_logger_validation_warnings.addFilter(LogAsDebugFilter("cwltool.validation_warnings", _logger))
 
 supportedProcessRequirements = ["DockerRequirement",
                                 "SchemaDefRequirement",
@@ -47,7 +68,9 @@ supportedProcessRequirements = ["DockerRequirement",
                                 "ShellCommandRequirement",
                                 "StepInputExpressionRequirement",
                                 "ResourceRequirement",
-                                "InitialWorkDirRequirement"]
+                                "InitialWorkDirRequirement",
+                                "http://commonwl.org/cwltool#LoadListingRequirement",
+                                "http://commonwl.org/cwltool#InplaceUpdateRequirement"]
 
 cwl_files = (
     "Workflow.yml",
@@ -61,38 +84,53 @@ cwl_files = (
 
 salad_files = ('metaschema.yml',
                'metaschema_base.yml',
-              'salad.md',
-              'field_name.yml',
-              'import_include.md',
-              'link_res.yml',
-              'ident_res.yml',
-              'vocab_res.yml',
-              'vocab_res.yml',
-              'field_name_schema.yml',
-              'field_name_src.yml',
-              'field_name_proc.yml',
-              'ident_res_schema.yml',
-              'ident_res_src.yml',
-              'ident_res_proc.yml',
-              'link_res_schema.yml',
-              'link_res_src.yml',
-              'link_res_proc.yml',
-              'vocab_res_schema.yml',
-              'vocab_res_src.yml',
-              'vocab_res_proc.yml')
+               'salad.md',
+               'field_name.yml',
+               'import_include.md',
+               'link_res.yml',
+               'ident_res.yml',
+               'vocab_res.yml',
+               'vocab_res.yml',
+               'field_name_schema.yml',
+               'field_name_src.yml',
+               'field_name_proc.yml',
+               'ident_res_schema.yml',
+               'ident_res_src.yml',
+               'ident_res_proc.yml',
+               'link_res_schema.yml',
+               'link_res_src.yml',
+               'link_res_proc.yml',
+               'vocab_res_schema.yml',
+               'vocab_res_src.yml',
+               'vocab_res_proc.yml')
 
 SCHEMA_CACHE = {}  # type: Dict[Text, Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text, Any], Loader]]
 SCHEMA_FILE = None  # type: Dict[Text, Any]
 SCHEMA_DIR = None  # type: Dict[Text, Any]
 SCHEMA_ANY = None  # type: Dict[Text, Any]
 
+custom_schemas = {}  # type: Dict[Text, Tuple[Text, Text]]
+
+def use_standard_schema(version):
+    # type: (Text) -> None
+    if version in custom_schemas:
+        del custom_schemas[version]
+    if version in SCHEMA_CACHE:
+        del SCHEMA_CACHE[version]
+
+def use_custom_schema(version, name, text):
+    # type: (Text, Text, Text) -> None
+    custom_schemas[version] = (name, text)
+    if version in SCHEMA_CACHE:
+        del SCHEMA_CACHE[version]
+
 def get_schema(version):
     # type: (Text) -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text,Any], Loader]
 
     if version in SCHEMA_CACHE:
         return SCHEMA_CACHE[version]
 
-    cache = {}
+    cache = {}  # type: Dict[Text, Union[bytes, Text]]
     version = version.split("#")[-1]
     if '.dev' in version:
         version = ".".join(version.split(".")[:-1])
@@ -108,26 +146,33 @@ def get_schema(version):
         try:
             res = resource_stream(
                 __name__, 'schemas/%s/salad/schema_salad/metaschema/%s'
-                % (version, f))
+                          % (version, f))
             cache["https://w3id.org/cwl/salad/schema_salad/metaschema/"
                   + f] = res.read()
             res.close()
         except IOError:
             pass
 
-    SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
-        "https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)
+    if version in custom_schemas:
+        cache[custom_schemas[version][0]] = custom_schemas[version][1]
+        SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
+            custom_schemas[version][0], cache=cache)
+    else:
+        SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
+            "https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)
 
     return SCHEMA_CACHE[version]
 
+
 def shortname(inputid):
     # type: (Text) -> Text
-    d = urlparse.urlparse(inputid)
+    d = urllib.parse.urlparse(inputid)
     if d.fragment:
         return d.fragment.split(u"/")[-1]
     else:
         return d.path.split(u"/")[-1]
 
+
 def checkRequirements(rec, supportedProcessRequirements):
     # type: (Any, Iterable[Any]) -> None
     if isinstance(rec, dict):
@@ -142,6 +187,7 @@ def checkRequirements(rec, supportedProcessRequirements):
         for d in rec:
             checkRequirements(d, supportedProcessRequirements)
 
+
 def adjustFilesWithSecondary(rec, op, primary=None):
     """Apply a mapping function to each File path in the object `rec`, propagating
     the primary file associated with a group of secondary files.
@@ -159,34 +205,41 @@ def adjustFilesWithSecondary(rec, op, primary=None):
         for d in rec:
             adjustFilesWithSecondary(d, op, primary)
 
-def getListing(fs_access, rec):
-    # type: (StdFsAccess, Dict[Text, Any]) -> None
-    if "listing" not in rec:
-        listing = []
-        loc = rec["location"]
-        for ld in fs_access.listdir(loc):
-            if fs_access.isdir(ld):
-                ent = {u"class": u"Directory",
-                       u"location": ld}
-                getListing(fs_access, ent)
-                listing.append(ent)
-            else:
-                listing.append({"class": "File", "location": ld})
-        rec["listing"] = listing
 
-def stageFiles(pm, stageFunc, ignoreWritable=False):
-    # type: (PathMapper, Callable[..., Any], bool) -> None
+def stageFiles(pm, stageFunc=None, ignoreWritable=False, symLink=True):
+    # type: (PathMapper, Callable[..., Any], bool, bool) -> None
     for f, p in pm.items():
+        if not p.staged:
+            continue
         if not os.path.exists(os.path.dirname(p.target)):
-            os.makedirs(os.path.dirname(p.target), 0755)
-        if p.type == "File":
-            stageFunc(p.resolved, p.target)
+            os.makedirs(os.path.dirname(p.target), 0o0755)
+        if p.type in ("File", "Directory") and (os.path.exists(p.resolved)):
+            if symLink:  # Use symlink func if allowed
+                if onWindows():
+                    if p.type == "File":
+                        shutil.copy(p.resolved, p.target)
+                    elif p.type == "Directory":
+                        if os.path.exists(p.target) and os.path.isdir(p.target):
+                            shutil.rmtree(p.target)
+                        copytree_with_merge(p.resolved, p.target)
+                else:
+                    os.symlink(p.resolved, p.target)
+            elif stageFunc is not None:
+                stageFunc(p.resolved, p.target)
+        elif p.type == "Directory" and not os.path.exists(p.target) and p.resolved.startswith("_:"):
+            os.makedirs(p.target, 0o0755)
         elif p.type == "WritableFile" and not ignoreWritable:
             shutil.copy(p.resolved, p.target)
+        elif p.type == "WritableDirectory" and not ignoreWritable:
+            if p.resolved.startswith("_:"):
+                os.makedirs(p.target, 0o0755)
+            else:
+                shutil.copytree(p.resolved, p.target)
         elif p.type == "CreateFile" and not ignoreWritable:
-            with open(p.target, "w") as n:
+            with open(p.target, "wb") as n:
                 n.write(p.resolved.encode("utf-8"))
 
+
 def collectFilesAndDirs(obj, out):
     # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], List[Dict[Text, Any]]) -> None
     if isinstance(obj, dict):
@@ -199,39 +252,80 @@ def collectFilesAndDirs(obj, out):
         for l in obj:
             collectFilesAndDirs(l, out)
 
-def relocateOutputs(outputObj, outdir, output_dirs, action):
-    # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Set[Text], Text) -> Union[Dict[Text, Any], List[Dict[Text, Any]]]
+
+def relocateOutputs(outputObj, outdir, output_dirs, action, fs_access):
+    # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Set[Text], Text, StdFsAccess) -> Union[Dict[Text, Any], List[Dict[Text, Any]]]
+    adjustDirObjs(outputObj, functools.partial(get_listing, fs_access, recursive=True))
+
     if action not in ("move", "copy"):
         return outputObj
 
     def moveIt(src, dst):
         if action == "move":
             for a in output_dirs:
-                if src.startswith(a):
+                if src.startswith(a + "/"):
                     _logger.debug("Moving %s to %s", src, dst)
-                    shutil.move(src, dst)
+                    if os.path.isdir(src) and os.path.isdir(dst):
+                        # merge directories
+                        for root, dirs, files in os.walk(src):
+                            for f in dirs + files:
+                                moveIt(os.path.join(root, f), os.path.join(dst, f))
+                    else:
+                        shutil.move(src, dst)
                     return
-        _logger.debug("Copying %s to %s", src, dst)
-        shutil.copy(src, dst)
+        if src != dst:
+            _logger.debug("Copying %s to %s", src, dst)
+            if os.path.isdir(src):
+                shutil.copytree(src, dst)
+            else:
+                shutil.copy(src, dst)
 
     outfiles = []  # type: List[Dict[Text, Any]]
     collectFilesAndDirs(outputObj, outfiles)
     pm = PathMapper(outfiles, "", outdir, separateDirs=False)
-    stageFiles(pm, moveIt)
+    stageFiles(pm, stageFunc=moveIt, symLink=False)
 
     def _check_adjust(f):
         f["location"] = file_uri(pm.mapper(f["location"])[1])
         if "contents" in f:
             del f["contents"]
-        if f["class"] == "File":
-            compute_checksums(StdFsAccess(""), f)
         return f
 
-    adjustFileObjs(outputObj, _check_adjust)
-    adjustDirObjs(outputObj, _check_adjust)
+    visit_class(outputObj, ("File", "Directory"), _check_adjust)
+
+    visit_class(outputObj, ("File",), functools.partial(compute_checksums, fs_access))
+
+    # If there are symlinks to intermediate output directories, we want to move
+    # the real files into the final output location.  If a file is linked more than once,
+    # make an internal relative symlink.
+    if action == "move":
+        relinked = {}  # type: Dict[Text, Text]
+        for root, dirs, files in os.walk(outdir):
+            for f in dirs + files:
+                path = os.path.join(root, f)
+                rp = os.path.realpath(path)
+                if path != rp:
+                    if rp in relinked:
+                        if onWindows():
+                            if os.path.isfile(path):
+                                shutil.copy(os.path.relpath(relinked[rp], path), path)
+                            elif os.path.exists(path) and os.path.isdir(path):
+                                shutil.rmtree(path)
+                                copytree_with_merge(os.path.relpath(relinked[rp], path), path)
+                        else:
+                            os.unlink(path)
+                            os.symlink(os.path.relpath(relinked[rp], path), path)
+                    else:
+                        for od in output_dirs:
+                            if rp.startswith(od + "/"):
+                                os.unlink(path)
+                                os.rename(rp, path)
+                                relinked[rp] = path
+                                break
 
     return outputObj
 
+
 def cleanIntermediate(output_dirs):  # type: (Set[Text]) -> None
     for a in output_dirs:
         if os.path.exists(a) and empty_subtree(a):
@@ -256,17 +350,17 @@ def formatSubclassOf(fmt, cls, ontology, visited):
 
     uriRefFmt = URIRef(fmt)
 
-    for s,p,o in ontology.triples( (uriRefFmt, RDFS.subClassOf, None) ):
+    for s, p, o in ontology.triples((uriRefFmt, RDFS.subClassOf, None)):
         # Find parent classes of `fmt` and search upward
         if formatSubclassOf(o, cls, ontology, visited):
             return True
 
-    for s,p,o in ontology.triples( (uriRefFmt, OWL.equivalentClass, None) ):
+    for s, p, o in ontology.triples((uriRefFmt, OWL.equivalentClass, None)):
         # Find equivalent classes of `fmt` and search horizontally
         if formatSubclassOf(o, cls, ontology, visited):
             return True
 
-    for s,p,o in ontology.triples( (None, OWL.equivalentClass, uriRefFmt) ):
+    for s, p, o in ontology.triples((None, OWL.equivalentClass, uriRefFmt)):
         # Find equivalent classes of `fmt` and search horizontally
         if formatSubclassOf(s, cls, ontology, visited):
             return True
@@ -277,23 +371,28 @@ def formatSubclassOf(fmt, cls, ontology, visited):
 def checkFormat(actualFile, inputFormats, ontology):
     # type: (Union[Dict[Text, Any], List, Text], Union[List[Text], Text], Graph) -> None
     for af in aslist(actualFile):
+        if not af:
+            continue
         if "format" not in af:
             raise validate.ValidationException(u"Missing required 'format' for File %s" % af)
         for inpf in aslist(inputFormats):
             if af["format"] == inpf or formatSubclassOf(af["format"], inpf, ontology, set()):
                 return
-        raise validate.ValidationException(u"Incompatible file format %s required format(s) %s" % (af["format"], inputFormats))
+        raise validate.ValidationException(
+            u"Incompatible file format %s required format(s) %s" % (af["format"], inputFormats))
+
 
 def fillInDefaults(inputs, job):
     # type: (List[Dict[Text, Text]], Dict[Text, Union[Dict[Text, Any], List, Text]]) -> None
     for e, inp in enumerate(inputs):
         with SourceLine(inputs, e, WorkflowException):
-            if shortname(inp[u"id"]) in job:
-                pass
-            elif shortname(inp[u"id"]) not in job and u"default" in inp:
-                job[shortname(inp[u"id"])] = copy.copy(inp[u"default"])
-            elif shortname(inp[u"id"]) not in job and aslist(inp[u"type"])[0] == u"null":
+            fieldname = shortname(inp[u"id"])
+            if job.get(fieldname) is not None:
                 pass
+            elif job.get(fieldname) is None and u"default" in inp:
+                job[fieldname] = copy.copy(inp[u"default"])
+            elif job.get(fieldname) is None and u"null" in aslist(inp[u"type"]):
+                job[fieldname] = None
             else:
                 raise WorkflowException("Missing required input parameter `%s`" % shortname(inp["id"]))
 
@@ -309,16 +408,14 @@ def avroize_type(field_type, name_prefix=""):
     elif isinstance(field_type, dict):
         if field_type["type"] in ("enum", "record"):
             if "name" not in field_type:
-                field_type["name"] = name_prefix+Text(uuid.uuid4())
+                field_type["name"] = name_prefix + Text(uuid.uuid4())
         if field_type["type"] == "record":
             avroize_type(field_type["fields"], name_prefix)
         if field_type["type"] == "array":
             avroize_type(field_type["items"], name_prefix)
     return field_type
 
-class Process(object):
-    __metaclass__ = abc.ABCMeta
-
+class Process(six.with_metaclass(abc.ABCMeta, object)):
     def __init__(self, toolpath_object, **kwargs):
         # type: (Dict[Text, Any], **Any) -> None
         """
@@ -339,9 +436,9 @@ class Process(object):
         if SCHEMA_FILE is None:
             get_schema("v1.0")
             SCHEMA_ANY = cast(Dict[Text, Any],
-                    SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/salad#Any"])
+                              SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/salad#Any"])
             SCHEMA_FILE = cast(Dict[Text, Any],
-                    SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#File"])
+                               SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#File"])
             SCHEMA_DIR = cast(Dict[Text, Any],
                               SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#Directory"])
 
@@ -363,7 +460,7 @@ class Process(object):
 
         checkRequirements(self.tool, supportedProcessRequirements)
         self.validate_hints(kwargs["avsc_names"], self.tool.get("hints", []),
-                strict=kwargs.get("strict"))
+                            strict=kwargs.get("strict"))
 
         self.schemaDefs = {}  # type: Dict[Text,Dict[Text, Any]]
 
@@ -373,8 +470,8 @@ class Process(object):
             sdtypes = sd["types"]
             av = schema_salad.schema.make_valid_avro(sdtypes, {t["name"]: t for t in avroize_type(sdtypes)}, set())
             for i in av:
-                self.schemaDefs[i["name"]] = i
-            avro.schema.make_avsc_object(av, self.names)
+                self.schemaDefs[i["name"]] = i  # type: ignore
+            AvroSchemaFromJSONData(av, self.names)  # type: ignore
 
         # Build record schema from inputs
         self.inputs_record_schema = {
@@ -404,17 +501,20 @@ class Process(object):
                     self.outputs_record_schema["fields"].append(c)
 
         try:
-            self.inputs_record_schema = schema_salad.schema.make_valid_avro(self.inputs_record_schema, {}, set())
-            avro.schema.make_avsc_object(self.inputs_record_schema, self.names)
+            self.inputs_record_schema = cast(Dict[six.text_type, Any], schema_salad.schema.make_valid_avro(self.inputs_record_schema, {}, set()))
+            AvroSchemaFromJSONData(self.inputs_record_schema, self.names)
         except avro.schema.SchemaParseException as e:
-            raise validate.ValidationException(u"Got error `%s` while processing inputs of %s:\n%s" % (Text(e), self.tool["id"], json.dumps(self.inputs_record_schema, indent=4)))
+            raise validate.ValidationException(u"Got error `%s` while processing inputs of %s:\n%s" %
+                                               (Text(e), self.tool["id"],
+                                                json.dumps(self.inputs_record_schema, indent=4)))
 
         try:
-            self.outputs_record_schema = schema_salad.schema.make_valid_avro(self.outputs_record_schema, {}, set())
-            avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
+            self.outputs_record_schema = cast(Dict[six.text_type, Any], schema_salad.schema.make_valid_avro(self.outputs_record_schema, {}, set()))
+            AvroSchemaFromJSONData(self.outputs_record_schema, self.names)
         except avro.schema.SchemaParseException as e:
-            raise validate.ValidationException(u"Got error `%s` while processing outputs of %s:\n%s" % (Text(e), self.tool["id"], json.dumps(self.outputs_record_schema, indent=4)))
-
+            raise validate.ValidationException(u"Got error `%s` while processing outputs of %s:\n%s" %
+                                               (Text(e), self.tool["id"],
+                                                json.dumps(self.outputs_record_schema, indent=4)))
 
     def _init_job(self, joborder, **kwargs):
         # type: (Dict[Text, Text], **Any) -> Builder
@@ -436,13 +536,14 @@ class Process(object):
 
         builder = Builder()
         builder.job = cast(Dict[Text, Union[Dict[Text, Any], List,
-            Text]], copy.deepcopy(joborder))
+                                            Text]], copy.deepcopy(joborder))
 
         # Validate job order
         try:
             fillInDefaults(self.tool[u"inputs"], builder.job)
             normalizeFilesDirs(builder.job)
-            validate.validate_ex(self.names.get_name("input_record_schema", ""), builder.job)
+            validate.validate_ex(self.names.get_name("input_record_schema", ""), builder.job,
+                                 strict=False, logger=_logger_validation_warnings)
         except (validate.ValidationException, WorkflowException) as e:
             raise WorkflowException("Invalid job input record:\n" + Text(e))
 
@@ -455,19 +556,34 @@ class Process(object):
         builder.resources = {}
         builder.timeout = kwargs.get("eval_timeout")
         builder.debug = kwargs.get("debug")
-
-        dockerReq, is_req = self.get_requirement("DockerRequirement")
-
-        if dockerReq and is_req and not kwargs.get("use_container"):
-            raise WorkflowException("Document has DockerRequirement under 'requirements' but use_container is false.  DockerRequirement must be under 'hints' or use_container must be true.")
+        builder.mutation_manager = kwargs.get("mutation_manager")
 
         builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
         builder.fs_access = builder.make_fs_access(kwargs["basedir"])
 
-        if dockerReq and kwargs.get("use_container"):
-            builder.outdir = builder.fs_access.realpath(dockerReq.get("dockerOutputDirectory") or kwargs.get("docker_outdir") or "/var/spool/cwl")
-            builder.tmpdir = builder.fs_access.realpath(kwargs.get("docker_tmpdir") or "/tmp")
-            builder.stagedir = builder.fs_access.realpath(kwargs.get("docker_stagedir") or "/var/lib/cwl")
+        loadListingReq, _ = self.get_requirement("http://commonwl.org/cwltool#LoadListingRequirement")
+        if loadListingReq:
+            builder.loadListing = loadListingReq.get("loadListing")
+
+        dockerReq, is_req = self.get_requirement("DockerRequirement")
+        defaultDocker = None
+
+        if dockerReq is None and "default_container" in kwargs:
+            defaultDocker = kwargs["default_container"]
+
+        if (dockerReq or defaultDocker) and kwargs.get("use_container"):
+            if dockerReq:
+                # Check if docker output directory is absolute
+                if dockerReq.get("dockerOutputDirectory") and dockerReq.get("dockerOutputDirectory").startswith('/'):
+                    builder.outdir = dockerReq.get("dockerOutputDirectory")
+                else:
+                    builder.outdir = builder.fs_access.docker_compatible_realpath(
+                        dockerReq.get("dockerOutputDirectory") or kwargs.get("docker_outdir") or "/var/spool/cwl")
+            elif defaultDocker:
+                builder.outdir = builder.fs_access.docker_compatible_realpath(
+                    kwargs.get("docker_outdir") or "/var/spool/cwl")
+            builder.tmpdir = builder.fs_access.docker_compatible_realpath(kwargs.get("docker_tmpdir") or "/tmp")
+            builder.stagedir = builder.fs_access.docker_compatible_realpath(kwargs.get("docker_stagedir") or "/var/lib/cwl")
         else:
             builder.outdir = builder.fs_access.realpath(kwargs.get("outdir") or tempfile.mkdtemp())
             builder.tmpdir = builder.fs_access.realpath(kwargs.get("tmpdir") or tempfile.mkdtemp())
@@ -517,14 +633,20 @@ class Process(object):
                     cm.lc.filename = fn
                     builder.bindings.append(cm)
 
-        builder.bindings.sort(key=lambda a: a["position"])
-
+        # Use Python 2-style sorting of heterogeneous lists
+        # (containing both str and int types).
+        # TODO: unify this behaviour across both runtimes
+        if six.PY3:
+            key = cmp_to_key(cmp_like_py2)
+        else:  # PY2
+            key = lambda d: d["position"]
+        builder.bindings.sort(key=key)
         builder.resources = self.evalResources(builder, kwargs)
-
+        builder.job_script_provider = kwargs.get("job_script_provider", None)
         return builder
 
     def evalResources(self, builder, kwargs):
-        # type: (Builder, Dict[AnyStr, Any]) -> Dict[Text, Union[int, Text]]
+        # type: (Builder, Dict[str, Any]) -> Dict[Text, Union[int, Text]]
         resourceReq, _ = self.get_requirement("ResourceRequirement")
         if resourceReq is None:
             resourceReq = {}
@@ -541,18 +663,18 @@ class Process(object):
         for a in ("cores", "ram", "tmpdir", "outdir"):
             mn = None
             mx = None
-            if resourceReq.get(a+"Min"):
-                mn = builder.do_eval(resourceReq[a+"Min"])
-            if resourceReq.get(a+"Max"):
-                mx = builder.do_eval(resourceReq[a+"Max"])
+            if resourceReq.get(a + "Min"):
+                mn = builder.do_eval(resourceReq[a + "Min"])
+            if resourceReq.get(a + "Max"):
+                mx = builder.do_eval(resourceReq[a + "Max"])
             if mn is None:
                 mn = mx
             elif mx is None:
                 mx = mn
 
             if mn:
-                request[a+"Min"] = mn
-                request[a+"Max"] = mx
+                request[a + "Min"] = mn
+                request[a + "Max"] = mx
 
         if kwargs.get("select_resources"):
             return kwargs["select_resources"](request)
@@ -570,8 +692,8 @@ class Process(object):
             sl = SourceLine(hints, i, validate.ValidationException)
             with sl:
                 if avsc_names.get_name(r["class"], "") is not None:
-                    plain_hint = dict((key,r[key]) for key in r if key not in
-                            self.doc_loader.identifiers)  # strip identifiers
+                    plain_hint = dict((key, r[key]) for key in r if key not in
+                                      self.doc_loader.identifiers)  # strip identifiers
                     validate.validate_ex(
                         avsc_names.get_name(plain_hint["class"], ""),
                         plain_hint, strict=strict)
@@ -585,10 +707,15 @@ class Process(object):
         op(self.tool)
 
     @abc.abstractmethod
-    def job(self, job_order, output_callbacks, **kwargs):
-        # type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator[Any, None, None]
+    def job(self,
+            job_order,  # type: Dict[Text, Text]
+            output_callbacks,  # type: Callable[[Any, Any], Any]
+            **kwargs  # type: Any
+            ):
+        # type: (...) -> Generator[Any, None, None]
         return None
 
+
 def empty_subtree(dirpath):  # type: (Text) -> bool
     # Test if a directory tree contains any files (does not count empty
     # subdirectories)
@@ -610,6 +737,7 @@ def empty_subtree(dirpath):  # type: (Text) -> bool
 
 _names = set()  # type: Set[Text]
 
+
 def uniquename(stem, names=None):  # type: (Text, Set[Text]) -> Text
     global _names
     if names is None:
@@ -622,6 +750,7 @@ def uniquename(stem, names=None):  # type: (Text, Set[Text]) -> Text
     names.add(u)
     return u
 
+
 def nestdir(base, deps):
     # type: (Text, Dict[Text, Any]) -> Dict[Text, Any]
     dirname = os.path.dirname(base) + "/"
@@ -639,6 +768,7 @@ def nestdir(base, deps):
             }
     return deps
 
+
 def mergedirs(listing):
     # type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
     r = []  # type: List[Dict[Text, Any]]
@@ -646,22 +776,23 @@ def mergedirs(listing):
     for e in listing:
         if e["basename"] not in ents:
             ents[e["basename"]] = e
-        elif e["class"] == "Directory":
-            ents[e["basename"]]["listing"].extend(e["listing"])
-    for e in ents.itervalues():
+        elif e["class"] == "Directory" and e.get("listing"):
+            ents[e["basename"]].setdefault("listing", []).extend(e["listing"])
+    for e in six.itervalues(ents):
         if e["class"] == "Directory" and "listing" in e:
             e["listing"] = mergedirs(e["listing"])
-    r.extend(ents.itervalues())
+    r.extend(six.itervalues(ents))
     return r
 
-def scandeps(base, doc, reffields, urlfields, loadref, urljoin=urlparse.urljoin):
+
+def scandeps(base, doc, reffields, urlfields, loadref, urljoin=urllib.parse.urljoin):
     # type: (Text, Any, Set[Text], Set[Text], Callable[[Text, Text], Any], Callable[[Text, Text], Text]) -> List[Dict[Text, Text]]
     r = []  # type: List[Dict[Text, Text]]
     deps = None  # type: Dict[Text, Any]
     if isinstance(doc, dict):
         if "id" in doc:
             if doc["id"].startswith("file://"):
-                df, _ = urlparse.urldefrag(doc["id"])
+                df, _ = urllib.parse.urldefrag(doc["id"])
                 if base != df:
                     r.append({
                         "class": "File",
@@ -688,7 +819,7 @@ def scandeps(base, doc, reffields, urlfields, loadref, urljoin=urlparse.urljoin)
                 elif doc["class"] == "File" and "secondaryFiles" in doc:
                     r.extend(scandeps(base, doc["secondaryFiles"], reffields, urlfields, loadref, urljoin=urljoin))
 
-        for k, v in doc.iteritems():
+        for k, v in six.iteritems(doc):
             if k in reffields:
                 for u in aslist(v):
                     if isinstance(u, dict):
@@ -725,14 +856,15 @@ def scandeps(base, doc, reffields, urlfields, loadref, urljoin=urlparse.urljoin)
 
     return r
 
+
 def compute_checksums(fs_access, fileobj):
     if "checksum" not in fileobj:
         checksum = hashlib.sha1()
         with fs_access.open(fileobj["location"], "rb") as f:
-            contents = f.read(1024*1024)
-            while contents != "":
+            contents = f.read(1024 * 1024)
+            while contents != b"":
                 checksum.update(contents)
-                contents = f.read(1024*1024)
+                contents = f.read(1024 * 1024)
             f.seek(0, 2)
             filesize = f.tell()
         fileobj["checksum"] = "sha1$%s" % checksum.hexdigest()
diff --git a/cwltool/resolver.py b/cwltool/resolver.py
index cf753f3..56e16e3 100644
--- a/cwltool/resolver.py
+++ b/cwltool/resolver.py
@@ -1,16 +1,18 @@
-import os
+from __future__ import absolute_import
 import logging
-import urllib
-import urlparse
+import os
+
+from six.moves import urllib
 
 from schema_salad.ref_resolver import file_uri
 
 _logger = logging.getLogger("cwltool")
 
+
 def resolve_local(document_loader, uri):
     if uri.startswith("/"):
         return None
-    shares = [os.environ.get("XDG_DATA_HOME", os.path.join(os.environ["HOME"], ".local", "share"))]
+    shares = [os.environ.get("XDG_DATA_HOME", os.path.join(os.path.expanduser('~'), ".local", "share"))]
     shares.extend(os.environ.get("XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":"))
     shares = [os.path.join(s, "commonwl", uri) for s in shares]
     shares.insert(0, os.path.join(os.getcwd(), uri))
@@ -24,9 +26,27 @@ def resolve_local(document_loader, uri):
             return file_uri(s)
     return None
 
+
 def tool_resolver(document_loader, uri):
-    for r in [resolve_local]:
+    for r in [resolve_local, resolve_ga4gh_tool]:
         ret = r(document_loader, uri)
         if ret is not None:
             return ret
-    return file_uri(os.path.abspath(uri))
+    return file_uri(os.path.abspath(uri), split_frag=True)
+
+
+ga4gh_tool_registries = ["https://dockstore.org:8443"]
+
+def resolve_ga4gh_tool(document_loader, uri):
+    path, version = uri.partition(":")[::2]
+    if not version:
+        version = "latest"
+    for reg in ga4gh_tool_registries:
+        ds = "{0}/api/ga4gh/v1/tools/{1}/versions/{2}/plain-CWL/descriptor".format(reg, urllib.parse.quote(path, ""), urllib.parse.quote(version, ""))
+        try:
+            resp = document_loader.session.head(ds)
+            resp.raise_for_status()
+            return ds
+        except Exception:
+            pass
+    return None
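
For reference, a sketch of the descriptor URL this resolver constructs; the tool id below is a made-up example:

```
from six.moves import urllib

uri = "quay.io/example/example-tool:1.0"
path, version = uri.partition(":")[::2]
url = "{0}/api/ga4gh/v1/tools/{1}/versions/{2}/plain-CWL/descriptor".format(
    "https://dockstore.org:8443",
    urllib.parse.quote(path, ""), urllib.parse.quote(version, ""))
# quote(path, "") percent-encodes the slashes as well:
# .../tools/quay.io%2Fexample%2Fexample-tool/versions/1.0/plain-CWL/descriptor
```
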
diff --git a/cwltool/sandboxjs.py b/cwltool/sandboxjs.py
index 70a9f19..a4c92be 100644
--- a/cwltool/sandboxjs.py
+++ b/cwltool/sandboxjs.py
@@ -1,27 +1,57 @@
-import subprocess
-import json
-import threading
+from __future__ import absolute_import
 import errno
+import json
 import logging
-import select
 import os
-
-import cStringIO
-from cStringIO import StringIO
-from typing import Any, Dict, List, Mapping, Text, TypeVar, Union
+import select
+import subprocess
+import threading
+import sys
+from io import BytesIO
+from typing import Any, Dict, List, Mapping, Text, Tuple, Union
+from .utils import onWindows
 from pkg_resources import resource_stream
 
+import six
+
+try:
+    import queue  # type: ignore
+except ImportError:
+    import Queue as queue  # type: ignore
+
 class JavascriptException(Exception):
     pass
 
 
 _logger = logging.getLogger("cwltool")
 
-JSON = Union[Dict[Text,Any], List[Any], Text, int, long, float, bool, None]
+JSON = Union[Dict[Text, Any], List[Any], Text, int, float, bool, None]
 
 localdata = threading.local()
 
 have_node_slim = False
+# minimum acceptable version of nodejs engine
+minimum_node_version_str = '0.10.26'
+
+def check_js_threshold_version(working_alias):
+    # type: (str) -> bool
+
+    """Checks if the nodeJS engine version on the system
+    with the allowed minimum version.
+    https://github.com/nodejs/node/blob/master/CHANGELOG.md#nodejs-changelog
+    """
+    # parse nodejs version into int Tuple: 'v4.2.6\n' -> [4, 2, 6]
+    current_version_str = subprocess.check_output(
+        [working_alias, "-v"]).decode('utf-8')
+
+    current_version = [int(v) for v in current_version_str.strip().strip('v').split('.')]
+    minimum_node_version = [int(v) for v in minimum_node_version_str.split('.')]
+
+    return current_version >= minimum_node_version
+
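
A quick illustration of why the comparison above works; this is standard Python list semantics, nothing cwltool-specific:

```
# Python compares lists elementwise, so version lists order correctly
# even when components have a different number of digits.
assert [int(v) for v in "v4.2.6\n".strip().strip("v").split(".")] == [4, 2, 6]
assert [4, 2, 6] >= [0, 10, 26]          # v4.2.6 meets the v0.10.26 threshold
assert not ([0, 9, 30] >= [0, 10, 26])   # v0.9.30 does not
```
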
 
 def new_js_proc():
     # type: () -> subprocess.Popen
@@ -29,26 +59,39 @@ def new_js_proc():
     res = resource_stream(__name__, 'cwlNodeEngine.js')
     nodecode = res.read()
 
+    required_node_version = docker = False
     nodejs = None
     trynodes = ("nodejs", "node")
     for n in trynodes:
         try:
-            nodejs = subprocess.Popen([n, "--eval", nodecode], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-            break
+            if subprocess.check_output([n, "--eval", "process.stdout.write('t')"]).decode('utf-8') != "t":
+                continue
+            else:
+                nodejs = subprocess.Popen([n, "--eval", nodecode],
+                                          stdin=subprocess.PIPE,
+                                          stdout=subprocess.PIPE,
+                                          stderr=subprocess.PIPE)
+
+                required_node_version = check_js_threshold_version(n)
+                break
+        except subprocess.CalledProcessError:
+            pass
         except OSError as e:
             if e.errno == errno.ENOENT:
                 pass
             else:
                 raise
 
-    if nodejs is None:
+    if nodejs is None or not required_node_version:
         try:
             nodeimg = "node:slim"
             global have_node_slim
             if not have_node_slim:
-                dockerimgs = subprocess.check_output(["docker", "images", nodeimg])
+                dockerimgs = subprocess.check_output(["docker", "images", "-q", nodeimg]).decode('utf-8')
+                # if output is an empty string
                 if len(dockerimgs.split("\n")) <= 1:
-                    nodejsimg = subprocess.check_output(["docker", "pull", nodeimg])
+                    # pull node:slim docker container
+                    nodejsimg = subprocess.check_output(["docker", "pull", nodeimg]).decode('utf-8')
                     _logger.info("Pulled Docker image %s %s", nodeimg, nodejsimg)
                 have_node_slim = True
             nodejs = subprocess.Popen(["docker", "run",
@@ -56,6 +99,7 @@ def new_js_proc():
                                        "--sig-proxy=true", "--interactive",
                                        "--rm", nodeimg, "node", "--eval", nodecode],
                                       stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            docker = True
         except OSError as e:
             if e.errno == errno.ENOENT:
                 pass
@@ -64,27 +108,36 @@ def new_js_proc():
         except subprocess.CalledProcessError:
             pass
 
+    # docker failed and nodejs is not on the system
     if nodejs is None:
         raise JavascriptException(
             u"cwltool requires Node.js engine to evaluate Javascript "
             "expressions, but couldn't find it.  Tried %s, docker run "
             "node:slim" % u", ".join(trynodes))
 
+    # docker failed; nodejs is installed on the system but its version is below the required minimum
+    if docker is False and required_node_version is False:
+        raise JavascriptException(
+            u'cwltool requires at least v{} of the Node.js engine. '
+            u'Try updating: https://docs.npmjs.com/getting-started/installing-node'.format(minimum_node_version_str))
+
     return nodejs
 
 
 def execjs(js, jslib, timeout=None, debug=False):  # type: (Union[Mapping, Text], Any, int, bool) -> JSON
 
-    if not hasattr(localdata, "proc") or localdata.proc.poll() is not None:
+    if not hasattr(localdata, "proc") or localdata.proc.poll() is not None or onWindows():
         localdata.proc = new_js_proc()
 
     nodejs = localdata.proc
 
-    fn = u"\"use strict\";\n%s\n(function()%s)()" % (jslib, js if isinstance(js, basestring) and len(js) > 1 and js[0] == '{' else ("{return (%s);}" % js))
+    fn = u"\"use strict\";\n%s\n(function()%s)()" %\
+         (jslib, js if isinstance(js, six.string_types) and len(js) > 1 and js[0] == '{' else ("{return (%s);}" % js))
 
     killed = []
 
-    def term():
+    """ Kill the node process if it exceeds timeout limit"""
+    def terminate():
         try:
             killed.append(True)
             nodejs.kill()
@@ -94,33 +147,126 @@ def execjs(js, jslib, timeout=None, debug=False):  # type: (Union[Mapping, Text]
     if timeout is None:
         timeout = 20
 
-    tm = threading.Timer(timeout, term)
+    tm = threading.Timer(timeout, terminate)
     tm.start()
 
-    stdin_buf = StringIO(json.dumps(fn)+"\n")
-    stdout_buf = StringIO()
-    stderr_buf = StringIO()
-
-    completed = []  # type: List[Union[cStringIO.InputType, cStringIO.OutputType]]
-    while len(completed) < 3:
-        rready, wready, _ = select.select([nodejs.stdout, nodejs.stderr], [nodejs.stdin], [])
-        if nodejs.stdin in wready:
-            b = stdin_buf.read(select.PIPE_BUF)
-            if b:
-                os.write(nodejs.stdin.fileno(), b)
-            elif stdin_buf not in completed:
-                completed.append(stdin_buf)
-        for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
-            if pipes[0] in rready:
-                b = os.read(pipes[0].fileno(), select.PIPE_BUF)
+    stdin_buf = BytesIO((json.dumps(fn) + "\n").encode('utf-8'))
+    stdout_buf = BytesIO()
+    stderr_buf = BytesIO()
+
+    rselect = [nodejs.stdout, nodejs.stderr]  # type: List[BytesIO]
+    wselect = [nodejs.stdin]  # type: List[BytesIO]
+
+    # On Windows systems standard input/output are not handled properly by the select module
+    # (modules like pywin32, msvcrt, and gevent don't work either)
+    if sys.platform == 'win32':
+        READ_BYTES_SIZE = 512
+
+        # creating queue for reading from a thread to queue
+        input_queue = queue.Queue()
+        output_queue = queue.Queue()
+        error_queue = queue.Queue()
+
+        # To tell threads that output has ended and threads can safely exit
+        no_more_output = threading.Lock()
+        no_more_output.acquire()
+        no_more_error = threading.Lock()
+        no_more_error.acquire()
+
+        # put the constructed command into the input queue, to be passed on to nodejs's stdin
+        def put_input(input_queue):
+            while True:
+                b = stdin_buf.read(READ_BYTES_SIZE)
+                if b:
+                    input_queue.put(b)
+                else:
+                    break
+
+        # get the output from nodejs's stdout and continue till the output ends
+        def get_output(output_queue):
+            while not no_more_output.acquire(False):
+                b = os.read(nodejs.stdout.fileno(), READ_BYTES_SIZE)
+                if b:
+                    output_queue.put(b)
+
+        # get the output from nodejs's stderr and continue till error output ends
+        def get_error(error_queue):
+            while not no_more_error.acquire(False):
+                b = os.read(nodejs.stderr.fileno(), READ_BYTES_SIZE)
                 if b:
-                    pipes[1].write(b)
-                elif pipes[1] not in completed:
-                    completed.append(pipes[1])
-        if stdout_buf.getvalue().endswith("\n"):
-            for buf in (stdout_buf, stderr_buf):
-                if buf not in completed:
-                    completed.append(buf)
+                    error_queue.put(b)
+
+        # Threads managing nodejs.stdin, nodejs.stdout and nodejs.stderr respectively
+        input_thread = threading.Thread(target=put_input, args=(input_queue,))
+        input_thread.daemon = True
+        input_thread.start()
+        output_thread = threading.Thread(target=get_output, args=(output_queue,))
+        output_thread.daemon = True
+        output_thread.start()
+        error_thread = threading.Thread(target=get_error, args=(error_queue,))
+        error_thread.daemon = True
+        error_thread.start()
+
+        # mark whether output/error is ready
+        output_ready = False
+        error_ready = False
+
+        while (len(wselect) + len(rselect)) > 0:
+            try:
+                if nodejs.stdin in wselect:
+                    if not input_queue.empty():
+                        os.write(nodejs.stdin.fileno(), input_queue.get())
+                    elif not input_thread.is_alive():
+                        wselect = []
+                if nodejs.stdout in rselect:
+                    if not output_queue.empty():
+                        output_ready = True
+                        stdout_buf.write(output_queue.get())
+                    elif output_ready:
+                        rselect = []
+                        no_more_output.release()
+                        no_more_error.release()
+                        output_thread.join()
+
+                if nodejs.stderr in rselect:
+                    if not error_queue.empty():
+                        error_ready = True
+                        stderr_buf.write(error_queue.get())
+                    elif error_ready:
+                        rselect = []
+                        no_more_output.release()
+                        no_more_error.release()
+                        output_thread.join()
+                        error_thread.join()
+                if stdout_buf.getvalue().endswith(b"\n"):
+                    rselect = []
+                    no_more_output.release()
+                    no_more_error.release()
+                    output_thread.join()
+            except OSError as e:
+                break
+
+    else:
+        while (len(wselect) + len(rselect)) > 0:
+            rready, wready, _ = select.select(rselect, wselect, [])
+            try:
+                if nodejs.stdin in wready:
+                    b = stdin_buf.read(select.PIPE_BUF)
+                    if b:
+                        os.write(nodejs.stdin.fileno(), b)
+                    else:
+                        wselect = []
+                for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
+                    if pipes[0] in rready:
+                        b = os.read(pipes[0].fileno(), select.PIPE_BUF)
+                        if b:
+                            pipes[1].write(b)
+                        else:
+                            rselect.remove(pipes[0])
+                if stdout_buf.getvalue().endswith(b"\n"):
+                    rselect = []
+            except OSError as e:
+                break
     tm.cancel()
 
     stdin_buf.close()
@@ -132,19 +278,22 @@ def execjs(js, jslib, timeout=None, debug=False):  # type: (Union[Mapping, Text]
         ofs = 0
         maxlines = 99
         if len(lines) > maxlines:
-            ofs = len(lines)-maxlines
+            ofs = len(lines) - maxlines
             lines = lines[-maxlines:]
-        return u"\n".join(u"%02i %s" % (i+ofs+1, b) for i, b in enumerate(lines))
+        return u"\n".join(u"%02i %s" % (i + ofs + 1, b) for i, b in enumerate(lines))
 
-    def stdfmt(data):  # type: (unicode) -> unicode
+    def stdfmt(data):  # type: (Text) -> Text
         if "\n" in data:
             return "\n" + data.strip()
         return data
 
+    nodejs.poll()
+
     if debug:
-        info = u"returncode was: %s\nscript was:\n%s\nstdout was: %s\nstderr was: %s\n" % (nodejs.returncode, fn_linenum(), stdfmt(stdoutdata), stdfmt(stderrdata))
+        info = u"returncode was: %s\nscript was:\n%s\nstdout was: %s\nstderr was: %s\n" %\
+               (nodejs.returncode, fn_linenum(), stdfmt(stdoutdata.decode('utf-8')), stdfmt(stderrdata.decode('utf-8')))
     else:
-        info = stdfmt(stderrdata)
+        info = stdfmt(stderrdata.decode('utf-8'))
 
     if nodejs.poll() not in (None, 0):
         if killed:
@@ -153,6 +302,10 @@ def execjs(js, jslib, timeout=None, debug=False):  # type: (Union[Mapping, Text]
             raise JavascriptException(info)
     else:
         try:
-            return json.loads(stdoutdata)
+            # On Windows a new nodejs process is currently created per call due to a problem with blocking reads on Windows
+            if onWindows():
+                nodejs.kill()
+            return json.loads(stdoutdata.decode('utf-8'))
         except ValueError as e:
-            raise JavascriptException(u"%s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" % (e, fn_linenum(), stdoutdata, stderrdata))
+            raise JavascriptException(u"%s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" %
+                                      (e, fn_linenum(), stdoutdata, stderrdata))
diff --git a/cwltool/schemas/draft-3/Workflow.yml b/cwltool/schemas/draft-3/Workflow.yml
index 4c36dfa..bdd8559 100644
--- a/cwltool/schemas/draft-3/Workflow.yml
+++ b/cwltool/schemas/draft-3/Workflow.yml
@@ -2,6 +2,7 @@ $base: "https://w3id.org/cwl/cwl#"
 
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
 
 $graph:
 
diff --git a/cwltool/schemas/v1.0/CommandLineTool.yml b/cwltool/schemas/v1.0/CommandLineTool.yml
index 181c51c..6696ae0 100644
--- a/cwltool/schemas/v1.0/CommandLineTool.yml
+++ b/cwltool/schemas/v1.0/CommandLineTool.yml
@@ -9,7 +9,7 @@ $graph:
   type: documentation
   doc:
     - |
-      # Common Workflow Language (CWL) Command Line Tool Description, v1.0
+      # Common Workflow Language (CWL) Command Line Tool Description, v1.0.1
 
       This version:
         * https://w3id.org/cwl/v1.0/
@@ -36,10 +36,30 @@ $graph:
     - {$include: intro.md}
 
     - |
-      ## Introduction to v1.0
+      ## Introduction to CWL Command Line Tool standard v1.0.1
 
-      This specification represents the first full release from the CWL group.
-      Since draft-3, version 1.0 introduces the following changes and additions:
+      This specification represents the second stable release from the CWL
+      group.  Since v1.0, v1.0.1 introduces the following updates to the CWL
+      Command Line Tool standard.  Documents should continue to use `cwlVersion: v1.0`
+      and existing v1.0 documents remain valid, however CWL documents that
+      relied on previously undefined or underspecified behavior may have
+      slightly different behavior in v1.0.1.
+
+        * 13 July 2016: Mark `baseCommand` as optional and update descriptive text.
+        * 12 March 2017:
+          * Mark `default` as not required for link checking.
+          * Add note that files in InitialWorkDir must have path in output directory.
+          * Add note that writable: true applies recursively.
+        * 23 July 2017: (v1.0.1)
+          * Add clarification about scattering over empty arrays.
+          * Clarify interpretation of `secondaryFiles` on inputs.
+          * Expanded discussion of the semantics of `File` and `Directory` types.
+          * Fixed typo "EMACScript" to "ECMAScript".
+          * Clarified application of input parameter default values when the input is `null` or undefined.
+          * Clarified valid types and meaning of the `format` field on inputs versus outputs.
+
+      Since draft-3, v1.0 introduces the following changes and additions
+      to the CWL Command Line Tool standard:
 
         * The [Directory](#Directory) type.
         * Syntax simplifications: denoted by the `map<>` syntax. Example: inputs
@@ -74,11 +94,6 @@ $graph:
           dependencies of a tool.
         * The common `description` field has been renamed to `doc`.
 
-      ## Errata
-
-      Post v1.0 release changes to the spec.
-
-        * 13 July 2016: Mark `baseCommand` as optional and update descriptive text.
 
       ## Purpose
 
@@ -196,11 +211,14 @@ $graph:
         If `valueFrom` is an expression, evaluate the expression to yield the
         actual value to use to build the command line and apply the binding
         rules above.  If the inputBinding is associated with an input
-        parameter, the value of `self` in the expression will be the value of the
-        input parameter.
+        parameter, the value of `self` in the expression will be the value of
+        the input parameter.  Input parameter defaults (as specified by the
+        `InputParameter.default` field) must be applied before evaluating the
+        expression.
 
         When a binding is part of the `CommandLineTool.arguments` field,
         the `valueFrom` field is required.
+
     - name: shellQuote
       type: boolean?
       doc: |
@@ -757,6 +775,9 @@ $graph:
         the original file or directory.  Default false (files and directories
         read-only by default).
 
+        A directory marked as `writable: true` implies that all files and
+        subdirectories are recursively writable as well.
+
 
 - name: InitialWorkDirRequirement
   type: record
@@ -787,6 +808,13 @@ $graph:
         May be an expression.  If so, the expression return value must validate
         as `{type: array, items: [File, Directory]}`.
 
+        Files or Directories which are listed in the input parameters and
+        appear in the `InitialWorkDirRequirement` listing must have their
+        `path` set to their staged location in the designated output directory.
+        If the same File or Directory appears more than once in the
+        `InitialWorkDirRequirement` listing, the implementation must choose
+        exactly one value for `path`; how this value is chosen is undefined.
+
 
 - name: EnvVarRequirement
   type: record
diff --git a/cwltool/schemas/v1.0/Process.yml b/cwltool/schemas/v1.0/Process.yml
index 37b2038..55ad70a 100644
--- a/cwltool/schemas/v1.0/Process.yml
+++ b/cwltool/schemas/v1.0/Process.yml
@@ -3,6 +3,7 @@ $base: "https://w3id.org/cwl/cwl#"
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
   sld: "https://w3id.org/cwl/salad#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
 
 $graph:
 
@@ -52,9 +53,73 @@ $graph:
   type: record
   docParent: "#CWLType"
   doc: |
-    Represents a file (or group of files if `secondaryFiles` is specified) that
-    must be accessible by tools using standard POSIX file system call API such as
+    Represents a file (or group of files when `secondaryFiles` is provided) that
+    will be accessible by tools using standard POSIX file system call API such as
     open(2) and read(2).
+
+    Files are represented as objects with `class` of `File`.  File objects have
+    a number of properties that provide metadata about the file.
+
+    The `location` property of a File is a URI that uniquely identifies the
+    file.  Implementations must support the file:// URI scheme and may support
+    other schemes such as http://.  The value of `location` may also be a
+    relative reference, in which case it must be resolved relative to the URI
+    of the document it appears in.  Alternately to `location`, implementations
+    must also accept the `path` property on File, which must be a filesystem
+    path available on the same host as the CWL runner (for inputs) or the
+    runtime environment of a command line tool execution (for command line tool
+    outputs).
+
+    If no `location` or `path` is specified, a file object must specify
+    `contents` with the UTF-8 text content of the file.  This is a "file
+    literal".  File literals do not correspond to external resources, but are
+    created on disk with `contents` when needed for executing a tool.
+    Where appropriate, expressions can return file literals to define new files
+    at runtime.  The maximum size of `contents` is 64 kilobytes.
+
+    The `basename` property defines the filename on disk where the file is
+    staged.  This may differ from the resource name.  If not provided,
+    `basename` must be computed from the last path part of `location` and made
+    available to expressions.
+
+    The `secondaryFiles` property is a list of File or Directory objects that
+    must be staged in the same directory as the primary file.  It is an error
+    for file names to be duplicated in `secondaryFiles`.
+
+    The `size` property is the size in bytes of the File.  It must be computed
+    from the resource and made available to expressions.  The `checksum` field
+    contains a cryptographic hash of the file content for use in verifying file
+    contents.  Implementations may, at user option, enable or disable
+    computation of the `checksum` field for performance or other reasons.
+    However, the ability to compute output checksums is required to pass the
+    CWL conformance test suite.
+
+    When executing a CommandLineTool, the files and secondary files may be
+    staged to an arbitrary directory, but must use the value of `basename` for
+    the filename.  The `path` property must be the file path in the context of the
+    tool execution runtime (local to the compute node, or within the executing
+    container).  All computed properties should be available to expressions.
+    File literals also must be staged and `path` must be set.
+
+    When collecting CommandLineTool outputs, `glob` matching returns file paths
+    (with the `path` property) and the derived properties. This can all be
+    modified by `outputEval`.  Alternately, if the file `cwl.output.json` is
+    present in the output, `outputBinding` is ignored.
+
+    File objects in the output must provide either a `location` URI or a `path`
+    property in the context of the tool execution runtime (local to the compute
+    node, or within the executing container).
+
+    When evaluating an ExpressionTool, file objects must be referenced via
+    `location` (the expression tool does not have access to files on disk so
+    `path` is meaningless) or as file literals.  It is legal to return a file
+    object with an existing `location` but a different `basename`.  The
+    `loadContents` field of ExpressionTool inputs behaves the same as on
+    CommandLineTool inputs, however it is not meaningful on the outputs.
+
+    An ExpressionTool may forward file references from input to output by using
+    the same value for `location`.
+
   fields:
     - name: class
       type:
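
A hedged illustration of a File object combining the properties described above, written as the JSON-style structure a CWL runner handles; all values are invented for the example:

```
input_file = {
    "class": "File",
    "location": "file:///data/reads.fastq.gz",
    "basename": "reads.fastq.gz",  # filename used when the file is staged
    "size": 102400,                # in bytes, computed from the resource
    "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "secondaryFiles": [
        {"class": "File", "location": "file:///data/reads.fastq.gz.md5"},
    ],
}

# A "file literal" has neither location nor path, only contents (max 64 KB):
file_literal = {"class": "File", "basename": "note.txt", "contents": "hi\n"}
```
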
@@ -220,6 +285,49 @@ $graph:
   docAfter: "#File"
   doc: |
     Represents a directory to present to a command line tool.
+
+    Directories are represented as objects with `class` of `Directory`.  Directory objects have
+    a number of properties that provide metadata about the directory.
+
+    The `location` property of a Directory is a URI that uniquely identifies
+    the directory.  Implementations must support the file:// URI scheme and may
+    support other schemes such as http://.  Alternately to `location`,
+    implementations must also accept the `path` property on Directory, which
+    must be a filesystem path available on the same host as the CWL runner (for
+    inputs) or the runtime environment of a command line tool execution (for
+    command line tool outputs).
+
+    A Directory object may have a `listing` field.  This is a list of File and
+    Directory objects that are contained in the Directory.  For each entry in
+    `listing`, the `basename` property defines the name of the File or
+    Subdirectory when staged to disk.  If `listing` is not provided, the
+    implementation must have some way of fetching the Directory listing at
+    runtime based on the `location` field.
+
+    If a Directory does not have `location`, it is a Directory literal.  A
+    Directory literal must provide `listing`.  Directory literals must be
+    created on disk at runtime as needed.
+
+    The resources in a Directory literal do not need to have any implied
+    relationship in their `location`.  For example, a Directory listing may
+    contain two files located on different hosts.  It is the responsibility of
+    the runtime to ensure that those files are staged to disk appropriately.
+    Secondary files associated with files in `listing` must also be staged to
+    the same Directory.
+
+    When executing a CommandLineTool, Directories must be recursively staged
+    first and have local values of `path` assigned.
+
+    Directory objects in CommandLineTool output must provide either a
+    `location` URI or a `path` property in the context of the tool execution
+    runtime (local to the compute node, or within the executing container).
+
+    An ExpressionTool may forward file references from input to output by using
+    the same value for `location`.
+
+    A name conflict (the same `basename` appearing multiple times in `listing`
+    or in any entry in `secondaryFiles` in the listing) is a fatal error.
+
   fields:
     - name: class
       type:
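
Similarly, a hedged example of a Directory literal as described above: it has no `location`, so `listing` is required (all values invented):

```
dir_literal = {
    "class": "Directory",
    "basename": "results",
    "listing": [
        {"class": "File", "location": "file:///tmp/a.txt", "basename": "a.txt"},
        # listing entries may come from different hosts; the runtime is
        # responsible for staging them into the same directory
        {"class": "Directory", "basename": "sub", "listing": []},
    ],
}
```
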
@@ -335,12 +443,14 @@ $graph:
         Only valid when `type: File` or is an array of `items: File`.
 
         Describes files that must be included alongside the primary file(s).
+        All listed secondary files must be present.  An implementation may
+        fail workflow execution if a secondary file does not exist.
 
         If the value is an expression, the value of `self` in the expression
         must be the primary input or output File to which this binding applies.
 
         If the value is a string, it specifies that the following pattern
-        should be applied to the primary file:
+        should be applied to the `location` of the primary file:
 
           1. If string begins with one or more caret `^` characters, for each
             caret, remove the last file extension from the path (the last
@@ -348,28 +458,6 @@ $graph:
             extensions, the path is unchanged.
           2. Append the remainder of the string to the end of the file path.
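
A worked sketch of the two pattern rules above (illustrative only; implementations may handle additional edge cases):

```
def apply_secondary_pattern(path, pattern):
    # Rule 1: each leading "^" strips one file extension from the path;
    # a path with no extension is left unchanged by rsplit.
    while pattern.startswith("^"):
        path, pattern = path.rsplit(".", 1)[0], pattern[1:]
    # Rule 2: append the remainder of the pattern.
    return path + pattern

assert apply_secondary_pattern("reads.fastq.gz", ".bai") == "reads.fastq.gz.bai"
assert apply_secondary_pattern("reads.fastq.gz", "^.md5") == "reads.fastq.md5"
assert apply_secondary_pattern("reads.fastq.gz", "^^.idx") == "reads.idx"
```
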
 
-    - name: format
-      type:
-        - "null"
-        - string
-        - type: array
-          items: string
-        - Expression
-      jsonldPredicate:
-        _id: cwl:format
-        _type: "@id"
-        identity: true
-      doc: |
-        Only valid when `type: File` or is an array of `items: File`.
-
-        For input parameters, this must be one or more IRIs of concept nodes
-        that represents file formats which are allowed as input to this
-        parameter, preferrably defined within an ontology.  If no ontology is
-        available, file formats may be tested by exact match.
-
-        For output parameters, this is the file format that will be assigned to
-        the output parameter.
-
     - name: streamable
       type: boolean?
       doc: |
@@ -553,6 +641,26 @@ $graph:
       jsonldPredicate: "@id"
       doc: "The unique identifier for this parameter object."
 
+    - name: format
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+        - Expression
+      jsonldPredicate:
+        _id: cwl:format
+        _type: "@id"
+        identity: true
+      doc: |
+        Only valid when `type: File` or is an array of `items: File`.
+
+        This must be one or more IRIs of concept nodes
+        that represent file formats which are allowed as input to this
+        parameter, preferably defined within an ontology.  If no ontology is
+        available, file formats may be tested by exact match.
+
+
     - name: inputBinding
       type: InputBinding?
       jsonldPredicate: "cwl:inputBinding"
@@ -562,10 +670,14 @@ $graph:
 
     - name: default
       type: Any?
-      jsonldPredicate: "cwl:default"
+      jsonldPredicate:
+        _id: cwl:default
+        noLinkCheck: true
       doc: |
-        The default value for this parameter if not provided in the input
-        object.
+        The default value to use for this parameter if the parameter is missing
+        from the input object, or if the value of the parameter in the input
+        object is `null`.  Default values are applied before evaluating expressions
+        (e.g. dependent `valueFrom` fields).
 
     - name: type
       type:
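
A hedged sketch of how a runner might apply such defaults before validation and expression evaluation (not cwltool's actual code path):

```
def fill_in_defaults(input_object, defaults):
    # A parameter missing from the input object, or explicitly null,
    # takes its declared default; everything else is left untouched.
    job = dict(input_object)
    for name, default in defaults.items():
        if job.get(name) is None:
            job[name] = default
    return job

assert fill_in_defaults({"threads": None}, {"threads": 1, "prefix": "out"}) \
    == {"threads": 1, "prefix": "out"}
```
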
@@ -603,6 +715,21 @@ $graph:
       jsonldPredicate: "cwl:outputBinding"
       doc: |
         Describes how to handle the outputs of a process.
+    - name: format
+      type:
+        - "null"
+        - string
+        - Expression
+      jsonldPredicate:
+        _id: cwl:format
+        _type: "@id"
+        identity: true
+      doc: |
+        Only valid when `type: File` or is an array of `items: File`.
+
+        This is the file format that will be assigned to
+        the output parameter.
+
 
 
 - type: record
@@ -645,6 +772,13 @@ $graph:
         values.  Input parameters include a schema for each parameter which is
         used to validate the input object.  It may also be used to build a user
         interface for constructing the input object.
+
+        When accepting an input object, all input parameters must have a value.
+        If an input parameter is missing from the input object, it must be
+        assigned a value of `null` (or the value of `default` for that
+        parameter, if provided) for the purposes of validation and evaluation
+        of expressions.
+
     - name: outputs
       type:
         type: array
diff --git a/cwltool/schemas/v1.0/UserGuide.yml b/cwltool/schemas/v1.0/UserGuide.yml
index 9e3fd52..7b4a258 100644
--- a/cwltool/schemas/v1.0/UserGuide.yml
+++ b/cwltool/schemas/v1.0/UserGuide.yml
@@ -183,7 +183,7 @@
 
       The value of `position` is used to determine where parameter should
       appear on the command line.  Positions are relative to one another, not
-      abosolute.  As a result, positions do not have to be sequential, three
+      absolute.  As a result, positions do not have to be sequential, three
       parameters with positions `[1, 3, 5]` will result in the same command
       line as `[1, 2, 3]`.  More than one parameter can have the same position
       (ties are broken using the parameter name), and the position field itself
@@ -239,7 +239,7 @@
 
       ```
       outputs:
-        - id: example_out
+        example_out:
           type: File
           outputBinding:
             glob: hello.txt
@@ -356,7 +356,7 @@
       containers are also purposefully isolated from the host system, so in
       order to run a tool inside a Docker container there is additional work to
       ensure that input files are available inside the container and output
-      files can be recovered from the contianer.  CWL can perform this work
+      files can be recovered from the container.  CWL can perform this work
       automatically, allowing you to use Docker to simplify your software
       management while avoiding the complexity of invoking and managing Docker
       containers.
@@ -466,7 +466,7 @@
       tool is actually executed.  The `$(runtime.outdir)` parameter is the path
       to the designated output directory.  Other parameters include
       `$(runtime.tmpdir)`, `$(runtime.ram)`, `$(runtime.cores)`,
-      `$(runtime.ram)`, `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
+      `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
       the [Runtime Environment](CommandLineTool.html#Runtime_environment)
       section of the CWL specification for details.
 
@@ -522,7 +522,7 @@
     - |
       ```
 
-      *array-outpust-job.yml*
+      *array-outputs-job.yml*
       ```
     - $include: examples/array-outputs-job.yml
     - |
@@ -539,13 +539,13 @@
         "output": [
           {
             "size": 0,
-            "location": "/home/peter/work/common-workflow-language/draft-3/examples/foo.txt",
+            "location": "examples/foo.txt",
             "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
             "class": "File"
           },
           {
             "size": 0,
-            "location": "/home/peter/work/common-workflow-language/draft-3/examples/baz.txt",
+            "location": "examples/baz.txt",
             "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
             "class": "File"
           }
@@ -662,6 +662,16 @@
     - |
       ```
 
+      As this tool does not require any `inputs`, we can run it with an (almost) empty job file:
+
+      *empty.yml*
+      ```
+      {}
+      ```
+
+      We can then run `expression.cwl`:
+
       ```
       $ cwl-runner expression.cwl empty.yml
       [job 140000594593168] /home/example$ echo -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
@@ -796,7 +806,7 @@
       class: Workflow
       ```
 
-      The 'cwlVersion` field indicates the version of the CWL spec used by the
+      The `cwlVersion` field indicates the version of the CWL spec used by the
       document.  The `class` field indicates this document describes a workflow.
 
 
@@ -820,7 +830,7 @@
 
       The `outputs` section describes the outputs of the workflow.  This is a
       list of output parameters where each parameter consists of an identifier
-      and a data type.  The `source` connects the output parameter `classfile`
+      and a data type.  The `outputSource` connects the output parameter `classfile`
       of the `compile` step to the workflow output parameter `classout`.
 
       ```
@@ -867,3 +877,123 @@
       connecting the input parameter `src` to the output parameter of `untar`
       using `untar/example_out`.  The output of this step `classfile` is
       connected to the `outputs` section for the Workflow, described above.
+
+    - |
+      ## Nested workflows
+
+      Workflows are ways to combine multiple tools to perform larger
+      operations. We can also think of a workflow as being a tool itself;
+      a CWL workflow can be used as a step in another CWL workflow, if the
+      workflow engine supports the `SubworkflowFeatureRequirement`:
+
+
+      ```
+      requirements:
+        - class: SubworkflowFeatureRequirement
+      ```
+
+      Here's an example workflow that uses our `1st-workflow.cwl` as a
+      nested workflow:
+
+      ```
+    - $include: examples/nestedworkflows.cwl
+    - |
+      ```
+
+      A CWL `Workflow` can be used as a `step` just like a
+      `CommandLineTool`; its CWL file is included with `run`.
+      The workflow inputs (`inp` and `ex`)
+      and outputs (`classout`) then can be mapped to become the
+      step's input/outputs.
+
+      ```
+        compile:
+          run: 1st-workflow.cwl
+          in:
+            inp:
+              source: create-tar/tar
+            ex:
+              default: "Hello.java"
+          out: [classout]
+      ```
+
+      Our `1st-workflow.cwl` was parameterized with workflow inputs,
+      so when running it we had to provide a job file to denote
+      the tar file and `*.java` filename. This is generally best-practice,
+      as it means it can be reused in multiple parent workflows,
+      or even in multiple steps within the same workflow.
+
+      Here we use `default:` to hard-code
+      `"Hello.java"` as the `ex` input,
+      however our workflow also requires a tar file at `inp`,
+      which we will prepare in the `create-tar` step.
+      At this point it is probably a good idea to refactor
+      `1st-workflow.cwl` to have more specific input/output names,
+      as those also appear in its usage as a tool.
+
+      It is also possible to take a less generic approach and avoid
+      external dependencies in the job file. So in this workflow we can
+      generate a hard-coded `Hello.java` file using the
+      previously mentioned `InitialWorkDirRequirement` requirement, before
+      adding it to a tar file.
+
+      ```
+        create-tar:
+          requirements:
+            - class: InitialWorkDirRequirement
+              listing:
+                - entryname: Hello.java
+                  entry: |
+                    public class Hello {
+                      public static void main(String[] argv) {
+                          System.out.println("Hello from Java");
+                      }
+                    }
+      ```
+
+      In this case our step can assume `Hello.java` rather than be
+      parameterized, so we can use a simpler `arguments` form
+      as long as the CWL workflow engine supports the
+      `ShellCommandRequirement`:
+
+      ```
+        run:
+          class: CommandLineTool
+          requirements:
+            - class: ShellCommandRequirement
+          arguments:
+            - shellQuote: false
+              valueFrom: >
+                tar cf hello.tar Hello.java
+      ```
+
+      Note the use of `shellQuote: false` here, otherwise the shell will try
+      to execute the quoted binary `"tar cf hello.tar Hello.java"`.
+
+      Here the `>` block means that newlines are stripped, so it's possible to write
+      the single command on multiple lines. Similarly, the `|` we used above will
+      preserve newlines; combined with `ShellCommandRequirement` this would
+      allow embedding a shell script.
+      Shell commands should, however, be used sparingly in CWL, as they
+      mean you "jump out" of the workflow and no longer get
+      reusable components, provenance or scalability. For reproducibility
+      and portability it is recommended to only use shell commands together
+      with a `DockerRequirement` hint, so that
+      the commands are executed in a predictable shell environment.
+
+      Did you notice that we didn't split out the `tar cf` tool to a separate
+      file, but rather embedded it within the CWL Workflow file? This is generally
+      not best practice, as the tool then can't be reused. The reason for doing it
+      in this case is because the command line is hard-coded with filenames that
+      only make sense within this workflow.
+
+      In this example we had to prepare a tar file outside, but only because
+      our inner workflow was designed to take that as an input. A better
+      refactoring of the inner workflow would be to take a list of
+      Java files to compile, which would simplify its usage as a tool
+      step in other workflows.  
+
+      Nested workflows can be a powerful feature for generating higher-level
+      functional and reusable workflow units - but just as when creating a
+      CWL Tool description, care must be taken to improve their usability
+      in multiple workflows.
diff --git a/cwltool/schemas/v1.0/Workflow.yml b/cwltool/schemas/v1.0/Workflow.yml
index 26bde8e..d16072d 100644
--- a/cwltool/schemas/v1.0/Workflow.yml
+++ b/cwltool/schemas/v1.0/Workflow.yml
@@ -2,6 +2,7 @@ $base: "https://w3id.org/cwl/cwl#"
 
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
 
 $graph:
 
@@ -9,7 +10,7 @@ $graph:
   type: documentation
   doc:
     - |
-      # Common Workflow Language (CWL) Workflow Description, v1.0
+      # Common Workflow Language (CWL) Workflow Description, v1.0.1
 
       This version:
         * https://w3id.org/cwl/v1.0/
@@ -33,10 +34,28 @@ $graph:
 
     - |
 
-      ## Introduction to v1.0
-
-      This specification represents the first full release from the CWL group.
-      Since draft-3, this draft introduces the following changes and additions:
+      ## Introduction to CWL Workflow standard v1.0.1
+
+      This specification represents the second stable release from the CWL
+      group.  Since v1.0, v1.0.1 introduces the following updates to the CWL
+      Workflow standard.  Documents should continue to use `cwlVersion: v1.0`
+      and existing v1.0 documents remain valid; however, CWL documents that
+      relied on previously undefined or underspecified behavior may have
+      slightly different behavior in v1.0.1.
+
+        * 12 March 2017:
+          * Mark `default` as not required for link checking.
+          * Add note that recursive subworkflows is not allowed.
+          * Fix mistake in discussion of extracting field names from workflow step ids.
+        * 23 July 2017: (v1.0.1)
+          * Add clarification about scattering over empty arrays.
+          * Clarify interpretation of `secondaryFiles` on inputs.
+          * Expanded discussion of the semantics of `File` and `Directory` types.
+          * Fixed typo "EMACScript" to "ECMAScript".
+          * Clarified application of input parameter default values when the input is `null` or undefined.
+
+      Since draft-3, v1.0 introduces the following changes and additions
+      to the CWL Workflow standard:
 
         * The `inputs` and `outputs` fields have been renamed `in` and `out`.
         * Syntax simplifications: denoted by the `map<>` syntax. Example: `in`
@@ -217,10 +236,11 @@ $graph:
     ## Input object
 
     A WorkflowStepInput object must contain an `id` field in the form
-    `#fieldname` or `#stepname.fieldname`.  When the `id` field contains a
-    period `.` the field name consists of the characters following the final
-    period.  This defines a field of the workflow step input object with the
-    value of the `source` parameter(s).
+    `#fieldname` or `#prefix/fieldname`.  When the `id` field contains a slash
+    `/` the field name consists of the characters following the final slash
+    (the prefix portion may contain one or more slashes to indicate scope).
+    This defines a field of the workflow step input object with the value of
+    the `source` parameter(s).
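
For example (a hedged illustration of the id forms above):

```
# The field name is whatever follows the final slash; the prefix part
# (here "#main/step1") only indicates scope.
step_input_id = "#main/step1/fieldname"
assert step_input_id.split("/")[-1] == "fieldname"
```
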
 
     ## Merging
 
@@ -257,9 +277,12 @@ $graph:
     - name: default
       type: ["null", Any]
       doc: |
-        The default value for this parameter if there is no `source`
-        field.
-      jsonldPredicate: "cwl:default"
+        The default value for this parameter to use if either there is no
+        `source` field, or the value produced by the `source` is `null`.  The
+        default must be applied prior to scattering or evaluating `valueFrom`.
+      jsonldPredicate:
+        _id: "cwl:default"
+        noLinkCheck: true
     - name: valueFrom
       type:
         - "null"
@@ -282,10 +305,10 @@ $graph:
 
         The value of `inputs` in the parameter reference or expression must be
         the input object to the workflow step after assigning the `source`
-        values and then scattering.  The order of evaluating `valueFrom` among
-        step input parameters is undefined and the result of evaluating
-        `valueFrom` on a parameter must not be visible to evaluation of
-        `valueFrom` on other parameters.
+        values, applying `default`, and then scattering.  The order of
+        evaluating `valueFrom` among step input parameters is undefined and the
+        result of evaluating `valueFrom` on a parameter must not be visible to
+        evaluation of `valueFrom` on other parameters.
 
 
 - type: record
@@ -338,13 +361,17 @@ $graph:
 
     The `scatter` field specifies one or more input parameters which will be
     scattered.  An input parameter may be listed more than once.  The declared
-    type of each input parameter is implicitly wrapped in an array for each
-    time it appears in the `scatter` field.  As a result, upstream parameters
-    which are connected to scattered parameters may be arrays.
+    type of each input parameter implicitly becomes an array of items of the
+    input parameter type.  If a parameter is listed more than once, it becomes
+    a nested array.  As a result, upstream parameters which are connected to
+    scattered parameters must be arrays.
 
     All output parameter types are also implicitly wrapped in arrays.  Each job
     in the scatter results in an entry in the output array.
 
+    If any scattered parameter is empty at runtime, all outputs are set to
+    empty arrays and no work is done for the step.
+
     If `scatter` declares more than one input parameter, `scatterMethod`
     describes how to decompose the input into a discrete set of jobs.
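
A hedged sketch of the implicit type wrapping described above, adding one array level per appearance of the parameter in `scatter`:

```
def scattered_type(declared_type, appearances):
    t = declared_type
    for _ in range(appearances):
        t = {"type": "array", "items": t}
    return t

assert scattered_type("string", 1) == {"type": "array", "items": "string"}
# Listing the parameter twice in `scatter` yields a nested array type:
assert scattered_type("string", 2) == {
    "type": "array", "items": {"type": "array", "items": "string"}}
```
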
 
@@ -368,6 +395,9 @@ $graph:
     [SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) must be
     specified in the workflow or workflow step requirements.
 
+    It is a fatal error if a workflow directly or indirectly invokes itself as
+    a subworkflow (recursive workflows are not allowed).
+
   fields:
     - name: id
       type: string
diff --git a/cwltool/schemas/v1.0/concepts.md b/cwltool/schemas/v1.0/concepts.md
index cc4df5a..d65c7ce 100644
--- a/cwltool/schemas/v1.0/concepts.md
+++ b/cwltool/schemas/v1.0/concepts.md
@@ -6,7 +6,7 @@
 
 **YAML**: http://yaml.org
 
-**Avro**: https://avro.apache.org/docs/current/spec.html
+**Avro**: https://avro.apache.org/docs/1.8.1/spec.html
 
 **Uniform Resource Identifier (URI) Generic Syntax**: https://tools.ietf.org/html/rfc3986)
 
@@ -322,7 +322,7 @@ Expressions are denoted by the syntax `$(...)` or `${...}`.  A code
 fragment wrapped in the `$(...)` syntax must be evaluated as a
 [ECMAScript expression](http://www.ecma-international.org/ecma-262/5.1/#sec-11).  A
 code fragment wrapped in the `${...}` syntax must be evaluated as a
-[EMACScript function body](http://www.ecma-international.org/ecma-262/5.1/#sec-13)
+[ECMAScript function body](http://www.ecma-international.org/ecma-262/5.1/#sec-13)
 for an anonymous, zero-argument function.  Expressions must return a valid JSON
 data type: one of null, string, number, boolean, array, object. Other return
 values must result in a `permanentFailure`. Implementations must permit any
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res.yml
new file mode 100644
index 0000000..bbcee48
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res.yml
@@ -0,0 +1,36 @@
+- |
+  ## Identifier maps
+
+  The schema may designate certain fields as having a `mapSubject`.  If the
+  value of the field is a JSON object, it must be transformed into an array of
+  JSON objects.  Each key-value pair from the source JSON object is a list
+  item, each list item must be a JSON objects, and the value of the key is
+  assigned to the field specified by `mapSubject`.
+
+  Fields which have `mapSubject` specified may also supply a `mapPredicate`.
+  If the value of a map item is not a JSON object, the item is transformed to a
+  JSON object with the key assigned to the field specified by `mapSubject` and
+  the value assigned to the field specified by `mapPredicate`.
+
+  ### Identifier map example
+
+  Given the following schema:
+
+  ```
+- $include: map_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: map_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: map_res_proc.yml
+- |
+  ```
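
A sketch of the identifier-map transformation described in this section (illustrative; schema-salad's actual resolver does more):

```
def expand_idmap(value, map_subject, map_predicate=None):
    if not isinstance(value, dict):
        return value
    items = []
    for key, entry in value.items():
        if not isinstance(entry, dict):
            # mapPredicate case: a bare value becomes {mapPredicate: value}
            entry = {map_predicate: entry}
        entry = dict(entry)
        entry[map_subject] = key  # the key becomes the mapSubject field
        items.append(entry)
    return items

expand_idmap({"fred": "daphne", "shaggy": {"value": "scooby"}}, "key", "value")
# -> [{"value": "daphne", "key": "fred"}, {"value": "scooby", "key": "shaggy"}]
# (list order follows dict insertion order on Python 3.7+)
```
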
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_proc.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_proc.yml
new file mode 100644
index 0000000..52e9c22
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_proc.yml
@@ -0,0 +1,12 @@
+{
+    "mapped": [
+        {
+            "value": "daphne",
+            "key": "fred"
+        },
+        {
+            "value": "scooby",
+            "key": "shaggy"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_schema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_schema.yml
new file mode 100644
index 0000000..086cc29
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_schema.yml
@@ -0,0 +1,30 @@
+{
+  "$graph": [{
+    "name": "MappedType",
+    "type": "record",
+    "documentRoot": true,
+    "fields": [{
+      "name": "mapped",
+      "type": {
+        "type": "array",
+        "items": "ExampleRecord"
+      },
+      "jsonldPredicate": {
+        "mapSubject": "key",
+        "mapPredicate": "value"
+      }
+    }],
+  },
+  {
+    "name": "ExampleRecord",
+    "type": "record",
+    "fields": [{
+      "name": "key",
+      "type": "string"
+      }, {
+      "name": "value",
+      "type": "string"
+      }
+    ]
+  }]
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_src.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_src.yml
new file mode 100644
index 0000000..9df0c35
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/map_res_src.yml
@@ -0,0 +1,8 @@
+{
+  "mapped": {
+    "shaggy": {
+      "value": "scooby"
+    },
+    "fred": "daphne"
+  }
+}
\ No newline at end of file
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
index d5472e9..28b9e66 100644
--- a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
@@ -18,6 +18,8 @@ $graph:
     - $import: link_res.yml
     - $import: vocab_res.yml
     - $include: import_include.md
+    - $import: map_res.yml
+    - $import: typedsl_res.yml
 
 - name: "Link_Validation"
   type: documentation
@@ -154,16 +156,24 @@ $graph:
 - name: NamedType
   type: record
   abstract: true
+  docParent: "#Schema"
   fields:
     - name: name
       type: string
       jsonldPredicate: "@id"
       doc: "The identifier for this type"
+    - name: inVocab
+      type: boolean?
+      doc: |
+        By default or if "true", include the short name of this type in the
+        vocabulary (the keys of the JSON-LD context).  If false, do not include
+        the short name in the vocabulary.
 
 
 - name: DocType
   type: record
   abstract: true
+  docParent: "#Schema"
   fields:
     - name: doc
       type:
@@ -240,6 +250,7 @@ $graph:
 
 
 - name: SaladRecordSchema
+  docParent: "#Schema"
   type: record
   extends: [NamedType, RecordSchema, SchemaDefinedType]
   documentRoot: true
@@ -277,6 +288,7 @@ $graph:
         mapPredicate: specializeTo
 
 - name: SaladEnumSchema
+  docParent: "#Schema"
   type: record
   extends: [EnumSchema, SchemaDefinedType]
   documentRoot: true
@@ -297,6 +309,7 @@ $graph:
 
 - name: Documentation
   type: record
+  docParent: "#Schema"
   extends: [NamedType, DocType]
   documentRoot: true
   doc: |
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
index 73511d1..d8bf0a3 100644
--- a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
@@ -8,6 +8,12 @@ $namespaces:
   xsd:  "http://www.w3.org/2001/XMLSchema#"
 
 $graph:
+
+- name: "Schema"
+  type: documentation
+  doc: |
+    # Schema
+
 - name: PrimitiveType
   type: enum
   symbols:
@@ -35,6 +41,7 @@ $graph:
 - name: Any
   type: enum
   symbols: ["#Any"]
+  docAfter: "#PrimitiveType"
   doc: |
     The **Any** type validates for any non-null value.
 
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
index 6dd3e6a..2d4681e 100644
--- a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
@@ -26,7 +26,7 @@ Web.
 
 This document is the product of the [Common Workflow Language working
 group](https://groups.google.com/forum/#!forum/common-workflow-language).  The
-latest version of this document is available in the "schema_salad" directory at
+latest version of this document is available in the "schema_salad" repository at
 
 https://github.com/common-workflow-language/schema_salad
 
@@ -38,7 +38,7 @@ under the terms of the Apache License, version 2.0.
 # Introduction
 
 The JSON data model is an extremely popular way to represent structured
-data.  It is attractive because of it's relative simplicity and is a
+data.  It is attractive because of its relative simplicity and is a
 natural fit with the standard types of many programming languages.
 However, this simplicity means that basic JSON lacks expressive features
 useful for working with complex data structures and document formats, such
@@ -70,12 +70,17 @@ and RDF schema, and production of RDF triples by applying the JSON-LD
 context.  The schema language also provides for robust support of inline
 documentation.
 
-## Introduction to draft 1
+## Introduction to v1.0
 
-This is the first version of Schema Salad.  It is developed concurrently
-with draft 3 of the Common Workflow Language for use in specifying the
-Common Workflow Language, however Schema Salad is intended to be useful to
-a broader audience.
+This is the second version of the Schema Salad specification.  It is
+developed concurrently with v1.0 of the Common Workflow Language for use in
+specifying the Common Workflow Language, however Schema Salad is intended to be
+useful to a broader audience.  Compared to the draft-1 Schema Salad
+specification, the following changes have been made:
+
+* Use of [mapSubject and mapPredicate](#Identifier_maps) to transform maps to lists of records.
+* Resolution of the [Domain Specific Language for types](#Domain_Specific_Language_for_types).
+* Consolidation of the formal [schema into section 5](#Schema).
 
 ## References to Other Specifications
 
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res.yml
new file mode 100644
index 0000000..b1a0c1d
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res.yml
@@ -0,0 +1,33 @@
+- |
+  ## Domain Specific Language for types
+
+  Fields may be tagged `typeDSL: true`.  If so, the field is expanded using the
+  following micro-DSL for schema salad types:
+
+  * If the type ends with a question mark `?`, it is expanded to a union with `null`.
+  * If the type ends with square brackets `[]`, it is expanded to an array with items of the preceding type symbol.
+  * The type may end with both `[]?` to indicate an optional array.
+  * Identifier resolution is applied after type DSL expansion.
+
+  ### Type DSL example
+
+  Given the following schema:
+
+  ```
+- $include: typedsl_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: typedsl_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: typedsl_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_proc.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_proc.yml
new file mode 100644
index 0000000..8097a6a
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_proc.yml
@@ -0,0 +1,26 @@
+[
+    {
+        "extype": "string"
+    }, 
+    {
+        "extype": [
+            "null", 
+            "string"
+        ]
+    }, 
+    {
+        "extype": {
+            "type": "array", 
+            "items": "string"
+        }
+    }, 
+    {
+        "extype": [
+            "null", 
+            {
+                "type": "array", 
+                "items": "string"
+            }
+        ]
+    }
+]
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_schema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_schema.yml
new file mode 100644
index 0000000..52459a6
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_schema.yml
@@ -0,0 +1,17 @@
+{
+  "$graph": [
+  {"$import": "metaschema_base.yml"},
+  {
+    "name": "TypeDSLExample",
+    "type": "record",
+    "documentRoot": true,
+    "fields": [{
+      "name": "extype",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@vocab",
+        "typeDSL": true
+      }
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_src.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_src.yml
new file mode 100644
index 0000000..6ecbd50
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/typedsl_res_src.yml
@@ -0,0 +1,9 @@
+[{
+  "extype": "string"
+}, {
+  "extype": "string?"
+}, {
+  "extype": "string[]"
+}, {
+  "extype": "string[]?"
+}]
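
A minimal Python sketch of the `?`/`[]` micro-DSL described in typedsl_res.yml, reproducing the typedsl_res_src.yml to typedsl_res_proc.yml expansion; this is illustrative only, not schema_salad's implementation.

```
def expand_type_dsl(t):
    # Order matters: check the combined suffix "[]?" before "[]" and "?".
    if t.endswith("[]?"):
        return ["null", {"type": "array", "items": t[:-3]}]
    if t.endswith("[]"):
        return {"type": "array", "items": t[:-2]}
    if t.endswith("?"):
        return ["null", t[:-1]]
    return t

for t in ("string", "string?", "string[]", "string[]?"):
    print(t, "->", expand_type_dsl(t))
```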
diff --git a/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml b/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml
index d98f85a..6bbfeac 100644
--- a/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml
+++ b/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml
@@ -747,7 +747,6 @@ $graph:
     - name: package
       type: string
       doc: "The common name of the software to be configured."
-      jsonldPredicate: "@id"
     - name: version
       type: string[]?
       doc: "The (optional) version of the software to configured."
@@ -811,6 +810,8 @@ $graph:
         the original file or directory.  Default false (files and directories
         read-only by default).
 
+        A directory marked as `writable: true` implies that all files and
+        subdirectories are recursively writable as well.
 
 - name: InitialWorkDirRequirement
   type: record
@@ -841,6 +842,13 @@ $graph:
         May be an expression.  If so, the expression return value must validate
         as `{type: array, items: [File, Directory]}`.
 
+        Files or Directories which are listed in the input parameters and
+        appear in the `InitialWorkDirRequirement` listing must have their
+        `path` set to their staged location in the designated output directory.
+        If the same File or Directory appears more than once in the
+        `InitialWorkDirRequirement` listing, the implementation must choose
+        exactly one value for `path`; how this value is chosen is undefined.
+
 
 - name: EnvVarRequirement
   type: record
diff --git a/cwltool/schemas/v1.1.0-dev1/Process.yml b/cwltool/schemas/v1.1.0-dev1/Process.yml
index cf8e03a..cd8a548 100644
--- a/cwltool/schemas/v1.1.0-dev1/Process.yml
+++ b/cwltool/schemas/v1.1.0-dev1/Process.yml
@@ -3,6 +3,7 @@ $base: "https://w3id.org/cwl/cwl#"
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
   sld: "https://w3id.org/cwl/salad#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
 
 $graph:
 
@@ -563,10 +564,13 @@ $graph:
 
     - name: default
       type: Any?
-      jsonldPredicate: "cwl:default"
+      jsonldPredicate:
+        _id: cwl:default
+        noLinkCheck: true
       doc: |
-        The default value for this parameter if not provided in the input
-        object.
+        The default value to use for this parameter if the parameter is missing
+        from the input object, or if the value of the parameter in the input
+        object is `null`.
 
 - name: RegularInputParameter
   type: record
@@ -595,7 +599,6 @@ $graph:
       doc: |
         Specify valid types of data that may be assigned to this parameter.
 
-
 - name: OutputParameter
   type: record
   extends: Parameter
@@ -651,6 +654,13 @@ $graph:
         values.  Input parameters include a schema for each parameter which is
         used to validate the input object.  It may also be used to build a user
         interface for constructing the input object.
+
+        When accepting an input object, all input parameters must have a value.
+        If an input parameter is missing from the input object, it must be
+        assigned a value of `null` (or the value of `default` for that
+        parameter, if provided) for the purposes of validation and evaluation
+        of expressions.
+
     - name: outputs
       type:
         type: array
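
The null/default rule added to Process.yml above can be illustrated with a small, hypothetical normalization step; the parameter records are simplified and this is not cwltool's code.

```
def normalize_input_object(parameters, input_object):
    """Give every declared input a value: the supplied one, else its
    `default`, else None (CWL null)."""
    normalized = dict(input_object)
    for param in parameters:
        pid = param["id"]
        if normalized.get(pid) is None:            # missing or explicit null
            normalized[pid] = param.get("default")
    return normalized

params = [{"id": "threads", "default": 1}, {"id": "infile"}]
print(normalize_input_object(params, {"threads": None}))
# {'threads': 1, 'infile': None}
```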
diff --git a/cwltool/schemas/v1.1.0-dev1/UserGuide.yml b/cwltool/schemas/v1.1.0-dev1/UserGuide.yml
index 7464009..95688ce 100644
--- a/cwltool/schemas/v1.1.0-dev1/UserGuide.yml
+++ b/cwltool/schemas/v1.1.0-dev1/UserGuide.yml
@@ -183,7 +183,7 @@
 
       The value of `position` is used to determine where parameter should
       appear on the command line.  Positions are relative to one another, not
-      abosolute.  As a result, positions do not have to be sequential, three
+      absolute.  As a result, positions do not have to be sequential; three
       parameters with positions `[1, 3, 5]` will result in the same command
       line as `[1, 2, 3]`.  More than one parameter can have the same position
       (ties are broken using the parameter name), and the position field itself
@@ -239,7 +239,7 @@
 
       ```
       outputs:
-        - id: example_out
+        example_out:
           type: File
           outputBinding:
             glob: hello.txt
@@ -290,8 +290,8 @@
     - |
       ## Parameter references
 
-      In a previous example, we used extracted a file using the "tar" program.
-      However, that example was very limited becuase it assumed that the file
+      In a previous example, we extracted a file using the "tar" program.
+      However, that example was very limited because it assumed that the file
       we were interested in was called "hello.txt".  In this example, you will
       see how to reference the value of input parameters dynamically from other
       fields.
@@ -356,7 +356,7 @@
       containers are also purposefully isolated from the host system, so in
       order to run a tool inside a Docker container there is additional work to
       ensure that input files are available inside the container and output
-      files can be recovered from the contianer.  CWL can perform this work
+      files can be recovered from the container.  CWL can perform this work
       automatically, allowing you to use Docker to simplify your software
       management while avoiding the complexity of invoking and managing Docker
       containers.
@@ -466,7 +466,7 @@
       tool is actually executed.  The `$(runtime.outdir)` parameter is the path
       to the designated output directory.  Other parameters include
       `$(runtime.tmpdir)`, `$(runtime.ram)`, `$(runtime.cores)`,
-      `$(runtime.ram)`, `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
+      `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
       the [Runtime Environment](CommandLineTool.html#Runtime_environment)
       section of the CWL specification for details.
 
@@ -522,7 +522,7 @@
     - |
       ```
 
-      *array-outpust-job.yml*
+      *array-outputs-job.yml*
       ```
     - $include: examples/array-outputs-job.yml
     - |
@@ -662,6 +662,16 @@
     - |
       ```
 
+      As this tool does not require any `inputs`, we can run it with an (almost) empty job file:
+
+      *empty.yml*
+      ```
+      {}
+      ```
+
+      We can then run `expression.cwl`:
+
       ```
       $ cwl-runner expression.cwl empty.yml
       [job 140000594593168] /home/example$ echo -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
@@ -796,7 +806,7 @@
       class: Workflow
       ```
 
-      The 'cwlVersion` field indicates the version of the CWL spec used by the
+      The `cwlVersion` field indicates the version of the CWL spec used by the
       document.  The `class` field indicates this document describes a workflow.
 
 
@@ -820,7 +830,7 @@
 
       The `outputs` section describes the outputs of the workflow.  This is a
       list of output parameters where each parameter consists of an identifier
-      and a data type.  The `source` connects the output parameter `classfile`
+      and a data type.  The `outputSource` connects the output parameter `classfile`
       of the `compile` step to the workflow output parameter `classout`.
 
       ```
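
On the `position` discussion earlier in this User Guide diff: the ordering can be pictured as sorting by (position, parameter name). A hypothetical illustration, not cwltool's actual binding code:

```
params = [
    {"name": "B", "position": 3},
    {"name": "A", "position": 1},
    {"name": "C", "position": 3},  # ties on position break on the name
]
order = sorted(params, key=lambda p: (p["position"], p["name"]))
print([p["name"] for p in order])  # ['A', 'B', 'C']
```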
diff --git a/cwltool/schemas/v1.1.0-dev1/Workflow.yml b/cwltool/schemas/v1.1.0-dev1/Workflow.yml
index 3f2f9c1..6f3f085 100644
--- a/cwltool/schemas/v1.1.0-dev1/Workflow.yml
+++ b/cwltool/schemas/v1.1.0-dev1/Workflow.yml
@@ -2,6 +2,7 @@ $base: "https://w3id.org/cwl/cwl#"
 
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
 
 $graph:
 
@@ -224,10 +225,11 @@ $graph:
     ## Input object
 
     A WorkflowStepInput object must contain an `id` field in the form
-    `#fieldname` or `#stepname.fieldname`.  When the `id` field contains a
-    period `.` the field name consists of the characters following the final
-    period.  This defines a field of the workflow step input object with the
-    value of the `source` parameter(s).
+    `#fieldname` or `#prefix/fieldname`.  When the `id` field contains a slash
+    `/` the field name consists of the characters following the final slash
+    (the prefix portion may contain one or more slashes to indicate scope).
+    This defines a field of the workflow step input object with the value of
+    the `source` parameter(s).
 
     ## Merging
 
@@ -264,9 +266,11 @@ $graph:
     - name: default
       type: ["null", Any]
       doc: |
-        The default value for this parameter if there is no `source`
-        field.
-      jsonldPredicate: "cwl:default"
+        The default value for this parameter to use if either there is no
+        `source` field, or the value produced by the `source` is `null`.
+      jsonldPredicate:
+        _id: "cwl:default"
+        noLinkCheck: true
     - name: valueFrom
       type:
         - "null"
@@ -375,6 +379,9 @@ $graph:
     [SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) must be
     specified in the workflow or workflow step requirements.
 
+    It is a fatal error if a workflow directly or indirectly invokes itself as
+    a subworkflow (recursive workflows are not allowed).
+
   fields:
     - name: id
       type: string
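
The `#prefix/fieldname` rule quoted above amounts to taking the characters after the final slash of the fragment. A hedged standalone sketch (cwltool's real helper is `shortname` in process.py):

```
def field_name(step_input_id):
    frag = step_input_id.split("#", 1)[-1]  # drop the document part
    return frag.rsplit("/", 1)[-1]          # keep text after the final slash

print(field_name("#revtool_input"))        # revtool_input
print(field_name("#step1/revtool_input"))  # revtool_input
```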
diff --git a/cwltool/schemas/v1.1.0-dev1/concepts.md b/cwltool/schemas/v1.1.0-dev1/concepts.md
index d3eac23..ed19350 100644
--- a/cwltool/schemas/v1.1.0-dev1/concepts.md
+++ b/cwltool/schemas/v1.1.0-dev1/concepts.md
@@ -89,7 +89,7 @@ preprocessing steps described in the
 [Semantic Annotations for Linked Avro Data (SALAD) Specification](SchemaSalad.html).
 An implementation may formally validate the structure of a CWL document using
 SALAD schemas located at
-https://github.com/common-workflow-language/common-workflow-language/tree/master/draft-4
+https://github.com/common-workflow-language/common-workflow-language/tree/master/v1.1.0-dev1
 
 ## Identifiers
 
@@ -323,7 +323,7 @@ Expressions are denoted by the syntax `$(...)` or `${...}`.  A code
 fragment wrapped in the `$(...)` syntax must be evaluated as a
 [ECMAScript expression](http://www.ecma-international.org/ecma-262/5.1/#sec-11).  A
 code fragment wrapped in the `${...}` syntax must be evaluated as a
-[EMACScript function body](http://www.ecma-international.org/ecma-262/5.1/#sec-13)
+[ECMAScript function body](http://www.ecma-international.org/ecma-262/5.1/#sec-13)
 for an anonymous, zero-argument function.  Expressions must return a valid JSON
 data type: one of null, string, number, boolean, array, object. Other return
 values must result in a `permanentFailure`. Implementations must permit any
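
To illustrate the two expression forms fixed in concepts.md above: an engine could wrap both into a zero-argument ECMAScript function before evaluation. This is a sketch of the idea only, not cwltool's sandboxjs implementation.

```
def wrap_expression(code, kind):
    if kind == "$(":   # expression form: $(...)
        return "(function(){return (%s);})()" % code
    if kind == "${":   # function-body form: ${...}
        return "(function(){%s})()" % code
    raise ValueError("unknown expression kind")

print(wrap_expression("inputs.x + 1", "$("))          # ECMAScript expression
print(wrap_expression("return inputs.x + 1;", "${"))  # ECMAScript function body
```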
diff --git a/cwltool/software_requirements.py b/cwltool/software_requirements.py
new file mode 100644
index 0000000..535b293
--- /dev/null
+++ b/cwltool/software_requirements.py
@@ -0,0 +1,122 @@
+"""This module handles resolution of SoftwareRequirement hints.
+
+This is accomplished mainly by adapting cwltool internals to galaxy-lib's
+concept of "dependencies". Despite the name, galaxy-lib is a lightweight
+library that can be used to map SoftwareRequirements in all sorts of ways -
+Homebrew, Conda, custom scripts, environment modules. We'd be happy to find
+ways to adapt new package managers as well.
+"""
+from __future__ import absolute_import
+import argparse
+import os
+import string
+from typing import (Any, Dict, List, Text)
+
+try:
+    from galaxy.tools.deps.requirements import ToolRequirement, ToolRequirements
+    from galaxy.tools import deps
+except ImportError:
+    ToolRequirement = None  # type: ignore
+    ToolRequirements = None  # type: ignore
+    deps = None
+
+from .utils import get_feature
+
+SOFTWARE_REQUIREMENTS_ENABLED = deps is not None
+
+COMMAND_WITH_DEPENDENCIES_TEMPLATE = string.Template("""#!/bin/bash
+$handle_dependencies
+python "run_job.py" "job.json"
+""")
+
+
+class DependenciesConfiguration(object):
+
+    def __init__(self, args):
+        # type: (argparse.Namespace) -> None
+        conf_file = getattr(args, "beta_dependency_resolvers_configuration", None)
+        tool_dependency_dir = getattr(args, "beta_dependencies_directory", None)
+        conda_dependencies = getattr(args, "beta_conda_dependencies", None)
+        if conf_file is not None and os.path.exists(conf_file):
+            self.use_tool_dependencies = True
+            if not tool_dependency_dir:
+                tool_dependency_dir = os.path.abspath(os.path.dirname(conf_file))
+            self.tool_dependency_dir = tool_dependency_dir
+            self.dependency_resolvers_config_file = conf_file
+        elif conda_dependencies:
+            if not tool_dependency_dir:
+                tool_dependency_dir = os.path.abspath("./cwltool_deps")
+            self.tool_dependency_dir = tool_dependency_dir
+            self.use_tool_dependencies = True
+            self.dependency_resolvers_config_file = None
+        else:
+            self.use_tool_dependencies = False
+
+    @property
+    def config_dict(self):
+        return {
+            'conda_auto_install': True,
+            'conda_auto_init': True,
+        }
+
+    def build_job_script(self, builder, command):
+        # type: (Any, List[str]) -> Text
+        ensure_galaxy_lib_available()
+        tool_dependency_manager = deps.build_dependency_manager(self)  # type: deps.DependencyManager
+        dependencies = get_dependencies(builder)
+        handle_dependencies = ""  # str
+        if dependencies:
+            handle_dependencies = "\n".join(tool_dependency_manager.dependency_shell_commands(dependencies, job_directory=builder.tmpdir))
+
+        template_kwds = dict(handle_dependencies=handle_dependencies)  # type: Dict[str, str]
+        job_script = COMMAND_WITH_DEPENDENCIES_TEMPLATE.substitute(template_kwds)
+        return job_script
+
+
+def get_dependencies(builder):
+    # type: (Any) -> ToolRequirements
+    (software_requirement, _) = get_feature(builder, "SoftwareRequirement")
+    dependencies = []  # type: List[ToolRequirement]
+    if software_requirement and software_requirement.get("packages"):
+        packages = software_requirement.get("packages")
+        for package in packages:
+            version = package.get("version", None)
+            if isinstance(version, list):
+                if version:
+                    version = version[0]
+                else:
+                    version = None
+            specs = [{"uri": s} for s in package.get("specs", [])]
+            dependencies.append(ToolRequirement.from_dict(dict(
+                name=package["package"].split("#")[-1],
+                version=version,
+                type="package",
+                specs=specs,
+            )))
+
+    return ToolRequirements.from_list(dependencies)
+
+
+def get_container_from_software_requirements(args, builder):
+    if args.beta_use_biocontainers:
+        ensure_galaxy_lib_available()
+        from galaxy.tools.deps.containers import ContainerRegistry, AppInfo, ToolInfo, DOCKER_CONTAINER_TYPE
+        app_info = AppInfo(
+            involucro_auto_init=True,
+            enable_beta_mulled_containers=True,
+            container_image_cache_path=".",
+        )  # type: AppInfo
+        container_registry = ContainerRegistry(app_info)  # type: ContainerRegistry
+        requirements = get_dependencies(builder)
+        tool_info = ToolInfo(requirements=requirements)  # type: ToolInfo
+        container_description = container_registry.find_best_container_description([DOCKER_CONTAINER_TYPE], tool_info)
+        if container_description:
+            return container_description.identifier
+
+    return None
+
+
+def ensure_galaxy_lib_available():
+    # type: () -> None
+    if not SOFTWARE_REQUIREMENTS_ENABLED:
+        raise Exception("The optional Python library galaxy-lib is not available; it is required for this configuration.")
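
A hypothetical walk-through of the new module: `FakeBuilder` below stands in for a cwltool builder and exposes only the `requirements`/`hints` attributes that `get_feature` reads; running it requires galaxy-lib to be installed.

```
from cwltool.software_requirements import get_dependencies

class FakeBuilder(object):
    requirements = [{
        "class": "SoftwareRequirement",
        "packages": [{"package": "seqtk", "version": ["r93"]}],
    }]
    hints = []

deps = get_dependencies(FakeBuilder())
# -> a ToolRequirements holding one "package" requirement: seqtk at r93
```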
diff --git a/cwltool/stdfsaccess.py b/cwltool/stdfsaccess.py
index 2db2a5d..df5056b 100644
--- a/cwltool/stdfsaccess.py
+++ b/cwltool/stdfsaccess.py
@@ -1,11 +1,26 @@
-from typing import Any, BinaryIO, Text
-from .pathmapper import abspath
+from __future__ import absolute_import
 import glob
 import os
-from schema_salad.ref_resolver import file_uri
+from io import open
+from typing import BinaryIO, List, Union, Text, IO, overload
 
-class StdFsAccess(object):
+from .utils import onWindows
+
+import six
+from six.moves import urllib
+from schema_salad.ref_resolver import file_uri, uri_file_path
 
+def abspath(src, basedir):  # type: (Text, Text) -> Text
+    if src.startswith(u"file://"):
+        ab = six.text_type(uri_file_path(str(src)))
+    else:
+        if basedir.startswith(u"file://"):
+            ab = src if os.path.isabs(src) else basedir + '/' + src
+        else:
+            ab = src if os.path.isabs(src) else os.path.join(basedir, src)
+    return ab
+
+class StdFsAccess(object):
     def __init__(self, basedir):  # type: (Text) -> None
         self.basedir = basedir
 
@@ -15,7 +30,17 @@ class StdFsAccess(object):
     def glob(self, pattern):  # type: (Text) -> List[Text]
         return [file_uri(str(self._abs(l))) for l in glob.glob(self._abs(pattern))]
 
-    def open(self, fn, mode):  # type: (Text, Text) -> BinaryIO
+    # overload is related to mypy type checking and in no way
+    # modifies the behaviour of the function.
+    @overload
+    def open(self, fn, mode='rb'):  # type: (Text, str) -> IO[bytes]
+        pass
+
+    @overload
+    def open(self, fn, mode='r'):  # type: (Text, str) -> IO[str]
+        pass
+
+    def open(self, fn, mode):
         return open(self._abs(fn), mode)
 
     def exists(self, fn):  # type: (Text) -> bool
@@ -28,10 +53,18 @@ class StdFsAccess(object):
         return os.path.isdir(self._abs(fn))
 
     def listdir(self, fn):  # type: (Text) -> List[Text]
-        return [abspath(l, fn) for l in os.listdir(self._abs(fn))]
+        return [abspath(urllib.parse.quote(str(l)), fn) for l in os.listdir(self._abs(fn))]
 
     def join(self, path, *paths):  # type: (Text, *Text) -> Text
         return os.path.join(path, *paths)
 
     def realpath(self, path):  # type: (Text) -> Text
         return os.path.realpath(path)
+
+    # On Windows, os.path.realpath adds an unnecessary drive letter; avoid that here
+    def docker_compatible_realpath(self, path):  # type: (Text) -> Text
+        if onWindows():
+            if path.startswith('/'):
+                return path
+            return '/'+path
+        return self.realpath(path)
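
Behaviour sketch for the `abspath` helper added above, assuming a POSIX host; expected outputs are shown in the comments.

```
from cwltool.stdfsaccess import abspath

print(abspath("file:///data/x.txt", "/base"))  # /data/x.txt
print(abspath("x.txt", "/base"))               # /base/x.txt
print(abspath("x.txt", "file:///base"))        # file:///base/x.txt
print(abspath("/abs/x.txt", "/base"))          # /abs/x.txt
```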
diff --git a/cwltool/update.py b/cwltool/update.py
index 762ad03..8b42c88 100644
--- a/cwltool/update.py
+++ b/cwltool/update.py
@@ -1,15 +1,17 @@
-import sys
-import urlparse
+from __future__ import absolute_import
+import copy
 import json
 import re
 import traceback
-import copy
+from typing import (Any, Callable, Dict, Text,  # pylint: disable=unused-import
+                    Tuple, Union)
+from copy import deepcopy
 
-from schema_salad.ref_resolver import Loader
+import six
+from six.moves import urllib
 import schema_salad.validate
-from typing import Any, Callable, Dict, List, Text, Tuple, Union  # pylint: disable=unused-import
-
-from ruamel.yaml.comments import CommentedSeq, CommentedMap
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
+from schema_salad.ref_resolver import Loader
 
 from .utils import aslist
 
@@ -29,6 +31,7 @@ def findId(doc, frg):  # type: (Any, Any) -> Dict
                 return f
     return None
 
+
 def fixType(doc):  # type: (Any) -> Any
     if isinstance(doc, list):
         for i, f in enumerate(doc):
@@ -42,12 +45,13 @@ def fixType(doc):  # type: (Any) -> Any
             return "#" + doc
     return doc
 
+
 def _draft2toDraft3dev1(doc, loader, baseuri, update_steps=True):
     # type: (Any, Loader, Text, bool) -> Any
     try:
         if isinstance(doc, dict):
             if "import" in doc:
-                imp = urlparse.urljoin(baseuri, doc["import"])
+                imp = urllib.parse.urljoin(baseuri, doc["import"])
                 impLoaded = loader.fetch(imp)
                 r = None  # type: Dict[Text, Any]
                 if isinstance(impLoaded, list):
@@ -57,14 +61,14 @@ def _draft2toDraft3dev1(doc, loader, baseuri, update_steps=True):
                 else:
                     raise Exception("Unexpected code path.")
                 r["id"] = imp
-                _, frag = urlparse.urldefrag(imp)
+                _, frag = urllib.parse.urldefrag(imp)
                 if frag:
                     frag = "#" + frag
                     r = findId(r, frag)
                 return _draft2toDraft3dev1(r, loader, imp)
 
             if "include" in doc:
-                return loader.fetch_text(urlparse.urljoin(baseuri, doc["include"]))
+                return loader.fetch_text(urllib.parse.urljoin(baseuri, doc["include"]))
 
             for typename in ("type", "items"):
                 if typename in doc:
@@ -82,7 +86,6 @@ def _draft2toDraft3dev1(doc, loader, baseuri, update_steps=True):
                                 doc["requirements"] = []
                             doc["requirements"].append({"class": "MultipleInputFeatureRequirement"})
 
-
             for a in doc:
                 doc[a] = _draft2toDraft3dev1(doc[a], loader, baseuri)
 
@@ -99,6 +102,7 @@ def _draft2toDraft3dev1(doc, loader, baseuri, update_steps=True):
             err = doc["name"]
         raise Exception(u"Error updating '%s'\n  %s\n%s" % (err, e, traceback.format_exc()))
 
+
 def draft2toDraft3dev1(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     return (_draft2toDraft3dev1(doc, loader, baseuri), "draft-3.dev1")
@@ -106,6 +110,7 @@ def draft2toDraft3dev1(doc, loader, baseuri):
 
 digits = re.compile("\d+")
 
+
 def updateScript(sc):  # type: (Text) -> Text
     sc = sc.replace("$job", "inputs")
     sc = sc.replace("$tmpdir", "runtime.tmpdir")
@@ -124,7 +129,7 @@ def _updateDev2Script(ent):  # type: (Any) -> Any
                 if not sp[0]:
                     sp.pop(0)
                 front = sp.pop(0)
-                sp = [Text(i) if digits.match(i) else "'"+i+"'"
+                sp = [Text(i) if digits.match(i) else "'" + i + "'"
                       for i in sp]
                 if front == "job":
                     return u"$(inputs[%s])" % ']['.join(sp)
@@ -143,7 +148,7 @@ def _updateDev2Script(ent):  # type: (Any) -> Any
 def _draftDraft3dev1toDev2(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Any
     doc = _updateDev2Script(doc)
-    if isinstance(doc, basestring):
+    if isinstance(doc, six.string_types):
         return doc
 
     # Convert expressions
@@ -167,7 +172,7 @@ def _draftDraft3dev1toDev2(doc, loader, baseuri):
                     if r["class"] == "ExpressionEngineRequirement":
                         if "engineConfig" in r:
                             doc["requirements"].append({
-                                "class":"InlineJavascriptRequirement",
+                                "class": "InlineJavascriptRequirement",
                                 "expressionLib": [updateScript(sc) for sc in aslist(r["engineConfig"])]
                             })
                             added = True
@@ -179,7 +184,7 @@ def _draftDraft3dev1toDev2(doc, loader, baseuri):
             else:
                 doc["requirements"] = []
             if not added:
-                doc["requirements"].append({"class":"InlineJavascriptRequirement"})
+                doc["requirements"].append({"class": "InlineJavascriptRequirement"})
 
     elif isinstance(doc, list):
         for i, a in enumerate(doc):
@@ -192,6 +197,7 @@ def draftDraft3dev1toDev2(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     return (_draftDraft3dev1toDev2(doc, loader, baseuri), "draft-3.dev2")
 
+
 def _draftDraft3dev2toDev3(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Any
     try:
@@ -200,7 +206,7 @@ def _draftDraft3dev2toDev3(doc, loader, baseuri):
                 if doc["@import"][0] == "#":
                     return doc["@import"]
                 else:
-                    imp = urlparse.urljoin(baseuri, doc["@import"])
+                    imp = urllib.parse.urljoin(baseuri, doc["@import"])
                     impLoaded = loader.fetch(imp)
                     r = {}  # type: Dict[Text, Any]
                     if isinstance(impLoaded, list):
@@ -210,14 +216,14 @@ def _draftDraft3dev2toDev3(doc, loader, baseuri):
                     else:
                         raise Exception("Unexpected code path.")
                     r["id"] = imp
-                    frag = urlparse.urldefrag(imp)[1]
+                    frag = urllib.parse.urldefrag(imp)[1]
                     if frag:
                         frag = "#" + frag
                         r = findId(r, frag)
                     return _draftDraft3dev2toDev3(r, loader, imp)
 
             if "@include" in doc:
-                return loader.fetch_text(urlparse.urljoin(baseuri, doc["@include"]))
+                return loader.fetch_text(urllib.parse.urljoin(baseuri, doc["@include"]))
 
             for a in doc:
                 doc[a] = _draftDraft3dev2toDev3(doc[a], loader, baseuri)
@@ -236,6 +242,7 @@ def _draftDraft3dev2toDev3(doc, loader, baseuri):
         import traceback
         raise Exception(u"Error updating '%s'\n  %s\n%s" % (err, e, traceback.format_exc()))
 
+
 def draftDraft3dev2toDev3(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     return (_draftDraft3dev2toDev3(doc, loader, baseuri), "draft-3.dev3")
@@ -247,7 +254,7 @@ def traverseImport(doc, loader, baseuri, func):
         if doc["$import"][0] == "#":
             return doc["$import"]
         else:
-            imp = urlparse.urljoin(baseuri, doc["$import"])
+            imp = urllib.parse.urljoin(baseuri, doc["$import"])
             impLoaded = loader.fetch(imp)
             r = {}  # type: Dict[Text, Any]
             if isinstance(impLoaded, list):
@@ -257,7 +264,7 @@ def traverseImport(doc, loader, baseuri, func):
             else:
                 raise Exception("Unexpected code path.")
             r["id"] = imp
-            _, frag = urlparse.urldefrag(imp)
+            _, frag = urllib.parse.urldefrag(imp)
             if frag:
                 frag = "#" + frag
                 r = findId(r, frag)
@@ -298,6 +305,7 @@ def draftDraft3dev3toDev4(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     return (_draftDraft3dev3toDev4(doc, loader, baseuri), "draft-3.dev4")
 
+
 def _draftDraft3dev4toDev5(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Any
     try:
@@ -332,16 +340,19 @@ def draftDraft3dev4toDev5(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     return (_draftDraft3dev4toDev5(doc, loader, baseuri), "draft-3.dev5")
 
+
 def draftDraft3dev5toFinal(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     return (doc, "draft-3")
 
+
 def _draft3toDraft4dev1(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Any
     if isinstance(doc, dict):
         if "class" in doc and doc["class"] == "Workflow":
+
             def fixup(f):  # type: (Text) -> Text
-                doc, frg = urlparse.urldefrag(f)
+                doc, frg = urllib.parse.urldefrag(f)
                 frg = '/'.join(frg.rsplit('.', 1))
                 return doc + "#" + frg
 
@@ -362,6 +373,8 @@ def _draft3toDraft4dev1(doc, loader, baseuri):
             for out in doc["outputs"]:
                 out["source"] = fixup(out["source"])
         for key, value in doc.items():
+            if key == 'run':
+                value = deepcopy(value)
             doc[key] = _draft3toDraft4dev1(value, loader, baseuri)
     elif isinstance(doc, list):
         for i, a in enumerate(doc):
@@ -369,11 +382,13 @@ def _draft3toDraft4dev1(doc, loader, baseuri):
 
     return doc
 
+
 def draft3toDraft4dev1(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     """Public updater for draft-3 to draft-4.dev1."""
     return (_draft3toDraft4dev1(doc, loader, baseuri), "draft-4.dev1")
 
+
 def _draft4Dev1toDev2(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Any
     if isinstance(doc, dict):
@@ -382,6 +397,8 @@ def _draft4Dev1toDev2(doc, loader, baseuri):
                 out["outputSource"] = out["source"]
                 del out["source"]
         for key, value in doc.items():
+            if key == 'run':
+                value = deepcopy(value)
             doc[key] = _draft4Dev1toDev2(value, loader, baseuri)
     elif isinstance(doc, list):
         for i, a in enumerate(doc):
@@ -389,6 +406,7 @@ def _draft4Dev1toDev2(doc, loader, baseuri):
 
     return doc
 
+
 def draft4Dev1toDev2(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     """Public updater for draft-4.dev1 to draft-4.dev2."""
@@ -423,11 +441,13 @@ def _draft4Dev2toDev3(doc, loader, baseuri):
 
     return doc
 
+
 def draft4Dev2toDev3(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     """Public updater for draft-4.dev2 to draft-4.dev3."""
     return (_draft4Dev2toDev3(doc, loader, baseuri), "draft-4.dev3")
 
+
 def _draft4Dev3to1_0dev4(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Any
     if isinstance(doc, dict):
@@ -441,16 +461,19 @@ def _draft4Dev3to1_0dev4(doc, loader, baseuri):
             doc[i] = _draft4Dev3to1_0dev4(a, loader, baseuri)
     return doc
 
+
 def draft4Dev3to1_0dev4(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     """Public updater for draft-4.dev3 to v1.0.dev4."""
     return (_draft4Dev3to1_0dev4(doc, loader, baseuri), "v1.0.dev4")
 
+
 def v1_0dev4to1_0(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     """Public updater for v1.0.dev4 to v1.0."""
     return (doc, "v1.0")
 
+
 def v1_0to1_1_0dev1(doc, loader, baseuri):
     # type: (Any, Loader, Text) -> Tuple[Any, Text]
     """Public updater for v1.0 to v1.1.0-dev1."""
@@ -482,11 +505,13 @@ ALLUPDATES.update(DEVUPDATES)
 
 LATEST = "v1.0"
 
+
 def identity(doc, loader, baseuri):  # pylint: disable=unused-argument
     # type: (Any, Loader, Text) -> Tuple[Any, Union[Text, Text]]
     """The default, do-nothing, CWL document upgrade function."""
     return (doc, doc["cwlVersion"])
 
+
 def checkversion(doc, metadata, enable_dev):
     # type: (Union[CommentedSeq, CommentedMap], CommentedMap, bool) -> Tuple[Dict[Text, Any], Text]  # pylint: disable=line-too-long
     """Checks the validity of the version of the give CWL document.
@@ -519,13 +544,14 @@ def checkversion(doc, metadata, enable_dev):
                     "Update your document to a stable version (%s) or use "
                     "--enable-dev to enable support for development and "
                     "deprecated versions." % (version, ", ".join(
-                        UPDATES.keys())))
+                        list(UPDATES.keys()))))
         else:
             raise schema_salad.validate.ValidationException(
                 u"Unrecognized version %s" % version)
 
     return (cdoc, version)
 
+
 def update(doc, loader, baseuri, enable_dev, metadata):
     # type: (Union[CommentedSeq, CommentedMap], Loader, Text, bool, Any) -> dict
 
diff --git a/cwltool/utils.py b/cwltool/utils.py
index 2f1abf3..abddfcf 100644
--- a/cwltool/utils.py
+++ b/cwltool/utils.py
@@ -1,6 +1,16 @@
+from __future__ import absolute_import
+
 # no imports from cwltool allowed
 
-from typing import Any, Tuple
+import os
+import shutil
+import stat
+import six
+from six.moves import urllib
+from six.moves import zip_longest
+from typing import Any, Callable, Dict, List, Tuple, Text, Union
+
+windows_default_container_id = "frolvlad/alpine-bash"
 
 def aslist(l):  # type: (Any) -> List[Any]
     if isinstance(l, list):
@@ -8,6 +18,7 @@ def aslist(l):  # type: (Any) -> List[Any]
     else:
         return [l]
 
+
 def get_feature(self, feature):  # type: (Any, Any) -> Tuple[Any, bool]
     for t in reversed(self.requirements):
         if t["class"] == feature:
@@ -16,3 +27,148 @@ def get_feature(self, feature):  # type: (Any, Any) -> Tuple[Any, bool]
         if t["class"] == feature:
             return (t, False)
     return (None, None)
+
+
+def copytree_with_merge(src, dst, symlinks=False, ignore=None):
+    # type: (Text, Text, bool, Callable[..., Any]) -> None
+    if not os.path.exists(dst):
+        os.makedirs(dst)
+        shutil.copystat(src, dst)
+    lst = os.listdir(src)
+    if ignore:
+        excl = ignore(src, lst)
+        lst = [x for x in lst if x not in excl]
+    for item in lst:
+        s = os.path.join(src, item)
+        d = os.path.join(dst, item)
+        if symlinks and os.path.islink(s):
+            if os.path.lexists(d):
+                os.remove(d)
+            os.symlink(os.readlink(s), d)
+            try:
+                st = os.lstat(s)
+                mode = stat.S_IMODE(st.st_mode)
+                os.lchmod(d, mode)
+            except:
+                pass  # lchmod not available, only available on unix
+        elif os.path.isdir(s):
+            copytree_with_merge(s, d, symlinks, ignore)
+        else:
+            shutil.copy2(s, d)
+
+
+# Changes a Windows path (only) so it can be passed to the docker run command,
+# since Docker treats paths as Unix paths: convert C:\Users\foo to /C/Users/foo
+def docker_windows_path_adjust(path):
+    # type: (Text) -> (Text)
+    if path is not None and onWindows():
+        sp = path.split(':')
+        if len(sp) == 2:
+            sp[0] = sp[0].capitalize()  # Capitalize the Windows drive letter
+            path = ':'.join(sp)
+        path = path.replace(':', '').replace('\\', '/')
+        return path if path[0] == '/' else '/' + path
+    return path
+
+
+# Changes a Docker path (only on Windows) back to a Windows path,
+# i.e. converts /C/Users/foo to C:\Users\foo
+def docker_windows_reverse_path_adjust(path):
+    # type: (Text) -> (Text)
+    if path is not None and onWindows():
+        if path[0] == '/':
+            path = path[1:]
+        else:
+            raise ValueError("not a docker path")
+        splitpath = path.split('/')
+        splitpath[0] = splitpath[0] + ':'
+        return '\\'.join(splitpath)
+    return path
+
+
+# On Docker on Windows, file URIs do not contain ':' in the path.
+# To make such a file URI Windows-compatible, add ':' after the drive letter,
+# so file:///E/var becomes file:///E:/var
+def docker_windows_reverse_fileuri_adjust(fileuri):
+    # type: (Text) -> (Text)
+    if fileuri is not None and onWindows():
+        if urllib.parse.urlsplit(fileuri).scheme == "file":
+            filesplit = fileuri.split("/")
+            if filesplit[3][-1] != ':':
+                filesplit[3] = filesplit[3] + ':'
+                return '/'.join(filesplit)
+            else:
+                return fileuri
+        else:
+            raise ValueError("not a file URI")
+    return fileuri
+
+
+# Check if we are on Windows
+def onWindows():
+    # type: () -> (bool)
+    return os.name == 'nt'
+
+
+# On Windows os.path.join uses backslashes to join paths; since these paths
+# are used inside Docker, convert the separators to /
+def convert_pathsep_to_unix(path):  # type: (Text) -> (Text)
+    if path is not None and onWindows():
+        return path.replace('\\', '/')
+    return path
+
+# Comparison function to be used in sorting, as python3 doesn't allow
+# sorting of different types like str() and int().
+# This function re-creates the Python 2 sort order for a heterogeneous
+# list of `int` and `str`.
+def cmp_like_py2(dict1, dict2):  # type: (Dict[Text, Any], Dict[Text, Any]) -> int
+    # extract lists from both dicts
+    a, b = dict1["position"], dict2["position"]
+    # iterate through both lists up to the length of the longer one
+    for i, j in zip_longest(a, b):
+        if i == j:
+            continue
+        # if the 1st list is shorter,
+        # it should come first in the sort
+        if i is None:
+            return -1
+        # if the 1st list is longer,
+        # it should come later in the sort
+        elif j is None:
+            return 1
+
+        # if either list contains a str element at this index,
+        # compare both elements as str
+        if isinstance(i, str) or isinstance(j, str):
+            return 1 if str(i) > str(j) else -1
+        # int comparison otherwise
+        return 1 if i > j else -1
+    # if both lists are equal
+    return 0
+
+
+# Utility function to convert any byte strings present to unicode strings.
+# The input is a dict of nested dicts and lists.
+def bytes2str_in_dicts(a):
+    # type: (Union[Dict[Text, Any], List[Any], Any]) -> Union[Text, List[Any], Dict[Text, Any]]
+
+    # if input is dict, recursively call for each value
+    if isinstance(a, dict):
+        for k, v in dict.items(a):
+            a[k] = bytes2str_in_dicts(v)
+        return a
+
+    # if list, iterate through list and fn call
+    # for all its elements
+    if isinstance(a, list):
+        for idx, value in enumerate(a):
+            a[idx] = bytes2str_in_dicts(value)
+        return a
+
+    # if value is bytes, return the decoded string
+    elif isinstance(a, bytes):
+        return a.decode('utf-8')
+
+    # otherwise simply return the element itself
+    return a
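
Usage sketch for `cmp_like_py2` above: Python 3's `sorted` takes the comparator via `functools.cmp_to_key`. The sample bindings are made up for illustration.

```
import functools
from cwltool.utils import cmp_like_py2

bindings = [
    {"position": [2, "b"]},
    {"position": [2, 1]},
    {"position": [1]},
]
ordered = sorted(bindings, key=functools.cmp_to_key(cmp_like_py2))
print([b["position"] for b in ordered])  # [[1], [2, 1], [2, 'b']]
```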
diff --git a/cwltool/workflow.py b/cwltool/workflow.py
index 68d9bd5..4bd926b 100644
--- a/cwltool/workflow.py
+++ b/cwltool/workflow.py
@@ -1,34 +1,34 @@
+from __future__ import absolute_import
 import copy
+import functools
+import json
 import logging
 import random
-import os
-from collections import namedtuple
-import functools
-import urlparse
 import tempfile
-import shutil
-import json
-
-from typing import Any, Callable, cast, Generator, Iterable, List, Text, Union
+from collections import namedtuple
+from typing import Any, Callable, Dict, Generator, Iterable, List, Text, Union, cast
 
 import schema_salad.validate as validate
-from schema_salad.sourceline import SourceLine
+from ruamel.yaml.comments import CommentedMap, CommentedSeq
+from schema_salad.sourceline import SourceLine, cmap
 
-from . import job
-from . import draft2tool
-from .utils import aslist
-from .process import Process, get_feature, empty_subtree, shortname, uniquename
+from . import draft2tool, expression
 from .errors import WorkflowException
-from . import expression
 from .load_tool import load_tool
-
+from .process import Process, shortname, uniquename
+from .utils import aslist
+import six
+from six.moves import range
 
 _logger = logging.getLogger("cwltool")
 
-WorkflowStateItem = namedtuple('WorkflowStateItem', ['parameter', 'value'])
+WorkflowStateItem = namedtuple('WorkflowStateItem', ['parameter', 'value', 'success'])
 
-def defaultMakeTool(toolpath_object, **kwargs):
-    # type: (Dict[Text, Any], **Any) -> Process
+
+def defaultMakeTool(toolpath_object,  # type: Dict[Text, Any]
+                    **kwargs  # type: Any
+                   ):
+    # type: (...) -> Process
     if not isinstance(toolpath_object, dict):
         raise WorkflowException(u"Not a dict: `%s`" % toolpath_object)
     if "class" in toolpath_object:
@@ -39,7 +39,10 @@ def defaultMakeTool(toolpath_object, **kwargs):
         elif toolpath_object["class"] == "Workflow":
             return Workflow(toolpath_object, **kwargs)
 
-    raise WorkflowException(u"Missing or invalid 'class' field in %s, expecting one of: CommandLineTool, ExpressionTool, Workflow" % toolpath_object["id"])
+    raise WorkflowException(
+        u"Missing or invalid 'class' field in %s, expecting one of: CommandLineTool, ExpressionTool, Workflow" %
+        toolpath_object["id"])
+
 
 def findfiles(wo, fn=None):  # type: (Any, List) -> List[Dict[Text, Any]]
     if fn is None:
@@ -66,61 +69,114 @@ def match_types(sinktype, src, iid, inputobj, linkMerge, valueFrom):
                 return True
     elif isinstance(src.parameter["type"], list):
         # Source is union type
-        # Check that every source type is compatible with the sink.
+        # Check that at least one source type is compatible with the sink.
         for st in src.parameter["type"]:
             srccopy = copy.deepcopy(src)
             srccopy.parameter["type"] = st
-            if not match_types(st, srccopy, iid, inputobj, linkMerge, valueFrom):
-                return False
-        return True
+            if match_types(sinktype, srccopy, iid, inputobj, linkMerge, valueFrom):
+                return True
+        return False
     elif linkMerge:
-            if iid not in inputobj:
-                inputobj[iid] = []
-            if linkMerge == "merge_nested":
-                inputobj[iid].append(src.value)
-            elif linkMerge == "merge_flattened":
-                if isinstance(src.value, list):
-                    inputobj[iid].extend(src.value)
-                else:
-                    inputobj[iid].append(src.value)
+        if iid not in inputobj:
+            inputobj[iid] = []
+        if linkMerge == "merge_nested":
+            inputobj[iid].append(src.value)
+        elif linkMerge == "merge_flattened":
+            if isinstance(src.value, list):
+                inputobj[iid].extend(src.value)
             else:
-                raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge)
-            return True
+                inputobj[iid].append(src.value)
+        else:
+            raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge)
+        return True
     elif valueFrom is not None or can_assign_src_to_sink(src.parameter["type"], sinktype) or sinktype == "Any":
         # simply assign the value from state to input
         inputobj[iid] = copy.deepcopy(src.value)
         return True
     return False
 
-def can_assign_src_to_sink(src, sink):  # type: (Any, Any) -> bool
+
+def check_types(srctype, sinktype, linkMerge, valueFrom):
+    # type: (Any, Any, Text, Text) -> Text
+    """Check if the source and sink types are "pass", "warning", or "exception".
+    """
+
+    if valueFrom:
+        return "pass"
+    elif not linkMerge:
+        if can_assign_src_to_sink(srctype, sinktype, strict=True):
+            return "pass"
+        elif can_assign_src_to_sink(srctype, sinktype, strict=False):
+            return "warning"
+        else:
+            return "exception"
+    elif linkMerge == "merge_nested":
+        return check_types({"items": srctype, "type": "array"}, sinktype, None, None)
+    elif linkMerge == "merge_flattened":
+        return check_types(merge_flatten_type(srctype), sinktype, None, None)
+    else:
+        raise WorkflowException(u"Unrecognized linkMerge enu_m '%s'" % linkMerge)
+
+
+def merge_flatten_type(src):
+    # type: (Any) -> Any
+    """Return the merge flattened type of the source type
+    """
+
+    if isinstance(src, list):
+        return [merge_flatten_type(t) for t in src]
+    elif isinstance(src, dict) and src.get("type") == "array":
+        return src
+    else:
+        return {"items": src, "type": "array"}
+
+
+def can_assign_src_to_sink(src, sink, strict=False):  # type: (Any, Any, bool) -> bool
     """Check for identical type specifications, ignoring extra keys like inputBinding.
+
+    src: admissible source types
+    sink: admissible sink types
+
+    In non-strict comparison, at least one source type must match one sink type.
+    In strict comparison, all source types must match at least one sink type.
     """
+
     if sink == "Any":
         return True
     if isinstance(src, dict) and isinstance(sink, dict):
         if src["type"] == "array" and sink["type"] == "array":
-            return can_assign_src_to_sink(src["items"], sink["items"])
+            return can_assign_src_to_sink(src["items"], sink["items"], strict)
         elif src["type"] == "record" and sink["type"] == "record":
-            return _compare_records(src, sink)
+            return _compare_records(src, sink, strict)
+        return False
     elif isinstance(src, list):
-        for t in src:
-            if can_assign_src_to_sink(t, sink):
-                return True
+        if strict:
+            for t in src:
+                if not can_assign_src_to_sink(t, sink):
+                    return False
+            return True
+        else:
+            for t in src:
+                if can_assign_src_to_sink(t, sink):
+                    return True
+            return False
     elif isinstance(sink, list):
         for t in sink:
             if can_assign_src_to_sink(src, t):
                 return True
+        return False
     else:
         return src == sink
-    return False
 
-def _compare_records(src, sink):
-    # type: (Dict[Text, Any], Dict[Text, Any]) -> bool
+
+def _compare_records(src, sink, strict=False):
+    # type: (Dict[Text, Any], Dict[Text, Any], bool) -> bool
     """Compare two records, ensuring they have compatible fields.
 
     This handles normalizing record names, which will be relative to workflow
     step, so that they can be compared.
     """
+
     def _rec_fields(rec):  # type: (Dict[Text, Any]) -> Dict[Text, Any]
         out = {}
         for field in rec["fields"]:
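
The strict/non-strict distinction introduced above can be checked directly; with a union source, one matching member passes the non-strict check while the strict check requires every member to match:

```
from cwltool.workflow import can_assign_src_to_sink

src = ["File", "null"]  # a union source type
print(can_assign_src_to_sink(src, "File", strict=False))  # True: "File" matches
print(can_assign_src_to_sink(src, "File", strict=True))   # False: "null" does not
```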
@@ -130,17 +186,18 @@ def _compare_records(src, sink):
 
     srcfields = _rec_fields(src)
     sinkfields = _rec_fields(sink)
-    for key in sinkfields.iterkeys():
+    for key in six.iterkeys(sinkfields):
         if (not can_assign_src_to_sink(
-                srcfields.get(key, "null"), sinkfields.get(key, "null"))
-                and sinkfields.get(key) is not None):
+                srcfields.get(key, "null"), sinkfields.get(key, "null"), strict)
+            and sinkfields.get(key) is not None):
             _logger.info("Record comparison failure for %s and %s\n"
                          "Did not match fields for %s: %s and %s" %
                          (src["name"], sink["name"], key, srcfields.get(key),
-                             sinkfields.get(key)))
+                          sinkfields.get(key)))
             return False
     return True
 
+
 def object_from_state(state, parms, frag_only, supportsMultipleInput, sourceField, incomplete=False):
     # type: (Dict[Text, WorkflowStateItem], List[Dict[Text, Any]], bool, bool, Text, bool) -> Dict[Text, Any]
     inputobj = {}  # type: Dict[Text, Any]
@@ -150,41 +207,43 @@ def object_from_state(state, parms, frag_only, supportsMultipleInput, sourceFiel
             iid = shortname(iid)
         if sourceField in inp:
             if (isinstance(inp[sourceField], list) and not
-                    supportsMultipleInput):
+            supportsMultipleInput):
                 raise WorkflowException(
                     "Workflow contains multiple inbound links to a single "
                     "parameter but MultipleInputFeatureRequirement is not "
                     "declared.")
             connections = aslist(inp[sourceField])
             for src in connections:
-                if src in state and state[src] is not None:
+                if src in state and state[src] is not None and (state[src].success == "success" or incomplete):
                     if not match_types(
                             inp["type"], state[src], iid, inputobj,
                             inp.get("linkMerge", ("merge_nested"
-                                if len(connections) > 1 else None)),
+                                                  if len(connections) > 1 else None)),
                             valueFrom=inp.get("valueFrom")):
                         raise WorkflowException(
                             u"Type mismatch between source '%s' (%s) and "
                             "sink '%s' (%s)" % (src,
-                                state[src].parameter["type"], inp["id"],
-                                inp["type"]))
+                                                state[src].parameter["type"], inp["id"],
+                                                inp["type"]))
                 elif src not in state:
                     raise WorkflowException(
                         u"Connect source '%s' on parameter '%s' does not "
                         "exist" % (src, inp["id"]))
                 elif not incomplete:
                     return None
-        elif "default" in inp:
-            inputobj[iid] = inp["default"]
-        elif "valueFrom" in inp:
+
+        if inputobj.get(iid) is None and "default" in inp:
+            inputobj[iid] = copy.copy(inp["default"])
+
+        if iid not in inputobj and ("valueFrom" in inp or incomplete):
             inputobj[iid] = None
-        else:
+
+        if iid not in inputobj:
             raise WorkflowException(u"Value for %s not specified" % (inp["id"]))
     return inputobj
 
 
 class WorkflowJobStep(object):
-
     def __init__(self, step):  # type: (Any) -> None
         self.step = step
         self.tool = step.tool
@@ -198,12 +257,14 @@ class WorkflowJobStep(object):
         # type: (Dict[Text, Text], functools.partial[None], **Any) -> Generator
         kwargs["part_of"] = self.name
         kwargs["name"] = shortname(self.id)
+
+        _logger.info(u"[%s] start", self.name)
+
         for j in self.step.job(joborder, output_callback, **kwargs):
             yield j
 
 
 class WorkflowJob(object):
-
     def __init__(self, workflow, **kwargs):
         # type: (Workflow, **Any) -> None
         self.workflow = workflow
@@ -211,6 +272,8 @@ class WorkflowJob(object):
         self.steps = [WorkflowJobStep(s) for s in workflow.steps]
         self.state = None  # type: Dict[Text, WorkflowStateItem]
         self.processStatus = None  # type: Text
+        self.did_callback = False
+
         if "outdir" in kwargs:
             self.outdir = kwargs["outdir"]
         elif "tmp_outdir_prefix" in kwargs:
@@ -221,15 +284,35 @@ class WorkflowJob(object):
 
         self.name = uniquename(u"workflow %s" % kwargs.get("name", shortname(self.workflow.tool.get("id", "embedded"))))
 
-        _logger.debug(u"[%s] initialized from %s", self.name, self.tool.get("id", "workflow embedded in %s" % kwargs.get("part_of")))
+        _logger.debug(u"[%s] initialized from %s", self.name,
+                      self.tool.get("id", "workflow embedded in %s" % kwargs.get("part_of")))
 
-    def receive_output(self, step, outputparms, jobout, processStatus):
-        # type: (WorkflowJobStep, List[Dict[Text,Text]], Dict[Text,Text], Text) -> None
+    def do_output_callback(self, final_output_callback):
+        # type: (Callable[[Any, Any], Any]) -> None
+
+        supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])
+
+        try:
+            wo = object_from_state(self.state, self.tool["outputs"], True, supportsMultipleInput, "outputSource",
+                                   incomplete=True)
+        except WorkflowException as e:
+            _logger.error(u"[%s] Cannot collect workflow output: %s", self.name, e)
+            wo = {}
+            self.processStatus = "permanentFail"
+
+        _logger.info(u"[%s] completed %s", self.name, self.processStatus)
+
+        self.did_callback = True
+
+        final_output_callback(wo, self.processStatus)
+
+    def receive_output(self, step, outputparms, final_output_callback, jobout, processStatus):
+        # type: (WorkflowJobStep, List[Dict[Text,Text]], Callable[[Any, Any], Any], Dict[Text,Text], Text) -> None
 
         for i in outputparms:
             if "id" in i:
                 if i["id"] in jobout:
-                    self.state[i["id"]] = WorkflowStateItem(i, jobout[i["id"]])
+                    self.state[i["id"]] = WorkflowStateItem(i, jobout[i["id"]], processStatus)
                 else:
                     _logger.error(u"[%s] Output is missing expected field %s", step.name, i["id"])
                     processStatus = "permanentFail"
@@ -241,14 +324,19 @@ class WorkflowJob(object):
             if self.processStatus != "permanentFail":
                 self.processStatus = processStatus
 
-            _logger.warn(u"[%s] completed %s", step.name, processStatus)
+            _logger.warning(u"[%s] completed %s", step.name, processStatus)
         else:
             _logger.info(u"[%s] completed %s", step.name, processStatus)
 
         step.completed = True
+        self.made_progress = True
+
+        completed = sum(1 for s in self.steps if s.completed)
+        if completed == len(self.steps):
+            self.do_output_callback(final_output_callback)
 
-    def try_make_job(self, step, **kwargs):
-        # type: (WorkflowJobStep, **Any) -> Generator
+    def try_make_job(self, step, final_output_callback, **kwargs):
+        # type: (WorkflowJobStep, Callable[[Any, Any], Any], **Any) -> Generator
         inputparms = step.tool["inputs"]
         outputparms = step.tool["outputs"]
 
@@ -267,20 +355,21 @@ class WorkflowJob(object):
 
             _logger.debug(u"[%s] starting %s", self.name, step.name)
 
-            callback = functools.partial(self.receive_output, step, outputparms)
+            callback = functools.partial(self.receive_output, step, outputparms, final_output_callback)
 
             valueFrom = {
                 i["id"]: i["valueFrom"] for i in step.tool["inputs"]
                 if "valueFrom" in i}
 
             if len(valueFrom) > 0 and not bool(self.workflow.get_requirement("StepInputExpressionRequirement")[0]):
-                raise WorkflowException("Workflow step contains valueFrom but StepInputExpressionRequirement not in requirements")
+                raise WorkflowException(
+                    "Workflow step contains valueFrom but StepInputExpressionRequirement not in requirements")
 
-            vfinputs = {shortname(k): v for k,v in inputobj.iteritems()}
+            vfinputs = {shortname(k): v for k, v in six.iteritems(inputobj)}
 
             def postScatterEval(io):
                 # type: (Dict[Text, Any]) -> Dict[Text, Any]
-                shortio = {shortname(k): v for k,v in io.iteritems()}
+                shortio = {shortname(k): v for k, v in six.iteritems(io)}
 
                 def valueFromFunc(k, v):  # type: (Any, Any) -> Any
                     if k in valueFrom:
@@ -289,7 +378,8 @@ class WorkflowJob(object):
                             None, None, {}, context=v)
                     else:
                         return v
-                return {k: valueFromFunc(k, v) for k,v in io.items()}
+
+                return {k: valueFromFunc(k, v) for k, v in io.items()}
 
             if "scatter" in step.tool:
                 scatter = aslist(step.tool["scatter"])
@@ -298,24 +388,29 @@ class WorkflowJob(object):
                     raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")
                 kwargs["postScatterEval"] = postScatterEval
 
+                tot = 1
+                emptyscatter = [shortname(s) for s in scatter if len(inputobj[s]) == 0]
+                if emptyscatter:
+                    _logger.warning(u"[job %s] Notice: scattering over empty input in '%s'.  All outputs will be empty.", step.name, "', '".join(emptyscatter))
+
                 if method == "dotproduct" or method is None:
                     jobs = dotproduct_scatter(step, inputobj, scatter,
                                               cast(  # known bug with mypy
                                                   # https://github.com/python/mypy/issues/797
-                                                  Callable[[Any], Any],callback), **kwargs)
+                                                  Callable[[Any], Any], callback), **kwargs)
                 elif method == "nested_crossproduct":
                     jobs = nested_crossproduct_scatter(step, inputobj,
-                        scatter, cast(Callable[[Any], Any], callback),
-                        # known bug in mypy
-                        # https://github.com/python/mypy/issues/797
-                        **kwargs)
+                                                       scatter, cast(Callable[[Any], Any], callback),
+                                                       # known bug in mypy
+                                                       # https://github.com/python/mypy/issues/797
+                                                       **kwargs)
                 elif method == "flat_crossproduct":
                     jobs = cast(Generator,
                                 flat_crossproduct_scatter(step, inputobj,
                                                           scatter,
                                                           cast(Callable[[Any], Any],
-                                                         # known bug in mypy
-                                                         # https://github.com/python/mypy/issues/797
+                                                               # known bug in mypy
+                                                               # https://github.com/python/mypy/issues/797
                                                                callback), 0, **kwargs))
             else:
                 if _logger.isEnabledFor(logging.DEBUG):
@@ -339,7 +434,7 @@ class WorkflowJob(object):
             step.completed = True
 
     def run(self, **kwargs):
-        _logger.debug(u"[%s] workflow starting", self.name)
+        _logger.info(u"[%s] start", self.name)
 
     def job(self, joborder, output_callback, **kwargs):
         # type: (Dict[Text, Any], Callable[[Any, Any], Any], **Any) -> Generator
@@ -353,11 +448,12 @@ class WorkflowJob(object):
             with SourceLine(self.tool["inputs"], e, WorkflowException):
                 iid = shortname(i["id"])
                 if iid in joborder:
-                    self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(joborder[iid]))
+                    self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(joborder[iid]), "success")
                 elif "default" in i:
-                    self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(i["default"]))
+                    self.state[i["id"]] = WorkflowStateItem(i, copy.deepcopy(i["default"]), "success")
                 else:
-                    raise WorkflowException(u"Input '%s' not in input object and does not have a default value." % (i["id"]))
+                    raise WorkflowException(
+                        u"Input '%s' not in input object and does not have a default value." % (i["id"]))
 
         for s in self.steps:
             for out in s.tool["outputs"]:
@@ -365,7 +461,7 @@ class WorkflowJob(object):
 
         completed = 0
         while completed < len(self.steps):
-            made_progress = False
+            self.made_progress = False
 
             for step in self.steps:
                 if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
@@ -373,7 +469,7 @@ class WorkflowJob(object):
 
                 if not step.submitted:
                     try:
-                        step.iterable = self.try_make_job(step, **kwargs)
+                        step.iterable = self.try_make_job(step, output_callback, **kwargs)
                     except WorkflowException as e:
                         _logger.error(u"[%s] Cannot make job: %s", step.name, e)
                         _logger.debug("", exc_info=True)
@@ -385,7 +481,7 @@ class WorkflowJob(object):
                             if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
                                 break
                             if newjob:
-                                made_progress = True
+                                self.made_progress = True
                                 yield newjob
                             else:
                                 break
@@ -396,24 +492,14 @@ class WorkflowJob(object):
 
             completed = sum(1 for s in self.steps if s.completed)
 
-            if not made_progress and completed < len(self.steps):
+            if not self.made_progress and completed < len(self.steps):
                 if self.processStatus != "success":
                     break
                 else:
                     yield None
 
-        supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])
-
-        try:
-            wo = object_from_state(self.state, self.tool["outputs"], True, supportsMultipleInput, "outputSource", incomplete=True)
-        except WorkflowException as e:
-            _logger.error(u"[%s] Cannot collect workflow output: %s", self.name, e)
-            wo = {}
-            self.processStatus = "permanentFail"
-
-        _logger.info(u"[%s] outdir is %s", self.name, self.outdir)
-
-        output_callback(wo, self.processStatus)
+        if not self.did_callback:
+            self.do_output_callback(output_callback)
 
 
 class Workflow(Process):
@@ -425,20 +511,45 @@ class Workflow(Process):
         kwargs["hints"] = self.hints
 
         makeTool = kwargs.get("makeTool")
-        self.steps = [WorkflowStep(step, n, **kwargs) for n,step in enumerate(self.tool.get("steps", []))]
+        self.steps = []  # type: List[WorkflowStep]
+        validation_errors = []
+        for n, step in enumerate(self.tool.get("steps", [])):
+            try:
+                self.steps.append(WorkflowStep(step, n, **kwargs))
+            except validate.ValidationException as v:
+                validation_errors.append(v)
+
+        if validation_errors:
+            raise validate.ValidationException("\n".join(str(v) for v in validation_errors))
+
         random.shuffle(self.steps)
 
-        # TODO: statically validate data links instead of doing it at runtime.
+        # statically validate data links instead of doing it at runtime.
+        workflow_inputs = self.tool["inputs"]
+        workflow_outputs = self.tool["outputs"]
 
-    def job(self, joborder, output_callback, **kwargs):
-        # type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator
-        builder = self._init_job(joborder, **kwargs)
+        step_inputs = []  # type: List[Any]
+        step_outputs = []  # type: List[Any]
+        for step in self.steps:
+            step_inputs.extend(step.tool["inputs"])
+            step_outputs.extend(step.tool["outputs"])
+
+        static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs)
+
+
+    def job(self,
+            job_order,  # type: Dict[Text, Text]
+            output_callbacks,  # type: Callable[[Any, Any], Any]
+            **kwargs  # type: Any
+            ):
+        # type: (...) -> Generator[Any, None, None]
+        builder = self._init_job(job_order, **kwargs)
         wj = WorkflowJob(self, **kwargs)
         yield wj
 
         kwargs["part_of"] = u"workflow %s" % wj.name
 
-        for w in wj.job(builder.job, output_callback, **kwargs):
+        for w in wj.job(builder.job, output_callbacks, **kwargs):
             yield w
 
     def visit(self, op):
@@ -447,8 +558,103 @@ class Workflow(Process):
             s.visit(op)
 
 
-class WorkflowStep(Process):
+def static_checker(workflow_inputs, workflow_outputs, step_inputs, step_outputs):
+    # type: (List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]], List[Dict[Text, Any]]) -> None
+    """Check if all source and sink types of a workflow are compatible before run time.
+    """
+
+    # source parameters: workflow_inputs and step_outputs
+    # sink parameters: step_inputs and workflow_outputs
+
+    # make a dictionary of source parameters, indexed by the "id" field
+    src_parms = workflow_inputs + step_outputs
+    src_dict = {}
+    for parm in src_parms:
+        src_dict[parm["id"]] = parm
+
+    step_inputs_val = check_all_types(src_dict, step_inputs, "source")
+    workflow_outputs_val = check_all_types(src_dict, workflow_outputs, "outputSource")
+
+    warnings = step_inputs_val["warning"] + workflow_outputs_val["warning"]
+    exceptions = step_inputs_val["exception"] + workflow_outputs_val["exception"]
+
+    warning_msgs = []
+    exception_msgs = []
+    for warning in warnings:
+        src = warning.src
+        sink = warning.sink
+        linkMerge = warning.linkMerge
+        msg = SourceLine(src, "type").makeError(
+            "Source '%s' of type %s is partially incompatible"
+            % (shortname(src["id"]), json.dumps(src["type"]))) + "\n" + \
+            SourceLine(sink, "type").makeError(
+            "  with sink '%s' of type %s"
+            % (shortname(sink["id"]), json.dumps(sink["type"])))
+        if linkMerge:
+            msg += "\n" + SourceLine(sink).makeError("  source has linkMerge method %s" % linkMerge)
+        warning_msgs.append(msg)
+    for exception in exceptions:
+        src = exception.src
+        sink = exception.sink
+        linkMerge = exception.linkMerge
+        msg = SourceLine(src, "type").makeError(
+            "Source '%s' of type %s is incompatible"
+            % (shortname(src["id"]), json.dumps(src["type"]))) + "\n" + \
+            SourceLine(sink, "type").makeError(
+            "  with sink '%s' of type %s"
+            % (shortname(sink["id"]), json.dumps(sink["type"])))
+        if linkMerge:
+            msg += "\n" + SourceLine(sink).makeError("  source has linkMerge method %s" % linkMerge)
+        exception_msgs.append(msg)
+
+    for sink in step_inputs:
+        if ('null' != sink["type"] and 'null' not in sink["type"]
+            and "source" not in sink and "default" not in sink and "valueFrom" not in sink):
+            msg = SourceLine(sink).makeError(
+                "Required parameter '%s' does not have source, default, or valueFrom expression"
+                % shortname(sink["id"]))
+            exception_msgs.append(msg)
+
+    all_warning_msg = "\n".join(warning_msgs)
+    all_exception_msg = "\n".join(exception_msgs)
+
+    if warnings:
+        _logger.warning("Workflow checker warning:")
+        _logger.warning(all_warning_msg)
+    if exceptions:
+        raise validate.ValidationException(all_exception_msg)
+
+
+SrcSink = namedtuple("SrcSink", ["src", "sink", "linkMerge"])
+
+def check_all_types(src_dict, sinks, sourceField):
+    # type: (Dict[Text, Any], List[Dict[Text, Any]], Text) -> Dict[Text, List[SrcSink]]
+    # sourceField is either "source" or "outputSource"
+    """Given a list of sinks, check if their types match with the types of their sources.
+    """
+
+    validation = {"warning": [], "exception": []}  # type: Dict[Text, List[SrcSink]]
+    for sink in sinks:
+        if sourceField in sink:
+            valueFrom = sink.get("valueFrom")
+            if isinstance(sink[sourceField], list):
+                srcs_of_sink = [src_dict[parm_id] for parm_id in sink[sourceField]]
+                linkMerge = sink.get("linkMerge", ("merge_nested"
+                                                   if len(sink[sourceField]) > 1 else None))
+            else:
+                parm_id = sink[sourceField]
+                srcs_of_sink = [src_dict[parm_id]]
+                linkMerge = None
+            for src in srcs_of_sink:
+                check_result = check_types(src["type"], sink["type"], linkMerge, valueFrom)
+                if check_result == "warning":
+                    validation["warning"].append(SrcSink(src, sink, linkMerge))
+                elif check_result == "exception":
+                    validation["exception"].append(SrcSink(src, sink, linkMerge))
+    return validation
 
+
+class WorkflowStep(Process):
     def __init__(self, toolpath_object, pos, **kwargs):
         # type: (Dict[Text, Any], int, **Any) -> None
         if "id" in toolpath_object:
@@ -467,21 +673,24 @@ class WorkflowStep(Process):
                     toolpath_object["run"], kwargs.get("makeTool"), kwargs,
                     enable_dev=kwargs.get("enable_dev"),
                     strict=kwargs.get("strict"),
-                    fetcher_constructor=kwargs.get("fetcher_constructor"))
+                    fetcher_constructor=kwargs.get("fetcher_constructor"),
+                    resolver=kwargs.get("resolver"))
         except validate.ValidationException as v:
             raise WorkflowException(
                 u"Tool definition %s failed validation:\n%s" %
                 (toolpath_object["run"], validate.indent(str(v))))
 
+        validation_errors = []
         self.tool = toolpath_object = copy.deepcopy(toolpath_object)
+        bound = set()
         for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
             toolpath_object[toolfield] = []
-            for step_entry in toolpath_object[stepfield]:
-                if isinstance(step_entry, (str, unicode)):
-                    param = {}  # type: Dict[Text, Any]
+            for n, step_entry in enumerate(toolpath_object[stepfield]):
+                if isinstance(step_entry, six.string_types):
+                    param = CommentedMap()  # type: CommentedMap
                     inputid = step_entry
                 else:
-                    param = copy.copy(step_entry)
+                    param = CommentedMap(six.iteritems(step_entry))
                     inputid = step_entry["id"]
 
                 shortinputid = shortname(inputid)
@@ -491,22 +700,46 @@ class WorkflowStep(Process):
                     if frag == shortinputid:
                         param.update(tool_entry)
                         found = True
+                        bound.add(frag)
                         break
                 if not found:
                     if stepfield == "in":
                         param["type"] = "Any"
                     else:
-                        raise WorkflowException("[%s] Workflow step output '%s' not found in the outputs of the tool (expected one of '%s')" % (
-                            self.id, shortname(step_entry), "', '".join([shortname(tool_entry["id"]) for tool_entry in self.embedded_tool.tool[toolfield]])))
+                        validation_errors.append(
+                            SourceLine(self.tool["out"], n).makeError(
+                                "Workflow step output '%s' does not correspond to" % shortname(step_entry))
+                            + "\n" + SourceLine(self.embedded_tool.tool, "outputs").makeError(
+                                "  tool output (expected '%s')" % (
+                                    "', '".join(
+                                        [shortname(tool_entry["id"]) for tool_entry in
+                                         self.embedded_tool.tool[toolfield]]))))
                 param["id"] = inputid
+                param.lc.line = toolpath_object[stepfield].lc.data[n][0]
+                param.lc.col = toolpath_object[stepfield].lc.data[n][1]
+                param.lc.filename = toolpath_object[stepfield].lc.filename
                 toolpath_object[toolfield].append(param)
 
+        missing = []
+        for i, tool_entry in enumerate(self.embedded_tool.tool["inputs"]):
+            if shortname(tool_entry["id"]) not in bound:
+                if "null" not in tool_entry["type"] and "default" not in tool_entry:
+                    missing.append(shortname(tool_entry["id"]))
+
+        if missing:
+            validation_errors.append(SourceLine(self.tool, "in").makeError(
+                "Step is missing required parameter%s '%s'" % ("s" if len(missing) > 1 else "", "', '".join(missing))))
+
+        if validation_errors:
+            raise validate.ValidationException("\n".join(validation_errors))
+
         super(WorkflowStep, self).__init__(toolpath_object, **kwargs)
 
         if self.embedded_tool.tool["class"] == "Workflow":
             (feature, _) = self.get_requirement("SubworkflowFeatureRequirement")
             if not feature:
-                raise WorkflowException("Workflow contains embedded workflow but SubworkflowFeatureRequirement not in requirements")
+                raise WorkflowException(
+                    "Workflow contains embedded workflow but SubworkflowFeatureRequirement not in requirements")
 
         if "scatter" in self.tool:
             (feature, _) = self.get_requirement("ScatterFeatureRequirement")
@@ -519,12 +752,14 @@ class WorkflowStep(Process):
 
             method = self.tool.get("scatterMethod")
             if method is None and len(scatter) != 1:
-                raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")
+                raise validate.ValidationException("Must specify scatterMethod when scattering over multiple inputs")
 
             inp_map = {i["id"]: i for i in inputparms}
             for s in scatter:
                 if s not in inp_map:
-                    raise WorkflowException(u"Scatter parameter '%s' does not correspond to an input parameter of this step, inputs are %s" % (s, inp_map.keys()))
+                    raise validate.ValidationException(
+                        SourceLine(self.tool, "scatter").makeError(u"Scatter parameter '%s' does not correspond to an input parameter of this "
+                                                                   u"step, expecting '%s'" % (shortname(s), "', '".join(shortname(k) for k in inp_map.keys()))))
 
                 inp_map[s]["type"] = {"type": "array", "items": inp_map[s]["type"]}
 
@@ -533,15 +768,15 @@ class WorkflowStep(Process):
             else:
                 nesting = 1
 
-            for r in xrange(0, nesting):
-                for i in outputparms:
-                    i["type"] = {"type": "array", "items": i["type"]}
+            for r in range(0, nesting):
+                for op in outputparms:
+                    op["type"] = {"type": "array", "items": op["type"]}
             self.tool["inputs"] = inputparms
             self.tool["outputs"] = outputparms
 
     def receive_output(self, output_callback, jobout, processStatus):
         # type: (Callable[...,Any], Dict[Text, Text], Text) -> None
-        #_logger.debug("WorkflowStep output from run is %s", jobout)
+        # _logger.debug("WorkflowStep output from run is %s", jobout)
         output = {}
         for i in self.tool["outputs"]:
             field = shortname(i["id"])
@@ -551,17 +786,23 @@ class WorkflowStep(Process):
                 processStatus = "permanentFail"
         output_callback(output, processStatus)
 
-    def job(self, joborder, output_callback, **kwargs):
-        # type: (Dict[Text, Any], Callable[...,Any], **Any) -> Generator
+    def job(self,
+            job_order,  # type: Dict[Text, Text]
+            output_callbacks,  # type: Callable[[Any, Any], Any]
+            **kwargs  # type: Any
+            ):
+        # type: (...) -> Generator[Any, None, None]
         for i in self.tool["inputs"]:
             p = i["id"]
             field = shortname(p)
-            joborder[field] = joborder[i["id"]]
-            del joborder[i["id"]]
+            job_order[field] = job_order[i["id"]]
+            del job_order[i["id"]]
 
         try:
-            for t in self.embedded_tool.job(joborder,
-                                            functools.partial(self.receive_output, output_callback),
+            for t in self.embedded_tool.job(job_order,
+                                            functools.partial(
+                                                self.receive_output,
+                                                output_callbacks),
                                             **kwargs):
                 yield t
         except WorkflowException:
@@ -576,7 +817,6 @@ class WorkflowStep(Process):
 
 
 class ReceiveScatterOutput(object):
-
     def __init__(self, output_callback, dest):
         # type: (Callable[..., Any], Dict[Text,List[Text]]) -> None
         self.dest = dest
@@ -587,7 +827,7 @@ class ReceiveScatterOutput(object):
 
     def receive_scatter_output(self, index, jobout, processStatus):
         # type: (int, Dict[Text, Text], Text) -> None
-        for k,v in jobout.items():
+        for k, v in jobout.items():
             self.dest[k][index] = v
 
         if processStatus != "success":
@@ -604,10 +844,11 @@ class ReceiveScatterOutput(object):
         if self.completed == self.total:
             self.output_callback(self.dest, self.processStatus)
 
+
 def parallel_steps(steps, rc, kwargs):  # type: (List[Generator], ReceiveScatterOutput, Dict[str, Any]) -> Generator
     while rc.completed < rc.total:
         made_progress = False
-        for index in xrange(len(steps)):
+        for index in range(len(steps)):
             step = steps[index]
             if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
                 break
@@ -627,6 +868,7 @@ def parallel_steps(steps, rc, kwargs):  # type: (List[Generator], ReceiveScatter
         if not made_progress and rc.completed < rc.total:
             yield None
 
+
 def dotproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
     # type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
     l = None
@@ -676,16 +918,17 @@ def nested_crossproduct_scatter(process, joborder, scatter_keys, output_callback
             jo = kwargs["postScatterEval"](jo)
             steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
         else:
+            # known bug with mypy, https://github.com/python/mypy/issues/797
+            casted = cast(Callable[[Any], Any], functools.partial(rc.receive_scatter_output, n))
             steps.append(nested_crossproduct_scatter(process, jo,
-                    scatter_keys[1:], cast(  # known bug with mypy
-                        # https://github.com/python/mypy/issues/797g
-                        Callable[[Any], Any],
-                        functools.partial(rc.receive_scatter_output, n)), **kwargs))
+                                                     scatter_keys[1:],
+                                                     casted, **kwargs))
 
     rc.setTotal(l)
 
     return parallel_steps(steps, rc, kwargs)
 
+
 def crossproduct_size(joborder, scatter_keys):
     # type: (Dict[Text, Any], List[Text]) -> int
     scatter_key = scatter_keys[0]
@@ -699,6 +942,7 @@ def crossproduct_size(joborder, scatter_keys):
             sum += crossproduct_size(joborder, scatter_keys[1:])
     return sum
 
+
 def flat_crossproduct_scatter(process, joborder, scatter_keys, output_callback, startindex, **kwargs):
     # type: (WorkflowJobStep, Dict[Text, Any], List[Text], Union[ReceiveScatterOutput,Callable[..., Any]], int, **Any) -> Union[List[Generator], Generator]
     scatter_key = scatter_keys[0]
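
The static_checker / check_all_types pass added above moves source/sink type validation from run time to load time: every step input and workflow output is matched against the parameter it draws from, warnings are logged, and hard mismatches raise a ValidationException. A minimal, self-contained sketch of the idea, with a toy type comparison standing in for cwltool's real check_types helper:

    from collections import namedtuple

    SrcSink = namedtuple("SrcSink", ["src", "sink", "linkMerge"])

    def toy_check_types(src_type, sink_type):
        # Stand-in for cwltool's check_types: identical types pass,
        # a sink of "Any" only warns, anything else is a hard failure.
        if src_type == sink_type:
            return "pass"
        if sink_type == "Any":
            return "warning"
        return "exception"

    def toy_check_all_types(src_dict, sinks, sourceField):
        # Same shape as check_all_types above: bucket results so the
        # caller can log warnings and raise on exceptions.
        validation = {"warning": [], "exception": []}
        for sink in sinks:
            if sourceField not in sink:
                continue
            sources = sink[sourceField]
            if not isinstance(sources, list):
                sources = [sources]
            for parm_id in sources:
                src = src_dict[parm_id]
                result = toy_check_types(src["type"], sink["type"])
                if result != "pass":
                    validation[result].append(SrcSink(src, sink, None))
        return validation

    srcs = {"#main/in1": {"id": "#main/in1", "type": "int"}}
    sinks = [{"id": "#main/step1/x", "type": "File", "source": "#main/in1"}]
    print(toy_check_all_types(srcs, sinks, "source"))
    # one entry lands in "exception": an int source cannot feed a File sink
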
diff --git a/ez_setup.py b/ez_setup.py
deleted file mode 100755
index 50e0dfc..0000000
--- a/ez_setup.py
+++ /dev/null
@@ -1,391 +0,0 @@
-#!/usr/bin/env python
-
-"""
-Setuptools bootstrapping installer.
-
-Run this script to install or upgrade setuptools.
-"""
-
-import os
-import shutil
-import sys
-import tempfile
-import zipfile
-import optparse
-import subprocess
-import platform
-import textwrap
-import contextlib
-import warnings
-
-from distutils import log
-
-try:
-    from urllib.request import urlopen
-except ImportError:
-    from urllib2 import urlopen
-
-try:
-    from site import USER_SITE
-except ImportError:
-    USER_SITE = None
-
-DEFAULT_VERSION = "18.5"
-DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
-DEFAULT_SAVE_DIR = os.curdir
-
-
-def _python_cmd(*args):
-    """
-    Execute a command.
-
-    Return True if the command succeeded.
-    """
-    args = (sys.executable,) + args
-    return subprocess.call(args) == 0
-
-
-def _install(archive_filename, install_args=()):
-    """Install Setuptools."""
-    with archive_context(archive_filename):
-        # installing
-        log.warn('Installing Setuptools')
-        if not _python_cmd('setup.py', 'install', *install_args):
-            log.warn('Something went wrong during the installation.')
-            log.warn('See the error message above.')
-            # exitcode will be 2
-            return 2
-
-
-def _build_egg(egg, archive_filename, to_dir):
-    """Build Setuptools egg."""
-    with archive_context(archive_filename):
-        # building an egg
-        log.warn('Building a Setuptools egg in %s', to_dir)
-        _python_cmd('setup.py', '-q', 'bdist_egg', '--dist-dir', to_dir)
-    # returning the result
-    log.warn(egg)
-    if not os.path.exists(egg):
-        raise IOError('Could not build the egg.')
-
-
-class ContextualZipFile(zipfile.ZipFile):
-
-    """Supplement ZipFile class to support context manager for Python 2.6."""
-
-    def __enter__(self):
-        return self
-
-    def __exit__(self, type, value, traceback):
-        self.close()
-
-    def __new__(cls, *args, **kwargs):
-        """Construct a ZipFile or ContextualZipFile as appropriate."""
-        if hasattr(zipfile.ZipFile, '__exit__'):
-            return zipfile.ZipFile(*args, **kwargs)
-        return super(ContextualZipFile, cls).__new__(cls)
-
-
-@contextlib.contextmanager
-def archive_context(filename):
-    """
-    Unzip filename to a temporary directory, set to the cwd.
-
-    The unzipped target is cleaned up after.
-    """
-    tmpdir = tempfile.mkdtemp()
-    log.warn('Extracting in %s', tmpdir)
-    old_wd = os.getcwd()
-    try:
-        os.chdir(tmpdir)
-        with ContextualZipFile(filename) as archive:
-            archive.extractall()
-
-        # going in the directory
-        subdir = os.path.join(tmpdir, os.listdir(tmpdir)[0])
-        os.chdir(subdir)
-        log.warn('Now working in %s', subdir)
-        yield
-
-    finally:
-        os.chdir(old_wd)
-        shutil.rmtree(tmpdir)
-
-
-def _do_download(version, download_base, to_dir, download_delay):
-    """Download Setuptools."""
-    egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
-                       % (version, sys.version_info[0], sys.version_info[1]))
-    if not os.path.exists(egg):
-        archive = download_setuptools(version, download_base,
-                                      to_dir, download_delay)
-        _build_egg(egg, archive, to_dir)
-    sys.path.insert(0, egg)
-
-    # Remove previously-imported pkg_resources if present (see
-    # https://bitbucket.org/pypa/setuptools/pull-request/7/ for details).
-    if 'pkg_resources' in sys.modules:
-        del sys.modules['pkg_resources']
-
-    import setuptools
-    setuptools.bootstrap_install_from = egg
-
-
-def use_setuptools(
-        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=DEFAULT_SAVE_DIR, download_delay=15):
-    """
-    Ensure that a setuptools version is installed.
-
-    Return None. Raise SystemExit if the requested version
-    or later cannot be installed.
-    """
-    to_dir = os.path.abspath(to_dir)
-
-    # prior to importing, capture the module state for
-    # representative modules.
-    rep_modules = 'pkg_resources', 'setuptools'
-    imported = set(sys.modules).intersection(rep_modules)
-
-    try:
-        import pkg_resources
-        pkg_resources.require("setuptools>=" + version)
-        # a suitable version is already installed
-        return
-    except ImportError:
-        # pkg_resources not available; setuptools is not installed; download
-        pass
-    except pkg_resources.DistributionNotFound:
-        # no version of setuptools was found; allow download
-        pass
-    except pkg_resources.VersionConflict as VC_err:
-        if imported:
-            _conflict_bail(VC_err, version)
-
-        # otherwise, unload pkg_resources to allow the downloaded version to
-        #  take precedence.
-        del pkg_resources
-        _unload_pkg_resources()
-
-    return _do_download(version, download_base, to_dir, download_delay)
-
-
-def _conflict_bail(VC_err, version):
-    """
-    Setuptools was imported prior to invocation, so it is
-    unsafe to unload it. Bail out.
-    """
-    conflict_tmpl = textwrap.dedent("""
-        The required version of setuptools (>={version}) is not available,
-        and can't be installed while this script is running. Please
-        install a more recent version first, using
-        'easy_install -U setuptools'.
-
-        (Currently using {VC_err.args[0]!r})
-        """)
-    msg = conflict_tmpl.format(**locals())
-    sys.stderr.write(msg)
-    sys.exit(2)
-
-
-def _unload_pkg_resources():
-    del_modules = [
-        name for name in sys.modules
-        if name.startswith('pkg_resources')
-    ]
-    for mod_name in del_modules:
-        del sys.modules[mod_name]
-
-
-def _clean_check(cmd, target):
-    """
-    Run the command to download target.
-
-    If the command fails, clean up before re-raising the error.
-    """
-    try:
-        subprocess.check_call(cmd)
-    except subprocess.CalledProcessError:
-        if os.access(target, os.F_OK):
-            os.unlink(target)
-        raise
-
-
-def download_file_powershell(url, target):
-    """
-    Download the file at url to target using Powershell.
-
-    Powershell will validate trust.
-    Raise an exception if the command cannot complete.
-    """
-    target = os.path.abspath(target)
-    ps_cmd = (
-        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
-        "[System.Net.CredentialCache]::DefaultCredentials; "
-        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
-        % vars()
-    )
-    cmd = [
-        'powershell',
-        '-Command',
-        ps_cmd,
-    ]
-    _clean_check(cmd, target)
-
-
-def has_powershell():
-    """Determine if Powershell is available."""
-    if platform.system() != 'Windows':
-        return False
-    cmd = ['powershell', '-Command', 'echo test']
-    with open(os.path.devnull, 'wb') as devnull:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    return True
-download_file_powershell.viable = has_powershell
-
-
-def download_file_curl(url, target):
-    cmd = ['curl', url, '--silent', '--output', target]
-    _clean_check(cmd, target)
-
-
-def has_curl():
-    cmd = ['curl', '--version']
-    with open(os.path.devnull, 'wb') as devnull:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    return True
-download_file_curl.viable = has_curl
-
-
-def download_file_wget(url, target):
-    cmd = ['wget', url, '--quiet', '--output-document', target]
-    _clean_check(cmd, target)
-
-
-def has_wget():
-    cmd = ['wget', '--version']
-    with open(os.path.devnull, 'wb') as devnull:
-        try:
-            subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
-        except Exception:
-            return False
-    return True
-download_file_wget.viable = has_wget
-
-
-def download_file_insecure(url, target):
-    """Use Python to download the file, without connection authentication."""
-    src = urlopen(url)
-    try:
-        # Read all the data in one block.
-        data = src.read()
-    finally:
-        src.close()
-
-    # Write all the data in one block to avoid creating a partial file.
-    with open(target, "wb") as dst:
-        dst.write(data)
-download_file_insecure.viable = lambda: True
-
-
-def get_best_downloader():
-    downloaders = (
-        download_file_powershell,
-        download_file_curl,
-        download_file_wget,
-        download_file_insecure,
-    )
-    viable_downloaders = (dl for dl in downloaders if dl.viable())
-    return next(viable_downloaders, None)
-
-
-def download_setuptools(
-        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=DEFAULT_SAVE_DIR, delay=15,
-        downloader_factory=get_best_downloader):
-    """
-    Download setuptools from a specified location and return its filename.
-
-    `version` should be a valid setuptools version number that is available
-    as an sdist for download under the `download_base` URL (which should end
-    with a '/'). `to_dir` is the directory where the egg will be downloaded.
-    `delay` is the number of seconds to pause before an actual download
-    attempt.
-
-    ``downloader_factory`` should be a function taking no arguments and
-    returning a function for downloading a URL to a target.
-    """
-    # making sure we use the absolute path
-    to_dir = os.path.abspath(to_dir)
-    zip_name = "setuptools-%s.zip" % version
-    url = download_base + zip_name
-    saveto = os.path.join(to_dir, zip_name)
-    if not os.path.exists(saveto):  # Avoid repeated downloads
-        log.warn("Downloading %s", url)
-        downloader = downloader_factory()
-        downloader(url, saveto)
-    return os.path.realpath(saveto)
-
-
-def _build_install_args(options):
-    """
-    Build the arguments to 'python setup.py install' on the setuptools package.
-
-    Returns list of command line arguments.
-    """
-    return ['--user'] if options.user_install else []
-
-
-def _parse_args():
-    """Parse the command line for options."""
-    parser = optparse.OptionParser()
-    parser.add_option(
-        '--user', dest='user_install', action='store_true', default=False,
-        help='install in user site package (requires Python 2.6 or later)')
-    parser.add_option(
-        '--download-base', dest='download_base', metavar="URL",
-        default=DEFAULT_URL,
-        help='alternative URL from where to download the setuptools package')
-    parser.add_option(
-        '--insecure', dest='downloader_factory', action='store_const',
-        const=lambda: download_file_insecure, default=get_best_downloader,
-        help='Use internal, non-validating downloader'
-    )
-    parser.add_option(
-        '--version', help="Specify which version to download",
-        default=DEFAULT_VERSION,
-    )
-    parser.add_option(
-    	'--to-dir',
-    	help="Directory to save (and re-use) package",
-    	default=DEFAULT_SAVE_DIR,
-    )
-    options, args = parser.parse_args()
-    # positional arguments are ignored
-    return options
-
-
-def _download_args(options):
-	"""Return args for download_setuptools function from cmdline args."""
-	return dict(
-		version=options.version,
-		download_base=options.download_base,
-		downloader_factory=options.downloader_factory,
-		to_dir=options.to_dir,
-	)
-
-
-def main():
-    """Install or upgrade setuptools and EasyInstall."""
-    options = _parse_args()
-    archive = download_setuptools(**_download_args(options))
-    return _install(archive, _build_install_args(options))
-
-if __name__ == '__main__':
-    sys.exit(main())
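
ez_setup.py is dropped entirely; pip-era installs assume setuptools is already present (see the reworked setup.py below). One detail of the deleted bootstrapper worth keeping in mind is its downloader selection: each candidate carried a viable() probe and get_best_downloader() took the first one that passed. A small sketch of that pattern with illustrative stand-ins (by_curl and by_urllib are not cwltool names):

    def by_curl(url, target):
        raise NotImplementedError  # hypothetical downloader, illustration only

    def by_urllib(url, target):
        raise NotImplementedError  # hypothetical fallback

    by_curl.viable = lambda: False   # pretend curl is unavailable
    by_urllib.viable = lambda: True

    def get_best_downloader(downloaders=(by_curl, by_urllib)):
        # Same trick as the removed get_best_downloader: the first viable
        # candidate wins; next() yields None when nothing is viable.
        return next((dl for dl in downloaders if dl.viable()), None)

    assert get_best_downloader() is by_urllib
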
diff --git a/gittaggers.py b/gittaggers.py
index 53dda8f..d15f688 100644
--- a/gittaggers.py
+++ b/gittaggers.py
@@ -1,10 +1,10 @@
-from setuptools.command.egg_info import egg_info
 import subprocess
 import time
 
+from setuptools.command.egg_info import egg_info
+
 
 class EggInfoFromGit(egg_info):
-
     """Tag the build with git commit timestamp.
 
     If a build tag has already been set (e.g., "egg_info -b", building
diff --git a/setup.cfg b/setup.cfg
index b202cbf..e30e824 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -2,10 +2,17 @@
 ignore = E124,E128,E129,E201,E202,E225,E226,E231,E265,E271,E302,E303,F401,E402,E501,W503,E731,F811,F821,F841
 exclude = cwltool/schemas
 
-[easy_install]
+[bdist_wheel]
+universal = 1
+
+[aliases]
+test = pytest
+
+[tool:pytest]
+addopts = --ignore cwltool/schemas
+testpaths = tests
 
 [egg_info]
-tag_build = .20170114120503
+tag_build = .20170803160545
 tag_date = 0
-tag_svn_revision = 0
 
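The new [tool:pytest] section means the suite can be driven with the same options outside of 'python setup.py test'; a sketch, assuming it is run from a checkout root:

    import sys

    import pytest  # the runner that the "test = pytest" alias dispatches to

    # Mirror the addopts and testpaths settings from setup.cfg above.
    sys.exit(pytest.main(["--ignore", "cwltool/schemas", "tests"]))
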
diff --git a/setup.py b/setup.py
index a3d6106..1045b3d 100755
--- a/setup.py
+++ b/setup.py
@@ -1,22 +1,23 @@
 #!/usr/bin/env python
-import ez_setup
-ez_setup.use_setuptools()
 import os
 import sys
-import shutil
 
 import setuptools.command.egg_info as egg_info_cmd
-from setuptools import setup, find_packages
+from setuptools import setup
 
 SETUP_DIR = os.path.dirname(__file__)
 README = os.path.join(SETUP_DIR, 'README.rst')
 
 try:
     import gittaggers
+
     tagger = gittaggers.EggInfoFromGit
 except ImportError:
     tagger = egg_info_cmd.egg_info
 
+needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
+pytest_runner = ['pytest-runner'] if needs_pytest else []
+
 setup(name='cwltool',
       version='1.0',
       description='Common workflow language reference implementation',
@@ -25,8 +26,10 @@ setup(name='cwltool',
       author_email='common-workflow-language@googlegroups.com',
       url="https://github.com/common-workflow-language/cwltool",
       download_url="https://github.com/common-workflow-language/cwltool",
-      license='Apache 2.0',
-      packages=["cwltool"],
+      # platforms='',  # omitted; conveyed by the classifiers below
+      # license='',  # omitted; conveyed by the license classifier below
+      packages=["cwltool", 'cwltool.tests'],
+      package_dir={'cwltool.tests': 'tests'},
       package_data={'cwltool': ['schemas/draft-2/*.yml',
                                 'schemas/draft-3/*.yml',
                                 'schemas/draft-3/*.md',
@@ -40,21 +43,61 @@ setup(name='cwltool',
                                 'schemas/v1.1.0-dev1/*.md',
                                 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml',
                                 'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md',
-                                'cwlNodeEngine.js']},
+                                'cwlNodeEngine.js',
+                                'extensions.yml']},
+      include_package_data=True,
       install_requires=[
           'setuptools',
           'requests >= 1.0',
-          'ruamel.yaml >= 0.12.4',
-          'rdflib >= 4.2.0, < 4.3.0',
+          'ruamel.yaml >= 0.12.4, < 0.15',
+          'rdflib >= 4.2.2, < 4.3.0',
           'shellescape >= 3.4.1, < 3.5',
-          'schema-salad >= 2.2.20170111180227, < 3',
-          'typing >= 3.5.2, < 3.6'
+          'schema-salad >= 2.6, < 3',
+          'typing >= 3.5.3',
+          'six >= 1.8.0',
       ],
+      extras_require={
+          'deps': ["galaxy-lib >= 17.09.3"]
+      },
+      setup_requires=[] + pytest_runner,
       test_suite='tests',
-      tests_require=[],
+      tests_require=['pytest', 'mock >= 2.0.0',],
       entry_points={
-          'console_scripts': [ "cwltool=cwltool.main:main" ]
+          'console_scripts': ["cwltool=cwltool.main:main"]
       },
       zip_safe=True,
       cmdclass={'egg_info': tagger},
-)
+      classifiers=[
+          'Development Status :: 5 - Production/Stable',
+          'Environment :: Console',
+          'Intended Audience :: Developers',
+          'Intended Audience :: Science/Research',
+          'Intended Audience :: Healthcare Industry',
+          'License :: OSI Approved :: Apache Software License',
+          'Natural Language :: English',
+          'Operating System :: MacOS :: MacOS X',
+          'Operating System :: POSIX',
+          'Operating System :: POSIX :: Linux',
+          'Operating System :: OS Independent',
+          'Operating System :: Microsoft :: Windows',
+          'Operating System :: Microsoft :: Windows :: Windows 10',
+          'Operating System :: Microsoft :: Windows :: Windows 8.1',
+          # 'Operating System :: Microsoft :: Windows :: Windows 8',  # not tested
+          # 'Operating System :: Microsoft :: Windows :: Windows 7',  # not tested
+          'Programming Language :: Python :: 2',
+          'Programming Language :: Python :: 2.7',
+          'Programming Language :: Python :: 3',
+          'Programming Language :: Python :: 3.3',
+          'Programming Language :: Python :: 3.4',
+          'Programming Language :: Python :: 3.5',
+          'Programming Language :: Python :: 3.6',
+          'Topic :: Scientific/Engineering',
+          'Topic :: Scientific/Engineering :: Bio-Informatics',
+          'Topic :: Scientific/Engineering :: Astronomy',
+          'Topic :: Scientific/Engineering :: Atmospheric Science',
+          'Topic :: Scientific/Engineering :: Information Analysis',
+          'Topic :: Scientific/Engineering :: Medical Science Apps.',
+          'Topic :: System :: Distributed Computing',
+          'Topic :: Utilities',
+      ]
+      )
diff --git a/tests/2.fasta b/tests/2.fasta
new file mode 100644
index 0000000..3bfe7d3
--- /dev/null
+++ b/tests/2.fasta
@@ -0,0 +1,11 @@
+>Sequence 561 BP; 135 A; 106 C; 98 G; 222 T; 0 other;
+gttcgatgcc taaaatacct tcttttgtcc ctacacagac cacagttttc ctaatggctt
+tacaccgact agaaattctt gtgcaagcac taattgaaag cggttggcct agagtgttac
+cggtttgtat agctgagcgc gtctcttgcc ctgatcaaag gttcattttc tctactttgg
+aagacgttgt ggaagaatac aacaagtacg agtctctccc ccctggtttg ctgattactg
+gatacagttg taataccctt cgcaacaccg cgtaactatc tatatgaatt attttccctt
+tattatatgt agtaggttcg tctttaatct tcctttagca agtcttttac tgttttcgac
+ctcaatgttc atgttcttag gttgttttgg ataatatgcg gtcagtttaa tcttcgttgt
+ttcttcttaa aatatttatt catggtttaa tttttggttt gtacttgttc aggggccagt
+tcattattta ctctgtttgt atacagcagt tcttttattt ttagtatgat tttaatttaa
+aacaattcta atggtcaaaa a
\ No newline at end of file
diff --git a/tests/2.fastq b/tests/2.fastq
new file mode 100644
index 0000000..436c05f
--- /dev/null
+++ b/tests/2.fastq
@@ -0,0 +1,12 @@
+@EAS54_6_R1_2_1_413_324
+CCCTTCTTGTCTTCAGCGTTTCTCC
++
+;;3;;;;;;;;;;;;7;;;;;;;88
+@EAS54_6_R1_2_1_540_792
+TTGGCAGGCCAAGGCCGATGGATCA
++
+;;;;;;;;;;;7;;;;;-;;;3;83
+@EAS54_6_R1_2_1_443_348
+GTTGCTTCTGGCGTGGGTGGGGGGG
++EAS54_6_R1_2_1_443_348
+;;;;;;;;;;;9;7;;.7;393333
\ No newline at end of file
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/echo-cwlrun-job.yaml b/tests/echo-cwlrun-job.yaml
new file mode 100644
index 0000000..d0858c5
--- /dev/null
+++ b/tests/echo-cwlrun-job.yaml
@@ -0,0 +1,6 @@
+cwl:tool: echo.cwl
+cwl:requirements:
+ - class: DockerRequirement
+   dockerPull: debian
+
+inp: "Hoopla!"
diff --git a/tests/echo-job.yaml b/tests/echo-job.yaml
new file mode 100644
index 0000000..616d4ac
--- /dev/null
+++ b/tests/echo-job.yaml
@@ -0,0 +1,5 @@
+cwl:requirements:
+ - class: DockerRequirement
+   dockerPull: debian
+
+inp: "Howdy!"
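
Both job files above rely on the cwl:tool / cwl:requirements extensions: the job order itself names the tool to run and injects a DockerRequirement. A usage sketch (assuming a repository-root working directory, so the relative cwl:tool reference resolves against the job file):

    from cwltool.main import main

    # echo-cwlrun-job.yaml carries "cwl:tool: echo.cwl", so no tool
    # argument is needed on the command line; 0 signals success.
    exit_code = main(["tests/echo-cwlrun-job.yaml"])
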
diff --git a/tests/echo.cwl b/tests/echo.cwl
new file mode 100644
index 0000000..4c9fa87
--- /dev/null
+++ b/tests/echo.cwl
@@ -0,0 +1,15 @@
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  - id: inp
+    type: string
+    inputBinding: {}
+outputs:
+  - id: out
+    type: string
+    outputBinding:
+      glob: out.txt
+      loadContents: true
+      outputEval: $(self[0].contents)
+baseCommand: echo
+stdout: out.txt
\ No newline at end of file
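
echo.cwl is also handy for exercising the Python API; a sketch using cwltool.factory (the module the new tests below import), assuming the tool's string-in/string-out contract as written above:

    import cwltool.factory

    fac = cwltool.factory.Factory()
    echo = fac.make("tests/echo.cwl")   # load and validate the tool
    result = echo(inp="foo")            # run it and collect outputs
    # expected shape: {"out": "foo\n"}, since "out" globs out.txt
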
diff --git a/tests/listing-job.yml b/tests/listing-job.yml
new file mode 100644
index 0000000..b885342
--- /dev/null
+++ b/tests/listing-job.yml
@@ -0,0 +1,3 @@
+d:
+  class: Directory
+  location: tmp1
\ No newline at end of file
diff --git a/tests/random_lines.cwl b/tests/random_lines.cwl
new file mode 100644
index 0000000..b352474
--- /dev/null
+++ b/tests/random_lines.cwl
@@ -0,0 +1,29 @@
+cwlVersion: v1.0
+class: CommandLineTool
+id: "random_lines"
+doc: "Select random lines from a file"
+inputs:
+  - id: seed
+    type: int
+    inputBinding:
+      position: 1
+      prefix: -s
+  - id: input1
+    type: File
+    inputBinding:
+      position: 2
+  - id: num_lines
+    type: int
+    inputBinding:
+      position: 3
+outputs:
+  output1:
+    type: stdout
+baseCommand: ["random-lines"]
+arguments: []
+hints:
+  SoftwareRequirement:
+    packages:
+    - package: 'random-lines'
+      version:
+      - '1.0'
diff --git a/tests/random_lines_job.json b/tests/random_lines_job.json
new file mode 100644
index 0000000..e1859c0
--- /dev/null
+++ b/tests/random_lines_job.json
@@ -0,0 +1,8 @@
+{
+  "input1": {
+    "class": "File",
+    "location": "2.fastq"
+  },
+  "seed": 5,
+  "num_lines": 2
+}
diff --git a/tests/random_lines_mapping.cwl b/tests/random_lines_mapping.cwl
new file mode 100644
index 0000000..b526b3c
--- /dev/null
+++ b/tests/random_lines_mapping.cwl
@@ -0,0 +1,29 @@
+cwlVersion: v1.0
+class: CommandLineTool
+id: "random_lines"
+doc: "Select random lines from a file"
+inputs:
+  - id: seed
+    type: int
+    inputBinding:
+      position: 1
+      prefix: -s
+  - id: input1
+    type: File
+    inputBinding:
+      position: 2
+  - id: num_lines
+    type: int
+    inputBinding:
+      position: 3
+outputs:
+  output1:
+    type: stdout
+baseCommand: ["random-lines"]
+arguments: []
+hints:
+  SoftwareRequirement:
+    packages:
+    - package: randomLines
+      version:
+      - '1.0.0-rc1'
diff --git a/tests/seqtk_seq.cwl b/tests/seqtk_seq.cwl
new file mode 100644
index 0000000..b97d6c2
--- /dev/null
+++ b/tests/seqtk_seq.cwl
@@ -0,0 +1,24 @@
+cwlVersion: v1.0
+class: CommandLineTool
+id: "seqtk_seq"
+doc: "Convert to FASTA (seqtk)"
+inputs:
+  - id: input1
+    type: File
+    inputBinding:
+      position: 1
+      prefix: "-a"
+outputs:
+  - id: output1
+    type: File
+    outputBinding:
+      glob: out
+baseCommand: ["seqtk", "seq"]
+arguments: []
+stdout: out
+hints:
+  SoftwareRequirement:
+    packages:
+    - package: seqtk
+      version:
+      - r93
diff --git a/tests/seqtk_seq_job.json b/tests/seqtk_seq_job.json
new file mode 100644
index 0000000..79ea46c
--- /dev/null
+++ b/tests/seqtk_seq_job.json
@@ -0,0 +1,6 @@
+{
+  "input1": {
+    "class": "File",
+    "location": "2.fastq"
+  }
+}
diff --git a/tests/seqtk_seq_with_docker.cwl b/tests/seqtk_seq_with_docker.cwl
new file mode 100644
index 0000000..8c78347
--- /dev/null
+++ b/tests/seqtk_seq_with_docker.cwl
@@ -0,0 +1,26 @@
+cwlVersion: v1.0
+class: CommandLineTool
+id: "seqtk_seq"
+doc: "Convert to FASTA (seqtk)"
+inputs:
+  - id: input1
+    type: File
+    inputBinding:
+      position: 1
+      prefix: "-a"
+outputs:
+  - id: output1
+    type: File
+    outputBinding:
+      glob: out
+baseCommand: ["seqtk", "seq"]
+arguments: []
+stdout: out
+hints:
+  SoftwareRequirement:
+    packages:
+    - package: seqtk
+      version:
+      - '1.2'
+  DockerRequirement:
+    dockerPull: quay.io/biocontainers/seqtk:1.2--0
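
Unlike the plain seqtk_seq.cwl, this variant pins a concrete DockerRequirement alongside the SoftwareRequirement hint, so it runs with no dependency resolvers configured; a sketch, assuming Docker is available and the biocontainers image can be pulled:

    from cwltool.main import main

    exit_code = main(["tests/seqtk_seq_with_docker.cwl",
                      "tests/seqtk_seq_job.json"])
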
diff --git a/tests/seqtk_seq_wrong_name.cwl b/tests/seqtk_seq_wrong_name.cwl
new file mode 100644
index 0000000..5e4665b
--- /dev/null
+++ b/tests/seqtk_seq_wrong_name.cwl
@@ -0,0 +1,27 @@
+cwlVersion: v1.0
+class: CommandLineTool
+id: "seqtk_seq"
+doc: "Convert to FASTA (seqtk)"
+inputs:
+  - id: input1
+    type: File
+    inputBinding:
+      position: 1
+      prefix: "-a"
+outputs:
+  - id: output1
+    type: File
+    outputBinding:
+      glob: out
+baseCommand: ["seqtk", "seq"]
+arguments: []
+stdout: out
+hints:
+  SoftwareRequirement:
+    packages:
+    - package: seqtk_seq
+      version:
+      - '1.2'
+      specs:
+      - https://anaconda.org/bioconda/seqtk
+      - https://packages.debian.org/sid/seqtk
diff --git a/tests/test_bad_outputs_wf.cwl b/tests/test_bad_outputs_wf.cwl
new file mode 100644
index 0000000..838e46b
--- /dev/null
+++ b/tests/test_bad_outputs_wf.cwl
@@ -0,0 +1,33 @@
+cwlVersion: v1.0
+class: Workflow
+inputs: []
+outputs:
+  b:
+    type: string
+    outputSource: step2/c
+steps:
+  step1:
+    in: []
+    out: [c]
+    run:
+      class: CommandLineTool
+      id: subtool
+      inputs: []
+      outputs:
+        b:
+          type: string
+          outputBinding:
+            outputEval: "qq"
+      baseCommand: echo
+  step2:
+    in:
+      a: step1/c
+    out: [c]
+    run:
+      class: CommandLineTool
+      id: subtool
+      inputs:
+        a: string
+      outputs:
+        b: string
+      baseCommand: echo
\ No newline at end of file
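
test_bad_outputs_wf.cwl wires the workflow output to step2/c, but both embedded tools only declare an output named b, so the stricter WorkflowStep validation above rejects it at load time. A sketch of observing that (assuming a repository-root working directory; the non-zero exit code is what tests/test_check.py below asserts for the badout*.cwl cases):

    from cwltool.main import main

    # Validation failure surfaces as exit code 1 rather than a traceback.
    assert main(["tests/test_bad_outputs_wf.cwl"]) == 1
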
diff --git a/tests/test_check.py b/tests/test_check.py
new file mode 100644
index 0000000..cd71541
--- /dev/null
+++ b/tests/test_check.py
@@ -0,0 +1,22 @@
+from __future__ import absolute_import
+import unittest
+
+import cwltool.expression as expr
+import cwltool.factory
+import cwltool.pathmapper
+import cwltool.process
+import cwltool.workflow
+import pytest
+from cwltool.main import main
+from cwltool.utils import onWindows
+
+from .util import get_data
+
+
+class TestCheck(unittest.TestCase):
+    @pytest.mark.skipif(onWindows(),
+                        reason="cwltool is invoked directly; on Windows that uses a default Docker container")
+    def test_output_checking(self):
+        self.assertEqual(main([get_data('tests/wf/badout1.cwl')]), 1)
+        self.assertEqual(main([get_data('tests/wf/badout2.cwl')]), 1)
+        self.assertEqual(main([get_data('tests/wf/badout3.cwl')]), 1)
diff --git a/tests/test_cwl_version.py b/tests/test_cwl_version.py
new file mode 100644
index 0000000..c78a013
--- /dev/null
+++ b/tests/test_cwl_version.py
@@ -0,0 +1,14 @@
+from __future__ import absolute_import
+import unittest
+
+from cwltool.main import main
+
+from .util import get_data
+
+class CWL_Version_Checks(unittest.TestCase):
+    # no cwlVersion in the workflow
+    def test_missing_cwl_version(self):
+        self.assertEqual(main([get_data('tests/wf/missing_cwlVersion.cwl')]), 1)
+    # using cwlVersion: v0.1 in the workflow
+    def test_incorrect_cwl_version(self):
+        self.assertEqual(main([get_data('tests/wf/wrong_cwlVersion.cwl')]), 1)
diff --git a/tests/test_default_path.py b/tests/test_default_path.py
new file mode 100644
index 0000000..873656c
--- /dev/null
+++ b/tests/test_default_path.py
@@ -0,0 +1,15 @@
+import unittest
+from cwltool.load_tool import fetch_document, validate_document
+from .util import get_data
+from schema_salad.ref_resolver import Loader
+
+class TestDefaultPath(unittest.TestCase):
+    # Test that no error is raised when the default path is not present
+    def test_default_path(self):
+        document_loader, workflowobj, uri = fetch_document(
+            get_data("tests/wf/default_path.cwl"))
+        document_loader, avsc_names, processobj, metadata, uri = validate_document(
+            document_loader, workflowobj, uri)
+
+        self.assertIsInstance(document_loader, Loader)
+        self.assertIn("cwlVersion", processobj)
diff --git a/tests/test_deps_env_resolvers_conf.yml b/tests/test_deps_env_resolvers_conf.yml
new file mode 100644
index 0000000..e1e190c
--- /dev/null
+++ b/tests/test_deps_env_resolvers_conf.yml
@@ -0,0 +1,3 @@
+- type: galaxy_packages
+  base_path: ./tests/test_deps_env
+
diff --git a/tests/test_deps_env_resolvers_conf_rewrite.yml b/tests/test_deps_env_resolvers_conf_rewrite.yml
new file mode 100644
index 0000000..a80b6d4
--- /dev/null
+++ b/tests/test_deps_env_resolvers_conf_rewrite.yml
@@ -0,0 +1,3 @@
+- type: galaxy_packages
+  base_path: ./tests/test_deps_env
+  mapping_files: ./tests/test_deps_mapping.yml
diff --git a/tests/test_deps_mapping.yml b/tests/test_deps_mapping.yml
new file mode 100644
index 0000000..e09af5a
--- /dev/null
+++ b/tests/test_deps_mapping.yml
@@ -0,0 +1,6 @@
+- from:
+    name: randomLines
+    version: 1.0.0-rc1
+  to:
+    name: random-lines
+    version: '1.0'
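
These three YAML files feed the new galaxy-lib backed SoftwareRequirement support: the resolver configs point at a galaxy_packages tree under tests/test_deps_env, and the mapping file rewrites the randomLines 1.0.0-rc1 request from random_lines_mapping.cwl into the random-lines 1.0 package actually present there. A wiring sketch (the flag name is given on the assumption that this release exposes --beta-dependency-resolvers-configuration):

    from cwltool.main import main

    exit_code = main([
        "--beta-dependency-resolvers-configuration",
        "tests/test_deps_env_resolvers_conf_rewrite.yml",
        "tests/random_lines_mapping.cwl",
        "tests/random_lines_job.json",
    ])
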
diff --git a/tests/test_docker_warning.py b/tests/test_docker_warning.py
new file mode 100644
index 0000000..8040b38
--- /dev/null
+++ b/tests/test_docker_warning.py
@@ -0,0 +1,25 @@
+from __future__ import absolute_import
+import unittest
+from mock import mock
+from cwltool.utils import windows_default_container_id
+from cwltool.draft2tool import DEFAULT_CONTAINER_MSG, CommandLineTool
+
+
+class TestDefaultDockerWarning(unittest.TestCase):
+
+    # Check that a warning is issued when the default Docker container is used on Windows
+    @mock.patch("cwltool.draft2tool.onWindows", return_value=True)
+    @mock.patch("cwltool.draft2tool._logger")
+    def test_default_docker_warning(self, mock_logger, mock_windows):
+
+        class TestCommandLineTool(CommandLineTool):
+            def __init__(self, **kwargs):
+                self.requirements = []
+                self.hints = []
+
+            def find_default_container(args, builder):
+                return windows_default_container_id
+
+        TestObject = TestCommandLineTool()
+        TestObject.makeJobRunner()
+        mock_logger.warning.assert_called_with(DEFAULT_CONTAINER_MSG % (windows_default_container_id, windows_default_container_id))
diff --git a/tests/test_examples.py b/tests/test_examples.py
new file mode 100644
index 0000000..641cb5b
--- /dev/null
+++ b/tests/test_examples.py
@@ -0,0 +1,520 @@
+from __future__ import absolute_import
+import unittest
+
+import cwltool.expression as expr
+import cwltool.factory
+import cwltool.pathmapper
+import cwltool.process
+import cwltool.workflow
+import schema_salad.validate
+from cwltool.main import main
+
+from .util import get_data
+
+
+class TestParamMatching(unittest.TestCase):
+    def test_params(self):
+        self.assertTrue(expr.param_re.match("(foo)"))
+        self.assertTrue(expr.param_re.match("(foo.bar)"))
+        self.assertTrue(expr.param_re.match("(foo['bar'])"))
+        self.assertTrue(expr.param_re.match("(foo[\"bar\"])"))
+        self.assertTrue(expr.param_re.match("(foo.bar.baz)"))
+        self.assertTrue(expr.param_re.match("(foo['bar'].baz)"))
+        self.assertTrue(expr.param_re.match("(foo['bar']['baz'])"))
+        self.assertTrue(expr.param_re.match("(foo['b\\'ar']['baz'])"))
+        self.assertTrue(expr.param_re.match("(foo['b ar']['baz'])"))
+        self.assertTrue(expr.param_re.match("(foo_bar)"))
+
+        self.assertFalse(expr.param_re.match("(foo.[\"bar\"])"))
+        self.assertFalse(expr.param_re.match("(.foo[\"bar\"])"))
+        self.assertFalse(expr.param_re.match("(foo [\"bar\"])"))
+        self.assertFalse(expr.param_re.match("( foo[\"bar\"])"))
+        self.assertFalse(expr.param_re.match("(foo[bar].baz)"))
+        self.assertFalse(expr.param_re.match("(foo['bar\"].baz)"))
+        self.assertFalse(expr.param_re.match("(foo['bar].baz)"))
+        self.assertFalse(expr.param_re.match("{foo}"))
+        self.assertFalse(expr.param_re.match("(foo.bar"))
+        self.assertFalse(expr.param_re.match("foo.bar)"))
+        self.assertFalse(expr.param_re.match("foo.b ar)"))
+        self.assertFalse(expr.param_re.match("foo.b\'ar)"))
+        self.assertFalse(expr.param_re.match("(foo+bar"))
+        self.assertFalse(expr.param_re.match("(foo bar"))
+
+        inputs = {
+            "foo": {
+                "bar": {
+                    "baz": "zab1"
+                },
+                "b ar": {
+                    "baz": 2
+                },
+                "b'ar": {
+                    "baz": True
+                },
+                'b"ar': {
+                    "baz": None
+                }
+            },
+            "lst": ["A", "B"]
+        }
+
+        self.assertEqual(expr.interpolate("$(foo)", inputs), inputs["foo"])
+
+        for pattern in ("$(foo.bar)",
+                        "$(foo['bar'])",
+                        "$(foo[\"bar\"])"):
+            self.assertEqual(expr.interpolate(pattern, inputs), inputs["foo"]["bar"])
+
+        for pattern in ("$(foo.bar.baz)",
+                        "$(foo['bar'].baz)",
+                        "$(foo['bar'][\"baz\"])",
+                        "$(foo.bar['baz'])"):
+            self.assertEqual(expr.interpolate(pattern, inputs), "zab1")
+
+        self.assertEqual(expr.interpolate("$(foo['b ar'].baz)", inputs), 2)
+        self.assertEqual(expr.interpolate("$(foo['b\\'ar'].baz)", inputs), True)
+        self.assertEqual(expr.interpolate("$(foo[\"b'ar\"].baz)", inputs), True)
+        self.assertEqual(expr.interpolate("$(foo['b\\\"ar'].baz)", inputs), None)
+
+        self.assertEqual(expr.interpolate("$(lst[0])", inputs), "A")
+        self.assertEqual(expr.interpolate("$(lst[1])", inputs), "B")
+        self.assertEqual(expr.interpolate("$(lst.length)", inputs), 2)
+        self.assertEqual(expr.interpolate("$(lst['length'])", inputs), 2)
+
+        for pattern in ("-$(foo.bar)",
+                        "-$(foo['bar'])",
+                        "-$(foo[\"bar\"])"):
+            self.assertEqual(expr.interpolate(pattern, inputs), """-{"baz": "zab1"}""")
+
+        for pattern in ("-$(foo.bar.baz)",
+                        "-$(foo['bar'].baz)",
+                        "-$(foo['bar'][\"baz\"])",
+                        "-$(foo.bar['baz'])"):
+            self.assertEqual(expr.interpolate(pattern, inputs), "-zab1")
+
+        self.assertEqual(expr.interpolate("-$(foo['b ar'].baz)", inputs), "-2")
+        self.assertEqual(expr.interpolate("-$(foo['b\\'ar'].baz)", inputs), "-true")
+        self.assertEqual(expr.interpolate("-$(foo[\"b\\'ar\"].baz)", inputs), "-true")
+        self.assertEqual(expr.interpolate("-$(foo['b\\\"ar'].baz)", inputs), "-null")
+
+        for pattern in ("$(foo.bar) $(foo.bar)",
+                        "$(foo['bar']) $(foo['bar'])",
+                        "$(foo[\"bar\"]) $(foo[\"bar\"])"):
+            self.assertEqual(expr.interpolate(pattern, inputs), """{"baz": "zab1"} {"baz": "zab1"}""")
+
+        for pattern in ("$(foo.bar.baz) $(foo.bar.baz)",
+                        "$(foo['bar'].baz) $(foo['bar'].baz)",
+                        "$(foo['bar'][\"baz\"]) $(foo['bar'][\"baz\"])",
+                        "$(foo.bar['baz']) $(foo.bar['baz'])"):
+            self.assertEqual(expr.interpolate(pattern, inputs), "zab1 zab1")
+
+        self.assertEqual(expr.interpolate("$(foo['b ar'].baz) $(foo['b ar'].baz)", inputs), "2 2")
+        self.assertEqual(expr.interpolate("$(foo['b\\'ar'].baz) $(foo['b\\'ar'].baz)", inputs), "true true")
+        self.assertEqual(expr.interpolate("$(foo[\"b\\'ar\"].baz) $(foo[\"b\\'ar\"].baz)", inputs), "true true")
+        self.assertEqual(expr.interpolate("$(foo['b\\\"ar'].baz) $(foo['b\\\"ar'].baz)", inputs), "null null")
+
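+
+# A minimal sketch of the interpolation rule the assertions above exercise:
+# a lone parameter reference preserves the value's native type, while a
+# reference embedded in a larger string is JSON-serialized and concatenated.
+def _interpolation_demo():  # illustrative helper, not collected by unittest
+    inputs = {"foo": {"bar": {"baz": "zab1"}}}
+    whole = expr.interpolate("$(foo.bar)", inputs)      # -> {'baz': 'zab1'} (a dict)
+    embedded = expr.interpolate("-$(foo.bar)", inputs)  # -> '-{"baz": "zab1"}' (a str)
+    return whole, embedded
+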
+
+class TestFactory(unittest.TestCase):
+    def test_factory(self):
+        f = cwltool.factory.Factory()
+        echo = f.make(get_data("tests/echo.cwl"))
+        self.assertEqual(echo(inp="foo"), {"out": "foo\n"})
+
+    def test_partial_scatter(self):
+        f = cwltool.factory.Factory(on_error="continue")
+        fail = f.make(get_data("tests/wf/scatterfail.cwl"))
+        try:
+            fail()
+        except cwltool.factory.WorkflowStatus as e:
+            self.assertEquals('sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e', e.out["out"][0]["checksum"])
+            self.assertIsNone(e.out["out"][1])
+            self.assertEquals('sha1$a3db5c13ff90a36963278c6a39e4ee3c22e2a436', e.out["out"][2]["checksum"])
+        else:
+            self.fail("Should have raised WorkflowStatus")
+
+    def test_partial_output(self):
+        f = cwltool.factory.Factory(on_error="continue")
+        fail = f.make(get_data("tests/wf/wffail.cwl"))
+        try:
+            fail()
+        except cwltool.factory.WorkflowStatus as e:
+            self.assertEquals('sha1$e5fa44f2b31c1fb553b6021e7360d07d5d91ff5e', e.out["out1"]["checksum"])
+            self.assertIsNone(e.out["out2"])
+        else:
+            self.fail("Should have raised WorkflowStatus")
+
+
+class TestScanDeps(unittest.TestCase):
+    def test_scandeps(self):
+        obj = {
+            "id": "file:///example/foo.cwl",
+            "steps": [
+                {
+                    "id": "file:///example/foo.cwl#step1",
+                    "inputs": [{
+                        "id": "file:///example/foo.cwl#input1",
+                        "default": {
+                            "class": "File",
+                            "location": "file:///example/data.txt"
+                        }
+                    }],
+                    "run": {
+                        "id": "file:///example/bar.cwl",
+                        "inputs": [{
+                            "id": "file:///example/bar.cwl#input2",
+                            "default": {
+                                "class": "Directory",
+                                "location": "file:///example/data2",
+                                "listing": [{
+                                    "class": "File",
+                                    "location": "file:///example/data3.txt",
+                                    "secondaryFiles": [{
+                                        "class": "File",
+                                        "location": "file:///example/data5.txt"
+                                    }]
+                                }]
+                            },
+                        }, {
+                            "id": "file:///example/bar.cwl#input3",
+                            "default": {
+                                "class": "Directory",
+                                "listing": [{
+                                    "class": "File",
+                                    "location": "file:///example/data4.txt"
+                                }]
+                            }
+                        }, {
+                            "id": "file:///example/bar.cwl#input4",
+                            "default": {
+                                "class": "File",
+                                "contents": "file literal"
+                            }
+                        }]
+                    }
+                }
+            ]
+        }
+
+        def loadref(base, p):
+            if isinstance(p, dict):
+                return p
+            else:
+                raise Exception("test case can't load things")
+
+        sc = cwltool.process.scandeps(obj["id"], obj,
+                                      {"$import", "run"},
+                                      {"$include", "$schemas", "location"},
+                                      loadref)
+
+        sc.sort(key=lambda k: k["basename"])
+
+        self.assertEquals([{
+            "basename": "bar.cwl",
+            "nameroot": "bar",
+            "class": "File",
+            "nameext": ".cwl",
+            "location": "file:///example/bar.cwl"
+        },
+            {
+                "basename": "data.txt",
+                "nameroot": "data",
+                "class": "File",
+                "nameext": ".txt",
+                "location": "file:///example/data.txt"
+            },
+            {
+                "basename": "data2",
+                "class": "Directory",
+                "location": "file:///example/data2",
+                "listing": [{
+                    "basename": "data3.txt",
+                    "nameroot": "data3",
+                    "class": "File",
+                    "nameext": ".txt",
+                    "location": "file:///example/data3.txt",
+                    "secondaryFiles": [{
+                        "class": "File",
+                        "basename": "data5.txt",
+                        "location": "file:///example/data5.txt",
+                        "nameext": ".txt",
+                        "nameroot": "data5"
+                    }]
+                }]
+            }, {
+                "basename": "data4.txt",
+                "nameroot": "data4",
+                "class": "File",
+                "nameext": ".txt",
+                "location": "file:///example/data4.txt"
+            }], sc)
+
+        sc = cwltool.process.scandeps(obj["id"], obj,
+                                      set(("run"), ),
+                                      set(), loadref)
+
+        sc.sort(key=lambda k: k["basename"])
+
+        self.assertEquals([{
+            "basename": "bar.cwl",
+            "nameroot": "bar",
+            "class": "File",
+            "nameext": ".cwl",
+            "location": "file:///example/bar.cwl"
+        }], sc)
+
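+
+# A note on the two term sets scandeps() takes: the first names reference
+# fields whose targets are loaded and recursed into ("run", "$import"), the
+# second names URL-bearing fields that are collected as dependencies
+# ("location", "$include", "$schemas"). Narrowing both sets, as the second
+# call above does, shrinks the reported dependency list. Illustrative helper:
+def _scandeps_run_only(obj, loadref):
+    # Follow only "run" references and collect no URL fields, so only the
+    # referenced tool documents themselves are reported.
+    return cwltool.process.scandeps(obj["id"], obj, {"run"}, set(), loadref)
+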
+
+class TestDedup(unittest.TestCase):
+    def test_dedup(self):
+        ex = [{
+            "class": "File",
+            "location": "file:///example/a"
+        },
+            {
+                "class": "File",
+                "location": "file:///example/a"
+            },
+            {
+                "class": "File",
+                "location": "file:///example/d"
+            },
+            {
+                "class": "Directory",
+                "location": "file:///example/c",
+                "listing": [{
+                    "class": "File",
+                    "location": "file:///example/d"
+                }]
+            }]
+
+        self.assertEquals([{
+            "class": "File",
+            "location": "file:///example/a"
+        },
+            {
+                "class": "Directory",
+                "location": "file:///example/c",
+                "listing": [{
+                    "class": "File",
+                    "location": "file:///example/d"
+                }]
+            }], cwltool.pathmapper.dedup(ex))
+
+
+class TestTypeCompare(unittest.TestCase):
+    def test_typecompare(self):
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(
+            {'items': ['string', 'null'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'}))
+
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(
+            {'items': ['string'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'}))
+
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(
+            {'items': ['string', 'null'], 'type': 'array'},
+            {'items': ['string'], 'type': 'array'}))
+
+        self.assertFalse(cwltool.workflow.can_assign_src_to_sink(
+            {'items': ['string'], 'type': 'array'},
+            {'items': ['int'], 'type': 'array'}))
+
+    def test_typecomparestrict(self):
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(
+            ['string', 'null'], ['string', 'null'], strict=True))
+
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(
+            ['string'], ['string', 'null'], strict=True))
+
+        self.assertFalse(cwltool.workflow.can_assign_src_to_sink(
+            ['string', 'int'], ['string', 'null'], strict=True))
+
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(
+            {'items': ['string'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'}, strict=True))
+
+        self.assertFalse(cwltool.workflow.can_assign_src_to_sink(
+            {'items': ['string', 'int'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'}, strict=True))
+
+    def test_recordcompare(self):
+        src = {
+            'fields': [{
+                'type': {'items': 'string', 'type': 'array'},
+                'name': u'file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec/description'
+            },
+                {
+                    'type': {'items': 'File', 'type': 'array'},
+                    'name': u'file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec/vrn_file'
+                }],
+            'type': 'record',
+            'name': u'file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/wf-variantcall.cwl#vc_rec/vc_rec'
+        }
+        sink = {
+            'fields': [{
+                'type': {'items': 'string', 'type': 'array'},
+                'name': u'file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/steps/vc_output_record.cwl#vc_rec/vc_rec/description'
+            },
+                {
+                    'type': {'items': 'File', 'type': 'array'},
+                    'name': u'file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/steps/vc_output_record.cwl#vc_rec/vc_rec/vrn_file'
+                }],
+            'type': 'record',
+            'name': u'file:///home/chapmanb/drive/work/cwl/test_bcbio_cwl/run_info-cwl-workflow/steps/vc_output_record.cwl#vc_rec/vc_rec'}
+
+        self.assertTrue(cwltool.workflow.can_assign_src_to_sink(src, sink))
+
+        self.assertFalse(cwltool.workflow.can_assign_src_to_sink(src, {'items': 'string', 'type': 'array'}))
+
+    def test_typecheck(self):
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'], ['string', 'int', 'null'], linkMerge=None, valueFrom=None),
+            "pass")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'], ['string', 'null'], linkMerge=None, valueFrom=None),
+            "warning")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            ['File', 'int'], ['string', 'null'], linkMerge=None, valueFrom=None),
+            "exception")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['string', 'int'], 'type': 'array'},
+            {'items': ['string', 'int', 'null'], 'type': 'array'},
+            linkMerge=None, valueFrom=None),
+            "pass")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['string', 'int'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge=None, valueFrom=None),
+            "warning")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['File', 'int'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge=None, valueFrom=None),
+            "exception")
+
+        # check linkMerge when sinktype is not an array
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'], ['string', 'int', 'null'],
+            linkMerge="merge_nested", valueFrom=None),
+            "exception")
+
+        # check linkMerge: merge_nested
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'],
+            {'items': ['string', 'int', 'null'], 'type': 'array'},
+            linkMerge="merge_nested", valueFrom=None),
+            "pass")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'],
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_nested", valueFrom=None),
+            "warning")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            ['File', 'int'],
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_nested", valueFrom=None),
+            "exception")
+
+        # check linkMerge: merge_nested and sinktype is "Any"
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'], "Any",
+            linkMerge="merge_nested", valueFrom=None),
+            "pass")
+
+        # check linkMerge: merge_flattened
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'],
+            {'items': ['string', 'int', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "pass")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'],
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "warning")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            ['File', 'int'],
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "exception")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['string', 'int'], 'type': 'array'},
+            {'items': ['string', 'int', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "pass")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['string', 'int'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "warning")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['File', 'int'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "exception")
+
+        # check linkMerge: merge_flattened and sinktype is "Any"
+        self.assertEquals(cwltool.workflow.check_types(
+            ['string', 'int'], "Any",
+            linkMerge="merge_flattened", valueFrom=None),
+            "pass")
+
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['string', 'int'], 'type': 'array'}, "Any",
+            linkMerge="merge_flattened", valueFrom=None),
+            "pass")
+
+        # check linkMerge: merge_flattened when srctype is a list
+        self.assertEquals(cwltool.workflow.check_types(
+            [{'items': 'string', 'type': 'array'}],
+            {'items': 'string', 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom=None),
+            "pass")
+
+        # check valueFrom
+        self.assertEquals(cwltool.workflow.check_types(
+            {'items': ['File', 'int'], 'type': 'array'},
+            {'items': ['string', 'null'], 'type': 'array'},
+            linkMerge="merge_flattened", valueFrom="special value"),
+            "pass")
+
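+    # A compact restatement of the contract exercised above (illustrative,
+    # not collected by unittest): "pass" when every source alternative fits
+    # the sink, "warning" when only some do, "exception" when none do, and a
+    # non-None valueFrom always yields "pass" because the final type cannot
+    # be known statically.
+    def _check_types_demo(self):
+        assert cwltool.workflow.check_types(
+            ['int'], ['int', 'null'], linkMerge=None, valueFrom=None) == "pass"
+        assert cwltool.workflow.check_types(
+            ['File'], ['string'], linkMerge=None, valueFrom="$(self)") == "pass"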
+
+    def test_lifting(self):
+        # check that lifting the types of the process outputs to the workflow step
+        # fails if the step 'out' doesn't match.
+        with self.assertRaises(schema_salad.validate.ValidationException):
+            f = cwltool.factory.Factory()
+            echo = f.make(get_data("tests/test_bad_outputs_wf.cwl"))
+            self.assertEqual(echo(inp="foo"), {"out": "foo\n"})
+
+
+    def test_checker(self):
+        # Check that the static checker raises an exception when a source
+        # type mismatches its sink type.
+        with self.assertRaises(schema_salad.validate.ValidationException):
+            f = cwltool.factory.Factory()
+            f.make("tests/checker_wf/broken-wf.cwl")
+        with self.assertRaises(schema_salad.validate.ValidationException):
+            f = cwltool.factory.Factory()
+            f.make("tests/checker_wf/broken-wf2.cwl")
+
+
+class TestPrintDot(unittest.TestCase):
+    def test_print_dot(self):
+        # --print-dot should render the workflow graph and exit cleanly.
+        self.assertEquals(main(["--print-dot", get_data('tests/wf/revsort.cwl')]), 0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_ext.py b/tests/test_ext.py
new file mode 100644
index 0000000..6f690d3
--- /dev/null
+++ b/tests/test_ext.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import
+import os
+import shutil
+import tempfile
+import unittest
+import pytest
+
+import cwltool.expression as expr
+import cwltool.factory
+import cwltool.pathmapper
+import cwltool.process
+import cwltool.workflow
+from cwltool.main import main
+from cwltool.utils import onWindows
+from .util import get_data
+
+
+@pytest.mark.skipif(onWindows(),
+                    reason="an instance of cwltool is used; on Windows it invokes a default Docker container")
+class TestListing(unittest.TestCase):
+    def test_missing_enable_ext(self):
+        # Require that --enable-ext is provided.
+        self.assertEquals(main([get_data('tests/wf/listing_deep.cwl'), get_data('tests/listing-job.yml')]), 1)
+
+    def test_listing_deep(self):
+        # Should succeed.
+        self.assertEquals(main(["--enable-ext", get_data('tests/wf/listing_deep.cwl'), get_data('tests/listing-job.yml')]), 0)
+
+    def test_listing_shallow(self):
+        # This fails on purpose: it reads a subdirectory listing the same way
+        # listing_deep does, but shallow_listing only expands the top level.
+        self.assertEquals(main(["--enable-ext", get_data('tests/wf/listing_shallow.cwl'), get_data('tests/listing-job.yml')]), 1)
+
+    def test_listing_none(self):
+        # This fails on purpose: with no_listing the listing field is never populated.
+        self.assertEquals(main(["--enable-ext", get_data('tests/wf/listing_none.cwl'), get_data('tests/listing-job.yml')]), 1)
+
+    def test_listing_v1_0(self):
+        # Default behavior in CWL v1.0 is deep expansion.
+        self.assertEquals(main([get_data('tests/wf/listing_v1_0.cwl'), get_data('tests/listing-job.yml')]), 0)
+
+    # def test_listing_v1_1(self):
+    #     # Default behavior in 1.1 will be no expansion
+    #     self.assertEquals(main([get_data('tests/wf/listing_v1_1.cwl'), get_data('tests/listing-job.yml')]), 1)
+
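+# Background for the listing tests above: cwltool's LoadListingRequirement
+# extension (activated with --enable-ext) controls how Directory inputs are
+# populated. deep_listing fills .listing recursively, shallow_listing fills
+# only the top level, and no_listing leaves it unset, so expressions that
+# reach into sub-listings fail. The relevant tool fragment looks like this
+# (see tests/wf/listing_deep.cwl):
+#
+#   requirements:
+#     cwltool:LoadListingRequirement:
+#       loadListing: deep_listing
+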
+@pytest.mark.skipif(onWindows(),
+                    reason="InplaceUpdate uses symlinks, which require admin privileges on Windows")
+class TestInplaceUpdate(unittest.TestCase):
+
+    def test_updateval(self):
+        try:
+            tmp = tempfile.mkdtemp()
+            with open(os.path.join(tmp, "value"), "w") as f:
+                f.write("1")
+            out = tempfile.mkdtemp()
+            self.assertEquals(main(["--outdir", out, get_data('tests/wf/updateval.cwl'), "-r", os.path.join(tmp, "value")]), 0)
+
+            with open(os.path.join(tmp, "value"), "r") as f:
+                self.assertEquals("1", f.read())
+            with open(os.path.join(out, "value"), "r") as f:
+                self.assertEquals("2", f.read())
+        finally:
+            shutil.rmtree(tmp)
+            shutil.rmtree(out)
+
+    def test_updateval_inplace(self):
+        try:
+            tmp = tempfile.mkdtemp()
+            with open(os.path.join(tmp, "value"), "w") as f:
+                f.write("1")
+            out = tempfile.mkdtemp()
+            self.assertEquals(main(["--enable-ext", "--leave-outputs", "--outdir", out, get_data('tests/wf/updateval_inplace.cwl'), "-r", os.path.join(tmp, "value")]), 0)
+
+            with open(os.path.join(tmp, "value"), "r") as f:
+                self.assertEquals("2", f.read())
+            self.assertFalse(os.path.exists(os.path.join(out, "value")))
+        finally:
+            shutil.rmtree(tmp)
+            shutil.rmtree(out)
+
+    def test_write_write_conflict(self):
+        try:
+            tmp = tempfile.mkdtemp()
+            with open(os.path.join(tmp, "value"), "w") as f:
+                f.write("1")
+
+            self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut.cwl'), "-a", os.path.join(tmp, "value")]), 1)
+            with open(os.path.join(tmp, "value"), "r") as f:
+                self.assertEquals("2", f.read())
+        finally:
+            shutil.rmtree(tmp)
+
+    def test_sequencing(self):
+        try:
+            tmp = tempfile.mkdtemp()
+            with open(os.path.join(tmp, "value"), "w") as f:
+                f.write("1")
+
+            self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut2.cwl'), "-a", os.path.join(tmp, "value")]), 0)
+            with open(os.path.join(tmp, "value"), "r") as f:
+                self.assertEquals("3", f.read())
+        finally:
+            shutil.rmtree(tmp)
+
+    # def test_read_write_conflict(self):
+    #     try:
+    #         tmp = tempfile.mkdtemp()
+    #         with open(os.path.join(tmp, "value"), "w") as f:
+    #             f.write("1")
+
+    #         self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut3.cwl'), "-a", os.path.join(tmp, "value")]), 0)
+    #     finally:
+    #         shutil.rmtree(tmp)
+
+    def test_updatedir(self):
+        try:
+            tmp = tempfile.mkdtemp()
+            with open(os.path.join(tmp, "value"), "w") as f:
+                f.write("1")
+            out = tempfile.mkdtemp()
+
+            self.assertFalse(os.path.exists(os.path.join(tmp, "blurb")))
+            self.assertFalse(os.path.exists(os.path.join(out, "blurb")))
+
+            self.assertEquals(main(["--outdir", out, get_data('tests/wf/updatedir.cwl'), "-r", tmp]), 0)
+
+            self.assertFalse(os.path.exists(os.path.join(tmp, "blurb")))
+            self.assertTrue(os.path.exists(os.path.join(out, "inp/blurb")))
+        finally:
+            shutil.rmtree(tmp)
+            shutil.rmtree(out)
+
+    def test_updatedir_inplace(self):
+        try:
+            tmp = tempfile.mkdtemp()
+            with open(os.path.join(tmp, "value"), "w") as f:
+                f.write("1")
+            out = tempfile.mkdtemp()
+
+            self.assertFalse(os.path.exists(os.path.join(tmp, "blurb")))
+            self.assertFalse(os.path.exists(os.path.join(out, "blurb")))
+
+            self.assertEquals(main(["--enable-ext", "--leave-outputs", "--outdir", out, get_data('tests/wf/updatedir_inplace.cwl'), "-r", tmp]), 0)
+
+            self.assertTrue(os.path.exists(os.path.join(tmp, "blurb")))
+            self.assertFalse(os.path.exists(os.path.join(out, "inp/blurb")))
+        finally:
+            shutil.rmtree(tmp)
+            shutil.rmtree(out)
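+
+# Background for the InplaceUpdate tests above: with the
+# cwltool:InplaceUpdateRequirement extension (plus --enable-ext), writable
+# InitialWorkDir entries are staged via symlinks and mutated in place, which
+# is why the source file's contents change and no copy lands in the output
+# directory; without the extension the input is copied first. cwltool's
+# mutation manager also rejects two steps writing to the same file, hence
+# the expected exit code 1 in test_write_write_conflict.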
diff --git a/tests/test_fetch.py b/tests/test_fetch.py
new file mode 100644
index 0000000..e30fb47
--- /dev/null
+++ b/tests/test_fetch.py
@@ -0,0 +1,54 @@
+from __future__ import absolute_import
+import unittest
+
+from six.moves import urllib
+
+import schema_salad.main
+import schema_salad.ref_resolver
+import schema_salad.schema
+from cwltool.load_tool import load_tool
+from cwltool.main import main
+from cwltool.workflow import defaultMakeTool
+
+
+class FetcherTest(unittest.TestCase):
+    def test_fetcher(self):
+        class TestFetcher(schema_salad.ref_resolver.Fetcher):
+            def __init__(self, a, b):
+                pass
+
+            def fetch_text(self, url):  # type: (unicode) -> unicode
+                if url == "baz:bar/foo.cwl":
+                    return """
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: echo
+inputs: []
+outputs: []
+"""
+                else:
+                    raise RuntimeError("Not foo.cwl")
+
+            def check_exists(self, url):  # type: (unicode) -> bool
+                if url == "baz:bar/foo.cwl":
+                    return True
+                else:
+                    return False
+
+            def urljoin(self, base, url):
+                urlsp = urllib.parse.urlsplit(url)
+                if urlsp.scheme:
+                    return url
+                basesp = urllib.parse.urlsplit(base)
+
+                if basesp.scheme == "keep":
+                    return base + "/" + url
+                return urllib.parse.urljoin(base, url)
+
+        def test_resolver(d, a):
+            return "baz:bar/" + a
+
+        load_tool("foo.cwl", defaultMakeTool, resolver=test_resolver, fetcher_constructor=TestFetcher)
+
+        self.assertEquals(0, main(["--print-pre", "--debug", "foo.cwl"], resolver=test_resolver,
+                                  fetcher_constructor=TestFetcher))
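+
+# Two plug points are exercised here: a resolver callback maps a bare
+# reference to a resolvable URI ("foo.cwl" -> "baz:bar/foo.cwl"), while a
+# schema_salad Fetcher subclass controls how that URI is fetched, checked
+# for existence, and joined (the "keep" branch sketches a scheme whose
+# relative joins are plain path appends). Both hooks are accepted by
+# load_tool() and main() alike.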
diff --git a/tests/test_js_sandbox.py b/tests/test_js_sandbox.py
new file mode 100644
index 0000000..e2ea137
--- /dev/null
+++ b/tests/test_js_sandbox.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+import unittest
+
+# Patch the subprocess module as imported by cwltool.sandboxjs, not the global one.
+from cwltool.sandboxjs import (check_js_threshold_version,
+                               minimum_node_version_str, subprocess)
+from mock import Mock, patch
+
+
+class Javascript_Sanity_Checks(unittest.TestCase):
+
+    def setUp(self):
+        self.check_output = subprocess.check_output
+
+    def tearDown(self):
+        subprocess.check_output = self.check_output
+
+    def test_node_version(self):
+        subprocess.check_output = Mock(return_value=b'v0.8.26\n')
+        self.assertEquals(check_js_threshold_version('node'), False)
+
+        subprocess.check_output = Mock(return_value=b'v0.10.25\n')
+        self.assertEquals(check_js_threshold_version('node'), False)
+
+        subprocess.check_output = Mock(return_value=b'v0.10.26\n')
+        self.assertEquals(check_js_threshold_version('node'), True)
+
+        subprocess.check_output = Mock(return_value=b'v4.4.2\n')
+        self.assertEquals(check_js_threshold_version('node'), True)
+
+        subprocess.check_output = Mock(return_value=b'v7.7.3\n')
+        self.assertEquals(check_js_threshold_version('node'), True)
+
+    def test_is_javascript_installed(self):
+        pass
diff --git a/tests/test_pack.py b/tests/test_pack.py
new file mode 100644
index 0000000..e49cd21
--- /dev/null
+++ b/tests/test_pack.py
@@ -0,0 +1,35 @@
+from __future__ import absolute_import
+import json
+import os
+import unittest
+from functools import partial
+
+import cwltool.pack
+import cwltool.workflow
+from cwltool.load_tool import fetch_document, validate_document
+from cwltool.main import makeRelative
+from cwltool.pathmapper import adjustDirObjs, adjustFileObjs
+
+from .util import get_data
+
+
+class TestPack(unittest.TestCase):
+    def test_pack(self):
+        self.maxDiff = None
+
+        document_loader, workflowobj, uri = fetch_document(
+            get_data("tests/wf/revsort.cwl"))
+        document_loader, avsc_names, processobj, metadata, uri = validate_document(
+            document_loader, workflowobj, uri)
+        packed = cwltool.pack.pack(document_loader, processobj, uri, metadata)
+        with open(get_data("tests/wf/expect_packed.cwl")) as f:
+            expect_packed = json.load(f)
+        adjustFileObjs(packed, partial(makeRelative,
+            os.path.abspath(get_data("tests/wf"))))
+        adjustDirObjs(packed, partial(makeRelative,
+            os.path.abspath(get_data("tests/wf"))))
+        self.assertIn("$schemas", packed)
+        del packed["$schemas"]
+        del expect_packed["$schemas"]
+
+        self.assertEqual(expect_packed, packed)
diff --git a/tests/test_pathmapper.py b/tests/test_pathmapper.py
new file mode 100644
index 0000000..ac73d82
--- /dev/null
+++ b/tests/test_pathmapper.py
@@ -0,0 +1,56 @@
+from __future__ import absolute_import
+import unittest
+
+from cwltool.pathmapper import PathMapper, normalizeFilesDirs
+
+
+class TestPathMapper(unittest.TestCase):
+    def test_subclass(self):
+        class SubPathMapper(PathMapper):
+            def __init__(self, referenced_files, basedir, stagedir, new):
+                super(SubPathMapper, self).__init__(referenced_files, basedir, stagedir)
+                self.new = new
+
+        a = SubPathMapper([], '', '', "new")
+        self.assertTrue(a.new, "new")
+
+    def test_strip_trailing(self):
+        d = {
+                "class": "Directory",
+                "location": "/foo/bar/"
+            }
+        normalizeFilesDirs(d)
+        self.assertEqual(
+            {
+                "class": "Directory",
+                "location": "/foo/bar",
+                "basename": "bar"
+            },
+            d)
+
+    def test_basename_field_generation(self):
+        base_file = {
+            "class": "File",
+            "location": "/foo/"
+        }
+        # (filename, expected: (nameroot, nameext))
+        testdata = [
+            ("foo.bar",     ("foo",     ".bar")),
+            ("foo",         ("foo",     '')),
+            (".foo",        (".foo",    '')),
+            ("foo.",        ("foo",    '.')),
+            ("foo.bar.baz", ("foo.bar", ".baz"))
+        ]
+
+        for filename, (nameroot, nameext) in testdata:
+            file = dict(base_file)
+            file["location"] = file["location"] + filename
+
+            expected = dict(file)
+            expected["basename"] = filename
+            expected["nameroot"] = nameroot
+            expected["nameext"] = nameext
+
+            normalizeFilesDirs(file)
+            self.assertEqual(file, expected)
+
diff --git a/tests/test_relax_path_checks.py b/tests/test_relax_path_checks.py
new file mode 100644
index 0000000..52bf5e4
--- /dev/null
+++ b/tests/test_relax_path_checks.py
@@ -0,0 +1,46 @@
+from __future__ import absolute_import
+import unittest
+import pytest
+from tempfile import NamedTemporaryFile
+
+from cwltool.main import main
+from cwltool.utils import onWindows
+
+
+class ToolArgparse(unittest.TestCase):
+    script = '''
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  - id: input
+    type: File
+    inputBinding:
+      position: 0
+outputs:
+  - id: output
+    type: File
+    outputBinding:
+      glob: test.txt
+stdout: test.txt
+baseCommand: [cat]
+'''
+
+    @pytest.mark.skipif(onWindows(),
+                        reason="Instance of Cwltool is used, On windows that invoke a default docker Container")
+    def test_spaces_in_input_files(self):
+        with NamedTemporaryFile(mode='w', delete=False) as f:
+            f.write(self.script)
+            f.flush()
+            f.close()
+            with NamedTemporaryFile(prefix="test with spaces", delete=False) as spaces:
+                spaces.close()
+                self.assertEquals(
+                    main(["--debug", f.name, '--input', spaces.name]), 1)
+                self.assertEquals(
+                    main(["--debug", "--relax-path-checks", f.name, '--input',
+                          spaces.name]), 0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/test_toolargparse.py b/tests/test_toolargparse.py
new file mode 100644
index 0000000..62dcca1
--- /dev/null
+++ b/tests/test_toolargparse.py
@@ -0,0 +1,117 @@
+from __future__ import absolute_import
+import unittest
+import pytest
+from tempfile import NamedTemporaryFile
+
+from cwltool.main import main
+from cwltool.utils import onWindows
+
+from .util import get_data
+
+class ToolArgparse(unittest.TestCase):
+    script = '''
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+doc: "This tool is developed for SMC-RNA Challenge for detecting gene fusions (STAR fusion)"
+inputs:
+  # The single input file to pass through
+  - id: input
+    type: File
+    inputBinding:
+      position: 0
+outputs:
+  - id: output
+    type: File
+    outputBinding:
+      glob: test.txt
+stdout: test.txt
+baseCommand: [cat]
+'''
+
+    script2 = '''
+#!/usr/bin/env cwl-runner
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  - id: bdg
+    type: "boolean"
+outputs:
+  - id: output
+    type: File
+    outputBinding:
+      glob: foo
+baseCommand:
+  - echo
+  - "ff"
+stdout: foo
+'''
+
+    script3 = '''
+#!/usr/bin/env cwl-runner
+
+cwlVersion: v1.0
+class: ExpressionTool
+
+inputs:
+  foo:
+    type:
+      type: record
+      fields:
+        one: File
+        two: string
+
+expression: $(inputs.foo.two)
+
+outputs: []
+'''
+
+    @pytest.mark.skipif(onWindows(),
+                        reason="Instance of Cwltool is used, On windows that invoke a default docker Container")
+    def test_help(self):
+        with NamedTemporaryFile(mode='w', delete=False) as f:
+            f.write(self.script)
+            f.flush()
+            f.close()
+            self.assertEquals(main(["--debug", f.name, '--input',
+                get_data('tests/echo.cwl')]), 0)
+            self.assertEquals(main(["--debug", f.name, '--input',
+                get_data('tests/echo.cwl')]), 0)
+
+
+    @pytest.mark.skipif(onWindows(),
+                        reason="Instance of Cwltool is used, On windows that invoke a default docker Container")
+    def test_bool(self):
+        with NamedTemporaryFile(mode='w', delete=False) as f:
+            f.write(self.script2)
+            f.flush()
+            f.close()
+            try:
+                self.assertEquals(main([f.name, '--help']), 0)
+            except SystemExit as e:
+                self.assertEquals(e.code, 0)
+
+    def test_record_help(self):
+        with NamedTemporaryFile(mode='w', delete=False) as f:
+            f.write(self.script3)
+            f.flush()
+            f.close()
+            try:
+                self.assertEquals(main([f.name, '--help']), 0)
+            except SystemExit as e:
+                self.assertEquals(e.code, 0)
+
+    def test_record(self):
+        with NamedTemporaryFile(mode='w', delete=False) as f:
+            f.write(self.script3)
+            f.flush()
+            f.close()
+            try:
+                self.assertEquals(main([f.name, '--foo.one',
+                    get_data('tests/echo.cwl'), '--foo.two', 'test']), 0)
+            except SystemExit as e:
+                self.assertEquals(e.code, 0)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/tests/tmp1/tmp2/tmp3/.gitkeep b/tests/tmp1/tmp2/tmp3/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/tests/util.py b/tests/util.py
new file mode 100644
index 0000000..3195a30
--- /dev/null
+++ b/tests/util.py
@@ -0,0 +1,19 @@
+from __future__ import absolute_import
+import os
+
+from pkg_resources import (Requirement, ResolutionError,  # type: ignore
+                           resource_filename)
+
+
+def get_data(filename):
+    filename = os.path.normpath(
+        filename)  # normalize for the current OS so later path joins behave correctly
+    filepath = None
+    try:
+        filepath = resource_filename(
+            Requirement.parse("cwltool"), filename)
+    except ResolutionError:
+        pass
+    if not filepath or not os.path.isfile(filepath):
+        filepath = os.path.join(os.path.dirname(__file__), os.pardir, filename)
+    return filepath
diff --git a/tests/wf/badout1.cwl b/tests/wf/badout1.cwl
new file mode 100644
index 0000000..1142b71
--- /dev/null
+++ b/tests/wf/badout1.cwl
@@ -0,0 +1,13 @@
+class: CommandLineTool
+cwlVersion: v1.0
+baseCommand: touch
+arguments: [file1]
+requirements:
+  InlineJavascriptRequirement: {}
+inputs: []
+outputs:
+  out:
+    type: File
+    outputBinding:
+      outputEval: |
+        $({"class": "File", "path": runtime.outdir+"/file2"})
\ No newline at end of file
diff --git a/tests/wf/badout2.cwl b/tests/wf/badout2.cwl
new file mode 100644
index 0000000..cdf5de8
--- /dev/null
+++ b/tests/wf/badout2.cwl
@@ -0,0 +1,13 @@
+class: CommandLineTool
+cwlVersion: v1.0
+baseCommand: touch
+arguments: [file1]
+requirements:
+  InlineJavascriptRequirement: {}
+inputs: []
+outputs:
+  out:
+    type: Directory
+    outputBinding:
+      outputEval: |
+        $({"class": "Directory", "path": runtime.outdir+"/file1"})
\ No newline at end of file
diff --git a/tests/wf/badout3.cwl b/tests/wf/badout3.cwl
new file mode 100644
index 0000000..8fc4b8c
--- /dev/null
+++ b/tests/wf/badout3.cwl
@@ -0,0 +1,13 @@
+class: CommandLineTool
+cwlVersion: v1.0
+baseCommand: touch
+arguments: [file1]
+requirements:
+  InlineJavascriptRequirement: {}
+inputs: []
+outputs:
+  out:
+    type: Directory
+    outputBinding:
+      outputEval: |
+        $({"class": "File", "path": runtime.outdir+"/file1"})
\ No newline at end of file
diff --git a/tests/wf/cat.cwl b/tests/wf/cat.cwl
new file mode 100644
index 0000000..a63cf80
--- /dev/null
+++ b/tests/wf/cat.cwl
@@ -0,0 +1,6 @@
+class: CommandLineTool
+cwlVersion: v1.0
+inputs:
+  r: File
+outputs: []
+arguments: [cat, $(inputs.r.path)]
\ No newline at end of file
diff --git a/tests/wf/default_path.cwl b/tests/wf/default_path.cwl
new file mode 100644
index 0000000..273fe56
--- /dev/null
+++ b/tests/wf/default_path.cwl
@@ -0,0 +1,11 @@
+cwlVersion: v1.0
+class: CommandLineTool
+inputs:
+  - id: "file1"
+    type: File
+    default:
+      class: File
+      path: default.txt
+outputs: []
+arguments: [cat, $(inputs.file1.path)]
+
diff --git a/tests/wf/echo.cwl b/tests/wf/echo.cwl
new file mode 100644
index 0000000..0dbbffc
--- /dev/null
+++ b/tests/wf/echo.cwl
@@ -0,0 +1,24 @@
+class: CommandLineTool
+cwlVersion: v1.0
+inputs:
+  r: string
+  script:
+    type: string
+    default: |
+      from __future__ import print_function
+      import sys
+      print(sys.argv[1])
+      if sys.argv[1] == "2":
+        exit(1)
+      else:
+        f = open("foo"+sys.argv[1]+".txt", "wb")
+        content = sys.argv[1]+"\n"
+        f.write(content.encode('utf-8'))
+      if sys.argv[1] == "5":
+        exit(1)
+outputs:
+  out:
+    type: File
+    outputBinding:
+      glob: foo$(inputs.r).txt
+arguments: [python, -c, $(inputs.script), $(inputs.r)]
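+# Note: the embedded script exits non-zero when its argument is "2" (before
+# writing its output) or "5" (after writing it); scatterfail.cwl and
+# wffail.cwl rely on this to exercise partial-output collection when running
+# with on_error="continue" (see TestFactory in tests/test_examples.py).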
diff --git a/tests/wf/empty.ttl b/tests/wf/empty.ttl
new file mode 100644
index 0000000..e69de29
diff --git a/tests/wf/expect_packed.cwl b/tests/wf/expect_packed.cwl
new file mode 100644
index 0000000..a3c84ad
--- /dev/null
+++ b/tests/wf/expect_packed.cwl
@@ -0,0 +1,130 @@
+{
+    "cwlVersion": "v1.0",
+    "$schemas": ["file:///home/peter/work/cwltool/tests/wf/empty.ttl"],
+    "$graph": [
+        {
+            "inputs": [
+                {
+                    "doc": "The input file to be processed.",
+                    "type": "File",
+                    "id": "#main/input",
+                    "default": {
+                      "class": "File",
+                      "location": "hello.txt"
+                    }
+                },
+                {
+                    "default": true,
+                    "doc": "If true, reverse (decending) sort",
+                    "type": "boolean",
+                    "id": "#main/reverse_sort"
+                }
+            ],
+            "doc": "Reverse the lines in a document, then sort those lines.",
+            "class": "Workflow",
+            "steps": [
+                {
+                    "out": [
+                        "#main/rev/output"
+                    ],
+                    "run": "#revtool.cwl",
+                    "id": "#main/rev",
+                    "in": [
+                        {
+                            "source": "#main/input",
+                            "id": "#main/rev/input"
+                        }
+                    ]
+                },
+                {
+                    "out": [
+                        "#main/sorted/output"
+                    ],
+                    "run": "#sorttool.cwl",
+                    "id": "#main/sorted",
+                    "in": [
+                        {
+                            "source": "#main/rev/output",
+                            "id": "#main/sorted/input"
+                        },
+                        {
+                            "source": "#main/reverse_sort",
+                            "id": "#main/sorted/reverse"
+                        }
+                    ]
+                }
+            ],
+            "outputs": [
+                {
+                    "outputSource": "#main/sorted/output",
+                    "type": "File",
+                    "id": "#main/output",
+                    "doc": "The output with the lines reversed and sorted."
+                }
+            ],
+            "id": "#main",
+            "hints": [
+                {
+                    "dockerPull": "debian:8",
+                    "class": "DockerRequirement"
+                }
+            ]
+        },
+        {
+            "inputs": [
+                {
+                    "inputBinding": {},
+                    "type": "File",
+                    "id": "#revtool.cwl/input"
+                }
+            ],
+            "stdout": "output.txt",
+            "doc": "Reverse each line using the `rev` command",
+            "baseCommand": "rev",
+            "class": "CommandLineTool",
+            "outputs": [
+                {
+                    "outputBinding": {
+                        "glob": "output.txt"
+                    },
+                    "type": "File",
+                    "id": "#revtool.cwl/output"
+                }
+            ],
+            "id": "#revtool.cwl"
+        },
+        {
+            "inputs": [
+                {
+                    "inputBinding": {
+                        "position": 1,
+                        "prefix": "--reverse"
+                    },
+                    "type": "boolean",
+                    "id": "#sorttool.cwl/reverse"
+                },
+                {
+                    "inputBinding": {
+                        "position": 2
+                    },
+                    "type": "File",
+                    "id": "#sorttool.cwl/input"
+                }
+            ],
+            "stdout": "output.txt",
+            "doc": "Sort lines using the `sort` command",
+            "baseCommand": "sort",
+            "class": "CommandLineTool",
+            "outputs": [
+                {
+                    "outputBinding": {
+                        "glob": "output.txt"
+                    },
+                    "type": "File",
+                    "id": "#sorttool.cwl/output"
+                }
+            ],
+            "id": "#sorttool.cwl"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/tests/wf/hello.txt b/tests/wf/hello.txt
new file mode 100644
index 0000000..bbd22bd
--- /dev/null
+++ b/tests/wf/hello.txt
@@ -0,0 +1,6 @@
+Hello
+world
+testing
+one
+two
+three.
diff --git a/tests/wf/listing_deep.cwl b/tests/wf/listing_deep.cwl
new file mode 100644
index 0000000..62bff5c
--- /dev/null
+++ b/tests/wf/listing_deep.cwl
@@ -0,0 +1,12 @@
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: "http://commonwl.org/cwltool#"
+requirements:
+  cwltool:LoadListingRequirement:
+    loadListing: deep_listing
+inputs:
+  d: Directory
+outputs: []
+arguments:
+  [echo, "$(inputs.d.listing[0].listing[0])"]
diff --git a/tests/wf/listing_none.cwl b/tests/wf/listing_none.cwl
new file mode 100644
index 0000000..9e2a511
--- /dev/null
+++ b/tests/wf/listing_none.cwl
@@ -0,0 +1,12 @@
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+requirements:
+  cwltool:LoadListingRequirement:
+    loadListing: no_listing
+inputs:
+  d: Directory
+outputs: []
+arguments:
+  [echo, "$(inputs.d.listing[0])"]
diff --git a/tests/wf/listing_shallow.cwl b/tests/wf/listing_shallow.cwl
new file mode 100644
index 0000000..6e2b569
--- /dev/null
+++ b/tests/wf/listing_shallow.cwl
@@ -0,0 +1,12 @@
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+requirements:
+  cwltool:LoadListingRequirement:
+    loadListing: shallow_listing
+inputs:
+  d: Directory
+outputs: []
+arguments:
+  [echo, "$(inputs.d.listing[0].listing[0])"]
diff --git a/tests/wf/listing_v1_0.cwl b/tests/wf/listing_v1_0.cwl
new file mode 100644
index 0000000..38aff19
--- /dev/null
+++ b/tests/wf/listing_v1_0.cwl
@@ -0,0 +1,7 @@
+class: CommandLineTool
+cwlVersion: v1.0
+inputs:
+  d: Directory
+outputs: []
+arguments:
+  [echo, "$(inputs.d.listing[0].listing[0])"]
diff --git a/tests/wf/missing_cwlVersion.cwl b/tests/wf/missing_cwlVersion.cwl
new file mode 100644
index 0000000..951842d
--- /dev/null
+++ b/tests/wf/missing_cwlVersion.cwl
@@ -0,0 +1,31 @@
+#!/usr/bin/env cwl-runner
+class: Workflow
+
+label: "Hello World"
+doc: "Outputs a message using echo"
+
+inputs: []
+
+outputs:
+  response:
+    outputSource: step0/response
+    type: File
+
+steps:
+  step0:
+    run:
+      class: CommandLineTool
+      inputs:
+        message:
+          type: string
+          doc: "The message to print"
+          default: "Hello World"
+          inputBinding:
+            position: 1
+      baseCommand: echo
+      stdout: response.txt
+      outputs:
+        response:
+          type: stdout
+    in: []
+    out: [response]
diff --git a/tests/wf/mut.cwl b/tests/wf/mut.cwl
new file mode 100644
index 0000000..6c3a473
--- /dev/null
+++ b/tests/wf/mut.cwl
@@ -0,0 +1,16 @@
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  a: File
+outputs: []
+steps:
+  step1:
+    in:
+      r: a
+    out: []
+    run: updateval_inplace.cwl
+  step2:
+    in:
+      r: a
+    out: []
+    run: updateval_inplace.cwl
diff --git a/tests/wf/mut2.cwl b/tests/wf/mut2.cwl
new file mode 100644
index 0000000..6f7fd94
--- /dev/null
+++ b/tests/wf/mut2.cwl
@@ -0,0 +1,19 @@
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  a: File
+outputs:
+  out:
+    type: File
+    outputSource: step2/out
+steps:
+  step1:
+    in:
+      r: a
+    out: [out]
+    run: updateval_inplace.cwl
+  step2:
+    in:
+      r: step1/out
+    out: [out]
+    run: updateval_inplace.cwl
diff --git a/tests/wf/mut3.cwl b/tests/wf/mut3.cwl
new file mode 100644
index 0000000..cf19c72
--- /dev/null
+++ b/tests/wf/mut3.cwl
@@ -0,0 +1,21 @@
+cwlVersion: v1.0
+class: Workflow
+inputs:
+  a: File
+outputs: []
+steps:
+  step1:
+    in:
+      r: a
+    out: []
+    run: cat.cwl
+  step2:
+    in:
+      r: a
+    out: []
+    run: cat.cwl
+  step3:
+    in:
+      r: a
+    out: []
+    run: updateval_inplace.cwl
diff --git a/tests/wf/revsort-job.json b/tests/wf/revsort-job.json
new file mode 100644
index 0000000..f5671aa
--- /dev/null
+++ b/tests/wf/revsort-job.json
@@ -0,0 +1,6 @@
+{
+  "input": {
+    "class": "File",
+    "location": "whale.txt"
+  }
+}
diff --git a/tests/wf/revsort.cwl b/tests/wf/revsort.cwl
new file mode 100644
index 0000000..d4d5a47
--- /dev/null
+++ b/tests/wf/revsort.cwl
@@ -0,0 +1,68 @@
+#
+# This is a two-step workflow that uses the "revtool" and "sorttool" tools
+# defined in revtool.cwl and sorttool.cwl.
+#
+class: Workflow
+doc: "Reverse the lines in a document, then sort those lines."
+cwlVersion: v1.0
+
+# Requirements & hints specify prerequisites and extensions to the workflow.
+# In this example, DockerRequirement specifies a default Docker container
+# in which the command line tools will execute.
+hints:
+  - class: DockerRequirement
+    dockerPull: debian:8
+
+
+# The inputs array defines the structure of the input object that describes
+# the inputs to the workflow.
+#
+# The "reverse_sort" input parameter demonstrates the "default" field.  If the
+# field "reverse_sort" is not provided in the input object, the default value will
+# be used.
+inputs:
+  input:
+    type: File
+    doc: "The input file to be processed."
+    default:
+      class: File
+      location: hello.txt
+  reverse_sort:
+    type: boolean
+    default: true
+    doc: "If true, reverse (decending) sort"
+
+# The "outputs" array defines the structure of the output object that describes
+# the outputs of the workflow.
+#
+# Each output field must be connected to the output of one of the workflow
+# steps using the "outputSource" field.  Here, the workflow parameter "output"
+# comes from the "output" parameter of the "sorted" step.
+outputs:
+  output:
+    type: File
+    outputSource: sorted/output
+    doc: "The output with the lines reversed and sorted."
+
+# The "steps" array lists the executable steps that make up the workflow.
+# The tool to execute each step is listed in the "run" field.
+#
+# In the first step, the "inputs" field of the step connects the upstream
+# parameter "#input" of the workflow to the input parameter of the tool
+# "revtool.cwl#input"
+#
+# In the second step, the "inputs" field of the step connects the output
+# parameter "#reversed" from the first step to the input parameter of the
+# tool "sorttool.cwl#input".
+steps:
+  rev:
+    in:
+      input: input
+    out: [output]
+    run: revtool.cwl
+
+  sorted:
+    in:
+      input: rev/output
+      reverse: reverse_sort
+    out: [output]
+    run: sorttool.cwl
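+
+# A matching job object ships alongside this file as revsort-job.json; a
+# typical invocation (assuming a cwltool install on the PATH) would be:
+#
+#   cwltool revsort.cwl revsort-job.json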
diff --git a/tests/wf/revtool.cwl b/tests/wf/revtool.cwl
new file mode 100644
index 0000000..b5e5af9
--- /dev/null
+++ b/tests/wf/revtool.cwl
@@ -0,0 +1,39 @@
+#
+# Simplest example command line program wrapper for the Unix tool "rev".
+#
+class: CommandLineTool
+cwlVersion: v1.0
+doc: "Reverse each line using the `rev` command"
+$schemas:
+  - empty.ttl
+
+# The "inputs" array defines the structure of the input object that describes
+# the inputs to the underlying program.  Here, there is one input field
+# defined that will be called "input" and will contain a "File" object.
+#
+# The input binding indicates that the input value should be turned into a
+# command line argument.  In this example inputBinding is an empty object,
+# which indicates that the file name should be added to the command line at
+# a default location.
+inputs:
+  input:
+    type: File
+    inputBinding: {}
+
+# The "outputs" array defines the structure of the output object that
+# describes the outputs of the underlying program.  Here, there is one
+# output field defined that will be called "output", must be a "File" type,
+# and after the program executes, the output value will be the file
+# output.txt in the designated output directory.
+outputs:
+  output:
+    type: File
+    outputBinding:
+      glob: output.txt
+
+# The actual program to execute.
+baseCommand: rev
+
+# Specify that the standard output stream must be redirected to a file called
+# output.txt in the designated output directory.
+stdout: output.txt
diff --git a/tests/wf/scatterfail.cwl b/tests/wf/scatterfail.cwl
new file mode 100644
index 0000000..33b1faa
--- /dev/null
+++ b/tests/wf/scatterfail.cwl
@@ -0,0 +1,39 @@
+class: Workflow
+cwlVersion: v1.0
+requirements:
+  ScatterFeatureRequirement: {}
+  SubworkflowFeatureRequirement: {}
+inputs:
+  range:
+    type: string[]
+    default: ["1", "2", "3"]
+outputs:
+  out:
+    type: File[]
+    outputSource: step1/out
+steps:
+  step1:
+    in:
+      r: range
+    scatter: r
+    out: [out]
+    run:
+      class: Workflow
+      id: subtool
+      inputs:
+        r: string
+      outputs:
+        out:
+          type: File
+          outputSource: sstep1/out
+      steps:
+        sstep1:
+          in:
+            r: r
+          out: [out]
+          run: echo.cwl
+        sstep2:
+          in:
+            r: sstep1/out
+          out: []
+          run: cat.cwl
diff --git a/tests/wf/sorttool.cwl b/tests/wf/sorttool.cwl
new file mode 100644
index 0000000..a485321
--- /dev/null
+++ b/tests/wf/sorttool.cwl
@@ -0,0 +1,35 @@
+# Example command line program wrapper for the Unix tool "sort"
+# demonstrating command line flags.
+class: CommandLineTool
+doc: "Sort lines using the `sort` command"
+cwlVersion: v1.0
+
+# This example is similar to the previous one, with an additional input
+# parameter called "reverse".  It is a boolean parameter, which is
+# interpreted as a command line flag.  The value of "prefix" is the flag
+# to put on the command line if "reverse" is true; if "reverse" is
+# false, no flag is added.
+#
+# This example also introduces the "position" field.  This indicates the
+# sorting order of items on the command line.  Lower numbers are placed
+# before higher numbers.  Here, the "--reverse" flag (if present) will be
+# added to the command line before the input file path.
+inputs:
+  - id: reverse
+    type: boolean
+    inputBinding:
+      position: 1
+      prefix: "--reverse"
+  - id: input
+    type: File
+    inputBinding:
+      position: 2
+
+outputs:
+  - id: output
+    type: File
+    outputBinding:
+      glob: output.txt
+
+baseCommand: sort
+stdout: output.txt
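
To make the prefix/position behavior concrete, a hypothetical job object for
this tool; with reverse set to true the runner builds roughly
"sort --reverse <staged input path>" (position 1 places the flag before the
position 2 file), and with reverse set to false the flag is omitted entirely:

    # sorttool-job.yml (hypothetical)
    reverse: true
    input:
      class: File
      location: lines.txt   # file whose lines will be sorted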
diff --git a/tests/wf/updatedir.cwl b/tests/wf/updatedir.cwl
new file mode 100644
index 0000000..3a32221
--- /dev/null
+++ b/tests/wf/updatedir.cwl
@@ -0,0 +1,16 @@
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entry: $(inputs.r)
+        entryname: inp
+        writable: true
+inputs:
+  r: Directory
+outputs:
+  out:
+    type: Directory
+    outputBinding:
+      glob: inp
+arguments: [touch, inp/blurb]
\ No newline at end of file
diff --git a/tests/wf/updatedir_inplace.cwl b/tests/wf/updatedir_inplace.cwl
new file mode 100644
index 0000000..9c98a5d
--- /dev/null
+++ b/tests/wf/updatedir_inplace.cwl
@@ -0,0 +1,20 @@
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: http://commonwl.org/cwltool#
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entry: $(inputs.r)
+        entryname: inp
+        writable: true
+  cwltool:InplaceUpdateRequirement:
+    inplaceUpdate: true
+inputs:
+  r: Directory
+outputs:
+  out:
+    type: Directory
+    outputBinding:
+      glob: inp
+arguments: [touch, inp/blurb]
\ No newline at end of file
diff --git a/tests/wf/updateval.cwl b/tests/wf/updateval.cwl
new file mode 100644
index 0000000..63e2837
--- /dev/null
+++ b/tests/wf/updateval.cwl
@@ -0,0 +1,20 @@
+class: CommandLineTool
+cwlVersion: v1.0
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entry: $(inputs.r)
+        writable: true
+inputs:
+  r: File
+  script:
+    type: File
+    default:
+      class: File
+      location: updateval.py
+outputs:
+  out:
+    type: File
+    outputBinding:
+      glob: $(inputs.r.basename)
+arguments: [python, $(inputs.script), $(inputs.r.basename)]
\ No newline at end of file
diff --git a/tests/wf/updateval.py b/tests/wf/updateval.py
new file mode 100644
index 0000000..abd9a40
--- /dev/null
+++ b/tests/wf/updateval.py
@@ -0,0 +1,6 @@
+import sys
+
+# Increment the integer stored in the file given as the first argument.
+with open(sys.argv[1], "r+") as f:
+    val = int(f.read())
+    f.seek(0), f.write(str(val + 1))  # rewind so the new value overwrites the old
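
A sketch of a job object for the updateval.cwl tool above; the input file
name is hypothetical.  Because the staged copy of "r" is writable, the
script increments the integer in place, and the output glob then picks up
the modified file:

    # updateval-job.yml (hypothetical)
    r:
      class: File
      location: value.txt   # a file containing a single integer, e.g. "5"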
diff --git a/tests/wf/updateval_inplace.cwl b/tests/wf/updateval_inplace.cwl
new file mode 100644
index 0000000..6c032fb
--- /dev/null
+++ b/tests/wf/updateval_inplace.cwl
@@ -0,0 +1,24 @@
+class: CommandLineTool
+cwlVersion: v1.0
+$namespaces:
+  cwltool: "http://commonwl.org/cwltool#"
+requirements:
+  InitialWorkDirRequirement:
+    listing:
+      - entry: $(inputs.r)
+        writable: true
+  cwltool:InplaceUpdateRequirement:
+    inplaceUpdate: true
+inputs:
+  r: File
+  script:
+    type: File
+    default:
+      class: File
+      location: updateval.py
+outputs:
+  out:
+    type: File
+    outputBinding:
+      glob: $(inputs.r.basename)
+arguments: [python, $(inputs.script), $(inputs.r.basename)]
\ No newline at end of file
diff --git a/tests/wf/wffail.cwl b/tests/wf/wffail.cwl
new file mode 100644
index 0000000..cc6e76d
--- /dev/null
+++ b/tests/wf/wffail.cwl
@@ -0,0 +1,38 @@
+class: Workflow
+cwlVersion: v1.0
+inputs: []
+requirements:
+  StepInputExpressionRequirement: {}
+outputs:
+  out1:
+    type: File
+    outputSource: step1/out
+  out2:
+    type: File
+    outputSource: step2/out
+  out4:
+    type: File
+    outputSource: step4/out
+steps:
+  step1:
+    in:
+      r: {default: "1"}
+    out: [out]
+    run: echo.cwl
+  step2:
+    in:
+      r:  {default: "2"}
+    out: [out]
+    run: echo.cwl
+  step3:
+    in:
+      r:  {default: "5"}
+    out: [out]
+    run: echo.cwl
+  step4:
+    in:
+      r:
+        source: step3/out
+        valueFrom: $(inputs.r.basename)
+    out: [out]
+    run: echo.cwl
diff --git a/tests/wf/wrong_cwlVersion.cwl b/tests/wf/wrong_cwlVersion.cwl
new file mode 100644
index 0000000..3bac958
--- /dev/null
+++ b/tests/wf/wrong_cwlVersion.cwl
@@ -0,0 +1,32 @@
+#!/usr/bin/env cwl-runner
+cwlVersion: v0.1
+class: Workflow
+
+label: "Hello World"
+doc: "Outputs a message using echo"
+
+inputs: []
+
+outputs:
+  response:
+    outputSource: step0/response
+    type: File
+
+steps:
+  step0:
+    run:
+      class: CommandLineTool
+      inputs:
+        message:
+          type: string
+          doc: "The message to print"
+          default: "Hello World"
+          inputBinding:
+            position: 1
+      baseCommand: echo
+      stdout: response.txt
+      outputs:
+        response:
+          type: stdout
+    in: []
+    out: [response]

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/cwltool.git



More information about the debian-med-commit mailing list