[med-svn] [cwltool] 02/10: New upstream version 1.0.20161207161158

Andreas Tille tille at debian.org
Sun Dec 18 07:35:13 UTC 2016


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository cwltool.

commit d2d28ba105762cdcabf81fb73651796de1a6aac8
Author: Andreas Tille <tille at debian.org>
Date:   Fri Dec 9 10:29:21 2016 +0100

    New upstream version 1.0.20161207161158
---
 MANIFEST.in                                        |    2 +-
 Makefile                                           |  161 ++
 PKG-INFO                                           |   44 +-
 README.rst                                         |   38 +-
 cwltool.egg-info/PKG-INFO                          |   44 +-
 cwltool.egg-info/SOURCES.txt                       |   74 +-
 cwltool.egg-info/entry_points.txt                  |    1 -
 cwltool.egg-info/pbr.json                          |    1 +
 cwltool.egg-info/requires.txt                      |   13 +-
 cwltool/builder.py                                 |  126 +-
 cwltool/cwlNodeEngine.js                           |   13 +
 cwltool/cwlrdf.py                                  |   60 +-
 cwltool/cwltest.py                                 |  210 ---
 cwltool/docker.py                                  |   30 +-
 cwltool/docker_uid.py                              |   16 +-
 cwltool/draft2tool.py                              |  449 +++--
 cwltool/errors.py                                  |    3 +
 cwltool/expression.py                              |  240 ++-
 cwltool/factory.py                                 |   21 +-
 cwltool/job.py                                     |  365 +++-
 cwltool/load_tool.py                               |  250 +++
 cwltool/main.py                                    |  811 ++++----
 cwltool/pack.py                                    |   82 +
 cwltool/pathmapper.py                              |  264 ++-
 cwltool/process.py                                 |  505 +++--
 cwltool/resolver.py                                |   30 +
 cwltool/sandboxjs.py                               |  197 +-
 cwltool/schemas/draft-2/CommonWorkflowLanguage.yml | 1966 ++++++++++++++++++++
 cwltool/schemas/draft-3/Process.yml                |   20 +-
 cwltool/schemas/draft-3/README.md                  |    5 +-
 cwltool/schemas/draft-3/UserGuide.yml              |  859 +++++++++
 cwltool/schemas/draft-3/Workflow.yml               |    4 +-
 .../salad/schema_salad/metaschema/metaschema.yml   |    4 +-
 cwltool/schemas/draft-3/userguide-intro.md         |   21 +-
 .../schemas/v1.0/CommandLineTool-standalone.yml    |    2 +
 cwltool/schemas/v1.0/CommandLineTool.yml           |  894 +++++++++
 cwltool/schemas/v1.0/CommonWorkflowLanguage.yml    |   11 +
 cwltool/schemas/v1.0/Process.yml                   |  743 ++++++++
 cwltool/schemas/{draft-3 => v1.0}/README.md        |    4 +-
 cwltool/schemas/v1.0/UserGuide.yml                 |  869 +++++++++
 cwltool/schemas/{draft-3 => v1.0}/Workflow.yml     |  313 +++-
 cwltool/schemas/v1.0/concepts.md                   |  388 ++++
 cwltool/schemas/v1.0/contrib.md                    |   19 +
 cwltool/schemas/v1.0/intro.md                      |   21 +
 cwltool/schemas/v1.0/invocation.md                 |  153 ++
 .../salad/schema_salad/metaschema/field_name.yml   |   46 +
 .../schema_salad/metaschema/field_name_proc.yml    |    8 +
 .../schema_salad/metaschema/field_name_schema.yml  |   14 +
 .../schema_salad/metaschema/field_name_src.yml     |    8 +
 .../salad/schema_salad/metaschema/ident_res.yml    |   53 +
 .../schema_salad/metaschema/ident_res_proc.yml     |   20 +
 .../schema_salad/metaschema/ident_res_schema.yml   |   14 +
 .../schema_salad/metaschema/ident_res_src.yml      |   20 +
 .../schema_salad/metaschema/import_include.md      |  176 ++
 .../salad/schema_salad/metaschema/link_res.yml     |   55 +
 .../schema_salad/metaschema/link_res_proc.yml      |   21 +
 .../schema_salad/metaschema/link_res_schema.yml    |   16 +
 .../salad/schema_salad/metaschema/link_res_src.yml |   21 +
 .../salad/schema_salad/metaschema/metaschema.yml   |  270 +--
 .../schema_salad/metaschema/metaschema_base.yml    |  164 ++
 .../v1.0/salad/schema_salad/metaschema/salad.md    |  256 +++
 .../salad/schema_salad/metaschema/vocab_res.yml    |   35 +
 .../schema_salad/metaschema/vocab_res_proc.yml     |   15 +
 .../schema_salad/metaschema/vocab_res_schema.yml   |   21 +
 .../schema_salad/metaschema/vocab_res_src.yml      |   15 +
 cwltool/schemas/v1.0/userguide-intro.md            |   28 +
 .../v1.1.0-dev1/CommandLineTool-standalone.yml     |    2 +
 cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml    |  948 ++++++++++
 .../schemas/v1.1.0-dev1/CommonWorkflowLanguage.yml |   11 +
 cwltool/schemas/v1.1.0-dev1/Process.yml            |  749 ++++++++
 cwltool/schemas/{draft-3 => v1.1.0-dev1}/README.md |    4 +-
 cwltool/schemas/v1.1.0-dev1/UserGuide.yml          |  869 +++++++++
 .../schemas/{draft-3 => v1.1.0-dev1}/Workflow.yml  |  328 ++--
 cwltool/schemas/v1.1.0-dev1/concepts.md            |  389 ++++
 cwltool/schemas/v1.1.0-dev1/contrib.md             |   19 +
 cwltool/schemas/v1.1.0-dev1/intro.md               |   21 +
 cwltool/schemas/v1.1.0-dev1/invocation.md          |  153 ++
 .../salad/schema_salad/metaschema/field_name.yml   |   46 +
 .../schema_salad/metaschema/field_name_proc.yml    |    8 +
 .../schema_salad/metaschema/field_name_schema.yml  |   14 +
 .../schema_salad/metaschema/field_name_src.yml     |    8 +
 .../salad/schema_salad/metaschema/ident_res.yml    |   53 +
 .../schema_salad/metaschema/ident_res_proc.yml     |   20 +
 .../schema_salad/metaschema/ident_res_schema.yml   |   14 +
 .../schema_salad/metaschema/ident_res_src.yml      |   20 +
 .../schema_salad/metaschema/import_include.md      |  176 ++
 .../salad/schema_salad/metaschema/link_res.yml     |   55 +
 .../schema_salad/metaschema/link_res_proc.yml      |   21 +
 .../schema_salad/metaschema/link_res_schema.yml    |   16 +
 .../salad/schema_salad/metaschema/link_res_src.yml |   21 +
 .../salad/schema_salad/metaschema/metaschema.yml   |  270 +--
 .../schema_salad/metaschema/metaschema_base.yml    |  164 ++
 .../salad/schema_salad/metaschema/salad.md         |  256 +++
 .../salad/schema_salad/metaschema/vocab_res.yml    |   35 +
 .../schema_salad/metaschema/vocab_res_proc.yml     |   15 +
 .../schema_salad/metaschema/vocab_res_schema.yml   |   21 +
 .../schema_salad/metaschema/vocab_res_src.yml      |   15 +
 cwltool/schemas/v1.1.0-dev1/userguide-intro.md     |   28 +
 cwltool/stdfsaccess.py                             |   30 +-
 cwltool/update.py                                  |  291 ++-
 cwltool/utils.py                                   |    2 +-
 cwltool/workflow.py                                |  413 ++--
 ez_setup.py                                        |  259 ++-
 setup.cfg                                          |    5 +-
 setup.py                                           |   37 +-
 105 files changed, 15030 insertions(+), 2398 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index 624b1d1..713b22b 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1 +1 @@
-include gittaggers.py ez_setup.py
+include gittaggers.py ez_setup.py Makefile
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..6544feb
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,161 @@
+# This file is part of cwltool,
+# https://github.com/common-workflow-language/cwltool/, and is
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Contact: common-workflow-language at googlegroups.com
+
+# make pep8 to check for basic Python code compliance
+# make autopep8 to fix most pep8 errors
+# make pylint to check Python code for enhanced compliance including naming
+#  and documentation
+# make coverage-report to check coverage of the python scripts by the tests
+
+MODULE=cwltool
+
+# `SHELL=bash` doesn't work for some environments, so don't use BASH-isms like
+# `[[` conditional expressions.
+PYSOURCES=$(wildcard ${MODULE}/**.py tests/*.py) setup.py
+DEVPKGS=pep8 diff_cover autopep8 pylint coverage pep257 flake8
+DEBDEVPKGS=pep8 python-autopep8 pylint python-coverage pep257 sloccount python-flake8
+VERSION=1.0.$(shell date +%Y%m%d%H%M%S --date=`git log --first-parent \
+	--max-count=1 --format=format:%cI`)
+mkfile_dir := $(dir $(abspath $(lastword $(MAKEFILE_LIST))))
+
+## all         : default task
+all:
+	./setup.py develop
+
+## help        : print this help message and exit
+help: Makefile
+	@sed -n 's/^##//p' $<
+
+## install-dep : install most of the development dependencies via pip
+install-dep:
+	pip install --upgrade $(DEVPKGS)
+
+## install-deb-dep: install most of the dev dependencies via apt-get
+install-deb-dep:
+	sudo apt-get install $(DEBDEVPKGS)
+
+## install     : install the ${MODULE} module and schema-salad-tool
+install: FORCE
+	./setup.py build install
+
+## dist        : create a module package for distribution
+dist: dist/${MODULE}-$(VERSION).tar.gz
+
+dist/${MODULE}-$(VERSION).tar.gz: $(PYSOURCES)
+	./setup.py sdist
+
+## clean       : clean up all temporary / machine-generated files
+clean: FORCE
+	rm -f ${MODULE}/*.pyc tests/*.pyc
+	./setup.py clean --all || true
+	rm -Rf .coverage
+	rm -f diff-cover.html
+
+## pep8        : check Python code style
+pep8: $(PYSOURCES)
+	pep8 --exclude=_version.py  --show-source --show-pep8 $^ || true
+
+pep8_report.txt: $(PYSOURCES)
+	pep8 --exclude=_version.py $^ > pep8_report.txt || true
+
+diff_pep8_report: pep8_report.txt
+	diff-quality --violations=pep8 pep8_report.txt
+
+## pep257      : check Python docstring style (PEP 257)
+pep257: $(PYSOURCES)
+	pep257 --ignore=D100,D101,D102,D103 $^ || true
+
+pep257_report.txt: $(PYSOURCES)
+	pep257 setup.py $^ > pep257_report.txt 2>&1 || true
+
+diff_pep257_report: pep257_report.txt
+	diff-quality --violations=pep8 pep257_report.txt
+
+## autopep8    : fix most Python code indentation and formatting
+autopep8: $(PYSOURCES)
+	autopep8 --recursive --in-place --ignore E309 $^
+
+# A command to automatically run astyle and autopep8 on appropriate files
+## format      : check/fix all code indentation and formatting (runs autopep8)
+format: autopep8
+	# Do nothing
+
+## pylint      : run static code analysis on Python code
+pylint: $(PYSOURCES)
+	pylint --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" \
+                $^ || true
+
+pylint_report.txt: ${PYSOURCES}
+	pylint --msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" \
+		$^ > pylint_report.txt || true
+
+diff_pylint_report: pylint_report.txt
+	diff-quality --violations=pylint pylint_report.txt
+
+.coverage: $(PYSOURCES) all
+	export COVERAGE_PROCESS_START=${mkfile_dir}.coveragerc; \
+	       cd ${CWL}; ./run_test.sh RUNNER=cwltool
+	coverage run setup.py test
+	coverage combine ${CWL} ${CWL}/draft-3/ ./
+
+coverage.xml: .coverage
+	python-coverage xml
+
+coverage.html: htmlcov/index.html
+
+htmlcov/index.html: .coverage
+	python-coverage html
+	@echo Test coverage of the Python code is now in htmlcov/index.html
+
+coverage-report: .coverage
+	python-coverage report
+
+diff-cover: coverage-gcovr.xml coverage.xml
+	diff-cover coverage-gcovr.xml coverage.xml
+
+diff-cover.html: coverage-gcovr.xml coverage.xml
+	diff-cover coverage-gcovr.xml coverage.xml \
+		--html-report diff-cover.html
+
+## test        : run the ${MODULE} test suite
+test: FORCE
+	./setup.py test
+
+sloccount.sc: ${PYSOURCES} Makefile
+	sloccount --duplicates --wide --details $^ > sloccount.sc
+
+## sloccount   : count lines of code
+sloccount: ${PYSOURCES} Makefile
+	sloccount $^
+
+list-author-emails:
+	@echo 'name, E-Mail Address'
+	@git log --format='%aN,%aE' | sort -u | grep -v 'root'
+
+
+mypy: ${PYSOURCES}
+	rm -Rf typeshed/2.7/ruamel/yaml
+	ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
+		typeshed/2.7/ruamel/yaml
+	rm -Rf typeshed/2.7/schema_salad
+	ln -s $(shell python -c 'from __future__ import print_function; import schema_salad; import os.path; print(os.path.dirname(schema_salad.__file__))') \
+		typeshed/2.7/schema_salad
+	MYPYPATH=typeshed/2.7 mypy --py2 --disallow-untyped-calls \
+		 --warn-redundant-casts --warn-unused-ignores --fast-parser \
+		 cwltool
+
+FORCE:
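
The VERSION recipe above stamps builds with the timestamp of the last
first-parent commit, which is where upstream version strings such as
1.0.20161207161158 come from. A rough Python equivalent (an illustration only,
not project code; it substitutes git's %ct unix timestamp and UTC formatting
for the Makefile's %cI and local time):

    import datetime
    import subprocess

    # Unix timestamp of the last first-parent commit.
    ts = int(subprocess.check_output(
        ["git", "log", "--first-parent", "--max-count=1", "--format=format:%ct"]))
    # Format as 1.0.YYYYmmddHHMMSS, mirroring the Makefile's VERSION variable.
    version = "1.0." + datetime.datetime.utcfromtimestamp(ts).strftime("%Y%m%d%H%M%S")
    print(version)  # e.g. 1.0.20161207161158
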
diff --git a/PKG-INFO b/PKG-INFO
index a4cb71a..b5c4a12 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,12 +1,12 @@
 Metadata-Version: 1.1
 Name: cwltool
-Version: 1.0.20160504183010
+Version: 1.0.20161207161158
 Summary: Common workflow language reference implementation
-Home-page: https://github.com/common-workflow-language/common-workflow-language
+Home-page: https://github.com/common-workflow-language/cwltool
 Author: Common workflow language working group
 Author-email: common-workflow-language at googlegroups.com
 License: Apache 2.0
-Download-URL: https://github.com/common-workflow-language/common-workflow-language
+Download-URL: https://github.com/common-workflow-language/cwltool
 Description: ==================================================================
         Common workflow language tool description reference implementation
         ==================================================================
@@ -23,29 +23,43 @@ Description: ==================================================================
         is the primary Python module containing the reference implementation in the
         "cwltool" module and console executable by the same name.
         
-        The "cwl-runner" package is optional and provides an additional entry point
+        The "cwlref-runner" package is optional and provides an additional entry point
         under the alias "cwl-runner", which is the implementation-agnostic name for the
         default CWL interpreter installed on a host.
         
         Install
         -------
         
-        Installing the official package from PyPi (will install "cwltool" package as well)::
+        Installing the official package from PyPI (will install "cwltool" package as
+        well)::
         
-          pip install cwl-runner
+          pip install cwlref-runner
         
-        Or from source::
+        If installing alongside another CWL implementation then::
+        
+          pip install cwltool
+        
+        To install from source::
         
           git clone https://github.com/common-workflow-language/cwltool.git
           cd cwltool && python setup.py install
-          cd cwl-runner && python setup.py install
+          cd cwlref-runner && python setup.py install  # co-installing? skip this
+        
+        Remember, if you co-install multiple CWL implementations you will need to
+        manage which implementation ``cwl-runner`` points to, e.g. via a symbolic
+        link or `another facility <https://wiki.debian.org/DebianAlternatives>`_.
         
         Run on the command line
         -----------------------
         
         Simple command::
         
-          cwl-runner [tool] [job]
+          cwl-runner [tool-or-workflow-description] [input-job-settings]
+        
+        Or, if you have multiple CWL implementations installed and want to override
+        the default cwl-runner, use::
+        
+          cwltool [tool-or-workflow-description] [input-job-settings]
         
         Import as a module
         ----------------
@@ -70,4 +84,16 @@ Description: ==================================================================
         .. |Build Status| image:: https://ci.commonwl.org/buildStatus/icon?job=cwltool-conformance
            :target: https://ci.commonwl.org/job/cwltool-conformance/
         
+        Tool or workflow loading from remote or local locations
+        -------------------------------------------------------
+        
+        ``cwltool`` can run tool and workflow descriptions on both local and remote
+        systems via its support for HTTP[S] URLs.
+        
+        Input job files and Workflow steps (via the `run` directive) can reference CWL
+        documents using absolute or relative local filesystem paths. If a relative path
+        is referenced and that document isn't found in the current directory, the
+        following locations will be searched:
+        http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
+        
 Platform: UNKNOWN
diff --git a/README.rst b/README.rst
index 31f8cab..b351bc5 100644
--- a/README.rst
+++ b/README.rst
@@ -14,29 +14,43 @@ The reference implementation consists of two packages.  The "cwltool" package
 is the primary Python module containing the reference implementation in the
 "cwltool" module and console executable by the same name.
 
-The "cwl-runner" package is optional and provides an additional entry point
+The "cwlref-runner" package is optional and provides an additional entry point
 under the alias "cwl-runner", which is the implementation-agnostic name for the
 default CWL interpreter installed on a host.
 
 Install
 -------
 
-Installing the official package from PyPi (will install "cwltool" package as well)::
+Installing the official package from PyPI (will install "cwltool" package as
+well)::
 
-  pip install cwl-runner
+  pip install cwlref-runner
 
-Or from source::
+If installing alongside another CWL implementation then::
+
+  pip install cwltool
+
+To install from source::
 
   git clone https://github.com/common-workflow-language/cwltool.git
   cd cwltool && python setup.py install
-  cd cwl-runner && python setup.py install
+  cd cwlref-runner && python setup.py install  # co-installing? skip this
+
+Remember, if you co-install multiple CWL implementations you will need to
+manage which implementation ``cwl-runner`` points to, e.g. via a symbolic link
+or `another facility <https://wiki.debian.org/DebianAlternatives>`_.
 
 Run on the command line
 -----------------------
 
 Simple command::
 
-  cwl-runner [tool] [job]
+  cwl-runner [tool-or-workflow-description] [input-job-settings]
+
+Or, if you have multiple CWL implementations installed and want to override
+the default cwl-runner, use::
+
+  cwltool [tool-or-workflow-description] [input-job-settings]
 
 Import as a module
 ----------------
@@ -60,3 +74,15 @@ and ``--tmp-outdir-prefix`` to somewhere under ``/Users``::
 
 .. |Build Status| image:: https://ci.commonwl.org/buildStatus/icon?job=cwltool-conformance
    :target: https://ci.commonwl.org/job/cwltool-conformance/
+
+Tool or workflow loading from remote or local locations
+-------------------------------------------------------
+
+``cwltool`` can run tool and workflow descriptions on both local and remote
+systems via its support for HTTP[S] URLs.
+
+Input job files and Workflow steps (via the `run` directive) can reference CWL
+documents using absolute or relative local filesystem paths. If a relative path
+is referenced and that document isn't found in the current directory, the
+following locations will be searched:
+http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
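
As a companion to the "Import as a module" section above, a minimal sketch of
programmatic use through cwltool.factory (hedged: echo.cwl and its inp input
are hypothetical placeholders, not files shipped with cwltool):

    import cwltool.factory

    fac = cwltool.factory.Factory()
    echo = fac.make("echo.cwl")  # load a tool description into a Python callable
    result = echo(inp="foo")     # execute it and collect the CWL output object
    print(result)
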
diff --git a/cwltool.egg-info/PKG-INFO b/cwltool.egg-info/PKG-INFO
index a4cb71a..b5c4a12 100644
--- a/cwltool.egg-info/PKG-INFO
+++ b/cwltool.egg-info/PKG-INFO
@@ -1,12 +1,12 @@
 Metadata-Version: 1.1
 Name: cwltool
-Version: 1.0.20160504183010
+Version: 1.0.20161207161158
 Summary: Common workflow language reference implementation
-Home-page: https://github.com/common-workflow-language/common-workflow-language
+Home-page: https://github.com/common-workflow-language/cwltool
 Author: Common workflow language working group
 Author-email: common-workflow-language at googlegroups.com
 License: Apache 2.0
-Download-URL: https://github.com/common-workflow-language/common-workflow-language
+Download-URL: https://github.com/common-workflow-language/cwltool
 Description: ==================================================================
         Common workflow language tool description reference implementation
         ==================================================================
@@ -23,29 +23,43 @@ Description: ==================================================================
         is the primary Python module containing the reference implementation in the
         "cwltool" module and console executable by the same name.
         
-        The "cwl-runner" package is optional and provides an additional entry point
+        The "cwlref-runner" package is optional and provides an additional entry point
         under the alias "cwl-runner", which is the implementation-agnostic name for the
         default CWL interpreter installed on a host.
         
         Install
         -------
         
-        Installing the official package from PyPi (will install "cwltool" package as well)::
+        Installing the official package from PyPI (will install "cwltool" package as
+        well)::
         
-          pip install cwl-runner
+          pip install cwlref-runner
         
-        Or from source::
+        If installing alongside another CWL implementation then::
+        
+          pip install cwltool
+        
+        To install from source::
         
           git clone https://github.com/common-workflow-language/cwltool.git
           cd cwltool && python setup.py install
-          cd cwl-runner && python setup.py install
+          cd cwlref-runner && python setup.py install  # co-installing? skip this
+        
+        Remember, if you co-install multiple CWL implementations you will need to
+        manage which implementation ``cwl-runner`` points to, e.g. via a symbolic
+        link or `another facility <https://wiki.debian.org/DebianAlternatives>`_.
         
         Run on the command line
         -----------------------
         
         Simple command::
         
-          cwl-runner [tool] [job]
+          cwl-runner [tool-or-workflow-description] [input-job-settings]
+        
+        Or, if you have multiple CWL implementations installed and want to override
+        the default cwl-runner, use::
+        
+          cwltool [tool-or-workflow-description] [input-job-settings]
         
         Import as a module
         ----------------
@@ -70,4 +84,16 @@ Description: ==================================================================
         .. |Build Status| image:: https://ci.commonwl.org/buildStatus/icon?job=cwltool-conformance
            :target: https://ci.commonwl.org/job/cwltool-conformance/
         
+        Tool or workflow loading from remote or local locations
+        -------------------------------------------------------
+        
+        ``cwltool`` can run tool and workflow descriptions on both local and remote
+        systems via its support for HTTP[S] URLs.
+        
+        Input job files and Workflow steps (via the `run` directive) can reference CWL
+        documents using absolute or relative local filesystem paths. If a relative path
+        is referenced and that document isn't found in the current directory, the
+        following locations will be searched:
+        http://www.commonwl.org/v1.0/CommandLineTool.html#Discovering_CWL_documents_on_a_local_filesystem
+        
 Platform: UNKNOWN
diff --git a/cwltool.egg-info/SOURCES.txt b/cwltool.egg-info/SOURCES.txt
index 55d0211..885743c 100644
--- a/cwltool.egg-info/SOURCES.txt
+++ b/cwltool.egg-info/SOURCES.txt
@@ -1,4 +1,5 @@
 MANIFEST.in
+Makefile
 README.rst
 ez_setup.py
 gittaggers.py
@@ -7,8 +8,8 @@ setup.py
 cwltool/__init__.py
 cwltool/__main__.py
 cwltool/builder.py
+cwltool/cwlNodeEngine.js
 cwltool/cwlrdf.py
-cwltool/cwltest.py
 cwltool/docker.py
 cwltool/docker_uid.py
 cwltool/draft2tool.py
@@ -17,9 +18,12 @@ cwltool/expression.py
 cwltool/factory.py
 cwltool/flatten.py
 cwltool/job.py
+cwltool/load_tool.py
 cwltool/main.py
+cwltool/pack.py
 cwltool/pathmapper.py
 cwltool/process.py
+cwltool/resolver.py
 cwltool/sandboxjs.py
 cwltool/stdfsaccess.py
 cwltool/update.py
@@ -29,9 +33,11 @@ cwltool.egg-info/PKG-INFO
 cwltool.egg-info/SOURCES.txt
 cwltool.egg-info/dependency_links.txt
 cwltool.egg-info/entry_points.txt
+cwltool.egg-info/pbr.json
 cwltool.egg-info/requires.txt
 cwltool.egg-info/top_level.txt
 cwltool.egg-info/zip-safe
+cwltool/schemas/draft-2/CommonWorkflowLanguage.yml
 cwltool/schemas/draft-2/cwl-avro.yml
 cwltool/schemas/draft-3/CommandLineTool-standalone.yml
 cwltool/schemas/draft-3/CommandLineTool.yml
@@ -64,4 +70,68 @@ cwltool/schemas/draft-3/salad/schema_salad/metaschema/salad.md
 cwltool/schemas/draft-3/salad/schema_salad/metaschema/vocab_res.yml
 cwltool/schemas/draft-3/salad/schema_salad/metaschema/vocab_res_proc.yml
 cwltool/schemas/draft-3/salad/schema_salad/metaschema/vocab_res_schema.yml
-cwltool/schemas/draft-3/salad/schema_salad/metaschema/vocab_res_src.yml
\ No newline at end of file
+cwltool/schemas/draft-3/salad/schema_salad/metaschema/vocab_res_src.yml
+cwltool/schemas/v1.0/CommandLineTool-standalone.yml
+cwltool/schemas/v1.0/CommandLineTool.yml
+cwltool/schemas/v1.0/CommonWorkflowLanguage.yml
+cwltool/schemas/v1.0/Process.yml
+cwltool/schemas/v1.0/README.md
+cwltool/schemas/v1.0/UserGuide.yml
+cwltool/schemas/v1.0/Workflow.yml
+cwltool/schemas/v1.0/concepts.md
+cwltool/schemas/v1.0/contrib.md
+cwltool/schemas/v1.0/intro.md
+cwltool/schemas/v1.0/invocation.md
+cwltool/schemas/v1.0/userguide-intro.md
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_proc.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_schema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_src.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_proc.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_schema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_src.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/import_include.md
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_proc.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_schema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_src.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_proc.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_schema.yml
+cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_src.yml
+cwltool/schemas/v1.1.0-dev1/CommandLineTool-standalone.yml
+cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml
+cwltool/schemas/v1.1.0-dev1/CommonWorkflowLanguage.yml
+cwltool/schemas/v1.1.0-dev1/Process.yml
+cwltool/schemas/v1.1.0-dev1/README.md
+cwltool/schemas/v1.1.0-dev1/UserGuide.yml
+cwltool/schemas/v1.1.0-dev1/Workflow.yml
+cwltool/schemas/v1.1.0-dev1/concepts.md
+cwltool/schemas/v1.1.0-dev1/contrib.md
+cwltool/schemas/v1.1.0-dev1/intro.md
+cwltool/schemas/v1.1.0-dev1/invocation.md
+cwltool/schemas/v1.1.0-dev1/userguide-intro.md
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_proc.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_schema.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_src.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_proc.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_schema.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_src.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/import_include.md
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_proc.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_schema.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_src.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema_base.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/salad.md
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_proc.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_schema.yml
+cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_src.yml
\ No newline at end of file
diff --git a/cwltool.egg-info/entry_points.txt b/cwltool.egg-info/entry_points.txt
index 1664a6f..f0089b0 100644
--- a/cwltool.egg-info/entry_points.txt
+++ b/cwltool.egg-info/entry_points.txt
@@ -1,4 +1,3 @@
 [console_scripts]
-cwltest = cwltool.cwltest:main
 cwltool = cwltool.main:main
 
diff --git a/cwltool.egg-info/pbr.json b/cwltool.egg-info/pbr.json
new file mode 100644
index 0000000..8f35d5c
--- /dev/null
+++ b/cwltool.egg-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "ab9e86e"}
\ No newline at end of file
diff --git a/cwltool.egg-info/requires.txt b/cwltool.egg-info/requires.txt
index f1b2098..f443740 100644
--- a/cwltool.egg-info/requires.txt
+++ b/cwltool.egg-info/requires.txt
@@ -1,7 +1,8 @@
-requests
-PyYAML
-rdflib >= 4.1.0
-rdflib-jsonld >= 0.3.0
+setuptools
+requests>=1.0
+ruamel.yaml == 0.12.4
+rdflib >= 4.2.0, < 4.3.0
 shellescape
-schema_salad == 1.7.20160316203940
-typing
+schema-salad >= 1.21.20161206204028, < 2
+typing >= 3.5.2
+cwltest >= 1.0.20160907111242
diff --git a/cwltool/builder.py b/cwltool/builder.py
index 36d4663..8ddc797 100644
--- a/cwltool/builder.py
+++ b/cwltool/builder.py
@@ -3,52 +3,44 @@ from .utils import aslist
 from . import expression
 import avro
 import schema_salad.validate as validate
-from typing import Any, Union, AnyStr, Callable
+from typing import Any, Callable, Text, Type, Union
 from .errors import WorkflowException
 from .stdfsaccess import StdFsAccess
-from .pathmapper import PathMapper
+from .pathmapper import PathMapper, adjustFileObjs, adjustDirObjs, normalizeFilesDirs
 
 CONTENT_LIMIT = 64 * 1024
 
 
-def substitute(value, replace):  # type: (str, str) -> str
+def substitute(value, replace):  # type: (Text, Text) -> Text
     if replace[0] == "^":
         return substitute(value[0:value.rindex('.')], replace[1:])
     else:
         return value + replace
 
-def adjustFileObjs(rec, op):  # type: (Any, Callable[[Any], Any]) -> None
-    """Apply an update function to each File object in the object `rec`."""
-
-    if isinstance(rec, dict):
-        if rec.get("class") == "File":
-            op(rec)
-        for d in rec:
-            adjustFileObjs(rec[d], op)
-    if isinstance(rec, list):
-        for d in rec:
-            adjustFileObjs(d, op)
-
 class Builder(object):
 
     def __init__(self):  # type: () -> None
         self.names = None  # type: avro.schema.Names
-        self.schemaDefs = None  # type: Dict[str,Dict[unicode, Any]]
-        self.files = None  # type: List[Dict[str, str]]
+        self.schemaDefs = None  # type: Dict[Text, Dict[Text, Any]]
+        self.files = None  # type: List[Dict[Text, Text]]
         self.fs_access = None  # type: StdFsAccess
-        self.job = None  # type: Dict[str, Any]
-        self.requirements = None  # type: List[Dict[str,Any]]
-        self.outdir = None  # type: str
-        self.tmpdir = None  # type: str
-        self.resources = None  # type: Dict[str, Union[int, str]]
-        self.bindings = []  # type: List[Dict[str, Any]]
+        self.job = None  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
+        self.requirements = None  # type: List[Dict[Text, Any]]
+        self.hints = None  # type: List[Dict[Text, Any]]
+        self.outdir = None  # type: Text
+        self.tmpdir = None  # type: Text
+        self.resources = None  # type: Dict[Text, Union[int, Text]]
+        self.bindings = []  # type: List[Dict[Text, Any]]
         self.timeout = None  # type: int
-        self.pathmapper = None # type: PathMapper
+        self.pathmapper = None  # type: PathMapper
+        self.stagedir = None  # type: Text
+        self.make_fs_access = None  # type: Type[StdFsAccess]
+        self.build_job_script = None  # type: Callable[[List[str]], Text]
 
     def bind_input(self, schema, datum, lead_pos=[], tail_pos=[]):
-        # type: (Dict[unicode, Any], Any, List[int], List[int]) -> List[Dict[str, Any]]
-        bindings = []  # type: List[Dict[str,str]]
-        binding = None  # type: Dict[str,Any]
+        # type: (Dict[Text, Any], Any, Union[int, List[int]], List[int]) -> List[Dict[Text, Any]]
+        bindings = []  # type: List[Dict[Text,Text]]
+        binding = None  # type: Dict[Text,Any]
         if "inputBinding" in schema and isinstance(schema["inputBinding"], dict):
             binding = copy.copy(schema["inputBinding"])
 
@@ -57,14 +49,12 @@ class Builder(object):
             else:
                 binding["position"] = aslist(lead_pos) + [0] + aslist(tail_pos)
 
-            if "valueFrom" in binding:
-                binding["do_eval"] = binding["valueFrom"]
-            binding["valueFrom"] = datum
+            binding["datum"] = datum
 
         # Handle union types
         if isinstance(schema["type"], list):
             for t in schema["type"]:
-                if isinstance(t, (str, unicode)) and self.names.has_name(t, ""):
+                if isinstance(t, (str, Text)) and self.names.has_name(t, ""):
                     avsc = self.names.get_name(t, "")
                 elif isinstance(t, dict) and "name" in t and self.names.has_name(t["name"], ""):
                     avsc = self.names.get_name(t["name"], "")
@@ -77,8 +67,11 @@ class Builder(object):
             raise validate.ValidationException(u"'%s' is not a valid union %s" % (datum, schema["type"]))
         elif isinstance(schema["type"], dict):
             st = copy.deepcopy(schema["type"])
-            if binding and "inputBinding" not in st and "itemSeparator" not in binding and st["type"] in ("array", "map"):
+            if binding and "inputBinding" not in st and st["type"] == "array" and "itemSeparator" not in binding:
                 st["inputBinding"] = {}
+            for k in ("secondaryFiles", "format", "streamable"):
+                if k in schema:
+                    st[k] = schema[k]
             bindings.extend(self.bind_input(st, datum, lead_pos=lead_pos, tail_pos=tail_pos))
         else:
             if schema["type"] in self.schemaDefs:
@@ -91,30 +84,27 @@ class Builder(object):
                     else:
                         datum[f["name"]] = f.get("default")
 
-            if schema["type"] == "map":
-                for n, item in datum.items():
-                    b2 = None
-                    if binding:
-                        b2 = copy.deepcopy(binding)
-                        b2["valueFrom"] = [n, item]
-                    bindings.extend(self.bind_input({"type": schema["values"], "inputBinding": b2},
-                                                    item, lead_pos=n, tail_pos=tail_pos))
-                binding = None
-
             if schema["type"] == "array":
                 for n, item in enumerate(datum):
                     b2 = None
                     if binding:
                         b2 = copy.deepcopy(binding)
-                        b2["valueFrom"] = item
-                    bindings.extend(self.bind_input({"type": schema["items"], "inputBinding": b2},
-                                                    item, lead_pos=n, tail_pos=tail_pos))
+                        b2["datum"] = item
+                    itemschema = {
+                        u"type": schema["items"],
+                        u"inputBinding": b2
+                    }
+                    for k in ("secondaryFiles", "format", "streamable"):
+                        if k in schema:
+                            itemschema[k] = schema[k]
+                    bindings.extend(
+                        self.bind_input(itemschema, item, lead_pos=n, tail_pos=tail_pos))
                 binding = None
 
             if schema["type"] == "File":
                 self.files.append(datum)
                 if binding and binding.get("loadContents"):
-                    with self.fs_access.open(datum["path"], "rb") as f:
+                    with self.fs_access.open(datum["location"], "rb") as f:
                         datum["contents"] = f.read(CONTENT_LIMIT)
 
                 if "secondaryFiles" in schema:
@@ -124,15 +114,17 @@ class Builder(object):
                         if isinstance(sf, dict) or "$(" in sf or "${" in sf:
                             secondary_eval = self.do_eval(sf, context=datum)
                             if isinstance(secondary_eval, basestring):
-                                sfpath = {"path": secondary_eval, "class": "File"}
+                                sfpath = {"location": secondary_eval,
+                                          "class": "File"}
                             else:
                                 sfpath = secondary_eval
                         else:
-                            sfpath = {"path": substitute(datum["path"], sf), "class": "File"}
+                            sfpath = {"location": substitute(datum["location"], sf), "class": "File"}
                         if isinstance(sfpath, list):
                             datum["secondaryFiles"].extend(sfpath)
                         else:
                             datum["secondaryFiles"].append(sfpath)
+                    normalizeFilesDirs(datum["secondaryFiles"])
 
                 def _capture_files(f):
                     self.files.append(f)
@@ -140,6 +132,10 @@ class Builder(object):
 
                 adjustFileObjs(datum.get("secondaryFiles", []), _capture_files)
 
+            if schema["type"] == "Directory":
+                self.files.append(datum)
+
+
         # Position to front of the sort key
         if binding:
             for bi in bindings:
@@ -148,34 +144,34 @@ class Builder(object):
 
         return bindings
 
-    def tostr(self, value):  # type: (Any) -> str
-        if isinstance(value, dict) and value.get("class") == "File":
+    def tostr(self, value):  # type: (Any) -> Text
+        if isinstance(value, dict) and value.get("class") in ("File", "Directory"):
             if "path" not in value:
-                raise WorkflowException(u"File object must have \"path\": %s" % (value))
+                raise WorkflowException(u"%s object missing \"path\": %s" % (value["class"], value))
             return value["path"]
         else:
-            return str(value)
+            return Text(value)
 
-    def generate_arg(self, binding):  # type: (Dict[str,Any]) -> List[str]
-        value = binding["valueFrom"]
-        if "do_eval" in binding:
-            value = self.do_eval(binding["do_eval"], context=value)
+    def generate_arg(self, binding):  # type: (Dict[Text,Any]) -> List[Text]
+        value = binding.get("datum")
+        if "valueFrom" in binding:
+            value = self.do_eval(binding["valueFrom"], context=value)
 
         prefix = binding.get("prefix")
         sep = binding.get("separate", True)
 
-        l = []  # type: List[Dict[str,str]]
+        l = []  # type: List[Dict[Text,Text]]
         if isinstance(value, list):
             if binding.get("itemSeparator"):
                 l = [binding["itemSeparator"].join([self.tostr(v) for v in value])]
-            elif binding.get("do_eval"):
-                value = [v["path"] if isinstance(v, dict) and v.get("class") == "File" else v for v in value]
+            elif binding.get("valueFrom"):
+                value = [self.tostr(v) for v in value]
                 return ([prefix] if prefix else []) + value
             elif prefix:
                 return [prefix]
             else:
                 return []
-        elif isinstance(value, dict) and value.get("class") == "File":
+        elif isinstance(value, dict) and value.get("class") in ("File", "Directory"):
             l = [value]
         elif isinstance(value, dict):
             return [prefix] if prefix else []
@@ -195,8 +191,14 @@ class Builder(object):
 
         return [a for a in args if a is not None]
 
-    def do_eval(self, ex, context=None, pull_image=True):
-        # type: (Dict[str,str], Any, bool) -> Any
+    def do_eval(self, ex, context=None, pull_image=True, recursive=False):
+        # type: (Union[Dict[Text, Text], Text], Any, bool, bool) -> Any
+        if recursive:
+            if isinstance(ex, dict):
+                return {k: self.do_eval(v, context, pull_image, recursive) for k,v in ex.iteritems()}
+            if isinstance(ex, list):
+                return [self.do_eval(v, context, pull_image, recursive) for v in ex]
+
         return expression.do_eval(ex, self.job, self.requirements,
                                   self.outdir, self.tmpdir,
                                   self.resources,
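
The substitute() helper at the top of builder.py implements the secondaryFiles
suffix rules: each leading "^" strips one extension from the primary file name
before the remainder is appended. A standalone restatement of the same logic
with worked examples:

    def substitute(value, replace):
        # Each leading "^" removes one ".ext" component before appending.
        if replace[0] == "^":
            return substitute(value[0:value.rindex('.')], replace[1:])
        return value + replace

    assert substitute("reads.bam", ".bai") == "reads.bam.bai"     # plain append
    assert substitute("reads.bam", "^.bai") == "reads.bai"        # strip one extension
    assert substitute("reads.fastq.gz", "^^.txt") == "reads.txt"  # strip two
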
diff --git a/cwltool/cwlNodeEngine.js b/cwltool/cwlNodeEngine.js
new file mode 100755
index 0000000..ca75f61
--- /dev/null
+++ b/cwltool/cwlNodeEngine.js
@@ -0,0 +1,13 @@
+"use strict";
+process.stdin.setEncoding('utf8');
+var incoming = "";
+process.stdin.on('data', function(chunk) {
+  incoming += chunk;
+  var i = incoming.indexOf("\n");
+  if (i > -1) {
+    var fn = JSON.parse(incoming.substr(0, i));
+    incoming = incoming.substr(i+1);
+    process.stdout.write(JSON.stringify(require("vm").runInNewContext(fn, {})) + "\n");
+  }
+});
+process.stdin.on('end', process.exit);
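
cwlNodeEngine.js speaks a simple line protocol: each stdin line is a
JSON-encoded JavaScript expression, evaluated in an empty vm context, and the
JSON-encoded result is written back as one line. A hedged sketch of a Python 2
caller (the "node" executable name and one-shot subprocess use are
illustrative assumptions, not how cwltool necessarily invokes it):

    import json
    import subprocess

    def eval_js(expression):
        # One line out: the expression, JSON-encoded; one line back: the result.
        proc = subprocess.Popen(["node", "cwlNodeEngine.js"],
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        out, _ = proc.communicate(json.dumps(expression) + "\n")
        return json.loads(out)

    print(eval_js("(function(){return 6 * 7;})()"))  # prints 42
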
diff --git a/cwltool/cwlrdf.py b/cwltool/cwlrdf.py
index 8e603b7..b3b6739 100644
--- a/cwltool/cwlrdf.py
+++ b/cwltool/cwlrdf.py
@@ -1,41 +1,27 @@
 import json
 import urlparse
+from .process import Process
 from schema_salad.ref_resolver import Loader
+from schema_salad.jsonld_context import makerdf
 from rdflib import Graph, plugin, URIRef
 from rdflib.serializer import Serializer
-from typing import Any, Union, Dict, IO
-
-def makerdf(workflow, wf, ctx):
-    # type: (str, Dict[str,Any], Loader.ContextType) -> Graph
-    prefixes = {}
-    for k,v in ctx.iteritems():
-        if isinstance(v, dict):
-            url = v["@id"]
-        else:
-            url = v
-        doc_url, frg = urlparse.urldefrag(url)
-        if "/" in frg:
-            p, _ = frg.split("/")
-            prefixes[p] = u"%s#%s/" % (doc_url, p)
-
-    wf["@context"] = ctx
-    g = Graph().parse(data=json.dumps(wf), format='json-ld', location=workflow)
-
-    # Bug in json-ld loader causes @id fields to be added to the graph
-    for s,p,o in g.triples((None, URIRef("@id"), None)):
-        g.remove((s, p, o))
-
-    for k2,v2 in prefixes.iteritems():
-        g.namespace_manager.bind(k2, v2)
+from typing import Any, Dict, IO, Text, Union
 
+def gather(tool, ctx):  # type: (Process, Loader.ContextType) -> Graph
+    g = Graph()
+
+    def visitor(t):
+        makerdf(t["id"], t, ctx, graph=g)
+
+    tool.visit(visitor)
     return g
 
-def printrdf(workflow, wf, ctx, sr, stdout):
-    # type: (str, Dict[str,Any], Loader.ContextType, str, IO[Any]) -> None
-    stdout.write(makerdf(workflow, wf, ctx).serialize(format=sr))
+def printrdf(wf, ctx, sr, stdout):
+    # type: (Process, Loader.ContextType, Text, IO[Any]) -> None
+    stdout.write(gather(wf, ctx).serialize(format=sr))
 
-def lastpart(uri):  # type: (Any) -> str
-    uri = str(uri)
+def lastpart(uri):  # type: (Any) -> Text
+    uri = Text(uri)
     if "/" in uri:
         return uri[uri.rindex("/")+1:]
     else:
@@ -99,7 +85,7 @@ def dot_with_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
         stdout.write(u'"%s" [shape=octagon]\n' % (lastpart(inp)))
 
 def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
-    dotname = {}  # type: Dict[str,str]
+    dotname = {}  # type: Dict[Text,Text]
     clusternode = {}
 
     stdout.write("compound=true\n")
@@ -142,8 +128,8 @@ def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
             else:
                 currentwf = None
 
-        if str(runtype) != "https://w3id.org/cwl/cwl#Workflow":
-            stdout.write(u'"%s" [label="%s"]\n' % (dotname[step], urlparse.urldefrag(str(step))[1]))
+        if Text(runtype) != "https://w3id.org/cwl/cwl#Workflow":
+            stdout.write(u'"%s" [label="%s"]\n' % (dotname[step], urlparse.urldefrag(Text(step))[1]))
 
     if currentwf is not None:
         stdout.write("}\n")
@@ -153,9 +139,9 @@ def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
            WHERE {
               ?wf1 Workflow:steps ?src .
               ?wf2 Workflow:steps ?sink .
-              ?src cwl:outputs ?out .
+              ?src cwl:out ?out .
               ?inp cwl:source ?out .
-              ?sink cwl:inputs ?inp .
+              ?sink cwl:in ?inp .
               ?src cwl:run ?srcrun .
               ?sink cwl:run ?sinkrun .
            }""")
@@ -171,9 +157,9 @@ def dot_without_parameters(g, stdout):  # type: (Graph, IO[Any]) -> None
         stdout.write(u'"%s" -> "%s" [%s]\n' % (dotname[src], dotname[sink], attr))
 
 
-def printdot(workflow, wf, ctx, stdout, include_parameters=False):
-    # type: (str, Dict[str,Any], Loader.ContextType, Any, bool) -> None
-    g = makerdf(workflow, wf, ctx)
+def printdot(wf, ctx, stdout, include_parameters=False):
+    # type: (Process, Loader.ContextType, Any, bool) -> None
+    g = gather(wf, ctx)
 
     stdout.write("digraph {")
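
The rework above hands JSON-LD-to-RDF conversion to schema_salad's makerdf and
only serializes the gathered rdflib Graph. A self-contained illustration of
that serialization step (hedged: toy document; parsing with format="json-ld"
relies on the rdflib-jsonld plugin being installed):

    import json
    from rdflib import Graph

    doc = {"@context": {"name": "http://schema.org/name"},
           "@id": "http://example.com/tool",
           "name": "example tool"}
    g = Graph().parse(data=json.dumps(doc), format="json-ld")
    print(g.serialize(format="turtle"))
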
 
diff --git a/cwltool/cwltest.py b/cwltool/cwltest.py
deleted file mode 100755
index cb6b6f7..0000000
--- a/cwltool/cwltest.py
+++ /dev/null
@@ -1,210 +0,0 @@
-#!/usr/bin/env python
-
-import argparse
-import json
-import os
-import subprocess
-import sys
-import shutil
-import tempfile
-import yaml
-import yaml.scanner
-import pipes
-import logging
-import schema_salad.ref_resolver
-from typing import Any, Union
-
-_logger = logging.getLogger("cwltest")
-_logger.addHandler(logging.StreamHandler())
-_logger.setLevel(logging.INFO)
-
-UNSUPPORTED_FEATURE = 33
-
-class CompareFail(Exception):
-    pass
-
-
-def compare(a, b):  # type: (Any, Any) -> bool
-    try:
-        if isinstance(a, dict):
-            if a.get("class") == "File":
-                if not (b["path"].endswith("/" + a["path"]) or ("/" not in b["path"] and a["path"] == b["path"])):
-                    raise CompareFail(u"%s does not end with %s" %(b["path"], a["path"]))
-                # ignore empty collections
-                b = {k: v for k, v in b.iteritems()
-                     if not isinstance(v, (list, dict)) or len(v) > 0}
-            if len(a) != len(b):
-                raise CompareFail(u"expected %s\ngot %s" % (json.dumps(a, indent=4, sort_keys=True), json.dumps(b, indent=4, sort_keys=True)))
-            for c in a:
-                if a.get("class") != "File" or c != "path":
-                    if c not in b:
-                        raise CompareFail(u"%s not in %s" % (c, b))
-                    if not compare(a[c], b[c]):
-                        return False
-            return True
-        elif isinstance(a, list):
-            if len(a) != len(b):
-                raise CompareFail(u"expected %s\ngot %s" % (json.dumps(a, indent=4, sort_keys=True), json.dumps(b, indent=4, sort_keys=True)))
-            for c in xrange(0, len(a)):
-                if not compare(a[c], b[c]):
-                    return False
-            return True
-        else:
-            if a != b:
-                raise CompareFail(u"%s != %s" % (a, b))
-            else:
-                return True
-    except Exception as e:
-        raise CompareFail(str(e))
-
-
-def run_test(args, i, t):  # type: (argparse.Namespace, Any, Dict[str,str]) -> int
-    out = {}  # type: Dict[str,Any]
-    outdir = None
-    try:
-        if "output" in t:
-            test_command = [args.tool]
-            # Add prefixes if running on MacOSX so that boot2docker writes to /Users
-            if 'darwin' in sys.platform:
-                outdir = tempfile.mkdtemp(prefix=os.path.abspath(os.path.curdir))
-                test_command.extend(["--tmp-outdir-prefix={}".format(outdir), "--tmpdir-prefix={}".format(outdir)])
-            else:
-                outdir = tempfile.mkdtemp()
-            test_command.extend(["--outdir={}".format(outdir),
-                                 "--quiet",
-                                 t["tool"],
-                                 t["job"]])
-            outstr = subprocess.check_output(test_command)
-            out = {"output": json.loads(outstr)}
-        else:
-            test_command = [args.tool,
-                            "--conformance-test",
-                            "--basedir=" + args.basedir,
-                            "--no-container",
-                            "--quiet",
-                            t["tool"],
-                            t["job"]]
-
-            outstr = subprocess.check_output(test_command)
-            out = yaml.load(outstr)
-            if not isinstance(out, dict):
-                raise ValueError("Non-dict value parsed from output string.")
-    except ValueError as v:
-        _logger.error(str(v))
-        _logger.error(outstr)
-    except subprocess.CalledProcessError as err:
-        if err.returncode == UNSUPPORTED_FEATURE:
-            return UNSUPPORTED_FEATURE
-        else:
-            _logger.error(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
-            _logger.error(t.get("doc"))
-            _logger.error("Returned non-zero")
-            return 1
-    except yaml.scanner.ScannerError as e:
-        _logger.error(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
-        _logger.error(outstr)
-        _logger.error(u"Parse error %s", str(e))
-
-    pwd = os.path.abspath(os.path.dirname(t["job"]))
-    # t["args"] = map(lambda x: x.replace("$PWD", pwd), t["args"])
-    # if "stdin" in t:
-    #     t["stdin"] = t["stdin"].replace("$PWD", pwd)
-
-    failed = False
-    if "output" in t:
-        checkkeys = ["output"]
-    else:
-        checkkeys = ["args", "stdin", "stdout", "createfiles"]
-
-    for key in checkkeys:
-        try:
-            compare(t.get(key), out.get(key))
-        except CompareFail as ex:
-            _logger.warn(u"""Test failed: %s""", " ".join([pipes.quote(tc) for tc in test_command]))
-            _logger.warn(t.get("doc"))
-            _logger.warn(u"%s expected %s\n got %s", key,
-                                                            json.dumps(t.get(key), indent=4, sort_keys=True),
-                                                            json.dumps(out.get(key), indent=4, sort_keys=True))
-            _logger.warn(u"Compare failure %s", ex)
-            failed = True
-
-    if outdir:
-        shutil.rmtree(outdir, True)  # type: ignore
-        # Weird AnyStr != basestring issue
-
-    if failed:
-        return 1
-    else:
-        return 0
-
-
-def main():  # type: () -> int
-    parser = argparse.ArgumentParser(description='Compliance tests for cwltool')
-    parser.add_argument("--test", type=str, help="YAML file describing test cases", required=True)
-    parser.add_argument("--basedir", type=str, help="Basedir to use for tests", default=".")
-    parser.add_argument("-l", action="store_true", help="List tests then exit")
-    parser.add_argument("-n", type=str, default=None, help="Run a specific tests, format is 1,3-6,9")
-    parser.add_argument("--tool", type=str, default="cwl-runner",
-                        help="CWL runner executable to use (default 'cwl-runner'")
-    parser.add_argument("--only-tools", action="store_true", help="Only test tools")
-
-    args = parser.parse_args()
-
-    if not args.test:
-        parser.print_help()
-        return 1
-
-    with open(args.test) as f:
-        tests = yaml.load(f)
-
-    failures = 0
-    unsupported = 0
-
-    if args.only_tools:
-        alltests = tests
-        tests = []
-        for t in alltests:
-            loader = schema_salad.ref_resolver.Loader({"id": "@id"})
-            cwl = loader.resolve_ref(t["tool"])[0]
-            if isinstance(cwl, dict):
-                if cwl["class"] == "CommandLineTool":
-                    tests.append(t)
-            else:
-                raise Exception("Unexpected code path.")
-
-    if args.l:
-        for i, t in enumerate(tests):
-            print u"[%i] %s" % (i+1, t["doc"].strip())
-        return 0
-
-    if args.n is not None:
-        ntest = []
-        for s in args.n.split(","):
-            sp = s.split("-")
-            if len(sp) == 2:
-                ntest.extend(range(int(sp[0])-1, int(sp[1])))
-            else:
-                ntest.append(int(s)-1)
-    else:
-        ntest = range(0, len(tests))
-
-    for i in ntest:
-        t = tests[i]
-        sys.stderr.write("\rTest [%i/%i] " % (i+1, len(tests)))
-        sys.stderr.flush()
-        rt = run_test(args, i, t)
-        if rt == 1:
-            failures += 1
-        elif rt == UNSUPPORTED_FEATURE:
-            unsupported += 1
-
-    if failures == 0 and unsupported == 0:
-         _logger.info("All tests passed")
-         return 0
-    else:
-        _logger.warn("%i failures, %i unsupported features", failures, unsupported)
-        return 1
-
-
-if __name__ == "__main__":
-    sys.exit(main())
diff --git a/cwltool/docker.py b/cwltool/docker.py
index 1a5da7e..812852f 100644
--- a/cwltool/docker.py
+++ b/cwltool/docker.py
@@ -6,18 +6,19 @@ import os
 from .errors import WorkflowException
 import re
 import tempfile
-from typing import Any, Union
+from typing import Any, Text, Union
 
 _logger = logging.getLogger("cwltool")
 
 def get_image(dockerRequirement, pull_image, dry_run=False):
-    # type: (Dict[str,str], bool, bool) -> bool
+    # type: (Dict[Text, Text], bool, bool) -> bool
     found = False
 
     if "dockerImageId" not in dockerRequirement and "dockerPull" in dockerRequirement:
         dockerRequirement["dockerImageId"] = dockerRequirement["dockerPull"]
 
-    for ln in subprocess.check_output(["docker", "images", "--no-trunc", "--all"]).splitlines():
+    for ln in subprocess.check_output(
+            ["docker", "images", "--no-trunc", "--all"]).splitlines():
         try:
             m = re.match(r"^([^ ]+)\s+([^ ]+)\s+([^ ]+)", ln)
             sp = dockerRequirement["dockerImageId"].split(":")
@@ -31,9 +32,10 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
             pass
 
     if not found and pull_image:
+        cmd = []  # type: List[str]
         if "dockerPull" in dockerRequirement:
-            cmd = ["docker", "pull", dockerRequirement["dockerPull"]]
-            _logger.info(str(cmd))
+            cmd = ["docker", "pull", str(dockerRequirement["dockerPull"])]
+            _logger.info(Text(cmd))
             if not dry_run:
                 subprocess.check_call(cmd, stdout=sys.stderr)
                 found = True
@@ -41,14 +43,15 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
             dockerfile_dir = str(tempfile.mkdtemp())
             with open(os.path.join(dockerfile_dir, "Dockerfile"), "w") as df:
                 df.write(dockerRequirement["dockerFile"])
-            cmd = ["docker", "build", "--tag=%s" % dockerRequirement["dockerImageId"], dockerfile_dir]
-            _logger.info(str(cmd))
+            cmd = ["docker", "build", "--tag=%s" %
+                str(dockerRequirement["dockerImageId"]), dockerfile_dir]
+            _logger.info(Text(cmd))
             if not dry_run:
                 subprocess.check_call(cmd, stdout=sys.stderr)
                 found = True
         elif "dockerLoad" in dockerRequirement:
             cmd = ["docker", "load"]
-            _logger.info(str(cmd))
+            _logger.info(Text(cmd))
             if not dry_run:
                 if os.path.exists(dockerRequirement["dockerLoad"]):
                     _logger.info(u"Loading docker image from %s", dockerRequirement["dockerLoad"])
@@ -69,8 +72,9 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
                     raise WorkflowException("Docker load returned non-zero exit status %i" % (rcode))
                 found = True
         elif "dockerImport" in dockerRequirement:
-            cmd = ["docker", "import", dockerRequirement["dockerImport"], dockerRequirement["dockerImageId"]]
-            _logger.info(str(cmd))
+            cmd = ["docker", "import", str(dockerRequirement["dockerImport"]),
+                str(dockerRequirement["dockerImageId"])]
+            _logger.info(Text(cmd))
             if not dry_run:
                 subprocess.check_call(cmd, stdout=sys.stderr)
                 found = True
@@ -79,15 +83,15 @@ def get_image(dockerRequirement, pull_image, dry_run=False):
 
 
 def get_from_requirements(r, req, pull_image, dry_run=False):
-    # type: (Dict[str,str], bool, bool, bool) -> Union[None,str]
+    # type: (Dict[Text, Text], bool, bool, bool) -> Text
     if r:
         errmsg = None
         try:
             subprocess.check_output(["docker", "version"])
         except subprocess.CalledProcessError as e:
-            errmsg = "Cannot communicate with docker daemon: " + str(e)
+            errmsg = "Cannot communicate with docker daemon: " + Text(e)
         except OSError as e:
-            errmsg = "'docker' executable not found: " + str(e)
+            errmsg = "'docker' executable not found: " + Text(e)
 
         if errmsg:
             if req:
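
For orientation, get_image() above first scans the local image list before attempting a pull, build, load, or import. A rough sketch of that lookup, assuming the elided comparison pairs the repository and tag columns against the split dockerImageId, with "latest" as the default tag:

    import re
    import subprocess

    def image_present(image_id):
        # image_id is a hypothetical stand-in for
        # dockerRequirement["dockerImageId"].
        sp = image_id.split(":")
        if len(sp) == 1:
            sp.append("latest")  # assumed default tag
        out = subprocess.check_output(
            ["docker", "images", "--no-trunc", "--all"])
        for ln in out.splitlines():
            m = re.match(r"^([^ ]+)\s+([^ ]+)\s+([^ ]+)", ln)
            if m and m.group(1) == sp[0] and m.group(2) == sp[1]:
                return True
        return False
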
diff --git a/cwltool/docker_uid.py b/cwltool/docker_uid.py
index 4482f77..11223c7 100644
--- a/cwltool/docker_uid.py
+++ b/cwltool/docker_uid.py
@@ -1,8 +1,8 @@
 import subprocess
-from typing import Union
+from typing import Text, Union
 
 
-def docker_vm_uid():  # type: () -> Union[int,None]
+def docker_vm_uid():  # type: () -> int
     """
     Returns the UID of the default docker user inside the VM
 
@@ -20,7 +20,7 @@ def docker_vm_uid():  # type: () -> Union[int,None]
         return None
 
 
-def check_output_and_strip(cmd):  # type: (List[str]) -> Union[str,None]
+def check_output_and_strip(cmd):  # type: (List[Text]) -> Text
     """
     Passes a command list to subprocess.check_output, returning None
     if an expected exception is raised
@@ -37,7 +37,7 @@ def check_output_and_strip(cmd):  # type: (List[str]) -> Union[str,None]
         return None
 
 
-def docker_machine_name():  # type: () -> Union[str,None]
+def docker_machine_name():  # type: () -> Text
     """
     Get the machine name of the active docker-machine machine
     :return: Name of the active machine or None if error
@@ -46,7 +46,7 @@ def docker_machine_name():  # type: () -> Union[str,None]
 
 
 def cmd_output_matches(check_cmd, expected_status):
-    # type: (List[str], str) -> bool
+    # type: (List[Text], Text) -> bool
     """
     Runs a command and compares output to expected
     :param check_cmd: Command list to execute
@@ -76,7 +76,7 @@ def docker_machine_running():  # type: () -> bool
     return cmd_output_matches(['docker-machine', 'status', machine_name], 'Running')
 
 
-def cmd_output_to_int(cmd):  # type: (List[str]) -> Union[int,None]
+def cmd_output_to_int(cmd):  # type: (List[Text]) -> int
     """
     Runs the provided command and returns the integer value of the result
     :param cmd: The command to run
@@ -91,7 +91,7 @@ def cmd_output_to_int(cmd):  # type: (List[str]) -> Union[int,None]
             return None
 
 
-def boot2docker_uid():  # type: () -> Union[int,None]
+def boot2docker_uid():  # type: () -> int
     """
     Gets the UID of the docker user inside a running boot2docker vm
     :return: the UID, or None if error (e.g. boot2docker not present or stopped)
@@ -99,7 +99,7 @@ def boot2docker_uid():  # type: () -> Union[int,None]
     return cmd_output_to_int(['boot2docker', 'ssh', 'id', '-u'])
 
 
-def docker_machine_uid():  # type: () -> Union[int,None]
+def docker_machine_uid():  # type: () -> int
     """
     Asks docker-machine for active machine and gets the UID of the docker user
     inside the vm
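
The helpers above, now with tightened type comments, probe for the UID of the docker user inside a VM via boot2docker or docker-machine. A condensed sketch of the shared coercion helper plus the boot2docker branch shown in the hunks:

    import subprocess

    def cmd_output_to_int(cmd):
        # Run cmd, strip its stdout, coerce to int; None if the command
        # fails or prints something non-numeric.
        try:
            return int(subprocess.check_output(cmd).strip())
        except (subprocess.CalledProcessError, OSError, ValueError):
            return None

    # boot2docker branch, as in the hunk above:
    uid = cmd_output_to_int(['boot2docker', 'ssh', 'id', '-u'])
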
diff --git a/cwltool/draft2tool.py b/cwltool/draft2tool.py
index 47d6c04..a1b2a09 100644
--- a/cwltool/draft2tool.py
+++ b/cwltool/draft2tool.py
@@ -1,60 +1,68 @@
-import avro.schema
+import shutil
+from functools import partial
 import json
 import copy
-from .flatten import flatten
-from functools import partial
 import os
-from .pathmapper import PathMapper, DockerPathMapper
-from .job import CommandLineJob
-import yaml
 import glob
 import logging
 import hashlib
-import random
-from .process import Process, shortname, uniquename, adjustFileObjs
-from .errors import WorkflowException
-import schema_salad.validate as validate
-from .utils import aslist
-from . import expression
 import re
 import urlparse
 import tempfile
-from .builder import CONTENT_LIMIT, substitute, Builder
-import shellescape
 import errno
-from typing import Callable, Any, Union, Generator, cast
-import hashlib
-import shutil
+
+import avro.schema
+import schema_salad.validate as validate
+import shellescape
+from typing import Any, Callable, cast, Generator, Text, Union
+
+from .process import Process, shortname, uniquename, getListing, normalizeFilesDirs, compute_checksums
+from .errors import WorkflowException
+from .utils import aslist
+from . import expression
+from .builder import CONTENT_LIMIT, substitute, Builder, adjustFileObjs, adjustDirObjs
+from .pathmapper import PathMapper
+from .job import CommandLineJob
+from .stdfsaccess import StdFsAccess
+
+ACCEPTLIST_EN_STRICT_RE = re.compile(r"^[a-zA-Z0-9._+-]+$")
+ACCEPTLIST_EN_RELAXED_RE = re.compile(r"^[ a-zA-Z0-9._+-]+$")  # with spaces
+ACCEPTLIST_RE = ACCEPTLIST_EN_STRICT_RE
+
+from .flatten import flatten
 
 _logger = logging.getLogger("cwltool")
 
 class ExpressionTool(Process):
     def __init__(self, toolpath_object, **kwargs):
-        # type: (Dict[str,List[None]], **Any) -> None
+        # type: (Dict[Text, Any], **Any) -> None
         super(ExpressionTool, self).__init__(toolpath_object, **kwargs)
 
     class ExpressionJob(object):
 
         def __init__(self):  # type: () -> None
             self.builder = None  # type: Builder
-            self.requirements = None  # type: Dict[str,str]
-            self.hints = None  # type: Dict[str,str]
+            self.requirements = None  # type: Dict[Text, Text]
+            self.hints = None  # type: Dict[Text, Text]
             self.collect_outputs = None  # type: Callable[[Any], Any]
             self.output_callback = None  # type: Callable[[Any, Any], Any]
-            self.outdir = None  # type: str
-            self.tmpdir = None  # type: str
-            self.script = None  # type: Dict[str,str]
+            self.outdir = None  # type: Text
+            self.tmpdir = None  # type: Text
+            self.script = None  # type: Dict[Text, Text]
 
         def run(self, **kwargs):  # type: (**Any) -> None
             try:
-                self.output_callback(self.builder.do_eval(self.script), "success")
+                ev = self.builder.do_eval(self.script)
+                normalizeFilesDirs(ev)
+                self.output_callback(ev, "success")
             except Exception as e:
-                _logger.warn(u"Failed to evaluate expression:\n%s", e, exc_info=(e if kwargs.get('debug') else False))
+                _logger.warn(u"Failed to evaluate expression:\n%s",
+                        e, exc_info=kwargs.get('debug'))
                 self.output_callback({}, "permanentFail")
 
-    def job(self, joborder, input_basedir, output_callback, **kwargs):
-        # type: (Dict[str,str], str, Callable[[Any, Any], Any], **Any) -> Generator[ExpressionTool.ExpressionJob, None, None]
-        builder = self._init_job(joborder, input_basedir, **kwargs)
+    def job(self, joborder, output_callback, **kwargs):
+        # type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator[ExpressionTool.ExpressionJob, None, None]
+        builder = self._init_job(joborder, **kwargs)
 
         j = ExpressionTool.ExpressionJob()
         j.builder = builder
@@ -68,37 +76,53 @@ class ExpressionTool(Process):
         yield j
 
 
-def remove_hostfs(f):  # type: (Dict[str, Any]) -> None
-    if "hostfs" in f:
-        del f["hostfs"]
-
+def remove_path(f):  # type: (Dict[Text, Any]) -> None
+    if "path" in f:
+        del f["path"]
 
 def revmap_file(builder, outdir, f):
-    # type: (Builder,str,Dict[str,Any]) -> Union[Dict[str,Any],None]
-    """Remap a file back to original path. For Docker, this is outside the container.
+    # type: (Builder, Text, Dict[Text, Any]) -> Union[Dict[Text, Any], None]
 
-    Uses either files in the pathmapper or remaps internal output directories
-    to the external directory.
-    """
+    """Remap a file from internal path to external path.
 
-    if f.get("hostfs"):
-        return None
+    For Docker, this maps from the path inside the container to the path
+    outside the container. Recognizes files in the pathmapper or remaps
+    internal output directories to the external directory.
+    """
 
-    revmap_f = builder.pathmapper.reversemap(f["path"])
-    if revmap_f:
-        f["path"] = revmap_f[1]
-        f["hostfs"] = True
+    split = urlparse.urlsplit(outdir)
+    if not split.scheme:
+        outdir = "file://" + outdir
+
+    if "location" in f:
+        if f["location"].startswith("file://"):
+            path = f["location"][7:]
+            revmap_f = builder.pathmapper.reversemap(path)
+            if revmap_f:
+                f["location"] = revmap_f[1]
+            elif path.startswith(builder.outdir):
+                f["location"] = builder.fs_access.join(outdir, path[len(builder.outdir)+1:])
         return f
-    elif f["path"].startswith(builder.outdir):
-        f["path"] = os.path.join(outdir, f["path"][len(builder.outdir)+1:])
-        f["hostfs"] = True
-        return f
-    else:
-        raise WorkflowException(u"Output file path %s must be within designated output directory (%s) or an input file pass through." % (f["path"], builder.outdir))
+
+    if "path" in f:
+        path = f["path"]
+        del f["path"]
+        revmap_f = builder.pathmapper.reversemap(path)
+        if revmap_f:
+            f["location"] = revmap_f[1]
+            return f
+        elif path.startswith(builder.outdir):
+            f["location"] = builder.fs_access.join(outdir, path[len(builder.outdir)+1:])
+            return f
+        else:
+            raise WorkflowException(u"Output file path %s must be within designated output directory (%s) or an input file pass through." % (path, builder.outdir))
+
+    raise WorkflowException(u"Output File object is missing both `location` and `path` fields: %s" % f)
+
 
 class CallbackJob(object):
     def __init__(self, job, output_callback, cachebuilder, jobcache):
-        # type: (CommandLineTool, Callable[[Any, Any], Any], Builder, str) -> None
+        # type: (CommandLineTool, Callable[[Any, Any], Any], Builder, Text) -> None
         self.job = job
         self.output_callback = output_callback
         self.cachebuilder = cachebuilder
@@ -106,33 +130,43 @@ class CallbackJob(object):
 
     def run(self, **kwargs):
         # type: (**Any) -> None
-        self.output_callback(self.job.collect_output_ports(self.job.tool["outputs"],
-                                                           self.cachebuilder, self.outdir),
-                                            "success")
-
+        self.output_callback(self.job.collect_output_ports(
+            self.job.tool["outputs"],
+            self.cachebuilder,
+            self.outdir,
+            kwargs.get("compute_checksum", True)), "success")
+
+# map files to assigned path inside a container. We also need to explicitly
+# walk over the input as implicit reassignment doesn't reach everything in builder.bindings
+def check_adjust(builder, f):
+    # type: (Builder, Dict[Text, Any]) -> Dict[Text, Any]
+    f["path"] = builder.pathmapper.mapper(f["location"])[1]
+    f["dirname"], f["basename"] = os.path.split(f["path"])
+    if f["class"] == "File":
+        f["nameroot"], f["nameext"] = os.path.splitext(f["basename"])
+    if not ACCEPTLIST_RE.match(f["basename"]):
+        raise WorkflowException("Invalid filename: '%s' contains illegal characters" % (f["basename"]))
+    return f
 
 class CommandLineTool(Process):
     def __init__(self, toolpath_object, **kwargs):
-        # type: (Dict[str,Any], **Any) -> None
+        # type: (Dict[Text, Any], **Any) -> None
         super(CommandLineTool, self).__init__(toolpath_object, **kwargs)
 
     def makeJobRunner(self):  # type: () -> CommandLineJob
         return CommandLineJob()
 
-    def makePathMapper(self, reffiles, input_basedir, **kwargs):
-        # type: (Set[str], str, **Any) -> PathMapper
+    def makePathMapper(self, reffiles, stagedir, **kwargs):
+        # type: (List[Any], Text, **Any) -> PathMapper
         dockerReq, _ = self.get_requirement("DockerRequirement")
         try:
-            if dockerReq and kwargs.get("use_container"):
-                return DockerPathMapper(reffiles, input_basedir)
-            else:
-                return PathMapper(reffiles, input_basedir)
+            return PathMapper(reffiles, kwargs["basedir"], stagedir)
         except OSError as e:
             if e.errno == errno.ENOENT:
                 raise WorkflowException(u"Missing input file %s" % e)
 
-    def job(self, joborder, input_basedir, output_callback, **kwargs):
-        # type: (Dict[str,str], str, Callable[..., Any], **Any) -> Generator[Union[CommandLineJob, CallbackJob], None, None]
+    def job(self, joborder, output_callback, **kwargs):
+        # type: (Dict[Text, Text], Callable[..., Any], **Any) -> Generator[Union[CommandLineJob, CallbackJob], None, None]
 
         jobname = uniquename(kwargs.get("name", shortname(self.tool.get("id", "job"))))
 
@@ -140,20 +174,28 @@ class CommandLineTool(Process):
             cacheargs = kwargs.copy()
             cacheargs["outdir"] = "/out"
             cacheargs["tmpdir"] = "/tmp"
-            cachebuilder = self._init_job(joborder, input_basedir, **cacheargs)
-            cachebuilder.pathmapper = PathMapper(set((f["path"] for f in cachebuilder.files)),
-                                                 input_basedir)
-
+            cacheargs["stagedir"] = "/stage"
+            cachebuilder = self._init_job(joborder, **cacheargs)
+            cachebuilder.pathmapper = PathMapper(cachebuilder.files,
+                                                 kwargs["basedir"],
+                                                 cachebuilder.stagedir,
+                                                 separateDirs=False)
+            _check_adjust = partial(check_adjust, cachebuilder)
+            adjustFileObjs(cachebuilder.files, _check_adjust)
+            adjustFileObjs(cachebuilder.bindings, _check_adjust)
+            adjustDirObjs(cachebuilder.files, _check_adjust)
+            adjustDirObjs(cachebuilder.bindings, _check_adjust)
             cmdline = flatten(map(cachebuilder.generate_arg, cachebuilder.bindings))
             (docker_req, docker_is_req) = self.get_requirement("DockerRequirement")
             if docker_req and kwargs.get("use_container") is not False:
                 dockerimg = docker_req.get("dockerImageId") or docker_req.get("dockerPull")
                 cmdline = ["docker", "run", dockerimg] + cmdline
-            keydict = {"cmdline": cmdline}
+            keydict = {u"cmdline": cmdline}
 
             for _,f in cachebuilder.pathmapper.items():
-                st = os.stat(f[0])
-                keydict[f[0]] = [st.st_size, int(st.st_mtime * 1000)]
+                if f.type == "File":
+                    st = os.stat(f.resolved)
+                    keydict[f.resolved] = [st.st_size, int(st.st_mtime * 1000)]
 
             interesting = {"DockerRequirement",
                            "EnvVarRequirement",
@@ -167,7 +209,8 @@ class CommandLineTool(Process):
             keydictstr = json.dumps(keydict, separators=(',',':'), sort_keys=True)
             cachekey = hashlib.md5(keydictstr).hexdigest()
 
-            _logger.debug("[job %s] keydictstr is %s -> %s", jobname, keydictstr, cachekey)
+            _logger.debug("[job %s] keydictstr is %s -> %s", jobname,
+                    keydictstr, cachekey)
 
             jobcache = os.path.join(kwargs["cachedir"], cachekey)
             jobcachepending = jobcache + ".pending"
@@ -187,25 +230,27 @@ class CommandLineTool(Process):
                 os.makedirs(jobcache)
                 kwargs["outdir"] = jobcache
                 open(jobcachepending, "w").close()
+
                 def rm_pending_output_callback(output_callback, jobcachepending,
                                                outputs, processStatus):
                     if processStatus == "success":
                         os.remove(jobcachepending)
                     output_callback(outputs, processStatus)
                 output_callback = cast(
-                        Callable[..., Any],  # known bug in mypy
-                        # https://github.com/python/mypy/issues/797
-                        partial(rm_pending_output_callback, output_callback,
-                            jobcachepending))
+                    Callable[..., Any],  # known bug in mypy
+                    # https://github.com/python/mypy/issues/797
+                    partial(rm_pending_output_callback, output_callback,
+                        jobcachepending))
 
-        builder = self._init_job(joborder, input_basedir, **kwargs)
+        builder = self._init_job(joborder, **kwargs)
 
-        reffiles = set((f["path"] for f in builder.files))
+        reffiles = copy.deepcopy(builder.files)
 
         j = self.makeJobRunner()
         j.builder = builder
         j.joborder = builder.job
         j.stdin = None
+        j.stderr = None
         j.stdout = None
         j.successCodes = self.tool.get("successCodes")
         j.temporaryFailCodes = self.tool.get("temporaryFailCodes")
@@ -214,41 +259,48 @@ class CommandLineTool(Process):
         j.hints = self.hints
         j.name = jobname
 
-        _logger.debug(u"[job %s] initializing from %s%s",
-                     j.name,
-                     self.tool.get("id", ""),
-                     u" as part of %s" % kwargs["part_of"] if "part_of" in kwargs else "")
-        _logger.debug(u"[job %s] %s", j.name, json.dumps(joborder, indent=4))
+        if _logger.isEnabledFor(logging.DEBUG):
+            _logger.debug(u"[job %s] initializing from %s%s",
+                         j.name,
+                         self.tool.get("id", ""),
+                         u" as part of %s" % kwargs["part_of"] if "part_of" in kwargs else "")
+            _logger.debug(u"[job %s] %s", j.name, json.dumps(joborder, indent=4))
 
 
         builder.pathmapper = None
+        make_path_mapper_kwargs = kwargs
+        if "stagedir" in make_path_mapper_kwargs:
+            make_path_mapper_kwargs = make_path_mapper_kwargs.copy()
+            del make_path_mapper_kwargs["stagedir"]
+        builder.pathmapper = self.makePathMapper(reffiles, builder.stagedir, **make_path_mapper_kwargs)
+        builder.requirements = j.requirements
+
+        if _logger.isEnabledFor(logging.DEBUG):
+            _logger.debug(u"[job %s] path mappings is %s", j.name, json.dumps({p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()}, indent=4))
+
+        _check_adjust = partial(check_adjust, builder)
+
+        adjustFileObjs(builder.files, _check_adjust)
+        adjustFileObjs(builder.bindings, _check_adjust)
+        adjustDirObjs(builder.files, _check_adjust)
+        adjustDirObjs(builder.bindings, _check_adjust)
 
         if self.tool.get("stdin"):
             j.stdin = builder.do_eval(self.tool["stdin"])
-            reffiles.add(j.stdin)
+            reffiles.append({"class": "File", "path": j.stdin})
+
+        if self.tool.get("stderr"):
+            j.stderr = builder.do_eval(self.tool["stderr"])
+            if os.path.isabs(j.stderr) or ".." in j.stderr:
+                raise validate.ValidationException("stderr must be a relative path")
 
         if self.tool.get("stdout"):
             j.stdout = builder.do_eval(self.tool["stdout"])
-            if os.path.isabs(j.stdout) or ".." in j.stdout:
+            if os.path.isabs(j.stdout) or ".." in j.stdout or not j.stdout:
                 raise validate.ValidationException("stdout must be a relative path")
 
-        builder.pathmapper = self.makePathMapper(reffiles, input_basedir, **kwargs)
-        builder.requirements = j.requirements
-
-        # map files to assigned path inside a container. We need to also explicitly
-        # walk over input as implicit reassignment doesn't reach everything in builder.bindings
-        def _check_adjust(f):  # type: (Dict[str,Any]) -> Dict[str,Any]
-            if not f.get("containerfs"):
-                f["path"] = builder.pathmapper.mapper(f["path"])[1]
-                f["containerfs"] = True
-            return f
-
-        _logger.debug(u"[job %s] path mappings is %s", j.name, json.dumps({p: builder.pathmapper.mapper(p) for p in builder.pathmapper.files()}, indent=4))
-
-        adjustFileObjs(builder.files, _check_adjust)
-        adjustFileObjs(builder.bindings, _check_adjust)
-
-        _logger.debug(u"[job %s] command line bindings is %s", j.name, json.dumps(builder.bindings, indent=4))
+        if _logger.isEnabledFor(logging.DEBUG):
+            _logger.debug(u"[job %s] command line bindings is %s", j.name, json.dumps(builder.bindings, indent=4))
 
         dockerReq = self.get_requirement("DockerRequirement")[0]
         if dockerReq and kwargs.get("use_container"):
@@ -256,15 +308,48 @@ class CommandLineTool(Process):
             j.outdir = kwargs.get("outdir") or tempfile.mkdtemp(prefix=out_prefix)
             tmpdir_prefix = kwargs.get('tmpdir_prefix')
             j.tmpdir = kwargs.get("tmpdir") or tempfile.mkdtemp(prefix=tmpdir_prefix)
+            j.stagedir = tempfile.mkdtemp(prefix=tmpdir_prefix)
         else:
             j.outdir = builder.outdir
             j.tmpdir = builder.tmpdir
+            j.stagedir = builder.stagedir
+
+        initialWorkdir = self.get_requirement("InitialWorkDirRequirement")[0]
+        j.generatefiles = {"class": "Directory", "listing": [], "basename": ""}
+        if initialWorkdir:
+            ls = []  # type: List[Dict[Text, Any]]
+            if isinstance(initialWorkdir["listing"], (str, Text)):
+                ls = builder.do_eval(initialWorkdir["listing"])
+            else:
+                for t in initialWorkdir["listing"]:
+                    if "entry" in t:
+                        et = {u"entry": builder.do_eval(t["entry"])}
+                        if "entryname" in t:
+                            et["entryname"] = builder.do_eval(t["entryname"])
+                        else:
+                            et["entryname"] = None
+                        et["writable"] = t.get("writable", False)
+                        ls.append(et)
+                    else:
+                        ls.append(builder.do_eval(t))
+            for i,t in enumerate(ls):
+                if "entry" in t:
+                    if isinstance(t["entry"], basestring):
+                        ls[i] = {
+                            "class": "File",
+                            "basename": t["entryname"],
+                            "contents": t["entry"],
+                            "writable": t.get("writable")
+                        }
+                    else:
+                        if t["entryname"]:
+                            t = copy.deepcopy(t)
+                            t["entry"]["basename"] = t["entryname"]
+                            t["entry"]["writable"] = t.get("writable")
+                        ls[i] = t["entry"]
+            j.generatefiles[u"listing"] = ls
 
-        createFiles = self.get_requirement("CreateFileRequirement")[0]
-        j.generatefiles = {}
-        if createFiles:
-            for t in createFiles["fileDef"]:
-                j.generatefiles[builder.do_eval(t["filename"])] = copy.deepcopy(builder.do_eval(t["fileContent"]))
+        normalizeFilesDirs(j.generatefiles)
 
         j.environment = {}
         evr = self.get_requirement("EnvVarRequirement")[0]
@@ -274,7 +359,7 @@ class CommandLineTool(Process):
 
         shellcmd = self.get_requirement("ShellCommandRequirement")[0]
         if shellcmd:
-            cmd = []  # type: List[str]
+            cmd = []  # type: List[Text]
             for b in builder.bindings:
                 arg = builder.generate_arg(b)
                 if b.get("shellQuote", True):
@@ -286,48 +371,58 @@ class CommandLineTool(Process):
 
         j.pathmapper = builder.pathmapper
         j.collect_outputs = partial(
-                self.collect_output_ports, self.tool["outputs"], builder)
+            self.collect_output_ports, self.tool["outputs"], builder,
+            compute_checksum=kwargs.get("compute_checksum", True))
         j.output_callback = output_callback
 
         yield j
 
-    def collect_output_ports(self, ports, builder, outdir):
-        # type: (Set[Dict[str,Any]], Builder, str) -> Dict[str,Union[str,List[Any],Dict[str,Any]]]
+    def collect_output_ports(self, ports, builder, outdir, compute_checksum=True):
+        # type: (Set[Dict[Text, Any]], Builder, Text, bool) -> Dict[Text, Union[Text, List[Any], Dict[Text, Any]]]
         try:
-            ret = {}  # type: Dict[str,Union[str,List[Any],Dict[str,Any]]]
-            custom_output = os.path.join(outdir, "cwl.output.json")
-            if builder.fs_access.exists(custom_output):
-                with builder.fs_access.open(custom_output, "r") as f:
-                    ret = yaml.load(f)
-                _logger.debug(u"Raw output from %s: %s", custom_output, json.dumps(ret, indent=4))
-                adjustFileObjs(ret, remove_hostfs)
+            ret = {}  # type: Dict[Text, Union[Text, List[Any], Dict[Text, Any]]]
+            fs_access = builder.make_fs_access(outdir)
+            custom_output = fs_access.join(outdir, "cwl.output.json")
+            if fs_access.exists(custom_output):
+                with fs_access.open(custom_output, "r") as f:
+                    ret = json.load(f)
+                if _logger.isEnabledFor(logging.DEBUG):
+                    _logger.debug(u"Raw output from %s: %s", custom_output, json.dumps(ret, indent=4))
+            else:
+                for port in ports:
+                    fragment = shortname(port["id"])
+                    try:
+                        ret[fragment] = self.collect_output(port, builder, outdir, fs_access, compute_checksum=compute_checksum)
+                    except Exception as e:
+                        _logger.debug(
+                            u"Error collecting output for parameter '%s'"
+                            % shortname(port["id"]), exc_info=True)
+                        raise WorkflowException(
+                            u"Error collecting output for parameter '%s': %s"
+                            % (shortname(port["id"]), e))
+
+            if ret:
                 adjustFileObjs(ret,
                         cast(Callable[[Any], Any],  # known bug in mypy
                             # https://github.com/python/mypy/issues/797
                             partial(revmap_file, builder, outdir)))
-                adjustFileObjs(ret, remove_hostfs)
-                validate.validate_ex(self.names.get_name("outputs_record_schema", ""), ret)
-                return ret
-
-            for port in ports:
-                fragment = shortname(port["id"])
-                try:
-                    ret[fragment] = self.collect_output(port, builder, outdir)
-                except Exception as e:
-                    raise WorkflowException(u"Error collecting output for parameter '%s': %s" % (shortname(port["id"]), e))
-            if ret:
-                adjustFileObjs(ret, remove_hostfs)
+                adjustFileObjs(ret, remove_path)
+                adjustDirObjs(ret, remove_path)
+                normalizeFilesDirs(ret)
+                if compute_checksum:
+                    adjustFileObjs(ret, partial(compute_checksums, fs_access))
+
             validate.validate_ex(self.names.get_name("outputs_record_schema", ""), ret)
             return ret if ret is not None else {}
         except validate.ValidationException as e:
-            raise WorkflowException("Error validating output record, " + str(e) + "\n in " + json.dumps(ret, indent=4))
+            raise WorkflowException("Error validating output record, " + Text(e) + "\n in " + json.dumps(ret, indent=4))
 
-    def collect_output(self, schema, builder, outdir):
-        # type: (Dict[str,Any], Builder, str) -> Union[Dict[str, Any], List[Union[Dict[str, Any], str]]]
+    def collect_output(self, schema, builder, outdir, fs_access, compute_checksum=True):
+        # type: (Dict[Text, Any], Builder, Text, StdFsAccess, bool) -> Union[Dict[Text, Any], List[Union[Dict[Text, Any], Text]]]
         r = []  # type: List[Any]
         if "outputBinding" in schema:
             binding = schema["outputBinding"]
-            globpatterns = []  # type: List[str]
+            globpatterns = []  # type: List[Text]
 
             revmap = partial(revmap_file, builder, outdir)
 
@@ -338,60 +433,55 @@ class CommandLineTool(Process):
                         globpatterns.extend(aslist(gb))
 
                 for gb in globpatterns:
-                    if gb.startswith("/"):
+                    if gb.startswith(outdir):
+                        gb = gb[len(outdir)+1:]
+                    elif gb == ".":
+                        gb = outdir
+                    elif gb.startswith("/"):
                         raise WorkflowException("glob patterns must not start with '/'")
                     try:
-                        r.extend([{"path": g, "class": "File", "hostfs": True}
-                                  for g in builder.fs_access.glob(os.path.join(outdir, gb))])
+                        r.extend([{"location": g,
+                                   "class": "File" if fs_access.isfile(g) else "Directory"}
+                                  for g in fs_access.glob(fs_access.join(outdir, gb))])
                     except (OSError, IOError) as e:
-                        _logger.warn(str(e))
+                        _logger.warn(Text(e))
 
                 for files in r:
-                    checksum = hashlib.sha1()
-                    with builder.fs_access.open(files["path"], "rb") as f:
-                        contents = f.read(CONTENT_LIMIT)
-                        if binding.get("loadContents"):
-                            files["contents"] = contents
-                        filesize = 0
-                        while contents != "":
-                            checksum.update(contents)
-                            filesize += len(contents)
-                            contents = f.read(1024*1024)
-                    files["checksum"] = "sha1$%s" % checksum.hexdigest()
-                    files["size"] = filesize
-                    if "format" in schema:
-                        files["format"] = builder.do_eval(schema["format"], context=files)
+                    if files["class"] == "Directory" and "listing" not in files:
+                        getListing(fs_access, files)
+                    else:
+                        with fs_access.open(files["location"], "rb") as f:
+                            contents = ""
+                            if binding.get("loadContents") or compute_checksum:
+                                contents = f.read(CONTENT_LIMIT)
+                            if binding.get("loadContents"):
+                                files["contents"] = contents
+                            if compute_checksum:
+                                checksum = hashlib.sha1()
+                                while contents != "":
+                                    checksum.update(contents)
+                                    contents = f.read(1024*1024)
+                                files["checksum"] = "sha1$%s" % checksum.hexdigest()
+                            f.seek(0, 2)
+                            filesize = f.tell()
+                        files["size"] = filesize
+                        if "format" in schema:
+                            files["format"] = builder.do_eval(schema["format"], context=files)
 
             optional = False
-            singlefile = False
+            single = False
             if isinstance(schema["type"], list):
                 if "null" in schema["type"]:
                     optional = True
-                if "File" in schema["type"]:
-                    singlefile = True
-            elif schema["type"] == "File":
-                singlefile = True
+                if "File" in schema["type"] or "Directory" in schema["type"]:
+                    single = True
+            elif schema["type"] == "File" or schema["type"] == "Directory":
+                single = True
 
             if "outputEval" in binding:
-                eout = builder.do_eval(binding["outputEval"], context=r)
-                if singlefile:
-                    # Handle single file outputs not wrapped in a list
-                    if eout is not None and not isinstance(eout, (list, tuple)):
-                        r = [eout]
-                    elif optional and eout is None:
-                        pass
-                    elif (eout is None or len(eout) != 1 or
-                            not isinstance(eout[0], dict)
-                            or "path" not in eout[0]):
-                        raise WorkflowException(
-                            u"Expression must return a file object for %s."
-                            % schema["id"])
-                    else:
-                        r = [eout]
-                else:
-                    r = eout
+                r = builder.do_eval(binding["outputEval"], context=r)
 
-            if singlefile:
+            if single:
                 if not r and not optional:
                     raise WorkflowException("Did not find output file with glob pattern: '{}'".format(globpatterns))
                 elif not r and optional:
@@ -413,14 +503,14 @@ class CommandLineTool(Process):
                         primary["secondaryFiles"] = []
                         for sf in aslist(schema["secondaryFiles"]):
                             if isinstance(sf, dict) or "$(" in sf or "${" in sf:
-                                sfpath = builder.do_eval(sf, context=r)
+                                sfpath = builder.do_eval(sf, context=primary)
                                 if isinstance(sfpath, basestring):
-                                    sfpath = revmap({"path": sfpath, "class": "File"})
+                                    sfpath = revmap({"location": sfpath, "class": "File"})
                             else:
-                                sfpath = {"path": substitute(primary["path"], sf), "class": "File", "hostfs": True}
+                                sfpath = {"location": substitute(primary["location"], sf), "class": "File"}
 
                             for sfitem in aslist(sfpath):
-                                if builder.fs_access.exists(sfitem["path"]):
+                                if fs_access.exists(sfitem["location"]):
                                     primary["secondaryFiles"].append(sfitem)
 
             if not r and optional:
@@ -431,6 +521,7 @@ class CommandLineTool(Process):
             out = {}
             for f in schema["type"]["fields"]:
                 out[shortname(f["name"])] = self.collect_output(  # type: ignore
-                        f, builder, outdir)
+                    f, builder, outdir, fs_access,
+                    compute_checksum=compute_checksum)
             return out
         return r
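
One of the changes above deserves a note: when a cache directory is configured (kwargs["cachedir"]), the job is keyed on the generated command line plus the size and mtime of every input file. A minimal standalone sketch of that key derivation, mirroring the keydict logic in CommandLineTool.job():

    import hashlib
    import json
    import os

    def job_cache_key(cmdline, input_paths):
        # Command line plus (size, mtime-in-ms) per input file, dumped
        # deterministically and hashed, as in the hunk above. Upstream
        # also folds in the "interesting" requirements; omitted here
        # for brevity.
        keydict = {u"cmdline": cmdline}
        for p in input_paths:
            st = os.stat(p)
            keydict[p] = [st.st_size, int(st.st_mtime * 1000)]
        keydictstr = json.dumps(keydict, separators=(',', ':'),
                                sort_keys=True)
        return hashlib.md5(keydictstr).hexdigest()
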
diff --git a/cwltool/errors.py b/cwltool/errors.py
index 59203d8..6bf187c 100644
--- a/cwltool/errors.py
+++ b/cwltool/errors.py
@@ -1,2 +1,5 @@
 class WorkflowException(Exception):
     pass
+
+class UnsupportedRequirement(WorkflowException):
+    pass
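
The new UnsupportedRequirement subclass lets callers tell "this document needs a feature the runner does not implement" apart from ordinary failures, while a bare WorkflowException handler still catches both. A small usage sketch (run_tool is a hypothetical stand-in, for illustration only):

    from cwltool.errors import WorkflowException, UnsupportedRequirement

    def run_tool():
        # hypothetical stand-in for any cwltool invocation
        raise UnsupportedRequirement("ExampleRequirement is not supported")

    try:
        run_tool()
    except UnsupportedRequirement as e:
        print("unsupported CWL feature: %s" % e)
    except WorkflowException as e:
        print("workflow failed: %s" % e)
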
diff --git a/cwltool/expression.py b/cwltool/expression.py
index bea6758..fd38cfe 100644
--- a/cwltool/expression.py
+++ b/cwltool/expression.py
@@ -1,97 +1,112 @@
-from . import docker
 import subprocess
 import json
-from .utils import aslist, get_feature
 import logging
 import os
-from .errors import WorkflowException
-import yaml
+import re
+
+from typing import Any, AnyStr, Union, Text, Dict, List
 import schema_salad.validate as validate
 import schema_salad.ref_resolver
+
+from .utils import aslist, get_feature
+from .errors import WorkflowException
 from . import sandboxjs
-import re
-from typing import Any, AnyStr, Union
+from . import docker
 
 _logger = logging.getLogger("cwltool")
 
 def jshead(engineConfig, rootvars):
-    # type: (List[unicode],Dict[str,str]) -> unicode
+    # type: (List[Text], Dict[Text, Any]) -> Text
     return u"\n".join(engineConfig + [u"var %s = %s;" % (k, json.dumps(v, indent=4)) for k, v in rootvars.items()])
 
-def exeval(ex, jobinput, requirements, outdir, tmpdir, context, pull_image):
-    # type: (Dict[str,Any], Dict[str,str], List[Dict[str, Any]], str, str, Any, bool) -> sandboxjs.JSON
-
-    if ex["engine"] == "https://w3id.org/cwl/cwl#JavascriptEngine":
-        engineConfig = []  # type: List[unicode]
-        for r in reversed(requirements):
-            if r["class"] == "ExpressionEngineRequirement" and r["id"] == "https://w3id.org/cwl/cwl#JavascriptEngine":
-                engineConfig = r.get("engineConfig", [])
-                break
-        rootvars = {
-            "inputs": jobinput,
-            "self": context,
-            "runtime": {
-                "tmpdir": tmpdir,
-                "outdir": outdir
-                }
-        }
-        return sandboxjs.execjs(ex["script"], jshead(engineConfig, rootvars))
-
-    for r in reversed(requirements):
-        if r["class"] == "ExpressionEngineRequirement" and r["id"] == ex["engine"]:
-            runtime = []  # type: List[str]
-
-            class DR(object):
-                def __init__(self):  # type: ()->None
-                    self.requirements = None  # type: List[None]
-                    self.hints = None  # type: List[None]
-            dr = DR()
-            dr.requirements = r.get("requirements", [])
-            dr.hints = r.get("hints", [])
-
-            (docker_req, docker_is_req) = get_feature(dr, "DockerRequirement")
-            img_id = None
-            if docker_req:
-                img_id = docker.get_from_requirements(docker_req, docker_is_req, pull_image)
-            if img_id:
-                runtime = ["docker", "run", "-i", "--rm", img_id]
-
-            inp = {
-                "script": ex["script"],
-                "engineConfig": r.get("engineConfig", []),
-                "job": jobinput,
-                "context": context,
-                "outdir": outdir,
-                "tmpdir": tmpdir,
-            }
-
-            _logger.debug(u"Invoking expression engine %s with %s",
-                          runtime + aslist(r["engineCommand"]),
-                                           json.dumps(inp, indent=4))
-
-            sp = subprocess.Popen(runtime + aslist(r["engineCommand"]),
-                             shell=False,
-                             close_fds=True,
-                             stdin=subprocess.PIPE,
-                             stdout=subprocess.PIPE)
-
-            (stdoutdata, stderrdata) = sp.communicate(json.dumps(inp) + "\n\n")
-            if sp.returncode != 0:
-                raise WorkflowException(u"Expression engine returned non-zero exit code on evaluation of\n%s" % json.dumps(inp, indent=4))
-
-            return json.loads(stdoutdata)
-
-    raise WorkflowException(u"Unknown expression engine '%s'" % ex["engine"])
 
 seg_symbol = r"""\w+"""
 seg_single = r"""\['([^']|\\')+'\]"""
 seg_double = r"""\["([^"]|\\")+"\]"""
-seg_index  = r"""\[[0-9]+\]"""
+seg_index = r"""\[[0-9]+\]"""
 segments = r"(\.%s|%s|%s|%s)" % (seg_symbol, seg_single, seg_double, seg_index)
 segment_re = re.compile(segments, flags=re.UNICODE)
-param_re = re.compile(r"\$\((%s)%s*\)" % (seg_symbol, segments), flags=re.UNICODE)
+param_re = re.compile(r"\((%s)%s*\)$" % (seg_symbol, segments), flags=re.UNICODE)
+
+JSON = Union[Dict[Any,Any], List[Any], Text, int, long, float, bool, None]
+
+class SubstitutionError(Exception):
+    pass
+
+def scanner(scan):  # type: (Text) -> List[int]
+    DEFAULT = 0
+    DOLLAR = 1
+    PAREN = 2
+    BRACE = 3
+    SINGLE_QUOTE = 4
+    DOUBLE_QUOTE = 5
+    BACKSLASH = 6
+
+    i = 0
+    stack = [DEFAULT]
+    start = 0
+    while i < len(scan):
+        state = stack[-1]
+        c = scan[i]
+
+        if state == DEFAULT:
+            if c == '$':
+                stack.append(DOLLAR)
+            elif c == '\\':
+                stack.append(BACKSLASH)
+        elif state == BACKSLASH:
+            stack.pop()
+            if stack[-1] == DEFAULT:
+                return [i-1, i+1]
+        elif state == DOLLAR:
+            if c == '(':
+                start = i-1
+                stack.append(PAREN)
+            elif c == '{':
+                start = i-1
+                stack.append(BRACE)
+            else:
+                stack.pop()
+        elif state == PAREN:
+            if c == '(':
+                stack.append(PAREN)
+            elif c == ')':
+                stack.pop()
+                if stack[-1] == DOLLAR:
+                    return [start, i+1]
+            elif c == "'":
+                stack.append(SINGLE_QUOTE)
+            elif c == '"':
+                stack.append(DOUBLE_QUOTE)
+        elif state == BRACE:
+            if c == '{':
+                stack.append(BRACE)
+            elif c == '}':
+                stack.pop()
+                if stack[-1] == DOLLAR:
+                    return [start, i+1]
+            elif c == "'":
+                stack.append(SINGLE_QUOTE)
+            elif c == '"':
+                stack.append(DOUBLE_QUOTE)
+        elif state == SINGLE_QUOTE:
+            if c == "'":
+                stack.pop()
+            elif c == '\\':
+                stack.append(BACKSLASH)
+        elif state == DOUBLE_QUOTE:
+            if c == '"':
+                stack.pop()
+            elif c == '\\':
+                stack.append(BACKSLASH)
+        i += 1
+
+    if len(stack) > 1:
+        raise SubstitutionError("Substitution error, unfinished block starting at position {}: {}".format(start, scan[start:]))
+    else:
+        return None
 
-def next_seg(remain, obj):  # type: (str,Any)->str
+def next_seg(remain, obj):  # type: (Text, Any)->Text
     if remain:
         m = segment_re.match(remain)
         if m.group(0)[0] == '.':
@@ -105,46 +120,69 @@ def next_seg(remain, obj):  # type: (str,Any)->str
     else:
         return obj
 
-
-def param_interpolate(ex, obj, strip=True):
-    # type: (str, Dict[Any,Any], bool) -> Union[str, unicode]
-    m = param_re.search(ex)
+def evaluator(ex, jslib, obj, fullJS=False, timeout=None):
+    # type: (Text, Text, Dict[Text, Any], bool, int) -> JSON
+    m = param_re.match(ex)
     if m:
-        leaf = next_seg(m.group(0)[m.end(1) - m.start(0):-1], obj[m.group(1)])
-        if strip and len(ex.strip()) == len(m.group(0)):
-            return leaf
-        else:
-            leaf = json.dumps(leaf, sort_keys=True)
+        return next_seg(m.group(0)[m.end(1) - m.start(0):-1], obj[m.group(1)])
+    elif fullJS:
+        return sandboxjs.execjs(ex, jslib, timeout=timeout)
+    else:
+        raise sandboxjs.JavascriptException("Syntax error in parameter reference '%s', or Javascript code was used without specifying InlineJavascriptRequirement." % ex)
+
+def interpolate(scan, rootvars,
+                timeout=None, fullJS=None, jslib=""):
+    # type: (Text, Dict[Text, Any], int, bool, Union[str, Text]) -> JSON
+    scan = scan.strip()
+    parts = []
+    w = scanner(scan)
+    while w:
+        parts.append(scan[0:w[0]])
+
+        if scan[w[0]] == '$':
+            e = evaluator(scan[w[0]+1:w[1]], jslib, rootvars, fullJS=fullJS,
+                          timeout=timeout)
+            if w[0] == 0 and w[1] == len(scan):
+                return e
+            leaf = json.dumps(e, sort_keys=True)
             if leaf[0] == '"':
                 leaf = leaf[1:-1]
-            return ex[0:m.start(0)] + leaf + param_interpolate(ex[m.end(0):], obj, False)
-    else:
-        if "$(" in ex or "${" in ex:
-            _logger.warn(u"Warning possible workflow bug: found '$(' or '${' in '%s' but did not match valid parameter reference and InlineJavascriptRequirement not specified.", ex)
-        return ex
+            parts.append(leaf)
+        elif scan[w[0]] == '\\':
+            e = scan[w[1]-1]
+            parts.append(e)
 
+        scan = scan[w[1]:]
+        w = scanner(scan)
+    parts.append(scan)
+    return ''.join(parts)
 
 def do_eval(ex, jobinput, requirements, outdir, tmpdir, resources,
             context=None, pull_image=True, timeout=None):
-    # type: (Any, Dict[str,str], List[Dict[str,Any]], str, str, Dict[str, Union[int, str]], Any, bool, int) -> Any
+    # type: (Union[dict, AnyStr], Dict[Text, Union[Dict, List, Text]], List[Dict[Text, Any]], Text, Text, Dict[Text, Union[int, Text]], Any, bool, int) -> Any
 
     runtime = resources.copy()
     runtime["tmpdir"] = tmpdir
     runtime["outdir"] = outdir
 
     rootvars = {
-            "inputs": jobinput,
-            "self": context,
-            "runtime": runtime
-        }
-
-    if isinstance(ex, dict) and "engine" in ex and "script" in ex:
-        return exeval(ex, jobinput, requirements, outdir, tmpdir, context, pull_image)
-    if isinstance(ex, basestring):
-        for r in requirements:
+        u"inputs": jobinput,
+        u"self": context,
+        u"runtime": runtime }
+
+    if isinstance(ex, (str, Text)):
+        fullJS = False
+        jslib = u""
+        for r in reversed(requirements):
             if r["class"] == "InlineJavascriptRequirement":
-                return sandboxjs.interpolate(str(ex), jshead(r.get("expressionLib", []), rootvars),
-                                             timeout=timeout)
-        return param_interpolate(str(ex), rootvars)
+                fullJS = True
+                jslib = jshead(r.get("expressionLib", []), rootvars)
+                break
+
+        return interpolate(ex,
+                           rootvars,
+                           timeout=timeout,
+                           fullJS=fullJS,
+                           jslib=jslib)
     else:
         return ex
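
The replacement expression machinery above is engine-free: scanner() finds the offsets of the next $(...) or ${...} block (or backslash escape), evaluator() resolves plain parameter references itself and only calls into sandboxjs when InlineJavascriptRequirement enables full JS, and interpolate() splices results back into the string, returning the raw value when the expression spans the whole input. A short usage sketch of those semantics:

    from cwltool.expression import interpolate

    rootvars = {u"inputs": {u"threads": 4}, u"self": None, u"runtime": {}}

    # Whole-string reference: the evaluated value comes back as-is.
    interpolate(u"$(inputs.threads)", rootvars)     # -> 4

    # Embedded reference: evaluated, JSON-encoded and spliced as text.
    interpolate(u"-t $(inputs.threads)", rootvars)  # -> u"-t 4"
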
diff --git a/cwltool/factory.py b/cwltool/factory.py
index 312732a..1b25a0b 100644
--- a/cwltool/factory.py
+++ b/cwltool/factory.py
@@ -1,8 +1,9 @@
 from . import main
+from . import load_tool
 from . import workflow
 import os
 from .process import Process
-from typing import Any, Union
+from typing import Any, Text, Union
 from typing import Callable as tCallable
 import argparse
 
@@ -11,20 +12,24 @@ class Callable(object):
         self.t = t
         self.factory = factory
 
-    def __call__(self, **kwargs):  # type: (**Any) -> Union[str,Dict[str,str]]
-        return self.factory.executor(self.t, kwargs, os.getcwd(), None, **self.factory.execkwargs)
+    def __call__(self, **kwargs):
+        # type: (**Any) -> Union[Text, Dict[Text, Text]]
+        execkwargs = self.factory.execkwargs.copy()
+        execkwargs["basedir"] = os.getcwd()
+        return self.factory.executor(self.t, kwargs, **execkwargs)
 
 class Factory(object):
     def __init__(self, makeTool=workflow.defaultMakeTool,
                  executor=main.single_job_executor,
                  **execkwargs):
-        # type: (tCallable[[Dict[str, Any], Any], Process],tCallable[...,Union[str,Dict[str,str]]], **Any) -> None
+        # type: (tCallable[[Dict[Text, Any], Any], Process],tCallable[...,Union[Text,Dict[Text,Text]]], **Any) -> None
         self.makeTool = makeTool
         self.executor = executor
         self.execkwargs = execkwargs
 
-    def make(self, cwl, frag=None, debug=False):
-        l = main.load_tool(cwl, False, True, self.makeTool, debug, urifrag=frag)
-        if type(l) == int:
+    def make(self, cwl):
+        """Instantiate a CWL object from a CWl document."""
+        load = load_tool.load_tool(cwl, self.makeTool)
+        if isinstance(load, int):
             raise Exception("Error loading tool")
-        return Callable(l, self)
+        return Callable(load, self)
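
Factory.make() now loads documents through the new load_tool module, and the returned Callable injects the current working directory as basedir at call time. A usage sketch (echo.cwl is a placeholder document path):

    import cwltool.factory

    fac = cwltool.factory.Factory()
    echo = fac.make("echo.cwl")        # placeholder path to a CWL tool
    result = echo(message="hello")     # kwargs become the job order
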
diff --git a/cwltool/job.py b/cwltool/job.py
index 9a3f78b..40d369b 100644
--- a/cwltool/job.py
+++ b/cwltool/job.py
@@ -4,20 +4,21 @@ import os
 import tempfile
 import glob
 import json
-import yaml
 import logging
 import sys
 import requests
 from . import docker
-from .process import get_feature, empty_subtree
+from .process import get_feature, empty_subtree, stageFiles
 from .errors import WorkflowException
 import shutil
 import stat
 import re
 import shellescape
+import string
 from .docker_uid import docker_vm_uid
 from .builder import Builder
-from typing import Union, Iterable, Callable, Any, Mapping, IO, cast, Tuple
+from typing import (Any, Callable, Union, Iterable, Mapping, MutableMapping,
+        IO, cast, Text, Tuple)
 from .pathmapper import PathMapper
 import functools
 
@@ -25,6 +26,58 @@ _logger = logging.getLogger("cwltool")
 
 needs_shell_quoting_re = re.compile(r"""(^$|[\s|&;()<>\'"$@])""")
 
+FORCE_SHELLED_POPEN = os.getenv("CWLTOOL_FORCE_SHELL_POPEN", "0") == "1"
+
+SHELL_COMMAND_TEMPLATE = """#!/bin/bash
+python "run_job.py" "job.json"
+"""
+
+PYTHON_RUN_SCRIPT = """
+import json
+import sys
+import subprocess
+
+with open(sys.argv[1], "r") as f:
+    popen_description = json.load(f)
+    commands = popen_description["commands"]
+    cwd = popen_description["cwd"]
+    env = popen_description["env"]
+    stdin_path = popen_description["stdin_path"]
+    stdout_path = popen_description["stdout_path"]
+    stderr_path = popen_description["stderr_path"]
+    if stdin_path is not None:
+        stdin = open(stdin_path, "rb")
+    else:
+        stdin = subprocess.PIPE
+    if stdout_path is not None:
+        stdout = open(stdout_path, "wb")
+    else:
+        stdout = sys.stderr
+    if stderr_path is not None:
+        stderr = open(stderr_path, "wb")
+    else:
+        stderr = sys.stderr
+    sp = subprocess.Popen(commands,
+                          shell=False,
+                          close_fds=True,
+                          stdin=stdin,
+                          stdout=stdout,
+                          stderr=stderr,
+                          env=env,
+                          cwd=cwd)
+    if sp.stdin:
+        sp.stdin.close()
+    rcode = sp.wait()
+    if isinstance(stdin, file):
+        stdin.close()
+    if stdout is not sys.stderr:
+        stdout.close()
+    if stderr is not sys.stderr:
+        stderr.close()
+    sys.exit(rcode)
+"""
+
+
 def deref_links(outputs):  # type: (Any) -> None
     if isinstance(outputs, dict):
         if outputs.get("class") == "File":
@@ -42,46 +95,54 @@ class CommandLineJob(object):
 
     def __init__(self):  # type: () -> None
         self.builder = None  # type: Builder
-        self.joborder = None  # type: Dict[str,str]
-        self.stdin = None  # type: str
-        self.stdout = None  # type: str
+        self.joborder = None  # type: Dict[Text, Union[Dict[Text, Any], List, Text]]
+        self.stdin = None  # type: Text
+        self.stderr = None  # type: Text
+        self.stdout = None  # type: Text
         self.successCodes = None  # type: Iterable[int]
         self.temporaryFailCodes = None  # type: Iterable[int]
         self.permanentFailCodes = None  # type: Iterable[int]
-        self.requirements = None  # type: List[Dict[str, str]]
-        self.hints = None  # type: Dict[str,str]
-        self.name = None  # type: unicode
-        self.command_line = None  # type: List[unicode]
+        self.requirements = None  # type: List[Dict[Text, Text]]
+        self.hints = None  # type: Dict[Text,Text]
+        self.name = None  # type: Text
+        self.command_line = None  # type: List[Text]
         self.pathmapper = None  # type: PathMapper
-        self.collect_outputs = None  # type: Union[Callable[[Any], Any],functools.partial[Any]]
+        self.collect_outputs = None  # type: Union[Callable[[Any], Any], functools.partial[Any]]
         self.output_callback = None  # type: Callable[[Any, Any], Any]
-        self.outdir = None  # type: str
-        self.tmpdir = None  # type: str
-        self.environment = None  # type: Dict[str,str]
-        self.generatefiles = None  # type: Dict[str,Union[Dict[str,str],str]]
+        self.outdir = None  # type: Text
+        self.tmpdir = None  # type: Text
+        self.environment = None  # type: MutableMapping[Text, Text]
+        self.generatefiles = None  # type: Dict[Text, Union[List[Dict[Text, Text]], Dict[Text, Text], Text]]
+        self.stagedir = None  # type: Text
 
     def run(self, dry_run=False, pull_image=True, rm_container=True,
-            rm_tmpdir=True, move_outputs=True, **kwargs):
-        # type: (bool, bool, bool, bool, bool, **Any) -> Union[Tuple[str,Dict[None,None]],None]
+            rm_tmpdir=True, move_outputs="move", **kwargs):
+        # type: (bool, bool, bool, bool, Text, **Any) -> Union[Tuple[Text, Dict[None, None]], None]
         if not os.path.exists(self.outdir):
             os.makedirs(self.outdir)
 
         #with open(os.path.join(outdir, "cwl.input.json"), "w") as fp:
         #    json.dump(self.joborder, fp)
 
-        runtime = []  # type: List[unicode]
-        env = {"TMPDIR": self.tmpdir}  # type: Mapping[str,str]
+        runtime = []  # type: List[Text]
 
         (docker_req, docker_is_req) = get_feature(self, "DockerRequirement")
 
-        for f in self.pathmapper.files():
-            if not os.path.isfile(self.pathmapper.mapper(f)[0]):
-                raise WorkflowException(u"Required input file %s not found or is not a regular file." % self.pathmapper.mapper(f)[0])
+        for knownfile in self.pathmapper.files():
+            p = self.pathmapper.mapper(knownfile)
+            if p.type == "File" and not os.path.isfile(p[0]):
+                raise WorkflowException(
+                    u"Input file %s (at %s) not found or is not a regular "
+                    "file." % (knownfile, self.pathmapper.mapper(knownfile)[0]))
 
         img_id = None
+        env = None  # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
         if docker_req and kwargs.get("use_container") is not False:
             env = os.environ
             img_id = docker.get_from_requirements(docker_req, docker_is_req, pull_image)
+        elif kwargs.get("default_container", None) is not None:
+            env = os.environ
+            img_id = kwargs.get("default_container")
 
         if docker_is_req and img_id is None:
             raise WorkflowException("Docker is required for running this tool.")
@@ -90,28 +151,41 @@ class CommandLineJob(object):
             runtime = ["docker", "run", "-i"]
             for src in self.pathmapper.files():
                 vol = self.pathmapper.mapper(src)
-                runtime.append(u"--volume=%s:%s:ro" % vol)
-            runtime.append(u"--volume=%s:%s:rw" % (os.path.abspath(self.outdir), "/var/spool/cwl"))
-            runtime.append(u"--volume=%s:%s:rw" % (os.path.abspath(self.tmpdir), "/tmp"))
-            runtime.append(u"--workdir=%s" % ("/var/spool/cwl"))
+                if vol.type == "File":
+                    runtime.append(u"--volume=%s:%s:ro" % (vol.resolved, vol.target))
+                if vol.type == "CreateFile":
+                    createtmp = os.path.join(self.stagedir, os.path.basename(vol.target))
+                    with open(createtmp, "w") as f:
+                        f.write(vol.resolved.encode("utf-8"))
+                    runtime.append(u"--volume=%s:%s:ro" % (createtmp, vol.target))
+            runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.outdir), self.builder.outdir))
+            runtime.append(u"--volume=%s:%s:rw" % (os.path.realpath(self.tmpdir), "/tmp"))
+            runtime.append(u"--workdir=%s" % (self.builder.outdir))
             runtime.append("--read-only=true")
-            if (kwargs.get("enable_net", None) is None and
-                    kwargs.get("custom_net", None) is not None):
-                runtime.append("--net=none")
-            elif kwargs.get("custom_net", None) is not None:
+
+            if kwargs.get("custom_net", None) is not None:
                 runtime.append("--net={0}".format(kwargs.get("custom_net")))
+            elif kwargs.get("disable_net", None):
+                runtime.append("--net=none")
 
             if self.stdout:
                 runtime.append("--log-driver=none")
 
             euid = docker_vm_uid() or os.geteuid()
-            runtime.append(u"--user=%s" % (euid))
+
+            if kwargs.get("no_match_user",None) is False:
+                runtime.append(u"--user=%s" % (euid))
 
             if rm_container:
                 runtime.append("--rm")
 
             runtime.append("--env=TMPDIR=/tmp")
 
+            # The spec currently says "HOME must be set to the designated
+            # output directory", but it may change to the designated temp
+            # directory in the future.
+            # runtime.append("--env=HOME=/tmp")
+            runtime.append("--env=HOME=%s" % self.builder.outdir)
+
             for t,v in self.environment.items():
                 runtime.append(u"--env=%s=%s" % (t, v))
 
@@ -120,15 +194,17 @@ class CommandLineJob(object):
             env = self.environment
             if not os.path.exists(self.tmpdir):
                 os.makedirs(self.tmpdir)
-            env["TMPDIR"] = self.tmpdir
             vars_to_preserve = kwargs.get("preserve_environment")
+            if kwargs.get("preserve_entire_environment"):
+                vars_to_preserve = os.environ
             if vars_to_preserve is not None:
                 for key, value in os.environ.items():
                     if key in vars_to_preserve and key not in env:
                         env[key] = value
+            env["HOME"] = self.outdir
+            env["TMPDIR"] = self.tmpdir
 
-        stdin = None  # type: Union[IO[Any],int]
-        stdout = None  # type: IO[Any]
+            stageFiles(self.pathmapper, os.symlink)
 
         scr, _ = get_feature(self, "ShellCommandRequirement")
 
@@ -137,65 +213,65 @@ class CommandLineJob(object):
         else:
             shouldquote = needs_shell_quoting_re.search
 
-        _logger.info(u"[job %s] %s$ %s%s%s",
+        _logger.info(u"[job %s] %s$ %s%s%s%s",
                      self.name,
                      self.outdir,
-                     " ".join([shellescape.quote(str(arg)) if shouldquote(str(arg)) else str(arg) for arg in (runtime + self.command_line)]),
-                     u' < %s' % (self.stdin) if self.stdin else '',
-                     u' > %s' % os.path.join(self.outdir, self.stdout) if self.stdout else '')
+                     " \\\n    ".join([shellescape.quote(Text(arg)) if shouldquote(Text(arg)) else Text(arg) for arg in (runtime + self.command_line)]),
+                     u' < %s' % self.stdin if self.stdin else '',
+                     u' > %s' % os.path.join(self.outdir, self.stdout) if self.stdout else '',
+                     u' 2> %s' % os.path.join(self.outdir, self.stderr) if self.stderr else '')
 
         if dry_run:
             return (self.outdir, {})
 
-        outputs = {}  # type: Dict[str,str]
+        outputs = {}  # type: Dict[Text,Text]
 
         try:
-            for t in self.generatefiles:
-                entry = self.generatefiles[t]
-                if isinstance(entry, dict):
-                    src = entry["path"]
-                    dst = os.path.join(self.outdir, t)
-                    if os.path.dirname(self.pathmapper.reversemap(src)[1]) != self.outdir:
-                        _logger.debug(u"symlinking %s to %s", dst, src)
-                        os.symlink(src, dst)
-                elif isinstance(entry, str):
-                    with open(os.path.join(self.outdir, t), "w") as fout:
-                        fout.write(entry)
-                else:
-                    raise Exception("Unhandled type")
-
+            if self.generatefiles["listing"]:
+                generatemapper = PathMapper([self.generatefiles], self.outdir,
+                                            self.outdir, separateDirs=False)
+                _logger.debug(u"[job %s] initial work dir %s", self.name,
+                              json.dumps({p: generatemapper.mapper(p) for p in generatemapper.files()}, indent=4))
+
+                def linkoutdir(src, tgt):
+                    # Need to make the link to the staged file (may be inside
+                    # the container)
+                    for _, item in self.pathmapper.items():
+                        if src == item.resolved:
+                            os.symlink(item.target, tgt)
+                            break
+                stageFiles(generatemapper, linkoutdir)
+
+            stdin_path = None
             if self.stdin:
-                stdin = open(self.pathmapper.mapper(self.stdin)[0], "rb")
-            else:
-                stdin = subprocess.PIPE
+                stdin_path = self.pathmapper.reversemap(self.stdin)[1]
+
+            stderr_path = None
+            if self.stderr:
+                abserr = os.path.join(self.outdir, self.stderr)
+                dnerr = os.path.dirname(abserr)
+                if dnerr and not os.path.exists(dnerr):
+                    os.makedirs(dnerr)
+                stderr_path = abserr
 
+            stdout_path = None
             if self.stdout:
                 absout = os.path.join(self.outdir, self.stdout)
                 dn = os.path.dirname(absout)
                 if dn and not os.path.exists(dn):
                     os.makedirs(dn)
-                stdout = open(absout, "wb")
-            else:
-                stdout = sys.stderr
-
-            sp = subprocess.Popen([str(x) for x in runtime + self.command_line],
-                                  shell=False,
-                                  close_fds=True,
-                                  stdin=stdin,
-                                  stdout=stdout,
-                                  env=env,
-                                  cwd=self.outdir)
-
-            if sp.stdin:
-                sp.stdin.close()
-
-            rcode = sp.wait()
-
-            if isinstance(stdin, file):
-                stdin.close()
-
-            if stdout is not sys.stderr:
-                stdout.close()
+                stdout_path = absout
+
+            build_job_script = self.builder.build_job_script  # type: Callable[[List[str]], Text]
+            rcode = _job_popen(
+                [Text(x).encode('utf-8') for x in runtime + self.command_line],
+                stdin_path=stdin_path,
+                stdout_path=stdout_path,
+                stderr_path=stderr_path,
+                env=env,
+                cwd=self.outdir,
+                build_job_script=build_job_script,
+            )
 
             if self.successCodes and rcode in self.successCodes:
                 processStatus = "success"
@@ -208,13 +284,14 @@ class CommandLineJob(object):
             else:
                 processStatus = "permanentFail"
 
-            for t in self.generatefiles:
-                if isinstance(self.generatefiles[t], dict):
-                    src = cast(dict, self.generatefiles[t])["path"]
-                    dst = os.path.join(self.outdir, t)
-                    if os.path.dirname(self.pathmapper.reversemap(src)[1]) != self.outdir:
-                        os.remove(dst)
-                        os.symlink(self.pathmapper.reversemap(src)[1], dst)
+            if self.generatefiles["listing"]:
+                def linkoutdir(src, tgt):
+                    # Need to make the link to the staged file (may be inside
+                    # the container)
+                    if os.path.islink(tgt):
+                        os.remove(tgt)
+                        os.symlink(src, tgt)
+                stageFiles(generatemapper, linkoutdir, ignoreWritable=True)
 
             outputs = self.collect_outputs(self.outdir)
 
@@ -238,14 +315,128 @@ class CommandLineJob(object):
             _logger.warn(u"[job %s] completed %s", self.name, processStatus)
         else:
             _logger.debug(u"[job %s] completed %s", self.name, processStatus)
-        _logger.debug(u"[job %s] %s", self.name, json.dumps(outputs, indent=4))
+
+        if _logger.isEnabledFor(logging.DEBUG):
+            _logger.debug(u"[job %s] %s", self.name, json.dumps(outputs, indent=4))
 
         self.output_callback(outputs, processStatus)
 
+        if self.stagedir and os.path.exists(self.stagedir):
+            _logger.debug(u"[job %s] Removing input staging directory %s", self.name, self.stagedir)
+            shutil.rmtree(self.stagedir, True)
+
         if rm_tmpdir:
             _logger.debug(u"[job %s] Removing temporary directory %s", self.name, self.tmpdir)
             shutil.rmtree(self.tmpdir, True)
 
-        if move_outputs and empty_subtree(self.outdir):
+        if move_outputs == "move" and empty_subtree(self.outdir):
             _logger.debug(u"[job %s] Removing empty output directory %s", self.name, self.outdir)
             shutil.rmtree(self.outdir, True)
+
+
+def _job_popen(
+    commands,  # type: List[str]
+    stdin_path,  # type: Text
+    stdout_path,  # type: Text
+    stderr_path,  # type: Text
+    env,  # type: Union[MutableMapping[Text, Text], MutableMapping[str, str]]
+    cwd,  # type: Text
+    job_dir=None,  # type: Text
+    build_job_script=None,  # type: Callable[[List[str]], Text]
+):
+    # type: (...) -> int
+
+    job_script_contents = None  # type: Text
+    if build_job_script:
+        job_script_contents = build_job_script(commands)
+
+    if not job_script_contents and not FORCE_SHELLED_POPEN:
+
+        stdin = None  # type: Union[IO[Any], int]
+        stderr = None  # type: IO[Any]
+        stdout = None  # type: IO[Any]
+
+        if stdin_path is not None:
+            stdin = open(stdin_path, "rb")
+        else:
+            stdin = subprocess.PIPE
+
+        if stdout_path is not None:
+            stdout = open(stdout_path, "wb")
+        else:
+            stdout = sys.stderr
+
+        if stderr_path is not None:
+            stderr = open(stderr_path, "wb")
+        else:
+            stderr = sys.stderr
+
+        sp = subprocess.Popen(commands,
+                              shell=False,
+                              close_fds=True,
+                              stdin=stdin,
+                              stdout=stdout,
+                              stderr=stderr,
+                              env=env,
+                              cwd=cwd)
+
+        if sp.stdin:
+            sp.stdin.close()
+
+        rcode = sp.wait()
+
+        if isinstance(stdin, file):
+            stdin.close()
+
+        if stdout is not sys.stderr:
+            stdout.close()
+
+        if stderr is not sys.stderr:
+            stderr.close()
+
+        return rcode
+    else:
+        if job_dir is None:
+            job_dir = tempfile.mkdtemp(prefix="cwltooljob")
+
+        if not job_script_contents:
+            job_script_contents = SHELL_COMMAND_TEMPLATE
+
+        env_copy = {}
+        for key in env:
+            key = key.encode("utf-8")
+            env_copy[key] = env[key]
+
+        job_description = dict(
+            commands=commands,
+            cwd=cwd,
+            env=env_copy,
+            stdout_path=stdout_path,
+            stderr_path=stderr_path,
+            stdin_path=stdin_path,
+        )
+        with open(os.path.join(job_dir, "job.json"), "w") as f:
+            json.dump(job_description, f)
+        try:
+            job_script = os.path.join(job_dir, "run_job.bash")
+            with open(job_script, "w") as f:
+                f.write(job_script_contents)
+            job_run = os.path.join(job_dir, "run_job.py")
+            with open(job_run, "w") as f:
+                f.write(PYTHON_RUN_SCRIPT)
+            sp = subprocess.Popen(
+                ["bash", job_script.encode("utf-8")],
+                shell=False,
+                cwd=job_dir,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                stdin=subprocess.PIPE,
+            )
+            if sp.stdin:
+                sp.stdin.close()
+
+            rcode = sp.wait()
+
+            return rcode
+        finally:
+            shutil.rmtree(job_dir)
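
For reference, the direct (non-job-script) branch of _job_popen reduces to
the standalone sketch below; the helper name run_with_redirects is
illustrative and not part of cwltool:

    import subprocess
    import sys

    def run_with_redirects(commands, stdin_path=None, stdout_path=None,
                           stderr_path=None, env=None, cwd=None):
        # Open redirection targets as _job_popen does: stdin falls back to
        # a pipe that is closed immediately, stdout/stderr to sys.stderr.
        stdin = open(stdin_path, "rb") if stdin_path else subprocess.PIPE
        stdout = open(stdout_path, "wb") if stdout_path else sys.stderr
        stderr = open(stderr_path, "wb") if stderr_path else sys.stderr
        try:
            sp = subprocess.Popen(commands, shell=False, close_fds=True,
                                  stdin=stdin, stdout=stdout, stderr=stderr,
                                  env=env, cwd=cwd)
            if sp.stdin:
                sp.stdin.close()
            return sp.wait()
        finally:
            # Close only the handles this helper opened itself.
            if stdin is not subprocess.PIPE:
                stdin.close()
            if stdout is not sys.stderr:
                stdout.close()
            if stderr is not sys.stderr:
                stderr.close()

    # e.g.: rcode = run_with_redirects(["ls", "-l"], stdout_path="out.txt")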
diff --git a/cwltool/load_tool.py b/cwltool/load_tool.py
new file mode 100644
index 0000000..2f3d887
--- /dev/null
+++ b/cwltool/load_tool.py
@@ -0,0 +1,250 @@
+# pylint: disable=unused-import
+"""Loads a CWL document."""
+
+import os
+import uuid
+import logging
+import re
+import urlparse
+
+from schema_salad.ref_resolver import Loader, Fetcher
+import schema_salad.validate as validate
+from schema_salad.validate import ValidationException
+import schema_salad.schema as schema
+import requests
+
+from typing import Any, AnyStr, Callable, cast, Dict, Text, Tuple, Union
+
+from avro.schema import Names
+
+from . import update
+from . import process
+from .process import Process, shortname
+from .errors import WorkflowException
+
+_logger = logging.getLogger("cwltool")
+
+def fetch_document(argsworkflow,   # type: Union[Text, dict[Text, Any]]
+                   resolver=None,  # type: Callable[[Loader, Union[Text, dict[Text, Any]]], Text]
+                   fetcher_constructor=None  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+                   ):
+    # type: (...) -> Tuple[Loader, Dict[Text, Any], Text]
+    """Retrieve a CWL document."""
+
+    document_loader = Loader({"cwl": "https://w3id.org/cwl/cwl#", "id": "@id"},
+                             fetcher_constructor=fetcher_constructor)
+
+    uri = None  # type: Text
+    workflowobj = None  # type: Dict[Text, Any]
+    if isinstance(argsworkflow, basestring):
+        split = urlparse.urlsplit(argsworkflow)
+        if split.scheme:
+            uri = argsworkflow
+        elif os.path.exists(os.path.abspath(argsworkflow)):
+            uri = "file://" + os.path.abspath(argsworkflow)
+        elif resolver:
+            uri = resolver(document_loader, argsworkflow)
+
+        if uri is None:
+            raise ValidationException("Not found: '%s'" % argsworkflow)
+
+        if argsworkflow != uri:
+            _logger.info("Resolved '%s' to '%s'", argsworkflow, uri)
+
+        fileuri = urlparse.urldefrag(uri)[0]
+        workflowobj = document_loader.fetch(fileuri)
+    elif isinstance(argsworkflow, dict):
+        workflowobj = argsworkflow
+        uri = "#" + Text(id(argsworkflow))
+    else:
+        raise ValidationException("Must be URI or object: '%s'" % argsworkflow)
+
+    return document_loader, workflowobj, uri
+
+def _convert_stdstreams_to_files(workflowobj):
+    # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]]) -> None
+
+    if isinstance(workflowobj, dict):
+        if ('class' in workflowobj
+                and workflowobj['class'] == 'CommandLineTool'):
+            if 'outputs' in workflowobj:
+                for out in workflowobj['outputs']:
+                    for streamtype in ['stdout', 'stderr']:
+                        if out['type'] == streamtype:
+                            if 'outputBinding' in out:
+                                raise ValidationException(
+                                    "Not allowed to specify outputBinding when"
+                                    " using %s shortcut." % streamtype)
+                            if streamtype in workflowobj:
+                                filename = workflowobj[streamtype]
+                            else:
+                                filename = Text(uuid.uuid4())
+                                workflowobj[streamtype] = filename
+                            out['type'] = 'File'
+                            out['outputBinding'] = {'glob': filename}
+            if 'inputs' in workflowobj:
+                for inp in workflowobj['inputs']:
+                    if inp['type'] == 'stdin':
+                        if 'inputBinding' in inp:
+                            raise ValidationException(
+                                "Not allowed to specify inputBinding when"
+                                " using stdin shortcut.")
+                        if 'stdin' in workflowobj:
+                            raise ValidationException(
+                                "Not allowed to specify stdin path when"
+                                " using stdin type shortcut.")
+                        else:
+                            workflowobj['stdin'] = \
+                                "$(inputs.%s.path)" % \
+                                inp['id'].rpartition('#')[2]
+                            inp['type'] = 'File'
+        else:
+            for entry in workflowobj.itervalues():
+                _convert_stdstreams_to_files(entry)
+    if isinstance(workflowobj, list):
+        for entry in workflowobj:
+            _convert_stdstreams_to_files(entry)
+
+def validate_document(document_loader,   # type: Loader
+                      workflowobj,       # type: Dict[Text, Any]
+                      uri,               # type: Text
+                      enable_dev=False,  # type: bool
+                      strict=True,       # type: bool
+                      preprocess_only=False,    # type: bool
+                      fetcher_constructor=None  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+                      ):
+    # type: (...) -> Tuple[Loader, Names, Union[Dict[Text, Any], List[Dict[Text, Any]]], Dict[Text, Any], Text]
+    """Validate a CWL document."""
+
+    if isinstance(workflowobj, list):
+        workflowobj = {
+            "$graph": workflowobj
+        }
+
+    if not isinstance(workflowobj, dict):
+        raise ValueError("workflowobj must be a dict")
+
+    jobobj = None
+    if "cwl:tool" in workflowobj:
+        jobobj, _ = document_loader.resolve_all(workflowobj, uri)
+        uri = urlparse.urljoin(uri, workflowobj["https://w3id.org/cwl/cwl#tool"])
+        del cast(dict, jobobj)["https://w3id.org/cwl/cwl#tool"]
+        workflowobj = fetch_document(uri, fetcher_constructor=fetcher_constructor)[1]
+
+    fileuri = urlparse.urldefrag(uri)[0]
+
+    if "cwlVersion" in workflowobj:
+        if not isinstance(workflowobj["cwlVersion"], (str, Text)):
+            raise Exception("'cwlVersion' must be a string, got %s" % type(workflowobj["cwlVersion"]))
+        workflowobj["cwlVersion"] = re.sub(
+            r"^(?:cwl:|https://w3id.org/cwl/cwl#)", "",
+            workflowobj["cwlVersion"])
+    else:
+        _logger.warn("No cwlVersion found, treating this file as draft-2.")
+        workflowobj["cwlVersion"] = "draft-2"
+
+    if workflowobj["cwlVersion"] == "draft-2":
+        workflowobj = update._draft2toDraft3dev1(
+            workflowobj, document_loader, uri, update_steps=False)
+        if "@graph" in workflowobj:
+            workflowobj["$graph"] = workflowobj["@graph"]
+            del workflowobj["@graph"]
+
+    (sch_document_loader, avsc_names) = \
+        process.get_schema(workflowobj["cwlVersion"])[:2]
+
+    if isinstance(avsc_names, Exception):
+        raise avsc_names
+
+    document_loader = Loader(sch_document_loader.ctx,
+                             schemagraph=sch_document_loader.graph,
+                             idx=document_loader.idx,
+                             cache=sch_document_loader.cache,
+                             fetcher_constructor=fetcher_constructor)
+
+    workflowobj["id"] = fileuri
+    processobj, metadata = document_loader.resolve_all(workflowobj, fileuri)
+    if not isinstance(processobj, (dict, list)):
+        raise ValidationException("Workflow must be a dict or list.")
+
+    if not metadata:
+        if not isinstance(processobj, dict):
+            raise ValidationException("Draft-2 workflows must be a dict.")
+        metadata = {"$namespaces": processobj.get("$namespaces", {}),
+                   "$schemas": processobj.get("$schemas", []),
+                   "cwlVersion": processobj["cwlVersion"]}
+
+    _convert_stdstreams_to_files(workflowobj)
+
+    if preprocess_only:
+        return document_loader, avsc_names, processobj, metadata, uri
+
+    schema.validate_doc(avsc_names, processobj, document_loader, strict)
+
+    if metadata.get("cwlVersion") != update.LATEST:
+        processobj = update.update(
+            processobj, document_loader, fileuri, enable_dev, metadata)
+
+    if jobobj:
+        metadata[u"cwl:defaults"] = jobobj
+
+    return document_loader, avsc_names, processobj, metadata, uri
+
+
+def make_tool(document_loader,  # type: Loader
+              avsc_names,       # type: Names
+              metadata,         # type: Dict[Text, Any]
+              uri,              # type: Text
+              makeTool,         # type: Callable[..., Process]
+              kwargs            # type: dict
+              ):
+    # type: (...) -> Process
+    """Make a Python CWL object."""
+    resolveduri = document_loader.resolve_ref(uri)[0]
+
+    if isinstance(resolveduri, list):
+        if len(resolveduri) == 1:
+            processobj = resolveduri[0]
+        else:
+            raise WorkflowException(
+                u"Tool file contains graph of multiple objects, must specify "
+                "one of #%s" % ", #".join(
+                    urlparse.urldefrag(i["id"])[1] for i in resolveduri
+                    if "id" in i))
+    elif isinstance(resolveduri, dict):
+        processobj = resolveduri
+    else:
+        raise Exception("Must resolve to list or dict")
+
+    kwargs = kwargs.copy()
+    kwargs.update({
+        "makeTool": makeTool,
+        "loader": document_loader,
+        "avsc_names": avsc_names,
+        "metadata": metadata
+    })
+    tool = makeTool(processobj, **kwargs)
+
+    if "cwl:defaults" in metadata:
+        jobobj = metadata["cwl:defaults"]
+        for inp in tool.tool["inputs"]:
+            if shortname(inp["id"]) in jobobj:
+                inp["default"] = jobobj[shortname(inp["id"])]
+
+    return tool
+
+
+def load_tool(argsworkflow,  # type: Union[Text, Dict[Text, Any]]
+              makeTool,      # type: Callable[..., Process]
+              kwargs=None,   # type: dict
+              enable_dev=False,  # type: bool
+              strict=True,       # type: bool
+              resolver=None,     # type: Callable[[Loader, Union[Text, dict[Text, Any]]], Text]
+              fetcher_constructor=None  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+              ):
+    # type: (...) -> Process
+
+    document_loader, workflowobj, uri = fetch_document(argsworkflow, resolver=resolver, fetcher_constructor=fetcher_constructor)
+    document_loader, avsc_names, processobj, metadata, uri = validate_document(
+        document_loader, workflowobj, uri, enable_dev=enable_dev,
+        strict=strict, fetcher_constructor=fetcher_constructor)
+    return make_tool(document_loader, avsc_names, metadata, uri,
+                     makeTool, kwargs if kwargs else {})
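
The three loading stages introduced here (fetch_document, validate_document,
make_tool) can also be driven directly; a minimal sketch, assuming a local
"hello.cwl" (a placeholder path) and the default tool factory:

    from cwltool.load_tool import fetch_document, validate_document, make_tool
    from cwltool import workflow

    # Stage 1: resolve the reference and fetch the raw document.
    document_loader, workflowobj, uri = fetch_document("hello.cwl")

    # Stage 2: cwlVersion handling, schema validation, document update.
    document_loader, avsc_names, processobj, metadata, uri = \
        validate_document(document_loader, workflowobj, uri, strict=True)

    # Stage 3: instantiate the Process object consumed by the executor.
    tool = make_tool(document_loader, avsc_names, metadata, uri,
                     workflow.defaultMakeTool, {})

This is exactly the sequence the load_tool convenience wrapper performs.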
diff --git a/cwltool/main.py b/cwltool/main.py
index 1c502c5..b5f74b8 100755
--- a/cwltool/main.py
+++ b/cwltool/main.py
@@ -1,30 +1,38 @@
 #!/usr/bin/env python
 
-from . import draft2tool
 import argparse
-from schema_salad.ref_resolver import Loader
-import string
 import json
 import os
 import sys
 import logging
-from . import workflow
-import schema_salad.validate as validate
+import copy
 import tempfile
-import schema_salad.jsonld_context
-import schema_salad.makedoc
-import yaml
+import ruamel.yaml as yaml
 import urlparse
-from . import process
-from . import job
-from .cwlrdf import printrdf, printdot
+import hashlib
 import pkg_resources  # part of setuptools
-from . import update
-from .process import shortname, Process
+import functools
+
 import rdflib
-import hashlib
-from .utils import aslist
-from typing import Union, Any, cast, Callable, Dict, Tuple, IO
+import requests
+from typing import (Union, Any, AnyStr, cast, Callable, Dict, Sequence, Text,
+    Tuple, Type, IO)
+
+from schema_salad.ref_resolver import Loader, Fetcher
+import schema_salad.validate as validate
+import schema_salad.jsonld_context
+import schema_salad.makedoc
+
+from . import workflow
+from .errors import WorkflowException, UnsupportedRequirement
+from .cwlrdf import printrdf, printdot
+from .process import shortname, Process, getListing, relocateOutputs, cleanIntermediate, scandeps, normalizeFilesDirs
+from .load_tool import fetch_document, validate_document, make_tool
+from . import draft2tool
+from .resolver import tool_resolver
+from .builder import adjustFileObjs, adjustDirObjs
+from .stdfsaccess import StdFsAccess
+from .pack import pack
 
 _logger = logging.getLogger("cwltool")
 
@@ -36,20 +44,25 @@ _logger.setLevel(logging.INFO)
 def arg_parser():  # type: () -> argparse.ArgumentParser
     parser = argparse.ArgumentParser(description='Reference executor for Common Workflow Language')
     parser.add_argument("--conformance-test", action="store_true")
-    parser.add_argument("--basedir", type=str)
-    parser.add_argument("--outdir", type=str, default=os.path.abspath('.'),
+    parser.add_argument("--basedir", type=Text)
+    parser.add_argument("--outdir", type=Text, default=os.path.abspath('.'),
                         help="Output directory, default current directory")
 
     parser.add_argument("--no-container", action="store_false", default=True,
                         help="Do not execute jobs in a Docker container, even when specified by the CommandLineTool",
                         dest="use_container")
 
-    parser.add_argument("--preserve-environment", type=str, nargs='+',
-                        help="Preserve specified environment variables when running CommandLineTools",
-                        metavar=("VAR1,VAR2"),
-                        default=("PATH",),
+    parser.add_argument("--preserve-environment", type=Text, action="append",
+                        help="Preserve specific environment variable when running CommandLineTools.  May be provided multiple times.",
+                        metavar="ENVVAR",
+                        default=["PATH"],
                         dest="preserve_environment")
 
+    parser.add_argument("--preserve-entire-environment", action="store_true",
+                        help="Preserve entire parent environment when running CommandLineTools.",
+                        default=False,
+                        dest="preserve_entire_environment")
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--rm-container", action="store_true", default=True,
                         help="Delete Docker container used by jobs after they exit (default)",
@@ -59,16 +72,16 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         default=True, help="Do not delete Docker container used by jobs after they exit",
                         dest="rm_container")
 
-    parser.add_argument("--tmpdir-prefix", type=str,
+    parser.add_argument("--tmpdir-prefix", type=Text,
                         help="Path prefix for temporary directories",
                         default="tmp")
 
     exgroup = parser.add_mutually_exclusive_group()
-    exgroup.add_argument("--tmp-outdir-prefix", type=str,
+    exgroup.add_argument("--tmp-outdir-prefix", type=Text,
                         help="Path prefix for intermediate output directories",
                         default="tmp")
 
-    exgroup.add_argument("--cachedir", type=str, default="",
+    exgroup.add_argument("--cachedir", type=Text, default="",
                         help="Directory to cache intermediate workflow outputs to avoid recomputing steps.")
 
     exgroup = parser.add_mutually_exclusive_group()
@@ -81,14 +94,18 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         dest="rm_tmpdir")
 
     exgroup = parser.add_mutually_exclusive_group()
-    exgroup.add_argument("--move-outputs", action="store_true", default=True,
+    exgroup.add_argument("--move-outputs", action="store_const", const="move", default="move",
                         help="Move output files to the workflow output directory and delete intermediate output directories (default).",
                         dest="move_outputs")
 
-    exgroup.add_argument("--leave-outputs", action="store_false", default=True,
+    exgroup.add_argument("--leave-outputs", action="store_const", const="leave", default="move",
                         help="Leave output files in intermediate output directories.",
                         dest="move_outputs")
 
+    exgroup.add_argument("--copy-outputs", action="store_const", const="copy", default="move",
+                         help="Copy output files to the workflow output directory, don't delete intermediate output directories.",
+                         dest="move_outputs")
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--enable-pull", default=True, action="store_true",
                         help="Try to pull Docker images", dest="enable_pull")
@@ -104,8 +121,9 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
                         default="turtle")
 
     parser.add_argument("--eval-timeout",
-                        help="Time to wait for a Javascript expression to evaluate before giving an error.",
-                        type=float)
+                        help="Time to wait for a Javascript expression to evaluate before giving an error, default 20s.",
+                        type=float,
+                        default=20)
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--print-rdf", action="store_true",
@@ -114,8 +132,8 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
     exgroup.add_argument("--print-pre", action="store_true", help="Print CWL document after preprocessing.")
     exgroup.add_argument("--print-deps", action="store_true", help="Print CWL document dependencies.")
     exgroup.add_argument("--print-input-deps", action="store_true", help="Print input object document dependencies.")
+    exgroup.add_argument("--pack", action="store_true", help="Combine components into single document and print.")
     exgroup.add_argument("--version", action="store_true", help="Print version and exit")
-    exgroup.add_argument("--update", action="store_true", help="Update to latest CWL version, print and exit")
 
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--strict", action="store_true", help="Strict validation (unrecognized or out of place fields are error)",
@@ -130,23 +148,48 @@ def arg_parser():  # type: () -> argparse.ArgumentParser
 
     parser.add_argument("--tool-help", action="store_true", help="Print command line help for tool")
 
-    parser.add_argument("--relative-deps", choices=['primary', 'cwd'], default="primary",
-                         help="When using --print-deps, print paths relative to primary file or current working directory.")
+    parser.add_argument("--relative-deps", choices=['primary', 'cwd'],
+        default="primary", help="When using --print-deps, print paths "
+        "relative to primary file or current working directory.")
+
+    parser.add_argument("--enable-dev", action="store_true",
+                        help="Allow loading and running development versions "
+                        "of CWL spec.", default=False)
+
+    parser.add_argument("--default-container",
+                        help="Specify a default docker container that will be used if the workflow fails to specify one.")
+    parser.add_argument("--no-match-user", action="store_true",
+                        help="Disable passing the current uid to 'docker run --user`")
+    parser.add_argument("--disable-net", action="store_true",
+                        help="Use docker's default networking for containers;"
+                        " the default is to enable networking.")
+    parser.add_argument("--custom-net", type=Text,
+                        help="Will be passed to `docker run` as the '--net' "
+                        "parameter. Implies '--enable-net'.")
+
+    parser.add_argument("--on-error", type=Text,
+                        help="Desired workflow behavior when a step fails.  One of 'stop' or 'continue'. "
+                        "Default is 'stop.", default="stop")
 
-    parser.add_argument("--enable-net", action="store_true",
-            help="Use docker's default networking for containers; the default is "
-            "to disable networking.")
-    parser.add_argument("--custom-net", type=str,
-            help="Will be passed to `docker run` as the '--net' parameter. "
-            "Implies '--enable-net'.")
-    parser.add_argument("workflow", type=str, nargs="?", default=None)
+    exgroup = parser.add_mutually_exclusive_group()
+    exgroup.add_argument("--compute-checksum", action="store_true", default=True,
+                        help="Compute checksum of contents while collecting outputs",
+                        dest="compute_checksum")
+    exgroup.add_argument("--no-compute-checksum", action="store_false",
+                        help="Do not compute checksum of contents while collecting outputs",
+                        dest="compute_checksum")
+
+    parser.add_argument("--relax-path-checks", action="store_true",
+            default=False, help="Relax requirements on path names. Currently "
+            "allows spaces.", dest="relax_path_checks")
+    parser.add_argument("workflow", type=Text, nargs="?", default=None)
     parser.add_argument("job_order", nargs=argparse.REMAINDER)
 
     return parser
 
 
-def single_job_executor(t, job_order, input_basedir, args, **kwargs):
-    # type: (Process, Dict[str,Any], str, argparse.Namespace,**Any) -> Union[str,Dict[str,str]]
+def single_job_executor(t, job_order_object, **kwargs):
+    # type: (Process, Dict[Text, Any], **Any) -> Union[Text, Dict[Text, Text]]
     final_output = []
     final_status = []
 
@@ -158,93 +201,115 @@ def single_job_executor(t, job_order, input_basedir, args, **kwargs):
             _logger.warn(u"Final process status is %s", processStatus)
         final_output.append(out)
 
-    if kwargs.get("outdir"):
-        pass
-    elif kwargs.get("dry_run"):
-        kwargs["outdir"] = "/tmp"
-    else:
-        kwargs["outdir"] = tempfile.mkdtemp()
-
-    jobiter = t.job(job_order,
-                    input_basedir,
+    if "basedir" not in kwargs:
+        raise WorkflowException("Must provide 'basedir' in kwargs")
+
+    output_dirs = set()
+    finaloutdir = kwargs.get("outdir")
+    kwargs["outdir"] = tempfile.mkdtemp(prefix=kwargs["tmp_outdir_prefix"]) if kwargs.get("tmp_outdir_prefix") else tempfile.mkdtemp()
+    output_dirs.add(kwargs["outdir"])
+
+    jobReqs = None
+    if "cwl:requirements" in job_order_object:
+        jobReqs = job_order_object["cwl:requirements"]
+    elif ("cwl:defaults" in t.metadata and "cwl:requirements" in
+            t.metadata["cwl:defaults"]):
+        jobReqs = t.metadata["cwl:defaults"]["cwl:requirements"]
+    if jobReqs:
+        for req in jobReqs:
+            t.requirements.append(req)
+
+    jobiter = t.job(job_order_object,
                     output_callback,
                     **kwargs)
 
-    if kwargs.get("conformance_test"):
-        job = jobiter.next()
-        a = {"args": job.command_line}
-        if job.stdin:
-            a["stdin"] = job.pathmapper.mapper(job.stdin)[1]
-        if job.stdout:
-            a["stdout"] = job.stdout
-        if job.generatefiles:
-            a["createfiles"] = job.generatefiles
-        return a
-    else:
-        try:
-            for r in jobiter:
-                if r:
-                    r.run(**kwargs)
-                else:
-                    raise workflow.WorkflowException("Workflow cannot make any more progress.")
-        except workflow.WorkflowException:
-            raise
-        except Exception as e:
-            _logger.exception("Got workflow error")
-            raise workflow.WorkflowException(unicode(e))
+    try:
+        for r in jobiter:
+            if r and r.outdir:
+                output_dirs.add(r.outdir)
 
-        if final_status[0] != "success":
-            raise workflow.WorkflowException(u"Process status is %s" % (final_status))
+            if r:
+                r.run(**kwargs)
+            else:
+                raise WorkflowException("Workflow cannot make any more progress.")
+    except WorkflowException:
+        raise
+    except Exception as e:
+        _logger.exception("Got workflow error")
+        raise WorkflowException(Text(e))
 
-        return final_output[0]
+    if final_status[0] != "success":
+        raise WorkflowException(u"Process status is %s" % (final_status))
 
+    if final_output[0] and finaloutdir:
+        final_output[0] = relocateOutputs(final_output[0], finaloutdir,
+                                          output_dirs, kwargs.get("move_outputs"))
 
-class FileAction(argparse.Action):
+    if kwargs.get("rm_tmpdir"):
+        cleanIntermediate(output_dirs)
+
+    return final_output[0]
+
+class FSAction(argparse.Action):
+    objclass = None  # type: Text
 
     def __init__(self, option_strings, dest, nargs=None, **kwargs):
-        # type: (List[str], str, Any, **Any) -> None
+        # type: (List[Text], Text, Any, **Any) -> None
         if nargs is not None:
             raise ValueError("nargs not allowed")
-        super(FileAction, self).__init__(option_strings, dest, **kwargs)
+        super(FSAction, self).__init__(option_strings, dest, **kwargs)
 
     def __call__(self, parser, namespace, values, option_string=None):
-        # type: (argparse.ArgumentParser, argparse.Namespace, str, Any) -> None
-        setattr(namespace, self.dest, {"class": "File", "path": values})
+        # type: (argparse.ArgumentParser, argparse.Namespace, Union[AnyStr, Sequence[Any], None], AnyStr) -> None
+        setattr(namespace,
+            self.dest,  # type: ignore
+            {"class": self.objclass,
+             "location": "file://%s" % os.path.abspath(cast(AnyStr, values))})
 
-
-class FileAppendAction(argparse.Action):
+class FSAppendAction(argparse.Action):
+    objclass = None  # type: Text
 
     def __init__(self, option_strings, dest, nargs=None, **kwargs):
-        # type: (List[str], str, Any, **Any) -> None
+        # type: (List[Text], Text, Any, **Any) -> None
         if nargs is not None:
             raise ValueError("nargs not allowed")
-        super(FileAppendAction, self).__init__(option_strings, dest, **kwargs)
+        super(FSAppendAction, self).__init__(option_strings, dest, **kwargs)
 
     def __call__(self, parser, namespace, values, option_string=None):
-        # type: (argparse.ArgumentParser, argparse.Namespace, str, Any) -> None
-        g = getattr(namespace, self.dest)
+        # type: (argparse.ArgumentParser, argparse.Namespace, Union[AnyStr, Sequence[Any], None], AnyStr) -> None
+        g = getattr(namespace,
+                    self.dest  # type: ignore
+                    )
         if not g:
             g = []
-            setattr(namespace, self.dest, g)
-        g.append({"class": "File", "path": values})
+            setattr(namespace,
+                    self.dest,  # type: ignore
+                    g)
+        g.append(
+            {"class": self.objclass,
+             "location": "file://%s" % os.path.abspath(cast(AnyStr, values))})
 
+class FileAction(FSAction):
+    objclass = "File"
 
-def generate_parser(toolparser, tool, namemap):
-    # type: (argparse.ArgumentParser, Process,Dict[str,str]) -> argparse.ArgumentParser
-    toolparser.add_argument("job_order", nargs="?", help="Job input json file")
-    namemap["job_order"] = "job_order"
+class DirectoryAction(FSAction):
+    objclass = "Directory"
 
-    for inp in tool.tool["inputs"]:
-        name = shortname(inp["id"])
+class FileAppendAction(FSAppendAction):
+    objclass = "File"
+
+class DirectoryAppendAction(FSAppendAction):
+    objclass = "Directory"
+
+
+def add_argument(toolparser, name, inptype, records, description="",
+        default=None):
+    # type: (argparse.ArgumentParser, Text, Any, List[Text], Text, Any) -> None
         if len(name) == 1:
             flag = "-"
         else:
             flag = "--"
 
-        namemap[name.replace("-", "_")] = name
-
-        inptype = inp["type"]
-
         required = True
         if isinstance(inptype, list):
             if inptype[0] == "null":
@@ -255,166 +320,90 @@ def generate_parser(toolparser, tool, namemap):
                     _logger.debug(u"Can't make command line argument from %s", inptype)
                     return None
 
-        ahelp = inp.get("description", "").replace("%", "%%")
-        action = None  # type: Union[argparse.Action,str]
-        atype = None # type: Any
-        default = None # type: Any
+        ahelp = description.replace("%", "%%")
+        action = None  # type: Union[argparse.Action, Text]
+        atype = None  # type: Any
 
         if inptype == "File":
             action = cast(argparse.Action, FileAction)
+        elif inptype == "Directory":
+            action = cast(argparse.Action, DirectoryAction)
         elif isinstance(inptype, dict) and inptype["type"] == "array":
             if inptype["items"] == "File":
                 action = cast(argparse.Action, FileAppendAction)
+            elif inptype["items"] == "Directory":
+                action = cast(argparse.Action, DirectoryAppendAction)
             else:
                 action = "append"
-
+        elif isinstance(inptype, dict) and inptype["type"] == "enum":
+            atype = Text
+        elif isinstance(inptype, dict) and inptype["type"] == "record":
+            records.append(name)
+            for field in inptype['fields']:
+                fieldname = name+"."+shortname(field['name'])
+                fieldtype = field['type']
+                fielddescription = field.get("doc", "")
+                add_argument(
+                    toolparser, fieldname, fieldtype, records,
+                    fielddescription)
+            return
         if inptype == "string":
-            atype = str
+            atype = Text
         elif inptype == "int":
             atype = int
+        elif inptype == "double":
+            atype = float
         elif inptype == "float":
             atype = float
         elif inptype == "boolean":
             action = "store_true"
 
-        if "default" in inp:
-            default = inp["default"]
+        if default:
             required = False
 
         if not atype and not action:
             _logger.debug(u"Can't make command line argument from %s", inptype)
             return None
 
-        toolparser.add_argument(flag + name, required=required,
-                help=ahelp, action=action, type=atype, default=default)
-
-    return toolparser
-
-
-def load_tool(argsworkflow, updateonly, strict, makeTool, debug,
-              print_pre=False,
-              print_rdf=False,
-              print_dot=False,
-              print_deps=False,
-              relative_deps=False,
-              rdf_serializer=None,
-              stdout=sys.stdout,
-              urifrag=None):
-    # type: (Union[str,unicode,dict[unicode,Any]], bool, bool, Callable[...,Process], bool, bool, bool, bool, bool, bool, Any, Any, Any) -> Any
-    (document_loader, avsc_names, schema_metadata) = process.get_schema()
-
-    if isinstance(avsc_names, Exception):
-        raise avsc_names
-
-    jobobj = None
-    uri = None  # type: str
-    workflowobj = None  # type: Dict[unicode, Any]
-    if isinstance(argsworkflow, (basestring)):
-        split = urlparse.urlsplit(cast(str, argsworkflow))
-        if split.scheme:
-            uri = cast(str, argsworkflow)
+        if inptype != "boolean":
+            typekw = { 'type': atype }
         else:
-            uri = "file://" + os.path.abspath(cast(str, argsworkflow))
-        fileuri, urifrag = urlparse.urldefrag(uri)
-        workflowobj = document_loader.fetch(fileuri)
-    elif isinstance(argsworkflow, dict):
-        workflowobj = argsworkflow
-        uri = urifrag
-        fileuri = "#"
-    else:
-        raise schema_salad.validate.ValidationException("Must be URI or dict")
-
-    if "cwl:tool" in workflowobj:
-        jobobj = workflowobj
-        uri = urlparse.urljoin(uri, jobobj["cwl:tool"])
-        fileuri, urifrag = urlparse.urldefrag(uri)
-        workflowobj = document_loader.fetch(fileuri)
-        del jobobj["cwl:tool"]
-
-    if isinstance(workflowobj, list):
-        # bare list without a version must be treated as draft-2
-        workflowobj = {"cwlVersion": "https://w3id.org/cwl/cwl#draft-2",
-                       "id": fileuri,
-                       "@graph": workflowobj}
-
-    workflowobj = update.update(workflowobj, document_loader, fileuri)
-    document_loader.idx.clear()
-
-    if updateonly:
-        stdout.write(json.dumps(workflowobj, indent=4))
-        return 0
+            typekw = {}
 
-    if print_deps:
-        printdeps(workflowobj, document_loader, stdout, relative_deps)
-        return 0
-
-    try:
-        processobj, metadata = schema_salad.schema.load_and_validate(
-                document_loader, avsc_names, workflowobj, strict)
-    except (schema_salad.validate.ValidationException, RuntimeError) as e:
-        _logger.error(u"Tool definition failed validation:\n%s", e,
-                exc_info=(e if debug else False))
-        return 1
+        toolparser.add_argument(  # type: ignore
+            flag + name, required=required, help=ahelp, action=action,
+            default=default, **typekw)
 
-    if print_pre:
-        stdout.write(json.dumps(processobj, indent=4))
-        return 0
-
-    if print_rdf:
-        printrdf(str(argsworkflow), processobj, document_loader.ctx, rdf_serializer, stdout)
-        return 0
 
-    if print_dot:
-        printdot(str(argsworkflow), processobj, document_loader.ctx, stdout)
-        return 0
-
-    if urifrag:
-        processobj, _ = document_loader.resolve_ref(uri)
-    elif isinstance(processobj, list):
-        if 1 == len(processobj):
-            processobj = processobj[0]
-        else:
-            _logger.error(u"Tool file contains graph of multiple objects, must specify one of #%s",
-                          ", #".join(urlparse.urldefrag(i["id"])[1]
-                                     for i in processobj if "id" in i))
-            return 1
-
-    try:
-        t = makeTool(processobj, strict=strict, makeTool=makeTool, loader=document_loader, avsc_names=avsc_names)
-    except (schema_salad.validate.ValidationException) as e:
-        _logger.error(u"Tool definition failed validation:\n%s", e, exc_info=(e if debug else False))
-        return 1
-    except (RuntimeError, workflow.WorkflowException) as e:
-        _logger.error(u"Tool definition failed initialization:\n%s", e, exc_info=(e if debug else False))
-        return 1
-
-    if jobobj:
-        for inp in t.tool["inputs"]:
-            if shortname(inp["id"]) in jobobj:
-                inp["default"] = jobobj[shortname(inp["id"])]
+def generate_parser(toolparser, tool, namemap, records):
+    # type: (argparse.ArgumentParser, Process, Dict[Text, Text], List[Text]) -> argparse.ArgumentParser
+    toolparser.add_argument("job_order", nargs="?", help="Job input json file")
+    namemap["job_order"] = "job_order"
 
-    if metadata:
-        t.metadata = metadata
-    else:
-        t.metadata = {"$namespaces": t.tool.get("$namespaces", {}), "$schemas": t.tool.get("$schemas", [])}
+    for inp in tool.tool["inputs"]:
+        name = shortname(inp["id"])
+        namemap[name.replace("-", "_")] = name
+        inptype = inp["type"]
+        description = inp.get("doc", "")
+        default = inp.get("default", None)
+        add_argument(toolparser, name, inptype, records, description, default)
 
-    return t
+    return toolparser
 
 
-def load_job_order(args, t, parser, stdin, print_input_deps=False, relative_deps=False, stdout=sys.stdout):
-    # type: (argparse.Namespace, Process, argparse.ArgumentParser, IO[Any], bool, bool, IO[Any]) -> Union[int,Tuple[Dict[str,Any],str]]
+def load_job_order(args, t, stdin, print_input_deps=False, relative_deps=False,
+                   stdout=sys.stdout, make_fs_access=None, fetcher_constructor=None):
+    # type: (argparse.Namespace, Process, IO[Any], bool, bool, IO[Any], Callable[[Text], StdFsAccess], Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]) -> Union[int, Tuple[Dict[Text, Any], Text]]
 
     job_order_object = None
 
-    if args.conformance_test:
-        loader = Loader({})
-    else:
-        jobloaderctx = {
-                "path": {"@type": "@id"},
-                "format": {"@type": "@id"},
-                "id": "@id"}
-        jobloaderctx.update(t.metadata.get("$namespaces", {}))
-        loader = Loader(jobloaderctx)
+    jobloaderctx = {
+        u"path": {u"@type": u"@id"},
+        u"location": {u"@type": u"@id"},
+        u"format": {u"@type": u"@id"},
+        u"id": u"@id"}
+    jobloaderctx.update(t.metadata.get("$namespaces", {}))
+    loader = Loader(jobloaderctx, fetcher_constructor=fetcher_constructor)
 
     if len(args.job_order) == 1 and args.job_order[0][0] != "-":
         job_order_file = args.job_order[0]
@@ -429,34 +418,46 @@ def load_job_order(args, t, parser, stdin, print_input_deps=False, relative_deps
     elif job_order_file:
         input_basedir = args.basedir if args.basedir else os.path.abspath(os.path.dirname(job_order_file))
         try:
-            job_order_object, _ = loader.resolve_ref(job_order_file)
+            job_order_object, _ = loader.resolve_ref(job_order_file, checklinks=False)
         except Exception as e:
-            _logger.error(str(e), exc_info=(e if args.debug else False))
+            _logger.error(Text(e), exc_info=args.debug)
             return 1
         toolparser = None
     else:
         input_basedir = args.basedir if args.basedir else os.getcwd()
-        namemap = {}  # type: Dict[str,str]
-        toolparser = generate_parser(argparse.ArgumentParser(prog=args.workflow), t, namemap)
+        namemap = {}  # type: Dict[Text, Text]
+        records = []  # type: List[Text]
+        toolparser = generate_parser(
+            argparse.ArgumentParser(prog=args.workflow), t, namemap, records)
         if toolparser:
             if args.tool_help:
                 toolparser.print_help()
                 return 0
             cmd_line = vars(toolparser.parse_args(args.job_order))
+            for record_name in records:
+                record = {}
+                record_items = {
+                    k:v for k,v in cmd_line.iteritems()
+                    if k.startswith(record_name)}
+                for key, value in record_items.iteritems():
+                    record[key[len(record_name)+1:]] = value
+                    del cmd_line[key]
+                cmd_line[str(record_name)] = record
 
             if cmd_line["job_order"]:
                 try:
                     input_basedir = args.basedir if args.basedir else os.path.abspath(os.path.dirname(cmd_line["job_order"]))
                     job_order_object = loader.resolve_ref(cmd_line["job_order"])
                 except Exception as e:
-                    _logger.error(str(e), exc_info=(e if args.debug else False))
+                    _logger.error(Text(e), exc_info=args.debug)
                     return 1
             else:
                 job_order_object = {"id": args.workflow}
 
             job_order_object.update({namemap[k]: v for k,v in cmd_line.items()})
 
-            _logger.debug(u"Parsed job order from command line: %s", json.dumps(job_order_object, indent=4))
+            if _logger.isEnabledFor(logging.DEBUG):
+                _logger.debug(u"Parsed job order from command line: %s", json.dumps(job_order_object, indent=4))
         else:
             job_order_object = None
 
@@ -467,19 +468,29 @@ def load_job_order(args, t, parser, stdin, print_input_deps=False, relative_deps
             job_order_object[shortname(inp["id"])] = inp["default"]
 
     if not job_order_object and len(t.tool["inputs"]) > 0:
-        parser.print_help()
         if toolparser:
             print u"\nOptions for %s " % args.workflow
             toolparser.print_help()
         _logger.error("")
-        _logger.error("Input object required")
+        _logger.error("Input object required, use --help for details")
         return 1
 
     if print_input_deps:
-        printdeps(job_order_object, loader, stdout, relative_deps,
+        printdeps(job_order_object, loader, stdout, relative_deps, "",
                   basedir=u"file://%s/" % input_basedir)
         return 0
 
+    def pathToLoc(p):
+        if "location" not in p and "path" in p:
+            p["location"] = p["path"]
+            del p["path"]
+
+    adjustDirObjs(job_order_object, pathToLoc)
+    adjustFileObjs(job_order_object, pathToLoc)
+    normalizeFilesDirs(job_order_object)
+    adjustDirObjs(job_order_object, cast(Callable[..., Any],
+        functools.partial(getListing, make_fs_access(input_basedir))))
+
     if "cwl:tool" in job_order_object:
         del job_order_object["cwl:tool"]
     if "id" in job_order_object:
@@ -487,38 +498,52 @@ def load_job_order(args, t, parser, stdin, print_input_deps=False, relative_deps
 
     return (job_order_object, input_basedir)
 
+def makeRelative(base, ob):
+    u = ob.get("location", ob.get("path"))
+    if ":" in u.split("/")[0] and not u.startswith("file://"):
+        pass
+    else:
+        if u.startswith("file://"):
+            u = u[7:]
+        ob["location"] = os.path.relpath(u, base)
 
-def printdeps(obj, document_loader, stdout, relative_deps, basedir=None):
-    # type: (Dict[unicode, Any], Loader, IO[Any], bool, str) -> None
+def printdeps(obj, document_loader, stdout, relative_deps, uri, basedir=None):
+    # type: (Dict[Text, Any], Loader, IO[Any], bool, Text, Text) -> None
     deps = {"class": "File",
-            "path": obj.get("id", "#")}
+            "location": uri}  # type: Dict[Text, Any]
 
     def loadref(b, u):
-        return document_loader.resolve_ref(u, base_url=b)[0]
+        return document_loader.fetch(document_loader.fetcher.urljoin(b, u))
 
-    sf = process.scandeps(basedir if basedir else obj["id"], obj,
-                          set(("$import", "run")),
-                          set(("$include", "$schemas", "path")), loadref)
+    sf = scandeps(
+        basedir if basedir else uri, obj, set(("$import", "run")),
+        set(("$include", "$schemas", "location")), loadref)
     if sf:
         deps["secondaryFiles"] = sf
 
     if relative_deps:
         if relative_deps == "primary":
-            base = basedir if basedir else os.path.dirname(obj["id"])
+            base = basedir if basedir else os.path.dirname(uri)
         elif relative_deps == "cwd":
             base = "file://" + os.getcwd()
         else:
             raise Exception(u"Unknown relative_deps %s" % relative_deps)
-        def makeRelative(u):
-            if ":" in u.split("/")[0] and not u.startswith("file://"):
-                return u
-            return os.path.relpath(u, base)
-        process.adjustFiles(deps, makeRelative)
+
+        adjustFileObjs(deps, functools.partial(makeRelative, base))
+        adjustDirObjs(deps, functools.partial(makeRelative, base))
 
     stdout.write(json.dumps(deps, indent=4))
 
+def print_pack(document_loader, processobj, uri, metadata):
+    # type: (Loader, Union[Dict[unicode, Any], List[Dict[unicode, Any]]], unicode, Dict[unicode, Any]) -> str
+    packed = pack(document_loader, processobj, uri, metadata)
+    if len(packed["$graph"]) > 1:
+        return json.dumps(packed, indent=4)
+    else:
+        return json.dumps(packed["$graph"][0], indent=4)
+
 def versionstring():
-    # type: () -> unicode
+    # type: () -> Text
     pkg = pkg_resources.require("cwltool")
     if pkg:
         return u"%s %s" % (sys.argv[0], pkg[0].version)
@@ -526,128 +551,210 @@ def versionstring():
         return u"%s %s" % (sys.argv[0], "unknown version")
 
 
-def main(argsl=None,
-         executor=single_job_executor,
-         makeTool=workflow.defaultMakeTool,
-         selectResources=None,
-         parser=None,
-         stdin=sys.stdin,
-         stdout=sys.stdout,
-         stderr=sys.stderr,
-         versionfunc=versionstring):
-    # type: (List[str],Callable[...,Union[str,Dict[str,str]]],Callable[...,Process],Callable[[Dict[str,int]],Dict[str,int]],argparse.ArgumentParser,IO[Any],IO[Any],IO[Any],Callable[[],unicode]) -> int
+def main(argsl=None,  # type: List[str]
+         args=None,   # type: argparse.Namespace
+         executor=single_job_executor,  # type: Callable[..., Union[Text, Dict[Text, Text]]]
+         makeTool=workflow.defaultMakeTool,  # type: Callable[..., Process]
+         selectResources=None,  # type: Callable[[Dict[Text, int]], Dict[Text, int]]
+         stdin=sys.stdin,  # type: IO[Any]
+         stdout=sys.stdout,  # type: IO[Any]
+         stderr=sys.stderr,  # type: IO[Any]
+         versionfunc=versionstring,  # type: Callable[[], Text]
+         job_order_object=None,  # type: Union[Tuple[Dict[Text, Any], Text], int]
+         make_fs_access=StdFsAccess,  # type: Callable[[Text], StdFsAccess]
+         fetcher_constructor=None,  # type: Callable[[Dict[unicode, unicode], requests.sessions.Session], Fetcher]
+         resolver=tool_resolver
+         ):
+    # type: (...) -> int
 
     _logger.removeHandler(defaultStreamHandler)
-    _logger.addHandler(logging.StreamHandler(stderr))
+    stderr_handler = logging.StreamHandler(stderr)
+    _logger.addHandler(stderr_handler)
+    try:
+        if args is None:
+            if argsl is None:
+                argsl = sys.argv[1:]
+            args = arg_parser().parse_args(argsl)
+
+        # If the caller provided a custom args namespace, not every expected
+        # option may be set, so fill in no-op defaults to avoid crashing when
+        # dereferencing them in args.
+        for k,v in {'print_deps': False,
+                    'print_pre': False,
+                    'print_rdf': False,
+                    'print_dot': False,
+                    'relative_deps': False,
+                    'tmp_outdir_prefix': 'tmp',
+                    'tmpdir_prefix': 'tmp',
+                    'print_input_deps': False,
+                    'cachedir': None,
+                    'quiet': False,
+                    'debug': False,
+                    'version': False,
+                    'enable_dev': False,
+                    'strict': True,
+                    'rdf_serializer': None,
+                    'basedir': None,
+                    'tool_help': False,
+                    'workflow': None,
+                    'job_order': None,
+                    'pack': False,
+                    'relax_path_checks': False,
+                    'on_error': 'continue'}.iteritems():
+            if not hasattr(args, k):
+                setattr(args, k, v)
+
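
This makes main() callable with a hand-built namespace that sets only the options the caller cares about; a minimal sketch, assuming a local hello.cwl and job.json exist:

    import argparse
    from cwltool.main import main

    ns = argparse.Namespace(workflow="hello.cwl", job_order=["job.json"])
    rc = main(args=ns)  # unset options receive the no-op defaults above
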
+        if args.quiet:
+            _logger.setLevel(logging.WARN)
+        if args.debug:
+            _logger.setLevel(logging.DEBUG)
+
+        if args.version:
+            print versionfunc()
+            return 0
+        else:
+            _logger.info(versionfunc())
+
+        if not args.workflow:
+            if os.path.isfile("CWLFile"):
+                setattr(args, "workflow", "CWLFile")
+            else:
+                _logger.error("")
+                _logger.error("CWL document required, try --help for details")
+                return 1
+        if args.relax_path_checks:
+            draft2tool.ACCEPTLIST_RE = draft2tool.ACCEPTLIST_EN_RELAXED_RE
 
-    if argsl is None:
-        argsl = sys.argv[1:]
+        try:
+            document_loader, workflowobj, uri = fetch_document(args.workflow, resolver=resolver, fetcher_constructor=fetcher_constructor)
 
-    if parser is None:
-        parser = arg_parser()
+            if args.print_deps:
+                printdeps(workflowobj, document_loader, stdout, args.relative_deps, uri)
+                return 0
 
-    args = parser.parse_args(argsl)
+            document_loader, avsc_names, processobj, metadata, uri \
+                = validate_document(document_loader, workflowobj, uri,
+                                    enable_dev=args.enable_dev, strict=args.strict,
+                                    preprocess_only=args.print_pre or args.pack,
+                                    fetcher_constructor=fetcher_constructor)
 
-    if args.quiet:
-        _logger.setLevel(logging.WARN)
-    if args.debug:
-        _logger.setLevel(logging.DEBUG)
+            if args.pack:
+                stdout.write(print_pack(document_loader, processobj, uri, metadata))
+                return 0
 
-    if args.version:
-        print versionfunc()
-        return 0
-    else:
-        _logger.info(versionfunc())
+            if args.print_pre:
+                stdout.write(json.dumps(processobj, indent=4))
+                return 0
 
-    if not args.workflow:
-        parser.print_help()
-        _logger.error("")
-        _logger.error("CWL document required")
-        return 1
+            tool = make_tool(document_loader, avsc_names, metadata, uri,
+                    makeTool, vars(args))
 
-    try:
-        t = load_tool(args.workflow, args.update, args.strict, makeTool, args.debug,
-                      print_pre=args.print_pre,
-                      print_rdf=args.print_rdf,
-                      print_dot=args.print_dot,
-                      print_deps=args.print_deps,
-                      relative_deps=args.relative_deps,
-                      rdf_serializer=args.rdf_serializer,
-                      stdout=stdout)
-    except Exception as e:
-        _logger.error(u"I'm sorry, I couldn't load this CWL file, try again with --debug for more information.\n%s\n", e, exc_info=(e if args.debug else False))
-        return 1
+            if args.print_rdf:
+                printrdf(tool, document_loader.ctx, args.rdf_serializer, stdout)
+                return 0
 
-    if isinstance(t, int):
-        return t
+            if args.print_dot:
+                printdot(tool, document_loader.ctx, stdout)
+                return 0
 
-    if args.tmp_outdir_prefix != 'tmp':
-        # Use user defined temp directory (if it exists)
-        args.tmp_outdir_prefix = os.path.abspath(args.tmp_outdir_prefix)
-        if not os.path.exists(args.tmp_outdir_prefix):
-            _logger.error("Intermediate output directory prefix doesn't exist, reverting to default")
+        except (validate.ValidationException) as exc:
+            _logger.error(u"Tool definition failed validation:\n%s", exc,
+                          exc_info=args.debug)
             return 1
-
-    if args.tmpdir_prefix != 'tmp':
-        # Use user defined prefix (if the folder exists)
-        args.tmpdir_prefix = os.path.abspath(args.tmpdir_prefix)
-        if not os.path.exists(args.tmpdir_prefix):
-            _logger.error("Temporary directory prefix doesn't exist.")
+        except (RuntimeError, WorkflowException) as exc:
+            _logger.error(u"Tool definition failed initialization:\n%s", exc,
+                          exc_info=args.debug)
+            return 1
+        except Exception as exc:
+            _logger.error(
+                u"I'm sorry, I couldn't load this CWL file%s",
+                ", try again with --debug for more information.\nThe error was: "
+                "%s" % exc if not args.debug else ".  The error was:",
+                exc_info=args.debug)
             return 1
 
-    job_order_object = load_job_order(args, t, parser, stdin,
-                                      print_input_deps=args.print_input_deps,
-                                      relative_deps=args.relative_deps,
-                                      stdout=stdout)
-
-    if isinstance(job_order_object, int):
-        return job_order_object
-
-    if args.cachedir:
-        args.cachedir = os.path.abspath(args.cachedir)
-        args.move_outputs = False
+        if isinstance(tool, int):
+            return tool
+
+        for dirprefix in ("tmpdir_prefix", "tmp_outdir_prefix", "cachedir"):
+            if getattr(args, dirprefix) and getattr(args, dirprefix) != 'tmp':
+                sl = "/" if getattr(args, dirprefix).endswith("/") or dirprefix == "cachedir" else ""
+                setattr(args, dirprefix,
+                        os.path.abspath(getattr(args, dirprefix))+sl)
+                if not os.path.exists(os.path.dirname(getattr(args, dirprefix))):
+                    try:
+                        os.makedirs(os.path.dirname(getattr(args, dirprefix)))
+                    except Exception as e:
+                        _logger.error("Failed to create directory: %s", e)
+                        return 1
+
+        if args.cachedir:
+            if args.move_outputs == "move":
+                setattr(args, 'move_outputs', "copy")
+            setattr(args, "tmp_outdir_prefix", args.cachedir)
+
+        if job_order_object is None:
+            job_order_object = load_job_order(args, tool, stdin,
+                                              print_input_deps=args.print_input_deps,
+                                              relative_deps=args.relative_deps,
+                                              stdout=stdout,
+                                              make_fs_access=make_fs_access,
+                                              fetcher_constructor=fetcher_constructor)
+
+        if isinstance(job_order_object, int):
+            return job_order_object
 
-    try:
-        out = executor(t, job_order_object[0],
-                       job_order_object[1], args,
-                       conformance_test=args.conformance_test,
-                       dry_run=args.dry_run,
-                       outdir=args.outdir,
-                       tmp_outdir_prefix=args.cachedir if args.cachedir else args.tmp_outdir_prefix,
-                       use_container=args.use_container,
-                       preserve_environment=args.preserve_environment,
-                       pull_image=args.enable_pull,
-                       rm_container=args.rm_container,
-                       tmpdir_prefix=args.tmpdir_prefix,
-                       enable_net=args.enable_net,
-                       rm_tmpdir=args.rm_tmpdir,
-                       makeTool=makeTool,
-                       move_outputs=args.move_outputs,
-                       select_resources=selectResources,
-                       eval_timeout=args.eval_timeout,
-                       cachedir=args.cachedir
-                       )
-        # This is the workflow output, it needs to be written
-        if out is not None:
-            if isinstance(out, basestring):
-                stdout.write(out)
+        try:
+            setattr(args, 'basedir', job_order_object[1])
+            del args.workflow
+            del args.job_order
+            out = executor(tool, job_order_object[0],
+                           makeTool=makeTool,
+                           select_resources=selectResources,
+                           make_fs_access=make_fs_access,
+                           **vars(args))
+
+            # This is the workflow output, it needs to be written
+            if out is not None:
+                def locToPath(p):
+                    if p["location"].startswith("file://"):
+                        p["path"] = p["location"][7:]
+
+                adjustDirObjs(out, locToPath)
+                adjustFileObjs(out, locToPath)
+
+                if isinstance(out, basestring):
+                    stdout.write(out)
+                else:
+                    stdout.write(json.dumps(out, indent=4))
+                stdout.write("\n")
+                stdout.flush()
             else:
-                stdout.write(json.dumps(out, indent=4))
-            stdout.write("\n")
-            stdout.flush()
-        else:
+                return 1
+        except (validate.ValidationException) as exc:
+            _logger.error(u"Input object failed validation:\n%s", exc,
+                    exc_info=args.debug)
             return 1
-    except (validate.ValidationException) as e:
-        _logger.error(u"Input object failed validation:\n%s", e, exc_info=(e if args.debug else False))
-        return 1
-    except workflow.WorkflowException as e:
-        _logger.error(u"Workflow error, try again with --debug for more information:\n  %s", e, exc_info=(e if args.debug else False))
-        return 1
-    except Exception as e:
-        _logger.error(u"Unhandled error, try again with --debug for more information:\n  %s", e, exc_info=(e if args.debug else False))
-        return 1
+        except UnsupportedRequirement as exc:
+            _logger.error(
+                u"Workflow or tool uses unsupported feature:\n%s", exc,
+                exc_info=args.debug)
+            return 33
+        except WorkflowException as exc:
+            _logger.error(
+                u"Workflow error, try again with --debug for more "
+                "information:\n  %s", exc, exc_info=args.debug)
+            return 1
+        except Exception as exc:
+            _logger.error(
+                u"Unhandled error, try again with --debug for more information:\n"
+                "  %s", exc, exc_info=args.debug)
+            return 1
+
+        return 0
+    finally:
+        _logger.removeHandler(stderr_handler)
+        _logger.addHandler(defaultStreamHandler)
 
-    return 0
 
 if __name__ == "__main__":
     sys.exit(main(sys.argv[1:]))
diff --git a/cwltool/pack.py b/cwltool/pack.py
new file mode 100644
index 0000000..e2e2e3d
--- /dev/null
+++ b/cwltool/pack.py
@@ -0,0 +1,82 @@
+import copy
+import json
+
+from schema_salad.ref_resolver import Loader
+
+from .process import scandeps, shortname
+
+from typing import Union, Any, cast, Callable, Dict, List, Set, Tuple, Type, IO, Text
+
+def flatten_deps(d, files):  # type: (Any, Set[Text]) -> None
+    if isinstance(d, list):
+        for s in d:
+            flatten_deps(s, files)
+    elif isinstance(d, dict):
+        files.add(d["location"])
+        if "secondaryFiles" in d:
+            flatten_deps(d["secondaryFiles"], files)
+
+def find_run(d, runs):  # type: (Any, Set[Text]) -> None
+    if isinstance(d, list):
+        for s in d:
+            find_run(s, runs)
+    elif isinstance(d, dict):
+        if "run" in d and isinstance(d["run"], (str, unicode)):
+            runs.add(d["run"])
+        for s in d.values():
+            find_run(s, runs)
+
+def replace_refs(d, rewrite, stem, newstem):
+    # type: (Any, Dict[Text, Text], Text, Text) -> None
+    if isinstance(d, list):
+        for s,v in enumerate(d):
+            if isinstance(v, (str, unicode)) and v.startswith(stem):
+                d[s] = newstem + v[len(stem):]
+            else:
+                replace_refs(v, rewrite, stem, newstem)
+    elif isinstance(d, dict):
+        if "run" in d and isinstance(d["run"], (str, unicode)):
+            d["run"] = rewrite[d["run"]]
+        for s,v in d.items():
+            if isinstance(v, (str, unicode)) and v.startswith(stem):
+                d[s] = newstem + v[len(stem):]
+            replace_refs(v, rewrite, stem, newstem)
+
+def pack(document_loader, processobj, uri, metadata):
+    # type: (Loader, Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Dict[Text, Text]) -> Dict[Text, Any]
+    def loadref(b, u):
+        # type: (Text, Text) -> Union[Dict, List, Text]
+        return document_loader.resolve_ref(u, base_url=b)[0]
+    deps = scandeps(uri, processobj, set(("run",)), set(), loadref)
+
+    fdeps = set((uri,))
+    flatten_deps(deps, fdeps)
+
+    runs = set()  # type: Set[Text]
+    for f in fdeps:
+        find_run(document_loader.idx[f], runs)
+
+    rewrite = {}
+    if isinstance(processobj, list):
+        for p in processobj:
+            rewrite[p["id"]] = "#" + shortname(p["id"])
+    else:
+        rewrite[uri] = "#main"
+
+    for r in runs:
+        rewrite[r] = "#" + shortname(r)
+
+    packed = {"$graph": [], "cwlVersion": metadata["cwlVersion"]
+            }  # type: Dict[Text, Any]
+
+    for r in sorted(rewrite.keys()):
+        v = rewrite[r]
+        dc = cast(Dict[Text, Any], copy.deepcopy(document_loader.idx[r]))
+        dc["id"] = v
+        for n in ("name", "cwlVersion"):
+            if n in dc:
+                del dc[n]
+        replace_refs(dc, rewrite, r+"/" if "#" in r else r+"#", v+"/")
+        packed["$graph"].append(dc)
+
+    return packed
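
End to end, packing a document that has already been fetched and validated looks roughly like this (a sketch built on the loaders introduced in cwltool/load_tool.py; wf.cwl is hypothetical):

    import json
    from cwltool.load_tool import fetch_document, validate_document
    from cwltool.pack import pack

    document_loader, workflowobj, uri = fetch_document("wf.cwl")
    document_loader, avsc_names, processobj, metadata, uri = \
        validate_document(document_loader, workflowobj, uri)
    print json.dumps(pack(document_loader, processobj, uri, metadata), indent=4)
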
diff --git a/cwltool/pathmapper.py b/cwltool/pathmapper.py
index 33e6f59..ac811d9 100644
--- a/cwltool/pathmapper.py
+++ b/cwltool/pathmapper.py
@@ -1,107 +1,215 @@
 import os
-import random
 import logging
 import stat
-from typing import Tuple, Set, Union, Any
+import collections
+import uuid
+import urlparse
+from functools import partial
+from typing import Any, Callable, Dict, List, Set, Text, Tuple, Union
+import schema_salad.validate as validate
 
 _logger = logging.getLogger("cwltool")
 
-
-def abspath(src, basedir):  # type: (str,str) -> str
-    if src.startswith("file://"):
+MapperEnt = collections.namedtuple("MapperEnt", ["resolved", "target", "type"])
+
+def adjustFiles(rec, op):  # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
+    """Apply a mapping function to each File path in the object `rec`."""
+
+    if isinstance(rec, dict):
+        if rec.get("class") == "File":
+            rec["path"] = op(rec["path"])
+        for d in rec:
+            adjustFiles(rec[d], op)
+    if isinstance(rec, list):
+        for d in rec:
+            adjustFiles(d, op)
+
+def adjustFileObjs(rec, op):  # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
+    """Apply an update function to each File object in the object `rec`."""
+
+    if isinstance(rec, dict):
+        if rec.get("class") == "File":
+            op(rec)
+        for d in rec:
+            adjustFileObjs(rec[d], op)
+    if isinstance(rec, list):
+        for d in rec:
+            adjustFileObjs(d, op)
+
+def adjustDirObjs(rec, op):
+    # type: (Any, Union[Callable[..., Any], partial[Any]]) -> None
+    """Apply an update function to each Directory object in the object `rec`."""
+
+    if isinstance(rec, dict):
+        if rec.get("class") == "Directory":
+            op(rec)
+        for key in rec:
+            adjustDirObjs(rec[key], op)
+    if isinstance(rec, list):
+        for d in rec:
+            adjustDirObjs(d, op)
+
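
All three visitors walk arbitrarily nested dicts and lists and call op on each matching object; printdeps above uses them with functools.partial(makeRelative, base). A minimal sketch with a hypothetical job object:

    def show_location(obj):  # illustrative callback
        print obj["location"]

    job = {"reads": {"class": "File", "location": "file:///tmp/a.fq",
                     "secondaryFiles": [
                         {"class": "File", "location": "file:///tmp/a.fq.idx"}]}}
    adjustFileObjs(job, show_location)  # visits both File objects
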
+def normalizeFilesDirs(job):
+    # type: (Union[List[Dict[Text, Any]], Dict[Text, Any]]) -> None
+    def addLocation(d):
+        if "location" not in d:
+            if d["class"] == "File" and ("contents" not in d):
+                raise validate.ValidationException("Anonymous file object must have 'contents' and 'basename' fields.")
+            if d["class"] == "Directory" and ("listing" not in d or "basename" not in d):
+                raise validate.ValidationException("Anonymous directory object must have 'listing' and 'basename' fields.")
+            d["location"] = "_:" + Text(uuid.uuid4())
+            if "basename" not in d:
+                d["basename"] = Text(uuid.uuid4())
+
+        if "basename" not in d:
+            parse = urlparse.urlparse(d["location"])
+            d["basename"] = os.path.basename(parse.path)
+
+    adjustFileObjs(job, addLocation)
+    adjustDirObjs(job, addLocation)
+
+
+def abspath(src, basedir):  # type: (Text, Text) -> Text
+    if src.startswith(u"file://"):
         ab = src[7:]
     else:
         ab = src if os.path.isabs(src) else os.path.join(basedir, src)
     return ab
 
+def dedup(listing):  # type: (List[Any]) -> List[Any]
+    marksub = set()
+
+    def mark(d):
+        marksub.add(d["location"])
+
+    for l in listing:
+        if l["class"] == "Directory":
+            for e in l.get("listing", []):
+                adjustFileObjs(e, mark)
+                adjustDirObjs(e, mark)
+
+    dd = []
+    markdup = set()  # type: Set[Text]
+    for r in listing:
+        if r["location"] not in marksub and r["location"] not in markdup:
+            dd.append(r)
+            markdup.add(r["location"])
+
+    return dd
+
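
dedup drops literal duplicates as well as entries already reachable through some Directory's listing, e.g. (sketch):

    listing = [
        {"class": "Directory", "location": "file:///data",
         "listing": [{"class": "File", "location": "file:///data/a.txt"}]},
        {"class": "File", "location": "file:///data/a.txt"},   # already listed
        {"class": "File", "location": "file:///other/b.txt"},
        {"class": "File", "location": "file:///other/b.txt"},  # duplicate
    ]
    assert len(dedup(listing)) == 2  # the Directory plus one b.txt
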
 
 class PathMapper(object):
 
     """Mapping of files from relative path provided in the file to a tuple of
-    (absolute local path, absolute container path)"""
-
-    def __init__(self, referenced_files, basedir):
-        # type: (Set[str], str) -> None
-        self._pathmap = {}  # type: Dict[str, Tuple[str, str]]
-        self.setup(referenced_files, basedir)
+    (absolute local path, absolute container path)
+
+    The tao of PathMapper:
+
+    The initializer takes a list of File and Directory objects, a base
+    directory (for resolving relative references) and a staging directory
+    (where the files are mapped to).
+
+    The purpose of the setup method is to determine where each File or
+    Directory should be placed on the target file system (relative to
+    stagedir).
+
+    If separatedirs=True, unrelated files will be isolated in their own
+    directories under stagedir. If separatedirs=False, files and directories
+    will all be placed in stagedir (with the possibility of name
+    collisions...)
+
+    The path map maps the "location" of the input Files and Directory objects
+    to a tuple (resolved, target, type). The "resolved" field is the "real"
+    path on the local file system (after resolving relative paths and
+    traversing symlinks). The "target" is the path on the target file system
+    (under stagedir). The type is the object type (one of File, Directory,
+    CreateFile, WritableFile).
+
+    The latter two (CreateFile, WritableFile) are used by
+    InitialWorkDirRequirement to indicate files that are generated on the fly
+    (CreateFile, in this case "resolved" holds the file contents instead of the
+    path because the file doesn't exist) or copied into the output directory
+    so they can be opened for update ("r+" or "a") (WritableFile).
+
+    """
+
+    def __init__(self, referenced_files, basedir, stagedir, separateDirs=True):
+        # type: (List[Any], Text, Text, bool) -> None
+        self._pathmap = {}  # type: Dict[Text, MapperEnt]
+        self.stagedir = stagedir
+        self.separateDirs = separateDirs
+        self.setup(dedup(referenced_files), basedir)
+
+    def visitlisting(self, listing, stagedir, basedir):
+        # type: (List[Dict[Text, Any]], Text, Text) -> None
+        for ld in listing:
+            # visit() dispatches on ld["class"], so Files and Directories
+            # in a listing are handled uniformly here.
+            self.visit(ld, stagedir, basedir, copy=ld.get("writable", False))
+
+    def visit(self, obj, stagedir, basedir, copy=False):
+        # type: (Dict[Text, Any], Text, Text, bool) -> None
+        tgt = os.path.join(stagedir, obj["basename"])
+        if obj["class"] == "Directory":
+            self._pathmap[obj["location"]] = MapperEnt(obj["location"], tgt, "Directory")
+            self.visitlisting(obj.get("listing", []), tgt, basedir)
+        elif obj["class"] == "File":
+            path = obj["location"]
+            if path in self._pathmap:
+                return
+            ab = abspath(path, basedir)
+            if "contents" in obj and obj["location"].startswith("_:"):
+                self._pathmap[obj["location"]] = MapperEnt(obj["contents"], tgt, "CreateFile")
+            else:
+                if copy:
+                    self._pathmap[path] = MapperEnt(ab, tgt, "WritableFile")
+                else:
+                    self._pathmap[path] = MapperEnt(ab, tgt, "File")
+                self.visitlisting(obj.get("secondaryFiles", []), stagedir, basedir)
 
     def setup(self, referenced_files, basedir):
-        # type: (Set[str], str) -> None
-        for src in referenced_files:
-            ab = abspath(src, basedir)
-            self._pathmap[src] = (ab, ab)
-
-    def mapper(self, src):  # type: (str) -> Tuple[str,str]
-        if "#" in src:
-            i = src.index("#")
+        # type: (List[Any], Text) -> None
+
+        # Go through each file and set the target to its own directory along
+        # with any secondary files.
+        stagedir = self.stagedir
+        for fob in referenced_files:
+            if self.separateDirs:
+                stagedir = os.path.join(self.stagedir, "stg%s" % uuid.uuid4())
+            self.visit(fob, stagedir, basedir)
+
+        # Dereference symbolic links
+        for path, (ab, tgt, type) in self._pathmap.items():
+            if type != "File":  # or not os.path.exists(ab):
+                continue
+            deref = ab
+            st = os.lstat(deref)
+            while stat.S_ISLNK(st.st_mode):
+                rl = os.readlink(deref)
+                deref = rl if os.path.isabs(rl) else os.path.join(
+                    os.path.dirname(deref), rl)
+                st = os.lstat(deref)
+
+            self._pathmap[path] = MapperEnt(deref, tgt, "File")
+
+    def mapper(self, src):  # type: (Text) -> MapperEnt
+        if u"#" in src:
+            i = src.index(u"#")
             p = self._pathmap[src[:i]]
-            return (p[0], p[1] + src[i:])
+            return MapperEnt(p.resolved, p.target + src[i:], None)
         else:
             return self._pathmap[src]
 
-    def files(self):  # type: () -> List[str]
+    def files(self):  # type: () -> List[Text]
         return self._pathmap.keys()
 
-    def items(self):  # type: () -> List[Tuple[str,Tuple[str,str]]]
+    def items(self):  # type: () -> List[Tuple[Text, MapperEnt]]
         return self._pathmap.items()
 
-    def reversemap(self, target):  # type: (str) -> Tuple[str, str]
+    def reversemap(self, target):  # type: (Text) -> Tuple[Text, Text]
         for k, v in self._pathmap.items():
             if v[1] == target:
                 return (k, v[0])
         return None
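
Putting the class together: with separateDirs left True, each top-level File is staged into its own stg<uuid> directory, and mapper() looks an entry up by location. A sketch, assuming /inputs/a.txt exists on disk (setup() lstats the resolved path to dereference symlinks):

    fobj = {"class": "File", "location": "file:///inputs/a.txt",
            "basename": "a.txt"}
    pm = PathMapper([fobj], "/inputs", "/stage")
    ent = pm.mapper("file:///inputs/a.txt")
    # ent.resolved == "/inputs/a.txt" (after following symlinks)
    # ent.target   == "/stage/stg<uuid>/a.txt"; ent.type == "File"
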
-
-
-class DockerPathMapper(PathMapper):
-
-    def __init__(self, referenced_files, basedir):
-        # type: (Set[str], str) -> None
-        self.dirs = {}  # type: Dict[str, Union[bool, str]]
-        super(DockerPathMapper, self).__init__(referenced_files, basedir)
-
-    def setup(self, referenced_files, basedir):
-        for src in referenced_files:
-            ab = abspath(src, basedir)
-            dirn, fn = os.path.split(ab)
-
-            subdir = False
-            for d in self.dirs:
-                if dirn.startswith(d):
-                  subdir = True
-                  break
-
-            if not subdir:
-                for d in list(self.dirs):
-                    if d.startswith(dirn):
-                        # 'dirn' is a parent of 'd'
-                        del self.dirs[d]
-                self.dirs[dirn] = True
-
-        prefix = "job" + str(random.randint(1, 1000000000)) + "_"
-
-        names = set()  # type: Set[str]
-        for d in self.dirs:
-            name = os.path.join("/var/lib/cwl", prefix + os.path.basename(d))
-            i = 1
-            while name in names:
-                i += 1
-                name = os.path.join("/var/lib/cwl",
-                        prefix + os.path.basename(d) + str(i))
-            names.add(name)
-            self.dirs[d] = name
-
-        for src in referenced_files:
-            ab = abspath(src, basedir)
-
-            deref = ab
-            st = os.lstat(deref)
-            while stat.S_ISLNK(st.st_mode):
-                rl = os.readlink(deref)
-                deref = rl if os.path.isabs(rl) else os.path.join(
-                        os.path.dirname(deref), rl)
-                st = os.lstat(deref)
-
-            for d in self.dirs:
-                if ab.startswith(d):
-                    self._pathmap[src] = (deref, os.path.join(
-                        self.dirs[d], ab[len(d)+1:]))
diff --git a/cwltool/process.py b/cwltool/process.py
index 94d9aab..b4e9001 100644
--- a/cwltool/process.py
+++ b/cwltool/process.py
@@ -1,57 +1,62 @@
-import abc
-import avro.schema
 import os
 import json
-import schema_salad.validate as validate
-import copy
-import yaml
 import copy
 import logging
 import pprint
-from .utils import aslist, get_feature
-import schema_salad.schema
-from schema_salad.ref_resolver import Loader
-import urlparse
-import pprint
-from pkg_resources import resource_stream
 import stat
-from .builder import Builder, adjustFileObjs
 import tempfile
 import glob
-from .errors import WorkflowException
-from .pathmapper import abspath
-from typing import Any, Callable, Generator, Union, IO, AnyStr, Tuple
+import urlparse
 from collections import Iterable
+import errno
+import shutil
+import uuid
+import hashlib
+
+import abc
+import schema_salad.validate as validate
+import schema_salad.schema
+from schema_salad.ref_resolver import Loader
+import avro.schema
+from typing import (Any, AnyStr, Callable, cast, Dict, List, Generator, IO,
+        Set, Text, Tuple, Union)
 from rdflib import URIRef
 from rdflib.namespace import RDFS, OWL
-from .stdfsaccess import StdFsAccess
-import errno
 from rdflib import Graph
+from pkg_resources import resource_stream
+
+from .utils import aslist, get_feature
+from .stdfsaccess import StdFsAccess
+from .builder import Builder, adjustFileObjs, adjustDirObjs
+from .errors import WorkflowException, UnsupportedRequirement
+from .pathmapper import PathMapper, abspath, normalizeFilesDirs
 
 _logger = logging.getLogger("cwltool")
 
 supportedProcessRequirements = ["DockerRequirement",
                                 "SchemaDefRequirement",
                                 "EnvVarRequirement",
-                                "CreateFileRequirement",
                                 "ScatterFeatureRequirement",
                                 "SubworkflowFeatureRequirement",
                                 "MultipleInputFeatureRequirement",
                                 "InlineJavascriptRequirement",
                                 "ShellCommandRequirement",
                                 "StepInputExpressionRequirement",
-                                "ResourceRequirement"]
-
-cwl_files = ("Workflow.yml",
-              "CommandLineTool.yml",
-              "CommonWorkflowLanguage.yml",
-              "Process.yml",
-              "concepts.md",
-              "contrib.md",
-              "intro.md",
-              "invocation.md")
+                                "ResourceRequirement",
+                                "InitialWorkDirRequirement"]
+
+cwl_files = (
+    "Workflow.yml",
+    "CommandLineTool.yml",
+    "CommonWorkflowLanguage.yml",
+    "Process.yml",
+    "concepts.md",
+    "contrib.md",
+    "intro.md",
+    "invocation.md")
 
 salad_files = ('metaschema.yml',
+               'metaschema_base.yml',
               'salad.md',
               'field_name.yml',
               'import_include.md',
@@ -71,32 +76,53 @@ salad_files = ('metaschema.yml',
               'vocab_res_schema.yml',
               'vocab_res_src.yml',
               'vocab_res_proc.yml')
-def get_schema():
-    # type: () -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[unicode,Any]]
+
+SCHEMA_CACHE = {}  # type: Dict[Text, Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text, Any], Loader]]
+SCHEMA_FILE = None  # type: Dict[Text, Any]
+SCHEMA_DIR = None  # type: Dict[Text, Any]
+SCHEMA_ANY = None  # type: Dict[Text, Any]
+
+def get_schema(version):
+    # type: (Text) -> Tuple[Loader, Union[avro.schema.Names, avro.schema.SchemaParseException], Dict[Text,Any], Loader]
+
+    if version in SCHEMA_CACHE:
+        return SCHEMA_CACHE[version]
+
     cache = {}
+    version = version.split("#")[-1]
+    if '.dev' in version:
+        version = ".".join(version.split(".")[:-1])
     for f in cwl_files:
-        rs = resource_stream(__name__, 'schemas/draft-3/' + f)
-        cache["https://w3id.org/cwl/" + f] = rs.read()
-        rs.close()
+        try:
+            res = resource_stream(__name__, 'schemas/%s/%s' % (version, f))
+            cache["https://w3id.org/cwl/" + f] = res.read()
+            res.close()
+        except IOError:
+            pass
 
     for f in salad_files:
-        rs = resource_stream(__name__, 'schemas/draft-3/salad/schema_salad/metaschema/' + f)
-        cache["https://w3id.org/cwl/salad/schema_salad/metaschema/" + f] = rs.read()
-        rs.close()
+        try:
+            res = resource_stream(
+                __name__, 'schemas/%s/salad/schema_salad/metaschema/%s'
+                % (version, f))
+            cache["https://w3id.org/cwl/salad/schema_salad/metaschema/"
+                  + f] = res.read()
+            res.close()
+        except IOError:
+            pass
+
+    SCHEMA_CACHE[version] = schema_salad.schema.load_schema(
+        "https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)
 
-    return schema_salad.schema.load_schema("https://w3id.org/cwl/CommonWorkflowLanguage.yml", cache=cache)
+    return SCHEMA_CACHE[version]
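
Version strings are normalized before lookup (a fragment like cwl#v1.0.dev4 reduces to v1.0) and the result is memoized, so repeated calls are cheap:

    loader, avsc_names, schema_metadata, metaschema_loader = get_schema("v1.0")
    assert get_schema("v1.0")[0] is loader  # served from SCHEMA_CACHE
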
 
 def shortname(inputid):
-    # type: (str) -> str
+    # type: (Text) -> Text
     d = urlparse.urlparse(inputid)
     if d.fragment:
-        return d.fragment.split("/")[-1].split(".")[-1]
+        return d.fragment.split(u"/")[-1]
     else:
-        return d.path.split("/")[-1]
-
-
-class UnsupportedRequirement(Exception):
-    pass
+        return d.path.split(u"/")[-1]
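
shortname now keeps the entire last fragment segment (the draft-3 behaviour of also splitting on "." is gone):

    shortname("file:///wf.cwl#step1/output")  # -> "output"
    shortname("http://example.com/tool.cwl")  # -> "tool.cwl"
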
 
 def checkRequirements(rec, supportedProcessRequirements):
     # type: (Any, Iterable[Any]) -> None
@@ -111,18 +137,6 @@ def checkRequirements(rec, supportedProcessRequirements):
         for d in rec:
             checkRequirements(d, supportedProcessRequirements)
 
-def adjustFiles(rec, op):  # type: (Any, Callable[..., Any]) -> None
-    """Apply a mapping function to each File path in the object `rec`."""
-
-    if isinstance(rec, dict):
-        if rec.get("class") == "File":
-            rec["path"] = op(rec["path"])
-        for d in rec:
-            adjustFiles(rec[d], op)
-    if isinstance(rec, list):
-        for d in rec:
-            adjustFiles(d, op)
-
 def adjustFilesWithSecondary(rec, op, primary=None):
     """Apply a mapping function to each File path in the object `rec`, propagating
     the primary file associated with a group of secondary files.
@@ -140,8 +154,88 @@ def adjustFilesWithSecondary(rec, op, primary=None):
         for d in rec:
             adjustFilesWithSecondary(d, op, primary)
 
+def getListing(fs_access, rec):
+    # type: (StdFsAccess, Dict[Text, Any]) -> None
+    if "listing" not in rec:
+        listing = []
+        loc = rec["location"]
+        for ld in fs_access.listdir(loc):
+            if fs_access.isdir(ld):
+                ent = {u"class": u"Directory",
+                       u"location": ld}
+                getListing(fs_access, ent)
+                listing.append(ent)
+            else:
+                listing.append({"class": "File", "location": ld})
+        rec["listing"] = listing
+
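
getListing fills in a recursive listing for a Directory that lacks one, going through the supplied fs_access abstraction; a sketch, assuming /tmp/data exists:

    from cwltool.stdfsaccess import StdFsAccess

    d = {u"class": u"Directory", u"location": u"file:///tmp/data"}
    getListing(StdFsAccess(""), d)
    # d["listing"] is now a recursive tree of File/Directory objects
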
+def stageFiles(pm, stageFunc, ignoreWritable=False):
+    # type: (PathMapper, Callable[..., Any], bool) -> None
+    for f, p in pm.items():
+        if not os.path.exists(os.path.dirname(p.target)):
+            os.makedirs(os.path.dirname(p.target), 0755)
+        if p.type == "File":
+            stageFunc(p.resolved, p.target)
+        elif p.type == "WritableFile" and not ignoreWritable:
+            shutil.copy(p.resolved, p.target)
+        elif p.type == "CreateFile" and not ignoreWritable:
+            with open(p.target, "w") as n:
+                n.write(p.resolved.encode("utf-8"))
+
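
stageFunc decides how plain File entries reach their targets (e.g. symlink for local runs, or copy across filesystems), while WritableFile and CreateFile entries are materialised as real files unless ignoreWritable is set. A sketch, reusing the pm from the PathMapper example above:

    import os
    import shutil

    stageFiles(pm, os.symlink)                        # link regular files
    stageFiles(pm, shutil.copy, ignoreWritable=True)  # or copy only plain Files
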
+def collectFilesAndDirs(obj, out):
+    # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], List[Dict[Text, Any]]) -> None
+    if isinstance(obj, dict):
+        if obj.get("class") in ("File", "Directory"):
+            out.append(obj)
+        else:
+            for v in obj.values():
+                collectFilesAndDirs(v, out)
+    if isinstance(obj, list):
+        for l in obj:
+            collectFilesAndDirs(l, out)
+
+def relocateOutputs(outputObj, outdir, output_dirs, action):
+    # type: (Union[Dict[Text, Any], List[Dict[Text, Any]]], Text, Set[Text], Text) -> Union[Dict[Text, Any], List[Dict[Text, Any]]]
+    if action not in ("move", "copy"):
+        return outputObj
+
+    def moveIt(src, dst):
+        if action == "move":
+            for a in output_dirs:
+                if src.startswith(a):
+                    _logger.debug("Moving %s to %s", src, dst)
+                    shutil.move(src, dst)
+                    return
+        _logger.debug("Copying %s to %s", src, dst)
+        shutil.copy(src, dst)
+
+    outfiles = []  # type: List[Dict[Text, Any]]
+    collectFilesAndDirs(outputObj, outfiles)
+    pm = PathMapper(outfiles, "", outdir, separateDirs=False)
+    stageFiles(pm, moveIt)
+
+    def _check_adjust(f):
+        f["location"] = "file://" + pm.mapper(f["location"])[1]
+        if "contents" in f:
+            del f["contents"]
+        if f["class"] == "File":
+            compute_checksums(StdFsAccess(""), f)
+        return f
+
+    adjustFileObjs(outputObj, _check_adjust)
+    adjustDirObjs(outputObj, _check_adjust)
+
+    return outputObj
+
+def cleanIntermediate(output_dirs):  # type: (Set[Text]) -> None
+    for a in output_dirs:
+        if os.path.exists(a) and empty_subtree(a):
+            _logger.debug(u"Removing intermediate output directory %s", a)
+            shutil.rmtree(a, True)
+
+
 def formatSubclassOf(fmt, cls, ontology, visited):
-    # type: (str, str, Graph, Set[str]) -> bool
+    # type: (Text, Text, Graph, Set[Text]) -> bool
     """Determine if `fmt` is a subclass of `cls`."""
 
     if URIRef(fmt) == URIRef(cls):
@@ -176,7 +270,7 @@ def formatSubclassOf(fmt, cls, ontology, visited):
 
 
 def checkFormat(actualFile, inputFormats, ontology):
-    # type: (Union[Dict[str, Any], List[Dict[str, Any]]], Any, Graph) -> None 
+    # type: (Union[Dict[Text, Any], List, Text], Union[List[Text], Text], Graph) -> None
     for af in aslist(actualFile):
         if "format" not in af:
             raise validate.ValidationException(u"Missing required 'format' for File %s" % af)
@@ -186,29 +280,71 @@ def checkFormat(actualFile, inputFormats, ontology):
         raise validate.ValidationException(u"Incompatible file format %s required format(s) %s" % (af["format"], inputFormats))
 
 def fillInDefaults(inputs, job):
-    # type: (List[Dict[str, str]], Dict[str, str]) -> None
+    # type: (List[Dict[Text, Text]], Dict[Text, Union[Dict[Text, Any], List, Text]]) -> None
     for inp in inputs:
-        if shortname(inp["id"]) in job:
+        if shortname(inp[u"id"]) in job:
             pass
-        elif shortname(inp["id"]) not in job and "default" in inp:
-            job[shortname(inp["id"])] = copy.copy(inp["default"])
-        elif shortname(inp["id"]) not in job and inp["type"][0] == "null":
+        elif shortname(inp[u"id"]) not in job and u"default" in inp:
+            job[shortname(inp[u"id"])] = copy.copy(inp[u"default"])
+        elif shortname(inp[u"id"]) not in job and inp[u"type"][0] == u"null":
             pass
         else:
             raise validate.ValidationException("Missing input parameter `%s`" % shortname(inp["id"]))
 
+
+def avroize_type(field_type, name_prefix=""):
+    # type: (Union[List[Dict[Text, Any]], Dict[Text, Any]], Text) -> Any
+    """
+    Adds missing information to a type so that CWL types are valid in schema_salad.
+    """
+    if isinstance(field_type, list):
+        for f in field_type:
+            avroize_type(f, name_prefix)
+    elif isinstance(field_type, dict):
+        if field_type["type"] in ("enum", "record"):
+            if "name" not in field_type:
+                field_type["name"] = name_prefix+Text(uuid.uuid4())
+        if field_type["type"] == "record":
+            avroize_type(field_type["fields"], name_prefix)
+        if field_type["type"] == "array":
+            avroize_type(field_type["items"], name_prefix)
+    return field_type
+
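
Concretely, an anonymous enum or record gains a generated name so Avro will accept it (sketch):

    t = {"type": "enum", "symbols": ["a", "b"]}
    avroize_type(t, "myinput")
    assert t["name"].startswith("myinput")  # "myinput" + a uuid4
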
 class Process(object):
     __metaclass__ = abc.ABCMeta
 
     def __init__(self, toolpath_object, **kwargs):
-        # type: (Dict[str,Any], **Any) -> None
-        self.metadata = None  # type: Dict[str,Any]
+        # type: (Dict[Text, Any], **Any) -> None
+        """
+        kwargs:
+
+        metadata: tool document metadata
+        requirements: inherited requirements
+        hints: inherited hints
+        loader: schema_salad.ref_resolver.Loader used to load tool document
+        avsc_names: CWL Avro schema object used to validate document
+        strict: flag to determine strict validation (fail on unrecognized fields)
+        """
+
+        self.metadata = kwargs.get("metadata", {})  # type: Dict[Text,Any]
         self.names = None  # type: avro.schema.Names
-        n = get_schema()[1]
-        if isinstance(n, avro.schema.SchemaParseException):
-            raise n
+
+        global SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY  # pylint: disable=global-statement
+        if SCHEMA_FILE is None:
+            get_schema("v1.0")
+            SCHEMA_ANY = cast(Dict[Text, Any],
+                    SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/salad#Any"])
+            SCHEMA_FILE = cast(Dict[Text, Any],
+                    SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#File"])
+            SCHEMA_DIR = cast(Dict[Text, Any],
+                              SCHEMA_CACHE["v1.0"][3].idx["https://w3id.org/cwl/cwl#Directory"])
+
+        names = schema_salad.schema.make_avro_schema([SCHEMA_FILE, SCHEMA_DIR, SCHEMA_ANY],
+                                                     schema_salad.ref_resolver.Loader({}))[0]
+        if isinstance(names, avro.schema.SchemaParseException):
+            raise names
         else:
-            self.names = n
+            self.names = names
         self.tool = toolpath_object
         self.requirements = kwargs.get("requirements", []) + self.tool.get("requirements", [])
         self.hints = kwargs.get("hints", []) + self.tool.get("hints", [])
@@ -216,27 +352,31 @@ class Process(object):
         if "loader" in kwargs:
             self.formatgraph = kwargs["loader"].graph
 
+        self.doc_loader = kwargs["loader"]
+        self.doc_schema = kwargs["avsc_names"]
+
         checkRequirements(self.tool, supportedProcessRequirements)
-        self.validate_hints(self.tool.get("hints", []), strict=kwargs.get("strict"))
+        self.validate_hints(kwargs["avsc_names"], self.tool.get("hints", []),
+                strict=kwargs.get("strict"))
 
-        self.schemaDefs = {}  # type: Dict[str,Dict[unicode, Any]]
+        self.schemaDefs = {}  # type: Dict[Text,Dict[Text, Any]]
 
         sd, _ = self.get_requirement("SchemaDefRequirement")
 
         if sd:
             sdtypes = sd["types"]
-            av = schema_salad.schema.make_valid_avro(sdtypes, {t["name"]: t for t in sdtypes}, set())
+            av = schema_salad.schema.make_valid_avro(sdtypes, {t["name"]: t for t in avroize_type(sdtypes)}, set())
             for i in av:
                 self.schemaDefs[i["name"]] = i
             avro.schema.make_avsc_object(av, self.names)
 
         # Build record schema from inputs
         self.inputs_record_schema = {
-                "name": "input_record_schema", "type": "record",
-                "fields": []}  # type: Dict[unicode, Any]
+            "name": "input_record_schema", "type": "record",
+            "fields": []}  # type: Dict[Text, Any]
         self.outputs_record_schema = {
-                "name": "outputs_record_schema", "type": "record",
-                "fields": []}  # type: Dict[unicode, Any]
+            "name": "outputs_record_schema", "type": "record",
+            "fields": []}  # type: Dict[Text, Any]
 
         for key in ("inputs", "outputs"):
             for i in self.tool[key]:
@@ -251,55 +391,81 @@ class Process(object):
                     c["type"] = ["null"] + aslist(c["type"])
                 else:
                     c["type"] = c["type"]
-
+                c["type"] = avroize_type(c["type"], c["name"])
                 if key == "inputs":
-                    self.inputs_record_schema["fields"].append(c)  # type: ignore
+                    self.inputs_record_schema["fields"].append(c)
                 elif key == "outputs":
-                    self.outputs_record_schema["fields"].append(c)  # type: ignore
+                    self.outputs_record_schema["fields"].append(c)
 
         try:
             self.inputs_record_schema = schema_salad.schema.make_valid_avro(self.inputs_record_schema, {}, set())
             avro.schema.make_avsc_object(self.inputs_record_schema, self.names)
         except avro.schema.SchemaParseException as e:
-            raise validate.ValidationException(u"Got error `%s` while prcoessing inputs of %s:\n%s" % (str(e), self.tool["id"], json.dumps(self.inputs_record_schema, indent=4)))
+            raise validate.ValidationException(u"Got error `%s` while processing inputs of %s:\n%s" % (Text(e), self.tool["id"], json.dumps(self.inputs_record_schema, indent=4)))
 
         try:
             self.outputs_record_schema = schema_salad.schema.make_valid_avro(self.outputs_record_schema, {}, set())
             avro.schema.make_avsc_object(self.outputs_record_schema, self.names)
         except avro.schema.SchemaParseException as e:
-            raise validate.ValidationException(u"Got error `%s` while prcoessing outputs of %s:\n%s" % (str(e), self.tool["id"], json.dumps(self.outputs_record_schema, indent=4)))
-
+            raise validate.ValidationException(u"Got error `%s` while processing outputs of %s:\n%s" % (Text(e), self.tool["id"], json.dumps(self.outputs_record_schema, indent=4)))
+
+
+    def _init_job(self, joborder, **kwargs):
+        # type: (Dict[Text, Text], **Any) -> Builder
+        """
+        kwargs:
+
+        eval_timeout: javascript evaluation timeout
+        use_container: do/don't use Docker when DockerRequirement hint provided
+        make_fs_access: make an FsAccess() object with given basedir
+        basedir: basedir for FsAccess
+        docker_outdir: output directory inside docker for this job
+        docker_tmpdir: tmpdir inside docker for this job
+        docker_stagedir: stagedir inside docker for this job
+        outdir: outdir on host for this job
+        tmpdir: tmpdir on host for this job
+        stagedir: stagedir on host for this job
+        select_resources: callback to select compute resources
+        """
 
-    def _init_job(self, joborder, input_basedir, **kwargs):
-        # type: (Dict[str, str], str, **Any) -> Builder
         builder = Builder()
-        builder.job = copy.deepcopy(joborder)
+        builder.job = cast(Dict[Text, Union[Dict[Text, Any], List,
+            Text]], copy.deepcopy(joborder))
 
-        fillInDefaults(self.tool["inputs"], builder.job)
+        fillInDefaults(self.tool[u"inputs"], builder.job)
+        normalizeFilesDirs(builder.job)
 
         # Validate job order
         try:
             validate.validate_ex(self.names.get_name("input_record_schema", ""), builder.job)
         except validate.ValidationException as e:
-            raise WorkflowException("Error validating input record, " + str(e))
+            raise WorkflowException("Error validating input record, " + Text(e))
 
         builder.files = []
         builder.bindings = []
         builder.schemaDefs = self.schemaDefs
         builder.names = self.names
         builder.requirements = self.requirements
+        builder.hints = self.hints
         builder.resources = {}
         builder.timeout = kwargs.get("eval_timeout")
 
-        dockerReq, _ = self.get_requirement("DockerRequirement")
+        dockerReq, is_req = self.get_requirement("DockerRequirement")
+
+        if dockerReq and is_req and not kwargs.get("use_container"):
+            raise WorkflowException("Document has DockerRequirement under 'requirements' but use_container is false.  DockerRequirement must be under 'hints' or use_container must be true.")
+
+        builder.make_fs_access = kwargs.get("make_fs_access") or StdFsAccess
+        builder.fs_access = builder.make_fs_access(kwargs["basedir"])
+
         if dockerReq and kwargs.get("use_container"):
-            builder.outdir = kwargs.get("docker_outdir") or "/var/spool/cwl"
-            builder.tmpdir = kwargs.get("docker_tmpdir") or "/tmp"
+            builder.outdir = builder.fs_access.realpath(dockerReq.get("dockerOutputDirectory") or kwargs.get("docker_outdir") or "/var/spool/cwl")
+            builder.tmpdir = builder.fs_access.realpath(kwargs.get("docker_tmpdir") or "/tmp")
+            builder.stagedir = builder.fs_access.realpath(kwargs.get("docker_stagedir") or "/var/lib/cwl")
         else:
-            builder.outdir = kwargs.get("outdir") or tempfile.mkdtemp()
-            builder.tmpdir = kwargs.get("tmpdir") or tempfile.mkdtemp()
-
-        builder.fs_access = kwargs.get("fs_access") or StdFsAccess(input_basedir)
+            builder.outdir = builder.fs_access.realpath(kwargs.get("outdir") or tempfile.mkdtemp())
+            builder.tmpdir = builder.fs_access.realpath(kwargs.get("tmpdir") or tempfile.mkdtemp())
+            builder.stagedir = builder.fs_access.realpath(kwargs.get("stagedir") or tempfile.mkdtemp())
 
         if self.formatgraph:
             for i in self.tool["inputs"]:
@@ -313,7 +479,7 @@ class Process(object):
             for n, b in enumerate(aslist(self.tool["baseCommand"])):
                 builder.bindings.append({
                     "position": [-1000000, n],
-                    "valueFrom": b
+                    "datum": b
                 })
 
         if self.tool.get("arguments"):
@@ -324,14 +490,17 @@ class Process(object):
                         a["position"] = [a["position"], i]
                     else:
                         a["position"] = [0, i]
-                    a["do_eval"] = a["valueFrom"]
-                    a["valueFrom"] = None
                     builder.bindings.append(a)
-                else:
+                elif ("$(" in a) or ("${" in a):
                     builder.bindings.append({
                         "position": [0, i],
                         "valueFrom": a
                     })
+                else:
+                    builder.bindings.append({
+                        "position": [0, i],
+                        "datum": a
+                    })
 
         builder.bindings.sort(key=lambda a: a["position"])
 
@@ -340,7 +509,7 @@ class Process(object):
         return builder
 
     def evalResources(self, builder, kwargs):
-        # type: (Builder, Dict[str, Any]) -> Dict[str, Union[int, str]]
+        # type: (Builder, Dict[AnyStr, Any]) -> Dict[Text, Union[int, Text]]
         resourceReq, _ = self.get_requirement("ResourceRequirement")
         if resourceReq is None:
             resourceReq = {}
@@ -375,35 +544,39 @@ class Process(object):
         else:
             return {
                 "cores": request["coresMin"],
-                "ram":   request["ramMin"],
+                "ram": request["ramMin"],
                 "tmpdirSize": request["tmpdirMin"],
                 "outdirSize": request["outdirMin"],
             }
 
-    def validate_hints(self, hints, strict):
-        # type: (List[Dict[str, Any]], bool) -> None
+    def validate_hints(self, avsc_names, hints, strict):
+        # type: (Any, List[Dict[Text, Any]], bool) -> None
         for r in hints:
             try:
-                if self.names.get_name(r["class"], "") is not None:
-                    validate.validate_ex(self.names.get_name(r["class"], ""), r, strict=strict)
+                if avsc_names.get_name(r["class"], "") is not None:
+                    plain_hint = dict((key,r[key]) for key in r if key not in
+                            self.doc_loader.identifiers)  # strip identifiers
+                    validate.validate_ex(
+                        avsc_names.get_name(plain_hint["class"], ""),
+                        plain_hint, strict=strict)
                 else:
-                    _logger.info(str(validate.ValidationException(
-                    u"Unknown hint %s" % (r["class"]))))
+                    _logger.info(Text(validate.ValidationException(
+                        u"Unknown hint %s" % (r["class"]))))
             except validate.ValidationException as v:
-                raise validate.ValidationException(u"Validating hint `%s`: %s" % (r["class"], str(v)))
+                raise validate.ValidationException(u"Validating hint `%s`: %s" % (r["class"], Text(v)))
 
     def get_requirement(self, feature):  # type: (Any) -> Tuple[Any, bool]
         return get_feature(self, feature)
 
-    def visit(self, op):
+    def visit(self, op):  # type: (Callable[[Dict[Text, Any]], None]) -> None
         op(self.tool)
 
     @abc.abstractmethod
-    def job(self, job_order, input_basedir, output_callbacks, **kwargs):
-        # type: (Dict[str, str], str, Callable[[Any, Any], Any], **Any) -> Generator[Any, None, None]
+    def job(self, job_order, output_callbacks, **kwargs):
+        # type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator[Any, None, None]
         return None
 
-def empty_subtree(dirpath):  # type: (AnyStr) -> bool
+def empty_subtree(dirpath):  # type: (Text) -> bool
     # Test if a directory tree contains any files (does not count empty
     # subdirectories)
     for d in os.listdir(dirpath):
@@ -421,10 +594,11 @@ def empty_subtree(dirpath):  # type: (AnyStr) -> bool
                 raise
     return True
 
-_names = set()  # type: Set[unicode]
+
+_names = set()  # type: Set[Text]
 
 
-def uniquename(stem):  # type: (unicode) -> unicode
+def uniquename(stem):  # type: (Text) -> Text
     c = 1
     u = stem
     while u in _names:
@@ -433,9 +607,42 @@ def uniquename(stem):  # type: (unicode) -> unicode
     _names.add(u)
     return u
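
Since _names is module-level, repeated stems pick up numeric suffixes across the whole run:

    uniquename("step")  # -> "step"
    uniquename("step")  # -> "step_2"
    uniquename("step")  # -> "step_3"
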
 
-def scandeps(base, doc, reffields, urlfields, loadref):
-    # type: (str, Any, Set[str], Set[str], Callable[[str, str], Any]) -> List[Dict[str, str]]
-    r = []
+def nestdir(base, deps):
+    # type: (Text, Dict[Text, Any]) -> Dict[Text, Any]
+    dirname = os.path.dirname(base) + "/"
+    subid = deps["location"]
+    if subid.startswith(dirname):
+        s2 = subid[len(dirname):]
+        sp = s2.split('/')
+        sp.pop()
+        while sp:
+            nx = sp.pop()
+            deps = {
+                "class": "Directory",
+                "basename": nx,
+                "listing": [deps]
+            }
+    return deps
+
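
nestdir wraps a dependency living below base's directory in one synthetic Directory per intermediate path segment, so the dependency listing mirrors the on-disk layout (sketch):

    dep = {"class": "File", "location": "file:///wf/lib/inner/helper.cwl"}
    nested = nestdir("file:///wf/main.cwl", dep)
    # nested == {"class": "Directory", "basename": "lib",
    #            "listing": [{"class": "Directory", "basename": "inner",
    #                         "listing": [dep]}]}
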
+def mergedirs(listing):
+    # type: (List[Dict[Text, Any]]) -> List[Dict[Text, Any]]
+    r = []  # type: List[Dict[Text, Any]]
+    ents = {}  # type: Dict[Text, Any]
+    for e in listing:
+        if e["basename"] not in ents:
+            ents[e["basename"]] = e
+        elif e["class"] == "Directory":
+            ents[e["basename"]]["listing"].extend(e["listing"])
+    for e in ents.itervalues():
+        if e["class"] == "Directory" and "listing" in e:
+            e["listing"] = mergedirs(e["listing"])
+    r.extend(ents.itervalues())
+    return r
+
+def scandeps(base, doc, reffields, urlfields, loadref, urljoin=urlparse.urljoin):
+    # type: (Text, Any, Set[Text], Set[Text], Callable[[Text, Text], Any], Callable[[Text, Text], Text]) -> List[Dict[Text, Text]]
+    r = []  # type: List[Dict[Text, Text]]
+    deps = None  # type: Dict[Text, Any]
     if isinstance(doc, dict):
         if "id" in doc:
             if doc["id"].startswith("file://"):
@@ -443,35 +650,75 @@ def scandeps(base, doc, reffields, urlfields, loadref):
                 if base != df:
                     r.append({
                         "class": "File",
-                        "path": df
+                        "location": df
                     })
                     base = df
 
+        if doc.get("class") in ("File", "Directory") and "location" in urlfields:
+            u = doc.get("location", doc.get("path"))
+            if u and not u.startswith("_:"):
+                deps = {
+                    "class": doc["class"],
+                    "location": urljoin(base, u)
+                }
+                if doc["class"] == "Directory" and "listing" in doc:
+                    deps["listing"] = doc["listing"]
+                if doc["class"] == "File" and "secondaryFiles" in doc:
+                    deps["secondaryFiles"] = doc["secondaryFiles"]
+                deps = nestdir(base, deps)
+                r.append(deps)
+            else:
+                if doc["class"] == "Directory" and "listing" in doc:
+                    r.extend(scandeps(base, doc["listing"], reffields, urlfields, loadref, urljoin=urljoin))
+                elif doc["class"] == "File" and "secondaryFiles" in doc:
+                    r.extend(scandeps(base, doc["secondaryFiles"], reffields, urlfields, loadref, urljoin=urljoin))
+
         for k, v in doc.iteritems():
             if k in reffields:
                 for u in aslist(v):
                     if isinstance(u, dict):
-                        r.extend(scandeps(base, u, reffields, urlfields, loadref))
+                        r.extend(scandeps(base, u, reffields, urlfields, loadref, urljoin=urljoin))
                     else:
                         sub = loadref(base, u)
-                        subid = urlparse.urljoin(base, u)
+                        subid = urljoin(base, u)
                         deps = {
                             "class": "File",
-                            "path": subid
-                            }  # type: Dict[str, Any]
-                        sf = scandeps(subid, sub, reffields, urlfields, loadref)
+                            "location": subid
+                        }
+                        sf = scandeps(subid, sub, reffields, urlfields, loadref, urljoin=urljoin)
                         if sf:
                             deps["secondaryFiles"] = sf
+                        deps = nestdir(base, deps)
                         r.append(deps)
-            elif k in urlfields:
+            elif k in urlfields and k != "location":
                 for u in aslist(v):
-                    r.append({
+                    deps = {
                         "class": "File",
-                        "path": urlparse.urljoin(base, u)
-                    })
-            else:
-                r.extend(scandeps(base, v, reffields, urlfields, loadref))
+                        "location": urljoin(base, u)
+                    }
+                    deps = nestdir(base, deps)
+                    r.append(deps)
+            elif k not in ("listing", "secondaryFiles"):
+                r.extend(scandeps(base, v, reffields, urlfields, loadref, urljoin=urljoin))
     elif isinstance(doc, list):
         for d in doc:
-            r.extend(scandeps(base, d, reffields, urlfields, loadref))
+            r.extend(scandeps(base, d, reffields, urlfields, loadref, urljoin=urljoin))
+
+    if r:
+        normalizeFilesDirs(r)
+        r = mergedirs(r)
+
     return r
+
+def compute_checksums(fs_access, fileobj):
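+    # Fill in the "checksum" and "size" fields of a File object, computing a
+    # SHA-1 digest over the contents in 1 MiB chunks.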
+    if "checksum" not in fileobj:
+        checksum = hashlib.sha1()
+        with fs_access.open(fileobj["location"], "rb") as f:
+            contents = f.read(1024*1024)
+            while contents != "":
+                checksum.update(contents)
+                contents = f.read(1024*1024)
+            f.seek(0, 2)
+            filesize = f.tell()
+        fileobj["checksum"] = "sha1$%s" % checksum.hexdigest()
+        fileobj["size"] = filesize
diff --git a/cwltool/resolver.py b/cwltool/resolver.py
new file mode 100644
index 0000000..73d4941
--- /dev/null
+++ b/cwltool/resolver.py
@@ -0,0 +1,30 @@
+import os
+import logging
+import urllib
+import urlparse
+
+_logger = logging.getLogger("cwltool")
+
+def resolve_local(document_loader, uri):
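+    # Relative references are searched for in the current working directory,
+    # then in a "commonwl" subdirectory of each XDG data directory, trying
+    # both the bare name and the name with a ".cwl" extension appended.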
+    if uri.startswith("/"):
+        return None
+    shares = [os.environ.get("XDG_DATA_HOME", os.path.join(os.environ["HOME"], ".local", "share"))]
+    shares.extend(os.environ.get("XDG_DATA_DIRS", "/usr/local/share/:/usr/share/").split(":"))
+    shares = [os.path.join(s, "commonwl", uri) for s in shares]
+    shares.insert(0, os.path.join(os.getcwd(), uri))
+
+    _logger.debug("Search path is %s", shares)
+
+    for s in shares:
+        if os.path.exists(s):
+            return ("file://%s" % s)
+        if os.path.exists("%s.cwl" % s):
+            return ("file://%s.cwl" % s)
+    return None
+
+def tool_resolver(document_loader, uri):
+    for r in [resolve_local]:
+        ret = r(document_loader, uri)
+        if ret is not None:
+            return ret
+    return "file://" + os.path.abspath(uri)
diff --git a/cwltool/sandboxjs.py b/cwltool/sandboxjs.py
index 4144cb2..0885d71 100644
--- a/cwltool/sandboxjs.py
+++ b/cwltool/sandboxjs.py
@@ -3,22 +3,37 @@ import json
 import threading
 import errno
 import logging
-from typing import Any, Union, TypeVar, Dict, List, Mapping
+import select
+import os
 
+import cStringIO
+from cStringIO import StringIO
+from typing import Any, Dict, List, Mapping, Text, TypeVar, Union
+from pkg_resources import resource_stream
 
 class JavascriptException(Exception):
     pass
 
+
 _logger = logging.getLogger("cwltool")
 
-JSON = Union[Dict[Any,Any], List[Any], unicode, int, long, float, bool, None]
+JSON = Union[Dict[Text,Any], List[Any], Text, int, long, float, bool, None]
+
+localdata = threading.local()
+
+have_node_slim = False
+
+def new_js_proc():
+    # type: () -> subprocess.Popen
+
+    res = resource_stream(__name__, 'cwlNodeEngine.js')
+    nodecode = res.read()
 
-def execjs(js, jslib, timeout=None):  # type: (Union[Mapping,str], Any, int) -> JSON
     nodejs = None
     trynodes = ("nodejs", "node")
     for n in trynodes:
         try:
-            nodejs = subprocess.Popen([n], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+            nodejs = subprocess.Popen([n, "--eval", nodecode], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
             break
         except OSError as e:
             if e.errno == errno.ENOENT:
@@ -29,14 +44,17 @@ def execjs(js, jslib, timeout=None):  # type: (Union[Mapping,str], Any, int) ->
     if nodejs is None:
         try:
             nodeimg = "node:slim"
-            dlist = subprocess.check_output(["docker", "images", nodeimg])
-            if "node" not in dlist:
-                nodejsimg = subprocess.check_output(["docker", "pull", nodeimg])
-                _logger.info("Pulled Docker image %s %s", nodeimg, nodejsimg)
+            global have_node_slim
+            if not have_node_slim:
+                dockerimgs = subprocess.check_output(["docker", "images", nodeimg])
+                if len(dockerimgs.split("\n")) <= 1:
+                    nodejsimg = subprocess.check_output(["docker", "pull", nodeimg])
+                    _logger.info("Pulled Docker image %s %s", nodeimg, nodejsimg)
+                have_node_slim = True
             nodejs = subprocess.Popen(["docker", "run",
                                        "--attach=STDIN", "--attach=STDOUT", "--attach=STDERR",
                                        "--sig-proxy=true", "--interactive",
-                                       "--rm", nodeimg],
+                                       "--rm", nodeimg, "node", "--eval", nodecode],
                                       stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
         except OSError as e:
             if e.errno == errno.ENOENT:
@@ -48,18 +66,28 @@ def execjs(js, jslib, timeout=None):  # type: (Union[Mapping,str], Any, int) ->
 
     if nodejs is None:
         raise JavascriptException(
-                u"cwltool requires Node.js engine to evaluate Javascript "
-                "expressions, but couldn't find it.  Tried %s, docker run "
-                "node:slim" % u", ".join(trynodes))
+            u"cwltool requires Node.js engine to evaluate Javascript "
+            "expressions, but couldn't find it.  Tried %s, docker run "
+            "node:slim" % u", ".join(trynodes))
+
+    return nodejs
+
+
+def execjs(js, jslib, timeout=None):  # type: (Union[Mapping, Text], Any, int) -> JSON
+
+    if not hasattr(localdata, "proc") or localdata.proc.poll() is not None:
+        localdata.proc = new_js_proc()
+
+    nodejs = localdata.proc
 
     fn = u"\"use strict\";\n%s\n(function()%s)()" % (jslib, js if isinstance(js, basestring) and len(js) > 1 and js[0] == '{' else ("{return (%s);}" % js))
-    script = u"console.log(JSON.stringify(require(\"vm\").runInNewContext(%s, {})));\n" % json.dumps(fn)
 
     killed = []
+
     def term():
         try:
-            nodejs.kill()
             killed.append(True)
+            nodejs.kill()
         except OSError:
             pass
 
@@ -69,119 +97,46 @@ def execjs(js, jslib, timeout=None):  # type: (Union[Mapping,str], Any, int) ->
     tm = threading.Timer(timeout, term)
     tm.start()
 
-    stdoutdata, stderrdata = nodejs.communicate(script)
+    stdin_buf = StringIO(json.dumps(fn)+"\n")
+    stdout_buf = StringIO()
+    stderr_buf = StringIO()
+
+    completed = []  # type: List[Union[cStringIO.InputType, cStringIO.OutputType]]
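+    # Feed the serialized expression to the node process and drain stdout and
+    # stderr without blocking; stop once all three streams are done, or once
+    # stdout ends with a newline (a complete single-line JSON reply).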
+    while len(completed) < 3:
+        rready, wready, _ = select.select([nodejs.stdout, nodejs.stderr], [nodejs.stdin], [])
+        if nodejs.stdin in wready:
+            b = stdin_buf.read(select.PIPE_BUF)
+            if b:
+                os.write(nodejs.stdin.fileno(), b)
+            elif stdin_buf not in completed:
+                completed.append(stdin_buf)
+        for pipes in ((nodejs.stdout, stdout_buf), (nodejs.stderr, stderr_buf)):
+            if pipes[0] in rready:
+                b = os.read(pipes[0].fileno(), select.PIPE_BUF)
+                if b:
+                    pipes[1].write(b)
+                elif pipes[1] not in completed:
+                    completed.append(pipes[1])
+        if stdout_buf.getvalue().endswith("\n"):
+            for buf in (stdout_buf, stderr_buf):
+                if buf not in completed:
+                    completed.append(buf)
     tm.cancel()
 
-    def fn_linenum():  # type: () -> unicode
-        return u"\n".join(u"%04i %s" % (i+1, b) for i, b in enumerate(fn.split("\n")))
+    stdin_buf.close()
+    stdoutdata = stdout_buf.getvalue()
+    stderrdata = stderr_buf.getvalue()
 
-    if killed:
-        raise JavascriptException(u"Long-running script killed after %s seconds.\nscript was:\n%s\n" % (timeout, fn_linenum()))
+    def fn_linenum():  # type: () -> Text
+        return u"\n".join(u"%04i %s" % (i+1, b) for i, b in enumerate(fn.split("\n")))
 
-    if nodejs.returncode != 0:
-        raise JavascriptException(u"Returncode was: %s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" % (nodejs.returncode, fn_linenum(), stdoutdata, stderrdata))
+    if nodejs.poll() not in (None, 0):
+        if killed:
+            raise JavascriptException(u"Long-running script killed after %s seconds.\nscript was:\n%s\n" % (timeout, fn_linenum()))
+        else:
+            raise JavascriptException(u"Returncode was: %s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" % (nodejs.returncode, fn_linenum(), stdoutdata, stderrdata))
     else:
         try:
             return json.loads(stdoutdata)
         except ValueError as e:
             raise JavascriptException(u"%s\nscript was:\n%s\nstdout was: '%s'\nstderr was: '%s'\n" % (e, fn_linenum(), stdoutdata, stderrdata))
-
-class SubstitutionError(Exception):
-    pass
-
-
-def scanner(scan):  # type: (str) -> List[int]
-    DEFAULT = 0
-    DOLLAR = 1
-    PAREN = 2
-    BRACE = 3
-    SINGLE_QUOTE = 4
-    DOUBLE_QUOTE = 5
-    BACKSLASH = 6
-
-    i = 0
-    stack = [DEFAULT]
-    start = 0
-    while i < len(scan):
-        state = stack[-1]
-        c = scan[i]
-
-        if state == DEFAULT:
-            if c == '$':
-                stack.append(DOLLAR)
-            elif c == '\\':
-                stack.append(BACKSLASH)
-        elif state == BACKSLASH:
-            stack.pop()
-            if stack[-1] == DEFAULT:
-                return [i-1, i+1]
-        elif state == DOLLAR:
-            if c == '(':
-                start = i-1
-                stack.append(PAREN)
-            elif c == '{':
-                start = i-1
-                stack.append(BRACE)
-        elif state == PAREN:
-            if c == '(':
-                stack.append(PAREN)
-            elif c == ')':
-                stack.pop()
-                if stack[-1] == DOLLAR:
-                    return [start, i+1]
-            elif c == "'":
-                stack.append(SINGLE_QUOTE)
-            elif c == '"':
-                stack.append(DOUBLE_QUOTE)
-        elif state == BRACE:
-            if c == '{':
-                stack.append(BRACE)
-            elif c == '}':
-                stack.pop()
-                if stack[-1] == DOLLAR:
-                    return [start, i+1]
-            elif c == "'":
-                stack.append(SINGLE_QUOTE)
-            elif c == '"':
-                stack.append(DOUBLE_QUOTE)
-        elif state == SINGLE_QUOTE:
-            if c == "'":
-                stack.pop()
-            elif c == '\\':
-                stack.append(BACKSLASH)
-        elif state == DOUBLE_QUOTE:
-            if c == '"':
-                stack.pop()
-            elif c == '\\':
-                stack.append(BACKSLASH)
-        i += 1
-
-    if len(stack) > 1:
-        raise SubstitutionError("Substitution error, unfinished block starting at position {}: {}".format(start, scan[start:]))
-    else:
-        return None
-
-
-def interpolate(scan, jslib, timeout=None):  # type: (str, Union[str, unicode], int) -> JSON
-    scan = scan.strip()
-    parts = []
-    w = scanner(scan)
-    while w:
-        parts.append(scan[0:w[0]])
-
-        if scan[w[0]] == '$':
-            e = execjs(scan[w[0]+1:w[1]], jslib, timeout=timeout)
-            if w[0] == 0 and w[1] == len(scan):
-                return e
-            leaf = json.dumps(e, sort_keys=True)
-            if leaf[0] == '"':
-                leaf = leaf[1:-1]
-            parts.append(leaf)
-        elif scan[w[0]] == '\\':
-            e = scan[w[1]-1]
-            parts.append(e)
-
-        scan = scan[w[1]:]
-        w = scanner(scan)
-    parts.append(scan)
-    return ''.join(parts)
diff --git a/cwltool/schemas/draft-2/CommonWorkflowLanguage.yml b/cwltool/schemas/draft-2/CommonWorkflowLanguage.yml
new file mode 100644
index 0000000..6ac2021
--- /dev/null
+++ b/cwltool/schemas/draft-2/CommonWorkflowLanguage.yml
@@ -0,0 +1,1966 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+  sld: "https://w3id.org/cwl/salad#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
+
+$graph:
+- name: "Common Workflow Language, Draft 2"
+  type: documentation
+  doc: |
+    7 July 2015
+
+    This version:
+      * https://w3id.org/cwl/draft-2/
+
+    Current version:
+      * https://w3id.org/cwl/
+
+    Authors:
+
+    * Peter Amstutz <peter.amstutz at curoverse.com>, Curoverse
+    * Nebojša Tijanić <nebojsa.tijanic at sbgenomics.com>, Seven Bridges Genomics
+
+    Contributors:
+
+    * Luka Stojanovic <luka.stojanovic at sbgenomics.com>, Seven Bridges Genomics
+    * John Chilton <jmchilton at gmail.com>, Galaxy Project, Pennsylvania State University
+    * Michael R. Crusoe <mcrusoe at msu.edu>, Michigan State University
+    * Hervé Ménager <herve.menager at gmail.com>, Institut Pasteur
+    * Maxim Mikheev <mikhmv at biodatomics.com>, BioDatomics
+    * Stian Soiland-Reyes <soiland-reyes at cs.manchester.ac.uk>, University of Manchester
+
+    # Abstract
+
+    A Workflow is an analysis task represented by a directed graph describing a
+    sequence of operations that transform an input data set to output.  This
+    specification defines the Common Workflow Language (CWL), a vendor-neutral
+    standard for representing workflows and concrete process steps intended to
+    be portable across a variety of computing platforms.
+
+    # Status of This Document
+
+    This document is the product of the [Common Workflow Language working
+    group](https://groups.google.com/forum/#!forum/common-workflow-language).  The
+    latest version of this document is available in the "specification" directory at
+
+    https://github.com/common-workflow-language/common-workflow-language
+
+    The products of the CWL working group (including this document) are made available
+    under the terms of the Apache License, version 2.0.
+
+    # Introduction
+
+    The Common Workflow Language (CWL) working group is an informal, multi-vendor
+    working group consisting of various organizations and individuals that have an
+    interest in portability of data analysis workflows.  The goal is to create
+    specifications like this one that enable data scientists to describe analysis
+    tools and workflows that are powerful, easy to use, portable, and support
+    reproducibility.
+
+    ## Introduction to draft 2
+
+    This specification represents the second milestone of the CWL group.  Since
+    draft-1, this draft introduces a number of major changes and additions:
+
+    * Use of Avro schema (instead of JSON-schema) and JSON-LD for data modeling.
+    * Significant refactoring of the Command Line Tool description.
+    * Data and execution model for Workflows.
+    * Extension mechanism through "hints" and "requirements".
+
+    ## Purpose
+
+    CWL is designed to express workflows for data-intensive science, such as
+    Bioinformatics, Chemistry, Physics, and Astronomy.  This specification is
+    intended to define a data and execution model for Workflows and Command Line
+    Tools that can be implemented on top of a variety of computing platforms,
+    ranging from an individual workstation to cluster, grid, cloud, and high
+    performance computing systems.
+
+    ## References to Other Specifications
+
+    * [JSON](http://json.org)
+    * [JSON-LD](http://json-ld.org)
+    * [JSON Pointer](https://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-04)
+    * [YAML](http://yaml.org)
+    * [Avro](https://avro.apache.org/docs/current/spec.html)
+    * [Uniform Resource Identifier (URI): Generic Syntax](https://tools.ietf.org/html/rfc3986)
+    * [UTF-8](https://www.ietf.org/rfc/rfc2279.txt)
+    * [Portable Operating System Interface (POSIX.1-2008)](http://pubs.opengroup.org/onlinepubs/9699919799/)
+    * [Resource Description Framework (RDF)](http://www.w3.org/RDF/)
+
+    ## Scope
+
+    This document describes the CWL syntax, execution, and object model.  It
+    is not intended to document a specific implementation of CWL, however it may
+    serve as a reference for the behavior of conforming implementations.
+
+    ## Terminology
+
+    The terminology used to describe CWL documents is defined in the
+    Concepts section of the specification. The terms defined in the
+    following list are used in building those definitions and in describing the
+    actions of a CWL implementation:
+
+    **may**: Conforming CWL documents and CWL implementations are permitted but
+    not required to behave as described.
+
+    **must**: Conforming CWL documents and CWL implementations are required to behave
+    as described; otherwise they are in error.
+
+    **error**: A violation of the rules of this specification; results are
+    undefined. Conforming implementations may detect and report an error and may
+    recover from it.
+
+    **fatal error**: A violation of the rules of this specification; results are
+    undefined. Conforming implementations must not continue to execute the current
+    process and may report an error.
+
+    **at user option**: Conforming software may or must (depending on the modal verb in
+    the sentence) behave as described; if it does, it must provide users a means to
+    enable or disable the behavior described.
+
+    # Data model
+
+    ## Data concepts
+
+    An **object** is a data structure equivalent to the "object" type in JSON,
+    consisting of an unordered set of name/value pairs (referred to here as
+    **fields**) and where the name is a string and the value is a string, number,
+    boolean, array, or object.
+
+    A **document** is a file containing a serialized object, or an array of objects.
+
+    A **process** is a basic unit of computation which accepts input data,
+    performs some computation, and produces output data.
+
+    An **input object** is an object describing the inputs to an invocation of a process.
+
+    An **output object** is an object describing the output of an invocation of a process.
+
+    An **input schema** describes the valid format (required fields, data types)
+    for an input object.
+
+    An **output schema** describes the valid format for an output object.
+
+    **Metadata** is information about workflows, tools, or input items that is
+    not used directly in the computation.
+
+    ## Syntax
+
+    Documents containing CWL objects are serialized and loaded using YAML
+    syntax and UTF-8 text encoding.  A conforming implementation must accept
+    all valid YAML documents.
+
+    The CWL schema is defined using Avro Linked Data (avro-ld).  Avro-ld is an
+    extension of the Apache Avro schema language to support additional
+    annotations mapping Avro fields to RDF predicates via JSON-LD.
+
+    A CWL document may be validated by transforming the avro-ld schema to a
+    base Apache Avro schema.
+
+    An implementation may interpret a CWL document as
+    [JSON-LD](http://json-ld.org) and convert a CWL document to a [Resource
+    Description Framework (RDF)](http://www.w3.org/RDF/) using the
+    CWL [JSON-LD Context](https://w3id.org/cwl/draft-2/context) (extracted from the avro-ld schema).
+    The CWL [RDFS schema](https://w3id.org/cwl/draft-2/cwl.ttl) defines the classes and properties used by
+    CWL as JSON-LD.
+
+    The latest draft-2 schema is defined here:
+    https://github.com/common-workflow-language/common-workflow-language/blob/master/schemas/draft-2/cwl-avro.yml
+
+    ## Identifiers
+
+    If an object contains an `id` field, that is used to uniquely identify the
+    object in that document.  The value of the `id` field must be unique over the
+    entire document.  The format of the `id` field is that of a [relative fragment
+    identifier](https://tools.ietf.org/html/rfc3986#section-3.5), and must start
+    with a hash `#` character.
+
+    An implementation may choose to only honor references to object types for
+    which the `id` field is explicitly listed in this specification.
+
+    When loading a CWL document, an implementation may resolve relative
+    identifiers to absolute URI references.  For example, "my_tool.cwl" located
+    in the directory "/home/example/work/" may be transformed to
+    "file:///home/example/work/my_tool.cwl" and a relative fragment reference
+    "#input" in this file may be transformed to
+    "file:///home/example/work/my_tool.cwl#input".
+
+    ## Document preprocessing
+
+    An implementation must resolve `import` directives.  An `import` directive
+    is an object consisting of the field `import` specifying a URI.  The URI
+    referenced by `import` must be loaded as a CWL document (including
+    recursive preprocessing) and then the `import` object is implicitly
+    replaced by the external resource.  URIs may include document fragments
+    referring to objects identified by their `id` field, in which case the `import`
+    directive is replaced by only the fragment object.
+
+    An implementation must resolve `include` directives.  An `include`
+    directive is an object consisting of the field `include` specifying a URI.
+    The URI referenced by `include` must be loaded as a UTF-8 encoded text
+    document and the `include` directive is implicitly replaced by a string with
+    the contents of the document.  Because the loaded resource is unparsed,
+    URIs used with `include` must not include fragments.
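+
+    A rough sketch of `import` resolution in Python (illustrative only;
+    `load_document` is a hypothetical recursive loader, and replacing a
+    directive with only the referenced fragment object is omitted):
+
+    ```
+    import urlparse
+
+    def resolve_imports(obj, base):
+        if isinstance(obj, dict):
+            if "import" in obj:
+                # Replace the directive with the loaded external resource.
+                return load_document(urlparse.urljoin(base, obj["import"]))
+            return dict((k, resolve_imports(v, base))
+                        for k, v in obj.iteritems())
+        if isinstance(obj, list):
+            return [resolve_imports(d, base) for d in obj]
+        return obj
+    ```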
+
+    ## Extensions and Metadata
+
+    Implementation extensions not required for correct
+    execution (for example, fields related to GUI rendering) may
+    be stored in [process hints](#requirements_and_hints).
+
+    Input metadata (for example, a lab sample identifier) may be explicitly
+    represented within a workflow using input parameters which are propagated
+    to output.  Future versions of this specification may define additional
+    facilities for working with input/output metadata.
+
+    Fields for tool and workflow metadata (for example, authorship for use in
+    citations) are not defined in this specification.  Future versions of this
+    specification may define such fields.
+
+    # Execution model
+
+    ## Execution concepts
+
+    A **parameter** is a named symbolic input or output of a process, with an
+    associated datatype or schema.  During execution, values are assigned to
+    parameters to make the input object or output object used for concrete
+    process invocation.
+
+    A **command line tool** is a process characterized by the execution of a
+    standalone, non-interactive program which is invoked on some input,
+    produces output, and then terminates.
+
+    A **workflow** is a process characterized by multiple subprocess steps,
+    where step outputs are connected to the inputs of other downstream steps to
+    form a directed graph, and independent steps may run concurrently.
+
+    A **runtime environment** is the actual hardware and software environment when
+    executing a command line tool.  It includes, but is not limited to, the
+    hardware architecture, hardware resources, operating system, software runtime
+    (if applicable, such as the Python interpreter or the JVM), libraries, modules,
+    packages, utilities, and data files required to run the tool.
+
+    A **workflow platform** is a specific hardware and software implementation
+    capable of interpreting a CWL document and executing the processes specified by
+    the document.  The responsibilities of the workflow platform may include
+    scheduling process invocation, setting up the necessary runtime environment,
+    making input data available, invoking the tool process, and collecting output.
+
+    It is intended that the workflow platform has broad leeway outside of this
+    specification to optimize use of computing resources and enforce policies
+    not covered by this specification.  Some areas that are currently out of
+    scope for CWL specification but may be handled by a specific workflow
+    platform include:
+
+    * Data security and permissions.
+    * Scheduling tool invocations on remote cluster or cloud compute nodes.
+    * Using virtual machines or operating system containers to manage the runtime
+    (except as described in [DockerRequirement](#dockerrequirement)).
+    * Using remote or distributed file systems to manage input and output files.
+    * Translating or rewriting file paths.
+    * Determining if a process has previously been executed, skipping it and
+    reusing previous results.
+    * Pausing and resuming of processes or workflows.
+
+    Conforming CWL processes must not assume anything about the runtime
+    environment or workflow platform unless explicitly declared through the use
+    of [process requirements](#processrequirement).
+
+    ## Generic execution process
+
+    The generic execution sequence of a CWL process (including both workflows
+    and concrete process implementations) is as follows.
+
+    1. Load and validate CWL document, yielding a process object.
+    2. Load input object.
+    3. Validate the input object against the `inputs` schema for the process.
+    4. Validate that process requirements are met.
+    5. Perform any further setup required by the specific process type.
+    6. Execute the process.
+    7. Capture results of process execution into the output object.
+    8. Validate the output object against the `outputs` schema for the process.
+    9. Report the output object to the process caller.
+
+    ## Requirements and hints
+
+    A **[process requirement](#processrequirement)** modifies the semantics or runtime
+    environment of a process.  If an implementation cannot satisfy all
+    requirements, or a requirement is listed which is not recognized by the
+    implementation, it is a fatal error and the implementation must not attempt
+    to run the process, unless overridden at user option.
+
+    A **hint** is similar to a requirement, however it is not an error if an
+    implementation cannot satisfy all hints.  The implementation may report a
+    warning if a hint cannot be satisfied.
+
+    Requirements are inherited.  A requirement specified in a Workflow applies
+    to all workflow steps; a requirement specified on a workflow step will
+    apply to the process implementation.
+
+    If the same process requirement appears at different levels of the
+    workflow, the most specific instance of the requirement is used, that is,
+    an entry in `requirements` on a process implementation such as
+    CommandLineTool will take precedence over an entry in `requirements`
+    specified in a workflow step, and an entry in `requirements` on a workflow
+    step takes precedence over the workflow.  Entries in `hints` are resolved
+    the same way.
+
+    Requirements override hints.  If a process implementation provides a
+    process requirement in `hints` which is also provided in `requirements` by
+    an enclosing workflow or workflow step, the enclosing `requirements` takes
+    precedence.
+
+    Process requirements are the primary mechanism for specifying extensions to
+    the CWL core specification.
+
+    ## Expressions
+
+    An expression is a fragment of executable code which is evaluated by the
+    workflow platform to affect the inputs, outputs, or behavior of a process.
+    In the generic execution sequence, expressions may be evaluated during step
+    5 (process setup), step 6 (execute process), and/or step 7 (capture
+    output).  Expressions are distinct from regular processes in that they are
+    intended to modify the behavior of the workflow itself rather than perform
+    the primary work of the workflow.
+
+    An implementation must provide the predefined `cwl:JsonPointer` expression
+    engine.  This expression engine specifies a [JSON
+    Pointer](https://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-04)
+    into an expression input object consisting of the `job` and `context`
+    fields described below.
+
+    An expression engine defined with
+    [ExpressionEngineRequirement](#expressionenginerequirement) is a command
+    line program that follows the protocol below (a sketch is given after
+    the list):
+
+      * On standard input, receive a JSON object with the following fields:
+
+        - **engineConfig**: A list of strings from the `engineConfig` field.
+          Null if `engineConfig` is not specified.
+
+        - **job**: The input object of the current Process (context dependent).
+
+        - **context**: The specific value being transformed (context dependent).  May
+          be null.
+
+        - **script**: The code fragment to evaluate.
+
+        - **outdir**: When used in the context of a CommandLineTool, this is
+          the designated output directory that will be used when executing the
+          tool.  Null if not applicable.
+
+        - **tmpdir**: When used in the context of a CommandLineTool, this is
+          the designated temporary directory that will be used when executing
+          the tool.  Null if not applicable.
+
+      * On standard output, print a single JSON value (string, number, array, object,
+        boolean, or null) for the return value.
+
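+    As a hedged illustration, a minimal engine implementing the predefined
+    `cwl:JsonPointer` behavior could look like the following Python program
+    (simplified: JSON Pointer escape sequences, error reporting, and
+    sandboxing are omitted):
+
+    ```
+    import json
+    import sys
+
+    def resolve_pointer(document, pointer):
+        # Walk one pointer token at a time, e.g. "/job/input/path".
+        for token in pointer.lstrip("/").split("/"):
+            if isinstance(document, list):
+                document = document[int(token)]
+            else:
+                document = document[token]
+        return document
+
+    request = json.load(sys.stdin)
+    scope = {"job": request.get("job"), "context": request.get("context")}
+    sys.stdout.write(json.dumps(resolve_pointer(scope, request["script"])))
+    ```
+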
+    Expressions must be evaluated in an isolated context (a "sandbox") which
+    permits no side effects to leak outside the context and no outside data
+    to leak into the context.
+
+    Implementations may apply limits, such as process isolation, timeouts, and
+    operating system containers/jails to minimize the security risks associated
+    with running untrusted code.
+
+    The order in which expressions are evaluated within a process or workflow
+    is undefined.
+
+    ## Workflow graph
+
+    A workflow describes a set of **steps** and the **dependencies** between
+    those steps.  When a process produces output that will be consumed by a
+    second process, the first process is a dependency of the second process.
+    When there is a dependency, the workflow engine must execute the dependency
+    process and wait for it to successfully produce output before executing the
+    dependent process.  If two processes are defined in the workflow graph that
+    are not directly or indirectly dependent, these processes are
+    **independent**, and may execute in any order or execute concurrently.  A
+    workflow is complete when all steps have been executed.
+
+    ## Success and failure
+
+    A completed process must result in one of `success`, `temporaryFailure` or
+    `permanentFailure` states.  An implementation may choose to retry a process
+    execution which resulted in `temporaryFailure`.  An implementation may
+    choose to either continue running other steps of a workflow, or terminate
+    immediately upon `permanentFailure`.
+
+    * If any step of a workflow execution results in `permanentFailure`, then the
+    workflow status is `permanentFailure`.
+
+    * If one or more steps result in `temporaryFailure` and all other steps
+    complete `success` or are not executed, then the workflow status is
+    `temporaryFailure`.
+
+    * If all workflow steps are executed and complete with `success`, then the workflow
+    status is `success`.
+
+    ## Executing CWL documents as scripts
+
+    By convention, a CWL document may begin with `#!/usr/bin/env cwl-runner`
+    and be marked as executable (the POSIX "+x" permission bits) to enable it
+    to be executed directly.  A workflow platform may support this mode of
+    operation; if so, it must provide `cwl-runner` as an alias for the
+    platform's CWL implementation.
+
+    # Sample CWL workflow
+
+    revtool.cwl:
+    ```
+    #!/usr/bin/env cwl-runner
+    #
+    # Simplest example command line program wrapper for the Unix tool "rev".
+    #
+    class: CommandLineTool
+    description: "Reverse each line using the `rev` command"
+
+    # The "inputs" array defines the structure of the input object that describes
+    # the inputs to the underlying program.  Here, there is one input field
+    # defined that will be called "input" and will contain a "File" object.
+    #
+    # The input binding indicates that the input value should be turned into a
+    # command line argument.  In this example inputBinding is an empty object,
+    # which indicates that the file name should be added to the command line at
+    # a default location.
+    inputs:
+      - id: "#input"
+        type: File
+        inputBinding: {}
+
+    # The "outputs" array defines the structure of the output object that
+    # describes the outputs of the underlying program.  Here, there is one
+    # output field defined that will be called "output", must be a "File" type,
+    # and after the program executes, the output value will be the file
+    # output.txt in the designated output directory.
+    outputs:
+      - id: "#output"
+        type: File
+        outputBinding:
+          glob: output.txt
+
+    # The actual program to execute.
+    baseCommand: rev
+
+    # Specify that the standard output stream must be redirected to a file called
+    # output.txt in the designated output directory.
+    stdout: output.txt
+    ```
+
+    sorttool.cwl:
+    ```
+    #!/usr/bin/env cwl-runner
+    #
+    # Example command line program wrapper for the Unix tool "sort"
+    # demonstrating command line flags.
+    class: CommandLineTool
+    description: "Sort lines using the `sort` command"
+
+    # This example is similar to the previous one, with an additional input
+    # parameter called "reverse".  It is a boolean parameter, which is
+    # interpreted as a command line flag.  The value of "prefix" is the flag
+    # to put on the command line if "reverse" is true.  If "reverse" is
+    # false, no flag is added.
+    #
+    # This example also introduces the "position" field.  This indicates the
+    # sorting order of items on the command line.  Lower numbers are placed
+    # before higher numbers.  Here, the "--reverse" flag (if present) will be
+    # added to the command line before the input file path.
+    inputs:
+      - id: "#reverse"
+        type: boolean
+        inputBinding:
+          position: 1
+          prefix: "--reverse"
+      - id: "#input"
+        type: File
+        inputBinding:
+          position: 2
+
+    outputs:
+      - id: "#output"
+        type: File
+        outputBinding:
+          glob: output.txt
+
+    baseCommand: sort
+    stdout: output.txt
+    ```
+
+    revsort.cwl:
+    ```
+    #!/usr/bin/env cwl-runner
+    #
+    # This is a two-step workflow which uses "revtool" and "sorttool" defined above.
+    #
+    class: Workflow
+    description: "Reverse the lines in a document, then sort those lines."
+
+    # Requirements specify prerequisites and extensions to the workflow.
+    # In this example, DockerRequirement specifies a default Docker container
+    # in which the command line tools will execute.
+    requirements:
+      - class: DockerRequirement
+        dockerPull: debian:8
+
+    # The inputs array defines the structure of the input object that describes
+    # the inputs to the workflow.
+    #
+    # The "reverse_sort" input parameter demonstrates the "default" field.  If the
+    # field "reverse_sort" is not provided in the input object, the default value will
+    # be used.
+    inputs:
+      - id: "#input"
+        type: File
+        description: "The input file to be processed."
+      - id: "#reverse_sort"
+        type: boolean
+        default: true
+        description: "If true, reverse (descending) sort"
+
+    # The "outputs" array defines the structure of the output object that describes
+    # the outputs of the workflow.
+    #
+    # Each output field must be connected to the output of one of the workflow
+    # steps using the "source" field.  Here, the parameter "#output" of the
+    # workflow comes from the "#sorted" output of the "sort" step.
+    outputs:
+      - id: "#output"
+        type: File
+        source: "#sorted.output"
+        description: "The output with the lines reversed and sorted."
+
+    # The "steps" array lists the executable steps that make up the workflow.
+    # The tool to execute each step is listed in the "run" field.
+    #
+    # In the first step, the "inputs" field of the step connects the upstream
+    # parameter "#input" of the workflow to the input parameter of the tool
+    # "revtool.cwl#input".
+    #
+    # In the second step, the "inputs" field of the step connects the output
+    # parameter "#reversed" from the first step to the input parameter of the
+    # tool "sorttool.cwl#input".
+    steps:
+      - inputs:
+          - { id: "#rev.input", source: "#input" }
+        outputs:
+          - { id: "#rev.output" }
+        run: { import: revtool.cwl }
+
+      - inputs:
+          - { id: "#sorted.input", source: "#rev.output" }
+          - { id: "#sorted.reverse", source: "#reverse_sort" }
+        outputs:
+          - { id: "#sorted.output" }
+        run: { import: sorttool.cwl }
+    ```
+
+    Sample input object:
+    ```
+    {
+      "input": {
+        "class": "File",
+        "path": "whale.txt"
+      }
+    }
+    ```
+
+    Sample output object:
+    ```
+    {
+        "output": {
+            "path": "/tmp/tmpdeI_p_/output.txt",
+            "size": 1111,
+            "class": "File",
+            "checksum": "sha1$b9214658cc453331b62c2282b772a5c063dbd284"
+        }
+    }
+    ```
+
+- name: Reference
+  type: documentation
+  doc: This section specifies the core object types that make up a CWL document.
+
+- type: enum
+  name: CWLVersions
+  doc: "Version symbols for published CWL document versions."
+  symbols:
+    - cwl:draft-2
+
+- name: Datatype
+  type: enum
+  docAfter: "#ProcessRequirement"
+  symbols:
+    - "null"
+    - sld:boolean
+    - sld:int
+    - sld:long
+    - sld:float
+    - sld:double
+    - sld:bytes
+    - sld:string
+    - sld:record
+    - sld:enum
+    - sld:array
+    - sld:map
+    - cwl:File
+    - cwl:Any
+
+  doc: |
+   CWL data types are based on Avro schema declarations.  Refer to the [Avro
+   schema declaration
+   documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
+   detailed information.  In addition, CWL defines [`File`](#file)
+   as a special record type.
+
+   ## Primitive types
+
+   * **null**: no value
+   * **boolean**: a binary value
+   * **int**: 32-bit signed integer
+   * **long**: 64-bit signed integer
+   * **float**: single precision (32-bit) IEEE 754 floating-point number
+   * **double**: double precision (64-bit) IEEE 754 floating-point number
+   * **bytes**: sequence of uninterpreted 8-bit unsigned bytes
+   * **string**: Unicode character sequence
+
+   ## Complex types
+
+   * **record**: An object with one or more fields defined by name and type
+   * **enum**: A value from a finite set of symbolic values
+   * **array**: An ordered sequence of values
+   * **map**: An unordered collection of key/value pairs
+
+   ## File type
+
+   See [File](#file) below.
+
+   ## Any type
+
+   See [Any](#any) below.
+
+- name: File
+  type: record
+  docParent: "#Datatype"
+  doc: |
+    Represents a file (or group of files if `secondaryFiles` is specified) that
+    must be accessible by tools using standard POSIX file system call API such as
+    open(2) and read(2).
+  fields:
+    - name: "class"
+      type:
+        type: enum
+        name: "File_class"
+        symbols:
+          - cwl:File
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+      doc: Must be `File` to indicate this object describes a file.
+    - name: "path"
+      type: "string"
+      doc: The path to the file.
+    - name: "checksum"
+      type: ["null", "string"]
+      doc: |
+        Optional hash code for validating file integrity.  Currently must be in the form
+        "sha1$ + hexidecimal string" using the SHA-1 algorithm.
+    - name: "size"
+      type: ["null", "long"]
+      doc: Optional file size.
+    - name: "cwl:secondaryFiles"
+      type:
+        - "null"
+        - type: array
+          items: "#File"
+      doc: |
+        A list of additional files that are associated with the primary file
+        and must be transferred alongside the primary file.  Examples include
+        indexes of the primary file, or external references which must be
+        included when loading the primary document.  A file object listed in
+        `secondaryFiles` may itself include `secondaryFiles` for which the same
+        rules apply.
+
+
+- name: Any
+  type: enum
+  docParent: "#Datatype"
+  symbols: ["cwl:Any"]
+  doc: |
+    The **Any** type validates for any non-null value.
+
+
+- name: Schema
+  type: record
+  doc: "A schema defines a parameter type."
+  docParent: "#Parameter"
+  fields:
+    - name: type
+      doc: "The data type of this parameter."
+      type:
+        - "#Datatype"
+        - "#Schema"
+        - "string"
+        - type: "array"
+          items: [ "#Datatype", "#Schema", "string" ]
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+    - name: fields
+      type:
+        - "null"
+        - type: "array"
+          items: "#Schema"
+      jsonldPredicate:
+        "_id": "sld:fields"
+        "_container": "@list"
+      doc: "When `type` is `record`, defines the fields of the record."
+    - name: "symbols"
+      type:
+        - "null"
+        - type: "array"
+          items: "string"
+      jsonldPredicate:
+        "_id": "sld:symbols"
+        "_container": "@list"
+      doc: "When `type` is `enum`, defines the set of valid symbols."
+    - name: items
+      type:
+        - "null"
+        - "#Datatype"
+        - "#Schema"
+        - "string"
+        - type: "array"
+          items: [ "#Datatype", "#Schema", "string" ]
+      jsonldPredicate:
+        "_id": "sld:items"
+        "_container": "@list"
+      doc: "When `type` is `array`, defines the type of the array elements."
+    - name: "values"
+      type:
+        - "null"
+        - "#Datatype"
+        - "#Schema"
+        - "string"
+        - type: "array"
+          items: [ "#Datatype", "#Schema", "string" ]
+      jsonldPredicate:
+        "_id": "sld:values"
+        "_container": "@list"
+      doc: "When `type` is `map`, defines the value type for the key/value pairs."
+
+
+- name: Parameter
+  type: record
+  docParent: "#Process"
+  abstract: true
+  doc: |
+    Define an input or output parameter to a process.
+
+  fields:
+    - name: type
+      type:
+        - "null"
+        - "#Datatype"
+        - "#Schema"
+        - string
+        - type: array
+          items:
+            - "#Datatype"
+            - "#Schema"
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
+    - name: label
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this parameter object."
+    - name: description
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:comment"
+      doc: "A long, human-readable description of this parameter object."
+    - name: streamable
+      type: ["null", "boolean"]
+      doc: |
+        Currently only applies if `type` is `File`.  A value of `true`
+        indicates that the file is read or written sequentially without
+        seeking.  An implementation may use this flag to indicate whether it is
+        valid to stream file contents using a named pipe.  Default: `false`.
+    - name: cwl:default
+      type: ["null", Any]
+      doc: |
+        The default value for this parameter if not provided in the input
+        object.
+
+
+- name: JsonPointer
+  type: enum
+  docParent: "#Expression"
+  symbols:
+    - "cwl:JsonPointer"
+
+
+- type: record
+  name: Expression
+  docAfter: "#ExpressionTool"
+  doc: |
+    Define an expression that will be evaluated and used to modify the behavior
+    of a tool or workflow.  See [Expressions](#expressions) for more
+    information about expressions and
+    [ExpressionEngineRequirement](#expressionenginerequirement) for
+    information on how to define an expression engine.
+  fields:
+    - name: engine
+      type:
+        - "#JsonPointer"
+        - string
+      doc: |
+        Either `cwl:JsonPointer` or a reference to an
+        ExpressionEngineRequirement defining which engine to use.
+      jsonldPredicate:
+        "_id": "cwl:engine"
+        "_type": "@id"
+    - name: script
+      type: string
+      doc: "The code to be executed by the expression engine."
+
+
+- name: Binding
+  type: record
+  docParent: "#Parameter"
+  fields:
+    - name: loadContents
+      type:
+        - "null"
+        - boolean
+      doc: |
+        Only applies when `type` is `File`.  Read up to the first 64 KiB of text from the file and place it in the
+        "contents" field of the file object for manipulation by expressions.
+    - name: cwl:secondaryFiles
+      type:
+        - "null"
+        - "string"
+        - "#Expression"
+        - type: "array"
+          items: ["string", "#Expression"]
+      doc: |
+        Only applies when `type` is `File`.  Describes files that must be
+        included alongside the primary file.
+
+        If the value is an expression, the context of the expression is the input
+        or output File parameter to which this binding applies.
+
+        If the value is a string, it specifies that the following pattern
+        should be applied to the primary file (a sketch follows the list):
+
+          1. If the string begins with one or more caret `^` characters, for each
+            caret, remove the last file extension from the path (the last
+            period `.` and all following characters).  If there are no file
+            extensions, the path is unchanged.
+          2. Append the remainder of the string to the end of the file path.
+
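+        A minimal Python sketch of this pattern (illustrative only;
+        `apply_suffix_pattern` is a hypothetical helper, not part of this
+        specification):
+
+        ```
+        import os
+
+        def apply_suffix_pattern(path, pattern):
+            # Strip one extension per leading caret, then append the rest.
+            while pattern.startswith("^"):
+                pattern = pattern[1:]
+                root, ext = os.path.splitext(path)
+                path = root if ext else path
+            return path + pattern
+
+        # apply_suffix_pattern("/data/reads.bam", "^.bai")
+        # returns "/data/reads.bai"
+        ```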
+
+- name: InputSchema
+  type: record
+  extends: "#Schema"
+  docParent: "#InputParameter"
+  specialize:
+    - specializeFrom: "#Schema"
+      specializeTo: "#InputSchema"
+  fields:
+    - name: cwl:inputBinding
+      type: [ "null", "#Binding" ]
+      doc: |
+        Describes how to handle a value in the input object and convert it
+        into a concrete form for execution, such as command line parameters.
+
+
+- name: OutputSchema
+  type: record
+  extends: "#Schema"
+  docParent: "#OutputParameter"
+  specialize:
+    - specializeFrom: "#Schema"
+      specializeTo: "#OutputSchema"
+
+
+- name: InputParameter
+  type: record
+  extends: "#Parameter"
+  docAfter: "#Parameter"
+  specialize:
+    - specializeFrom: "#Schema"
+      specializeTo: "#InputSchema"
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this parameter object."
+    - name: "cwl:inputBinding"
+      type: [ "null", "#Binding" ]
+      doc: |
+        Describes how to handle the inputs of a process and convert them
+        into a concrete form for execution, such as command line parameters.
+
+- name: OutputParameter
+  type: record
+  extends: "#Parameter"
+  docAfter: "#Parameter"
+  specialize:
+    - specializeFrom: "#Schema"
+      specializeTo: "#OutputSchema"
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this parameter object."
+
+
+- type: record
+  name: "FileDef"
+  docParent: "#CreateFileRequirement"
+  doc: |
+    Define a file that must be placed in the designated output directory
+    prior to executing the command line tool.  May be the result of executing
+    an expression, such as building a configuration file from a template.
+  fields:
+    - name: "filename"
+      type: ["string", "#Expression"]
+      doc: "The name of the file to create in the output directory."
+    - name: "fileContent"
+      type: ["string", "#Expression"]
+      doc: |
+        If the value is a string literal or an expression which evaluates to a
+        string, a new file must be created with the string as the file contents.
+
+        If the value is an expression that evaluates to a File object, this
+        indicates the referenced file should be added to the designated output
+        directory prior to executing the tool.
+
+        Files added in this way may be read-only, and may be provided
+        by bind mounts or file system links to avoid
+        unnecessary copying of the input file.
+
+
+- type: record
+  name: EnvironmentDef
+  docParent: "#EnvVarRequirement"
+  doc: |
+    Define an environment variable that will be set in the runtime environment
+    by the workflow platform when executing the command line tool.  May be the
+    result of executing an expression, such as getting a parameter from input.
+  fields:
+    - name: "envName"
+      type: "string"
+      doc: The environment variable name
+    - name: "envValue"
+      type: ["string", "#Expression"]
+      doc: The environment variable value
+
+
+- type: record
+  name: SchemaDef
+  extends: "#InputSchema"
+  docParent: "#SchemaDefRequirement"
+  specialize:
+    - specializeFrom: "#InputSchema"
+      specializeTo: "#SchemaDef"
+    - specializeFrom: "#Binding"
+      specializeTo: "#CommandLineBinding"
+  fields:
+    - name: name
+      type: ["null", string]
+      doc: "The type name being defined."
+
+
+- type: record
+  name: ProcessRequirement
+  docAfter: "#ExpressionTool"
+  abstract: true
+  doc: |
+    A process requirement declares a prerequisite that may or must be fulfilled
+    before executing a process.  See [`Process.hints`](#process) and
+    [`Process.requirements`](#process).
+
+    Process requirements are the primary mechanism for specifying extensions to
+    the CWL core specification.
+
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "The specific requirement type."
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+
+
+- type: record
+  name: Process
+  abstract: true
+  docAfter: "#ProcessRequirement"
+  doc: |
+
+    The base executable type in CWL is the `Process` object defined by the
+    document.  Note that the `Process` object is abstract and cannot be
+    directly executed.
+
+  fields:
+    - name: id
+      type: ["null", string]
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this process object."
+    - name: cwl:inputs
+      type:
+        type: array
+        items: "#InputParameter"
+      doc: |
+        Defines the input parameters of the process.  The process is ready to
+        run when all required input parameters are associated with concrete
+        values.  Input parameters include a schema for each parameter which is
+        used to validate the input object.  It may also be used to build a user
+        interface for constructing the input object.
+    - name: cwl:outputs
+      type:
+        type: array
+        items: "#OutputParameter"
+      doc: |
+        Defines the parameters representing the output of the process.  May be
+        used to generate and/or validate the output object.
+    - name: cwl:requirements
+      type:
+        - "null"
+        - type: array
+          items: "#ProcessRequirement"
+      doc: >
+        Declares requirements that apply to either the runtime environment or the
+        workflow engine that must be met in order to execute this process.  If
+        an implementation cannot satisfy all requirements, or a requirement is
+        listed which is not recognized by the implementation, it is a fatal
+        error and the implementation must not attempt to run the process,
+        unless overridden at user option.
+    - name: hints
+      type:
+        - "null"
+        - type: array
+          items: Any
+      doc: >
+        Declares hints applying to either the runtime environment or the
+        workflow engine that may be helpful in executing this process.  It is
+        not an error if an implementation cannot satisfy all hints, however
+        the implementation may report a warning.
+      jsonldPredicate:
+        _id: cwl:hints
+        noLinkCheck: true
+    - name: label
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this process object."
+    - name: description
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:comment"
+      doc: "A long, human-readable description of this process object."
+    - name: cwlVersion
+      type:
+        - "null"
+        - "#CWLVersions"
+      doc: "CWL document version"
+      jsonldPredicate:
+        "_id": "cwl:cwlVersion"
+        "_type": "@vocab"
+
+- type: record
+  name: CommandLineBinding
+  extends: "#Binding"
+  docParent: "#CommandInputParameter"
+  doc: |
+
+    When listed under `inputBinding` in the input schema, the term
+    "value" refers to the the corresponding value in the input object.  For
+    binding objects listed in `CommandLineTool.arguments`, the term "value"
+    refers to the effective value after evaluating `valueFrom`.
+
+    The binding behavior when building the command line depends on the data
+    type of the value.  If there is a mismatch between the type described by
+    the input schema and the effective value, such as resulting from an
+    expression evaluation, an implementation must use the data type of the
+    effective value.  A sketch of several of these rules follows the list
+    below.
+
+      - **string**: Add `prefix` and the string to the command line.
+
+      - **number**: Add `prefix` and decimal representation to command line.
+
+      - **boolean**: If true, add `prefix` to the command line.  If false, add
+          nothing.
+
+      - **File**: Add `prefix` and the value of
+        [`File.path`](#file) to the command line.
+
+      - **array**: If `itemSeparator` is specified, add `prefix` and the
+          array joined into a single string with `itemSeparator` separating
+          the items.  Otherwise first add `prefix`, then recursively process
+          individual elements.
+
+      - **object**: Add `prefix` only, and recursively add object fields for
+          which `inputBinding` is specified.
+
+      - **null**: Add nothing.
+
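+    As a purely illustrative sketch (not part of the specification), the
+    null, boolean, string/number, and File rules above might look like this
+    in Python:
+
+    ```
+    def bind(binding, value):
+        prefix = binding.get("prefix")
+        if value is None:
+            return []
+        if isinstance(value, bool):
+            return [prefix] if value and prefix else []
+        if isinstance(value, dict) and value.get("class") == "File":
+            value = value["path"]
+        arg = str(value)
+        if not prefix:
+            return [arg]
+        if binding.get("separate", True):
+            return [prefix, arg]
+        return [prefix + arg]
+    ```
+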
+  fields:
+    - name: "position"
+      type: ["null", "int"]
+      doc: "The sorting key.  Default position is 0."
+    - name: "prefix"
+      type: [ "null", "string"]
+      doc: "Command line prefix to add before the value."
+    - name: "separate"
+      type: ["null", boolean]
+      doc: |
+        If true (default), then the prefix and value must be added as separate
+        command line arguments; if false, prefix and value must be concatenated
+        into a single command line argument.
+    - name: "itemSeparator"
+      type: ["null", "string"]
+      doc: |
+        Join the array elements into a single string with the elements
+        separated by `itemSeparator`.
+    - name: "valueFrom"
+      type:
+        - "null"
+        - "string"
+        - "#Expression"
+      doc: |
+        If `valueFrom` is a constant string value, use this as the value and
+        apply the binding rules above.
+
+        If `valueFrom` is an expression, evaluate the expression to yield the
+        actual value to use to build the command line and apply the binding
+        rules above.  If the inputBinding is associated with an input
+        parameter, the "context" of the expression will be the value of the
+        input parameter.
+
+        When a binding is part of the `CommandLineTool.arguments` field,
+        the `valueFrom` field is required.
+
+
+- type: record
+  name: CommandOutputBinding
+  extends: "#Binding"
+  docParent: "#CommandOutputParameter"
+  doc: |
+    Describes how to generate an output parameter based on the files produced
+    by a CommandLineTool.
+
+    The output parameter is generated by applying these operations in
+    the following order:
+
+      - glob
+      - loadContents
+      - outputEval
+  fields:
+    - name: glob
+      type:
+        - "null"
+        - string
+        - "#Expression"
+        - type: array
+          items: string
+      doc: |
+        Find files relative to the output directory, using POSIX glob(3)
+        pathname matching.  If provided an array, find files that match any
+        pattern in the array.  If provided an expression, the expression must
+        return a string or an array of strings, which will then be evaluated as
+        one or more glob patterns.  Only files which actually exist will be
+        matched and returned.
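+
+        For example, `glob: "*.txt"` (an illustrative pattern) matches every
+        file ending in `.txt` in the output directory.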
+    - name: outputEval
+      type:
+        - "null"
+        - "#Expression"
+      doc: |
+        Evaluate an expression to generate the output value.  If `glob` was
+        specified, the script `context` will be an array containing any files that were
+        matched.  Additionally, if `loadContents` is `true`, the File objects
+        will include up to the first 64 KiB of file contents in the `contents` field.
+
+
+- type: record
+  name: CommandInputSchema
+  extends: "#InputSchema"
+  docParent: "#CommandInputParameter"
+  specialize:
+    - specializeFrom: "#InputSchema"
+      specializeTo: "#CommandInputSchema"
+    - specializeFrom: "#Binding"
+      specializeTo: "#CommandLineBinding"
+
+
+- type: record
+  name: CommandOutputSchema
+  extends: "#OutputSchema"
+  docParent: "#CommandOutputParameter"
+  specialize:
+    - specializeFrom: "#OutputSchema"
+      specializeTo: "#CommandOutputSchema"
+  fields:
+    - name: "cwl:outputBinding"
+      type: [ "null", "#CommandOutputBinding" ]
+      doc: |
+        Describes how to handle the concrete outputs of a process step (such as
+        files created by a program) and describe them in the process output
+        parameter.
+
+
+- type: record
+  name: CommandInputParameter
+  extends: "#InputParameter"
+  docParent: "#CommandLineTool"
+  doc: An input parameter for a CommandLineTool.
+  specialize:
+    - specializeFrom: "#InputSchema"
+      specializeTo: "#CommandInputSchema"
+    - specializeFrom: "#Binding"
+      specializeTo: "#CommandLineBinding"
+
+
+- type: record
+  name: CommandOutputParameter
+  extends: "#OutputParameter"
+  docParent: "#CommandLineTool"
+  doc: An output parameter for a CommandLineTool.
+  specialize:
+    - specializeFrom: "#OutputSchema"
+      specializeTo: "#CommandOutputSchema"
+  fields:
+    - name: "cwl:outputBinding"
+      type: [ "null", "#CommandOutputBinding" ]
+      doc: |
+        Describes how to handle the concrete outputs of a process step (such as
+        files created by a program) and describe them in the process output
+        parameter.
+
+
+- type: record
+  name: CommandLineTool
+  extends: "#Process"
+  docAfter: "#Workflow"
+  specialize:
+    - specializeFrom: "#InputParameter"
+      specializeTo: "#CommandInputParameter"
+    - specializeFrom: "#OutputParameter"
+      specializeTo: "#CommandOutputParameter"
+  documentRoot: true
+  doc: |
+
+    A CommandLineTool process is a process implementation for executing a
+    non-interactive application in a POSIX environment.  To accommodate
+    the enormous variety in syntax and semantics for input, runtime
+    environment, invocation, and output of arbitrary programs, CommandLineTool
+    uses an "input binding" that describes how to translate input
+    parameters to an actual program invocation, and an "output binding" that
+    describes how to generate output parameters from program output.
+
+    # Input binding
+
+    The tool command line is built by applying command line bindings to the
+    input object.  Bindings are listed either as part of an [input
+    parameter](#commandinputparameter) using the `inputBinding` field, or
+    separately using the `arguments` field of the CommandLineTool.
+
+    The algorithm to build the command line is as follows.  In this algorithm,
+    the sort key is a list consisting of one or more numeric or string
+    elements.  Strings are sorted lexicographically based on UTF-8 encoding.
+
+      1. Collect `CommandLineBinding` objects from `arguments`.  Assign a sorting
+      key `[position, i]` where `position` is
+      [`CommandLineBinding.position`](#commandlinebinding) and `i`
+      is the index in the `arguments` list.
+
+      2. Collect `CommandLineBinding` objects from the `inputs` schema and
+      associate them with values from the input object.  Where the input type
+      is a record, array, or map, recursively walk the schema and input object,
+      collecting nested `CommandLineBinding` objects and associating them with
+      values from the input object.
+
+      3. Create a sorting key by taking the value of the `position` field at
+      each level leading to each leaf binding object.  If `position` is not
+      specified, it is not added to the sorting key.  For bindings on arrays
+      and maps, the sorting key must include the array index or map key
+      following the position.  If and only if two bindings have the same sort
+      key, the tie must be broken using the ordering of the field or parameter
+      name immediately containing the leaf binding.
+
+      4. Sort elements using the assigned sorting keys.  Numeric entries sort
+      before strings.
+
+      5. In the sorted order, apply the rules defined in
+      [`CommandLineBinding`](#commandlinebinding) to convert bindings to actual
+      command line elements.
+
+      6. Insert elements from `baseCommand` at the beginning of the command
+      line.
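+
+    As an illustrative (non-normative) sketch of this algorithm, with invented
+    names: given `baseCommand: echo`, an `arguments` entry with `position: 2`
+    and `valueFrom: "--mode=fast"`, and a string input `word` bound with
+    `position: 1` and value `"hi"`, the argument gets sort key `[2, 0]` and
+    the input gets sort key `[1]`.  Sorting yields the command line:
+
+    ```
+    echo hi --mode=fast
+    ```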
+
+    # Runtime environment
+
+    All files listed in the input object must be made available in the runtime
+    environment.  The implementation may use a shared or distributed file
+    system or transfer files via explicit download.  Implementations may choose
+    not to provide access to files not explicitly specified by the input object
+    or process requirements.
+
+    Output files produced by tool execution must be written to the **designated
+    output directory**.
+
+    The initial current working directory when executing the tool must be the
+    designated output directory.
+
+    When executing the tool, the child process must not inherit environment
+    variables from the parent process.  The tool must execute in a new, empty
+    environment, containing only environment variables defined by
+    [EnvVarRequirement](#envvarrequirement), the default environment of the
+    Docker container specified in [DockerRequirement](#dockerrequirement) (if
+    applicable), and `TMPDIR`.
+
+    The `TMPDIR` environment variable must be set in the runtime environment to
+    the **designated temporary directory**.  Any files written to the
+    designated temporary directory may be deleted by the workflow platform when
+    the tool invocation is complete.
+
+    An implementation may forbid the tool from writing to any location in the
+    runtime environment file system other than the designated temporary
+    directory and designated output directory.  An implementation may provide
+    read-only input files, and disallow in-place update of input files.
+
+    The standard input stream and standard output stream may be redirected as
+    described in the `stdin` and `stdout` fields.
+
+    ## Extensions
+
+    [DockerRequirement](#dockerrequirement),
+    [CreateFileRequirement](#createfilerequirement), and
+    [EnvVarRequirement](#envvarrequirement) are available as standard
+    extensions to core command line tool semantics for defining the runtime
+    environment.
+
+    # Execution
+
+    Once the command line is built and the runtime environment is created, the
+    actual tool is executed.
+
+    The standard error stream and standard output stream (unless redirected by
+    setting `stdout`) may be captured by platform logging facilities for
+    storage and reporting.
+
+    Tools may be multithreaded or spawn child processes; however, when the
+    parent process exits, the tool is considered finished regardless of whether
+    any detached child processes are still running.  Tools must not require any
+    kind of console, GUI, or web based user interaction in order to start and
+    run to completion.
+
+    The exit code of the process indicates if the process completed
+    successfully.  By convention, an exit code of zero is treated as success
+    and non-zero exit codes are treated as failure.  This may be customized by
+    providing the fields `successCodes`, `temporaryFailCodes`, and
+    `permanentFailCodes`.  An implementation may choose to default unspecified
+    non-zero exit codes to either `temporaryFailure` or `permanentFailure`.
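+
+    For example (illustrative values only; 75 is the conventional BSD
+    `EX_TEMPFAIL` code):
+
+    ```
+    successCodes: [0]
+    temporaryFailCodes: [75]
+    permanentFailCodes: [1]
+    ```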
+
+    # Output binding
+
+    If the output directory contains a file named "cwl.output.json", that file
+    must be loaded and used as the output object.  Otherwise, the output object
+    must be generated by walking the parameters listed in `outputs` and
+    applying output bindings to the tool output.  Output bindings are
+    associated with output parameters using the `outputBinding` field.  See
+    [`CommandOutputBinding`](#commandoutputbinding) for details.
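+
+    For example, a tool may write "cwl.output.json" itself; an illustrative
+    file (parameter and file names invented) that sets the output parameter
+    `report` to a file produced by the tool:
+
+    ```
+    {
+      "report": {
+        "class": "File",
+        "path": "report.html"
+      }
+    }
+    ```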
+
+  fields:
+    - name: "class"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+      type: string
+    - name: baseCommand
+      doc: |
+        Specifies the program to execute.  If the value is an array, the first
+        element is the program to execute, and subsequent elements are placed
+        at the beginning of the command line prior to any command line
+        bindings.  If the program includes a path separator character it must
+        be an absolute path, otherwise it is an error.  If the program does not
+        include a path separator, search the `$PATH` variable in the runtime
+        environment of the workflow runner to find the absolute path of the
+        executable.
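+
+        For example, `baseCommand: [tar, xf]` (an illustrative value) runs
+        `tar` with `xf` inserted at the beginning of the command line, before
+        any command line bindings.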
+      type:
+        - string
+        - type: array
+          items: string
+      jsonldPredicate:
+        "_id": "cwl:baseCommand"
+        "_container": "@list"
+    - name: arguments
+      doc: |
+        Command line bindings which are not directly associated with input parameters.
+      type:
+        - "null"
+        - type: array
+          items: [string, "#CommandLineBinding"]
+      jsonldPredicate:
+        "_id": "cwl:arguments"
+        "_container": "@list"
+    - name: stdin
+      type: ["null", string, "#Expression"]
+      doc: |
+        A path to a file whose contents must be piped into the command's
+        standard input stream.
+    - name: stdout
+      type: ["null", string, "#Expression"]
+      doc: |
+        Capture the command's standard output stream to a file written to
+        the designated output directory.
+
+        If `stdout` is a string, it specifies the file name to use.
+
+        If `stdout` is an expression, the expression is evaluated and must
+        return a string with the file name to use to capture stdout.  If the
+        return value is not a string, or the resulting path contains illegal
+        characters (such as the path separator `/`) it is an error.
+    - name: successCodes
+      type:
+        - "null"
+        - type: array
+          items: int
+      doc: |
+        Exit codes that indicate the process completed successfully.
+
+    - name: temporaryFailCodes
+      type:
+        - "null"
+        - type: array
+          items: int
+      doc: |
+        Exit codes that indicate the process failed due to a possibly
+        temporary condition, where executing the process with the same
+        runtime environment and inputs may produce different results.
+
+    - name: permanentFailCodes
+      type:
+        - "null"
+        - type: array
+          items: int
+      doc:
+        Exit codes that indicate the process failed due to a permanent logic
+        error, where executing the process with the same runtime environment and
+        same inputs is expected to always fail.
+
+- type: record
+  name: ExpressionTool
+  extends: "#Process"
+  docAfter: "#CommandLineTool"
+  documentRoot: true
+  doc: |
+    Execute an expression as a process step.
+  fields:
+    - name: "class"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+      type: string
+    - name: expression
+      type: "#Expression"
+      doc: |
+        The expression to execute.  The expression must return a JSON object which
+        matches the output parameters of the ExpressionTool.
+
+
+- name: LinkMergeMethod
+  type: enum
+  docParent: "#WorkflowStepInput"
+  doc: The input link merge method, described in [WorkflowStepInput](#workflowstepinput).
+  symbols:
+    - merge_nested
+    - merge_flattened
+
+
+- name: WorkflowOutputParameter
+  type: record
+  extends: "#OutputParameter"
+  docParent: "#Workflow"
+  doc: |
+    Describe an output parameter of a workflow.  The parameter must be
+    connected to one or more parameters defined in the workflow that will
+    provide the value of the output parameter.
+  fields:
+    - name: source
+      doc: |
+        Specifies one or more workflow parameters that will provide this output
+        value.
+      jsonldPredicate:
+        "_id": "cwl:source"
+        "_type": "@id"
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+    - name: cwl:linkMerge
+      type: ["null", "#LinkMergeMethod"]
+      doc: |
+        The method to use to merge multiple inbound links into a single array.
+        If not specified, the default method is "merge_nested".
+
+
+- type: record
+  name: WorkflowStepInput
+  docParent: "#WorkflowStep"
+  doc: |
+    The input of a workflow step connects an upstream parameter (from the
+    workflow inputs, or the outputs of other workflows steps) with the input
+    parameters of the underlying process.
+
+    ## Input object
+
+    A WorkflowStepInput object must contain an `id` field in the form
+    `#fieldname` or `#stepname.fieldname`.  When the `id` field contains a
+    period `.` the field name consists of the characters following the final
+    period.  This defines a field of the workflow step input object with the
+    value of the `source` parameter(s).
+
+    ## Merging
+
+    If the sink parameter is an array, or named in a [workflow
+    scatter](#workflowstep) operation, there may be multiple inbound data links
+    listed in the `source` field.  The values from the input links are merged
+    depending on the method specified in the `linkMerge` field.  If not
+    specified, the default method is "merge_nested".
+
+    * **merge_nested**
+
+      The input must be an array consisting of exactly one entry for each
+      input link.  If "merge_nested" is specified with a single link, the value
+      from the link must be wrapped in a single-item list.
+
+    * **merge_flattened**
+
+      1. The source and sink parameters must be compatible types, or the source
+         type must be compatible with single element from the "items" type of
+         the destination array parameter.
+      2. Source parameters which are arrays are concatenated.
+         Source parameters which are single element types are appended as
+         single elements.
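+
+    As an illustrative sketch (step and parameter names invented), suppose
+    `#step1.out` provides the value `"a"` and `#step2.out` provides
+    `["b", "c"]`:
+
+    ```
+    - id: "#compute.messages"
+      source: ["#step1.out", "#step2.out"]
+      linkMerge: merge_nested     # yields ["a", ["b", "c"]]
+    ```
+
+    With `linkMerge: merge_flattened` the same links would instead yield
+    `["a", "b", "c"]`.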
+
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "A unique identifier for this workflow input parameter."
+    - name: source
+      doc: |
+        Specifies one or more workflow parameters that will provide input to
+        the underlying process parameter.
+      jsonldPredicate:
+        "_id": "cwl:source"
+        "_type": "@id"
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+    - name: cwl:linkMerge
+      type: ["null", "#LinkMergeMethod"]
+      doc: |
+        The method to use to merge multiple inbound links into a single array.
+        If not specified, the default method is "merge_nested".
+    - name: cwl:default
+      type: ["null", Any]
+      doc: |
+        The default value for this parameter if there is no `source`
+        field.
+
+
+- type: record
+  name: WorkflowStepOutput
+  docParent: "#WorkflowStep"
+  doc: |
+    Associate an output parameter of the underlying process with a workflow
+    parameter.  The workflow parameter (given in the `id` field) may be used
+    as a `source` to connect with input parameters of other workflow steps, or
+    with an output parameter of the process.
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: |
+        A unique identifier for this workflow output parameter.  This is the
+        identifier to use in the `source` field of `WorkflowStepInput` to
+        connect the output value to downstream parameters.
+
+
+- name: ScatterMethod
+  type: enum
+  docParent: "#WorkflowStep"
+  doc: The scatter method, as described in [workflow step scatter](#workflowstep).
+  symbols:
+    - dotproduct
+    - nested_crossproduct
+    - flat_crossproduct
+
+
+- name: WorkflowStep
+  type: record
+  docParent: "#Workflow"
+  doc: |
+    A workflow step is an executable element of a workflow.  It specifies the
+    underlying process implementation (such as `CommandLineTool`) in the `run`
+    field and connects the input and output parameters of the underlying
+    process to workflow parameters.
+
+    # Scatter/gather
+
+    To use scatter/gather,
+    [ScatterFeatureRequirement](#scatterfeaturerequirement) must be specified
+    in the workflow or workflow step requirements.
+
+    A "scatter" operation specifies that the associated workflow step or
+    subworkflow should execute separately over a list of input elements.  Each
+    job making up a scatter operation is independent and may be executed
+    concurrently.
+
+    The `scatter` field specifies one or more input parameters which will be
+    scattered.  An input parameter may be listed more than once.  The declared
+    type of each input parameter is implicitly wrapped in an array for each
+    time it appears in the `scatter` field.  As a result, upstream parameters
+    which are connected to scattered parameters may be arrays.
+
+    All output parameter types are also implicitly wrapped in arrays.  Each job
+    in the scatter results in an entry in the output array.
+
+    If `scatter` declares more than one input parameter, `scatterMethod`
+    describes how to decompose the input into a discrete set of jobs.
+
+      * **dotproduct** specifies that each of the input arrays are aligned and one
+          element taken from each array to construct each job.  It is an error
+          if all input arrays are not the same length.
+
+      * **nested_crossproduct** specifies the Cartesian product of the inputs,
+          producing a job for every combination of the scattered inputs.  The
+          output must be nested arrays for each level of scattering, in the
+          order that the input arrays are listed in the `scatter` field.
+
+      * **flat_crossproduct** specifies the Cartesian product of the inputs,
+          producing a job for every combination of the scattered inputs.  The
+          output arrays must be flattened to a single level, but otherwise listed in the
+          order that the input arrays are listed in the `scatter` field.
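+
+    As an illustrative sketch, scattering over two parameters with values
+    `[1, 2]` and `["a", "b"]` decomposes as follows:
+
+    ```
+    dotproduct:          (1,a) (2,b)
+    flat_crossproduct:   (1,a) (1,b) (2,a) (2,b)
+    nested_crossproduct: [[(1,a) (1,b)], [(2,a) (2,b)]]
+    ```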
+
+    # Subworkflows
+
+    To specify a nested workflow as part of a workflow step,
+    [SubworkflowFeatureRequirement](#subworkflowfeaturerequirement) must be
+    specified in the workflow or workflow step requirements.
+
+  fields:
+    - name: id
+      type: ["null", string]
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this workflow step."
+    - name: cwl:inputs
+      type:
+        type: array
+        items: "#WorkflowStepInput"
+      doc: |
+        Defines the input parameters of the workflow step.  The process is ready to
+        run when all required input parameters are associated with concrete
+        values.  Input parameters include a schema for each parameter which is
+        used to validate the input object.  It may also be used to build a user
+        interface for constructing the input object.
+    - name: cwl:outputs
+      type:
+        type: array
+        items: "#WorkflowStepOutput"
+      doc: |
+        Defines the parameters representing the output of the process.  May be
+        used to generate and/or validate the output object.
+    - name: cwl:requirements
+      type:
+        - "null"
+        - type: array
+          items: "#ProcessRequirement"
+      doc: >
+        Declares requirements that apply to either the runtime environment or the
+        workflow engine that must be met in order to execute this workflow step.  If
+        an implementation cannot satisfy all requirements, or a requirement is
+        listed which is not recognized by the implementation, it is a fatal
+        error and the implementation must not attempt to run the process,
+        unless overridden at user option.
+    - name: hints
+      type:
+        - "null"
+        - type: array
+          items: Any
+      doc: >
+        Declares hints applying to either the runtime environment or the
+        workflow engine that may be helpful in executing this workflow step.  It is
+        not an error if an implementation cannot satisfy all hints, however
+        the implementation may report a warning.
+      jsonldPredicate:
+        _id: cwl:hints
+        noLinkCheck: true
+    - name: label
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this process object."
+    - name: description
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:comment"
+      doc: "A long, human-readable description of this process object."
+    - name: run
+      type: "#Process"
+      doc: |
+        Specifies the process to run.
+    - name: scatter
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+      jsonldPredicate:
+        "_id": "cwl:scatter"
+        "_type": "@id"
+        "_container": "@list"
+    - name: scatterMethod
+      doc: |
+        Required if `scatter` is an array of more than one element.
+      type:
+        - "null"
+        - "#ScatterMethod"
+      jsonldPredicate:
+        "_id": "cwl:scatterMethod"
+        "_type": "@vocab"
+
+
+- name: Workflow
+  type: record
+  docParent: "#Reference"
+  extends: "#Process"
+  specialize:
+    - specializeFrom: "#OutputParameter"
+      specializeTo: "#WorkflowOutputParameter"
+  documentRoot: true
+  doc: |
+    A workflow is a process consisting of one or more `steps`.  Each
+    step has input and output parameters defined by the `inputs` and `outputs`
+    fields.  A workflow executes as described in [execution model](#workflow_graph).
+
+    # Dependencies
+
+    Dependencies between parameters are expressed using the `source` field on
+    [workflow step input parameters](#workflowstepinput) and [workflow output
+    parameters](#workflowoutputparameter).
+
+    The `source` field expresses the dependency of one parameter on another
+    such that when a value is associated with the parameter specified by
+    `source`, that value is propagated to the destination parameter.  When all
+    data links inbound to a given step are fulfilled, the step is ready to
+    execute.
+
+    # Extensions
+
+    [ScatterFeatureRequirement](#scatterfeaturerequirement) and
+    [SubworkflowFeatureRequirement](#subworkflowfeaturerequirement) are
+    available as standard extensions to core workflow semantics.
+
+  fields:
+    - name: "class"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+      type: string
+    - name: steps
+      doc: |
+        The individual steps that make up the workflow.  Each step is executed when all of its
+        input data links are fulfilled.  An implementation may choose to execute
+        the steps in a different order than listed and/or execute steps
+        concurrently, provided that dependencies between steps are met.
+      type:
+        - type: array
+          items: "#WorkflowStep"
+
+
+- type: record
+  name: DockerRequirement
+  extends: "#ProcessRequirement"
+  doc: |
+    Indicates that a workflow component should be run in a
+    [Docker](http://docker.com) container, and specifies how to fetch or build
+    the image.
+
+    If a CommandLineTool lists `DockerRequirement` under
+    `hints` or `requirements`, it may (or must) be run in the specified Docker
+    container.
+
+    The platform must first acquire or install the correct Docker image as
+    specified by `dockerPull`, `dockerLoad` or `dockerFile`.
+
+    The platform must execute the tool in the container using `docker run` with
+    the appropriate Docker image and tool command line.
+
+    The workflow platform may provide input files and the designated output
+    directory through the use of volume bind mounts.  The platform may rewrite
+    file paths in the input object to correspond to the Docker bind mounted
+    locations.
+
+    When running a tool contained in Docker, the workflow platform must not
+    assume anything about the contents of the Docker container, such as the
+    presence or absence of specific software, except to assume that the
+    generated command line represents a valid command within the runtime
+    environment of the container.
+
+    ## Interaction with other requirements
+
+    If [EnvVarRequirement](#envvarrequirement) is specified alongside a
+    DockerRequirement, the environment variables must be provided to Docker
+    using `--env` or `--env-file` and interact with the container's preexisting
+    environment as defined by Docker.
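+
+    A minimal illustrative use, pulling a public image by name (the image
+    shown is just an example):
+
+    ```
+    hints:
+      - class: DockerRequirement
+        dockerPull: node:slim
+    ```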
+
+  fields:
+    - name: dockerPull
+      type: ["null", "string"]
+      doc: "Specify a Docker image to retrieve using `docker pull`."
+    - name: "dockerLoad"
+      type: ["null", "string"]
+      doc: "Specify a HTTP URL from which to download a Docker image using `docker load`."
+    - name: dockerFile
+      type: ["null", "string"]
+      doc: "Supply the contents of a Dockerfile which will be built using `docker build`."
+    - name: dockerImageId
+      type: ["null", "string"]
+      doc: |
+        The image id that will be used for `docker run`.  May be a
+        human-readable image name or the image identifier hash.  May be skipped
+        if `dockerPull` is specified, in which case the `dockerPull` image id
+        must be used.
+    - name: dockerOutputDirectory
+      type: ["null", "string"]
+      doc: |
+        Set the designated output directory to a specific location inside the
+        Docker container.
+
+
+- type: record
+  name: SubworkflowFeatureRequirement
+  extends: "#ProcessRequirement"
+  doc: |
+    Indicates that the workflow platform must support nested workflows in
+    the `run` field of [WorkflowStep](#workflowstep).
+
+
+- name: CreateFileRequirement
+  type: record
+  extends: "#ProcessRequirement"
+  doc: |
+    Define a list of files that must be created by the workflow
+    platform in the designated output directory prior to executing the command
+    line tool.  See `FileDef` for details.
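+
+    A minimal illustrative sketch (assuming the `FileDef` fields `filename`
+    and `fileContent`):
+
+    ```
+    requirements:
+      - class: CreateFileRequirement
+        fileDef:
+          - filename: example.conf
+            fileContent: "CONFIGVAR=Hello world!"
+    ```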
+  fields:
+    - name: fileDef
+      type:
+        type: "array"
+        items: "#FileDef"
+      doc: The list of files.
+
+
+- name: EnvVarRequirement
+  type: record
+  extends: "#ProcessRequirement"
+  doc: |
+    Define a list of environment variables which will be set in the
+    execution environment of the tool.  See `EnvironmentDef` for details.
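+
+    A minimal illustrative sketch (assuming the `EnvironmentDef` fields
+    `envName` and `envValue`):
+
+    ```
+    requirements:
+      - class: EnvVarRequirement
+        envDef:
+          - envName: HELLO
+            envValue: "Hello world!"
+    ```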
+  fields:
+    - name: envDef
+      type:
+        type: "array"
+        items: "#EnvironmentDef"
+      doc: The list of environment variables.
+
+
+- name: ScatterFeatureRequirement
+  type: record
+  extends: "#ProcessRequirement"
+  doc: |
+    Indicates that the workflow platform must support the `scatter` and
+    `scatterMethod` fields of [WorkflowStep](#workflowstep).
+
+
+- name: SchemaDefRequirement
+  type: record
+  extends: "#ProcessRequirement"
+  doc: |
+    This field consists of an array of type definitions which must be used
+    when interpreting the `inputs` and `outputs` fields.  When a symbolic
+    type is encountered that is not in [`Datatype`](#datatype), the
+    implementation must check if the type is defined in `schemaDefs` and use
+    that definition.  If the type is not found in `schemaDefs`, it is an
+    error.  The entries in `schemaDefs` must be processed in the order listed
+    such that later schema definitions may refer to earlier schema
+    definitions.
+  fields:
+    - name: types
+      type:
+        type: array
+        items: "#SchemaDef"
+      doc: The list of type definitions.
+
+
+- type: record
+  name: ExpressionEngineRequirement
+  extends: "#ProcessRequirement"
+  doc: |
+    Define an expression engine, as described in [Expressions](#expressions).
+
+  fields:
+    - name: id
+      type: string
+      doc: "Used to identify the expression engine in the `engine` field of Expressions."
+      jsonldPredicate: "@id"
+    - name: cwl:requirements
+      type:
+        - "null"
+        - type: array
+          items: "#ProcessRequirement"
+      doc: |
+        Requirements to run this expression engine, such as DockerRequirement
+        for specifying a container to run the engine.
+    - name: engineCommand
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+      doc: "The command line to invoke the expression engine."
+    - name: engineConfig
+      type:
+        - "null"
+        - type: array
+          items: string
+      doc: |
+        Additional configuration or code fragments that will also be passed to
+        the expression engine.  The semantics of this field are defined by the
+        underlying expression engine.  Intended for uses such as providing
+        function definitions that will be called from CWL expressions.
diff --git a/cwltool/schemas/draft-3/Process.yml b/cwltool/schemas/draft-3/Process.yml
index 38b217c..6132cd6 100644
--- a/cwltool/schemas/draft-3/Process.yml
+++ b/cwltool/schemas/draft-3/Process.yml
@@ -3,6 +3,7 @@ $base: "https://w3id.org/cwl/cwl#"
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
   sld: "https://w3id.org/cwl/salad#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
 
 $graph:
 
@@ -24,18 +25,19 @@ $graph:
   name: CWLVersions
   doc: "Version symbols for published CWL document versions."
   symbols:
-    - draft-3.dev1
-    - draft-3.dev2
-    - draft-3.dev3
-    - draft-3.dev4
-    - draft-3.dev5
-    - draft-3
+    - cwl:draft-2
+    - cwl:draft-3.dev1
+    - cwl:draft-3.dev2
+    - cwl:draft-3.dev3
+    - cwl:draft-3.dev4
+    - cwl:draft-3.dev5
+    - cwl:draft-3
 
 - name: CWLType
   type: enum
   extends: "sld:PrimitiveType"
   symbols:
-    - File
+    - cwl:File
   doc:
     - "Extends primitive types with the concept of a file as a first class type."
     - "File: A File object"
@@ -506,11 +508,11 @@ $graph:
     - name: cwlVersion
       type:
         - "null"
-        - string
+        - "#CWLVersions"
       doc: "CWL document version"
       jsonldPredicate:
         "_id": "cwl:cwlVersion"
-        "_type": "@id"
+        "_type": "@vocab"
 
 - name: InlineJavascriptRequirement
   type: record
diff --git a/cwltool/schemas/draft-3/README.md b/cwltool/schemas/draft-3/README.md
index 142b728..96aad3b 100644
--- a/cwltool/schemas/draft-3/README.md
+++ b/cwltool/schemas/draft-3/README.md
@@ -2,10 +2,8 @@
 
 The CWL specifications are divided up into several documents.
 
-<!--
 The [User Guide](UserGuide.html) provides a gentle introduction to writing CWL
 command line tools and workflows.
--->
 
 The [Command Line Tool Description Specification](CommandLineTool.html)
 specifies the document schema and execution semantics for wrapping and
@@ -19,3 +17,6 @@ The
 [Semantic Annotations for Linked Avro Data (SALAD) Specification](SchemaSalad.html)
 specifies the preprocessing steps that must be applied when loading CWL
 documents and the schema language used to write the above specifications.
+
+If you use the CWL specifications or distribute CWL descriptions with a
+publication you should [cite the standard](https://dx.doi.org/10.6084/m9.figshare.3115156.v1)
diff --git a/cwltool/schemas/draft-3/UserGuide.yml b/cwltool/schemas/draft-3/UserGuide.yml
index 6bd8e70..2afbe03 100644
--- a/cwltool/schemas/draft-3/UserGuide.yml
+++ b/cwltool/schemas/draft-3/UserGuide.yml
@@ -2,3 +2,862 @@
   type: documentation
   doc:
     - $include: userguide-intro.md
+
+    - |
+      # Wrapping Command Line Tools
+
+    - |
+      ## First example
+
+      The simplest "hello world" program.  This accepts one input parameter,
+      writes a message to the terminal or job log, and produces no permanent
+      output.  CWL documents are written in [JSON](http://json.org) or
+      [YAML](http://yaml.org), or a mix of the two.
+
+      *1st-tool.cwl*
+      ```
+    - $include: examples/1st-tool.cwl
+    - |
+      ```
+
+      Use a YAML object in a separate file to describe the input of a run:
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner 1st-tool.cwl echo-job.yml
+      [job 140199012414352] $ echo 'Hello world!'
+      Hello world!
+      Final process status is success
+      ```
+
+      What's going on here?  Let's break it down:
+
+      ```
+      cwlVersion: cwl:draft-3
+      class: CommandLineTool
+      ```
+
+      The `cwlVersion` field indicates the version of the CWL spec used by the
+      document.  The `class` field indicates this document describes a command
+      line tool.
+
+      ```
+      baseCommand: echo
+      ```
+
+      The `baseCommand` provides the name of the program that will actually
+      run (echo).
+
+      ```
+      inputs:
+        - id: message
+          type: string
+          inputBinding:
+            position: 1
+      ```
+
+      The `inputs` section describes the inputs of the tool.  This is a list of input
+      parameters and each parameter includes an identifier, a data type, and
+      optionally an `inputBinding` which describes how this input parameter
+      should appear on the command line.  In this example, the `position` field
+      indicates where it should appear on the command line.
+
+      ```
+      outputs: []
+      ```
+
+      This tool has no formal output, so the `outputs` section is an empty list.
+
+    - |
+      ## Essential input parameters
+
+      The `inputs` of a tool is a list of input parameters that control how to
+      run the tool.  Each parameter has an `id` for the name of the parameter,
+      and a `type` describing what types of values are valid for that parameter.
+
+      Available primitive types are *string*, *int*, *long*, *float*, *double*,
+      and *null*; complex types are *array* and *record*; in addition there are
+      special types *File* and *Any*.
+
+      The following example demonstrates some input parameters with different
+      types and appearing on the command line in different ways:
+
+
+      *inp.cwl*
+      ```
+    - $include: examples/inp.cwl
+    - |
+      ```
+
+      *inp-job.yml*
+      ```
+    - $include: examples/inp-job.yml
+    - |
+      ```
+
+      Notice that "example_file", as a `File` type, must be provided as an
+      object with the fields `class: File` and `path`.
+
+      Next, create a whale.txt and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ touch whale.txt
+      $ cwl-runner inp.cwl inp-job.yml
+      [job 140020149614160] /home/example$ echo -f -i42 --example-string hello --file=/home/example/whale.txt
+      -f -i42 --example-string hello --file=/home/example/whale.txt
+      Final process status is success
+      ```
+
+      The field `inputBinding` is optional and indicates whether and how the
+      input parameter should appear on the tool's command line.  If
+      `inputBinding` is missing, the parameter does not appear on the command
+      line.  Let's look at each example in detail.
+
+      ```
+      - id: example_flag
+        type: boolean
+        inputBinding:
+          position: 1
+          prefix: -f
+      ```
+
+      Boolean types are treated as a flag.  If the input parameter
+      "example_flag" is "true", then `prefix` will be added to the
+      command line.  If false, no flag is added.
+
+      ```
+      - id: example_string
+        type: string
+        inputBinding:
+          position: 3
+          prefix: --example-string
+      ```
+
+      String types appear on the command line as literal values.  The `prefix`
+      is optional; if provided, it appears as a separate argument on the
+      command line before the parameter.  In the example above, this is
+      rendered as `--example-string hello`.
+
+      ```
+      - id: example_int
+        type: int
+        inputBinding:
+          position: 2
+          prefix: -i
+          separate: false
+      ```
+
+      Integer (and floating point) types appear on the command line with
+      decimal text representation.  When the option `separate` is false (the
+      default value is true), the prefix and value are combined into a single
+      argument.  In the example above, this is rendered as `-i42`.
+
+
+      ```
+      - id: example_file
+        type: ["null", File]
+        inputBinding:
+          prefix: --file=
+          separate: false
+          position: 4
+      ```
+
+      File types appear on the command line as the path to the file.  When the
+      parameter type is a list, this indicates several alternate types are
+      valid for this parameter.  The most common use is to provide "null" as an
+      alternate parameter type, which indicates that the parameter is optional.
+      In the example above, this is rendered as
+      `--file=/home/example/whale.txt`.  However, if the "example_file"
+      parameter were not provided in the input, nothing would appear on the
+      command line.
+
+      Input files are read-only.  If you wish to update an input file, you must
+      first copy it to the output directory.
+
+      The value of `position` is used to determine where the parameter should
+      appear on the command line.  Positions are relative to one another, not
+      absolute.  As a result, positions do not have to be sequential; three
+      parameters with positions `[1, 3, 5]` will result in the same command
+      line as `[1, 2, 3]`.  More than one parameter can have the same position
+      (ties are broken using the parameter name), and the position field itself
+      is optional.  The default position is 0.
+
+      The `baseCommand` field always comes before parameters.
+
+    - |
+      ## Returning output files
+
+      The `outputs` of a tool is a list of output parameters that should be
+      returned after running the tool.  Each parameter has an `id` for the
+      name of the parameter, and a `type` describing what types of values are
+      valid for that parameter.
+
+      When a tool runs under CWL, the starting working directory is the
+      designated output directory.  The underlying tool or script must record
+      its results in the form of files created in the output directory.  The
+      output parameters returned by the CWL tool are either the output files
+      themselves, or come from examining the content of those files.
+
+      *tar.cwl*
+      ```
+    - $include: examples/tar.cwl
+    - |
+      ```
+
+      *tar-job.yml*
+      ```
+    - $include: examples/tar-job.yml
+    - |
+      ```
+
+      Next, create a tar file for the example and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+      ```
+      $ touch hello.txt && tar -cvf hello.tar hello.txt
+      $ cwl-runner tar.cwl tar-job.yml
+      [job 139868145165200] $ tar xf /home/example/hello.tar
+      Final process status is success
+      {
+      "example_out": {
+        "path": "hello.txt",
+        "size": 13,
+        "class": "File",
+        "checksum": "sha1$47a013e660d408619d894b20806b1d5086aab03b"
+        }
+      }
+      ```
+
+      The field `outputBinding` describes how to set the value of each
+      output parameter.
+
+      ```
+      outputs:
+        - id: example_out
+          type: File
+          outputBinding:
+            glob: hello.txt
+      ```
+
+      The `glob` field consists of the name of a file in the output directory.
+      If you don't know the name of the file in advance, you can use a wildcard
+      pattern.
+
+    - |
+      ## Capturing a tool's standard output stream
+
+      To capture a tool's standard output stream, add the `stdout` field with
+      the name of the file where the output stream should go.  Then use `glob`
+      on `outputBinding` to return the file.
+
+      *stdout.cwl*
+      ```
+    - $include: examples/stdout.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner stdout.cwl echo-job.yml
+      [job 140199012414352] $ echo 'Hello world!' > output.txt
+      Final process status is success
+      {
+      "output": {
+        "path": "output.txt",
+        "size": 13,
+        "class": "File",
+        "checksum": "sha1$47a013e660d408619d894b20806b1d5086aab03b"
+        }
+      }
+      $ cat output.txt
+      Hello world!
+      ```
+
+    - |
+      ## Parameter references
+
+      In a previous example, we extracted a file using the "tar" program.
+      However, that example was very limited because it assumed that the file
+      we were interested in was called "hello.txt".  In this example, you will
+      see how to reference the value of input parameters dynamically from other
+      fields.
+
+      *tar-param.cwl*
+      ```
+    - $include: examples/tar-param.cwl
+    - |
+      ```
+
+      *tar-param-job.yml*
+      ```
+    - $include: examples/tar-param-job.yml
+    - |
+      ```
+
+      Create your input files and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+      ```
+      $ rm hello.tar || true && touch goodbye.txt && tar -cvf hello.tar goodbye.txt
+      $ cwl-runner tar-param.cwl tar-param-job.yml
+      [job 139868145165200] $ tar xf /home/example/hello.tar goodbye.txt
+      Final process status is success
+      {
+      "example_out": {
+        "path": "goodbye.txt",
+        "size": 24,
+        "class": "File",
+        "checksum": "sha1$dd0a4c4c49ba43004d6611771972b6cf969c1c01"
+        }
+      }
+      ```
+
+      Certain fields permit parameter references which are enclosed in `$(...)`.
+      These are evaluated and replaced with the value being referenced.
+
+      ```
+      outputs:
+        - id: example_out
+          type: File
+          outputBinding:
+            glob: $(inputs.extractfile)
+      ```
+
+      References are written using a subset of Javascript syntax.  In this
+      example, `$(inputs.extractfile)`, `$(inputs["extractfile"])`, and
+      `$(inputs['extractfile'])` are equivalent.
+
+      The value of the "inputs" variable is the input object provided when the
+      CWL tool was invoked.
+
+      Note that because File parameters are objects, to get the path to an
+      input file you must reference the path field on a file object; to
+      reference the path to the tar file in the above example you would write
+      `$(inputs.tarfile.path)`.
+
+    - |
+      ## Running tools inside Docker
+
+      [Docker](http://docker.io) containers simplify software installation by providing a complete
+      known-good runtime for software and its dependencies.  However,
+      containers are also purposefully isolated from the host system, so in
+      order to run a tool inside a Docker container there is additional work to
+      ensure that input files are available inside the container and output
+      files can be recovered from the container.  CWL can perform this work
+      automatically, allowing you to use Docker to simplify your software
+      management while avoiding the complexity of invoking and managing Docker
+      containers.
+
+      This example runs a simple Node.js script inside a Docker container.
+
+      *docker.cwl*
+      ```
+    - $include: examples/docker.cwl
+    - |
+      ```
+
+      *docker-job.yml*
+      ```
+    - $include: examples/docker-job.yml
+    - |
+      ```
+
+      Provide a hello.js and invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ echo "console.log(\"Hello World\");" > hello.js
+      $ cwl-runner docker.cwl docker-job.yml
+      [job 140259721854416] /home/example$ docker run -i --volume=/home/example/hello.js:/var/lib/cwl/job369354770_examples/hello.js:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpDLs5hm:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp node:slim node /var/lib/cwl/job369354770_examples/hello.js
+      Hello world!
+      Final process status is success
+      ```
+
+      Notice the CWL runner has constructed a Docker command line to run the
+      script.  One of the responsibilities of the CWL runner is to rewrite the
+      paths of input files to reflect the location where they appear inside the
+      container.  In this example, the path to the script `hello.js` is
+      `/home/example/hello.js` outside the container but
+      `/var/lib/cwl/job369354770_examples/hello.js` inside the container, as
+      reflected in the invocation of the `node` command.
+
+    - |
+      ## Additional command line arguments and runtime parameters
+
+      Sometimes tools require additional command line options that don't
+      correspond exactly to input parameters.
+
+      In this example, we will wrap the Java compiler to compile a java source
+      file to a class file.  By default, `javac` will create the class files in
+      the same directory as the source file.  However, CWL input files (and the
+      directories in which they appear) may be read-only, so we need to
+      instruct javac to write the class file to the designated output directory
+      instead.
+
+      *arguments.cwl*
+      ```
+    - $include: examples/arguments.cwl
+    - |
+      ```
+
+      *arguments-job.yml*
+      ```
+    - $include: examples/arguments-job.yml
+    - |
+      ```
+
+      Now create a sample Java file and invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ echo "public class Hello {}" > Hello.java
+      $ cwl-runner arguments.cwl arguments-job.yml
+      [job 140051188854928] /home/example$ docker run -i --volume=/home/example/Hello.java:/var/lib/cwl/job710906416_example/Hello.java:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpdlQDWi:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac -d /var/spool/cwl /var/lib/cwl/job710906416_examples/Hello.java
+      Final process status is success
+      {
+        "classfile": {
+          "size": 416,
+          "path": "/home/example/Hello.class",
+          "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+          "class": "File"
+        }
+      }
+
+      ```
+
+      Here we use the `arguments` field to add an additional argument to the
+      command line that isn't tied to a specific input parameter.
+
+      ```
+      arguments:
+        - prefix: "-d"
+          valueFrom: $(runtime.outdir)
+      ```
+
+      This example references a runtime parameter.  Runtime parameters
+      provide information about the hardware or software environment when the
+      tool is actually executed.  The `$(runtime.outdir)` parameter is the path
+      to the designated output directory.  Other parameters include
+      `$(runtime.tmpdir)`, `$(runtime.ram)`, `$(runtime.cores)`,
+      `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
+      the [Runtime Environment](CommandLineTool.html#Runtime_environment)
+      section of the CWL specification for details.
+
+    - |
+      ## Array inputs
+
+      It is easy to add arrays of input parameters to be represented on the
+      command line.  To specify an array parameter, the array definition is nested
+      under the `type` field with `type: array` and `items` defining the valid
+      data types that may appear in the array.
+
+      *array-inputs.cwl*
+      ```
+    - $include: examples/array-inputs.cwl
+    - |
+      ```
+
+      *array-inputs-job.yml*
+      ```
+    - $include: examples/array-inputs-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner array-inputs.cwl array-inputs-job.yml
+      [job 140334923640912] /home/example$ echo -A one two three -B=four -B=five -B=six -C=seven,eight,nine
+      -A one two three -B=four -B=five -B=six -C=seven,eight,nine
+      Final process status is success
+      {}
+      ```
+
+      The `inputBinding` can appear either on the outer array parameter
+      definition or the inner array element definition, and these produce
+      different behavior when constructing the command line, as shown above.
+      In addition, the `itemSeparator` field, if provided, specifies that array
+      values should be concatenated into a single argument separated by the
+      item separator string.
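+
+      For example, a sketch consistent with the `-C=seven,eight,nine`
+      rendering in the transcript above (the exact parameter definition in
+      `array-inputs.cwl` may differ):
+
+      ```
+      - id: example_C
+        type:
+          type: array
+          items: string
+        inputBinding:
+          prefix: -C=
+          separate: false
+          itemSeparator: ","
+      ```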
+
+      You can specify arrays of arrays, arrays of records, and other complex
+      types.
+
+    - |
+      ## Array outputs
+
+      You can also capture multiple output files into an array of files using `glob`.
+
+      *array-outputs.cwl*
+      ```
+    - $include: examples/array-outputs.cwl
+    - |
+      ```
+
+      *array-outputs-job.yml*
+      ```
+    - $include: examples/array-outputs-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner array-outputs.cwl array-outputs-job.yml
+      [job 140190876078160] /home/example$ touch foo.txt bar.dat baz.txt
+      Final process status is success
+      {
+        "output": [
+          {
+            "size": 0,
+            "path": "/home/peter/work/common-workflow-language/draft-3/examples/foo.txt",
+            "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+            "class": "File"
+          },
+          {
+            "size": 0,
+            "path": "/home/peter/work/common-workflow-language/draft-3/examples/baz.txt",
+            "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+            "class": "File"
+          }
+        ]
+      }
+      ```
+
+    - |
+      ## Record inputs, dependent and mutually exclusive parameters
+
+      Sometimes an underlying tool has several arguments that must be provided
+      together (they are dependent) or several arguments that cannot be
+      provided together (they are exclusive).  You can use records and type
+      unions to group parameters together to describe these two conditions.
+
+      *record.cwl*
+      ```
+    - $include: examples/record.cwl
+    - |
+      ```
+
+      *record-job1.yml*
+      ```
+    - $include: examples/record-job1.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job1.yml
+      Workflow error:
+        Error validating input record, could not validate field `dependent_parameters` because
+        missing required field `itemB`
+      ```
+
+      In the first example, you can't provide `itemA` without also providing `itemB`.
+
+      *record-job2.yml*
+      ```
+    - $include: examples/record-job2.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job2.yml
+      [job 140566927111376] /home/example$ echo -A one -B two -C three
+      -A one -B two -C three
+      Final process status is success
+      {}
+      ```
+
+      In the second example, `itemC` and `itemD` are exclusive, so only `itemC`
+      is added to the command line and `itemD` is ignored.
+
+      *record-job3.yml*
+      ```
+    - $include: examples/record-job3.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job3.yml
+      [job 140606932172880] /home/example$ echo -A one -B two -D four
+      -A one -B two -D four
+      Final process status is success
+      {}
+      ```
+
+      In the third example, only `itemD` is provided, so it appears on the
+      command line.
+
+    - |
+      ## Environment variables
+
+      Tools run in a restricted environment and do not inherit most environment
+      variables from the parent process.  You can set environment variables for
+      the tool using `EnvVarRequirement`.
+
+      *env.cwl*
+      ```
+    - $include: examples/env.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner env.cwl echo-job.yml
+      [job 140710387785808] /home/example$ env
+      PATH=/bin:/usr/bin:/usr/local/bin
+      HELLO=Hello world!
+      TMPDIR=/tmp/tmp63Obpk
+      Final process status is success
+      {}
+      ```
+
+    - |
+      ## Javascript expressions
+
+      If you need to manipulate input parameters, include the requirement
+      `InlineJavascriptRequirement` and then anywhere a parameter reference is
+      legal, you can provide a fragment of Javascript that will be evaluated by
+      the CWL runner.
+
+      *expression.cwl*
+      ```
+    - $include: examples/expression.cwl
+    - |
+      ```
+
+      ```
+      $ cwl-runner expression.cwl empty.yml
+      [job 140000594593168] /home/example$ echo -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
+      -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
+      Final process status is success
+      {}
+      ```
+
+      You can only use expressions in certain fields.  These are: `filename`,
+      `fileContent`, `envValue`, `valueFrom`, `glob`, `outputEval`, `stdin`,
+      `stdout`, `coresMin`, `coresMax`, `ramMin`, `ramMax`, `tmpdirMin`,
+      `tmpdirMax`, `outdirMin`, and `outdirMax`.
+
+    - |
+      ## Creating files at runtime
+
+      Sometimes you need to create a file on the fly from input parameters,
+      such as tools which expect to read their input configuration from a file
+      rather than the command line parameters.  To do this, use
+      `CreateFileRequirement`.
+
+      *createfile.cwl*
+      ```
+    - $include: examples/createfile.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwltool createfile.cwl echo-job.yml
+      [job 140528604979344] /home/example$ cat example.conf
+      CONFIGVAR=Hello world!
+      Final process status is success
+      {}
+      ```
+
+    - |
+      ## Staging input files in the output directory
+
+      Normally, input files are located in a read-only directory separate from
+      the output directory.  This causes problems if the underlying tool
+      expects to write its output files alongside the input file in the same
+      directory.  You can use `CreateFileRequirement` to stage input files into
+      the output directory.  In this example, we use a Javascript expression to
+      extract the base name of the input file from its leading directory path.
+
+      *linkfile.cwl*
+      ```
+    - $include: examples/linkfile.cwl
+    - |
+      ```
+
+      *arguments-job.yml*
+      ```
+    - $include: examples/arguments-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner linkfile.cwl arguments-job.yml
+      [job 139928309171664] /home/example$ docker run -i --volume=/home/example/Hello.java:/var/lib/cwl/job557617295_examples/Hello.java:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpmNbApw:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac Hello.java
+      Final process status is success
+      {
+      "classfile": {
+        "size": 416,
+        "path": "/home/example/Hello.class",
+        "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+        "class": "File"
+        }
+      }
+      ```
+
+    - |
+      # Writing Workflows
+
+      ## First workflow
+
+      This workflow extracts a java source file from a tar file and then
+      compiles it.
+
+      *1st-workflow.cwl*
+      ```
+    - $include: examples/1st-workflow.cwl
+    - |
+      ```
+
+      Use a JSON object in a separate file to describe the input of a run:
+
+      *1st-workflow-job.yml*
+      ```
+    - $include: examples/1st-workflow-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner 1st-workflow.cwl 1st-workflow-job.yml
+      [job untar] /tmp/tmp94qFiM$ tar xf /home/example/hello.tar Hello.java
+      [step untar] completion status is success
+      [job compile] /tmp/tmpu1iaKL$ docker run -i --volume=/tmp/tmp94qFiM/Hello.java:/var/lib/cwl/job301600808_tmp94qFiM/Hello.java:ro --volume=/tmp/tmpu1iaKL:/var/spool/cwl:rw --volume=/tmp/tmpfZnNdR:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac -d /var/spool/cwl /var/lib/cwl/job301600808_tmp94qFiM/Hello.java
+      [step compile] completion status is success
+      [workflow 1st-workflow.cwl] outdir is /home/example
+      Final process status is success
+      {
+        "classout": {
+          "path": "/home/example/Hello.class",
+          "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+          "class": "File",
+          "size": 416
+        }
+      }
+      ```
+
+      What's going on here?  Let's break it down:
+
+      ```
+      cwlVersion: cwl:draft-3
+      class: Workflow
+      ```
+
+      The `cwlVersion` field indicates the version of the CWL spec used by the
+      document.  The `class` field indicates this document describes a workflow.
+
+
+      ```
+      inputs:
+        - id: inp
+          type: File
+        - id: ex
+          type: string
+      ```
+
+      The `inputs` section describes the inputs of the workflow.  This is a
+      list of input parameters where each parameter consists of an identifier
+      and a data type.  These parameters can be used as sources for input to
+      specific workflow steps.
+
+      ```
+      outputs:
+        - id: classout
+          type: File
+          source: "#compile/classfile"
+      ```
+
+      The `outputs` section describes the outputs of the workflow.  This is a
+      list of output parameters where each parameter consists of an identifier
+      and a data type.  The `source` connects the output parameter `classfile`
+      of the `compile` step to the workflow output parameter `classout`.
+
+      ```
+      steps:
+        - id: untar
+          run: tar-param.cwl
+          inputs:
+            - id: tarfile
+              source: "#inp"
+            - id: extractfile
+              source: "#ex"
+          outputs:
+            - id: example_out
+      ```
+
+      The `steps` section describes the actual steps of the workflow.  In this
+      example, the first step extracts a file from a tar file, and the second
+      step compiles the file from the first step using the java compiler.
+      Workflow steps are not necessarily run in the order they are listed;
+      instead, the order is determined by the dependencies between steps (using
+      `source`).  In addition, workflow steps which do not depend on one
+      another may run in parallel.
+
+      The first step, `untar`, runs `tar-param.cwl` (described previously in
+      [Parameter references](#Parameter_references)).  This tool has two input
+      parameters, `tarfile` and `extractfile`, and one output parameter,
+      `example_out`.
+
+      The `inputs` section of the workflow step connects these two input
+      parameters to the inputs of the workflow, `#inp` and `#ex`, using
+      `source`.  This means that when the workflow step is executed, the values
+      assigned to `#inp` and `#ex` will be used for the parameters `tarfile`
+      and `extractfile` in order to run the tool.
+
+      The `outputs` section of the workflow step lists the output parameters
+      that are expected from the tool.
+
+      ```
+        - id: compile
+          run: arguments.cwl
+          inputs:
+            - id: src
+              source: "#untar/example_out"
+          outputs:
+            - id: classfile
+      ```
+
+      The second step, `compile`, depends on the results from the first step by
+      connecting the input parameter `src` to the output parameter of `untar`
+      using `#untar/example_out`.  The output of this step, `classfile`, is
+      connected to the `outputs` section of the workflow, described above.
diff --git a/cwltool/schemas/draft-3/Workflow.yml b/cwltool/schemas/draft-3/Workflow.yml
index 066a66e..4c36dfa 100644
--- a/cwltool/schemas/draft-3/Workflow.yml
+++ b/cwltool/schemas/draft-3/Workflow.yml
@@ -378,8 +378,8 @@ $graph:
   extends: "#Process"
   documentRoot: true
   specialize:
-    specializeFrom: "#OutputParameter"
-    specializeTo: "#WorkflowOutputParameter"
+    - specializeFrom: "#OutputParameter"
+      specializeTo: "#WorkflowOutputParameter"
   doc: |
     A workflow describes a set of **steps** and the **dependencies** between
     those processes.  When a process produces output that will be consumed by a
diff --git a/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml b/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
index 6e90775..8fb2cf7 100644
--- a/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
+++ b/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
@@ -302,8 +302,8 @@ $graph:
   extends: ["#NamedType", "#RecordSchema", "#SchemaDefinedType"]
   documentRoot: true
   specialize:
-    specializeFrom: "#RecordField"
-    specializeTo: "#SaladRecordField"
+    - specializeFrom: "#RecordField"
+      specializeTo: "#SaladRecordField"
   fields:
     - name: abstract
       type: ["null", boolean]
diff --git a/cwltool/schemas/draft-3/userguide-intro.md b/cwltool/schemas/draft-3/userguide-intro.md
index bf60a25..dbcd3e9 100644
--- a/cwltool/schemas/draft-3/userguide-intro.md
+++ b/cwltool/schemas/draft-3/userguide-intro.md
@@ -6,4 +6,23 @@ This guide will introduce you to writing tool wrappers and workflows using the
 Common Workflow Language (CWL).  This guide describes the current stable
 specification, draft 3.
 
-## What is CWL?
+Note: This document is a work in progress.  Not all features are covered, yet.
+
+<!--ToC-->
+
+# Introduction
+
+CWL is a way to describe command line tools and connect them together to create
+workflows.  Because CWL is a specification and not a specific piece of
+software, tools and workflows described using CWL are portable across a variety
+of platforms that support the CWL standard.
+
+CWL has roots in "make" and many similar tools that determine order of
+execution based on dependencies between tasks.  However, unlike "make", CWL
+tasks are isolated, and you must be explicit about your inputs and outputs.  The
+benefits of explicitness and isolation are flexibility, portability, and
+scalability: tools and workflows described with CWL can transparently leverage
+technologies such as Docker, can be used with CWL implementations from different
+vendors, and are well suited for describing large-scale workflows in cluster,
+cloud, and high performance computing environments where tasks are scheduled in
+parallel across many nodes.
diff --git a/cwltool/schemas/v1.0/CommandLineTool-standalone.yml b/cwltool/schemas/v1.0/CommandLineTool-standalone.yml
new file mode 100644
index 0000000..10dbffa
--- /dev/null
+++ b/cwltool/schemas/v1.0/CommandLineTool-standalone.yml
@@ -0,0 +1,2 @@
+- $import: Process.yml
+- $import: CommandLineTool.yml
\ No newline at end of file
diff --git a/cwltool/schemas/v1.0/CommandLineTool.yml b/cwltool/schemas/v1.0/CommandLineTool.yml
new file mode 100644
index 0000000..181c51c
--- /dev/null
+++ b/cwltool/schemas/v1.0/CommandLineTool.yml
@@ -0,0 +1,894 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+
+$graph:
+
+- name: CommandLineToolDoc
+  type: documentation
+  doc:
+    - |
+      # Common Workflow Language (CWL) Command Line Tool Description, v1.0
+
+      This version:
+        * https://w3id.org/cwl/v1.0/
+
+      Current version:
+        * https://w3id.org/cwl/
+    - "\n\n"
+    - {$include: contrib.md}
+    - "\n\n"
+    - |
+      # Abstract
+
+      A Command Line Tool is a non-interactive executable program that reads
+      some input, performs a computation, and terminates after producing some
+      output.  Command line programs are a flexible unit of code sharing and
+      reuse; unfortunately, the syntax and input/output semantics among command
+      line programs are extremely heterogeneous. A common layer for describing
+      the syntax and semantics of programs can reduce this incidental
+      complexity by providing a consistent way to connect programs together.
+      This specification defines the Common Workflow Language (CWL) Command
+      Line Tool Description, a vendor-neutral standard for describing the
+      syntax and input/output semantics of command line programs.
+
+    - {$include: intro.md}
+
+    - |
+      ## Introduction to v1.0
+
+      This specification represents the first full release from the CWL group.
+      Since draft-3, version 1.0 introduces the following changes and additions:
+
+        * The [Directory](#Directory) type.
+        * Syntax simplifications: denoted by the `map<>` syntax. Example: inputs
+          contains a list of items, each with an id. Now one can specify
+          a mapping of that identifier to the corresponding
+          `CommandInputParameter`.
+          ```
+          inputs:
+           - id: one
+             type: string
+             doc: First input parameter
+           - id: two
+             type: int
+             doc: Second input parameter
+          ```
+          can be
+          ```
+          inputs:
+           one:
+            type: string
+            doc: First input parameter
+           two:
+            type: int
+            doc: Second input parameter
+          ```
+        * [InitialWorkDirRequirement](#InitialWorkDirRequirement): list of
+          files and subdirectories to be present in the output directory prior
+          to execution.
+        * Shortcuts for specifying the standard [output](#stdout) and/or
+          [error](#stderr) streams as a (streamable) File output.
+        * [SoftwareRequirement](#SoftwareRequirement) for describing software
+          dependencies of a tool.
+        * The common `description` field has been renamed to `doc`.
+
+      ## Errata
+
+      Post v1.0 release changes to the spec.
+
+        * 13 July 2016: Mark `baseCommand` as optional and update descriptive text.
+
+      ## Purpose
+
+      Standalone programs are a flexible and interoperable form of code reuse.
+      Unlike monolithic applications, applications and analysis workflows which
+      are composed of multiple separate programs can be written in multiple
+      languages and execute concurrently on multiple hosts.  However, POSIX
+      does not dictate computer-readable grammar or semantics for program input
+      and output, resulting in extremely heterogeneous command line grammar and
+      input/output semantics among programs.  This is a particular problem in
+      distributed computing (multi-node compute clusters) and virtualized
+      environments (such as Docker containers) where it is often necessary to
+      provision resources such as input files before executing the program.
+
+      Often this gap is filled by hard coding program invocation and
+      implicitly assuming requirements will be met, or abstracting program
+      invocation with wrapper scripts or descriptor documents.  Unfortunately,
+      where these approaches are application- or platform-specific, they create
+      a significant barrier to reproducibility and portability, as methods
+      developed for one platform must be manually ported to be used on new
+      platforms.  Similarly, they create redundant work, as wrappers for popular
+      tools must be rewritten for each application or platform in use.
+
+      The Common Workflow Language Command Line Tool Description is designed to
+      provide a common standard description of grammar and semantics for
+      invoking programs used in data-intensive fields such as Bioinformatics,
+      Chemistry, Physics, Astronomy, and Statistics.  This specification
+      defines a precise data and execution model for Command Line Tools that
+      can be implemented on a variety of computing platforms, ranging from a
+      single workstation to cluster, grid, cloud, and high performance
+      computing platforms.
+
+    - {$include: concepts.md}
+    - {$include: invocation.md}
+
+
+- type: record
+  name: EnvironmentDef
+  doc: |
+    Define an environment variable that will be set in the runtime environment
+    by the workflow platform when executing the command line tool.  May be the
+    result of executing an expression, such as getting a parameter from input.
+  fields:
+    - name: envName
+      type: string
+      doc: The environment variable name
+    - name: envValue
+      type: [string, Expression]
+      doc: The environment variable value
+
+- type: record
+  name: CommandLineBinding
+  extends: InputBinding
+  doc: |
+
+    When listed under `inputBinding` in the input schema, the term
+    "value" refers to the the corresponding value in the input object.  For
+    binding objects listed in `CommandLineTool.arguments`, the term "value"
+    refers to the effective value after evaluating `valueFrom`.
+
+    The binding behavior when building the command line depends on the data
+    type of the value.  If there is a mismatch between the type described by
+    the input schema and the effective value, such as resulting from an
+    expression evaluation, an implementation must use the data type of the
+    effective value.
+
+      - **string**: Add `prefix` and the string to the command line.
+
+      - **number**: Add `prefix` and decimal representation to command line.
+
+      - **boolean**: If true, add `prefix` to the command line.  If false, add
+          nothing.
+
+      - **File**: Add `prefix` and the value of
+        [`File.path`](#File) to the command line.
+
+      - **array**: If `itemSeparator` is specified, add `prefix` and join
+          the array into a single string with `itemSeparator` separating the
+          items.  Otherwise first add `prefix`, then recursively process
+          individual elements.
+
+      - **object**: Add `prefix` only, and recursively add object fields for
+          which `inputBinding` is specified.
+
+      - **null**: Add nothing.
+
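+    For example, a minimal sketch (the parameter name is illustrative) of an
+    array input bound with `itemSeparator`:
+
+    ```
+    inputs:
+      - id: filter_ids
+        type: {type: array, items: int}
+        inputBinding:
+          prefix: -C
+          itemSeparator: ","
+    ```
+
+    With the input object `{"filter_ids": [10, 9, 8]}` this would be expected
+    to contribute `-C 10,9,8` to the command line.
+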
+  fields:
+    - name: position
+      type: int?
+      doc: "The sorting key.  Default position is 0."
+    - name: prefix
+      type: string?
+      doc: "Command line prefix to add before the value."
+    - name: separate
+      type: boolean?
+      doc: |
+        If true (default), then the prefix and value must be added as separate
+        command line arguments; if false, prefix and value must be concatenated
+        into a single command line argument.
+    - name: itemSeparator
+      type: string?
+      doc: |
+        Join the array elements into a single string with the elements
+        separated by `itemSeparator`.
+    - name: valueFrom
+      type:
+        - "null"
+        - string
+        - Expression
+      jsonldPredicate: "cwl:valueFrom"
+      doc: |
+        If `valueFrom` is a constant string value, use this as the value and
+        apply the binding rules above.
+
+        If `valueFrom` is an expression, evaluate the expression to yield the
+        actual value to use to build the command line and apply the binding
+        rules above.  If the inputBinding is associated with an input
+        parameter, the value of `self` in the expression will be the value of the
+        input parameter.
+
+        When a binding is part of the `CommandLineTool.arguments` field,
+        the `valueFrom` field is required.
+    - name: shellQuote
+      type: boolean?
+      doc: |
+        If `ShellCommandRequirement` is in the requirements for the current command,
+        this controls whether the value is quoted on the command line (default is true).
+        Use `shellQuote: false` to inject metacharacters for operations such as pipes.
+
+- type: record
+  name: CommandOutputBinding
+  extends: OutputBinding
+  doc: |
+    Describes how to generate an output parameter based on the files produced
+    by a CommandLineTool.
+
+    The output parameter is generated by applying these operations in
+    the following order:
+
+      - glob
+      - loadContents
+      - outputEval
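+
+    For example, a minimal sketch (names are illustrative, and
+    `InlineJavascriptRequirement` is assumed to be declared) that globs a
+    report file and derives an integer from its contents:
+
+    ```
+    outputs:
+      - id: line_count
+        type: int
+        outputBinding:
+          glob: report.txt
+          loadContents: true
+          outputEval: $(parseInt(self[0].contents))
+    ```
+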
+  fields:
+    - name: glob
+      type:
+        - "null"
+        - string
+        - Expression
+        - type: array
+          items: string
+      doc: |
+        Find files relative to the output directory, using POSIX glob(3)
+        pathname matching.  If an array is provided, find files that match any
+        pattern in the array.  If an expression is provided, the expression must
+        return a string or an array of strings, which will then be evaluated as
+        one or more glob patterns.  Must only match and return files which
+        actually exist.
+    - name: loadContents
+      type:
+        - "null"
+        - boolean
+      jsonldPredicate: "cwl:loadContents"
+      doc: |
+        For each file matched in `glob`, read up to
+        the first 64 KiB of text from the file and place it in the `contents`
+        field of the file object for manipulation by `outputEval`.
+    - name: outputEval
+      type:
+        - "null"
+        - string
+        - Expression
+      doc: |
+        Evaluate an expression to generate the output value.  If `glob` was
+        specified, the value of `self` must be an array containing file objects
+        that were matched.  If no files were matched, `self` must be a zero
+        length array; if a single file was matched, the value of `self` is an
+        array of a single element.  Additionally, if `loadContents` is `true`,
+        the File objects must include up to the first 64 KiB of file contents
+        in the `contents` field.
+
+
+- name: CommandInputRecordField
+  type: record
+  extends: InputRecordField
+  specialize:
+    - specializeFrom: InputRecordSchema
+      specializeTo: CommandInputRecordSchema
+    - specializeFrom: InputEnumSchema
+      specializeTo: CommandInputEnumSchema
+    - specializeFrom: InputArraySchema
+      specializeTo: CommandInputArraySchema
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+
+- name: CommandInputRecordSchema
+  type: record
+  extends: InputRecordSchema
+  specialize:
+    - specializeFrom: InputRecordField
+      specializeTo: CommandInputRecordField
+
+
+- name: CommandInputEnumSchema
+  type: record
+  extends: InputEnumSchema
+  specialize:
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+
+- name: CommandInputArraySchema
+  type: record
+  extends: InputArraySchema
+  specialize:
+    - specializeFrom: InputRecordSchema
+      specializeTo: CommandInputRecordSchema
+    - specializeFrom: InputEnumSchema
+      specializeTo: CommandInputEnumSchema
+    - specializeFrom: InputArraySchema
+      specializeTo: CommandInputArraySchema
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+
+- name: CommandOutputRecordField
+  type: record
+  extends: OutputRecordField
+  specialize:
+    - specializeFrom: OutputRecordSchema
+      specializeTo: CommandOutputRecordSchema
+    - specializeFrom: OutputEnumSchema
+      specializeTo: CommandOutputEnumSchema
+    - specializeFrom: OutputArraySchema
+      specializeTo: CommandOutputArraySchema
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+
+
+- name: CommandOutputRecordSchema
+  type: record
+  extends: OutputRecordSchema
+  specialize:
+    - specializeFrom: OutputRecordField
+      specializeTo: CommandOutputRecordField
+
+
+- name: CommandOutputEnumSchema
+  type: record
+  extends: OutputEnumSchema
+  specialize:
+    - specializeFrom: OutputRecordSchema
+      specializeTo: CommandOutputRecordSchema
+    - specializeFrom: OutputEnumSchema
+      specializeTo: CommandOutputEnumSchema
+    - specializeFrom: OutputArraySchema
+      specializeTo: CommandOutputArraySchema
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+
+
+- name: CommandOutputArraySchema
+  type: record
+  extends: OutputArraySchema
+  specialize:
+    - specializeFrom: OutputRecordSchema
+      specializeTo: CommandOutputRecordSchema
+    - specializeFrom: OutputEnumSchema
+      specializeTo: CommandOutputEnumSchema
+    - specializeFrom: OutputArraySchema
+      specializeTo: CommandOutputArraySchema
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+
+
+- type: record
+  name: CommandInputParameter
+  extends: InputParameter
+  doc: An input parameter for a CommandLineTool.
+  specialize:
+    - specializeFrom: InputRecordSchema
+      specializeTo: CommandInputRecordSchema
+    - specializeFrom: InputEnumSchema
+      specializeTo: CommandInputEnumSchema
+    - specializeFrom: InputArraySchema
+      specializeTo: CommandInputArraySchema
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+- type: record
+  name: CommandOutputParameter
+  extends: OutputParameter
+  doc: An output parameter for a CommandLineTool.
+  specialize:
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+  fields:
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - stdout
+        - stderr
+        - CommandOutputRecordSchema
+        - CommandOutputEnumSchema
+        - CommandOutputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - CommandOutputRecordSchema
+            - CommandOutputEnumSchema
+            - CommandOutputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
+
+- name: stdout
+  type: enum
+  symbols: [ "cwl:stdout" ]
+  docParent: "#CommandOutputParameter"
+  doc: |
+    Only valid as a `type` for a `CommandLineTool` output with no
+    `outputBinding` set.
+
+    The following
+    ```
+    outputs:
+      an_output_name:
+        type: stdout
+
+    stdout: a_stdout_file
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: a_stdout_file
+
+    stdout: a_stdout_file
+    ```
+
+    If there is no `stdout` name provided, a random filename will be created.
+    For example, the following
+    ```
+    outputs:
+      an_output_name:
+        type: stdout
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: random_stdout_filenameABCDEFG
+
+    stdout: random_stdout_filenameABCDEFG
+    ```
+
+
+- name: stderr
+  type: enum
+  symbols: [ "cwl:stderr" ]
+  docParent: "#CommandOutputParameter"
+  doc: |
+    Only valid as a `type` for a `CommandLineTool` output with no
+    `outputBinding` set.
+
+    The following
+    ```
+    outputs:
+      an_output_name:
+        type: stderr
+
+    stderr: a_stderr_file
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: a_stderr_file
+
+    stderr: a_stderr_file
+    ```
+
+    If there is no `stderr` name provided, a random filename will be created.
+    For example, the following
+    ```
+    outputs:
+      an_output_name:
+        type: stderr
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: random_stderr_filenameABCDEFG
+
+    stderr: random_stderr_filenameABCDEFG
+    ```
+
+
+- type: record
+  name: CommandLineTool
+  extends: Process
+  documentRoot: true
+  specialize:
+    - specializeFrom: InputParameter
+      specializeTo: CommandInputParameter
+    - specializeFrom: OutputParameter
+      specializeTo: CommandOutputParameter
+  doc: |
+    This defines the schema of the CWL Command Line Tool Description document.
+
+  fields:
+    - name: class
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+      type: string
+    - name: baseCommand
+      doc: |
+        Specifies the program to execute.  If an array, the first element of
+        the array is the command to execute, and subsequent elements are
+        mandatory command line arguments.  The elements in `baseCommand` must
+        appear before any command line bindings from `inputBinding` or
+        `arguments`.
+
+        If `baseCommand` is not provided or is an empty array, the first
+        element of the command line produced after processing `inputBinding` or
+        `arguments` must be used as the program to execute.
+
+        If the program includes a path separator character it must
+        be an absolute path, otherwise it is an error.  If the program does not
+        include a path separator, search the `$PATH` variable in the runtime
+        environment of the workflow runner to find the absolute path of the
+        executable.
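+
+        For example, a minimal sketch:
+
+        ```
+        baseCommand: [tar, xf]
+        ```
+
+        runs `tar` with `xf` as a mandatory leading argument, placed before
+        any bindings contributed by `inputBinding` or `arguments`.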
+      type:
+        - string?
+        - string[]?
+      jsonldPredicate:
+        "_id": "cwl:baseCommand"
+        "_container": "@list"
+    - name: arguments
+      doc: |
+        Command line bindings which are not directly associated with input parameters.
+      type:
+        - "null"
+        - type: array
+          items: [string, Expression, CommandLineBinding]
+      jsonldPredicate:
+        "_id": "cwl:arguments"
+        "_container": "@list"
+    - name: stdin
+      type: ["null", string, Expression]
+      doc: |
+        A path to a file whose contents must be piped into the command's
+        standard input stream.
+    - name: stderr
+      type: ["null", string, Expression]
+      jsonldPredicate: "https://w3id.org/cwl/cwl#stderr"
+      doc: |
+        Capture the command's standard error stream to a file written to
+        the designated output directory.
+
+        If `stderr` is a string, it specifies the file name to use.
+
+        If `stderr` is an expression, the expression is evaluated and must
+        return a string with the file name to use to capture stderr.  If the
+        return value is not a string, or the resulting path contains illegal
+        characters (such as the path separator `/`), it is an error.
+    - name: stdout
+      type: ["null", string, Expression]
+      jsonldPredicate: "https://w3id.org/cwl/cwl#stdout"
+      doc: |
+        Capture the command's standard output stream to a file written to
+        the designated output directory.
+
+        If `stdout` is a string, it specifies the file name to use.
+
+        If `stdout` is an expression, the expression is evaluated and must
+        return a string with the file name to use to capture stdout.  If the
+        return value is not a string, or the resulting path contains illegal
+        characters (such as the path separator `/`), it is an error.
+    - name: successCodes
+      type: int[]?
+      doc: |
+        Exit codes that indicate the process completed successfully.
+
+    - name: temporaryFailCodes
+      type: int[]?
+      doc: |
+        Exit codes that indicate the process failed due to a possibly
+        temporary condition, where executing the process with the same
+        runtime environment and inputs may produce different results.
+
+    - name: permanentFailCodes
+      type: int[]?
+      doc:
+        Exit codes that indicate the process failed due to a permanent logic
+        error, where executing the process with the same runtime environment and
+        same inputs is expected to always fail.
+
+
+- type: record
+  name: DockerRequirement
+  extends: ProcessRequirement
+  doc: |
+    Indicates that a workflow component should be run in a
+    [Docker](http://docker.com) container, and specifies how to fetch or build
+    the image.
+
+    If a CommandLineTool lists `DockerRequirement` under
+    `hints` (or `requirements`), it may (or must) be run in the specified Docker
+    container.
+
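+    For example, a minimal sketch (the image name is illustrative) requesting
+    that a tool be run in a pulled image:
+
+    ```
+    hints:
+      DockerRequirement:
+        dockerPull: java:7
+    ```
+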
+    The platform must first acquire or install the correct Docker image as
+    specified by `dockerPull`, `dockerImport`, `dockerLoad` or `dockerFile`.
+
+    The platform must execute the tool in the container using `docker run` with
+    the appropriate Docker image and tool command line.
+
+    The workflow platform may provide input files and the designated output
+    directory through the use of volume bind mounts.  The platform may rewrite
+    file paths in the input object to correspond to the Docker bind mounted
+    locations.
+
+    When running a tool contained in Docker, the workflow platform must not
+    assume anything about the contents of the Docker container, such as the
+    presence or absence of specific software, except to assume that the
+    generated command line represents a valid command within the runtime
+    environment of the container.
+
+    ## Interaction with other requirements
+
+    If [EnvVarRequirement](#EnvVarRequirement) is specified alongside a
+    DockerRequirement, the environment variables must be provided to Docker
+    using `--env` or `--env-file` and interact with the container's preexisting
+    environment as defined by Docker.
+
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'DockerRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: dockerPull
+      type: string?
+      doc: "Specify a Docker image to retrieve using `docker pull`."
+    - name: dockerLoad
+      type: string?
+      doc: "Specify a HTTP URL from which to download a Docker image using `docker load`."
+    - name: dockerFile
+      type: string?
+      doc: "Supply the contents of a Dockerfile which will be built using `docker build`."
+    - name: dockerImport
+      type: string?
+      doc: "Provide HTTP URL to download and gunzip a Docker images using `docker import."
+    - name: dockerImageId
+      type: string?
+      doc: |
+        The image id that will be used for `docker run`.  May be a
+        human-readable image name or the image identifier hash.  May be skipped
+        if `dockerPull` is specified, in which case the `dockerPull` image id
+        must be used.
+    - name: dockerOutputDirectory
+      type: string?
+      doc: |
+        Set the designated output directory to a specific location inside the
+        Docker container.
+
+
+- type: record
+  name: SoftwareRequirement
+  extends: ProcessRequirement
+  doc: |
+    A list of software packages that should be configured in the environment of
+    the defined process.
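+
+    For example, a minimal sketch (the version pin is illustrative) using the
+    `bowtie` package and spec IRI mentioned under `SoftwarePackage`:
+
+    ```
+    hints:
+      SoftwareRequirement:
+        packages:
+          bowtie:
+            version: ["1.1.2"]
+            specs: ["https://anaconda.org/bioconda/bowtie"]
+    ```
+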
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'SoftwareRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: packages
+      type: SoftwarePackage[]
+      doc: "The list of software to be configured."
+      jsonldPredicate:
+        mapSubject: package
+        mapPredicate: specs
+
+- name: SoftwarePackage
+  type: record
+  fields:
+    - name: package
+      type: string
+      doc: "The common name of the software to be configured."
+    - name: version
+      type: string[]?
+      doc: "The (optional) version of the software to configured."
+    - name: specs
+      type: string[]?
+      doc: |
+        Must be one or more IRIs identifying resources for installing or
+        enabling the software.  Implementations may provide resolvers which map
+        well-known software spec IRIs to some configuration action.
+
+        For example, an IRI `https://packages.debian.org/jessie/bowtie` could
+        be resolved with `apt-get install bowtie`.  An IRI
+        `https://anaconda.org/bioconda/bowtie` could be resolved with `conda
+        install -c bioconda bowtie`.
+
+        Tools may also provide IRIs to index entries such as
+        [RRID](http://www.identifiers.org/rrid/), for example
+        `http://identifiers.org/rrid/RRID:SCR_005476`.
+
+
+- name: Dirent
+  type: record
+  doc: |
+    Define a file or subdirectory that must be placed in the designated output
+    directory prior to executing the command line tool.  May be the result of
+    executing an expression, such as building a configuration file from a
+    template.
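+
+    For example, a minimal sketch (assuming a `message` input parameter) that
+    builds a configuration file before the tool runs:
+
+    ```
+    requirements:
+      InitialWorkDirRequirement:
+        listing:
+          - entryname: example.conf
+            entry: |
+              CONFIGVAR=$(inputs.message)
+    ```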
+  fields:
+    - name: entryname
+      type: ["null", string, Expression]
+      jsonldPredicate:
+        _id: cwl:entryname
+      doc: |
+        The name of the file or subdirectory to create in the output directory.
+        If `entry` is a File or Directory, this overrides `basename`.  Optional.
+    - name: entry
+      type: [string, Expression]
+      jsonldPredicate:
+        _id: cwl:entry
+      doc: |
+        If the value is a string literal or an expression which evaluates to a
+        string, a new file must be created with the string as the file contents.
+
+        If the value is an expression that evaluates to a `File` object, this
+        indicates the referenced file should be added to the designated output
+        directory prior to executing the tool.
+
+        If the value is an expression that evaluates to a `Dirent` object, this
+        indicates that the File or Directory in `entry` should be added to the
+        designated output directory with the name in `entryname`.
+
+        If `writable` is false, the file may be made available using a bind
+        mount or file system link to avoid unnecessary copying of the input
+        file.
+    - name: writable
+      type: boolean?
+      doc: |
+        If true, the file or directory must be writable by the tool.  Changes
+        to the file or directory must be isolated and not visible by any other
+        CommandLineTool process.  This may be implemented by making a copy of
+        the original file or directory.  Default false (files and directories
+        read-only by default).
+
+
+- name: InitialWorkDirRequirement
+  type: record
+  extends: ProcessRequirement
+  doc:
+    Define a list of files and subdirectories that must be created by the
+    workflow platform in the designated output directory prior to executing the
+    command line tool.
+  fields:
+    - name: class
+      type: string
+      doc: InitialWorkDirRequirement
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: listing
+      type:
+        - type: array
+          items: [File, Directory, Dirent, string, Expression]
+        - string
+        - Expression
+      jsonldPredicate:
+        _id: "cwl:listing"
+      doc: |
+        The list of files or subdirectories that must be placed in the
+        designated output directory prior to executing the command line tool.
+
+        May be an expression.  If so, the expression return value must validate
+        as `{type: array, items: [File, Directory]}`.
+
+
+- name: EnvVarRequirement
+  type: record
+  extends: ProcessRequirement
+  doc: |
+    Define a list of environment variables which will be set in the
+    execution environment of the tool.  See `EnvironmentDef` for details.
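+
+    For example, a minimal sketch (assuming a `message` input parameter):
+
+    ```
+    requirements:
+      EnvVarRequirement:
+        envDef:
+          HELLO: $(inputs.message)
+    ```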
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'EnvVarRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: envDef
+      type: EnvironmentDef[]
+      doc: The list of environment variables.
+      jsonldPredicate:
+        mapSubject: envName
+        mapPredicate: envValue
+
+
+- type: record
+  name: ShellCommandRequirement
+  extends: ProcessRequirement
+  doc: |
+    Modify the behavior of CommandLineTool to generate a single string
+    containing a shell command line.  Each item in the argument list must be
+    joined into a string separated by single spaces and quoted to prevent
+    interpretation by the shell, unless `CommandLineBinding` for that argument
+    contains `shellQuote: false`.  If `shellQuote: false` is specified, the
+    argument is joined into the command string without quoting, which allows
+    the use of shell metacharacters such as `|` for pipes.
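+
+    For example, a minimal sketch (file names are illustrative) joining two
+    commands with a pipe:
+
+    ```
+    requirements:
+      ShellCommandRequirement: {}
+    arguments:
+      - shellQuote: false
+        valueFrom: "cat input.txt | wc -l"
+    ```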
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'ShellCommandRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+
+
+- type: record
+  name: ResourceRequirement
+  extends: ProcessRequirement
+  doc: |
+    Specify basic hardware resource requirements.
+
+    "min" is the minimum amount of a resource that must be reserved to schedule
+    a job. If "min" cannot be satisfied, the job should not be run.
+
+    "max" is the maximum amount of a resource that the job shall be permitted
+    to use. If a node has sufficient resources, multiple jobs may be scheduled
+    on a single node provided each job's "max" resource requirements are
+    met. If a job attempts to exceed its "max" resource allocation, an
+    implementation may deny additional resources, which may result in job
+    failure.
+
+    If "min" is specified but "max" is not, then "max" == "min"
+    If "max" is specified by "min" is not, then "min" == "max".
+
+    It is an error if max < min.
+
+    It is an error if the value of any of these fields is negative.
+
+    If neither "min" nor "max" is specified for a resource, an implementation may provide a default.
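+
+    For example, a minimal sketch reserving at least 2 cores and 1024
+    mebibytes of RAM; since no "max" is given, "max" == "min":
+
+    ```
+    requirements:
+      ResourceRequirement:
+        coresMin: 2
+        ramMin: 1024
+    ```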
+
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'ResourceRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: coresMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved number of CPU cores
+
+    - name: coresMax
+      type: ["null", int, string, Expression]
+      doc: Maximum reserved number of CPU cores
+
+    - name: ramMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved RAM in mebibytes (2**20)
+
+    - name: ramMax
+      type: ["null", long, string, Expression]
+      doc: Maximum reserved RAM in mebibytes (2**20)
+
+    - name: tmpdirMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved filesystem based storage for the designated temporary directory, in mebibytes (2**20)
+
+    - name: tmpdirMax
+      type: ["null", long, string, Expression]
+      doc: Maximum reserved filesystem based storage for the designated temporary directory, in mebibytes (2**20)
+
+    - name: outdirMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved filesystem based storage for the designated output directory, in mebibytes (2**20)
+
+    - name: outdirMax
+      type: ["null", long, string, Expression]
+      doc: Maximum reserved filesystem based storage for the designated output directory, in mebibytes (2**20)
diff --git a/cwltool/schemas/v1.0/CommonWorkflowLanguage.yml b/cwltool/schemas/v1.0/CommonWorkflowLanguage.yml
new file mode 100644
index 0000000..73921e8
--- /dev/null
+++ b/cwltool/schemas/v1.0/CommonWorkflowLanguage.yml
@@ -0,0 +1,11 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+  sld: "https://w3id.org/cwl/salad#"
+
+$graph:
+
+- $import: Process.yml
+- $import: CommandLineTool.yml
+- $import: Workflow.yml
diff --git a/cwltool/schemas/v1.0/Process.yml b/cwltool/schemas/v1.0/Process.yml
new file mode 100644
index 0000000..37b2038
--- /dev/null
+++ b/cwltool/schemas/v1.0/Process.yml
@@ -0,0 +1,743 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+  sld: "https://w3id.org/cwl/salad#"
+
+$graph:
+
+- name: "Common Workflow Language, v1.0"
+  type: documentation
+  doc: {$include: concepts.md}
+
+- $import: "salad/schema_salad/metaschema/metaschema_base.yml"
+
+- name: BaseTypesDoc
+  type: documentation
+  doc: |
+    ## Base types
+  docChild:
+    - "#CWLType"
+    - "#Process"
+
+- type: enum
+  name: CWLVersion
+  doc: "Version symbols for published CWL document versions."
+  symbols:
+    - cwl:draft-2
+    - cwl:draft-3.dev1
+    - cwl:draft-3.dev2
+    - cwl:draft-3.dev3
+    - cwl:draft-3.dev4
+    - cwl:draft-3.dev5
+    - cwl:draft-3
+    - cwl:draft-4.dev1
+    - cwl:draft-4.dev2
+    - cwl:draft-4.dev3
+    - cwl:v1.0.dev4
+    - cwl:v1.0
+
+- name: CWLType
+  type: enum
+  extends: "sld:PrimitiveType"
+  symbols:
+    - cwl:File
+    - cwl:Directory
+  doc:
+    - "Extends primitive types with the concept of a file and directory as a builtin type."
+    - "File: A File object"
+    - "Directory: A Directory object"
+
+- name: File
+  type: record
+  docParent: "#CWLType"
+  doc: |
+    Represents a file (or group of files if `secondaryFiles` is specified) that
+    must be accessible by tools using standard POSIX file system call API such as
+    open(2) and read(2).
+  fields:
+    - name: class
+      type:
+        type: enum
+        name: File_class
+        symbols:
+          - cwl:File
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+      doc: Must be `File` to indicate this object describes a file.
+    - name: location
+      type: string?
+      doc: |
+        An IRI that identifies the file resource.  This may be a relative
+        reference, in which case it must be resolved using the base IRI of the
+        document.  The location may refer to a local or remote resource; the
+        implementation must use the IRI to retrieve file content.  If an
+        implementation is unable to retrieve the file content stored at a
+        remote resource (due to unsupported protocol, access denied, or other
+        issue) it must signal an error.
+
+        If the `location` field is not provided, the `contents` field must be
+        provided.  The implementation must assign a unique identifier for
+        the `location` field.
+
+        If the `path` field is provided but the `location` field is not, an
+        implementation may assign the value of the `path` field to `location`,
+        then follow the rules above.
+      jsonldPredicate:
+        _id: "@id"
+        _type: "@id"
+    - name: path
+      type: string?
+      doc: |
+        The local host path where the File is available when a CommandLineTool is
+        executed.  This field must be set by the implementation.  The final
+        path component must match the value of `basename`.  This field
+        must not be used in any other context.  The command line tool being
+        executed must be able to access the file at `path` using the POSIX
+        `open(2)` syscall.
+
+        As a special case, if the `path` field is provided but the `location`
+        field is not, an implementation may assign the value of the `path`
+        field to `location`, and remove the `path` field.
+
+        If the `path` contains [POSIX shell metacharacters](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)
+        (`|`,`&`, `;`, `<`, `>`, `(`,`)`, `$`,`` ` ``, `\`, `"`, `'`,
+        `<space>`, `<tab>`, and `<newline>`) or characters
+        [not allowed](http://www.iana.org/assignments/idna-tables-6.3.0/idna-tables-6.3.0.xhtml)
+        for [Internationalized Domain Names for Applications](https://tools.ietf.org/html/rfc6452)
+        then implementations may terminate the process with a
+        `permanentFailure`.
+      jsonldPredicate:
+        "_id": "cwl:path"
+        "_type": "@id"
+    - name: basename
+      type: string?
+      doc: |
+        The base name of the file, that is, the name of the file without any
+        leading directory path.  The base name must not contain a slash `/`.
+
+        If not provided, the implementation must set this field based on the
+        `location` field by taking the final path component after parsing
+        `location` as an IRI.  If `basename` is provided, it is not required to
+        match the value from `location`.
+
+        When this file is made available to a CommandLineTool, it must be named
+        with `basename`, i.e. the final component of the `path` field must match
+        `basename`.
+      jsonldPredicate: "cwl:basename"
+    - name: dirname
+      type: string?
+      doc: |
+        The name of the directory containing the file, that is, the path leading up
+        to the final slash in the path such that `dirname + '/' + basename ==
+        path`.
+
+        The implementation must set this field based on the value of `path`
+        prior to evaluating parameter references or expressions in a
+        CommandLineTool document.  This field must not be used in any other
+        context.
+    - name: nameroot
+      type: string?
+      doc: |
+        The basename root such that `nameroot + nameext == basename`, and
+        `nameext` is empty or begins with a period and contains at most one
+        period.  For the purposes of path splitting, leading periods on the
+        basename are ignored; a basename of `.cshrc` will have a nameroot of
+        `.cshrc`.
+
+        The implementation must set this field automatically based on the value
+        of `basename` prior to evaluating parameter references or expressions.
+    - name: nameext
+      type: string?
+      doc: |
+        The basename extension such that `nameroot + nameext == basename`, and
+        `nameext` is empty or begins with a period and contains at most one
+        period.  Leading periods on the basename are ignored; a basename of
+        `.cshrc` will have an empty `nameext`.
+
+        The implementation must set this field automatically based on the value
+        of `basename` prior to evaluating parameter references or expressions.
+    - name: checksum
+      type: string?
+      doc: |
+        Optional hash code for validating file integrity.  Currently must be in the form
+        "sha1$ + hexadecimal string" using the SHA-1 algorithm.
+    - name: size
+      type: long?
+      doc: Optional file size
+    - name: "secondaryFiles"
+      type:
+        - "null"
+        - type: array
+          items: [File, Directory]
+      jsonldPredicate: "cwl:secondaryFiles"
+      doc: |
+        A list of additional files that are associated with the primary file
+        and must be transferred alongside the primary file.  Examples include
+        indexes of the primary file, or external references which must be
+        included when loading the primary document.  A file object listed in
+        `secondaryFiles` may itself include `secondaryFiles` for which the same
+        rules apply.
+    - name: format
+      type: string?
+      jsonldPredicate:
+        _id: cwl:format
+        _type: "@id"
+        identity: true
+      doc: |
+        The format of the file: this must be an IRI of a concept node that
+        represents the file format, preferably defined within an ontology.
+        If no ontology is available, file formats may be tested by exact match.
+
+        Reasoning about format compatibility must be done by checking that an
+        input file format is the same as, `owl:equivalentClass` to, or
+        `rdfs:subClassOf` the format required by the input parameter.
+        `owl:equivalentClass` is transitive with `rdfs:subClassOf`, e.g. if
+        `<B> owl:equivalentClass <C>` and `<B> rdfs:subClassOf <A>` then infer
+        `<C> rdfs:subClassOf <A>`.
+
+        File format ontologies may be provided in the "$schema" metadata at the
+        root of the document.  If no ontologies are specified in `$schema`, the
+        runtime may perform exact file format matches.
+    - name: contents
+      type: string?
+      doc: |
+        File contents literal.  Maximum of 64 KiB.
+
+        If neither `location` nor `path` is provided, `contents` must be
+        non-null.  The implementation must assign a unique identifier for the
+        `location` field.  When the file is staged as input to CommandLineTool,
+        the value of `contents` must be written to a file.
+
+        If `loadContents` of `inputBinding` or `outputBinding` is true and
+        `location` is valid, the implementation must read up to the first 64
+        KiB of text from the file and place it in the "contents" field.
+
+
+- name: Directory
+  type: record
+  docAfter: "#File"
+  doc: |
+    Represents a directory to present to a command line tool.
+  fields:
+    - name: class
+      type:
+        type: enum
+        name: Directory_class
+        symbols:
+          - cwl:Directory
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+      doc: Must be `Directory` to indicate this object describes a Directory.
+    - name: location
+      type: string?
+      doc: |
+        An IRI that identifies the directory resource.  This may be a relative
+        reference, in which case it must be resolved using the base IRI of the
+        document.  The location may refer to a local or remote resource.  If
+        the `listing` field is not set, the implementation must use the
+        location IRI to retrieve directory listing.  If an implementation is
+        unable to retrieve the directory listing stored at a remote resource (due to
+        unsupported protocol, access denied, or other issue) it must signal an
+        error.
+
+        If the `location` field is not provided, the `listing` field must be
+        provided.  The implementation must assign a unique identifier for
+        the `location` field.
+
+        If the `path` field is provided but the `location` field is not, an
+        implementation may assign the value of the `path` field to `location`,
+        then follow the rules above.
+      jsonldPredicate:
+        _id: "@id"
+        _type: "@id"
+    - name: path
+      type: string?
+      doc: |
+        The local path where the Directory is made available prior to executing a
+        CommandLineTool.  This must be set by the implementation.  This field
+        must not be used in any other context.  The command line tool being
+        executed must be able to access the directory at `path` using the POSIX
+        `opendir(2)` syscall.
+
+        If the `path` contains [POSIX shell metacharacters](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)
+        (`|`,`&`, `;`, `<`, `>`, `(`,`)`, `$`,`` ` ``, `\`, `"`, `'`,
+        `<space>`, `<tab>`, and `<newline>`) or characters
+        [not allowed](http://www.iana.org/assignments/idna-tables-6.3.0/idna-tables-6.3.0.xhtml)
+        for [Internationalized Domain Names for Applications](https://tools.ietf.org/html/rfc6452)
+        then implementations may terminate the process with a
+        `permanentFailure`.
+      jsonldPredicate:
+        _id: "cwl:path"
+        _type: "@id"
+    - name: basename
+      type: string?
+      doc: |
+        The base name of the directory, that is, the name of the directory
+        without any leading directory path.  The base name must not contain a
+        slash `/`.
+
+        If not provided, the implementation must set this field based on the
+        `location` field by taking the final path component after parsing
+        `location` as an IRI.  If `basename` is provided, it is not required to
+        match the value from `location`.
+
+        When this directory is made available to a CommandLineTool, it must be
+        named with `basename`, i.e. the final component of the `path` field must
+        match `basename`.
+      jsonldPredicate: "cwl:basename"
+    - name: listing
+      type:
+        - "null"
+        - type: array
+          items: [File, Directory]
+      doc: |
+        List of files or subdirectories contained in this directory.  The name
+        of each file or subdirectory is determined by the `basename` field of
+        each `File` or `Directory` object.  It is an error if a `File` shares a
+        `basename` with any other entry in `listing`.  If two or more
+        `Directory` objects share the same `basename`, this must be treated as
+        equivalent to a single subdirectory with the listings recursively
+        merged.
+      jsonldPredicate:
+        _id: "cwl:listing"
+
+- name: SchemaBase
+  type: record
+  abstract: true
+  fields:
+    - name: label
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this object."
+
+
+- name: Parameter
+  type: record
+  extends: SchemaBase
+  abstract: true
+  doc: |
+    Define an input or output parameter to a process.
+
+  fields:
+    - name: secondaryFiles
+      type:
+        - "null"
+        - string
+        - Expression
+        - type: array
+          items: [string, Expression]
+      jsonldPredicate: "cwl:secondaryFiles"
+      doc: |
+        Only valid when `type: File` or is an array of `items: File`.
+
+        Describes files that must be included alongside the primary file(s).
+
+        If the value is an expression, the value of `self` in the expression
+        must be the primary input or output File to which this binding applies.
+
+        If the value is a string, it specifies that the following pattern
+        should be applied to the primary file:
+
+          1. If the string begins with one or more caret `^` characters, for each
+            caret, remove the last file extension from the path (the last
+            period `.` and all following characters).  If there are no file
+            extensions, the path is unchanged.
+          2. Append the remainder of the string to the end of the file path.
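+
+        For example, a minimal sketch (file names are illustrative) for a
+        primary file `reads.bam`:
+
+        ```
+        secondaryFiles:
+          - .bai     # pattern appended: reads.bam.bai
+          - ^.bai    # one extension stripped first: reads.bai
+        ```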
+
+    - name: format
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+        - Expression
+      jsonldPredicate:
+        _id: cwl:format
+        _type: "@id"
+        identity: true
+      doc: |
+        Only valid when `type: File` or is an array of `items: File`.
+
+        For input parameters, this must be one or more IRIs of concept nodes
+        that represent file formats which are allowed as input to this
+        parameter, preferably defined within an ontology.  If no ontology is
+        available, file formats may be tested by exact match.
+
+        For output parameters, this is the file format that will be assigned to
+        the output parameter.
+
+    - name: streamable
+      type: boolean?
+      doc: |
+        Only valid when `type: File` or is an array of `items: File`.
+
+        A value of `true` indicates that the file is read or written
+        sequentially without seeking.  An implementation may use this flag to
+        indicate whether it is valid to stream file contents using a named
+        pipe.  Default: `false`.
+
+    - name: doc
+      type:
+        - string?
+        - string[]?
+      doc: "A documentation string for this type, or an array of strings which should be concatenated."
+      jsonldPredicate: "rdfs:comment"
+
+
+- type: enum
+  name: Expression
+  doc: |
+    'Expression' is not a real type.  It indicates that a field must allow
+    runtime parameter references.  If [InlineJavascriptRequirement](#InlineJavascriptRequirement)
+    is declared and supported by the platform, the field must also allow
+    Javascript expressions.
+  symbols:
+    - cwl:ExpressionPlaceholder
+
+
+- name: InputBinding
+  type: record
+  abstract: true
+  fields:
+    - name: loadContents
+      type:
+        - "null"
+        - boolean
+      jsonldPredicate: "cwl:loadContents"
+      doc: |
+        Only valid when `type: File` or is an array of `items: File`.
+
+        Read up to the first 64 KiB of text from the file and place it in the
+        "contents" field of the file object for use by expressions.
+
+
+- name: OutputBinding
+  type: record
+  abstract: true
+
+
+- name: InputSchema
+  extends: SchemaBase
+  type: record
+  abstract: true
+
+
+- name: OutputSchema
+  extends: SchemaBase
+  type: record
+  abstract: true
+
+
+- name: InputRecordField
+  type: record
+  extends: "sld:RecordField"
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: InputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: InputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: InputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+    - name: label
+      type: string?
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this process object."
+
+
+- name: InputRecordSchema
+  type: record
+  extends: ["sld:RecordSchema", InputSchema]
+  specialize:
+    - specializeFrom: "sld:RecordField"
+      specializeTo: InputRecordField
+
+
+- name: InputEnumSchema
+  type: record
+  extends: ["sld:EnumSchema", InputSchema]
+  fields:
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+
+
+- name: InputArraySchema
+  type: record
+  extends: ["sld:ArraySchema", InputSchema]
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: InputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: InputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: InputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+
+
+- name: OutputRecordField
+  type: record
+  extends: "sld:RecordField"
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: OutputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: OutputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: OutputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+
+
+- name: OutputRecordSchema
+  type: record
+  extends: ["sld:RecordSchema", "#OutputSchema"]
+  docParent: "#OutputParameter"
+  specialize:
+    - specializeFrom: "sld:RecordField"
+      specializeTo: OutputRecordField
+
+
+- name: OutputEnumSchema
+  type: record
+  extends: ["sld:EnumSchema", OutputSchema]
+  docParent: "#OutputParameter"
+  fields:
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+
+- name: OutputArraySchema
+  type: record
+  extends: ["sld:ArraySchema", OutputSchema]
+  docParent: "#OutputParameter"
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: OutputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: OutputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: OutputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+
+
+- name: InputParameter
+  type: record
+  extends: Parameter
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this parameter object."
+
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+      doc: |
+        Describes how to handle the inputs of a process and convert them
+        into a concrete form for execution, such as command line parameters.
+
+    - name: default
+      type: Any?
+      jsonldPredicate: "cwl:default"
+      doc: |
+        The default value for this parameter if not provided in the input
+        object.
+
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - InputRecordSchema
+        - InputEnumSchema
+        - InputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - InputRecordSchema
+            - InputEnumSchema
+            - InputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
+
+- name: OutputParameter
+  type: record
+  extends: Parameter
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this parameter object."
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+      doc: |
+        Describes how to handle the outputs of a process.
+
+
+- type: record
+  name: ProcessRequirement
+  abstract: true
+  doc: |
+    A process requirement declares a prerequisite that may or must be fulfilled
+    before executing a process.  See [`Process.hints`](#process) and
+    [`Process.requirements`](#process).
+
+    Process requirements are the primary mechanism for specifying extensions to
+    the CWL core specification.
+
+
+- type: record
+  name: Process
+  abstract: true
+  doc: |
+
+    The base executable type in CWL is the `Process` object defined by the
+    document.  Note that the `Process` object is abstract and cannot be
+    directly executed.
+
+  fields:
+    - name: id
+      type: string?
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this process object."
+    - name: inputs
+      type:
+        type: array
+        items: InputParameter
+      jsonldPredicate:
+        _id: "cwl:inputs"
+        mapSubject: id
+        mapPredicate: type
+      doc: |
+        Defines the input parameters of the process.  The process is ready to
+        run when all required input parameters are associated with concrete
+        values.  Input parameters include a schema for each parameter which is
+        used to validate the input object.  It may also be used to build a user
+        interface for constructing the input object.
+    - name: outputs
+      type:
+        type: array
+        items: OutputParameter
+      jsonldPredicate:
+        _id: "cwl:outputs"
+        mapSubject: id
+        mapPredicate: type
+      doc: |
+        Defines the parameters representing the output of the process.  May be
+        used to generate and/or validate the output object.
+    - name: requirements
+      type: ProcessRequirement[]?
+      jsonldPredicate:
+        _id: "cwl:requirements"
+        mapSubject: class
+      doc: |
+        Declares requirements that apply to either the runtime environment or the
+        workflow engine that must be met in order to execute this process.  If
+        an implementation cannot satisfy all requirements, or a requirement is
+        listed which is not recognized by the implementation, it is a fatal
+        error and the implementation must not attempt to run the process,
+        unless overridden at user option.
+    - name: hints
+      type: Any[]?
+      doc: |
+        Declares hints applying to either the runtime environment or the
+        workflow engine that may be helpful in executing this process.  It is
+        not an error if an implementation cannot satisfy all hints, however
+        the implementation may report a warning.
+      jsonldPredicate:
+        _id: cwl:hints
+        noLinkCheck: true
+        mapSubject: class
+    - name: label
+      type: string?
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this process object."
+    - name: doc
+      type: string?
+      jsonldPredicate: "rdfs:comment"
+      doc: "A long, human-readable description of this process object."
+    - name: cwlVersion
+      type: CWLVersion?
+      doc: |
+        CWL document version. Always required at the document root. Not
+        required for a Process embedded inside another Process.
+      jsonldPredicate:
+        "_id": "cwl:cwlVersion"
+        "_type": "@vocab"
+
+- name: InlineJavascriptRequirement
+  type: record
+  extends: ProcessRequirement
+  doc: |
+    Indicates that the workflow platform must support inline Javascript expressions.
+    If this requirement is not present, the workflow platform must not perform expression
+    interpolation.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'InlineJavascriptRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: expressionLib
+      type: string[]?
+      doc: |
+        Additional code fragments that will also be inserted
+        before executing the expression code.  Allows for function definitions that may
+        be called from CWL expressions.
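+
+        For example, a minimal sketch (the function and parameter names are
+        illustrative) of a helper function made available to expressions:
+
+        ```
+        requirements:
+          - class: InlineJavascriptRequirement
+            expressionLib:
+              - "function twice(n) { return 2 * n; }"
+        arguments: ["-n", $(twice(inputs.n))]
+        ```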
+
+
+- name: SchemaDefRequirement
+  type: record
+  extends: ProcessRequirement
+  doc: |
+    This field consists of an array of type definitions which must be used when
+    interpreting the `inputs` and `outputs` fields.  When a `type` field
+    contains an IRI, the implementation must check if the type is defined in
+    `schemaDefs` and use that definition.  If the type is not found in
+    `schemaDefs`, it is an error.  The entries in `schemaDefs` must be
+    processed in the order listed such that later schema definitions may refer
+    to earlier schema definitions.
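+
+    For example, a minimal sketch assuming a hypothetical file `my-types.yml`
+    that defines a type named `HelloType`:
+
+    ```
+    requirements:
+      - class: SchemaDefRequirement
+        types:
+          - $import: my-types.yml
+    inputs:
+      - id: greeting
+        type: "my-types.yml#HelloType"
+    ```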
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'SchemaDefRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: types
+      type:
+        type: array
+        items: InputSchema
+      doc: The list of type definitions.
diff --git a/cwltool/schemas/draft-3/README.md b/cwltool/schemas/v1.0/README.md
similarity index 92%
copy from cwltool/schemas/draft-3/README.md
copy to cwltool/schemas/v1.0/README.md
index 142b728..1e0bad6 100644
--- a/cwltool/schemas/draft-3/README.md
+++ b/cwltool/schemas/v1.0/README.md
@@ -1,11 +1,9 @@
-# Common Workflow Language Specifications, draft-3
+# Common Workflow Language Specifications, v1.0
 
 The CWL specifications are divided up into several documents.
 
-<!--
 The [User Guide](UserGuide.html) provides a gentle introduction to writing CWL
 command line tools and workflows.
--->
 
 The [Command Line Tool Description Specification](CommandLineTool.html)
 specifies the document schema and execution semantics for wrapping and
diff --git a/cwltool/schemas/v1.0/UserGuide.yml b/cwltool/schemas/v1.0/UserGuide.yml
new file mode 100644
index 0000000..9e3fd52
--- /dev/null
+++ b/cwltool/schemas/v1.0/UserGuide.yml
@@ -0,0 +1,869 @@
+- name: userguide
+  type: documentation
+  doc:
+    - $include: userguide-intro.md
+
+    - |
+      # Wrapping Command Line Tools
+
+    - |
+      ## First example
+
+      The simplest "hello world" program.  This accepts one input parameter,
+      writes a message to the terminal or job log, and produces no permanent
+      output.  CWL documents are written in [JSON](http://json.org) or
+      [YAML](http://yaml.org), or a mix of the two.
+
+      *1st-tool.cwl*
+      ```
+    - $include: examples/1st-tool.cwl
+    - |
+      ```
+
+      Use a YAML object in a separate file to describe the input of a run:
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner 1st-tool.cwl echo-job.yml
+      [job 140199012414352] $ echo 'Hello world!'
+      Hello world!
+      Final process status is success
+      ```
+
+      What's going on here?  Let's break it down:
+
+      ```
+      cwlVersion: v1.0
+      class: CommandLineTool
+      ```
+
+      The `cwlVersion` field indicates the version of the CWL spec used by the
+      document.  The `class` field indicates this document describes a command
+      line tool.
+
+      ```
+      baseCommand: echo
+      ```
+
+      The `baseCommand` field provides the name of the program that will
+      actually run (`echo`).
+
+      ```
+      inputs:
+        message:
+          type: string
+          inputBinding:
+            position: 1
+      ```
+
+      The `inputs` section describes the inputs of the tool.  This is a list of input
+      parameters and each parameter includes an identifier, a data type, and
+      optionally an `inputBinding` which describes how this input parameter
+      should appear on the command line.  In this example, the `position` field
+      indicates where it should appear on the command line.
+
+      ```
+      outputs: []
+      ```
+
+      This tool has no formal output, so the `outputs` section is an empty list.
+
+    - |
+      ## Essential input parameters
+
+      The `inputs` of a tool is a list of input parameters that control how to
+      run the tool.  Each parameter has an `id` for the name of the parameter,
+      and a `type` describing what types of values are valid for that
+      parameter.
+
+      Available primitive types are *boolean*, *string*, *int*, *long*,
+      *float*, *double*, and *null*; complex types are *array* and *record*;
+      in addition there are special types *File*, *Directory* and *Any*.
+
+      The following example demonstrates some input parameters with different
+      types and appearing on the command line in different ways:
+
+
+      *inp.cwl*
+      ```
+    - $include: examples/inp.cwl
+    - |
+      ```
+
+      *inp-job.yml*
+      ```
+    - $include: examples/inp-job.yml
+    - |
+      ```
+
+      Notice that "example_file", as a `File` type, must be provided as an
+      object with the fields `class: File` and `path`.
+
+      Next, create a whale.txt and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ touch whale.txt
+      $ cwl-runner inp.cwl inp-job.yml
+      [job 140020149614160] /home/example$ echo -f -i42 --example-string hello --file=/home/example/whale.txt
+      -f -i42 --example-string hello --file=/home/example/whale.txt
+      Final process status is success
+      ```
+
+      The field `inputBinding` is optional and indicates whether and how the
+      input parameter should appear on the tool's command line.  If
+      `inputBinding` is missing, the parameter does not appear on the command
+      line.  Let's look at each example in detail.
+
+      ```
+      example_flag:
+        type: boolean
+        inputBinding:
+          position: 1
+          prefix: -f
+      ```
+
+      Boolean types are treated as a flag.  If the input parameter
+      "example_flag" is "true", then `prefix` will be added to the
+      command line.  If false, no flag is added.
+
+      ```
+      example_string:
+        type: string
+        inputBinding:
+          position: 3
+          prefix: --example-string
+      ```
+
+      String types appear on the command line as literal values.  The `prefix`
+      is optional; if provided, it appears as a separate argument on the
+      command line before the parameter value.  In the example above, this is
+      rendered as `--example-string hello`.
+
+      ```
+      example_int:
+        type: int
+        inputBinding:
+          position: 2
+          prefix: -i
+          separate: false
+      ```
+
+      Integer (and floating point) types appear on the command line with
+      decimal text representation.  When the option `separate` is false (the
+      default value is true), the prefix and value are combined into a single
+      argument.  In the example above, this is rendered as `-i42`.
+
+
+      ```
+      example_file:
+        type: File?
+        inputBinding:
+          prefix: --file=
+          separate: false
+          position: 4
+      ```
+
+      File types appear on the command line as the path to the file.  When the
+      parameter type ends with a question mark `?` it indicates that the
+      parameter is optional.  In the example above, this is rendered as
+      `--file=/home/example/whale.txt`.  However, if the "example_file"
+      parameter were not provided in the input, nothing would appear on the
+      command line.
+
+      Input files are read-only.  If you wish to update an input file, you must
+      first copy it to the output directory.
+
+      The value of `position` is used to determine where the parameter should
+      appear on the command line.  Positions are relative to one another, not
+      absolute.  As a result, positions do not have to be sequential; three
+      parameters with positions `[1, 3, 5]` will result in the same command
+      line as `[1, 2, 3]`.  More than one parameter can have the same position
+      (ties are broken using the parameter name), and the position field itself
+      is optional.  The default position is 0.
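+
+      For instance, a sketch (parameter names are illustrative) where
+      positions `[1, 3, 5]` produce the same ordering that `[1, 2, 3]` would:
+
+      ```
+      inputs:
+        alpha:
+          type: string
+          inputBinding: {position: 1}
+        beta:
+          type: string
+          inputBinding: {position: 3}
+        gamma:
+          type: string
+          inputBinding: {position: 5}
+      ```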
+
+      The `baseCommand` field always comes before parameters.
+
+    - |
+      ## Returning output files
+
+      The `outputs` of a tool is a list of output parameters that should be
+      returned after running the tool.  Each parameter has an `id` for the
+      name of the parameter, and a `type` describing what types of values are
+      valid for that parameter.
+
+      When a tool runs under CWL, the starting working directory is the
+      designated output directory.  The underlying tool or script must record
+      its results in the form of files created in the output directory.  The
+      output parameters returned by the CWL tool are either the output files
+      themselves, or come from examining the content of those files.
+
+      *tar.cwl*
+      ```
+    - $include: examples/tar.cwl
+    - |
+      ```
+
+      *tar-job.yml*
+      ```
+    - $include: examples/tar-job.yml
+    - |
+      ```
+
+      Next, create a tar file for the example and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+      ```
+      $ touch hello.txt && tar -cvf hello.tar hello.txt
+      $ cwl-runner tar.cwl tar-job.yml
+      [job 139868145165200] $ tar xf /home/example/hello.tar
+      Final process status is success
+      {
+      "example_out": {
+        "location": "hello.txt",
+        "size": 13,
+        "class": "File",
+        "checksum": "sha1$47a013e660d408619d894b20806b1d5086aab03b"
+        }
+      }
+      ```
+
+      The field `outputBinding` describes how to set the value of each
+      output parameter.
+
+      ```
+      outputs:
+        - id: example_out
+          type: File
+          outputBinding:
+            glob: hello.txt
+      ```
+
+      The `glob` field consists of the name of a file in the output directory.
+      If you don't know the name of the file in advance, you can use a wildcard
+      pattern.
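+
+      For example, a sketch (names are illustrative) that captures any `.txt`
+      file produced in the output directory:
+
+      ```
+      outputs:
+        example_out:
+          type: File[]
+          outputBinding:
+            glob: "*.txt"
+      ```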
+
+    - |
+      ## Capturing a tool's standard output stream
+
+      To capture a tool's standard output stream, add the `stdout` field with
+      the name of the file where the output stream should go.  Then add `type:
+      stdout` on the corresponding output parameter.
+
+      *stdout.cwl*
+      ```
+    - $include: examples/stdout.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner stdout.cwl echo-job.yml
+      [job 140199012414352] $ echo 'Hello world!' > output.txt
+      Final process status is success
+      {
+      "output": {
+        "location": "output.txt",
+        "size": 13,
+        "class": "File",
+        "checksum": "sha1$47a013e660d408619d894b20806b1d5086aab03b"
+        }
+      }
+      $ cat output.txt
+      Hello world!
+      ```
+
+    - |
+      ## Parameter references
+
+      In a previous example, we extracted a file using the "tar" program.
+      However, that example was very limited because it assumed that the file
+      we were interested in was called "hello.txt".  In this example, you will
+      see how to reference the value of input parameters dynamically from other
+      fields.
+
+      *tar-param.cwl*
+      ```
+    - $include: examples/tar-param.cwl
+    - |
+      ```
+
+      *tar-param-job.yml*
+      ```
+    - $include: examples/tar-param-job.yml
+    - |
+      ```
+
+      Create your input files and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+      ```
+      $ rm hello.tar || true && touch goodbye.txt && tar -cvf hello.tar goodbye.txt
+      $ cwl-runner tar-param.cwl tar-param-job.yml
+      [job 139868145165200] $ tar xf /home/example/hello.tar goodbye.txt
+      Final process status is success
+      {
+      "example_out": {
+        "location": "goodbye.txt",
+        "size": 24,
+        "class": "File",
+        "checksum": "sha1$dd0a4c4c49ba43004d6611771972b6cf969c1c01"
+        }
+      }
+      ```
+
+      Certain fields permit parameter references which are enclosed in `$(...)`.
+      These are evaluated and replaced with the value being referenced.
+
+      ```
+      outputs:
+        example_out:
+          type: File
+          outputBinding:
+            glob: $(inputs.extractfile)
+      ```
+
+      References are written using a subset of Javascript syntax.  In this
+      example, `$(inputs.extractfile)`, `$(inputs["extractfile"])`, and
+      `$(inputs['extractfile'])` are equivalent.
+
+      The value of the "inputs" variable is the input object provided when the
+      CWL tool was invoked.
+
+      Note that because File parameters are objects, to get the path to an
+      input file you must reference the `path` field on a file object; to
+      reference the path to the tar file in the above example you would write
+      `$(inputs.tarfile.path)`.
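+
+      For instance, a minimal sketch (the tool and flags are illustrative)
+      that passes a file's path through `arguments`:
+
+      ```
+      baseCommand: tar
+      arguments: ["xf", $(inputs.tarfile.path)]
+      ```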
+
+    - |
+      ## Running tools inside Docker
+
+      [Docker](http://docker.io) containers simplify software installation by providing a complete
+      known-good runtime for software and its dependencies.  However,
+      containers are also purposefully isolated from the host system, so in
+      order to run a tool inside a Docker container there is additional work to
+      ensure that input files are available inside the container and output
+      files can be recovered from the container.  CWL can perform this work
+      automatically, allowing you to use Docker to simplify your software
+      management while avoiding the complexity of invoking and managing Docker
+      containers.
+
+      This example runs a simple Node.js script inside a Docker container.
+
+      *docker.cwl*
+      ```
+    - $include: examples/docker.cwl
+    - |
+      ```
+
+      *docker-job.yml*
+      ```
+    - $include: examples/docker-job.yml
+    - |
+      ```
+
+      Create a hello.js and invoke `cwl-runner` with the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ echo "console.log(\"Hello World\");" > hello.js
+      $ cwl-runner docker.cwl docker-job.yml
+      [job 140259721854416] /home/example$ docker run -i --volume=/home/example/hello.js:/var/lib/cwl/job369354770_examples/hello.js:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpDLs5hm:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp node:slim node /var/lib/cwl/job369354770_examples/hello.js
+      Hello world!
+      Final process status is success
+      ```
+
+      Notice the CWL runner has constructed a Docker command line to run the
+      script.  One of the responsibilities of the CWL runner is to adjust the
+      paths of input files to reflect the location where they appear inside
+      the container.  In this example, the path to the script `hello.js` is
+      `/home/example/hello.js` outside the container but
+      `/var/lib/cwl/job369354770_examples/hello.js` inside the container, as
+      reflected in the invocation of the `node` command.
+
+    - |
+      ## Additional command line arguments and runtime parameters
+
+      Sometimes tools require additional command line options that don't
+      correspond exactly to input parameters.
+
+      In this example, we will wrap the Java compiler to compile a Java source
+      file to a class file.  By default, `javac` will create the class files in
+      the same directory as the source file.  However, CWL input files (and the
+      directories in which they appear) may be read-only, so we need to
+      instruct javac to write the class file to the designated output directory
+      instead.
+
+      *arguments.cwl*
+      ```
+    - $include: examples/arguments.cwl
+    - |
+      ```
+
+      *arguments-job.yml*
+      ```
+    - $include: examples/arguments-job.yml
+    - |
+      ```
+
+      Now create a sample Java file and invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ echo "public class Hello {}" > Hello.java
+      $ cwl-runner arguments.cwl arguments-job.yml
+      [job arguments.cwl] /tmp/tmpwYALo1$ docker \
+       run \
+       -i \
+       --volume=/home/peter/work/common-workflow-language/v1.0/examples/Hello.java:/var/lib/cwl/stg8939ac04-7443-4990-a518-1855b2322141/Hello.java:ro \
+       --volume=/tmp/tmpwYALo1:/var/spool/cwl:rw \
+       --volume=/tmp/tmpptIAJ8:/tmp:rw \
+       --workdir=/var/spool/cwl \
+       --read-only=true \
+       --user=1001 \
+       --rm \
+       --env=TMPDIR=/tmp \
+       --env=HOME=/var/spool/cwl \
+       java:7 \
+       javac \
+       -d \
+       /var/spool/cwl \
+       /var/lib/cwl/stg8939ac04-7443-4990-a518-1855b2322141/Hello.java
+      Final process status is success
+      {
+        "classfile": {
+          "size": 416,
+          "location": "/home/example/Hello.class",
+          "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+          "class": "File"
+        }
+      }
+
+      ```
+
+      Here we use the `arguments` field to add an additional argument to the
+      command line that isn't tied to a specific input parameter.
+
+      ```
+      arguments: ["-d", $(runtime.outdir)]
+      ```
+
+      This example references a runtime parameter.  Runtime parameters
+      provide information about the hardware or software environment when the
+      tool is actually executed.  The `$(runtime.outdir)` parameter is the path
+      to the designated output directory.  Other parameters include
+      `$(runtime.tmpdir)`, `$(runtime.ram)`, `$(runtime.cores)`,
+      `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
+      the [Runtime Environment](CommandLineTool.html#Runtime_environment)
+      section of the CWL specification for details.
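+
+      For instance, a sketch (the flags are illustrative) that passes runtime
+      values on the command line:
+
+      ```
+      arguments: ["--scratch", $(runtime.tmpdir), "--threads", $(runtime.cores)]
+      ```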
+
+    - |
+      ## Array inputs
+
+      It is easy to add arrays of input parameters to be represented on the
+      command line.  To specify an array parameter, the array definition is
+      nested under the `type` field with `type: array` and `items` defining
+      the valid data types that may appear in the array.
+
+      *array-inputs.cwl*
+      ```
+    - $include: examples/array-inputs.cwl
+    - |
+      ```
+
+      *array-inputs-job.yml*
+      ```
+    - $include: examples/array-inputs-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner array-inputs.cwl array-inputs-job.yml
+      [job 140334923640912] /home/example$ echo -A one two three -B=four -B=five -B=six -C=seven,eight,nine
+      -A one two three -B=four -B=five -B=six -C=seven,eight,nine
+      Final process status is success
+      {}
+      ```
+
+      The `inputBinding` can appear either on the outer array parameter
+      definition or the inner array element definition, and these produce
+      different behavior when constructing the command line, as shown above.
+      In addition, the `itemSeparator` field, if provided, specifies that array
+      values should be concatenated into a single argument separated by the
+      item separator string.
+
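+      For example, a sketch of a single `inputs` entry (names are
+      illustrative) joining array values into one argument, here rendered as
+      `-C=seven,eight,nine`:
+
+      ```
+      letters:
+        type: string[]
+        inputBinding:
+          prefix: -C=
+          itemSeparator: ","
+          separate: false
+      ```
+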
+      You can specify arrays of arrays, arrays of records, and other complex
+      types.
+
+    - |
+      ## Array outputs
+
+      You can also capture multiple output files into an array of files using `glob`.
+
+      *array-outputs.cwl*
+      ```
+    - $include: examples/array-outputs.cwl
+    - |
+      ```
+
+      *array-outputs-job.yml*
+      ```
+    - $include: examples/array-outputs-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner array-outputs.cwl array-outputs-job.yml
+      [job 140190876078160] /home/example$ touch foo.txt bar.dat baz.txt
+      Final process status is success
+      {
+        "output": [
+          {
+            "size": 0,
+            "location": "/home/peter/work/common-workflow-language/draft-3/examples/foo.txt",
+            "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+            "class": "File"
+          },
+          {
+            "size": 0,
+            "location": "/home/peter/work/common-workflow-language/draft-3/examples/baz.txt",
+            "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+            "class": "File"
+          }
+        ]
+      }
+      ```
+
+    - |
+      ## Record inputs, dependent and mutually exclusive parameters
+
+      Sometimes an underlying tool has several arguments that must be provided
+      together (they are dependent) or several arguments that cannot be
+      provided together (they are exclusive).  You can use records and type
+      unions to group parameters together to describe these two conditions.
+
+      *record.cwl*
+      ```
+    - $include: examples/record.cwl
+    - |
+      ```
+
+      *record-job1.yml*
+      ```
+    - $include: examples/record-job1.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job1.yml
+      Workflow error:
+        Error validating input record, could not validate field `dependent_parameters` because
+        missing required field `itemB`
+      ```
+
+      In the first example, you can't provide `itemA` without also providing `itemB`.
+
+      *record-job2.yml*
+      ```
+    - $include: examples/record-job2.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job2.yml
+      [job 140566927111376] /home/example$ echo -A one -B two -C three
+      -A one -B two -C three
+      Final process status is success
+      {}
+      ```
+
+      In the second example, `itemC` and `itemD` are exclusive, so only `itemC`
+      is added to the command line and `itemD` is ignored.
+
+      *record-job3.yml*
+      ```
+    - $include: examples/record-job3.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job3.yml
+      [job 140606932172880] /home/example$ echo -A one -B two -D four
+      -A one -B two -D four
+      Final process status is success
+      {}
+      ```
+
+      In the third example, only `itemD` is provided, so it appears on the
+      command line.
+
+    - |
+      ## Environment variables
+
+      Tools run in a restricted environment and do not inherit most environment
+      variables from the parent process.  You can set environment variables for
+      the tool using `EnvVarRequirement`.
+
+      *env.cwl*
+      ```
+    - $include: examples/env.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner env.cwl echo-job.yml
+      [job 140710387785808] /home/example$ env
+      PATH=/bin:/usr/bin:/usr/local/bin
+      HELLO=Hello world!
+      TMPDIR=/tmp/tmp63Obpk
+      Final process status is success
+      {}
+      ```
+
+    - |
+      ## Javascript expressions
+
+      If you need to manipulate input parameters, include the requirement
+      `InlineJavascriptRequirement` and then, anywhere a parameter reference is
+      legal, you can provide a fragment of Javascript that will be evaluated by
+      the CWL runner.
+
+      *expression.cwl*
+      ```
+    - $include: examples/expression.cwl
+    - |
+      ```
+
+      ```
+      $ cwl-runner expression.cwl empty.yml
+      [job 140000594593168] /home/example$ echo -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
+      -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
+      Final process status is success
+      {}
+      ```
+
+      You can only use expressions in certain fields.  These are: `filename`,
+      `fileContent`, `envValue`, `valueFrom`, `glob`, `outputEval`, `stdin`,
+      `stdout`, `coresMin`, `coresMax`, `ramMin`, `ramMax`, `tmpdirMin`,
+      `tmpdirMax`, `outdirMin`, and `outdirMax`.
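+
+      For instance, a sketch (names are illustrative) that uses expressions in
+      two of these fields; the `${...}` block form requires
+      `InlineJavascriptRequirement`:
+
+      ```
+      stdout: $(inputs.sample).log
+      arguments:
+        - prefix: -n
+          valueFrom: ${ return inputs.count + 1; }
+      ```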
+
+    - |
+      ## Creating files at runtime
+
+      Sometimes you need to create a file on the fly from input parameters,
+      such as for tools which expect to read their input configuration from a
+      file rather than from command line parameters.  To do this, use
+      `InitialWorkDirRequirement`.
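+
+      At its core, such a tool declares an `InitialWorkDirRequirement` with a
+      listing entry giving the file name (`entryname`) and contents (`entry`);
+      a minimal sketch (names are illustrative):
+
+      ```
+      requirements:
+        - class: InitialWorkDirRequirement
+          listing:
+            - entryname: example.conf
+              entry: |
+                CONFIGVAR=$(inputs.message)
+      ```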
+
+      *createfile.cwl*
+      ```
+    - $include: examples/createfile.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwltool createfile.cwl echo-job.yml
+      [job 140528604979344] /home/example$ cat example.conf
+      CONFIGVAR=Hello world!
+      Final process status is success
+      {}
+      ```
+
+    - |
+      ## Staging input files in the output directory
+
+      Normally, input files are located in a read-only directory separate from
+      the output directory.  This causes problems if the underlying tool
+      expects to write its output files alongside the input file in the same
+      directory.  You can use `InitialWorkDirRequirement` to stage input files into the
+      output directory.  In this example, we use a Javascript expression to
+      extract the base name of the input file from its leading directory path.
+
+      *linkfile.cwl*
+      ```
+    - $include: examples/linkfile.cwl
+    - |
+      ```
+
+      *arguments-job.yml*
+      ```
+    - $include: examples/arguments-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner linkfile.cwl arguments-job.yml
+      [job 139928309171664] /home/example$ docker run -i --volume=/home/example/Hello.java:/var/lib/cwl/job557617295_examples/Hello.java:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpmNbApw:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac Hello.java
+      Final process status is success
+      {
+      "classfile": {
+        "size": 416,
+        "location": "/home/example/Hello.class",
+        "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+        "class": "File"
+        }
+      }
+      ```
+
+    - |
+      # Writing Workflows
+
+      ## First workflow
+
+      This workflow extracts a java source file from a tar file and then
+      compiles it.
+
+      *1st-workflow.cwl*
+      ```
+    - $include: examples/1st-workflow.cwl
+    - |
+      ```
+
+      Use a YAML object in a separate file to describe the input of a run:
+
+      *1st-workflow-job.yml*
+      ```
+    - $include: examples/1st-workflow-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ echo "public class Hello {}" > Hello.java && tar -cvf hello.tar Hello.java
+      $ cwl-runner 1st-workflow.cwl 1st-workflow-job.yml
+      [job untar] /tmp/tmp94qFiM$ tar xf /home/example/hello.tar Hello.java
+      [step untar] completion status is success
+      [job compile] /tmp/tmpu1iaKL$ docker run -i --volume=/tmp/tmp94qFiM/Hello.java:/var/lib/cwl/job301600808_tmp94qFiM/Hello.java:ro --volume=/tmp/tmpu1iaKL:/var/spool/cwl:rw --volume=/tmp/tmpfZnNdR:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac -d /var/spool/cwl /var/lib/cwl/job301600808_tmp94qFiM/Hello.java
+      [step compile] completion status is success
+      [workflow 1st-workflow.cwl] outdir is /home/example
+      Final process status is success
+      {
+        "classout": {
+          "location": "/home/example/Hello.class",
+          "checksum": "sha1$e68df795c0686e9aa1a1195536bd900f5f417b18",
+          "class": "File",
+          "size": 416
+        }
+      }
+      ```
+
+      What's going on here?  Let's break it down:
+
+      ```
+      cwlVersion: v1.0
+      class: Workflow
+      ```
+
+      The `cwlVersion` field indicates the version of the CWL spec used by the
+      document.  The `class` field indicates this document describes a workflow.
+
+
+      ```
+      inputs:
+        inp: File
+        ex: string
+      ```
+
+      The `inputs` section describes the inputs of the workflow.  This is a
+      list of input parameters where each parameter consists of an identifier
+      and a data type.  These parameters can be used as sources for input to
+      specific workflow steps.
+
+      ```
+      outputs:
+        classout:
+          type: File
+          outputSource: compile/classfile
+      ```
+
+      The `outputs` section describes the outputs of the workflow.  This is a
+      list of output parameters where each parameter consists of an identifier
+      and a data type.  The `outputSource` field connects the output parameter `classfile`
+      of the `compile` step to the workflow output parameter `classout`.
+
+      ```
+      steps:
+        untar:
+          run: tar-param.cwl
+          in:
+            tarfile: inp
+            extractfile: ex
+          out: [example_out]
+      ```
+
+      The `steps` section describes the actual steps of the workflow.  In this
+      example, the first step extracts a file from a tar file, and the second
+      step compiles the file from the first step using the Java compiler.
+      Workflow steps are not necessarily run in the order they are listed;
+      instead, the order is determined by the dependencies between steps (using
+      `source`).  In addition, workflow steps which do not depend on one
+      another may run in parallel.
+
+      The first step, `untar`, runs `tar-param.cwl` (described previously in
+      [Parameter references](#Parameter_references)).  This tool has two input
+      parameters, `tarfile` and `extractfile`, and one output parameter,
+      `example_out`.
+
+      The `in` section of the workflow step connects these two input
+      parameters to the inputs of the workflow, `inp` and `ex`, using
+      `source`.  This means that when the workflow step is executed, the values
+      assigned to `inp` and `ex` will be used for the parameters `tarfile`
+      and `extractfile` in order to run the tool.
+
+      The `out` section of the workflow step lists the output parameters
+      that are expected from the tool.
+
+      ```
+        compile:
+          run: arguments.cwl
+          in:
+            src: untar/example_out
+          out: [classfile]
+      ```
+
+      The second step, `compile`, depends on the results from the first step by
+      connecting the input parameter `src` to the output parameter of `untar`
+      using `untar/example_out`.  The output of this step, `classfile`, is
+      connected to the `outputs` section for the Workflow, described above.
diff --git a/cwltool/schemas/draft-3/Workflow.yml b/cwltool/schemas/v1.0/Workflow.yml
similarity index 67%
copy from cwltool/schemas/draft-3/Workflow.yml
copy to cwltool/schemas/v1.0/Workflow.yml
index 066a66e..26bde8e 100644
--- a/cwltool/schemas/draft-3/Workflow.yml
+++ b/cwltool/schemas/v1.0/Workflow.yml
@@ -9,10 +9,10 @@ $graph:
   type: documentation
   doc:
     - |
-      # Common Workflow Language (CWL) Workflow Description, draft 3
+      # Common Workflow Language (CWL) Workflow Description, v1.0
 
       This version:
-        * https://w3id.org/cwl/draft-3/
+        * https://w3id.org/cwl/v1.0/
 
       Current version:
         * https://w3id.org/cwl/
@@ -22,32 +22,47 @@ $graph:
     - |
       # Abstract
 
-      A Workflow is an analysis task represented by a directed graph describing
-      a sequence of operations that transform an input data set to output.
-      This specification defines the Common Workflow Language (CWL) Workflow
-      description, a vendor-neutral standard for representing workflows
-      intended to be portable across a variety of computing platforms.
+      One way to define a workflow is: an analysis task represented by a
+      directed graph describing a sequence of operations that transform an
+      input data set to output. This specification defines the Common Workflow
+      Language (CWL) Workflow description, a vendor-neutral standard for
+      representing workflows intended to be portable across a variety of
+      computing platforms.
 
     - {$include: intro.md}
 
     - |
 
-      ## Introduction to draft 3
-
-      This specification represents the third milestone of the CWL group.  Since
-      draft-2, this draft introduces the following changes and additions:
-
-        * Greatly simplified naming within a document with scoped identifiers, as
-          described in the [Schema Salad specification](SchemaSalad.html).
-        * The draft-2 concept of pluggable expression engines has been replaced
-          by a [streamlined expression syntax)[#Parameter_references]
-          and standardization on [Javascript](#Expressions).
-        * [File](#File) objects can now include a `format` field to indicate
-          the file type.
-        * The addition of [MultipleInputFeatureRequirement](#MultipleInputFeatureRequirement).
-        * The addition of [StepInputExpressionRequirement](#StepInputExpressionRequirement).
-        * The separation of Workflow and CommandLineTool components into
-          separate specifications.
+      ## Introduction to v1.0
+
+      This specification represents the first full release from the CWL group.
+      Since draft-3, this draft introduces the following changes and additions:
+
+        * The `inputs` and `outputs` fields have been renamed `in` and `out`.
+        * Syntax simplifications: denoted by the `map<>` syntax. Example: `in`
+          contains a list of items, each with an id. Now one can specify
+          a mapping of that identifier to the corresponding
+          `InputParameter`.
+          ```
+          in:
+           - id: one
+             type: string
+             doc: First input parameter
+           - id: two
+             type: int
+             doc: Second input parameter
+          ```
+          can be
+          ```
+          in:
+           one:
+            type: string
+            doc: First input parameter
+           two:
+            type: int
+            doc: Second input parameter
+          ```
+        * The common field `description` has been renamed to `doc`.
 
       ## Purpose
 
@@ -60,13 +75,42 @@ $graph:
 
     - {$include: concepts.md}
 
+- name: ExpressionToolOutputParameter
+  type: record
+  extends: OutputParameter
+  fields:
+    - name: type
+      type:
+        - "null"
+        - "#CWLType"
+        - "#OutputRecordSchema"
+        - "#OutputEnumSchema"
+        - "#OutputArraySchema"
+        - string
+        - type: array
+          items:
+            - "#CWLType"
+            - "#OutputRecordSchema"
+            - "#OutputEnumSchema"
+            - "#OutputArraySchema"
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
 
 - type: record
   name: ExpressionTool
-  extends: "#Process"
+  extends: Process
+  specialize:
+    - specializeFrom: "#OutputParameter"
+      specializeTo: "#ExpressionToolOutputParameter"
   documentRoot: true
   doc: |
-    Execute an expression as a process step.
+    Execute an expression as a Workflow step.
   fields:
     - name: "class"
       jsonldPredicate:
@@ -74,7 +118,7 @@ $graph:
         "_type": "@vocab"
       type: string
     - name: expression
-      type: [string, "#Expression"]
+      type: [string, Expression]
       doc: |
         The expression to execute.  The expression must return a JSON object which
         matches the output parameters of the ExpressionTool.
@@ -90,12 +134,52 @@ $graph:
 
 - name: WorkflowOutputParameter
   type: record
-  extends: ["#OutputParameter", "#Sink"]
+  extends: OutputParameter
   docParent: "#Workflow"
   doc: |
     Describe an output parameter of a workflow.  The parameter must be
     connected to one or more parameters defined in the workflow that will
     provide the value of the output parameter.
+  fields:
+    - name: outputSource
+      doc: |
+        Specifies one or more workflow parameters that supply the value of
+        the output parameter.
+      jsonldPredicate:
+        "_id": "cwl:outputSource"
+        "_type": "@id"
+        refScope: 0
+      type:
+        - string?
+        - string[]?
+    - name: linkMerge
+      type: ["null", "#LinkMergeMethod"]
+      jsonldPredicate: "cwl:linkMerge"
+      doc: |
+        The method to use to merge multiple sources into a single array.
+        If not specified, the default method is "merge_nested".
+    - name: type
+      type:
+        - "null"
+        - "#CWLType"
+        - "#OutputRecordSchema"
+        - "#OutputEnumSchema"
+        - "#OutputArraySchema"
+        - string
+        - type: array
+          items:
+            - "#CWLType"
+            - "#OutputRecordSchema"
+            - "#OutputEnumSchema"
+            - "#OutputArraySchema"
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
 
 
 - name: Sink
@@ -105,17 +189,17 @@ $graph:
     - name: source
       doc: |
         Specifies one or more workflow parameters that will provide input to
-        the underlying process parameter.
+        the underlying step parameter.
       jsonldPredicate:
         "_id": "cwl:source"
         "_type": "@id"
+        refScope: 2
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
     - name: linkMerge
-      type: ["null", "#LinkMergeMethod"]
+      type: LinkMergeMethod?
+      jsonldPredicate: "cwl:linkMerge"
       doc: |
         The method to use to merge multiple inbound links into a single array.
         If not specified, the default method is "merge_nested".
@@ -123,12 +207,12 @@ $graph:
 
 - type: record
   name: WorkflowStepInput
-  extends: "#Sink"
+  extends: Sink
   docParent: "#WorkflowStep"
   doc: |
     The input of a workflow step connects an upstream parameter (from the
     workflow inputs, or the outputs of other workflows steps) with the input
-    parameters of the underlying process.
+    parameters of the underlying step.
 
     ## Input object
 
@@ -196,10 +280,12 @@ $graph:
         the value of the parameter(s) specified in the `source` field, or
         null if there is no `source` field.
 
-        The value of `inputs` in the parameter reference or expression is the
-        input object to the workflow step after assigning the `source` values,
-        but before evaluating any step with `valueFrom`.  The order of
-        evaluating `valueFrom` among step input parameters is undefined.
+        The value of `inputs` in the parameter reference or expression must be
+        the input object to the workflow step after assigning the `source`
+        values and then scattering.  The order of evaluating `valueFrom` among
+        step input parameters is undefined and the result of evaluating
+        `valueFrom` on a parameter must not be visible to evaluation of
+        `valueFrom` on other parameters.
 
 
 - type: record
@@ -235,9 +321,9 @@ $graph:
   docParent: "#Workflow"
   doc: |
     A workflow step is an executable element of a workflow.  It specifies the
-    underlying process implementation (such as `CommandLineTool`) in the `run`
-    field and connects the input and output parameters of the underlying
-    process to workflow parameters.
+    underlying process implementation (such as `CommandLineTool` or another
+    `Workflow`) in the `run` field and connects the input and output parameters
+    of the underlying process to workflow parameters.
 
     # Scatter/gather
 
@@ -287,31 +373,34 @@ $graph:
       type: string
       jsonldPredicate: "@id"
       doc: "The unique identifier for this workflow step."
-    - name: inputs
-      type:
-        type: array
-        items: "#WorkflowStepInput"
-      jsonldPredicate: "cwl:inputs"
+    - name: in
+      type: WorkflowStepInput[]
+      jsonldPredicate:
+        _id: "cwl:in"
+        mapSubject: id
+        mapPredicate: source
       doc: |
         Defines the input parameters of the workflow step.  The process is ready to
         run when all required input parameters are associated with concrete
         values.  Input parameters include a schema for each parameter which is
         used to validate the input object.  It may also be used build a user
         interface for constructing the input object.
-    - name: outputs
+    - name: out
       type:
-        type: array
-        items: "#WorkflowStepOutput"
-      jsonldPredicate: "cwl:outputs"
+        - type: array
+          items: [string, WorkflowStepOutput]
+      jsonldPredicate:
+        _id: "cwl:out"
+        _type: "@id"
+        identity: true
       doc: |
         Defines the parameters representing the output of the process.  May be
         used to generate and/or validate the output object.
     - name: requirements
-      type:
-        - "null"
-        - type: array
-          items: "#ProcessRequirement"
-      jsonldPredicate: "cwl:requirements"
+      type: ProcessRequirement[]?
+      jsonldPredicate:
+        _id: "cwl:requirements"
+        mapSubject: class
       doc: |
         Declares requirements that apply to either the runtime environment or the
         workflow engine that must be met in order to execute this workflow step.  If
@@ -320,33 +409,26 @@ $graph:
         error and the implementation must not attempt to run the process,
         unless overridden at user option.
     - name: hints
-      type:
-        - "null"
-        - type: array
-          items: "Any"
-      jsonldPredicate: "cwl:hints"
+      type: Any[]?
+      jsonldPredicate:
+        _id: "cwl:hints"
+        noLinkCheck: true
+        mapSubject: class
       doc: |
         Declares hints applying to either the runtime environment or the
         workflow engine that may be helpful in executing this workflow step.  It is
         not an error if an implementation cannot satisfy all hints, however
         the implementation may report a warning.
-      jsonldPredicate:
-        _id: cwl:hints
-        noLinkCheck: true
     - name: label
-      type:
-        - "null"
-        - string
+      type: string?
       jsonldPredicate: "rdfs:label"
       doc: "A short, human-readable label of this process object."
-    - name: description
-      type:
-        - "null"
-        - string
+    - name: doc
+      type: string?
       jsonldPredicate: "rdfs:comment"
       doc: "A long, human-readable description of this process object."
     - name: run
-      type: [string, "#Process"]
+      type: [string, Process]
       jsonldPredicate:
         "_id": "cwl:run"
         "_type": "@id"
@@ -354,20 +436,17 @@ $graph:
         Specifies the process to run.
     - name: scatter
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       jsonldPredicate:
         "_id": "cwl:scatter"
         "_type": "@id"
         "_container": "@list"
+        refScope: 0
     - name: scatterMethod
       doc: |
         Required if `scatter` is an array of more than one element.
-      type:
-        - "null"
-        - "#ScatterMethod"
+      type: ScatterMethod?
       jsonldPredicate:
         "_id": "cwl:scatterMethod"
         "_type": "@vocab"
@@ -378,19 +457,19 @@ $graph:
   extends: "#Process"
   documentRoot: true
   specialize:
-    specializeFrom: "#OutputParameter"
-    specializeTo: "#WorkflowOutputParameter"
+    - specializeFrom: "#OutputParameter"
+      specializeTo: "#WorkflowOutputParameter"
   doc: |
     A workflow describes a set of **steps** and the **dependencies** between
-    those processes.  When a process produces output that will be consumed by a
-    second process, the first process is a dependency of the second process.
+    those steps.  When a step produces output that will be consumed by a
+    second step, the first step is a dependency of the second step.
 
     When there is a dependency, the workflow engine must execute the preceeding
-    process and wait for it to successfully produce output before executing the
-    dependent process.  If two processes are defined in the workflow graph that
-    are not directly or indirectly dependent, these processes are
-    **independent**, and may execute in any order or execute concurrently.  A
-    workflow is complete when all steps have been executed.
+    step and wait for it to successfully produce output before executing the
+    dependent step.  If two steps are defined in the workflow graph that
+    are not directly or indirectly dependent, these steps are **independent**,
+    and may execute in any order or execute concurrently.  A workflow is
+    complete when all steps have been executed.
 
     Dependencies between parameters are expressed using the `source` field on
     [workflow step input parameters](#WorkflowStepInput) and [workflow output
@@ -404,27 +483,28 @@ $graph:
 
     ## Workflow success and failure
 
-    A completed process must result in one of `success`, `temporaryFailure` or
-    `permanentFailure` states.  An implementation may choose to retry a process
+    A completed step must result in one of `success`, `temporaryFailure` or
+    `permanentFailure` states.  An implementation may choose to retry a step
     execution which resulted in `temporaryFailure`.  An implementation may
     choose to either continue running other steps of a workflow, or terminate
     immediately upon `permanentFailure`.
 
-    * If any step of a workflow execution results in `permanentFailure`, then the
-    workflow status is `permanentFailure`.
+    * If any step of a workflow execution results in `permanentFailure`, then
+    the workflow status is `permanentFailure`.
 
     * If one or more steps result in `temporaryFailure` and all other steps
     complete `success` or are not executed, then the workflow status is
     `temporaryFailure`.
 
-    * If all workflow steps are executed and complete with `success`, then the workflow
-    status is `success`.
+    * If all workflow steps are executed and complete with `success`, then the
+    workflow status is `success`.
 
     # Extensions
 
     [ScatterFeatureRequirement](#ScatterFeatureRequirement) and
     [SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) are
-    available as standard extensions to core workflow semantics.
+    available as standard [extensions](#Extensions_and_Metadata) to core
+    workflow semantics.
 
   fields:
     - name: "class"
@@ -441,33 +521,62 @@ $graph:
       type:
         - type: array
           items: "#WorkflowStep"
-
+      jsonldPredicate:
+          mapSubject: id
 
 
 - type: record
   name: SubworkflowFeatureRequirement
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicates that the workflow platform must support nested workflows in
-    the `run` field of (WorkflowStep)(#WorkflowStep).
+    the `run` field of [WorkflowStep](#WorkflowStep).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'SubworkflowFeatureRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
 
 - name: ScatterFeatureRequirement
   type: record
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicates that the workflow platform must support the `scatter` and
     `scatterMethod` fields of [WorkflowStep](#WorkflowStep).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'ScatterFeatureRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
 
 - name: MultipleInputFeatureRequirement
   type: record
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicates that the workflow platform must support multiple inbound data links
     listed in the `source` field of [WorkflowStepInput](#WorkflowStepInput).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'MultipleInputFeatureRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
 
 - type: record
   name: StepInputExpressionRequirement
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
    Indicates that the workflow platform must support the `valueFrom` field
-    of [WorkflowStepInput](#WorkflowStepInput).
\ No newline at end of file
+    of [WorkflowStepInput](#WorkflowStepInput).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'StepInputExpressionRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
diff --git a/cwltool/schemas/v1.0/concepts.md b/cwltool/schemas/v1.0/concepts.md
new file mode 100644
index 0000000..cc4df5a
--- /dev/null
+++ b/cwltool/schemas/v1.0/concepts.md
@@ -0,0 +1,388 @@
+## References to other specifications
+
+**Javascript Object Notation (JSON)**: http://json.org
+
+**JSON Linked Data (JSON-LD)**: http://json-ld.org
+
+**YAML**: http://yaml.org
+
+**Avro**: https://avro.apache.org/docs/current/spec.html
+
+**Uniform Resource Identifier (URI) Generic Syntax**: https://tools.ietf.org/html/rfc3986
+
+**Internationalized Resource Identifiers (IRIs)**:
+https://tools.ietf.org/html/rfc3987
+
+**Portable Operating System Interface (POSIX.1-2008)**: http://pubs.opengroup.org/onlinepubs/9699919799/
+
+**Resource Description Framework (RDF)**: http://www.w3.org/RDF/
+
+## Scope
+
+This document describes CWL syntax, execution, and object model.  It
+is not intended to document a CWL-specific implementation; however, it may
+serve as a reference for the behavior of conforming implementations.
+
+## Terminology
+
+The terminology used to describe CWL documents is defined in the
+Concepts section of the specification. The terms defined in the
+following list are used in building those definitions and in describing the
+actions of a CWL implementation:
+
+**may**: Conforming CWL documents and CWL implementations are permitted but
+not required to behave as described.
+
+**must**: Conforming CWL documents and CWL implementations are required to behave
+as described; otherwise they are in error.
+
+**error**: A violation of the rules of this specification; results are
+undefined. Conforming implementations may detect and report an error and may
+recover from it.
+
+**fatal error**: A violation of the rules of this specification; results are
+undefined. Conforming implementations must not continue to execute the current
+process and may report an error.
+
+**at user option**: Conforming software may or must (depending on the modal verb in
+the sentence) behave as described; if it does, it must provide users a means to
+enable or disable the behavior described.
+
+**deprecated**: Conforming software may implement a behavior for backwards
+compatibility.  Portable CWL documents should not rely on deprecated behavior.
+Behavior marked as deprecated may be removed entirely from future revisions of
+the CWL specification.
+
+# Data model
+
+## Data concepts
+
+An **object** is a data structure equivalent to the "object" type in JSON,
+consisting of an unordered set of name/value pairs (referred to here as
+**fields**), where each name is a string and each value is a string, number,
+boolean, array, or object.
+
+A **document** is a file containing a serialized object, or an array of objects.
+
+A **process** is a basic unit of computation which accepts input data,
+performs some computation, and produces output data. Examples include
+CommandLineTools, Workflows, and ExpressionTools.
+
+An **input object** is an object describing the inputs to an invocation of
+a process.
+
+An **output object** is an object describing the output resulting from an
+invocation of a process.
+
+An **input schema** describes the valid format (required fields, data types)
+for an input object.
+
+An **output schema** describes the valid format for an output object.
+
+**Metadata** is information about workflows, tools, or input items.
+
+## Syntax
+
+CWL documents must consist of an object or array of objects represented using
+JSON or YAML syntax.  Upon loading, a CWL implementation must apply the
+preprocessing steps described in the
+[Semantic Annotations for Linked Avro Data (SALAD) Specification](SchemaSalad.html).
+An implementation may formally validate the structure of a CWL document using
+SALAD schemas located at
+https://github.com/common-workflow-language/common-workflow-language/tree/master/v1.0
+
+## Identifiers
+
+If an object contains an `id` field, that is used to uniquely identify the
+object in that document.  The value of the `id` field must be unique over the
+entire document.  Identifiers may be resolved relative to the document
+base and/or other identifiers following the rules described in the
+[Schema Salad specification](SchemaSalad.html#Identifier_resolution).
+
+An implementation may choose to only honor references to object types for
+which the `id` field is explicitly listed in this specification.
+
+## Document preprocessing
+
+An implementation must resolve [$import](SchemaSalad.html#Import) and
+[$include](SchemaSalad.html#Import) directives as described in the
+[Schema Salad specification](SchemaSalad.html).
+
+Another transformation defined in Schema Salad is simplification of data type
+definitions.  A type `<T>` ending with `?` should be transformed to
+`[<T>, "null"]`, and a type `<T>` ending with `[]` should be transformed to
+`{"type": "array", "items": <T>}`.
+
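+As a non-normative illustration, this expansion fits in a few lines of
+Python (the function name `expand_type_dsl` is hypothetical, not part of
+any CWL API):
+
+```
+def expand_type_dsl(t):
+    # "<T>?" becomes the union [<T>, "null"]
+    if isinstance(t, str) and t.endswith("?"):
+        return [expand_type_dsl(t[:-1]), "null"]
+    # "<T>[]" becomes an array schema with items <T>
+    if isinstance(t, str) and t.endswith("[]"):
+        return {"type": "array", "items": expand_type_dsl(t[:-2])}
+    return t
+
+assert expand_type_dsl("string?") == ["string", "null"]
+assert expand_type_dsl("File[]") == {"type": "array", "items": "File"}
+```
+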
+## Extensions and metadata
+
+Input metadata (for example, a lab sample identifier) may be represented within
+a tool or workflow using input parameters which are explicitly propagated to
+output.  Future versions of this specification may define additional facilities
+for working with input/output metadata.
+
+Implementation extensions not required for correct execution (for example,
+fields related to GUI presentation) and metadata about the tool or workflow
+itself (for example, authorship for use in citations) may be provided as
+additional fields on any object.  Such extension fields must use a namespace
+prefix listed in the `$namespaces` section of the document as described in the
+[Schema Salad specification](SchemaSalad.html#Explicit_context).
+
+Implementation extensions which modify execution semantics must be [listed in
+the `requirements` field](#Requirements_and_hints).
+
+# Execution model
+
+## Execution concepts
+
+A **parameter** is a named symbolic input or output of a process, with an
+associated datatype or schema.  During execution, values are assigned to
+parameters to make the input object or output object used for concrete
+process invocation.
+
+A **CommandLineTool** is a process characterized by the execution of a
+standalone, non-interactive program which is invoked on some input,
+produces output, and then terminates.
+
+A **workflow** is a process characterized by multiple subprocess steps,
+where step outputs are connected to the inputs of downstream steps to
+form a directed acyclic graph, and independent steps may run concurrently.
+
+A **runtime environment** is the actual hardware and software environment when
+executing a command line tool.  It includes, but is not limited to, the
+hardware architecture, hardware resources, operating system, software runtime
+(if applicable, such as the specific Python interpreter or the specific Java
+virtual machine), libraries, modules, packages, utilities, and data files
+required to run the tool.
+
+A **workflow platform** is a specific hardware and software implementation
+capable of interpreting CWL documents and executing the processes specified by
+the document.  The responsibilities of the workflow platform may include
+scheduling process invocation, setting up the necessary runtime environment,
+making input data available, invoking the tool process, and collecting output.
+
+A workflow platform may choose to only implement the Command Line Tool
+Description part of the CWL specification.
+
+It is intended that the workflow platform has broad leeway outside of this
+specification to optimize use of computing resources and enforce policies
+not covered by this specification.  Some areas that are currently out of
+scope for CWL specification but may be handled by a specific workflow
+platform include:
+
+* Data security and permissions
+* Scheduling tool invocations on remote cluster or cloud compute nodes.
+* Using virtual machines or operating system containers to manage the runtime
+(except as described in [DockerRequirement](CommandLineTool.html#DockerRequirement)).
+* Using remote or distributed file systems to manage input and output files.
+* Transforming file paths.
+* Determining if a process has previously been executed, and if so skipping it
+and reusing previous results.
+* Pausing, resuming or checkpointing processes or workflows.
+
+Conforming CWL processes must not assume anything about the runtime
+environment or workflow platform unless explicitly declared through the use
+of [process requirements](#Requirements_and_hints).
+
+## Generic execution process
+
+The generic execution sequence of a CWL process (including workflows and
+command line tools) is as follows.
+
+1. Load, process and validate a CWL document, yielding a process object.
+2. Load input object.
+3. Validate the input object against the `inputs` schema for the process.
+4. Validate process requirements are met.
+5. Perform any further setup required by the specific process type.
+6. Execute the process.
+7. Capture results of process execution into the output object.
+8. Validate the output object against the `outputs` schema for the process.
+9. Report the output object to the process caller.
+
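+Viewed as code, this sequence is a validate-run-validate driver.  A hedged
+Python sketch follows, with a toy process object standing in for a real
+implementation (every name in it is illustrative):
+
+```
+def execute(process, inputs):
+    def validate(obj, declared):
+        # Steps 3 and 8: structural check against the declared parameters.
+        missing = [p for p in declared if p not in obj]
+        if missing:
+            raise ValueError("missing fields: %s" % missing)
+    validate(inputs, process["inputs"])      # step 3
+    outputs = process["run"](inputs)         # steps 5-7, process-specific
+    validate(outputs, process["outputs"])    # step 8
+    return outputs                           # step 9
+
+echo = {"inputs": ["message"], "outputs": ["out"],
+        "run": lambda job: {"out": job["message"]}}
+print(execute(echo, {"message": "hello"}))   # {'out': 'hello'}
+```
+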
+## Requirements and hints
+
+A **process requirement** modifies the semantics or runtime
+environment of a process.  If an implementation cannot satisfy all
+requirements, or a requirement is listed which is not recognized by the
+implementation, it is a fatal error and the implementation must not attempt
+to run the process, unless overridden at user option.
+
+A **hint** is similar to a requirement; however, it is not an error if an
+implementation cannot satisfy all hints.  The implementation may report a
+warning if a hint cannot be satisfied.
+
+Requirements are inherited.  A requirement specified in a Workflow applies
+to all workflow steps; a requirement specified on a workflow step will
+apply to the process implementation of that step and any of its substeps.
+
+If the same process requirement appears at different levels of the
+workflow, the most specific instance of the requirement is used, that is,
+an entry in `requirements` on a process implementation such as
+CommandLineTool will take precedence over an entry in `requirements`
+specified in a workflow step, and an entry in `requirements` on a workflow
+step takes precedence over the workflow.  Entries in `hints` are resolved
+the same way.
+
+Requirements override hints.  If a process implementation provides a
+process requirement in `hints` which is also provided in `requirements` by
+an enclosing workflow or workflow step, the enclosing `requirements` takes
+precedence.
+
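+These precedence rules amount to a merge from outermost to innermost scope.
+A minimal sketch, assuming each level supplies a plain list of requirement
+objects (the function name and the Docker images are illustrative):
+
+```
+def effective_requirements(*levels):
+    # Pass levels outermost first: workflow, then step, then the process
+    # implementation; more specific entries for a class overwrite outer ones.
+    merged = {}
+    for reqs in levels:
+        for req in reqs:
+            merged[req["class"]] = req
+    return merged
+
+workflow = [{"class": "DockerRequirement", "dockerPull": "debian:8"}]
+step     = [{"class": "DockerRequirement", "dockerPull": "debian:9"}]
+tool     = []
+print(effective_requirements(workflow, step, tool))
+# -> the step-level DockerRequirement wins, since the tool declares none
+```
+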
+## Parameter references
+
+Parameter references are denoted by the syntax `$(...)` and may be used in any
+field permitting the pseudo-type `Expression`, as specified by this document.
+Conforming implementations must support parameter references.  Parameter
+references use the following subset of
+[Javascript/ECMAScript 5.1](http://www.ecma-international.org/ecma-262/5.1/)
+syntax, but they are designed to not require a Javascript engine for evaluation.
+
+In the following BNF grammar, character classes and grammar rules are denoted
+in '{}', '-' denotes exclusion from a character class, '(())' denotes grouping,
+'|' denotes alternates, trailing '*' denotes zero or more repeats, '+' denotes
+one or more repeats, '/' escapes these special characters, and all other
+characters are literal values.
+
+<p>
+<table class="table">
+<tr><td>symbol::             </td><td>{Unicode alphanumeric}+</td></tr>
+<tr><td>singleq::            </td><td>[' (( {character - '} | \' ))* ']</td></tr>
+<tr><td>doubleq::            </td><td>[" (( {character - "} | \" ))* "]</td></tr>
+<tr><td>index::              </td><td>[ {decimal digit}+ ]</td></tr>
+<tr><td>segment::            </td><td>. {symbol} | {singleq} | {doubleq} | {index}</td></tr>
+<tr><td>parameter reference::</td><td>$( {symbol} {segment}*)</td></tr>
+</table>
+</p>
+
+Use the following algorithm to resolve a parameter reference:
+
+  1. Match the leading symbol as the key
+  2. Look up the key in the parameter context (described below) to get the current value.
+     It is an error if the key is not found in the parameter context.
+  3. If there are no subsequent segments, terminate and return the current
+     value.
+  4. Else, match the next segment
+  5. Extract the symbol, string, or index from the segment as the key
+  6. Look up the key in the current value and assign it as the new current
+     value.  If the key is a symbol or string, the current value must be an
+     object.  If the key is an index, the current value must be an array or
+     string.  It is an error if the key does not match the required type, or
+     if the key is not found or out of range.
+  7. Repeat steps 3-6
+
+The root namespace is the parameter context.  The following parameters must
+be provided:
+
+  * `inputs`: The input object to the current Process.
+  * `self`: A context-specific value.  The contextual values for 'self' are
+    documented for specific fields elsewhere in this specification.  If
+    a contextual value of 'self' is not documented for a field, it
+    must be 'null'.
+  * `runtime`: An object containing configuration details.  Specific to the
+    process type.  An implementation may provide
+    opaque strings for any or all fields of `runtime`.  These must be
+    filled in by the platform after processing the Tool but before actual
+    execution.  Parameter references and expressions may only use the
+    literal string value of the field and must not perform computation on
+    the contents, except where noted otherwise.
+
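+The resolution algorithm above fits in a short function.  The following is
+a simplified, non-normative Python sketch; escaped quotes inside string
+segments and detailed error reporting are omitted:
+
+```
+import re
+
+SEGMENT = re.compile(r"\.([A-Za-z0-9_]+)"    # .symbol
+                     r"|\[([0-9]+)\]"        # [index]
+                     r"|\['([^']*)'\]"       # ['string']
+                     r"|\[\"([^\"]*)\"\]")   # ["string"]
+
+def resolve(ref, context):
+    m = re.match(r"[A-Za-z0-9_]+", ref)
+    value = context[m.group(0)]              # steps 1-2: leading symbol
+    rest = ref[m.end():]
+    while rest:                              # steps 3-7: walk the segments
+        seg = SEGMENT.match(rest)
+        sym, idx, sq, dq = seg.groups()
+        key = int(idx) if idx is not None else (sym or sq or dq)
+        value = value[key]
+        rest = rest[seg.end():]
+    return value
+
+ctx = {"inputs": {"sample": {"id": "S1"}, "files": ["a.txt", "b.txt"]}}
+assert resolve("inputs.sample['id']", ctx) == "S1"
+assert resolve("inputs.files[1]", ctx) == "b.txt"
+```
+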
+If the value of a field has no leading or trailing non-whitespace
+characters around a parameter reference, the effective value of the field
+becomes the value of the referenced parameter, preserving the return type.
+
+If the value of a field has non-whitespace leading or trailing characters
+around a parameter reference, it is subject to string interpolation.  The
+effective value of the field is a string containing the leading characters,
+followed by the string value of the parameter reference, followed by the
+trailing characters.  The string value of the parameter reference is its
+textual JSON representation with the following rules:
+
+  * Leading and trailing quotes are stripped from strings
+  * Object entries are sorted by key
+
+Multiple parameter references may appear in a single field.  This case
+must be treated as a string interpolation.  After interpolating the first
+parameter reference, interpolation must be recursively applied to the
+trailing characters to yield the final string value.
+
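+A sketch of these interpolation rules in Python.  The `REF` pattern is
+simplified (it does not handle nested parentheses), and the bare dictionary
+lookup stands in for full parameter reference resolution:
+
+```
+import json, re
+
+REF = re.compile(r"\$\(([^)]+)\)")   # simplified: no nested parentheses
+
+def stringify(value):
+    # Strings lose their quotes; object keys are sorted, per the rules above.
+    return value if isinstance(value, str) else json.dumps(value, sort_keys=True)
+
+def interpolate(field, context):
+    m = REF.search(field)
+    if m is None:
+        return field
+    value = context[m.group(1)]      # stand-in for full reference resolution
+    if m.span() == (0, len(field)):
+        return value                 # a whole-field reference keeps its type
+    head = field[:m.start()] + stringify(value)
+    return head + interpolate(field[m.end():], context)   # recurse on tail
+
+ctx = {"threads": 4, "prefix": "run"}
+assert interpolate("$(threads)", ctx) == 4
+assert interpolate("$(prefix)-$(threads).log", ctx) == "run-4.log"
+```
+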
+## Expressions
+
+An expression is a fragment of [Javascript/ECMAScript
+5.1](http://www.ecma-international.org/ecma-262/5.1/) code evaluated by the
+workflow platform to affect the inputs, outputs, or
+behavior of a process.  In the generic execution sequence, expressions may
+be evaluated during step 5 (process setup), step 6 (execute process),
+and/or step 7 (capture output).  Expressions are distinct from regular
+processes in that they are intended to modify the behavior of the workflow
+itself rather than perform the primary work of the workflow.
+
+To declare the use of expressions, the document must include the process
+requirement `InlineJavascriptRequirement`.  Expressions may be used in any
+field permitting the pseudo-type `Expression`, as specified by this
+document.
+
+Expressions are denoted by the syntax `$(...)` or `${...}`.  A code
+fragment wrapped in the `$(...)` syntax must be evaluated as an
+[ECMAScript expression](http://www.ecma-international.org/ecma-262/5.1/#sec-11).  A
+code fragment wrapped in the `${...}` syntax must be evaluated as an
+[ECMAScript function body](http://www.ecma-international.org/ecma-262/5.1/#sec-13)
+for an anonymous, zero-argument function.  Expressions must return a valid JSON
+data type: one of null, string, number, boolean, array, or object.  Other
+return values must result in a `permanentFailure`.  When scanning for
+expressions, implementations must permit any syntactically valid Javascript
+and must account for nesting of parentheses or braces and for strings that
+may contain parentheses or braces.
+
+The runtime must include any code defined in the ["expressionLib" field of
+InlineJavascriptRequirement](#InlineJavascriptRequirement) prior to
+executing the actual expression.
+
+Before executing the expression, the runtime must initialize as global
+variables the fields of the parameter context described above.
+
+The effective value of the field after expression evaluation follows the
+same rules as parameter references discussed above.  Multiple expressions
+may appear in a single field.
+
+Expressions must be evaluated in an isolated context (a "sandbox") which
+permits no side effects to leak outside the context.  Expressions also must
+be evaluated in [Javascript strict mode](http://www.ecma-international.org/ecma-262/5.1/#sec-4.2.2).
+
+The order in which expressions are evaluated is undefined except where
+otherwise noted in this document.
+
+An implementation may choose to implement parameter references by
+evaluating them as Javascript expressions.  The results of evaluating
+parameter references must be identical whether implemented by Javascript
+evaluation or some other means.
+
+Implementations may apply other limits, such as process isolation, timeouts,
+and operating system containers/jails to minimize the security risks associated
+with running untrusted code embedded in a CWL document.
+
+Exceptions thrown from an expression must result in a `permanentFailure` of the
+process.
+
+## Executing CWL documents as scripts
+
+By convention, a CWL document may begin with `#!/usr/bin/env cwl-runner`
+and be marked as executable (the POSIX "+x" permission bits) to enable it
+to be executed directly.  A workflow platform may support this mode of
+operation; if so, it must provide `cwl-runner` as an alias for the
+platform's CWL implementation.
+
+A CWL input object document may similarly begin with `#!/usr/bin/env
+cwl-runner` and be marked as executable.  In this case, the input object
+must include the field `cwl:tool` supplying an IRI to the default CWL
+document that should be executed using the fields of the input object as
+input parameters.
+
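+A hedged sketch of how a runner might honor `cwl:tool`, assuming a JSON
+input object and relying on the `cwl-runner` alias required above:
+
+```
+import json, subprocess, sys, tempfile
+
+def run_input_object(path):
+    with open(path) as f:
+        job = json.load(f)
+    tool = job.pop("cwl:tool")       # IRI of the default CWL document
+    # The remaining fields are the input parameters; hand them to the runner.
+    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as t:
+        json.dump(job, t)
+    subprocess.check_call(["cwl-runner", tool, t.name])
+
+if __name__ == "__main__":
+    run_input_object(sys.argv[1])
+```
+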
+## Discovering CWL documents on a local filesystem
+
+To discover CWL documents, look in the following locations:
+
+`/usr/share/commonwl/`
+
+`/usr/local/share/commonwl/`
+
+`$XDG_DATA_HOME/commonwl/` (usually `$HOME/.local/share/commonwl`)
+
+`$XDG_DATA_HOME` is defined by the [XDG Base Directory
+Specification](http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html).
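+
+A non-normative Python sketch of this discovery scan:
+
+```
+import os
+
+def discover_cwl_documents():
+    xdg = os.environ.get("XDG_DATA_HOME",
+                         os.path.expanduser("~/.local/share"))
+    dirs = ["/usr/share/commonwl/",
+            "/usr/local/share/commonwl/",
+            os.path.join(xdg, "commonwl")]
+    found = []
+    for d in dirs:
+        if os.path.isdir(d):
+            found += [os.path.join(d, n) for n in sorted(os.listdir(d))]
+    return found
+
+print(discover_cwl_documents())
+```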
diff --git a/cwltool/schemas/v1.0/contrib.md b/cwltool/schemas/v1.0/contrib.md
new file mode 100644
index 0000000..af6f6e8
--- /dev/null
+++ b/cwltool/schemas/v1.0/contrib.md
@@ -0,0 +1,19 @@
+Authors:
+
+* Peter Amstutz <peter.amstutz at curoverse.com>, Arvados Project, Curoverse
+* Michael R. Crusoe <michael.crusoe at gmail.com>, Common Workflow Language
+  project
+* Nebojša Tijanić <nebojsa.tijanic at sbgenomics.com>, Seven Bridges Genomics
+
+Contributors:
+
+* Brad Chapman <bchapman at hsph.harvard.edu>, Harvard Chan School of Public Health
+* John Chilton <jmchilton at gmail.com>, Galaxy Project, Pennsylvania State University
+* Michael Heuer <heuermh at berkeley.edu>, UC Berkeley AMPLab
+* Andrey Kartashov <Andrey.Kartashov at cchmc.org>, Cincinnati Children's Hospital
+* Dan Leehr <dan.leehr at duke.edu>, Duke University
+* Hervé Ménager <herve.menager at gmail.com>, Institut Pasteur
+* Maya Nedeljkovich <maja.nedeljkovic at sbgenomics.com>, Seven Bridges Genomics
+* Matt Scales <mscales at icr.ac.uk>, Institute of Cancer Research, London
+* Stian Soiland-Reyes <soiland-reyes at cs.manchester.ac.uk>, University of Manchester
+* Luka Stojanovic <luka.stojanovic at sbgenomics.com>, Seven Bridges Genomics
diff --git a/cwltool/schemas/v1.0/intro.md b/cwltool/schemas/v1.0/intro.md
new file mode 100644
index 0000000..5eac45b
--- /dev/null
+++ b/cwltool/schemas/v1.0/intro.md
@@ -0,0 +1,21 @@
+# Status of this document
+
+This document is the product of the [Common Workflow Language working
+group](https://groups.google.com/forum/#!forum/common-workflow-language).  The
+latest version of this document is available in the "v1.0" directory at
+
+https://github.com/common-workflow-language/common-workflow-language
+
+The products of the CWL working group (including this document) are made available
+under the terms of the Apache License, version 2.0.
+
+<!--ToC-->
+
+# Introduction
+
+The Common Workflow Language (CWL) working group is an informal, multi-vendor
+working group consisting of various organizations and individuals that have an
+interest in portability of data analysis workflows.  The goal is to create
+specifications like this one that enable data scientists to describe analysis
+tools and workflows that are powerful, easy to use, portable, and support
+reproducibility.
diff --git a/cwltool/schemas/v1.0/invocation.md b/cwltool/schemas/v1.0/invocation.md
new file mode 100644
index 0000000..ce7524c
--- /dev/null
+++ b/cwltool/schemas/v1.0/invocation.md
@@ -0,0 +1,153 @@
+# Running a Command
+
+To accommodate the enormous variety in syntax and semantics for input, runtime
+environment, invocation, and output of arbitrary programs, a CommandLineTool
+defines an "input binding" that describes how to translate abstract input
+parameters to a concrete program invocation, and an "output binding" that
+describes how to generate output parameters from program output.
+
+## Input binding
+
+The tool command line is built by applying command line bindings to the
+input object.  Bindings are listed either as part of an [input
+parameter](#CommandInputParameter) using the `inputBinding` field, or
+separately using the `arguments` field of the CommandLineTool.
+
+The algorithm to build the command line is as follows.  In this algorithm,
+the sort key is a list consisting of one or more numeric or string
+elements.  Strings are sorted lexicographically based on UTF-8 encoding.
+
+  1. Collect `CommandLineBinding` objects from `arguments`.  Assign a sorting
+  key `[position, i]` where `position` is
+  [`CommandLineBinding.position`](#CommandLineBinding) and `i`
+  is the index in the `arguments` list.
+
+  2. Collect `CommandLineBinding` objects from the `inputs` schema and
+  associate them with values from the input object.  Where the input type
+  is a record, array, or map, recursively walk the schema and input object,
+  collecting nested `CommandLineBinding` objects and associating them with
+  values from the input object.
+
+  3. Create a sorting key by taking the value of the `position` field at
+  each level leading to each leaf binding object.  If `position` is not
+  specified, it is not added to the sorting key.  For bindings on arrays
+  and maps, the sorting key must include the array index or map key
+  following the position.  If and only if two bindings have the same sort
+  key, the tie must be broken using the ordering of the field or parameter
+  name immediately containing the leaf binding.
+
+  4. Sort elements using the assigned sorting keys.  Numeric entries sort
+  before strings.
+
+  5. In the sorted order, apply the rules defined in
+  [`CommandLineBinding`](#CommandLineBinding) to convert bindings to actual
+  command line elements.
+
+  6. Insert elements from `baseCommand` at the beginning of the command
+  line.
+
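+A condensed Python sketch of the sort.  The comparator encodes the rule
+that numeric key elements sort before strings; the bindings and the
+`bwa mem` command line are purely illustrative:
+
+```
+from functools import cmp_to_key
+
+def cmp_elem(a, b):
+    # Numeric entries sort before strings; otherwise natural ordering.
+    if isinstance(a, int) and isinstance(b, str): return -1
+    if isinstance(a, str) and isinstance(b, int): return 1
+    return (a > b) - (a < b)
+
+def cmp_keys(x, y):
+    for a, b in zip(x[0], y[0]):
+        c = cmp_elem(a, b)
+        if c: return c
+    return len(x[0]) - len(y[0])
+
+# Hypothetical (sort key, command line elements) pairs.
+bindings = [([3], ["--reads", "r.fq"]), ([1, 0], ["-t", "4"]),
+            (["ref"], ["ref.fa"])]
+ordered = sorted(bindings, key=cmp_to_key(cmp_keys))
+print(["bwa", "mem"] + [el for _, els in ordered for el in els])
+# ['bwa', 'mem', '-t', '4', '--reads', 'r.fq', 'ref.fa']
+```
+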
+## Runtime environment
+
+All files listed in the input object must be made available in the runtime
+environment.  The implementation may use a shared or distributed file
+system or transfer files via explicit download to the host.  Implementations
+may choose not to provide access to files not explicitly specified in the input
+object or process requirements.
+
+Output files produced by tool execution must be written to the **designated
+output directory**.  The initial current working directory when executing
+the tool must be the designated output directory.
+
+Files may also be written to the **designated temporary directory**.  This
+directory must be isolated and not shared with other processes.  Any files
+written to the designated temporary directory may be automatically deleted by
+the workflow platform immediately after the tool terminates.
+
+For compatibility, files may be written to the **system temporary directory**
+which must be located at `/tmp`.  Because the system temporary directory may be
+shared with other processes on the system, files placed in the system temporary
+directory are not guaranteed to be deleted automatically.  A tool
+must not use the system temporary directory as a backchannel for communication with
+other tools.  It is valid for the system temporary directory to be the same as
+the designated temporary directory.
+
+The tool must execute in a new, empty environment
+with only the environment variables described below; the child process must
+not inherit environment variables from the parent process except as
+specified or at user option.
+
+  * `HOME` must be set to the designated output directory.
+  * `TMPDIR` must be set to the designated temporary directory.
+  * `PATH` may be inherited from the parent process, except when run in a
+    container that provides its own `PATH`.
+  * Variables defined by [EnvVarRequirement](#EnvVarRequirement)
+  * The default environment of the container, such as when using
+    [DockerRequirement](#DockerRequirement)
+
+An implementation may forbid the tool from writing to any location in the
+runtime environment file system other than the designated temporary directory,
+system temporary directory, and designated output directory.  An implementation
+may provide read-only input files, and disallow in-place update of input files.
+The designated temporary directory, system temporary directory and designated
+output directory may each reside on different mount points on different file
+systems.
+
+An implementation may forbid the tool from directly accessing network
+resources.  Correct tools must not assume any network access.  Future versions
+of the specification may incorporate optional process requirements that
+describe the networking needs of a tool.
+
+The `runtime` section available in [parameter references](#Parameter_references)
+and [expressions](#Expressions) contains the following fields.  As noted
+earlier, an implementation may perform deferred resolution of runtime fields by providing
+opaque strings for any or all of the following fields; parameter references
+and expressions may only use the literal string value of the field and must
+not perform computation on the contents.
+
+  * `runtime.outdir`: an absolute path to the designated output directory
+  * `runtime.tmpdir`: an absolute path to the designated temporary directory
+  * `runtime.cores`:  number of CPU cores reserved for the tool process
+  * `runtime.ram`:    amount of RAM in mebibytes (2\*\*20) reserved for the tool process
+  * `runtime.outdirSize`: reserved storage space available in the designated output directory
+  * `runtime.tmpdirSize`: reserved storage space available in the designated temporary directory
+
+For `cores`, `ram`, `outdirSize` and `tmpdirSize`, if an implementation cannot
+provide the actual value at expression evaluation time, it should report back
+the minimum requested amount.
+
+See [ResourceRequirement](#ResourceRequirement) for details on how to
+describe the hardware resources required by a tool.
+
+The standard input stream and standard output stream may be redirected as
+described in the `stdin` and `stdout` fields.
+
+## Execution
+
+Once the command line is built and the runtime environment is created, the
+actual tool is executed.
+
+The standard error stream and standard output stream (unless redirected by
+setting `stdout` or `stderr`) may be captured by platform logging facilities
+for storage and reporting.
+
+Tools may be multithreaded or spawn child processes; however, when the
+parent process exits, the tool is considered finished regardless of whether
+any detached child processes are still running.  Tools must not require any
+kind of console, GUI, or web based user interaction in order to start and
+run to completion.
+
+The exit code of the process indicates if the process completed
+successfully.  By convention, an exit code of zero is treated as success
+and non-zero exit codes are treated as failure.  This may be customized by
+providing the fields `successCodes`, `temporaryFailCodes`, and
+`permanentFailCodes`.  An implementation may choose to default unspecified
+non-zero exit codes to either `temporaryFailure` or `permanentFailure`.
+
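+The mapping from exit code to status can be sketched as follows; the
+default for unlisted non-zero codes is left as an argument because it is
+implementation-defined (the function name is illustrative):
+
+```
+def exit_status(code, success=(0,), temporary_fail=(), permanent_fail=(),
+                default="permanentFailure"):
+    if code in success:
+        return "success"
+    if code in temporary_fail:
+        return "temporaryFailure"
+    if code in permanent_fail:
+        return "permanentFailure"
+    return default
+
+assert exit_status(0) == "success"
+assert exit_status(75, temporary_fail=(75,)) == "temporaryFailure"
+assert exit_status(1) == "permanentFailure"
+```
+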
+## Output binding
+
+If the output directory contains a file named "cwl.output.json", that file
+must be loaded and used as the output object.  Otherwise, the output object
+must be generated by walking the parameters listed in `outputs` and
+applying output bindings to the tool output.  Output bindings are
+associated with output parameters using the `outputBinding` field.  See
+[`CommandOutputBinding`](#CommandOutputBinding) for details.
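+
+A sketch of that decision in Python, where `apply_binding` is a
+hypothetical callable standing in for `CommandOutputBinding` evaluation:
+
+```
+import json, os
+
+def collect_output_object(outdir, outputs, apply_binding):
+    candidate = os.path.join(outdir, "cwl.output.json")
+    if os.path.exists(candidate):
+        # The file, when present, is taken verbatim as the output object.
+        with open(candidate) as f:
+            return json.load(f)
+    # Otherwise walk the declared output parameters and apply bindings.
+    return {out["id"]: apply_binding(out.get("outputBinding"), outdir)
+            for out in outputs}
+
+print(collect_output_object("/nonexistent", [{"id": "out"}],
+                            lambda binding, outdir: None))   # {'out': None}
+```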
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name.yml
new file mode 100644
index 0000000..44e95a2
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name.yml
@@ -0,0 +1,46 @@
+- |
+  ## Field name resolution
+
+  The document schema declares the vocabulary of known field names.  During
+  preprocessing traversal, field names in the document which are not part of
+  the schema vocabulary must be resolved to absolute URIs.  Under "strict"
+  validation, it is an error for a document to include fields which are not
+  part of the vocabulary and not resolvable to absolute URIs.  Field names
+  which are not part of the vocabulary are resolved using the following
+  rules:
+
+  * If a field name URI begins with a namespace prefix declared in the
+  document context (`@context`) followed by a colon `:`, the prefix and
+  colon must be replaced by the namespace declared in `@context`.
+
+  * If there is a vocabulary term which maps to the URI of a resolved
+  field, the field name must be replaced with the vocabulary term.
+
+  * If a field name URI is an absolute URI consisting of a scheme and path
+  and is not part of the vocabulary, no processing occurs.
+
+  Field name resolution is not relative.  It must not be affected by the
+  base URI.
+
+  ### Field name resolution example
+
+  Given the following schema:
+
+  ```
+- $include: field_name_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: field_name_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: field_name_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_proc.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_proc.yml
new file mode 100644
index 0000000..a53ef4b
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_proc.yml
@@ -0,0 +1,8 @@
+    {
+      "base": "one",
+      "form": {
+        "base": "two",
+        "http://example.com/three": "three",
+      },
+      "http://example.com/acid#four": "four"
+    }
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_schema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_schema.yml
new file mode 100644
index 0000000..5089c4b
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_schema.yml
@@ -0,0 +1,14 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "base",
+      "type": "string",
+      "jsonldPredicate": "http://example.com/base"
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_src.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_src.yml
new file mode 100644
index 0000000..1ed79b9
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/field_name_src.yml
@@ -0,0 +1,8 @@
+    {
+      "base": "one",
+      "form": {
+        "http://example.com/base": "two",
+        "http://example.com/three": "three",
+      },
+      "acid:four": "four"
+    }
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res.yml
new file mode 100644
index 0000000..45f4efb
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res.yml
@@ -0,0 +1,53 @@
+- |
+  ## Identifier resolution
+
+  The schema may designate one or more fields as identifier fields to identify
+  specific objects.  Processing must resolve relative identifiers to absolute
+  identifiers using the following rules:
+
+    * If an identifier URI is prefixed with `#` it is a URI relative
+      fragment identifier.  It is resolved relative to the base URI by setting
+      or replacing the fragment portion of the base URI.
+
+    * If an identifier URI does not contain a scheme and is not prefixed `#` it
+      is a parent relative fragment identifier.  It is resolved relative to the
+      base URI by the following rule: if the base URI does not contain a
+      document fragment, set the fragment portion of the base URI.  If the base
+      URI does contain a document fragment, append a slash `/` followed by the
+      identifier field to the fragment portion of the base URI.
+
+    * If an identifier URI begins with a namespace prefix declared in
+      `$namespaces` followed by a colon `:`, the prefix and colon must be
+      replaced by the namespace declared in `$namespaces`.
+
+    * If an identifier URI is an absolute URI consisting of a scheme and path,
+      no processing occurs.
+
+  When preprocessing visits a node containing an identifier, that identifier
+  must be used as the base URI to process child nodes.
+
+  It is an error for more than one object in a document to have the same
+  absolute URI.
+
+  ### Identifier resolution example
+
+  Given the following schema:
+
+  ```
+- $include: ident_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: ident_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: ident_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_proc.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_proc.yml
new file mode 100644
index 0000000..24d3ea8
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_proc.yml
@@ -0,0 +1,20 @@
+{
+  "id": "http://example.com/base",
+  "form": {
+    "id": "http://example.com/base#one",
+    "things": [
+      {
+        "id": "http://example.com/base#one/two"
+      },
+      {
+        "id": "http://example.com/base#three"
+      },
+      {
+        "id": "http://example.com/four#five",
+      },
+      {
+        "id": "http://example.com/acid#six",
+      }
+    ]
+  }
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_schema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_schema.yml
new file mode 100644
index 0000000..8a7bb04
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_schema.yml
@@ -0,0 +1,14 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "id",
+      "type": "string",
+      "jsonldPredicate": "@id"
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_src.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_src.yml
new file mode 100644
index 0000000..bbbd96e
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/ident_res_src.yml
@@ -0,0 +1,20 @@
+    {
+      "id": "http://example.com/base",
+      "form": {
+        "id": "one",
+        "things": [
+          {
+            "id": "two"
+          },
+          {
+            "id": "#three",
+          },
+          {
+            "id": "four#five",
+          },
+          {
+            "id": "acid:six",
+          }
+        ]
+      }
+    }
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/import_include.md b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/import_include.md
new file mode 100644
index 0000000..1b9f37f
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/import_include.md
@@ -0,0 +1,176 @@
+## Import
+
+During preprocessing traversal, an implementation must resolve `$import`
+directives.  An `$import` directive is an object consisting of exactly one
+field, `$import`, specifying a resource by URI string.  It is an error if
+there are additional fields in the `$import` object; such additional fields
+must be ignored.
+
+The URI string must be resolved to an absolute URI using the link
+resolution rules described previously.  Implementations must support
+loading from `file`, `http` and `https` resources.  The URI referenced by
+`$import` must be loaded and recursively preprocessed as a Salad document.
+The external imported document does not inherit the context of the
+importing document, and the default base URI for processing the imported
+document must be the URI used to retrieve the imported document.  If the
+`$import` URI includes a document fragment, the fragment must be excluded
+from the base URI used to preprocess the imported document.
+
+Once loaded and processed, the `$import` node is replaced in the document
+structure by the object or array yielded from the import operation.
+
+URIs may reference document fragments which refer to a specific object in
+the target document.  This indicates that the `$import` node must be
+replaced by only the object with the appropriate fragment identifier.
+
+It is a fatal error if an import directive refers to an external resource
+or resource fragment which does not exist or is not accessible.
+
+### Import example
+
+import.yml:
+```
+{
+  "hello": "world"
+}
+
+```
+
+parent.yml:
+```
+{
+  "form": {
+    "bar": {
+      "$import": "import.yml"
+      }
+  }
+}
+
+```
+
+This becomes:
+
+```
+{
+  "form": {
+    "bar": {
+      "hello": "world"
+    }
+  }
+}
+```
+
+## Include
+
+During preprocessing traversal, an implementation must resolve `$include`
+directives.  An `$include` directive is an object consisting of exactly one
+field, `$include`, specifying a URI string.  It is an error if there are
+additional fields in the `$include` object; such additional fields must be
+ignored.
+
+The URI string must be resolved to an absolute URI using the link
+resolution rules described previously.  The URI referenced by `$include` must
+be loaded as text data.  Implementations must support loading from
+`file`, `http` and `https` resources.  Implementations may transcode the
+character encoding of the text data to match that of the parent document,
+but must not interpret or parse the text document in any other way.
+
+Once loaded, the `$include` node is replaced in the document structure by a
+string containing the text data loaded from the resource.
+
+It is a fatal error if an `$include` directive refers to an external resource
+which does not exist or is not accessible.
+
+### Include example
+
+parent.yml:
+```
+{
+  "form": {
+    "bar": {
+      "$include": "include.txt"
+      }
+  }
+}
+
+```
+
+include.txt:
+```
+hello world
+
+```
+
+This becomes:
+
+```
+{
+  "form": {
+    "bar": "hello world"
+  }
+}
+```
+
+
+## Mixin
+
+During preprocessing traversal, an implementation must resolve `$mixin`
+directives.  An `$mixin` directive is an object consisting of the field
+`$mixin` specifying a resource by URI string.  If there are additional fields in
+the `$mixin` object, these fields override fields in the object which is loaded
+from the `$mixin` URI.
+
+The URI string must be resolved to an absolute URI using the link resolution
+rules described previously.  Implementations must support loading from `file`,
+`http` and `https` resources.  The URI referenced by `$mixin` must be loaded
+and recursively preprocessed as a Salad document.  The external imported
+document must inherit the context of the importing document; however, the file
+URI for processing the imported document must be the URI used to retrieve the
+imported document.  The `$mixin` URI must not include a document fragment.
+
+Once loaded and processed, the `$mixin` node is replaced in the document
+structure by the object or array yielded from the import operation.
+
+URIs may reference document fragments which refer to a specific object in
+the target document.  This indicates that the `$mixin` node must be
+replaced by only the object with the appropriate fragment identifier.
+
+It is a fatal error if a `$mixin` directive refers to an external resource
+or resource fragment which does not exist or is not accessible.
+
+### Mixin example
+
+mixin.yml:
+```
+{
+  "hello": "world",
+  "carrot": "orange"
+}
+
+```
+
+parent.yml:
+```
+{
+  "form": {
+    "bar": {
+      "$mixin": "mixin.yml"
+      "carrot": "cake"
+      }
+  }
+}
+
+```
+
+This becomes:
+
+```
+{
+  "form": {
+    "bar": {
+      "hello": "world",
+      "carrot": "cake"
+    }
+  }
+}
+```
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res.yml
new file mode 100644
index 0000000..9346f8a
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res.yml
@@ -0,0 +1,55 @@
+- |
+  ## Link resolution
+
+  The schema may designate one or more fields as link fields that reference
+  other objects.  Processing must resolve links to absolute URIs using the
+  following rules:
+
+  * If a reference URI is prefixed with `#` it is a relative
+  fragment identifier.  It is resolved relative to the base URI by setting
+  or replacing the fragment portion of the base URI.
+
+  * If a reference URI does not contain a scheme and is not prefixed with `#`
+  it is a path relative reference.  If the reference URI contains `#` in any
+  position other than the first character, the reference URI must be divided
+  into a path portion and a fragment portion split on the first instance of
+  `#`.  The path portion is resolved relative to the base URI by the following
+  rule: if the path portion of the base URI ends in a slash `/`, append the
+  path portion of the reference URI to the path portion of the base URI.  If
+  the path portion of the base URI does not end in a slash, replace the final
+  path segment with the path portion of the reference URI.  Replace the
+  fragment portion of the base URI with the fragment portion of the reference
+  URI.
+
+  * If a reference URI begins with a namespace prefix declared in `$namespaces`
+  followed by a colon `:`, the prefix and colon must be replaced by the
+  namespace declared in `$namespaces`.
+
+  * If a reference URI is an absolute URI consisting of a scheme and path,
+  no processing occurs.
+
+  Link resolution must not affect the base URI used to resolve identifiers
+  and other links.
+
+  ### Link resolution example
+
+  Given the following schema:
+
+  ```
+- $include: link_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: link_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: link_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_proc.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_proc.yml
new file mode 100644
index 0000000..03e539d
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_proc.yml
@@ -0,0 +1,21 @@
+{
+  "$base": "http://example.com/base",
+  "link": "http://example.com/base/zero",
+  "form": {
+    "link": "http://example.com/one",
+    "things": [
+      {
+        "link": "http://example.com/two"
+      },
+      {
+        "link": "http://example.com/base#three"
+      },
+      {
+        "link": "http://example.com/four#five",
+      },
+      {
+        "link": "http://example.com/acid#six",
+      }
+    ]
+  }
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_schema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_schema.yml
new file mode 100644
index 0000000..76420d3
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_schema.yml
@@ -0,0 +1,16 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "link",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@id"
+      }
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_src.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_src.yml
new file mode 100644
index 0000000..23f7a29
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/link_res_src.yml
@@ -0,0 +1,21 @@
+{
+  "$base": "http://example.com/base",
+  "link": "http://example.com/base/zero",
+  "form": {
+    "link": "one",
+    "things": [
+      {
+        "link": "two"
+      },
+      {
+        "link": "#three",
+      },
+      {
+        "link": "four#five",
+      },
+      {
+        "link": "acid:six",
+      }
+    ]
+  }
+}
diff --git a/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
similarity index 60%
copy from cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
copy to cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
index 6e90775..d5472e9 100644
--- a/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema.yml
@@ -46,36 +46,7 @@ $graph:
 #     How to generate the json-ld context...
 
 
-- name: PrimitiveType
-  type: enum
-  symbols:
-    - "sld:null"
-    - "xsd:boolean"
-    - "xsd:int"
-    - "xsd:long"
-    - "xsd:float"
-    - "xsd:double"
-    - "xsd:string"
-  doc:
-    - |
-      Salad data types are based on Avro schema declarations.  Refer to the
-      [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
-      detailed information.
-    - "null: no value"
-    - "boolean: a binary value"
-    - "int: 32-bit signed integer"
-    - "long: 64-bit signed integer"
-    - "float: single precision (32-bit) IEEE 754 floating-point number"
-    - "double: double precision (64-bit) IEEE 754 floating-point number"
-    - "string: Unicode character sequence"
-
-
-- name: "Any"
-  type: enum
-  symbols: ["#Any"]
-  doc: |
-    The **Any** type validates for any non-null value.
-
+- $import: metaschema_base.yml
 
 - name: JsonldPredicate
   type: record
@@ -84,7 +55,7 @@ $graph:
     URI resolution and JSON-LD context generation.
   fields:
     - name: _id
-      type: ["null", string]
+      type: string?
       jsonldPredicate:
         _id: sld:_id
         _type: "@id"
@@ -93,7 +64,7 @@ $graph:
         The predicate URI that this field corresponds to.
         Corresponds to JSON-LD `@id` directive.
     - name: _type
-      type: ["null", string]
+      type: string?
       doc: |
         The context type hint, corresponds to JSON-LD `@type` directive.
 
@@ -106,11 +77,11 @@ $graph:
           resolved using the vocabulary resolution rules.
 
     - name: _container
-      type: ["null", string]
+      type: string?
       doc: |
         Structure hint, corresponds to JSON-LD `@container` directive.
     - name: identity
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true and `_type` is `@id` this indicates that the parent field must
         be resolved according to identity resolution rules instead of link
@@ -118,11 +89,46 @@ $graph:
         assertion that the linked value exists; absence of an object in the loaded document
         with the URI is not an error.
     - name: noLinkCheck
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true, this indicates that link validation traversal must stop at
        this field.  This field (if it is a URI) or any fields under it (if it
         is an object or array) are not subject to link checking.
+    - name: mapSubject
+      type: string?
+      doc: |
+        If the value of the field is a JSON object, it must be transformed
+        into an array of JSON objects, where each key-value pair from the
+        source JSON object becomes a list item; each list item must be a JSON
+        object, and the key is assigned to the field specified by `mapSubject`.
+    - name: mapPredicate
+      type: string?
+      doc: |
+        Only applies if `mapSubject` is also provided.  If the value of the
+        field is a JSON object, it is transformed as described in `mapSubject`,
+        with the addition that when the value of a map item is not an object,
+        the item is transformed to a JSON object with the key assigned to the
+        field specified by `mapSubject` and the value assigned to the field
+        specified by `mapPredicate`.
+    - name: refScope
+      type: int?
+      doc: |
+        If the field contains a relative reference, it must be resolved by
+        searching for valid document references in each successive parent scope
+        in the document fragment.  For example, a reference of `foo` in the
+        context `#foo/bar/baz` will first check for the existence of
+        `#foo/bar/baz/foo`, followed by `#foo/bar/foo`, then `#foo/foo` and
+        then finally `#foo`.  The first valid URI in the search order shall be
+        used as the fully resolved value of the identifier.  The value of the
+        refScope field is the specified number of levels from the containing
+        identifer scope before starting the search, so if `refScope: 2` then
+        "baz" and "bar" must be stripped to get the base `#foo` and search
+        `#foo/foo` and the `#foo`.  The last scope searched must be the top
+        level scope before determining if the identifier cannot be resolved.
+    - name: typeDSL
+      type: boolean?
+      doc: |
+        Field must be expanded based on the Schema Salad type DSL.
 
 
 - name: SpecializeDef
@@ -134,6 +140,7 @@ $graph:
       jsonldPredicate:
         _id: "sld:specializeFrom"
         _type: "@id"
+        refScope: 1
 
     - name: specializeTo
       type: string
@@ -141,6 +148,7 @@ $graph:
       jsonldPredicate:
         _id: "sld:specializeTo"
         _type: "@id"
+        refScope: 1
 
 
 - name: NamedType
@@ -159,15 +167,13 @@ $graph:
   fields:
     - name: doc
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       doc: "A documentation string for this type, or an array of strings which should be concatenated."
-      jsonldPredicate: "sld:doc"
+      jsonldPredicate: "rdfs:comment"
 
     - name: docParent
-      type: ["null", string]
+      type: string?
       doc: |
         Hint to indicate that during documentation generation, documentation
         for this type should appear in a subsection under `docParent`.
@@ -177,10 +183,8 @@ $graph:
 
     - name: docChild
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       doc: |
         Hint to indicate that during documentation generation, documentation
         for `docChild` should appear in a subsection under this type.
@@ -189,7 +193,7 @@ $graph:
         _type: "@id"
 
     - name: docAfter
-      type: ["null", string]
+      type: string?
       doc: |
         Hint to indicate that during documentation generation, documentation
         for this type should appear after the `docAfter` section at the same
@@ -201,225 +205,99 @@ $graph:
 
 - name: SchemaDefinedType
   type: record
-  extends: "#DocType"
+  extends: DocType
   doc: |
     Abstract base for schema-defined types.
   abstract: true
   fields:
     - name: jsonldPredicate
       type:
-        - "null"
-        - string
-        - "#JsonldPredicate"
+        - string?
+        - JsonldPredicate?
       doc: |
         Annotate this type with linked data context.
-      jsonldPredicate: "sld:jsonldPredicate"
+      jsonldPredicate: sld:jsonldPredicate
 
     - name: documentRoot
-      type: ["null", boolean]
+      type: boolean?
       doc: |
        If true, indicates that the type is valid at the document root.  At
         least one type in a schema must be tagged with `documentRoot: true`.
 
 
-- name: RecordField
-  type: record
-  doc: "A field of a record."
-  fields:
-    - name: name
-      type: string
-      jsonldPredicate: "@id"
-      doc: |
-        The name of the field
-
-    - name: doc
-      type: ["null", string]
-      doc: |
-        A documentation string for this field
-      jsonldPredicate: "sld:doc"
-
-    - name: type
-      type:
-        - "#PrimitiveType"
-        - "#RecordSchema"
-        - "#EnumSchema"
-        - "#ArraySchema"
-        - string
-        - type: array
-          items:
-            - "#PrimitiveType"
-            - "#RecordSchema"
-            - "#EnumSchema"
-            - "#ArraySchema"
-            - string
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-      doc: |
-        The field type
-
-
 - name: SaladRecordField
   type: record
-  extends: "#RecordField"
+  extends: RecordField
   doc: "A field of a record."
   fields:
     - name: jsonldPredicate
       type:
-        - "null"
-        - string
-        - "#JsonldPredicate"
+        - string?
+        - JsonldPredicate?
       doc: |
         Annotate this type with linked data context.
       jsonldPredicate: "sld:jsonldPredicate"
 
-- name: RecordSchema
-  type: record
-  fields:
-    - name: type
-      doc: "Must be `record`"
-      type:
-        name: Record_symbol
-        type: enum
-        symbols:
-          - "sld:record"
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-
-    - name: "fields"
-      type:
-        - "null"
-        - type: "array"
-          items: "#RecordField"
-
-      jsonldPredicate: "sld:fields"
-      doc: "Defines the fields of the record."
-
 
 - name: SaladRecordSchema
   type: record
-  extends: ["#NamedType", "#RecordSchema", "#SchemaDefinedType"]
+  extends: [NamedType, RecordSchema, SchemaDefinedType]
   documentRoot: true
   specialize:
-    specializeFrom: "#RecordField"
-    specializeTo: "#SaladRecordField"
+    RecordField: SaladRecordField
   fields:
     - name: abstract
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true, this record is abstract and may be used as a base for other
         records, but is not valid on its own.
 
     - name: extends
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       jsonldPredicate:
         _id: "sld:extends"
         _type: "@id"
+        refScope: 1
       doc: |
         Indicates that this record inherits fields from one or more base records.
 
     - name: specialize
       type:
-        - "null"
-        - "#SpecializeDef"
-        - type: array
-          items: "#SpecializeDef"
+        - SpecializeDef[]?
       doc: |
         Only applies if `extends` is declared.  Apply type specialization using the
         base record as a template.  For each field inherited from the base
         record, replace any instance of the type `specializeFrom` with
         `specializeTo`.
-
-
-- name: EnumSchema
-  type: record
-  doc: |
-    Define an enumerated type.
-  fields:
-    - name: type
-      doc: "Must be `enum`"
-      type:
-        name: Enum_symbol
-        type: enum
-        symbols:
-          - "sld:enum"
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-
-    - name: "symbols"
-      type:
-        - type: "array"
-          items: "string"
       jsonldPredicate:
-        _id: "sld:symbols"
-        _type: "@id"
-        identity: true
-      doc: "Defines the set of valid symbols."
-
+        _id: "sld:specialize"
+        mapSubject: specializeFrom
+        mapPredicate: specializeTo
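+# Note (illustrative): with the mapSubject/mapPredicate annotation above,
+# the map form used elsewhere in this schema, e.g.
+#   specialize:
+#     RecordField: SaladRecordField
+# is shorthand for
+#   specialize:
+#     - specializeFrom: RecordField
+#       specializeTo: SaladRecordField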
 
 - name: SaladEnumSchema
   type: record
-  extends: ["#EnumSchema", "#SchemaDefinedType"]
+  extends: [EnumSchema, SchemaDefinedType]
   documentRoot: true
   doc: |
     Define an enumerated type.
   fields:
     - name: extends
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       jsonldPredicate:
         _id: "sld:extends"
         _type: "@id"
+        refScope: 1
       doc: |
         Indicates that this enum inherits symbols from a base enum.
 
 
-- name: ArraySchema
-  type: record
-  fields:
-    - name: type
-      doc: "Must be `array`"
-      type:
-        name: Array_symbol
-        type: enum
-        symbols:
-          - "sld:array"
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-
-    - name: items
-      type:
-        - "#PrimitiveType"
-        - "#RecordSchema"
-        - "#EnumSchema"
-        - "#ArraySchema"
-        - string
-        - type: array
-          items:
-            - "#PrimitiveType"
-            - "#RecordSchema"
-            - "#EnumSchema"
-            - "#ArraySchema"
-            - string
-      jsonldPredicate:
-        _id: "sld:items"
-        _type: "@vocab"
-      doc: "Defines the type of the array elements."
-
-
 - name: Documentation
   type: record
-  extends: ["#NamedType", "#DocType"]
+  extends: [NamedType, DocType]
   documentRoot: true
   doc: |
     A documentation section.  This type exists to facilitate self-documenting
@@ -435,3 +313,5 @@ $graph:
       jsonldPredicate:
         _id: "sld:type"
         _type: "@vocab"
+        typeDSL: true
+        refScope: 2
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
new file mode 100644
index 0000000..73511d1
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/metaschema_base.yml
@@ -0,0 +1,164 @@
+$base: "https://w3id.org/cwl/salad#"
+
+$namespaces:
+  sld:  "https://w3id.org/cwl/salad#"
+  dct:  "http://purl.org/dc/terms/"
+  rdf:  "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
+  xsd:  "http://www.w3.org/2001/XMLSchema#"
+
+$graph:
+- name: PrimitiveType
+  type: enum
+  symbols:
+    - "sld:null"
+    - "xsd:boolean"
+    - "xsd:int"
+    - "xsd:long"
+    - "xsd:float"
+    - "xsd:double"
+    - "xsd:string"
+  doc:
+    - |
+      Salad data types are based on Avro schema declarations.  Refer to the
+      [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
+      detailed information.
+    - "null: no value"
+    - "boolean: a binary value"
+    - "int: 32-bit signed integer"
+    - "long: 64-bit signed integer"
+    - "float: single precision (32-bit) IEEE 754 floating-point number"
+    - "double: double precision (64-bit) IEEE 754 floating-point number"
+    - "string: Unicode character sequence"
+
+
+- name: Any
+  type: enum
+  symbols: ["#Any"]
+  doc: |
+    The **Any** type validates for any non-null value.
+
+
+- name: RecordField
+  type: record
+  doc: A field of a record.
+  fields:
+    - name: name
+      type: string
+      jsonldPredicate: "@id"
+      doc: |
+        The name of the field
+
+    - name: doc
+      type: string?
+      doc: |
+        A documentation string for this field
+      jsonldPredicate: "rdfs:comment"
+
+    - name: type
+      type:
+        - PrimitiveType
+        - RecordSchema
+        - EnumSchema
+        - ArraySchema
+        - string
+        - type: array
+          items:
+            - PrimitiveType
+            - RecordSchema
+            - EnumSchema
+            - ArraySchema
+            - string
+      jsonldPredicate:
+        _id: sld:type
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+      doc: |
+        The field type
+
+
+- name: RecordSchema
+  type: record
+  fields:
+    type:
+      doc: "Must be `record`"
+      type:
+        name: Record_symbol
+        type: enum
+        symbols:
+          - "sld:record"
+      jsonldPredicate:
+        _id: "sld:type"
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+    fields:
+      type: RecordField[]?
+      jsonldPredicate:
+        _id: sld:fields
+        mapSubject: name
+        mapPredicate: type
+      doc: "Defines the fields of the record."
+
+
+- name: EnumSchema
+  type: record
+  doc: |
+    Define an enumerated type.
+  fields:
+    type:
+      doc: "Must be `enum`"
+      type:
+        name: Enum_symbol
+        type: enum
+        symbols:
+          - "sld:enum"
+      jsonldPredicate:
+        _id: "sld:type"
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+    symbols:
+      type: string[]
+      jsonldPredicate:
+        _id: "sld:symbols"
+        _type: "@id"
+        identity: true
+      doc: "Defines the set of valid symbols."
+
+
+- name: ArraySchema
+  type: record
+  fields:
+    type:
+      doc: "Must be `array`"
+      type:
+        name: Array_symbol
+        type: enum
+        symbols:
+          - "sld:array"
+      jsonldPredicate:
+        _id: "sld:type"
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+    items:
+      type:
+        - PrimitiveType
+        - RecordSchema
+        - EnumSchema
+        - ArraySchema
+        - string
+        - type: array
+          items:
+            - PrimitiveType
+            - RecordSchema
+            - EnumSchema
+            - ArraySchema
+            - string
+      jsonldPredicate:
+        _id: "sld:items"
+        _type: "@vocab"
+        refScope: 2
+      doc: "Defines the type of the array elements."
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
new file mode 100644
index 0000000..6dd3e6a
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/salad.md
@@ -0,0 +1,256 @@
+# Semantic Annotations for Linked Avro Data (SALAD)
+
+Author:
+
+* Peter Amstutz <peter.amstutz at curoverse.com>, Curoverse
+
+Contributors:
+
+* The developers of Apache Avro
+* The developers of JSON-LD
+* Nebojša Tijanić <nebojsa.tijanic at sbgenomics.com>, Seven Bridges Genomics
+
+# Abstract
+
+Salad is a schema language for describing structured linked data documents
+in JSON or YAML documents.  A Salad schema provides rules for
+preprocessing, structural validation, and link checking for documents
+described by a Salad schema.  Salad builds on JSON-LD and the Apache Avro
+data serialization system, and extends Avro with features for rich data
+modeling such as inheritance, template specialization, object identifiers,
+and object references.  Salad was developed to provide a bridge between the
+record oriented data modeling supported by Apache Avro and the Semantic
+Web.
+
+# Status of This Document
+
+This document is the product of the [Common Workflow Language working
+group](https://groups.google.com/forum/#!forum/common-workflow-language).  The
+latest version of this document is available in the "schema_salad" directory at
+
+https://github.com/common-workflow-language/schema_salad
+
+The products of the CWL working group (including this document) are made available
+under the terms of the Apache License, version 2.0.
+
+<!--ToC-->
+
+# Introduction
+
+The JSON data model is an extremely popular way to represent structured
+data.  It is attractive because of it's relative simplicity and is a
+natural fit with the standard types of many programming languages.
+However, this simplicity means that basic JSON lacks expressive features
+useful for working with complex data structures and document formats, such
+as schemas, object references, and namespaces.
+
+JSON-LD is a W3C standard providing a way to describe how to interpret a
+JSON document as Linked Data by means of a "context".  JSON-LD provides a
+powerful solution for representing object references and namespaces in JSON
+based on standard web URIs, but is not itself a schema language.  Without a
+schema providing a well defined structure, it is difficult to process an
+arbitrary JSON-LD document as idiomatic JSON because there are many ways to
+express the same data that are logically equivalent but structurally
+distinct.
+
+Several schema languages exist for describing and validating JSON data,
+such as the Apache Avro data serialization system; however, none understand
+linked data.  As a result, to fully take advantage of JSON-LD to build the
+next generation of linked data applications, one must maintain separate
+JSON schema, JSON-LD context, RDF schema, and human documentation, despite
+significant overlap of content and obvious need for these documents to stay
+synchronized.
+
+Schema Salad is designed to address this gap.  It provides a schema
+language and processing rules for describing structured JSON content
+permitting URI resolution and strict document validation.  The schema
+language supports linked data through annotations that describe the linked
+data interpretation of the content, enables generation of JSON-LD context
+and RDF schema, and production of RDF triples by applying the JSON-LD
+context.  The schema language also provides for robust support of inline
+documentation.
+
+## Introduction to draft 1
+
+This is the first version of Schema Salad.  It is developed concurrently
+with draft 3 of the Common Workflow Language for use in specifying the
+Common Workflow Language, however Schema Salad is intended to be useful to
+a broader audience.
+
+## References to Other Specifications
+
+**Javascript Object Notation (JSON)**: http://json.org
+
+**JSON Linked Data (JSON-LD)**: http://json-ld.org
+
+**YAML**: http://yaml.org
+
+**Avro**: https://avro.apache.org/docs/current/spec.html
+
+**Uniform Resource Identifier (URI) Generic Syntax**: https://tools.ietf.org/html/rfc3986
+
+**Resource Description Framework (RDF)**: http://www.w3.org/RDF/
+
+**UTF-8**: https://www.ietf.org/rfc/rfc2279.txt
+
+## Scope
+
+This document describes the syntax, data model, algorithms, and schema
+language for working with Salad documents.  It is not intended to document
+a specific implementation of Salad, however it may serve as a reference for
+the behavior of conforming implementations.
+
+## Terminology
+
+The terminology used to describe Salad documents is defined in the Concepts
+section of the specification. The terms defined in the following list are
+used in building those definitions and in describing the actions of a
+Salad implementation:
+
+**may**: Conforming Salad documents and Salad implementations are permitted but
+not required to be interpreted as described.
+
+**must**: Conforming Salad documents and Salad implementations are required
+to be interpreted as described; otherwise they are in error.
+
+**error**: A violation of the rules of this specification; results are
+undefined. Conforming implementations may detect and report an error and may
+recover from it.
+
+**fatal error**: A violation of the rules of this specification; results
+are undefined. Conforming implementations must not continue to process the
+document and may report an error.
+
+**at user option**: Conforming software may or must (depending on the modal verb in
+the sentence) behave as described; if it does, it must provide users a means to
+enable or disable the behavior described.
+
+# Document model
+
+## Data concepts
+
+An **object** is a data structure equivalent to the "object" type in JSON,
+consisting of an unordered set of name/value pairs (referred to here as
+**fields**) and where the name is a string and the value is a string, number,
+boolean, array, or object.
+
+A **document** is a file containing a serialized object, or an array of
+objects.
+
+A **document type** is a class of files that share a common structure and
+semantics.
+
+A **document schema** is a formal description of the grammar of a document type.
+
+A **base URI** is a context-dependent URI used to resolve relative references.
+
+An **identifier** is a URI that designates a single document or single
+object within a document.
+
+A **vocabulary** is the set of symbolic field names and enumerated symbols defined
+by a document schema, where each term maps to an absolute URI.
+
+## Syntax
+
+Conforming Salad documents are serialized and loaded using YAML syntax and
+UTF-8 text encoding.  Salad documents are written using the JSON-compatible
+subset of YAML.  Features of YAML such as headers and type tags that are
+not found in the standard JSON data model must not be used in conforming
+Salad documents.  It is a fatal error if the document is not valid YAML.
+
+A Salad document must consist only of either a single root object or an
+array of objects.
+
+## Document context
+
+### Implied context
+
+The implicit context consists of the vocabulary defined by the schema and
+the base URI.  By default, the base URI must be the URI that was used to
+load the document.  It may be overridden by an explicit context.
+
+### Explicit context
+
+If a document consists of a root object, this object may contain the
+fields `$base`, `$namespaces`, `$schemas`, and `$graph`:
+
+  * `$base`: Must be a string.  Set the base URI for the document used to
+    resolve relative references.
+
+  * `$namespaces`: Must be an object with strings as values.  The keys of
+    the object are namespace prefixes used in the document; the values of
+    the object are the prefix expansions.
+
+  * `$schemas`: Must be an array of strings.  This field may list URI
+    references to documents in RDF-XML format which will be queried for RDF
+    schema data.  The subjects and predicates described by the RDF schema
+    may provide additional semantic context for the document, and may be
+    used for validation of prefixed extension fields found in the document.
+
+Other directives beginning with `$` must be ignored.
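+
+For example, a root object carrying an explicit context might begin as
+follows (URIs and names are illustrative):
+
+```
+$base: "http://example.com/base#"
+$namespaces:
+  acid: "http://example.com/acid#"
+$graph:
+  - name: Colors
+    type: enum
+    symbols: ["acid:red"]
+```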
+
+## Document graph
+
+If a document consists of a single root object, this object may contain the
+field `$graph`.  This field must be an array of objects.  If present, this
+field holds the primary content of the document.  A document that consists
+of an array of objects at the root is an implicit graph.
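+
+Illustratively, the same content can be written as an implicit graph, an
+array of objects at the document root:
+
+```
+- name: Colors
+  type: enum
+  symbols: ["acid:red"]
+```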
+
+## Document metadata
+
+If a document consists of a single root object, metadata about the
+document, such as authorship, may be declared in the root object.
+
+## Document schema
+
+Document preprocessing, link validation and schema validation require a
+document schema.  A schema may consist of:
+
+  * At least one record definition object which defines valid fields that
+  make up a record type.  Record field definitions include the valid types
+  that may be assigned to each field and annotations to indicate fields
+  that represent identifiers and links, described below in "Semantic
+  Annotations".
+
+  * Any number of enumerated type objects which define a finite set of
+  symbols that are the valid values of the type.
+
+  * Any number of documentation objects which allow in-line documentation of the schema.
+
+The schema for defining a Salad schema (the metaschema) is described in
+detail in "Schema validation".
+
+### Record field annotations
+
+In a document schema, record field definitions may include the field
+`jsonldPredicate`, which may be either a string or object.  Implementations
+must use the following document preprocessing of fields by the following
+rules:
+
+  * If the value of `jsonldPredicate` is `@id`, the field is an identifier
+  field.
+
+  * If the value of `jsonldPredicate` is an object, and that object
+  contains the field `_type` with the value `@id`, the field is a
+  link field.
+
+  * If the value of `jsonldPredicate` is an object, and that object
+  contains the field `_type` with the value `@vocab`, the field is a
+  vocabulary field, which is a subtype of link field.
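+
+For example, a schema might declare one field of each kind (record and
+field names are illustrative):
+
+```
+- name: ExampleType
+  type: record
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"    # identifier field
+    - name: parent
+      type: string
+      jsonldPredicate:
+        _type: "@id"            # link field
+    - name: category
+      type: string
+      jsonldPredicate:
+        _type: "@vocab"         # vocabulary field
+```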
+
+## Document traversal
+
+To perform document preprocessing, link validation and schema
+validation, the document must be traversed starting from the fields or
+array items of the root object or array and recursively visiting each child
+item which contains an object or array.
+
+# Document preprocessing
+
+After processing the explicit context (if any), document preprocessing
+begins.  Starting from the document root, object field values or array
+items which contain objects or arrays are recursively traversed
+depth-first.  For each visited object, field names, identifier fields, link
+fields, vocabulary fields, and `$import` and `$include` directives must be
+processed as described in this section.  The order of traversal of child
+nodes within a parent node is undefined.
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res.yml
new file mode 100644
index 0000000..4555f5b
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res.yml
@@ -0,0 +1,35 @@
+- |
+  ## Vocabulary resolution
+
+    The schema may designate one or more vocabulary fields which use terms
+    defined in the vocabulary.  Processing must resolve vocabulary fields to
+    either vocabulary terms or absolute URIs by first applying the link
+    resolution rules defined above, then applying the following additional
+    rule:
+
+      * If a reference URI is a vocabulary field, and there is a vocabulary
+      term which maps to the resolved URI, the reference must be replaced with
+      the vocabulary term.
+
+  ### Vocabulary resolution example
+
+  Given the following schema:
+
+  ```
+- $include: vocab_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: vocab_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: vocab_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_proc.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_proc.yml
new file mode 100644
index 0000000..d13ab15
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_proc.yml
@@ -0,0 +1,15 @@
+    {
+      "form": {
+        "things": [
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "http://example.com/acid#blue",
+          }
+        ]
+      }
+    }
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_schema.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_schema.yml
new file mode 100644
index 0000000..92b271e
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_schema.yml
@@ -0,0 +1,21 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "Colors",
+    "type": "enum",
+    "symbols": ["acid:red"]
+  },
+  {
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "voc",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@vocab"
+      }
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_src.yml b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_src.yml
new file mode 100644
index 0000000..82954f1
--- /dev/null
+++ b/cwltool/schemas/v1.0/salad/schema_salad/metaschema/vocab_res_src.yml
@@ -0,0 +1,15 @@
+    {
+      "form": {
+        "things": [
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "http://example.com/acid#red",
+          },
+          {
+            "voc": "http://example.com/acid#blue",
+          }
+        ]
+      }
+    }
diff --git a/cwltool/schemas/v1.0/userguide-intro.md b/cwltool/schemas/v1.0/userguide-intro.md
new file mode 100644
index 0000000..342cd90
--- /dev/null
+++ b/cwltool/schemas/v1.0/userguide-intro.md
@@ -0,0 +1,28 @@
+# A Gentle Introduction to the Common Workflow Language
+
+Hello!
+
+This guide will introduce you to writing tool wrappers and workflows using the
+Common Workflow Language (CWL).  This guide describes the current stable
+specification, version 1.0.
+
+Note: This document is a work in progress.  Not all features are covered yet.
+
+<!--ToC-->
+
+# Introduction
+
+CWL is a way to describe command line tools and connect them together to create
+workflows.  Because CWL is a specification and not a specific piece of
+software, tools and workflows described using CWL are portable across a variety
+of platforms that support the CWL standard.
+
+CWL has roots in "make" and many similar tools that determine order of
+execution based on dependencies between tasks.  However, unlike "make", CWL
+tasks are isolated and you must be explicit about your inputs and outputs.  The
+benefits of explicitness and isolation are flexibility, portability, and
+scalability: tools and workflows described with CWL can transparently leverage
+technologies such as Docker, be used with CWL implementations from different
+vendors, and are well suited for describing large-scale workflows in cluster,
+cloud and high performance computing environments where tasks are scheduled in
+parallel across many nodes.
diff --git a/cwltool/schemas/v1.1.0-dev1/CommandLineTool-standalone.yml b/cwltool/schemas/v1.1.0-dev1/CommandLineTool-standalone.yml
new file mode 100644
index 0000000..10dbffa
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/CommandLineTool-standalone.yml
@@ -0,0 +1,2 @@
+- $import: Process.yml
+- $import: CommandLineTool.yml
\ No newline at end of file
diff --git a/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml b/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml
new file mode 100644
index 0000000..d98f85a
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/CommandLineTool.yml
@@ -0,0 +1,948 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+
+$graph:
+
+- name: CommandLineToolDoc
+  type: documentation
+  doc:
+    - |
+      # Common Workflow Language (CWL) Command Line Tool Description, v1.1.0-dev1
+
+      This version:
+        * https://w3id.org/cwl/v1.1.0-dev1/
+
+      Current version:
+        * https://w3id.org/cwl/
+    - "\n\n"
+    - {$include: contrib.md}
+    - "\n\n"
+    - |
+      # Abstract
+
+      A Command Line Tool is a non-interactive executable program that reads
+      some input, performs a computation, and terminates after producing some
+      output.  Command line programs are a flexible unit of code sharing and
+      reuse, unfortunately the syntax and input/output semantics among command
+      line programs is extremely heterogeneous. A common layer for describing
+      the syntax and semantics of programs can reduce this incidental
+      complexity by providing a consistent way to connect programs together.
+      This specification defines the Common Workflow Language (CWL) Command
+      Line Tool Description, a vendor-neutral standard for describing the
+      syntax and input/output semantics of command line programs.
+
+    - {$include: intro.md}
+
+    - |
+      ## Introduction to v1.1.0-dev1
+
+      This is the in progress first development version of the first
+      maintenance release of the CWL CommandLineTool specification.
+      Version 1.1 introduces the following additions:
+
+        * Addition of `stdin` type shortcut for `CommandInputParameter`s.
+
+      ## Introduction to v1.0
+
+      This specification represents the first full release from the CWL group.
+      Since draft-3, version 1.0 introduces the following changes and additions:
+
+        * The [Directory](#Directory) type.
+        * Syntax simplifications: denoted by the `map<>` syntax. Example: inputs
+          contains a list of items, each with an id. Now one can specify
+          a mapping of that identifier to the corresponding
+          `CommandInputParameter`.
+          ```
+          inputs:
+           - id: one
+             type: string
+             doc: First input parameter
+           - id: two
+             type: int
+             doc: Second input parameter
+          ```
+          can be
+          ```
+          inputs:
+           one:
+            type: string
+            doc: First input parameter
+           two:
+            type: int
+            doc: Second input parameter
+          ```
+        * [InitialWorkDirRequirement](#InitialWorkDirRequirement): list of
+          files and subdirectories to be present in the output directory prior
+          to execution.
+        * Shortcuts for specifying the standard [output](#stdout) and/or
+          [error](#stderr) streams as a (streamable) File output.
+        * [SoftwareRequirement](#SoftwareRequirement) for describing software
+          dependencies of a tool.
+        * The common `description` field has been renamed to `doc`.
+
+      ## Errata
+
+      Post v1.0 release changes to the spec.
+
+        * 13 July 2016: Mark `baseCommand` as optional and update descriptive text.
+
+      ## Purpose
+
+      Standalone programs are a flexible and interoperable form of code reuse.
+      Unlike monolithic applications, applications and analysis workflows which
+      are composed of multiple separate programs can be written in multiple
+      languages and execute concurrently on multiple hosts.  However, POSIX
+      does not dictate computer-readable grammar or semantics for program input
+      and output, resulting in extremely heterogeneous command line grammar and
+      input/output semantics among programs.  This is a particular problem in
+      distributed computing (multi-node compute clusters) and virtualized
+      environments (such as Docker containers) where it is often necessary to
+      provision resources such as input files before executing the program.
+
+      Often this gap is filled by hard coding program invocation and
+      implicitly assuming requirements will be met, or abstracting program
+      invocation with wrapper scripts or descriptor documents.  Unfortunately,
+      where these approaches are application or platform specific, they create a
+      significant barrier to reproducibility and portability, as methods
+      developed for one platform must be manually ported to be used on new
+      platforms.  Similarly, they create redundant work, as wrappers for popular
+      tools must be rewritten for each application or platform in use.
+
+      The Common Workflow Language Command Line Tool Description is designed to
+      provide a common standard description of grammar and semantics for
+      invoking programs used in data-intensive fields such as Bioinformatics,
+      Chemistry, Physics, Astronomy, and Statistics.  This specification
+      defines a precise data and execution model for Command Line Tools that
+      can be implemented on a variety of computing platforms, ranging from a
+      single workstation to cluster, grid, cloud, and high performance
+      computing platforms.
+
+    - {$include: concepts.md}
+    - {$include: invocation.md}
+
+
+- type: record
+  name: EnvironmentDef
+  doc: |
+    Define an environment variable that will be set in the runtime environment
+    by the workflow platform when executing the command line tool.  May be the
+    result of executing an expression, such as getting a parameter from input.
+  fields:
+    - name: envName
+      type: string
+      doc: The environment variable name
+    - name: envValue
+      type: [string, Expression]
+      doc: The environment variable value
+
+- type: record
+  name: CommandLineBinding
+  extends: InputBinding
+  doc: |
+
+    When listed under `inputBinding` in the input schema, the term
+    "value" refers to the the corresponding value in the input object.  For
+    binding objects listed in `CommandLineTool.arguments`, the term "value"
+    refers to the effective value after evaluating `valueFrom`.
+
+    The binding behavior when building the command line depends on the data
+    type of the value.  If there is a mismatch between the type described by
+    the input schema and the effective value, such as resulting from an
+    expression evaluation, an implementation must use the data type of the
+    effective value.
+
+      - **string**: Add `prefix` and the string to the command line.
+
+      - **number**: Add `prefix` and decimal representation to command line.
+
+      - **boolean**: If true, add `prefix` to the command line.  If false, add
+          nothing.
+
+      - **File**: Add `prefix` and the value of
+        [`File.path`](#File) to the command line.
+
+      - **array**: If `itemSeparator` is specified, add `prefix` and the
+          array joined into a single string with `itemSeparator` separating the
+          items.  Otherwise first add `prefix`, then recursively process
+          individual elements.
+
+      - **object**: Add `prefix` only, and recursively add object fields for
+          which `inputBinding` is specified.
+
+      - **null**: Add nothing.
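+
+    As an illustrative sketch (input name invented): an array input bound with
+    `prefix: -s` and `itemSeparator: ","`, given the value `["a", "b", "c"]`,
+    contributes `-s a,b,c` to the command line:
+
+    ```
+    inputs:
+      letters:
+        type: string[]
+        inputBinding:
+          prefix: -s
+          itemSeparator: ","
+    ```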
+
+  fields:
+    - name: position
+      type: int?
+      doc: "The sorting key.  Default position is 0."
+    - name: prefix
+      type: string?
+      doc: "Command line prefix to add before the value."
+    - name: separate
+      type: boolean?
+      doc: |
+        If true (default), then the prefix and value must be added as separate
+        command line arguments; if false, prefix and value must be concatenated
+        into a single command line argument.
+    - name: itemSeparator
+      type: string?
+      doc: |
+        Join the array elements into a single string with the elements
+        separated by `itemSeparator`.
+    - name: valueFrom
+      type:
+        - "null"
+        - string
+        - Expression
+      jsonldPredicate: "cwl:valueFrom"
+      doc: |
+        If `valueFrom` is a constant string value, use this as the value and
+        apply the binding rules above.
+
+        If `valueFrom` is an expression, evaluate the expression to yield the
+        actual value to use to build the command line and apply the binding
+        rules above.  If the inputBinding is associated with an input
+        parameter, the value of `self` in the expression will be the value of the
+        input parameter.
+
+        When a binding is part of the `CommandLineTool.arguments` field,
+        the `valueFrom` field is required.
+    - name: shellQuote
+      type: boolean?
+      doc: |
+        If `ShellCommandRequirement` is in the requirements for the current command,
+        this controls whether the value is quoted on the command line (default is true).
+        Use `shellQuote: false` to inject metacharacters for operations such as pipes.
+
+- type: record
+  name: CommandOutputBinding
+  extends: OutputBinding
+  doc: |
+    Describes how to generate an output parameter based on the files produced
+    by a CommandLineTool.
+
+    The output parameter is generated by applying these operations in
+    the following order:
+
+      - glob
+      - loadContents
+      - outputEval
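+
+    A minimal sketch of all three operations together (glob pattern
+    illustrative; assumes at least one matching file):
+
+    ```
+    outputBinding:
+      glob: "*.txt"
+      loadContents: true
+      outputEval: $(self[0].contents)
+    ```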
+  fields:
+    - name: glob
+      type:
+        - "null"
+        - string
+        - Expression
+        - type: array
+          items: string
+      doc: |
+        Find files relative to the output directory, using POSIX glob(3)
+        pathname matching.  If an array is provided, find files that match any
+        pattern in the array.  If an expression is provided, the expression must
+        return a string or an array of strings, which will then be evaluated as
+        one or more glob patterns.  Must only match and return files which
+        actually exist.
+    - name: loadContents
+      type:
+        - "null"
+        - boolean
+      jsonldPredicate: "cwl:loadContents"
+      doc: |
+        For each file matched in `glob`, read up to
+        the first 64 KiB of text from the file and place it in the `contents`
+        field of the file object for manipulation by `outputEval`.
+    - name: outputEval
+      type:
+        - "null"
+        - string
+        - Expression
+      doc: |
+        Evaluate an expression to generate the output value.  If `glob` was
+        specified, the value of `self` must be an array containing file objects
+        that were matched.  If no files were matched, `self` must be a zero
+        length array; if a single file was matched, the value of `self` is an
+        array of a single element.  Additionally, if `loadContents` is `true`,
+        the File objects must include up to the first 64 KiB of file contents
+        in the `contents` field.
+
+
+- name: CommandInputRecordField
+  type: record
+  extends: InputRecordField
+  specialize:
+    - specializeFrom: InputRecordSchema
+      specializeTo: CommandInputRecordSchema
+    - specializeFrom: InputEnumSchema
+      specializeTo: CommandInputEnumSchema
+    - specializeFrom: InputArraySchema
+      specializeTo: CommandInputArraySchema
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+
+- name: CommandInputRecordSchema
+  type: record
+  extends: InputRecordSchema
+  specialize:
+    - specializeFrom: InputRecordField
+      specializeTo: CommandInputRecordField
+
+
+- name: CommandInputEnumSchema
+  type: record
+  extends: InputEnumSchema
+  specialize:
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+
+- name: CommandInputArraySchema
+  type: record
+  extends: InputArraySchema
+  specialize:
+    - specializeFrom: InputRecordSchema
+      specializeTo: CommandInputRecordSchema
+    - specializeFrom: InputEnumSchema
+      specializeTo: CommandInputEnumSchema
+    - specializeFrom: InputArraySchema
+      specializeTo: CommandInputArraySchema
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+
+
+- name: CommandOutputRecordField
+  type: record
+  extends: OutputRecordField
+  specialize:
+    - specializeFrom: OutputRecordSchema
+      specializeTo: CommandOutputRecordSchema
+    - specializeFrom: OutputEnumSchema
+      specializeTo: CommandOutputEnumSchema
+    - specializeFrom: OutputArraySchema
+      specializeTo: CommandOutputArraySchema
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+
+
+- name: CommandOutputRecordSchema
+  type: record
+  extends: OutputRecordSchema
+  specialize:
+    - specializeFrom: OutputRecordField
+      specializeTo: CommandOutputRecordField
+
+
+- name: CommandOutputEnumSchema
+  type: record
+  extends: OutputEnumSchema
+  specialize:
+    - specializeFrom: OutputRecordSchema
+      specializeTo: CommandOutputRecordSchema
+    - specializeFrom: OutputEnumSchema
+      specializeTo: CommandOutputEnumSchema
+    - specializeFrom: OutputArraySchema
+      specializeTo: CommandOutputArraySchema
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+
+
+- name: CommandOutputArraySchema
+  type: record
+  extends: OutputArraySchema
+  specialize:
+    - specializeFrom: OutputRecordSchema
+      specializeTo: CommandOutputRecordSchema
+    - specializeFrom: OutputEnumSchema
+      specializeTo: CommandOutputEnumSchema
+    - specializeFrom: OutputArraySchema
+      specializeTo: CommandOutputArraySchema
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+
+
+- type: record
+  name: CommandInputParameter
+  extends: InputParameter
+  doc: An input parameter for a CommandLineTool.
+  specialize:
+    - specializeFrom: InputBinding
+      specializeTo: CommandLineBinding
+  fields:
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - stdin
+        - CommandInputRecordSchema
+        - CommandInputEnumSchema
+        - CommandInputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - CommandInputRecordSchema
+            - CommandInputEnumSchema
+            - CommandInputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
+
+- type: record
+  name: CommandOutputParameter
+  extends: OutputParameter
+  doc: An output parameter for a CommandLineTool.
+  specialize:
+    - specializeFrom: OutputBinding
+      specializeTo: CommandOutputBinding
+  fields:
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - stdin
+        - stdout
+        - stderr
+        - CommandOutputRecordSchema
+        - CommandOutputEnumSchema
+        - CommandOutputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - CommandOutputRecordSchema
+            - CommandOutputEnumSchema
+            - CommandOutputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
+
+- name: stdin
+  type: enum
+  symbols: [ "cwl:stdin" ]
+  docParent: "#CommandOutputParameter"
+  doc: |
+    Only valid as a `type` for a `CommandLineTool` input with no
+    `inputBinding` set. `stdin` must not be specified at the `CommandLineTool`
+    level.
+
+    The following
+    ```
+    inputs:
+      an_input_name:
+        type: stdin
+    ```
+    is equivalent to
+    ```
+    inputs:
+      an_input_name:
+        type: File
+        streamable: true
+
+    stdin: $(inputs.an_input_name.path)
+    ```
+
+- name: stdout
+  type: enum
+  symbols: [ "cwl:stdout" ]
+  docParent: "#CommandOutputParameter"
+  doc: |
+    Only valid as a `type` for a `CommandLineTool` output with no
+    `outputBinding` set.
+
+    The following
+    ```
+    outputs:
+      an_output_name:
+        type: stdout
+
+    stdout: a_stdout_file
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: a_stdout_file
+
+    stdout: a_stdout_file
+    ```
+
+    If there is no `stdout` name provided, a random filename will be created.
+    For example, the following
+    ```
+    outputs:
+      an_output_name:
+        type: stdout
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: random_stdout_filenameABCDEFG
+
+    stdout: random_stdout_filenameABCDEFG
+    ```
+
+
+- name: stderr
+  type: enum
+  symbols: [ "cwl:stderr" ]
+  docParent: "#CommandOutputParameter"
+  doc: |
+    Only valid as a `type` for a `CommandLineTool` output with no
+    `outputBinding` set.
+
+    The following
+    ```
+    outputs:
+      an_output_name:
+        type: stderr
+
+    stderr: a_stderr_file
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: a_stderr_file
+
+    stderr: a_stderr_file
+    ```
+
+    If there is no `stderr` name provided, a random filename will be created.
+    For example, the following
+    ```
+    outputs:
+      an_output_name:
+        type: stderr
+    ```
+    is equivalent to
+    ```
+    outputs:
+      an_output_name:
+        type: File
+        streamable: true
+        outputBinding:
+          glob: random_stderr_filenameABCDEFG
+
+    stderr: random_stderr_filenameABCDEFG
+    ```
+
+
+- type: record
+  name: CommandLineTool
+  extends: Process
+  documentRoot: true
+  specialize:
+    - specializeFrom: InputParameter
+      specializeTo: CommandInputParameter
+    - specializeFrom: OutputParameter
+      specializeTo: CommandOutputParameter
+  doc: |
+    This defines the schema of the CWL Command Line Tool Description document.
+
+  fields:
+    - name: class
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+      type: string
+    - name: baseCommand
+      doc: |
+        Specifies the program to execute.  If an array, the first element of
+        the array is the command to execute, and subsequent elements are
+        mandatory command line arguments.  The elements in `baseCommand` must
+        appear before any command line bindings from `inputBinding` or
+        `arguments`.
+
+        If `baseCommand` is not provided or is an empty array, the first
+        element of the command line produced after processing `inputBinding` or
+        `arguments` must be used as the program to execute.
+
+        If the program includes a path separator character it must
+        be an absolute path, otherwise it is an error.  If the program does not
+        include a path separator, search the `$PATH` variable in the runtime
+        environment of the workflow runner to find the absolute path of the
+        executable.
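+
+        For example (an illustrative sketch):
+
+        ```
+        baseCommand: [tar, xf]
+        ```
+
+        runs `tar` with `xf` as a mandatory argument, ahead of any other
+        command line bindings.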
+      type:
+        - string?
+        - string[]?
+      jsonldPredicate:
+        "_id": "cwl:baseCommand"
+        "_container": "@list"
+    - name: arguments
+      doc: |
+        Command line bindings which are not directly associated with input parameters.
+      type:
+        - "null"
+        - type: array
+          items: [string, Expression, CommandLineBinding]
+      jsonldPredicate:
+        "_id": "cwl:arguments"
+        "_container": "@list"
+    - name: stdin
+      type: ["null", string, Expression]
+      jsonldPredicate: "https://w3id.org/cwl/cwl#stdin"
+      doc: |
+        A path to a file whose contents must be piped into the command's
+        standard input stream.
+    - name: stderr
+      type: ["null", string, Expression]
+      jsonldPredicate: "https://w3id.org/cwl/cwl#stderr"
+      doc: |
+        Capture the command's standard error stream to a file written to
+        the designated output directory.
+
+        If `stderr` is a string, it specifies the file name to use.
+
+        If `stderr` is an expression, the expression is evaluated and must
+        return a string with the file name to use to capture stderr.  If the
+        return value is not a string, or the resulting path contains illegal
+        characters (such as the path separator `/`) it is an error.
+    - name: stdout
+      type: ["null", string, Expression]
+      jsonldPredicate: "https://w3id.org/cwl/cwl#stdout"
+      doc: |
+        Capture the command's standard output stream to a file written to
+        the designated output directory.
+
+        If `stdout` is a string, it specifies the file name to use.
+
+        If `stdout` is an expression, the expression is evaluated and must
+        return a string with the file name to use to capture stdout.  If the
+        return value is not a string, or the resulting path contains illegal
+        characters (such as the path separator `/`) it is an error.
+    - name: successCodes
+      type: int[]?
+      doc: |
+        Exit codes that indicate the process completed successfully.
+
+    - name: temporaryFailCodes
+      type: int[]?
+      doc: |
+        Exit codes that indicate the process failed due to a possibly
+        temporary condition, where executing the process with the same
+        runtime environment and inputs may produce different results.
+
+    - name: permanentFailCodes
+      type: int[]?
+      doc:
+        Exit codes that indicate the process failed due to a permanent logic
+        error, where executing the process with the same runtime environment and
+        same inputs is expected to always fail.
+
+
+- type: record
+  name: DockerRequirement
+  extends: ProcessRequirement
+  doc: |
+    Indicates that a workflow component should be run in a
+    [Docker](http://docker.com) container, and specifies how to fetch or build
+    the image.
+
+    If a CommandLineTool lists `DockerRequirement` under
+    `hints` (or `requirements`), it may (or must) be run in the specified Docker
+    container.
+
+    The platform must first acquire or install the correct Docker image as
+    specified by `dockerPull`, `dockerImport`, `dockerLoad` or `dockerFile`.
+
+    The platform must execute the tool in the container using `docker run` with
+    the appropriate Docker image and tool command line.
+
+    The workflow platform may provide input files and the designated output
+    directory through the use of volume bind mounts.  The platform may rewrite
+    file paths in the input object to correspond to the Docker bind mounted
+    locations.
+
+    When running a tool contained in Docker, the workflow platform must not
+    assume anything about the contents of the Docker container, such as the
+    presence or absence of specific software, except to assume that the
+    generated command line represents a valid command within the runtime
+    environment of the container.
+
+    ## Interaction with other requirements
+
+    If [EnvVarRequirement](#EnvVarRequirement) is specified alongside a
+    DockerRequirement, the environment variables must be provided to Docker
+    using `--env` or `--env-file` and interact with the container's preexisting
+    environment as defined by Docker.
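+
+    An illustrative sketch (image name is an example only):
+
+    ```
+    requirements:
+      - class: DockerRequirement
+        dockerPull: debian:8
+    ```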
+
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'DockerRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: dockerPull
+      type: string?
+      doc: "Specify a Docker image to retrieve using `docker pull`."
+    - name: dockerLoad
+      type: string?
+      doc: "Specify a HTTP URL from which to download a Docker image using `docker load`."
+    - name: dockerFile
+      type: string?
+      doc: "Supply the contents of a Dockerfile which will be built using `docker build`."
+    - name: dockerImport
+      type: string?
+      doc: "Provide HTTP URL to download and gunzip a Docker images using `docker import."
+    - name: dockerImageId
+      type: string?
+      doc: |
+        The image id that will be used for `docker run`.  May be a
+        human-readable image name or the image identifier hash.  May be skipped
+        if `dockerPull` is specified, in which case the `dockerPull` image id
+        must be used.
+    - name: dockerOutputDirectory
+      type: string?
+      doc: |
+        Set the designated output directory to a specific location inside the
+        Docker container.
+
+
+- type: record
+  name: SoftwareRequirement
+  extends: ProcessRequirement
+  doc: |
+    A list of software packages that should be configured in the environment of
+    the defined process.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'SoftwareRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: packages
+      type: SoftwarePackage[]
+      doc: "The list of software to be configured."
+      jsonldPredicate:
+        mapSubject: package
+        mapPredicate: specs
+
+- name: SoftwarePackage
+  type: record
+  fields:
+    - name: package
+      type: string
+      doc: "The common name of the software to be configured."
+      jsonldPredicate: "@id"
+    - name: version
+      type: string[]?
+      doc: "The (optional) version of the software to configured."
+    - name: specs
+      type: string[]?
+      doc: |
+        Must be one or more IRIs identifying resources for installing or
+        enabling the software.  Implementations may provide resolvers which map
+        well-known software spec IRIs to some configuration action.
+
+        For example, an IRI `https://packages.debian.org/jessie/bowtie` could
+        be resolved with `apt-get install bowtie`.  An IRI
+        `https://anaconda.org/bioconda/bowtie` could be resolved with `conda
+        install -c bioconda bowtie`.
+
+        Tools may also provide IRIs to index entries such as
+        [RRID](http://www.identifiers.org/rrid/), such as
+        `http://identifiers.org/rrid/RRID:SCR_005476`
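+
+        An illustrative sketch combining these fields (the version shown is
+        arbitrary; the IRI is drawn from the examples above):
+
+        ```
+        hints:
+          - class: SoftwareRequirement
+            packages:
+              - package: bowtie
+                version: ["1.1.2"]
+                specs: ["https://anaconda.org/bioconda/bowtie"]
+        ```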
+
+
+- name: Dirent
+  type: record
+  doc: |
+    Define a file or subdirectory that must be placed in the designated output
+    directory prior to executing the command line tool.  May be the result of
+    executing an expression, such as building a configuration file from a
+    template.
+  fields:
+    - name: entryname
+      type: ["null", string, Expression]
+      jsonldPredicate:
+        _id: cwl:entryname
+      doc: |
+        The name of the file or subdirectory to create in the output directory.
+        If `entry` is a File or Directory, this overrides `basename`.  Optional.
+    - name: entry
+      type: [string, Expression]
+      jsonldPredicate:
+        _id: cwl:entry
+      doc: |
+        If the value is a string literal or an expression which evaluates to a
+        string, a new file must be created with the string as the file contents.
+
+        If the value is an expression that evaluates to a `File` object, this
+        indicates the referenced file should be added to the designated output
+        directory prior to executing the tool.
+
+        If the value is an expression that evaluates to a `Dirent` object, this
+        indicates that the File or Directory in `entry` should be added to the
+        designated output directory with the name in `entryname`.
+
+        If `writable` is false, the file may be made available using a bind
+        mount or file system link to avoid unnecessary copying of the input
+        file.
+    - name: writable
+      type: boolean?
+      doc: |
+        If true, the file or directory must be writable by the tool.  Changes
+        to the file or directory must be isolated and not visible by any other
+        CommandLineTool process.  This may be implemented by making a copy of
+        the original file or directory.  Default false (files and directories
+        read-only by default).
+
+
+- name: InitialWorkDirRequirement
+  type: record
+  extends: ProcessRequirement
+  doc:
+    Define a list of files and subdirectories that must be created by the
+    workflow platform in the designated output directory prior to executing the
+    command line tool.
+  fields:
+    - name: class
+      type: string
+      doc: InitialWorkDirRequirement
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: listing
+      type:
+        - type: array
+          items: [File, Directory, Dirent, string, Expression]
+        - string
+        - Expression
+      jsonldPredicate:
+        _id: "cwl:listing"
+      doc: |
+        The list of files or subdirectories that must be placed in the
+        designated output directory prior to executing the command line tool.
+
+        May be an expression.  If so, the expression return value must validate
+        as `{type: array, items: [File, Directory]}`.
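+
+        An illustrative sketch (file name and input parameter invented):
+
+        ```
+        requirements:
+          - class: InitialWorkDirRequirement
+            listing:
+              - entryname: settings.conf
+                entry: |
+                  threshold=$(inputs.threshold)
+        ```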
+
+
+- name: EnvVarRequirement
+  type: record
+  extends: ProcessRequirement
+  doc: |
+    Define a list of environment variables which will be set in the
+    execution environment of the tool.  See `EnvironmentDef` for details.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'EnvVarRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: envDef
+      type: EnvironmentDef[]
+      doc: The list of environment variables.
+      jsonldPredicate:
+        mapSubject: envName
+        mapPredicate: envValue
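+# Note (illustrative): with mapSubject/mapPredicate above, the map form
+#   envDef:
+#     TMPDIR: $(runtime.tmpdir)
+# is shorthand for
+#   envDef:
+#     - envName: TMPDIR
+#       envValue: $(runtime.tmpdir)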
+
+
+- type: record
+  name: ShellCommandRequirement
+  extends: ProcessRequirement
+  doc: |
+    Modify the behavior of CommandLineTool to generate a single string
+    containing a shell command line.  Each item in the argument list must be
+    joined into a string separated by single spaces and quoted to prevent
+    interpretation by the shell, unless `CommandLineBinding` for that argument
+    contains `shellQuote: false`.  If `shellQuote: false` is specified, the
+    argument is joined into the command string without quoting, which allows
+    the use of shell metacharacters such as `|` for pipes.
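+
+    An illustrative sketch using a pipe (commands are examples only):
+
+    ```
+    requirements:
+      - class: ShellCommandRequirement
+    arguments:
+      - shellQuote: false
+        valueFrom: "cat input.txt | wc -l"
+    ```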
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'ShellCommandRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+
+
+- type: record
+  name: ResourceRequirement
+  extends: ProcessRequirement
+  doc: |
+    Specify basic hardware resource requirements.
+
+    "min" is the minimum amount of a resource that must be reserved to schedule
+    a job. If "min" cannot be satisfied, the job should not be run.
+
+    "max" is the maximum amount of a resource that the job shall be permitted
+    to use. If a node has sufficient resources, multiple jobs may be scheduled
+    on a single node provided each job's "max" resource requirements are
+    met. If a job attempts to exceed its "max" resource allocation, an
+    implementation may deny additional resources, which may result in job
+    failure.
+
+    If "min" is specified but "max" is not, then "max" == "min"
+    If "max" is specified by "min" is not, then "min" == "max".
+
+    It is an error if max < min.
+
+    It is an error if the value of any of these fields is negative.
+
+    If neither "min" nor "max" is specified for a resource, an implementation may provide a default.
+
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'ResourceRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: coresMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved number of CPU cores
+
+    - name: coresMax
+      type: ["null", int, string, Expression]
+      doc: Maximum reserved number of CPU cores
+
+    - name: ramMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved RAM in mebibytes (2**20)
+
+    - name: ramMax
+      type: ["null", long, string, Expression]
+      doc: Maximum reserved RAM in mebibytes (2**20)
+
+    - name: tmpdirMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved filesystem based storage for the designated temporary directory, in mebibytes (2**20)
+
+    - name: tmpdirMax
+      type: ["null", long, string, Expression]
+      doc: Maximum reserved filesystem based storage for the designated temporary directory, in mebibytes (2**20)
+
+    - name: outdirMin
+      type: ["null", long, string, Expression]
+      doc: Minimum reserved filesystem based storage for the designated output directory, in mebibytes (2**20)
+
+    - name: outdirMax
+      type: ["null", long, string, Expression]
+      doc: Maximum reserved filesystem based storage for the designated output directory, in mebibytes (2**20)
diff --git a/cwltool/schemas/v1.1.0-dev1/CommonWorkflowLanguage.yml b/cwltool/schemas/v1.1.0-dev1/CommonWorkflowLanguage.yml
new file mode 100644
index 0000000..73921e8
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/CommonWorkflowLanguage.yml
@@ -0,0 +1,11 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+  sld: "https://w3id.org/cwl/salad#"
+
+$graph:
+
+- $import: Process.yml
+- $import: CommandLineTool.yml
+- $import: Workflow.yml
diff --git a/cwltool/schemas/v1.1.0-dev1/Process.yml b/cwltool/schemas/v1.1.0-dev1/Process.yml
new file mode 100644
index 0000000..cf8e03a
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/Process.yml
@@ -0,0 +1,749 @@
+$base: "https://w3id.org/cwl/cwl#"
+
+$namespaces:
+  cwl: "https://w3id.org/cwl/cwl#"
+  sld: "https://w3id.org/cwl/salad#"
+
+$graph:
+
+- name: "Common Workflow Language, v1.1.0-dev1"
+  type: documentation
+  doc: {$include: concepts.md}
+
+- $import: "salad/schema_salad/metaschema/metaschema_base.yml"
+
+- name: BaseTypesDoc
+  type: documentation
+  doc: |
+    ## Base types
+  docChild:
+    - "#CWLType"
+    - "#Process"
+
+- type: enum
+  name: CWLVersion
+  doc: "Version symbols for published CWL document versions."
+  symbols:
+    - cwl:draft-2
+    - cwl:draft-3.dev1
+    - cwl:draft-3.dev2
+    - cwl:draft-3.dev3
+    - cwl:draft-3.dev4
+    - cwl:draft-3.dev5
+    - cwl:draft-3
+    - cwl:draft-4.dev1
+    - cwl:draft-4.dev2
+    - cwl:draft-4.dev3
+    - cwl:v1.0.dev4
+    - cwl:v1.0
+    - cwl:v1.1.0-dev1 # a dash is required by the semver 2.0 rules
+
+- name: CWLType
+  type: enum
+  extends: "sld:PrimitiveType"
+  symbols:
+    - cwl:File
+    - cwl:Directory
+  doc:
+    - "Extends primitive types with the concept of a file and directory as a builtin type."
+    - "File: A File object"
+    - "Directory: A Directory object"
+
+- name: File
+  type: record
+  docParent: "#CWLType"
+  doc: |
+    Represents a file (or group of files if `secondaryFiles` is specified) that
+    must be accessible by tools using standard POSIX file system call API such as
+    open(2) and read(2).
+  fields:
+    - name: class
+      type:
+        type: enum
+        name: File_class
+        symbols:
+          - cwl:File
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+      doc: Must be `File` to indicate this object describes a file.
+    - name: location
+      type: string?
+      doc: |
+        An IRI that identifies the file resource.  This may be a relative
+        reference, in which case it must be resolved using the base IRI of the
+        document.  The location may refer to a local or remote resource; the
+        implementation must use the IRI to retrieve file content.  If an
+        implementation is unable to retrieve the file content stored at a
+        remote resource (due to unsupported protocol, access denied, or other
+        issue) it must signal an error.
+
+        If the `location` field is not provided, the `contents` field must be
+        provided.  The implementation must assign a unique identifier for
+        the `location` field.
+
+        If the `path` field is provided but the `location` field is not, an
+        implementation may assign the value of the `path` field to `location`,
+        then follow the rules above.
+      jsonldPredicate:
+        _id: "@id"
+        _type: "@id"
+    - name: path
+      type: string?
+      doc: |
+        The local host path where the File is available when a CommandLineTool is
+        executed.  This field must be set by the implementation.  The final
+        path component must match the value of `basename`.  This field
+        must not be used in any other context.  The command line tool being
+        executed must be able to access the file at `path` using the POSIX
+        `open(2)` syscall.
+
+        As a special case, if the `path` field is provided but the `location`
+        field is not, an implementation may assign the value of the `path`
+        field to `location`, and remove the `path` field.
+
+        If the `path` contains [POSIX shell metacharacters](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)
+        (`|`,`&`, `;`, `<`, `>`, `(`,`)`, `$`,`` ` ``, `\`, `"`, `'`,
+        `<space>`, `<tab>`, and `<newline>`) or characters
+        [not allowed](http://www.iana.org/assignments/idna-tables-6.3.0/idna-tables-6.3.0.xhtml)
+        for [Internationalized Domain Names for Applications](https://tools.ietf.org/html/rfc6452)
+        then implementations may terminate the process with a
+        `permanentFailure`.
+      jsonldPredicate:
+        "_id": "cwl:path"
+        "_type": "@id"
+    - name: basename
+      type: string?
+      doc: |
+        The base name of the file, that is, the name of the file without any
+        leading directory path.  The base name must not contain a slash `/`.
+
+        If not provided, the implementation must set this field based on the
+        `location` field by taking the final path component after parsing
+        `location` as an IRI.  If `basename` is provided, it is not required to
+        match the value from `location`.
+
+        When this file is made available to a CommandLineTool, it must be named
+        with `basename`, i.e. the final component of the `path` field must match
+        `basename`.
+      jsonldPredicate: "cwl:basename"
+    - name: dirname
+      type: string?
+      doc: |
+        The name of the directory containing the file, that is, the path leading up
+        to the final slash in the path such that `dirname + '/' + basename ==
+        path`.
+
+        The implementation must set this field based on the value of `path`
+        prior to evaluating parameter references or expressions in a
+        CommandLineTool document.  This field must not be used in any other
+        context.
+    - name: nameroot
+      type: string?
+      doc: |
+        The basename root such that `nameroot + nameext == basename`, and
+        `nameext` is empty or begins with a period and contains at most one
+        period.  For the purposes of path splitting, leading periods on the
+        basename are ignored; a basename of `.cshrc` will have a nameroot of
+        `.cshrc`.
+
+        The implementation must set this field automatically based on the value
+        of `basename` prior to evaluating parameter references or expressions.
+    - name: nameext
+      type: string?
+      doc: |
+        The basename extension such that `nameroot + nameext == basename`, and
+        `nameext` is empty or begins with a period and contains at most one
+        period.  Leading periods on the basename are ignored; a basename of
+        `.cshrc` will have an empty `nameext`.
+
+        The implementation must set this field automatically based on the value
+        of `basename` prior to evaluating parameter references or expressions.
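+
+        For example, a `basename` of `archive.tar.gz` has a `nameroot` of
+        `archive.tar` and a `nameext` of `.gz`.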
+    - name: checksum
+      type: string?
+      doc: |
+        Optional hash code for validating file integrity.  Currently this must
+        be in the form "sha1$" + hexadecimal string, using the SHA-1 algorithm.
+    - name: size
+      type: long?
+      doc: Optional file size
+    - name: "secondaryFiles"
+      type:
+        - "null"
+        - type: array
+          items: [File, Directory]
+      jsonldPredicate: "cwl:secondaryFiles"
+      doc: |
+        A list of additional files that are associated with the primary file
+        and must be transferred alongside the primary file.  Examples include
+        indexes of the primary file, or external references which must be
+        included when loading the primary document.  A file object listed in
+        `secondaryFiles` may itself include `secondaryFiles` for which the same
+        rules apply.
+    - name: format
+      type: string?
+      jsonldPredicate:
+        _id: cwl:format
+        _type: "@id"
+        identity: true
+      doc: |
+        The format of the file: this must be an IRI of a concept node that
+        represents the file format, preferably defined within an ontology.
+        If no ontology is available, file formats may be tested by exact match.
+
+        Reasoning about format compatibility must be done by checking that an
+        input file format is the same, `owl:equivalentClass` or
+        `rdfs:subClassOf` the format required by the input parameter.
+        `owl:equivalentClass` is transitive with `rdfs:subClassOf`, e.g. if
+        `<B> owl:equivalentClass <C>` and `<B> rdfs:subClassOf <A>` then infer
+        `<C> rdfs:subClassOf <A>`.
+
+        File format ontologies may be provided in the "$schema" metadata at the
+        root of the document.  If no ontologies are specified in `$schema`, the
+        runtime may perform exact file format matches.
+    - name: contents
+      type: string?
+      doc: |
+        File contents literal.  Maximum of 64 KiB.
+
+        If neither `location` nor `path` is provided, `contents` must be
+        non-null.  The implementation must assign a unique identifier for the
+        `location` field.  When the file is staged as input to a CommandLineTool,
+        the value of `contents` must be written to a file.
+
+        If `loadContents` of `inputBinding` or `outputBinding` is true and
+        `location` is valid, the implementation must read up to the first 64
+        KiB of text from the file and place it in the "contents" field.
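+
+        For example (an illustrative sketch; the names are hypothetical), a
+        file literal supplied directly in an input object:
+
+        ```
+        config:
+          class: File
+          basename: settings.conf
+          contents: |
+            verbose = true
+        ```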
+
+
+- name: Directory
+  type: record
+  docAfter: "#File"
+  doc: |
+    Represents a directory to present to a command line tool.
+  fields:
+    - name: class
+      type:
+        type: enum
+        name: Directory_class
+        symbols:
+          - cwl:Directory
+      jsonldPredicate:
+        _id: "@type"
+        _type: "@vocab"
+      doc: Must be `Directory` to indicate this object describes a Directory.
+    - name: location
+      type: string?
+      doc: |
+        An IRI that identifies the directory resource.  This may be a relative
+        reference, in which case it must be resolved using the base IRI of the
+        document.  The location may refer to a local or remote resource.  If
+        the `listing` field is not set, the implementation must use the
+        location IRI to retrieve the directory listing.  If an implementation is
+        unable to retrieve the directory listing stored at a remote resource (due to
+        unsupported protocol, access denied, or other issue) it must signal an
+        error.
+
+        If the `location` field is not provided, the `listing` field must be
+        provided.  The implementation must assign a unique identifier for
+        the `location` field.
+
+        If the `path` field is provided but the `location` field is not, an
+        implementation may assign the value of the `path` field to `location`,
+        then follow the rules above.
+      jsonldPredicate:
+        _id: "@id"
+        _type: "@id"
+    - name: path
+      type: string?
+      doc: |
+        The local path where the Directory is made available prior to executing a
+        CommandLineTool.  This must be set by the implementation.  This field
+        must not be used in any other context.  The command line tool being
+        executed must be able to access the directory at `path` using the POSIX
+        `opendir(2)` syscall.
+
+        If the `path` contains [POSIX shell metacharacters](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)
+        (`|`,`&`, `;`, `<`, `>`, `(`,`)`, `$`,`` ` ``, `\`, `"`, `'`,
+        `<space>`, `<tab>`, and `<newline>`) or characters
+        [not allowed](http://www.iana.org/assignments/idna-tables-6.3.0/idna-tables-6.3.0.xhtml)
+        for [Internationalized Domain Names for Applications](https://tools.ietf.org/html/rfc6452)
+        then implementations may terminate the process with a
+        `permanentFailure`.
+      jsonldPredicate:
+        _id: "cwl:path"
+        _type: "@id"
+    - name: basename
+      type: string?
+      doc: |
+        The base name of the directory, that is, the name of the directory without any
+        leading directory path.  The base name must not contain a slash `/`.
+
+        If not provided, the implementation must set this field based on the
+        `location` field by taking the final path component after parsing
+        `location` as an IRI.  If `basename` is provided, it is not required to
+        match the value from `location`.
+
+        When this directory is made available to a CommandLineTool, it must be named
+        with `basename`, i.e. the final component of the `path` field must match
+        `basename`.
+      jsonldPredicate: "cwl:basename"
+    - name: listing
+      type:
+        - "null"
+        - type: array
+          items: [File, Directory]
+      doc: |
+        List of files or subdirectories contained in this directory.  The name
+        of each file or subdirectory is determined by the `basename` field of
+        each `File` or `Directory` object.  It is an error if a `File` shares a
+        `basename` with any other entry in `listing`.  If two or more
+        `Directory` objects share the same `basename`, this must be treated as
+        equivalent to a single subdirectory with the listings recursively
+        merged.
+      jsonldPredicate:
+        _id: "cwl:listing"
+
+- name: SchemaBase
+  type: record
+  abstract: true
+  fields:
+    - name: label
+      type:
+        - "null"
+        - string
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this object."
+
+
+- name: Parameter
+  type: record
+  extends: SchemaBase
+  abstract: true
+  doc: |
+    Define an input or output parameter to a process.
+
+  fields:
+    - name: secondaryFiles
+      type:
+        - "null"
+        - string
+        - Expression
+        - type: array
+          items: [string, Expression]
+      jsonldPredicate: "cwl:secondaryFiles"
+      doc: |
+        Only valid when `type` is `File` or an array of `items: File`.
+
+        Describes files that must be included alongside the primary file(s).
+
+        If the value is an expression, the value of `self` in the expression
+        must be the primary input or output File to which this binding applies.
+
+        If the value is a string, it specifies that the following pattern
+        should be applied to the primary file:
+
+          1. If string begins with one or more caret `^` characters, for each
+            caret, remove the last file extension from the path (the last
+            period `.` and all following characters).  If there are no file
+            extensions, the path is unchanged.
+          2. Append the remainder of the string to the end of the file path.
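+
+        For example (an illustrative sketch), given a primary file
+        `reads.bam`, the pattern `.bai` resolves to `reads.bam.bai`, while
+        `^.bai` first removes the `.bam` extension and resolves to
+        `reads.bai`:
+
+        ```
+        inputs:
+          aligned:
+            type: File
+            secondaryFiles: ".bai"
+        ```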
+
+    - name: format
+      type:
+        - "null"
+        - string
+        - type: array
+          items: string
+        - Expression
+      jsonldPredicate:
+        _id: cwl:format
+        _type: "@id"
+        identity: true
+      doc: |
+        Only valid when `type` is `File` or an array of `items: File`.
+
+        For input parameters, this must be one or more IRIs of concept nodes
+        that represent file formats which are allowed as input to this
+        parameter, preferably defined within an ontology.  If no ontology is
+        available, file formats may be tested by exact match.
+
+        For output parameters, this is the file format that will be assigned to
+        the output parameter.
+
+    - name: streamable
+      type: boolean?
+      doc: |
+        Only valid when `type` is `File` or an array of `items: File`.
+
+        A value of `true` indicates that the file is read or written
+        sequentially without seeking.  An implementation may use this flag to
+        indicate whether it is valid to stream file contents using a named
+        pipe.  Default: `false`.
+
+    - name: doc
+      type:
+        - string?
+        - string[]?
+      doc: "A documentation string for this type, or an array of strings which should be concatenated."
+      jsonldPredicate: "rdfs:comment"
+
+
+- type: enum
+  name: Expression
+  doc: |
+    'Expression' is not a real type.  It indicates that a field must allow
+    runtime parameter references.  If [InlineJavascriptRequirement](#InlineJavascriptRequirement)
+    is declared and supported by the platform, the field must also allow
+    Javascript expressions.
+  symbols:
+    - cwl:ExpressionPlaceholder
+
+
+- name: InputBinding
+  type: record
+  abstract: true
+  fields:
+    - name: loadContents
+      type:
+        - "null"
+        - boolean
+      jsonldPredicate: "cwl:loadContents"
+      doc: |
+        Only valid when `type` is `File` or an array of `items: File`.
+
+        Read up to the first 64 KiB of text from the file and place it in the
+        "contents" field of the file object for use by expressions.
+
+
+- name: OutputBinding
+  type: record
+  abstract: true
+
+
+- name: InputSchema
+  extends: SchemaBase
+  type: record
+  abstract: true
+
+
+- name: OutputSchema
+  extends: SchemaBase
+  type: record
+  abstract: true
+
+
+- name: InputRecordField
+  type: record
+  extends: "sld:RecordField"
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: InputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: InputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: InputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+    - name: label
+      type: string?
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this process object."
+
+
+- name: InputRecordSchema
+  type: record
+  extends: ["sld:RecordSchema", InputSchema]
+  specialize:
+    - specializeFrom: "sld:RecordField"
+      specializeTo: InputRecordField
+
+
+- name: InputEnumSchema
+  type: record
+  extends: ["sld:EnumSchema", InputSchema]
+  fields:
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+
+
+- name: InputArraySchema
+  type: record
+  extends: ["sld:ArraySchema", InputSchema]
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: InputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: InputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: InputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+
+
+- name: OutputRecordField
+  type: record
+  extends: "sld:RecordField"
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: OutputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: OutputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: OutputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+
+
+- name: OutputRecordSchema
+  type: record
+  extends: ["sld:RecordSchema", "#OutputSchema"]
+  docParent: "#OutputParameter"
+  specialize:
+    - specializeFrom: "sld:RecordField"
+      specializeTo: OutputRecordField
+
+
+- name: OutputEnumSchema
+  type: record
+  extends: ["sld:EnumSchema", OutputSchema]
+  docParent: "#OutputParameter"
+  fields:
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+
+- name: OutputArraySchema
+  type: record
+  extends: ["sld:ArraySchema", OutputSchema]
+  docParent: "#OutputParameter"
+  specialize:
+    - specializeFrom: "sld:RecordSchema"
+      specializeTo: OutputRecordSchema
+    - specializeFrom: "sld:EnumSchema"
+      specializeTo: OutputEnumSchema
+    - specializeFrom: "sld:ArraySchema"
+      specializeTo: OutputArraySchema
+    - specializeFrom: "sld:PrimitiveType"
+      specializeTo: CWLType
+  fields:
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+
+
+- name: InputParameter
+  type: record
+  extends: Parameter
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this parameter object."
+
+    - name: inputBinding
+      type: InputBinding?
+      jsonldPredicate: "cwl:inputBinding"
+      doc: |
+        Describes how to handle the inputs of a process and convert them
+        into a concrete form for execution, such as command line parameters.
+
+    - name: default
+      type: Any?
+      jsonldPredicate: "cwl:default"
+      doc: |
+        The default value for this parameter if not provided in the input
+        object.
+
+- name: RegularInputParameter
+  type: record
+  extends: InputParameter
+  fields:
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - InputRecordSchema
+        - InputEnumSchema
+        - InputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - InputRecordSchema
+            - InputEnumSchema
+            - InputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
+
+
+- name: OutputParameter
+  type: record
+  extends: Parameter
+  fields:
+    - name: id
+      type: string
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this parameter object."
+    - name: outputBinding
+      type: OutputBinding?
+      jsonldPredicate: "cwl:outputBinding"
+      doc: |
+        Describes how to handle the outputs of a process.
+
+
+- type: record
+  name: ProcessRequirement
+  abstract: true
+  doc: |
+    A process requirement declares a prerequisite that may or must be fulfilled
+    before executing a process.  See [`Process.hints`](#process) and
+    [`Process.requirements`](#process).
+
+    Process requirements are the primary mechanism for specifying extensions to
+    the CWL core specification.
+
+
+- type: record
+  name: Process
+  abstract: true
+  doc: |
+
+    The base executable type in CWL is the `Process` object defined by the
+    document.  Note that the `Process` object is abstract and cannot be
+    directly executed.
+
+  fields:
+    - name: id
+      type: string?
+      jsonldPredicate: "@id"
+      doc: "The unique identifier for this process object."
+    - name: inputs
+      type:
+        type: array
+        items: InputParameter
+      jsonldPredicate:
+        _id: "cwl:inputs"
+        mapSubject: id
+        mapPredicate: type
+      doc: |
+        Defines the input parameters of the process.  The process is ready to
+        run when all required input parameters are associated with concrete
+        values.  Input parameters include a schema for each parameter which is
+        used to validate the input object.  It may also be used to build a user
+        interface for constructing the input object.
+    - name: outputs
+      type:
+        type: array
+        items: OutputParameter
+      jsonldPredicate:
+        _id: "cwl:outputs"
+        mapSubject: id
+        mapPredicate: type
+      doc: |
+        Defines the parameters representing the output of the process.  May be
+        used to generate and/or validate the output object.
+    - name: requirements
+      type: ProcessRequirement[]?
+      jsonldPredicate:
+        _id: "cwl:requirements"
+        mapSubject: class
+      doc: |
+        Declares requirements that apply to either the runtime environment or the
+        workflow engine that must be met in order to execute this process.  If
+        an implementation cannot satisfy all requirements, or a requirement is
+        listed which is not recognized by the implementation, it is a fatal
+        error and the implementation must not attempt to run the process,
+        unless overridden at user option.
+    - name: hints
+      type: Any[]?
+      doc: |
+        Declares hints applying to either the runtime environment or the
+        workflow engine that may be helpful in executing this process.  It is
+        not an error if an implementation cannot satisfy all hints, however
+        the implementation may report a warning.
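+
+        For example (an illustrative sketch), a process might pair a hard
+        requirement with a soft hint:
+
+        ```
+        requirements:
+          - class: InlineJavascriptRequirement
+        hints:
+          - class: DockerRequirement
+            dockerPull: debian:8
+        ```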
+      jsonldPredicate:
+        _id: cwl:hints
+        noLinkCheck: true
+        mapSubject: class
+    - name: label
+      type: string?
+      jsonldPredicate: "rdfs:label"
+      doc: "A short, human-readable label of this process object."
+    - name: doc
+      type: string?
+      jsonldPredicate: "rdfs:comment"
+      doc: "A long, human-readable description of this process object."
+    - name: cwlVersion
+      type: CWLVersion?
+      doc: |
+        CWL document version. Always required at the document root. Not
+        required for a Process embedded inside another Process.
+      jsonldPredicate:
+        "_id": "cwl:cwlVersion"
+        "_type": "@vocab"
+
+- name: InlineJavascriptRequirement
+  type: record
+  extends: ProcessRequirement
+  doc: |
+    Indicates that the workflow platform must support inline Javascript expressions.
+    If this requirement is not present, the workflow platform must not perform expression
+    interpolation.
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'InlineJavascriptRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: expressionLib
+      type: string[]?
+      doc: |
+        Additional code fragments that will also be inserted
+        before executing the expression code.  Allows for function definitions that may
+        be called from CWL expressions.
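+
+        For example (a sketch; the helper name is illustrative), a function
+        defined in `expressionLib` becomes callable from any field that
+        permits expressions:
+
+        ```
+        requirements:
+          - class: InlineJavascriptRequirement
+            expressionLib:
+              - "function upper(s) { return s.toUpperCase(); }"
+        ```
+
+        after which an expression such as `$(upper(inputs.message))` is legal.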
+
+
+- name: SchemaDefRequirement
+  type: record
+  extends: ProcessRequirement
+  doc: |
+    This field consists of an array of type definitions which must be used when
+    interpreting the `inputs` and `outputs` fields.  When a `type` field
+    contains an IRI, the implementation must check if the type is defined in
+    `schemaDefs` and use that definition.  If the type is not found in
+    `schemaDefs`, it is an error.  The entries in `schemaDefs` must be
+    processed in the order listed such that later schema definitions may refer
+    to earlier schema definitions.
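+
+    For example (a minimal sketch; the type and field names are illustrative,
+    and identifier resolution follows the Schema Salad rules), a record type
+    defined once can be referenced from `inputs`:
+
+    ```
+    requirements:
+      - class: SchemaDefRequirement
+        types:
+          - name: sample_record
+            type: record
+            fields:
+              - name: species
+                type: string
+    inputs:
+      sample:
+        type: sample_record
+    ```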
+  fields:
+    - name: class
+      type: string
+      doc: "Always 'SchemaDefRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
+    - name: types
+      type:
+        type: array
+        items: InputSchema
+      doc: The list of type definitions.
diff --git a/cwltool/schemas/draft-3/README.md b/cwltool/schemas/v1.1.0-dev1/README.md
similarity index 92%
copy from cwltool/schemas/draft-3/README.md
copy to cwltool/schemas/v1.1.0-dev1/README.md
index 142b728..2c5f66e 100644
--- a/cwltool/schemas/draft-3/README.md
+++ b/cwltool/schemas/v1.1.0-dev1/README.md
@@ -1,11 +1,9 @@
-# Common Workflow Language Specifications, draft-3
+# Common Workflow Language Specifications, v1.1.0-dev1
 
 The CWL specifications are divided up into several documents.
 
-<!--
 The [User Guide](UserGuide.html) provides a gentle introduction to writing CWL
 command line tools and workflows.
--->
 
 The [Command Line Tool Description Specification](CommandLineTool.html)
 specifies the document schema and execution semantics for wrapping and
diff --git a/cwltool/schemas/v1.1.0-dev1/UserGuide.yml b/cwltool/schemas/v1.1.0-dev1/UserGuide.yml
new file mode 100644
index 0000000..7464009
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/UserGuide.yml
@@ -0,0 +1,869 @@
+- name: userguide
+  type: documentation
+  doc:
+    - $include: userguide-intro.md
+
+    - |
+      # Wrapping Command Line Tools
+
+    - |
+      ## First example
+
+      The simplest "hello world" program.  This accepts one input parameter,
+      writes a message to the terminal or job log, and produces no permanent
+      output.  CWL documents are written in [JSON](http://json.org) or
+      [YAML](http://yaml.org), or a mix of the two.
+
+      *1st-tool.cwl*
+      ```
+    - $include: examples/1st-tool.cwl
+    - |
+      ```
+
+      Use a YAML object in a separate file to describe the input of a run:
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner 1st-tool.cwl echo-job.yml
+      [job 140199012414352] $ echo 'Hello world!'
+      Hello world!
+      Final process status is success
+      ```
+
+      What's going on here?  Let's break it down:
+
+      ```
+      cwlVersion: v1.1.0-dev1
+      class: CommandLineTool
+      ```
+
+      The `cwlVersion` field indicates the version of the CWL spec used by the
+      document.  The `class` field indicates this document describes a command
+      line tool.
+
+      ```
+      baseCommand: echo
+      ```
+
+      The `baseCommand` field provides the name of the program that will
+      actually run (`echo`).
+
+      ```
+      inputs:
+        message:
+          type: string
+          inputBinding:
+            position: 1
+      ```
+
+      The `inputs` section describes the inputs of the tool.  This is a list of input
+      parameters and each parameter includes an identifier, a data type, and
+      optionally an `inputBinding` which describes how this input parameter
+      should appear on the command line.  In this example, the `position` field
+      indicates where it should appear on the command line.
+
+      ```
+      outputs: []
+      ```
+
+      This tool has no formal output, so the `outputs` section is an empty list.
+
+    - |
+      ## Essential input parameters
+
+      The `inputs` of a tool is a list of input parameters that control how to
+      run the tool.  Each parameter has an `id` for the name of the parameter, and
+      `type` describing what types of values are valid for that parameter.
+
+      Available primitive types are *boolean*, *string*, *int*, *long*,
+      *float*, *double*, and *null*; complex types are *array* and *record*;
+      in addition there are special types *File*, *Directory* and *Any*.
+
+      The following example demonstrates some input parameters with different
+      types and appearing on the command line in different ways:
+
+
+      *inp.cwl*
+      ```
+    - $include: examples/inp.cwl
+    - |
+      ```
+
+      *inp-job.yml*
+      ```
+    - $include: examples/inp-job.yml
+    - |
+      ```
+
+      Notice that "example_file", as a `File` type, must be provided as an
+      object with the fields `class: File` and `path`.
+
+      Next, create a whale.txt and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ touch whale.txt
+      $ cwl-runner inp.cwl inp-job.yml
+      [job 140020149614160] /home/example$ echo -f -i42 --example-string hello --file=/home/example/whale.txt
+      -f -i42 --example-string hello --file=/home/example/whale.txt
+      Final process status is success
+      ```
+
+      The field `inputBinding` is optional and indicates whether and how the
+      input parameter should appear on the tool's command line.  If
+      `inputBinding` is missing, the parameter does not appear on the command
+      line.  Let's look at each example in detail.
+
+      ```
+      example_flag:
+        type: boolean
+        inputBinding:
+          position: 1
+          prefix: -f
+      ```
+
+      Boolean types are treated as a flag.  If the input parameter
+      "example_flag" is "true", then `prefix` will be added to the
+      command line.  If false, no flag is added.
+
+      ```
+      example_string:
+        type: string
+        inputBinding:
+          position: 3
+          prefix: --example-string
+      ```
+
+      String types appear on the command line as literal values.  The `prefix`
+      is optional; if provided, it appears as a separate argument on the
+      command line before the parameter.  In the example above, this is
+      rendered as `--example-string hello`.
+
+      ```
+      example_int:
+        type: int
+        inputBinding:
+          position: 2
+          prefix: -i
+          separate: false
+      ```
+
+      Integer (and floating point) types appear on the command line with
+      decimal text representation.  When the option `separate` is false (the
+      default value is true), the prefix and value are combined into a single
+      argument.  In the example above, this is rendered as `-i42`.
+
+
+      ```
+      example_file:
+        type: File?
+        inputBinding:
+          prefix: --file=
+          separate: false
+          position: 4
+      ```
+
+      File types appear on the command line as the path to the file.  When the
+      parameter type ends with a question mark `?` it indicates that the
+      parameter is optional.  In the example above, this is rendered as
+      `--file=/home/example/whale.txt`.  However, if the "example_file"
+      parameter were not provided in the input, nothing would appear on the
+      command line.
+
+      Input files are read-only.  If you wish to update an input file, you must
+      first copy it to the output directory.
+
+      The value of `position` is used to determine where the parameter should
+      appear on the command line.  Positions are relative to one another, not
+      absolute.  As a result, positions do not have to be sequential; three
+      parameters with positions `[1, 3, 5]` will result in the same command
+      line as `[1, 2, 3]`.  More than one parameter can have the same position
+      (ties are broken using the parameter name), and the position field itself
+      is optional.  The default position is 0.
+
+      The `baseCommand` field always comes before parameters.
+
+    - |
+      ## Returning output files
+
+      The `outputs` of a tool is a list of output parameters that should be
+      returned after running the tool.  Each parameter has an `id` for the name
+      of the parameter, and `type` describing what types of values are valid for
+      that parameter.
+
+      When a tool runs under CWL, the starting working directory is the
+      designated output directory.  The underlying tool or script must record
+      its results in the form of files created in the output directory.  The
+      output parameters returned by the CWL tool are either the output files
+      themselves, or come from examining the content of those files.
+
+      *tar.cwl*
+      ```
+    - $include: examples/tar.cwl
+    - |
+      ```
+
+      *tar-job.yml*
+      ```
+    - $include: examples/tar-job.yml
+    - |
+      ```
+
+      Next, create a tar file for the example and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+      ```
+      $ touch hello.txt && tar -cvf hello.tar hello.txt
+      $ cwl-runner tar.cwl tar-job.yml
+      [job 139868145165200] $ tar xf /home/example/hello.tar
+      Final process status is success
+      {
+      "example_out": {
+        "location": "hello.txt",
+        "size": 13,
+        "class": "File",
+        "checksum": "sha1$47a013e660d408619d894b20806b1d5086aab03b"
+        }
+      }
+      ```
+
+      The field `outputBinding` describes how to set the value of each
+      output parameter.
+
+      ```
+      outputs:
+        - id: example_out
+          type: File
+          outputBinding:
+            glob: hello.txt
+      ```
+
+      The `glob` field consists of the name of a file in the output directory.
+      If you don't know the name of the file in advance, you can use a wildcard
+      pattern.
+
+    - |
+      ## Capturing a tool's standard output stream
+
+      To capture a tool's standard output stream, add the `stdout` field with
+      the name of the file where the output stream should go.  Then add `type:
+      stdout` on the corresponding output parameter.
+
+      *stdout.cwl*
+      ```
+    - $include: examples/stdout.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner stdout.cwl echo-job.yml
+      [job 140199012414352] $ echo 'Hello world!' > output.txt
+      Final process status is success
+      {
+      "output": {
+        "location": "output.txt",
+        "size": 13,
+        "class": "File",
+        "checksum": "sha1$47a013e660d408619d894b20806b1d5086aab03b"
+        }
+      }
+      $ cat output.txt
+      Hello world!
+      ```
+
+    - |
+      ## Parameter references
+
+      In a previous example, we extracted a file using the "tar" program.
+      However, that example was very limited because it assumed that the file
+      we were interested in was called "hello.txt".  In this example, you will
+      see how to reference the value of input parameters dynamically from other
+      fields.
+
+      *tar-param.cwl*
+      ```
+    - $include: examples/tar-param.cwl
+    - |
+      ```
+
+      *tar-param-job.yml*
+      ```
+    - $include: examples/tar-param-job.yml
+    - |
+      ```
+
+      Create your input files and invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+      ```
+      $ rm hello.tar || true && touch goodbye.txt && tar -cvf hello.tar goodbye.txt
+      $ cwl-runner tar-param.cwl tar-param-job.yml
+      [job 139868145165200] $ tar xf /home/example/hello.tar goodbye.txt
+      Final process status is success
+      {
+      "example_out": {
+        "location": "goodbye.txt",
+        "size": 24,
+        "class": "File",
+        "checksum": "sha1$dd0a4c4c49ba43004d6611771972b6cf969c1c01"
+        }
+      }
+      ```
+
+      Certain fields permit parameter references which are enclosed in `$(...)`.
+      These are evaluated and replaced with the value being referenced.
+
+      ```
+      outputs:
+        example_out:
+          type: File
+          outputBinding:
+            glob: $(inputs.extractfile)
+      ```
+
+      References are written using a subset of Javascript syntax.  In this
+      example, `$(inputs.extractfile)`, `$(inputs["extractfile"])`, and
+      `$(inputs['extractfile'])` are equivalent.
+
+      The value of the "inputs" variable is the input object provided when the
+      CWL tool was invoked.
+
+      Note that because File parameters are objects, to get the path to an
+      input file you must reference the path field on a file object; to
+      reference the path to the tar file in the above example you would write
+      `$(inputs.tarfile.path)`.
+
+    - |
+      ## Running tools inside Docker
+
+      [Docker](http://docker.io) containers simplify software installation by providing a complete
+      known-good runtime for software and its dependencies.  However,
+      containers are also purposefully isolated from the host system, so in
+      order to run a tool inside a Docker container there is additional work to
+      ensure that input files are available inside the container and output
+      files can be recovered from the container.  CWL can perform this work
+      automatically, allowing you to use Docker to simplify your software
+      management while avoiding the complexity of invoking and managing Docker
+      containers.
+
+      This example runs a simple Node.js script inside a Docker container.
+
+      *docker.cwl*
+      ```
+    - $include: examples/docker.cwl
+    - |
+      ```
+
+      *docker-job.yml*
+      ```
+    - $include: examples/docker-job.yml
+    - |
+      ```
+
+      Provide a hello.js and invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ echo "console.log(\"Hello World\");" > hello.js
+      $ cwl-runner docker.cwl docker-job.yml
+      [job 140259721854416] /home/example$ docker run -i --volume=/home/example/hello.js:/var/lib/cwl/job369354770_examples/hello.js:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpDLs5hm:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp node:slim node /var/lib/cwl/job369354770_examples/hello.js
+      Hello world!
+      Final process status is success
+      ```
+
+      Notice the CWL runner has constructed a Docker command line to run the
+      script.  One of the responsibilities of the CWL runner is to adjust the
+      paths of input files to reflect the location where they appear inside the
+      container.  In this example, the path to the script `hello.js` is
+      `/home/example/hello.js` outside the container but
+      `/var/lib/cwl/job369354770_examples/hello.js` inside the container, as
+      reflected in the invocation of the `node` command.
+
+    - |
+      ## Additional command line arguments and runtime parameters
+
+      Sometimes tools require additional command line options that don't
+      correspond exactly to input parameters.
+
+      In this example, we will wrap the Java compiler to compile a Java source
+      file to a class file.  By default, `javac` will create the class files in
+      the same directory as the source file.  However, CWL input files (and the
+      directories in which they appear) may be read-only, so we need to
+      instruct javac to write the class file to the designated output directory
+      instead.
+
+      *arguments.cwl*
+      ```
+    - $include: examples/arguments.cwl
+    - |
+      ```
+
+      *arguments-job.yml*
+      ```
+    - $include: examples/arguments-job.yml
+    - |
+      ```
+
+      Now create a sample Java file and invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ echo "public class Hello {}" > Hello.java
+      $ cwl-runner arguments.cwl arguments-job.yml
+      [job arguments.cwl] /tmp/tmpwYALo1$ docker \
+       run \
+       -i \
+       --volume=/home/peter/work/common-workflow-language/v1.1.0-dev1/examples/Hello.java:/var/lib/cwl/stg8939ac04-7443-4990-a518-1855b2322141/Hello.java:ro \
+       --volume=/tmp/tmpwYALo1:/var/spool/cwl:rw \
+       --volume=/tmp/tmpptIAJ8:/tmp:rw \
+       --workdir=/var/spool/cwl \
+       --read-only=true \
+       --user=1001 \
+       --rm \
+       --env=TMPDIR=/tmp \
+       --env=HOME=/var/spool/cwl \
+       java:7 \
+       javac \
+       -d \
+       /var/spool/cwl \
+       /var/lib/cwl/stg8939ac04-7443-4990-a518-1855b2322141/Hello.java
+      Final process status is success
+      {
+        "classfile": {
+          "size": 416,
+          "location": "/home/example/Hello.class",
+          "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+          "class": "File"
+        }
+      }
+
+      ```
+
+      Here we use the `arguments` field to add an additional argument to the
+      command line that isn't tied to a specific input parameter.
+
+      ```
+      arguments: ["-d", $(runtime.outdir)]
+      ```
+
+      This example references a runtime parameter.  Runtime parameters
+      provide information about the hardware or software environment when the
+      tool is actually executed.  The `$(runtime.outdir)` parameter is the path
+      to the designated output directory.  Other parameters include
+      `$(runtime.tmpdir)`, `$(runtime.ram)`, `$(runtime.cores)`,
+      `$(runtime.outdirSize)`, and `$(runtime.tmpdirSize)`.  See
+      the [Runtime Environment](CommandLineTool.html#Runtime_environment)
+      section of the CWL specification for details.
+
+    - |
+      ## Array inputs
+
+      It is easy to add arrays of input parameters to be represented on the
+      command line.  To specify an array parameter, the array definition is nested
+      under the `type` field with `type: array` and `items` defining the valid
+      data types that may appear in the array.
+
+      *array-inputs.cwl*
+      ```
+    - $include: examples/array-inputs.cwl
+    - |
+      ```
+
+      *array-inputs-job.yml*
+      ```
+    - $include: examples/array-inputs-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner array-inputs.cwl array-inputs-job.yml
+      [job 140334923640912] /home/example$ echo -A one two three -B=four -B=five -B=six -C=seven,eight,nine
+      -A one two three -B=four -B=five -B=six -C=seven,eight,nine
+      Final process status is success
+      {}
+      ```
+
+      The `inputBinding` can appear either on the outer array parameter
+      definition or the inner array element definition, and these produce
+      different behavior when constructing the command line, as shown above.
+      In addition, the `itemSeparator` field, if provided, specifies that array
+      values should be concatenated into a single argument separated by the
+      item separator string.
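+
+      For example (a sketch, not taken verbatim from the included example
+      files), an array parameter bound with `prefix: -C=`, `separate: false`,
+      and `itemSeparator: ","`:
+
+      ```
+      inputs:
+        parts:
+          type: string[]
+          inputBinding:
+            position: 3
+            prefix: -C=
+            separate: false
+            itemSeparator: ","
+      ```
+
+      renders the input `["seven", "eight", "nine"]` as
+      `-C=seven,eight,nine`, matching the final argument in the transcript
+      above.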
+
+      You can specify arrays of arrays, arrays of records, and other complex
+      types.
+
+    - |
+      ## Array outputs
+
+      You can also capture multiple output files into an array of files using `glob`.
+
+      *array-outputs.cwl*
+      ```
+    - $include: examples/array-outputs.cwl
+    - |
+      ```
+
+      *array-outputs-job.yml*
+      ```
+    - $include: examples/array-outputs-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` providing the tool wrapper and the input object
+      on the command line:
+
+      ```
+      $ cwl-runner array-outputs.cwl array-outputs-job.yml
+      [job 140190876078160] /home/example$ touch foo.txt bar.dat baz.txt
+      Final process status is success
+      {
+        "output": [
+          {
+            "size": 0,
+            "location": "/home/peter/work/common-workflow-language/draft-3/examples/foo.txt",
+            "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+            "class": "File"
+          },
+          {
+            "size": 0,
+            "location": "/home/peter/work/common-workflow-language/draft-3/examples/baz.txt",
+            "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
+            "class": "File"
+          }
+        ]
+      }
+      ```
+
+    - |
+      ## Record inputs, dependent and mutually exclusive parameters
+
+      Sometimes an underlying tool has several arguments that must be provided
+      together (they are dependent) or several arguments that cannot be
+      provided together (they are exclusive).  You can use records and type
+      unions to group parameters together to describe these two conditions.
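+
+      As a sketch of the exclusive case (the complete tool, covering the
+      dependent case as well, is `record.cwl` below), a union of record types
+      offers one alternative per branch:
+
+      ```
+      inputs:
+        exclusive_parameters:
+          type:
+            - type: record
+              name: itemC_record
+              fields:
+                - name: itemC
+                  type: string
+                  inputBinding:
+                    prefix: -C
+            - type: record
+              name: itemD_record
+              fields:
+                - name: itemD
+                  type: string
+                  inputBinding:
+                    prefix: -D
+      ```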
+
+      *record.cwl*
+      ```
+    - $include: examples/record.cwl
+    - |
+      ```
+
+      *record-job1.yml*
+      ```
+    - $include: examples/record-job1.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job1.yml
+      Workflow error:
+        Error validating input record, could not validate field `dependent_parameters` because
+        missing required field `itemB`
+      ```
+
+      In the first example, you can't provide `itemA` without also providing `itemB`.
+
+      *record-job2.yml*
+      ```
+    - $include: examples/record-job2.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job2.yml
+      [job 140566927111376] /home/example$ echo -A one -B two -C three
+      -A one -B two -C three
+      Final process status is success
+      {}
+      ```
+
+      In the second example, `itemC` and `itemD` are exclusive, so only `itemC`
+      is added to the command line and `itemD` is ignored.
+
+      *record-job3.yml*
+      ```
+    - $include: examples/record-job3.yml
+    - |
+      ```
+
+      ```
+      $ cwl-runner record.cwl record-job3.yml
+      [job 140606932172880] /home/example$ echo -A one -B two -D four
+      -A one -B two -D four
+      Final process status is success
+      {}
+      ```
+
+      In the third example, only `itemD` is provided, so it appears on the
+      command line.
+
+    - |
+      ## Environment variables
+
+      Tools run in a restricted environment and do not inherit most environment
+      variables from the parent process.  You can set environment variables for
+      the tool using `EnvVarRequirement`.
+
+      *env.cwl*
+      ```
+    - $include: examples/env.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner env.cwl echo-job.yml
+      [job 140710387785808] /home/example$ env
+      PATH=/bin:/usr/bin:/usr/local/bin
+      HELLO=Hello world!
+      TMPDIR=/tmp/tmp63Obpk
+      Final process status is success
+      {}
+      ```
+
+    - |
+      ## Javascript expressions
+
+      If you need to manipulate input parameters, include the requirement
+      `InlineJavascriptRequirement` and then anywhere a parameter reference is
+      legal you can provide a fragment of Javascript that will be evaluated by
+      the CWL runner.
+
+      *expression.cwl*
+      ```
+    - $include: examples/expression.cwl
+    - |
+      ```
+
+      ```
+      $ cwl-runner expression.cwl empty.yml
+      [job 140000594593168] /home/example$ echo -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
+      -A 2 -B baz -C 10 9 8 7 6 5 4 3 2 1
+      Final process status is success
+      {}
+      ```
+
+      You can only use expressions in certain fields.  These are: `filename`,
+      `fileContent`, `envValue`, `valueFrom`, `glob`, `outputEval`, `stdin`,
+      `stdout`, `coresMin`, `coresMax`, `ramMin`, `ramMax`, `tmpdirMin`,
+      `tmpdirMax`, `outdirMin`, and `outdirMax`.
+
+    - |
+      ## Creating files at runtime
+
+      Sometimes you need to create a file on the fly from input parameters,
+      such as tools which expect to read their input configuration from a file
+      rather than from command line parameters.  To do this, use
+      `InitialWorkDirRequirement`.
+
+      *createfile.cwl*
+      ```
+    - $include: examples/createfile.cwl
+    - |
+      ```
+
+      *echo-job.yml*
+      ```
+    - $include: examples/echo-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwltool createfile.cwl echo-job.yml
+      [job 140528604979344] /home/example$ cat example.conf
+      CONFIGVAR=Hello world!
+      Final process status is success
+      {}
+      ```
+
+    - |
+      ## Staging input files in the output directory
+
+      Normally, input files are located in a read-only directory separate from
+      the output directory.  This causes problems if the underlying tool
+      expects to write its output files alongside the input file in the same
+      directory.  You can use `InitialWorkDirRequirement` to stage input files into the
+      output directory.  In this example, we use a Javascript expression to
+      extract the base name of the input file from its leading directory path.
+
+      *linkfile.cwl*
+      ```
+    - $include: examples/linkfile.cwl
+    - |
+      ```
+
+      *arguments-job.yml*
+      ```
+    - $include: examples/arguments-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ cwl-runner linkfile.cwl arguments-job.yml
+      [job 139928309171664] /home/example$ docker run -i --volume=/home/example/Hello.java:/var/lib/cwl/job557617295_examples/Hello.java:ro --volume=/home/example:/var/spool/cwl:rw --volume=/tmp/tmpmNbApw:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac Hello.java
+      Final process status is success
+      {
+      "classfile": {
+        "size": 416,
+        "location": "/home/example/Hello.class",
+        "checksum": "sha1$2f7ac33c1f3aac3f1fec7b936b6562422c85b38a",
+        "class": "File"
+        }
+      }
+      ```
+
+    - |
+      # Writing Workflows
+
+      ## First workflow
+
+      This workflow extracts a Java source file from a tar file and then
+      compiles it.
+
+      *1st-workflow.cwl*
+      ```
+    - $include: examples/1st-workflow.cwl
+    - |
+      ```
+
+      Use a YAML object in a separate file to describe the input of a run:
+
+      *1st-workflow-job.yml*
+      ```
+    - $include: examples/1st-workflow-job.yml
+    - |
+      ```
+
+      Now invoke `cwl-runner` with the tool wrapper and the input object on the
+      command line:
+
+      ```
+      $ echo "public class Hello {}" > Hello.java && tar -cvf hello.tar Hello.java
+      $ cwl-runner 1st-workflow.cwl 1st-workflow-job.yml
+      [job untar] /tmp/tmp94qFiM$ tar xf /home/example/hello.tar Hello.java
+      [step untar] completion status is success
+      [job compile] /tmp/tmpu1iaKL$ docker run -i --volume=/tmp/tmp94qFiM/Hello.java:/var/lib/cwl/job301600808_tmp94qFiM/Hello.java:ro --volume=/tmp/tmpu1iaKL:/var/spool/cwl:rw --volume=/tmp/tmpfZnNdR:/tmp:rw --workdir=/var/spool/cwl --read-only=true --net=none --user=1001 --rm --env=TMPDIR=/tmp java:7 javac -d /var/spool/cwl /var/lib/cwl/job301600808_tmp94qFiM/Hello.java
+      [step compile] completion status is success
+      [workflow 1st-workflow.cwl] outdir is /home/example
+      Final process status is success
+      {
+        "classout": {
+          "location": "/home/example/Hello.class",
+          "checksum": "sha1$e68df795c0686e9aa1a1195536bd900f5f417b18",
+          "class": "File",
+          "size": 416
+        }
+      }
+      ```
+
+      What's going on here?  Let's break it down:
+
+      ```
+      cwlVersion: v1.1.0-dev1
+      class: Workflow
+      ```
+
+      The `cwlVersion` field indicates the version of the CWL spec used by the
+      document.  The `class` field indicates this document describes a workflow.
+
+
+      ```
+      inputs:
+        inp: File
+        ex: string
+      ```
+
+      The `inputs` section describes the inputs of the workflow.  This is a
+      list of input parameters where each parameter consists of an identifier
+      and a data type.  These parameters can be used as sources for input to
+      specific workflow steps.
+
+      ```
+      outputs:
+        classout:
+          type: File
+          outputSource: compile/classfile
+      ```
+
+      The `outputs` section describes the outputs of the workflow.  This is a
+      list of output parameters where each parameter consists of an identifier
+      and a data type.  The `outputSource` field connects the output parameter `classfile`
+      of the `compile` step to the workflow output parameter `classout`.
+
+      ```
+      steps:
+        untar:
+          run: tar-param.cwl
+          in:
+            tarfile: inp
+            extractfile: ex
+          out: [example_out]
+      ```
+
+      The `steps` section describes the actual steps of the workflow.  In this
+      example, the first step extracts a file from a tar file, and the second
+      step compiles the file from the first step using the java compiler.
+      Workflow steps are not necessarily run in the order they are listed;
+      instead, the order is determined by the dependencies between steps (using
+      `source`).  In addition, workflow steps which do not depend on one
+      another may run in parallel.
+
+      The first step, `untar`, runs `tar-param.cwl` (described previously in
+      [Parameter references](#Parameter_references)).  This tool has two input
+      parameters, `tarfile` and `extractfile`, and one output parameter
+      `example_out`.
+
+      The `in` section of the workflow step connects these two input
+      parameters to the inputs of the workflow, `inp` and `ex`, using
+      `source`.  This means that when the workflow step is executed, the values
+      assigned to `inp` and `ex` will be used for the parameters `tarfile`
+      and `extractfile` in order to run the tool.
+
+      The `out` section of the workflow step lists the output parameters
+      that are expected from the tool.
+
+      ```
+        compile:
+          run: arguments.cwl
+          in:
+            src: untar/example_out
+          out: [classfile]
+      ```
+
+      The second step, `compile`, depends on the results from the first step by
+      connecting the input parameter `src` to the output parameter of `untar`
+      using `untar/example_out`.  The output of this step `classfile` is
+      connected to the `outputs` section for the Workflow, described above.
diff --git a/cwltool/schemas/draft-3/Workflow.yml b/cwltool/schemas/v1.1.0-dev1/Workflow.yml
similarity index 66%
copy from cwltool/schemas/draft-3/Workflow.yml
copy to cwltool/schemas/v1.1.0-dev1/Workflow.yml
index 066a66e..3f2f9c1 100644
--- a/cwltool/schemas/draft-3/Workflow.yml
+++ b/cwltool/schemas/v1.1.0-dev1/Workflow.yml
@@ -9,10 +9,10 @@ $graph:
   type: documentation
   doc:
     - |
-      # Common Workflow Language (CWL) Workflow Description, draft 3
+      # Common Workflow Language (CWL) Workflow Description, v1.1.0-dev1
 
       This version:
-        * https://w3id.org/cwl/draft-3/
+        * https://w3id.org/cwl/v1.1.0-dev1/
 
       Current version:
         * https://w3id.org/cwl/
@@ -22,32 +22,52 @@ $graph:
     - |
       # Abstract
 
-      A Workflow is an analysis task represented by a directed graph describing
-      a sequence of operations that transform an input data set to output.
-      This specification defines the Common Workflow Language (CWL) Workflow
-      description, a vendor-neutral standard for representing workflows
-      intended to be portable across a variety of computing platforms.
+      One way to define a workflow is: an analysis task represented by a
+      directed graph describing a sequence of operations that transform an
+      input data set to output. This specification defines the Common Workflow
+      Language (CWL) Workflow description, a vendor-neutral standard for
+      representing workflows intended to be portable across a variety of
+      computing platforms.
 
     - {$include: intro.md}
 
     - |
 
-      ## Introduction to draft 3
-
-      This specification represents the third milestone of the CWL group.  Since
-      draft-2, this draft introduces the following changes and additions:
-
-        * Greatly simplified naming within a document with scoped identifiers, as
-          described in the [Schema Salad specification](SchemaSalad.html).
-        * The draft-2 concept of pluggable expression engines has been replaced
-          by a [streamlined expression syntax)[#Parameter_references]
-          and standardization on [Javascript](#Expressions).
-        * [File](#File) objects can now include a `format` field to indicate
-          the file type.
-        * The addition of [MultipleInputFeatureRequirement](#MultipleInputFeatureRequirement).
-        * The addition of [StepInputExpressionRequirement](#StepInputExpressionRequirement).
-        * The separation of Workflow and CommandLineTool components into
-          separate specifications.
+      ## Introduction to v1.1.0-dev1
+
+      This is the first development release of the 1.1.0 version of the CWL
+      Workflow specification.
+
+      ## Introduction to v1.0
+
+      This specification represents the first full release from the CWL group.
+      Since draft-3, this draft introduces the following changes and additions:
+
+        * The `inputs` and `outputs` fields have been renamed `in` and `out`.
+        * Syntax simplifications: denoted by the `map<>` syntax. For example,
+          `in` previously contained a list of items, each with an `id`; now
+          one can instead specify a mapping of that identifier to the
+          corresponding `InputParameter`.
+          ```
+          in:
+           - id: one
+             type: string
+             doc: First input parameter
+           - id: two
+             type: int
+             doc: Second input parameter
+          ```
+          can be
+          ```
+          in:
+           one:
+            type: string
+            doc: First input parameter
+           two:
+            type: int
+            doc: Second input parameter
+          ```
+        * The common field `description` has been renamed to `doc`.
 
       ## Purpose
 
@@ -60,21 +80,52 @@ $graph:
 
     - {$include: concepts.md}
 
+- name: ExpressionToolOutputParameter
+  type: record
+  extends: OutputParameter
+  fields:
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - OutputRecordSchema
+        - OutputEnumSchema
+        - OutputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - OutputRecordSchema
+            - OutputEnumSchema
+            - OutputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
 
 - type: record
   name: ExpressionTool
-  extends: "#Process"
+  extends: Process
+  specialize:
+    - specializeFrom: InputParameter
+      specializeTo: RegularInputParameter
+    - specializeFrom: OutputParameter
+      specializeTo: ExpressionToolOutputParameter
   documentRoot: true
   doc: |
-    Execute an expression as a process step.
+    Execute an expression as a Workflow step.
   fields:
-    - name: "class"
+    - name: class
       jsonldPredicate:
         "_id": "@type"
         "_type": "@vocab"
       type: string
     - name: expression
-      type: [string, "#Expression"]
+      type: [string, Expression]
       doc: |
         The expression to execute.  The expression must return a JSON object which
         matches the output parameters of the ExpressionTool.
@@ -90,12 +141,52 @@ $graph:
 
 - name: WorkflowOutputParameter
   type: record
-  extends: ["#OutputParameter", "#Sink"]
+  extends: OutputParameter
   docParent: "#Workflow"
   doc: |
     Describe an output parameter of a workflow.  The parameter must be
     connected to one or more parameters defined in the workflow that will
     provide the value of the output parameter.
+  fields:
+    - name: outputSource
+      doc: |
+        Specifies one or more workflow parameters that supply the value of
+        the output parameter.
+      jsonldPredicate:
+        "_id": "cwl:outputSource"
+        "_type": "@id"
+        refScope: 0
+      type:
+        - string?
+        - string[]?
+    - name: linkMerge
+      type: ["null", LinkMergeMethod]
+      jsonldPredicate: "cwl:linkMerge"
+      doc: |
+        The method to use to merge multiple sources into a single array.
+        If not specified, the default method is "merge_nested".
+    - name: type
+      type:
+        - "null"
+        - CWLType
+        - OutputRecordSchema
+        - OutputEnumSchema
+        - OutputArraySchema
+        - string
+        - type: array
+          items:
+            - CWLType
+            - OutputRecordSchema
+            - OutputEnumSchema
+            - OutputArraySchema
+            - string
+      jsonldPredicate:
+        "_id": "sld:type"
+        "_type": "@vocab"
+        refScope: 2
+        typeDSL: True
+      doc: |
+        Specify valid types of data that may be assigned to this parameter.
 
 
 - name: Sink
@@ -105,17 +196,17 @@ $graph:
     - name: source
       doc: |
         Specifies one or more workflow parameters that will provide input to
-        the underlying process parameter.
+        the underlying step parameter.
       jsonldPredicate:
         "_id": "cwl:source"
         "_type": "@id"
+        refScope: 2
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
     - name: linkMerge
-      type: ["null", "#LinkMergeMethod"]
+      type: LinkMergeMethod?
+      jsonldPredicate: "cwl:linkMerge"
       doc: |
         The method to use to merge multiple inbound links into a single array.
         If not specified, the default method is "merge_nested".
@@ -123,12 +214,12 @@ $graph:
 
 - type: record
   name: WorkflowStepInput
-  extends: "#Sink"
+  extends: Sink
   docParent: "#WorkflowStep"
   doc: |
     The input of a workflow step connects an upstream parameter (from the
     workflow inputs, or the outputs of other workflows steps) with the input
-    parameters of the underlying process.
+    parameters of the underlying step.
 
     ## Input object
 
@@ -179,8 +270,8 @@ $graph:
     - name: valueFrom
       type:
         - "null"
-        - "string"
-        - "#Expression"
+        - string
+        - Expression
       jsonldPredicate: "cwl:valueFrom"
       doc: |
         To use valueFrom, [StepInputExpressionRequirement](#StepInputExpressionRequirement) must
@@ -196,10 +287,12 @@ $graph:
         the value of the parameter(s) specified in the `source` field, or
         null if there is no `source` field.
 
-        The value of `inputs` in the parameter reference or expression is the
-        input object to the workflow step after assigning the `source` values,
-        but before evaluating any step with `valueFrom`.  The order of
-        evaluating `valueFrom` among step input parameters is undefined.
+        The value of `inputs` in the parameter reference or expression must be
+        the input object to the workflow step after assigning the `source`
+        values and then scattering.  The order of evaluating `valueFrom` among
+        step input parameters is undefined and the result of evaluating
+        `valueFrom` on a parameter must not be visible to evaluation of
+        `valueFrom` on other parameters.
 
 
 - type: record
@@ -235,9 +328,9 @@ $graph:
   docParent: "#Workflow"
   doc: |
     A workflow step is an executable element of a workflow.  It specifies the
-    underlying process implementation (such as `CommandLineTool`) in the `run`
-    field and connects the input and output parameters of the underlying
-    process to workflow parameters.
+    underlying process implementation (such as `CommandLineTool` or another
+    `Workflow`) in the `run` field and connects the input and output parameters
+    of the underlying process to workflow parameters.
 
     # Scatter/gather
 
@@ -287,31 +380,34 @@ $graph:
       type: string
       jsonldPredicate: "@id"
       doc: "The unique identifier for this workflow step."
-    - name: inputs
-      type:
-        type: array
-        items: "#WorkflowStepInput"
-      jsonldPredicate: "cwl:inputs"
+    - name: in
+      type: WorkflowStepInput[]
+      jsonldPredicate:
+        _id: "cwl:in"
+        mapSubject: id
+        mapPredicate: source
       doc: |
         Defines the input parameters of the workflow step.  The process is ready to
         run when all required input parameters are associated with concrete
         values.  Input parameters include a schema for each parameter which is
        used to validate the input object.  It may also be used to build a user
         interface for constructing the input object.
-    - name: outputs
+    - name: out
       type:
-        type: array
-        items: "#WorkflowStepOutput"
-      jsonldPredicate: "cwl:outputs"
+        - type: array
+          items: [string, WorkflowStepOutput]
+      jsonldPredicate:
+        _id: "cwl:out"
+        _type: "@id"
+        identity: true
       doc: |
         Defines the parameters representing the output of the process.  May be
         used to generate and/or validate the output object.
     - name: requirements
-      type:
-        - "null"
-        - type: array
-          items: "#ProcessRequirement"
-      jsonldPredicate: "cwl:requirements"
+      type: ProcessRequirement[]?
+      jsonldPredicate:
+        _id: "cwl:requirements"
+        mapSubject: class
       doc: |
         Declares requirements that apply to either the runtime environment or the
         workflow engine that must be met in order to execute this workflow step.  If
@@ -320,33 +416,26 @@ $graph:
         error and the implementation must not attempt to run the process,
         unless overridden at user option.
     - name: hints
-      type:
-        - "null"
-        - type: array
-          items: "Any"
-      jsonldPredicate: "cwl:hints"
+      type: Any[]?
+      jsonldPredicate:
+        _id: "cwl:hints"
+        noLinkCheck: true
+        mapSubject: class
       doc: |
         Declares hints applying to either the runtime environment or the
         workflow engine that may be helpful in executing this workflow step.  It is
        not an error if an implementation cannot satisfy all hints; however,
         the implementation may report a warning.
-      jsonldPredicate:
-        _id: cwl:hints
-        noLinkCheck: true
     - name: label
-      type:
-        - "null"
-        - string
+      type: string?
       jsonldPredicate: "rdfs:label"
       doc: "A short, human-readable label of this process object."
-    - name: description
-      type:
-        - "null"
-        - string
+    - name: doc
+      type: string?
       jsonldPredicate: "rdfs:comment"
       doc: "A long, human-readable description of this process object."
     - name: run
-      type: [string, "#Process"]
+      type: [string, Process]
       jsonldPredicate:
         "_id": "cwl:run"
         "_type": "@id"
@@ -354,20 +443,17 @@ $graph:
         Specifies the process to run.
     - name: scatter
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       jsonldPredicate:
         "_id": "cwl:scatter"
         "_type": "@id"
         "_container": "@list"
+        refScope: 0
     - name: scatterMethod
       doc: |
         Required if `scatter` is an array of more than one element.
-      type:
-        - "null"
-        - "#ScatterMethod"
+      type: ScatterMethod?
       jsonldPredicate:
         "_id": "cwl:scatterMethod"
         "_type": "@vocab"
@@ -378,19 +464,21 @@ $graph:
   extends: "#Process"
   documentRoot: true
   specialize:
-    specializeFrom: "#OutputParameter"
-    specializeTo: "#WorkflowOutputParameter"
+    - specializeFrom: InputParameter
+      specializeTo: RegularInputParameter
+    - specializeFrom: OutputParameter
+      specializeTo: WorkflowOutputParameter
   doc: |
     A workflow describes a set of **steps** and the **dependencies** between
-    those processes.  When a process produces output that will be consumed by a
-    second process, the first process is a dependency of the second process.
+    those steps.  When a step produces output that will be consumed by a
+    second step, the first step is a dependency of the second step.
 
    When there is a dependency, the workflow engine must execute the preceding
-    process and wait for it to successfully produce output before executing the
-    dependent process.  If two processes are defined in the workflow graph that
-    are not directly or indirectly dependent, these processes are
-    **independent**, and may execute in any order or execute concurrently.  A
-    workflow is complete when all steps have been executed.
+    step and wait for it to successfully produce output before executing the
+    dependent step.  If two steps are defined in the workflow graph that
+    are not directly or indirectly dependent, these steps are **independent**,
+    and may execute in any order or execute concurrently.  A workflow is
+    complete when all steps have been executed.
 
     Dependencies between parameters are expressed using the `source` field on
     [workflow step input parameters](#WorkflowStepInput) and [workflow output
@@ -404,27 +492,28 @@ $graph:
 
     ## Workflow success and failure
 
-    A completed process must result in one of `success`, `temporaryFailure` or
-    `permanentFailure` states.  An implementation may choose to retry a process
+    A completed step must result in one of `success`, `temporaryFailure` or
+    `permanentFailure` states.  An implementation may choose to retry a step
     execution which resulted in `temporaryFailure`.  An implementation may
     choose to either continue running other steps of a workflow, or terminate
     immediately upon `permanentFailure`.
 
-    * If any step of a workflow execution results in `permanentFailure`, then the
-    workflow status is `permanentFailure`.
+    * If any step of a workflow execution results in `permanentFailure`, then
+    the workflow status is `permanentFailure`.
 
     * If one or more steps result in `temporaryFailure` and all other steps
     complete `success` or are not executed, then the workflow status is
     `temporaryFailure`.
 
-    * If all workflow steps are executed and complete with `success`, then the workflow
-    status is `success`.
+    * If all workflow steps are executed and complete with `success`, then the
+    workflow status is `success`.
 
     # Extensions
 
     [ScatterFeatureRequirement](#ScatterFeatureRequirement) and
     [SubworkflowFeatureRequirement](#SubworkflowFeatureRequirement) are
-    available as standard extensions to core workflow semantics.
+    available as standard [extensions](#Extensions_and_Metadata) to core
+    workflow semantics.
 
   fields:
     - name: "class"
@@ -441,33 +530,62 @@ $graph:
       type:
         - type: array
           items: "#WorkflowStep"
-
+      jsonldPredicate:
+          mapSubject: id
 
 
 - type: record
   name: SubworkflowFeatureRequirement
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicates that the workflow platform must support nested workflows in
-    the `run` field of (WorkflowStep)(#WorkflowStep).
+    the `run` field of [WorkflowStep](#WorkflowStep).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'SubworkflowFeatureRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
 
 - name: ScatterFeatureRequirement
   type: record
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicates that the workflow platform must support the `scatter` and
     `scatterMethod` fields of [WorkflowStep](#WorkflowStep).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'ScatterFeatureRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
 
 - name: MultipleInputFeatureRequirement
   type: record
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicates that the workflow platform must support multiple inbound data links
     listed in the `source` field of [WorkflowStepInput](#WorkflowStepInput).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'MultipleInputFeatureRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
 
 - type: record
   name: StepInputExpressionRequirement
-  extends: "#ProcessRequirement"
+  extends: ProcessRequirement
   doc: |
     Indicate that the workflow platform must support the `valueFrom` field
-    of [WorkflowStepInput](#WorkflowStepInput).
\ No newline at end of file
+    of [WorkflowStepInput](#WorkflowStepInput).
+  fields:
+    - name: "class"
+      type: "string"
+      doc: "Always 'StepInputExpressionRequirement'"
+      jsonldPredicate:
+        "_id": "@type"
+        "_type": "@vocab"
diff --git a/cwltool/schemas/v1.1.0-dev1/concepts.md b/cwltool/schemas/v1.1.0-dev1/concepts.md
new file mode 100644
index 0000000..d3eac23
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/concepts.md
@@ -0,0 +1,389 @@
+## References to other specifications
+
+**Javascript Object Notation (JSON)**: http://json.org
+
+**JSON Linked Data (JSON-LD)**: http://json-ld.org
+
+**YAML**: http://yaml.org
+
+**Avro**: https://avro.apache.org/docs/current/spec.html
+
+**Uniform Resource Identifier (URI) Generic Syntax**: https://tools.ietf.org/html/rfc3986
+
+**Internationalized Resource Identifiers (IRIs)**:
+https://tools.ietf.org/html/rfc3987
+
+**Portable Operating System Interface (POSIX.1-2008)**: http://pubs.opengroup.org/onlinepubs/9699919799/
+
+**Resource Description Framework (RDF)**: http://www.w3.org/RDF/
+
+## Scope
+
+This document describes CWL syntax, execution, and object model.  It
+is not intended to document a CWL-specific implementation; however, it may
+serve as a reference for the behavior of conforming implementations.
+
+## Terminology
+
+The terminology used to describe CWL documents is defined in the
+Concepts section of the specification. The terms defined in the
+following list are used in building those definitions and in describing the
+actions of a CWL implementation:
+
+**may**: Conforming CWL documents and CWL implementations are permitted but
+not required to behave as described.
+
+**must**: Conforming CWL documents and CWL implementations are required to behave
+as described; otherwise they are in error.
+
+**error**: A violation of the rules of this specification; results are
+undefined. Conforming implementations may detect and report an error and may
+recover from it.
+
+**fatal error**: A violation of the rules of this specification; results are
+undefined. Conforming implementations must not continue to execute the current
+process and may report an error.
+
+**at user option**: Conforming software may or must (depending on the modal verb in
+the sentence) behave as described; if it does, it must provide users a means to
+enable or disable the behavior described.
+
+**deprecated**: Conforming software may implement a behavior for backwards
+compatibility.  Portable CWL documents should not rely on deprecated behavior.
+Behavior marked as deprecated may be removed entirely from future revisions of
+the CWL specification.
+
+# Data model
+
+## Data concepts
+
+An **object** is a data structure equivalent to the "object" type in JSON,
+consisting of an unordered set of name/value pairs (referred to here as
+**fields**), where each name is a string and each value is a string, number,
+boolean, array, or object.
+
+A **document** is a file containing a serialized object, or an array of objects.
+
+A **process** is a basic unit of computation which accepts input data,
+performs some computation, and produces output data. Examples include
+CommandLineTools, Workflows, and ExpressionTools.
+
+An **input object** is an object describing the inputs to an invocation of
+a process.
+
+An **output object** is an object describing the output resulting from an
+invocation of a process.
+
+An **input schema** describes the valid format (required fields, data types)
+for an input object.
+
+An **output schema** describes the valid format for an output object.
+
+**Metadata** is information about workflows, tools, or input items.
+
+## Syntax
+
+CWL documents must consist of an object or array of objects represented using
+JSON or YAML syntax.  Upon loading, a CWL implementation must apply the
+preprocessing steps described in the
+[Semantic Annotations for Linked Avro Data (SALAD) Specification](SchemaSalad.html).
+An implementation may formally validate the structure of a CWL document using
+SALAD schemas located at
+https://github.com/common-workflow-language/common-workflow-language/tree/master/draft-4
+
+## Identifiers
+
+If an object contains an `id` field, that is used to uniquely identify the
+object in that document.  The value of the `id` field must be unique over the
+entire document.  Identifiers may be resolved relative to either the document
+base and/or other identifiers following the rules described in the
+[Schema Salad specification](SchemaSalad.html#Identifier_resolution).
+
+An implementation may choose to only honor references to object types for
+which the `id` field is explicitly listed in this specification.
+
+## Document preprocessing
+
+An implementation must resolve [$import](SchemaSalad.html#Import) and
+[$include](SchemaSalad.html#Import) directives as described in the
+[Schema Salad specification](SchemaSalad.html).
+
+Another transformation defined in Schema Salad is the simplification of data
+type definitions.  A type `<T>` ending with `?` should be transformed to
+`[<T>, "null"]`, and a type `<T>` ending with `[]` should be transformed to
+`{"type": "array", "items": <T>}`.
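+
+As a brief sketch of how these two rules compose, the shorthand declaration
+
+```
+type: File[]?
+```
+
+expands first on the `?` and then on the `[]`, yielding:
+
+```
+type:
+  - type: array
+    items: File
+  - "null"
+```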
+
+## Extensions and metadata
+
+Input metadata (for example, a lab sample identifier) may be represented within
+a tool or workflow using input parameters which are explicitly propagated to
+output.  Future versions of this specification may define additional facilities
+for working with input/output metadata.
+
+Implementation extensions not required for correct execution (for example,
+fields related to GUI presentation) and metadata about the tool or workflow
+itself (for example, authorship for use in citations) may be provided as
+additional fields on any object.  Such extension fields must use a namespace
+prefix listed in the `$namespaces` section of the document as described in the
+[Schema Salad specification](SchemaSalad.html#Explicit_context).
+
+Implementation extensions which modify execution semantics must be [listed in
+the `requirements` field](#Requirements_and_hints).
+
+# Execution model
+
+## Execution concepts
+
+A **parameter** is a named symbolic input or output of a process, with an
+associated datatype or schema.  During execution, values are assigned to
+parameters to make the input object or output object used for concrete
+process invocation.
+
+A **CommandLineTool** is a process characterized by the execution of a
+standalone, non-interactive program which is invoked on some input,
+produces output, and then terminates.
+
+A **workflow** is a process characterized by multiple subprocess steps,
+where step outputs are connected to the inputs of downstream steps to
+form a directed acyclic graph, and independent steps may run concurrently.
+
+A **runtime environment** is the actual hardware and software environment when
+executing a command line tool.  It includes, but is not limited to, the
+hardware architecture, hardware resources, operating system, software runtime
+(if applicable, such as the specific Python interpreter or the specific Java
+virtual machine), libraries, modules, packages, utilities, and data files
+required to run the tool.
+
+A **workflow platform** is a specific hardware and software implementation
+capable of interpreting CWL documents and executing the processes specified by
+the document.  The responsibilities of the workflow platform may include
+scheduling process invocation, setting up the necessary runtime environment,
+making input data available, invoking the tool process, and collecting output.
+
+A workflow platform may choose to only implement the Command Line Tool
+Description part of the CWL specification.
+
+It is intended that the workflow platform has broad leeway outside of this
+specification to optimize use of computing resources and enforce policies
+not covered by this specification.  Some areas that are currently out of
+scope for CWL specification but may be handled by a specific workflow
+platform include:
+
+* Data security and permissions
+* Scheduling tool invocations on remote cluster or cloud compute nodes.
+* Using virtual machines or operating system containers to manage the runtime
+(except as described in [DockerRequirement](CommandLineTool.html#DockerRequirement)).
+* Using remote or distributed file systems to manage input and output files.
+* Transforming file paths.
+* Determining if a process has previously been executed, and if so skipping it
+and reusing previous results.
+* Pausing, resuming or checkpointing processes or workflows.
+
+Conforming CWL processes must not assume anything about the runtime
+environment or workflow platform unless explicitly declared through the use
+of [process requirements](#Requirements_and_hints).
+
+## Generic execution process
+
+The generic execution sequence of a CWL process (including workflows and
+command line tools) is as follows.
+
+1. Load, process and validate a CWL document, yielding a process object.
+2. Load input object.
+3. Validate the input object against the `inputs` schema for the process.
+4. Validate process requirements are met.
+5. Perform any further setup required by the specific process type.
+6. Execute the process.
+7. Capture results of process execution into the output object.
+8. Validate the output object against the `outputs` schema for the process.
+9. Report the output object to the process caller.
+
+## Requirements and hints
+
+A **process requirement** modifies the semantics or runtime
+environment of a process.  If an implementation cannot satisfy all
+requirements, or a requirement is listed which is not recognized by the
+implementation, it is a fatal error and the implementation must not attempt
+to run the process, unless overridden at user option.
+
+A **hint** is similar to a requirement; however, it is not an error if an
+implementation cannot satisfy all hints.  The implementation may report a
+warning if a hint cannot be satisfied.
+
+Requirements are inherited.  A requirement specified in a Workflow applies
+to all workflow steps; a requirement specified on a workflow step will
+apply to the process implementation of that step and any of its substeps.
+
+If the same process requirement appears at different levels of the
+workflow, the most specific instance of the requirement is used, that is,
+an entry in `requirements` on a process implementation such as
+CommandLineTool will take precedence over an entry in `requirements`
+specified in a workflow step, and an entry in `requirements` on a workflow
+step takes precedence over the workflow.  Entries in `hints` are resolved
+the same way.
+
+Requirements override hints.  If a process implementation provides a
+process requirement in `hints` which is also provided in `requirements` by
+an enclosing workflow or workflow step, the enclosing `requirements` takes
+precedence.
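+
+The following workflow fragment is a sketch of these precedence rules (the
+step, tool, and resource values are hypothetical):
+
+```
+requirements:
+  ResourceRequirement:
+    ramMin: 1024          # inherited by every step of the workflow...
+steps:
+  analyze:
+    requirements:
+      ResourceRequirement:
+        ramMin: 4096      # ...but this more specific entry takes precedence
+    run: analyze-tool.cwl # hypothetical tool
+    in:
+      input_file: inp
+    out: [result]
+```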
+
+## Parameter references
+
+Parameter references are denoted by the syntax `$(...)` and may be used in any
+field permitting the pseudo-type `Expression`, as specified by this document.
+Conforming implementations must support parameter references.  Parameter
+references use the following subset of
+[Javascript/ECMAScript 5.1](http://www.ecma-international.org/ecma-262/5.1/)
+syntax, but they are designed to not require a Javascript engine for evaluation.
+
+In the following [BNF
+grammar](https://en.wikipedia.org/wiki/Backus%E2%80%93Naur_Form), character
+classes and grammar rules are denoted in '{}', '-' denotes exclusion from a
+character class, '(())' denotes grouping, '|' denotes alternates, trailing
+'*' denotes zero or more repeats, '+' denotes one or more repeats, '/' escapes
+these special characters, and all other characters are literal values.
+
+<p>
+<table class="table">
+<tr><td>symbol::             </td><td>{Unicode alphanumeric}+</td></tr>
+<tr><td>singleq::            </td><td>[' (( {character - '} | \' ))* ']</td></tr>
+<tr><td>doubleq::            </td><td>[" (( {character - "} | \" ))* "]</td></tr>
+<tr><td>index::              </td><td>[ {decimal digit}+ ]</td></tr>
+<tr><td>segment::            </td><td>. {symbol} | {singleq} | {doubleq} | {index}</td></tr>
+<tr><td>parameter reference::</td><td>$( {symbol} {segment}*)</td></tr>
+</table>
+</p>
+
+Use the following algorithm to resolve a parameter reference:
+
+  1. Match the leading symbol as the key
+  2. Look up the key in the parameter context (described below) to get the current value.
+     It is an error if the key is not found in the parameter context.
+  3. If there are no subsequent segments, terminate and return current value
+  4. Else, match the next segment
+  5. Extract the symbol, string, or index from the segment as the key
+  6. Look up the key in the current value and assign it as the new current value.  If
+     the key is a symbol or string, the current value must be an object.
+     If the key is an index, the current value must be an array or string.
+     It is an error if the key does not match the required type, or the key is not found or out
+     of range.
+  7. Repeat steps 3-6
+
+The root namespace is the parameter context.  The following parameters must
+be provided:
+
+  * `inputs`: The input object to the current Process.
+  * `self`: A context-specific value.  The contextual values for 'self' are
+    documented for specific fields elsewhere in this specification.  If
+    a contextual value of 'self' is not documented for a field, it
+    must be 'null'.
+  * `runtime`: An object containing configuration details.  Specific to the
+    process type.  An implementation may provide
+    opaque strings for any or all fields of `runtime`.  These must be
+    filled in by the platform after processing the Tool but before actual
+    execution.  Parameter references and expressions may only use the
+    literal string value of the field and must not perform computation on
+    the contents, except where noted otherwise.
+
+If the value of a field has no leading or trailing non-whitespace
+characters around a parameter reference, the effective value of the field
+becomes the value of the referenced parameter, preserving the return type.
+
+If the value of a field has non-whitespace leading or trailing characters
+around a parameter reference, it is subject to string interpolation.  The
+effective value of the field is a string containing the leading characters,
+followed by the string value of the parameter reference, followed by the
+trailing characters.  The string value of the parameter reference is its
+textual JSON representation with the following rules:
+
+  * Leading and trailing quotes are stripped from strings
+  * Object entries are sorted by key
+
+Multiple parameter references may appear in a single field.  This case
+must be treated as a string interpolation.  After interpolating the first
+parameter reference, interpolation must be recursively applied to the
+trailing characters to yield the final string value.
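+
+As a worked sketch (the input object and field values are hypothetical),
+given the input object
+
+```
+{"sample": {"id": "S1"}, "replicates": 3}
+```
+
+a field whose entire value is `$(inputs.sample.id)` takes the string value
+`S1`, preserving the return type, while a field value of
+`out_$(inputs.sample.id)_$(inputs.replicates).txt` is interpolated to the
+string `out_S1_3.txt`.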
+
+## Expressions
+
+An expression is a fragment of [Javascript/ECMAScript
+5.1](http://www.ecma-international.org/ecma-262/5.1/) code evaluated by the
+workflow platform to affect the inputs, outputs, or
+behavior of a process.  In the generic execution sequence, expressions may
+be evaluated during step 5 (process setup), step 6 (execute process),
+and/or step 7 (capture output).  Expressions are distinct from regular
+processes in that they are intended to modify the behavior of the workflow
+itself rather than perform the primary work of the workflow.
+
+To declare the use of expressions, the document must include the process
+requirement `InlineJavascriptRequirement`.  Expressions may be used in any
+field permitting the pseudo-type `Expression`, as specified by this
+document.
+
+Expressions are denoted by the syntax `$(...)` or `${...}`.  A code
+fragment wrapped in the `$(...)` syntax must be evaluated as an
+[ECMAScript expression](http://www.ecma-international.org/ecma-262/5.1/#sec-11).  A
+code fragment wrapped in the `${...}` syntax must be evaluated as an
+[ECMAScript function body](http://www.ecma-international.org/ecma-262/5.1/#sec-13)
+for an anonymous, zero-argument function.  Expressions must return a valid JSON
+data type: one of null, string, number, boolean, array, object. Other return
+values must result in a `permanentFailure`. Implementations must permit any
+syntactically valid Javascript and, when scanning for expressions, must
+account for nesting of parentheses or braces and for strings that may
+contain parentheses or braces.
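+
+For instance (a sketch assuming `InlineJavascriptRequirement` is declared
+and `inputs.min_qual` is a number), the following two expressions evaluate
+to the same value:
+
+```
+$(inputs.min_qual + 1)
+${ var q = inputs.min_qual; return q + 1; }
+```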
+
+The runtime must include any code defined in the ["expressionLib" field of
+InlineJavascriptRequirement](#InlineJavascriptRequirement) prior to
+executing the actual expression.
+
+Before executing the expression, the runtime must initialize as global
+variables the fields of the parameter context described above.
+
+The effective value of the field after expression evaluation follows the
+same rules as parameter references discussed above.  Multiple expressions
+may appear in a single field.
+
+Expressions must be evaluated in an isolated context (a "sandbox") which
+permits no side effects to leak outside the context.  Expressions also must
+be evaluated in [Javascript strict mode](http://www.ecma-international.org/ecma-262/5.1/#sec-4.2.2).
+
+The order in which expressions are evaluated is undefined except where
+otherwise noted in this document.
+
+An implementation may choose to implement parameter references by
+evaluating them as Javascript expressions.  The results of evaluating
+parameter references must be identical whether implemented by Javascript
+evaluation or some other means.
+
+Implementations may apply other limits, such as process isolation, timeouts,
+and operating system containers/jails to minimize the security risks associated
+with running untrusted code embedded in a CWL document.
+
+Exceptions thrown from an expression must result in a `permanentFailure` of the
+process.
+
+## Executing CWL documents as scripts
+
+By convention, a CWL document may begin with `#!/usr/bin/env cwl-runner`
+and be marked as executable (the POSIX "+x" permission bits) to enable it
+to be executed directly.  A workflow platform may support this mode of
+operation; if so, it must provide `cwl-runner` as an alias for the
+platform's CWL implementation.
+
+A CWL input object document may similarly begin with `#!/usr/bin/env
+cwl-runner` and be marked as executable.  In this case, the input object
+must include the field `cwl:tool` supplying an IRI to the default CWL
+document that should be executed using the fields of the input object as
+input parameters.
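+
+A minimal sketch of such an input object document (the tool reference and
+parameter name are hypothetical):
+
+```
+#!/usr/bin/env cwl-runner
+cwl:tool: echo-tool.cwl
+message: Hello, world
+```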
+
+## Discovering CWL documents on a local filesystem
+
+To discover CWL documents, look in the following locations:
+
+`/usr/share/commonwl/`
+
+`/usr/local/share/commonwl/`
+
+`$XDG_DATA_HOME/commonwl/` (usually `$HOME/.local/share/commonwl`)
+
+`$XDG_DATA_HOME` is defined by the [XDG Base Directory
+Specification](http://standards.freedesktop.org/basedir-spec/basedir-spec-0.6.html).
diff --git a/cwltool/schemas/v1.1.0-dev1/contrib.md b/cwltool/schemas/v1.1.0-dev1/contrib.md
new file mode 100644
index 0000000..af6f6e8
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/contrib.md
@@ -0,0 +1,19 @@
+Authors:
+
+* Peter Amstutz <peter.amstutz at curoverse.com>, Arvados Project, Curoverse
+* Michael R. Crusoe <michael.crusoe at gmail.com>, Common Workflow Language
+  project
+* Nebojša Tijanić <nebojsa.tijanic at sbgenomics.com>, Seven Bridges Genomics
+
+Contributors:
+
+* Brad Chapman <bchapman at hsph.harvard.edu>, Harvard Chan School of Public Health
+* John Chilton <jmchilton at gmail.com>, Galaxy Project, Pennsylvania State University
+* Michael Heuer <heuermh at berkeley.edu>, UC Berkeley AMPLab
+* Andrey Kartashov <Andrey.Kartashov at cchmc.org>, Cincinnati Children's Hospital
+* Dan Leehr <dan.leehr at duke.edu>, Duke University
+* Hervé Ménager <herve.menager at gmail.com>, Institut Pasteur
+* Maya Nedeljkovich <maja.nedeljkovic at sbgenomics.com>, Seven Bridges Genomics
+* Matt Scales <mscales at icr.ac.uk>, Institute of Cancer Research, London
+* Stian Soiland-Reyes [soiland-reyes at cs.manchester.ac.uk](mailto:soiland-reyes at cs.manchester.ac.uk), University of Manchester
+* Luka Stojanovic <luka.stojanovic at sbgenomics.com>, Seven Bridges Genomics
diff --git a/cwltool/schemas/v1.1.0-dev1/intro.md b/cwltool/schemas/v1.1.0-dev1/intro.md
new file mode 100644
index 0000000..17ebb4e
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/intro.md
@@ -0,0 +1,21 @@
+# Status of this document
+
+This document is the product of the [Common Workflow Language working
+group](https://groups.google.com/forum/#!forum/common-workflow-language).  The
+latest stable version of this document is available in the "v1.0" directory at
+
+https://github.com/common-workflow-language/common-workflow-language
+
+The products of the CWL working group (including this document) are made available
+under the terms of the Apache License, version 2.0.
+
+<!--ToC-->
+
+# Introduction
+
+The Common Workflow Language (CWL) working group is an informal, multi-vendor
+working group consisting of various organizations and individuals that have an
+interest in portability of data analysis workflows.  The goal is to create
+specifications like this one that enable data scientists to describe analysis
+tools and workflows that are powerful, easy to use, portable, and support
+reproducibility.
diff --git a/cwltool/schemas/v1.1.0-dev1/invocation.md b/cwltool/schemas/v1.1.0-dev1/invocation.md
new file mode 100644
index 0000000..ce7524c
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/invocation.md
@@ -0,0 +1,153 @@
+# Running a Command
+
+To accommodate the enormous variety in syntax and semantics for input, runtime
+environment, invocation, and output of arbitrary programs, a CommandLineTool
+defines an "input binding" that describes how to translate abstract input
+parameters to a concrete program invocation, and an "output binding" that
+describes how to generate output parameters from program output.
+
+## Input binding
+
+The tool command line is built by applying command line bindings to the
+input object.  Bindings are listed either as part of an [input
+parameter](#CommandInputParameter) using the `inputBinding` field, or
+separately using the `arguments` field of the CommandLineTool.
+
+The algorithm to build the command line is as follows.  In this algorithm,
+the sort key is a list consisting of one or more numeric or string
+elements.  Strings are sorted lexicographically based on UTF-8 encoding.
+
+  1. Collect `CommandLineBinding` objects from `arguments`.  Assign a sorting
+  key `[position, i]` where `position` is
+  [`CommandLineBinding.position`](#CommandLineBinding) and `i`
+  is the index in the `arguments` list.
+
+  2. Collect `CommandLineBinding` objects from the `inputs` schema and
+  associate them with values from the input object.  Where the input type
+  is a record, array, or map, recursively walk the schema and input object,
+  collecting nested `CommandLineBinding` objects and associating them with
+  values from the input object.
+
+  3. Create a sorting key by taking the value of the `position` field at
+  each level leading to each leaf binding object.  If `position` is not
+  specified, it is not added to the sorting key.  For bindings on arrays
+  and maps, the sorting key must include the array index or map key
+  following the position.  If and only if two bindings have the same sort
+  key, the tie must be broken using the ordering of the field or parameter
+  name immediately containing the leaf binding.
+
+  4. Sort elements using the assigned sorting keys.  Numeric entries sort
+  before strings.
+
+  5. In the sorted order, apply the rules defined in
+  [`CommandLineBinding`](#CommandLineBinding) to convert bindings to actual
+  command line elements.
+
+  6. Insert elements from `baseCommand` at the beginning of the command
+  line.
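+
+The following fragment is an illustrative sketch of this algorithm (the
+tool and input values are hypothetical):
+
+```
+baseCommand: [tar, xf]
+arguments:
+  - {position: 2, valueFrom: "--verbose"}
+inputs:
+  tarfile:
+    type: File
+    inputBinding: {position: 1}
+```
+
+With an input object supplying the file `archive.tar` for `tarfile`, the
+binding for `tarfile` (key `[1]`) sorts before the argument (key `[2, 0]`),
+and `baseCommand` is inserted at the front, giving the command line
+`tar xf archive.tar --verbose`.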
+
+## Runtime environment
+
+All files listed in the input object must be made available in the runtime
+environment.  The implementation may use a shared or distributed file
+system or transfer files via explicit download to the host.  Implementations
+may choose not to provide access to files not explicitly specified in the input
+object or process requirements.
+
+Output files produced by tool execution must be written to the **designated
+output directory**.  The initial current working directory when executing
+the tool must be the designated output directory.
+
+Files may also be written to the **designated temporary directory**.  This
+directory must be isolated and not shared with other processes.  Any files
+written to the designated temporary directory may be automatically deleted by
+the workflow platform immediately after the tool terminates.
+
+For compatibility, files may be written to the **system temporary directory**
+which must be located at `/tmp`.  Because the system temporary directory may be
+shared with other processes on the system, files placed in the system temporary
+directory are not guaranteed to be deleted automatically.  A tool
+must not use the system temporary directory as a backchannel communication with
+other tools.  It is valid for the system temporary directory to be the same as
+the designated temporary directory.
+
+When executing the tool, the tool must execute in a new, empty environment
+with only the environment variables described below; the child process must
+not inherit environment variables from the parent process except as
+specified or at user option.
+
+  * `HOME` must be set to the designated output directory.
+  * `TMPDIR` must be set to the designated temporary directory.
+  * `PATH` may be inherited from the parent process, except when run in a
+    container that provides its own `PATH`.
+  * Variables defined by [EnvVarRequirement](#EnvVarRequirement)
+  * The default environment of the container, such as when using
+    [DockerRequirement](#DockerRequirement)
+
+An implementation may forbid the tool from writing to any location in the
+runtime environment file system other than the designated temporary directory,
+system temporary directory, and designated output directory.  An implementation
+may provide read-only input files, and disallow in-place update of input files.
+The designated temporary directory, system temporary directory and designated
+output directory may each reside on different mount points on different file
+systems.
+
+An implementation may forbid the tool from directly accessing network
+resources.  Correct tools must not assume any network access.  Future versions
+of the specification may incorporate optional process requirements that
+describe the networking needs of a tool.
+
+The `runtime` section available in [parameter references](#Parameter_references)
+and [expressions](#Expressions) contains the following fields.  As noted
+earlier, an implementation may perform deferred resolution of runtime fields by providing
+opaque strings for any or all of the following fields; parameter references
+and expressions may only use the literal string value of the field and must
+not perform computation on the contents.
+
+  * `runtime.outdir`: an absolute path to the designated output directory
+  * `runtime.tmpdir`: an absolute path to the designated temporary directory
+  * `runtime.cores`:  number of CPU cores reserved for the tool process
+  * `runtime.ram`:    amount of RAM in mebibytes (2\*\*20) reserved for the tool process
+  * `runtime.outdirSize`: reserved storage space available in the designated output directory
+  * `runtime.tmpdirSize`: reserved storage space available in the designated temporary directory
+
+For `cores`, `ram`, `outdirSize` and `tmpdirSize`, if an implementation cannot
+provide the actual resource value at expression evaluation time, it should
+report back the minimum requested amount.
+
+See [ResourceRequirement](#ResourceRequirement) for details on how to
+describe the hardware resources required by a tool.
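+
+For example (a sketch; the flag name is hypothetical), a tool can request
+cores through `ResourceRequirement` and pass the reserved count to the
+program via `runtime.cores`:
+
+```
+requirements:
+  ResourceRequirement:
+    coresMin: 4
+arguments: ["--threads", $(runtime.cores)]
+```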
+
+The standard input stream and standard output stream may be redirected as
+described in the `stdin` and `stdout` fields.
+
+## Execution
+
+Once the command line is built and the runtime environment is created, the
+actual tool is executed.
+
+The standard error stream and standard output stream (unless redirected by
+setting `stdout` or `stderr`) may be captured by platform logging facilities
+for storage and reporting.
+
+Tools may be multithreaded or spawn child processes; however, when the
+parent process exits, the tool is considered finished regardless of whether
+any detached child processes are still running.  Tools must not require any
+kind of console, GUI, or web based user interaction in order to start and
+run to completion.
+
+The exit code of the process indicates if the process completed
+successfully.  By convention, an exit code of zero is treated as success
+and non-zero exit codes are treated as failure.  This may be customized by
+providing the fields `successCodes`, `temporaryFailCodes`, and
+`permanentFailCodes`.  An implementation may choose to default unspecified
+non-zero exit codes to either `temporaryFailure` or `permanentFailure`.
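+
+For example (the specific codes are hypothetical, for a tool that
+distinguishes retryable failures):
+
+```
+successCodes: [0]
+temporaryFailCodes: [75]
+permanentFailCodes: [1, 2]
+```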
+
+## Output binding
+
+If the output directory contains a file named "cwl.output.json", that file
+must be loaded and used as the output object.  Otherwise, the output object
+must be generated by walking the parameters listed in `outputs` and
+applying output bindings to the tool output.  Output bindings are
+associated with output parameters using the `outputBinding` field.  See
+[`CommandOutputBinding`](#CommandOutputBinding) for details.
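+
+A sketch of a "cwl.output.json" file that a tool might write to the
+designated output directory (the `report` parameter is hypothetical and
+would need to be declared in `outputs` as type `File`):
+
+```
+{
+  "report": {
+    "class": "File",
+    "path": "report.txt"
+  }
+}
+```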
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name.yml
new file mode 100644
index 0000000..44e95a2
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name.yml
@@ -0,0 +1,46 @@
+- |
+  ## Field name resolution
+
+  The document schema declares the vocabulary of known field names.  During
+  preprocessing traversal, field names in the document which are not part of
+  the schema vocabulary must be resolved to absolute URIs.  Under "strict"
+  validation, it is an error for a document to include fields which are not
+  part of the vocabulary and not resolvable to absolute URIs.  Field names
+  which are not part of the vocabulary are resolved using the following
+  rules:
+
+  * If a field name URI begins with a namespace prefix declared in the
+  document context (`@context`) followed by a colon `:`, the prefix and
+  colon must be replaced by the namespace declared in `@context`.
+
+  * If there is a vocabulary term which maps to the URI of a resolved
+  field, the field name must be replaced with the vocabulary term.
+
+  * If a field name URI is an absolute URI consisting of a scheme and path
+  and is not part of the vocabulary, no processing occurs.
+
+  Field name resolution is not relative.  It must not be affected by the
+  base URI.
+
+  ### Field name resolution example
+
+  Given the following schema:
+
+  ```
+- $include: field_name_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: field_name_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: field_name_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_proc.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_proc.yml
new file mode 100644
index 0000000..a53ef4b
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_proc.yml
@@ -0,0 +1,8 @@
+    {
+      "base": "one",
+      "form": {
+        "base": "two",
+        "http://example.com/three": "three",
+      },
+      "http://example.com/acid#four": "four"
+    }
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_schema.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_schema.yml
new file mode 100644
index 0000000..5089c4b
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_schema.yml
@@ -0,0 +1,14 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "base",
+      "type": "string",
+      "jsonldPredicate": "http://example.com/base"
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_src.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_src.yml
new file mode 100644
index 0000000..1ed79b9
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/field_name_src.yml
@@ -0,0 +1,8 @@
+    {
+      "base": "one",
+      "form": {
+        "http://example.com/base": "two",
+        "http://example.com/three": "three",
+      },
+      "acid:four": "four"
+    }
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res.yml
new file mode 100644
index 0000000..45f4efb
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res.yml
@@ -0,0 +1,53 @@
+- |
+  ## Identifier resolution
+
+  The schema may designate one or more fields as identifier fields to identify
+  specific objects.  Processing must resolve relative identifiers to absolute
+  identifiers using the following rules:
+
+    * If an identifier URI is prefixed with `#` it is a URI relative
+      fragment identifier.  It is resolved relative to the base URI by setting
+      or replacing the fragment portion of the base URI.
+
+    * If an identifier URI does not contain a scheme and is not prefixed with `#` it
+      is a parent relative fragment identifier.  It is resolved relative to the
+      base URI by the following rule: if the base URI does not contain a
+      document fragment, set the fragment portion of the base URI.  If the base
+      URI does contain a document fragment, append a slash `/` followed by the
+      identifier field to the fragment portion of the base URI.
+
+    * If an identifier URI begins with a namespace prefix declared in
+      `$namespaces` followed by a colon `:`, the prefix and colon must be
+      replaced by the namespace declared in `$namespaces`.
+
+    * If an identifier URI is an absolute URI consisting of a scheme and path,
+      no processing occurs.
+
+  When preprocessing visits a node containing an identifier, that identifier
+  must be used as the base URI to process child nodes.
+
+  It is an error for more than one object in a document to have the same
+  absolute URI.
+
+  ### Identifier resolution example
+
+  Given the following schema:
+
+  ```
+- $include: ident_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: ident_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: ident_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_proc.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_proc.yml
new file mode 100644
index 0000000..24d3ea8
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_proc.yml
@@ -0,0 +1,20 @@
+{
+  "id": "http://example.com/base",
+  "form": {
+    "id": "http://example.com/base#one",
+    "things": [
+      {
+        "id": "http://example.com/base#one/two"
+      },
+      {
+        "id": "http://example.com/base#three"
+      },
+      {
+        "id": "http://example.com/four#five",
+      },
+      {
+        "id": "http://example.com/acid#six",
+      }
+    ]
+  }
+}
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_schema.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_schema.yml
new file mode 100644
index 0000000..8a7bb04
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_schema.yml
@@ -0,0 +1,14 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "id",
+      "type": "string",
+      "jsonldPredicate": "@id"
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_src.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_src.yml
new file mode 100644
index 0000000..bbbd96e
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/ident_res_src.yml
@@ -0,0 +1,20 @@
+    {
+      "id": "http://example.com/base",
+      "form": {
+        "id": "one",
+        "things": [
+          {
+            "id": "two"
+          },
+          {
+            "id": "#three",
+          },
+          {
+            "id": "four#five",
+          },
+          {
+            "id": "acid:six",
+          }
+        ]
+      }
+    }
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/import_include.md b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/import_include.md
new file mode 100644
index 0000000..1b9f37f
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/import_include.md
@@ -0,0 +1,176 @@
+## Import
+
+During preprocessing traversal, an implementation must resolve `$import`
+directives.  An `$import` directive is an object consisting of exactly one
+field `$import` specifying a resource by URI string.  It is an error if there
+are additional fields in the `$import` object; such additional fields must
+be ignored.
+
+The URI string must be resolved to an absolute URI using the link
+resolution rules described previously.  Implementations must support
+loading from `file`, `http` and `https` resources.  The URI referenced by
+`$import` must be loaded and recursively preprocessed as a Salad document.
+The external imported document does not inherit the context of the
+importing document, and the default base URI for processing the imported
+document must be the URI used to retrieve the imported document.  If the
+`$import` URI includes a document fragment, the fragment must be excluded
+from the base URI used to preprocess the imported document.
+
+Once loaded and processed, the `$import` node is replaced in the document
+structure by the object or array yielded from the import operation.
+
+URIs may reference document fragments which refer to a specific object in
+the target document.  This indicates that the `$import` node must be
+replaced by only the object with the appropriate fragment identifier.
+
+It is a fatal error if an import directive refers to an external resource
+or resource fragment which does not exist or is not accessible.
+
+### Import example
+
+import.yml:
+```
+{
+  "hello": "world"
+}
+
+```
+
+parent.yml:
+```
+{
+  "form": {
+    "bar": {
+      "$import": "import.yml"
+      }
+  }
+}
+
+```
+
+This becomes:
+
+```
+{
+  "form": {
+    "bar": {
+      "hello": "world"
+    }
+  }
+}
+```
+
+## Include
+
+During preprocessing traversal, an implementation must resolve `$include`
+directives.  An `$include` directive is an object consisting of exactly one
+field `$include` specifying a URI string.  It is an error if there are
+additional fields in the `$include` object; such additional fields must be
+ignored.
+
+The URI string must be resolved to an absolute URI using the link
+resolution rules described previously.  The URI referenced by `$include` must
+be loaded as text data.  Implementations must support loading from
+`file`, `http` and `https` resources.  Implementations may transcode the
+character encoding of the text data to match that of the parent document,
+but must not interpret or parse the text document in any other way.
+
+Once loaded, the `$include` node is replaced in the document structure by a
+string containing the text data loaded from the resource.
+
+It is a fatal error if an `$include` directive refers to an external resource
+which does not exist or is not accessible.
+
+### Include example
+
+parent.yml:
+```
+{
+  "form": {
+    "bar": {
+      "$include": "include.txt"
+      }
+  }
+}
+
+```
+
+include.txt:
+```
+hello world
+
+```
+
+This becomes:
+
+```
+{
+  "form": {
+    "bar": "hello world"
+  }
+}
+```
+
+
+## Mixin
+
+During preprocessing traversal, an implementation must resolve `$mixin`
+directives.  An `$mixin` directive is an object consisting of the field
+`$mixin` specifying a resource by URI string.  If there are additional fields in
+the `$mixin` object, these fields override fields in the object which is loaded
+from the `$mixin` URI.
+
+The URI string must be resolved to an absolute URI using the link resolution
+rules described previously.  Implementations must support loading from `file`,
+`http` and `https` resources.  The URI referenced by `$mixin` must be loaded
+and recursively preprocessed as a Salad document.  The external imported
+document must inherit the context of the importing document; however, the base
+URI for processing the imported document must be the URI used to retrieve the
+imported document.  The `$mixin` URI must not include a document fragment.
+
+Once loaded and processed, the `$mixin` node is replaced in the document
+structure by the object or array yielded from the import operation.
+
+It is a fatal error if a `$mixin` directive refers to an external resource
+which does not exist or is not accessible.
+
+### Mixin example
+
+mixin.yml:
+```
+{
+  "hello": "world",
+  "carrot": "orange"
+}
+
+```
+
+parent.yml:
+```
+{
+  "form": {
+    "bar": {
+      "$mixin": "mixin.yml"
+      "carrot": "cake"
+      }
+  }
+}
+
+```
+
+This becomes:
+
+```
+{
+  "form": {
+    "bar": {
+      "hello": "world",
+      "carrot": "cake"
+    }
+  }
+}
+```
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res.yml
new file mode 100644
index 0000000..9346f8a
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res.yml
@@ -0,0 +1,55 @@
+- |
+  ## Link resolution
+
+  The schema may designate one or more fields as link fields that reference
+  other objects.  Processing must resolve links to absolute URIs using the
+  following rules:
+
+  * If a reference URI is prefixed with `#` it is a relative
+  fragment identifier.  It is resolved relative to the base URI by setting
+  or replacing the fragment portion of the base URI.
+
+  * If a reference URI does not contain a scheme and is not prefixed with `#`
+  it is a path relative reference.  If the reference URI contains `#` in any
+  position other than the first character, the reference URI must be divided
+  into a path portion and a fragment portion split on the first instance of
+  `#`.  The path portion is resolved relative to the base URI by the following
+  rule: if the path portion of the base URI ends in a slash `/`, append the
+  path portion of the reference URI to the path portion of the base URI.  If
+  the path portion of the base URI does not end in a slash, replace the final
+  path segment with the path portion of the reference URI.  Replace the
+  fragment portion of the base URI with the fragment portion of the reference
+  URI.
+
+  * If a reference URI begins with a namespace prefix declared in `$namespaces`
+  followed by a colon `:`, the prefix and colon must be replaced by the
+  namespace declared in `$namespaces`.
+
+  * If a reference URI is an absolute URI consisting of a scheme and path,
+  no processing occurs.
+
+  Link resolution must not affect the base URI used to resolve identifiers
+  and other links.
+
+  ### Link resolution example
+
+  Given the following schema:
+
+  ```
+- $include: link_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: link_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: link_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_proc.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_proc.yml
new file mode 100644
index 0000000..03e539d
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_proc.yml
@@ -0,0 +1,21 @@
+{
+  "$base": "http://example.com/base",
+  "link": "http://example.com/base/zero",
+  "form": {
+    "link": "http://example.com/one",
+    "things": [
+      {
+        "link": "http://example.com/two"
+      },
+      {
+        "link": "http://example.com/base#three"
+      },
+      {
+        "link": "http://example.com/four#five",
+      },
+      {
+        "link": "http://example.com/acid#six",
+      }
+    ]
+  }
+}
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_schema.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_schema.yml
new file mode 100644
index 0000000..76420d3
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_schema.yml
@@ -0,0 +1,16 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "link",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@id"
+      }
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_src.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_src.yml
new file mode 100644
index 0000000..23f7a29
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/link_res_src.yml
@@ -0,0 +1,21 @@
+{
+  "$base": "http://example.com/base",
+  "link": "http://example.com/base/zero",
+  "form": {
+    "link": "one",
+    "things": [
+      {
+        "link": "two"
+      },
+      {
+        "link": "#three",
+      },
+      {
+        "link": "four#five",
+      },
+      {
+        "link": "acid:six",
+      }
+    ]
+  }
+}
diff --git a/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema.yml
similarity index 60%
copy from cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
copy to cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema.yml
index 6e90775..d5472e9 100644
--- a/cwltool/schemas/draft-3/salad/schema_salad/metaschema/metaschema.yml
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema.yml
@@ -46,36 +46,7 @@ $graph:
 #     How to generate the json-ld context...
 
 
-- name: PrimitiveType
-  type: enum
-  symbols:
-    - "sld:null"
-    - "xsd:boolean"
-    - "xsd:int"
-    - "xsd:long"
-    - "xsd:float"
-    - "xsd:double"
-    - "xsd:string"
-  doc:
-    - |
-      Salad data types are based on Avro schema declarations.  Refer to the
-      [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
-      detailed information.
-    - "null: no value"
-    - "boolean: a binary value"
-    - "int: 32-bit signed integer"
-    - "long: 64-bit signed integer"
-    - "float: single precision (32-bit) IEEE 754 floating-point number"
-    - "double: double precision (64-bit) IEEE 754 floating-point number"
-    - "string: Unicode character sequence"
-
-
-- name: "Any"
-  type: enum
-  symbols: ["#Any"]
-  doc: |
-    The **Any** type validates for any non-null value.
-
+- $import: metaschema_base.yml
 
 - name: JsonldPredicate
   type: record
@@ -84,7 +55,7 @@ $graph:
     URI resolution and JSON-LD context generation.
   fields:
     - name: _id
-      type: ["null", string]
+      type: string?
       jsonldPredicate:
         _id: sld:_id
         _type: "@id"
@@ -93,7 +64,7 @@ $graph:
         The predicate URI that this field corresponds to.
         Corresponds to JSON-LD `@id` directive.
     - name: _type
-      type: ["null", string]
+      type: string?
       doc: |
         The context type hint, corresponds to JSON-LD `@type` directive.
 
@@ -106,11 +77,11 @@ $graph:
           resolved using the vocabulary resolution rules.
 
     - name: _container
-      type: ["null", string]
+      type: string?
       doc: |
         Structure hint, corresponds to JSON-LD `@container` directive.
     - name: identity
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true and `_type` is `@id` this indicates that the parent field must
         be resolved according to identity resolution rules instead of link
@@ -118,11 +89,46 @@ $graph:
         assertion that the linked value exists; absence of an object in the loaded document
         with the URI is not an error.
     - name: noLinkCheck
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true, this indicates that link validation traversal must stop at
         this field.  This field (if it is a URI) or any fields under it (if it
         is an object or array) are not subject to link checking.
+    - name: mapSubject
+      type: string?
+      doc: |
+        If the value of the field is a JSON object, it must be transformed
+        into an array of JSON objects, where each key-value pair from the
+        source JSON object is a list item, the list items must be JSON objects,
+        and the key is assigned to the field specified by `mapSubject`.
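+        For example, with `mapSubject: name`, the object
+        `{"shaggy": {"type": "dog"}}` is transformed to the array
+        `[{"name": "shaggy", "type": "dog"}]`.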
+    - name: mapPredicate
+      type: string?
+      doc: |
+        Only applies if `mapSubject` is also provided.  If the value of the
+        field is a JSON object, it is transformed as described in `mapSubject`,
+        with the addition that when the value of a map item is not an object,
+        the item is transformed to a JSON object with the key assigned to the
+        field specified by `mapSubject` and the value assigned to the field
+        specified by `mapPredicate`.
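+        For example, with `mapSubject: name` and `mapPredicate: type`, the
+        object `{"shaggy": "dog"}` is transformed to the array
+        `[{"name": "shaggy", "type": "dog"}]`.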
+    - name: refScope
+      type: int?
+      doc: |
+        If the field contains a relative reference, it must be resolved by
+        searching for valid document references in each successive parent scope
+        in the document fragment.  For example, a reference of `foo` in the
+        context `#foo/bar/baz` will first check for the existence of
+        `#foo/bar/baz/foo`, followed by `#foo/bar/foo`, then `#foo/foo` and
+        then finally `#foo`.  The first valid URI in the search order shall be
+        used as the fully resolved value of the identifier.  The value of the
+        refScope field is the specified number of levels from the containing
+        identifier scope before starting the search, so if `refScope: 2` then
+        "baz" and "bar" must be stripped to get the base `#foo` and search
+        `#foo/foo` and then `#foo`.  The last scope searched must be the top
+        level scope before determining if the identifier cannot be resolved.
+    - name: typeDSL
+      type: boolean?
+      doc: |
+        Field must be expanded based on the Schema Salad type DSL.
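+        For example, under the type DSL `string?` is equivalent to
+        `["null", "string"]` and `string[]` is equivalent to
+        `{"type": "array", "items": "string"}`.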
 
 
 - name: SpecializeDef
@@ -134,6 +140,7 @@ $graph:
       jsonldPredicate:
         _id: "sld:specializeFrom"
         _type: "@id"
+        refScope: 1
 
     - name: specializeTo
       type: string
@@ -141,6 +148,7 @@ $graph:
       jsonldPredicate:
         _id: "sld:specializeTo"
         _type: "@id"
+        refScope: 1
 
 
 - name: NamedType
@@ -159,15 +167,13 @@ $graph:
   fields:
     - name: doc
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       doc: "A documentation string for this type, or an array of strings which should be concatenated."
-      jsonldPredicate: "sld:doc"
+      jsonldPredicate: "rdfs:comment"
 
     - name: docParent
-      type: ["null", string]
+      type: string?
       doc: |
         Hint to indicate that during documentation generation, documentation
         for this type should appear in a subsection under `docParent`.
@@ -177,10 +183,8 @@ $graph:
 
     - name: docChild
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       doc: |
         Hint to indicate that during documentation generation, documentation
         for `docChild` should appear in a subsection under this type.
@@ -189,7 +193,7 @@ $graph:
         _type: "@id"
 
     - name: docAfter
-      type: ["null", string]
+      type: string?
       doc: |
         Hint to indicate that during documentation generation, documentation
         for this type should appear after the `docAfter` section at the same
@@ -201,225 +205,99 @@ $graph:
 
 - name: SchemaDefinedType
   type: record
-  extends: "#DocType"
+  extends: DocType
   doc: |
     Abstract base for schema-defined types.
   abstract: true
   fields:
     - name: jsonldPredicate
       type:
-        - "null"
-        - string
-        - "#JsonldPredicate"
+        - string?
+        - JsonldPredicate?
       doc: |
         Annotate this type with linked data context.
-      jsonldPredicate: "sld:jsonldPredicate"
+      jsonldPredicate: sld:jsonldPredicate
 
     - name: documentRoot
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true, indicates that the type is valid at the document root.  At
         least one type in a schema must be tagged with `documentRoot: true`.
 
 
-- name: RecordField
-  type: record
-  doc: "A field of a record."
-  fields:
-    - name: name
-      type: string
-      jsonldPredicate: "@id"
-      doc: |
-        The name of the field
-
-    - name: doc
-      type: ["null", string]
-      doc: |
-        A documentation string for this field
-      jsonldPredicate: "sld:doc"
-
-    - name: type
-      type:
-        - "#PrimitiveType"
-        - "#RecordSchema"
-        - "#EnumSchema"
-        - "#ArraySchema"
-        - string
-        - type: array
-          items:
-            - "#PrimitiveType"
-            - "#RecordSchema"
-            - "#EnumSchema"
-            - "#ArraySchema"
-            - string
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-      doc: |
-        The field type
-
-
 - name: SaladRecordField
   type: record
-  extends: "#RecordField"
+  extends: RecordField
   doc: "A field of a record."
   fields:
     - name: jsonldPredicate
       type:
-        - "null"
-        - string
-        - "#JsonldPredicate"
+        - string?
+        - JsonldPredicate?
       doc: |
         Annotate this type with linked data context.
       jsonldPredicate: "sld:jsonldPredicate"
 
-- name: RecordSchema
-  type: record
-  fields:
-    - name: type
-      doc: "Must be `record`"
-      type:
-        name: Record_symbol
-        type: enum
-        symbols:
-          - "sld:record"
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-
-    - name: "fields"
-      type:
-        - "null"
-        - type: "array"
-          items: "#RecordField"
-
-      jsonldPredicate: "sld:fields"
-      doc: "Defines the fields of the record."
-
 
 - name: SaladRecordSchema
   type: record
-  extends: ["#NamedType", "#RecordSchema", "#SchemaDefinedType"]
+  extends: [NamedType, RecordSchema, SchemaDefinedType]
   documentRoot: true
   specialize:
-    specializeFrom: "#RecordField"
-    specializeTo: "#SaladRecordField"
+    RecordField: SaladRecordField
   fields:
     - name: abstract
-      type: ["null", boolean]
+      type: boolean?
       doc: |
         If true, this record is abstract and may be used as a base for other
         records, but is not valid on its own.
 
     - name: extends
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       jsonldPredicate:
         _id: "sld:extends"
         _type: "@id"
+        refScope: 1
       doc: |
         Indicates that this record inherits fields from one or more base records.
 
     - name: specialize
       type:
-        - "null"
-        - "#SpecializeDef"
-        - type: array
-          items: "#SpecializeDef"
+        - SpecializeDef[]?
       doc: |
         Only applies if `extends` is declared.  Apply type specialization using the
         base record as a template.  For each field inherited from the base
         record, replace any instance of the type `specializeFrom` with
         `specializeTo`.
-
-
-- name: EnumSchema
-  type: record
-  doc: |
-    Define an enumerated type.
-  fields:
-    - name: type
-      doc: "Must be `enum`"
-      type:
-        name: Enum_symbol
-        type: enum
-        symbols:
-          - "sld:enum"
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-
-    - name: "symbols"
-      type:
-        - type: "array"
-          items: "string"
       jsonldPredicate:
-        _id: "sld:symbols"
-        _type: "@id"
-        identity: true
-      doc: "Defines the set of valid symbols."
-
+        _id: "sld:specialize"
+        mapSubject: specializeFrom
+        mapPredicate: specializeTo
 
 - name: SaladEnumSchema
   type: record
-  extends: ["#EnumSchema", "#SchemaDefinedType"]
+  extends: [EnumSchema, SchemaDefinedType]
   documentRoot: true
   doc: |
     Define an enumerated type.
   fields:
     - name: extends
       type:
-        - "null"
-        - string
-        - type: array
-          items: string
+        - string?
+        - string[]?
       jsonldPredicate:
         _id: "sld:extends"
         _type: "@id"
+        refScope: 1
       doc: |
         Indicates that this enum inherits symbols from a base enum.
 
 
-- name: ArraySchema
-  type: record
-  fields:
-    - name: type
-      doc: "Must be `array`"
-      type:
-        name: Array_symbol
-        type: enum
-        symbols:
-          - "sld:array"
-      jsonldPredicate:
-        _id: "sld:type"
-        _type: "@vocab"
-
-    - name: items
-      type:
-        - "#PrimitiveType"
-        - "#RecordSchema"
-        - "#EnumSchema"
-        - "#ArraySchema"
-        - string
-        - type: array
-          items:
-            - "#PrimitiveType"
-            - "#RecordSchema"
-            - "#EnumSchema"
-            - "#ArraySchema"
-            - string
-      jsonldPredicate:
-        _id: "sld:items"
-        _type: "@vocab"
-      doc: "Defines the type of the array elements."
-
-
 - name: Documentation
   type: record
-  extends: ["#NamedType", "#DocType"]
+  extends: [NamedType, DocType]
   documentRoot: true
   doc: |
     A documentation section.  This type exists to facilitate self-documenting
@@ -435,3 +313,5 @@ $graph:
       jsonldPredicate:
         _id: "sld:type"
         _type: "@vocab"
+        typeDSL: true
+        refScope: 2
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema_base.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema_base.yml
new file mode 100644
index 0000000..73511d1
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/metaschema_base.yml
@@ -0,0 +1,164 @@
+$base: "https://w3id.org/cwl/salad#"
+
+$namespaces:
+  sld:  "https://w3id.org/cwl/salad#"
+  dct:  "http://purl.org/dc/terms/"
+  rdf:  "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
+  xsd:  "http://www.w3.org/2001/XMLSchema#"
+
+$graph:
+- name: PrimitiveType
+  type: enum
+  symbols:
+    - "sld:null"
+    - "xsd:boolean"
+    - "xsd:int"
+    - "xsd:long"
+    - "xsd:float"
+    - "xsd:double"
+    - "xsd:string"
+  doc:
+    - |
+      Salad data types are based on Avro schema declarations.  Refer to the
+      [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
+      detailed information.
+    - "null: no value"
+    - "boolean: a binary value"
+    - "int: 32-bit signed integer"
+    - "long: 64-bit signed integer"
+    - "float: single precision (32-bit) IEEE 754 floating-point number"
+    - "double: double precision (64-bit) IEEE 754 floating-point number"
+    - "string: Unicode character sequence"
+
+
+- name: Any
+  type: enum
+  symbols: ["#Any"]
+  doc: |
+    The **Any** type validates for any non-null value.
+
+
+- name: RecordField
+  type: record
+  doc: A field of a record.
+  fields:
+    - name: name
+      type: string
+      jsonldPredicate: "@id"
+      doc: |
+        The name of the field
+
+    - name: doc
+      type: string?
+      doc: |
+        A documentation string for this field
+      jsonldPredicate: "rdfs:comment"
+
+    - name: type
+      type:
+        - PrimitiveType
+        - RecordSchema
+        - EnumSchema
+        - ArraySchema
+        - string
+        - type: array
+          items:
+            - PrimitiveType
+            - RecordSchema
+            - EnumSchema
+            - ArraySchema
+            - string
+      jsonldPredicate:
+        _id: sld:type
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+      doc: |
+        The field type
+
+
+- name: RecordSchema
+  type: record
+  fields:
+    type:
+      doc: "Must be `record`"
+      type:
+        name: Record_symbol
+        type: enum
+        symbols:
+          - "sld:record"
+      jsonldPredicate:
+        _id: "sld:type"
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+    fields:
+      type: RecordField[]?
+      jsonldPredicate:
+        _id: sld:fields
+        mapSubject: name
+        mapPredicate: type
+      doc: "Defines the fields of the record."
+
+
+- name: EnumSchema
+  type: record
+  doc: |
+    Define an enumerated type.
+  fields:
+    type:
+      doc: "Must be `enum`"
+      type:
+        name: Enum_symbol
+        type: enum
+        symbols:
+          - "sld:enum"
+      jsonldPredicate:
+        _id: "sld:type"
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+    symbols:
+      type: string[]
+      jsonldPredicate:
+        _id: "sld:symbols"
+        _type: "@id"
+        identity: true
+      doc: "Defines the set of valid symbols."
+
+
+- name: ArraySchema
+  type: record
+  fields:
+    type:
+      doc: "Must be `array`"
+      type:
+        name: Array_symbol
+        type: enum
+        symbols:
+          - "sld:array"
+      jsonldPredicate:
+        _id: "sld:type"
+        _type: "@vocab"
+        typeDSL: true
+        refScope: 2
+    items:
+      type:
+        - PrimitiveType
+        - RecordSchema
+        - EnumSchema
+        - ArraySchema
+        - string
+        - type: array
+          items:
+            - PrimitiveType
+            - RecordSchema
+            - EnumSchema
+            - ArraySchema
+            - string
+      jsonldPredicate:
+        _id: "sld:items"
+        _type: "@vocab"
+        refScope: 2
+      doc: "Defines the type of the array elements."
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/salad.md b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/salad.md
new file mode 100644
index 0000000..6dd3e6a
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/salad.md
@@ -0,0 +1,256 @@
+# Semantic Annotations for Linked Avro Data (SALAD)
+
+Author:
+
+* Peter Amstutz <peter.amstutz at curoverse.com>, Curoverse
+
+Contributors:
+
+* The developers of Apache Avro
+* The developers of JSON-LD
+* Nebojša Tijanić <nebojsa.tijanic at sbgenomics.com>, Seven Bridges Genomics
+
+# Abstract
+
+Salad is a schema language for describing structured linked data documents
+in JSON or YAML.  A Salad schema provides rules for
+preprocessing, structural validation, and link checking for documents
+described by a Salad schema.  Salad builds on JSON-LD and the Apache Avro
+data serialization system, and extends Avro with features for rich data
+modeling such as inheritance, template specialization, object identifiers,
+and object references.  Salad was developed to provide a bridge between the
+record oriented data modeling supported by Apache Avro and the Semantic
+Web.
+
+# Status of This Document
+
+This document is the product of the [Common Workflow Language working
+group](https://groups.google.com/forum/#!forum/common-workflow-language).  The
+latest version of this document is available in the "schema_salad" directory at
+
+https://github.com/common-workflow-language/schema_salad
+
+The products of the CWL working group (including this document) are made available
+under the terms of the Apache License, version 2.0.
+
+<!--ToC-->
+
+# Introduction
+
+The JSON data model is an extremely popular way to represent structured
+data.  It is attractive because of its relative simplicity and because it is
+a natural fit with the standard types of many programming languages.
+However, this simplicity means that basic JSON lacks expressive features
+useful for working with complex data structures and document formats, such
+as schemas, object references, and namespaces.
+
+JSON-LD is a W3C standard providing a way to describe how to interpret a
+JSON document as Linked Data by means of a "context".  JSON-LD provides a
+powerful solution for representing object references and namespaces in JSON
+based on standard web URIs, but is not itself a schema language.  Without a
+schema providing a well defined structure, it is difficult to process an
+arbitrary JSON-LD document as idiomatic JSON because there are many ways to
+express the same data that are logically equivalent but structurally
+distinct.
+
+Several schema languages exist for describing and validating JSON data,
+such as the Apache Avro data serialization system; however, none understand
+linked data.  As a result, to fully take advantage of JSON-LD to build the
+next generation of linked data applications, one must maintain separate
+JSON schema, JSON-LD context, RDF schema, and human documentation, despite
+significant overlap of content and obvious need for these documents to stay
+synchronized.
+
+Schema Salad is designed to address this gap.  It provides a schema
+language and processing rules for describing structured JSON content
+permitting URI resolution and strict document validation.  The schema
+language supports linked data through annotations that describe the linked
+data interpretation of the content, enables generation of JSON-LD context
+and RDF schema, and production of RDF triples by applying the JSON-LD
+context.  The schema language also provides for robust support of inline
+documentation.
+
+## Introduction to draft 1
+
+This is the first version of Schema Salad.  It is developed concurrently
+with draft 3 of the Common Workflow Language for use in specifying the
+Common Workflow Language; however, Schema Salad is intended to be useful to
+a broader audience.
+
+## References to Other Specifications
+
+**Javascript Object Notation (JSON)**: http://json.org
+
+**JSON Linked Data (JSON-LD)**: http://json-ld.org
+
+**YAML**: http://yaml.org
+
+**Avro**: https://avro.apache.org/docs/current/spec.html
+
+**Uniform Resource Identifier (URI) Generic Syntax**: https://tools.ietf.org/html/rfc3986
+
+**Resource Description Framework (RDF)**: http://www.w3.org/RDF/
+
+**UTF-8**: https://www.ietf.org/rfc/rfc2279.txt
+
+## Scope
+
+This document describes the syntax, data model, algorithms, and schema
+language for working with Salad documents.  It is not intended to document
+a specific implementation of Salad, however it may serve as a reference for
+the behavior of conforming implementations.
+
+## Terminology
+
+The terminology used to describe Salad documents is defined in the Concepts
+section of the specification. The terms defined in the following list are
+used in building those definitions and in describing the actions of a
+Salad implementation:
+
+**may**: Conforming Salad documents and Salad implementations are permitted but
+not required to be interpreted as described.
+
+**must**: Conforming Salad documents and Salad implementations are required
+to be interpreted as described; otherwise they are in error.
+
+**error**: A violation of the rules of this specification; results are
+undefined. Conforming implementations may detect and report an error and may
+recover from it.
+
+**fatal error**: A violation of the rules of this specification; results
+are undefined. Conforming implementations must not continue to process the
+document and may report an error.
+
+**at user option**: Conforming software may or must (depending on the modal verb in
+the sentence) behave as described; if it does, it must provide users a means to
+enable or disable the behavior described.
+
+# Document model
+
+## Data concepts
+
+An **object** is a data structure equivalent to the "object" type in JSON,
+consisting of an unordered set of name/value pairs (referred to here as
+**fields**), where the name is a string and the value is a string, number,
+boolean, array, or object.
+
+A **document** is a file containing a serialized object, or an array of
+objects.
+
+A **document type** is a class of files that share a common structure and
+semantics.
+
+A **document schema** is a formal description of the grammar of a document type.
+
+A **base URI** is a context-dependent URI used to resolve relative references.
+
+An **identifier** is a URI that designates a single document or single
+object within a document.
+
+A **vocabulary** is the set of symbolic field names and enumerated symbols defined
+by a document schema, where each term maps to an absolute URI.
+
+## Syntax
+
+Conforming Salad documents are serialized and loaded using YAML syntax and
+UTF-8 text encoding.  Salad documents are written using the JSON-compatible
+subset of YAML.  Features of YAML such as headers and type tags that are
+not found in the standard JSON data model must not be used in conforming
+Salad documents.  It is a fatal error if the document is not valid YAML.
+
+A Salad document must consist only of either a single root object or an
+array of objects.
+
+## Document context
+
+### Implied context
+
+The implicit context consists of the vocabulary defined by the schema and
+the base URI.  By default, the base URI must be the URI that was used to
+load the document.  It may be overridden by an explicit context.
+
+### Explicit context
+
+If a document consists of a root object, this object may contain the
+fields `$base`, `$namespaces`, `$schemas`, and `$graph`:
+
+  * `$base`: Must be a string.  Set the base URI for the document used to
+    resolve relative references.
+
+  * `$namespaces`: Must be an object with strings as values.  The keys of
+    the object are namespace prefixes used in the document; the values of
+    the object are the prefix expansions.
+
+  * `$schemas`: Must be an array of strings.  This field may list URI
+    references to documents in RDF-XML format which will be queried for RDF
+    schema data.  The subjects and predicates described by the RDF schema
+    may provide additional semantic context for the document, and may be
+    used for validation of prefixed extension fields found in the document.
+
+Other directives beginning with `$` must be ignored.
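+
+For example, the root object of a document might declare an explicit
+context as follows (a minimal sketch; the `acid` prefix and the schema URL
+are illustrative):
+
+```
+{
+  "$base": "http://example.com/base",
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$schemas": ["http://example.com/acid.rdf"],
+  "$graph": []
+}
+```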
+
+## Document graph
+
+If a document consists of a single root object, this object may contain the
+field `$graph`.  This field must be an array of objects.  If present, this
+field holds the primary content of the document.  A document that consists
+of an array of objects at the root is an implicit graph.
+
+## Document metadata
+
+If a document consists of a single root object, metadata about the
+document, such as authorship, may be declared in the root object.
+
+## Document schema
+
+Document preprocessing, link validation and schema validation require a
+document schema.  A schema may consist of:
+
+  * At least one record definition object which defines valid fields that
+  make up a record type.  Record field definitions include the valid types
+  that may be assigned to each field and annotations to indicate fields
+  that represent identifiers and links, described below in "Semantic
+  Annotations".
+
+  * Any number of enumerated type objects which define a finite set of
+  symbols that are the valid values of the type.
+
+  * Any number of documentation objects which allow in-line documentation of the schema.
+
+The schema for defining a salad schema (the metaschema) is described in
+detail in "Schema validation".
+
+### Record field annotations
+
+In a document schema, record field definitions may include the field
+`jsonldPredicate`, which may be either a string or object.  Implementations
+must preprocess fields according to the following rules:
+
+  * If the value of `jsonldPredicate` is `@id`, the field is an identifier
+  field.
+
+  * If the value of `jsonldPredicate` is an object, and that object
+  contains the field `_type` with the value `@id`, the field is a
+  link field.
+
+  * If the value of `jsonldPredicate` is an object, and that object
+  contains the field `_type` with the value `@vocab`, the field is a
+  vocabulary field, which is a subtype of link field.
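+
+For example, the following hypothetical record type declares `id` as an
+identifier field, `ref` as a link field, and `kind` as a vocabulary field:
+
+```
+{
+  "name": "Example",
+  "type": "record",
+  "fields": [
+    {"name": "id", "type": "string", "jsonldPredicate": "@id"},
+    {"name": "ref", "type": "string", "jsonldPredicate": {"_type": "@id"}},
+    {"name": "kind", "type": "string", "jsonldPredicate": {"_type": "@vocab"}}
+  ]
+}
+```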
+
+## Document traversal
+
+To perform document preprocessing, link validation and schema
+validation, the document must be traversed starting from the fields or
+array items of the root object or array and recursively visiting each child
+item which contains an object or array.
+
+# Document preprocessing
+
+After processing the explicit context (if any), document preprocessing
+begins.  Starting from the document root, object field values or array
+items which contain objects or arrays are recursively traversed
+depth-first.  For each visited object, field names, identifier fields, link
+fields, vocabulary fields, and `$import` and `$include` directives must be
+processed as described in this section.  The order of traversal of child
+nodes within a parent node is undefined.
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res.yml
new file mode 100644
index 0000000..4555f5b
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res.yml
@@ -0,0 +1,35 @@
+- |
+  ## Vocabulary resolution
+
+    The schema may designate one or more vocabulary fields which use terms
+    defined in the vocabulary.  Processing must resolve vocabulary fields to
+    either vocabulary terms or absolute URIs by first applying the link
+    resolution rules defined above, then applying the following additional
+    rule:
+
+      * If a reference URI is a vocabulary field, and there is a vocabulary
+      term which maps to the resolved URI, the reference must be replaced with
+      the vocabulary term.
+
+  ### Vocabulary resolution example
+
+  Given the following schema:
+
+  ```
+- $include: vocab_res_schema.yml
+- |
+  ```
+
+  Process the following example:
+
+  ```
+- $include: vocab_res_src.yml
+- |
+  ```
+
+  This becomes:
+
+  ```
+- $include: vocab_res_proc.yml
+- |
+  ```
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_proc.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_proc.yml
new file mode 100644
index 0000000..d13ab15
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_proc.yml
@@ -0,0 +1,15 @@
+    {
+      "form": {
+        "things": [
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "http://example.com/acid#blue",
+          }
+        ]
+      }
+    }
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_schema.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_schema.yml
new file mode 100644
index 0000000..92b271e
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_schema.yml
@@ -0,0 +1,21 @@
+{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "Colors",
+    "type": "enum",
+    "symbols": ["acid:red"]
+  },
+  {
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "voc",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@vocab"
+      }
+    }]
+  }]
+}
diff --git a/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_src.yml b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_src.yml
new file mode 100644
index 0000000..82954f1
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/salad/schema_salad/metaschema/vocab_res_src.yml
@@ -0,0 +1,15 @@
+    {
+      "form": {
+        "things": [
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "http://example.com/acid#red",
+          },
+          {
+            "voc": "http://example.com/acid#blue",
+          }
+        ]
+      }
+    }
diff --git a/cwltool/schemas/v1.1.0-dev1/userguide-intro.md b/cwltool/schemas/v1.1.0-dev1/userguide-intro.md
new file mode 100644
index 0000000..7077b35
--- /dev/null
+++ b/cwltool/schemas/v1.1.0-dev1/userguide-intro.md
@@ -0,0 +1,28 @@
+# A Gentle Introduction to the Common Workflow Language
+
+Hello!
+
+This guide will introduce you to writing tool wrappers and workflows using the
+Common Workflow Language (CWL).  This guide describes the current development
+specification, version 1.1.0-dev1.
+
+Note: This document is a work in progress.  Not all features are covered yet.
+
+<!--ToC-->
+
+# Introduction
+
+CWL is a way to describe command line tools and connect them together to create
+workflows.  Because CWL is a specification and not a specific piece of
+software, tools and workflows described using CWL are portable across a variety
+of platforms that support the CWL standard.
+
+CWL has roots in "make" and many similar tools that determine order of
+execution based on dependencies between tasks.  However, unlike "make", CWL
+tasks are isolated and you must be explicit about your inputs and outputs.  The
+benefits of explicitness and isolation are flexibility, portability, and
+scalability: tools and workflows described with CWL can transparently leverage
+technologies such as Docker, be used with CWL implementations from different
+vendors, and are well suited for describing large-scale workflows in cluster,
+cloud and high performance computing environments where tasks are scheduled in
+parallel across many nodes.
diff --git a/cwltool/stdfsaccess.py b/cwltool/stdfsaccess.py
index e70208b..5a67ce0 100644
--- a/cwltool/stdfsaccess.py
+++ b/cwltool/stdfsaccess.py
@@ -1,22 +1,36 @@
-from typing import Any, IO
+from typing import Any, BinaryIO, List, Text
 from .pathmapper import abspath
 import glob
 import os
 
-
 class StdFsAccess(object):
 
-    def __init__(self, basedir):  # type: (str) -> None
+    def __init__(self, basedir):  # type: (Text) -> None
         self.basedir = basedir
 
-    def _abs(self, p):  # type: (str) -> str
+    def _abs(self, p):  # type: (Text) -> Text
         return abspath(p, self.basedir)
 
-    def glob(self, pattern):  # type: (str) -> List[str]
-        return glob.glob(self._abs(pattern))
+    def glob(self, pattern):  # type: (Text) -> List[Text]
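+        # Return matches as file:// URIs so callers receive absolute locations.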
+        return ["file://%s" % self._abs(l) for l in glob.glob(self._abs(pattern))]
 
-    def open(self, fn, mode):  # type: (str, str) -> IO[Any]
+    def open(self, fn, mode):  # type: (Text, Text) -> BinaryIO
         return open(self._abs(fn), mode)
 
-    def exists(self, fn):  # type: (str) -> bool
+    def exists(self, fn):  # type: (Text) -> bool
         return os.path.exists(self._abs(fn))
+
+    def isfile(self, fn):  # type: (Text) -> bool
+        return os.path.isfile(self._abs(fn))
+
+    def isdir(self, fn):  # type: (Text) -> bool
+        return os.path.isdir(self._abs(fn))
+
+    def listdir(self, fn):  # type: (Text) -> List[Text]
+        return [abspath(l, fn) for l in os.listdir(self._abs(fn))]
+
+    def join(self, path, *paths):  # type: (Text, *Text) -> Text
+        return os.path.join(path, *paths)
+
+    def realpath(self, path):  # type: (Text) -> Text
+        return os.path.realpath(path)
diff --git a/cwltool/update.py b/cwltool/update.py
index cccbc5b..7dfacce 100644
--- a/cwltool/update.py
+++ b/cwltool/update.py
@@ -2,10 +2,13 @@ import sys
 import urlparse
 import json
 import re
-from .utils import aslist
-from typing import Any, Dict, Callable, List, Tuple, Union
 import traceback
+
 from schema_salad.ref_resolver import Loader
+import schema_salad.validate
+from typing import Any, Callable, Dict, List, Text, Tuple, Union  # pylint: disable=unused-import
+
+from .utils import aslist
 
 def findId(doc, frg):  # type: (Any, Any) -> Dict
     if isinstance(doc, dict):
@@ -27,18 +30,21 @@ def fixType(doc):  # type: (Any) -> Any
     if isinstance(doc, list):
         return [fixType(f) for f in doc]
 
-    if isinstance(doc, (str, unicode)):
-        if doc not in ("null", "boolean", "int", "long", "float", "double", "string", "File", "record", "enum", "array", "Any") and "#" not in doc:
+    if isinstance(doc, (str, Text)):
+        if doc not in (
+                "null", "boolean", "int", "long", "float", "double", "string",
+                "File", "record", "enum", "array", "Any") and "#" not in doc:
             return "#" + doc
     return doc
 
-def _draft2toDraft3dev1(doc, loader, baseuri):  # type: (Any, Loader, str) -> Any
+def _draft2toDraft3dev1(doc, loader, baseuri, update_steps=True):
+    # type: (Any, Loader, Text, bool) -> Any
     try:
         if isinstance(doc, dict):
             if "import" in doc:
                 imp = urlparse.urljoin(baseuri, doc["import"])
                 impLoaded = loader.fetch(imp)
-                r = None  # type: Dict[str, Any]
+                r = None  # type: Dict[Text, Any]
                 if isinstance(impLoaded, list):
                     r = {"@graph": impLoaded}
                 elif isinstance(impLoaded, dict):
@@ -55,11 +61,11 @@ def _draft2toDraft3dev1(doc, loader, baseuri):  # type: (Any, Loader, str) -> An
             if "include" in doc:
                 return loader.fetch_text(urlparse.urljoin(baseuri, doc["include"]))
 
-            for t in ("type", "items"):
-                if t in doc:
-                    doc[t] = fixType(doc[t])
+            for typename in ("type", "items"):
+                if typename in doc:
+                    doc[typename] = fixType(doc[typename])
 
-            if "steps" in doc:
+            if "steps" in doc and update_steps:
                 if not isinstance(doc["steps"], list):
                     raise Exception("Value of 'steps' must be a list")
                 for i, s in enumerate(doc["steps"]):
@@ -85,15 +91,16 @@ def _draft2toDraft3dev1(doc, loader, baseuri):  # type: (Any, Loader, str) -> An
             err = doc["id"]
         elif "name" in doc:
             err = doc["name"]
-        import traceback
         raise Exception(u"Error updating '%s'\n  %s\n%s" % (err, e, traceback.format_exc()))
 
-def draft2toDraft3dev1(doc, loader, baseuri):  # type: (Any, Loader, str) -> Any
-    return (_draft2toDraft3dev1(doc, loader, baseuri), "https://w3id.org/cwl/cwl#draft-3.dev1")
+def draft2toDraft3dev1(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    return (_draft2toDraft3dev1(doc, loader, baseuri), "draft-3.dev1")
+
 
 digits = re.compile("\d+")
 
-def updateScript(sc):  # type: (str) -> str
+def updateScript(sc):  # type: (Text) -> Text
     sc = sc.replace("$job", "inputs")
     sc = sc.replace("$tmpdir", "runtime.tmpdir")
     sc = sc.replace("$outdir", "runtime.outdir")
@@ -103,7 +110,7 @@ def updateScript(sc):  # type: (str) -> str
 
 def _updateDev2Script(ent):  # type: (Any) -> Any
     if isinstance(ent, dict) and "engine" in ent:
-        if ent["engine"] == "cwl:JsonPointer":
+        if ent["engine"] == "https://w3id.org/cwl/cwl#JsonPointer":
             sp = ent["script"].split("/")
             if sp[0] in ("tmpdir", "outdir"):
                 return u"$(runtime.%s)" % sp[0]
@@ -111,7 +118,7 @@ def _updateDev2Script(ent):  # type: (Any) -> Any
                 if not sp[0]:
                     sp.pop(0)
                 front = sp.pop(0)
-                sp = [str(i) if digits.match(i) else "'"+i+"'"
+                sp = [Text(i) if digits.match(i) else "'"+i+"'"
                       for i in sp]
                 if front == "job":
                     return u"$(inputs[%s])" % ']['.join(sp)
@@ -128,7 +135,7 @@ def _updateDev2Script(ent):  # type: (Any) -> Any
 
 
 def _draftDraft3dev1toDev2(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
+    # type: (Any, Loader, Text) -> Any
     doc = _updateDev2Script(doc)
     if isinstance(doc, basestring):
         return doc
@@ -137,10 +144,10 @@ def _draftDraft3dev1toDev2(doc, loader, baseuri):
     if isinstance(doc, dict):
         if "@import" in doc:
             resolved_doc = loader.resolve_ref(
-                    doc["@import"], base_url=baseuri)[0]
+                doc["@import"], base_url=baseuri)[0]
             if isinstance(resolved_doc, dict):
                 return _draftDraft3dev1toDev2(
-                        resolved_doc, loader, resolved_doc["id"])
+                    resolved_doc, loader, resolved_doc["id"])
             else:
                 raise Exception("Unexpected codepath")
 
@@ -172,11 +179,11 @@ def _draftDraft3dev1toDev2(doc, loader, baseuri):
 
 
 def draftDraft3dev1toDev2(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
-    return (_draftDraft3dev1toDev2(doc, loader, baseuri), "https://w3id.org/cwl/cwl#draft-3.dev2")
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    return (_draftDraft3dev1toDev2(doc, loader, baseuri), "draft-3.dev2")
 
 def _draftDraft3dev2toDev3(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
+    # type: (Any, Loader, Text) -> Any
     try:
         if isinstance(doc, dict):
             if "@import" in doc:
@@ -185,6 +192,7 @@ def _draftDraft3dev2toDev3(doc, loader, baseuri):
                 else:
                     imp = urlparse.urljoin(baseuri, doc["@import"])
                     impLoaded = loader.fetch(imp)
+                    r = {}  # type: Dict[Text, Any]
                     if isinstance(impLoaded, list):
                         r = {"@graph": impLoaded}
                     elif isinstance(impLoaded, dict):
@@ -218,18 +226,19 @@ def _draftDraft3dev2toDev3(doc, loader, baseuri):
         raise Exception(u"Error updating '%s'\n  %s\n%s" % (err, e, traceback.format_exc()))
 
 def draftDraft3dev2toDev3(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
-    return (_draftDraft3dev2toDev3(doc, loader, baseuri), "https://w3id.org/cwl/cwl#draft-3.dev3")
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    return (_draftDraft3dev2toDev3(doc, loader, baseuri), "draft-3.dev3")
 
 
 def traverseImport(doc, loader, baseuri, func):
-    # type: (Any, Loader, str, Callable[[Any, Loader, str], Any]) -> Any
+    # type: (Any, Loader, Text, Callable[[Any, Loader, Text], Any]) -> Any
     if "$import" in doc:
         if doc["$import"][0] == "#":
             return doc["$import"]
         else:
             imp = urlparse.urljoin(baseuri, doc["$import"])
             impLoaded = loader.fetch(imp)
+            r = {}  # type: Dict[Text, Any]
             if isinstance(impLoaded, list):
                 r = {"$graph": impLoaded}
             elif isinstance(impLoaded, dict):
@@ -245,7 +254,7 @@ def traverseImport(doc, loader, baseuri, func):
 
 
 def _draftDraft3dev3toDev4(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
+    # type: (Any, Loader, Text) -> Any
     try:
         if isinstance(doc, dict):
             r = traverseImport(doc, loader, baseuri, _draftDraft3dev3toDev4)
@@ -274,11 +283,11 @@ def _draftDraft3dev3toDev4(doc, loader, baseuri):
 
 
 def draftDraft3dev3toDev4(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
-    return (_draftDraft3dev3toDev4(doc, loader, baseuri), "https://w3id.org/cwl/cwl#draft-3.dev4")
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    return (_draftDraft3dev3toDev4(doc, loader, baseuri), "draft-3.dev4")
 
 def _draftDraft3dev4toDev5(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
+    # type: (Any, Loader, Text) -> Any
     try:
         if isinstance(doc, dict):
             r = traverseImport(doc, loader, baseuri, _draftDraft3dev4toDev5)
@@ -307,43 +316,203 @@ def _draftDraft3dev4toDev5(doc, loader, baseuri):
 
 
 def draftDraft3dev4toDev5(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
-    return (_draftDraft3dev4toDev5(doc, loader, baseuri), "https://w3id.org/cwl/cwl#draft-3.dev5")
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    return (_draftDraft3dev4toDev5(doc, loader, baseuri), "draft-3.dev5")
 
 def draftDraft3dev5toFinal(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
-    return (doc, "https://w3id.org/cwl/cwl#draft-3")
-
-
-def update(doc, loader, baseuri):
-    # type: (Any, Loader, str) -> Any
-    updates = {
-        "https://w3id.org/cwl/cwl#draft-2": draft2toDraft3dev1,
-        "https://w3id.org/cwl/cwl#draft-3.dev1": draftDraft3dev1toDev2,
-        "https://w3id.org/cwl/cwl#draft-3.dev2": draftDraft3dev2toDev3,
-        "https://w3id.org/cwl/cwl#draft-3.dev3": draftDraft3dev3toDev4,
-        "https://w3id.org/cwl/cwl#draft-3.dev4": draftDraft3dev4toDev5,
-        "https://w3id.org/cwl/cwl#draft-3.dev5": draftDraft3dev5toFinal,
-        "https://w3id.org/cwl/cwl#draft-3": None
-        }  # type: Dict[unicode, Any]
-
-    def identity(doc, loader, baseuri):
-        # type: (Any, Loader, str) -> Tuple[Any, Union[str, unicode]]
-        v = doc.get("cwlVersion")
-        if v:
-            return (doc, loader.expand_url(v, ""))
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    return (doc, "draft-3")
+
+def _draft3toDraft4dev1(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Any
+    if isinstance(doc, dict):
+        if "class" in doc and doc["class"] == "Workflow":
+            def fixup(f):  # type: (Text) -> Text
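+                # draft-4.dev1 separates step ports with "/" instead of ".",
+                # e.g. a fragment "step.port" becomes "step/port".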
+                doc, frg = urlparse.urldefrag(f)
+                frg = '/'.join(frg.rsplit('.', 1))
+                return doc + "#" + frg
+
+            for step in doc["steps"]:
+                step["in"] = step["inputs"]
+                step["out"] = step["outputs"]
+                del step["inputs"]
+                del step["outputs"]
+                for io in ("in", "out"):
+                    for i in step[io]:
+                        i["id"] = fixup(i["id"])
+                        if "source" in i:
+                            i["source"] = [fixup(s) for s in aslist(i["source"])]
+                            if len(i["source"]) == 1:
+                                i["source"] = i["source"][0]
+                if "scatter" in step:
+                    step["scatter"] = [fixup(s) for s in aslist(step["scatter"])]
+            for out in doc["outputs"]:
+                out["source"] = fixup(out["source"])
+        for key, value in doc.items():
+            doc[key] = _draft3toDraft4dev1(value, loader, baseuri)
+    elif isinstance(doc, list):
+        doc = [_draft3toDraft4dev1(item, loader, baseuri) for item in doc]
+
+    return doc
+
+def draft3toDraft4dev1(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """Public updater for draft-3 to draft-4.dev1."""
+    return (_draft3toDraft4dev1(doc, loader, baseuri), "draft-4.dev1")
+
+def _draft4Dev1toDev2(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Any
+    if isinstance(doc, dict):
+        if "class" in doc and doc["class"] == "Workflow":
+            for out in doc["outputs"]:
+                out["outputSource"] = out["source"]
+                del out["source"]
+        for key, value in doc.items():
+            doc[key] = _draft4Dev1toDev2(value, loader, baseuri)
+    elif isinstance(doc, list):
+        doc = [_draft4Dev1toDev2(item, loader, baseuri) for item in doc]
+
+    return doc
+
+def draft4Dev1toDev2(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """Public updater for draft-4.dev1 to draft-4.dev2."""
+    return (_draft4Dev1toDev2(doc, loader, baseuri), "draft-4.dev2")
+
+
+def _draft4Dev2toDev3(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Any
+    if isinstance(doc, dict):
+        if "class" in doc and doc["class"] == "File":
+            doc["location"] = doc["path"]
+            del doc["path"]
+        if "secondaryFiles" in doc:
+            for i, sf in enumerate(doc["secondaryFiles"]):
+                if "$(" in sf or "${" in sf:
+                    doc["secondaryFiles"][i] = sf.replace('"path"', '"location"').replace(".path", ".location")
+
+        if "class" in doc and doc["class"] == "CreateFileRequirement":
+            doc["class"] = "InitialWorkDirRequirement"
+            doc["listing"] = []
+            for f in doc["fileDef"]:
+                doc["listing"].append({
+                    "entryname": f["filename"],
+                    "entry": f["fileContent"]
+                })
+            del doc["fileDef"]
+        for key, value in doc.items():
+            doc[key] = _draft4Dev2toDev3(value, loader, baseuri)
+    elif isinstance(doc, list):
+        doc = [_draft4Dev2toDev3(item, loader, baseuri) for item in doc]
+
+    return doc
+
+def draft4Dev2toDev3(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """Public updater for draft-4.dev2 to draft-4.dev3."""
+    return (_draft4Dev2toDev3(doc, loader, baseuri), "draft-4.dev3")
+
+def _draft4Dev3to1_0dev4(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Any
+    if isinstance(doc, dict):
+        if "description" in doc:
+            doc["doc"] = doc["description"]
+            del doc["description"]
+        for key, value in doc.items():
+            doc[key] = _draft4Dev3to1_0dev4(value, loader, baseuri)
+    elif isinstance(doc, list):
+        doc = [_draft4Dev3to1_0dev4(item, loader, baseuri) for item in doc]
+    return doc
+
+def draft4Dev3to1_0dev4(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """Public updater for draft-4.dev3 to v1.0.dev4."""
+    return (_draft4Dev3to1_0dev4(doc, loader, baseuri), "v1.0.dev4")
+
+def v1_0dev4to1_0(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """Public updater for v1.0.dev4 to v1.0."""
+    return (doc, "v1.0")
+
+def v1_0to1_1_0dev1(doc, loader, baseuri):
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """Public updater for v1.0 to v1.1.0-dev1."""
+    return (doc, "v1.1.0-dev1")
+
+
+UPDATES = {
+    "draft-2": draft2toDraft3dev1,
+    "draft-3": draft3toDraft4dev1,
+    "v1.0": None
+}  # type: Dict[Text, Callable[[Any, Loader, Text], Tuple[Any, Text]]]
+
+DEVUPDATES = {
+    "draft-3.dev1": draftDraft3dev1toDev2,
+    "draft-3.dev2": draftDraft3dev2toDev3,
+    "draft-3.dev3": draftDraft3dev3toDev4,
+    "draft-3.dev4": draftDraft3dev4toDev5,
+    "draft-3.dev5": draftDraft3dev5toFinal,
+    "draft-4.dev1": draft4Dev1toDev2,
+    "draft-4.dev2": draft4Dev2toDev3,
+    "draft-4.dev3": draft4Dev3to1_0dev4,
+    "v1.0.dev4": v1_0dev4to1_0,
+    "v1.0": v1_0to1_1_0dev1,
+    "v1.1.0-dev1": None
+}  # type: Dict[Text, Callable[[Any, Loader, Text], Tuple[Any, Text]]]
+
+ALLUPDATES = UPDATES.copy()
+ALLUPDATES.update(DEVUPDATES)
+
+LATEST = "v1.0"
+
+def identity(doc, loader, baseuri):  # pylint: disable=unused-argument
+    # type: (Any, Loader, Text) -> Tuple[Any, Text]
+    """The default, do-nothing, CWL document upgrade function."""
+    return (doc, doc["cwlVersion"])
+
+def checkversion(doc, metadata, enable_dev):
+    # type: (Union[List[Dict[Text, Any]], Dict[Text, Any]], Dict[Text, Any], bool) -> Tuple[Dict[Text, Any], Text]  # pylint: disable=line-too-long
+    """Checks the validity of the version of the give CWL document.
+
+    Returns the document and the validated version string.
+    """
+    if isinstance(doc, list):
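+        # A bare list at the root is an implicit $graph; fold it into the
+        # metadata so the version check sees a single root object.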
+        metadata = metadata.copy()
+        metadata[u"$graph"] = doc
+        cdoc = metadata
+    else:
+        cdoc = doc
+
+    version = cdoc[u"cwlVersion"]
+
+    if version not in UPDATES:
+        if version in DEVUPDATES:
+            if not enable_dev:
+                raise schema_salad.validate.ValidationException(
+                    u"Version '%s' is a development or deprecated version.\n "
+                    "Update your document to a stable version (%s) or use "
+                    "--enable-dev to enable support for development and "
+                    "deprecated versions." % (version, ", ".join(
+                        UPDATES.keys())))
         else:
-            return (doc, "https://w3id.org/cwl/cwl#draft-2")
+            raise schema_salad.validate.ValidationException(
+                u"Unrecognized version %s" % version)
 
-    nextupdate = identity
+    return (cdoc, version)
+
+def update(doc, loader, baseuri, enable_dev, metadata):
+    # type: (Union[List[Dict[Text, Any]], Dict[Text, Any]], Loader, Text, bool, Any) -> Dict[Text, Any]
+
+    (cdoc, version) = checkversion(doc, metadata, enable_dev)
+
+    nextupdate = identity  # type: Callable[[Any, Loader, Text], Tuple[Any, Text]]
 
     while nextupdate:
-        (doc, version) = nextupdate(doc, loader, baseuri)
-        if version in updates:
-            nextupdate = updates[version]
-        else:
-            raise Exception(u"Unrecognized version %s" % version)
+        (cdoc, version) = nextupdate(cdoc, loader, baseuri)
+        nextupdate = ALLUPDATES[version]
 
-    doc["cwlVersion"] = version
+    cdoc[u"cwlVersion"] = version
 
-    return doc
+    return cdoc
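
The update() machinery above is a chain of single-step migrations: each updater returns the (possibly rewritten) document together with the version string it now conforms to, and that string indexes ALLUPDATES to find the next hop, with None marking a terminal version. A minimal standalone sketch of the same pattern (illustrative names only, not cwltool's API):

    # Chained single-step updaters; each returns (doc, new_version).
    def v1_to_v2(doc):
        doc = dict(doc)
        doc["renamedField"] = doc.pop("oldField", None)
        return doc, "v2"

    def v2_to_v3(doc):
        doc = dict(doc)
        doc.setdefault("newField", "default")
        return doc, "v3"

    # None marks a terminal version, as in ALLUPDATES above.
    UPDATERS = {"v1": v1_to_v2, "v2": v2_to_v3, "v3": None}

    def upgrade(doc):
        version = doc["version"]
        nextupdate = UPDATERS[version]
        while nextupdate:
            doc, version = nextupdate(doc)
            nextupdate = UPDATERS[version]
        doc["version"] = version
        return doc

    upgrade({"version": "v1", "oldField": 1})
    # -> {'version': 'v3', 'renamedField': 1, 'newField': 'default'}

A consequence of this design, visible in checkversion() above, is that unknown or development versions are rejected up front, before any updater runs.
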
diff --git a/cwltool/utils.py b/cwltool/utils.py
index ec33cad..2f1abf3 100644
--- a/cwltool/utils.py
+++ b/cwltool/utils.py
@@ -8,7 +8,7 @@ def aslist(l):  # type: (Any) -> List[Any]
     else:
         return [l]
 
-def get_feature(self, feature):  # type: (Any, Any) -> Tuple[Any, bool] 
+def get_feature(self, feature):  # type: (Any, Any) -> Tuple[Any, bool]
     for t in reversed(self.requirements):
         if t["class"] == feature:
             return (t, True)
diff --git a/cwltool/workflow.py b/cwltool/workflow.py
index 6b369c8..f2a07d0 100644
--- a/cwltool/workflow.py
+++ b/cwltool/workflow.py
@@ -8,24 +8,23 @@ import logging
 import random
 import os
 from collections import namedtuple
-import pprint
 import functools
 import schema_salad.validate as validate
 import urlparse
-import pprint
 import tempfile
 import shutil
 import json
 import schema_salad
 from . import expression
-from typing import Iterable, List, Callable, Any, Union, Generator, cast
+from .load_tool import load_tool
+from typing import Any, Callable, cast, Generator, Iterable, List, Text, Union
 
 _logger = logging.getLogger("cwltool")
 
 WorkflowStateItem = namedtuple('WorkflowStateItem', ['parameter', 'value'])
 
 def defaultMakeTool(toolpath_object, **kwargs):
-    # type: (Dict[str, Any], **Any) -> Process
+    # type: (Dict[Text, Any], **Any) -> Process
     if not isinstance(toolpath_object, dict):
         raise WorkflowException(u"Not a dict: `%s`" % toolpath_object)
     if "class" in toolpath_object:
@@ -38,7 +37,7 @@ def defaultMakeTool(toolpath_object, **kwargs):
 
     raise WorkflowException(u"Missing or invalid 'class' field in %s, expecting one of: CommandLineTool, ExpressionTool, Workflow" % toolpath_object["id"])
 
-def findfiles(wo, fn=None):  # type: (Any, List) -> List[Dict[str, Any]]
+def findfiles(wo, fn=None):  # type: (Any, List) -> List[Dict[Text, Any]]
     if fn is None:
         fn = []
     if isinstance(wo, dict):
@@ -55,7 +54,7 @@ def findfiles(wo, fn=None):  # type: (Any, List) -> List[Dict[str, Any]]
 
 
 def match_types(sinktype, src, iid, inputobj, linkMerge, valueFrom):
-    # type: (Union[List[str],str], WorkflowStateItem, str, Dict[str, Any], str, str) -> bool
+    # type: (Union[List[Text],Text], WorkflowStateItem, Text, Dict[Text, Any], Text, Text) -> bool
     if isinstance(sinktype, list):
         # Sink is union type
         for st in sinktype:
@@ -83,45 +82,92 @@ def match_types(sinktype, src, iid, inputobj, linkMerge, valueFrom):
             else:
                 raise WorkflowException(u"Unrecognized linkMerge enum '%s'" % linkMerge)
             return True
-    elif valueFrom is not None or are_same_type(src.parameter["type"], sinktype) or sinktype == "Any":
+    elif valueFrom is not None or can_assign_src_to_sink(src.parameter["type"], sinktype) or sinktype == "Any":
         # simply assign the value from state to input
         inputobj[iid] = copy.deepcopy(src.value)
         return True
     return False
 
-def are_same_type(src, sink):  # type: (Any, Any) -> bool
+def can_assign_src_to_sink(src, sink):  # type: (Any, Any) -> bool
     """Check for identical type specifications, ignoring extra keys like inputBinding.
     """
+    if sink == "Any":
+        return True
     if isinstance(src, dict) and isinstance(sink, dict):
         if src["type"] == "array" and sink["type"] == "array":
-            return are_same_type(src["items"], sink["items"])
-        elif src["type"] == sink["type"]:
-            return True
-        else:
-            return False
+            return can_assign_src_to_sink(src["items"], sink["items"])
+        elif src["type"] == "record" and sink["type"] == "record":
+            return _compare_records(src, sink)
+    elif isinstance(src, list):
+        for t in src:
+            if can_assign_src_to_sink(t, sink):
+                return True
+    elif isinstance(sink, list):
+        for t in sink:
+            if can_assign_src_to_sink(src, t):
+                return True
     else:
         return src == sink
+    return False
 
+def _compare_records(src, sink):
+    # type: (Dict[Text, Any], Dict[Text, Any]) -> bool
+    """Compare two records, ensuring they have compatible fields.
 
-def object_from_state(state, parms, frag_only, supportsMultipleInput):
-    # type: (Dict[str,WorkflowStateItem], List[Dict[str, Any]], bool, bool) -> Dict[str, str]
-    inputobj = {}  # type: Dict[str, str]
+    This handles normalizing record names, which will be relative to the
+    workflow step, so that they can be compared.
+    """
+    def _rec_fields(rec):  # type: (Dict[Text, Any]) -> Dict[Text, Any]
+        out = {}
+        for field in rec["fields"]:
+            name = shortname(field["name"])
+            out[name] = field["type"]
+        return out
+
+    srcfields = _rec_fields(src)
+    sinkfields = _rec_fields(sink)
+    for key in sinkfields.iterkeys():
+        if (not can_assign_src_to_sink(
+                srcfields.get(key, "null"), sinkfields.get(key, "null"))
+                and sinkfields.get(key) is not None):
+            _logger.info("Record comparison failure for %s and %s\n"
+                         "Did not match fields for %s: %s and %s" %
+                         (src["name"], sink["name"], key, srcfields.get(key),
+                             sinkfields.get(key)))
+            return False
+    return True
+
+def object_from_state(state, parms, frag_only, supportsMultipleInput, sourceField):
+    # type: (Dict[Text, WorkflowStateItem], List[Dict[Text, Any]], bool, bool, Text) -> Dict[Text, Any]
+    inputobj = {}  # type: Dict[Text, Any]
     for inp in parms:
         iid = inp["id"]
         if frag_only:
             iid = shortname(iid)
-        if "source" in inp:
-            if isinstance(inp["source"], list) and not supportsMultipleInput:
-                raise WorkflowException("Workflow contains multiple inbound links to a single parameter but MultipleInputFeatureRequirement is not declared.")
-            connections = aslist(inp["source"])
+        if sourceField in inp:
+            if (isinstance(inp[sourceField], list) and not
+                    supportsMultipleInput):
+                raise WorkflowException(
+                    "Workflow contains multiple inbound links to a single "
+                    "parameter but MultipleInputFeatureRequirement is not "
+                    "declared.")
+            connections = aslist(inp[sourceField])
             for src in connections:
                 if src in state and state[src] is not None:
-                    if not match_types(inp["type"], state[src], iid, inputobj,
-                                            inp.get("linkMerge", ("merge_nested" if len(connections) > 1 else None)),
-                                       valueFrom=inp.get("valueFrom")):
-                        raise WorkflowException(u"Type mismatch between source '%s' (%s) and sink '%s' (%s)" % (src, state[src].parameter["type"], inp["id"], inp["type"]))
+                    if not match_types(
+                            inp["type"], state[src], iid, inputobj,
+                            inp.get("linkMerge", ("merge_nested"
+                                if len(connections) > 1 else None)),
+                            valueFrom=inp.get("valueFrom")):
+                        raise WorkflowException(
+                            u"Type mismatch between source '%s' (%s) and "
+                            "sink '%s' (%s)" % (src,
+                                state[src].parameter["type"], inp["id"],
+                                inp["type"]))
                 elif src not in state:
-                    raise WorkflowException(u"Connect source '%s' on parameter '%s' does not exist" % (src, inp["id"]))
+                    raise WorkflowException(
+                        u"Connect source '%s' on parameter '%s' does not "
+                        "exist" % (src, inp["id"]))
                 else:
                     return None
         elif "default" in inp:
@@ -144,11 +190,11 @@ class WorkflowJobStep(object):
         self.iterable = None  # type: Iterable
         self.name = uniquename(u"step %s" % shortname(self.id))
 
-    def job(self, joborder, basedir, output_callback, **kwargs):
-        # type: (Dict[str,str], str, functools.partial[None], **Any) -> Generator
+    def job(self, joborder, output_callback, **kwargs):
+        # type: (Dict[Text, Text], functools.partial[None], **Any) -> Generator
         kwargs["part_of"] = self.name
         kwargs["name"] = shortname(self.id)
-        for j in self.step.job(joborder, basedir, output_callback, **kwargs):
+        for j in self.step.job(joborder, output_callback, **kwargs):
             yield j
 
 
@@ -159,9 +205,8 @@ class WorkflowJob(object):
         self.workflow = workflow
         self.tool = workflow.tool
         self.steps = [WorkflowJobStep(s) for s in workflow.steps]
-        self.id = workflow.tool["id"]
-        self.state = None  # type: Dict[str, WorkflowStateItem]
-        self.processStatus = None  # type: str
+        self.state = None  # type: Dict[Text, WorkflowStateItem]
+        self.processStatus = None  # type: Text
         if "outdir" in kwargs:
             self.outdir = kwargs["outdir"]
         elif "tmp_outdir_prefix" in kwargs:
@@ -170,12 +215,12 @@ class WorkflowJob(object):
             # tmp_outdir_prefix defaults to tmp, so this is unlikely to be used
             self.outdir = tempfile.mkdtemp()
 
-        self.name = uniquename(u"workflow %s" % kwargs.get("name", shortname(self.workflow.tool["id"])))
+        self.name = uniquename(u"workflow %s" % kwargs.get("name", shortname(self.workflow.tool.get("id", "embedded"))))
 
-        _logger.debug(u"[%s] initialized step from %s", self.name, self.tool["id"])
+        _logger.debug(u"[%s] initialized from %s", self.name, self.tool.get("id", "workflow embedded in %s" % kwargs.get("part_of")))
 
     def receive_output(self, step, outputparms, jobout, processStatus):
-        # type: (WorkflowJobStep, List[Dict[str,str]], Dict[str,str], str) -> None
+        # type: (WorkflowJobStep, List[Dict[Text,Text]], Dict[Text,Text], Text) -> None
         for i in outputparms:
             if "id" in i:
                 if i["id"] in jobout:
@@ -184,7 +229,8 @@ class WorkflowJob(object):
                     _logger.error(u"Output is missing expected field %s" % i["id"])
                     processStatus = "permanentFail"
 
-        _logger.debug(u"[%s] produced output %s", step.name, json.dumps(jobout, indent=4))
+        if _logger.isEnabledFor(logging.DEBUG):
+            _logger.debug(u"[%s] produced output %s", step.name, json.dumps(jobout, indent=4))
 
         if processStatus != "success":
             if self.processStatus != "permanentFail":
@@ -196,15 +242,17 @@ class WorkflowJob(object):
 
         step.completed = True
 
-    def try_make_job(self, step, basedir, **kwargs):
-        # type: (WorkflowJobStep, str, **Any) -> Generator
+    def try_make_job(self, step, **kwargs):
+        # type: (WorkflowJobStep, **Any) -> Generator
         inputparms = step.tool["inputs"]
         outputparms = step.tool["outputs"]
 
-        supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])
+        supportsMultipleInput = bool(self.workflow.get_requirement(
+            "MultipleInputFeatureRequirement")[0])
 
         try:
-            inputobj = object_from_state(self.state, inputparms, False, supportsMultipleInput)
+            inputobj = object_from_state(
+                self.state, inputparms, False, supportsMultipleInput, "source")
             if inputobj is None:
                 _logger.debug(u"[%s] job step %s not ready", self.name, step.id)
                 return
@@ -216,52 +264,63 @@ class WorkflowJob(object):
 
             callback = functools.partial(self.receive_output, step, outputparms)
 
-            valueFrom = {i["id"]: i["valueFrom"] for i in step.tool["inputs"] if "valueFrom" in i}
+            valueFrom = {
+                i["id"]: i["valueFrom"] for i in step.tool["inputs"]
+                if "valueFrom" in i}
 
             if len(valueFrom) > 0 and not bool(self.workflow.get_requirement("StepInputExpressionRequirement")[0]):
                 raise WorkflowException("Workflow step contains valueFrom but StepInputExpressionRequirement not in requirements")
 
             vfinputs = {shortname(k): v for k,v in inputobj.iteritems()}
-            def valueFromFunc(k, v):  # type: (Any, Any) -> Any
-                if k in valueFrom:
-                    return expression.do_eval(valueFrom[k], vfinputs, self.workflow.requirements,
-                                       None, None, {}, context=v)
-                else:
-                    return v
+
+            def postScatterEval(io):
+                # type: (Dict[Text, Any]) -> Dict[Text, Any]
+                shortio = {shortname(k): v for k,v in io.iteritems()}
+
+                def valueFromFunc(k, v):  # type: (Any, Any) -> Any
+                    if k in valueFrom:
+                        return expression.do_eval(
+                            valueFrom[k], shortio, self.workflow.requirements,
+                            None, None, {}, context=v)
+                    else:
+                        return v
+                return {k: valueFromFunc(k, v) for k,v in io.items()}
 
             if "scatter" in step.tool:
                 scatter = aslist(step.tool["scatter"])
                 method = step.tool.get("scatterMethod")
                 if method is None and len(scatter) != 1:
                     raise WorkflowException("Must specify scatterMethod when scattering over multiple inputs")
-                kwargs["valueFrom"] = valueFromFunc
-
-                inputobj = {k: valueFromFunc(k, v) if k not in scatter else v
-                            for k,v in inputobj.items()}
+                kwargs["postScatterEval"] = postScatterEval
 
                 if method == "dotproduct" or method is None:
-                    jobs = dotproduct_scatter(step, inputobj, basedir, scatter,
+                    jobs = dotproduct_scatter(step, inputobj, scatter,
                                               cast(  # known bug with mypy
                                                   # https://github.com/python/mypy/issues/797
                                                   Callable[[Any], Any],callback), **kwargs)
                 elif method == "nested_crossproduct":
-                    jobs = nested_crossproduct_scatter(step, inputobj, basedir,
-                            scatter, cast(Callable[[Any], Any], callback),
-                            # known bug in mypy
-                            # https://github.com/python/mypy/issues/797
-                            **kwargs)
+                    jobs = nested_crossproduct_scatter(step, inputobj,
+                        scatter, cast(Callable[[Any], Any], callback),
+                        # known bug in mypy
+                        # https://github.com/python/mypy/issues/797
+                        **kwargs)
                 elif method == "flat_crossproduct":
-                    jobs = flat_crossproduct_scatter(step, inputobj, basedir,
-                                                     scatter,
-                                                     cast(Callable[[Any], Any],
+                    jobs = cast(Generator,
+                                flat_crossproduct_scatter(step, inputobj,
+                                                          scatter,
+                                                          cast(Callable[[Any], Any],
                                                          # known bug in mypy
                                                          # https://github.com/python/mypy/issues/797
-                                                         callback), 0, **kwargs)
+                                                               callback), 0, **kwargs))
             else:
-                _logger.debug(u"[job %s] job input %s", step.name, json.dumps(inputobj, indent=4))
-                inputobj = {k: valueFromFunc(k, v) for k,v in inputobj.items()}
-                _logger.debug(u"[job %s] evaluated job input to %s", step.name, json.dumps(inputobj, indent=4))
-                jobs = step.job(inputobj, basedir, callback, **kwargs)
+                if _logger.isEnabledFor(logging.DEBUG):
+                    _logger.debug(u"[job %s] job input %s", step.name, json.dumps(inputobj, indent=4))
+
+                inputobj = postScatterEval(inputobj)
+
+                if _logger.isEnabledFor(logging.DEBUG):
+                    _logger.debug(u"[job %s] evaluated job input to %s", step.name, json.dumps(inputobj, indent=4))
+                jobs = step.job(inputobj, callback, **kwargs)
 
             step.submitted = True
 
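
The postScatterEval hook introduced above moves valueFrom evaluation to after scatter slicing, so each expression sees the single sliced value as its context rather than the whole input array. In miniature, with a toy callable standing in for a CWL valueFrom expression:

    # Toy model: valueFrom must run once per scatter slice.
    value_from = {"y": lambda inputs, self_: self_ * 2}  # stand-in expression

    def post_scatter_eval(io):
        return {k: (value_from[k](io, v) if k in value_from else v)
                for k, v in io.items()}

    joborder = {"x": "a", "y": [1, 2, 3]}
    for item in joborder["y"]:              # scatter over y
        job_slice = dict(joborder, y=item)  # one slice per array element
        print(post_scatter_eval(job_slice))
    # {'x': 'a', 'y': 2} then {'x': 'a', 'y': 4} then {'x': 'a', 'y': 6}
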
@@ -277,8 +336,8 @@ class WorkflowJob(object):
     def run(self, **kwargs):
         _logger.debug(u"[%s] workflow starting", self.name)
 
-    def job(self, joborder, basedir, output_callback, move_outputs=True, **kwargs):
-        # type: (Dict[str,str], str, Callable[[Any, Any], Any], bool, **Any) -> Generator[WorkflowJob, None, None]
+    def job(self, joborder, output_callback, **kwargs):
+        # type: (Dict[Text, Any], Callable[[Any, Any], Any], **Any) -> Generator
         self.state = {}
         self.processStatus = "success"
 
@@ -298,22 +357,23 @@ class WorkflowJob(object):
             for out in s.tool["outputs"]:
                 self.state[out["id"]] = None
 
-        output_dirs = set()
-
         completed = 0
         while completed < len(self.steps) and self.processStatus == "success":
             made_progress = False
 
             for step in self.steps:
+                if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
+                    break
+
                 if not step.submitted:
-                    step.iterable = self.try_make_job(step, basedir, **kwargs)
+                    step.iterable = self.try_make_job(step, **kwargs)
 
                 if step.iterable:
                     for newjob in step.iterable:
+                        if kwargs.get("on_error", "stop") == "stop" and self.processStatus != "success":
+                            break
                         if newjob:
                             made_progress = True
-                            if newjob.outdir:
-                                output_dirs.add(newjob.outdir)
                             yield newjob
                         else:
                             break
@@ -325,48 +385,11 @@ class WorkflowJob(object):
 
         supportsMultipleInput = bool(self.workflow.get_requirement("MultipleInputFeatureRequirement")[0])
 
-        wo = object_from_state(self.state, self.tool["outputs"], True, supportsMultipleInput)
+        wo = object_from_state(self.state, self.tool["outputs"], True, supportsMultipleInput, "outputSource")
 
         if wo is None:
             raise WorkflowException("Output for workflow not available")
 
-        if move_outputs:
-            targets = set()  # type: Set[str]
-            conflicts = set()
-
-            outfiles = findfiles(wo)
-
-            for f in outfiles:
-                for a in output_dirs:
-                    if f["path"].startswith(a):
-                        src = f["path"]
-                        dst = os.path.join(self.outdir, src[len(a)+1:])
-                        if dst in targets:
-                            conflicts.add(dst)
-                        else:
-                            targets.add(dst)
-
-            for f in outfiles:
-                for a in output_dirs:
-                    if f["path"].startswith(a):
-                        src = f["path"]
-                        dst = os.path.join(self.outdir, src[len(a)+1:])
-                        if dst in conflicts:
-                            sp = os.path.splitext(dst)
-                            dst = u"%s-%s%s" % (sp[0], str(random.randint(1, 1000000000)), sp[1])
-                        dirname = os.path.dirname(dst)
-                        if not os.path.exists(dirname):
-                            os.makedirs(dirname)
-                        _logger.debug(u"[%s] Moving '%s' to '%s'", self.name, src, dst)
-                        shutil.move(src, dst)
-                        f["path"] = dst
-
-            for a in output_dirs:
-                if os.path.exists(a) and empty_subtree(a):
-                    if kwargs.get("rm_tmpdir", True):
-                        _logger.debug(u"[%s] Removing intermediate output directory %s", self.name, a)
-                        shutil.rmtree(a, True)
-
         _logger.info(u"[%s] outdir is %s", self.name, self.outdir)
 
         output_callback(wo, self.processStatus)
@@ -374,7 +397,7 @@ class WorkflowJob(object):
 
 class Workflow(Process):
     def __init__(self, toolpath_object, **kwargs):
-        # type: (Dict[str, Any], **Any) -> None
+        # type: (Dict[Text, Any], **Any) -> None
         super(Workflow, self).__init__(toolpath_object, **kwargs)
 
         kwargs["requirements"] = self.requirements
@@ -386,15 +409,15 @@ class Workflow(Process):
 
         # TODO: statically validate data links instead of doing it at runtime.
 
-    def job(self, joborder, basedir, output_callback, **kwargs):
-        # type: (Dict[str,str], str, Callable[[Any, Any], Any], **Any) -> Generator[WorkflowJob, None, None]
-        builder = self._init_job(joborder, basedir, **kwargs)
+    def job(self, joborder, output_callback, **kwargs):
+        # type: (Dict[Text, Text], Callable[[Any, Any], Any], **Any) -> Generator
+        builder = self._init_job(joborder, **kwargs)
         wj = WorkflowJob(self, **kwargs)
         yield wj
 
         kwargs["part_of"] = u"workflow %s" % wj.name
 
-        for w in wj.job(builder.job, basedir, output_callback, **kwargs):
+        for w in wj.job(builder.job, output_callback, **kwargs):
             yield w
 
     def visit(self, op):
@@ -406,39 +429,56 @@ class Workflow(Process):
 class WorkflowStep(Process):
 
     def __init__(self, toolpath_object, pos, **kwargs):
-        # type: (Dict[str, Any], int, **Any) -> None
+        # type: (Dict[Text, Any], int, **Any) -> None
         if "id" in toolpath_object:
             self.id = toolpath_object["id"]
         else:
-            self.id = "#step" + str(pos)
+            self.id = "#step" + Text(pos)
+
+        kwargs["requirements"] = kwargs.get("requirements", []) + toolpath_object.get("requirements", [])
+        kwargs["hints"] = kwargs.get("hints", []) + toolpath_object.get("hints", [])
 
         try:
-            makeTool = kwargs.get("makeTool")
-            runobj = None
-            if isinstance(toolpath_object["run"], (str, unicode)):
-                runobj = schema_salad.schema.load_and_validate(
-                        kwargs["loader"], kwargs["avsc_names"],
-                        toolpath_object["run"], True)[0]
+            if isinstance(toolpath_object["run"], dict):
+                self.embedded_tool = kwargs.get("makeTool")(toolpath_object["run"], **kwargs)
             else:
-                runobj = toolpath_object["run"]
-            self.embedded_tool = makeTool(runobj, **kwargs)
+                self.embedded_tool = load_tool(
+                    toolpath_object["run"], kwargs.get("makeTool"), kwargs,
+                    enable_dev=kwargs.get("enable_dev"),
+                    strict=kwargs.get("strict"),
+                    fetcher_constructor=kwargs.get("fetcher_constructor"))
         except validate.ValidationException as v:
-            raise WorkflowException(u"Tool definition %s failed validation:\n%s" % (toolpath_object["run"], validate.indent(str(v))))
+            raise WorkflowException(
+                u"Tool definition %s failed validation:\n%s" %
+                (toolpath_object["run"], validate.indent(str(v))))
+
+        self.tool = toolpath_object = copy.deepcopy(toolpath_object)
+        for stepfield, toolfield in (("in", "inputs"), ("out", "outputs")):
+            toolpath_object[toolfield] = []
+            for step_entry in toolpath_object[stepfield]:
+                if isinstance(step_entry, (str, unicode)):
+                    param = {}  # type: Dict[Text, Any]
+                    inputid = step_entry
+                else:
+                    param = step_entry.copy()
+                    inputid = step_entry["id"]
 
-        for field in ("inputs", "outputs"):
-            for i in toolpath_object[field]:
-                inputid = i["id"]
-                p = shortname(inputid)
+                shortinputid = shortname(inputid)
                 found = False
-                for a in self.embedded_tool.tool[field]:
-                    frag = shortname(a["id"])
-                    if frag == p:
-                        i.update(a)
+                for tool_entry in self.embedded_tool.tool[toolfield]:
+                    frag = shortname(tool_entry["id"])
+                    if frag == shortinputid:
+                        param.update(tool_entry)
                         found = True
+                        break
                 if not found:
-                    i["type"] = "Any"
-                    #raise WorkflowException("Parameter '%s' of %s in workflow step %s does not correspond to parameter in %s" % (p, field, self.id, self.embedded_tool.tool.get("id")))
-                i["id"] = inputid
+                    if stepfield == "in":
+                        param["type"] = "Any"
+                    else:
+                        raise WorkflowException("[%s] Workflow step output '%s' not found in the outputs of the tool (expected one of '%s')" % (
+                            self.id, shortname(inputid), "', '".join([shortname(tool_entry["id"]) for tool_entry in self.embedded_tool.tool[toolfield]])))
+                param["id"] = inputid
+                toolpath_object[toolfield].append(param)
 
         super(WorkflowStep, self).__init__(toolpath_object, **kwargs)
 
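
The constructor above now resolves each workflow-step "in"/"out" entry against the embedded tool's parameters by comparing shortnames, i.e. the identifier fragment after the last "#" or "/". Schematically, with a simplified stand-in for cwltool's real shortname():

    def shortname(inputid):
        # simplified: keep only the final fragment of the identifier
        return inputid.split("#")[-1].split("/")[-1]

    # "wf.cwl#step1/input_a" and "tool.cwl#input_a" both shorten to
    # "input_a", so the step entry inherits (via param.update) the type
    # and binding of the matching tool parameter.
    assert shortname("wf.cwl#step1/input_a") == "input_a"
    assert shortname("tool.cwl#input_a") == "input_a"

An unmatched "in" entry falls back to type "Any", while an unmatched "out" entry is now a hard error, since nothing could ever produce it.
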
@@ -479,7 +519,7 @@ class WorkflowStep(Process):
             self.tool["outputs"] = outputparms
 
     def receive_output(self, output_callback, jobout, processStatus):
-        # type: (Callable[...,Any], Dict[str, str], str) -> None
+        # type: (Callable[...,Any], Dict[Text, Text], Text) -> None
         #_logger.debug("WorkflowStep output from run is %s", jobout)
         output = {}
         for i in self.tool["outputs"]:
@@ -490,19 +530,16 @@ class WorkflowStep(Process):
                 processStatus = "permanentFail"
         output_callback(output, processStatus)
 
-    def job(self, joborder, basedir, output_callback, **kwargs):
-        # type: (Dict[str, Any], str, Callable[...,Any], **Any) -> Generator
+    def job(self, joborder, output_callback, **kwargs):
+        # type: (Dict[Text, Any], Callable[...,Any], **Any) -> Generator
         for i in self.tool["inputs"]:
             p = i["id"]
             field = shortname(p)
             joborder[field] = joborder[i["id"]]
             del joborder[i["id"]]
 
-        kwargs["requirements"] = kwargs.get("requirements", []) + self.tool.get("requirements", [])
-        kwargs["hints"] = kwargs.get("hints", []) + self.tool.get("hints", [])
-
         try:
-            for t in self.embedded_tool.job(joborder, basedir,
+            for t in self.embedded_tool.job(joborder,
                                             functools.partial(self.receive_output, output_callback),
                                             **kwargs):
                 yield t
@@ -511,7 +548,7 @@ class WorkflowStep(Process):
             raise
         except Exception as e:
             _logger.exception("Unexpected exception")
-            raise WorkflowException(str(e))
+            raise WorkflowException(Text(e))
 
     def visit(self, op):
         self.embedded_tool.visit(op)
@@ -520,15 +557,15 @@ class WorkflowStep(Process):
 class ReceiveScatterOutput(object):
 
     def __init__(self, output_callback, dest):
-        # type: (Callable[..., Any], Dict[str,List[str]]) -> None
+        # type: (Callable[..., Any], Dict[Text,List[Text]]) -> None
         self.dest = dest
         self.completed = 0
-        self.processStatus = "success"
+        self.processStatus = u"success"
         self.total = None  # type: int
         self.output_callback = output_callback
 
     def receive_scatter_output(self, index, jobout, processStatus):
-        # type: (int, Dict[str, str], str) -> None
+        # type: (int, Dict[Text, Text], Text) -> None
         for k,v in jobout.items():
             self.dest[k][index] = v
 
@@ -546,9 +583,25 @@ class ReceiveScatterOutput(object):
         if self.completed == self.total:
             self.output_callback(self.dest, self.processStatus)
 
+def parallel_steps(steps, rc, kwargs):  # type: (List[Generator], ReceiveScatterOutput, Dict[Text, Any]) -> Generator
+    while rc.completed < rc.total:
+        made_progress = False
+        for step in steps:
+            if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
+                break
+            for j in step:
+                if kwargs.get("on_error", "stop") == "stop" and rc.processStatus != "success":
+                    break
+                if j:
+                    made_progress = True
+                    yield j
+                else:
+                    break
+        if not made_progress and rc.completed < rc.total:
+            yield None
 
-def dotproduct_scatter(process, joborder, basedir, scatter_keys, output_callback, **kwargs):
-    # type: (WorkflowJobStep, Dict[str, Any], str, List[str], Callable[..., Any], **Any) -> Generator[WorkflowJob, None, None]
+def dotproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
+    # type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
     l = None
     for s in scatter_keys:
         if l is None:
@@ -556,54 +609,58 @@ def dotproduct_scatter(process, joborder, basedir, scatter_keys, output_callback
         elif l != len(joborder[s]):
             raise WorkflowException("Length of input arrays must be equal when performing dotproduct scatter.")
 
-    output = {}  # type: Dict[str,List[str]]
+    output = {}  # type: Dict[Text,List[Text]]
     for i in process.tool["outputs"]:
         output[i["id"]] = [None] * l
 
     rc = ReceiveScatterOutput(output_callback, output)
 
+    steps = []
     for n in range(0, l):
         jo = copy.copy(joborder)
         for s in scatter_keys:
-            jo[s] = kwargs["valueFrom"](s, joborder[s][n])
+            jo[s] = joborder[s][n]
 
-        for j in process.job(jo, basedir, functools.partial(rc.receive_scatter_output, n), **kwargs):
-            yield j
+        jo = kwargs["postScatterEval"](jo)
+
+        steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
 
     rc.setTotal(l)
 
+    return parallel_steps(steps, rc, kwargs)
+
 
-def nested_crossproduct_scatter(process, joborder, basedir, scatter_keys, output_callback, **kwargs):
-    # type: (WorkflowJobStep, Dict[str, Any], str, List[str], Callable[..., Any], **Any) -> Generator[WorkflowJob, None, None]
+def nested_crossproduct_scatter(process, joborder, scatter_keys, output_callback, **kwargs):
+    # type: (WorkflowJobStep, Dict[Text, Any], List[Text], Callable[..., Any], **Any) -> Generator
     scatter_key = scatter_keys[0]
     l = len(joborder[scatter_key])
-    output = {}  # type: Dict[str,List[str]]
+    output = {}  # type: Dict[Text,List[Text]]
     for i in process.tool["outputs"]:
         output[i["id"]] = [None] * l
 
     rc = ReceiveScatterOutput(output_callback, output)
 
+    steps = []
     for n in range(0, l):
         jo = copy.copy(joborder)
-        jo[scatter_key] = kwargs["valueFrom"](scatter_key, joborder[scatter_key][n])
+        jo[scatter_key] = joborder[scatter_key][n]
 
         if len(scatter_keys) == 1:
-            for j in process.job(jo, basedir, functools.partial(rc.receive_scatter_output, n), **kwargs):
-                yield j
+            jo = kwargs["postScatterEval"](jo)
+            steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, n), **kwargs))
         else:
-            for j in nested_crossproduct_scatter(process, jo, basedir,
+            steps.append(nested_crossproduct_scatter(process, jo,
                     scatter_keys[1:], cast(  # known bug with mypy
                         # https://github.com/python/mypy/issues/797
                         Callable[[Any], Any],
-                        functools.partial(rc.receive_scatter_output, n)),
-                    **kwargs):
-                yield j
+                        functools.partial(rc.receive_scatter_output, n)), **kwargs))
 
     rc.setTotal(l)
 
+    return parallel_steps(steps, rc, kwargs)
 
 def crossproduct_size(joborder, scatter_keys):
-    # type: (Dict[str, Any], List[str]) -> int
+    # type: (Dict[Text, Any], List[Text]) -> int
     scatter_key = scatter_keys[0]
     if len(scatter_keys) == 1:
         sum = len(joborder[scatter_key])
@@ -615,14 +672,14 @@ def crossproduct_size(joborder, scatter_keys):
             sum += crossproduct_size(joborder, scatter_keys[1:])
     return sum
 
-def flat_crossproduct_scatter(process, joborder, basedir, scatter_keys, output_callback, startindex, **kwargs):
-    # type: (WorkflowJobStep, Dict[str, Any], str, List[str], Union[ReceiveScatterOutput,Callable[..., Any]], int, **Any) -> Generator[WorkflowJob, None, None]
+def flat_crossproduct_scatter(process, joborder, scatter_keys, output_callback, startindex, **kwargs):
+    # type: (WorkflowJobStep, Dict[Text, Any], List[Text], Union[ReceiveScatterOutput,Callable[..., Any]], int, **Any) -> Union[List[Generator], Generator]
     scatter_key = scatter_keys[0]
     l = len(joborder[scatter_key])
     rc = None  # type: ReceiveScatterOutput
 
     if startindex == 0 and not isinstance(output_callback, ReceiveScatterOutput):
-        output = {}  # type: Dict[str,List[str]]
+        output = {}  # type: Dict[Text,List[Text]]
         for i in process.tool["outputs"]:
             output[i["id"]] = [None] * crossproduct_size(joborder, scatter_keys)
         rc = ReceiveScatterOutput(output_callback, output)
@@ -631,19 +688,23 @@ def flat_crossproduct_scatter(process, joborder, basedir, scatter_keys, output_c
     else:
         raise Exception("Unhandled code path. Please report this.")
 
+    steps = []
     put = startindex
     for n in range(0, l):
         jo = copy.copy(joborder)
-        jo[scatter_key] = kwargs["valueFrom"](scatter_key, joborder[scatter_key][n])
+        jo[scatter_key] = joborder[scatter_key][n]
 
         if len(scatter_keys) == 1:
-            for j in process.job(jo, basedir, functools.partial(rc.receive_scatter_output, put), **kwargs):
-                yield j
+            jo = kwargs["postScatterEval"](jo)
+            steps.append(process.job(jo, functools.partial(rc.receive_scatter_output, put), **kwargs))
             put += 1
         else:
-            for j in flat_crossproduct_scatter(process, jo, basedir, scatter_keys[1:], rc, put, **kwargs):
-                if j:
-                    put += 1
-                yield j
+            add = flat_crossproduct_scatter(process, jo, scatter_keys[1:], rc, put, **kwargs)
+            put += len(cast(List[Generator], add))
+            steps.extend(add)
 
-    rc.setTotal(put)
+    if startindex == 0 and not isinstance(output_callback, ReceiveScatterOutput):
+        rc.setTotal(put)
+        return parallel_steps(steps, rc, kwargs)
+    else:
+        return steps
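
parallel_steps() above changes the execution shape of scatters: instead of draining each slice's job generator to completion before starting the next, all per-slice generators are created first and then serviced round-robin, yielding None when no generator made progress so the runner can wait on outstanding callbacks. The scheduling skeleton in isolation (simplified, without the ReceiveScatterOutput bookkeeping):

    # Round-robin over job generators, in the style of parallel_steps().
    def round_robin(steps):
        pending = list(steps)
        while pending:
            made_progress = False
            for step in list(pending):
                try:
                    job = next(step)
                except StopIteration:
                    pending.remove(step)
                    continue
                if job is None:
                    continue              # this step is not ready yet
                made_progress = True
                yield job
            if pending and not made_progress:
                yield None                # signal the caller to wait

    a = iter(["a1", None, "a2"])
    b = iter(["b1", "b2"])
    [j for j in round_robin([a, b]) if j]  # -> ['a1', 'b1', 'b2', 'a2']

For sizing the collected output arrays, crossproduct_size() above is plain arithmetic: a flat crossproduct over scatter arrays of length 2 and 3 reserves 2 * 3 = 6 slots.
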
diff --git a/ez_setup.py b/ez_setup.py
old mode 100644
new mode 100755
index f5fa741..50e0dfc
--- a/ez_setup.py
+++ b/ez_setup.py
@@ -1,18 +1,11 @@
 #!/usr/bin/env python
-"""Bootstrap setuptools installation
 
-To use setuptools in your package's setup.py, include this
-file in the same directory and add this to the top of your setup.py::
-
-    from ez_setup import use_setuptools
-    use_setuptools()
-
-To require a specific version of setuptools, set a download
-mirror, or use an alternate download directory, simply supply
-the appropriate options to ``use_setuptools()``.
+"""
+Setuptools bootstrapping installer.
 
-This file can also be run as a script to install or upgrade setuptools.
+Run this script to install or upgrade setuptools.
 """
+
 import os
 import shutil
 import sys
@@ -23,19 +16,29 @@ import subprocess
 import platform
 import textwrap
 import contextlib
+import warnings
 
 from distutils import log
 
 try:
+    from urllib.request import urlopen
+except ImportError:
+    from urllib2 import urlopen
+
+try:
     from site import USER_SITE
 except ImportError:
     USER_SITE = None
 
-DEFAULT_VERSION = "3.4.1"
+DEFAULT_VERSION = "18.5"
 DEFAULT_URL = "https://pypi.python.org/packages/source/s/setuptools/"
+DEFAULT_SAVE_DIR = os.curdir
+
 
 def _python_cmd(*args):
     """
+    Execute a command.
+
     Return True if the command succeeded.
     """
     args = (sys.executable,) + args
@@ -43,6 +46,7 @@ def _python_cmd(*args):
 
 
 def _install(archive_filename, install_args=()):
+    """Install Setuptools."""
     with archive_context(archive_filename):
         # installing
         log.warn('Installing Setuptools')
@@ -54,6 +58,7 @@ def _install(archive_filename, install_args=()):
 
 
 def _build_egg(egg, archive_filename, to_dir):
+    """Build Setuptools egg."""
     with archive_context(archive_filename):
         # building an egg
         log.warn('Building a Setuptools egg in %s', to_dir)
@@ -64,28 +69,36 @@ def _build_egg(egg, archive_filename, to_dir):
         raise IOError('Could not build the egg.')
 
 
-def get_zip_class():
-    """
-    Supplement ZipFile class to support context manager for Python 2.6
-    """
-    class ContextualZipFile(zipfile.ZipFile):
-        def __enter__(self):
-            return self
-        def __exit__(self, type, value, traceback):
-            self.close
-    return zipfile.ZipFile if hasattr(zipfile.ZipFile, '__exit__') else \
-        ContextualZipFile
+class ContextualZipFile(zipfile.ZipFile):
+
+    """Supplement ZipFile class to support context manager for Python 2.6."""
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __new__(cls, *args, **kwargs):
+        """Construct a ZipFile or ContextualZipFile as appropriate."""
+        if hasattr(zipfile.ZipFile, '__exit__'):
+            return zipfile.ZipFile(*args, **kwargs)
+        return super(ContextualZipFile, cls).__new__(cls)
 
 
 @contextlib.contextmanager
 def archive_context(filename):
-    # extracting the archive
+    """
+    Unzip filename to a temporary directory and make it the cwd.
+
+    The unzipped target is cleaned up afterwards.
+    """
     tmpdir = tempfile.mkdtemp()
     log.warn('Extracting in %s', tmpdir)
     old_wd = os.getcwd()
     try:
         os.chdir(tmpdir)
-        with get_zip_class()(filename) as archive:
+        with ContextualZipFile(filename) as archive:
             archive.extractall()
 
         # going in the directory
@@ -100,6 +113,7 @@ def archive_context(filename):
 
 
 def _do_download(version, download_base, to_dir, download_delay):
+    """Download Setuptools."""
     egg = os.path.join(to_dir, 'setuptools-%s-py%d.%d.egg'
                        % (version, sys.version_info[0], sys.version_info[1]))
     if not os.path.exists(egg):
@@ -117,41 +131,77 @@ def _do_download(version, download_base, to_dir, download_delay):
     setuptools.bootstrap_install_from = egg
 
 
-def use_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=os.curdir, download_delay=15):
+def use_setuptools(
+        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+        to_dir=DEFAULT_SAVE_DIR, download_delay=15):
+    """
+    Ensure that a setuptools version is installed.
+
+    Return None. Raise SystemExit if the requested version
+    or later cannot be installed.
+    """
     to_dir = os.path.abspath(to_dir)
+
+    # prior to importing, capture the module state for
+    # representative modules.
     rep_modules = 'pkg_resources', 'setuptools'
     imported = set(sys.modules).intersection(rep_modules)
+
     try:
         import pkg_resources
-    except ImportError:
-        return _do_download(version, download_base, to_dir, download_delay)
-    try:
         pkg_resources.require("setuptools>=" + version)
+        # a suitable version is already installed
         return
+    except ImportError:
+        # pkg_resources not available; setuptools is not installed; download
+        pass
     except pkg_resources.DistributionNotFound:
-        return _do_download(version, download_base, to_dir, download_delay)
+        # no version of setuptools was found; allow download
+        pass
     except pkg_resources.VersionConflict as VC_err:
         if imported:
-            msg = textwrap.dedent("""
-                The required version of setuptools (>={version}) is not available,
-                and can't be installed while this script is running. Please
-                install a more recent version first, using
-                'easy_install -U setuptools'.
+            _conflict_bail(VC_err, version)
+
+        # otherwise, unload pkg_resources to allow the downloaded version to
+        #  take precedence.
+        del pkg_resources
+        _unload_pkg_resources()
 
-                (Currently using {VC_err.args[0]!r})
-                """).format(VC_err=VC_err, version=version)
-            sys.stderr.write(msg)
-            sys.exit(2)
+    return _do_download(version, download_base, to_dir, download_delay)
+
+
+def _conflict_bail(VC_err, version):
+    """
+    Setuptools was imported prior to invocation, so it is
+    unsafe to unload it. Bail out.
+    """
+    conflict_tmpl = textwrap.dedent("""
+        The required version of setuptools (>={version}) is not available,
+        and can't be installed while this script is running. Please
+        install a more recent version first, using
+        'easy_install -U setuptools'.
+
+        (Currently using {VC_err.args[0]!r})
+        """)
+    msg = conflict_tmpl.format(**locals())
+    sys.stderr.write(msg)
+    sys.exit(2)
+
+
+def _unload_pkg_resources():
+    del_modules = [
+        name for name in sys.modules
+        if name.startswith('pkg_resources')
+    ]
+    for mod_name in del_modules:
+        del sys.modules[mod_name]
 
-        # otherwise, reload ok
-        del pkg_resources, sys.modules['pkg_resources']
-        return _do_download(version, download_base, to_dir, download_delay)
 
 def _clean_check(cmd, target):
     """
-    Run the command to download target. If the command fails, clean up before
-    re-raising the error.
+    Run the command to download target.
+
+    If the command fails, clean up before re-raising the error.
     """
     try:
         subprocess.check_call(cmd)
@@ -160,115 +210,110 @@ def _clean_check(cmd, target):
             os.unlink(target)
         raise
 
+
 def download_file_powershell(url, target):
     """
-    Download the file at url to target using Powershell (which will validate
-    trust). Raise an exception if the command cannot complete.
+    Download the file at url to target using Powershell.
+
+    Powershell will validate trust.
+    Raise an exception if the command cannot complete.
     """
     target = os.path.abspath(target)
+    ps_cmd = (
+        "[System.Net.WebRequest]::DefaultWebProxy.Credentials = "
+        "[System.Net.CredentialCache]::DefaultCredentials; "
+        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)"
+        % vars()
+    )
     cmd = [
         'powershell',
         '-Command',
-        "(new-object System.Net.WebClient).DownloadFile(%(url)r, %(target)r)" % vars(),
+        ps_cmd,
     ]
     _clean_check(cmd, target)
 
+
 def has_powershell():
+    """Determine if Powershell is available."""
     if platform.system() != 'Windows':
         return False
     cmd = ['powershell', '-Command', 'echo test']
-    devnull = open(os.path.devnull, 'wb')
-    try:
+    with open(os.path.devnull, 'wb') as devnull:
         try:
             subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
         except Exception:
             return False
-    finally:
-        devnull.close()
     return True
-
 download_file_powershell.viable = has_powershell
 
+
 def download_file_curl(url, target):
     cmd = ['curl', url, '--silent', '--output', target]
     _clean_check(cmd, target)
 
+
 def has_curl():
     cmd = ['curl', '--version']
-    devnull = open(os.path.devnull, 'wb')
-    try:
+    with open(os.path.devnull, 'wb') as devnull:
         try:
             subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
         except Exception:
             return False
-    finally:
-        devnull.close()
     return True
-
 download_file_curl.viable = has_curl
 
+
 def download_file_wget(url, target):
     cmd = ['wget', url, '--quiet', '--output-document', target]
     _clean_check(cmd, target)
 
+
 def has_wget():
     cmd = ['wget', '--version']
-    devnull = open(os.path.devnull, 'wb')
-    try:
+    with open(os.path.devnull, 'wb') as devnull:
         try:
             subprocess.check_call(cmd, stdout=devnull, stderr=devnull)
         except Exception:
             return False
-    finally:
-        devnull.close()
     return True
-
 download_file_wget.viable = has_wget
 
+
 def download_file_insecure(url, target):
-    """
-    Use Python to download the file, even though it cannot authenticate the
-    connection.
-    """
+    """Use Python to download the file, without connection authentication."""
+    src = urlopen(url)
     try:
-        from urllib.request import urlopen
-    except ImportError:
-        from urllib2 import urlopen
-    src = dst = None
-    try:
-        src = urlopen(url)
-        # Read/write all in one block, so we don't create a corrupt file
-        # if the download is interrupted.
+        # Read all the data in one block.
         data = src.read()
-        dst = open(target, "wb")
-        dst.write(data)
     finally:
-        if src:
-            src.close()
-        if dst:
-            dst.close()
+        src.close()
 
+    # Write all the data in one block to avoid creating a partial file.
+    with open(target, "wb") as dst:
+        dst.write(data)
 download_file_insecure.viable = lambda: True
 
+
 def get_best_downloader():
-    downloaders = [
+    downloaders = (
         download_file_powershell,
         download_file_curl,
         download_file_wget,
         download_file_insecure,
-    ]
+    )
+    viable_downloaders = (dl for dl in downloaders if dl.viable())
+    return next(viable_downloaders, None)
 
-    for dl in downloaders:
-        if dl.viable():
-            return dl
 
-def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
-        to_dir=os.curdir, delay=15, downloader_factory=get_best_downloader):
+def download_setuptools(
+        version=DEFAULT_VERSION, download_base=DEFAULT_URL,
+        to_dir=DEFAULT_SAVE_DIR, delay=15,
+        downloader_factory=get_best_downloader):
     """
-    Download setuptools from a specified location and return its filename
+    Download setuptools from a specified location and return its filename.
 
     `version` should be a valid setuptools version number that is available
-    as an egg for download under the `download_base` URL (which should end
+    as an sdist for download under the `download_base` URL (which should end
     with a '/'). `to_dir` is the directory where the egg will be downloaded.
     `delay` is the number of seconds to pause before an actual download
     attempt.
@@ -287,16 +332,18 @@ def download_setuptools(version=DEFAULT_VERSION, download_base=DEFAULT_URL,
         downloader(url, saveto)
     return os.path.realpath(saveto)
 
+
 def _build_install_args(options):
     """
-    Build the arguments to 'python setup.py install' on the setuptools package
+    Build the arguments to 'python setup.py install' on the setuptools package.
+
+    Returns list of command line arguments.
     """
     return ['--user'] if options.user_install else []
 
+
 def _parse_args():
-    """
-    Parse the command line for options
-    """
+    """Parse the command line for options."""
     parser = optparse.OptionParser()
     parser.add_option(
         '--user', dest='user_install', action='store_true', default=False,
@@ -314,18 +361,30 @@ def _parse_args():
         '--version', help="Specify which version to download",
         default=DEFAULT_VERSION,
     )
+    parser.add_option(
+        '--to-dir',
+        help="Directory to save (and re-use) package",
+        default=DEFAULT_SAVE_DIR,
+    )
     options, args = parser.parse_args()
     # positional arguments are ignored
     return options
 
+
+def _download_args(options):
+    """Return args for download_setuptools function from cmdline args."""
+    return dict(
+        version=options.version,
+        download_base=options.download_base,
+        downloader_factory=options.downloader_factory,
+        to_dir=options.to_dir,
+    )
+
+
 def main():
-    """Install or upgrade setuptools and EasyInstall"""
+    """Install or upgrade setuptools and EasyInstall."""
     options = _parse_args()
-    archive = download_setuptools(
-        version=options.version,
-        download_base=options.download_base,
-        downloader_factory=options.downloader_factory,
-    )
+    archive = download_setuptools(**_download_args(options))
     return _install(archive, _build_install_args(options))
 
 if __name__ == '__main__':
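
get_best_downloader() above uses a small strategy-selection idiom: each downloader function carries a .viable attribute (a zero-argument callable), and the first strategy reporting itself usable wins. The same shape in isolation, with hypothetical fetchers and Python 3 stdlib calls:

    import shutil
    import subprocess

    def fetch_with_curl(url, target):
        subprocess.check_call(["curl", "--silent", "--output", target, url])
    fetch_with_curl.viable = lambda: shutil.which("curl") is not None

    def fetch_with_python(url, target):
        from urllib.request import urlopen
        with open(target, "wb") as dst:
            dst.write(urlopen(url).read())
    fetch_with_python.viable = lambda: True  # always-available fallback

    fetchers = (fetch_with_curl, fetch_with_python)
    best = next((f for f in fetchers if f.viable()), None)
    # best is fetch_with_curl when curl is on PATH, else the pure-Python one
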
diff --git a/setup.cfg b/setup.cfg
index 421cc91..8930401 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,8 +1,11 @@
 [flake8]
 ignore = E124,E128,E129,E201,E202,E225,E226,E231,E265,E271,E302,E303,F401,E402,E501,W503,E731,F811,F821,F841
+exclude = cwltool/schemas
+
+[easy_install]
 
 [egg_info]
-tag_build = .20160504183010
+tag_build = .20161207161158
 tag_date = 0
 tag_svn_revision = 0
 
diff --git a/setup.py b/setup.py
old mode 100644
new mode 100755
index 86e5aca..405f0fc
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
 #!/usr/bin/env python
-
+import ez_setup
+ez_setup.use_setuptools()
 import os
 import sys
 import shutil
@@ -22,27 +23,37 @@ setup(name='cwltool',
       long_description=open(README).read(),
       author='Common workflow language working group',
       author_email='common-workflow-language at googlegroups.com',
-      url="https://github.com/common-workflow-language/common-workflow-language",
-      download_url="https://github.com/common-workflow-language/common-workflow-language",
+      url="https://github.com/common-workflow-language/cwltool",
+      download_url="https://github.com/common-workflow-language/cwltool",
       license='Apache 2.0',
       packages=["cwltool"],
-      package_data={'cwltool': ['schemas/draft-3/*.yml',
+      package_data={'cwltool': ['schemas/draft-2/*.yml',
+                                'schemas/draft-3/*.yml',
                                 'schemas/draft-3/*.md',
                                 'schemas/draft-3/salad/schema_salad/metaschema/*.yml',
-                                'schemas/draft-3/salad/schema_salad/metaschema/*.md']},
+                                'schemas/draft-3/salad/schema_salad/metaschema/*.md',
+                                'schemas/v1.0/*.yml',
+                                'schemas/v1.0/*.md',
+                                'schemas/v1.0/salad/schema_salad/metaschema/*.yml',
+                                'schemas/v1.0/salad/schema_salad/metaschema/*.md',
+                                'schemas/v1.1.0-dev1/*.yml',
+                                'schemas/v1.1.0-dev1/*.md',
+                                'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.yml',
+                                'schemas/v1.1.0-dev1/salad/schema_salad/metaschema/*.md',
+                                'cwlNodeEngine.js']},
       install_requires=[
-          'requests',
-          'PyYAML',
-          'rdflib >= 4.1.0',
-          'rdflib-jsonld >= 0.3.0',
+          'setuptools',
+          'requests>=1.0',
+          'ruamel.yaml == 0.12.4',
+          'rdflib >= 4.2.0, < 4.3.0',
           'shellescape',
-          'schema_salad == 1.7.20160316203940',
-          'typing'
-        ],
+          'schema-salad >= 1.21.20161206204028, < 2',
+          'typing >= 3.5.2',
+          'cwltest >= 1.0.20160907111242'],
       test_suite='tests',
       tests_require=[],
       entry_points={
-          'console_scripts': [ "cwltool=cwltool.main:main", "cwltest=cwltool.cwltest:main" ]
+          'console_scripts': [ "cwltool=cwltool.main:main" ]
       },
       zip_safe=True,
       cmdclass={'egg_info': tagger},

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/cwltool.git