[med-svn] [python-schema-salad] 01/07: New upstream version 2.6.20171201034858

Andreas Tille tille at debian.org
Wed Dec 6 15:57:37 UTC 2017


This is an automated email from the git hooks/post-receive script.

tille pushed a commit to branch master
in repository python-schema-salad.

commit ab8b1036343a318b92d6fcfa0d60eb11e13385b0
Author: Andreas Tille <tille at debian.org>
Date:   Wed Dec 6 16:38:37 2017 +0100

    New upstream version 2.6.20171201034858
---
 MANIFEST.in                                        |    1 +
 Makefile                                           |    5 +-
 PKG-INFO                                           |   12 +-
 README.rst                                         |    7 +
 schema_salad.egg-info/PKG-INFO                     |   12 +-
 schema_salad.egg-info/SOURCES.txt                  |   33 +
 schema_salad.egg-info/pbr.json                     |    1 +
 schema_salad.egg-info/requires.txt                 |   23 +-
 schema_salad/__init__.py                           |   22 +
 schema_salad/codegen.py                            |  100 +
 schema_salad/codegen_base.py                       |   78 +
 schema_salad/java_codegen.py                       |  152 ++
 schema_salad/main.py                               |  116 +-
 schema_salad/makedoc.py                            |   13 +-
 schema_salad/metaschema.py                         | 1447 ++++++++++++
 schema_salad/metaschema/metaschema.html            |  971 ++++++++
 schema_salad/metaschema/metaschema.yml             |    2 +-
 .../metaschema/{metaschema.yml => metaschema2.yml} |   17 +-
 schema_salad/python_codegen.py                     |  225 ++
 schema_salad/python_codegen_support.py             |  383 ++++
 schema_salad/ref_resolver.py                       |  140 +-
 schema_salad/schema.py                             |    5 +-
 schema_salad/sourceline.py                         |   13 +-
 schema_salad/tests/#cg_metaschema.py#              | 1568 +++++++++++++
 schema_salad/tests/.coverage                       |    1 +
 schema_salad/tests/cwl-pre.yml                     | 2354 ++++++++++++++++++++
 schema_salad/tests/df                              |    5 +
 schema_salad/tests/df2                             |    1 +
 schema_salad/tests/docimp/d1.yml                   |    7 +
 schema_salad/tests/docimp/d2.md                    |    1 +
 schema_salad/tests/docimp/d3.yml                   |    3 +
 schema_salad/tests/docimp/d4.yml                   |    3 +
 schema_salad/tests/docimp/d5.md                    |    1 +
 schema_salad/tests/docimp/dpre.json                |   13 +
 schema_salad/tests/hello.txt                       |    1 +
 schema_salad/tests/hellofield.yml                  |    5 +
 schema_salad/tests/matcher.py                      |   32 +
 schema_salad/tests/metaschema-pre.yml              |  628 ++++++
 schema_salad/tests/pt.yml                          |   28 +
 schema_salad/tests/test_cg.py                      |  177 ++
 schema_salad/tests/test_errors.py                  |    3 +-
 schema_salad/tests/test_examples.py                |   49 +-
 schema_salad/tests/test_fetch.py                   |    3 +-
 schema_salad/tests/test_print_oneline.py           |  120 +
 schema_salad/tests/test_ref_resolver.py            |  108 +
 schema_salad/tests/test_schema/CommandLineTool.yml |    1 +
 schema_salad/tests/test_schema/Workflow.yml        |    1 +
 schema_salad/tests/test_schema/test15.cwl          |   13 +
 schema_salad/tests/test_schema/test16.cwl          |   15 +
 schema_salad/tests/test_schema/test17.cwl          |   13 +
 schema_salad/tests/test_schema/test18.cwl          |   13 +
 schema_salad/tests/test_schema/test19.cwl          |   15 +
 schema_salad/tests/test_validate.pyx               |   71 +
 schema_salad/tests/util.py                         |    1 +
 schema_salad/utils.py                              |   21 +-
 schema_salad/validate.py                           |    3 +-
 setup.cfg                                          |    2 +-
 setup.py                                           |    7 +-
 58 files changed, 8946 insertions(+), 119 deletions(-)

diff --git a/MANIFEST.in b/MANIFEST.in
index c3870ab..661c696 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -4,5 +4,6 @@ include schema_salad/tests/test_schema/*.md
 include schema_salad/tests/test_schema/*.yml
 include schema_salad/tests/test_schema/*.cwl
 include schema_salad/metaschema/*
+include schema_salad/tests/docimp/*
 global-exclude *~
 global-exclude *.pyc
diff --git a/Makefile b/Makefile
index c555c0a..9fe16fd 100644
--- a/Makefile
+++ b/Makefile
@@ -170,7 +170,7 @@ mypy2: ${PYSOURCES}
 	ln -s $(shell python -c 'from __future__ import print_function; import ruamel.yaml; import os.path; print(os.path.dirname(ruamel.yaml.__file__))') \
 		typeshed/2and3/ruamel/
 	MYPYPATH=$MYPYPATH:typeshed/2.7:typeshed/2and3 mypy --py2 --disallow-untyped-calls \
-		 --warn-redundant-casts --warn-unused-ignores \
+		 --warn-redundant-casts \
 		 schema_salad
 
 mypy3: ${PYSOURCES}
@@ -191,6 +191,7 @@ jenkins:
 	. env3/bin/activate ; \
 	pip install -U setuptools pip wheel ; \
 	${MAKE} install-dep ; \
-	pip install -U -r mypy_requirements.txt ; ${MAKE} mypy
+	pip install -U -r mypy_requirements.txt ; ${MAKE} mypy2
+	# pip install -U -r mypy_requirements.txt ; ${MAKE} mypy3
 
 FORCE:
diff --git a/PKG-INFO b/PKG-INFO
index b4230c3..e9f491e 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,13 +1,21 @@
 Metadata-Version: 1.1
 Name: schema-salad
-Version: 2.6.20170630075932
+Version: 2.6.20171201034858
 Summary: Schema Annotations for Linked Avro Data (SALAD)
 Home-page: https://github.com/common-workflow-language/common-workflow-language
 Author: Common workflow language working group
 Author-email: common-workflow-language at googlegroups.com
 License: Apache 2.0
 Download-URL: https://github.com/common-workflow-language/common-workflow-language
-Description: Schema Salad
+Description-Content-Type: UNKNOWN
+Description: |Build Status| |Build status|
+        
+        .. |Build Status| image:: https://img.shields.io/travis/common-workflow-language/schema_salad/master.svg?label=unix%20build
+           :target: https://travis-ci.org/common-workflow-language/schema_salad
+        .. |Build status| image:: https://img.shields.io/appveyor/ci/mr-c/schema-salad/master.svg?label=windows%20build
+           :target: https://ci.appveyor.com/project/mr-c/schema-salad/branch/master
+        
+        Schema Salad
         ------------
         
         Salad is a schema language for describing JSON or YAML structured linked data
diff --git a/README.rst b/README.rst
index 38ffb42..7901a76 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,10 @@
+|Build Status| |Build status|
+
+.. |Build Status| image:: https://img.shields.io/travis/common-workflow-language/schema_salad/master.svg?label=unix%20build
+   :target: https://travis-ci.org/common-workflow-language/schema_salad
+.. |Build status| image:: https://img.shields.io/appveyor/ci/mr-c/schema-salad/master.svg?label=windows%20build
+   :target: https://ci.appveyor.com/project/mr-c/schema-salad/branch/master
+
 Schema Salad
 ------------
 
diff --git a/schema_salad.egg-info/PKG-INFO b/schema_salad.egg-info/PKG-INFO
index b4230c3..e9f491e 100644
--- a/schema_salad.egg-info/PKG-INFO
+++ b/schema_salad.egg-info/PKG-INFO
@@ -1,13 +1,21 @@
 Metadata-Version: 1.1
 Name: schema-salad
-Version: 2.6.20170630075932
+Version: 2.6.20171201034858
 Summary: Schema Annotations for Linked Avro Data (SALAD)
 Home-page: https://github.com/common-workflow-language/common-workflow-language
 Author: Common workflow language working group
 Author-email: common-workflow-language at googlegroups.com
 License: Apache 2.0
 Download-URL: https://github.com/common-workflow-language/common-workflow-language
-Description: Schema Salad
+Description-Content-Type: UNKNOWN
+Description: |Build Status| |Build status|
+        
+        .. |Build Status| image:: https://img.shields.io/travis/common-workflow-language/schema_salad/master.svg?label=unix%20build
+           :target: https://travis-ci.org/common-workflow-language/schema_salad
+        .. |Build status| image:: https://img.shields.io/appveyor/ci/mr-c/schema-salad/master.svg?label=windows%20build
+           :target: https://ci.appveyor.com/project/mr-c/schema-salad/branch/master
+        
+        Schema Salad
         ------------
         
         Salad is a schema language for describing JSON or YAML structured linked data
diff --git a/schema_salad.egg-info/SOURCES.txt b/schema_salad.egg-info/SOURCES.txt
index 7c15ca2..ce2de9a 100644
--- a/schema_salad.egg-info/SOURCES.txt
+++ b/schema_salad.egg-info/SOURCES.txt
@@ -6,9 +6,15 @@ setup.cfg
 setup.py
 schema_salad/__init__.py
 schema_salad/__main__.py
+schema_salad/codegen.py
+schema_salad/codegen_base.py
+schema_salad/java_codegen.py
 schema_salad/jsonld_context.py
 schema_salad/main.py
 schema_salad/makedoc.py
+schema_salad/metaschema.py
+schema_salad/python_codegen.py
+schema_salad/python_codegen_support.py
 schema_salad/ref_resolver.py
 schema_salad/schema.py
 schema_salad/sourceline.py
@@ -18,6 +24,7 @@ schema_salad.egg-info/PKG-INFO
 schema_salad.egg-info/SOURCES.txt
 schema_salad.egg-info/dependency_links.txt
 schema_salad.egg-info/entry_points.txt
+schema_salad.egg-info/pbr.json
 schema_salad.egg-info/requires.txt
 schema_salad.egg-info/top_level.txt
 schema_salad.egg-info/zip-safe
@@ -38,7 +45,9 @@ schema_salad/metaschema/map_res.yml
 schema_salad/metaschema/map_res_proc.yml
 schema_salad/metaschema/map_res_schema.yml
 schema_salad/metaschema/map_res_src.yml
+schema_salad/metaschema/metaschema.html
 schema_salad/metaschema/metaschema.yml
+schema_salad/metaschema/metaschema2.yml
 schema_salad/metaschema/metaschema_base.yml
 schema_salad/metaschema/salad.md
 schema_salad/metaschema/typedsl_res.yml
@@ -49,17 +58,36 @@ schema_salad/metaschema/vocab_res.yml
 schema_salad/metaschema/vocab_res_proc.yml
 schema_salad/metaschema/vocab_res_schema.yml
 schema_salad/metaschema/vocab_res_src.yml
+schema_salad/tests/#cg_metaschema.py#
+schema_salad/tests/.coverage
 schema_salad/tests/EDAM.owl
 schema_salad/tests/Process.yml
 schema_salad/tests/__init__.py
+schema_salad/tests/cwl-pre.yml
+schema_salad/tests/df
+schema_salad/tests/df2
 schema_salad/tests/frag.yml
+schema_salad/tests/hello.txt
+schema_salad/tests/hellofield.yml
+schema_salad/tests/matcher.py
+schema_salad/tests/metaschema-pre.yml
 schema_salad/tests/mixin.yml
+schema_salad/tests/pt.yml
+schema_salad/tests/test_cg.py
 schema_salad/tests/test_cli_args.py
 schema_salad/tests/test_errors.py
 schema_salad/tests/test_examples.py
 schema_salad/tests/test_fetch.py
+schema_salad/tests/test_print_oneline.py
 schema_salad/tests/test_ref_resolver.py
+schema_salad/tests/test_validate.pyx
 schema_salad/tests/util.py
+schema_salad/tests/docimp/d1.yml
+schema_salad/tests/docimp/d2.md
+schema_salad/tests/docimp/d3.yml
+schema_salad/tests/docimp/d4.yml
+schema_salad/tests/docimp/d5.md
+schema_salad/tests/docimp/dpre.json
 schema_salad/tests/test_schema/CommandLineTool.yml
 schema_salad/tests/test_schema/CommonWorkflowLanguage.yml
 schema_salad/tests/test_schema/Process.yml
@@ -75,6 +103,11 @@ schema_salad/tests/test_schema/test11.cwl
 schema_salad/tests/test_schema/test12.cwl
 schema_salad/tests/test_schema/test13.cwl
 schema_salad/tests/test_schema/test14.cwl
+schema_salad/tests/test_schema/test15.cwl
+schema_salad/tests/test_schema/test16.cwl
+schema_salad/tests/test_schema/test17.cwl
+schema_salad/tests/test_schema/test18.cwl
+schema_salad/tests/test_schema/test19.cwl
 schema_salad/tests/test_schema/test2.cwl
 schema_salad/tests/test_schema/test3.cwl
 schema_salad/tests/test_schema/test4.cwl
diff --git a/schema_salad.egg-info/pbr.json b/schema_salad.egg-info/pbr.json
new file mode 100644
index 0000000..bf1c2af
--- /dev/null
+++ b/schema_salad.egg-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "c7f3140"}
\ No newline at end of file
diff --git a/schema_salad.egg-info/requires.txt b/schema_salad.egg-info/requires.txt
index 2e4673f..424bb23 100644
--- a/schema_salad.egg-info/requires.txt
+++ b/schema_salad.egg-info/requires.txt
@@ -1,16 +1,17 @@
 setuptools
-requests >= 1.0
-ruamel.yaml >= 0.12.4, < 0.15
-rdflib >= 4.2.2, < 4.3.0
-rdflib-jsonld >= 0.3.0, < 0.5.0
-mistune >= 0.7.3, < 0.8
-typing >= 3.5.3
-CacheControl >= 0.11.7, < 0.12
-lockfile >= 0.9
-six >= 1.8.0
+requests>=1.0
+ruamel.yaml<0.15,>=0.12.4
+rdflib<4.3.0,>=4.2.2
+rdflib-jsonld<0.5.0,>=0.3.0
+mistune<0.8,>=0.7.3
+typing>=3.5.3
+CacheControl<0.12,>=0.11.7
+lockfile>=0.9
+six>=1.8.0
 
 [:python_version<"3"]
-avro
+avro==1.8.1
 
 [:python_version>="3"]
-avro-python3
+future
+avro-cwl==1.8.4
diff --git a/schema_salad/__init__.py b/schema_salad/__init__.py
index a751d64..b2fc02c 100644
--- a/schema_salad/__init__.py
+++ b/schema_salad/__init__.py
@@ -1,10 +1,32 @@
 from __future__ import absolute_import
 import logging
+import os
 import sys
 import typing
 
+import six
+
+from .utils import onWindows
 __author__ = 'peter.amstutz at curoverse.com'
 
 _logger = logging.getLogger("salad")
 _logger.addHandler(logging.StreamHandler())
 _logger.setLevel(logging.INFO)
+
+if six.PY3:
+
+    if onWindows:
+        # create '/tmp' folder if not present
+        # required by autotranslate module
+        # TODO: remove when https://github.com/PythonCharmers/python-future/issues/295
+        # is fixed
+        if not os.path.exists("/tmp"):
+            try:
+                os.makedirs("/tmp")
+            except OSError as exception:
+                _logger.error(u"Cannot create '\\tmp' folder in root needed for",
+                              "'cwltool' Python 3 installation.")
+                exit(1)
+
+    from past import autotranslate  # type: ignore
+    autotranslate(['avro', 'avro.schema'])
diff --git a/schema_salad/codegen.py b/schema_salad/codegen.py
new file mode 100644
index 0000000..cac082a
--- /dev/null
+++ b/schema_salad/codegen.py
@@ -0,0 +1,100 @@
+import json
+import sys
+from six.moves import urllib, cStringIO
+import collections
+import logging
+from pkg_resources import resource_stream
+from .utils import aslist, flatten
+from . import schema
+from .codegen_base import shortname, CodeGenBase
+from .python_codegen import PythonCodeGen
+from .java_codegen import JavaCodeGen
+from .ref_resolver import Loader
+from typing import List, Dict, Text, Any, Union
+from ruamel.yaml.comments import CommentedSeq, CommentedMap
+
+class GoCodeGen(object):
+    pass
+
+
+def codegen(lang,             # type: str
+            i,                # type: List[Dict[Text, Any]]
+            schema_metadata,  # type: Dict[Text, Any]
+            loader            # type: Loader
+           ):
+    # type: (...) -> None
+
+    j = schema.extend_and_specialize(i, loader)
+
+    cg = None  # type: CodeGenBase
+    if lang == "python":
+        cg = PythonCodeGen(sys.stdout)
+    elif lang == "java":
+        cg = JavaCodeGen(schema_metadata.get("$base", schema_metadata.get("id")))
+    else:
+        raise Exception("Unsupported code generation language '%s'" % lang)
+
+    cg.prologue()
+
+    documentRoots = []
+
+    for rec in j:
+        if rec["type"] in ("enum", "record"):
+            cg.type_loader(rec)
+            cg.add_vocab(shortname(rec["name"]), rec["name"])
+
+    for rec in j:
+        if rec["type"] == "enum":
+            for s in rec["symbols"]:
+                cg.add_vocab(shortname(s), s)
+
+        if rec["type"] == "record":
+            if rec.get("documentRoot"):
+                documentRoots.append(rec["name"])
+            cg.begin_class(rec["name"], aslist(rec.get("extends", [])), rec.get("doc"),
+                           rec.get("abstract"))
+            cg.add_vocab(shortname(rec["name"]), rec["name"])
+
+            for f in rec.get("fields", []):
+                if f.get("jsonldPredicate") == "@id":
+                    fieldpred = f["name"]
+                    tl = cg.uri_loader(cg.type_loader(f["type"]), True, False, None)
+                    cg.declare_id_field(fieldpred, tl, f.get("doc"))
+                    break
+
+            for f in rec.get("fields", []):
+                optional = bool("https://w3id.org/cwl/salad#null" in f["type"])
+                tl = cg.type_loader(f["type"])
+                jld = f.get("jsonldPredicate")
+                fieldpred = f["name"]
+                if isinstance(jld, dict):
+                    refScope = jld.get("refScope")
+
+                    if jld.get("typeDSL"):
+                        tl = cg.typedsl_loader(tl, refScope)
+                    elif jld.get("_type") == "@id":
+                        tl = cg.uri_loader(tl, jld.get("identity"), False, refScope)
+                    elif jld.get("_type") == "@vocab":
+                        tl = cg.uri_loader(tl, False, True, refScope)
+
+                    mapSubject = jld.get("mapSubject")
+                    if mapSubject:
+                        tl = cg.idmap_loader(f["name"], tl, mapSubject, jld.get("mapPredicate"))
+
+                    if "_id" in jld and jld["_id"][0] != "@":
+                        fieldpred = jld["_id"]
+
+                if jld == "@id":
+                    continue
+
+                cg.declare_field(fieldpred, tl, f.get("doc"), optional)
+
+            cg.end_class(rec["name"])
+
+    rootType = list(documentRoots)
+    rootType.append({
+        "type": "array",
+        "items": documentRoots
+    })
+
+    cg.epilogue(cg.type_loader(rootType))
diff --git a/schema_salad/codegen_base.py b/schema_salad/codegen_base.py
new file mode 100644
index 0000000..7e65800
--- /dev/null
+++ b/schema_salad/codegen_base.py
@@ -0,0 +1,78 @@
+import collections
+from six.moves import urllib
+from typing import List, Text, Dict, Union, Any
+from . import schema
+
+def shortname(inputid):
+    # type: (Text) -> Text
+    d = urllib.parse.urlparse(inputid)
+    if d.fragment:
+        return d.fragment.split(u"/")[-1]
+    else:
+        return d.path.split(u"/")[-1]
+
+class TypeDef(object):
+    def __init__(self, name, init):
+        # type: (Text, Text) -> None
+        self.name = name
+        self.init = init
+
+class CodeGenBase(object):
+    def __init__(self):
+        # type: () -> None
+        self.collected_types = collections.OrderedDict()  # type: collections.OrderedDict[Text, TypeDef]
+        self.vocab = {}  # type: Dict[Text, Text]
+
+    def declare_type(self, t):
+        # type: (TypeDef) -> TypeDef
+        if t not in self.collected_types:
+            self.collected_types[t.name] = t
+        return t
+
+    def add_vocab(self, name, uri):
+        # type: (Text, Text) -> None
+        self.vocab[name] = uri
+
+    def prologue(self):
+        # type: () -> None
+        raise NotImplementedError()
+
+    def safe_name(self, n):
+        # type: (Text) -> Text
+        return schema.avro_name(n)
+
+    def begin_class(self, classname, extends, doc, abstract):
+        # type: (Text, List[Text], Text, bool) -> None
+        raise NotImplementedError()
+
+    def end_class(self, classname):
+        # type: (Text) -> None
+        raise NotImplementedError()
+
+    def type_loader(self, t):
+        # type: (Union[List[Any], Dict[Text, Any]]) -> TypeDef
+        raise NotImplementedError()
+
+    def declare_field(self, name, typedef, doc, optional):
+        # type: (Text, TypeDef, Text, bool) -> None
+        raise NotImplementedError()
+
+    def declare_id_field(self, name, typedef, doc):
+        # type: (Text, TypeDef, Text) -> None
+        raise NotImplementedError()
+
+    def uri_loader(self, inner, scoped_id, vocab_term, refScope):
+        # type: (TypeDef, bool, bool, Union[int, None]) -> TypeDef
+        raise NotImplementedError()
+
+    def idmap_loader(self, field, inner, mapSubject, mapPredicate):
+        # type: (Text, TypeDef, Text, Union[Text, None]) -> TypeDef
+        raise NotImplementedError()
+
+    def typedsl_loader(self, inner, refScope):
+        # type: (TypeDef, Union[int, None]) -> TypeDef
+        raise NotImplementedError()
+
+    def epilogue(self, rootLoader):
+        # type: (TypeDef) -> None
+        raise NotImplementedError()
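
For reference, shortname() above trims a Salad URI down to its final
fragment (or path) component; the code generators use it to build their
vocabulary tables. A minimal sketch of the behaviour (the example URIs are
illustrative only):

    from schema_salad.codegen_base import shortname

    shortname("https://w3id.org/cwl/salad#RecordField/name")  # -> "name"
    shortname("https://w3id.org/cwl/salad#Any")               # -> "Any"
    shortname("http://example.com/path/to/item")              # -> "item"
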
diff --git a/schema_salad/java_codegen.py b/schema_salad/java_codegen.py
new file mode 100644
index 0000000..57cb7fc
--- /dev/null
+++ b/schema_salad/java_codegen.py
@@ -0,0 +1,152 @@
+import json
+import sys
+import six
+from six.moves import urllib, cStringIO
+import collections
+import logging
+from pkg_resources import resource_stream
+from .utils import aslist, flatten
+from . import schema
+from .codegen_base import TypeDef, CodeGenBase, shortname
+from typing import Text
+import os
+
+class JavaCodeGen(CodeGenBase):
+    def __init__(self, base):
+        # type: (Text) -> None
+
+        super(JavaCodeGen, self).__init__()
+        sp = urllib.parse.urlsplit(base)
+        self.package = ".".join(list(reversed(sp.netloc.split("."))) + sp.path.strip("/").split("/"))
+        self.outdir = self.package.replace(".", "/")
+
+    def prologue(self):
+        if not os.path.exists(self.outdir):
+            os.makedirs(self.outdir)
+
+    def safe_name(self, n):
+        avn = schema.avro_name(n)
+        if avn in ("class", "extends", "abstract"):
+            # reserved words
+            avn = avn+"_"
+        return avn
+
+    def interface_name(self, n):
+        return self.safe_name(n)
+
+    def begin_class(self, classname, extends, doc, abstract):
+        cls = self.interface_name(classname)
+        self.current_class = cls
+        self.current_class_is_abstract = abstract
+        self.current_loader = cStringIO()
+        self.current_fields = cStringIO()
+        with open(os.path.join(self.outdir, "%s.java" % cls), "w") as f:
+            if extends:
+                ext = "extends " + ", ".join(self.interface_name(e) for e in extends)
+            else:
+                ext = ""
+            f.write("""package {package};
+
+public interface {cls} {ext} {{
+""".
+                    format(package=self.package,
+                           cls=cls,
+                           ext=ext))
+
+        if self.current_class_is_abstract:
+            return
+
+        with open(os.path.join(self.outdir, "%sImpl.java" % cls), "w") as f:
+            f.write("""package {package};
+
+public class {cls}Impl implements {cls} {{
+""".
+                    format(package=self.package,
+                           cls=cls,
+                           ext=ext))
+        self.current_loader.write("""
+    void Load() {
+""")
+
+    def end_class(self, classname):
+        with open(os.path.join(self.outdir, "%s.java" % self.current_class), "a") as f:
+            f.write("""
+}
+""")
+        if self.current_class_is_abstract:
+            return
+
+        self.current_loader.write("""
+    }
+""")
+
+        with open(os.path.join(self.outdir, "%sImpl.java" % self.current_class), "a") as f:
+            f.write(self.current_fields.getvalue())
+            f.write(self.current_loader.getvalue())
+            f.write("""
+}
+""")
+
+    prims = {
+        u"http://www.w3.org/2001/XMLSchema#string": TypeDef("String", "Support.StringLoader()"),
+        u"http://www.w3.org/2001/XMLSchema#int": TypeDef("Integer", "Support.IntLoader()"),
+        u"http://www.w3.org/2001/XMLSchema#long": TypeDef("Long", "Support.LongLoader()"),
+        u"http://www.w3.org/2001/XMLSchema#float": TypeDef("Float", "Support.FloatLoader()"),
+        u"http://www.w3.org/2001/XMLSchema#double": TypeDef("Double", "Support.DoubleLoader()"),
+        u"http://www.w3.org/2001/XMLSchema#boolean": TypeDef("Boolean", "Support.BoolLoader()"),
+        u"https://w3id.org/cwl/salad#null": TypeDef("null_type", "Support.NullLoader()"),
+        u"https://w3id.org/cwl/salad#Any": TypeDef("Any_type", "Support.AnyLoader()")
+    }
+
+    def type_loader(self, t):
+        if isinstance(t, list) and len(t) == 2:
+            if t[0] == "https://w3id.org/cwl/salad#null":
+                t = t[1]
+        if isinstance(t, six.string_types):
+            if t in self.prims:
+                return self.prims[t]
+        return TypeDef("Object", "")
+
+    def declare_field(self, name, typedef, doc, optional):
+        fieldname = self.safe_name(name)
+        with open(os.path.join(self.outdir, "%s.java" % self.current_class), "a") as f:
+            f.write("""
+    {type} get{capfieldname}();
+""".
+                    format(fieldname=fieldname,
+                           capfieldname=fieldname[0].upper() + fieldname[1:],
+                           type=typedef.name))
+
+        if self.current_class_is_abstract:
+            return
+
+        self.current_fields.write("""
+    private {type} {fieldname};
+    public {type} get{capfieldname}() {{
+        return this.{fieldname};
+    }}
+""".
+                    format(fieldname=fieldname,
+                           capfieldname=fieldname[0].upper() + fieldname[1:],
+                           type=typedef.name))
+
+        self.current_loader.write("""
+        this.{fieldname} = null; // TODO: loaders
+        """.
+                                  format(fieldname=fieldname))
+
+
+    def declare_id_field(self, name, typedef, doc):
+        pass
+
+    def uri_loader(self, inner, scoped_id, vocab_term, refScope):
+        return inner
+
+    def idmap_loader(self, field, inner, mapSubject, mapPredicate):
+        return inner
+
+    def typedsl_loader(self, inner, refScope):
+        return inner
+
+    def epilogue(self, rootLoader):
+        pass
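
For reference, JavaCodeGen derives a Java package name from the schema base
URI and appends an underscore to Java reserved words when mapping names. A
small sketch (illustrative only; it exercises only the name handling and
does not write any .java files):

    from schema_salad.java_codegen import JavaCodeGen

    gen = JavaCodeGen("https://w3id.org/cwl/salad#")
    gen.package              # -> "org.w3id.cwl.salad"
    gen.safe_name("class")   # -> "class_" (Java reserved word)
    gen.safe_name("fields")  # -> "fields"
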
diff --git a/schema_salad/main.py b/schema_salad/main.py
index 33225aa..f4f8776 100644
--- a/schema_salad/main.py
+++ b/schema_salad/main.py
@@ -6,12 +6,15 @@ import sys
 import traceback
 import json
 import os
+import re
+import itertools
 
+import six
 from six.moves import urllib
 
 import pkg_resources  # part of setuptools
 
-from typing import Any, Dict, List, Union, Text
+from typing import Any, Dict, List, Union, Pattern, Text, Tuple, cast
 
 from rdflib import Graph, plugin
 from rdflib.serializer import Serializer
@@ -20,10 +23,9 @@ from . import schema
 from . import jsonld_context
 from . import makedoc
 from . import validate
+from . import codegen
 from .sourceline import strip_dup_lineno
-from .ref_resolver import Loader
-import six
-
+from .ref_resolver import Loader, file_uri
 _logger = logging.getLogger("salad")
 
 from rdflib.plugin import register, Parser
@@ -40,6 +42,82 @@ def printrdf(workflow,  # type: str
     print(g.serialize(format=sr))
 
 
+def regex_chunk(lines, regex):
+    # type: (List[str], Pattern[str]) -> List[List[str]]
+    lst = list(itertools.dropwhile(lambda x: not regex.match(x), lines))
+    arr = []
+    while lst:
+        ret = [lst[0]]+list(itertools.takewhile(lambda x: not regex.match(x),
+                                                lst[1:]))
+        arr.append(ret)
+        lst = list(itertools.dropwhile(lambda x: not regex.match(x),
+                                       lst[1:]))
+    return arr
+
+
+def chunk_messages(message):  # type: (str) -> List[Tuple[int, str]]
+    file_regex = re.compile(r'^(.+:\d+:\d+:)(\s+)(.+)$')
+    item_regex = re.compile(r'^\s*\*\s+')
+    arr = []
+    for chun in regex_chunk(message.splitlines(), file_regex):
+        fst = chun[0]
+        mat = file_regex.match(fst)
+        place = mat.group(1)
+        indent = len(mat.group(2))
+
+        lst = [mat.group(3)]+chun[1:]
+        if [x for x in lst if item_regex.match(x)]:
+            for item in regex_chunk(lst, item_regex):
+                msg = re.sub(item_regex, '', "\n".join(item))
+                arr.append((indent, place+' '+re.sub(r'[\n\s]+',
+                                                     ' ',
+                                                     msg)))
+        else:
+            msg = re.sub(item_regex, '', "\n".join(lst))
+            arr.append((indent, place+' '+re.sub(r'[\n\s]+',
+                                                 ' ',
+                                                 msg)))
+    return arr
+
+
+def to_one_line_messages(message):  # type: (str) -> str
+    ret = []
+    max_elem = (0, '')
+    for (indent, msg) in chunk_messages(message):
+        if indent > max_elem[0]:
+            max_elem = (indent, msg)
+        else:
+            ret.append(max_elem[1])
+            max_elem = (indent, msg)
+    ret.append(max_elem[1])
+    return "\n".join(ret)
+
+
+def reformat_yaml_exception_message(message):  # type: (str) -> str
+    line_regex = re.compile(r'^\s+in "(.+)", line (\d+), column (\d+)$')
+    fname_regex = re.compile(r'^file://'+os.getcwd()+'/')
+    msgs = message.splitlines()
+    ret = []
+
+    if len(msgs) == 3:
+        msgs = msgs[1:]
+        nblanks = 0
+    elif len(msgs) == 4:
+        c_msg = msgs[0]
+        c_file, c_line, c_column = line_regex.match(msgs[1]).groups()
+        c_file = re.sub(fname_regex, '', c_file)
+        ret.append("%s:%s:%s: %s" % (c_file, c_line, c_column, c_msg))
+
+        msgs = msgs[2:]
+        nblanks = 2
+
+    p_msg = msgs[0]
+    p_file, p_line, p_column = line_regex.match(msgs[1]).groups()
+    p_file = re.sub(fname_regex, '', p_file)
+    ret.append("%s:%s:%s:%s %s" % (p_file, p_line, p_column, ' '*nblanks, p_msg))
+    return "\n".join(ret)
+
+
 def main(argsl=None):  # type: (List[str]) -> int
     if argsl is None:
         argsl = sys.argv[1:]
@@ -66,6 +144,11 @@ def main(argsl=None):  # type: (List[str]) -> int
     exgroup.add_argument("--print-metadata",
                          action="store_true", help="Print document metadata")
 
+    exgroup.add_argument("--codegen", type=str, metavar="language", help="Generate classes in target language, currently supported: python")
+
+    exgroup.add_argument("--print-oneline", action="store_true",
+                         help="Print each error message in oneline")
+
     exgroup = parser.add_mutually_exclusive_group()
     exgroup.add_argument("--strict", action="store_true", help="Strict validation (unrecognized or out of place fields are error)",
                          default=True, dest="strict")
@@ -111,8 +194,8 @@ def main(argsl=None):  # type: (List[str]) -> int
     # Load schema document and resolve refs
 
     schema_uri = args.schema
-    if not urllib.parse.urlparse(schema_uri)[0]:
-        schema_uri = "file://" + os.path.abspath(schema_uri)
+    if not (urllib.parse.urlparse(schema_uri)[0] and urllib.parse.urlparse(schema_uri)[0] in [u'http', u'https', u'file']):
+        schema_uri = file_uri(os.path.abspath(schema_uri))
     schema_raw_doc = metaschema_loader.fetch(schema_uri)
 
     try:
@@ -163,6 +246,11 @@ def main(argsl=None):  # type: (List[str]) -> int
     # Create the loader that will be used to load the target document.
     document_loader = Loader(schema_ctx)
 
+    if args.codegen:
+        codegen.codegen(args.codegen, cast(List[Dict[Text, Any]], schema_doc),
+                        schema_metadata, document_loader)
+        return 0
+
     # Make the Avro validation that will be used to validate the target
     # document
     if isinstance(schema_doc, list):
@@ -211,9 +299,18 @@ def main(argsl=None):  # type: (List[str]) -> int
         if not urllib.parse.urlparse(uri)[0]:
             doc = "file://" + os.path.abspath(uri)
         document, doc_metadata = document_loader.resolve_ref(uri)
-    except (validate.ValidationException, RuntimeError) as e:
+    except validate.ValidationException as e:
+        msg = strip_dup_lineno(six.text_type(e))
+        msg = to_one_line_messages(str(msg)) if args.print_oneline else msg
+        _logger.error("Document `%s` failed validation:\n%s",
+                      args.document, msg, exc_info=args.debug)
+        return 1
+    except RuntimeError as e:
+        msg = strip_dup_lineno(six.text_type(e))
+        msg = reformat_yaml_exception_message(str(msg))
+        msg = to_one_line_messages(msg) if args.print_oneline else msg
         _logger.error("Document `%s` failed validation:\n%s",
-                      args.document, strip_dup_lineno(six.text_type(e)), exc_info=args.debug)
+                      args.document, msg, exc_info=args.debug)
         return 1
 
     # Optionally print the document after ref resolution
@@ -230,8 +327,9 @@ def main(argsl=None):  # type: (List[str]) -> int
         schema.validate_doc(avsc_names, document,
                             document_loader, args.strict)
     except validate.ValidationException as e:
+        msg = to_one_line_messages(str(e)) if args.print_oneline else str(e)
         _logger.error("While validating document `%s`:\n%s" %
-                      (args.document, str(e)))
+                      (args.document, msg))
         return 1
 
     # Optionally convert the document to RDF
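
For reference, the new --codegen option short-circuits validation: once the
schema is loaded, classes for the requested language are emitted (to stdout
for Python) and main() returns 0. A minimal sketch of driving it
programmatically (the schema file name is hypothetical, and this assumes the
target document argument remains optional):

    from schema_salad import main as schema_salad_main

    # Equivalent to: schema-salad-tool --codegen python myschema.yml
    rc = schema_salad_main.main(["--codegen", "python", "myschema.yml"])
    assert rc == 0  # generated Python classes were written to stdout
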
diff --git a/schema_salad/makedoc.py b/schema_salad/makedoc.py
index c0e7c8a..45bcab2 100644
--- a/schema_salad/makedoc.py
+++ b/schema_salad/makedoc.py
@@ -8,6 +8,7 @@ import copy
 import re
 import sys
 import logging
+from io import open
 
 from . import schema
 from .utils import add_dictlist, aslist
@@ -323,10 +324,10 @@ class RenderType(object):
 
         if f["type"] != "documentation":
             lines = []
-            for l in f["doc"].splitlines():
-                if len(l) > 0 and l[0] == "#":
-                    l = ("#" * depth) + l
-                lines.append(l)
+            for line in f["doc"].splitlines():
+                if len(line) > 0 and line[0] == "#":
+                    line = ("#" * depth) + line
+                lines.append(line)
             f["doc"] = "\n".join(lines)
 
             _, frg = urllib.parse.urldefrag(f["name"])
@@ -516,11 +517,11 @@ def main():  # type: () -> None
 
     s = []  # type: List[Dict[Text, Any]]
     a = args.schema
-    with open(a) as f:
+    with open(a, encoding='utf-8') as f:
         if a.endswith("md"):
             s.append({"name": os.path.splitext(os.path.basename(a))[0],
                       "type": "documentation",
-                      "doc": f.read().decode("utf-8")
+                      "doc": f.read()
                       })
         else:
             uri = "file://" + os.path.abspath(a)
diff --git a/schema_salad/metaschema.py b/schema_salad/metaschema.py
new file mode 100644
index 0000000..5c5e740
--- /dev/null
+++ b/schema_salad/metaschema.py
@@ -0,0 +1,1447 @@
+#
+# This file was autogenerated using schema-salad-tool --codegen=python
+#
+from __future__ import absolute_import
+import ruamel.yaml
+from ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq
+import re
+import os
+import traceback
+
+from typing import (Any, AnyStr, Callable, cast, Dict, List, Iterable, Tuple,
+                    TypeVar, Union, Text)
+import six
+
+lineno_re = re.compile(u"^(.*?:[0-9]+:[0-9]+: )(( *)(.*))")
+
+def _add_lc_filename(r, source):  # type: (ruamel.yaml.comments.CommentedBase, AnyStr) -> None
+    if isinstance(r, ruamel.yaml.comments.CommentedBase):
+        r.lc.filename = source
+    if isinstance(r, list):
+        for d in r:
+            _add_lc_filename(d, source)
+    elif isinstance(r, dict):
+        for d in six.itervalues(r):
+            _add_lc_filename(d, source)
+
+def relname(source):  # type: (Text) -> Text
+    if source.startswith("file://"):
+        source = source[7:]
+        source = os.path.relpath(source)
+    return source
+
+def add_lc_filename(r, source):  # type: (ruamel.yaml.comments.CommentedBase, Text) -> None
+    _add_lc_filename(r, relname(source))
+
+def reflow(text, maxline, shift=""):  # type: (Text, int, Text) -> Text
+    if maxline < 20:
+        maxline = 20
+    if len(text) > maxline:
+        sp = text.rfind(' ', 0, maxline)
+        if sp < 1:
+            sp = text.find(' ', sp+1)
+            if sp == -1:
+                sp = len(text)
+        if sp < len(text):
+            return "%s\n%s%s" % (text[0:sp], shift, reflow(text[sp+1:], maxline, shift))
+    return text
+
+def indent(v, nolead=False, shift=u"  ", bullet=u"  "):  # type: (Text, bool, Text, Text) -> Text
+    if nolead:
+        return v.splitlines()[0] + u"\n".join([shift + l for l in v.splitlines()[1:]])
+    else:
+        def lineno(i, l):  # type: (int, Text) -> Text
+            r = lineno_re.match(l)
+            if bool(r):
+                return r.group(1) + (bullet if i == 0 else shift) + r.group(2)
+            else:
+                return (bullet if i == 0 else shift) + l
+
+        return u"\n".join([lineno(i, l) for i, l in enumerate(v.splitlines())])
+
+def bullets(textlist, bul):  # type: (List[Text], Text) -> Text
+    if len(textlist) == 1:
+        return textlist[0]
+    else:
+        return "\n".join(indent(t, bullet=bul) for t in textlist)
+
+def strip_dup_lineno(text, maxline=None):  # type: (Text, int) -> Text
+    if maxline is None:
+        maxline = int(os.environ.get("COLUMNS", "100"))
+    pre = None
+    msg = []
+    for l in text.splitlines():
+        g = lineno_re.match(l)
+        if not g:
+            msg.append(l)
+            continue
+        shift = len(g.group(1)) + len(g.group(3))
+        g2 = reflow(g.group(2), maxline-shift, " " * shift)
+        if g.group(1) != pre:
+            pre = g.group(1)
+            msg.append(pre + g2)
+        else:
+            g2 = reflow(g.group(2), maxline-len(g.group(1)), " " * (len(g.group(1))+len(g.group(3))))
+            msg.append(" " * len(g.group(1)) + g2)
+    return "\n".join(msg)
+
+def cmap(d, lc=None, fn=None):  # type: (Union[int, float, str, Text, Dict, List], List[int], Text) -> Union[int, float, str, Text, CommentedMap, CommentedSeq]
+    if lc is None:
+        lc = [0, 0, 0, 0]
+    if fn is None:
+        fn = "test"
+
+    if isinstance(d, CommentedMap):
+        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
+        for k,v in six.iteritems(d):
+            if k in d.lc.data:
+                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
+            else:
+                d[k] = cmap(v, lc, fn=fn)
+        return d
+    if isinstance(d, CommentedSeq):
+        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
+        for k,v in enumerate(d):
+            if k in d.lc.data:
+                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
+            else:
+                d[k] = cmap(v, lc, fn=fn)
+        return d
+    if isinstance(d, dict):
+        cm = CommentedMap()
+        for k in sorted(d.keys()):
+            v = d[k]
+            if isinstance(v, CommentedBase):
+                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
+                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
+            else:
+                uselc = lc
+                vfn = fn
+            cm[k] = cmap(v, lc=uselc, fn=vfn)
+            cm.lc.add_kv_line_col(k, uselc)
+            cm.lc.filename = fn
+        return cm
+    if isinstance(d, list):
+        cs = CommentedSeq()
+        for k,v in enumerate(d):
+            if isinstance(v, CommentedBase):
+                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
+                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
+            else:
+                uselc = lc
+                vfn = fn
+            cs.append(cmap(v, lc=uselc, fn=vfn))
+            cs.lc.add_kv_line_col(k, uselc)
+            cs.lc.filename = fn
+        return cs
+    else:
+        return d
+
+class SourceLine(object):
+    def __init__(self, item, key=None, raise_type=six.text_type, include_traceback=False):  # type: (Any, Any, Callable, bool) -> None
+        self.item = item
+        self.key = key
+        self.raise_type = raise_type
+        self.include_traceback = include_traceback
+
+    def __enter__(self):  # type: () -> SourceLine
+        return self
+
+    def __exit__(self,
+                 exc_type,   # type: Any
+                 exc_value,  # type: Any
+                 tb   # type: Any
+                 ):   # -> Any
+        if not exc_value:
+            return
+        if self.include_traceback:
+            raise self.makeError("\n".join(traceback.format_exception(exc_type, exc_value, tb)))
+        else:
+            raise self.makeError(six.text_type(exc_value))
+
+    def makeLead(self):  # type: () -> Text
+        if self.key is None or self.item.lc.data is None or self.key not in self.item.lc.data:
+            return "%s:%i:%i:" % (self.item.lc.filename if hasattr(self.item.lc, "filename") else "",
+                                  (self.item.lc.line or 0)+1,
+                                  (self.item.lc.col or 0)+1)
+        else:
+            return "%s:%i:%i:" % (self.item.lc.filename if hasattr(self.item.lc, "filename") else "",
+                                  (self.item.lc.data[self.key][0] or 0)+1,
+                                  (self.item.lc.data[self.key][1] or 0)+1)
+
+    def makeError(self, msg):  # type: (Text) -> Any
+        if not isinstance(self.item, ruamel.yaml.comments.CommentedBase):
+            return self.raise_type(msg)
+        errs = []
+        lead = self.makeLead()
+        for m in msg.splitlines():
+            if bool(lineno_re.match(m)):
+                errs.append(m)
+            else:
+                errs.append("%s %s" % (lead, m))
+        return self.raise_type("\n".join(errs))
+
+
+import six
+from six.moves import urllib, StringIO
+import ruamel.yaml as yaml
+import copy
+import re
+from typing import List, Text, Dict, Union, Any, Sequence
+
+class ValidationException(Exception):
+    pass
+
+class Savable(object):
+    pass
+
+class LoadingOptions(object):
+    def __init__(self, fetcher=None, namespaces=None, fileuri=None, copyfrom=None):
+        if copyfrom is not None:
+            self.idx = copyfrom.idx
+            if fetcher is None:
+                fetcher = copyfrom.fetcher
+            if fileuri is None:
+                fileuri = copyfrom.fileuri
+        else:
+            self.idx = {}
+
+        if fetcher is None:
+            import os
+            import requests
+            from cachecontrol.wrapper import CacheControl
+            from cachecontrol.caches import FileCache
+            from schema_salad.ref_resolver import DefaultFetcher
+            if "HOME" in os.environ:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join(os.environ["HOME"], ".cache", "salad")))
+            elif "TMP" in os.environ:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join(os.environ["TMP"], ".cache", "salad")))
+            else:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache("/tmp", ".cache", "salad"))
+            self.fetcher = DefaultFetcher({}, session)
+        else:
+            self.fetcher = fetcher
+
+        self.fileuri = fileuri
+
+        self.vocab = _vocab
+        self.rvocab = _rvocab
+
+        if namespaces is not None:
+            self.vocab = self.vocab.copy()
+            self.rvocab = self.rvocab.copy()
+            for k,v in six.iteritems(namespaces):
+                self.vocab[k] = v
+                self.rvocab[v] = k
+
+def load_field(val, fieldtype, baseuri, loadingOptions):
+    if isinstance(val, dict):
+        if "$import" in val:
+            return _document_load_by_url(fieldtype, loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"]), loadingOptions)
+        elif "$include" in val:
+            val = loadingOptions.fetcher.fetch_text(loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"]))
+    return fieldtype.load(val, baseuri, loadingOptions)
+
+
+def save(val):
+    if isinstance(val, Savable):
+        return val.save()
+    if isinstance(val, list):
+        return [save(v) for v in val]
+    return val
+
+def expand_url(url,                 # type: Union[str, Text]
+               base_url,            # type: Union[str, Text]
+               loadingOptions,      # type: LoadingOptions
+               scoped_id=False,     # type: bool
+               vocab_term=False,    # type: bool
+               scoped_ref=None      # type: int
+               ):
+    # type: (...) -> Text
+
+    if not isinstance(url, six.string_types):
+        return url
+
+    url = Text(url)
+
+    if url in (u"@id", u"@type"):
+        return url
+
+    if vocab_term and url in loadingOptions.vocab:
+        return url
+
+    if bool(loadingOptions.vocab) and u":" in url:
+        prefix = url.split(u":")[0]
+        if prefix in loadingOptions.vocab:
+            url = loadingOptions.vocab[prefix] + url[len(prefix) + 1:]
+
+    split = urllib.parse.urlsplit(url)
+
+    if ((bool(split.scheme) and split.scheme in [u'http', u'https', u'file']) or url.startswith(u"$(")
+        or url.startswith(u"${")):
+        pass
+    elif scoped_id and not bool(split.fragment):
+        splitbase = urllib.parse.urlsplit(base_url)
+        frg = u""
+        if bool(splitbase.fragment):
+            frg = splitbase.fragment + u"/" + split.path
+        else:
+            frg = split.path
+        pt = splitbase.path if splitbase.path != '' else "/"
+        url = urllib.parse.urlunsplit(
+            (splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
+    elif scoped_ref is not None and not bool(split.fragment):
+        splitbase = urllib.parse.urlsplit(base_url)
+        sp = splitbase.fragment.split(u"/")
+        n = scoped_ref
+        while n > 0 and len(sp) > 0:
+            sp.pop()
+            n -= 1
+        sp.append(url)
+        url = urllib.parse.urlunsplit((
+            splitbase.scheme, splitbase.netloc, splitbase.path, splitbase.query,
+            u"/".join(sp)))
+    else:
+        url = loadingOptions.fetcher.urljoin(base_url, url)
+
+    if vocab_term:
+        split = urllib.parse.urlsplit(url)
+        if bool(split.scheme):
+            if url in loadingOptions.rvocab:
+                return loadingOptions.rvocab[url]
+        else:
+            raise ValidationException("Term '%s' not in vocabulary" % url)
+
+    return url
+
+
+class _Loader(object):
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        # type: (Any, Text, LoadingOptions, Union[Text, None]) -> Any
+        pass
+
+class _AnyLoader(_Loader):
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if doc is not None:
+            return doc
+        raise ValidationException("Expected non-null")
+
+class _PrimitiveLoader(_Loader):
+    def __init__(self, tp):
+        # type: (Union[type, Sequence[type]]) -> None
+        self.tp = tp
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, self.tp):
+            raise ValidationException("Expected a %s but got %s" % (self.tp, type(doc)))
+        return doc
+
+    def __repr__(self):
+        return str(self.tp)
+
+class _ArrayLoader(_Loader):
+    def __init__(self, items):
+        # type: (_Loader) -> None
+        self.items = items
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, list):
+            raise ValidationException("Expected a list")
+        r = []
+        errors = []
+        for i in range(0, len(doc)):
+            try:
+                lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
+                if isinstance(lf, list):
+                    r.extend(lf)
+                else:
+                    r.append(lf)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, i, str).makeError(six.text_type(e)))
+        if errors:
+            raise ValidationException("\n".join(errors))
+        return r
+
+    def __repr__(self):
+        return "array<%s>" % self.items
+
+class _EnumLoader(_Loader):
+    def __init__(self, symbols):
+        # type: (Sequence[Text]) -> None
+        self.symbols = symbols
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if doc in self.symbols:
+            return doc
+        else:
+            raise ValidationException("Expected one of %s" % (self.symbols,))
+
+
+class _RecordLoader(_Loader):
+    def __init__(self, classtype):
+        # type: (type) -> None
+        self.classtype = classtype
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, dict):
+            raise ValidationException("Expected a dict")
+        return self.classtype(doc, baseuri, loadingOptions, docRoot=docRoot)
+
+    def __repr__(self):
+        return str(self.classtype)
+
+
+class _UnionLoader(_Loader):
+    def __init__(self, alternates):
+        # type: (Sequence[_Loader]) -> None
+        self.alternates = alternates
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        errors = []
+        for t in self.alternates:
+            try:
+                return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
+            except ValidationException as e:
+                errors.append("tried %s but\n%s" % (t, indent(str(e))))
+        raise ValidationException(bullets(errors, "- "))
+
+    def __repr__(self):
+        return " | ".join(str(a) for a in self.alternates)
+
+class _URILoader(_Loader):
+    def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
+        # type: (_Loader, bool, bool, Union[int, None]) -> None
+        self.inner = inner
+        self.scoped_id = scoped_id
+        self.vocab_term = vocab_term
+        self.scoped_ref = scoped_ref
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if isinstance(doc, list):
+            doc = [expand_url(i, baseuri, loadingOptions,
+                            self.scoped_id, self.vocab_term, self.scoped_ref) for i in doc]
+        if isinstance(doc, six.string_types):
+            doc = expand_url(doc, baseuri, loadingOptions,
+                             self.scoped_id, self.vocab_term, self.scoped_ref)
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+class _TypeDSLLoader(_Loader):
+    typeDSLregex = re.compile(u"^([^[?]+)(\[\])?(\?)?$")
+
+    def __init__(self, inner, refScope):
+        # type: (_Loader, Union[int, None]) -> None
+        self.inner = inner
+        self.refScope = refScope
+
+    def resolve(self, doc, baseuri, loadingOptions):
+        m = self.typeDSLregex.match(doc)
+        if m:
+            first = expand_url(m.group(1), baseuri, loadingOptions, False, True, self.refScope)
+            second = third = None
+            if bool(m.group(2)):
+                second = {"type": "array", "items": first}
+                #second = CommentedMap((("type", "array"),
+                #                       ("items", first)))
+                #second.lc.add_kv_line_col("type", lc)
+                #second.lc.add_kv_line_col("items", lc)
+                #second.lc.filename = filename
+            if bool(m.group(3)):
+                third = [u"null", second or first]
+                #third = CommentedSeq([u"null", second or first])
+                #third.lc.add_kv_line_col(0, lc)
+                #third.lc.add_kv_line_col(1, lc)
+                #third.lc.filename = filename
+            doc = third or second or first
+        return doc
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if isinstance(doc, list):
+            r = []
+            for d in doc:
+                if isinstance(d, six.string_types):
+                    resolved = self.resolve(d, baseuri, loadingOptions)
+                    if isinstance(resolved, list):
+                        for i in resolved:
+                            if i not in r:
+                                r.append(i)
+                    else:
+                        if resolved not in r:
+                            r.append(resolved)
+                else:
+                    r.append(d)
+            doc = r
+        elif isinstance(doc, six.string_types):
+            doc = self.resolve(doc, baseuri, loadingOptions)
+
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+class _IdMapLoader(_Loader):
+    def __init__(self, inner, mapSubject, mapPredicate):
+        # type: (_Loader, Text, Union[Text, None]) -> None
+        self.inner = inner
+        self.mapSubject = mapSubject
+        self.mapPredicate = mapPredicate
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if isinstance(doc, dict):
+            r = []
+            for k in sorted(doc.keys()):
+                val = doc[k]
+                if isinstance(val, dict):
+                    v = copy.copy(val)
+                    if hasattr(val, 'lc'):
+                        v.lc.data = val.lc.data
+                        v.lc.filename = val.lc.filename
+                else:
+                    if self.mapPredicate:
+                        v = {self.mapPredicate: val}
+                    else:
+                        raise ValidationException("No mapPredicate")
+                v[self.mapSubject] = k
+                r.append(v)
+            doc = r
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+def _document_load(loader, doc, baseuri, loadingOptions):
+    if isinstance(doc, six.string_types):
+        return _document_load_by_url(loader, loadingOptions.fetcher.urljoin(baseuri, doc), loadingOptions)
+
+    if isinstance(doc, dict):
+        if "$namespaces" in doc:
+            loadingOptions = LoadingOptions(copyfrom=loadingOptions, namespaces=doc["$namespaces"])
+
+        if "$base" in doc:
+            baseuri = doc["$base"]
+
+        if "$graph" in doc:
+            return loader.load(doc["$graph"], baseuri, loadingOptions)
+        else:
+            return loader.load(doc, baseuri, loadingOptions, docRoot=baseuri)
+
+    if isinstance(doc, list):
+        return loader.load(doc, baseuri, loadingOptions)
+
+    raise ValidationException()
+
+
+def _document_load_by_url(loader, url, loadingOptions):
+    if url in loadingOptions.idx:
+        return _document_load(loader, loadingOptions.idx[url], url, loadingOptions)
+
+    text = loadingOptions.fetcher.fetch_text(url)
+    if isinstance(text, bytes):
+        textIO = StringIO(text.decode('utf-8'))
+    else:
+        textIO = StringIO(text)
+    textIO.name = url    # type: ignore
+    result = yaml.round_trip_load(textIO)
+    add_lc_filename(result, url)
+
+    loadingOptions.idx[url] = result
+
+    loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=url)
+
+    return _document_load(loader, result, url, loadingOptions)
+
+def file_uri(path, split_frag=False):  # type: (str, bool) -> str
+    if path.startswith("file://"):
+        return path
+    if split_frag:
+        pathsp = path.split("#", 2)
+        frag = "#" + urllib.parse.quote(str(pathsp[1])) if len(pathsp) == 2 else ""
+        urlpath = urllib.request.pathname2url(str(pathsp[0]))
+    else:
+        urlpath = urllib.request.pathname2url(path)
+        frag = ""
+    if urlpath.startswith("//"):
+        return "file:%s%s" % (urlpath, frag)
+    else:
+        return "file://%s%s" % (urlpath, frag)
+
+
+class RecordField(Savable):
+    """
+A field of a record.
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if 'name' in doc:
+            try:
+                self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+        else:
+            self.name = None
+
+
+        if self.name is None:
+            if docRoot is not None:
+                self.name = docRoot
+            else:
+                raise ValidationException("Missing name")
+        baseuri = self.name
+        if 'doc' in doc:
+            try:
+                self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+        else:
+            self.doc = None
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+        if errors:
+            raise ValidationException("Trying 'RecordField'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
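+# Illustrative sketch only, not part of the generated module: a RecordField is
+# built from a plain mapping; `name` is resolved against the supplied base URI
+# and save() gives the mapping back.  The URI below is invented; call the
+# helper manually to try it.
+def _example_record_field():
+    from schema_salad.metaschema import LoadingOptions, RecordField
+    rf = RecordField({"name": "label", "type": "string"},
+                     "file:///example/schema.yml", LoadingOptions())
+    return rf.save()   # roughly {'name': 'file:///example/schema.yml#label', 'type': 'string'}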
+
+class RecordSchema(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if 'fields' in doc:
+            try:
+                self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e)))
+        else:
+            self.fields = None
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+        if errors:
+            raise ValidationException("Trying 'RecordSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.fields is not None:
+            r['fields'] = save(self.fields)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class EnumSchema(Savable):
+    """
+Define an enumerated type.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        try:
+            self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e)))
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+        if errors:
+            raise ValidationException("Trying 'EnumSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.symbols is not None:
+            r['symbols'] = save(self.symbols)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class ArraySchema(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        try:
+            self.items = load_field(doc.get('items'), uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e)))
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_Array_symbolLoader_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+        if errors:
+            raise ValidationException("Trying 'ArraySchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.items is not None:
+            r['items'] = save(self.items)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class JsonldPredicate(Savable):
+    """
+Attached to a record field to define how the parent record field is handled for
+URI resolution and JSON-LD context generation.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if '_id' in doc:
+            try:
+                self._id = load_field(doc.get('_id'), uri_union_of_None_type_or_strtype_True_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, '_id', str).makeError("the `_id` field is not valid because:\n"+str(e)))
+        else:
+            self._id = None
+
+        if '_type' in doc:
+            try:
+                self._type = load_field(doc.get('_type'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, '_type', str).makeError("the `_type` field is not valid because:\n"+str(e)))
+        else:
+            self._type = None
+
+        if '_container' in doc:
+            try:
+                self._container = load_field(doc.get('_container'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, '_container', str).makeError("the `_container` field is not valid because:\n"+str(e)))
+        else:
+            self._container = None
+
+        if 'identity' in doc:
+            try:
+                self.identity = load_field(doc.get('identity'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'identity', str).makeError("the `identity` field is not valid because:\n"+str(e)))
+        else:
+            self.identity = None
+
+        if 'noLinkCheck' in doc:
+            try:
+                self.noLinkCheck = load_field(doc.get('noLinkCheck'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'noLinkCheck', str).makeError("the `noLinkCheck` field is not valid because:\n"+str(e)))
+        else:
+            self.noLinkCheck = None
+
+        if 'mapSubject' in doc:
+            try:
+                self.mapSubject = load_field(doc.get('mapSubject'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'mapSubject', str).makeError("the `mapSubject` field is not valid because:\n"+str(e)))
+        else:
+            self.mapSubject = None
+
+        if 'mapPredicate' in doc:
+            try:
+                self.mapPredicate = load_field(doc.get('mapPredicate'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'mapPredicate', str).makeError("the `mapPredicate` field is not valid because:\n"+str(e)))
+        else:
+            self.mapPredicate = None
+
+        if 'refScope' in doc:
+            try:
+                self.refScope = load_field(doc.get('refScope'), union_of_None_type_or_inttype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'refScope', str).makeError("the `refScope` field is not valid because:\n"+str(e)))
+        else:
+            self.refScope = None
+
+        if 'typeDSL' in doc:
+            try:
+                self.typeDSL = load_field(doc.get('typeDSL'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'typeDSL', str).makeError("the `typeDSL` field is not valid because:\n"+str(e)))
+        else:
+            self.typeDSL = None
+
+
+        if errors:
+            raise ValidationException("Trying 'JsonldPredicate'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self._id is not None:
+            r['_id'] = save(self._id)
+        if self._type is not None:
+            r['_type'] = save(self._type)
+        if self._container is not None:
+            r['_container'] = save(self._container)
+        if self.identity is not None:
+            r['identity'] = save(self.identity)
+        if self.noLinkCheck is not None:
+            r['noLinkCheck'] = save(self.noLinkCheck)
+        if self.mapSubject is not None:
+            r['mapSubject'] = save(self.mapSubject)
+        if self.mapPredicate is not None:
+            r['mapPredicate'] = save(self.mapPredicate)
+        if self.refScope is not None:
+            r['refScope'] = save(self.refScope)
+        if self.typeDSL is not None:
+            r['typeDSL'] = save(self.typeDSL)
+        return r
+
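+# Illustrative sketch only, not part of the generated module: the common
+# "link field" annotation is expressed by setting _type to "@id".  The base
+# URI is invented; call the helper manually to try it.
+def _example_jsonld_predicate():
+    from schema_salad.metaschema import JsonldPredicate, LoadingOptions
+    jp = JsonldPredicate({"_type": "@id", "identity": True},
+                         "http://example.com/schema#", LoadingOptions())
+    return jp.save()   # expected: {'_type': '@id', 'identity': True}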
+
+class SpecializeDef(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        try:
+            self.specializeFrom = load_field(doc.get('specializeFrom'), uri_strtype_None_False_1, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'specializeFrom', str).makeError("the `specializeFrom` field is not valid because:\n"+str(e)))
+
+        try:
+            self.specializeTo = load_field(doc.get('specializeTo'), uri_strtype_None_False_1, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'specializeTo', str).makeError("the `specializeTo` field is not valid because:\n"+str(e)))
+
+
+        if errors:
+            raise ValidationException("Trying 'SpecializeDef'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.specializeFrom is not None:
+            r['specializeFrom'] = save(self.specializeFrom)
+        if self.specializeTo is not None:
+            r['specializeTo'] = save(self.specializeTo)
+        return r
+
+
+class NamedType(Savable):
+    pass
+
+class DocType(Savable):
+    pass
+
+class SchemaDefinedType(DocType):
+    """
+Abstract base for schema-defined types.
+
+    """
+    pass
+
+class SaladRecordField(RecordField):
+    """
+A field of a record.
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if 'name' in doc:
+            try:
+                self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+        else:
+            self.name = None
+
+
+        if self.name is None:
+            if docRoot is not None:
+                self.name = docRoot
+            else:
+                raise ValidationException("Missing name")
+        baseuri = self.name
+        if 'doc' in doc:
+            try:
+                self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+        else:
+            self.doc = None
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+        if 'jsonldPredicate' in doc:
+            try:
+                self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+        else:
+            self.jsonldPredicate = None
+
+
+        if errors:
+            raise ValidationException("Trying 'SaladRecordField'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        return r
+
+
+class SaladRecordSchema(NamedType, RecordSchema, SchemaDefinedType):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if 'name' in doc:
+            try:
+                self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+        else:
+            self.name = None
+
+
+        if self.name is None:
+            if docRoot is not None:
+                self.name = docRoot
+            else:
+                raise ValidationException("Missing name")
+        baseuri = self.name
+        if 'inVocab' in doc:
+            try:
+                self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+        else:
+            self.inVocab = None
+
+        if 'fields' in doc:
+            try:
+                self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e)))
+        else:
+            self.fields = None
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_Record_symbolLoader_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+        if 'doc' in doc:
+            try:
+                self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+        else:
+            self.doc = None
+
+        if 'docParent' in doc:
+            try:
+                self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+        else:
+            self.docParent = None
+
+        if 'docChild' in doc:
+            try:
+                self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+        else:
+            self.docChild = None
+
+        if 'docAfter' in doc:
+            try:
+                self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+        else:
+            self.docAfter = None
+
+        if 'jsonldPredicate' in doc:
+            try:
+                self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+        else:
+            self.jsonldPredicate = None
+
+        if 'documentRoot' in doc:
+            try:
+                self.documentRoot = load_field(doc.get('documentRoot'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'documentRoot', str).makeError("the `documentRoot` field is not valid because:\n"+str(e)))
+        else:
+            self.documentRoot = None
+
+        if 'abstract' in doc:
+            try:
+                self.abstract = load_field(doc.get('abstract'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'abstract', str).makeError("the `abstract` field is not valid because:\n"+str(e)))
+        else:
+            self.abstract = None
+
+        if 'extends' in doc:
+            try:
+                self.extends = load_field(doc.get('extends'), uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_1, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'extends', str).makeError("the `extends` field is not valid because:\n"+str(e)))
+        else:
+            self.extends = None
+
+        if 'specialize' in doc:
+            try:
+                self.specialize = load_field(doc.get('specialize'), idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'specialize', str).makeError("the `specialize` field is not valid because:\n"+str(e)))
+        else:
+            self.specialize = None
+
+
+        if errors:
+            raise ValidationException("Trying 'SaladRecordSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        if self.fields is not None:
+            r['fields'] = save(self.fields)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        if self.documentRoot is not None:
+            r['documentRoot'] = save(self.documentRoot)
+        if self.abstract is not None:
+            r['abstract'] = save(self.abstract)
+        if self.extends is not None:
+            r['extends'] = save(self.extends)
+        if self.specialize is not None:
+            r['specialize'] = save(self.specialize)
+        return r
+
+
+class SaladEnumSchema(NamedType, EnumSchema, SchemaDefinedType):
+    """
+Define an enumerated type.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if 'name' in doc:
+            try:
+                self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+        else:
+            self.name = None
+
+
+        if self.name is None:
+            if docRoot is not None:
+                self.name = docRoot
+            else:
+                raise ValidationException("Missing name")
+        baseuri = self.name
+        if 'inVocab' in doc:
+            try:
+                self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+        else:
+            self.inVocab = None
+
+        try:
+            self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_True_False_None, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e)))
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_Enum_symbolLoader_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+        if 'doc' in doc:
+            try:
+                self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+        else:
+            self.doc = None
+
+        if 'docParent' in doc:
+            try:
+                self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+        else:
+            self.docParent = None
+
+        if 'docChild' in doc:
+            try:
+                self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+        else:
+            self.docChild = None
+
+        if 'docAfter' in doc:
+            try:
+                self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+        else:
+            self.docAfter = None
+
+        if 'jsonldPredicate' in doc:
+            try:
+                self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+        else:
+            self.jsonldPredicate = None
+
+        if 'documentRoot' in doc:
+            try:
+                self.documentRoot = load_field(doc.get('documentRoot'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'documentRoot', str).makeError("the `documentRoot` field is not valid because:\n"+str(e)))
+        else:
+            self.documentRoot = None
+
+        if 'extends' in doc:
+            try:
+                self.extends = load_field(doc.get('extends'), uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_1, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'extends', str).makeError("the `extends` field is not valid because:\n"+str(e)))
+        else:
+            self.extends = None
+
+
+        if errors:
+            raise ValidationException("Trying 'SaladEnumSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        if self.symbols is not None:
+            r['symbols'] = save(self.symbols)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        if self.documentRoot is not None:
+            r['documentRoot'] = save(self.documentRoot)
+        if self.extends is not None:
+            r['extends'] = save(self.extends)
+        return r
+
+
+class Documentation(NamedType, DocType):
+    """
+A documentation section.  This type exists to facilitate self-documenting
+schemas but has no role in formal validation.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+        if 'name' in doc:
+            try:
+                self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+        else:
+            self.name = None
+
+
+        if self.name is None:
+            if docRoot is not None:
+                self.name = docRoot
+            else:
+                raise ValidationException("Missing name")
+        baseuri = self.name
+        if 'inVocab' in doc:
+            try:
+                self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+        else:
+            self.inVocab = None
+
+        if 'doc' in doc:
+            try:
+                self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+        else:
+            self.doc = None
+
+        if 'docParent' in doc:
+            try:
+                self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+        else:
+            self.docParent = None
+
+        if 'docChild' in doc:
+            try:
+                self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+        else:
+            self.docChild = None
+
+        if 'docAfter' in doc:
+            try:
+                self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_None_False_None, baseuri, loadingOptions)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+        else:
+            self.docAfter = None
+
+        try:
+            self.type = load_field(doc.get('type'), typedsl_Documentation_symbolLoader_2, baseuri, loadingOptions)
+        except ValidationException as e:
+            errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+        if errors:
+            raise ValidationException("Trying 'Documentation'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+_vocab = {
+    "Any": "https://w3id.org/cwl/salad#Any",
+    "ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
+    "DocType": "https://w3id.org/cwl/salad#DocType",
+    "Documentation": "https://w3id.org/cwl/salad#Documentation",
+    "EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
+    "JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
+    "NamedType": "https://w3id.org/cwl/salad#NamedType",
+    "PrimitiveType": "https://w3id.org/cwl/salad#PrimitiveType",
+    "RecordField": "https://w3id.org/cwl/salad#RecordField",
+    "RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
+    "SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
+    "SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
+    "SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
+    "SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
+    "SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
+    "array": "https://w3id.org/cwl/salad#array",
+    "boolean": "http://www.w3.org/2001/XMLSchema#boolean",
+    "documentation": "https://w3id.org/cwl/salad#documentation",
+    "double": "http://www.w3.org/2001/XMLSchema#double",
+    "enum": "https://w3id.org/cwl/salad#enum",
+    "float": "http://www.w3.org/2001/XMLSchema#float",
+    "int": "http://www.w3.org/2001/XMLSchema#int",
+    "long": "http://www.w3.org/2001/XMLSchema#long",
+    "null": "https://w3id.org/cwl/salad#null",
+    "record": "https://w3id.org/cwl/salad#record",
+    "string": "http://www.w3.org/2001/XMLSchema#string",
+}
+_rvocab = {
+    "https://w3id.org/cwl/salad#Any": "Any",
+    "https://w3id.org/cwl/salad#ArraySchema": "ArraySchema",
+    "https://w3id.org/cwl/salad#DocType": "DocType",
+    "https://w3id.org/cwl/salad#Documentation": "Documentation",
+    "https://w3id.org/cwl/salad#EnumSchema": "EnumSchema",
+    "https://w3id.org/cwl/salad#JsonldPredicate": "JsonldPredicate",
+    "https://w3id.org/cwl/salad#NamedType": "NamedType",
+    "https://w3id.org/cwl/salad#PrimitiveType": "PrimitiveType",
+    "https://w3id.org/cwl/salad#RecordField": "RecordField",
+    "https://w3id.org/cwl/salad#RecordSchema": "RecordSchema",
+    "https://w3id.org/cwl/salad#SaladEnumSchema": "SaladEnumSchema",
+    "https://w3id.org/cwl/salad#SaladRecordField": "SaladRecordField",
+    "https://w3id.org/cwl/salad#SaladRecordSchema": "SaladRecordSchema",
+    "https://w3id.org/cwl/salad#SchemaDefinedType": "SchemaDefinedType",
+    "https://w3id.org/cwl/salad#SpecializeDef": "SpecializeDef",
+    "https://w3id.org/cwl/salad#array": "array",
+    "http://www.w3.org/2001/XMLSchema#boolean": "boolean",
+    "https://w3id.org/cwl/salad#documentation": "documentation",
+    "http://www.w3.org/2001/XMLSchema#double": "double",
+    "https://w3id.org/cwl/salad#enum": "enum",
+    "http://www.w3.org/2001/XMLSchema#float": "float",
+    "http://www.w3.org/2001/XMLSchema#int": "int",
+    "http://www.w3.org/2001/XMLSchema#long": "long",
+    "https://w3id.org/cwl/salad#null": "null",
+    "https://w3id.org/cwl/salad#record": "record",
+    "http://www.w3.org/2001/XMLSchema#string": "string",
+}
+
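+# The loader variables below are generated from the metaschema; their names
+# encode how they are composed.  For example, uri_strtype_True_False_None is a
+# _URILoader around strtype constructed with the arguments (True, False, None),
+# and typedsl_Record_symbolLoader_2 wraps the "record" enum loader in a
+# _TypeDSLLoader with argument 2.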
+floattype = _PrimitiveLoader(float)
+None_type = _PrimitiveLoader(type(None))
+inttype = _PrimitiveLoader(int)
+strtype = _PrimitiveLoader((str, six.text_type))
+booltype = _PrimitiveLoader(bool)
+Any_type = _AnyLoader()
+PrimitiveTypeLoader = _EnumLoader(("null", "boolean", "int", "long", "float", "double", "string",))
+AnyLoader = _EnumLoader(("Any",))
+RecordFieldLoader = _RecordLoader(RecordField)
+RecordSchemaLoader = _RecordLoader(RecordSchema)
+EnumSchemaLoader = _RecordLoader(EnumSchema)
+ArraySchemaLoader = _RecordLoader(ArraySchema)
+JsonldPredicateLoader = _RecordLoader(JsonldPredicate)
+SpecializeDefLoader = _RecordLoader(SpecializeDef)
+NamedTypeLoader = _RecordLoader(NamedType)
+DocTypeLoader = _RecordLoader(DocType)
+SchemaDefinedTypeLoader = _RecordLoader(SchemaDefinedType)
+SaladRecordFieldLoader = _RecordLoader(SaladRecordField)
+SaladRecordSchemaLoader = _RecordLoader(SaladRecordSchema)
+SaladEnumSchemaLoader = _RecordLoader(SaladEnumSchema)
+DocumentationLoader = _RecordLoader(Documentation)
+uri_strtype_True_False_None = _URILoader(strtype, True, False, None)
+union_of_None_type_or_strtype = _UnionLoader((None_type, strtype,))
+union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype,))
+array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _ArrayLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype)
+union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype, array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype,))
+typedsl_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_2 = _TypeDSLLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, 2)
+array_of_RecordFieldLoader = _ArrayLoader(RecordFieldLoader)
+union_of_None_type_or_array_of_RecordFieldLoader = _UnionLoader((None_type, array_of_RecordFieldLoader,))
+idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_RecordFieldLoader, 'name', 'type')
+Record_symbolLoader = _EnumLoader(("record",))
+typedsl_Record_symbolLoader_2 = _TypeDSLLoader(Record_symbolLoader, 2)
+array_of_strtype = _ArrayLoader(strtype)
+uri_array_of_strtype_True_False_None = _URILoader(array_of_strtype, True, False, None)
+Enum_symbolLoader = _EnumLoader(("enum",))
+typedsl_Enum_symbolLoader_2 = _TypeDSLLoader(Enum_symbolLoader, 2)
+uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, False, True, 2)
+Array_symbolLoader = _EnumLoader(("array",))
+typedsl_Array_symbolLoader_2 = _TypeDSLLoader(Array_symbolLoader, 2)
+uri_union_of_None_type_or_strtype_True_False_None = _URILoader(union_of_None_type_or_strtype, True, False, None)
+union_of_None_type_or_booltype = _UnionLoader((None_type, booltype,))
+union_of_None_type_or_inttype = _UnionLoader((None_type, inttype,))
+uri_strtype_None_False_1 = _URILoader(strtype, None, False, 1)
+union_of_None_type_or_strtype_or_array_of_strtype = _UnionLoader((None_type, strtype, array_of_strtype,))
+uri_union_of_None_type_or_strtype_None_False_None = _URILoader(union_of_None_type_or_strtype, None, False, None)
+uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_None = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, None, False, None)
+union_of_None_type_or_strtype_or_JsonldPredicateLoader = _UnionLoader((None_type, strtype, JsonldPredicateLoader,))
+array_of_SaladRecordFieldLoader = _ArrayLoader(SaladRecordFieldLoader)
+union_of_None_type_or_array_of_SaladRecordFieldLoader = _UnionLoader((None_type, array_of_SaladRecordFieldLoader,))
+idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_SaladRecordFieldLoader, 'name', 'type')
+uri_union_of_None_type_or_strtype_or_array_of_strtype_None_False_1 = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, None, False, 1)
+array_of_SpecializeDefLoader = _ArrayLoader(SpecializeDefLoader)
+union_of_None_type_or_array_of_SpecializeDefLoader = _UnionLoader((None_type, array_of_SpecializeDefLoader,))
+idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader = _IdMapLoader(union_of_None_type_or_array_of_SpecializeDefLoader, 'specializeFrom', 'specializeTo')
+Documentation_symbolLoader = _EnumLoader(("documentation",))
+typedsl_Documentation_symbolLoader_2 = _TypeDSLLoader(Documentation_symbolLoader, 2)
+union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _UnionLoader((SaladRecordSchemaLoader, SaladEnumSchemaLoader, DocumentationLoader,))
+array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _ArrayLoader(union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader)
+union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _UnionLoader((SaladRecordSchemaLoader, SaladEnumSchemaLoader, DocumentationLoader, array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader,))
+
+
+
+def load_document(doc, baseuri=None, loadingOptions=None):
+    if baseuri is None:
+        baseuri = file_uri(os.getcwd()) + "/"
+    if loadingOptions is None:
+        loadingOptions = LoadingOptions()
+    return _document_load(union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader, doc, baseuri, loadingOptions)
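+
+# Illustrative sketch only, not part of the generated module: passing a URI
+# string instead of a parsed document makes load_document fetch and parse the
+# file itself.  The path below is an invented placeholder and must point at a
+# real Salad schema for the call to succeed; call the helper manually.
+def _example_load_from_file():
+    from schema_salad.metaschema import file_uri, load_document
+    return load_document(file_uri("/path/to/my-schema.yml"))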
diff --git a/schema_salad/metaschema/metaschema.html b/schema_salad/metaschema/metaschema.html
new file mode 100644
index 0000000..ffafb1a
--- /dev/null
+++ b/schema_salad/metaschema/metaschema.html
@@ -0,0 +1,971 @@
+
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <meta charset="UTF-8">
+    <meta name="viewport" content="width=device-width, initial-scale=1.0">
+    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css">
+    <title>Semantic Annotations for Linked Avro Data (SALAD)</title>
+    <style>
+    :target {
+      padding-top: 61px;
+      margin-top: -61px;
+    }
+    body {
+      padding-top: 61px;
+    }
+    .tocnav ol {
+      list-style: none
+    }
+    pre {
+      margin-left: 2em;
+      margin-right: 2em;
+    }
+    </style>
+    </head>
+    <body>
+    
+      <nav class="navbar navbar-default navbar-fixed-top">
+        <div class="container">
+          <div class="navbar-header">
+            <a class="navbar-brand" href="None">None</a>
+    
+                <ul class="nav navbar-nav">
+                  <li><a href="#toc">Table of contents</a></li>
+                </ul>
+        
+          </div>
+        </div>
+      </nav>
+    
+    <div class="container">
+    
+    <div class="row">
+    
+    <div class="col-md-12" role="main" id="main"><h1 id="Semantic_Annotations_for_Linked_Avro_Data_(SALAD)">Semantic Annotations for Linked Avro Data (SALAD)</h1><p>Author:</p>
+<ul>
+<li>Peter Amstutz <a href="mailto:peter.amstutz at curoverse.com">peter.amstutz at curoverse.com</a>, Curoverse</li>
+</ul>
+<p>Contributors:</p>
+<ul>
+<li>The developers of Apache Avro</li>
+<li>The developers of JSON-LD</li>
+<li>Nebojša Tijanić <a href="mailto:nebojsa.tijanic at sbgenomics.com">nebojsa.tijanic at sbgenomics.com</a>, Seven Bridges Genomics</li>
+</ul>
+<h1 id="Abstract">Abstract</h1><p>Salad is a schema language for describing structured linked data documents
+in JSON or YAML documents.  A Salad schema provides rules for
+preprocessing, structural validation, and link checking for documents
+described by a Salad schema.  Salad builds on JSON-LD and the Apache Avro
+data serialization system, and extends Avro with features for rich data
+modeling such as inheritance, template specialization, object identifiers,
+and object references.  Salad was developed to provide a bridge between the
+record oriented data modeling supported by Apache Avro and the Semantic
+Web.</p>
+<h1 id="Status_of_This_Document">Status of This Document</h1><p>This document is the product of the <a href="https://groups.google.com/forum/#!forum/common-workflow-language">Common Workflow Language working
+group</a>.  The
+latest version of this document is available in the "schema_salad" repository at</p>
+<p><a href="https://github.com/common-workflow-language/schema_salad">https://github.com/common-workflow-language/schema_salad</a></p>
+<p>The products of the CWL working group (including this document) are made available
+under the terms of the Apache License, version 2.0.</p>
+<h1 id="toc">Table of contents</h1>
+               <nav class="tocnav"><ol><li><a href="#Semantic_Annotations_for_Linked_Avro_Data_(SALAD)"> Semantic Annotations for Linked Avro Data (SALAD)</a><ol>
+</ol><li><a href="#Abstract"> Abstract</a><ol>
+</ol><li><a href="#Status_of_This_Document"> Status of This Document</a><ol>
+</ol><li><a href="#Introduction">1. Introduction</a><ol>
+<li><a href="#Introduction_to_v1.0">1.1 Introduction to v1.0</a><ol>
+</ol><li><a href="#References_to_Other_Specifications">1.2 References to Other Specifications</a><ol>
+</ol><li><a href="#Scope">1.3 Scope</a><ol>
+</ol><li><a href="#Terminology">1.4 Terminology</a><ol>
+</ol></li></ol><li><a href="#Document_model">2. Document model</a><ol>
+<li><a href="#Data_concepts">2.1 Data concepts</a><ol>
+</ol><li><a href="#Syntax">2.2 Syntax</a><ol>
+</ol><li><a href="#Document_context">2.3 Document context</a><ol>
+<li><a href="#Implied_context">2.3.1 Implied context</a><ol>
+</ol><li><a href="#Explicit_context">2.3.2 Explicit context</a><ol>
+</ol></li></ol><li><a href="#Document_graph">2.4 Document graph</a><ol>
+</ol><li><a href="#Document_metadata">2.5 Document metadata</a><ol>
+</ol><li><a href="#Document_schema">2.6 Document schema</a><ol>
+<li><a href="#Record_field_annotations">2.6.1 Record field annotations</a><ol>
+</ol></li></ol><li><a href="#Document_traversal">2.7 Document traversal</a><ol>
+</ol></li></ol><li><a href="#Document_preprocessing">3. Document preprocessing</a><ol>
+<li><a href="#Field_name_resolution">3.1 Field name resolution</a><ol>
+<li><a href="#Field_name_resolution_example">3.1.1 Field name resolution example</a><ol>
+</ol></li></ol><li><a href="#Identifier_resolution">3.2 Identifier resolution</a><ol>
+<li><a href="#Identifier_resolution_example">3.2.1 Identifier resolution example</a><ol>
+</ol></li></ol><li><a href="#Link_resolution">3.3 Link resolution</a><ol>
+<li><a href="#Link_resolution_example">3.3.1 Link resolution example</a><ol>
+</ol></li></ol><li><a href="#Vocabulary_resolution">3.4 Vocabulary resolution</a><ol>
+<li><a href="#Vocabulary_resolution_example">3.4.1 Vocabulary resolution example</a><ol>
+</ol></li></ol><li><a href="#Import">3.5 Import</a><ol>
+<li><a href="#Import_example">3.5.1 Import example</a><ol>
+</ol></li></ol><li><a href="#Include">3.6 Include</a><ol>
+<li><a href="#Include_example">3.6.1 Include example</a><ol>
+</ol></li></ol><li><a href="#Mixin">3.7 Mixin</a><ol>
+<li><a href="#Mixin_example">3.7.1 Mixin example</a><ol>
+</ol></li></ol><li><a href="#Identifier_maps">3.8 Identifier maps</a><ol>
+<li><a href="#Identifier_map_example">3.8.1 Identifier map example</a><ol>
+</ol></li></ol><li><a href="#Domain_Specific_Language_for_types">3.9 Domain Specific Language for types</a><ol>
+<li><a href="#Type_DSL_example">3.9.1 Type DSL example</a><ol>
+</ol></li></ol></li></ol><li><a href="#Link_validation">4. Link validation</a><ol>
+</ol><li><a href="#Schema">5. Schema</a><ol>
+<li><a href="#SaladRecordSchema">5.1 SaladRecordSchema</a><ol>
+<li><a href="#SaladRecordField">5.1.1 SaladRecordField</a><ol>
+<li><a href="#PrimitiveType">5.1.1.1 PrimitiveType</a><ol>
+</ol><li><a href="#Any">5.1.1.2 Any</a><ol>
+</ol><li><a href="#RecordSchema">5.1.1.3 RecordSchema</a><ol>
+</ol><li><a href="#RecordField">5.1.1.4 RecordField</a><ol>
+<li><a href="#EnumSchema">5.1.1.4.1 EnumSchema</a><ol>
+</ol><li><a href="#ArraySchema">5.1.1.4.2 ArraySchema</a><ol>
+</ol></li></ol><li><a href="#JsonldPredicate">5.1.1.5 JsonldPredicate</a><ol>
+</ol></li></ol><li><a href="#SpecializeDef">5.1.2 SpecializeDef</a><ol>
+</ol></li></ol><li><a href="#SaladEnumSchema">5.2 SaladEnumSchema</a><ol>
+</ol><li><a href="#Documentation">5.3 Documentation</a><ol>
+</ol></li></ol></li></ol></nav>
+
+<h1 id="Introduction">1. Introduction</h1><p>The JSON data model is an extremely popular way to represent structured
+data.  It is attractive because of its relative simplicity and is a
+natural fit with the standard types of many programming languages.
+However, this simplicity means that basic JSON lacks expressive features
+useful for working with complex data structures and document formats, such
+as schemas, object references, and namespaces.</p>
+<p>JSON-LD is a W3C standard providing a way to describe how to interpret a
+JSON document as Linked Data by means of a "context".  JSON-LD provides a
+powerful solution for representing object references and namespaces in JSON
+based on standard web URIs, but is not itself a schema language.  Without a
+schema providing a well defined structure, it is difficult to process an
+arbitrary JSON-LD document as idiomatic JSON because there are many ways to
+express the same data that are logically equivalent but structurally
+distinct.</p>
+<p>Several schema languages exist for describing and validating JSON data,
+such as the Apache Avro data serialization system; however, none understand
+linked data.  As a result, to fully take advantage of JSON-LD to build the
+next generation of linked data applications, one must maintain separate
+JSON schema, JSON-LD context, RDF schema, and human documentation, despite
+significant overlap of content and obvious need for these documents to stay
+synchronized.</p>
+<p>Schema Salad is designed to address this gap.  It provides a schema
+language and processing rules for describing structured JSON content
+permitting URI resolution and strict document validation.  The schema
+language supports linked data through annotations that describe the linked
+data interpretation of the content, enables generation of JSON-LD context
+and RDF schema, and production of RDF triples by applying the JSON-LD
+context.  The schema language also provides for robust support of inline
+documentation.</p>
+<h2 id="Introduction_to_v1.0">1.1 Introduction to v1.0</h2><p>This is the second version of of the Schema Salad specification.  It is
+developed concurrently with v1.0 of the Common Workflow Language for use in
+specifying the Common Workflow Language, however Schema Salad is intended to be
+useful to a broader audience.  Compared to the draft-1 schema salad
+specification, the following changes have been made:</p>
+<ul>
+<li>Use of <a href="#Identifier_maps">mapSubject and mapPredicate</a> to transform maps to lists of records.</li>
+<li>Resolution of the <a href="#Domain_Specific_Language_for_types">domain Specific Language for types</a></li>
+<li>Consolidation of the formal <a href="#Schema">schema into section 5</a>.</li>
+</ul>
+<h2 id="References_to_Other_Specifications">1.2 References to Other Specifications</h2><p><strong>Javascript Object Notation (JSON)</strong>: <a href="http://json.org">http://json.org</a></p>
+<p><strong>JSON Linked Data (JSON-LD)</strong>: <a href="http://json-ld.org">http://json-ld.org</a></p>
+<p><strong>YAML</strong>: <a href="http://yaml.org">http://yaml.org</a></p>
+<p><strong>Avro</strong>: <a href="https://avro.apache.org/docs/current/spec.html">https://avro.apache.org/docs/current/spec.html</a></p>
+<p><strong>Uniform Resource Identifier (URI) Generic Syntax</strong>: <a href="https://tools.ietf.org/html/rfc3986">https://tools.ietf.org/html/rfc3986</a></p>
+<p><strong>Resource Description Framework (RDF)</strong>: <a href="http://www.w3.org/RDF/">http://www.w3.org/RDF/</a></p>
+<p><strong>UTF-8</strong>: <a href="https://www.ietf.org/rfc/rfc2279.txt">https://www.ietf.org/rfc/rfc2279.txt</a></p>
+<h2 id="Scope">1.3 Scope</h2><p>This document describes the syntax, data model, algorithms, and schema
+language for working with Salad documents.  It is not intended to document
+a specific implementation of Salad, however it may serve as a reference for
+the behavior of conforming implementations.</p>
+<h2 id="Terminology">1.4 Terminology</h2><p>The terminology used to describe Salad documents is defined in the Concepts
+section of the specification. The terms defined in the following list are
+used in building those definitions and in describing the actions of a
+Salad implementation:</p>
+<p><strong>may</strong>: Conforming Salad documents and Salad implementations are permitted but
+not required to be interpreted as described.</p>
+<p><strong>must</strong>: Conforming Salad documents and Salad implementations are required
+to be interpreted as described; otherwise they are in error.</p>
+<p><strong>error</strong>: A violation of the rules of this specification; results are
+undefined. Conforming implementations may detect and report an error and may
+recover from it.</p>
+<p><strong>fatal error</strong>: A violation of the rules of this specification; results
+are undefined. Conforming implementations must not continue to process the
+document and may report an error.</p>
+<p><strong>at user option</strong>: Conforming software may or must (depending on the modal verb in
+the sentence) behave as described; if it does, it must provide users a means to
+enable or disable the behavior described.</p>
+<h1 id="Document_model">2. Document model</h1><h2 id="Data_concepts">2.1 Data concepts</h2><p>An <strong>object</strong> is a data structure equivalent to the "object" type in JSON,
+consisting of an unordered set of name/value pairs (referred to here as
+<strong>fields</strong>) and where the name is a string and the value is a string, number,
+boolean, array, or object.</p>
+<p>A <strong>document</strong> is a file containing a serialized object, or an array of
+objects.</p>
+<p>A <strong>document type</strong> is a class of files that share a common structure and
+semantics.</p>
+<p>A <strong>document schema</strong> is a formal description of the grammar of a document type.</p>
+<p>A <strong>base URI</strong> is a context-dependent URI used to resolve relative references.</p>
+<p>An <strong>identifier</strong> is a URI that designates a single document or single
+object within a document.</p>
+<p>A <strong>vocabulary</strong> is the set of symbolic field names and enumerated symbols defined
+by a document schema, where each term maps to an absolute URI.</p>
+<h2 id="Syntax">2.2 Syntax</h2><p>Conforming Salad documents are serialized and loaded using YAML syntax and
+UTF-8 text encoding.  Salad documents are written using the JSON-compatible
+subset of YAML.  Features of YAML such as headers and type tags that are
+not found in the standard JSON data model must not be used in conforming
+Salad documents.  It is a fatal error if the document is not valid YAML.</p>
+<p>A Salad document must consist only of either a single root object or an
+array of objects.</p>
+<h2 id="Document_context">2.3 Document context</h2><h3 id="Implied_context">2.3.1 Implied context</h3><p>The implicit context consists of the vocabulary defined by the schema and
+the base URI.  By default, the base URI must be the URI that was used to
+load the document.  It may be overridden by an explicit context.</p>
+<h3 id="Explicit_context">2.3.2 Explicit context</h3><p>If a document consists of a root object, this object may contain the
+fields <code>$base</code>, <code>$namespaces</code>, <code>$schemas</code>, and <code>$graph</code>:</p>
+<ul>
+<li><p><code>$base</code>: Must be a string.  Set the base URI for the document used to
+resolve relative references.</p>
+</li>
+<li><p><code>$namespaces</code>: Must be an object with strings as values.  The keys of
+the object are namespace prefixes used in the document; the values of
+the object are the prefix expansions.</p>
+</li>
+<li><p><code>$schemas</code>: Must be an array of strings.  This field may list URI
+references to documents in RDF-XML format which will be queried for RDF
+schema data.  The subjects and predicates described by the RDF schema
+may provide additional semantic context for the document, and may be
+used for validation of prefixed extension fields found in the document.</p>
+</li>
+</ul>
+<p>Other directives beginning with <code>$</code> must be ignored.</p>
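+<p>For illustration only, a minimal root object using an explicit context might
+look like the following sketch (the <code>ex</code> prefix and the example.com
+URIs are invented):</p>
+<pre><code># A hypothetical Salad root object carrying explicit context directives,
+# written here as a Python literal.
+doc = {
+    "$base": "http://example.com/base",
+    "$namespaces": {"ex": "http://example.com/vocab#"},
+    "$schemas": ["http://example.com/vocab.xml"],
+    "$graph": [
+        {"name": "thing1", "ex:colour": "red"},
+    ],
+}
+</code></pre>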
+<h2 id="Document_graph">2.4 Document graph</h2><p>If a document consists of a single root object, this object may contain the
+field <code>$graph</code>.  This field must be an array of objects.  If present, this
+field holds the primary content of the document.  A document that consists
+of an array of objects at the root is an implicit graph.</p>
+<h2 id="Document_metadata">2.5 Document metadata</h2><p>If a document consists of a single root object, metadata about the
+document, such as authorship, may be declared in the root object.</p>
+<h2 id="Document_schema">2.6 Document schema</h2><p>Document preprocessing, link validation and schema validation require a
+document schema.  A schema may consist of:</p>
+<ul>
+<li><p>At least one record definition object which defines valid fields that
+make up a record type.  Record field definitions include the valid types
+that may be assigned to each field and annotations to indicate fields
+that represent identifiers and links, described below in "Semantic
+Annotations".</p>
+</li>
+<li><p>Any number of enumerated type objects which define a finite set of symbols that are
+the valid values of the type.</p>
+</li>
+<li><p>Any number of documentation objects which allow in-line documentation of the schema.</p>
+</li>
+</ul>
+<p>The schema for defining a salad schema (the metaschema) is described in
+detail in "Schema validation".</p>
+<h3 id="Record_field_annotations">2.6.1 Record field annotations</h3><p>In a document schema, record field definitions may include the field
+<code>jsonldPredicate</code>, which may be either a string or object.  Implementations
+must preprocess fields according to the following rules:</p>
+<ul>
+<li><p>If the value of <code>jsonldPredicate</code> is <code>@id</code>, the field is an identifier
+field.</p>
+</li>
+<li><p>If the value of <code>jsonldPredicate</code> is an object, and that object
+contains the field <code>_type</code> with the value <code>@id</code>, the field is a
+link field.</p>
+</li>
+<li><p>If the value of <code>jsonldPredicate</code> is an object, and that object
+contains the field <code>_type</code> with the value <code>@vocab</code>, the field is a
+vocabulary field, which is a subtype of link field.</p>
+</li>
+</ul>
+<h2 id="Document_traversal">2.7 Document traversal</h2><p>To perform document document preprocessing, link validation and schema
+validation, the document must be traversed starting from the fields or
+array items of the root object or array and recursively visiting each child
+item which contains an object or arrays.</p>
+<h1 id="Document_preprocessing">3. Document preprocessing</h1><p>After processing the explicit context (if any), document preprocessing
+begins.  Starting from the document root, object field values or array
+items which contain objects or arrays are recursively traversed
+depth-first.  For each visited object, field names, identifier fields, link
+fields, vocabulary fields, and <code>$import</code> and <code>$include</code> directives must be
+processed as described in this section.  The order of traversal of child
+nodes within a parent node is undefined.</p>
+<h2 id="Field_name_resolution">3.1 Field name resolution</h2><p>The document schema declares the vocabulary of known field names.  During
+preprocessing traversal, field names in the document which are not part of
+the schema vocabulary must be resolved to absolute URIs.  Under "strict"
+validation, it is an error for a document to include fields which are not
+part of the vocabulary and not resolvable to absolute URIs.  Field names
+which are not part of the vocabulary are resolved using the following
+rules:</p>
+<ul>
+<li><p>If a field name URI begins with a namespace prefix declared in the
+document context (<code>@context</code>) followed by a colon <code>:</code>, the prefix and
+colon must be replaced by the namespace declared in <code>@context</code>.</p>
+</li>
+<li><p>If there is a vocabulary term which maps to the URI of a resolved
+field, the field name must be replaced with the vocabulary term.</p>
+</li>
+<li><p>If a field name URI is an absolute URI consisting of a scheme and path
+and is not part of the vocabulary, no processing occurs.</p>
+</li>
+</ul>
+<p>Field name resolution is not relative.  It must not be affected by the
+base URI.</p>
+<h3 id="Field_name_resolution_example">3.1.1 Field name resolution example</h3><p>Given the following schema:</p>
+<pre><code>{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "base",
+      "type": "string",
+      "jsonldPredicate": "http://example.com/base"
+    }]
+  }]
+}
+</code></pre>
+<p>Process the following example:</p>
+<pre><code>    {
+      "base": "one",
+      "form": {
+        "http://example.com/base": "two",
+        "http://example.com/three": "three",
+      },
+      "acid:four": "four"
+    }
+</code></pre>
+<p>This becomes:</p>
+<pre><code>    {
+      "base": "one",
+      "form": {
+        "base": "two",
+        "http://example.com/three": "three",
+      },
+      "http://example.com/acid#four": "four"
+    }
+</code></pre>
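+<p>The following is a minimal Python sketch of the field name resolution rules
+above.  The helper name <code>resolve_field_name</code> and the plain-dict form of the
+namespaces and vocabulary are illustrative only and are not part of the
+schema-salad API.</p>
+<pre><code>def resolve_field_name(name, namespaces, vocab):
+    # vocab maps short vocabulary terms to their absolute predicate URIs.
+    # 1. Expand a declared namespace prefix, e.g. "acid:four".
+    if ":" in name:
+        prefix, _, rest = name.partition(":")
+        if prefix in namespaces:
+            name = namespaces[prefix] + rest
+    # 2. If the (possibly expanded) URI maps to a vocabulary term, use the term.
+    rvocab = dict((uri, term) for term, uri in vocab.items())
+    if name in rvocab:
+        return rvocab[name]
+    # 3. Anything else is left untouched; field name resolution is never
+    #    relative to the base URI.
+    return name
+
+namespaces = {"acid": "http://example.com/acid#"}
+vocab = {"base": "http://example.com/base"}
+print(resolve_field_name("acid:four", namespaces, vocab))
+# http://example.com/acid#four
+print(resolve_field_name("http://example.com/base", namespaces, vocab))
+# base
+</code></pre>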
+<h2 id="Identifier_resolution">3.2 Identifier resolution</h2><p>The schema may designate one or more fields as identifier fields to identify
+specific objects.  Processing must resolve relative identifiers to absolute
+identifiers using the following rules:</p>
+<ul>
+<li><p>If an identifier URI is prefixed with <code>#</code> it is a URI relative
+fragment identifier.  It is resolved relative to the base URI by setting
+or replacing the fragment portion of the base URI.</p>
+</li>
+<li><p>If an identifier URI does not contain a scheme and is not prefixed <code>#</code> it
+is a parent relative fragment identifier.  It is resolved relative to the
+base URI by the following rule: if the base URI does not contain a
+document fragment, set the fragment portion of the base URI.  If the base
+URI does contain a document fragment, append a slash <code>/</code> followed by the
+identifier field to the fragment portion of the base URI.</p>
+</li>
+<li><p>If an identifier URI begins with a namespace prefix declared in
+<code>$namespaces</code> followed by a colon <code>:</code>, the prefix and colon must be
+replaced by the namespace declared in <code>$namespaces</code>.</p>
+</li>
+<li><p>If an identifier URI is an absolute URI consisting of a scheme and path,
+no processing occurs.</p>
+</li>
+</ul>
+<p>When preprocessing visits a node containing an identifier, that identifier
+must be used as the base URI to process child nodes.</p>
+<p>It is an error for more than one object in a document to have the same
+absolute URI.</p>
+<h3 id="Identifier_resolution_example">3.2.1 Identifier resolution example</h3><p>Given the following schema:</p>
+<pre><code>{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "id",
+      "type": "string",
+      "jsonldPredicate": "@id"
+    }]
+  }]
+}
+</code></pre>
+<p>Process the following example:</p>
+<pre><code>    {
+      "id": "http://example.com/base",
+      "form": {
+        "id": "one",
+        "things": [
+          {
+            "id": "two"
+          },
+          {
+            "id": "#three",
+          },
+          {
+            "id": "four#five",
+          },
+          {
+            "id": "acid:six",
+          }
+        ]
+      }
+    }
+</code></pre>
+<p>This becomes:</p>
+<pre><code>{
+  "id": "http://example.com/base",
+  "form": {
+    "id": "http://example.com/base#one",
+    "things": [
+      {
+        "id": "http://example.com/base#one/two"
+      },
+      {
+        "id": "http://example.com/base#three"
+      },
+      {
+        "id": "http://example.com/four#five",
+      },
+      {
+        "id": "http://example.com/acid#six",
+      }
+    ]
+  }
+}
+</code></pre>
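+<p>A minimal Python sketch of the identifier resolution rules above, covering the
+prefix, fragment and parent-relative cases shown in the example.  The helper name
+<code>resolve_identifier</code> is illustrative and not part of the schema-salad API.</p>
+<pre><code>def resolve_identifier(ident, base_uri, namespaces):
+    # Namespace prefixes expand exactly as for field names.
+    if ":" in ident:
+        prefix, _, rest = ident.partition(":")
+        if prefix in namespaces:
+            return namespaces[prefix] + rest
+    # Absolute URIs are left as-is.
+    if "://" in ident:
+        return ident
+    path, _, frag = base_uri.partition("#")
+    if ident.startswith("#"):
+        # URI relative fragment: replace the fragment of the base URI.
+        return path + ident
+    if frag:
+        # Parent relative fragment: extend the existing fragment with "/".
+        return path + "#" + frag + "/" + ident
+    return path + "#" + ident
+
+ns = {"acid": "http://example.com/acid#"}
+print(resolve_identifier("one", "http://example.com/base", ns))
+# http://example.com/base#one
+print(resolve_identifier("two", "http://example.com/base#one", ns))
+# http://example.com/base#one/two
+print(resolve_identifier("#three", "http://example.com/base#one", ns))
+# http://example.com/base#three
+print(resolve_identifier("acid:six", "http://example.com/base", ns))
+# http://example.com/acid#six
+</code></pre>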
+<h2 id="Link_resolution">3.3 Link resolution</h2><p>The schema may designate one or more fields as link fields reference other
+objects.  Processing must resolve links to either absolute URIs using the
+following rules:</p>
+<ul>
+<li><p>If a reference URI is prefixed with <code>#</code> it is a relative
+fragment identifier.  It is resolved relative to the base URI by setting
+or replacing the fragment portion of the base URI.</p>
+</li>
+<li><p>If a reference URI does not contain a scheme and is not prefixed with <code>#</code>
+it is a path relative reference.  If the reference URI contains <code>#</code> in any
+position other than the first character, the reference URI must be divided
+into a path portion and a fragment portion split on the first instance of
+<code>#</code>.  The path portion is resolved relative to the base URI by the following
+rule: if the path portion of the base URI ends in a slash <code>/</code>, append the
+path portion of the reference URI to the path portion of the base URI.  If
+the path portion of the base URI does not end in a slash, replace the final
+path segment with the path portion of the reference URI.  Replace the
+fragment portion of the base URI with the fragment portion of the reference
+URI.</p>
+</li>
+<li><p>If a reference URI begins with a namespace prefix declared in <code>$namespaces</code>
+followed by a colon <code>:</code>, the prefix and colon must be replaced by the
+namespace declared in <code>$namespaces</code>.</p>
+</li>
+<li><p>If a reference URI is an absolute URI consisting of a scheme and path,
+no processing occurs.</p>
+</li>
+</ul>
+<p>Link resolution must not affect the base URI used to resolve identifiers
+and other links.</p>
+<h3 id="Link_resolution_example">3.3.1 Link resolution example</h3><p>Given the following schema:</p>
+<pre><code>{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "link",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@id"
+      }
+    }]
+  }]
+}
+</code></pre>
+<p>Process the following example:</p>
+<pre><code>{
+  "$base": "http://example.com/base",
+  "link": "http://example.com/base/zero",
+  "form": {
+    "link": "one",
+    "things": [
+      {
+        "link": "two"
+      },
+      {
+        "link": "#three",
+      },
+      {
+        "link": "four#five",
+      },
+      {
+        "link": "acid:six",
+      }
+    ]
+  }
+}
+</code></pre>
+<p>This becomes:</p>
+<pre><code>{
+  "$base": "http://example.com/base",
+  "link": "http://example.com/base/zero",
+  "form": {
+    "link": "http://example.com/one",
+    "things": [
+      {
+        "link": "http://example.com/two"
+      },
+      {
+        "link": "http://example.com/base#three"
+      },
+      {
+        "link": "http://example.com/four#five",
+      },
+      {
+        "link": "http://example.com/acid#six",
+      }
+    ]
+  }
+}
+</code></pre>
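+<p>For the fragment and path-relative cases above, the Python standard library's
+<code>urljoin</code> already behaves as described, so a sketch of link resolution only
+needs to add namespace prefix expansion.  This is a hedged illustration, not the
+schema-salad implementation:</p>
+<pre><code>from urllib.parse import urljoin
+
+def resolve_link(ref, base_uri, namespaces):
+    # Namespace prefixes are expanded before any relative resolution.
+    if ":" in ref:
+        prefix, _, rest = ref.partition(":")
+        if prefix in namespaces:
+            return namespaces[prefix] + rest
+    # urljoin implements the fragment and path-relative rules described above.
+    return urljoin(base_uri, ref)
+
+ns = {"acid": "http://example.com/acid#"}
+for ref in ["one", "two", "#three", "four#five", "acid:six"]:
+    print(resolve_link(ref, "http://example.com/base", ns))
+# http://example.com/one
+# http://example.com/two
+# http://example.com/base#three
+# http://example.com/four#five
+# http://example.com/acid#six
+</code></pre>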
+<h2 id="Vocabulary_resolution">3.4 Vocabulary resolution</h2><p>The schema may designate one or more vocabulary fields which use terms
+  defined in the vocabulary.  Processing must resolve vocabulary fields to
+  either vocabulary terms or absolute URIs by first applying the link
+  resolution rules defined above, then applying the following additional
+  rule:</p>
+<pre><code>* If a reference URI is a vocabulary field, and there is a vocabulary
+term which maps to the resolved URI, the reference must be replace with
+the vocabulary term.
+</code></pre>
+<h3 id="Vocabulary_resolution_example">3.4.1 Vocabulary resolution example</h3><p>Given the following schema:</p>
+<pre><code>{
+  "$namespaces": {
+    "acid": "http://example.com/acid#"
+  },
+  "$graph": [{
+    "name": "Colors",
+    "type": "enum",
+    "symbols": ["acid:red"]
+  },
+  {
+    "name": "ExampleType",
+    "type": "record",
+    "fields": [{
+      "name": "voc",
+      "type": "string",
+      "jsonldPredicate": {
+        "_type": "@vocab"
+      }
+    }]
+  }]
+}
+</code></pre>
+<p>Process the following example:</p>
+<pre><code>    {
+      "form": {
+        "things": [
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "http://example.com/acid#red",
+          },
+          {
+            "voc": "http://example.com/acid#blue",
+          }
+        ]
+      }
+    }
+</code></pre>
+<p>This becomes:</p>
+<pre><code>    {
+      "form": {
+        "things": [
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "red",
+          },
+          {
+            "voc": "http://example.com/acid#blue",
+          }
+        ]
+      }
+    }
+</code></pre>
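+<p>A short sketch of the additional vocabulary rule.  Prefix expansion and relative
+resolution are omitted here; they behave as in the link resolution sketch above,
+and the helper name is illustrative only:</p>
+<pre><code>def resolve_vocab(ref, vocab):
+    # vocab maps enum symbols and terms to their absolute URIs.
+    if ref in vocab:
+        return ref                   # already a vocabulary term
+    rvocab = dict((uri, term) for term, uri in vocab.items())
+    return rvocab.get(ref, ref)      # map a resolved URI back to its term
+
+vocab = {"red": "http://example.com/acid#red"}
+print(resolve_vocab("red", vocab))                           # red
+print(resolve_vocab("http://example.com/acid#red", vocab))   # red
+print(resolve_vocab("http://example.com/acid#blue", vocab))  # http://example.com/acid#blue
+</code></pre>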
+<h2 id="Import">3.5 Import</h2><p>During preprocessing traversal, an implementation must resolve <code>$import</code>
+directives.  An <code>$import</code> directive is an object consisting of exactly one
+field <code>$import</code> specifying a resource by URI string.  It is an error if there
+are additional fields in the <code>$import</code> object; such additional fields must
+be ignored.</p>
+<p>The URI string must be resolved to an absolute URI using the link
+resolution rules described previously.  Implementations must support
+loading from <code>file</code>, <code>http</code> and <code>https</code> resources.  The URI referenced by
+<code>$import</code> must be loaded and recursively preprocessed as a Salad document.
+The external imported document does not inherit the context of the
+importing document, and the default base URI for processing the imported
+document must be the URI used to retrieve the imported document.  If the
+<code>$import</code> URI includes a document fragment, the fragment must be excluded
+from the base URI used to preprocess the imported document.</p>
+<p>Once loaded and processed, the <code>$import</code> node is replaced in the document
+structure by the object or array yielded from the import operation.</p>
+<p>URIs may reference document fragments which refer to a specific object in
+the target document.  This indicates that the <code>$import</code> node must be
+replaced by only the object with the appropriate fragment identifier.</p>
+<p>It is a fatal error if an import directive refers to an external resource
+or resource fragment which does not exist or is not accessible.</p>
+<h3 id="Import_example">3.5.1 Import example</h3><p>import.yml:</p>
+<pre><code>{
+  "hello": "world"
+}
+</code></pre>
+<p>parent.yml:</p>
+<pre><code>{
+  "form": {
+    "bar": {
+      "$import": "import.yml"
+      }
+  }
+}
+</code></pre>
+<p>This becomes:</p>
+<pre><code>{
+  "form": {
+    "bar": {
+      "hello": "world"
+    }
+  }
+}
+</code></pre>
+<h2 id="Include">3.6 Include</h2><p>During preprocessing traversal, an implementation must resolve <code>$include</code>
+directives.  An <code>$include</code> directive is an object consisting of exactly one
+field <code>$include</code> specifying a URI string.  It is an error if there are
+additional fields in the <code>$include</code> object; such additional fields must be
+ignored.</p>
+<p>The URI string must be resolved to an absolute URI using the link
+resolution rules described previously.  The URI referenced by <code>$include</code> must
+be loaded as text data.  Implementations must support loading from
+<code>file</code>, <code>http</code> and <code>https</code> resources.  Implementations may transcode the
+character encoding of the text data to match that of the parent document,
+but must not interpret or parse the text document in any other way.</p>
+<p>Once loaded, the <code>$include</code> node is replaced in the document structure by a
+string containing the text data loaded from the resource.</p>
+<p>It is a fatal error if an <code>$include</code> directive refers to an external resource
+which does not exist or is not accessible.</p>
+<h3 id="Include_example">3.6.1 Include example</h3><p>parent.yml:</p>
+<pre><code>{
+  "form": {
+    "bar": {
+      "$include": "include.txt"
+      }
+  }
+}
+</code></pre>
+<p>include.txt:</p>
+<pre><code>hello world
+</code></pre>
+<p>This becomes:</p>
+<pre><code>{
+  "form": {
+    "bar": "hello world"
+  }
+}
+</code></pre>
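+<p>A minimal sketch of how a preprocessor might replace <code>$import</code> and
+<code>$include</code> nodes.  The <code>load_document</code> and <code>fetch_text</code> callables are
+placeholders for an implementation's loading machinery (schema-salad itself
+handles this in <code>schema_salad.ref_resolver</code>), and resolution of the URI
+against the base URI is omitted for brevity:</p>
+<pre><code>def preprocess(node, fetch_text, load_document):
+    if isinstance(node, dict):
+        if list(node.keys()) == ["$import"]:
+            # Replace the node with the (recursively preprocessed) target.
+            return load_document(node["$import"])
+        if list(node.keys()) == ["$include"]:
+            # Replace the node with the literal text of the resource.
+            return fetch_text(node["$include"])
+        return dict((k, preprocess(v, fetch_text, load_document))
+                    for k, v in node.items())
+    if isinstance(node, list):
+        return [preprocess(v, fetch_text, load_document) for v in node]
+    return node
+
+doc = {"form": {"bar": {"$include": "include.txt"}}}
+print(preprocess(doc, lambda uri: "hello world", lambda uri: {"hello": "world"}))
+# {'form': {'bar': 'hello world'}}
+</code></pre>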
+<h2 id="Mixin">3.7 Mixin</h2><p>During preprocessing traversal, an implementation must resolve <code>$mixin</code>
+directives.  An <code>$mixin</code> directive is an object consisting of the field
+<code>$mixin</code> specifying a resource by URI string.  If there are additional fields in
+the <code>$mixin</code> object, these fields override fields in the object which is loaded
+from the <code>$mixin</code> URI.</p>
+<p>The URI string must be resolved to an absolute URI using the link resolution
+rules described previously.  Implementations must support loading from <code>file</code>,
+<code>http</code> and <code>https</code> resources.  The URI referenced by <code>$mixin</code> must be loaded
+and recursively preprocessed as a Salad document.  The external imported
+document must inherit the context of the importing document; however, the file
+URI for processing the imported document must be the URI used to retrieve the
+imported document.  The <code>$mixin</code> URI must not include a document fragment.</p>
+<p>Once loaded and processed, the <code>$mixin</code> node is replaced in the document
+structure by the object or array yielded from the import operation.</p>
+<p>URIs may reference document fragments which refer to a specific object in
+the target document.  This indicates that the <code>$mixin</code> node must be
+replaced by only the object with the appropriate fragment identifier.</p>
+<p>It is a fatal error if a <code>$mixin</code> directive refers to an external resource
+or resource fragment which does not exist or is not accessible.</p>
+<h3 id="Mixin_example">3.7.1 Mixin example</h3><p>mixin.yml:</p>
+<pre><code>{
+  "hello": "world",
+  "carrot": "orange"
+}
+</code></pre>
+<p>parent.yml:</p>
+<pre><code>{
+  "form": {
+    "bar": {
+      "$mixin": "mixin.yml"
+      "carrot": "cake"
+      }
+  }
+}
+</code></pre>
+<p>This becomes:</p>
+<pre><code>{
+  "form": {
+    "bar": {
+      "hello": "world",
+      "carrot": "cake"
+    }
+  }
+}
+</code></pre>
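+<p>A minimal sketch of the <code>$mixin</code> merge semantics: the loaded object provides
+defaults and any sibling fields in the <code>$mixin</code> node override them.
+<code>load_document</code> is again a placeholder for the loading machinery:</p>
+<pre><code>def apply_mixin(node, load_document):
+    if isinstance(node, dict) and "$mixin" in node:
+        merged = dict(load_document(node["$mixin"]))
+        for key, value in node.items():
+            if key != "$mixin":
+                merged[key] = value      # explicit fields win over the mixin
+        return merged
+    return node
+
+bar = {"$mixin": "mixin.yml", "carrot": "cake"}
+print(apply_mixin(bar, lambda uri: {"hello": "world", "carrot": "orange"}))
+# {'hello': 'world', 'carrot': 'cake'}
+</code></pre>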
+<h2 id="Identifier_maps">3.8 Identifier maps</h2><p>The schema may designate certain fields as having a <code>mapSubject</code>.  If the
+value of the field is a JSON object, it must be transformed into an array of
+JSON objects.  Each key-value pair from the source JSON object is a list
+item, each list item must be a JSON object, and the key is
+assigned to the field specified by <code>mapSubject</code>.</p>
+<p>Fields which have <code>mapSubject</code> specified may also supply a <code>mapPredicate</code>.
+If the value of a map item is not a JSON object, the item is transformed to a
+JSON object with the key assigned to the field specified by <code>mapSubject</code> and
+the value assigned to the field specified by <code>mapPredicate</code>.</p>
+<h3 id="Identifier_map_example">3.8.1 Identifier map example</h3><p>Given the following schema:</p>
+<pre><code>{
+  "$graph": [{
+    "name": "MappedType",
+    "type": "record",
+    "documentRoot": true,
+    "fields": [{
+      "name": "mapped",
+      "type": {
+        "type": "array",
+        "items": "ExampleRecord"
+      },
+      "jsonldPredicate": {
+        "mapSubject": "key",
+        "mapPredicate": "value"
+      }
+    }],
+  },
+  {
+    "name": "ExampleRecord",
+    "type": "record",
+    "fields": [{
+      "name": "key",
+      "type": "string"
+      }, {
+      "name": "value",
+      "type": "string"
+      }
+    ]
+  }]
+}
+</code></pre>
+<p>Process the following example:</p>
+<pre><code>{
+  "mapped": {
+    "shaggy": {
+      "value": "scooby"
+    },
+    "fred": "daphne"
+  }
+}
+</code></pre>
+<p>This becomes:</p>
+<pre><code>{
+    "mapped": [
+        {
+            "value": "daphne",
+            "key": "fred"
+        },
+        {
+            "value": "scooby",
+            "key": "shaggy"
+        }
+    ]
+}
+</code></pre>
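+<p>The identifier map transformation itself is mechanical; the following is a
+hedged sketch (<code>expand_idmap</code> is an illustrative name, not part of the
+schema-salad API):</p>
+<pre><code>def expand_idmap(value, map_subject, map_predicate=None):
+    # Turn {"key": item, ...} into [{map_subject: "key", ...}, ...]; items
+    # that are not objects are first wrapped using map_predicate.
+    if not isinstance(value, dict):
+        return value
+    items = []
+    for key, item in value.items():
+        if not isinstance(item, dict):
+            item = {map_predicate: item}
+        item = dict(item)
+        item[map_subject] = key
+        items.append(item)
+    return items
+
+mapped = {"shaggy": {"value": "scooby"}, "fred": "daphne"}
+print(expand_idmap(mapped, "key", "value"))
+# [{'value': 'scooby', 'key': 'shaggy'}, {'value': 'daphne', 'key': 'fred'}]
+</code></pre>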
+<h2 id="Domain_Specific_Language_for_types">3.9 Domain Specific Language for types</h2><p>Fields may be tagged <code>typeDSL: true</code>.  If so, the field is expanded using the
+following micro-DSL for schema salad types:</p>
+<ul>
+<li>If the type ends with a question mark <code>?</code> it is expanded to a union with <code>null</code></li>
+<li>If the type ends with square brackets <code>[]</code> it is expanded to an array with items of the preceding type symbol</li>
+<li>The type may end with <code>[]?</code> to indicate an optional array.</li>
+<li>Identifier resolution is applied after type DSL expansion.</li>
+</ul>
+<h3 id="Type_DSL_example">3.9.1 Type DSL example</h3><p>Given the following schema:</p>
+<pre><code>{
+  "$graph": [
+  {"$import": "metaschema_base.yml"},
+  {
+    "name": "TypeDSLExample",
+    "type": "record",
+    "documentRoot": true,
+    "fields": [{
+      "name": "extype",
+      "type": "string",
+      "jsonldPredicate": {
+        _type: "@vocab",
+        "typeDSL": true
+      }
+    }]
+  }]
+}
+</code></pre>
+<p>Process the following example:</p>
+<pre><code>[{
+  "extype": "string"
+}, {
+  "extype": "string?"
+}, {
+  "extype": "string[]"
+}, {
+  "extype": "string[]?"
+}]
+</code></pre>
+<p>This becomes:</p>
+<pre><code>[
+    {
+        "extype": "string"
+    }, 
+    {
+        "extype": [
+            "null", 
+            "string"
+        ]
+    }, 
+    {
+        "extype": {
+            "type": "array", 
+            "items": "string"
+        }
+    }, 
+    {
+        "extype": [
+            "null", 
+            {
+                "type": "array", 
+                "items": "string"
+            }
+        ]
+    }
+]
+</code></pre>
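+<p>The type DSL expansion can be expressed with a single regular expression,
+essentially the same pattern used by the generated loaders later in this patch.
+<code>expand_type_dsl</code> is an illustrative helper, not the schema-salad API:</p>
+<pre><code>import re
+
+_TYPE_DSL = re.compile(r"^([^\[\?]+)(\[\])?(\?)?$")
+
+def expand_type_dsl(t):
+    m = _TYPE_DSL.match(t)
+    if not m:
+        return t
+    expanded = m.group(1)
+    if m.group(2):
+        expanded = {"type": "array", "items": expanded}   # trailing []
+    if m.group(3):
+        expanded = ["null", expanded]                      # trailing ?
+    return expanded
+
+for t in ["string", "string?", "string[]", "string[]?"]:
+    print(expand_type_dsl(t))
+# string
+# ['null', 'string']
+# {'type': 'array', 'items': 'string'}
+# ['null', {'type': 'array', 'items': 'string'}]
+</code></pre>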
+<h1 id="Link_validation">4. Link validation</h1><p>Once a document has been preprocessed, an implementation may validate
+links.  The link validation traversal may visit fields which the schema
+designates as link fields and check that each URI references an existing
+object in the current document, an imported document, file system, or
+network resource.  Failure to validate links may be a fatal error.  Link
+validation behavior for individual fields may be modified by <code>identity</code> and
+<code>noLinkCheck</code> in the <code>jsonldPredicate</code> section of the field schema.</p>
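+<p>A minimal sketch of a link check against the identifiers collected during
+preprocessing.  The real traversal also accepts file system and network
+resources and honours <code>identity</code> and <code>noLinkCheck</code>; the helper below is
+illustrative only:</p>
+<pre><code>def check_links(node, identifiers, link_fields):
+    # identifiers: absolute URIs of objects in the preprocessed document.
+    # link_fields: field names the schema designates as link fields.
+    errors = []
+    if isinstance(node, dict):
+        for key, value in node.items():
+            if key in link_fields and isinstance(value, str) and value not in identifiers:
+                errors.append("%s does not reference a known object" % value)
+            errors.extend(check_links(value, identifiers, link_fields))
+    elif isinstance(node, list):
+        for value in node:
+            errors.extend(check_links(value, identifiers, link_fields))
+    return errors
+
+doc = {"id": "#a", "link": "#b"}
+print(check_links(doc, {"#a"}, {"link"}))
+# ['#b does not reference a known object']
+</code></pre>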
+<h1 id="Schema">5. Schema</h1><h2 id="SaladRecordSchema">5.1 SaladRecordSchema</h2><h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>name</code></td><td><a href="#PrimitiveType">string</a></td><td>True</td><td><p>The identifier for this type</p>
+</td></tr><tr><td><code>type</code></td><td>Record_symbol</td><td>True</td><td><p>Must be <code>record</code></p>
+</td></tr><tr><td><code>fields</code></td><td>array<<a href="#SaladRecordField">SaladRecordField</a>></td><td>False</td><td><p>Defines the fields of the record.</p>
+</td></tr><tr><td><code>doc</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>A documentation string for this type, or an array of strings which should be concatenated.</p>
+</td></tr><tr><td><code>docParent</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for this type should appear in a subsection under <code>docParent</code>.</p>
+</td></tr><tr><td><code>docChild</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for <code>docChild</code> should appear in a subsection under this type.</p>
+</td></tr><tr><td><code>docAfter</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for this type should appear after the <code>docAfter</code> section at the same
+level.</p>
+</td></tr><tr><td><code>jsonldPredicate</code></td><td><a href="#PrimitiveType">string</a> | <a href="#JsonldPredicate">JsonldPredicate</a></td><td>False</td><td><p>Annotate this type with linked data context.</p>
+</td></tr><tr><td><code>documentRoot</code></td><td><a href="#PrimitiveType">boolean</a></td><td>False</td><td><p>If true, indicates that the type is valid at the document root.  At
+least one type in a schema must be tagged with <code>documentRoot: true</code>.</p>
+</td></tr><tr><td><code>abstract</code></td><td><a href="#PrimitiveType">boolean</a></td><td>False</td><td><p>If true, this record is abstract and may be used as a base for other
+records, but is not valid on its own.</p>
+</td></tr><tr><td><code>extends</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>Indicates that this record inherits fields from one or more base records.</p>
+</td></tr><tr><td><code>specialize</code></td><td>array<<a href="#SpecializeDef">SpecializeDef</a>></td><td>False</td><td><p>Only applies if <code>extends</code> is declared.  Apply type specialization using the
+base record as a template.  For each field inherited from the base
+record, replace any instance of the type <code>specializeFrom</code> with
+<code>specializeTo</code>.</p>
+</td></tr></table><h3 id="SaladRecordField">5.1.1 SaladRecordField</h3><p>A field of a record.</p>
+<h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>name</code></td><td><a href="#PrimitiveType">string</a></td><td>True</td><td><p>The name of the field</p>
+</td></tr><tr><td><code>type</code></td><td><a href="#PrimitiveType">PrimitiveType</a> | <a href="#RecordSchema">RecordSchema</a> | <a href="#EnumSchema">EnumSchema</a> | <a href="#ArraySchema">ArraySchema</a> | <a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">PrimitiveType</a> | <a href="#RecordSchema">RecordSchema</a> | <a href="#EnumSchema">EnumSchema</a> | <a href="#ArraySchema">ArraySchema</a> | <a href="#PrimitiveType">string</a>></td><td>True</td><td><p>T [...]
+</td></tr><tr><td><code>doc</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>A documentation string for this field</p>
+</td></tr><tr><td><code>jsonldPredicate</code></td><td><a href="#PrimitiveType">string</a> | <a href="#JsonldPredicate">JsonldPredicate</a></td><td>False</td><td><p>Annotate this type with linked data context.</p>
+</td></tr></table><h4 id="PrimitiveType">5.1.1.1 PrimitiveType</h4><p>Salad data types are based on Avro schema declarations.  Refer to the
+<a href="https://avro.apache.org/docs/current/spec.html#schemas">Avro schema declaration documentation</a> for
+detailed information.</p>
+<h3>Symbols</h3><table class="table table-striped"><tr><th>symbol</th><th>description</th></tr><tr><td><code>null</code></td><td> no value</td></tr><tr><td><code>boolean</code></td><td> a binary value</td></tr><tr><td><code>int</code></td><td> 32-bit signed integer</td></tr><tr><td><code>long</code></td><td> 64-bit signed integer</td></tr><tr><td><code>float</code></td><td> single precision (32-bit) IEEE 754 floating-point number</td></tr><tr><td><code>double</code></td><td> double preci [...]
+<h3>Symbols</h3><table class="table table-striped"><tr><th>symbol</th><th>description</th></tr><tr><td><code>Any</code></td><td></td></tr></table><h4 id="RecordSchema">5.1.1.3 RecordSchema</h4><h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>type</code></td><td>Record_symbol</td><td>True</td><td><p>Must be <code>record</code></p>
+</td></tr><tr><td><code>fields</code></td><td>array<<a href="#RecordField">RecordField</a>></td><td>False</td><td><p>Defines the fields of the record.</p>
+</td></tr></table><h4 id="RecordField">5.1.1.4 RecordField</h4><p>A field of a record.</p>
+<h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>name</code></td><td><a href="#PrimitiveType">string</a></td><td>True</td><td><p>The name of the field</p>
+</td></tr><tr><td><code>type</code></td><td><a href="#PrimitiveType">PrimitiveType</a> | <a href="#RecordSchema">RecordSchema</a> | <a href="#EnumSchema">EnumSchema</a> | <a href="#ArraySchema">ArraySchema</a> | <a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">PrimitiveType</a> | <a href="#RecordSchema">RecordSchema</a> | <a href="#EnumSchema">EnumSchema</a> | <a href="#ArraySchema">ArraySchema</a> | <a href="#PrimitiveType">string</a>></td><td>True</td><td><p>T [...]
+</td></tr><tr><td><code>doc</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>A documentation string for this field</p>
+</td></tr></table><h5 id="EnumSchema">5.1.1.4.1 EnumSchema</h5><p>Define an enumerated type.</p>
+<h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>symbols</code></td><td>array<<a href="#PrimitiveType">string</a>></td><td>True</td><td><p>Defines the set of valid symbols.</p>
+</td></tr><tr><td><code>type</code></td><td>Enum_symbol</td><td>True</td><td><p>Must be <code>enum</code></p>
+</td></tr></table><h5 id="ArraySchema">5.1.1.4.2 ArraySchema</h5><h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>items</code></td><td><a href="#PrimitiveType">PrimitiveType</a> | <a href="#RecordSchema">RecordSchema</a> | <a href="#EnumSchema">EnumSchema</a> | <a href="#ArraySchema">ArraySchema</a> | <a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">PrimitiveType</a> | <a href=" [...]
+</td></tr><tr><td><code>type</code></td><td>Array_symbol</td><td>True</td><td><p>Must be <code>array</code></p>
+</td></tr></table><h4 id="JsonldPredicate">5.1.1.5 JsonldPredicate</h4><p>Attached to a record field to define how the parent record field is handled for
+URI resolution and JSON-LD context generation.</p>
+<h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>_id</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>The predicate URI that this field corresponds to.
+Corresponds to JSON-LD <code>@id</code> directive.</p>
+</td></tr><tr><td><code>_type</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>The context type hint, corresponds to JSON-LD <code>@type</code> directive.</p>
+<ul>
+<li><p>If the value of this field is <code>@id</code> and <code>identity</code> is false or
+unspecified, the parent field must be resolved using the link
+resolution rules.  If <code>identity</code> is true, the parent field must be
+resolved using the identifier expansion rules.</p>
+</li>
+<li><p>If the value of this field is <code>@vocab</code>, the parent field must be
+resolved using the vocabulary resolution rules.</p>
+</li>
+</ul>
+</td></tr><tr><td><code>_container</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Structure hint, corresponds to JSON-LD <code>@container</code> directive.</p>
+</td></tr><tr><td><code>identity</code></td><td><a href="#PrimitiveType">boolean</a></td><td>False</td><td><p>If true and <code>_type</code> is <code>@id</code> this indicates that the parent field must
+be resolved according to identity resolution rules instead of link
+resolution rules.  In addition, the field value is considered an
+assertion that the linked value exists; absence of an object in the loaded document
+with the URI is not an error.</p>
+</td></tr><tr><td><code>noLinkCheck</code></td><td><a href="#PrimitiveType">boolean</a></td><td>False</td><td><p>If true, this indicates that link validation traversal must stop at
+this field.  This field (if it is a URI) or any fields under it (if it
+is an object or array) are not subject to link checking.</p>
+</td></tr><tr><td><code>mapSubject</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>If the value of the field is a JSON object, it must be transformed
+into an array of JSON objects, where each key-value pair from the
+source JSON object is a list item, the list items must be JSON objects,
+and the key is assigned to the field specified by <code>mapSubject</code>.</p>
+</td></tr><tr><td><code>mapPredicate</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Only applies if <code>mapSubject</code> is also provided.  If the value of the
+field is a JSON object, it is transformed as described in <code>mapSubject</code>,
+with the addition that when the value of a map item is not an object,
+the item is transformed to a JSON object with the key assigned to the
+field specified by <code>mapSubject</code> and the value assigned to the field
+specified by <code>mapPredicate</code>.</p>
+</td></tr><tr><td><code>refScope</code></td><td><a href="#PrimitiveType">int</a></td><td>False</td><td><p>If the field contains a relative reference, it must be resolved by
+searching for valid document references in each successive parent scope
+in the document fragment.  For example, a reference of <code>foo</code> in the
+context <code>#foo/bar/baz</code> will first check for the existence of
+<code>#foo/bar/baz/foo</code>, followed by <code>#foo/bar/foo</code>, then <code>#foo/foo</code> and
+then finally <code>#foo</code>.  The first valid URI in the search order shall be
+used as the fully resolved value of the identifier.  The value of the
+refScope field is the specified number of levels from the containing
+identifier scope before starting the search, so if <code>refScope: 2</code> then
+"baz" and "bar" must be stripped to get the base <code>#foo</code> and search
+<code>#foo/foo</code> and then <code>#foo</code>.  The last scope searched must be the top
+level scope before determining if the identifier cannot be resolved.</p>
+</td></tr><tr><td><code>typeDSL</code></td><td><a href="#PrimitiveType">boolean</a></td><td>False</td><td><p>Field must be expanded based on the Schema Salad type DSL.</p>
+</td></tr></table><h3 id="SpecializeDef">5.1.2 SpecializeDef</h3><h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>specializeFrom</code></td><td><a href="#PrimitiveType">string</a></td><td>True</td><td><p>The data type to be replaced</p>
+</td></tr><tr><td><code>specializeTo</code></td><td><a href="#PrimitiveType">string</a></td><td>True</td><td><p>The new data type to replace with</p>
+</td></tr></table><h2 id="SaladEnumSchema">5.2 SaladEnumSchema</h2><p>Define an enumerated type.</p>
+<h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>symbols</code></td><td>array<<a href="#PrimitiveType">string</a>></td><td>True</td><td><p>Defines the set of valid symbols.</p>
+</td></tr><tr><td><code>type</code></td><td>Enum_symbol</td><td>True</td><td><p>Must be <code>enum</code></p>
+</td></tr><tr><td><code>doc</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>A documentation string for this type, or an array of strings which should be concatenated.</p>
+</td></tr><tr><td><code>docParent</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for this type should appear in a subsection under <code>docParent</code>.</p>
+</td></tr><tr><td><code>docChild</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for <code>docChild</code> should appear in a subsection under this type.</p>
+</td></tr><tr><td><code>docAfter</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for this type should appear after the <code>docAfter</code> section at the same
+level.</p>
+</td></tr><tr><td><code>jsonldPredicate</code></td><td><a href="#PrimitiveType">string</a> | <a href="#JsonldPredicate">JsonldPredicate</a></td><td>False</td><td><p>Annotate this type with linked data context.</p>
+</td></tr><tr><td><code>documentRoot</code></td><td><a href="#PrimitiveType">boolean</a></td><td>False</td><td><p>If true, indicates that the type is valid at the document root.  At
+least one type in a schema must be tagged with <code>documentRoot: true</code>.</p>
+</td></tr><tr><td><code>extends</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>Indicates that this enum inherits symbols from a base enum.</p>
+</td></tr></table><h2 id="Documentation">5.3 Documentation</h2><p>A documentation section.  This type exists to facilitate self-documenting
+schemas but has no role in formal validation.</p>
+<h3>Fields</h3><table class="table table-striped"><tr><th>field</th><th>type</th><th>required</th><th>description</th></tr><tr><td><code>name</code></td><td><a href="#PrimitiveType">string</a></td><td>True</td><td><p>The identifier for this type</p>
+</td></tr><tr><td><code>type</code></td><td>Documentation_symbol</td><td>True</td><td><p>Must be <code>documentation</code></p>
+</td></tr><tr><td><code>doc</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>A documentation string for this type, or an array of strings which should be concatenated.</p>
+</td></tr><tr><td><code>docParent</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for this type should appear in a subsection under <code>docParent</code>.</p>
+</td></tr><tr><td><code>docChild</code></td><td><a href="#PrimitiveType">string</a> | array<<a href="#PrimitiveType">string</a>></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for <code>docChild</code> should appear in a subsection under this type.</p>
+</td></tr><tr><td><code>docAfter</code></td><td><a href="#PrimitiveType">string</a></td><td>False</td><td><p>Hint to indicate that during documentation generation, documentation
+for this type should appear after the <code>docAfter</code> section at the same
+level.</p>
+</td></tr></table></div>
+    </div>
+    </div>
+    </body>
+    </html>
\ No newline at end of file
diff --git a/schema_salad/metaschema/metaschema.yml b/schema_salad/metaschema/metaschema.yml
index 28b9e66..4ab9959 100644
--- a/schema_salad/metaschema/metaschema.yml
+++ b/schema_salad/metaschema/metaschema.yml
@@ -290,7 +290,7 @@ $graph:
 - name: SaladEnumSchema
   docParent: "#Schema"
   type: record
-  extends: [EnumSchema, SchemaDefinedType]
+  extends: [NamedType, EnumSchema, SchemaDefinedType]
   documentRoot: true
   doc: |
     Define an enumerated type.
diff --git a/schema_salad/metaschema/metaschema.yml b/schema_salad/metaschema/metaschema2.yml
similarity index 95%
copy from schema_salad/metaschema/metaschema.yml
copy to schema_salad/metaschema/metaschema2.yml
index 28b9e66..c928928 100644
--- a/schema_salad/metaschema/metaschema.yml
+++ b/schema_salad/metaschema/metaschema2.yml
@@ -18,8 +18,6 @@ $graph:
     - $import: link_res.yml
     - $import: vocab_res.yml
     - $include: import_include.md
-    - $import: map_res.yml
-    - $import: typedsl_res.yml
 
 - name: "Link_Validation"
   type: documentation
@@ -156,24 +154,16 @@ $graph:
 - name: NamedType
   type: record
   abstract: true
-  docParent: "#Schema"
   fields:
     - name: name
       type: string
       jsonldPredicate: "@id"
       doc: "The identifier for this type"
-    - name: inVocab
-      type: boolean?
-      doc: |
-        By default or if "true", include the short name of this type in the
-        vocabulary (the keys of the JSON-LD context).  If false, do not include
-        the short name in the vocabulary.
 
 
 - name: DocType
   type: record
   abstract: true
-  docParent: "#Schema"
   fields:
     - name: doc
       type:
@@ -250,7 +240,6 @@ $graph:
 
 
 - name: SaladRecordSchema
-  docParent: "#Schema"
   type: record
   extends: [NamedType, RecordSchema, SchemaDefinedType]
   documentRoot: true
@@ -288,7 +277,6 @@ $graph:
         mapPredicate: specializeTo
 
 - name: SaladEnumSchema
-  docParent: "#Schema"
   type: record
   extends: [EnumSchema, SchemaDefinedType]
   documentRoot: true
@@ -309,15 +297,14 @@ $graph:
 
 - name: Documentation
   type: record
-  docParent: "#Schema"
   extends: [NamedType, DocType]
   documentRoot: true
   doc: |
     A documentation section.  This type exists to facilitate self-documenting
     schemas but has no role in formal validation.
   fields:
-    - name: type
-      doc: "Must be `documentation`"
+    type:
+      doc: {foo: "Must be `documentation`"}
       type:
         name: Documentation_symbol
         type: enum
diff --git a/schema_salad/python_codegen.py b/schema_salad/python_codegen.py
new file mode 100644
index 0000000..34e28b4
--- /dev/null
+++ b/schema_salad/python_codegen.py
@@ -0,0 +1,225 @@
+import json
+import sys
+import six
+from six.moves import urllib, cStringIO
+import collections
+import logging
+from pkg_resources import resource_stream
+from .utils import aslist, flatten
+from . import schema
+from .codegen_base import TypeDef, CodeGenBase, shortname
+from typing import List, Text, Dict, Union, IO, Any
+
+class PythonCodeGen(CodeGenBase):
+    def __init__(self, out):
+        # type: (IO[str]) -> None
+        super(PythonCodeGen, self).__init__()
+        self.out = out
+        self.current_class_is_abstract = False
+
+    def safe_name(self, n):
+        # type: (Text) -> Text
+
+        avn = schema.avro_name(n)
+        if avn in ("class", "in"):
+            # reserved words
+            avn = avn+"_"
+        return avn
+
+
+    def prologue(self):
+        # type: () -> None
+
+        self.out.write("""#
+# This file was autogenerated using schema-salad-tool --codegen=python
+#
+""")
+
+        rs = resource_stream(__name__, 'sourceline.py')
+        self.out.write(rs.read().decode("UTF-8"))
+        rs.close()
+        self.out.write("\n\n")
+
+        rs = resource_stream(__name__, 'python_codegen_support.py')
+        self.out.write(rs.read().decode("UTF-8"))
+        rs.close()
+        self.out.write("\n\n")
+
+        for p in six.itervalues(self.prims):
+            self.declare_type(p)
+
+
+    def begin_class(self, classname, extends, doc, abstract):
+        # type: (Text, List[Text], Text, bool) -> None
+
+        classname = self.safe_name(classname)
+
+        if extends:
+            ext = ", ".join(self.safe_name(e) for e in extends)
+        else:
+            ext = "Savable"
+
+        self.out.write("class %s(%s):\n" % (self.safe_name(classname), ext))
+
+        if doc:
+            self.out.write('    """\n')
+            self.out.write(str(doc))
+            self.out.write('\n    """\n')
+
+        self.serializer = cStringIO()
+
+        self.current_class_is_abstract = abstract
+        if self.current_class_is_abstract:
+            self.out.write("    pass\n\n")
+            return
+
+        self.out.write(
+            """    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+        doc = copy.copy(_doc)
+        if hasattr(_doc, 'lc'):
+            doc.lc.data = _doc.lc.data
+            doc.lc.filename = _doc.lc.filename
+        errors = []
+        #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+""")
+
+        self.serializer.write("""
+    def save(self):
+        r = {}
+""")
+
+    def end_class(self, classname):
+        # type: (Text) -> None
+
+        if self.current_class_is_abstract:
+            return
+
+        self.out.write("""
+        if errors:
+            raise ValidationException(\"Trying '%s'\\n\"+\"\\n\".join(errors))
+""" % self.safe_name(classname))
+
+        self.serializer.write("        return r\n")
+        self.out.write(self.serializer.getvalue())
+        self.out.write("\n\n")
+
+    prims = {
+        u"http://www.w3.org/2001/XMLSchema#string": TypeDef("strtype", "_PrimitiveLoader((str, six.text_type))"),
+        u"http://www.w3.org/2001/XMLSchema#int": TypeDef("inttype", "_PrimitiveLoader(int)"),
+        u"http://www.w3.org/2001/XMLSchema#long": TypeDef("inttype", "_PrimitiveLoader(int)"),
+        u"http://www.w3.org/2001/XMLSchema#float": TypeDef("floattype", "_PrimitiveLoader(float)"),
+        u"http://www.w3.org/2001/XMLSchema#double": TypeDef("floattype", "_PrimitiveLoader(float)"),
+        u"http://www.w3.org/2001/XMLSchema#boolean": TypeDef("booltype", "_PrimitiveLoader(bool)"),
+        u"https://w3id.org/cwl/salad#null": TypeDef("None_type", "_PrimitiveLoader(type(None))"),
+        u"https://w3id.org/cwl/salad#Any": TypeDef("Any_type", "_AnyLoader()")
+    }
+
+    def type_loader(self, t):
+        # type: (Union[List[Any], Dict[Text, Any], Text]) -> TypeDef
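+        # Recursively construct a loader expression for the schema type `t`:
+        # a list becomes a union of its alternatives, array/enum/record
+        # definitions wrap or reference their component loaders, primitive
+        # URIs use the predefined loaders above, and any other name refers
+        # to a previously collected named type.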
+
+        if isinstance(t, list):
+            sub = [self.type_loader(i) for i in t]
+            return self.declare_type(TypeDef("union_of_%s" % "_or_".join(s.name for s in sub), "_UnionLoader((%s,))" % (", ".join(s.name for s in sub))))
+        if isinstance(t, dict):
+            if t["type"] in ("array", "https://w3id.org/cwl/salad#array"):
+                i = self.type_loader(t["items"])
+                return self.declare_type(TypeDef("array_of_%s" % i.name, "_ArrayLoader(%s)" % i.name))
+            elif t["type"] in ("enum", "https://w3id.org/cwl/salad#enum"):
+                for sym in t["symbols"]:
+                    self.add_vocab(shortname(sym), sym)
+                return self.declare_type(TypeDef(self.safe_name(t["name"])+"Loader", '_EnumLoader(("%s",))' % (
+                    '", "'.join(self.safe_name(sym) for sym in t["symbols"]))))
+            elif t["type"] in ("record", "https://w3id.org/cwl/salad#record"):
+                return self.declare_type(TypeDef(self.safe_name(t["name"])+"Loader", "_RecordLoader(%s)" % self.safe_name(t["name"])))
+            else:
+                raise Exception("wft %s" % t["type"])
+        if t in self.prims:
+            return self.prims[t]
+        return self.collected_types[self.safe_name(t)+"Loader"]
+
+    def declare_id_field(self, name, fieldtype, doc):
+        # type: (Text, TypeDef, Text) -> None
+
+        if self.current_class_is_abstract:
+            return
+
+        self.declare_field(name, fieldtype, doc, True)
+        self.out.write("""
+        if self.{safename} is None:
+            if docRoot is not None:
+                self.{safename} = docRoot
+            else:
+                raise ValidationException("Missing {fieldname}")
+        baseuri = self.{safename}
+""".
+                       format(safename=self.safe_name(name),
+                              fieldname=shortname(name)))
+
+    def declare_field(self, name, fieldtype, doc, optional):
+        # type: (Text, TypeDef, Text, bool) -> None
+
+        if self.current_class_is_abstract:
+            return
+
+        if optional:
+            self.out.write("        if '{fieldname}' in doc:\n".format(fieldname=shortname(name)))
+            spc = "    "
+        else:
+            spc = ""
+        self.out.write("""{spc}        try:
+{spc}            self.{safename} = load_field(doc.get('{fieldname}'), {fieldtype}, baseuri, loadingOptions)
+{spc}        except ValidationException as e:
+{spc}            errors.append(SourceLine(doc, '{fieldname}', str).makeError(\"the `{fieldname}` field is not valid because:\\n\"+str(e)))
+""".
+                       format(safename=self.safe_name(name),
+                              fieldname=shortname(name),
+                              fieldtype=fieldtype.name,
+                              spc=spc))
+        if optional:
+            self.out.write("""        else:
+            self.{safename} = None
+""".format(safename=self.safe_name(name)))
+
+        self.out.write("\n")
+
+        self.serializer.write("        if self.%s is not None:\n            r['%s'] = save(self.%s)\n" % (self.safe_name(name), shortname(name), self.safe_name(name)))
+
+    def uri_loader(self, inner, scoped_id, vocab_term, refScope):
+        # type: (TypeDef, bool, bool, Union[int, None]) -> TypeDef
+        return self.declare_type(TypeDef("uri_%s_%s_%s_%s" % (inner.name, scoped_id, vocab_term, refScope),
+                                         "_URILoader(%s, %s, %s, %s)" % (inner.name, scoped_id, vocab_term, refScope)))
+
+    def idmap_loader(self, field, inner, mapSubject, mapPredicate):
+        # type: (Text, TypeDef, Text, Union[Text, None]) -> TypeDef
+        return self.declare_type(TypeDef("idmap_%s_%s" % (self.safe_name(field), inner.name),
+                                         "_IdMapLoader(%s, '%s', '%s')" % (inner.name, mapSubject, mapPredicate)))
+
+    def typedsl_loader(self, inner, refScope):
+        # type: (TypeDef, Union[int, None]) -> TypeDef
+        return self.declare_type(TypeDef("typedsl_%s_%s" % (inner.name, refScope),
+                                         "_TypeDSLLoader(%s, %s)" % (inner.name, refScope)))
+
+    def epilogue(self, rootLoader):
+        # type: (TypeDef) -> None
+        self.out.write("_vocab = {\n")
+        for k in sorted(self.vocab.keys()):
+            self.out.write("    \"%s\": \"%s\",\n" % (k, self.vocab[k]))
+        self.out.write("}\n")
+
+        self.out.write("_rvocab = {\n")
+        for k in sorted(self.vocab.keys()):
+            self.out.write("    \"%s\": \"%s\",\n" % (self.vocab[k], k))
+        self.out.write("}\n\n")
+
+        for k,tv in six.iteritems(self.collected_types):
+            self.out.write("%s = %s\n" % (tv.name, tv.init))
+        self.out.write("\n\n")
+
+        self.out.write("""
+def load_document(doc, baseuri=None, loadingOptions=None):
+    if baseuri is None:
+        baseuri = file_uri(os.getcwd()) + "/"
+    if loadingOptions is None:
+        loadingOptions = LoadingOptions()
+    return _document_load(%s, doc, baseuri, loadingOptions)
+""" % rootLoader.name)
diff --git a/schema_salad/python_codegen_support.py b/schema_salad/python_codegen_support.py
new file mode 100644
index 0000000..461d0b6
--- /dev/null
+++ b/schema_salad/python_codegen_support.py
@@ -0,0 +1,383 @@
+import six
+from six.moves import urllib, StringIO
+import ruamel.yaml as yaml
+import copy
+import re
+from typing import List, Text, Dict, Union, Any, Sequence
+
+class ValidationException(Exception):
+    pass
+
+class Savable(object):
+    pass
+
+class LoadingOptions(object):
+    def __init__(self, fetcher=None, namespaces=None, fileuri=None, copyfrom=None):
+        if copyfrom is not None:
+            self.idx = copyfrom.idx
+            if fetcher is None:
+                fetcher = copyfrom.fetcher
+            if fileuri is None:
+                fileuri = copyfrom.fileuri
+        else:
+            self.idx = {}
+
+        if fetcher is None:
+            import os
+            import requests
+            from cachecontrol.wrapper import CacheControl
+            from cachecontrol.caches import FileCache
+            from schema_salad.ref_resolver import DefaultFetcher
+            if "HOME" in os.environ:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join(os.environ["HOME"], ".cache", "salad")))
+            elif "TMP" in os.environ:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join(os.environ["TMP"], ".cache", "salad")))
+            else:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join("/tmp", ".cache", "salad")))
+            self.fetcher = DefaultFetcher({}, session)
+        else:
+            self.fetcher = fetcher
+
+        self.fileuri = fileuri
+
+        self.vocab = _vocab
+        self.rvocab = _rvocab
+
+        if namespaces is not None:
+            self.vocab = self.vocab.copy()
+            self.rvocab = self.rvocab.copy()
+            for k,v in six.iteritems(namespaces):
+                self.vocab[k] = v
+                self.rvocab[v] = k
+
+def load_field(val, fieldtype, baseuri, loadingOptions):
+    if isinstance(val, dict):
+        if "$import" in val:
+            return _document_load_by_url(fieldtype, loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"]), loadingOptions)
+        elif "$include" in val:
+            val = loadingOptions.fetcher.fetch_text(loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"]))
+    return fieldtype.load(val, baseuri, loadingOptions)
+
+
+def save(val):
+    if isinstance(val, Savable):
+        return val.save()
+    if isinstance(val, list):
+        return [save(v) for v in val]
+    return val
+
+def expand_url(url,                 # type: Union[str, Text]
+               base_url,            # type: Union[str, Text]
+               loadingOptions,      # type: LoadingOptions
+               scoped_id=False,     # type: bool
+               vocab_term=False,    # type: bool
+               scoped_ref=None      # type: int
+               ):
+    # type: (...) -> Text
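+    # Summary of the branches below: "@id"/"@type" and (with vocab_term)
+    # values already in the vocabulary pass through; declared namespace
+    # prefixes are expanded; absolute http/https/file URLs and expressions
+    # starting with "$(" or "${" are kept as-is; scoped identifiers extend
+    # the base URI fragment; scoped references walk up enclosing fragment
+    # scopes; everything else is joined against the base URI.  Finally, with
+    # vocab_term, an absolute result that maps to a vocabulary term is
+    # returned as that term.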
+
+    if not isinstance(url, six.string_types):
+        return url
+
+    url = Text(url)
+
+    if url in (u"@id", u"@type"):
+        return url
+
+    if vocab_term and url in loadingOptions.vocab:
+        return url
+
+    if bool(loadingOptions.vocab) and u":" in url:
+        prefix = url.split(u":")[0]
+        if prefix in loadingOptions.vocab:
+            url = loadingOptions.vocab[prefix] + url[len(prefix) + 1:]
+
+    split = urllib.parse.urlsplit(url)
+
+    if ((bool(split.scheme) and split.scheme in [u'http', u'https', u'file']) or url.startswith(u"$(")
+        or url.startswith(u"${")):
+        pass
+    elif scoped_id and not bool(split.fragment):
+        splitbase = urllib.parse.urlsplit(base_url)
+        frg = u""
+        if bool(splitbase.fragment):
+            frg = splitbase.fragment + u"/" + split.path
+        else:
+            frg = split.path
+        pt = splitbase.path if splitbase.path != '' else "/"
+        url = urllib.parse.urlunsplit(
+            (splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
+    elif scoped_ref is not None and not bool(split.fragment):
+        splitbase = urllib.parse.urlsplit(base_url)
+        sp = splitbase.fragment.split(u"/")
+        n = scoped_ref
+        while n > 0 and len(sp) > 0:
+            sp.pop()
+            n -= 1
+        sp.append(url)
+        url = urllib.parse.urlunsplit((
+            splitbase.scheme, splitbase.netloc, splitbase.path, splitbase.query,
+            u"/".join(sp)))
+    else:
+        url = loadingOptions.fetcher.urljoin(base_url, url)
+
+    if vocab_term:
+        split = urllib.parse.urlsplit(url)
+        if bool(split.scheme):
+            if url in loadingOptions.rvocab:
+                return loadingOptions.rvocab[url]
+        else:
+            raise ValidationException("Term '%s' not in vocabulary" % url)
+
+    return url
+
+
+class _Loader(object):
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        # type: (Any, Text, LoadingOptions, Union[Text, None]) -> Any
+        pass
+
+class _AnyLoader(_Loader):
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if doc is not None:
+            return doc
+        raise ValidationException("Expected non-null")
+
+class _PrimitiveLoader(_Loader):
+    def __init__(self, tp):
+        # type: (Union[type, Sequence[type]]) -> None
+        self.tp = tp
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, self.tp):
+            raise ValidationException("Expected a %s but got %s" % (self.tp, type(doc)))
+        return doc
+
+    def __repr__(self):
+        return str(self.tp)
+
+class _ArrayLoader(_Loader):
+    def __init__(self, items):
+        # type: (_Loader) -> None
+        self.items = items
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, list):
+            raise ValidationException("Expected a list")
+        r = []
+        errors = []
+        for i in range(0, len(doc)):
+            try:
+                lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
+                if isinstance(lf, list):
+                    r.extend(lf)
+                else:
+                    r.append(lf)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, i, str).makeError(six.text_type(e)))
+        if errors:
+            raise ValidationException("\n".join(errors))
+        return r
+
+    def __repr__(self):
+        return "array<%s>" % self.items
+
+class _EnumLoader(_Loader):
+    def __init__(self, symbols):
+        # type: (Sequence[Text]) -> None
+        self.symbols = symbols
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if doc in self.symbols:
+            return doc
+        else:
+            raise ValidationException("Expected one of %s" % (self.symbols,))
+
+
+class _RecordLoader(_Loader):
+    def __init__(self, classtype):
+        # type: (type) -> None
+        self.classtype = classtype
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, dict):
+            raise ValidationException("Expected a dict")
+        return self.classtype(doc, baseuri, loadingOptions, docRoot=docRoot)
+
+    def __repr__(self):
+        return str(self.classtype)
+
+
+class _UnionLoader(_Loader):
+    def __init__(self, alternates):
+        # type: (Sequence[_Loader]) -> None
+        self.alternates = alternates
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
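+        # Try each alternate loader in order; the first one that succeeds
+        # wins, otherwise the collected failures are reported together.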
+        errors = []
+        for t in self.alternates:
+            try:
+                return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
+            except ValidationException as e:
+                errors.append("tried %s but\n%s" % (t, indent(str(e))))
+        raise ValidationException(bullets(errors, "- "))
+
+    def __repr__(self):
+        return " | ".join(str(a) for a in self.alternates)
+
+class _URILoader(_Loader):
+    def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
+        # type: (_Loader, bool, bool, Union[int, None]) -> None
+        self.inner = inner
+        self.scoped_id = scoped_id
+        self.vocab_term = vocab_term
+        self.scoped_ref = scoped_ref
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
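+        # Expand string values (and each string inside a list) to absolute
+        # URIs before handing the document to the inner loader.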
+        if isinstance(doc, list):
+            doc = [expand_url(i, baseuri, loadingOptions,
+                            self.scoped_id, self.vocab_term, self.scoped_ref) for i in doc]
+        if isinstance(doc, six.string_types):
+            doc = expand_url(doc, baseuri, loadingOptions,
+                             self.scoped_id, self.vocab_term, self.scoped_ref)
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+class _TypeDSLLoader(_Loader):
+    typeDSLregex = re.compile(u"^([^[?]+)(\[\])?(\?)?$")
+
+    def __init__(self, inner, refScope):
+        # type: (_Loader, Union[int, None]) -> None
+        self.inner = inner
+        self.refScope = refScope
+
+    def resolve(self, doc, baseuri, loadingOptions):
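+        # Expand the type DSL shorthand: "Foo[]" becomes an array of Foo and
+        # "Foo?" becomes the union ["null", Foo] (or ["null", array of Foo]).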
+        m = self.typeDSLregex.match(doc)
+        if m:
+            first = expand_url(m.group(1), baseuri, loadingOptions, False, True, self.refScope)
+            second = third = None
+            if bool(m.group(2)):
+                second = {"type": "array", "items": first}
+                #second = CommentedMap((("type", "array"),
+                #                       ("items", first)))
+                #second.lc.add_kv_line_col("type", lc)
+                #second.lc.add_kv_line_col("items", lc)
+                #second.lc.filename = filename
+            if bool(m.group(3)):
+                third = [u"null", second or first]
+                #third = CommentedSeq([u"null", second or first])
+                #third.lc.add_kv_line_col(0, lc)
+                #third.lc.add_kv_line_col(1, lc)
+                #third.lc.filename = filename
+            doc = third or second or first
+        return doc
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if isinstance(doc, list):
+            r = []
+            for d in doc:
+                if isinstance(d, six.string_types):
+                    resolved = self.resolve(d, baseuri, loadingOptions)
+                    if isinstance(resolved, list):
+                        for i in resolved:
+                            if i not in r:
+                                r.append(i)
+                    else:
+                        if resolved not in r:
+                            r.append(resolved)
+                else:
+                    r.append(d)
+            doc = r
+        elif isinstance(doc, six.string_types):
+            doc = self.resolve(doc, baseuri, loadingOptions)
+
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+class _IdMapLoader(_Loader):
+    def __init__(self, inner, mapSubject, mapPredicate):
+        # type: (_Loader, Text, Union[Text, None]) -> None
+        self.inner = inner
+        self.mapSubject = mapSubject
+        self.mapPredicate = mapPredicate
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
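+        # Convert a mapping {subject: value} into a list of records, storing
+        # each key under mapSubject and wrapping non-dict values in
+        # {mapPredicate: value}.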
+        if isinstance(doc, dict):
+            r = []
+            for k in sorted(doc.keys()):
+                val = doc[k]
+                if isinstance(val, dict):
+                    v = copy.copy(val)
+                    if hasattr(val, 'lc'):
+                        v.lc.data = val.lc.data
+                        v.lc.filename = val.lc.filename
+                else:
+                    if self.mapPredicate:
+                        v = {self.mapPredicate: val}
+                    else:
+                        raise ValidationException("No mapPredicate")
+                v[self.mapSubject] = k
+                r.append(v)
+            doc = r
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+def _document_load(loader, doc, baseuri, loadingOptions):
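+    # Dispatch on the document type: strings are treated as references to
+    # fetch, dicts honour the $namespaces/$base/$graph directives, and lists
+    # are loaded element-wise.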
+    if isinstance(doc, six.string_types):
+        return _document_load_by_url(loader, loadingOptions.fetcher.urljoin(baseuri, doc), loadingOptions)
+
+    if isinstance(doc, dict):
+        if "$namespaces" in doc:
+            loadingOptions = LoadingOptions(copyfrom=loadingOptions, namespaces=doc["$namespaces"])
+
+        if "$base" in doc:
+            baseuri = doc["$base"]
+
+        if "$graph" in doc:
+            return loader.load(doc["$graph"], baseuri, loadingOptions)
+        else:
+            return loader.load(doc, baseuri, loadingOptions, docRoot=baseuri)
+
+    if isinstance(doc, list):
+        return loader.load(doc, baseuri, loadingOptions)
+
+    raise ValidationException()
+
+
+def _document_load_by_url(loader, url, loadingOptions):
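+    # Return the cached parse from loadingOptions.idx when available;
+    # otherwise fetch the text, parse it as YAML with line/column tracking,
+    # cache it, and load it with `url` as the new file URI.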
+    if url in loadingOptions.idx:
+        return _document_load(loader, loadingOptions.idx[url], url, loadingOptions)
+
+    text = loadingOptions.fetcher.fetch_text(url)
+    if isinstance(text, bytes):
+        textIO = StringIO(text.decode('utf-8'))
+    else:
+        textIO = StringIO(text)
+    textIO.name = url    # type: ignore
+    result = yaml.round_trip_load(textIO)
+    add_lc_filename(result, url)
+
+    loadingOptions.idx[url] = result
+
+    loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=url)
+
+    return _document_load(loader, result, url, loadingOptions)
+
+def file_uri(path, split_frag=False):  # type: (str, bool) -> str
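+    # Convert a local filesystem path to a file:// URI, optionally keeping a
+    # "#fragment" suffix intact when split_frag is set.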
+    if path.startswith("file://"):
+        return path
+    if split_frag:
+        pathsp = path.split("#", 2)
+        frag = "#" + urllib.parse.quote(str(pathsp[1])) if len(pathsp) == 2 else ""
+        urlpath = urllib.request.pathname2url(str(pathsp[0]))
+    else:
+        urlpath = urllib.request.pathname2url(path)
+        frag = ""
+    if urlpath.startswith("//"):
+        return "file:%s%s" % (urlpath, frag)
+    else:
+        return "file://%s%s" % (urlpath, frag)
diff --git a/schema_salad/ref_resolver.py b/schema_salad/ref_resolver.py
index d5d1408..57153be 100644
--- a/schema_salad/ref_resolver.py
+++ b/schema_salad/ref_resolver.py
@@ -5,6 +5,7 @@ import json
 import hashlib
 import logging
 import collections
+from io import open
 
 import six
 from six.moves import range
@@ -40,6 +41,8 @@ DocumentType = TypeVar('DocumentType', CommentedSeq, CommentedMap)
 DocumentOrStrType = TypeVar(
     'DocumentOrStrType', CommentedSeq, CommentedMap, six.text_type)
 
+_re_drive = re.compile(r"/([a-zA-Z]):")
+
 def file_uri(path, split_frag=False):  # type: (str, bool) -> str
     if path.startswith("file://"):
         return path
@@ -117,7 +120,7 @@ class Fetcher(object):
 
 class DefaultFetcher(Fetcher):
     def __init__(self,
-                 cache,   # type: Dict[Text, Text]
+                 cache,   # type: Dict[Text, Union[Text, bool]]
                  session  # type: Optional[requests.sessions.Session]
                  ):  # type: (...) -> None
         self.cache = cache
@@ -125,8 +128,10 @@ class DefaultFetcher(Fetcher):
 
     def fetch_text(self, url):
         # type: (Text) -> Text
-        if url in self.cache:
-            return self.cache[url]
+        if url in self.cache and self.cache[url] is not True:
+            # treat "True" as a placeholder that indicates something exists but
+            # not necessarily what its contents is.
+            return cast(Text, self.cache[url])
 
         split = urllib.parse.urlsplit(url)
         scheme, path = split.scheme, split.path
@@ -140,12 +145,14 @@ class DefaultFetcher(Fetcher):
             return resp.text
         elif scheme == 'file':
             try:
-                with open(urllib.request.url2pathname(str(path))) as fp:
-                    read = fp.read()
-                if hasattr(read, "decode"):
-                    return read.decode("utf-8")
-                else:
-                    return read
+                # On Windows, url.path will be /drive:/path; on Unix systems it
+                # will be /path. Since we want drive:/path rather than
+                # /drive:/path on Windows, remove the leading /.
+                if os.path.isabs(path[1:]):  # check the path is still absolute after dropping the leading /
+                    path = path[1:]
+                with open(urllib.request.url2pathname(str(path)), encoding='utf-8') as fp:
+                    return fp.read()
+
             except (OSError, IOError) as e:
                 if e.filename == path:
                     raise RuntimeError(six.text_type(e))
@@ -167,6 +174,7 @@ class DefaultFetcher(Fetcher):
                 resp.raise_for_status()
             except Exception as e:
                 return False
+            self.cache[url] = True
             return True
         elif scheme == 'file':
             return os.path.exists(urllib.request.url2pathname(str(path)))
@@ -174,6 +182,61 @@ class DefaultFetcher(Fetcher):
             raise ValueError('Unsupported scheme in url: %s' % url)
 
     def urljoin(self, base_url, url):  # type: (Text, Text) -> Text
+        basesplit = urllib.parse.urlsplit(base_url)
+        split = urllib.parse.urlsplit(url)
+        if (basesplit.scheme and basesplit.scheme != "file" and split.scheme == "file"):
+            raise ValueError("Not resolving potential remote exploit %s from base %s" % (url, base_url))
+
+        if sys.platform == 'win32':
+            if (base_url == url):
+                return url
+            basesplit = urllib.parse.urlsplit(base_url)
+            # Note that urlsplit below may treat a drive letter such as "C:"
+            # as a URI with scheme "C".
+            split = urllib.parse.urlsplit(url)
+
+            has_drive = split.scheme and len(split.scheme) == 1
+
+            if basesplit.scheme == "file":
+                # Special handling of relative file references on Windows,
+                # as urllib does not quite handle them correctly
+
+                # netloc MIGHT appear in equivalents of UNC Strings
+                # \\server1.example.com\path as
+                # file:///server1.example.com/path
+                # https://tools.ietf.org/html/rfc8089#appendix-E.3.2
+                # (TODO: test this)
+                netloc = split.netloc or basesplit.netloc
+
+                # Check if url is a local path like "C:/Users/fred"
+                # or actually an absolute URI like http://example.com/fred
+                if has_drive:
+                    # Assume split.scheme is actually a drive, e.g. "C:"
+                    # so we'll recombine into a path
+                    path_with_drive = urllib.parse.urlunsplit((split.scheme, '', split.path,'', ''))
+                    # Compose new file:/// URI with path_with_drive
+                    # .. carrying over any #fragment (?query just in case..)
+                    return urllib.parse.urlunsplit(("file", netloc,
+                                        path_with_drive, split.query, split.fragment))
+                if (not split.scheme and not netloc and
+                    split.path and split.path.startswith("/")):
+                    # Relative - but does it have a drive?
+                    base_drive = _re_drive.match(basesplit.path)
+                    drive = _re_drive.match(split.path)
+                    if base_drive and not drive:
+                        # Keep drive letter from base_url
+                        # https://tools.ietf.org/html/rfc8089#appendix-E.2.1
+                        # e.g. urljoin("file:///D:/bar/a.txt", "/foo/b.txt") == file:///D:/foo/b.txt
+                        path_with_drive = "/%s:%s" % (base_drive.group(1), split.path)
+                        return urllib.parse.urlunsplit(("file", netloc, path_with_drive,
+                                                   split.query, split.fragment))
+
+                # else: fall-through to resolve as relative URI
+            elif has_drive:
+                # Base is http://something but url is C:/something - which urllib would wrongly
+                # resolve as an absolute path that could later be used to access local files
+                raise ValueError("Not resolving potential remote exploit %s from base %s" % (url, base_url))
+
         return urllib.parse.urljoin(base_url, url)
 
 class Loader(object):
@@ -184,7 +247,7 @@ class Loader(object):
                  idx=None,                  # type: Dict[Text, Union[CommentedMap, CommentedSeq, Text, None]]
                  cache=None,                # type: Dict[Text, Any]
                  session=None,              # type: requests.sessions.Session
-                 fetcher_constructor=None,  # type: Callable[[Dict[Text, Text], requests.sessions.Session], Fetcher]
+                 fetcher_constructor=None,  # type: Callable[[Dict[Text, Union[Text, bool]], requests.sessions.Session], Fetcher]
                  skip_schemas=None          # type: bool
                  ):
         # type: (...) -> None
@@ -237,7 +300,6 @@ class Loader(object):
         else:
             self.fetcher_constructor = DefaultFetcher
         self.fetcher = self.fetcher_constructor(self.cache, self.session)
-
         self.fetch_text = self.fetcher.fetch_text
         self.check_exists = self.fetcher.check_exists
 
@@ -277,7 +339,7 @@ class Loader(object):
 
         split = urllib.parse.urlsplit(url)
 
-        if (bool(split.scheme) or url.startswith(u"$(")
+        if ((bool(split.scheme) and split.scheme in [u'http', u'https', u'file']) or url.startswith(u"$(")
             or url.startswith(u"${")):
             pass
         elif scoped_id and not bool(split.fragment):
@@ -319,22 +381,25 @@ class Loader(object):
         if self.skip_schemas:
             return
         for sch in aslist(ns):
-            fetchurl = self.fetcher.urljoin(base_url, sch)
-            if fetchurl not in self.cache:
-                _logger.debug("Getting external schema %s", fetchurl)
-                content = self.fetch_text(fetchurl)
-                self.cache[fetchurl] = rdflib.graph.Graph()
-                for fmt in ['xml', 'turtle', 'rdfa']:
-                    try:
-                        self.cache[fetchurl].parse(data=content, format=fmt, publicID=str(fetchurl))
-                        self.graph += self.cache[fetchurl]
-                        break
-                    except xml.sax.SAXParseException:
-                        pass
-                    except TypeError:
-                        pass
-                    except BadSyntax:
-                        pass
+            try:
+                fetchurl = self.fetcher.urljoin(base_url, sch)
+                if fetchurl not in self.cache or self.cache[fetchurl] is True:
+                    _logger.debug("Getting external schema %s", fetchurl)
+                    content = self.fetch_text(fetchurl)
+                    self.cache[fetchurl] = rdflib.graph.Graph()
+                    for fmt in ['xml', 'turtle', 'rdfa']:
+                        try:
+                            self.cache[fetchurl].parse(data=content, format=fmt, publicID=str(fetchurl))
+                            self.graph += self.cache[fetchurl]
+                            break
+                        except xml.sax.SAXParseException:
+                            pass
+                        except TypeError:
+                            pass
+                        except BadSyntax:
+                            pass
+            except Exception as e:
+                _logger.warn("Could not load extension schema %s: %s", fetchurl, e)
 
         for s, _, _ in self.graph.triples((None, RDF.type, RDF.Property)):
             self._add_properties(s)
@@ -430,10 +495,6 @@ class Loader(object):
         if not base_url:
             base_url = file_uri(os.getcwd()) + "/"
 
-        if isinstance(lref, (str, six.text_type)) and os.sep == "\\":
-            # Convert Windows path separator in ref
-            lref = lref.replace("\\", "/")
-
         sl = SourceLine(obj, None, ValueError)
         # If `ref` is a dict, look for special directives.
         if isinstance(lref, CommentedMap):
@@ -477,6 +538,10 @@ class Loader(object):
             raise ValueError(u"Expected CommentedMap or string, got %s: `%s`"
                     % (type(lref), six.text_type(lref)))
 
+        if isinstance(lref, (str, six.text_type)) and os.sep == "\\":
+            # Convert Windows path separator in ref
+            lref = lref.replace("\\", "/")
+
         url = self.expand_url(lref, base_url, scoped_id=(obj is not None))
         # Has this reference been loaded already?
         if url in self.idx and (not mixin):
@@ -966,7 +1031,10 @@ class Loader(object):
                             all_doc_ids[document[identifier]] = sl.makeLead()
                             break
             except validate.ValidationException as v:
-                errors.append(sl.makeError(six.text_type(v)))
+                if d == "$schemas":
+                    _logger.warn(validate.indent(six.text_type(v)))
+                else:
+                    errors.append(sl.makeError(six.text_type(v)))
             if hasattr(document, "iteritems"):
                 iterator = six.iteritems(document)
             else:
@@ -979,7 +1047,9 @@ class Loader(object):
             try:
                 self.validate_links(val, docid, all_doc_ids)
             except validate.ValidationException as v:
-                if key not in self.nolinkcheck:
+                if key in self.nolinkcheck or (isinstance(key, six.string_types) and ":" in key):
+                    _logger.warn(validate.indent(six.text_type(v)))
+                else:
                     docid2 = self.getid(val)
                     if docid2 is not None:
                         errors.append(sl.makeError("checking object `%s`\n%s"
@@ -991,8 +1061,6 @@ class Loader(object):
                         else:
                             errors.append(sl.makeError("checking item\n%s" % (
                                 validate.indent(six.text_type(v)))))
-                else:
-                    _logger.warn( validate.indent(six.text_type(v)))
         if bool(errors):
             if len(errors) > 1:
                 raise validate.ValidationException(
diff --git a/schema_salad/schema.py b/schema_salad/schema.py
index f1ca9af..7a60e36 100644
--- a/schema_salad/schema.py
+++ b/schema_salad/schema.py
@@ -14,10 +14,7 @@ import os
 import six
 from six.moves import urllib
 
-if six.PY3:
-    AvroSchemaFromJSONData = avro.schema.SchemaFromJSONData
-else:
-    AvroSchemaFromJSONData = avro.schema.make_avsc_object
+AvroSchemaFromJSONData = avro.schema.make_avsc_object
 
 from avro.schema import Names, SchemaParseException
 from . import ref_resolver
diff --git a/schema_salad/sourceline.py b/schema_salad/sourceline.py
index 21e57c1..972180a 100644
--- a/schema_salad/sourceline.py
+++ b/schema_salad/sourceline.py
@@ -3,6 +3,7 @@ import ruamel.yaml
 from ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq
 import re
 import os
+import traceback
 
 from typing import (Any, AnyStr, Callable, cast, Dict, List, Iterable, Tuple,
                     TypeVar, Union, Text)
@@ -134,10 +135,11 @@ def cmap(d, lc=None, fn=None):  # type: (Union[int, float, str, Text, Dict, List
         return d
 
 class SourceLine(object):
-    def __init__(self, item, key=None, raise_type=six.text_type):  # type: (Any, Any, Callable) -> None
+    def __init__(self, item, key=None, raise_type=six.text_type, include_traceback=False):  # type: (Any, Any, Callable, bool) -> None
         self.item = item
         self.key = key
         self.raise_type = raise_type
+        self.include_traceback = include_traceback
 
     def __enter__(self):  # type: () -> SourceLine
         return self
@@ -145,11 +147,14 @@ class SourceLine(object):
     def __exit__(self,
                  exc_type,   # type: Any
                  exc_value,  # type: Any
-                 traceback   # type: Any
-                 ):  # -> Any
+                 tb   # type: Any
+                 ):   # -> Any
         if not exc_value:
             return
-        raise self.makeError(six.text_type(exc_value))
+        if self.include_traceback:
+            raise self.makeError("\n".join(traceback.format_exception(exc_type, exc_value, tb)))
+        else:
+            raise self.makeError(six.text_type(exc_value))
 
     def makeLead(self):  # type: () -> Text
         if self.key is None or self.item.lc.data is None or self.key not in self.item.lc.data:
diff --git a/schema_salad/tests/#cg_metaschema.py# b/schema_salad/tests/#cg_metaschema.py#
new file mode 100644
index 0000000..e8dbb49
--- /dev/null
+++ b/schema_salad/tests/#cg_metaschema.py#
@@ -0,0 +1,1568 @@
+from __future__ import absolute_import
+import ruamel.yaml
+from ruamel.yaml.comments import CommentedBase, CommentedMap, CommentedSeq
+import re
+import os
+import traceback
+
+from typing import (Any, AnyStr, Callable, cast, Dict, List, Iterable, Tuple,
+                    TypeVar, Union, Text)
+import six
+
+lineno_re = re.compile(u"^(.*?:[0-9]+:[0-9]+: )(( *)(.*))")
+
+def _add_lc_filename(r, source):  # type: (ruamel.yaml.comments.CommentedBase, AnyStr) -> None
+    if isinstance(r, ruamel.yaml.comments.CommentedBase):
+        r.lc.filename = source
+    if isinstance(r, list):
+        for d in r:
+            _add_lc_filename(d, source)
+    elif isinstance(r, dict):
+        for d in six.itervalues(r):
+            _add_lc_filename(d, source)
+
+def relname(source):  # type: (Text) -> Text
+    if source.startswith("file://"):
+        source = source[7:]
+        source = os.path.relpath(source)
+    return source
+
+def add_lc_filename(r, source):  # type: (ruamel.yaml.comments.CommentedBase, Text) -> None
+    _add_lc_filename(r, relname(source))
+
+def reflow(text, maxline, shift=""):  # type: (Text, int, Text) -> Text
+    if maxline < 20:
+        maxline = 20
+    if len(text) > maxline:
+        sp = text.rfind(' ', 0, maxline)
+        if sp < 1:
+            sp = text.find(' ', sp+1)
+            if sp == -1:
+                sp = len(text)
+        if sp < len(text):
+            return "%s\n%s%s" % (text[0:sp], shift, reflow(text[sp+1:], maxline, shift))
+    return text
+
+def indent(v, nolead=False, shift=u"  ", bullet=u"  "):  # type: (Text, bool, Text, Text) -> Text
+    if nolead:
+        return v.splitlines()[0] + u"\n".join([shift + l for l in v.splitlines()[1:]])
+    else:
+        def lineno(i, l):  # type: (int, Text) -> Text
+            r = lineno_re.match(l)
+            if bool(r):
+                return r.group(1) + (bullet if i == 0 else shift) + r.group(2)
+            else:
+                return (bullet if i == 0 else shift) + l
+
+        return u"\n".join([lineno(i, l) for i, l in enumerate(v.splitlines())])
+
+def bullets(textlist, bul):  # type: (List[Text], Text) -> Text
+    if len(textlist) == 1:
+        return textlist[0]
+    else:
+        return "\n".join(indent(t, bullet=bul) for t in textlist)
+
+def strip_dup_lineno(text, maxline=None):  # type: (Text, int) -> Text
+    if maxline is None:
+        maxline = int(os.environ.get("COLUMNS", "100"))
+    pre = None
+    msg = []
+    for l in text.splitlines():
+        g = lineno_re.match(l)
+        if not g:
+            msg.append(l)
+            continue
+        shift = len(g.group(1)) + len(g.group(3))
+        g2 = reflow(g.group(2), maxline-shift, " " * shift)
+        if g.group(1) != pre:
+            pre = g.group(1)
+            msg.append(pre + g2)
+        else:
+            g2 = reflow(g.group(2), maxline-len(g.group(1)), " " * (len(g.group(1))+len(g.group(3))))
+            msg.append(" " * len(g.group(1)) + g2)
+    return "\n".join(msg)
+
+def cmap(d, lc=None, fn=None):  # type: (Union[int, float, str, Text, Dict, List], List[int], Text) -> Union[int, float, str, Text, CommentedMap, CommentedSeq]
+    if lc is None:
+        lc = [0, 0, 0, 0]
+    if fn is None:
+        fn = "test"
+
+    if isinstance(d, CommentedMap):
+        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
+        for k,v in six.iteritems(d):
+            if k in d.lc.data:
+                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
+            else:
+                d[k] = cmap(v, lc, fn=fn)
+        return d
+    if isinstance(d, CommentedSeq):
+        fn = d.lc.filename if hasattr(d.lc, "filename") else fn
+        for k,v in enumerate(d):
+            if k in d.lc.data:
+                d[k] = cmap(v, lc=d.lc.data[k], fn=fn)
+            else:
+                d[k] = cmap(v, lc, fn=fn)
+        return d
+    if isinstance(d, dict):
+        cm = CommentedMap()
+        for k in sorted(d.keys()):
+            v = d[k]
+            if isinstance(v, CommentedBase):
+                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
+                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
+            else:
+                uselc = lc
+                vfn = fn
+            cm[k] = cmap(v, lc=uselc, fn=vfn)
+            cm.lc.add_kv_line_col(k, uselc)
+            cm.lc.filename = fn
+        return cm
+    if isinstance(d, list):
+        cs = CommentedSeq()
+        for k,v in enumerate(d):
+            if isinstance(v, CommentedBase):
+                uselc = [v.lc.line, v.lc.col, v.lc.line, v.lc.col]
+                vfn = v.lc.filename if hasattr(v.lc, "filename") else fn
+            else:
+                uselc = lc
+                vfn = fn
+            cs.append(cmap(v, lc=uselc, fn=vfn))
+            cs.lc.add_kv_line_col(k, uselc)
+            cs.lc.filename = fn
+        return cs
+    else:
+        return d
+
+class SourceLine(object):
+    def __init__(self, item, key=None, raise_type=six.text_type, include_traceback=False):  # type: (Any, Any, Callable, bool) -> None
+        self.item = item
+        self.key = key
+        self.raise_type = raise_type
+        self.include_traceback = include_traceback
+
+    def __enter__(self):  # type: () -> SourceLine
+        return self
+
+    def __exit__(self,
+                 exc_type,   # type: Any
+                 exc_value,  # type: Any
+                 tb   # type: Any
+                 ):   # -> Any
+        if not exc_value:
+            return
+        if self.include_traceback:
+            raise self.makeError("\n".join(traceback.format_exception(exc_type, exc_value, tb)))
+        else:
+            raise self.makeError(six.text_type(exc_value))
+
+    def makeLead(self):  # type: () -> Text
+        if self.key is None or self.item.lc.data is None or self.key not in self.item.lc.data:
+            return "%s:%i:%i:" % (self.item.lc.filename if hasattr(self.item.lc, "filename") else "",
+                                  (self.item.lc.line or 0)+1,
+                                  (self.item.lc.col or 0)+1)
+        else:
+            return "%s:%i:%i:" % (self.item.lc.filename if hasattr(self.item.lc, "filename") else "",
+                                  (self.item.lc.data[self.key][0] or 0)+1,
+                                  (self.item.lc.data[self.key][1] or 0)+1)
+
+    def makeError(self, msg):  # type: (Text) -> Any
+        if not isinstance(self.item, ruamel.yaml.comments.CommentedBase):
+            return self.raise_type(msg)
+        errs = []
+        lead = self.makeLead()
+        for m in msg.splitlines():
+            if bool(lineno_re.match(m)):
+                errs.append(m)
+            else:
+                errs.append("%s %s" % (lead, m))
+        return self.raise_type("\n".join(errs))
+
+from types import NoneType
+from six.moves import urllib
+import ruamel.yaml as yaml
+from StringIO import StringIO
+import copy
+
+class ValidationException(Exception):
+    pass
+
+class Savable(object):
+    pass
+
+class LoadingOptions(object):
+    def __init__(self, fetcher=None, namespaces=None, fileuri=None, copyfrom=None):
+        if copyfrom is not None:
+            self.idx = copyfrom.idx
+            if fetcher is None:
+                fetcher = copyfrom.fetcher
+            if fileuri is None:
+                fileuri = copyfrom.fileuri
+        else:
+            self.idx = {}
+
+        if fetcher is None:
+            import os
+            import requests
+            from cachecontrol.wrapper import CacheControl
+            from cachecontrol.caches import FileCache
+            from schema_salad.ref_resolver import DefaultFetcher
+            if "HOME" in os.environ:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join(os.environ["HOME"], ".cache", "salad")))
+            elif "TMP" in os.environ:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache(os.path.join(os.environ["TMP"], ".cache", "salad")))
+            else:
+                session = CacheControl(
+                    requests.Session(),
+                    cache=FileCache("/tmp", ".cache", "salad"))
+            self.fetcher = DefaultFetcher({}, session)
+        else:
+            self.fetcher = fetcher
+
+        self.fileuri = fileuri
+
+        self.vocab = _vocab
+        self.rvocab = _rvocab
+
+        if namespaces is not None:
+            self.vocab = self.vocab.copy()
+            self.rvocab = self.rvocab.copy()
+            for k,v in namespaces.iteritems():
+                self.vocab[k] = v
+                self.rvocab[v] = k
+
+def load_field(val, fieldtype, baseuri, loadingOptions):
+    if isinstance(val, dict):
+        if "$import" in val:
+            return _document_load_by_url(fieldtype, loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$import"]), loadingOptions)
+        elif "$include" in val:
+            val = loadingOptions.fetcher.fetch_text(loadingOptions.fetcher.urljoin(loadingOptions.fileuri, val["$include"]))
+    return fieldtype.load(val, baseuri, loadingOptions)
+
+
+def save(val):
+   if isinstance(val, Savable):
+       return val.save()
+   if isinstance(val, list):
+       return [save(v) for v in val]
+   return val
+
+def expand_url(url,                 # type: Text
+               base_url,            # type: Text
+               loadingOptions,
+               scoped_id=False,     # type: bool
+               vocab_term=False,    # type: bool
+               scoped_ref=None      # type: int
+               ):
+    # type: (...) -> Text
+    if url in (u"@id", u"@type"):
+        return url
+
+    if vocab_term and url in loadingOptions.vocab:
+        return url
+
+    if bool(loadingOptions.vocab) and u":" in url:
+        prefix = url.split(u":")[0]
+        if prefix in loadingOptions.vocab:
+            url = loadingOptions.vocab[prefix] + url[len(prefix) + 1:]
+
+    split = urllib.parse.urlsplit(url)
+
+    if ((bool(split.scheme) and split.scheme in [u'http', u'https', u'file']) or url.startswith(u"$(")
+        or url.startswith(u"${")):
+        pass
+    elif scoped_id and not bool(split.fragment):
+        splitbase = urllib.parse.urlsplit(base_url)
+        frg = u""
+        if bool(splitbase.fragment):
+            frg = splitbase.fragment + u"/" + split.path
+        else:
+            frg = split.path
+        pt = splitbase.path if splitbase.path != '' else "/"
+        url = urllib.parse.urlunsplit(
+            (splitbase.scheme, splitbase.netloc, pt, splitbase.query, frg))
+    elif scoped_ref is not None and not split.fragment:
+        pass
+    else:
+        url = loadingOptions.fetcher.urljoin(base_url, url)
+
+    if vocab_term:
+        if bool(split.scheme):
+            if url in loadingOptions.rvocab:
+                return loadingOptions.rvocab[url]
+        else:
+            raise ValidationException("Term '%s' not in vocabulary" % url)
+    return url
+
+
+class _Loader(object):
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        pass
+
+class _PrimitiveLoader(_Loader):
+    def __init__(self, tp):
+        self.tp = tp
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, self.tp):
+            raise ValidationException("Expected a %s but got %s" % (self.tp, type(doc)))
+        return doc
+
+    def __repr__(self):
+        return str(self.tp)
+
+class _ArrayLoader(_Loader):
+    def __init__(self, items):
+        self.items = items
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, list):
+            raise ValidationException("Expected a list")
+        r = []
+        errors = []
+        for i in xrange(0, len(doc)):
+            try:
+                lf = load_field(doc[i], _UnionLoader((self, self.items)), baseuri, loadingOptions)
+                if isinstance(lf, list):
+                    r.extend(lf)
+                else:
+                    r.append(lf)
+            except ValidationException as e:
+                errors.append(SourceLine(doc, i, str).makeError(six.text_type(e)))
+        if errors:
+            raise ValidationException("\n".join(errors))
+        return r
+
+    def __repr__(self):
+        return "array<%s>" % self.items
+
+class _EnumLoader(_Loader):
+    def __init__(self, symbols):
+        self.symbols = symbols
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if doc in self.symbols:
+            return doc
+        else:
+            raise ValidationException("Expected one of %s" % (self.symbols,))
+
+
+class _RecordLoader(_Loader):
+    def __init__(self, classtype):
+        self.classtype = classtype
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if not isinstance(doc, dict):
+            raise ValidationException("Expected a dict")
+        return self.classtype(doc, baseuri, loadingOptions, docRoot=docRoot)
+
+    def __repr__(self):
+        return str(self.classtype)
+
+
+class _UnionLoader(_Loader):
+    def __init__(self, alternates):
+        self.alternates = alternates
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        errors = []
+        for t in self.alternates:
+            try:
+                return t.load(doc, baseuri, loadingOptions, docRoot=docRoot)
+            except ValidationException as e:
+                errors.append("tried %s but\n%s" % (t, indent(str(e))))
+        raise ValidationException(bullets(errors, "- "))
+
+    def __repr__(self):
+        return " | ".join(str(a) for a in self.alternates)
+
+class _URILoader(_Loader):
+    def __init__(self, inner, scoped_id, vocab_term, scoped_ref):
+        self.inner = inner
+        self.scoped_id = scoped_id
+        self.vocab_term = vocab_term
+        self.scoped_ref = scoped_ref
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if isinstance(doc, list):
+            return [self.load(i, baseuri, loadingOptions) for i in doc]
+        if isinstance(doc, basestring):
+            return expand_url(doc, baseuri, loadingOptions,
+                              self.scoped_id, self.vocab_term, self.scoped_ref)
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+class _TypeDSLLoader(_Loader):
+    def __init__(self, inner):
+        self.inner = inner
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+class _IdMapLoader(_Loader):
+    def __init__(self, inner, mapSubject, mapPredicate):
+        self.inner = inner
+        self.mapSubject = mapSubject
+        self.mapPredicate = mapPredicate
+
+    def load(self, doc, baseuri, loadingOptions, docRoot=None):
+        if isinstance(doc, dict):
+            r = []
+            for k in sorted(doc.keys()):
+                val = doc[k]
+                if isinstance(val, dict):
+                    v = copy.copy(val)
+                    if hasattr(val, 'lc'):
+                        v.lc.data = val.lc.data
+                        v.lc.filename = val.lc.filename
+                else:
+                    if self.mapPredicate:
+                        v = {self.mapPredicate: val}
+                    else:
+                        raise ValidationException("No mapPredicate")
+                v[self.mapSubject] = k
+                r.append(v)
+            doc = r
+        return self.inner.load(doc, baseuri, loadingOptions)
+
+
+def _document_load(loader, doc, baseuri, loadingOptions):
+    if isinstance(doc, basestring):
+        return _document_load_by_url(loader, doc, loadingOptions)
+
+    if isinstance(doc, dict):
+        if "$namespaces" in doc:
+            loadingOptions = LoadingOptions(copyfrom=loadingOptions, namespaces=doc["$namespaces"])
+
+        if "$base" in doc:
+            baseuri = doc["$base"]
+
+        if "$graph" in doc:
+            return loader.load(doc["$graph"], baseuri, loadingOptions)
+        else:
+            return loader.load(doc, baseuri, loadingOptions, docRoot=baseuri)
+
+    if isinstance(doc, list):
+        return loader.load(doc, baseuri, loadingOptions)
+
+    raise ValidationException()
+
+
+def _document_load_by_url(loader, url, loadingOptions):
+    if url in loadingOptions.idx:
+        return _document_load(loader, loadingOptions.idx[url], url, loadingOptions)
+
+    text = loadingOptions.fetcher.fetch_text(url)
+    if isinstance(text, bytes):
+        textIO = StringIO(text.decode('utf-8'))
+    else:
+        textIO = StringIO(text)
+    textIO.name = url    # type: ignore
+    result = yaml.round_trip_load(textIO)
+    add_lc_filename(result, url)
+
+    loadingOptions.idx[url] = result
+
+    loadingOptions = LoadingOptions(copyfrom=loadingOptions, fileuri=url)
+
+    return _document_load(loader, result, url, loadingOptions)
+
+class RecordField(Savable):
+    """
+A field of a record.
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'name' in doc:
+               try:
+                   self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+           else:
+               self.name = None
+
+
+           if self.name is None:
+               if docRoot is not None:
+                   self.name = docRoot
+               else:
+                   raise ValidationException("Missing name")
+           baseuri = self.name
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+           if errors:
+               raise ValidationException("Trying 'RecordField'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class RecordSchema(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'fields' in doc:
+               try:
+                   self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e)))
+           else:
+               self.fields = None
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_Record_symbolLoader_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+           if errors:
+               raise ValidationException("Trying 'RecordSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.fields is not None:
+            r['fields'] = save(self.fields)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class EnumSchema(Savable):
+    """
+Define an enumerated type.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           try:
+               self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_False_False_None, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e)))
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_Enum_symbolLoader_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+           if errors:
+               raise ValidationException("Trying 'EnumSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.symbols is not None:
+            r['symbols'] = save(self.symbols)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class ArraySchema(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           try:
+               self.items = load_field(doc.get('items'), uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'items', str).makeError("the `items` field is not valid because:\n"+str(e)))
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_Array_symbolLoader_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+           if errors:
+               raise ValidationException("Trying 'ArraySchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.items is not None:
+            r['items'] = save(self.items)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+class JsonldPredicate(Savable):
+    """
+Attached to a record field to define how the parent record field is handled for
+URI resolution and JSON-LD context generation.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if '_id' in doc:
+               try:
+                   self._id = load_field(doc.get('_id'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, '_id', str).makeError("the `_id` field is not valid because:\n"+str(e)))
+           else:
+               self._id = None
+
+           if '_type' in doc:
+               try:
+                   self._type = load_field(doc.get('_type'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, '_type', str).makeError("the `_type` field is not valid because:\n"+str(e)))
+           else:
+               self._type = None
+
+           if '_container' in doc:
+               try:
+                   self._container = load_field(doc.get('_container'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, '_container', str).makeError("the `_container` field is not valid because:\n"+str(e)))
+           else:
+               self._container = None
+
+           if 'identity' in doc:
+               try:
+                   self.identity = load_field(doc.get('identity'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'identity', str).makeError("the `identity` field is not valid because:\n"+str(e)))
+           else:
+               self.identity = None
+
+           if 'noLinkCheck' in doc:
+               try:
+                   self.noLinkCheck = load_field(doc.get('noLinkCheck'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'noLinkCheck', str).makeError("the `noLinkCheck` field is not valid because:\n"+str(e)))
+           else:
+               self.noLinkCheck = None
+
+           if 'mapSubject' in doc:
+               try:
+                   self.mapSubject = load_field(doc.get('mapSubject'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'mapSubject', str).makeError("the `mapSubject` field is not valid because:\n"+str(e)))
+           else:
+               self.mapSubject = None
+
+           if 'mapPredicate' in doc:
+               try:
+                   self.mapPredicate = load_field(doc.get('mapPredicate'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'mapPredicate', str).makeError("the `mapPredicate` field is not valid because:\n"+str(e)))
+           else:
+               self.mapPredicate = None
+
+           if 'refScope' in doc:
+               try:
+                   self.refScope = load_field(doc.get('refScope'), union_of_None_type_or_inttype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'refScope', str).makeError("the `refScope` field is not valid because:\n"+str(e)))
+           else:
+               self.refScope = None
+
+           if 'typeDSL' in doc:
+               try:
+                   self.typeDSL = load_field(doc.get('typeDSL'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'typeDSL', str).makeError("the `typeDSL` field is not valid because:\n"+str(e)))
+           else:
+               self.typeDSL = None
+
+
+           if errors:
+               raise ValidationException("Trying 'JsonldPredicate'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self._id is not None:
+            r['_id'] = save(self._id)
+        if self._type is not None:
+            r['_type'] = save(self._type)
+        if self._container is not None:
+            r['_container'] = save(self._container)
+        if self.identity is not None:
+            r['identity'] = save(self.identity)
+        if self.noLinkCheck is not None:
+            r['noLinkCheck'] = save(self.noLinkCheck)
+        if self.mapSubject is not None:
+            r['mapSubject'] = save(self.mapSubject)
+        if self.mapPredicate is not None:
+            r['mapPredicate'] = save(self.mapPredicate)
+        if self.refScope is not None:
+            r['refScope'] = save(self.refScope)
+        if self.typeDSL is not None:
+            r['typeDSL'] = save(self.typeDSL)
+        return r
+
+
+class SpecializeDef(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           try:
+               self.specializeFrom = load_field(doc.get('specializeFrom'), uri_strtype_False_False_1, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'specializeFrom', str).makeError("the `specializeFrom` field is not valid because:\n"+str(e)))
+
+           try:
+               self.specializeTo = load_field(doc.get('specializeTo'), uri_strtype_False_False_1, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'specializeTo', str).makeError("the `specializeTo` field is not valid because:\n"+str(e)))
+
+
+           if errors:
+               raise ValidationException("Trying 'SpecializeDef'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.specializeFrom is not None:
+            r['specializeFrom'] = save(self.specializeFrom)
+        if self.specializeTo is not None:
+            r['specializeTo'] = save(self.specializeTo)
+        return r
+
+
+class NamedType(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'name' in doc:
+               try:
+                   self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+           else:
+               self.name = None
+
+
+           if self.name is None:
+               if docRoot is not None:
+                   self.name = docRoot
+               else:
+                   raise ValidationException("Missing name")
+           baseuri = self.name
+           if 'inVocab' in doc:
+               try:
+                   self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+           else:
+               self.inVocab = None
+
+
+           if errors:
+               raise ValidationException("Trying 'NamedType'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        return r
+
+
+class DocType(Savable):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           if 'docParent' in doc:
+               try:
+                   self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+           else:
+               self.docParent = None
+
+           if 'docChild' in doc:
+               try:
+                   self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+           else:
+               self.docChild = None
+
+           if 'docAfter' in doc:
+               try:
+                   self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+           else:
+               self.docAfter = None
+
+
+           if errors:
+               raise ValidationException("Trying 'DocType'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        return r
+
+
+class SchemaDefinedType(DocType):
+    """
+Abstract base for schema-defined types.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           if 'docParent' in doc:
+               try:
+                   self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+           else:
+               self.docParent = None
+
+           if 'docChild' in doc:
+               try:
+                   self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+           else:
+               self.docChild = None
+
+           if 'docAfter' in doc:
+               try:
+                   self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+           else:
+               self.docAfter = None
+
+           if 'jsonldPredicate' in doc:
+               try:
+                   self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+           else:
+               self.jsonldPredicate = None
+
+           if 'documentRoot' in doc:
+               try:
+                   self.documentRoot = load_field(doc.get('documentRoot'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'documentRoot', str).makeError("the `documentRoot` field is not valid because:\n"+str(e)))
+           else:
+               self.documentRoot = None
+
+
+           if errors:
+               raise ValidationException("Trying 'SchemaDefinedType'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        if self.documentRoot is not None:
+            r['documentRoot'] = save(self.documentRoot)
+        return r
+
+
+class SaladRecordField(RecordField):
+    """
+A field of a record.
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'name' in doc:
+               try:
+                   self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+           else:
+               self.name = None
+
+
+           if self.name is None:
+               if docRoot is not None:
+                   self.name = docRoot
+               else:
+                   raise ValidationException("Missing name")
+           baseuri = self.name
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+           if 'jsonldPredicate' in doc:
+               try:
+                   self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+           else:
+               self.jsonldPredicate = None
+
+
+           if errors:
+               raise ValidationException("Trying 'SaladRecordField'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        return r
+
+
+class SaladRecordSchema(NamedType, RecordSchema, SchemaDefinedType):
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'name' in doc:
+               try:
+                   self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+           else:
+               self.name = None
+
+
+           if self.name is None:
+               if docRoot is not None:
+                   self.name = docRoot
+               else:
+                   raise ValidationException("Missing name")
+           baseuri = self.name
+           if 'inVocab' in doc:
+               try:
+                   self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+           else:
+               self.inVocab = None
+
+           if 'fields' in doc:
+               try:
+                   self.fields = load_field(doc.get('fields'), idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'fields', str).makeError("the `fields` field is not valid because:\n"+str(e)))
+           else:
+               self.fields = None
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_Record_symbolLoader_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           if 'docParent' in doc:
+               try:
+                   self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+           else:
+               self.docParent = None
+
+           if 'docChild' in doc:
+               try:
+                   self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+           else:
+               self.docChild = None
+
+           if 'docAfter' in doc:
+               try:
+                   self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+           else:
+               self.docAfter = None
+
+           if 'jsonldPredicate' in doc:
+               try:
+                   self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+           else:
+               self.jsonldPredicate = None
+
+           if 'documentRoot' in doc:
+               try:
+                   self.documentRoot = load_field(doc.get('documentRoot'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'documentRoot', str).makeError("the `documentRoot` field is not valid because:\n"+str(e)))
+           else:
+               self.documentRoot = None
+
+           if 'abstract' in doc:
+               try:
+                   self.abstract = load_field(doc.get('abstract'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'abstract', str).makeError("the `abstract` field is not valid because:\n"+str(e)))
+           else:
+               self.abstract = None
+
+           if 'extends' in doc:
+               try:
+                   self.extends = load_field(doc.get('extends'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'extends', str).makeError("the `extends` field is not valid because:\n"+str(e)))
+           else:
+               self.extends = None
+
+           if 'specialize' in doc:
+               try:
+                   self.specialize = load_field(doc.get('specialize'), idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'specialize', str).makeError("the `specialize` field is not valid because:\n"+str(e)))
+           else:
+               self.specialize = None
+
+
+           if errors:
+               raise ValidationException("Trying 'SaladRecordSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        if self.fields is not None:
+            r['fields'] = save(self.fields)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        if self.documentRoot is not None:
+            r['documentRoot'] = save(self.documentRoot)
+        if self.abstract is not None:
+            r['abstract'] = save(self.abstract)
+        if self.extends is not None:
+            r['extends'] = save(self.extends)
+        if self.specialize is not None:
+            r['specialize'] = save(self.specialize)
+        return r
+
+
+class SaladEnumSchema(NamedType, EnumSchema, SchemaDefinedType):
+    """
+Define an enumerated type.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'name' in doc:
+               try:
+                   self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+           else:
+               self.name = None
+
+
+           if self.name is None:
+               if docRoot is not None:
+                   self.name = docRoot
+               else:
+                   raise ValidationException("Missing name")
+           baseuri = self.name
+           if 'inVocab' in doc:
+               try:
+                   self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+           else:
+               self.inVocab = None
+
+           try:
+               self.symbols = load_field(doc.get('symbols'), uri_array_of_strtype_False_False_None, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'symbols', str).makeError("the `symbols` field is not valid because:\n"+str(e)))
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_Enum_symbolLoader_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           if 'docParent' in doc:
+               try:
+                   self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+           else:
+               self.docParent = None
+
+           if 'docChild' in doc:
+               try:
+                   self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+           else:
+               self.docChild = None
+
+           if 'docAfter' in doc:
+               try:
+                   self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+           else:
+               self.docAfter = None
+
+           if 'jsonldPredicate' in doc:
+               try:
+                   self.jsonldPredicate = load_field(doc.get('jsonldPredicate'), union_of_None_type_or_strtype_or_JsonldPredicateLoader, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'jsonldPredicate', str).makeError("the `jsonldPredicate` field is not valid because:\n"+str(e)))
+           else:
+               self.jsonldPredicate = None
+
+           if 'documentRoot' in doc:
+               try:
+                   self.documentRoot = load_field(doc.get('documentRoot'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'documentRoot', str).makeError("the `documentRoot` field is not valid because:\n"+str(e)))
+           else:
+               self.documentRoot = None
+
+           if 'extends' in doc:
+               try:
+                   self.extends = load_field(doc.get('extends'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'extends', str).makeError("the `extends` field is not valid because:\n"+str(e)))
+           else:
+               self.extends = None
+
+
+           if errors:
+               raise ValidationException("Trying 'SaladEnumSchema'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        if self.symbols is not None:
+            r['symbols'] = save(self.symbols)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.jsonldPredicate is not None:
+            r['jsonldPredicate'] = save(self.jsonldPredicate)
+        if self.documentRoot is not None:
+            r['documentRoot'] = save(self.documentRoot)
+        if self.extends is not None:
+            r['extends'] = save(self.extends)
+        return r
+
+
+class Documentation(NamedType, DocType):
+    """
+A documentation section.  This type exists to facilitate self-documenting
+schemas but has no role in formal validation.
+
+    """
+    def __init__(self, _doc, baseuri, loadingOptions, docRoot=None):
+           doc = copy.copy(_doc)
+           if hasattr(_doc, 'lc'):
+               doc.lc.data = _doc.lc.data
+               doc.lc.filename = _doc.lc.filename
+           errors = []
+           #doc = {expand_url(d, u"", loadingOptions, scoped_id=False, vocab_term=True): v for d,v in doc.items()}
+           if 'name' in doc:
+               try:
+                   self.name = load_field(doc.get('name'), uri_strtype_True_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'name', str).makeError("the `name` field is not valid because:\n"+str(e)))
+           else:
+               self.name = None
+
+
+           if self.name is None:
+               if docRoot is not None:
+                   self.name = docRoot
+               else:
+                   raise ValidationException("Missing name")
+           baseuri = self.name
+           if 'inVocab' in doc:
+               try:
+                   self.inVocab = load_field(doc.get('inVocab'), union_of_None_type_or_booltype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'inVocab', str).makeError("the `inVocab` field is not valid because:\n"+str(e)))
+           else:
+               self.inVocab = None
+
+           if 'doc' in doc:
+               try:
+                   self.doc = load_field(doc.get('doc'), union_of_None_type_or_strtype_or_array_of_strtype, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'doc', str).makeError("the `doc` field is not valid because:\n"+str(e)))
+           else:
+               self.doc = None
+
+           if 'docParent' in doc:
+               try:
+                   self.docParent = load_field(doc.get('docParent'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docParent', str).makeError("the `docParent` field is not valid because:\n"+str(e)))
+           else:
+               self.docParent = None
+
+           if 'docChild' in doc:
+               try:
+                   self.docChild = load_field(doc.get('docChild'), uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docChild', str).makeError("the `docChild` field is not valid because:\n"+str(e)))
+           else:
+               self.docChild = None
+
+           if 'docAfter' in doc:
+               try:
+                   self.docAfter = load_field(doc.get('docAfter'), uri_union_of_None_type_or_strtype_False_False_None, baseuri, loadingOptions)
+               except ValidationException as e:
+                   errors.append(SourceLine(doc, 'docAfter', str).makeError("the `docAfter` field is not valid because:\n"+str(e)))
+           else:
+               self.docAfter = None
+
+           try:
+               self.type = load_field(doc.get('type'), typedsl_uri_Documentation_symbolLoader_False_True_2, baseuri, loadingOptions)
+           except ValidationException as e:
+               errors.append(SourceLine(doc, 'type', str).makeError("the `type` field is not valid because:\n"+str(e)))
+
+
+           if errors:
+               raise ValidationException("Trying 'Documentation'\n"+"\n".join(errors))
+
+    def save(self):
+        r = {}
+        if self.name is not None:
+            r['name'] = save(self.name)
+        if self.inVocab is not None:
+            r['inVocab'] = save(self.inVocab)
+        if self.doc is not None:
+            r['doc'] = save(self.doc)
+        if self.docParent is not None:
+            r['docParent'] = save(self.docParent)
+        if self.docChild is not None:
+            r['docChild'] = save(self.docChild)
+        if self.docAfter is not None:
+            r['docAfter'] = save(self.docAfter)
+        if self.type is not None:
+            r['type'] = save(self.type)
+        return r
+
+
+_vocab = {
+    "fields": "https://w3id.org/cwl/salad#fields",
+    "int": "http://www.w3.org/2001/XMLSchema#int",
+    "refScope": "https://w3id.org/cwl/salad#JsonldPredicate/refScope",
+    "abstract": "https://w3id.org/cwl/salad#SaladRecordSchema/abstract",
+    "float": "http://www.w3.org/2001/XMLSchema#float",
+    "symbols": "https://w3id.org/cwl/salad#symbols",
+    "inVocab": "https://w3id.org/cwl/salad#NamedType/inVocab",
+    "jsonldPredicate": "https://w3id.org/cwl/salad#SchemaDefinedType/jsonldPredicate",
+    "boolean": "http://www.w3.org/2001/XMLSchema#boolean",
+    "mapPredicate": "https://w3id.org/cwl/salad#JsonldPredicate/mapPredicate",
+    "NamedType": "https://w3id.org/cwl/salad#NamedType",
+    "array": "https://w3id.org/cwl/salad#array",
+    "null": "https://w3id.org/cwl/salad#null",
+    "SchemaDefinedType": "https://w3id.org/cwl/salad#SchemaDefinedType",
+    "mapSubject": "https://w3id.org/cwl/salad#JsonldPredicate/mapSubject",
+    "SaladRecordField": "https://w3id.org/cwl/salad#SaladRecordField",
+    "SaladEnumSchema": "https://w3id.org/cwl/salad#SaladEnumSchema",
+    "SpecializeDef": "https://w3id.org/cwl/salad#SpecializeDef",
+    "DocType": "https://w3id.org/cwl/salad#DocType",
+    "long": "http://www.w3.org/2001/XMLSchema#long",
+    "JsonldPredicate": "https://w3id.org/cwl/salad#JsonldPredicate",
+    "docParent": "https://w3id.org/cwl/salad#docParent",
+    "extends": "https://w3id.org/cwl/salad#extends",
+    "specializeFrom": "https://w3id.org/cwl/salad#specializeFrom",
+    "type": "https://w3id.org/cwl/salad#type",
+    "ArraySchema": "https://w3id.org/cwl/salad#ArraySchema",
+    "_type": "https://w3id.org/cwl/salad#JsonldPredicate/_type",
+    "docChild": "https://w3id.org/cwl/salad#docChild",
+    "string": "http://www.w3.org/2001/XMLSchema#string",
+    "RecordField": "https://w3id.org/cwl/salad#RecordField",
+    "enum": "https://w3id.org/cwl/salad#enum",
+    "RecordSchema": "https://w3id.org/cwl/salad#RecordSchema",
+    "typeDSL": "https://w3id.org/cwl/salad#JsonldPredicate/typeDSL",
+    "Documentation": "https://w3id.org/cwl/salad#Documentation",
+    "docAfter": "https://w3id.org/cwl/salad#docAfter",
+    "_container": "https://w3id.org/cwl/salad#JsonldPredicate/_container",
+    "noLinkCheck": "https://w3id.org/cwl/salad#JsonldPredicate/noLinkCheck",
+    "identity": "https://w3id.org/cwl/salad#JsonldPredicate/identity",
+    "EnumSchema": "https://w3id.org/cwl/salad#EnumSchema",
+    "specialize": "https://w3id.org/cwl/salad#specialize",
+    "documentRoot": "https://w3id.org/cwl/salad#SchemaDefinedType/documentRoot",
+    "double": "http://www.w3.org/2001/XMLSchema#double",
+    "documentation": "https://w3id.org/cwl/salad#documentation",
+    "SaladRecordSchema": "https://w3id.org/cwl/salad#SaladRecordSchema",
+    "record": "https://w3id.org/cwl/salad#record",
+    "doc": "https://w3id.org/cwl/salad#DocType/doc",
+    "specializeTo": "https://w3id.org/cwl/salad#specializeTo",
+    "items": "https://w3id.org/cwl/salad#items",
+    "_id": "https://w3id.org/cwl/salad#_id",
+    "Any": "https://w3id.org/cwl/salad#Any",
+}
+_rvocab = {
+    "https://w3id.org/cwl/salad#fields": "fields",
+    "http://www.w3.org/2001/XMLSchema#int": "int",
+    "https://w3id.org/cwl/salad#JsonldPredicate/refScope": "refScope",
+    "https://w3id.org/cwl/salad#SaladRecordSchema/abstract": "abstract",
+    "http://www.w3.org/2001/XMLSchema#float": "float",
+    "https://w3id.org/cwl/salad#symbols": "symbols",
+    "https://w3id.org/cwl/salad#NamedType/inVocab": "inVocab",
+    "https://w3id.org/cwl/salad#SchemaDefinedType/jsonldPredicate": "jsonldPredicate",
+    "http://www.w3.org/2001/XMLSchema#boolean": "boolean",
+    "https://w3id.org/cwl/salad#JsonldPredicate/mapPredicate": "mapPredicate",
+    "https://w3id.org/cwl/salad#NamedType": "NamedType",
+    "https://w3id.org/cwl/salad#array": "array",
+    "https://w3id.org/cwl/salad#null": "null",
+    "https://w3id.org/cwl/salad#SchemaDefinedType": "SchemaDefinedType",
+    "https://w3id.org/cwl/salad#JsonldPredicate/mapSubject": "mapSubject",
+    "https://w3id.org/cwl/salad#SaladRecordField": "SaladRecordField",
+    "https://w3id.org/cwl/salad#SaladEnumSchema": "SaladEnumSchema",
+    "https://w3id.org/cwl/salad#SpecializeDef": "SpecializeDef",
+    "https://w3id.org/cwl/salad#DocType": "DocType",
+    "http://www.w3.org/2001/XMLSchema#long": "long",
+    "https://w3id.org/cwl/salad#JsonldPredicate": "JsonldPredicate",
+    "https://w3id.org/cwl/salad#docParent": "docParent",
+    "https://w3id.org/cwl/salad#extends": "extends",
+    "https://w3id.org/cwl/salad#specializeFrom": "specializeFrom",
+    "https://w3id.org/cwl/salad#type": "type",
+    "https://w3id.org/cwl/salad#ArraySchema": "ArraySchema",
+    "https://w3id.org/cwl/salad#JsonldPredicate/_type": "_type",
+    "https://w3id.org/cwl/salad#docChild": "docChild",
+    "http://www.w3.org/2001/XMLSchema#string": "string",
+    "https://w3id.org/cwl/salad#RecordField": "RecordField",
+    "https://w3id.org/cwl/salad#enum": "enum",
+    "https://w3id.org/cwl/salad#RecordSchema": "RecordSchema",
+    "https://w3id.org/cwl/salad#JsonldPredicate/typeDSL": "typeDSL",
+    "https://w3id.org/cwl/salad#Documentation": "Documentation",
+    "https://w3id.org/cwl/salad#docAfter": "docAfter",
+    "https://w3id.org/cwl/salad#JsonldPredicate/_container": "_container",
+    "https://w3id.org/cwl/salad#JsonldPredicate/noLinkCheck": "noLinkCheck",
+    "https://w3id.org/cwl/salad#JsonldPredicate/identity": "identity",
+    "https://w3id.org/cwl/salad#EnumSchema": "EnumSchema",
+    "https://w3id.org/cwl/salad#specialize": "specialize",
+    "https://w3id.org/cwl/salad#SchemaDefinedType/documentRoot": "documentRoot",
+    "http://www.w3.org/2001/XMLSchema#double": "double",
+    "https://w3id.org/cwl/salad#documentation": "documentation",
+    "https://w3id.org/cwl/salad#SaladRecordSchema": "SaladRecordSchema",
+    "https://w3id.org/cwl/salad#record": "record",
+    "https://w3id.org/cwl/salad#DocType/doc": "doc",
+    "https://w3id.org/cwl/salad#specializeTo": "specializeTo",
+    "https://w3id.org/cwl/salad#items": "items",
+    "https://w3id.org/cwl/salad#_id": "_id",
+    "https://w3id.org/cwl/salad#Any": "Any",
+}
+
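+# Loader singletons generated from the metaschema.  The composite names are
+# mechanical: union_of_*, array_of_*, idmap_* and typedsl_* mirror how each
+# loader wraps another, and trailing tokens such as _True_False_None mirror
+# the extra positional arguments handed to _URILoader.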
+inttype = _PrimitiveLoader(int)
+booltype = _PrimitiveLoader(bool)
+None_type = _PrimitiveLoader(NoneType)
+strtype = _PrimitiveLoader((str, six.text_type))
+PrimitiveTypeLoader = _EnumLoader(("null", "boolean", "int", "long", "float", "double", "string",))
+AnyLoader = _EnumLoader(("Any",))
+RecordFieldLoader = _RecordLoader(RecordField)
+RecordSchemaLoader = _RecordLoader(RecordSchema)
+EnumSchemaLoader = _RecordLoader(EnumSchema)
+ArraySchemaLoader = _RecordLoader(ArraySchema)
+JsonldPredicateLoader = _RecordLoader(JsonldPredicate)
+SpecializeDefLoader = _RecordLoader(SpecializeDef)
+NamedTypeLoader = _RecordLoader(NamedType)
+DocTypeLoader = _RecordLoader(DocType)
+SchemaDefinedTypeLoader = _RecordLoader(SchemaDefinedType)
+SaladRecordFieldLoader = _RecordLoader(SaladRecordField)
+SaladRecordSchemaLoader = _RecordLoader(SaladRecordSchema)
+SaladEnumSchemaLoader = _RecordLoader(SaladEnumSchema)
+DocumentationLoader = _RecordLoader(Documentation)
+uri_strtype_True_False_None = _URILoader(strtype, True, False, None)
+union_of_None_type_or_strtype = _UnionLoader((None_type, strtype))
+union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype))
+array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _ArrayLoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype)
+union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype = _UnionLoader((PrimitiveTypeLoader, RecordSchemaLoader, EnumSchemaLoader, ArraySchemaLoader, strtype, array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype))
+uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _URILoader(union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype, False, True, 2)
+typedsl_uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2 = _TypeDSLLoader(uri_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_or_array_of_union_of_PrimitiveTypeLoader_or_RecordSchemaLoader_or_EnumSchemaLoader_or_ArraySchemaLoader_or_strtype_False_True_2)
+array_of_RecordFieldLoader = _ArrayLoader(RecordFieldLoader)
+union_of_None_type_or_array_of_RecordFieldLoader = _UnionLoader((None_type, array_of_RecordFieldLoader))
+idmap_fields_union_of_None_type_or_array_of_RecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_RecordFieldLoader, 'name', 'type')
+Record_symbolLoader = _EnumLoader(("record",))
+uri_Record_symbolLoader_False_True_2 = _URILoader(Record_symbolLoader, False, True, 2)
+typedsl_uri_Record_symbolLoader_False_True_2 = _TypeDSLLoader(uri_Record_symbolLoader_False_True_2)
+array_of_strtype = _ArrayLoader(strtype)
+uri_array_of_strtype_False_False_None = _URILoader(array_of_strtype, False, False, None)
+Enum_symbolLoader = _EnumLoader(("enum",))
+uri_Enum_symbolLoader_False_True_2 = _URILoader(Enum_symbolLoader, False, True, 2)
+typedsl_uri_Enum_symbolLoader_False_True_2 = _TypeDSLLoader(uri_Enum_symbolLoader_False_True_2)
+Array_symbolLoader = _EnumLoader(("array",))
+uri_Array_symbolLoader_False_True_2 = _URILoader(Array_symbolLoader, False, True, 2)
+typedsl_uri_Array_symbolLoader_False_True_2 = _TypeDSLLoader(uri_Array_symbolLoader_False_True_2)
+uri_union_of_None_type_or_strtype_False_False_None = _URILoader(union_of_None_type_or_strtype, False, False, None)
+union_of_None_type_or_booltype = _UnionLoader((None_type, booltype))
+union_of_None_type_or_inttype = _UnionLoader((None_type, inttype))
+uri_strtype_False_False_1 = _URILoader(strtype, False, False, 1)
+union_of_None_type_or_strtype_or_array_of_strtype = _UnionLoader((None_type, strtype, array_of_strtype))
+uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_None = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, False, False, None)
+union_of_None_type_or_strtype_or_JsonldPredicateLoader = _UnionLoader((None_type, strtype, JsonldPredicateLoader))
+array_of_SaladRecordFieldLoader = _ArrayLoader(SaladRecordFieldLoader)
+union_of_None_type_or_array_of_SaladRecordFieldLoader = _UnionLoader((None_type, array_of_SaladRecordFieldLoader))
+idmap_fields_union_of_None_type_or_array_of_SaladRecordFieldLoader = _IdMapLoader(union_of_None_type_or_array_of_SaladRecordFieldLoader, 'name', 'type')
+uri_union_of_None_type_or_strtype_or_array_of_strtype_False_False_1 = _URILoader(union_of_None_type_or_strtype_or_array_of_strtype, False, False, 1)
+array_of_SpecializeDefLoader = _ArrayLoader(SpecializeDefLoader)
+union_of_None_type_or_array_of_SpecializeDefLoader = _UnionLoader((None_type, array_of_SpecializeDefLoader))
+idmap_specialize_union_of_None_type_or_array_of_SpecializeDefLoader = _IdMapLoader(union_of_None_type_or_array_of_SpecializeDefLoader, 'specializeFrom', 'specializeTo')
+Documentation_symbolLoader = _EnumLoader(("documentation",))
+uri_Documentation_symbolLoader_False_True_2 = _URILoader(Documentation_symbolLoader, False, True, 2)
+typedsl_uri_Documentation_symbolLoader_False_True_2 = _TypeDSLLoader(uri_Documentation_symbolLoader_False_True_2)
+union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _UnionLoader((SaladRecordSchemaLoader, SaladEnumSchemaLoader, DocumentationLoader))
+array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _ArrayLoader(union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader)
+union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader = _UnionLoader((SaladRecordSchemaLoader, SaladEnumSchemaLoader, DocumentationLoader, array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader))
+
+
+
+def load_document(doc, baseuri, loadingOptions):
+    return _document_load(union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader_or_array_of_union_of_SaladRecordSchemaLoader_or_SaladEnumSchemaLoader_or_DocumentationLoader, doc, baseuri, loadingOptions)
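+# Illustrative usage sketch: the YAML parsing step and the no-argument
+# LoadingOptions() construction below are assumptions, so the example is kept
+# entirely in comments rather than presented as part of the generated API.
+#
+#     import ruamel.yaml
+#     with open("metaschema.yml") as handle:        # hypothetical input file
+#         raw = ruamel.yaml.safe_load(handle)
+#     loaded = load_document(raw, "file:///metaschema.yml", LoadingOptions())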
diff --git a/schema_salad/tests/.coverage b/schema_salad/tests/.coverage
new file mode 100644
index 0000000..b4ab5e5
--- /dev/null
+++ b/schema_salad/tests/.coverage
@@ -0,0 +1 @@
+!coverage.py: This is a private format, don't read it directly!{"lines": {"/home/peter/work/salad/schema_salad/validate.py": [1, 2, 3, 4, 5, 6, 7, 9, 10, 12, 13, 15, 19, 20, 21, 22, 25, 26, 27, 28, 29, 30, 31, 32, 33, 37, 38, 39, 41, 43, 44, 48, 51, 52, 54, 56, 57, 58, 60, 63, 64, 65, 66, 72, 73, 74, 75, 79, 80, 82, 83, 91, 92, 93, 94, 100, 109, 118, 119, 127, 128, 129, 131, 132, 133, 135, 136, 137, 138, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 155, 157, 158, 160, [...]
\ No newline at end of file
diff --git a/schema_salad/tests/cwl-pre.yml b/schema_salad/tests/cwl-pre.yml
new file mode 100644
index 0000000..41e3766
--- /dev/null
+++ b/schema_salad/tests/cwl-pre.yml
@@ -0,0 +1,2354 @@
+[
+    {
+        "name": "https://w3id.org/cwl/cwl#Common Workflow Language, v1.0",
+        "type": "documentation",
+        "doc": "\n"
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#PrimitiveType",
+        "type": "enum",
+        "symbols": [
+            "https://w3id.org/cwl/salad#null",
+            "http://www.w3.org/2001/XMLSchema#boolean",
+            "http://www.w3.org/2001/XMLSchema#int",
+            "http://www.w3.org/2001/XMLSchema#long",
+            "http://www.w3.org/2001/XMLSchema#float",
+            "http://www.w3.org/2001/XMLSchema#double",
+            "http://www.w3.org/2001/XMLSchema#string"
+        ],
+        "doc": [
+            "Salad data types are based on Avro schema declarations.  Refer to the\n[Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for\ndetailed information.\n",
+            "null: no value",
+            "boolean: a binary value",
+            "int: 32-bit signed integer",
+            "long: 64-bit signed integer",
+            "float: single precision (32-bit) IEEE 754 floating-point number",
+            "double: double precision (64-bit) IEEE 754 floating-point number",
+            "string: Unicode character sequence"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#Any",
+        "type": "enum",
+        "symbols": [
+            "https://w3id.org/cwl/salad#Any"
+        ],
+        "doc": "The **Any** type validates for any non-null value.\n"
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#RecordField",
+        "type": "record",
+        "doc": "A field of a record.",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#RecordField/name",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "The name of the field\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#RecordField/doc",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "A documentation string for this field\n",
+                "jsonldPredicate": "rdfs:comment"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#RecordField/type",
+                "type": [
+                    "PrimitiveType",
+                    "RecordSchema",
+                    "EnumSchema",
+                    "ArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "PrimitiveType",
+                            "RecordSchema",
+                            "EnumSchema",
+                            "ArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "doc": "The field type\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#RecordSchema",
+        "type": "record",
+        "fields": [
+            {
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "RecordField"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#fields",
+                    "mapSubject": "name",
+                    "mapPredicate": "type"
+                },
+                "doc": "Defines the fields of the record.",
+                "name": "https://w3id.org/cwl/salad#RecordSchema/fields"
+            },
+            {
+                "doc": "Must be `record`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#record"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "name": "https://w3id.org/cwl/salad#RecordSchema/type"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#EnumSchema",
+        "type": "record",
+        "doc": "Define an enumerated type.\n",
+        "fields": [
+            {
+                "type": {
+                    "type": "array",
+                    "items": "string"
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#symbols",
+                    "_type": "@id",
+                    "identity": true
+                },
+                "doc": "Defines the set of valid symbols.",
+                "name": "https://w3id.org/cwl/salad#EnumSchema/symbols"
+            },
+            {
+                "doc": "Must be `enum`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#enum"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "name": "https://w3id.org/cwl/salad#EnumSchema/type"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#ArraySchema",
+        "type": "record",
+        "fields": [
+            {
+                "type": [
+                    "PrimitiveType",
+                    "RecordSchema",
+                    "EnumSchema",
+                    "ArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "PrimitiveType",
+                            "RecordSchema",
+                            "EnumSchema",
+                            "ArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#items",
+                    "_type": "@vocab",
+                    "refScope": 2
+                },
+                "doc": "Defines the type of the array elements.",
+                "name": "https://w3id.org/cwl/salad#ArraySchema/items"
+            },
+            {
+                "doc": "Must be `array`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#array"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "name": "https://w3id.org/cwl/salad#ArraySchema/type"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#BaseTypesDoc",
+        "type": "documentation",
+        "doc": "## Base types\n",
+        "docChild": [
+            "https://w3id.org/cwl/cwl#CWLType",
+            "https://w3id.org/cwl/cwl#Process"
+        ]
+    },
+    {
+        "type": "enum",
+        "name": "https://w3id.org/cwl/cwl#CWLVersion",
+        "doc": "Version symbols for published CWL document versions.",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#draft-2",
+            "https://w3id.org/cwl/cwl#draft-3.dev1",
+            "https://w3id.org/cwl/cwl#draft-3.dev2",
+            "https://w3id.org/cwl/cwl#draft-3.dev3",
+            "https://w3id.org/cwl/cwl#draft-3.dev4",
+            "https://w3id.org/cwl/cwl#draft-3.dev5",
+            "https://w3id.org/cwl/cwl#draft-3",
+            "https://w3id.org/cwl/cwl#draft-4.dev1",
+            "https://w3id.org/cwl/cwl#draft-4.dev2",
+            "https://w3id.org/cwl/cwl#draft-4.dev3",
+            "https://w3id.org/cwl/cwl#v1.0.dev4",
+            "https://w3id.org/cwl/cwl#v1.0"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CWLType",
+        "type": "enum",
+        "extends": "https://w3id.org/cwl/salad#PrimitiveType",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#File",
+            "https://w3id.org/cwl/cwl#Directory"
+        ],
+        "doc": [
+            "Extends primitive types with the concept of a file and directory as a builtin type.",
+            "File: A File object",
+            "Directory: A Directory object"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#File",
+        "type": "record",
+        "docParent": "https://w3id.org/cwl/cwl#CWLType",
+        "doc": "Represents a file (or group of files if `secondaryFiles` is specified) that\nmust be accessible by tools using standard POSIX file system call API such as\nopen(2) and read(2).\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#File/class",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/cwl#File"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                },
+                "doc": "Must be `File` to indicate this object describes a file."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/location",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "An IRI that identifies the file resource.  This may be a relative\nreference, in which case it must be resolved using the base IRI of the\ndocument.  The location may refer to a local or remote resource; the\nimplementation must use the IRI to retrieve file content.  If an\nimplementation is unable to retrieve the file content stored at a\nremote resource (due to unsupported protocol, access denied, or other\nissue) it must signal an error.\n\nIf the `location` fi [...]
+                "jsonldPredicate": {
+                    "_id": "@id",
+                    "_type": "@id"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/path",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The local host path where the File is available when a CommandLineTool is\nexecuted.  This field must be set by the implementation.  The final\npath component must match the value of `basename`.  This field\nmust not be used in any other context.  The command line tool being\nexecuted must be able to to access the file at `path` using the POSIX\n`open(2)` syscall.\n\nAs a special case, if the `path` field is provided but the `location`\nfield is not, an implementa [...]
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#path",
+                    "_type": "@id"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/basename",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The base name of the file, that is, the name of the file without any\nleading directory path.  The base name must not contain a slash `/`.\n\nIf not provided, the implementation must set this field based on the\n`location` field by taking the final path component after parsing\n`location` as an IRI.  If `basename` is provided, it is not required to\nmatch the value from `location`.\n\nWhen this file is made available to a CommandLineTool, it must be named\nwith `b [...]
+                "jsonldPredicate": "cwl:basename"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/dirname",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The name of the directory containing file, that is, the path leading up\nto the final slash in the path such that `dirname + '/' + basename ==\npath`.\n\nThe implementation must set this field based on the value of `path`\nprior to evaluating parameter references or expressions in a\nCommandLineTool document.  This field must not be used in any other\ncontext.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/nameroot",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The basename root such that `nameroot + nameext == basename`, and\n`nameext` is empty or begins with a period and contains at most one\nperiod.  For the purposess of path splitting leading periods on the\nbasename are ignored; a basename of `.cshrc` will have a nameroot of\n`.cshrc`.\n\nThe implementation must set this field automatically based on the value\nof `basename` prior to evaluating parameter references or expressions.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/nameext",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The basename extension such that `nameroot + nameext == basename`, and\n`nameext` is empty or begins with a period and contains at most one\nperiod.  Leading periods on the basename are ignored; a basename of\n`.cshrc` will have an empty `nameext`.\n\nThe implementation must set this field automatically based on the value\nof `basename` prior to evaluating parameter references or expressions.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/checksum",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Optional hash code for validating file integrity.  Currently must be in the form\n\"sha1$ + hexadecimal string\" using the SHA-1 algorithm.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/size",
+                "type": [
+                    "null",
+                    "long"
+                ],
+                "doc": "Optional file size"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/secondaryFiles",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#File",
+                            "https://w3id.org/cwl/cwl#Directory"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": "cwl:secondaryFiles",
+                "doc": "A list of additional files that are associated with the primary file\nand must be transferred alongside the primary file.  Examples include\nindexes of the primary file, or external references which must be\nincluded when loading primary document.  A file object listed in\n`secondaryFiles` may itself include `secondaryFiles` for which the same\nrules apply.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/format",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#format",
+                    "_type": "@id",
+                    "identity": true
+                },
+                "doc": "The format of the file: this must be an IRI of a concept node that\nrepresents the file format, preferrably defined within an ontology.\nIf no ontology is available, file formats may be tested by exact match.\n\nReasoning about format compatability must be done by checking that an\ninput file format is the same, `owl:equivalentClass` or\n`rdfs:subClassOf` the format required by the input parameter.\n`owl:equivalentClass` is transitive with `rdfs:subClassOf`, e.g.  [...]
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#File/contents",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "File contents literal.  Maximum of 64 KiB.\n\nIf neither `location` nor `path` is provided, `contents` must be\nnon-null.  The implementation must assign a unique identifier for the\n`location` field.  When the file is staged as input to CommandLineTool,\nthe value of `contents` must be written to a file.\n\nIf `loadContents` of `inputBinding` or `outputBinding` is true and\n`location` is valid, the implementation must read up to the first 64\nKiB of text from the [...]
+            }
+        ]
+    },
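Read as a whole, the `File` record above is just a JSON mapping built from the fields defined here. As a rough illustration only (the paths, checksum, and format IRI below are invented, not taken from the schema), a populated File object could look like this Python literal:

```
# Hypothetical File object; every concrete value here is made up.
example_file = {
    "class": "File",
    "location": "file:///data/reads.fastq.gz",
    "basename": "reads.fastq.gz",
    "nameroot": "reads.fastq",   # nameroot + nameext == basename
    "nameext": ".gz",            # empty or starts with a single period
    "checksum": "sha1$da39a3ee5e6b4b0d3255bfef95601890afd80709",
    "size": 1048576,             # file size in bytes
    "format": "http://edamontology.org/format_1930",  # assumed FASTQ concept IRI
    "secondaryFiles": [
        {"class": "File", "location": "file:///data/reads.fastq.gz.md5"},
    ],
}
```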
+    {
+        "name": "https://w3id.org/cwl/cwl#Directory",
+        "type": "record",
+        "docAfter": "https://w3id.org/cwl/cwl#File",
+        "doc": "Represents a directory to present to a command line tool.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#Directory/class",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/cwl#Directory"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                },
+                "doc": "Must be `Directory` to indicate this object describes a Directory."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Directory/location",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "An IRI that identifies the directory resource.  This may be a relative\nreference, in which case it must be resolved using the base IRI of the\ndocument.  The location may refer to a local or remote resource.  If\nthe `listing` field is not set, the implementation must use the\nlocation IRI to retrieve directory listing.  If an implementation is\nunable to retrieve the directory listing stored at a remote resource (due to\nunsupported protocol, access denied, or o [...]
+                "jsonldPredicate": {
+                    "_id": "@id",
+                    "_type": "@id"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Directory/path",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The local path where the Directory is made available prior to executing a\nCommandLineTool.  This must be set by the implementation.  This field\nmust not be used in any other context.  The command line tool being\nexecuted must be able to to access the directory at `path` using the POSIX\n`opendir(2)` syscall.\n\nIf the `path` contains [POSIX shell metacharacters](http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)\n(`|`,`&`, `;`,  [...]
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#path",
+                    "_type": "@id"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Directory/basename",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The base name of the directory, that is, the name of the file without any\nleading directory path.  The base name must not contain a slash `/`.\n\nIf not provided, the implementation must set this field based on the\n`location` field by taking the final path component after parsing\n`location` as an IRI.  If `basename` is provided, it is not required to\nmatch the value from `location`.\n\nWhen this file is made available to a CommandLineTool, it must be named\nwi [...]
+                "jsonldPredicate": "cwl:basename"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Directory/listing",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#File",
+                            "https://w3id.org/cwl/cwl#Directory"
+                        ]
+                    }
+                ],
+                "doc": "List of files or subdirectories contained in this directory.  The name\nof each file or subdirectory is determined by the `basename` field of\neach `File` or `Directory` object.  It is an error if a `File` shares a\n`basename` with any other entry in `listing`.  If two or more\n`Directory` object share the same `basename`, this must be treated as\nequivalent to a single subdirectory with the listings recursively\nmerged.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#listing"
+                }
+            }
+        ]
+    },
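Similarly, a `Directory` is a mapping whose optional `listing` nests further File and Directory objects. A minimal invented sketch:

```
# Hypothetical Directory object; locations and names are invented.
example_dir = {
    "class": "Directory",
    "location": "file:///data/reference",
    "basename": "reference",
    "listing": [
        {"class": "File", "location": "file:///data/reference/genome.fa"},
        {"class": "Directory", "location": "file:///data/reference/index"},
    ],
}
```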
+    {
+        "name": "https://w3id.org/cwl/cwl#SchemaBase",
+        "type": "record",
+        "abstract": true,
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#SchemaBase/label",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "rdfs:label",
+                "doc": "A short, human-readable label of this object."
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#Parameter",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#SchemaBase",
+        "abstract": true,
+        "doc": "Define an input or output parameter to a process.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#Parameter/secondaryFiles",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression",
+                    {
+                        "type": "array",
+                        "items": [
+                            "string",
+                            "https://w3id.org/cwl/cwl#Expression"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": "cwl:secondaryFiles",
+                "doc": "Only valid when `type: File` or is an array of `items: File`.\n\nDescribes files that must be included alongside the primary file(s).\n\nIf the value is an expression, the value of `self` in the expression\nmust be the primary input or output File to which this binding applies.\n\nIf the value is a string, it specifies that the following pattern\nshould be applied to the primary file:\n\n  1. If string begins with one or more caret `^` characters, for each\n    ca [...]
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Parameter/format",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    },
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#format",
+                    "_type": "@id",
+                    "identity": true
+                },
+                "doc": "Only valid when `type: File` or is an array of `items: File`.\n\nFor input parameters, this must be one or more IRIs of concept nodes\nthat represents file formats which are allowed as input to this\nparameter, preferrably defined within an ontology.  If no ontology is\navailable, file formats may be tested by exact match.\n\nFor output parameters, this is the file format that will be assigned to\nthe output parameter.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Parameter/streamable",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "Only valid when `type: File` or is an array of `items: File`.\n\nA value of `true` indicates that the file is read or written\nsequentially without seeking.  An implementation may use this flag to\nindicate whether it is valid to stream file contents using a named\npipe.  Default: `false`.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Parameter/doc",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "A documentation string for this type, or an array of strings which should be concatenated.",
+                "jsonldPredicate": "rdfs:comment"
+            }
+        ]
+    },
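The string form of `Parameter/secondaryFiles` above applies a suffix pattern to the primary file: each leading caret strips one extension before the remainder of the pattern is appended. A minimal sketch of that rule, assuming a plain path and ignoring expressions and Directory handling:

```
import os

def expand_secondary_pattern(primary_path, pattern):
    # For each leading '^', drop one trailing extension from the primary
    # path, then append whatever is left of the pattern.
    while pattern.startswith("^"):
        primary_path, _ext = os.path.splitext(primary_path)
        pattern = pattern[1:]
    return primary_path + pattern

# expand_secondary_pattern("reads.bam", ".bai")  -> "reads.bam.bai"
# expand_secondary_pattern("reads.bam", "^.bai") -> "reads.bai"
```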
+    {
+        "type": "enum",
+        "name": "https://w3id.org/cwl/cwl#Expression",
+        "doc": "'Expression' is not a real type.  It indicates that a field must allow\nruntime parameter references.  If [InlineJavascriptRequirement](#InlineJavascriptRequirement)\nis declared and supported by the platform, the field must also allow\nJavascript expressions.\n",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#ExpressionPlaceholder"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputBinding",
+        "type": "record",
+        "abstract": true,
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InputBinding/loadContents",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "jsonldPredicate": "cwl:loadContents",
+                "doc": "Only valid when `type: File` or is an array of `items: File`.\n\nRead up to the first 64 KiB of text from the file and place it in the\n\"contents\" field of the file object for use by expressions.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputBinding",
+        "type": "record",
+        "abstract": true
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputSchema",
+        "extends": "https://w3id.org/cwl/cwl#SchemaBase",
+        "type": "record",
+        "abstract": true
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputSchema",
+        "extends": "https://w3id.org/cwl/cwl#SchemaBase",
+        "type": "record",
+        "abstract": true
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputRecordField",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/salad#RecordField",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#EnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#ArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#PrimitiveType",
+                "specializeTo": "https://w3id.org/cwl/cwl#CWLType"
+            }
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InputRecordField/inputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#InputBinding"
+                ],
+                "jsonldPredicate": "cwl:inputBinding"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#InputRecordField/label",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "rdfs:label",
+                "doc": "A short, human-readable label of this process object."
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputRecordSchema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#RecordSchema",
+            "https://w3id.org/cwl/cwl#InputSchema"
+        ],
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordField",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputRecordField"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputEnumSchema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#EnumSchema",
+            "https://w3id.org/cwl/cwl#InputSchema"
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InputEnumSchema/inputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#InputBinding"
+                ],
+                "jsonldPredicate": "cwl:inputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputArraySchema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#ArraySchema",
+            "https://w3id.org/cwl/cwl#InputSchema"
+        ],
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#EnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#ArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#InputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#PrimitiveType",
+                "specializeTo": "https://w3id.org/cwl/cwl#CWLType"
+            }
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InputArraySchema/inputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#InputBinding"
+                ],
+                "jsonldPredicate": "cwl:inputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputRecordField",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/salad#RecordField",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#EnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#ArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#PrimitiveType",
+                "specializeTo": "https://w3id.org/cwl/cwl#CWLType"
+            }
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#OutputRecordField/outputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#OutputBinding"
+                ],
+                "jsonldPredicate": "cwl:outputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputRecordSchema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#RecordSchema",
+            "https://w3id.org/cwl/cwl#OutputSchema"
+        ],
+        "docParent": "https://w3id.org/cwl/cwl#OutputParameter",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordField",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputRecordField"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputEnumSchema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#EnumSchema",
+            "https://w3id.org/cwl/cwl#OutputSchema"
+        ],
+        "docParent": "https://w3id.org/cwl/cwl#OutputParameter",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#OutputEnumSchema/outputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#OutputBinding"
+                ],
+                "jsonldPredicate": "cwl:outputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputArraySchema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#ArraySchema",
+            "https://w3id.org/cwl/cwl#OutputSchema"
+        ],
+        "docParent": "https://w3id.org/cwl/cwl#OutputParameter",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#EnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#ArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#OutputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/salad#PrimitiveType",
+                "specializeTo": "https://w3id.org/cwl/cwl#CWLType"
+            }
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#OutputArraySchema/outputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#OutputBinding"
+                ],
+                "jsonldPredicate": "cwl:outputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InputParameter",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#Parameter",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InputParameter/id",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "The unique identifier for this parameter object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#InputParameter/inputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#InputBinding"
+                ],
+                "jsonldPredicate": "cwl:inputBinding",
+                "doc": "Describes how to handle the inputs of a process and convert them\ninto a concrete form for execution, such as command line parameters.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#InputParameter/default",
+                "type": [
+                    "null",
+                    "Any"
+                ],
+                "jsonldPredicate": "cwl:default",
+                "doc": "The default value for this parameter if not provided in the input\nobject.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#InputParameter/type",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#CWLType",
+                    "https://w3id.org/cwl/cwl#InputRecordSchema",
+                    "https://w3id.org/cwl/cwl#InputEnumSchema",
+                    "https://w3id.org/cwl/cwl#InputArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#CWLType",
+                            "https://w3id.org/cwl/cwl#InputRecordSchema",
+                            "https://w3id.org/cwl/cwl#InputEnumSchema",
+                            "https://w3id.org/cwl/cwl#InputArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "refScope": 2,
+                    "typeDSL": true
+                },
+                "doc": "Specify valid types of data that may be assigned to this parameter.\n"
+            }
+        ]
+    },
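`typeDSL: true` on the `type` predicate enables the shorthand in which a trailing `?` unions the type with `"null"` and a trailing `[]` wraps it in an array schema. A rough sketch of that expansion (the real resolver in schema-salad also handles nested and already-expanded forms):

```
def expand_type_dsl(t):
    # "File"    -> "File"
    # "File?"   -> ["null", "File"]
    # "File[]"  -> {"type": "array", "items": "File"}
    # "File[]?" -> ["null", {"type": "array", "items": "File"}]
    optional = t.endswith("?")
    if optional:
        t = t[:-1]
    if t.endswith("[]"):
        t = {"type": "array", "items": t[:-2]}
    return ["null", t] if optional else t
```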
+    {
+        "name": "https://w3id.org/cwl/cwl#OutputParameter",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#Parameter",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#OutputParameter/id",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "The unique identifier for this parameter object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#OutputParameter/outputBinding",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#OutputBinding"
+                ],
+                "jsonldPredicate": "cwl:outputBinding",
+                "doc": "Describes how to handle the outputs of a process.\n"
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "abstract": true,
+        "doc": "A process requirement declares a prerequisite that may or must be fulfilled\nbefore executing a process.  See [`Process.hints`](#process) and\n[`Process.requirements`](#process).\n\nProcess requirements are the primary mechanism for specifying extensions to\nthe CWL core specification.\n"
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#Process",
+        "abstract": true,
+        "doc": "\nThe base executable type in CWL is the `Process` object defined by the\ndocument.  Note that the `Process` object is abstract and cannot be\ndirectly executed.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/id",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "@id",
+                "doc": "The unique identifier for this process object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/inputs",
+                "type": {
+                    "type": "array",
+                    "items": "https://w3id.org/cwl/cwl#InputParameter"
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#inputs",
+                    "mapSubject": "id",
+                    "mapPredicate": "type"
+                },
+                "doc": "Defines the input parameters of the process.  The process is ready to\nrun when all required input parameters are associated with concrete\nvalues.  Input parameters include a schema for each parameter which is\nused to validate the input object.  It may also be used to build a user\ninterface for constructing the input object.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/outputs",
+                "type": {
+                    "type": "array",
+                    "items": "https://w3id.org/cwl/cwl#OutputParameter"
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#outputs",
+                    "mapSubject": "id",
+                    "mapPredicate": "type"
+                },
+                "doc": "Defines the parameters representing the output of the process.  May be\nused to generate and/or validate the output object.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/requirements",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "https://w3id.org/cwl/cwl#ProcessRequirement"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#requirements",
+                    "mapSubject": "class"
+                },
+                "doc": "Declares requirements that apply to either the runtime environment or the\nworkflow engine that must be met in order to execute this process.  If\nan implementation cannot satisfy all requirements, or a requirement is\nlisted which is not recognized by the implementation, it is a fatal\nerror and the implementation must not attempt to run the process,\nunless overridden at user option.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/hints",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "Any"
+                    }
+                ],
+                "doc": "Declares hints applying to either the runtime environment or the\nworkflow engine that may be helpful in executing this process.  It is\nnot an error if an implementation cannot satisfy all hints, however\nthe implementation may report a warning.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#hints",
+                    "noLinkCheck": true,
+                    "mapSubject": "class"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/label",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "rdfs:label",
+                "doc": "A short, human-readable label of this process object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/doc",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "rdfs:comment",
+                "doc": "A long, human-readable description of this process object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Process/cwlVersion",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#CWLVersion"
+                ],
+                "doc": "CWL document version. Always required at the document root. Not\nrequired for a Process embedded inside another Process.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#cwlVersion",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    },
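`mapSubject: id` / `mapPredicate: type` on `inputs` and `outputs` mean that a mapping keyed by parameter id is accepted as shorthand for the list form, with a bare string value standing in for the `type` field. A small sketch of that normalization, not schema-salad's actual implementation:

```
def params_to_list(params):
    # Accept either the list form or the id-keyed map form and
    # return the list form.
    if isinstance(params, list):
        return params
    result = []
    for ident, value in params.items():
        entry = {"type": value} if isinstance(value, str) else dict(value)
        entry["id"] = ident
        result.append(entry)
    return result

# params_to_list({"message": "string"})
#   -> [{"type": "string", "id": "message"}]
```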
+    {
+        "name": "https://w3id.org/cwl/cwl#InlineJavascriptRequirement",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Indicates that the workflow platform must support inline Javascript expressions.\nIf this requirement is not present, the workflow platform must not perform expression\ninterpolatation.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InlineJavascriptRequirement/class",
+                "type": "string",
+                "doc": "Always 'InlineJavascriptRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#InlineJavascriptRequirement/expressionLib",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "Additional code fragments that will also be inserted\nbefore executing the expression code.  Allows for function definitions that may\nbe called from CWL expressions.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#SchemaDefRequirement",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "This field consists of an array of type definitions which must be used when\ninterpreting the `inputs` and `outputs` fields.  When a `type` field\ncontain a IRI, the implementation must check if the type is defined in\n`schemaDefs` and use that definition.  If the type is not found in\n`schemaDefs`, it is an error.  The entries in `schemaDefs` must be\nprocessed in the order listed such that later schema definitions may refer\nto earlier schema definitions.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#SchemaDefRequirement/class",
+                "type": "string",
+                "doc": "Always 'SchemaDefRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#SchemaDefRequirement/types",
+                "type": {
+                    "type": "array",
+                    "items": "https://w3id.org/cwl/cwl#InputSchema"
+                },
+                "doc": "The list of type definitions."
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandLineToolDoc",
+        "type": "documentation",
+        "doc": [
+            "# Common Workflow Language (CWL) Command Line Tool Description, v1.0\n\nThis version:\n  * https://w3id.org/cwl/v1.0/\n\nCurrent version:\n  * https://w3id.org/cwl/\n",
+            "\n\n",
+            "\n",
+            "\n\n",
+            "# Abstract\n\nA Command Line Tool is a non-interactive executable program that reads\nsome input, performs a computation, and terminates after producing some\noutput.  Command line programs are a flexible unit of code sharing and\nreuse, unfortunately the syntax and input/output semantics among command\nline programs is extremely heterogeneous. A common layer for describing\nthe syntax and semantics of programs can reduce this incidental\ncomplexity by providing a consistent [...]
+            "\n",
+            "## Introduction to v1.0\n\nThis specification represents the first full release from the CWL group.\nSince draft-3, version 1.0 introduces the following changes and additions:\n\n  * The [Directory](#Directory) type.\n  * Syntax simplifcations: denoted by the `map<>` syntax. Example: inputs\n    contains a list of items, each with an id. Now one can specify\n    a mapping of that identifier to the corresponding\n    `CommandInputParamater`.\n    ```\n    inputs:\n     - id:  [...]
+            "\n",
+            "\n"
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#EnvironmentDef",
+        "doc": "Define an environment variable that will be set in the runtime environment\nby the workflow platform when executing the command line tool.  May be the\nresult of executing an expression, such as getting a parameter from input.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#EnvironmentDef/envName",
+                "type": "string",
+                "doc": "The environment variable name"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#EnvironmentDef/envValue",
+                "type": [
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "The environment variable value"
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#CommandLineBinding",
+        "extends": "https://w3id.org/cwl/cwl#InputBinding",
+        "doc": "\nWhen listed under `inputBinding` in the input schema, the term\n\"value\" refers to the the corresponding value in the input object.  For\nbinding objects listed in `CommandLineTool.arguments`, the term \"value\"\nrefers to the effective value after evaluating `valueFrom`.\n\nThe binding behavior when building the command line depends on the data\ntype of the value.  If there is a mismatch between the type described by\nthe input schema and the effective value, such as  [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineBinding/position",
+                "type": [
+                    "null",
+                    "int"
+                ],
+                "doc": "The sorting key.  Default position is 0."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineBinding/prefix",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Command line prefix to add before the value."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineBinding/separate",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If true (default), then the prefix and value must be added as separate\ncommand line arguments; if false, prefix and value must be concatenated\ninto a single command line argument.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineBinding/itemSeparator",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Join the array elements into a single string with the elements\nseparated by by `itemSeparator`.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineBinding/valueFrom",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": "cwl:valueFrom",
+                "doc": "If `valueFrom` is a constant string value, use this as the value and\napply the binding rules above.\n\nIf `valueFrom` is an expression, evaluate the expression to yield the\nactual value to use to build the command line and apply the binding\nrules above.  If the inputBinding is associated with an input\nparameter, the value of `self` in the expression will be the value of the\ninput parameter.\n\nWhen a binding is part of the `CommandLineTool.arguments` field,\n [...]
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineBinding/shellQuote",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If `ShellCommandRequirement` is in the requirements for the current command,\nthis controls whether the value is quoted on the command line (default is true).\nUse `shellQuote: false` to inject metacharacters for operations such as pipes.\n"
+            }
+        ]
+    },
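As a toy illustration of how `prefix`, `separate`, and `itemSeparator` combine for a single binding (ignoring `position` sorting, expressions, and the per-type rules in the truncated part of the doc above), here is an invented helper:

```
def bind_to_args(value, prefix=None, separate=True, item_separator=None):
    # Join array values with itemSeparator when given, then attach the
    # prefix either as its own argument or glued onto the value.
    if isinstance(value, list) and item_separator is not None:
        value = item_separator.join(str(v) for v in value)
    if prefix is None:
        return [str(value)]
    if separate:
        return [prefix, str(value)]
    return [prefix + str(value)]

# bind_to_args(4, prefix="-t")                              -> ["-t", "4"]
# bind_to_args(4, prefix="-t", separate=False)              -> ["-t4"]
# bind_to_args([1, 2, 3], prefix="-I", item_separator=",")  -> ["-I", "1,2,3"]
```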
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#CommandOutputBinding",
+        "extends": "https://w3id.org/cwl/cwl#OutputBinding",
+        "doc": "Describes how to generate an output parameter based on the files produced\nby a CommandLineTool.\n\nThe output parameter is generated by applying these operations in\nthe following order:\n\n  - glob\n  - loadContents\n  - outputEval\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandOutputBinding/glob",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "Find files relative to the output directory, using POSIX glob(3)\npathname matching.  If an array is provided, find files that match any\npattern in the array.  If an expression is provided, the expression must\nreturn a string or an array of strings, which will then be evaluated as\none or more glob patterns.  Must only match and return files which\nactually exist.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandOutputBinding/loadContents",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "jsonldPredicate": "cwl:loadContents",
+                "doc": "For each file matched in `glob`, read up to\nthe first 64 KiB of text from the file and place it in the `contents`\nfield of the file object for manipulation by `outputEval`.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandOutputBinding/outputEval",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Evaluate an expression to generate the output value.  If `glob` was\nspecified, the value of `self` must be an array containing file objects\nthat were matched.  If no files were matched, `self` must be a zero\nlength array; if a single file was matched, the value of `self` is an\narray of a single element.  Additionally, if `loadContents` is `true`,\nthe File objects must include up to the first 64 KiB of file contents\nin the `contents` field.\n"
+            }
+        ]
+    },
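The documented order of operations for `CommandOutputBinding` (glob, then loadContents, then outputEval) can be sketched as follows; this assumes `glob` has already been normalized to a list of plain patterns and leaves expression evaluation out of scope:

```
import glob
import os

def collect_outputs(binding, outdir):
    # 1. glob: find matching files relative to the output directory.
    found = []
    for pattern in binding.get("glob", []):
        for path in sorted(glob.glob(os.path.join(outdir, pattern))):
            found.append({"class": "File", "path": path})
    # 2. loadContents: read up to the first 64 KiB of each match.
    if binding.get("loadContents"):
        for f in found:
            with open(f["path"]) as handle:
                f["contents"] = handle.read(64 * 1024)
    # 3. outputEval would run here with `self` bound to `found`;
    #    expression evaluation is omitted from this sketch.
    return found
```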
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandInputRecordField",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#InputRecordField",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputRecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputEnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandLineBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandInputRecordSchema",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#InputRecordSchema",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputRecordField",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputRecordField"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandInputEnumSchema",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#InputEnumSchema",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandLineBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandInputArraySchema",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#InputArraySchema",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputRecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputEnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandLineBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandOutputRecordField",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#OutputRecordField",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandOutputRecordSchema",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#OutputRecordSchema",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputRecordField",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputRecordField"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandOutputEnumSchema",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#OutputEnumSchema",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputBinding"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#CommandOutputArraySchema",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#OutputArraySchema",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputBinding"
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#CommandInputParameter",
+        "extends": "https://w3id.org/cwl/cwl#InputParameter",
+        "doc": "An input parameter for a CommandLineTool.",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputRecordSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputRecordSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputEnumSchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputEnumSchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputArraySchema",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputArraySchema"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandLineBinding"
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#CommandOutputParameter",
+        "extends": "https://w3id.org/cwl/cwl#OutputParameter",
+        "doc": "An output parameter for a CommandLineTool.",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputBinding",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputBinding"
+            }
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandOutputParameter/type",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#CWLType",
+                    "https://w3id.org/cwl/cwl#stdout",
+                    "https://w3id.org/cwl/cwl#stderr",
+                    "https://w3id.org/cwl/cwl#CommandOutputRecordSchema",
+                    "https://w3id.org/cwl/cwl#CommandOutputEnumSchema",
+                    "https://w3id.org/cwl/cwl#CommandOutputArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#CWLType",
+                            "https://w3id.org/cwl/cwl#CommandOutputRecordSchema",
+                            "https://w3id.org/cwl/cwl#CommandOutputEnumSchema",
+                            "https://w3id.org/cwl/cwl#CommandOutputArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "refScope": 2,
+                    "typeDSL": true
+                },
+                "doc": "Specify valid types of data that may be assigned to this parameter.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#stdout",
+        "type": "enum",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#stdout"
+        ],
+        "docParent": "https://w3id.org/cwl/cwl#CommandOutputParameter",
+        "doc": "Only valid as a `type` for a `CommandLineTool` output with no\n`outputBinding` set.\n\nThe following\n```\noutputs:\n   an_output_name:\n   type: stdout\n\nstdout: a_stdout_file\n```\nis equivalent to\n```\noutputs:\n  an_output_name:\n    type: File\n    streamable: true\n    outputBinding:\n      glob: a_stdout_file\n\nstdout: a_stdout_file\n```\n\nIf there is no `stdout` name provided, a random filename will be created.\nFor example, the following\n```\noutputs:\n  an_ [...]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#stderr",
+        "type": "enum",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#stderr"
+        ],
+        "docParent": "https://w3id.org/cwl/cwl#CommandOutputParameter",
+        "doc": "Only valid as a `type` for a `CommandLineTool` output with no\n`outputBinding` set.\n\nThe following\n```\noutputs:\n  an_output_name:\n  type: stderr\n\nstderr: a_stderr_file\n```\nis equivalent to\n```\noutputs:\n  an_output_name:\n    type: File\n    streamable: true\n    outputBinding:\n      glob: a_stderr_file\n\nstderr: a_stderr_file\n```\n\nIf there is no `stderr` name provided, a random filename will be created.\nFor example, the following\n```\noutputs:\n  an_ou [...]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#CommandLineTool",
+        "extends": "https://w3id.org/cwl/cwl#Process",
+        "documentRoot": true,
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#InputParameter",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandInputParameter"
+            },
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputParameter",
+                "specializeTo": "https://w3id.org/cwl/cwl#CommandOutputParameter"
+            }
+        ],
+        "doc": "This defines the schema of the CWL Command Line Tool Description document.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/class",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                },
+                "type": "string"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/baseCommand",
+                "doc": "Specifies the program to execute.  If an array, the first element of\nthe array is the command to execute, and subsequent elements are\nmandatory command line arguments.  The elements in `baseCommand` must\nappear before any command line bindings from `inputBinding` or\n`arguments`.\n\nIf `baseCommand` is not provided or is an empty array, the first\nelement of the command line produced after processing `inputBinding` or\n`arguments` must be used as the program to [...]
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#baseCommand",
+                    "_container": "@list"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/arguments",
+                "doc": "Command line bindings which are not directly associated with input parameters.\n",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": [
+                            "string",
+                            "https://w3id.org/cwl/cwl#Expression",
+                            "https://w3id.org/cwl/cwl#CommandLineBinding"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#arguments",
+                    "_container": "@list"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/stdin",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "A path to a file whose contents must be piped into the command's\nstandard input stream.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/stderr",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": "https://w3id.org/cwl/cwl#stderr",
+                "doc": "Capture the command's standard error stream to a file written to\nthe designated output directory.\n\nIf `stderr` is a string, it specifies the file name to use.\n\nIf `stderr` is an expression, the expression is evaluated and must\nreturn a string with the file name to use to capture stderr.  If the\nreturn value is not a string, or the resulting path contains illegal\ncharacters (such as the path separator `/`) it is an error.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/stdout",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": "https://w3id.org/cwl/cwl#stdout",
+                "doc": "Capture the command's standard output stream to a file written to\nthe designated output directory.\n\nIf `stdout` is a string, it specifies the file name to use.\n\nIf `stdout` is an expression, the expression is evaluated and must\nreturn a string with the file name to use to capture stdout.  If the\nreturn value is not a string, or the resulting path contains illegal\ncharacters (such as the path separator `/`) it is an error.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/successCodes",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "int"
+                    }
+                ],
+                "doc": "Exit codes that indicate the process completed successfully.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/temporaryFailCodes",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "int"
+                    }
+                ],
+                "doc": "Exit codes that indicate the process failed due to a possibly\ntemporary condition, where executing the process with the same\nruntime environment and inputs may produce different results.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#CommandLineTool/permanentFailCodes",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "int"
+                    }
+                ],
+                "doc": "Exit codes that indicate the process failed due to a permanent logic error, where executing the process with the same runtime environment and same inputs is expected to always fail."
+            }
+        ]
+    },
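Putting the preceding records together, a minimal CommandLineTool document (invented for illustration; the equivalent YAML form reads the same) could be written as:

```
# A hypothetical "echo" tool using the fields defined above.
echo_tool = {
    "cwlVersion": "v1.0",
    "class": "CommandLineTool",
    "baseCommand": "echo",
    "stdout": "echo_out.txt",
    "inputs": {
        "message": {"type": "string", "inputBinding": {"position": 1}},
    },
    "outputs": {
        "echoed": {"type": "stdout"},
    },
}
```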
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#DockerRequirement",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Indicates that a workflow component should be run in a\n[Docker](http://docker.com) container, and specifies how to fetch or build\nthe image.\n\nIf a CommandLineTool lists `DockerRequirement` under\n`hints` (or `requirements`), it may (or must) be run in the specified Docker\ncontainer.\n\nThe platform must first acquire or install the correct Docker image as\nspecified by `dockerPull`, `dockerImport`, `dockerLoad` or `dockerFile`.\n\nThe platform must execute the tool i [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/class",
+                "type": "string",
+                "doc": "Always 'DockerRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/dockerPull",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Specify a Docker image to retrieve using `docker pull`."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/dockerLoad",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Specify a HTTP URL from which to download a Docker image using `docker load`."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/dockerFile",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Supply the contents of a Dockerfile which will be built using `docker build`."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/dockerImport",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Provide HTTP URL to download and gunzip a Docker images using `docker import."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/dockerImageId",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The image id that will be used for `docker run`.  May be a\nhuman-readable image name or the image identifier hash.  May be skipped\nif `dockerPull` is specified, in which case the `dockerPull` image id\nmust be used.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#DockerRequirement/dockerOutputDirectory",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Set the designated output directory to a specific location inside the\nDocker container.\n"
+            }
+        ]
+    },
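
For illustration, a DockerRequirement hint covering the fields above might look like the following Python dict; the image name and output directory are placeholder values, not taken from the schema:

```
# Hypothetical DockerRequirement hint, written as a Python dict for illustration.
docker_requirement = {
    "class": "DockerRequirement",
    "dockerPull": "debian:8",                    # image acquired with `docker pull`
    "dockerOutputDirectory": "/var/spool/cwl",   # designated output dir inside the container
}
```
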
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#SoftwareRequirement",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "A list of software packages that should be configured in the environment of\nthe defined process.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#SoftwareRequirement/class",
+                "type": "string",
+                "doc": "Always 'SoftwareRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#SoftwareRequirement/packages",
+                "type": {
+                    "type": "array",
+                    "items": "https://w3id.org/cwl/cwl#SoftwarePackage"
+                },
+                "doc": "The list of software to be configured.",
+                "jsonldPredicate": {
+                    "mapSubject": "package",
+                    "mapPredicate": "specs"
+                }
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#SoftwarePackage",
+        "type": "record",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#SoftwarePackage/package",
+                "type": "string",
+                "doc": "The common name of the software to be configured."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#SoftwarePackage/version",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "The (optional) version of the software to configured."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#SoftwarePackage/specs",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "Must be one or more IRIs identifying resources for installing or\nenabling the software.  Implementations may provide resolvers which map\nwell-known software spec IRIs to some configuration action.\n\nFor example, an IRI `https://packages.debian.org/jessie/bowtie` could\nbe resolved with `apt-get install bowtie`.  An IRI\n`https://anaconda.org/bioconda/bowtie` could be resolved with `conda\ninstall -c bioconda bowtie`.\n\nTools may also provide IRIs to index entr [...]
+            }
+        ]
+    },
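
Following the SoftwareRequirement and SoftwarePackage records above, a hint for a single package could be sketched as below; the bowtie spec IRI echoes the example in the `specs` documentation, while the version number is invented:

```
# Hypothetical SoftwareRequirement hint as a Python dict.
software_requirement = {
    "class": "SoftwareRequirement",
    "packages": [
        {
            "package": "bowtie",
            "version": ["1.2.0"],  # placeholder version
            "specs": ["https://anaconda.org/bioconda/bowtie"],
        }
    ],
}
```
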
+    {
+        "name": "https://w3id.org/cwl/cwl#Dirent",
+        "type": "record",
+        "doc": "Define a file or subdirectory that must be placed in the designated output\ndirectory prior to executing the command line tool.  May be the result of\nexecuting an expression, such as building a configuration file from a\ntemplate.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#Dirent/entryname",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#entryname"
+                },
+                "doc": "The name of the file or subdirectory to create in the output directory.\nIf `entry` is a File or Directory, this overrides `basename`.  Optional.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Dirent/entry",
+                "type": [
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#entry"
+                },
+                "doc": "If the value is a string literal or an expression which evaluates to a\nstring, a new file must be created with the string as the file contents.\n\nIf the value is an expression that evaluates to a `File` object, this\nindicates the referenced file should be added to the designated output\ndirectory prior to executing the tool.\n\nIf the value is an expression that evaluates to a `Dirent` object, this\nindicates that the File or Directory in `entry` should be adde [...]
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Dirent/writable",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If true, the file or directory must be writable by the tool.  Changes\nto the file or directory must be isolated and not visible by any other\nCommandLineTool process.  This may be implemented by making a copy of\nthe original file or directory.  Default false (files and directories\nread-only by default).\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#InitialWorkDirRequirement",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Define a list of files and subdirectories that must be created by the workflow platform in the designated output directory prior to executing the command line tool.",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#InitialWorkDirRequirement/class",
+                "type": "string",
+                "doc": "InitialWorkDirRequirement",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#InitialWorkDirRequirement/listing",
+                "type": [
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#File",
+                            "https://w3id.org/cwl/cwl#Directory",
+                            "https://w3id.org/cwl/cwl#Dirent",
+                            "string",
+                            "https://w3id.org/cwl/cwl#Expression"
+                        ]
+                    },
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#listing"
+                },
+                "doc": "The list of files or subdirectories that must be placed in the\ndesignated output directory prior to executing the command line tool.\n\nMay be an expression.  If so, the expression return value must validate\nas `{type: array, items: [File, Directory]}`.\n"
+            }
+        ]
+    },
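
A sketch of an InitialWorkDirRequirement that stages one generated file through a Dirent, using the `listing`, `entryname`, `entry` and `writable` fields defined above; the file name and contents are invented:

```
# Hypothetical InitialWorkDirRequirement as a Python dict.
initial_workdir_requirement = {
    "class": "InitialWorkDirRequirement",
    "listing": [
        {
            "entryname": "settings.conf",          # file created in the output directory
            "entry": "threads=4\nverbose=true\n",  # literal file contents
            "writable": False,                     # read-only, the default
        }
    ],
}
```
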
+    {
+        "name": "https://w3id.org/cwl/cwl#EnvVarRequirement",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Define a list of environment variables which will be set in the\nexecution environment of the tool.  See `EnvironmentDef` for details.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#EnvVarRequirement/class",
+                "type": "string",
+                "doc": "Always 'EnvVarRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#EnvVarRequirement/envDef",
+                "type": {
+                    "type": "array",
+                    "items": "https://w3id.org/cwl/cwl#EnvironmentDef"
+                },
+                "doc": "The list of environment variables.",
+                "jsonldPredicate": {
+                    "mapSubject": "envName",
+                    "mapPredicate": "envValue"
+                }
+            }
+        ]
+    },
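
Because `envDef` carries `mapSubject: envName` and `mapPredicate: envValue`, an author may write it as an identifier map, which preprocessing expands into the array-of-records form. A sketch of the two equivalent shapes, with arbitrary example variable names:

```
# Hypothetical envDef value before and after identifier-map expansion.
env_def_map_form = {"LC_ALL": "C", "TMPDIR": "/tmp"}

env_def_array_form = [
    {"envName": "LC_ALL", "envValue": "C"},
    {"envName": "TMPDIR", "envValue": "/tmp"},
]
```
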
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#ShellCommandRequirement",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Modify the behavior of CommandLineTool to generate a single string\ncontaining a shell command line.  Each item in the argument list must be\njoined into a string separated by single spaces and quoted to prevent\nintepretation by the shell, unless `CommandLineBinding` for that argument\ncontains `shellQuote: false`.  If `shellQuote: false` is specified, the\nargument is joined into the command string without quoting, which allows\nthe use of shell metacharacters such as ` [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#ShellCommandRequirement/class",
+                "type": "string",
+                "doc": "Always 'ShellCommandRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#ResourceRequirement",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Specify basic hardware resource requirements.\n\n\"min\" is the minimum amount of a resource that must be reserved to schedule\na job. If \"min\" cannot be satisfied, the job should not be run.\n\n\"max\" is the maximum amount of a resource that the job shall be permitted\nto use. If a node has sufficient resources, multiple jobs may be scheduled\non a single node provided each job's \"max\" resource requirements are\nmet. If a job attempts to exceed its \"max\" resource  [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/class",
+                "type": "string",
+                "doc": "Always 'ResourceRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/coresMin",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Minimum reserved number of CPU cores"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/coresMax",
+                "type": [
+                    "null",
+                    "int",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Maximum reserved number of CPU cores"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/ramMin",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Minimum reserved RAM in mebibytes (2**20)"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/ramMax",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Maximum reserved RAM in mebibytes (2**20)"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/tmpdirMin",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Minimum reserved filesystem based storage for the designated temporary directory, in mebibytes (2**20)"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/tmpdirMax",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Maximum reserved filesystem based storage for the designated temporary directory, in mebibytes (2**20)"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/outdirMin",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Minimum reserved filesystem based storage for the designated output directory, in mebibytes (2**20)"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ResourceRequirement/outdirMax",
+                "type": [
+                    "null",
+                    "long",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "Maximum reserved filesystem based storage for the designated output directory, in mebibytes (2**20)"
+            }
+        ]
+    },
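
Combining the min/max semantics above, a ResourceRequirement that reserves at least two cores and 4 GiB of RAM might be sketched as follows; the numbers are arbitrary:

```
# Hypothetical ResourceRequirement as a Python dict.
resource_requirement = {
    "class": "ResourceRequirement",
    "coresMin": 2,      # schedule only on nodes that can reserve 2 cores
    "ramMin": 4096,     # mebibytes (2**20 bytes), i.e. 4 GiB
    "tmpdirMin": 1024,  # mebibytes of temporary storage
}
```
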
+    {
+        "name": "https://w3id.org/cwl/cwl#WorkflowDoc",
+        "type": "documentation",
+        "doc": [
+            "# Common Workflow Language (CWL) Workflow Description, v1.0\n\nThis version:\n  * https://w3id.org/cwl/v1.0/\n\nCurrent version:\n  * https://w3id.org/cwl/\n",
+            "\n\n",
+            "\n",
+            "\n\n",
+            "# Abstract\n\nOne way to define a workflow is: an analysis task represented by a\ndirected graph describing a sequence of operations that transform an\ninput data set to output. This specification defines the Common Workflow\nLanguage (CWL) Workflow description, a vendor-neutral standard for\nrepresenting workflows intended to be portable across a variety of\ncomputing platforms.\n",
+            "\n",
+            "\n## Introduction to v1.0\n\nThis specification represents the first full release from the CWL group.\nSince draft-3, this draft introduces the following changes and additions:\n\n  * The `inputs` and `outputs` fields have been renamed `in` and `out`.\n  * Syntax simplifcations: denoted by the `map<>` syntax. Example: `in`\n    contains a list of items, each with an id. Now one can specify\n    a mapping of that identifier to the corresponding\n    `InputParameter`.\n    ``` [...]
+            "\n"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#ExpressionToolOutputParameter",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#OutputParameter",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#ExpressionToolOutputParameter/type",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#CWLType",
+                    "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                    "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                    "https://w3id.org/cwl/cwl#OutputArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#CWLType",
+                            "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                            "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                            "https://w3id.org/cwl/cwl#OutputArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "refScope": 2,
+                    "typeDSL": true
+                },
+                "doc": "Specify valid types of data that may be assigned to this parameter.\n"
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#ExpressionTool",
+        "extends": "https://w3id.org/cwl/cwl#Process",
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputParameter",
+                "specializeTo": "https://w3id.org/cwl/cwl#ExpressionToolOutputParameter"
+            }
+        ],
+        "documentRoot": true,
+        "doc": "Execute an expression as a Workflow step.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#ExpressionTool/class",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                },
+                "type": "string"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#ExpressionTool/expression",
+                "type": [
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "doc": "The expression to execute.  The expression must return a JSON object which\nmatches the output parameters of the ExpressionTool.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#LinkMergeMethod",
+        "type": "enum",
+        "docParent": "https://w3id.org/cwl/cwl#WorkflowStepInput",
+        "doc": "The input link merge method, described in [WorkflowStepInput](#WorkflowStepInput).",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#LinkMergeMethod/merge_nested",
+            "https://w3id.org/cwl/cwl#LinkMergeMethod/merge_flattened"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#WorkflowOutputParameter",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#OutputParameter",
+        "docParent": "https://w3id.org/cwl/cwl#Workflow",
+        "doc": "Describe an output parameter of a workflow.  The parameter must be\nconnected to one or more parameters defined in the workflow that will\nprovide the value of the output parameter.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowOutputParameter/outputSource",
+                "doc": "Specifies one or more workflow parameters that supply the value of to\nthe output parameter.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#outputSource",
+                    "_type": "@id",
+                    "refScope": 0
+                },
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ]
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowOutputParameter/linkMerge",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#LinkMergeMethod"
+                ],
+                "jsonldPredicate": "cwl:linkMerge",
+                "doc": "The method to use to merge multiple sources into a single array.\nIf not specified, the default method is \"merge_nested\".\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowOutputParameter/type",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#CWLType",
+                    "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                    "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                    "https://w3id.org/cwl/cwl#OutputArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "https://w3id.org/cwl/cwl#CWLType",
+                            "https://w3id.org/cwl/cwl#OutputRecordSchema",
+                            "https://w3id.org/cwl/cwl#OutputEnumSchema",
+                            "https://w3id.org/cwl/cwl#OutputArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "refScope": 2,
+                    "typeDSL": true
+                },
+                "doc": "Specify valid types of data that may be assigned to this parameter.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#Sink",
+        "type": "record",
+        "abstract": true,
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#Sink/source",
+                "doc": "Specifies one or more workflow parameters that will provide input to\nthe underlying step parameter.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#source",
+                    "_type": "@id",
+                    "refScope": 2
+                },
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ]
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Sink/linkMerge",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#LinkMergeMethod"
+                ],
+                "jsonldPredicate": "cwl:linkMerge",
+                "doc": "The method to use to merge multiple inbound links into a single array.\nIf not specified, the default method is \"merge_nested\".\n"
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#WorkflowStepInput",
+        "extends": "https://w3id.org/cwl/cwl#Sink",
+        "docParent": "https://w3id.org/cwl/cwl#WorkflowStep",
+        "doc": "The input of a workflow step connects an upstream parameter (from the\nworkflow inputs, or the outputs of other workflows steps) with the input\nparameters of the underlying step.\n\n## Input object\n\nA WorkflowStepInput object must contain an `id` field in the form\n`#fieldname` or `#stepname.fieldname`.  When the `id` field contains a\nperiod `.` the field name consists of the characters following the final\nperiod.  This defines a field of the workflow step input obje [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStepInput/id",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "A unique identifier for this workflow input parameter."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStepInput/default",
+                "type": [
+                    "null",
+                    "Any"
+                ],
+                "doc": "The default value for this parameter if there is no `source`\nfield.\n",
+                "jsonldPredicate": "cwl:default"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStepInput/valueFrom",
+                "type": [
+                    "null",
+                    "string",
+                    "https://w3id.org/cwl/cwl#Expression"
+                ],
+                "jsonldPredicate": "cwl:valueFrom",
+                "doc": "To use valueFrom, [StepInputExpressionRequirement](#StepInputExpressionRequirement) must\nbe specified in the workflow or workflow step requirements.\n\nIf `valueFrom` is a constant string value, use this as the value for\nthis input parameter.\n\nIf `valueFrom` is a parameter reference or expression, it must be\nevaluated to yield the actual value to be assiged to the input field.\n\nThe `self` value of in the parameter reference or expression must be\nthe value  [...]
+            }
+        ]
+    },
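
To make the Sink and WorkflowStepInput fields concrete, a step input fed by two upstream sources and flattened into a single array could be sketched as follows; the identifiers are invented, and MultipleInputFeatureRequirement would be needed for multiple sources:

```
# Hypothetical WorkflowStepInput as a Python dict.
step_input = {
    "id": "reads",
    "source": ["trim/output", "filter/output"],  # two upstream parameters
    "linkMerge": "merge_flattened",              # flatten both source arrays into one
}
```
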
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#WorkflowStepOutput",
+        "docParent": "https://w3id.org/cwl/cwl#WorkflowStep",
+        "doc": "Associate an output parameter of the underlying process with a workflow\nparameter.  The workflow parameter (given in the `id` field) be may be used\nas a `source` to connect with input parameters of other workflow steps, or\nwith an output parameter of the process.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStepOutput/id",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "A unique identifier for this workflow output parameter.  This is the\nidentifier to use in the `source` field of `WorkflowStepInput` to\nconnect the output value to downstream parameters.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#ScatterMethod",
+        "type": "enum",
+        "docParent": "https://w3id.org/cwl/cwl#WorkflowStep",
+        "doc": "The scatter method, as described in [workflow step scatter](#WorkflowStep).",
+        "symbols": [
+            "https://w3id.org/cwl/cwl#ScatterMethod/dotproduct",
+            "https://w3id.org/cwl/cwl#ScatterMethod/nested_crossproduct",
+            "https://w3id.org/cwl/cwl#ScatterMethod/flat_crossproduct"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#WorkflowStep",
+        "type": "record",
+        "docParent": "https://w3id.org/cwl/cwl#Workflow",
+        "doc": "A workflow step is an executable element of a workflow.  It specifies the\nunderlying process implementation (such as `CommandLineTool` or another\n`Workflow`) in the `run` field and connects the input and output parameters\nof the underlying process to workflow parameters.\n\n# Scatter/gather\n\nTo use scatter/gather,\n[ScatterFeatureRequirement](#ScatterFeatureRequirement) must be specified\nin the workflow or workflow step requirements.\n\nA \"scatter\" operation speci [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/id",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "The unique identifier for this workflow step."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/in",
+                "type": {
+                    "type": "array",
+                    "items": "https://w3id.org/cwl/cwl#WorkflowStepInput"
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#in",
+                    "mapSubject": "id",
+                    "mapPredicate": "source"
+                },
+                "doc": "Defines the input parameters of the workflow step.  The process is ready to\nrun when all required input parameters are associated with concrete\nvalues.  Input parameters include a schema for each parameter which is\nused to validate the input object.  It may also be used build a user\ninterface for constructing the input object.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/out",
+                "type": [
+                    {
+                        "type": "array",
+                        "items": [
+                            "string",
+                            "https://w3id.org/cwl/cwl#WorkflowStepOutput"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#out",
+                    "_type": "@id",
+                    "identity": true
+                },
+                "doc": "Defines the parameters representing the output of the process.  May be\nused to generate and/or validate the output object.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/requirements",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "https://w3id.org/cwl/cwl#ProcessRequirement"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#requirements",
+                    "mapSubject": "class"
+                },
+                "doc": "Declares requirements that apply to either the runtime environment or the\nworkflow engine that must be met in order to execute this workflow step.  If\nan implementation cannot satisfy all requirements, or a requirement is\nlisted which is not recognized by the implementation, it is a fatal\nerror and the implementation must not attempt to run the process,\nunless overridden at user option.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/hints",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "Any"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#hints",
+                    "noLinkCheck": true,
+                    "mapSubject": "class"
+                },
+                "doc": "Declares hints applying to either the runtime environment or the\nworkflow engine that may be helpful in executing this workflow step.  It is\nnot an error if an implementation cannot satisfy all hints, however\nthe implementation may report a warning.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/label",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "rdfs:label",
+                "doc": "A short, human-readable label of this process object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/doc",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": "rdfs:comment",
+                "doc": "A long, human-readable description of this process object."
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/run",
+                "type": [
+                    "string",
+                    "https://w3id.org/cwl/cwl#Process"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#run",
+                    "_type": "@id"
+                },
+                "doc": "Specifies the process to run.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/scatter",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#scatter",
+                    "_type": "@id",
+                    "_container": "@list",
+                    "refScope": 0
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#WorkflowStep/scatterMethod",
+                "doc": "Required if `scatter` is an array of more than one element.\n",
+                "type": [
+                    "null",
+                    "https://w3id.org/cwl/cwl#ScatterMethod"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/cwl#scatterMethod",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    },
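
A workflow step that scatters over two of its inputs, per the `scatter` and `scatterMethod` fields above, might be sketched like this; the step, file, and parameter names are placeholders:

```
# Hypothetical WorkflowStep using scatter, as a Python dict.
workflow_step = {
    "id": "align",
    "run": "align.cwl",                  # process executed for each scatter job
    "in": [
        {"id": "reads", "source": "trim/reads"},
        {"id": "reference", "source": "reference_genome"},
    ],
    "out": ["bam"],
    "scatter": ["reads", "reference"],
    "scatterMethod": "dotproduct",       # required when scattering over more than one input
}
```
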
+    {
+        "name": "https://w3id.org/cwl/cwl#Workflow",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#Process",
+        "documentRoot": true,
+        "specialize": [
+            {
+                "specializeFrom": "https://w3id.org/cwl/cwl#OutputParameter",
+                "specializeTo": "https://w3id.org/cwl/cwl#WorkflowOutputParameter"
+            }
+        ],
+        "doc": "A workflow describes a set of **steps** and the **dependencies** between\nthose steps.  When a step produces output that will be consumed by a\nsecond step, the first step is a dependency of the second step.\n\nWhen there is a dependency, the workflow engine must execute the preceeding\nstep and wait for it to successfully produce output before executing the\ndependent step.  If two steps are defined in the workflow graph that\nare not directly or indirectly dependent, th [...]
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#Workflow/class",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                },
+                "type": "string"
+            },
+            {
+                "name": "https://w3id.org/cwl/cwl#Workflow/steps",
+                "doc": "The individual steps that make up the workflow.  Each step is executed when all of its\ninput data links are fufilled.  An implementation may choose to execute\nthe steps in a different order than listed and/or execute steps\nconcurrently, provided that dependencies between steps are met.\n",
+                "type": [
+                    {
+                        "type": "array",
+                        "items": "https://w3id.org/cwl/cwl#WorkflowStep"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "mapSubject": "id"
+                }
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#SubworkflowFeatureRequirement",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Indicates that the workflow platform must support nested workflows in\nthe `run` field of [WorkflowStep](#WorkflowStep).\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#SubworkflowFeatureRequirement/class",
+                "type": "string",
+                "doc": "Always 'SubworkflowFeatureRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#ScatterFeatureRequirement",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Indicates that the workflow platform must support the `scatter` and\n`scatterMethod` fields of [WorkflowStep](#WorkflowStep).\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#ScatterFeatureRequirement/class",
+                "type": "string",
+                "doc": "Always 'ScatterFeatureRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/cwl#MultipleInputFeatureRequirement",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Indicates that the workflow platform must support multiple inbound data links\nlisted in the `source` field of [WorkflowStepInput](#WorkflowStepInput).\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#MultipleInputFeatureRequirement/class",
+                "type": "string",
+                "doc": "Always 'MultipleInputFeatureRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    },
+    {
+        "type": "record",
+        "name": "https://w3id.org/cwl/cwl#StepInputExpressionRequirement",
+        "extends": "https://w3id.org/cwl/cwl#ProcessRequirement",
+        "doc": "Indicate that the workflow platform must support the `valueFrom` field\nof [WorkflowStepInput](#WorkflowStepInput).\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/cwl#StepInputExpressionRequirement/class",
+                "type": "string",
+                "doc": "Always 'StepInputExpressionRequirement'",
+                "jsonldPredicate": {
+                    "_id": "@type",
+                    "_type": "@vocab"
+                }
+            }
+        ]
+    }
+]
diff --git a/schema_salad/tests/df b/schema_salad/tests/df
new file mode 100644
index 0000000..d96eeae
--- /dev/null
+++ b/schema_salad/tests/df
@@ -0,0 +1,5 @@
+...........
+----------------------------------------------------------------------
+Ran 11 tests in 0.593s
+
+OK
diff --git a/schema_salad/tests/df2 b/schema_salad/tests/df2
new file mode 100644
index 0000000..6bc3a14
--- /dev/null
+++ b/schema_salad/tests/df2
@@ -0,0 +1 @@
+say what
diff --git a/schema_salad/tests/docimp/d1.yml b/schema_salad/tests/docimp/d1.yml
new file mode 100644
index 0000000..92cb815
--- /dev/null
+++ b/schema_salad/tests/docimp/d1.yml
@@ -0,0 +1,7 @@
+$graph:
+- name: "Semantic_Annotations_for_Linked_Avro_Data"
+  type: documentation
+  doc:
+    - $include: d2.md
+    - $import: d3.yml
+    - $import: d4.yml
diff --git a/schema_salad/tests/docimp/d2.md b/schema_salad/tests/docimp/d2.md
new file mode 100644
index 0000000..2032c4a
--- /dev/null
+++ b/schema_salad/tests/docimp/d2.md
@@ -0,0 +1 @@
+*Hello*
\ No newline at end of file
diff --git a/schema_salad/tests/docimp/d3.yml b/schema_salad/tests/docimp/d3.yml
new file mode 100644
index 0000000..99211ec
--- /dev/null
+++ b/schema_salad/tests/docimp/d3.yml
@@ -0,0 +1,3 @@
+- "hello 2"
+- $include: d5.md
+- "hello 3"
diff --git a/schema_salad/tests/docimp/d4.yml b/schema_salad/tests/docimp/d4.yml
new file mode 100644
index 0000000..1587596
--- /dev/null
+++ b/schema_salad/tests/docimp/d4.yml
@@ -0,0 +1,3 @@
+- "hello 4"
+- $include: d5.md
+- "hello 5"
diff --git a/schema_salad/tests/docimp/d5.md b/schema_salad/tests/docimp/d5.md
new file mode 100644
index 0000000..a8eba23
--- /dev/null
+++ b/schema_salad/tests/docimp/d5.md
@@ -0,0 +1 @@
+*dee dee dee five*
\ No newline at end of file
diff --git a/schema_salad/tests/docimp/dpre.json b/schema_salad/tests/docimp/dpre.json
new file mode 100644
index 0000000..c23da58
--- /dev/null
+++ b/schema_salad/tests/docimp/dpre.json
@@ -0,0 +1,13 @@
+[
+    {
+        "name": "file:///home/peter/work/salad/schema_salad/tests/docimp/d1.yml#Semantic_Annotations_for_Linked_Avro_Data", 
+        "type": "documentation", 
+        "doc": [
+            "*Hello*", 
+            "hello 2", 
+            "hello 3", 
+            "hello 4", 
+            "hello 5"
+        ]
+    }
+]
diff --git a/schema_salad/tests/hello.txt b/schema_salad/tests/hello.txt
new file mode 100644
index 0000000..a042389
--- /dev/null
+++ b/schema_salad/tests/hello.txt
@@ -0,0 +1 @@
+hello world!
diff --git a/schema_salad/tests/hellofield.yml b/schema_salad/tests/hellofield.yml
new file mode 100644
index 0000000..11bc98f
--- /dev/null
+++ b/schema_salad/tests/hellofield.yml
@@ -0,0 +1,5 @@
+{
+"name": "hello",
+"doc": {"$include": "hello.txt"},
+"type": "string"
+}
\ No newline at end of file
diff --git a/schema_salad/tests/matcher.py b/schema_salad/tests/matcher.py
new file mode 100644
index 0000000..50c0c60
--- /dev/null
+++ b/schema_salad/tests/matcher.py
@@ -0,0 +1,32 @@
+# Copyright (C) The Arvados Authors. All rights reserved.
+#
+# SPDX-License-Identifier: Apache-2.0
+
+import difflib
+import json
+import re
+
+
+class JsonDiffMatcher(object):
+    """Raise AssertionError with a readable JSON diff when not __eq__().
+
+    Used with assert_called_with() so it's possible for a human to see
+    the differences between expected and actual call arguments that
+    include non-trivial data structures.
+    """
+    def __init__(self, expected):
+        self.expected = expected
+
+    def __eq__(self, actual):
+        expected_json = json.dumps(self.expected, sort_keys=True, indent=2)
+        actual_json = json.dumps(actual, sort_keys=True, indent=2)
+        if expected_json != actual_json:
+            raise AssertionError("".join(difflib.context_diff(
+                expected_json.splitlines(1),
+                actual_json.splitlines(1),
+                fromfile="Expected", tofile="Actual")))
+        return True
+
+
+def StripYAMLComments(yml):
+    return re.sub(r'(?ms)^(#.*?\n)*\n*', '', yml)
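
A brief usage sketch for the helpers above, assuming the tests package is importable and using a mock object to stand in for the code under test; the `api.create` call is invented for the example:

```
from unittest import mock  # the standalone `mock` package works the same way

from schema_salad.tests.matcher import JsonDiffMatcher, StripYAMLComments

api = mock.MagicMock()
api.create({"name": "hello", "type": "string"})

# Fails with a readable JSON context diff if the recorded argument differs.
api.create.assert_called_with(JsonDiffMatcher({"name": "hello", "type": "string"}))

# Strip leading comment lines before comparing YAML fixtures.
assert StripYAMLComments("# comment\n\nname: hello\n") == "name: hello\n"
```
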
diff --git a/schema_salad/tests/metaschema-pre.yml b/schema_salad/tests/metaschema-pre.yml
new file mode 100644
index 0000000..5f6a3e9
--- /dev/null
+++ b/schema_salad/tests/metaschema-pre.yml
@@ -0,0 +1,628 @@
+[
+    {
+        "name": "https://w3id.org/cwl/salad#Semantic_Annotations_for_Linked_Avro_Data",
+        "type": "documentation",
+        "doc": [
+            "# Semantic Annotations for Linked Avro Data (SALAD)\n\nAuthor:\n\n* Peter Amstutz <peter.amstutz at curoverse.com>, Curoverse\n\nContributors:\n\n* The developers of Apache Avro\n* The developers of JSON-LD\n* Neboj\u0161a Tijani\u0107 <nebojsa.tijanic at sbgenomics.com>, Seven Bridges Genomics\n\n# Abstract\n\nSalad is a schema language for describing structured linked data documents\nin JSON or YAML documents.  A Salad schema provides rules for\npreprocessing, structural validat [...]
+            "## Field name resolution\n\nThe document schema declares the vocabulary of known field names.  During\npreprocessing traversal, field name in the document which are not part of\nthe schema vocabulary must be resolved to absolute URIs.  Under \"strict\"\nvalidation, it is an error for a document to include fields which are not\npart of the vocabulary and not resolvable to absolute URIs.  Fields names\nwhich are not part of the vocabulary are resolved using the following\nrule [...]
+            "{\n  \"$namespaces\": {\n    \"acid\": \"http://example.com/acid#\"\n  },\n  \"$graph\": [{\n    \"name\": \"ExampleType\",\n    \"type\": \"record\",\n    \"fields\": [{\n      \"name\": \"base\",\n      \"type\": \"string\",\n      \"jsonldPredicate\": \"http://example.com/base\"\n    }]\n  }]\n}\n",
+            "```\n\nProcess the following example:\n\n```\n",
+            "    {\n      \"base\": \"one\",\n      \"form\": {\n        \"http://example.com/base\": \"two\",\n        \"http://example.com/three\": \"three\",\n      },\n      \"acid:four\": \"four\"\n    }\n",
+            "```\n\nThis becomes:\n\n```\n",
+            "    {\n      \"base\": \"one\",\n      \"form\": {\n        \"base\": \"two\",\n        \"http://example.com/three\": \"three\",\n      },\n      \"http://example.com/acid#four\": \"four\"\n    }\n",
+            "```\n",
+            "## Identifier resolution\n\nThe schema may designate one or more fields as identifier fields to identify\nspecific objects.  Processing must resolve relative identifiers to absolute\nidentifiers using the following rules:\n\n  * If an identifier URI is prefixed with `#` it is a URI relative\n    fragment identifier.  It is resolved relative to the base URI by setting\n    or replacing the fragment portion of the base URI.\n\n  * If an identifier URI does not contain a scheme [...]
+            "{\n  \"$namespaces\": {\n    \"acid\": \"http://example.com/acid#\"\n  },\n  \"$graph\": [{\n    \"name\": \"ExampleType\",\n    \"type\": \"record\",\n    \"fields\": [{\n      \"name\": \"id\",\n      \"type\": \"string\",\n      \"jsonldPredicate\": \"@id\"\n    }]\n  }]\n}\n",
+            "```\n\nProcess the following example:\n\n```\n",
+            "    {\n      \"id\": \"http://example.com/base\",\n      \"form\": {\n        \"id\": \"one\",\n        \"things\": [\n          {\n            \"id\": \"two\"\n          },\n          {\n            \"id\": \"#three\",\n          },\n          {\n            \"id\": \"four#five\",\n          },\n          {\n            \"id\": \"acid:six\",\n          }\n        ]\n      }\n    }\n",
+            "```\n\nThis becomes:\n\n```\n",
+            "{\n  \"id\": \"http://example.com/base\",\n  \"form\": {\n    \"id\": \"http://example.com/base#one\",\n    \"things\": [\n      {\n        \"id\": \"http://example.com/base#one/two\"\n      },\n      {\n        \"id\": \"http://example.com/base#three\"\n      },\n      {\n        \"id\": \"http://example.com/four#five\",\n      },\n      {\n        \"id\": \"http://example.com/acid#six\",\n      }\n    ]\n  }\n}\n",
+            "```\n",
+            "## Link resolution\n\nThe schema may designate one or more fields as link fields reference other\nobjects.  Processing must resolve links to either absolute URIs using the\nfollowing rules:\n\n* If a reference URI is prefixed with `#` it is a relative\nfragment identifier.  It is resolved relative to the base URI by setting\nor replacing the fragment portion of the base URI.\n\n* If a reference URI does not contain a scheme and is not prefixed with `#`\nit is a path relative [...]
+            "{\n  \"$namespaces\": {\n    \"acid\": \"http://example.com/acid#\"\n  },\n  \"$graph\": [{\n    \"name\": \"ExampleType\",\n    \"type\": \"record\",\n    \"fields\": [{\n      \"name\": \"link\",\n      \"type\": \"string\",\n      \"jsonldPredicate\": {\n        \"_type\": \"@id\"\n      }\n    }]\n  }]\n}\n",
+            "```\n\nProcess the following example:\n\n```\n",
+            "{\n  \"$base\": \"http://example.com/base\",\n  \"link\": \"http://example.com/base/zero\",\n  \"form\": {\n    \"link\": \"one\",\n    \"things\": [\n      {\n        \"link\": \"two\"\n      },\n      {\n        \"link\": \"#three\",\n      },\n      {\n        \"link\": \"four#five\",\n      },\n      {\n        \"link\": \"acid:six\",\n      }\n    ]\n  }\n}\n",
+            "```\n\nThis becomes:\n\n```\n",
+            "{\n  \"$base\": \"http://example.com/base\",\n  \"link\": \"http://example.com/base/zero\",\n  \"form\": {\n    \"link\": \"http://example.com/one\",\n    \"things\": [\n      {\n        \"link\": \"http://example.com/two\"\n      },\n      {\n        \"link\": \"http://example.com/base#three\"\n      },\n      {\n        \"link\": \"http://example.com/four#five\",\n      },\n      {\n        \"link\": \"http://example.com/acid#six\",\n      }\n    ]\n  }\n}\n",
+            "```\n",
+            "## Vocabulary resolution\n\n  The schema may designate one or more vocabulary fields which use terms\n  defined in the vocabulary.  Processing must resolve vocabulary fields to\n  either vocabulary terms or absolute URIs by first applying the link\n  resolution rules defined above, then applying the following additional\n  rule:\n\n    * If a reference URI is a vocabulary field, and there is a vocabulary\n    term which maps to the resolved URI, the reference must be replace [...]
+            "{\n  \"$namespaces\": {\n    \"acid\": \"http://example.com/acid#\"\n  },\n  \"$graph\": [{\n    \"name\": \"Colors\",\n    \"type\": \"enum\",\n    \"symbols\": [\"acid:red\"]\n  },\n  {\n    \"name\": \"ExampleType\",\n    \"type\": \"record\",\n    \"fields\": [{\n      \"name\": \"voc\",\n      \"type\": \"string\",\n      \"jsonldPredicate\": {\n        \"_type\": \"@vocab\"\n      }\n    }]\n  }]\n}\n",
+            "```\n\nProcess the following example:\n\n```\n",
+            "    {\n      \"form\": {\n        \"things\": [\n          {\n            \"voc\": \"red\",\n          },\n          {\n            \"voc\": \"http://example.com/acid#red\",\n          },\n          {\n            \"voc\": \"http://example.com/acid#blue\",\n          }\n        ]\n      }\n    }\n",
+            "```\n\nThis becomes:\n\n```\n",
+            "    {\n      \"form\": {\n        \"things\": [\n          {\n            \"voc\": \"red\",\n          },\n          {\n            \"voc\": \"red\",\n          },\n          {\n            \"voc\": \"http://example.com/acid#blue\",\n          }\n        ]\n      }\n    }\n",
+            "```\n",
+            "## Import\n\nDuring preprocessing traversal, an implementation must resolve `$import`\ndirectives.  An `$import` directive is an object consisting of exactly one\nfield `$import` specifying resource by URI string.  It is an error if there\nare additional fields in the `$import` object, such additional fields must\nbe ignored.\n\nThe URI string must be resolved to an absolute URI using the link\nresolution rules described previously.  Implementations must support\nloading fro [...]
+            "## Identifier maps\n\nThe schema may designate certain fields as having a `mapSubject`.  If the\nvalue of the field is a JSON object, it must be transformed into an array of\nJSON objects.  Each key-value pair from the source JSON object is a list\nitem, each list item must be a JSON objects, and the value of the key is\nassigned to the field specified by `mapSubject`.\n\nFields which have `mapSubject` specified may also supply a `mapPredicate`.\nIf the value of a map item i [...]
+            "{\n  \"$graph\": [{\n    \"name\": \"MappedType\",\n    \"type\": \"record\",\n    \"documentRoot\": true,\n    \"fields\": [{\n      \"name\": \"mapped\",\n      \"type\": {\n        \"type\": \"array\",\n        \"items\": \"ExampleRecord\"\n      },\n      \"jsonldPredicate\": {\n        \"mapSubject\": \"key\",\n        \"mapPredicate\": \"value\"\n      }\n    }],\n  },\n  {\n    \"name\": \"ExampleRecord\",\n    \"type\": \"record\",\n    \"fields\": [{\n      \"name\" [...]
+            "```\n\nProcess the following example:\n\n```\n",
+            "{\n  \"mapped\": {\n    \"shaggy\": {\n      \"value\": \"scooby\"\n    },\n    \"fred\": \"daphne\"\n  }\n}",
+            "```\n\nThis becomes:\n\n```\n",
+            "{\n    \"mapped\": [\n        {\n            \"value\": \"daphne\",\n            \"key\": \"fred\"\n        },\n        {\n            \"value\": \"scooby\",\n            \"key\": \"shaggy\"\n        }\n    ]\n}",
+            "```\n",
+            "## Domain Specific Language for types\n\nFields may be tagged `typeDSL: true`.  If so, the field is expanded using the\nfollowing micro-DSL for schema salad types:\n\n* If the type ends with a question mark `?` it is expanded to a union with `null`\n* If the type ends with square brackets `[]` it is expanded to an array with items of the preceeding type symbol\n* The type may end with both `[]?` to indicate it is an optional array.\n* Identifier resolution is applied after t [...]
+            "{\n  \"$graph\": [\n  {\"$import\": \"metaschema_base.yml\"},\n  {\n    \"name\": \"TypeDSLExample\",\n    \"type\": \"record\",\n    \"documentRoot\": true,\n    \"fields\": [{\n      \"name\": \"extype\",\n      \"type\": \"string\",\n      \"jsonldPredicate\": {\n        _type: \"@vocab\",\n        \"typeDSL\": true\n      }\n    }]\n  }]\n}\n",
+            "```\n\nProcess the following example:\n\n```\n",
+            "[{\n  \"extype\": \"string\"\n}, {\n  \"extype\": \"string?\"\n}, {\n  \"extype\": \"string[]\"\n}, {\n  \"extype\": \"string[]?\"\n}]\n",
+            "```\n\nThis becomes:\n\n```\n",
+            "[\n    {\n        \"extype\": \"string\"\n    }, \n    {\n        \"extype\": [\n            \"null\", \n            \"string\"\n        ]\n    }, \n    {\n        \"extype\": {\n            \"type\": \"array\", \n            \"items\": \"string\"\n        }\n    }, \n    {\n        \"extype\": [\n            \"null\", \n            {\n                \"type\": \"array\", \n                \"items\": \"string\"\n            }\n        ]\n    }\n]\n",
+            "```\n"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#Link_Validation",
+        "type": "documentation",
+        "doc": "# Link validation\n\nOnce a document has been preprocessed, an implementation may validate\nlinks.  The link validation traversal may visit fields which the schema\ndesignates as link fields and check that each URI references an existing\nobject in the current document, an imported document, file system, or\nnetwork resource.  Failure to validate links may be a fatal error.  Link\nvalidation behavior for individual fields may be modified by `identity` and\n`noLinkCheck` i [...]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#Schema_validation",
+        "type": "documentation",
+        "doc": ""
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#Schema",
+        "type": "documentation",
+        "doc": "# Schema\n"
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#PrimitiveType",
+        "type": "enum",
+        "symbols": [
+            "https://w3id.org/cwl/salad#null",
+            "http://www.w3.org/2001/XMLSchema#boolean",
+            "http://www.w3.org/2001/XMLSchema#int",
+            "http://www.w3.org/2001/XMLSchema#long",
+            "http://www.w3.org/2001/XMLSchema#float",
+            "http://www.w3.org/2001/XMLSchema#double",
+            "http://www.w3.org/2001/XMLSchema#string"
+        ],
+        "doc": [
+            "Salad data types are based on Avro schema declarations.  Refer to the\n[Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for\ndetailed information.\n",
+            "null: no value",
+            "boolean: a binary value",
+            "int: 32-bit signed integer",
+            "long: 64-bit signed integer",
+            "float: single precision (32-bit) IEEE 754 floating-point number",
+            "double: double precision (64-bit) IEEE 754 floating-point number",
+            "string: Unicode character sequence"
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#Any",
+        "type": "enum",
+        "symbols": [
+            "https://w3id.org/cwl/salad#Any"
+        ],
+        "docAfter": "https://w3id.org/cwl/salad#PrimitiveType",
+        "doc": "The **Any** type validates for any non-null value.\n"
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#RecordField",
+        "type": "record",
+        "doc": "A field of a record.",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#RecordField/name",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "The name of the field\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#RecordField/doc",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "A documentation string for this field\n",
+                "jsonldPredicate": "rdfs:comment"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#RecordField/type",
+                "type": [
+                    "PrimitiveType",
+                    "RecordSchema",
+                    "EnumSchema",
+                    "ArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "PrimitiveType",
+                            "RecordSchema",
+                            "EnumSchema",
+                            "ArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "doc": "The field type\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#RecordSchema",
+        "type": "record",
+        "fields": [
+            {
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "RecordField"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#fields",
+                    "mapSubject": "name",
+                    "mapPredicate": "type"
+                },
+                "doc": "Defines the fields of the record.",
+                "name": "https://w3id.org/cwl/salad#RecordSchema/fields"
+            },
+            {
+                "doc": "Must be `record`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#record"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "name": "https://w3id.org/cwl/salad#RecordSchema/type"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#EnumSchema",
+        "type": "record",
+        "doc": "Define an enumerated type.\n",
+        "fields": [
+            {
+                "type": {
+                    "type": "array",
+                    "items": "string"
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#symbols",
+                    "_type": "@id",
+                    "identity": true
+                },
+                "doc": "Defines the set of valid symbols.",
+                "name": "https://w3id.org/cwl/salad#EnumSchema/symbols"
+            },
+            {
+                "doc": "Must be `enum`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#enum"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "name": "https://w3id.org/cwl/salad#EnumSchema/type"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#ArraySchema",
+        "type": "record",
+        "fields": [
+            {
+                "type": [
+                    "PrimitiveType",
+                    "RecordSchema",
+                    "EnumSchema",
+                    "ArraySchema",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": [
+                            "PrimitiveType",
+                            "RecordSchema",
+                            "EnumSchema",
+                            "ArraySchema",
+                            "string"
+                        ]
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#items",
+                    "_type": "@vocab",
+                    "refScope": 2
+                },
+                "doc": "Defines the type of the array elements.",
+                "name": "https://w3id.org/cwl/salad#ArraySchema/items"
+            },
+            {
+                "doc": "Must be `array`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#array"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                },
+                "name": "https://w3id.org/cwl/salad#ArraySchema/type"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#JsonldPredicate",
+        "type": "record",
+        "doc": "Attached to a record field to define how the parent record field is handled for\nURI resolution and JSON-LD context generation.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/_id",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#_id",
+                    "_type": "@id",
+                    "identity": true
+                },
+                "doc": "The predicate URI that this field corresponds to.\nCorresponds to JSON-LD `@id` directive.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/_type",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "The context type hint, corresponds to JSON-LD `@type` directive.\n\n* If the value of this field is `@id` and `identity` is false or\nunspecified, the parent field must be resolved using the link\nresolution rules.  If `identity` is true, the parent field must be\nresolved using the identifier expansion rules.\n\n* If the value of this field is `@vocab`, the parent field must be\n  resolved using the vocabulary resolution rules.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/_container",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Structure hint, corresponds to JSON-LD `@container` directive.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/identity",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If true and `_type` is `@id` this indicates that the parent field must\nbe resolved according to identity resolution rules instead of link\nresolution rules.  In addition, the field value is considered an\nassertion that the linked value exists; absence of an object in the loaded document\nwith the URI is not an error.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/noLinkCheck",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If true, this indicates that link validation traversal must stop at\nthis field.  This field (it is is a URI) or any fields under it (if it\nis an object or array) are not subject to link checking.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/mapSubject",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "If the value of the field is a JSON object, it must be transformed\ninto an array of JSON objects, where each key-value pair from the\nsource JSON object is a list item, the list items must be JSON objects,\nand the key is assigned to the field specified by `mapSubject`.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/mapPredicate",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Only applies if `mapSubject` is also provided.  If the value of the\nfield is a JSON object, it is transformed as described in `mapSubject`,\nwith the addition that when the value of a map item is not an object,\nthe item is transformed to a JSON object with the key assigned to the\nfield specified by `mapSubject` and the value assigned to the field\nspecified by `mapPredicate`.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/refScope",
+                "type": [
+                    "null",
+                    "int"
+                ],
+                "doc": "If the field contains a relative reference, it must be resolved by\nsearching for valid document references in each successive parent scope\nin the document fragment.  For example, a reference of `foo` in the\ncontext `#foo/bar/baz` will first check for the existence of\n`#foo/bar/baz/foo`, followed by `#foo/bar/foo`, then `#foo/foo` and\nthen finally `#foo`.  The first valid URI in the search order shall be\nused as the fully resolved value of the identifier.  Th [...]
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#JsonldPredicate/typeDSL",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "Field must be expanded based on the the Schema Salad type DSL.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#SpecializeDef",
+        "type": "record",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#SpecializeDef/specializeFrom",
+                "type": "string",
+                "doc": "The data type to be replaced",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#specializeFrom",
+                    "_type": "@id",
+                    "refScope": 1
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#SpecializeDef/specializeTo",
+                "type": "string",
+                "doc": "The new data type to replace with",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#specializeTo",
+                    "_type": "@id",
+                    "refScope": 1
+                }
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#NamedType",
+        "type": "record",
+        "abstract": true,
+        "docParent": "https://w3id.org/cwl/salad#Schema",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#NamedType/name",
+                "type": "string",
+                "jsonldPredicate": "@id",
+                "doc": "The identifier for this type"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#NamedType/inVocab",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "By default or if \"true\", include the short name of this type in the\nvocabulary (the keys of the JSON-LD context).  If false, do not include\nthe short name in the vocabulary.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#DocType",
+        "type": "record",
+        "abstract": true,
+        "docParent": "https://w3id.org/cwl/salad#Schema",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#DocType/doc",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "A documentation string for this type, or an array of strings which should be concatenated.",
+                "jsonldPredicate": "rdfs:comment"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#DocType/docParent",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Hint to indicate that during documentation generation, documentation\nfor this type should appear in a subsection under `docParent`.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#docParent",
+                    "_type": "@id"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#DocType/docChild",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "doc": "Hint to indicate that during documentation generation, documentation\nfor `docChild` should appear in a subsection under this type.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#docChild",
+                    "_type": "@id"
+                }
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#DocType/docAfter",
+                "type": [
+                    "null",
+                    "string"
+                ],
+                "doc": "Hint to indicate that during documentation generation, documentation\nfor this type should appear after the `docAfter` section at the same\nlevel.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#docAfter",
+                    "_type": "@id"
+                }
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#SchemaDefinedType",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/salad#DocType",
+        "doc": "Abstract base for schema-defined types.\n",
+        "abstract": true,
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#SchemaDefinedType/jsonldPredicate",
+                "type": [
+                    "null",
+                    "string",
+                    "JsonldPredicate"
+                ],
+                "doc": "Annotate this type with linked data context.\n",
+                "jsonldPredicate": "sld:jsonldPredicate"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#SchemaDefinedType/documentRoot",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If true, indicates that the type is a valid at the document root.  At\nleast one type in a schema must be tagged with `documentRoot: true`.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#SaladRecordField",
+        "type": "record",
+        "extends": "https://w3id.org/cwl/salad#RecordField",
+        "doc": "A field of a record.",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#SaladRecordField/jsonldPredicate",
+                "type": [
+                    "null",
+                    "string",
+                    "JsonldPredicate"
+                ],
+                "doc": "Annotate this type with linked data context.\n",
+                "jsonldPredicate": "sld:jsonldPredicate"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#SaladRecordSchema",
+        "docParent": "https://w3id.org/cwl/salad#Schema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#NamedType",
+            "https://w3id.org/cwl/salad#RecordSchema",
+            "https://w3id.org/cwl/salad#SchemaDefinedType"
+        ],
+        "documentRoot": true,
+        "specialize": [
+            {
+                "specializeTo": "https://w3id.org/cwl/salad#SaladRecordField",
+                "specializeFrom": "https://w3id.org/cwl/salad#RecordField"
+            }
+        ],
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#SaladRecordSchema/abstract",
+                "type": [
+                    "null",
+                    "boolean"
+                ],
+                "doc": "If true, this record is abstract and may be used as a base for other\nrecords, but is not valid on its own.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#SaladRecordSchema/extends",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#extends",
+                    "_type": "@id",
+                    "refScope": 1
+                },
+                "doc": "Indicates that this record inherits fields from one or more base records.\n"
+            },
+            {
+                "name": "https://w3id.org/cwl/salad#SaladRecordSchema/specialize",
+                "type": [
+                    "null",
+                    {
+                        "type": "array",
+                        "items": "SpecializeDef"
+                    }
+                ],
+                "doc": "Only applies if `extends` is declared.  Apply type specialization using the\nbase record as a template.  For each field inherited from the base\nrecord, replace any instance of the type `specializeFrom` with\n`specializeTo`.\n",
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#specialize",
+                    "mapSubject": "specializeFrom",
+                    "mapPredicate": "specializeTo"
+                }
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#SaladEnumSchema",
+        "docParent": "https://w3id.org/cwl/salad#Schema",
+        "type": "record",
+        "extends": [
+            "https://w3id.org/cwl/salad#NamedType",
+            "https://w3id.org/cwl/salad#EnumSchema",
+            "https://w3id.org/cwl/salad#SchemaDefinedType"
+        ],
+        "documentRoot": true,
+        "doc": "Define an enumerated type.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#SaladEnumSchema/extends",
+                "type": [
+                    "null",
+                    "string",
+                    {
+                        "type": "array",
+                        "items": "string"
+                    }
+                ],
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#extends",
+                    "_type": "@id",
+                    "refScope": 1
+                },
+                "doc": "Indicates that this enum inherits symbols from a base enum.\n"
+            }
+        ]
+    },
+    {
+        "name": "https://w3id.org/cwl/salad#Documentation",
+        "type": "record",
+        "docParent": "https://w3id.org/cwl/salad#Schema",
+        "extends": [
+            "https://w3id.org/cwl/salad#NamedType",
+            "https://w3id.org/cwl/salad#DocType"
+        ],
+        "documentRoot": true,
+        "doc": "A documentation section.  This type exists to facilitate self-documenting\nschemas but has no role in formal validation.\n",
+        "fields": [
+            {
+                "name": "https://w3id.org/cwl/salad#Documentation/type",
+                "doc": "Must be `documentation`",
+                "type": {
+                    "type": "enum",
+                    "symbols": [
+                        "https://w3id.org/cwl/salad#documentation"
+                    ]
+                },
+                "jsonldPredicate": {
+                    "_id": "https://w3id.org/cwl/salad#type",
+                    "_type": "@vocab",
+                    "typeDSL": true,
+                    "refScope": 2
+                }
+            }
+        ]
+    }
+]
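
The `mapSubject`/`mapPredicate` documentation in the JsonldPredicate record above describes how an identifier-keyed JSON object is rewritten into a list of objects before validation. A minimal sketch of that transformation, using a hypothetical helper name (this is illustrative only, not schema-salad's own implementation; the real behaviour is exercised by test_idmap and test_idmap2 in schema_salad/tests/test_cg.py below):

    def idmap_to_list(value, map_subject, map_predicate=None):
        # Rewrite {"hello": {...}} into [{"name": "hello", ...}] style lists,
        # moving each key into the field named by mapSubject and wrapping
        # scalar values in the field named by mapPredicate.
        if not isinstance(value, dict):
            return value
        out = []
        for key, entry in value.items():
            if not isinstance(entry, dict):
                if map_predicate is None:
                    raise ValueError("scalar entry but no mapPredicate declared")
                entry = {map_predicate: entry}
            entry = dict(entry)
            entry[map_subject] = key
            out.append(entry)
        return out

    # e.g. idmap_to_list({"hello": "string"}, "name", "type")
    # == [{"name": "hello", "type": "string"}]
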
diff --git a/schema_salad/tests/pt.yml b/schema_salad/tests/pt.yml
new file mode 100644
index 0000000..5f2377f
--- /dev/null
+++ b/schema_salad/tests/pt.yml
@@ -0,0 +1,28 @@
+$namespaces:
+  sld:  "https://w3id.org/cwl/salad#"
+  dct:  "http://purl.org/dc/terms/"
+  rdf:  "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+  rdfs: "http://www.w3.org/2000/01/rdf-schema#"
+  xsd:  "http://www.w3.org/2001/XMLSchema#"
+name: PrimitiveType
+type: enum
+symbols:
+  - "sld:null"
+  - "xsd:boolean"
+  - "xsd:int"
+  - "xsd:long"
+  - "xsd:float"
+  - "xsd:double"
+  - "xsd:string"
+doc:
+  - |
+    Salad data types are based on Avro schema declarations.  Refer to the
+    [Avro schema declaration documentation](https://avro.apache.org/docs/current/spec.html#schemas) for
+    detailed information.
+  - "null: no value"
+  - "boolean: a binary value"
+  - "int: 32-bit signed integer"
+  - "long: 64-bit signed integer"
+  - "float: single precision (32-bit) IEEE 754 floating-point number"
+  - "double: double precision (64-bit) IEEE 754 floating-point number"
+  - "string: Unicode character sequence"
diff --git a/schema_salad/tests/test_cg.py b/schema_salad/tests/test_cg.py
new file mode 100644
index 0000000..f353c16
--- /dev/null
+++ b/schema_salad/tests/test_cg.py
@@ -0,0 +1,177 @@
+import schema_salad.metaschema as cg_metaschema
+import unittest
+import logging
+import os
+import json
+from schema_salad.ref_resolver import file_uri
+
+from .matcher import JsonDiffMatcher
+from .util import get_data
+
+
+class TestGeneratedMetaschema(unittest.TestCase):
+    def test_load(self):
+        doc = {
+            "type": "record",
+            "fields": [{
+                "name": "hello",
+                "doc": "Hello test case",
+                "type": "string"
+            }]
+        }
+        rs = cg_metaschema.RecordSchema(doc, "http://example.com/", cg_metaschema.LoadingOptions())
+        self.assertEqual("record", rs.type)
+        self.assertEqual("http://example.com/#hello", rs.fields[0].name)
+        self.assertEqual("Hello test case", rs.fields[0].doc)
+        self.assertEqual("string", rs.fields[0].type)
+        self.assertEqual({
+            "type": "record",
+            "fields": [{
+                "name": "http://example.com/#hello",
+                "doc": "Hello test case",
+                "type": "string"
+            }]
+        }, rs.save())
+
+    def test_err(self):
+        doc = {
+            "doc": "Hello test case",
+            "type": "string"
+        }
+        with self.assertRaises(cg_metaschema.ValidationException):
+            rf = cg_metaschema.RecordField(doc, "", cg_metaschema.LoadingOptions())
+
+    def test_include(self):
+        doc = {
+            "name": "hello",
+            "doc": [{"$include": "hello.txt"}],
+            "type": "documentation"
+        }
+        rf = cg_metaschema.Documentation(doc, "http://example.com/",
+                                         cg_metaschema.LoadingOptions(fileuri=file_uri(get_data("tests/_"))))
+        self.assertEqual("http://example.com/#hello", rf.name)
+        self.assertEqual(["hello world!\n"], rf.doc)
+        self.assertEqual("documentation", rf.type)
+        self.assertEqual({
+            "name": "http://example.com/#hello",
+            "doc": ["hello world!\n"],
+            "type": "documentation"
+        }, rf.save())
+
+    def test_import(self):
+        doc = {
+            "type": "record",
+            "fields": [{
+                "$import": "hellofield.yml"
+            }]
+        }
+        lead = file_uri(os.path.normpath(get_data("tests")))
+        rs = cg_metaschema.RecordSchema(doc, "http://example.com/", cg_metaschema.LoadingOptions(fileuri=lead+"/_"))
+        self.assertEqual("record", rs.type)
+        self.assertEqual(lead+"/hellofield.yml#hello", rs.fields[0].name)
+        self.assertEqual("hello world!\n", rs.fields[0].doc)
+        self.assertEqual("string", rs.fields[0].type)
+        self.assertEqual({
+            "type": "record",
+            "fields": [{
+                "name": lead+"/hellofield.yml#hello",
+                "doc": "hello world!\n",
+                "type": "string"
+            }]
+        }, rs.save())
+
+
+    maxDiff = None
+
+    def test_import2(self):
+        rs = cg_metaschema.load_document(file_uri(get_data("tests/docimp/d1.yml")), "", cg_metaschema.LoadingOptions())
+        self.assertEqual([{'doc': [u'*Hello*', 'hello 2', u'*dee dee dee five*',
+                                   'hello 3', 'hello 4', u'*dee dee dee five*',
+                                   'hello 5'],
+                           'type': 'documentation',
+                           'name': file_uri(get_data("tests/docimp/d1.yml"))+"#Semantic_Annotations_for_Linked_Avro_Data"}],
+              [r.save() for r in rs])
+
+    def test_err2(self):
+        doc = {
+            "type": "rucord",
+            "fields": [{
+                "name": "hello",
+                "doc": "Hello test case",
+                "type": "string"
+            }]
+        }
+        with self.assertRaises(cg_metaschema.ValidationException):
+            rs = cg_metaschema.RecordSchema(doc, "", cg_metaschema.LoadingOptions())
+
+    def test_idmap(self):
+        doc = {
+            "type": "record",
+            "fields": {
+                "hello": {
+                    "doc": "Hello test case",
+                    "type": "string"
+                }
+            }
+        }
+        rs = cg_metaschema.RecordSchema(doc, "http://example.com/", cg_metaschema.LoadingOptions())
+        self.assertEqual("record", rs.type)
+        self.assertEqual("http://example.com/#hello", rs.fields[0].name)
+        self.assertEqual("Hello test case", rs.fields[0].doc)
+        self.assertEqual("string", rs.fields[0].type)
+        self.assertEqual({
+            "type": "record",
+            "fields": [{
+                "name": "http://example.com/#hello",
+                "doc": "Hello test case",
+                "type": "string"
+            }]
+        }, rs.save())
+
+    def test_idmap2(self):
+        doc = {
+            "type": "record",
+            "fields": {
+                "hello": "string"
+            }
+        }
+        rs = cg_metaschema.RecordSchema(doc, "http://example.com/", cg_metaschema.LoadingOptions())
+        self.assertEqual("record", rs.type)
+        self.assertEqual("http://example.com/#hello", rs.fields[0].name)
+        self.assertEqual(None, rs.fields[0].doc)
+        self.assertEqual("string", rs.fields[0].type)
+        self.assertEqual({
+            "type": "record",
+            "fields": [{
+                "name": "http://example.com/#hello",
+                "type": "string"
+            }]
+        }, rs.save())
+
+    def test_load_pt(self):
+        doc = cg_metaschema.load_document(file_uri(get_data("tests/pt.yml")), "", cg_metaschema.LoadingOptions())
+        self.assertEqual(['https://w3id.org/cwl/salad#null',
+                          'http://www.w3.org/2001/XMLSchema#boolean',
+                          'http://www.w3.org/2001/XMLSchema#int',
+                          'http://www.w3.org/2001/XMLSchema#long',
+                          'http://www.w3.org/2001/XMLSchema#float',
+                          'http://www.w3.org/2001/XMLSchema#double',
+                          'http://www.w3.org/2001/XMLSchema#string'], doc.symbols)
+
+    def test_load_metaschema(self):
+        doc = cg_metaschema.load_document(file_uri(get_data("metaschema/metaschema.yml")), "", cg_metaschema.LoadingOptions())
+        with open(get_data("tests/metaschema-pre.yml")) as f:
+            pre = json.load(f)
+        saved = [d.save() for d in doc]
+        self.assertEqual(saved, JsonDiffMatcher(pre))
+
+    def test_load_cwlschema(self):
+        doc = cg_metaschema.load_document(file_uri(get_data("tests/test_schema/CommonWorkflowLanguage.yml")), "", cg_metaschema.LoadingOptions())
+        with open(get_data("tests/cwl-pre.yml")) as f:
+            pre = json.load(f)
+        saved = [d.save() for d in doc]
+        self.assertEqual(saved, JsonDiffMatcher(pre))
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/schema_salad/tests/test_errors.py b/schema_salad/tests/test_errors.py
index 590984a..4578ba1 100644
--- a/schema_salad/tests/test_errors.py
+++ b/schema_salad/tests/test_errors.py
@@ -25,7 +25,8 @@ class TestErrors(unittest.TestCase):
                   "test_schema/test11.cwl",
                   "test_schema/test12.cwl",
                   "test_schema/test13.cwl",
-                  "test_schema/test14.cwl"):
+                  "test_schema/test14.cwl",
+                  "test_schema/test15.cwl"):
             with self.assertRaises(ValidationException):
                 try:
                     load_and_validate(document_loader, avsc_names,
diff --git a/schema_salad/tests/test_examples.py b/schema_salad/tests/test_examples.py
index 4ffdd0d..3ffc66c 100644
--- a/schema_salad/tests/test_examples.py
+++ b/schema_salad/tests/test_examples.py
@@ -10,7 +10,7 @@ import rdflib
 import ruamel.yaml
 import json
 import os
-from schema_salad.sourceline import cmap
+from schema_salad.sourceline import cmap, SourceLine
 
 try:
     from ruamel.yaml import CSafeLoader as SafeLoader
@@ -22,16 +22,16 @@ from ruamel.yaml.comments import CommentedSeq, CommentedMap
 
 class TestSchemas(unittest.TestCase):
     def test_schemas(self):
-        l = schema_salad.ref_resolver.Loader({})
+        loader = schema_salad.ref_resolver.Loader({})
 
-        ra, _ = l.resolve_all(cmap({
-            u"$schemas": ["file://" + get_data("tests/EDAM.owl")],
+        ra, _ = loader.resolve_all(cmap({
+            u"$schemas": [schema_salad.ref_resolver.file_uri(get_data("tests/EDAM.owl"))],
             u"$namespaces": {u"edam": u"http://edamontology.org/"},
             u"edam:has_format": u"edam:format_1915"
         }), "")
 
         self.assertEqual({
-            u"$schemas": ["file://" + get_data("tests/EDAM.owl")],
+            u"$schemas": [schema_salad.ref_resolver.file_uri(get_data("tests/EDAM.owl"))],
             u"$namespaces": {u"edam": u"http://edamontology.org/"},
             u'http://edamontology.org/has_format': u'http://edamontology.org/format_1915'
         }, ra)
@@ -332,7 +332,7 @@ class TestSchemas(unittest.TestCase):
         print(g.serialize(format="n3"))
 
     def test_mixin(self):
-        base_url = "file://" + os.getcwd() + "/tests/"
+        base_url = schema_salad.ref_resolver.file_uri(os.path.join(os.getcwd(), "tests"))
         ldr = schema_salad.ref_resolver.Loader({})
         ra = ldr.resolve_ref(cmap({"$mixin": get_data("tests/mixin.yml"), "one": "five"}),
                              base_url=base_url)
@@ -367,11 +367,38 @@ class TestSchemas(unittest.TestCase):
     def test_file_uri(self):
         # Note: this test probably won't pass on Windows.  Someone with a
         # windows box should add an alternate test.
-        self.assertEqual("file:///foo/bar%20baz/quux", schema_salad.ref_resolver.file_uri("/foo/bar baz/quux"))
-        self.assertEqual("/foo/bar baz/quux", schema_salad.ref_resolver.uri_file_path("file:///foo/bar%20baz/quux"))
-        self.assertEqual("file:///foo/bar%20baz/quux%23zing%20zong", schema_salad.ref_resolver.file_uri("/foo/bar baz/quux#zing zong"))
-        self.assertEqual("file:///foo/bar%20baz/quux#zing%20zong", schema_salad.ref_resolver.file_uri("/foo/bar baz/quux#zing zong", split_frag=True))
-        self.assertEqual("/foo/bar baz/quux#zing zong", schema_salad.ref_resolver.uri_file_path("file:///foo/bar%20baz/quux#zing%20zong"))
+        self.assertEquals("file:///foo/bar%20baz/quux", schema_salad.ref_resolver.file_uri("/foo/bar baz/quux"))
+        self.assertEquals(os.path.normpath("/foo/bar baz/quux"),
+                          schema_salad.ref_resolver.uri_file_path("file:///foo/bar%20baz/quux"))
+        self.assertEquals("file:///foo/bar%20baz/quux%23zing%20zong", schema_salad.ref_resolver.file_uri("/foo/bar baz/quux#zing zong"))
+        self.assertEquals("file:///foo/bar%20baz/quux#zing%20zong", schema_salad.ref_resolver.file_uri("/foo/bar baz/quux#zing zong", split_frag=True))
+        self.assertEquals(os.path.normpath("/foo/bar baz/quux#zing zong"),
+                          schema_salad.ref_resolver.uri_file_path("file:///foo/bar%20baz/quux#zing%20zong"))
+
+
+class SourceLineTest(unittest.TestCase):
+    def test_sourceline(self):
+        ldr = schema_salad.ref_resolver.Loader({"id": "@id"})
+        b, _ = ldr.resolve_ref(get_data("tests/frag.yml"))
+
+        class TestExp(Exception):
+            pass
+
+        try:
+            with SourceLine(b, 1, TestExp, False):
+                raise Exception("Whoops")
+        except TestExp as e:
+            self.assertTrue(str(e).endswith("frag.yml:3:3: Whoops"))
+        except Exception:
+            self.fail()
+
+        try:
+            with SourceLine(b, 1, TestExp, True):
+                raise Exception("Whoops")
+        except TestExp as e:
+            self.assertTrue(str(e).splitlines()[0].endswith("frag.yml:3:3: Traceback (most recent call last):"))
+        except Exception:
+            self.fail()
 
 
 if __name__ == '__main__':
diff --git a/schema_salad/tests/test_fetch.py b/schema_salad/tests/test_fetch.py
index 6f36951..7396e37 100644
--- a/schema_salad/tests/test_fetch.py
+++ b/schema_salad/tests/test_fetch.py
@@ -53,7 +53,8 @@ class TestFetcher(unittest.TestCase):
 
     def test_cache(self):
         loader = schema_salad.ref_resolver.Loader({})
-        foo = "file://%s/foo.txt" % os.getcwd()
+        foo = os.path.join(os.getcwd(), "foo.txt")
+        foo = schema_salad.ref_resolver.file_uri(foo)
         loader.cache.update({foo: "hello: foo"})
         print(loader.cache)
         self.assertEqual({"hello": "foo"}, loader.resolve_ref("foo.txt")[0])
diff --git a/schema_salad/tests/test_print_oneline.py b/schema_salad/tests/test_print_oneline.py
new file mode 100644
index 0000000..9e442ea
--- /dev/null
+++ b/schema_salad/tests/test_print_oneline.py
@@ -0,0 +1,120 @@
+from .util import get_data
+import unittest
+from schema_salad.main import to_one_line_messages, reformat_yaml_exception_message
+from schema_salad.schema import load_schema, load_and_validate
+from schema_salad.sourceline import strip_dup_lineno
+from schema_salad.validate import ValidationException
+from os.path import normpath
+import re
+import six
+
+class TestPrintOneline(unittest.TestCase):
+    def test_print_oneline(self):
+        # Issue #135
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
+            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
+
+        src = "test15.cwl"
+        with self.assertRaises(ValidationException):
+            try:
+                load_and_validate(document_loader, avsc_names,
+                                  six.text_type(get_data("tests/test_schema/"+src)), True)
+            except ValidationException as e:
+                msgs = to_one_line_messages(str(e)).splitlines()
+                self.assertEqual(len(msgs), 2)
+                m = re.match(r'^(.+:\d+:\d+:)(.+)$', msgs[0])
+                self.assertTrue(msgs[0].endswith(src+":11:7: invalid field `invalid_field`, expected one of: 'loadContents', 'position', 'prefix', 'separate', 'itemSeparator', 'valueFrom', 'shellQuote'"))
+                self.assertTrue(msgs[1].endswith(src+":12:7: invalid field `another_invalid_field`, expected one of: 'loadContents', 'position', 'prefix', 'separate', 'itemSeparator', 'valueFrom', 'shellQuote'"))
+                print("\n", e)
+                raise
+
+    def test_print_oneline_for_invalid_yaml(self):
+        # Issue #137
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
+            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
+
+        src = "test16.cwl"
+        with self.assertRaises(RuntimeError):
+            try:
+                load_and_validate(document_loader, avsc_names,
+                                  six.text_type(get_data("tests/test_schema/"+src)), True)
+            except RuntimeError as e:
+                msg = reformat_yaml_exception_message(strip_dup_lineno(six.text_type(e)))
+                msg = to_one_line_messages(msg)
+                self.assertTrue(msg.endswith(src+":10:1: could not find expected \':\'"))
+                print("\n", e)
+                raise
+
+    def test_print_oneline_for_errors_in_the_same_line(self):
+        # Issue #136
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
+            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
+
+        src = "test17.cwl"
+        with self.assertRaises(ValidationException):
+            try:
+                load_and_validate(document_loader, avsc_names,
+                                  six.text_type(get_data("tests/test_schema/"+src)), True)
+            except ValidationException as e:
+                msgs = to_one_line_messages(str(e)).splitlines()
+                self.assertEqual(len(msgs), 2)
+                self.assertTrue(msgs[0].endswith(src+":13:5: missing required field `id`"))
+                self.assertTrue(msgs[1].endswith(src+":13:5: invalid field `aa`, expected one of: 'label', 'secondaryFiles', 'format', 'streamable', 'doc', 'id', 'outputBinding', 'type'"))
+                print("\n", e)
+                raise
+
+    def test_print_oneline_for_errors_in_resolve_ref(self):
+        # Issue #141
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
+            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
+
+        src = "test18.cwl"
+        fullpath = normpath(get_data("tests/test_schema/"+src))
+        with self.assertRaises(ValidationException):
+            try:
+                load_and_validate(document_loader, avsc_names,
+                                  six.text_type(fullpath), True)
+            except ValidationException as e:
+                msgs = to_one_line_messages(str(strip_dup_lineno(six.text_type(e)))).splitlines()
+                # convert Windows path to Posix path
+                if '\\' in fullpath:
+                    fullpath = '/'+fullpath.replace('\\', '/')
+                self.assertEqual(len(msgs), 1)
+                self.assertTrue(msgs[0].endswith(src+':13:5: Field `type` references unknown identifier `Filea`, tried file://%s#Filea' % (fullpath)))
+                print("\n", e)
+                raise
+
+    def test_for_invalid_yaml1(self):
+        # Issue 143
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
+            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
+
+        src = "test16.cwl"
+        with self.assertRaises(RuntimeError):
+            try:
+                load_and_validate(document_loader, avsc_names,
+                                  six.text_type(get_data("tests/test_schema/"+src)), True)
+            except RuntimeError as e:
+                msg = reformat_yaml_exception_message(strip_dup_lineno(six.text_type(e)))
+                msgs = msg.splitlines()
+                self.assertEqual(len(msgs), 2)
+                self.assertTrue(msgs[0].endswith(src+":9:7: while scanning a simple key"))
+                self.assertTrue(msgs[1].endswith(src+":10:1:   could not find expected ':'"))
+                print("\n", e)
+                raise
+
+    def test_for_invalid_yaml2(self):
+        # Issue 143
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(
+            get_data(u"tests/test_schema/CommonWorkflowLanguage.yml"))
+
+        src = "test19.cwl"
+        with self.assertRaises(RuntimeError):
+            try:
+                load_and_validate(document_loader, avsc_names,
+                                  six.text_type(get_data("tests/test_schema/"+src)), True)
+            except RuntimeError as e:
+                msg = reformat_yaml_exception_message(strip_dup_lineno(six.text_type(e)))
+                self.assertTrue(msg.endswith(src+":1:1: expected <block end>, but found ':'"))
+                print("\n", e)
+                raise
diff --git a/schema_salad/tests/test_ref_resolver.py b/schema_salad/tests/test_ref_resolver.py
index fdb5e61..d1d5074 100644
--- a/schema_salad/tests/test_ref_resolver.py
+++ b/schema_salad/tests/test_ref_resolver.py
@@ -53,3 +53,111 @@ def test_Loader_initialisation_with_neither_TMP_HOME_set(tmp_dir_fixture):
 
     loader = Loader(ctx={})
     assert isinstance(loader.session, Session)
+
+def test_DefaultFetcher_urljoin_win32(tmp_dir_fixture):
+    import os
+    import sys
+    from schema_salad.ref_resolver import DefaultFetcher
+    from requests import Session
+
+    # Ensure HOME is set.
+    os.environ["HOME"] = tmp_dir_fixture
+
+    actual_platform = sys.platform
+    try:
+        # For this test always pretend we're on Windows
+        sys.platform = "win32"
+        fetcher = DefaultFetcher({}, None)
+        # Relative path, same folder
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "soup.cwl")
+        assert url == "file:///C:/Users/fred/soup.cwl"
+        # Relative path, sub folder
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "foo/soup.cwl")
+        assert url == "file:///C:/Users/fred/foo/soup.cwl"
+        # relative climb-up path
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "../alice/soup.cwl")
+        assert url == "file:///C:/Users/alice/soup.cwl"
+
+        # Path with drive: should not be treated as relative to directory
+        # Note: \ would already have been converted to / by resolve_ref()
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "c:/bar/soup.cwl")
+        assert url == "file:///c:/bar/soup.cwl"
+        # /C:/  (regular URI absolute path)
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "/c:/bar/soup.cwl")
+        assert url == "file:///c:/bar/soup.cwl"
+        # Relative, change drive
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "D:/baz/soup.cwl")
+        assert url == "file:///d:/baz/soup.cwl"
+        # Relative from root of base's D: drive
+        url = fetcher.urljoin("file:///d:/baz/soup.cwl", "/foo/soup.cwl")
+        assert url == "file:///d:/foo/soup.cwl"
+
+        # resolving absolute non-drive URIs still works
+        url = fetcher.urljoin("file:///C:/Users/fred/foo.cwl", "http://example.com/bar/soup.cwl")
+        assert url == "http://example.com/bar/soup.cwl"
+        # and of course relative paths from http://
+        url = fetcher.urljoin("http://example.com/fred/foo.cwl", "soup.cwl")
+        assert url == "http://example.com/fred/soup.cwl"
+
+        # Stay on http:// and same host
+        url = fetcher.urljoin("http://example.com/fred/foo.cwl", "/bar/soup.cwl")
+        assert url == "http://example.com/bar/soup.cwl"
+
+
+        # Security concern - can't resolve file: from http:
+        with pytest.raises(ValueError):
+            url = fetcher.urljoin("http://example.com/fred/foo.cwl", "file:///c:/bar/soup.cwl")
+        # Drive-relative -- should NOT return "absolute" URI c:/bar/soup.cwl"
+        # as that is a potential remote exploit
+        with pytest.raises(ValueError):
+            url = fetcher.urljoin("http://example.com/fred/foo.cwl", "c:/bar/soup.cwl")
+
+    finally:
+        sys.platform = actual_platform
+
+def test_DefaultFetcher_urljoin_linux(tmp_dir_fixture):
+    import os
+    import sys
+    from schema_salad.ref_resolver import DefaultFetcher
+    from requests import Session
+
+    # Ensure HOME is set.
+    os.environ["HOME"] = tmp_dir_fixture
+
+    actual_platform = sys.platform
+    try:
+        # Pretend it's Linux (e.g. not win32)
+        sys.platform = "linux2"
+        fetcher = DefaultFetcher({}, None)
+        url = fetcher.urljoin("file:///home/fred/foo.cwl", "soup.cwl")
+        assert url == "file:///home/fred/soup.cwl"
+
+        url = fetcher.urljoin("file:///home/fred/foo.cwl", "../alice/soup.cwl")
+        assert url == "file:///home/alice/soup.cwl"
+        # relative from root
+        url = fetcher.urljoin("file:///home/fred/foo.cwl", "/baz/soup.cwl")
+        assert url == "file:///baz/soup.cwl"
+
+        url = fetcher.urljoin("file:///home/fred/foo.cwl", "http://example.com/bar/soup.cwl")
+        assert url == "http://example.com/bar/soup.cwl"
+
+        url = fetcher.urljoin("http://example.com/fred/foo.cwl", "soup.cwl")
+        assert url == "http://example.com/fred/soup.cwl"
+
+        # Root-relative -- here relative to http host, not file:///
+        url = fetcher.urljoin("http://example.com/fred/foo.cwl", "/bar/soup.cwl")
+        assert url == "http://example.com/bar/soup.cwl"
+
+        # Security concern - can't resolve file: from http:
+        with pytest.raises(ValueError):
+            url = fetcher.urljoin("http://example.com/fred/foo.cwl", "file:///bar/soup.cwl")
+
+        # But this one is not "dangerous" on Linux
+        fetcher.urljoin("http://example.com/fred/foo.cwl", "c:/bar/soup.cwl")
+
+    finally:
+        sys.platform = actual_platform
+
+
+def test_link_checking(tmp_dir_fixture):
+    pass
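
The new urljoin tests above pin down the DefaultFetcher behaviour: ordinary relative resolution, Windows drive-letter handling, and a refusal to resolve file: (or drive-relative) references from an http(s) base. A rough standalone sketch of that last security check, mirroring only the win32 expectations (illustrative, not the DefaultFetcher code itself):

    import re
    from urllib.parse import urljoin, urlsplit

    def safe_urljoin(base, ref):
        # Refuse to let a document fetched over http(s) point at local files,
        # either via an explicit file: URI or a Windows drive-letter path.
        if urlsplit(base).scheme in ("http", "https"):
            if urlsplit(ref).scheme == "file" or re.match(r"^[A-Za-z]:[/\\]", ref):
                raise ValueError("will not resolve local reference %r from remote base %r" % (ref, base))
        return urljoin(base, ref)
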
diff --git a/schema_salad/tests/test_schema/CommandLineTool.yml b/schema_salad/tests/test_schema/CommandLineTool.yml
index 181c51c..6a5de98 100644
--- a/schema_salad/tests/test_schema/CommandLineTool.yml
+++ b/schema_salad/tests/test_schema/CommandLineTool.yml
@@ -2,6 +2,7 @@ $base: "https://w3id.org/cwl/cwl#"
 
 $namespaces:
   cwl: "https://w3id.org/cwl/cwl#"
+  sld: "https://w3id.org/cwl/salad#"
 
 $graph:
 
diff --git a/schema_salad/tests/test_schema/Workflow.yml b/schema_salad/tests/test_schema/Workflow.yml
index 26bde8e..6c2b062 100644
--- a/schema_salad/tests/test_schema/Workflow.yml
+++ b/schema_salad/tests/test_schema/Workflow.yml
@@ -1,6 +1,7 @@
 $base: "https://w3id.org/cwl/cwl#"
 
 $namespaces:
+  sld: "https://w3id.org/cwl/salad#"
   cwl: "https://w3id.org/cwl/cwl#"
 
 $graph:
diff --git a/schema_salad/tests/test_schema/test15.cwl b/schema_salad/tests/test_schema/test15.cwl
new file mode 100755
index 0000000..0fc7f7e
--- /dev/null
+++ b/schema_salad/tests/test_schema/test15.cwl
@@ -0,0 +1,13 @@
+#!/usr/bin/env cwl-runner
+
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: echo
+inputs:
+  message:
+    type: string
+    inputBinding:
+      position: 1
+      invalid_field: it_is_invalid_field
+      another_invalid_field: invalid
+outputs: []
diff --git a/schema_salad/tests/test_schema/test16.cwl b/schema_salad/tests/test_schema/test16.cwl
new file mode 100644
index 0000000..6bcfcc1
--- /dev/null
+++ b/schema_salad/tests/test_schema/test16.cwl
@@ -0,0 +1,15 @@
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: echo
+inputs:
+  message:
+    type: string
+    inputBinding:
+      position: 1
+      posi
+outputs:
+  hello_output:
+    type: File
+    outputBinding:
+      glob: hello-out.txt
+stdout: hello-out.txt
diff --git a/schema_salad/tests/test_schema/test17.cwl b/schema_salad/tests/test_schema/test17.cwl
new file mode 100644
index 0000000..51cbbf1
--- /dev/null
+++ b/schema_salad/tests/test_schema/test17.cwl
@@ -0,0 +1,13 @@
+class: CommandLineTool
+cwlVersion: v1.0
+baseCommand: cowsay
+inputs:
+  - id: input
+    type: string?
+    inputBinding:
+      position: 0
+outputs:
+  - id: output
+    type: string?
+    outputBinding: {}
+  - aa: moa
diff --git a/schema_salad/tests/test_schema/test18.cwl b/schema_salad/tests/test_schema/test18.cwl
new file mode 100644
index 0000000..e849a3d
--- /dev/null
+++ b/schema_salad/tests/test_schema/test18.cwl
@@ -0,0 +1,13 @@
+class: CommandLineTool
+cwlVersion: v1.0
+baseCommand: echo
+inputs:
+  - id: input
+    type: string?
+    inputBinding: {}
+outputs:
+  - id: output
+    type: string?
+    outputBinding: {}
+  - id: output1
+    type: Filea
diff --git a/schema_salad/tests/test_schema/test19.cwl b/schema_salad/tests/test_schema/test19.cwl
new file mode 100644
index 0000000..a78d287
--- /dev/null
+++ b/schema_salad/tests/test_schema/test19.cwl
@@ -0,0 +1,15 @@
+: aaa
+cwlVersion: v1.0
+class: CommandLineTool
+baseCommand: echo
+inputs:
+  message:
+    type: string
+    inputBinding:
+      position: 1
+outputs:
+  hello_output:
+    type: File
+    outputBinding:
+      glob: hello-out.txt
+stdout: hello-out.txt
diff --git a/schema_salad/tests/test_validate.pyx b/schema_salad/tests/test_validate.pyx
new file mode 100644
index 0000000..b37127a
--- /dev/null
+++ b/schema_salad/tests/test_validate.pyx
@@ -0,0 +1,71 @@
+import unittest
+import json
+from schema_salad.schema import load_schema
+from schema_salad.validate import validate_ex
+from schema_salad.sourceline import cmap
+
+class TestValidate(unittest.TestCase):
+    schema = cmap({"name": "_", "$graph":[{
+        "name": "File",
+        "type": "record",
+        "fields": [{
+            "name": "class",
+            "type": {
+                "type": "enum",
+                "name": "File_class",
+                "symbols": ["#_/File"]
+            },
+            "jsonldPredicate": {
+                "_id": "@type",
+                "_type": "@vocab"
+            }
+        }, {
+            "name": "location",
+            "type": "string",
+            "jsonldPredicate": "_:location"
+        }]
+    }, {
+        "name": "Directory",
+        "type": "record",
+        "fields": [{
+            "name": "class",
+            "type": {
+                "type": "enum",
+                "name": "Directory_class",
+                "symbols": ["#_/Directory"]
+            },
+            "jsonldPredicate": {
+                "_id": "@type",
+                "_type": "@vocab"
+            }
+        }, {
+            "name": "location",
+            "type": "string",
+            "jsonldPredicate": "_:location"
+        }, {
+            "name": "listing",
+            "type": {
+                "type": "array",
+                "items": ["File", "Directory"]
+            }
+        }],
+    }]})
+
+    def test_validate_big(self):
+        document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(self.schema)
+
+        with open("biglisting.yml") as f:
+            biglisting = json.load(f)
+
+        self.assertEquals(True, validate_ex(avsc_names.get_name("Directory", ""), biglisting,
+                                            strict=True, raise_ex=False))
+
+
+    # def test_validate_small(self):
+    #     document_loader, avsc_names, schema_metadata, metaschema_loader = load_schema(self.schema)
+
+    #     with open("smalllisting.yml") as f:
+    #         smalllisting = json.load(f)
+
+    #     validate_ex(avsc_names.get_name("Directory", ""), smalllisting,
+    #                 strict=True, raise_ex=True)
diff --git a/schema_salad/tests/util.py b/schema_salad/tests/util.py
index 0f71ece..110daa5 100644
--- a/schema_salad/tests/util.py
+++ b/schema_salad/tests/util.py
@@ -4,6 +4,7 @@ from typing import Optional, Text
 import os
 
 def get_data(filename):  # type: (Text) -> Optional[Text]
+    filename = os.path.normpath(filename)  # normalize the path for the current OS so later path joins work correctly
     filepath = None
     try:
         filepath = resource_filename(
diff --git a/schema_salad/utils.py b/schema_salad/utils.py
index 2ba98dc..438bab5 100644
--- a/schema_salad/utils.py
+++ b/schema_salad/utils.py
@@ -1,5 +1,5 @@
 from __future__ import absolute_import
-
+import os
 from typing import Any, Dict, List
 
 
@@ -27,15 +27,20 @@ def flatten(l, ltypes=(list, tuple)):
         return [l]
 
     ltype = type(l)
-    l = list(l)
+    lst = list(l)
     i = 0
-    while i < len(l):
-        while isinstance(l[i], ltypes):
-            if not l[i]:
-                l.pop(i)
+    while i < len(lst):
+        while isinstance(lst[i], ltypes):
+            if not lst[i]:
+                lst.pop(i)
                 i -= 1
                 break
             else:
-                l[i:i + 1] = l[i]
+                lst[i:i + 1] = lst[i]
         i += 1
-    return ltype(l)
+    return ltype(lst)
+
+# Check whether we are running on Windows
+def onWindows():
+    # type: () -> (bool)
+    return os.name == 'nt'
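
The flatten() rewrite above only renames the local variable so the parameter is no longer rebound; the behaviour is unchanged. A quick usage check (assuming the module is importable as schema_salad.utils):

    from schema_salad.utils import flatten

    assert flatten([1, [2, [3, 4]], [], 5]) == [1, 2, 3, 4, 5]
    assert flatten((1, (2, 3))) == (1, 2, 3)  # the input's own sequence type is preserved
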
diff --git a/schema_salad/validate.py b/schema_salad/validate.py
index 2fab415..7f344ff 100644
--- a/schema_salad/validate.py
+++ b/schema_salad/validate.py
@@ -213,7 +213,8 @@ def validate_ex(expected_schema,                  # type: Schema
                 continue
             elif isinstance(datum, dict) and not isinstance(s, avro.schema.RecordSchema):
                 continue
-            elif isinstance(datum, (bool, six.integer_types, float, six.string_types)) and isinstance(s, (avro.schema.ArraySchema, avro.schema.RecordSchema)):
+            elif (isinstance(datum, (bool, six.integer_types, float, six.string_types)) and  # type: ignore
+                  isinstance(s, (avro.schema.ArraySchema, avro.schema.RecordSchema))):
                 continue
             elif datum is not None and s.type == "null":
                 continue
diff --git a/setup.cfg b/setup.cfg
index bab3a64..7d9974a 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -11,6 +11,6 @@ test = pytest
 addopts = --pyarg schema_salad
 
 [egg_info]
-tag_build = .20170630075932
+tag_build = .20171201034858
 tag_date = 0
 
diff --git a/setup.py b/setup.py
index 80192c1..a66595e 100755
--- a/setup.py
+++ b/setup.py
@@ -2,10 +2,8 @@
 
 import os
 import sys
-import shutil
 
 import setuptools.command.egg_info as egg_info_cmd
-
 from setuptools import setup, find_packages
 
 SETUP_DIR = os.path.dirname(__file__)
@@ -40,8 +38,9 @@ install_requires = [
     'six >= 1.8.0']
 
 extras_require={
-    ':python_version<"3"': ['avro'],
-    ':python_version>="3"': ['avro-python3']}
+    ':python_version<"3"': ['avro == 1.8.1'],
+    ':python_version>="3"': ['future', 'avro-cwl == 1.8.4']  # fork of avro for working with python3
+}
 
 setup(name='schema-salad',
       version='2.6',

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-schema-salad.git


