[med-svn] [python-rdflib-jsonld] 01/01: Imported Upstream version 0.4.0
Michael Crusoe
misterc-guest at moszumanska.debian.org
Thu Nov 10 11:27:01 UTC 2016
This is an automated email from the git hooks/post-receive script.
misterc-guest pushed a commit to annotated tag upstream/0.4.0
in repository python-rdflib-jsonld.
commit 2612df50c4532613da90f3c3963b9aeb5181cd47
Author: Michael R. Crusoe <crusoe at ucdavis.edu>
Date: Tue May 24 04:05:53 2016 -0700
Imported Upstream version 0.4.0
---
LICENSE.md | 35 +++
PKG-INFO | 2 +-
README.md | 78 +++---
docs/jsonld-parser.rst | 8 +-
docs/jsonld-serializer.rst | 8 +-
rdflib_jsonld.egg-info/PKG-INFO | 2 +-
rdflib_jsonld.egg-info/SOURCES.txt | 11 +-
rdflib_jsonld.egg-info/entry_points.txt | 2 +
rdflib_jsonld.egg-info/requires.txt | 2 +-
rdflib_jsonld/__init__.py | 2 +-
rdflib_jsonld/context.py | 100 +++++---
rdflib_jsonld/errors.py | 6 +
rdflib_jsonld/parser.py | 428 ++++++++++++++++----------------
rdflib_jsonld/util.py | 18 +-
setup.py | 4 +-
test/.DS_Store | Bin 6148 -> 0 bytes
test/__init__.pyc | Bin 597 -> 0 bytes
test/earl-context.jsonld | 24 --
test/test-suite/.DS_Store | Bin 6148 -> 0 bytes
test/test_api.pyc | Bin 1434 -> 0 bytes
test/test_compaction.py | 62 ++++-
test/test_compaction.pyc | Bin 1718 -> 0 bytes
test/test_context.py | 169 +++++++++++++
test/test_context.rst | 116 ---------
test/test_testsuite.py | 3 +
test/test_testsuite.pyc | Bin 11331 -> 0 bytes
26 files changed, 632 insertions(+), 448 deletions(-)
diff --git a/LICENSE.md b/LICENSE.md
new file mode 100644
index 0000000..565704b
--- /dev/null
+++ b/LICENSE.md
@@ -0,0 +1,35 @@
+LICENSE AGREEMENT FOR RDFLIB-JSONLD
+========================================================================
+
+Copyright (c) 2012-2015, RDFLib Team
+All rights reserved.
+
+See http://github.com/RDFLib/rdflib-jsonld
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/PKG-INFO b/PKG-INFO
index 604c956..870ea1a 100644
--- a/PKG-INFO
+++ b/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: rdflib-jsonld
-Version: 0.3
+Version: 0.4.0
Summary: rdflib extension adding JSON-LD parser and serializer
Home-page: https://github.com/RDFLib/rdflib-jsonld
Author: RDFLib Team
diff --git a/README.md b/README.md
index 570f6dc..328daaa 100644
--- a/README.md
+++ b/README.md
@@ -9,44 +9,58 @@ This implementation will:
* serialize an RDF graph to JSON-LD formatted output
+Installation
+------------
+
+The easiest way to install the RDFLib JSON-LD plugin is directly from PyPi using pip by running the command below:
+
+ pip install rdflib-jsonld
+
+Otherwise you can download the source and install it directly by running:
+
+ python setup.py install
+
+
Using the plug-in JSONLD serializer/parser with RDFLib
------------------------------------------------------
The plugin parser and serializer are automatically registered if installed by
setuptools.
-
- >>> from rdflib import Graph, plugin
- >>> from rdflib.serializer import Serializer
-
- >>> testrdf = '''
- ... @prefix dc: <http://purl.org/dc/terms/> .
- ... <http://example.org/about>
- ... dc:title "Someone's Homepage"@en .
- ... '''
-
- >>> g = Graph().parse(data=testrdf, format='n3')
-
- >>> print(g.serialize(format='json-ld', indent=4))
- {
- "@id": "http://example.org/about",
- "http://purl.org/dc/terms/title": [
- {
- "@language": "en",
- "@value": "Someone's Homepage"
- }
- ]
- }
-
- >>> context = {"@vocab": "http://purl.org/dc/terms/", "@language": "en"}
- >>> print(g.serialize(format='json-ld', context=context, indent=4))
- {
- "@context": {
+
+```python
+>>> from rdflib import Graph, plugin
+>>> from rdflib.serializer import Serializer
+
+>>> testrdf = '''
+... @prefix dc: <http://purl.org/dc/terms/> .
+... <http://example.org/about>
+... dc:title "Someone's Homepage"@en .
+... '''
+
+>>> g = Graph().parse(data=testrdf, format='n3')
+
+>>> print(g.serialize(format='json-ld', indent=4))
+{
+ "@id": "http://example.org/about",
+ "http://purl.org/dc/terms/title": [
+ {
"@language": "en",
- "@vocab": "http://purl.org/dc/terms/"
- },
- "@id": "http://example.org/about",
- "title": "Someone's Homepage"
- }
+ "@value": "Someone's Homepage"
+ }
+ ]
+}
+
+>>> context = {"@vocab": "http://purl.org/dc/terms/", "@language": "en"}
+>>> print(g.serialize(format='json-ld', context=context, indent=4))
+{
+ "@context": {
+ "@language": "en",
+ "@vocab": "http://purl.org/dc/terms/"
+ },
+ "@id": "http://example.org/about",
+ "title": "Someone's Homepage"
+}
+```
Building the Sphinx documentation
diff --git a/docs/jsonld-parser.rst b/docs/jsonld-parser.rst
index d7e1384..1c9445c 100644
--- a/docs/jsonld-parser.rst
+++ b/docs/jsonld-parser.rst
@@ -8,8 +8,7 @@ Using the plug-in JSONLD parser with RDFLib
---------------------------------------------
The plugin serializer is automatically registered if installed by
-setuptools, otherwise call ``rdfextras.registerplugins()`` after
-importing, as shown below.
+setuptools.
Identify a source of JSON-LD, pass the source to the parser,
manipulate the resulting graph.
@@ -17,9 +16,6 @@ manipulate the resulting graph.
.. code-block:: python
>>> from rdflib import Graph, URIRef, Literal
- >>> from rdflib.parser import Parser
- >>> import rdfextras
- >>> rdfextras.registerplugins() # if no setuptools
>>> test_json = '''
... {
... "@context": {
@@ -30,7 +26,7 @@ manipulate the resulting graph.
... "@id": "http://example.org/about",
... "dc:title": {
... "@language": "en",
- ... "@literal": "Someone's Homepage"
+ ... "@value": "Someone's Homepage"
... }
... }
... '''
diff --git a/docs/jsonld-serializer.rst b/docs/jsonld-serializer.rst
index 441a360..d35ce5f 100644
--- a/docs/jsonld-serializer.rst
+++ b/docs/jsonld-serializer.rst
@@ -8,17 +8,13 @@ Using the plug-in JSONLD serializer with RDFLib
------------------------------------------------
The plugin serializer is automatically registered if installed by
-setuptools, otherwise call ``rdfextras.registerplugins()`` after
-importing, as shown below.
+setuptools.
Read in an RDFLib Graph and serialize it, specifying ``format='json-ld'``.
.. code-block:: python
>>> from rdflib import Graph, plugin
- >>> from rdflib.serializer import Serializer
- >>> import rdfextras
- >>> rdfextras.registerplugins()
>>> testrdf = '''
... @prefix dc: <http://purl.org/dc/terms/> .
@@ -38,7 +34,7 @@ Read in an RDFLib Graph and serialize it, specifying ``format='json-ld'``.
"@id": "http://example.org/about",
"dc:title": {
"@language": "en",
- "@literal": "Someone's Homepage"
+ "@value": "Someone's Homepage"
}
}
diff --git a/rdflib_jsonld.egg-info/PKG-INFO b/rdflib_jsonld.egg-info/PKG-INFO
index 604c956..870ea1a 100644
--- a/rdflib_jsonld.egg-info/PKG-INFO
+++ b/rdflib_jsonld.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
Metadata-Version: 1.1
Name: rdflib-jsonld
-Version: 0.3
+Version: 0.4.0
Summary: rdflib extension adding JSON-LD parser and serializer
Home-page: https://github.com/RDFLib/rdflib-jsonld
Author: RDFLib Team
diff --git a/rdflib_jsonld.egg-info/SOURCES.txt b/rdflib_jsonld.egg-info/SOURCES.txt
index c06601f..c475c73 100644
--- a/rdflib_jsonld.egg-info/SOURCES.txt
+++ b/rdflib_jsonld.egg-info/SOURCES.txt
@@ -1,3 +1,4 @@
+LICENSE.md
MANIFEST.in
README.md
setup.cfg
@@ -10,6 +11,7 @@ docs/jsonld-serializer.rst
docs/make.bat
rdflib_jsonld/__init__.py
rdflib_jsonld/context.py
+rdflib_jsonld/errors.py
rdflib_jsonld/keys.py
rdflib_jsonld/parser.py
rdflib_jsonld/serializer.py
@@ -20,19 +22,12 @@ rdflib_jsonld.egg-info/dependency_links.txt
rdflib_jsonld.egg-info/entry_points.txt
rdflib_jsonld.egg-info/requires.txt
rdflib_jsonld.egg-info/top_level.txt
-test/.DS_Store
test/README.md
test/__init__.py
-test/__init__.pyc
-test/earl-context.jsonld
test/test_api.py
-test/test_api.pyc
test/test_compaction.py
-test/test_compaction.pyc
-test/test_context.rst
+test/test_context.py
test/test_testsuite.py
-test/test_testsuite.pyc
-test/test-suite/.DS_Store
test/test-suite/README
test/test-suite/context.jsonld
test/test-suite/manifest.jsonld
diff --git a/rdflib_jsonld.egg-info/entry_points.txt b/rdflib_jsonld.egg-info/entry_points.txt
index 230ae31..056591f 100644
--- a/rdflib_jsonld.egg-info/entry_points.txt
+++ b/rdflib_jsonld.egg-info/entry_points.txt
@@ -1,6 +1,8 @@
[rdf.plugins.parser]
+application/ld+json = rdflib_jsonld.parser:JsonLDParser
json-ld = rdflib_jsonld.parser:JsonLDParser
[rdf.plugins.serializer]
+application/ld+json = rdflib_jsonld.serializer:JsonLDSerializer
json-ld = rdflib_jsonld.serializer:JsonLDSerializer
diff --git a/rdflib_jsonld.egg-info/requires.txt b/rdflib_jsonld.egg-info/requires.txt
index 4776c71..98496bc 100644
--- a/rdflib_jsonld.egg-info/requires.txt
+++ b/rdflib_jsonld.egg-info/requires.txt
@@ -1 +1 @@
-rdflib>=4.2
\ No newline at end of file
+rdflib>=4.2
diff --git a/rdflib_jsonld/__init__.py b/rdflib_jsonld/__init__.py
index 1a8f722..8fa62fa 100644
--- a/rdflib_jsonld/__init__.py
+++ b/rdflib_jsonld/__init__.py
@@ -1,3 +1,3 @@
"""
"""
-__version__ = "0.3"
+__version__ = "0.4.0"
diff --git a/rdflib_jsonld/context.py b/rdflib_jsonld/context.py
index 80b0ae1..9c086d8 100644
--- a/rdflib_jsonld/context.py
+++ b/rdflib_jsonld/context.py
@@ -5,11 +5,13 @@ Implementation of the JSON-LD Context structure. See:
http://json-ld.org/
"""
+from collections import namedtuple
from rdflib.namespace import RDF
-from .util import source_to_json, urljoin, split_iri, norm_url
from .keys import (BASE, CONTAINER, CONTEXT, GRAPH, ID, INDEX, LANG, LIST,
REV, SET, TYPE, VALUE, VOCAB)
+from . import errors
+from .util import source_to_json, urljoin, urlsplit, split_iri, norm_url
NODE_KEYS = set([LANG, ID, TYPE, VALUE, LIST, SET, REV, GRAPH])
@@ -33,25 +35,19 @@ class Context(object):
if source:
self.load(source)
- def load(self, source, base=None):
- self.active = True
- inputs = not isinstance(source, list) and [source] or source
- sources = []
- for source in inputs:
- if isinstance(source, basestring):
- url = urljoin(base, source)
- #if url in visited_urls: continue
- #visited_urls.append(url)
- source = source_to_json(url)
- if isinstance(source, dict):
- if CONTEXT in source:
- source = source[CONTEXT]
- if isinstance(source, list):
- sources.extend(source)
- else:
- sources.append(source)
- for source in sources:
- self._read_source(source)
+ @property
+ def base(self):
+ return self._base
+
+ @base.setter
+ def base(self, base):
+ if base:
+ hash_index = base.find('#')
+ if hash_index > -1:
+ base = base[0:hash_index]
+ self._base = self.resolve_iri(base) if (
+ hasattr(self, '_base') and base is not None) else base
+ self._basedomain = '%s://%s' % urlsplit(base)[0:2] if base else None
def subcontext(self, source):
# IMPROVE: to optimize, implement SubContext with parent fallback support
@@ -136,12 +132,15 @@ class Context(object):
def resolve(self, curie_or_iri):
iri = self.expand(curie_or_iri, False)
- if iri.startswith('_:'):
+ if self.isblank(iri):
return iri
return self.resolve_iri(iri)
def resolve_iri(self, iri):
- return norm_url(self.base, iri)
+ return norm_url(self._base, iri)
+
+ def isblank(self, ref):
+ return ref.startswith('_:')
def expand(self, term_curie_or_iri, use_vocab=True):
if use_vocab:
@@ -166,6 +165,11 @@ class Context(object):
pfx = self._prefixes.get(ns)
if pfx:
return u":".join((pfx, name))
+ elif self._base:
+ if unicode(iri) == self._base:
+ return ""
+ elif iri.startswith(self._basedomain):
+ return iri[len(self._basedomain):]
return iri
def to_symbol(self, iri):
@@ -181,7 +185,39 @@ class Context(object):
return u":".join((pfx, name))
return iri
- def _read_source(self, source):
+ def load(self, source, base=None):
+ self.active = True
+ sources = []
+ source = source if isinstance(source, list) else [source]
+ self._prep_sources(base, source, sources)
+ for source_url, source in sources:
+ self._read_source(source, source_url)
+
+ def _prep_sources(self, base, inputs, sources, referenced_contexts=None,
+ in_source_url=None):
+ referenced_contexts = referenced_contexts or set()
+ for source in inputs:
+ if isinstance(source, basestring):
+ source_url = urljoin(base, source)
+ if source_url in referenced_contexts:
+ raise errors.RECURSIVE_CONTEXT_INCLUSION
+ referenced_contexts.add(source_url)
+ source = source_to_json(source_url)
+ if CONTEXT not in source:
+ raise errors.INVALID_REMOTE_CONTEXT
+ else:
+ source_url = in_source_url
+
+ if isinstance(source, dict):
+ if CONTEXT in source:
+ source = source[CONTEXT]
+ source = source if isinstance(source, list) else [source]
+ if isinstance(source, list):
+ self._prep_sources(base, source, sources, referenced_contexts, source_url)
+ else:
+ sources.append((source_url, source))
+
+ def _read_source(self, source, source_url=None):
self.vocab = source.get(VOCAB, self.vocab)
for key, value in source.items():
if key == LANG:
@@ -189,10 +225,8 @@ class Context(object):
elif key == VOCAB:
continue
elif key == BASE:
- # TODO: only base to None if source is embedded
- #if value is None and remote:
- # self.base = self.doc_base
- #else:
+ if source_url:
+ continue
self.base = value
else:
self._read_term(source, key, value)
@@ -258,12 +292,6 @@ class Context(object):
return term
-class Term(object):
- def __init__(self, idref, name, coercion=UNDEF, container=UNDEF,
- language=UNDEF, reverse=False):
- self.name = name
- self.id = idref
- self.type = coercion
- self.container = container
- self.language = language
- self.reverse = reverse
+Term = namedtuple('Term',
+ 'id, name, type, container, language, reverse')
+Term.__new__.__defaults__ = (UNDEF, UNDEF, UNDEF, False)
diff --git a/rdflib_jsonld/errors.py b/rdflib_jsonld/errors.py
new file mode 100644
index 0000000..5f275ed
--- /dev/null
+++ b/rdflib_jsonld/errors.py
@@ -0,0 +1,6 @@
+class JSONLDException(ValueError):
+ pass
+
+# http://www.w3.org/TR/json-ld-api/#idl-def-JsonLdErrorCode.{code-message}
+RECURSIVE_CONTEXT_INCLUSION = JSONLDException("recursive context inclusion")
+INVALID_REMOTE_CONTEXT = JSONLDException("invalid remote context")
diff --git a/rdflib_jsonld/parser.py b/rdflib_jsonld/parser.py
index 89fb739..960c473 100644
--- a/rdflib_jsonld/parser.py
+++ b/rdflib_jsonld/parser.py
@@ -49,6 +49,8 @@ __all__ = ['JsonLDParser', 'to_rdf']
TYPE_TERM = Term(unicode(RDF.type), TYPE, VOCAB)
+ALLOW_LISTS_OF_LISTS = True # NOTE: Not allowed in JSON-LD 1.0
+
class JsonLDParser(Parser):
def __init__(self):
@@ -74,243 +76,253 @@ class JsonLDParser(Parser):
to_rdf(data, conj_sink, base, context_data)
-generalized_rdf = False
-def to_rdf(data, graph, base=None, context_data=None, produce_generalized_rdf=False):
+def to_rdf(data, graph, base=None, context_data=None,
+ produce_generalized_rdf=False,
+ allow_lists_of_lists=None):
# TODO: docstring w. args and return value
- global generalized_rdf # FIXME: not thread-safe and error-prone
- generalized_rdf = produce_generalized_rdf
- context = Context(base=base)
-
+ context=Context(base=base)
if context_data:
context.load(context_data)
+ parser = Parser(generalized_rdf=produce_generalized_rdf,
+ allow_lists_of_lists=allow_lists_of_lists)
+ return parser.parse(data, context, graph)
- topcontext = False
- if isinstance(data, list):
- resources = data
- elif isinstance(data, dict):
- l_ctx = data.get(CONTEXT)
- if l_ctx:
- context.load(l_ctx, base)
- topcontext = True
- resources = data
- if not isinstance(resources, list):
- resources = [resources]
+class Parser(object):
- if context.vocab:
- graph.bind(None, context.vocab)
- for name, term in context.terms.items():
- if term.id and term.id.endswith(VOCAB_DELIMS):
- graph.bind(name, term.id)
+ def __init__(self, generalized_rdf=False, allow_lists_of_lists=None):
+ self.generalized_rdf = generalized_rdf
+ self.allow_lists_of_lists = (allow_lists_of_lists
+ if allow_lists_of_lists is not None else ALLOW_LISTS_OF_LISTS)
- for node in resources:
- _add_to_graph(graph, graph, context, node, topcontext)
+ def parse(self, data, context, graph):
+ topcontext = False
- return graph
+ if isinstance(data, list):
+ resources = data
+ elif isinstance(data, dict):
+ l_ctx = data.get(CONTEXT)
+ if l_ctx:
+ context.load(l_ctx, context.base)
+ topcontext = True
+ resources = data
+ if not isinstance(resources, list):
+ resources = [resources]
+ if context.vocab:
+ graph.bind(None, context.vocab)
+ for name, term in context.terms.items():
+ if term.id and term.id.endswith(VOCAB_DELIMS):
+ graph.bind(name, term.id)
-def _add_to_graph(dataset, graph, context, node, topcontext=False):
- if not isinstance(node, dict) or context.get_value(node):
- return
+ for node in resources:
+ self._add_to_graph(graph, graph, context, node, topcontext)
- if CONTEXT in node and not topcontext:
- l_ctx = node.get(CONTEXT)
- if l_ctx:
- context = context.subcontext(l_ctx)
- else:
- context = Context(base=context.doc_base)
-
- id_val = context.get_id(node)
- if isinstance(id_val, basestring):
- subj = _to_rdf_id(context, id_val)
- else:
- subj = BNode()
-
- if subj is None:
- return None
-
- for key, obj in node.items():
- if key in (CONTEXT, ID, context.get_key(ID)):
- continue
- if key in (REV, context.get_key(REV)):
- for rkey, robj in obj.items():
- _key_to_graph(dataset, graph, context, subj, rkey, robj, True)
- else:
- _key_to_graph(dataset, graph, context, subj, key, obj)
-
- return subj
-
-
-def _key_to_graph(dataset, graph, context, subj, key, obj, reverse=False):
-
- if isinstance(obj, list):
- obj_nodes = obj
- else:
- obj_nodes = [obj]
-
- term = context.terms.get(key)
- if term:
- term_id = term.id
- if term.container == LIST:
- obj_nodes = [{LIST: obj_nodes}]
- elif isinstance(obj, dict):
- if term.container == INDEX:
- obj_nodes = []
- for values in obj.values():
- if not isinstance(values, list):
- obj_nodes.append(values)
- else:
- obj_nodes += values
- elif term.container == LANG:
- obj_nodes = []
- for lang, values in obj.items():
- if not isinstance(values, list):
- values = [values]
- for v in values:
- obj_nodes.append((v, lang))
- else:
- term_id = None
-
- if TYPE in (key, term_id):
- term = TYPE_TERM
- elif GRAPH in (key, term_id):
- #assert graph.context_aware
- subgraph = dataset.get_context(subj)
- for onode in obj_nodes:
- _add_to_graph(dataset, subgraph, context, onode)
- return
- elif SET in (key, term_id):
- for onode in obj_nodes:
- _add_to_graph(dataset, graph, context, onode)
- return
-
- pred_uri = term.id if term else context.expand(key)
-
- flattened = []
- for obj in obj_nodes:
- if isinstance(obj, dict):
- objs = context.get_set(obj)
- if objs is not None:
- obj = objs
- if isinstance(obj, list):
- flattened += obj
- continue
- flattened.append(obj)
- obj_nodes = flattened
+ return graph
- if not pred_uri:
- return
- if term and term.reverse:
- reverse = not reverse
-
- bid = _get_bnodeid(pred_uri)
- if bid:
- if not generalized_rdf:
+ def _add_to_graph(self, dataset, graph, context, node, topcontext=False):
+ if not isinstance(node, dict) or context.get_value(node):
return
- pred = BNode(bid)
- else:
- pred = URIRef(pred_uri)
- for obj_node in obj_nodes:
- obj = _to_object(dataset, graph, context, term, obj_node)
- if obj is None:
- continue
- if reverse:
- graph.add((obj, pred, subj))
+
+ if CONTEXT in node and not topcontext:
+ l_ctx = node.get(CONTEXT)
+ if l_ctx:
+ context = context.subcontext(l_ctx)
+ else:
+ context = Context(base=context.doc_base)
+
+ id_val = context.get_id(node)
+ if isinstance(id_val, basestring):
+ subj = self._to_rdf_id(context, id_val)
else:
- graph.add((subj, pred, obj))
+ subj = BNode()
+
+ if subj is None:
+ return None
+
+ for key, obj in node.items():
+ if key in (CONTEXT, ID, context.get_key(ID)):
+ continue
+ if key in (REV, context.get_key(REV)):
+ for rkey, robj in obj.items():
+ self._key_to_graph(dataset, graph, context, subj, rkey, robj, True)
+ else:
+ self._key_to_graph(dataset, graph, context, subj, key, obj)
+
+ return subj
-def _to_object(dataset, graph, context, term, node, inlist=False):
+ def _key_to_graph(self, dataset, graph, context, subj, key, obj, reverse=False):
- if node is None:
- return
+ if isinstance(obj, list):
+ obj_nodes = obj
+ else:
+ obj_nodes = [obj]
+
+ term = context.terms.get(key)
+ if term:
+ term_id = term.id
+ if term.container == LIST:
+ obj_nodes = [{LIST: obj_nodes}]
+ elif isinstance(obj, dict):
+ if term.container == INDEX:
+ obj_nodes = []
+ for values in obj.values():
+ if not isinstance(values, list):
+ obj_nodes.append(values)
+ else:
+ obj_nodes += values
+ elif term.container == LANG:
+ obj_nodes = []
+ for lang, values in obj.items():
+ if not isinstance(values, list):
+ values = [values]
+ for v in values:
+ obj_nodes.append((v, lang))
+ else:
+ term_id = None
+
+ if TYPE in (key, term_id):
+ term = TYPE_TERM
+ elif GRAPH in (key, term_id):
+ #assert graph.context_aware
+ subgraph = dataset.get_context(subj)
+ for onode in obj_nodes:
+ self._add_to_graph(dataset, subgraph, context, onode)
+ return
+ elif SET in (key, term_id):
+ for onode in obj_nodes:
+ self._add_to_graph(dataset, graph, context, onode)
+ return
- if isinstance(node, tuple):
- value, lang = node
- if value is None:
+ pred_uri = term.id if term else context.expand(key)
+
+ flattened = []
+ for obj in obj_nodes:
+ if isinstance(obj, dict):
+ objs = context.get_set(obj)
+ if objs is not None:
+ obj = objs
+ if isinstance(obj, list):
+ flattened += obj
+ continue
+ flattened.append(obj)
+ obj_nodes = flattened
+
+ if not pred_uri:
return
- return Literal(value, lang=lang)
- if isinstance(node, dict):
- node_list = context.get_list(node)
- if node_list is not None:
- if inlist: # TODO: and NO_LISTS_OF_LISTS
+ if term and term.reverse:
+ reverse = not reverse
+
+ bid = self._get_bnodeid(pred_uri)
+ if bid:
+ if not self.generalized_rdf:
return
- listref = _add_list(dataset, graph, context, term, node_list)
- if listref:
- return listref
-
- else: # expand..
- if not term or not term.type:
- if isinstance(node, float):
- return Literal(node, datatype=XSD.double)
- if term and term.language is not UNDEF:
- lang = term.language
- else:
- lang = context.language
- return Literal(node, lang=lang)
+ pred = BNode(bid)
else:
- if term.type == ID:
- node = {ID: context.resolve(node)}
- elif term.type == VOCAB:
- node = {ID: context.expand(node) or context.resolve_iri(node)}
+ pred = URIRef(pred_uri)
+ for obj_node in obj_nodes:
+ obj = self._to_object(dataset, graph, context, term, obj_node)
+ if obj is None:
+ continue
+ if reverse:
+ graph.add((obj, pred, subj))
else:
- node = {TYPE: term.type,
- VALUE: node}
+ graph.add((subj, pred, obj))
- lang = context.get_language(node)
- if lang or context.get_key(VALUE) in node or VALUE in node:
- value = context.get_value(node)
- if value is None:
- return None
- datatype = not lang and context.get_type(node) or None
- if lang:
+
+ def _to_object(self, dataset, graph, context, term, node, inlist=False):
+
+ if node is None:
+ return
+
+ if isinstance(node, tuple):
+ value, lang = node
+ if value is None:
+ return
return Literal(value, lang=lang)
- elif datatype:
- return Literal(value, datatype=context.expand(datatype))
+
+ if isinstance(node, dict):
+ node_list = context.get_list(node)
+ if node_list is not None:
+ if inlist and not self.allow_lists_of_lists:
+ return
+ listref = self._add_list(dataset, graph, context, term, node_list)
+ if listref:
+ return listref
+
+ else: # expand..
+ if not term or not term.type:
+ if isinstance(node, float):
+ return Literal(node, datatype=XSD.double)
+ if term and term.language is not UNDEF:
+ lang = term.language
+ else:
+ lang = context.language
+ return Literal(node, lang=lang)
+ else:
+ if term.type == ID:
+ node = {ID: context.resolve(node)}
+ elif term.type == VOCAB:
+ node = {ID: context.expand(node) or context.resolve_iri(node)}
+ else:
+ node = {TYPE: term.type,
+ VALUE: node}
+
+ lang = context.get_language(node)
+ if lang or context.get_key(VALUE) in node or VALUE in node:
+ value = context.get_value(node)
+ if value is None:
+ return None
+ datatype = not lang and context.get_type(node) or None
+ if lang:
+ return Literal(value, lang=lang)
+ elif datatype:
+ return Literal(value, datatype=context.expand(datatype))
+ else:
+ return Literal(value)
else:
- return Literal(value)
- else:
- return _add_to_graph(dataset, graph, context, node)
-
-
-def _to_rdf_id(context, id_val):
- bid = _get_bnodeid(id_val)
- if bid:
- return BNode(bid)
- else:
- uri = context.resolve(id_val)
- if not generalized_rdf and ':' not in uri:
- return None
- return URIRef(uri)
+ return self._add_to_graph(dataset, graph, context, node)
-def _get_bnodeid(ref):
- if not ref.startswith('_:'):
- return
- bid = ref.split('_:', 1)[-1]
- return bid or None
+ def _to_rdf_id(self, context, id_val):
+ bid = self._get_bnodeid(id_val)
+ if bid:
+ return BNode(bid)
+ else:
+ uri = context.resolve(id_val)
+ if not self.generalized_rdf and ':' not in uri:
+ return None
+ return URIRef(uri)
-def _add_list(dataset, graph, context, term, node_list):
- if not isinstance(node_list, list):
- node_list = [node_list]
- first_subj = BNode()
- subj, rest = first_subj, None
- for node in node_list:
- if node is None:
- continue
+ def _get_bnodeid(self, ref):
+ if not ref.startswith('_:'):
+ return
+ bid = ref.split('_:', 1)[-1]
+ return bid or None
+
+
+ def _add_list(self, dataset, graph, context, term, node_list):
+ if not isinstance(node_list, list):
+ node_list = [node_list]
+ first_subj = BNode()
+ subj, rest = first_subj, None
+ for node in node_list:
+ if node is None:
+ continue
+ if rest:
+ graph.add((subj, RDF.rest, rest))
+ subj = rest
+ obj = self._to_object(dataset, graph, context, term, node, inlist=True)
+ if obj is None:
+ continue
+ graph.add((subj, RDF.first, obj))
+ rest = BNode()
if rest:
- graph.add((subj, RDF.rest, rest))
- subj = rest
- obj = _to_object(dataset, graph, context, term, node, inlist=True)
- if obj is None:
- continue
- graph.add((subj, RDF.first, obj))
- rest = BNode()
- if rest:
- graph.add((subj, RDF.rest, RDF.nil))
- return first_subj
- else:
- return RDF.nil
+ graph.add((subj, RDF.rest, RDF.nil))
+ return first_subj
+ else:
+ return RDF.nil
diff --git a/rdflib_jsonld/util.py b/rdflib_jsonld/util.py
index f9f3581..d9b8820 100644
--- a/rdflib_jsonld/util.py
+++ b/rdflib_jsonld/util.py
@@ -4,7 +4,7 @@ try:
except ImportError:
import simplejson as json
-from rdflib.py3compat import PY3
+from rdflib.py3compat import PY3, format_doctest_out
from os import sep
from os.path import normpath
@@ -41,9 +41,21 @@ def split_iri(iri):
return iri[:at+1], iri[at+1:]
return iri, None
+@format_doctest_out
def norm_url(base, url):
- url = urljoin(base, url)
- parts = urlsplit(url)
+ """
+ >>> norm_url('http://example.org/', '/one')
+ 'http://example.org/one'
+ >>> norm_url('http://example.org/', '/one#')
+ 'http://example.org/one#'
+ >>> norm_url('http://example.org/one', 'two')
+ 'http://example.org/two'
+ >>> norm_url('http://example.org/one/', 'two')
+ 'http://example.org/one/two'
+ >>> norm_url('http://example.org/', 'http://example.net/one')
+ 'http://example.net/one'
+ """
+ parts = urlsplit(urljoin(base, url))
path = normpath(parts[2])
if sep != '/':
path = '/'.join(path.split(sep))
diff --git a/setup.py b/setup.py
index 812a3e7..931f9b9 100644
--- a/setup.py
+++ b/setup.py
@@ -19,7 +19,7 @@ def setup_python3():
from os.path import join
tmp_src = join("build", "src")
- log.set_verbosity(1)
+ # log.set_verbosity(1)
fl = FileList()
for line in open("MANIFEST.in"):
if not line.strip():
@@ -97,9 +97,11 @@ config = dict(
entry_points = {
'rdf.plugins.parser': [
'json-ld = rdflib_jsonld.parser:JsonLDParser',
+ 'application/ld+json = rdflib_jsonld.parser:JsonLDParser',
],
'rdf.plugins.serializer': [
'json-ld = rdflib_jsonld.serializer:JsonLDSerializer',
+ 'application/ld+json = rdflib_jsonld.serializer:JsonLDSerializer',
],
}
)
diff --git a/test/.DS_Store b/test/.DS_Store
deleted file mode 100644
index 84a765c..0000000
Binary files a/test/.DS_Store and /dev/null differ
diff --git a/test/__init__.pyc b/test/__init__.pyc
deleted file mode 100644
index 21b3cbb..0000000
Binary files a/test/__init__.pyc and /dev/null differ
diff --git a/test/earl-context.jsonld b/test/earl-context.jsonld
deleted file mode 100644
index 7f00745..0000000
--- a/test/earl-context.jsonld
+++ /dev/null
@@ -1,24 +0,0 @@
-{
- "@context": {
- "@vocab": "http://www.w3.org/ns/earl#",
- "dc": "http://purl.org/dc/terms/",
- "foaf": "http://xmlns.com/foaf/0.1/",
- "doap": "http://usefulinc.com/ns/doap#",
- "xsd": "http://www.w3.org/2001/XMLSchema#",
- "Project": {"@id": "doap:Project"},
- "projectpage": {"@id": "doap:homepage", "@type": "@id"},
- "projectName": {"@id": "doap:name"},
- "projectTitle": {"@id": "doap:title"},
- "programmingLanguage": {"@id": "doap:programming-language"},
- "homepage": {"@id": "foaf:homepage", "@type": "@id"},
- "name": {"@id": "foaf:name"},
- "outcome": {"@type": "@id"},
- "test": {"@type": "@id"},
- "assertedBy": {"@type": "@id"},
- "license": {"@id": "doap:license", "@type": "@id"},
- "creator": {"@id": "dc:creator", "@type": "@id"},
- "mode": {"@type": "@vocab"},
- "date": {"@id": "dc:date", "@type": "xsd:dateTime"},
- "subjectOf": {"@reverse": "earl:subject" }
- }
-}
diff --git a/test/test-suite/.DS_Store b/test/test-suite/.DS_Store
deleted file mode 100644
index 5008ddf..0000000
Binary files a/test/test-suite/.DS_Store and /dev/null differ
diff --git a/test/test_api.pyc b/test/test_api.pyc
deleted file mode 100644
index 6cc1060..0000000
Binary files a/test/test_api.pyc and /dev/null differ
diff --git a/test/test_compaction.py b/test/test_compaction.py
index 14a9c3e..7e762f3 100644
--- a/test/test_compaction.py
+++ b/test/test_compaction.py
@@ -4,7 +4,8 @@ import re
import json
import itertools
from rdflib import Graph
-from rdflib.serializer import Serializer
+from rdflib.plugin import register, Serializer
+register('json-ld', Serializer, 'rdflib_jsonld.serializer', 'JsonLDSerializer')
cases = []
@@ -179,17 +180,70 @@ case("""
)
+# Shorten result IRIs by using @base
+case("""
+BASE <http://example.org/>
+PREFIX : <http://example.org/vocab/>
+<Thing> a :Class .
+<Work> a :Class; :subClassOf <Thing> .
+</some/path/> a :Thing .
+</some/path/#this> a :Thing .
+</some/path/#other> a :Thing .
+""",
+{
+ "@context": {
+ "@base": "http://example.org/some/path/#other",
+ "@vocab": "http://example.org/vocab/"
+ },
+ "@graph": [
+ {
+ "@id": "/Thing",
+ "@type": "Class"
+ },
+ {
+ "@id": "/Work",
+ "@type": "Class",
+ "subClassOf": {
+ "@id": "/Thing"
+ }
+ },
+ {
+ "@id": "",
+ "@type": "Thing"
+ },
+ {
+ "@id": "/some/path/#this",
+ "@type": "Thing"
+ },
+ {
+ "@id": "/some/path/#other",
+ "@type": "Thing"
+ }
+ ]
+}
+)
+
+
json_kwargs = dict(indent=2, separators=(',', ': '), sort_keys=True, ensure_ascii=False)
def run(data, expected):
- context = expected['@context']
g = Graph().parse(data=data, format='turtle')
- result = g.serialize(format='json-ld', context=context, **json_kwargs).decode('utf-8')
+ result = g.serialize(format='json-ld', context=expected['@context']).decode('utf-8')
+ result = json.loads(result)
+
+ sort_graph(result)
+ result = json.dumps(result, **json_kwargs)
incr = itertools.count(1)
result = re.sub(r'"_:[^"]+"', lambda m: '"_:blank-%s"' % incr.next(), result)
+
+ sort_graph(expected)
expected = json.dumps(expected, **json_kwargs)
- assert result == expected
+ assert result == expected, "Expected not equal to result: %s" % result
+
+def sort_graph(data):
+ if '@graph' in data:
+ data['@graph'].sort(key=lambda node: node.get('@id'))
def test_cases():
for data, expected in cases:
diff --git a/test/test_compaction.pyc b/test/test_compaction.pyc
deleted file mode 100644
index 4e84db0..0000000
Binary files a/test/test_compaction.pyc and /dev/null differ
diff --git a/test/test_context.py b/test/test_context.py
new file mode 100644
index 0000000..afc9a99
--- /dev/null
+++ b/test/test_context.py
@@ -0,0 +1,169 @@
+"""
+JSON-LD Context Spec
+"""
+from __future__ import unicode_literals
+from rdflib_jsonld.context import Context, Term
+from rdflib_jsonld import errors
+
+
+# exception utility (see also nose.tools.raises)
+from functools import wraps
+def _expect_exception(expected_error):
+ def _try_wrapper(f):
+ @wraps(f)
+ def _try():
+ try:
+ f()
+ assert e == expected_error
+ except Exception as e:
+ success = e == expected_error
+ else:
+ success = False
+ assert success, "Expected %r" % expected_error
+ return _try
+ return _try_wrapper
+
+
def test_create_context():
    """A term added to a fresh context is retrievable by name and by IRI."""
    context = Context()
    iri = 'http://example.org/ns/label'
    context.add_term('label', iri)

    added = context.terms.get('label')
    assert added.name == 'label'
    assert context.find_term(iri) is added
+
+
def test_select_term_based_on_value_characteristics():
    """find_term honours coercion when several terms share one IRI."""
    context = Context()
    iri = 'http://example.org/ns/updated'
    date = 'http://www.w3.org/2001/XMLSchema#date'

    context.add_term('updated', iri)
    context.add_term('updatedDate', iri, coercion=date)

    assert context.find_term(iri).name == 'updated'
    assert context.find_term(iri, coercion=date).name == 'updatedDate'

    #ctx.find_term('http://example.org/ns/title_sv', language='sv')

    #ctx.find_term('http://example.org/ns/authorList', container='@set')

    #ctx.find_term('http://example.org/ns/creator', reverse=True)
+
+
def test_getting_keyword_values_from_nodes():
    """get_id/get_language read JSON-LD keyword values off a node dict."""
    context = Context()
    assert context.get_id({'@id': 'urn:x:1'}) == 'urn:x:1'
    assert context.get_language({'@language': 'en'}) == 'en'
+
+
def test_parsing_a_context_expands_prefixes():
    """Prefix mappings in the context source yield fully expanded term IRIs."""
    context = Context({
        '@vocab': 'http://example.org/ns/',
        'x': 'http://example.org/ns/',
        'label': 'x:label',
        'x:updated': {'@type': 'x:date'}})

    label = context.terms.get('label')
    assert label.id == 'http://example.org/ns/label'

    updated = context.terms.get('x:updated')
    assert updated.id == 'http://example.org/ns/updated'
    assert updated.type == 'http://example.org/ns/date'

    # expanding terms
    assert context.expand('term') == 'http://example.org/ns/term'
    assert context.expand('x:term') == 'http://example.org/ns/term'

    # shrinking IRIs
    assert context.shrink_iri('http://example.org/ns/term') == 'x:term'
    assert context.to_symbol('http://example.org/ns/term') == 'term'
+
+
def test_resolving_iris():
    """Relative references resolve against the context @base."""
    context = Context({'@base': 'http://example.org/path/leaf'})
    for reference, expected in [
            ('/', 'http://example.org/'),
            ('/trail', 'http://example.org/trail'),
            ('../', 'http://example.org/'),
            ('../../', 'http://example.org/')]:
        assert context.resolve(reference) == expected
+
+
def test_accessing_keyword_values_by_alias():
    """Keyword aliases declared in the context work alongside the keywords."""
    context = Context({'iri': '@id', 'lang': '@language'})
    assert context.get_id({'iri': 'urn:x:1'}) == 'urn:x:1'
    assert context.get_language({'lang': 'en'}) == 'en'

    # standard keywords still work
    assert context.get_id({'@id': 'urn:x:1'}) == 'urn:x:1'

    # keywords are represented by their alias
    assert context.id_key == 'iri'
    assert context.lang_key == 'lang'
+
+
def test_creating_a_subcontext():
    """A subcontext layers new source data on top of its parent context."""
    parent = Context()
    child = parent.subcontext({'lang': '@language'})
    assert child.get_language({'lang': 'en'}) == 'en'
+
+
# Mock external source loading: route context URLs through the SOURCES dict
# instead of fetching them over the network.
from rdflib_jsonld import context
_source_to_json = context.source_to_json  # keep a handle to the real loader
SOURCES = {}
context.source_to_json = SOURCES.get
+
def test_loading_contexts():
    """Context sources given as URLs (or lists of them) are fetched and merged."""
    source1 = "http://example.org/base.jsonld"
    source2 = "http://example.org/context.jsonld"
    SOURCES[source1] = {'@context': {"@vocab": "http://example.org/vocab/"}}
    SOURCES[source2] = {'@context': [source1, {"n": "name"}]}

    # a single URL source
    assert Context(source2).expand('n') == 'http://example.org/vocab/name'

    # a list of sources
    assert Context([source2]).expand('n') == 'http://example.org/vocab/name'
+
def test_use_base_in_local_context():
    """@base declared in a local context is kept verbatim."""
    assert Context({'@base': "/local"}).base == '/local'
+
def test_override_base():
    """An @base in the context source overrides the externally supplied base."""
    context = Context(source={'@base': "http://example.org/"},
                      base="http://example.org/app/data/item")
    assert context.base == "http://example.org/"
+
def test_resolve_relative_base():
    """A relative @base is resolved against the externally supplied base."""
    context = Context(source={'@base': "../"},
                      base="http://example.org/app/data/item")
    assert context.base == "http://example.org/app/"
    assert context.resolve_iri("../other") == "http://example.org/other"
+
def test_set_null_base():
    """An explicit null @base discards the base; relative IRIs stay as-is."""
    context = Context(source={'@base': None},
                      base="http://example.org/app/data/item")
    assert context.base is None
    assert context.resolve_iri("../other") == "../other"
+
def test_ignore_base_remote_context():
    """@base found in a remote (dereferenced) context must be ignored."""
    ctx_url = "http://example.org/remote-base.jsonld"
    SOURCES[ctx_url] = {'@context': {'@base': "/remote"}}
    ctx = Context(ctx_url)
    # `is None`, not `== None`: identity test is the idiomatic None check.
    assert ctx.base is None
+
# NOTE(review): the mail archiver mangled the decorator's "@" into " at "
# (same as the e-mail addresses in the header); restored here.
@_expect_exception(errors.RECURSIVE_CONTEXT_INCLUSION)
def test_recursive_context_inclusion_error():
    """A context that includes itself raises RECURSIVE_CONTEXT_INCLUSION."""
    ctx_url = "http://example.org/recursive.jsonld"
    SOURCES[ctx_url] = {'@context': ctx_url}
    ctx = Context(ctx_url)
+
# NOTE(review): the mail archiver mangled the decorator's "@" into " at "
# (same as the e-mail addresses in the header); restored here.
@_expect_exception(errors.INVALID_REMOTE_CONTEXT)
def test_invalid_remote_context():
    """A remote document without an @context key raises INVALID_REMOTE_CONTEXT."""
    ctx_url = "http://example.org/recursive.jsonld"
    SOURCES[ctx_url] = {"key": "value"}
    ctx = Context(ctx_url)
diff --git a/test/test_context.rst b/test/test_context.rst
deleted file mode 100644
index 1ee2994..0000000
--- a/test/test_context.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-
-Context Spec:
-
- >>> from rdflib_jsonld.context import Context, Term
-
- >>> ctx = Context()
-
- >>> ctx.add_term(u'label', u'http://example.org/ns/label')
- >>> term = ctx.terms.get(u'label')
- >>> term.name
- u'label'
-
- >>> ctx.find_term(u'http://example.org/ns/label') is term
- True
-
-Selecting term based on value characteristics:
-
- >>> ctx.add_term(u'updated', u'http://example.org/ns/updated')
- >>> ctx.add_term(u'updatedDate', u'http://example.org/ns/updated',
- ... coercion=u'http://www.w3.org/2001/XMLSchema#date')
-
- >>> ctx.find_term(u'http://example.org/ns/updated').name
- u'updated'
- >>> ctx.find_term(u'http://example.org/ns/updated',
- ... coercion=u'http://www.w3.org/2001/XMLSchema#date').name
- u'updatedDate'
-
- >>> ctx.find_term(u'http://example.org/ns/title_sv', language=u'sv')
- >>>
-
- >>> ctx.find_term(u'http://example.org/ns/authorList', container=u'@set')
- >>>
-
- >>> ctx.find_term(u'http://example.org/ns/creator', reverse=True)
- >>>
-
-Getting keyword values from nodes:
-
- >>> ctx.get_id({u'@id': u'urn:x:1'})
- u'urn:x:1'
- >>> ctx.get_language({u'@language': u'en'})
- u'en'
-
-Parsing a context expands prefixes:
-
- >>> ctx = Context({
- ... u'@vocab': u'http://example.org/ns/',
- ... u'x': u'http://example.org/ns/',
- ... u'label': u'x:label',
- ... u'x:updated': {u'@type': u'x:date'}})
- >>> term = ctx.terms.get(u'label')
-
- >>> term.id
- u'http://example.org/ns/label'
-
- >>> term = ctx.terms.get(u'x:updated')
- >>> term.id
- u'http://example.org/ns/updated'
- >>> term.type
- u'http://example.org/ns/date'
-
-Expanding terms:
-
- >>> ctx.expand(u'term')
- u'http://example.org/ns/term'
-
- >>> ctx.expand(u'x:term')
- u'http://example.org/ns/term'
-
-Shrinking IRIs:
-
- >>> ctx.shrink_iri(u'http://example.org/ns/term')
- u'x:term'
-
- >>> ctx.to_symbol(u'http://example.org/ns/term')
- u'term'
-
-Resolving IRIs:
-
- >>> ctx = Context({u'@base': u'http://example.org/path/leaf'})
- >>> ctx.resolve(u'/')
- u'http://example.org/'
- >>> ctx.resolve(u'/trail')
- u'http://example.org/trail'
- >>> ctx.resolve(u'../')
- u'http://example.org/'
- >>> ctx.resolve(u'../../')
- u'http://example.org/'
-
-Accessing keyword values by alias:
-
- >>> ctx = Context({u'iri': u'@id', u'lang': u'@language'})
- >>> ctx.get_id({u'iri': u'urn:x:1'})
- u'urn:x:1'
- >>> ctx.get_language({u'lang': u'en'})
- u'en'
-
-Standard keywords still work:
-
- >>> ctx.get_id({u'@id': u'urn:x:1'})
- u'urn:x:1'
-
-Representing keywords by alias:
-
- >>> ctx.id_key
- u'iri'
-
- >>> ctx.lang_key
- u'lang'
-
-Creating a subcontext:
-
- >>> ctx4 = ctx.subcontext({u'lang': u'@language'}) #doctest: +ELLIPSIS
- >>> ctx4.get_language({u'lang': u'en'})
- u'en'
-
diff --git a/test/test_testsuite.py b/test/test_testsuite.py
index d8cc638..8368fa6 100644
--- a/test/test_testsuite.py
+++ b/test/test_testsuite.py
@@ -8,11 +8,14 @@ except ImportError:
from rdflib import ConjunctiveGraph, Graph, Literal, URIRef
from rdflib.compare import isomorphic
from rdflib.py3compat import PY3
+import rdflib_jsonld.parser
from rdflib_jsonld.parser import to_rdf
from rdflib_jsonld.serializer import from_rdf
from rdflib_jsonld.keys import CONTEXT, GRAPH
+rdflib_jsonld.parser.ALLOW_LISTS_OF_LISTS = False
+
# monkey-patch NTriplesParser to keep source bnode id:s ..
from rdflib.plugins.parsers.ntriples import NTriplesParser, r_nodeid, bNode
def _preserving_nodeid(self):
diff --git a/test/test_testsuite.pyc b/test/test_testsuite.pyc
deleted file mode 100644
index 6fd3a9c..0000000
Binary files a/test/test_testsuite.pyc and /dev/null differ
--
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-med/python-rdflib-jsonld.git
More information about the debian-med-commit
mailing list