Skip to content
Snippets Groups Projects

Extend json-schema model parser

Merged Florian Spreckelsen requested to merge f-enhance-json-parser into dev
Files
8
@@ -154,7 +154,12 @@ def parse_model_from_string(string):
return parser.parse_model_from_string(string)
def parse_model_from_json_schema(filename: str, top_level_recordtype: bool = True):
def parse_model_from_json_schema(
filename: str,
top_level_recordtype: bool = True,
types_for_missing_array_items: dict = {},
ignore_unspecified_array_items: bool = False
):
"""Return a datamodel parsed from a json schema definition.
Parameters
@@ -164,7 +169,14 @@ def parse_model_from_json_schema(filename: str, top_level_recordtype: bool = Tru
top_level_recordtype : bool, optional
Whether there is a record type defined at the top level of the
schema. Default is true.
types_for_missing_array_items : dict, optional
dictionary containing fall-back types for json entries with `type:
array` but without `items` specification. Default is an empty dict.
ignore_unspecified_array_items : bool, optional
Whether to ignore `type: array` entries whose item type is neither
specified by their `items` property nor given in
`types_for_missing_array_items`. If False, an error is raised when
such an entry is encountered. Default is False.
Returns
-------
@@ -180,8 +192,8 @@ def parse_model_from_json_schema(filename: str, top_level_recordtype: bool = Tru
"""
# @author Florian Spreckelsen
# @date 2022-02-17
# @review Daniel Hornung 2022-02-18
parser = JsonSchemaParser()
# @review Timm Fitschen 2023-05-25
parser = JsonSchemaParser(types_for_missing_array_items, ignore_unspecified_array_items)
return parser.parse_model_from_json_schema(filename, top_level_recordtype)
@@ -600,7+612,7 @@
for key, value in self.model.items():
if value is None:
self.model[key] = db.RecordType(name=key)
class JsonSchemaParser(Parser):
"""Extends the yaml parser to read in datamodels defined in a json schema.
**EXPERIMENTAL:** While this class can already be used to create data models
from basic json schemas, there are the following limitations and missing
features:
* Due to limitations of json-schema itself, we currently do not support
@@ -627,7+639,7 @@
"""
# @author Florian Spreckelsen
# @date 2022-02-17
# @review Timm Fitschen 2022-02-30
# @review Timm Fitschen 2023-05-25
def __init__(self, types_for_missing_array_items={}, ignore_unspecified_array_items=False):
super().__init__()
self.types_for_missing_array_items = types_for_missing_array_items
self.ignore_unspecified_array_items = ignore_unspecified_array_items
def parse_model_from_json_schema(self, filename: str, top_level_recordtype: bool = True):
"""Return a datamodel created from the definition in the json schema in
@@ -648,7 +665,7 @@ class JsonSchemaParser(Parser):
"""
# @author Florian Spreckelsen
# @date 2022-02-17
# @review Timm Fitschen 2022-02-30
# @review Timm Fitschen 2023-05-25
with open(filename, 'r') as schema_file:
model_dict = jsonref.load(schema_file)
@@ -660,7+677,7 @@
The dictionary was typically created from the model definition in a json schema file.
Parameters
----------
model_dict : dict or list[dict]
One or several dictionaries read in from a json-schema file
top_level_recordtype : bool, optional
@@ -694,7+711,7 @@
# Check if this is a valid Json Schema
name = self._stringify(elt["title"], context=elt)
self._treat_element(elt, name)
elif "properties" in elt or "patternProperties":
elif "properties" in elt or "patternProperties" in elt:
# No top-level type but there are entities
if "properties" in elt:
for key, prop in elt["properties"].items():
name = self._get_name_from_property(key, prop)
self._treat_element(prop, name)
if "patternProperties" in elt:
# See also treatment in ``_treat_record_type``. Since here,
# there is no top-level RT we use the prefix `__Pattern`,
@@ -715,5+732,5 @@
return DataModel(self.model.values())
def _get_name_from_property(self, key: str, prop: dict):
# @review Timm Fitschen 2023-05-25
if "title" in prop:
name = self._stringify(prop["title"])
else:
@@ -724,7 +741,7 @@ class JsonSchemaParser(Parser):
return name
def _get_atomic_datatype(self, elt):
# @review Timm Fitschen 2022-02-30
# @review Timm Fitschen 2023-05-25
if elt["type"] == "string":
if "format" in elt and elt["format"] in ["date", "date-time"]:
return db.DATETIME
@@ -737,7+754,7 @@
elif elt["type"] == "boolean":
return db.BOOLEAN
elif elt["type"] == "null":
# This could be any datatype since a valid json will never have a
# value in a null property. We use TEXT for convenience.
return db.TEXT
else:
@@ -755,7+772,7 @@
if name == "name":
# This is identified with the CaosDB name property as long as the
# type is correct.
if not elt["type"] == "string":
if not elt["type"] == "string" and "string" not in elt["type"]:
raise JsonSchemaDefinitionError(
"The 'name' property must be string-typed, otherwise it cannot "
"be identified with CaosDB's name property."
@@ -775,9 +792,6 @@ class JsonSchemaParser(Parser):
ent = self._treat_record_type(elt, name)
elif elt["type"] == "array":
ent, force_list = self._treat_list(elt, name)
elif elt["type"] == "null":
# null
return None, force_list
else:
raise NotImplementedError(
f"Cannot parse items of type '{elt['type']}' (yet).")
@@ -786,7 +800,8 @@ class JsonSchemaParser(Parser):
# treat_something function
ent.description = elt["description"]
self.model[name] = ent
if ent is not None:
self.model[name] = ent
return ent, force_list
def _treat_record_type(self, elt: dict, name: str):
@@ -844,30 +859,38 @@ class JsonSchemaParser(Parser):
return rt
def _treat_list(self, elt: dict, name: str):
# @review Timm Fitschen 2022-02-30
# @review Timm Fitschen 2023-05-25
if "items" not in elt:
if "items" not in elt and name not in self.types_for_missing_array_items:
if self.ignore_unspecified_array_items:
return None, False
raise JsonSchemaDefinitionError(
f"The definition of the list items is missing in {elt}.")
items = elt["items"]
if "enum" in items:
return self._treat_enum(items, name), True
if items["type"] in JSON_SCHEMA_ATOMIC_TYPES:
datatype = db.LIST(self._get_atomic_datatype(items))
if "items" in elt:
items = elt["items"]
if "enum" in items:
return self._treat_enum(items, name), True
if items["type"] in JSON_SCHEMA_ATOMIC_TYPES:
datatype = db.LIST(self._get_atomic_datatype(items))
return db.Property(name=name, datatype=datatype), False
if items["type"] == "object":
if "title" not in items or self._stringify(items["title"]) == name:
# Property is RecordType
return self._treat_record_type(items, name), True
else:
# List property will be an entity of its own with a name
# different from the referenced RT
ref_rt = self._treat_record_type(
items, self._stringify(items["title"]))
self.model[ref_rt.name] = ref_rt
return db.Property(name=name, datatype=db.LIST(ref_rt)), False
else:
# Use predefined type:
datatype = db.LIST(self.types_for_missing_array_items[name])
return db.Property(name=name, datatype=datatype), False
if items["type"] == "object":
if "title" not in items or self._stringify(items["title"]) == name:
# Property is RecordType
return self._treat_record_type(items, name), True
else:
# List property will be an entity of its own with a name
# different from the referenced RT
ref_rt = self._treat_record_type(
items, self._stringify(items["title"]))
self.model[ref_rt.name] = ref_rt
return db.Property(name=name, datatype=db.LIST(ref_rt)), False
def _get_pattern_prop(self):
# @review Timm Fitschen 2023-05-25
if "__pattern_property_pattern_property" in self.model:
return self.model["__pattern_property_pattern_property"]
pp = db.Property(name="__matched_pattern", datatype=db.TEXT)
@@ -888,12 +911,16 @@ class JsonSchemaParser(Parser):
array.
"""
# @review Timm Fitschen 2023-05-25
num_patterns = len(pattern_elements)
pattern_prop = self._get_pattern_prop()
returns = []
for ii, (key, element) in enumerate(pattern_elements.items()):
name_suffix = f"_{ii+1}" if num_patterns > 1 else ""
name = name_prefix + "Entry" + name_suffix
if "title" not in element:
name_suffix = f"_{ii+1}" if num_patterns > 1 else ""
name = name_prefix + "Entry" + name_suffix
else:
name = element["title"]
if element["type"] == "object":
# simple, is already an object, so can be treated like any other
# record type.
Loading