diff --git a/.gitignore b/.gitignore
index 4c175607e5327472c301949f187c58d925f0d05e..2b50c0fc33c80b9d83bc913c1e23836b51049f1c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -3,6 +3,7 @@
 src/caosadvancedtools/version.py
 
 # compiled python and dist stuff
+.venv
 *.egg
 .eggs
 *.egg-info/
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d2abdcd653c3315335c29058a8ca2774dad34577..61c68e67a7107457e2f3c780b54366a23eae1e78 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -134,22 +134,16 @@ unittest_py39:
     - python3 -c "import linkahead; print('LinkAhead Version:', linkahead.__version__)"
     - tox
 
-unittest_py37:
+unittest_py38:
   tags: [docker]
   stage: unittest
-  image: python:3.7
+  image: python:3.8
   script: &python_test_script
-    - pip install nose pandas pytest pytest-cov gitignore-parser openpyxl>=3.0.7 xlrd==1.2 h5py
+    - pip install pynose pandas pytest pytest-cov gitignore-parser "openpyxl>=3.0.7" xlrd==1.2 h5py
     - pip install git+https://gitlab.indiscale.com/caosdb/src/caosdb-pylib.git@dev
     - pip install .
     - pytest --cov=caosadvancedtools unittests
 
-unittest_py38:
-  tags: [docker]
-  stage: unittest
-  image: python:3.8
-  script: *python_test_script
-
 unittest_py310:
   tags: [docker]
   stage: unittest
@@ -162,6 +156,25 @@ unittest_py311:
   image: python:3.11
   script: *python_test_script
 
+unittest_py312:
+  tags: [docker]
+  stage: unittest
+  image: python:3.12
+  script: *python_test_script
+
+unittest_py313:
+  tags: [docker]
+  stage: unittest
+  image: python:3.13-rc
+  script:
+    # TODO: Replace by '*python_test_script' as soon as 3.13 has been officially released.
+    - apt update && apt install -y cargo || true
+    - pip install meson[ninja] meson-python || true
+    - pip install pynose pandas pytest pytest-cov gitignore-parser "openpyxl>=3.0.7" xlrd==1.2 h5py || true
+    - pip install git+https://gitlab.indiscale.com/caosdb/src/caosdb-pylib.git@dev || true
+    - pip install . || true
+    - pytest --cov=caosadvancedtools unittests || true
+
 # Build the sphinx documentation and make it ready for deployment by Gitlab Pages
 # Special job for serving a static website. See https://docs.gitlab.com/ee/ci/yaml/README.html#pages
 pages_prepare: &pages_prepare
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2419b32ffa48bdc9f0d1c93319348dea70cd744c..a6382f9c6050c78d974ce954dbd3efa9d6c2d5de 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,6 +4,37 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [0.10.0] - 2024-04-24 ##
+
+### Added ###
+
+- XLSX handling: template generator and conversion from JSON to XLSX
+- JSON schema exporter:
+  - has new parameter `use_rt_pool`
+  - propagates more properties in the `make_array` function
+- Support for Python 3.12 and experimental support for 3.13
+
+### Changed ###
+
+* `table_converter.to_table` now returns an empty DataFrame instead of raising a
+  ValueError when called with an empty container.
+
+### Removed ###
+
+* The deprecated `parent` keyword from the YAML datamodel specification. Use
+  `inherit_from_{obligatory|recommended|suggested}` instead.
+* Support for Python 3.7
+
+### Fixed ###
+
+- JSON schema exporter handles reference properties better, e.g., their names
+  and descriptions are now propagated into the generated subschema.
+- [#59](https://gitlab.com/linkahead/linkahead-advanced-user-tools/-/issues/59)
+  `to_table` failed on lists as values.
+
+### Documentation ###
+
+* Added documentation for JSON schema datamodel export
+
 ## [0.9.0] - 2023-11-27 ##
 
 ### Added ###
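
For migration away from the removed `parent` keyword, a minimal sketch using
`parse_model_from_string` (the RecordType names are invented; a YAML file via
`parse_model_from_yaml` works the same way)::

  from caosadvancedtools.models.parser import parse_model_from_string

  # Previously expressed with the deprecated ``parent: Experiment``:
  model_string = """
  Experiment:
  SpecialExperiment:
    inherit_from_obligatory:
    - Experiment
  """
  model = parse_model_from_string(model_string)
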
diff --git a/CITATION.cff b/CITATION.cff
index eea049338996c494a88076b9e9e0f3131ed44a66..6f7b69b665ada694ec5756fd0a6bb16c490c23a5 100644
--- a/CITATION.cff
+++ b/CITATION.cff
@@ -20,6 +20,6 @@ authors:
     given-names: Stefan
     orcid: https://orcid.org/0000-0001-7214-8125
 title: CaosDB - Advanced User Tools
-version: 0.9.0
+version: 0.10.0
 doi: 10.3390/data4020083
-date-released: 2023-11-27
\ No newline at end of file
+date-released: 2024-04-24
\ No newline at end of file
diff --git a/Makefile b/Makefile
index d9b182cbd0b17490e9d81b900d6ba8cefadb1b64..724d9e5a4f06cb3a2880df9016adfe527314c338 100644
--- a/Makefile
+++ b/Makefile
@@ -36,10 +36,10 @@ unittest:
 	pytest-3 unittests
 
 style:
-	pycodestyle --count src unittests --exclude=swagger_client
-	autopep8 -ar --diff --exit-code --exclude swagger_client .
+	pycodestyle --count --exclude=swagger_client src unittests
+	autopep8 -ar --diff --exit-code --exclude swagger_client src unittests
 .PHONY: style
 
 lint:
-	pylint --unsafe-load-any-extension=y -d all -e E,F --ignore=swagger_client src/caosadvancedtools
+	pylint --unsafe-load-any-extension=y --fail-under=9.59 -d R,C --ignore=swagger_client src/caosadvancedtools
 .PHONY: lint
diff --git a/integrationtests/test_json_schema_exporter.py b/integrationtests/test_json_schema_exporter.py
index 69edcf42d1fd285c030ad6d6ccb7f73f2d1b5536..44b428263ebbd9696fc2a171ea356d764482d5e3 100644
--- a/integrationtests/test_json_schema_exporter.py
+++ b/integrationtests/test_json_schema_exporter.py
@@ -20,9 +20,12 @@
 # with this program. If not, see <https://www.gnu.org/licenses/>.
 #
 
+import json
+
 import linkahead as db
 
 from caosadvancedtools.json_schema_exporter import recordtype_to_json_schema as rtjs
+from caosadvancedtools.models.parser import parse_model_from_string
 
 
 def _delete_everything():
@@ -75,3 +78,37 @@ def test_uniqueness_of_reference_types():
     assert one_of[1 - enum_index]["type"] == "object"
     # No properties in parent_type
     assert len(one_of[1 - enum_index]["properties"]) == 0
+
+
+def test_reference_property():
+    model_string = """
+RT1:
+  description: Some recordtype
+RT2:
+  obligatory_properties:
+    prop1:
+      description: Some reference property
+      datatype: RT1
+    """
+    model = parse_model_from_string(model_string)
+    model.sync_data_model(noquestion=True)
+    schema = rtjs(db.RecordType(name="RT2").retrieve())
+    assert json.dumps(schema, indent=2) == """{
+  "type": "object",
+  "required": [
+    "prop1"
+  ],
+  "additionalProperties": true,
+  "title": "RT2",
+  "properties": {
+    "prop1": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": true,
+      "description": "Some reference property",
+      "title": "prop1",
+      "properties": {}
+    }
+  },
+  "$schema": "https://json-schema.org/draft/2020-12/schema"
+}"""
diff --git a/pylintrc b/pylintrc
index 625f83ce950841f7a239538123ef7b5812fc5c5f..d3a2e89ae1990480e5377daf443e0a63224342bc 100644
--- a/pylintrc
+++ b/pylintrc
@@ -1,5 +1,3 @@
-# -*- mode:conf; -*-
-
 [FORMAT]
 # Good variable names which should always be accepted, separated by a comma
 good-names=ii,rt,df
@@ -17,3 +15,8 @@ init-hook=
   import sys; sys.path.extend(["src/caosadvancedtools"]);
   import astroid; astroid.context.InferenceContext.max_inferred = 500;
 
+[MESSAGES CONTROL]
+disable=
+  fixme,
+  logging-format-interpolation,
+  logging-not-lazy,
diff --git a/setup.py b/setup.py
index 50cb59467a5162663d31a23e4cfd2159a611bf9c..f8346b8c581f9a6f086eb081cf3c1e1dfcec5322 100755
--- a/setup.py
+++ b/setup.py
@@ -46,7 +46,7 @@ from setuptools import find_packages, setup
 ########################################################################
 
 MAJOR = 0
-MINOR = 9
+MINOR = 10
 MICRO = 0
 PRE = ""  # e.g. rc0, alpha.1, 0.beta-23
 ISRELEASED = True
@@ -154,11 +154,11 @@ def setup_package():
         long_description_content_type="text/markdown",
         author='Henrik tom Wörden',
         author_email='h.tomwoerden@indiscale.com',
-        python_requires='>=3.7',
+        python_requires='>=3.8',
         install_requires=["linkahead>=0.13.1",
                           "jsonref",
                           "jsonschema[format]>=4.4.0",
-                          "numpy>=1.17.3",
+                          "numpy>=1.24.0",
                           "openpyxl>=3.0.7",
                           "pandas>=1.2.0",
                           "xlrd>=2.0",
diff --git a/src/caosadvancedtools/bloxberg/bloxberg.py b/src/caosadvancedtools/bloxberg/bloxberg.py
index 42af1e11a23a37214ec294b8032517bb5c70bb5b..6aa2eaab94a1beb7735a09de6b22ab7745c0b078 100644
--- a/src/caosadvancedtools/bloxberg/bloxberg.py
+++ b/src/caosadvancedtools/bloxberg/bloxberg.py
@@ -145,6 +145,7 @@ filename : str
 Write the JSON to this file.
 """
         content = {}
+        print(certificate, filename)  # TODO Implement this stub; the print only marks the arguments as used.
 
         return content
 
@@ -188,7 +189,6 @@ def demo_run():
     print("Making sure that the remote data model is up to date.")
     ensure_data_model()
     print("Data model is up to date.")
-    import caosdb as db
     CertRT = db.RecordType(name="BloxbergCertificate").retrieve()
     print("Certifying the `BloxbergCertificate` RecordType...")
     json_filename = "/tmp/cert.json"
diff --git a/src/caosadvancedtools/cache.py b/src/caosadvancedtools/cache.py
index cf74e330d3efb754d8e79d84ba816877c295c784..bf1287ba35d9c0af28d440db56e38173d38fcacf 100644
--- a/src/caosadvancedtools/cache.py
+++ b/src/caosadvancedtools/cache.py
@@ -150,13 +150,18 @@ class AbstractCache(ABC):
         finally:
             conn.close()
 
-    def run_sql_commands(self, commands, fetchall=False):
-        """
-        Run a list of SQL commands on self.db_file.
+    def run_sql_commands(self, commands, fetchall: bool = False):
+        """Run a list of SQL commands on self.db_file.
+
+Parameters
+----------
+
+commands:
+  List of SQL commands (tuples) to execute.
 
-        commands: list of sql commands (tuples) to execute
-        fetchall: When True, run fetchall as last command and return the results.
-                  Otherwise nothing is returned.
+fetchall: bool, optional
+  When True, run fetchall as last command and return the results.
+  Otherwise nothing is returned.
         """
         conn = sqlite3.connect(self.db_file)
         c = conn.cursor()
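
A usage sketch for ``run_sql_commands`` (assuming, as with
``sqlite3.Cursor.execute``, that each tuple is a statement followed by its
parameters; ``cache`` stands for any concrete ``AbstractCache`` instance)::

  rows = cache.run_sql_commands(
      [
          ("CREATE TABLE IF NOT EXISTS demo (key TEXT, value TEXT)", ()),
          ("INSERT INTO demo VALUES (?, ?)", ("answer", "42")),
          ("SELECT value FROM demo WHERE key = ?", ("answer",)),
      ],
      fetchall=True,  # return the fetchall() result of the last command
  )
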
diff --git a/src/caosadvancedtools/cfood.py b/src/caosadvancedtools/cfood.py
index c0da4f0156dc2af48a4ba80b4d0af69c62cd5c3e..588476bd624f7e24a5e0f49380fb1e66f5bd1b17 100644
--- a/src/caosadvancedtools/cfood.py
+++ b/src/caosadvancedtools/cfood.py
@@ -27,12 +27,14 @@
 
 CaosDB can automatically be filled with Records based on some structure, a file
 structure, a table or similar.
+
 The Crawler will iterate over the respective items and test for each item
 whether a CFood class exists that matches the file path, i.e. whether a CFood
 class wants to treat that particular item. If one does, it is instantiated to
 treat the match. This occurs in basically three steps:
+
 1. Create a list of identifiables, i.e. unique representation of CaosDB Records
-(such as an experiment belonging to a project and a date/time).
+   (such as an experiment belonging to a project and a date/time).
 2. The identifiables are either found in CaosDB or they are created.
 3. The identifiables are updated based on the data in the file structure.
 """
@@ -333,7 +335,7 @@ class AbstractFileCFood(AbstractCFood):
         out : str
             The regular expression, starting with ``.*\\.`` and ending with the EOL dollar
             character.  The actual extension will be accessible in the
-            :py:attribute:`pattern group name<python:re.Pattern.groupindexe>` ``ext``.
+            :py:attr:`pattern group name <python:re.Pattern.groupindex>` ``ext``.
         """
 
         if not extensions:
diff --git a/src/caosadvancedtools/cfoods/h5.py b/src/caosadvancedtools/cfoods/h5.py
index 4e6832f2e96e0950ed99146d4907f1ffb70d8494..dfd6f2905c955c1b4e493da1f2cbf45583ee68dc 100644
--- a/src/caosadvancedtools/cfoods/h5.py
+++ b/src/caosadvancedtools/cfoods/h5.py
@@ -1,9 +1,9 @@
 #!/usr/bin/env python3
 
-# This file is a part of the CaosDB Project.
+# This file is a part of the LinkAhead Project.
 #
 # Copyright (C) 2020,2021 IndiScale GmbH <www.indiscale.com>
-# Copyright (C) 2020 Daniel Hornung <d.hornung@indiscale.com>
+# Copyright (C) 2020-2025 Daniel Hornung <d.hornung@indiscale.com>
 # Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
 # Copyright (C) 2021 Alexander Kreft
 # Copyright (C) 2021 Laboratory for Fluid Physics and Biocomplexity,
@@ -22,8 +22,7 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-"""A CFood for hdf5 files
-
+"""A CFood for hdf5 files.
 
 This module allows parsing hdf5 files and reproducing their structure in the
 form of Records that reference each other.
@@ -33,19 +32,14 @@ attributes. Groups and datasets are mapped to Records and attributes to
 Properties.
 """
 
-import re
 from copy import deepcopy
 
 import caosdb as db
 import h5py
 import numpy as np
 from caosadvancedtools.cfood import fileguide
-from caosdb.common.datatype import is_reference
-from caosdb.common.utils import uuid
 
-from ..cfood import (AbstractFileCFood, assure_has_description,
-                     assure_has_parent, assure_has_property,
-                     assure_property_is)
+from ..cfood import AbstractFileCFood
 from ..structure_mapping import (EntityMapping, collect_existing_structure,
                                  update_structure)
 
@@ -100,8 +94,7 @@ def h5_attr_to_property(val):
         if hasattr(val, 'ndim'):
             if not isinstance(val, np.ndarray) and val.ndim != 0:
                 print(val, val.ndim)
-                raise Exception(
-                    "Implementation assumes that only np.arrays have ndim.")
+                raise RuntimeError("Implementation assumes that only np.arrays have ndim.")
 
         return val, dtype
 
@@ -127,6 +120,8 @@ class H5CFood(AbstractFileCFood):
         self.identifiable_root = None
         self.root_name = "root"
         self.hdf5Container = db.Container()
+        self.to_be_inserted = db.Container()
+        self.structure = db.Container()
         self.em = EntityMapping()
 
     def collect_information(self):
@@ -134,7 +129,7 @@ class H5CFood(AbstractFileCFood):
 
     @staticmethod
     def get_re():
-        """Return a regular expression string to match *.h5, *.nc, *.hdf, *.hdf5."""
+        """Return a regular expression string to match ``*.h5``, ``*.nc``, ``*.hdf``, ``*.hdf5``."""
         extensions = [
             "h5",
             "nc",
@@ -165,7 +160,8 @@ class H5CFood(AbstractFileCFood):
 
         """
 
-        self.structure._cuid = "root element"
+        # TODO Why do we need a protected member here?
+        self.structure._cuid = "root element"  # pylint: disable=protected-access
         self.em.add(self.structure, self.identifiable_root)
         collect_existing_structure(self.structure, self.identifiable_root,
                                    self.em)
@@ -282,7 +278,7 @@ class H5CFood(AbstractFileCFood):
         return rec
 
     def insert_missing_structure(self, target_structure: db.Record):
-        if target_structure._cuid not in self.em.to_existing:
+        if target_structure._cuid not in self.em.to_existing:  # pylint: disable=protected-access
             self.to_be_inserted.append(target_structure)
 
         for prop in target_structure.get_properties():
diff --git a/src/caosadvancedtools/converter/labfolder_export.py b/src/caosadvancedtools/converter/labfolder_export.py
index 172ed993ecf0bc96d39a57f06082d7f83716ba19..6e282218b6d451749ad3070693d38299663f1bee 100644
--- a/src/caosadvancedtools/converter/labfolder_export.py
+++ b/src/caosadvancedtools/converter/labfolder_export.py
@@ -22,17 +22,7 @@
 """ Imports labfolder exports """
 
 import os
-import re
-import shutil
-import subprocess
-import sys
-import tempfile
-import time
-import warnings
-from io import BytesIO, StringIO
-
-import requests
-import yaml
+
 from bs4 import BeautifulSoup
 
 import caosdb as db
@@ -73,8 +63,8 @@ def get_author_from_entry(entry):
 def val_or_none(stuff):
     if len(stuff) == 0:
         return None
-    else:
-        return stuff[0].getText()
+
+    return stuff[0].getText()
 
 
 def add_property_from_data_element(dbrecord, element):
diff --git a/src/caosadvancedtools/json_schema_exporter.py b/src/caosadvancedtools/json_schema_exporter.py
index 700c24e890c36a5b4219a1c2cc7d74ce38d6d398..c4c6de164aede0ce2e35b2f47fc30275552066b5 100644
--- a/src/caosadvancedtools/json_schema_exporter.py
+++ b/src/caosadvancedtools/json_schema_exporter.py
@@ -19,16 +19,48 @@
 # You should have received a copy of the GNU Affero General Public License along
 # with this program. If not, see <https://www.gnu.org/licenses/>.
 #
-"""Module for converting a data model into a json schema compatible dictionary.
+"""Convert a data model into a json schema.
+
+Sometimes you may want to have a `json schema <https://json-schema.org>`_ which describes a
+LinkAhead data model, for example for the automatic generation of user interfaces with third-party
+tools like `rjsf <https://rjsf-team.github.io/react-jsonschema-form/docs/>`_.  Then this is the
+right module for you!
+
+The :mod:`json_schema_exporter <caosadvancedtools.json_schema_exporter>` module has one main class,
+:class:`JsonSchemaExporter`, and a few utility and wrapper functions.
+
+For easy usage, you may simply import `recordtype_to_json_schema` and use it on a fully referenced
+RecordType like this::
+
+  import caosadvancedtools.models.parser as parser
+  import caosadvancedtools.json_schema_exporter as jsex
+
+  model = parser.parse_model_from_yaml("my_model.yml")
+
+  # get the data model schema for the "Journey" recordtype
+      schema, ui_schema = jsex.recordtype_to_json_schema(
+      rt=model.get_deep("Journey"),
+      do_not_create=["Continent"],         # only choose from existing Records
+      multiple_choice=["visited_cities"],
+      rjsf=True                            # also create a UI schema
+  )
+
+For more details on how to use this wrapper, read the documentation of
+:func:`recordtype_to_json_schema`.
+
+Other useful functions are `make_array`, which creates an array out of a single schema, and
+`merge_schemas`, which, as the name suggests, allows combining multiple schema definitions into a
+single schema.
 
-The scope of this json schema is the automatic generation of user interfaces.
 """
 
 from collections import OrderedDict
 from typing import Any, Dict, Iterable, List, Optional, Sequence, Tuple, Union
 
 import linkahead as db
+from linkahead.cached import cached_query, cache_clear
 from linkahead.common.datatype import get_list_datatype, is_list_datatype
+from .models.data_model import DataModel
 
 
 class JsonSchemaExporter:
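
A sketch of the two helpers named in the module docstring (``schema_a`` and
``schema_b`` stand for schemas returned by `recordtype_to_json_schema`; the
exact `merge_schemas` signature is an assumption here)::

  import caosadvancedtools.json_schema_exporter as jsex

  array_schema = jsex.make_array(schema_a)  # {"type": "array", "items": schema_a, ...}
  combined = jsex.merge_schemas([schema_a, schema_b])
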
@@ -45,6 +77,7 @@ class JsonSchemaExporter:
                  do_not_create: List[str] = None,
                  do_not_retrieve: List[str] = None,
                  no_remote: bool = False,
+                 use_rt_pool: DataModel = None,
                  multiple_choice: List[str] = None,
                  wrap_files_in_objects: bool = False,
                  ):
@@ -73,16 +106,21 @@ class JsonSchemaExporter:
             description of the corresponding schema entry. If set to false, an
             additional `unit` key is added to the schema itself which is purely
             annotational and ignored, e.g., in validation. Default is True.
-        do_not_create : list[str]
+        do_not_create : list[str], optional
             A list of reference Property names, for which there should be no option
             to create them.  Instead, only the choice of existing elements should
             be given.
-        do_not_retrieve : list[str]
+        do_not_retrieve : list[str], optional
             A list of RecordType names, for which no Records shall be retrieved.  Instead, only an
             object description should be given.  If this list overlaps with the `do_not_create`
             parameter, the behavior is undefined.
-        no_remote : bool
-            If True, do not attempt to connect to a LinkAhead server at all. Default is False.
+        no_remote : bool, optional
+            If True, do not attempt to connect to a LinkAhead server at all. Default is False. Note
+            that the exporter may fail if this option is activated and the data model is not
+            self-sufficient.
+        use_rt_pool : models.data_model.DataModel, optional
+            If given, do not attempt to retrieve RecordType information remotely, but take it
+            from this data model instead.
         multiple_choice : list[str], optional
             A list of reference Property names which shall be denoted as multiple choice properties.
             This means that each option in this property may be selected at most once.  This is not
@@ -108,6 +146,8 @@ class JsonSchemaExporter:
         if not multiple_choice:
             multiple_choice = []
 
+        cache_clear()
+
         self._additional_properties = additional_properties
         self._name_property_for_new_records = name_property_for_new_records
         self._description_property_for_new_records = description_property_for_new_records
@@ -118,6 +158,7 @@ class JsonSchemaExporter:
         self._do_not_create = do_not_create
         self._do_not_retrieve = do_not_retrieve
         self._no_remote = no_remote
+        self._use_rt_pool = use_rt_pool
         self._multiple_choice = multiple_choice
         self._wrap_files_in_objects = wrap_files_in_objects
 
@@ -129,8 +170,6 @@ class JsonSchemaExporter:
             if rt.get_importance(prop.name) != db.OBLIGATORY:
                 continue
             prop_name = prop.name
-            if isinstance(prop.datatype, db.Entity):
-                prop_name = prop.datatype.name
             required_list.append(prop_name)
 
         return required_list
@@ -138,16 +177,25 @@ class JsonSchemaExporter:
     def _make_segment_from_prop(self, prop: db.Property) -> Tuple[OrderedDict, dict]:
         """Return the JSON Schema and ui schema segments for the given property.
 
-        The result may either be a simple json schema segment, such as a `string
-        <https://json-schema.org/understanding-json-schema/reference/string>`_ element (or another
-        simple type), a combination such as `anyOf
-        <https://json-schema.org/understanding-json-schema/reference/combining#anyof>`_ or an `array
-        <https://json-schema.org/understanding-json-schema/reference/array>`_ element
+The result may either be a simple json schema segment, such as a `string
+<https://json-schema.org/understanding-json-schema/reference/string>`_ element (or another
+simple type), a combination such as `anyOf
+<https://json-schema.org/understanding-json-schema/reference/combining#anyof>`_ or an `array
+<https://json-schema.org/understanding-json-schema/reference/array>`_ element
 
-        Parameters
-        ----------
-        prop : db.Property
-            The property to be transformed.
+Parameters
+----------
+prop : db.Property
+    The property to be transformed.
+
+Returns
+-------
+
+json_schema : OrderedDict
+    The Json schema.
+
+ui_schema : dict
+    An appropriate UI schema.
         """
         json_prop = OrderedDict()
         ui_schema: dict = {}
@@ -193,6 +241,11 @@ class JsonSchemaExporter:
             list_element_prop = db.Property(
                 name=prop.name, datatype=get_list_datatype(prop.datatype, strict=True))
             json_prop["items"], inner_ui_schema = self._make_segment_from_prop(list_element_prop)
+            if "type" in json_prop["items"] and (
+                    json_prop["items"]["type"] in ["boolean", "integer", "number", "string"]
+            ):
+                json_prop["items"]["type"] = [json_prop["items"]["type"], "null"]
+
             if prop.name in self._multiple_choice and prop.name in self._do_not_create:
                 # TODO: if not multiple_choice, but do_not_create:
                 # "ui:widget" = "radio" & "ui:inline" = true
@@ -226,8 +279,10 @@ class JsonSchemaExporter:
                     json_prop["items"] = {
                         "type": "object",
                         "title": "Next file",
+                        # TODO Why can't it be empty?
                         # The wrapper object must wrap a file and can't be empty.
-                        "required": ["file"],
+                        "required": [  # "file"
+                        ],
                         # Wrapper objects must only contain the wrapped file.
                         "additionalProperties": False,
                         "properties": {
@@ -256,12 +311,24 @@ class JsonSchemaExporter:
                     # Only a simple list of values
                     json_prop["enum"] = values
                 else:
-                    if self._no_remote:
+                    if self._use_rt_pool:
+                        rt = self._use_rt_pool.get_deep(prop_name)
+                    elif self._no_remote:
                         rt = prop.datatype
                     else:
-                        rt = db.execute_query(f"FIND RECORDTYPE WITH name='{prop_name}'",
-                                              unique=True)
+                        results = cached_query(f"FIND RECORDTYPE WITH name='{prop_name}'")
+                        assert len(results) <= 1
+                        if len(results):
+                            rt = results[0]
+                        else:
+                            rt = db.Entity()
                     subschema, ui_schema = self._make_segment_from_recordtype(rt)
+                    if prop.is_reference():
+                        if prop.name:
+                            subschema["title"] = prop.name
+                        if prop.description:
+                            subschema["description"] = prop.description
+
                     # if inner_ui_schema:
                     #     ui_schema = inner_ui_schema
                     if values:
@@ -334,7 +401,7 @@ class JsonSchemaExporter:
         if self._no_remote:
             return []
 
-        possible_values = db.execute_query(f"SELECT name, id FROM {role}")
+        possible_values = cached_query(f"SELECT name, id FROM {role}")
 
         vals = []
         for val in possible_values:
@@ -479,6 +546,7 @@ def recordtype_to_json_schema(rt: db.RecordType, additional_properties: bool = T
                               do_not_create: List[str] = None,
                               do_not_retrieve: List[str] = None,
                               no_remote: bool = False,
+                              use_rt_pool: DataModel = None,
                               multiple_choice: List[str] = None,
                               rjsf: bool = False,
                               wrap_files_in_objects: bool = False
@@ -524,6 +592,9 @@ def recordtype_to_json_schema(rt: db.RecordType, additional_properties: bool = T
         parameter, the behavior is undefined.
     no_remote : bool, optional
         If True, do not attempt to connect to a LinkAhead server at all.  Default is False.
+    use_rt_pool : models.data_model.DataModel, optional
+        If given, do not attempt to retrieve RecordType information remotely, but take it from
+        this data model instead.
     multiple_choice : list[str], optional
         A list of reference Property names which shall be denoted as multiple choice properties.
         This means that each option in this property may be selected at most once.  This is not
@@ -534,8 +605,8 @@ def recordtype_to_json_schema(rt: db.RecordType, additional_properties: bool = T
     wrap_files_in_objects : bool, optional
         Whether (lists of) files should be wrapped into an array of objects that
         have a file property. The sole purpose of this wrapping is to provide a
-        workaround for a `react-jsonschema-form
-        bug<https://github.com/rjsf-team/react-jsonschema-form/issues/3957>`_ so
+        workaround for a `react-jsonschema-form bug
+        <https://github.com/rjsf-team/react-jsonschema-form/issues/3957>`_ so
         only set this to True if you're using the exported schema with
         react-json-form and you are experiencing the bug. Default is False.
 
@@ -560,6 +631,7 @@ def recordtype_to_json_schema(rt: db.RecordType, additional_properties: bool = T
         do_not_create=do_not_create,
         do_not_retrieve=do_not_retrieve,
         no_remote=no_remote,
+        use_rt_pool=use_rt_pool,
         multiple_choice=multiple_choice,
         wrap_files_in_objects=wrap_files_in_objects
     )
@@ -603,8 +675,16 @@ ui_schema : dict, optional
         "$schema": "https://json-schema.org/draft/2020-12/schema",
     }
 
+    if schema.get("description"):
+        result["description"] = schema["description"]
+
     if rjsf_uischema is not None:
         ui_schema = {"items": rjsf_uischema}
+        # Propagate ui: options up one level.
+        for key in rjsf_uischema.keys():
+            if key.startswith("ui:"):
+                ui_schema[key] = rjsf_uischema[key]
+
         return result, ui_schema
     return result
 
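The new ``use_rt_pool`` parameter can be exercised offline; a hedged sketch
(file and RecordType names are invented)::

  import caosadvancedtools.json_schema_exporter as jsex
  import caosadvancedtools.models.parser as parser

  model = parser.parse_model_from_yaml("my_model.yml")
  # Resolve referenced RecordTypes from the parsed data model instead of
  # querying a LinkAhead server:
  schema = jsex.recordtype_to_json_schema(
      rt=model.get_deep("Experiment"),
      use_rt_pool=model,
  )
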
diff --git a/src/caosadvancedtools/models/parser.py b/src/caosadvancedtools/models/parser.py
index 37f34e7bcbae48188c96b9bea6434d59571020fd..f9bea92455e948eb40e337a43ad87b6d79156fce 100644
--- a/src/caosadvancedtools/models/parser.py
+++ b/src/caosadvancedtools/models/parser.py
@@ -52,9 +52,7 @@ from linkahead.common.datatype import get_list_datatype
 from .data_model import CAOSDB_INTERNAL_PROPERTIES, DataModel
 
 # Keywords which are allowed in data model descriptions.
-KEYWORDS = ["parent",  # deprecated, use inherit_from_* instead:
-                       # https://gitlab.com/caosdb/caosdb-advanced-user-tools/-/issues/36
-            "importance",
+KEYWORDS = ["importance",
             "datatype",  # for example TEXT, INTEGER or REFERENCE
             "unit",
             "description",
@@ -250,7 +248,7 @@ debug : bool, optional
 
         Returns
         -------
-        out : DataModel
+        out : data_model.DataModel
           The created DataModel
         """
         with open(filename, 'r') as outfile:
@@ -271,7 +269,7 @@ debug : bool, optional
 
         Returns
         -------
-        out : DataModel
+        out : data_model.DataModel
           The created DataModel
         """
         ymlmodel = yaml.load(string, Loader=SafeLineLoader)
@@ -291,7 +289,7 @@ debug : bool, optional
 
         Returns
         -------
-        out : DataModel
+        out : data_model.DataModel
           The created DataModel
         """
 
@@ -595,16 +593,6 @@ debug : bool, optional
                     self._inherit(name, prop, db.RECOMMENDED)
                 elif prop_name == "inherit_from_suggested":
                     self._inherit(name, prop, db.SUGGESTED)
-                elif prop_name == "parent":
-                    warn(
-                        DeprecationWarning(
-                            "The `parent` keyword is deprecated and will be "
-                            "removed in a future version.  Use "
-                            "`inherit_from_{obligatory|recommended|suggested}` "
-                            "instead."
-                        )
-                    )
-                    self._inherit(name, prop, db.OBLIGATORY)
 
                 else:
                     raise ValueError("invalid keyword: {}".format(prop_name))
@@ -718,7 +706,7 @@ class JsonSchemaParser(Parser):
 
         Returns
         -------
-        out : DataModel
+        out : data_model.DataModel
             The created DataModel
         """
         # @author Florian Spreckelsen
@@ -744,7 +732,7 @@ class JsonSchemaParser(Parser):
 
         Returns
         -------
-        our : DataModel
+        out : data_model.DataModel
             The datamodel defined in `model_dict`
         """
         # @review Timm Fitschen 2023-05-25
diff --git a/src/caosadvancedtools/scifolder/analysis_cfood.py b/src/caosadvancedtools/scifolder/analysis_cfood.py
index 27cb871aed08f41531c367567ea36ea9a3faaf69..adce7225649ddf80852588d1cb78d045c04db1d3 100644
--- a/src/caosadvancedtools/scifolder/analysis_cfood.py
+++ b/src/caosadvancedtools/scifolder/analysis_cfood.py
@@ -16,17 +16,14 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-import os
-from itertools import chain
-
 import caosdb as db
-from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_parent,
+from caosadvancedtools.cfood import (AbstractFileCFood,
                                      assure_has_property,
-                                     assure_object_is_in_list, get_entity)
-from caosadvancedtools.read_md_header import get_header
+                                     assure_object_is_in_list,
+                                     )
 
 from .generic_pattern import full_pattern
-from .utils import (get_files_referenced_by_field, parse_responsibles,
+from .utils import (parse_responsibles,
                     reference_records_corresponding_to_files)
 from .withreadme import DATAMODEL as dm
 from .withreadme import (RESULTS, REVISIONOF, SCRIPTS, SOURCES, WithREADME,
@@ -44,6 +41,9 @@ class AnalysisCFood(AbstractFileCFood, WithREADME):
     def __init__(self,  *args, **kwargs):
         super().__init__(*args, **kwargs)
         WithREADME.__init__(self)
+        self.analysis = None
+        self.people = None
+        self.project = None
 
     def collect_information(self):
         self.find_referenced_files([RESULTS, SOURCES, SCRIPTS])
diff --git a/src/caosadvancedtools/scifolder/experiment_cfood.py b/src/caosadvancedtools/scifolder/experiment_cfood.py
index 38606b5f8ffd372d7bf6f507ed96738d9345f16c..83329863433d302e16744a781a3b599fe0bb11f5 100644
--- a/src/caosadvancedtools/scifolder/experiment_cfood.py
+++ b/src/caosadvancedtools/scifolder/experiment_cfood.py
@@ -17,15 +17,15 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
 import caosdb as db
-from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_description,
-                                     assure_has_parent, assure_has_property,
-                                     assure_object_is_in_list, get_entity)
-from caosadvancedtools.read_md_header import get_header
+from caosadvancedtools.cfood import (AbstractFileCFood,
+                                     assure_has_property,
+                                     assure_object_is_in_list,
+                                     )
 
 from .generic_pattern import full_pattern
 from .utils import parse_responsibles, reference_records_corresponding_to_files
 from .withreadme import DATAMODEL as dm
-from .withreadme import RESULTS, REVISIONOF, SCRIPTS, WithREADME, get_glob
+from .withreadme import RESULTS, REVISIONOF, WithREADME, get_glob
 
 
 class ExperimentCFood(AbstractFileCFood, WithREADME):
@@ -42,7 +42,10 @@ class ExperimentCFood(AbstractFileCFood, WithREADME):
         super().__init__(*args, **kwargs)
         WithREADME.__init__(self)
 
-        self.name_map = {},
+        self.name_map = ({}, )
+        self.experiment = None
+        self.people = None
+        self.project = None
 
     @staticmethod
     def get_re():
diff --git a/src/caosadvancedtools/scifolder/publication_cfood.py b/src/caosadvancedtools/scifolder/publication_cfood.py
index fc78e5b759e98e8989c952ccbafeef117e2ed33d..68e345aca1bd650b4da784f4741866683bd4f04a 100644
--- a/src/caosadvancedtools/scifolder/publication_cfood.py
+++ b/src/caosadvancedtools/scifolder/publication_cfood.py
@@ -16,18 +16,14 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-import os
-from itertools import chain
-
 import caosdb as db
 from caosadvancedtools.cfood import (AbstractFileCFood,
                                      assure_object_is_in_list, fileguide,
-                                     get_entity)
+                                     )
 from caosadvancedtools.read_md_header import get_header
-from caosadvancedtools.utils import find_records_that_reference_ids
 
 from .generic_pattern import date_suffix_pattern, readme_pattern
-from .utils import (get_files_referenced_by_field, parse_responsibles,
+from .utils import (parse_responsibles,
                     reference_records_corresponding_to_files)
 from .withreadme import DATAMODEL as dm
 from .withreadme import (RESULTS, REVISIONOF, SCRIPTS, SOURCES, WithREADME,
@@ -37,16 +33,15 @@ from .withreadme import (RESULTS, REVISIONOF, SCRIPTS, SOURCES, WithREADME,
 def folder_to_type(name):
     if name == "Theses":
         return "Thesis"
-    elif name == "Articles":
+    if name == "Articles":
         return "Article"
-    elif name == "Posters":
+    if name == "Posters":
         return "Poster"
-    elif name == "Presentations":
+    if name == "Presentations":
         return "Presentation"
-    elif name == "Reports":
+    if name == "Reports":
         return "Report"
-    else:
-        raise ValueError()
+    raise ValueError(f"Unknown folder name: {name}")
 
 
 class PublicationCFood(AbstractFileCFood, WithREADME):
@@ -58,6 +53,8 @@ class PublicationCFood(AbstractFileCFood, WithREADME):
     def __init__(self,  *args, **kwargs):
         super().__init__(*args, **kwargs)
         WithREADME.__init__(self)
+        self.people = None
+        self.publication = None
 
     def collect_information(self):
         self.find_referenced_files([RESULTS, SOURCES, SCRIPTS])
diff --git a/src/caosadvancedtools/scifolder/result_table_cfood.py b/src/caosadvancedtools/scifolder/result_table_cfood.py
index deaa2d00118659a9b177a05fe40b19a1793a16fb..e32cd0bd1efe77d5be4583c8bae764a677e33fc4 100644
--- a/src/caosadvancedtools/scifolder/result_table_cfood.py
+++ b/src/caosadvancedtools/scifolder/result_table_cfood.py
@@ -20,17 +20,12 @@ import re
 
 import caosdb as db
 import pandas as pd
-from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_description,
-                                     assure_has_parent, assure_has_property,
-                                     assure_object_is_in_list, get_entity)
-from caosadvancedtools.read_md_header import get_header
+from caosadvancedtools.cfood import AbstractFileCFood
 
 from ..cfood import assure_property_is, fileguide
 from .experiment_cfood import ExperimentCFood
 from .generic_pattern import date_pattern, date_suffix_pattern, project_pattern
-from .utils import parse_responsibles, reference_records_corresponding_to_files
-from .withreadme import DATAMODEL as dm
-from .withreadme import RESULTS, REVISIONOF, SCRIPTS, WithREADME, get_glob
 
 
 # TODO similarities with TableCrawler
@@ -49,6 +44,9 @@ class ResultTableCFood(AbstractFileCFood):
     def __init__(self,  *args, **kwargs):
         super().__init__(*args, **kwargs)
         self.table = pd.read_csv(fileguide.access(self.crawled_path))
+        self.recs = []
+        self.experiment = None
+        self.project = None
 
     @staticmethod
     def get_re():
@@ -60,7 +58,7 @@ class ResultTableCFood(AbstractFileCFood):
         self.experiment, self.project = (
             ExperimentCFood.create_identifiable_experiment(self.match))
 
-        for idx, row in self.table.iterrows():
+        for _, row in self.table.iterrows():
             rec = db.Record()
             rec.add_parent(self.match.group("recordtype"))
 
@@ -77,10 +75,11 @@ class ResultTableCFood(AbstractFileCFood):
         self.identifiables.extend([self.project, self.experiment])
 
     def update_identifiables(self):
-        for ii, (idx, row) in enumerate(self.table.iterrows()):
+        for ii, (_, row) in enumerate(self.table.iterrows()):
             for col in row.index:
                 match = re.match(ResultTableCFood.property_name_re, col)
-                assure_property_is(self.recs[ii], match.group("pname"), row.loc[col], to_be_updated=self.to_be_updated)
+                assure_property_is(self.recs[ii], match.group("pname"), row.loc[col],
+                                   to_be_updated=self.to_be_updated)
         assure_property_is(self.experiment, self.match.group("recordtype"),
                            self.recs, to_be_updated=self.to_be_updated,
                            datatype=db.LIST(self.match.group("recordtype")))
diff --git a/src/caosadvancedtools/scifolder/simulation_cfood.py b/src/caosadvancedtools/scifolder/simulation_cfood.py
index c8f23f1485d7a1f64dcd940552051d2e1ec5bb07..f8f3d07e30b81e42591ce0c4698c0f47164f2b90 100644
--- a/src/caosadvancedtools/scifolder/simulation_cfood.py
+++ b/src/caosadvancedtools/scifolder/simulation_cfood.py
@@ -16,17 +16,14 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-import os
-from itertools import chain
-
 import caosdb as db
-from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_parent,
+from caosadvancedtools.cfood import (AbstractFileCFood,
                                      assure_has_property,
-                                     assure_object_is_in_list, get_entity)
-from caosadvancedtools.read_md_header import get_header
+                                     assure_object_is_in_list,
+                                     )
 
 from .generic_pattern import full_pattern
-from .utils import (get_files_referenced_by_field, parse_responsibles,
+from .utils import (parse_responsibles,
                     reference_records_corresponding_to_files)
 from .withreadme import DATAMODEL as dm
 from .withreadme import (RESULTS, REVISIONOF, SCRIPTS, SOURCES, WithREADME,
@@ -42,6 +39,9 @@ class SimulationCFood(AbstractFileCFood, WithREADME):
     def __init__(self,  *args, **kwargs):
         super().__init__(*args, **kwargs)
         WithREADME.__init__(self)
+        self.people = None
+        self.project = None
+        self.simulation = None
 
     def collect_information(self):
         self.find_referenced_files([RESULTS, SOURCES, SCRIPTS])
diff --git a/src/caosadvancedtools/scifolder/software_cfood.py b/src/caosadvancedtools/scifolder/software_cfood.py
index 77fb46521e9aab875b6f99d0a1ee4ac44177e09c..d91817f10a278b91f7c52aa1a0674b1a2daa0394 100644
--- a/src/caosadvancedtools/scifolder/software_cfood.py
+++ b/src/caosadvancedtools/scifolder/software_cfood.py
@@ -17,20 +17,16 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 
-import os
-from itertools import chain
-
 import caosdb as db
-from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_parent,
+from caosadvancedtools.cfood import (AbstractFileCFood,
                                      assure_has_property, assure_name_is,
-                                     assure_object_is_in_list, get_entity)
+                                     assure_object_is_in_list,
+                                     )
 from caosadvancedtools.guard import global_guard as guard
-from caosadvancedtools.read_md_header import get_header
 
 from .generic_pattern import full_pattern
-from .utils import get_files_referenced_by_field, parse_responsibles
+from .utils import parse_responsibles
 from .withreadme import BINARIES
-from .withreadme import DATAMODEL as dm
 from .withreadme import SOURCECODE, WithREADME
 
 
@@ -44,6 +40,9 @@ class SoftwareCFood(AbstractFileCFood, WithREADME):
     def __init__(self,  *args, **kwargs):
         super().__init__(*args, **kwargs)
         WithREADME.__init__(self)
+        self.people = None
+        self.software = None
+        self.softwareversion = None
 
     def collect_information(self):
         self.find_referenced_files([BINARIES, SOURCECODE])
diff --git a/src/caosadvancedtools/scifolder/utils.py b/src/caosadvancedtools/scifolder/utils.py
index 50e897c7d2f19c6269ec622489c5a2c6ce1a28e0..cbf87c4b802c829f34f4368c3605ce05fa42cfb2 100644
--- a/src/caosadvancedtools/scifolder/utils.py
+++ b/src/caosadvancedtools/scifolder/utils.py
@@ -26,7 +26,6 @@ import pandas as pd
 from caosadvancedtools.cfood import assure_object_is_in_list, fileguide
 from caosadvancedtools.utils import (find_records_that_reference_ids,
                                      read_field_as_list,
-                                     return_field_or_property,
                                      string_to_person)
 
 logger = logging.getLogger("caosadvancedtools")
diff --git a/src/caosadvancedtools/scifolder/withreadme.py b/src/caosadvancedtools/scifolder/withreadme.py
index e1968ba49799827467c7ef93a7070b7f090010fb..d8388e1d28bd23e1804c2f747a47f4ea97d265b0 100644
--- a/src/caosadvancedtools/scifolder/withreadme.py
+++ b/src/caosadvancedtools/scifolder/withreadme.py
@@ -26,8 +26,7 @@ import caosdb as db
 from caosadvancedtools.cfood import (assure_has_description, assure_has_parent,
                                      assure_object_is_in_list, fileguide)
 from caosadvancedtools.read_md_header import get_header as get_md_header
-from caosadvancedtools.table_importer import (win_path_converter,
-                                              win_path_list_converter)
+from caosadvancedtools.table_importer import win_path_converter
 from caosadvancedtools.utils import return_field_or_property
 
 from .utils import (get_entity_ids_from_include_file,
@@ -236,7 +235,7 @@ class WithREADME(object):
                     generic_references,
                     record,
                     prop_name,
-                    to_be_updated=self.to_be_updated,
+                    to_be_updated=self.to_be_updated,  # pylint: disable=no-member
                     datatype=db.LIST(db.REFERENCE),
                 )
 
diff --git a/src/caosadvancedtools/serverside/generic_analysis.py b/src/caosadvancedtools/serverside/generic_analysis.py
index 85d0c860df75fce205c5eaad77731fc04eee9e40..7c7b26cc3ddb19ff54710f04debe3c0a48f35b82 100644
--- a/src/caosadvancedtools/serverside/generic_analysis.py
+++ b/src/caosadvancedtools/serverside/generic_analysis.py
@@ -38,7 +38,7 @@ The called script can use and create arbitrary properties.
 BUT if the standard properties (InputDataSet, etc.) are used, the Record can
 easily be created.
 
-
+.. code-block::
 
       "Analyze"       "Perform Anlysis"
    Knopf an Record     Form im WebUI
diff --git a/src/caosadvancedtools/serverside/helper.py b/src/caosadvancedtools/serverside/helper.py
index ba75739e0fdc0a83f235db6920471afb196f4246..b7289c7ffd1d67ebd9862aa5f92cd41ccc62d706 100644
--- a/src/caosadvancedtools/serverside/helper.py
+++ b/src/caosadvancedtools/serverside/helper.py
@@ -223,6 +223,7 @@ def init_data_model(entities):
                         "be a {}.".format(e.role, local_role))
                 raise DataModelError(e.name, info)
     except db.exceptions.EntityDoesNotExistError:
+        # pylint: disable=raise-missing-from
         raise DataModelError(e.name, "This entity does not exist.")
 
     return True
@@ -246,7 +247,7 @@ def get_data(filename, default=None):
         Data from the given file.
     """
     result = default.copy() if default is not None else {}
-    with open(filename, 'r') as fi:
+    with open(filename, mode="r", encoding="utf-8") as fi:
         data = json.load(fi)
     result.update(data)
 
diff --git a/src/caosadvancedtools/suppressKnown.py b/src/caosadvancedtools/suppressKnown.py
index c4b57039c5184f2443e4dbb91cf11f5e59ae6790..1b31de7e9d8f1fdce35a135d558dd5ceea3bca2a 100644
--- a/src/caosadvancedtools/suppressKnown.py
+++ b/src/caosadvancedtools/suppressKnown.py
@@ -16,8 +16,11 @@ class SuppressKnown(logging.Filter):
     added to the appropriate Logger and logging calls (e.g. to warning, info
     etc.) need to have an additional `extra` argument.
     This argument should be a dict that contains an identifier and a category.
-    Example: `extra={"identifier":"<Record>something</Record>",
-                     category="entities"}
+
+    Example::
+
+      extra={"identifier":"<Record>something</Record>", category="entities"}
+
     The identifier is used to check whether a message was shown before and
     should be a string. The category can be used to remove a specific group of
     messages from memory and the logger would show those messages again even
@@ -74,9 +77,7 @@ class SuppressKnown(logging.Filter):
         return sha256((txt+str(identifier)).encode("utf-8")).hexdigest()
 
     def filter(self, record):
-        """
-        Return whether the record shall be logged.
-
+        """Return whether the record shall be logged.
 
         If either identifier or category is missing, 1 is returned (logging
         enabled). If the record has both attributes, it is checked whether the
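
A usage sketch for the filter (the no-argument construction is an assumption;
the constructor may require further arguments, e.g., a file for persisting
hashes)::

  import logging
  from caosadvancedtools.suppressKnown import SuppressKnown

  logger = logging.getLogger("caosadvancedtools")
  logger.addFilter(SuppressKnown())
  logger.warning("Problematic entity!",
                 extra={"identifier": "<Record>something</Record>",
                        "category": "entities"})
  # A second call with the same identifier/category pair is filtered out.
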
diff --git a/src/caosadvancedtools/table_converter.py b/src/caosadvancedtools/table_converter.py
index 4b8591ed009ee8e63b328ad43e0d458b3e805ce7..7d1097e13a0780a7656a18c71bf7f408df5c69c5 100644
--- a/src/caosadvancedtools/table_converter.py
+++ b/src/caosadvancedtools/table_converter.py
@@ -25,6 +25,7 @@ import re
 import sys
 
 import caosdb as db
+import numpy as np
 import pandas as pd
 
 
@@ -48,27 +49,25 @@ def generate_property_name(prop):
 
 
 def to_table(container):
-    """ creates a table from the records in a container """
+    """Create a table from the records in a container."""
 
     if len(container) == 0:
-        raise ValueError("Container is empty")
-    properties = set()
+        return pd.DataFrame()
+    rts = {p.name for p in container[0].parents}
 
+    data = []
     for rec in container:
-        properties.update([generate_property_name(p)
-                           for p in container[0].get_properties()])
-    df = pd.DataFrame(columns=list(properties))
-    rts = set([p.name for p in container[0].parents])
-
-    for ii, rec in enumerate(container):
-        if set([p.name for p in rec.parents]) != rts:
+        if {p.name for p in rec.parents} != rts:
             raise ValueError("Parents differ")
 
-        for p in rec.get_properties():
-
-            df.loc[ii, generate_property_name(p)] = p.value
+        row_dict = {}
+        for prop in rec.get_properties():
+            propname = generate_property_name(prop)
+            row_dict[propname] = prop.value
+        data.append(row_dict)
+    result = pd.DataFrame(data=data)
 
-    return df
+    return result
 
 
 def from_table(spreadsheet, recordtype):
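
The rewritten ``to_table`` in action, including the new empty-container
behavior noted in the CHANGELOG (a sketch; the RecordType and property names
are invented)::

  import caosdb as db
  from caosadvancedtools.table_converter import to_table

  assert to_table(db.Container()).empty  # no longer raises ValueError

  records = db.Container()
  rec = db.Record()
  rec.add_parent(name="Experiment")
  rec.add_property(name="temperature", value=[300, 301])  # list values work now
  records.append(rec)
  df = to_table(records)  # one row per Record, one column per property
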
diff --git a/src/caosadvancedtools/table_json_conversion/__init__.py b/src/caosadvancedtools/table_json_conversion/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/caosadvancedtools/table_json_conversion/fill_xlsx.py b/src/caosadvancedtools/table_json_conversion/fill_xlsx.py
new file mode 100644
index 0000000000000000000000000000000000000000..60b5c96c7de141b1ecb12254e6928252fe4a9f5c
--- /dev/null
+++ b/src/caosadvancedtools/table_json_conversion/fill_xlsx.py
@@ -0,0 +1,398 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from __future__ import annotations
+
+import json
+import pathlib
+from collections import OrderedDict
+from types import SimpleNamespace
+from typing import Any, Dict, List, Optional, TextIO, Union
+from warnings import warn
+
+from jsonschema import FormatChecker, validate
+from jsonschema.exceptions import ValidationError
+from openpyxl import Workbook, load_workbook
+from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
+from openpyxl.worksheet.worksheet import Worksheet
+
+from .table_generator import ColumnType, RowType
+from .utils import p2s
+
+
+def _is_exploded_sheet(sheet: Worksheet) -> bool:
+    """Return True if this is a an "exploded" sheet.
+
+    An exploded sheet is a sheet whose data entries are LIST valued properties of entries in another
+    sheet.  A sheet is detected as exploded iff it has FOREIGN columns.
+    """
+    column_types = _get_column_types(sheet)
+    return ColumnType.FOREIGN.name in column_types.values()
+
+
+def _get_column_types(sheet: Worksheet) -> OrderedDict:
+    """Return an OrderedDict: column index -> column type for the sheet.
+    """
+    result = OrderedDict()
+    type_row_index = _get_row_type_column_index(sheet)
+    for idx, col in enumerate(sheet.columns):
+        type_cell = col[type_row_index]
+        result[idx] = type_cell.value if type_cell.value is not None else ColumnType.IGNORE.name
+        assert (hasattr(ColumnType, result[idx])
+                or result[idx] == RowType.COL_TYPE.name), (
+            f"Unexpected column type value ({idx}{type_row_index}): {type_cell.value}")
+    return result
+
+
+def _get_foreign_key_columns(sheet: Worksheet) -> Dict[str, SimpleNamespace]:
+    """Return the foreign keys of the worksheet.
+
+Returns
+-------
+out: dict[str, SimpleNamespace]
+  The keys are the stringified paths.  The values are SimpleNamespace objects with ``index``,
+  ``path`` and ``column`` attributes.
+    """
+    column_types = _get_column_types(sheet)
+    path_rows = _get_path_rows(sheet)
+    result = OrderedDict()
+    for for_idx, name in column_types.items():
+        if name != ColumnType.FOREIGN.name:
+            continue
+        path = []
+        for row in path_rows:
+            component = sheet.cell(row=row+1, column=for_idx+1).value
+            if component is None:
+                break
+            assert isinstance(component, str), f"Expected string: {component}"
+            path.append(component)
+        result[p2s(path)] = SimpleNamespace(index=for_idx, path=path,
+                                            column=list(sheet.columns)[for_idx])
+    return result
+
+
+def _get_row_type_column_index(sheet: Worksheet):
+    """Return the column index (0-indexed) of the column which defines the row types.
+    """
+    for col in sheet.columns:
+        for cell in col:
+            if cell.value == RowType.COL_TYPE.name:
+                return cell.column - 1
+    raise ValueError("The column which defines row types (COL_TYPE, PATH, ...) is missing")
+
+
+def _get_path_rows(sheet: Worksheet):
+    """Return the 0-based indices of the rows which represent paths."""
+    rows = []
+    rt_col = _get_row_type_column_index(sheet)
+    for cell in list(sheet.columns)[rt_col]:
+        if cell.value == RowType.PATH.name:
+            rows.append(cell.row-1)
+    return rows
+
+
+def _next_row_index(sheet: Worksheet) -> int:
+    """Return the index for the next data row.
+
+    This is defined as the first row without any content.
+    """
+    return sheet.max_row
+
+
+def _read_or_dict(data: Union[dict, str, TextIO]) -> dict:
+    """If data is a json file name or input stream, read data from there."""
+    if isinstance(data, dict):
+        pass
+    elif isinstance(data, str):
+        with open(data, encoding="utf-8") as infile:
+            data = json.load(infile)
+    elif hasattr(data, "read"):
+        data = json.load(data)
+    else:
+        raise ValueError(f"I don't know how to handle the datatype of `data`: {type(data)}")
+    assert isinstance(data, dict)
+    return data
+
+
+class TemplateFiller:
+    """Class to fill XLSX templates.  Has an index for all relevant columns."""
+
+    def __init__(self, workbook: Workbook, graceful: bool = False):
+        self._workbook = workbook
+        self._graceful = graceful
+        self._create_index()
+
+    @property
+    def workbook(self):
+        return self._workbook
+
+    def fill_data(self, data: dict):
+        """Fill the data into the workbook."""
+        self._handle_data(data=data)
+
+    class Context:
+        """Context for an entry: simple properties of all ancestors, organized in a dict.
+
+        This is similar to a dictionary with all scalar element properties at the tree nodes up to
+        the root.  Siblings in lists and dicts are ignored.  Additionally the context knows where
+        its current position is.
+
+        Lookup of elements can easily be achieved by giving the path (as ``list[str]`` or
+        stringified path).
+
+        """
+
+        def __init__(self, current_path: Optional[List[str]] = None,
+                     props: Optional[Dict[str, Any]] = None):
+            self._current_path = current_path if current_path is not None else []
+            self._props = props if props is not None else {}  # this is flat
+
+        def copy(self) -> TemplateFiller.Context:
+            """Deep copy."""
+            result = TemplateFiller.Context(current_path=self._current_path.copy(),
+                                            props=self._props.copy())
+            return result
+
+        def next_level(self, next_level: str) -> TemplateFiller.Context:
+            result = self.copy()
+            result._current_path.append(next_level)  # pylint: disable=protected-access
+            return result
+
+        def __getitem__(self, path: Union[List[str], str]) -> Any:
+            if isinstance(path, list):
+                path = p2s(path)
+            return self._props[path]
+
+        def __setitem__(self, propname: str, value):
+            fullpath = p2s(self._current_path + [propname])
+            self._props[fullpath] = value
+
+        def fill_from_data(self, data: Dict[str, Any]):
+            """Fill the current level with all scalar elements of ``data``."""
+            # TODO Recurse into dicts and lists?
+            for name, value in data.items():
+                if not isinstance(value, (dict, list)):
+                    self[name] = value
+                elif isinstance(value, dict):
+                    # Skip empty dicts and dicts whose first value is a list.
+                    if not value or isinstance(list(value.values())[0], list):
+                        continue
+                    old_path = self._current_path
+                    new_path = self._current_path.copy() + [name]
+                    self._current_path = new_path
+                    self.fill_from_data(data=value)
+                    self._current_path = old_path
+
+    def _create_index(self):
+        """Create a sheet index for the workbook.
+
+        Index the sheets by all path arrays leading to them.  Also create a simple column index by
+        column type and path.
+
+        """
+        self._sheet_index = {}
+        for sheetname in self._workbook.sheetnames:
+            sheet = self._workbook[sheetname]
+            type_column = [x.value for x in list(sheet.columns)[
+                _get_row_type_column_index(sheet)]]
+            # 0-indexed, as everything outside of sheet.cell(...):
+            coltype_idx = type_column.index(RowType.COL_TYPE.name)
+            path_indices = [i for i, typ in enumerate(type_column) if typ == RowType.PATH.name]
+
+            # Get the paths; the path without the leaf component is used for sheet indexing.
+            for col_idx, col in enumerate(sheet.columns):
+                if col[coltype_idx].value == RowType.COL_TYPE.name:
+                    continue
+                path = []
+                for path_idx in path_indices:
+                    if col[path_idx].value is not None:
+                        path.append(col[path_idx].value)
+                if col[coltype_idx].value not in [ColumnType.SCALAR.name, ColumnType.LIST.name]:
+                    continue
+
+                path_str = p2s(path)
+                assert path_str not in self._sheet_index
+                self._sheet_index[path_str] = SimpleNamespace(
+                    sheetname=sheetname, sheet=sheet, col_index=col_idx,
+                    col_type=col[coltype_idx].value)
+
+    def _handle_data(self, data: dict, current_path: Optional[List[str]] = None,
+                     context: Optional[TemplateFiller.Context] = None,
+                     only_collect_insertables: bool = False,
+                     ) -> Optional[Dict[str, Any]]:
+        """Handle the data and write it into ``workbook``.
+
+Parameters
+----------
+data: dict
+  The data at the current path position.  Elements may be dicts, lists or simple scalar values.
+
+current_path: list[str], optional
+  If this is None or empty, we are at the top level.  This means that all children shall be entered
+  into their respective sheets and not into a sheet at this level.  ``current_path`` and ``context``
+  must either both be given, or none of them.
+
+context: TemplateFiller.Context, optional
+  Dictionary of scalar element properties at the tree nodes up to the root.  Siblings in lists
+  and dicts are ignored.  ``context`` and ``current_path`` must either both be given, or none of
+  them.
+
+only_collect_insertables: bool, optional
+  If True, do not insert anything on this level, but return a dict with entries to be inserted.
+
+
+Returns
+-------
+out: Union[dict, None]
+  If ``only_collect_insertables`` is True, return a dict (path string -> value); otherwise None.
+        """
+        assert (current_path is None) is (context is None), (
+            "`current_path` and `context` must either both be given, or none of them.")
+        if current_path is None:
+            current_path = []
+        if context is None:
+            context = TemplateFiller.Context()
+        context.fill_from_data(data)
+
+        insertables: Dict[str, Any] = {}
+        for name, content in data.items():
+            # TODO is this the best way to do it????
+            if name == "file":
+                continue
+            path = current_path + [name]
+            next_context = context.next_level(name)
+            # preprocessing
+            if isinstance(content, list):
+                if not content:  # empty list
+                    continue
+                # List elements must be all of the same type.
+                assert len(set(type(entry) for entry in content)) == 1
+
+                if isinstance(content[0], dict):  # all elements are dicts
+                    # An array of objects: must go into exploded sheet
+                    for entry in content:
+                        self._handle_data(data=entry, current_path=path, context=next_context)
+                    continue
+            elif isinstance(content, dict):  # we recurse and simply use the result
+                if not current_path:  # Special handling for top level
+                    self._handle_data(content, current_path=path, context=next_context)
+                    continue
+                insert = self._handle_data(content, current_path=path, context=next_context.copy(),
+                                           only_collect_insertables=True)
+                assert isinstance(insert, dict)
+                assert not any(key in insertables for key in insert)
+                insertables.update(insert)
+                continue
+            else:  # scalars
+                content = [content]  # make list for unified treatment below
+
+            # collecting the data
+            assert isinstance(content, list)
+            if len(content) > 1:
+                content = [ILLEGAL_CHARACTERS_RE.sub("", str(x)) for x in content]
+                value = ";".join(content)  # TODO we need escaping of values
+            else:
+                value = content[0]
+                if isinstance(value, str):
+                    value = ILLEGAL_CHARACTERS_RE.sub("", value)
+            path_str = p2s(path)
+            assert path_str not in insertables
+            insertables[path_str] = value
+        if only_collect_insertables:
+            return insertables
+        if not current_path:  # Top level returns, because there are only sheets for the children.
+            return None
+
+        # actual data insertion
+        insert_row = None
+        sheet = None
+        for path_str, value in insertables.items():
+            if self._graceful and path_str not in self._sheet_index:
+                warn(f"Ignoring path with missing sheet index: {path_str}")
+                continue
+            sheet_meta = self._sheet_index[path_str]
+            if sheet is None:
+                sheet = sheet_meta.sheet
+            assert sheet is sheet_meta.sheet, "All entries must be in the same sheet."
+            col_index = sheet_meta.col_index
+            if insert_row is None:
+                insert_row = _next_row_index(sheet)
+
+            sheet.cell(row=insert_row+1, column=col_index+1, value=value)
+
+        # Insert foreign keys
+        if insert_row is not None and sheet is not None and _is_exploded_sheet(sheet):
+            try:
+                foreigns = _get_foreign_key_columns(sheet)
+            except ValueError:
+                print(f"Sheet: {sheet}")
+                raise
+            for index, path in ((f.index, f.path) for f in foreigns.values()):
+                value = context[path]
+                sheet.cell(row=insert_row+1, column=index+1, value=value)
+
+        return None
+
+
+def fill_template(data: Union[dict, str, TextIO], template: str, result: str,
+                  validation_schema: Optional[Union[dict, str, TextIO]] = None) -> None:
+    """Insert json data into an xlsx file, according to a template.
+
+This function fills the json data into the template stored at ``template`` and stores the result as
+``result``.
+
+Parameters
+----------
+data: Union[dict, str, TextIO]
+  The data, given as Python dict, path to a file or a file-like object.
+template: str
+  Path to the XLSX template.
+result: str
+  Path for the result XLSX.
+validation_schema: dict, optional
+  If given, validate the data against this schema first; an exception is raised if the validation
+  fails.  If no validation schema is given, the template is filled in graceful mode, which ignores
+  more errors in the data.
+"""
+    data = _read_or_dict(data)
+    assert isinstance(data, dict)
+
+    # Validation
+    if validation_schema is not None:
+        validation_schema = _read_or_dict(validation_schema)
+        try:
+            validate(data, validation_schema, format_checker=FormatChecker())
+        except ValidationError as ve:
+            print(ve.message)
+            raise
+    else:
+        print("No validation schema given, continue at your own risk.")
+
+    # Filling the data
+    result_wb = load_workbook(template)
+    template_filler = TemplateFiller(result_wb, graceful=(validation_schema is None))
+    template_filler.fill_data(data=data)
+
+    parentpath = pathlib.Path(result).parent
+    parentpath.mkdir(parents=True, exist_ok=True)
+    result_wb.save(result)
diff --git a/src/caosadvancedtools/table_json_conversion/table_generator.py b/src/caosadvancedtools/table_json_conversion/table_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ca026a8758361b658e98cabbcff42b849bb07fe
--- /dev/null
+++ b/src/caosadvancedtools/table_json_conversion/table_generator.py
@@ -0,0 +1,379 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""
+This module allows the generation of template tables from JSON schemas.
+"""
+
+import pathlib
+import re
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import Dict, List, Optional, Tuple
+
+from openpyxl import Workbook
+from openpyxl.styles import PatternFill
+from openpyxl.workbook.child import INVALID_TITLE_REGEX
+
+from .utils import p2s
+
+
+class ColumnType(Enum):
+    """ column types enum """
+    SCALAR = 1
+    LIST = 2
+    FOREIGN = 3
+    IGNORE = 4
+
+
+class RowType(Enum):
+    """ row types enum """
+    COL_TYPE = 1
+    PATH = 2
+    IGNORE = 3
+
+
+class TableTemplateGenerator(ABC):
+    """ base class for generating tables from json schema """
+
+    def __init__(self):
+        pass
+
+    @abstractmethod
+    def generate(self, schema: dict, foreign_keys: dict, filepath: str):
+        """Generate a sheet definition from a given JSON schema.
+
+        Parameters
+        ----------
+        schema: dict
+            Given JSON schema.
+
+        foreign_keys: dict
+            A tree-like configuration (nested dict) that defines which attributes shall be used to
+            create additional columns when a list of references exists.  The nested dict is
+            structured like the data model; its innermost elements are leaves of the path trees
+            within the JSON and define the required keys.
+
+            | Suppose we want to distinguish Persons that are referenced by Trainings, then
+              ``foreign_keys`` must at least contain the following:
+            | ``{"Training": {"Person": ["name", "email"]}}``.
+
+            Values within the dicts can be either a list representing the keys (as in the example
+            above) or a dict that allows setting additional foreign keys at deeper levels.  In the
+            latter case (dict instead of list), e.g. if there are further levels below "Person" in
+            the above example, the foreign keys for that level itself can be set using the special
+            ``__this__`` key.
+
+            Example: ``{"Training": {"__this__": ["date"], "Person": ["name", "email"]}}``
+            Here, ``date`` is the sole foreign key for Training.
+        """
+
+    def _generate_sheets_from_schema(self, schema: dict, foreign_keys: Optional[dict] = None
+                                     ) -> Dict[str, Dict[str,
+                                                         Tuple[ColumnType, Optional[str], list]]]:
+        """Generate a sheet definition from a given JSON schema.
+
+        Parameters
+        ----------
+        schema: dict
+            given JSON schema
+        foreign_keys: dict, optional
+            a configuration that defines which attributes shall be used to create
+            additional columns when a list of references exists. See ``foreign_keys``
+            argument of TableTemplateGenerator.generate.
+
+        Returns
+        -------
+        sheets: dict
+            A two-level dict which describes columns of template sheets.
+
+            | The structure of this two-level dict is as follows:
+            | ``sheets[sheetname][colname]= (<col_type>, <description>, [<path>, ...])``
+
+            I.e. the outer dict contains sheet names as keys, the inner dict has column names as
+            keys and tuples as values. These tuples consist of:
+            - the column type
+            - the description of the corresponding property
+            - a list representing the path.
+
+        """
+        if not ("type" in schema or "anyOf" in schema):
+            raise ValueError("Inappropriate JSON schema: The following object must contain the "
+                             f"'type' or 'anyOf' key:\n{schema}\n")
+        if "properties" not in schema:
+            raise ValueError("Inappropriate JSON schema: The following object must contain "
+                             f"the 'properties' key:\n{schema}\n")
+        if "type" in schema:
+            assert schema["type"] == "object"
+        if foreign_keys is None:
+            foreign_keys = {}
+        # here, we treat the top level
+        # sheets[sheetname][colname]= (COL_TYPE, description, [path])
+        sheets: Dict[str, Dict[str, Tuple[ColumnType, Optional[str], list]]] = {}
+        for rt_name, rt_def in schema["properties"].items():
+            sheets[rt_name] = self._treat_schema_element(schema=rt_def, sheets=sheets,
+                                                         path=[rt_name], foreign_keys=foreign_keys)
+        return sheets
+
+    def _get_foreign_keys(self, keys: dict, path: list) -> list:
+        """Return the foreign keys that are needed at the location to which path points."""
+        msg = f"A foreign key definition is missing for path:\n{path}\nKeys are:\n{keys}"
+        while path:
+            if keys is None or path[0] not in keys:
+                raise ValueError(msg)
+            keys = keys[path[0]]
+            path = path[1:]
+        if isinstance(keys, dict) and "__this__" in keys:
+            return keys["__this__"]
+        if isinstance(keys, list):
+            return keys
+        raise ValueError(msg)
+
+    def _treat_schema_element(self, schema: dict, sheets: dict, path: List[str],
+                              foreign_keys: Optional[dict] = None, level_in_sheet_name: int = 1,
+                              array_paths: Optional[list] = None
+                              ) -> Dict[str, Tuple[ColumnType, Optional[str], list]]:
+        """Recursively transform elements from the schema into column definitions.
+
+        ``sheets`` is modified in place.
+
+        Parameters
+        ----------
+        schema: dict
+            Part of the json schema; it must be the level that contains the type definition
+            (e.g. 'type' or 'oneOf' key)
+        sheets: dict
+            All the sheets, indexed by their name.  This is typically modified in place by this
+            method.
+        path: list[str]
+            The relevant (sub-)path for this schema part.
+        array_paths: list
+            A list of paths along the way to the current object at which the JSON contains arrays.
+
+        Returns
+        -------
+        columns: dict
+            Describing the columns; see the docstring of ``_generate_sheets_from_schema``.
+        """
+        if not ("type" in schema or "enum" in schema or "oneOf" in schema or "anyOf" in schema):
+            raise ValueError("Inappropriate JSON schema: The following schema part must contain "
+                             f"'type', 'enum', 'oneOf' or 'anyOf':\n{schema}\n")
+
+        if array_paths is None:
+            # if this is not set, we are at top level and the top level element may always be an
+            # array
+            array_paths = [path]
+        if foreign_keys is None:
+            foreign_keys = {}
+
+        ctype = ColumnType.SCALAR
+
+        # if it is an array, value defs are in 'items'
+        if schema.get('type') == 'array':
+            if (schema['items'].get('type') == 'object'
+                    and len(path) > 1):  # list of references; special treatment
+                # we add a new sheet with columns generated from the subtree of the schema
+                sheetname = p2s(path)
+                if sheetname in sheets:
+                    raise ValueError("The schema would lead to two sheets with the same name, "
+                                     f"which is forbidden: {sheetname}")
+                col_def = self._treat_schema_element(
+                    schema=schema['items'], sheets=sheets, path=path, foreign_keys=foreign_keys,
+                    level_in_sheet_name=len(path),
+                    array_paths=array_paths+[path]  # since this level is an array extend the list
+                )
+                if col_def:
+                    sheets[sheetname] = col_def
+                    # and add the foreign keys that are necessary up to this point
+                    for array_path in array_paths:
+                        foreigns = self._get_foreign_keys(foreign_keys, array_path)
+                        if isinstance(foreigns, str):
+                            raise ValueError("Foreign keys must be a list of strings, but a single "
+                                             "string was given:\n"
+                                             f"{array_path} -> {foreigns}")
+                        for foreign in foreigns:
+                            internal_key = p2s(array_path + [foreign])
+                            if internal_key in sheets[sheetname]:
+                                raise ValueError("The schema would lead to two columns with the same "
+                                                 "name, which is forbidden:\n"
+                                                 f"{foreign} -> {internal_key}")
+                            ref_sheet = p2s(array_path)
+                            sheets[sheetname][internal_key] = (
+                                ColumnType.FOREIGN, f"see sheet '{ref_sheet}'", array_path + [foreign])
+                # Columns are added to the new sheet, thus we do not return any columns for the
+                # current sheet.
+                return {}
+
+            # it is a list of primitive types -> semicolon separated list
+            schema = schema['items']
+            ctype = ColumnType.LIST
+
+        # This should only be the case for "new or existing reference".
+        for el in schema.get('oneOf', []):
+            if 'type' in el:
+                schema = el
+                break
+
+        if "properties" in schema:  # recurse for each property, then return
+            cols = {}
+            for pname in schema["properties"]:
+                col_defs = self._treat_schema_element(
+                    schema["properties"][pname], sheets, path+[pname], foreign_keys,
+                    level_in_sheet_name, array_paths=array_paths)
+                for k in col_defs:
+                    if k in cols:
+                        raise ValueError(f"The schema would lead to two columns with the same "
+                                         f"name which is forbidden: {k}")
+                cols.update(col_defs)
+            return cols
+
+        # The schema is a leaf.
+        description = schema.get('description')
+        # definition of a single column
+        default_return = {p2s(path[level_in_sheet_name:]): (ctype, description, path)}
+        if 'type' not in schema and 'enum' in schema:
+            return default_return
+        if 'type' not in schema and 'anyOf' in schema:
+            for d in schema['anyOf']:
+                # currently the only case where this occurs is date formats
+                assert d['type'] == 'string'
+                assert d['format'] == 'date' or d['format'] == 'date-time'
+            return default_return
+        if schema["type"] in ['string', 'number', 'integer', 'boolean']:
+            if 'format' in schema and schema['format'] == 'data-url':
+                return {}  # file; ignore for now
+            return default_return
+        raise ValueError("Inappropriate JSON schema: The following part should define an"
+                         f" object with properties or a primitive type:\n{schema}\n")
+
+
+class XLSXTemplateGenerator(TableTemplateGenerator):
+    """Class for generating XLSX tables from json schema definitions."""
+
+    def generate(self, schema: dict, foreign_keys: dict, filepath: str) -> None:
+        """Generate a sheet definition from a given JSON schema.
+
+        Parameters
+        ----------
+        schema: dict
+            Given JSON schema
+        foreign_keys: dict
+            A configuration that defines which attributes shall be used to create
+            additional columns when a list of references exists. See ``foreign_keys``
+            argument of :meth:`TableTemplateGenerator.generate` .
+        filepath: str
+            The XLSX file will be stored under this path.
+        """
+        sheets = self._generate_sheets_from_schema(schema, foreign_keys)
+        wb = self._create_workbook_from_sheets_def(sheets)
+        parentpath = pathlib.Path(filepath).parent
+        parentpath.mkdir(parents=True, exist_ok=True)
+        wb.save(filepath)
+
+    @staticmethod
+    def _get_max_path_length(sheetdef: dict) -> int:
+        """ returns the length of the longest path contained in the sheet definition
+
+        see TableTemplateGenerator._generate_sheets_from_schema for the structure of the sheets
+        definition dict
+        You need to pass the dict of a single sheet to this function.
+        """
+        return max([len(path) for _, _, path in sheetdef.values()])
+
+    @staticmethod
+    def _get_ordered_cols(sheetdef: dict) -> list:
+        """
+        creates a list with tuples (colname, column type, path) where the foreign keys are first
+        """
+        ordered_cols = []
+        # first foreign cols
+        for colname, (ct, desc, path) in sheetdef.items():
+            if ct == ColumnType.FOREIGN:
+                ordered_cols.append((colname, ct, desc, path))
+        # now the others
+        for colname, (ct, desc, path) in sheetdef.items():
+            if ct != ColumnType.FOREIGN:
+                ordered_cols.append((colname, ct, desc, path))
+
+        return ordered_cols
+
+    def _create_workbook_from_sheets_def(
+            self, sheets: Dict[str, Dict[str, Tuple[ColumnType, Optional[str], list]]]):
+        """Create and return a nice workbook for the given sheets."""
+        wb = Workbook()
+        yellowfill = PatternFill(fill_type="solid", fgColor='00FFFFAA')
+        # remove initial sheet
+        assert wb.sheetnames == ["Sheet"]
+        del wb['Sheet']
+
+        for sheetname, sheetdef in sheets.items():
+            if not sheetdef:
+                continue
+            ws = wb.create_sheet(re.sub(INVALID_TITLE_REGEX, '_', sheetname))
+            # The first row will be the COL_TYPE row.
+            # The first column will be the indicator column with the values COL_TYPE, PATH, IGNORE.
+            # The COL_TYPE row will be followed by as many PATH rows as needed.
+
+            max_path_length = self._get_max_path_length(sheetdef)
+            header_index = 2 + max_path_length
+            description_index = 3 + max_path_length
+
+            # create first column
+            ws.cell(1, 1, RowType.COL_TYPE.name)
+            for index in range(max_path_length):
+                ws.cell(2 + index, 1, RowType.PATH.name)
+            ws.cell(header_index, 1, RowType.IGNORE.name)
+            ws.cell(description_index, 1, RowType.IGNORE.name)
+
+            ordered_cols = self._get_ordered_cols(sheetdef)
+
+            # create other columns
+            for index, (colname, ct, desc, path) in enumerate(ordered_cols):
+                ws.cell(1, 2 + index, ct.name)
+                for path_index, el in enumerate(path):
+                    ws.cell(2 + path_index, 2 + index, el)
+                ws.cell(header_index, 2 + index, colname)
+                if ct == ColumnType.FOREIGN:
+                    # Visual highlighting
+                    ws.cell(header_index, 2 + index).fill = yellowfill
+                if desc:
+                    ws.cell(description_index, 2 + index, desc)
+
+            # hide special rows
+            for index, row in enumerate(ws.rows):
+                if not (row[0].value is None or row[0].value == RowType.IGNORE.name):
+                    ws.row_dimensions[index+1].hidden = True
+
+            # hide special column
+            ws.column_dimensions['A'].hidden = True
+
+        # Reverse the sheet order.
+        for index, sheetname in enumerate(wb.sheetnames[::-1]):
+            wb.move_sheet(sheetname, index-wb.index(wb[sheetname]))
+
+        return wb
diff --git a/src/caosadvancedtools/table_json_conversion/utils.py b/src/caosadvancedtools/table_json_conversion/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..15ae488d7cb8e142afba58424b49e8fc3a15e0d6
--- /dev/null
+++ b/src/caosadvancedtools/table_json_conversion/utils.py
@@ -0,0 +1,25 @@
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from typing import List
+
+
+def p2s(path: List[str]):
+    """Path to string: dot-separated.
+    """
+    return ".".join(path)
diff --git a/src/caosadvancedtools/utils.py b/src/caosadvancedtools/utils.py
index 4d6a4b36bcb5dbdcd5bfe9357c53d4e9aa3501ca..05000a34fd27162837ecfe316b20af42b1156c45 100644
--- a/src/caosadvancedtools/utils.py
+++ b/src/caosadvancedtools/utils.py
@@ -113,17 +113,22 @@ def read_field_as_list(field):
         return [field]
 
 
-def get_referenced_files(glob, prefix=None, filename=None, location=None):
+def get_referenced_files(glob: str, prefix: str = None, filename: str = None, location: str = None):
     """
     queries the database for files referenced by the provided glob
 
-    Parameters:
-    glob: the glob referencing the file(s)
-    prefix: the glob can be relative to some path, in that case that path needs
-            to be given as prefix
-    filename: the file in which the glob is given (used for error messages)
-    location: the location in the file in which the glob is given (used for
-              error messages)
+    Parameters
+    ----------
+    glob: str
+      the glob referencing the file(s)
+    prefix: str, optional
+      the glob can be relative to some path, in that case that path needs
+      to be given as prefix
+    filename: str, optional
+      the file in which the glob is given (used for error messages)
+    location: str, optional
+      the location in the file in which the glob is given (used for
+      error messages)
     """
 
     orig_glob = glob
@@ -161,16 +166,19 @@ def get_referenced_files(glob, prefix=None, filename=None, location=None):
     return files
 
 
-def check_win_path(path, filename=None):
+def check_win_path(path: str, filename: str = None):
     """
     check whether '/' are in the path but no '\'.
 
     If that is the case, it is likely, that the path is not a Windows path.
 
-    Parameters:
-    path: path to be checked
-    filename: if the path is located in a file, this parameter can be used to
-              direct the user to the file where the path is located.
+    Parameters
+    ----------
+    path: str
+      Path to be checked.
+    filename: str
+      If the path is located in a file, this parameter can be used to
+      direct the user to the file where the path is located.
     """
 
     if r"\\" not in path and "/" in path:
diff --git a/src/doc/conf.py b/src/doc/conf.py
index 60630cf6c2c00aae2c9a48a328613e247e0a4015..b94d3060eac8f165f62b6d6fc0417c3c52d4c2a5 100644
--- a/src/doc/conf.py
+++ b/src/doc/conf.py
@@ -27,9 +27,9 @@ copyright = '2023, IndiScale GmbH'
 author = 'Daniel Hornung'
 
 # The short X.Y version
-version = '0.9.0'
+version = '0.10.0'
 # The full version, including alpha/beta/rc tags
-release = '0.9.0'
+release = '0.10.0'
 
 
 # -- General configuration ---------------------------------------------------
@@ -64,7 +64,7 @@ master_doc = 'index'
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = "en"
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
@@ -74,6 +74,7 @@ exclude_patterns = []
 # The name of the Pygments (syntax highlighting) style to use.
 pygments_style = None
 
+default_role = "py:obj"
 
 # -- Options for HTML output -------------------------------------------------
 
@@ -190,7 +191,7 @@ epub_exclude_files = ['search.html']
 # Example configuration for intersphinx: refer to the Python standard library.
 intersphinx_mapping = {
     "python": ("https://docs.python.org/", None),
-    "caosdb-pylib": ("https://caosdb.gitlab.io/caosdb-pylib/", None),
+    "linkahead-pylib": ("https://docs.indiscale.com/caosdb-pylib/", None),
 }
 
 
@@ -199,3 +200,7 @@ autodoc_default_options = {
     'members': None,
     'undoc-members': None,
 }
+autodoc_mock_imports = [
+    "caosadvancedtools.bloxberg",
+    "labfolder",
+]
diff --git a/src/doc/crawler.rst b/src/doc/crawler.rst
index aea023192c0b95c784d5ac91ade12d0c30591d42..d7b351bb3132cefed8920f977b345dc32e4db819 100644
--- a/src/doc/crawler.rst
+++ b/src/doc/crawler.rst
@@ -77,8 +77,8 @@ problems. The exact behavior depends on your setup. However, you can
 have a look at the example in the
 `tests <https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/blob/main/integrationtests/crawl.py>`__.
 
-.. Note:: The crawler depends on the CaosDB Python client, so make sure to install :doc:`pycaosdb
-          <caosdb-pylib:getting_started>`.
+.. Note:: The crawler depends on the LinkAhead Python client, so make sure to install :doc:`pylinkahead
+          <linkahead-pylib:README_SETUP>`.
 
 
 Call ``python3 crawl.py --help`` to see what parameters can be provided.
diff --git a/src/doc/index.rst b/src/doc/index.rst
index 6c2c5f9894ad5c0f5dc3f124de726d264f46d452..7fa017ec4202f25fe9f94a154ed8762c4581eebc 100644
--- a/src/doc/index.rst
+++ b/src/doc/index.rst
@@ -4,8 +4,8 @@ Welcome to caosadvancedtools' documentation!
 Welcome to the advanced Python tools for CaosDB!
 
 
-This documentation helps you to :doc:`get started<getting_started>`, explains the most important
-:doc:`concepts<concepts>` and offers a range of :doc:`tutorials<tutorials>`.
+This documentation helps you to :doc:`get started<README_SETUP>`, explains the most important
+:doc:`concepts<concepts>` and offers a range of deep dives into specific sub-topics.
 
 .. toctree::
    :maxdepth: 2
@@ -16,7 +16,11 @@ This documentation helps you to :doc:`get started<getting_started>`, explains th
    The Caosdb Crawler <crawler>
    YAML data model specification <yaml_interface>
    Specifying a datamodel with JSON schema <json_schema_interface>
+   Convert a data model into a json schema <json_schema_exporter>
+   Conversion between XLSX, JSON and LinkAhead Entities <table-json-conversion/specs>
    _apidoc/modules
+   Related Projects <related_projects/index>
+   Back to overview <https://docs.indiscale.com/>
 
 
 
diff --git a/src/doc/json_schema_exporter.rst b/src/doc/json_schema_exporter.rst
new file mode 100644
index 0000000000000000000000000000000000000000..da4fa9d273f7cb9295974e9632e8eb1730bd47a3
--- /dev/null
+++ b/src/doc/json_schema_exporter.rst
@@ -0,0 +1,8 @@
+JSON schema from data model
+===========================
+
+Sometimes you may want to have a `json schema <https://json-schema.org>`_ which describes a
+LinkAhead data model, for example for the automatic generation of user interfaces with third-party
+tools like `rjsf <https://rjsf-team.github.io/react-jsonschema-form/docs/>`_.
+
+For this use case, look at the documentation of the `caosadvancedtools.json_schema_exporter` module.
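+
+A minimal usage sketch, adapted from the test scripts in this repository (``model.yml`` and the
+RecordType names are placeholders):
+
+.. code-block:: python
+
+   import caosadvancedtools.json_schema_exporter as jsex
+   from caosadvancedtools.models import parser
+
+   # Parse a YAML data model and export two RecordTypes into one merged JSON schema.
+   model = parser.parse_model_from_yaml("model.yml")
+   exporter = jsex.JsonSchemaExporter(additional_properties=False, no_remote=True,
+                                      use_rt_pool=model)
+   schemas = [exporter.recordtype_to_json_schema(model.get_deep(rt))
+              for rt in ("Training", "Person")]
+   merged_schema = jsex.merge_schemas(schemas)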
diff --git a/src/doc/related_projects/index.rst b/src/doc/related_projects/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..87808b4264dfdd37dfa162025b24d31e63de7083
--- /dev/null
+++ b/src/doc/related_projects/index.rst
@@ -0,0 +1,25 @@
+Related Projects
+++++++++++++++++
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+   :hidden:
+
+.. container:: projects
+
+   For in-depth documentation for users, administrators and developers, you may want to visit the subproject-specific documentation pages for:
+
+   :`Server <https://docs.indiscale.com/caosdb-server/>`_: The Java part of the LinkAhead server.
+
+   :`MySQL backend <https://docs.indiscale.com/caosdb-mysqlbackend/>`_: The MySQL/MariaDB components of the LinkAhead server.
+
+   :`WebUI <https://docs.indiscale.com/caosdb-webui/>`_: The default web frontend for the LinkAhead server.
+
+   :`PyLinkAhead <https://docs.indiscale.com/caosdb-pylib/>`_: The LinkAhead Python library.
+
+   :`LinkAhead Crawler <https://docs.indiscale.com/caosdb-crawler/>`_: The crawler is the main tool for automatic data integration in LinkAhead.
+
+   :`LinkAhead <https://docs.indiscale.com/caosdb-deploy>`_: Your all-inclusive LinkAhead software package.
+
+   :`Back to Overview <https://docs.indiscale.com/>`_: LinkAhead Documentation.
diff --git a/src/doc/table-json-conversion/specs.md b/src/doc/table-json-conversion/specs.md
new file mode 100644
index 0000000000000000000000000000000000000000..73e480440eccb923fd0979dc2adb653146667951
--- /dev/null
+++ b/src/doc/table-json-conversion/specs.md
@@ -0,0 +1,253 @@
+# Conversion between LinkAhead data models, JSON schema, and XLSX #
+
+This file describes the conversion between JSON schema files and XLSX templates, and between JSON
+data files following a given schema and XLSX files with data.  This conversion is handled by the
+Python modules in the `table_json_conversion` library.
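+
+A rough sketch of how the Python API can be used (file names are placeholders and the exact import
+path of ``fill_template`` may differ from this sketch):
+
+```python
+import json
+
+from caosadvancedtools.table_json_conversion.table_generator import XLSXTemplateGenerator
+# Assumption: ``fill_template`` is importable from the filling module of this package.
+from caosadvancedtools.table_json_conversion.fill_xlsx import fill_template
+
+with open("schema.json", encoding="utf-8") as schema_file:
+    schema = json.load(schema_file)
+
+# Create an empty XLSX template from the JSON schema ...
+XLSXTemplateGenerator().generate(
+    schema=schema, foreign_keys={"Training": {"Person": ["name", "email"]}},
+    filepath="template.xlsx")
+
+# ... then fill JSON data which follows the schema into that template.
+fill_template(data="data.json", template="template.xlsx", result="result.xlsx",
+              validation_schema=schema)
+```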
+
+Requirements: When converting from a JSON schema, the top level of the schema must be a dict whose
+keys are RecordType names.
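+
+For example, a minimal schema of the expected shape could look like this:
+
+```JSON
+{
+  "type": "object",
+  "properties": {
+    "Training": {
+      "type": "object",
+      "properties": {
+        "date": {"type": "string", "format": "date"}
+      }
+    }
+  }
+}
+```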
+
+## Data models in JSON Schema and JSON data ##
+
+The data model in LinkAhead defines the types of records present in a LinkAhead instance and their
+structure. This data model can also be represented in a JSON Schema, which defines the structure of
+JSON files containing records pertaining to the data model.
+
+For example, the following JSON can describe a "Person" Record:
+
+```JSON
+{
+    "Person": {
+        "family_name": "Steve",
+        "given_name": "Stevie"
+    }
+}
+```
+
+A *JSON Schema* specifies a concrete structure, and the associated JSON files can be used to
+represent data for specific record structures. For instance, one could create a JSON Schema allowing
+the storage of "Training" Records containing information about conducted trainings. This is
+particularly valuable for data import and export. One could generate web forms from the JSON Schema
+or use it to export objects stored in LinkAhead as JSON.
+
+## From JSON to XLSX: Data Representation ##
+
+The following describes how JSON files representing LinkAhead records are converted into XLSX files,
+or how JSON files with records are created from XLSX files.
+
+The attribute name (e.g., "Person" above) determines the RecordType, and the value of this attribute
+can either be an object or a list. If it is an object (as in the example above), a single record is
+represented. In the case of a list, multiple records sharing the same RecordType as the parent are
+represented.
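+
+For example, a list of two "Person" Records could be given like this:
+
+```JSON
+{
+    "Person": [
+        {
+            "family_name": "Steve",
+            "given_name": "Stevie"
+        },
+        {
+            "family_name": "Stevenson",
+            "given_name": "Stevie"
+        }
+    ]
+}
+```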
+
+The *Properties* of the record (e.g., `family_name` and `given_name` above) become *columns* in the
+XLSX file. These properties have an attribute name and a value. The value can be:
+
+- a. A primitive (text, number, boolean, ...)
+- b. A record
+- c. A list of primitive types
+- d. A list of records
+
+In cases *a.* and *c.*, a cell is created in the column corresponding to the property in the XLSX
+file.  In case *b.*, columns are created for the Properties of the record, where for each of the
+Properties the cases *a.* - *d.* are considered recursively.
+
+For case *d.* however, the two-dimensional structure of an XLSX sheet is not sufficient. Therefore,
+for such cases, *new* XLSX sheets/tables are created.
+
+In these sheets/tables, the referenced records are treated as described above (new columns for the
+Properties).  However, there are now additional columns that indicate from which "external" record
+these records are referenced.
+
+Let's now consider these four cases in detail and with examples:
+
+### a. Properties with Primitive Data Types ###
+
+```JSON
+{
+    "Training": {
+        "date": "2023-01-01",
+        "url": "www.indiscale.com",
+        "duration": 1.0,
+        "participants": 1,
+        "remote": false
+    }
+}
+```
+
+This entry will be represented in an XLSX sheet with the following content:
+
+| date       | url               | duration | participants | remote |
+|------------|-------------------|----------|--------------|--------|
+| 2023-01-01 | www.indiscale.com | 1.0      | 1            | false  |
+
+### b. Property referencing a record ###
+
+```JSON
+{
+    "Training": {
+        "date": "2023-01-01",
+        "supervisor": {
+            "family_name": "Stevenson",
+            "given_name": "Stevie",
+        }
+    }
+}
+```
+
+This entry will be represented in an XLSX sheet with the following content:
+
+| date       | `supervisor.family_name` | `supervisor.given_name` |
+|------------|--------------------------|-------------------------|
+| 2023-01-01 | Stevenson                | Stevie                  |
+
+Note that column names may be renamed. The mapping of columns to properties of records is ensured
+through the content of hidden rows.  (See below for the definition of hidden rows.)
+
+### c. Properties containing lists of primitive data types ###
+
+```JSON
+{
+    "Training": {
+        "url": "www.indiscale.com",
+        "subjects": ["Math", "Physics"],
+    }
+}
+```
+
+This entry would be represented in an XLSX sheet with the following content:
+
+| url               | subjects     |
+|-------------------|--------------|
+| www.indiscale.com | Math;Physics |
+
+The list elements are written into the cell separated by `;` (semicolon). If the elements contain
+the separator `;`, it is escaped with `\\`.
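+
+As a sketch of how such a cell can be split into its elements again (the helper function here is
+illustrative and not part of the library):
+
+```python
+import re
+
+def split_list_cell(value: str) -> list:
+    """Split a semicolon-separated cell value, honoring ``\\;`` escapes."""
+    # Split on ";" only where it is not preceded by a backslash.
+    parts = re.split(r"(?<!\\);", value)
+    # Undo the escaping inside the individual elements.
+    return [part.replace("\\;", ";") for part in parts]
+
+assert split_list_cell("Math;Physics") == ["Math", "Physics"]
+assert split_list_cell(r"Math\;Physics") == ["Math;Physics"]
+```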
+
+### d. Properties containing lists with references ###
+
+```JSON
+{
+    "Training": {
+        "date": "2023-01-01",
+        "coach": [
+            {
+              "family_name": "Sky",
+              "given_name": "Max",
+            },
+            {
+              "family_name": "Sky",
+              "given_name": "Min",
+            }
+        ]
+    }
+}
+```
+
+Since the two coaches cannot be represented properly in a single cell, another worksheet is needed
+to contain the properties of the coaches.
+
+The sheet for the Trainings in this example only contains the "date" column
+
+| date       |
+|------------|
+| 2023-01-01 |
+
+Additionally, there is *another* sheet where the coaches are stored. Here, it is crucial to define
+how the correct element is chosen from potentially multiple "Trainings". In this case, it means that
+the "date" must be unique.
+
+Note: This uniqueness requirement is not strictly checked right now, it is your responsibility as a
+user that such "foreign properties" are truly unique.
+
+The second sheet looks like this:
+
+| date       | `coach.family_name` | `coach.given_name` |
+|------------|---------------------|--------------------|
+| 2023-01-01 | Sky                 | Max                |
+| 2023-01-01 | Sky                 | Min                |
+
+## Data in XLSX: Hidden automation logic ##
+
+### First column: Marker for row types ###
+
+The first column in each sheet will be hidden and it will contain an entry in each row that needs
+special treatment.  The following values are used:
+
+- ``IGNORE``: This row is ignored.  It can be used for explanatory texts or layout.
+- ``COL_TYPE``: Typically the first row that is not `IGNORE`.  This row defines the type of each
+  column (`FOREIGN`, `SCALAR`, `LIST`, `IGNORE`) and may occur only once.
+- ``PATH``: Indicates that the row is used to define the path within the JSON.  These rows are
+  typically hidden for users.
+
+An example table could look like this:
+
+| `IGNORE`   |                                     | Welcome        | to this       | file!        |                    |
+| `IGNORE`   |                                     | Please         | enter your    | data here:   |                    |
+| `COL_TYPE` | `IGNORE`                            | `SCALAR`       | `SCALAR`      | `LIST`       | `SCALAR`           |
+| `PATH`     |                                     | `Training`     | `Training`    | `Training`   | `Training`         |
+| `PATH`     |                                     | `url`          | `date`        | `subjects`   | `supervisor`       |
+| `PATH`     |                                     |                |               |              | `email`            |
+| `IGNORE`   | Please enter one training per line. | Training URL   | Training date | Subjects     | Supervisor's email |
+|------------|-------------------------------------|----------------|---------------|--------------|--------------------|
+|            |                                     | example.com/mp | 2024-02-27    | Math;Physics | steve@example.com  |
+|            |                                     | example.com/m  | 2024-02-27    | Math         | stella@example.com |
+
+### Parsing XLSX data ###
+
+To extract the value of a given cell, we traverse all path elements (in ``PATH`` rows) from top to
+bottom. The final element of the path is the name of the Property to which the value belongs.  In
+the example above, `steve@example.com` is the value of the `email` Property in the path
+`["Training", "supervisor", "email"]`.
+
+The path elements are sufficient to identify the object within a JSON, at least if the corresponding
+JSON element is a single object. If the JSON element is an array, the appropriate object within the
+array needs to be selected.
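+
+Parsed in this way, the two data rows of the example table above correspond to JSON along these
+lines:
+
+```JSON
+{
+    "Training": [
+        {
+            "url": "example.com/mp",
+            "date": "2024-02-27",
+            "subjects": ["Math", "Physics"],
+            "supervisor": {"email": "steve@example.com"}
+        },
+        {
+            "url": "example.com/m",
+            "date": "2024-02-27",
+            "subjects": ["Math"],
+            "supervisor": {"email": "stella@example.com"}
+        }
+    ]
+}
+```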
+
+For this selection, additional ``FOREIGN`` columns are used. The paths in these columns must all have
+the same *base* and one additional *unique key* component.  For example, two `FOREIGN` columns could
+be `["Training", "date"]` and `["Training", "url"]`, where `["Training"]` is the *base path* and
+`"date"` and `"url"` are the *unique keys*.
+
+The base path defines the table (or recordtype) to which the entries belong, and the values of the
+unique keys define the actual rows to which data belongs.
+
+For example, this table defines three coaches for the two trainings from the last table:
+
+| `COL_TYPE` | `FOREIGN`             | `FOREIGN`             | `SCALAR`               |
+| `PATH`     | `Training`            | `Training`            | `Training`             |
+| `PATH`     | `date`                | `url`                 | `coach`                |
+| `PATH`     |                       |                       | `given_name`           |
+| `IGNORE`   | Date of training      | URL of training       | The coach's given name |
+| `IGNORE`   | from sheet 'Training' | from sheet 'Training' |                        |
+|------------|-----------------------|-----------------------|------------------------|
+|            | 2024-02-27            | example.com/mp        | Ada                    |
+|            | 2024-02-27            | example.com/mp        | Berta                  |
+|            | 2024-02-27            | example.com/m         | Chris                  |
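+
+Matching the ``date`` and ``url`` values against the rows of the 'Training' sheet, the coaches end
+up in the trainings like this:
+
+```JSON
+{
+    "Training": [
+        {
+            "url": "example.com/mp",
+            "date": "2024-02-27",
+            "coach": [{"given_name": "Ada"}, {"given_name": "Berta"}]
+        },
+        {
+            "url": "example.com/m",
+            "date": "2024-02-27",
+            "coach": [{"given_name": "Chris"}]
+        }
+    ]
+}
+```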
+
+## Current limitations ##
+
+The current implementation still lacks the following:
+
+- Lists of enum references are not yet implemented as columns where the matching cells can simply
+  be ticked/crossed.
+- File handling is not implemented yet.
diff --git a/src/doc/yaml_interface.rst b/src/doc/yaml_interface.rst
index ac3914385d31df5a306b3f8400fbcb6b005f17fa..48e27802e72177424fd2c6258492b283bbff0d3b 100644
--- a/src/doc/yaml_interface.rst
+++ b/src/doc/yaml_interface.rst
@@ -109,7 +109,6 @@ Keywords
   added as parents, and all Properties with at least the importance ``XXX`` are inherited.  For
   example, ``inherited_from_recommended`` will inherit all Properties of importance ``recommended``
   and ``obligatory``, but not ``suggested``.
-- **parent**: Parent of this entity. Same as ``inherit_from_obligatory``. (*Deprecated*) 
 
 Usage
 =====
diff --git a/tox.ini b/tox.ini
index 00548dea25c5017f1d0301a00a629c62d16631ef..a7e06bf51f1f4cad2a2c695e44d3a4d09020b2a3 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist=py37, py38, py39, py310, py311
+envlist=py38, py39, py310, py311, py312, py313
 skip_missing_interpreters = true
 
 [testenv]
diff --git a/unittests/table_json_conversion/__init__.py b/unittests/table_json_conversion/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/unittests/table_json_conversion/create_jsonschema.py b/unittests/table_json_conversion/create_jsonschema.py
new file mode 100755
index 0000000000000000000000000000000000000000..9585f5458edf8f9d3f785099295a3e675230932c
--- /dev/null
+++ b/unittests/table_json_conversion/create_jsonschema.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+
+# Copyright (C) 2023 IndiScale GmbH <www.indiscale.com>
+# Copyright (C) 2023 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""Create JSON-Schema according to configuration.
+
+"""
+
+import argparse
+import json
+from typing import List, Optional
+
+import caosadvancedtools.json_schema_exporter as jsex
+from caosadvancedtools.models import parser
+# import tomli
+
+
+def prepare_datamodel(modelfile, recordtypes: List[str], outfile: str,
+                      do_not_create: Optional[List[str]] = None):
+    if do_not_create is None:
+        do_not_create = []
+    model = parser.parse_model_from_yaml(modelfile)
+
+    exporter = jsex.JsonSchemaExporter(additional_properties=False,
+                                       # additional_options_for_text_props=additional_text_options,
+                                       # name_and_description_in_properties=True,
+                                       name_property_for_new_records=True,
+                                       do_not_create=do_not_create,
+                                       # do_not_retrieve=do_not_retrieve,
+                                       no_remote=True,
+                                       use_rt_pool=model,
+                                       )
+    schemas = []
+    for recordtype in recordtypes:
+        schemas.append(exporter.recordtype_to_json_schema(model.get_deep(recordtype)))
+    merged_schema = jsex.merge_schemas(schemas)
+
+    with open(outfile, mode="w", encoding="utf8") as json_file:
+        json.dump(merged_schema, json_file, ensure_ascii=False, indent=2)
+
+
+def _parse_arguments():
+    """Parse the arguments."""
+    arg_parser = argparse.ArgumentParser(description='')
+
+    return arg_parser.parse_args()
+
+
+def main():
+    """The main function of this script."""
+    _ = _parse_arguments()
+    prepare_datamodel("data/simple_model.yml", ["Training", "Person"], "data/simple_schema.json",
+                      do_not_create=["Organisation"])
+    prepare_datamodel("data/multiple_refs_model.yml", ["Training", "Person"],
+                      "data/multiple_refs_schema.json")
+    prepare_datamodel("data/indirect_model.yml", ["Wrapper"],
+                      "data/indirect_schema.json")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/unittests/table_json_conversion/data/error_simple_data.json b/unittests/table_json_conversion/data/error_simple_data.json
new file mode 100644
index 0000000000000000000000000000000000000000..bfea88b675ab2a6e0c1787fc401afec5c564c006
--- /dev/null
+++ b/unittests/table_json_conversion/data/error_simple_data.json
@@ -0,0 +1,11 @@
+{
+  "Training": {
+    "duration": 1.0,
+    "participants": 0.5
+  },
+  "Person": {
+    "family_name": "Auric",
+    "given_name": "Goldfinger",
+    "Organisation": "Federal Reserve"
+  }
+}
diff --git a/unittests/table_json_conversion/data/indirect_data.json b/unittests/table_json_conversion/data/indirect_data.json
new file mode 100644
index 0000000000000000000000000000000000000000..c77dd1ff2a703af6b6b2a0db19f450ac10616d9b
--- /dev/null
+++ b/unittests/table_json_conversion/data/indirect_data.json
@@ -0,0 +1,18 @@
+{
+  "Wrapper": {
+    "Results": [
+      {
+        "year": 2022,
+        "avg_score": 2.4
+      },
+      {
+        "year": 2023,
+        "avg_score": 4.2
+      }
+    ],
+    "Training": {
+      "name": "Basic Training",
+      "url": "www.example.com/training/basic"
+    }
+  }
+}
diff --git a/unittests/table_json_conversion/data/indirect_data.xlsx b/unittests/table_json_conversion/data/indirect_data.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..894ec95f87aa32a618b3b70504727398f2ce2358
Binary files /dev/null and b/unittests/table_json_conversion/data/indirect_data.xlsx differ
diff --git a/unittests/table_json_conversion/data/indirect_model.yml b/unittests/table_json_conversion/data/indirect_model.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2a7f4f98ff9a46478eb631e6990deceadc9a498c
--- /dev/null
+++ b/unittests/table_json_conversion/data/indirect_model.yml
@@ -0,0 +1,18 @@
+Training:
+  recommended_properties:
+    url:
+      datatype: TEXT
+      description: 'The URL'
+Results:
+  description: "Results for a training"
+  recommended_properties:
+    year:
+      datatype: INTEGER
+    avg_score:
+      description: The average score for the linked training.
+      datatype: DOUBLE
+Wrapper:
+  recommended_properties:
+    Training:
+    Results:
+      datatype: LIST<Results>
diff --git a/unittests/table_json_conversion/data/indirect_schema.json b/unittests/table_json_conversion/data/indirect_schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..64b6ff279c584456fe1f90454225d144c90e014d
--- /dev/null
+++ b/unittests/table_json_conversion/data/indirect_schema.json
@@ -0,0 +1,63 @@
+{
+  "type": "object",
+  "properties": {
+    "Wrapper": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": false,
+      "title": "Wrapper",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the Record to be created"
+        },
+        "Training": {
+          "type": "object",
+          "required": [],
+          "additionalProperties": false,
+          "title": "Training",
+          "properties": {
+            "name": {
+              "type": "string",
+              "description": "The name of the Record to be created"
+            },
+            "url": {
+              "type": "string",
+              "description": "The URL"
+            }
+          }
+        },
+        "Results": {
+          "description": "Results for a training",
+          "type": "array",
+          "items": {
+            "type": "object",
+            "required": [],
+            "additionalProperties": false,
+            "description": "Results for a training",
+            "title": "Results",
+            "properties": {
+              "name": {
+                "type": "string",
+                "description": "The name of the Record to be created"
+              },
+              "year": {
+                "type": "integer"
+              },
+              "avg_score": {
+                "description": "The average score for the linked training.",
+                "type": "number"
+              }
+            }
+          }
+        }
+      },
+      "$schema": "https://json-schema.org/draft/2020-12/schema"
+    }
+  },
+  "required": [
+    "Wrapper"
+  ],
+  "additionalProperties": false,
+  "$schema": "https://json-schema.org/draft/2020-12/schema"
+}
diff --git a/unittests/table_json_conversion/data/indirect_template.xlsx b/unittests/table_json_conversion/data/indirect_template.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..cc614acb75b36e10143a29f28dff9fce7d5e006f
Binary files /dev/null and b/unittests/table_json_conversion/data/indirect_template.xlsx differ
diff --git a/unittests/table_json_conversion/data/multiple_refs_data.json b/unittests/table_json_conversion/data/multiple_refs_data.json
new file mode 100644
index 0000000000000000000000000000000000000000..5b8ce9136635832111abb2206d8afe1bc7c58444
--- /dev/null
+++ b/unittests/table_json_conversion/data/multiple_refs_data.json
@@ -0,0 +1,48 @@
+{
+  "Training": {
+    "trainer": [],
+    "participant": [
+      {
+        "full_name": "Petra Participant",
+        "email": "petra@indiscale.com"
+      },
+      {
+        "full_name": "Peter",
+        "email": "peter@getlinkahead.com"
+      }
+    ],
+    "Organisation": [
+      {
+        "Person": [
+          {
+            "full_name": "Henry Henderson",
+            "email": "henry@organization.org"
+          },
+          {
+            "full_name": "Harry Hamburg",
+            "email": "harry@organization.org"
+          }
+        ],
+        "name": "World Training Organization",
+        "Country": "US"
+      },
+      {
+        "Person": [
+          {
+            "full_name": "Hermione Harvard",
+            "email": "hermione@organisation.org.uk"
+          },
+          {
+            "full_name": "Hazel Harper",
+            "email": "hazel@organisation.org.uk"
+          }
+        ],
+        "name": "European Training Organisation",
+        "Country": "UK"
+      }
+    ],
+    "date": "2024-03-21T14:12:00.000Z",
+    "url": "www.indiscale.com",
+    "name": "Example training with multiple organizations."
+  }
+}
diff --git a/unittests/table_json_conversion/data/multiple_refs_data.xlsx b/unittests/table_json_conversion/data/multiple_refs_data.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..21622ede9515b0cfa9f965f8c2ee89f782c4bf0c
Binary files /dev/null and b/unittests/table_json_conversion/data/multiple_refs_data.xlsx differ
diff --git a/unittests/table_json_conversion/data/multiple_refs_model.yml b/unittests/table_json_conversion/data/multiple_refs_model.yml
new file mode 100644
index 0000000000000000000000000000000000000000..576ff1d59215f893cb5dec1fd5a90c5975713c60
--- /dev/null
+++ b/unittests/table_json_conversion/data/multiple_refs_model.yml
@@ -0,0 +1,36 @@
+Person:
+  recommended_properties:
+    full_name:
+      datatype: TEXT
+    email:
+      datatype: TEXT
+Training:
+  recommended_properties:
+    date:
+      datatype: DATETIME
+      description: 'The date of the training.'
+    url:
+      datatype: TEXT
+      description: 'The URL'
+    trainer:
+      datatype: LIST<Person>
+    participant:
+      datatype: LIST<Person>
+    supervisor:
+      datatype: Person
+    responsible:
+      datatype: Person
+    Organisation:
+      datatype: LIST<Organisation>
+    supervisor_inherit:
+      inherit_from_suggested:
+        - Person
+    responsible_inherit:
+      inherit_from_suggested:
+        - Person
+Organisation:
+  recommended_properties:
+    Country:
+      datatype: TEXT
+    Person:
+      datatype: LIST<Person>
diff --git a/unittests/table_json_conversion/data/multiple_refs_schema.json b/unittests/table_json_conversion/data/multiple_refs_schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..2adec8de92b548ef97129074c7a24e9378118a4f
--- /dev/null
+++ b/unittests/table_json_conversion/data/multiple_refs_schema.json
@@ -0,0 +1,210 @@
+{
+  "type": "object",
+  "properties": {
+    "Training": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": false,
+      "title": "Training",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the Record to be created"
+        },
+        "date": {
+          "description": "The date of the training.",
+          "anyOf": [
+            {
+              "type": "string",
+              "format": "date"
+            },
+            {
+              "type": "string",
+              "format": "date-time"
+            }
+          ]
+        },
+        "url": {
+          "type": "string",
+          "description": "The URL"
+        },
+        "trainer": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "required": [],
+            "additionalProperties": false,
+            "title": "trainer",
+            "properties": {
+              "name": {
+                "type": "string",
+                "description": "The name of the Record to be created"
+              },
+              "full_name": {
+                "type": "string"
+              },
+              "email": {
+                "type": "string"
+              }
+            }
+          }
+        },
+        "participant": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "required": [],
+            "additionalProperties": false,
+            "title": "participant",
+            "properties": {
+              "name": {
+                "type": "string",
+                "description": "The name of the Record to be created"
+              },
+              "full_name": {
+                "type": "string"
+              },
+              "email": {
+                "type": "string"
+              }
+            }
+          }
+        },
+        "supervisor": {
+          "type": "object",
+          "required": [],
+          "additionalProperties": false,
+          "title": "supervisor",
+          "properties": {
+            "name": {
+              "type": "string",
+              "description": "The name of the Record to be created"
+            },
+            "full_name": {
+              "type": "string"
+            },
+            "email": {
+              "type": "string"
+            }
+          }
+        },
+        "responsible": {
+          "type": "object",
+          "required": [],
+          "additionalProperties": false,
+          "title": "responsible",
+          "properties": {
+            "name": {
+              "type": "string",
+              "description": "The name of the Record to be created"
+            },
+            "full_name": {
+              "type": "string"
+            },
+            "email": {
+              "type": "string"
+            }
+          }
+        },
+        "Organisation": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "required": [],
+            "additionalProperties": false,
+            "title": "Organisation",
+            "properties": {
+              "name": {
+                "type": "string",
+                "description": "The name of the Record to be created"
+              },
+              "Country": {
+                "type": "string"
+              },
+              "Person": {
+                "type": "array",
+                "items": {
+                  "type": "object",
+                  "required": [],
+                  "additionalProperties": false,
+                  "title": "Person",
+                  "properties": {
+                    "name": {
+                      "type": "string",
+                      "description": "The name of the Record to be created"
+                    },
+                    "full_name": {
+                      "type": "string"
+                    },
+                    "email": {
+                      "type": "string"
+                    }
+                  }
+                }
+              }
+            }
+          }
+        },
+        "supervisor_inherit": {
+          "type": "object",
+          "required": [],
+          "additionalProperties": false,
+          "title": "supervisor_inherit",
+          "properties": {
+            "name": {
+              "type": "string",
+              "description": "The name of the Record to be created"
+            },
+            "full_name": {
+              "type": "string"
+            },
+            "email": {
+              "type": "string"
+            }
+          }
+        },
+        "responsible_inherit": {
+          "type": "object",
+          "required": [],
+          "additionalProperties": false,
+          "title": "responsible_inherit",
+          "properties": {
+            "name": {
+              "type": "string",
+              "description": "The name of the Record to be created"
+            },
+            "full_name": {
+              "type": "string"
+            },
+            "email": {
+              "type": "string"
+            }
+          }
+        }
+      },
+      "$schema": "https://json-schema.org/draft/2020-12/schema"
+    },
+    "Person": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": false,
+      "title": "Person",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the Record to be created"
+        },
+        "full_name": {
+          "type": "string"
+        },
+        "email": {
+          "type": "string"
+        }
+      },
+      "$schema": "https://json-schema.org/draft/2020-12/schema"
+    }
+  },
+  "required": [],
+  "additionalProperties": false,
+  "$schema": "https://json-schema.org/draft/2020-12/schema"
+}
diff --git a/unittests/table_json_conversion/data/multiple_refs_template.xlsx b/unittests/table_json_conversion/data/multiple_refs_template.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..cff3dad99a3c296e360d660ed5178a0eee48cd40
Binary files /dev/null and b/unittests/table_json_conversion/data/multiple_refs_template.xlsx differ
diff --git a/unittests/table_json_conversion/data/simple_data.json b/unittests/table_json_conversion/data/simple_data.json
new file mode 100644
index 0000000000000000000000000000000000000000..9997f17e76a46d5e97d842fdee40626047e7a347
--- /dev/null
+++ b/unittests/table_json_conversion/data/simple_data.json
@@ -0,0 +1,32 @@
+{
+  "Training": {
+    "date": "2023-01-01",
+    "url": "www.indiscale.com",
+    "coach": [
+      {
+        "family_name": "Sky",
+        "given_name": "Max",
+        "Organisation": "ECB"
+      },
+      {
+        "family_name": "Sky",
+        "given_name": "Min",
+        "Organisation": "ECB"
+      }
+    ],
+    "supervisor": {
+      "family_name": "Steve",
+      "given_name": "Stevie",
+            "Organisation": "IMF"
+    },
+    "duration": 1.0,
+    "participants": 1,
+    "subjects": ["Math", "Physics"],
+    "remote": false
+  },
+  "Person": {
+    "family_name": "Steve",
+    "given_name": "Stevie",
+    "Organisation": "IMF"
+  }
+}
diff --git a/unittests/table_json_conversion/data/simple_data.xlsx b/unittests/table_json_conversion/data/simple_data.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..662a636603944a91cbaeaea8cc0c3bbab12f2f50
Binary files /dev/null and b/unittests/table_json_conversion/data/simple_data.xlsx differ
diff --git a/unittests/table_json_conversion/data/simple_data_ascii_chars.json b/unittests/table_json_conversion/data/simple_data_ascii_chars.json
new file mode 100644
index 0000000000000000000000000000000000000000..b1d13ebee5d6e3949fa606a130e6f5819bfc4bc8
--- /dev/null
+++ b/unittests/table_json_conversion/data/simple_data_ascii_chars.json
@@ -0,0 +1,18 @@
+{
+  "Training": {
+    "date": "2023-01-01",
+    "url": "char: >\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009<",
+    "subjects": [
+      ">\u000a\u000b\u000c\u000e\u000f<",
+      ">\u0010\u0011\u0012\u0013\u0014\u0015\u0016\u0017<",
+      ">\u0018\u0019\u001a\u001b\u001c\u001d\u001e\u001f<",
+      ">\u0020\u0021\u0022\u0023\u0024\u0025\u0026\u0027<",
+      ">\u0028\u0029\u002a\u002b\u002c\u002d\u002e\u002f<"
+    ]
+  },
+  "Person": {
+    "family_name": "Steve",
+    "given_name": "Stevie",
+    "Organisation": "IMF"
+  }
+}
diff --git a/unittests/table_json_conversion/data/simple_data_ascii_chars.xlsx b/unittests/table_json_conversion/data/simple_data_ascii_chars.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..bdf60b568bf51726a0a25e599bb6c7e70988d287
Binary files /dev/null and b/unittests/table_json_conversion/data/simple_data_ascii_chars.xlsx differ
diff --git a/unittests/table_json_conversion/data/simple_model.yml b/unittests/table_json_conversion/data/simple_model.yml
new file mode 100644
index 0000000000000000000000000000000000000000..74fb5bc5dc4251bb3834ea2f6201f991cab510d1
--- /dev/null
+++ b/unittests/table_json_conversion/data/simple_model.yml
@@ -0,0 +1,36 @@
+Person:
+  recommended_properties:
+    family_name:
+      datatype: TEXT
+    given_name:
+      datatype: TEXT
+    Organisation:
+Training:
+  recommended_properties:
+    date:
+      datatype: DATETIME
+      description: 'The date of the training.'
+    url:
+      datatype: TEXT
+      description: 'The URL'
+    subjects:
+      datatype: LIST<TEXT>
+    coach:
+      datatype: LIST<Person>
+    supervisor:
+      datatype: Person
+    duration:
+      datatype: DOUBLE
+    participants:
+      datatype: INTEGER
+    remote:
+      datatype: BOOLEAN
+    slides:
+      datatype: FILE
+ProgrammingCourse:
+  inherit_from_suggested:
+    - Training
+Organisation:
+  recommended_properties:
+    Country:
+      datatype: TEXT
diff --git a/unittests/table_json_conversion/data/simple_schema.json b/unittests/table_json_conversion/data/simple_schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..01a732d67758ed52a334ff7076778a45f8850f95
--- /dev/null
+++ b/unittests/table_json_conversion/data/simple_schema.json
@@ -0,0 +1,139 @@
+{
+  "type": "object",
+  "properties": {
+    "Training": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": false,
+      "title": "Training",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the Record to be created"
+        },
+        "date": {
+          "description": "The date of the training.",
+          "anyOf": [
+            {
+              "type": "string",
+              "format": "date"
+            },
+            {
+              "type": "string",
+              "format": "date-time"
+            }
+          ]
+        },
+        "url": {
+          "type": "string",
+          "description": "The URL"
+        },
+        "subjects": {
+          "type": "array",
+          "items": {
+            "type": "string"
+          }
+        },
+        "coach": {
+          "type": "array",
+          "items": {
+            "type": "object",
+            "required": [],
+            "additionalProperties": false,
+            "title": "coach",
+            "properties": {
+              "name": {
+                "type": "string",
+                "description": "The name of the Record to be created"
+              },
+              "family_name": {
+                "type": "string"
+              },
+              "given_name": {
+                "type": "string"
+              },
+              "Organisation": {
+                "enum": [
+                  "Federal Reserve",
+                  "IMF",
+                  "ECB"
+                ]
+              }
+            }
+          }
+        },
+        "supervisor": {
+          "type": "object",
+          "required": [],
+          "additionalProperties": false,
+          "title": "supervisor",
+          "properties": {
+            "name": {
+              "type": "string",
+              "description": "The name of the Record to be created"
+            },
+            "family_name": {
+              "type": "string"
+            },
+            "given_name": {
+              "type": "string"
+            },
+            "Organisation": {
+              "enum": [
+                "Federal Reserve",
+                "IMF",
+                "ECB"
+              ]
+            }
+          }
+        },
+        "duration": {
+          "type": "number"
+        },
+        "participants": {
+          "type": "integer"
+        },
+        "remote": {
+          "type": "boolean"
+        },
+        "slides": {
+          "type": "string",
+          "format": "data-url"
+        }
+      },
+      "$schema": "https://json-schema.org/draft/2020-12/schema"
+    },
+    "Person": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": false,
+      "title": "Person",
+      "properties": {
+        "name": {
+          "type": "string",
+          "description": "The name of the Record to be created"
+        },
+        "family_name": {
+          "type": "string"
+        },
+        "given_name": {
+          "type": "string"
+        },
+        "Organisation": {
+          "enum": [
+            "Federal Reserve",
+            "IMF",
+            "ECB"
+          ]
+        }
+      },
+      "$schema": "https://json-schema.org/draft/2020-12/schema"
+    }
+  },
+  "required": [
+    "Training",
+    "Person"
+  ],
+  "additionalProperties": false,
+  "$schema": "https://json-schema.org/draft/2020-12/schema"
+}
diff --git a/unittests/table_json_conversion/data/simple_template.xlsx b/unittests/table_json_conversion/data/simple_template.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..1162965bf44642a4523123fa52c58dd240b25e5f
Binary files /dev/null and b/unittests/table_json_conversion/data/simple_template.xlsx differ
diff --git a/unittests/table_json_conversion/how_to_schema.md b/unittests/table_json_conversion/how_to_schema.md
new file mode 100644
index 0000000000000000000000000000000000000000..a7b4e3ca35a1fc9e67ebbb29f316825e89596f4a
--- /dev/null
+++ b/unittests/table_json_conversion/how_to_schema.md
@@ -0,0 +1,19 @@
+First, insert the data model into a LinkAhead server.
+
+Then run the following code to export the record types as JSON schemas and
+merge them into a single schema:
+
+```python
+import json
+
+import caosadvancedtools.json_schema_exporter as jsex
+from caosadvancedtools.models import parser
+
+model = parser.parse_model_from_yaml("./model.yml")
+
+exporter = jsex.JsonSchemaExporter(additional_properties=False,
+                                   # additional_options_for_text_props=additional_text_options,
+                                   # name_and_description_in_properties=True,
+                                   # do_not_create=do_not_create,
+                                   # do_not_retrieve=do_not_retrieve,
+                                   )
+schema_top = exporter.recordtype_to_json_schema(model.get_deep("Training"))
+schema_pers = exporter.recordtype_to_json_schema(model.get_deep("Person"))
+merged_schema = jsex.merge_schemas([schema_top, schema_pers])
+
+with open("model_schema.json", mode="w", encoding="utf8") as json_file:
+    json.dump(merged_schema, json_file, ensure_ascii=False, indent=2)
+```
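+
+The merged schema can then be turned into an XLSX template.  This is a minimal
+sketch mirroring the usage in the unit tests; the foreign keys chosen for
+`Training` (`date` and `url`) are just an example:
+
+```python
+from caosadvancedtools.table_json_conversion.table_generator import XLSXTemplateGenerator
+
+generator = XLSXTemplateGenerator()
+generator.generate(schema=merged_schema,
+                   foreign_keys={"Training": {"__this__": ["date", "url"]}},
+                   filepath="template.xlsx")
+```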
diff --git a/unittests/table_json_conversion/test_fill_xlsx.py b/unittests/table_json_conversion/test_fill_xlsx.py
new file mode 100644
index 0000000000000000000000000000000000000000..946336da721f7c9affd5c553ccbb38cb46217eef
--- /dev/null
+++ b/unittests/table_json_conversion/test_fill_xlsx.py
@@ -0,0 +1,154 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import json
+import os
+import re
+import tempfile
+from typing import Optional
+
+import jsonschema.exceptions as schema_exc
+import pytest
+from caosadvancedtools.table_json_conversion.fill_xlsx import (
+    _get_path_rows, _get_row_type_column_index, fill_template)
+from openpyxl import load_workbook
+
+from .utils import compare_workbooks
+
+
+def rfp(*pathcomponents):
+    """
+    Return full path.
+    Shorthand convenience function.
+    """
+    return os.path.join(os.path.dirname(__file__), *pathcomponents)
+
+
+def fill_and_compare(json_file: str, template_file: str, known_good: str,
+                     schema: Optional[str] = None, custom_output: Optional[str] = None):
+    """Fill the data into a template and compare to a known good.
+
+Parameters
+----------
+schema: str, optional
+  JSON schema to validate against.
+custom_output: str, optional
+  If given, write the result to this file instead of a temporary one.  For
+  development only.
+    """
+    with tempfile.TemporaryDirectory() as tmpdir:
+        outfile = os.path.join(tmpdir, 'test.xlsx')
+        assert not os.path.exists(outfile)
+        if custom_output is not None:
+            outfile = custom_output
+        fill_template(data=json_file, template=template_file, result=outfile,
+                      validation_schema=schema)
+        assert os.path.exists(outfile)
+        generated = load_workbook(outfile)  # workbook can be read
+    known_good_wb = load_workbook(known_good)
+    compare_workbooks(generated, known_good_wb)
+
+
+def test_detect():
+    example = load_workbook(rfp("data/simple_template.xlsx"))
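+    # Column 0 holds the row-type markers; rows 1 and 2 hold the path definitions.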
+    assert 0 == _get_row_type_column_index(example['Person'])
+    assert [1, 2] == _get_path_rows(example['Person'])
+
+
+def test_temporary():
+    # TODO: remove the following after manual testing
+    di = '/home/henrik/CaosDB/management/external/dimr/eingabemaske/crawler/schemas'
+    dd = '/home/henrik/CaosDB/management/external/dimr/eingabemaske/django/laforms/persistent/'
+    if not os.path.exists(dd):
+        return
+    alreadydone = [
+                "Präventionsmaßnahmen",
+                "Beratungsstellen",
+                "Schutzeinrichtungen",
+                "Einzelfallversorgung",
+                "Strategiedokumente",
+                "Kooperationsvereinbarungen",
+                "Gremien",
+                "Verwaltungsvorschriften",
+                "Gewaltschutzkonzepte und -maßnahmen",
+                "Polizeilicher Opferschutz",
+                "Feedback",
+                ]
+    for prefix, _, files in os.walk(dd):
+        for fi in files:
+            match = re.match(r"(?P<teilb>.*)_2024-.*\.json", fi)
+
+            if match:
+                print(match.group('teilb'))
+                tb = match.group('teilb')
+                if tb in alreadydone:
+                    continue
+                # alreadydone.append(tb)
+                template = os.path.join(di, "template_"+tb+".xlsx")
+                schema = os.path.join(di, "schema_"+tb+".json")
+                assert os.path.exists(template), f"Missing template: {template}"
+                jfi = os.path.join(prefix, fi)
+                print(jfi)
+                if not fi.startswith("Art"):
+                    continue
+                # if jfi != "/home/henrik/CaosDB/management/external/dimr/eingabemaske/django/laforms/persistent/data/datenhalterin_gg/he_gg_2/Art__13_Bewusstseinsbildung_2024-01-11T10:22:26.json":
+                    # continue
+                with open(jfi, encoding="utf-8") as infile:
+                    data = json.load(infile)
+                    data = data["form_data"]
+                    if "__version__" in data:
+                        del data["__version__"]
+                with tempfile.TemporaryDirectory() as tmpdir:
+                    outfile = os.path.join(tmpdir, 'test.xlsx')
+                    fill_template(data=data, template=template, result=outfile,
+                                  validation_schema=schema)
+                    os.system(f'libreoffice {outfile}')
+
+
+def test_fill_xlsx():
+    fill_and_compare(json_file=rfp("data/simple_data.json"),
+                     template_file=rfp("data/simple_template.xlsx"),
+                     known_good=rfp("data/simple_data.xlsx"),
+                     schema=rfp("data/simple_schema.json"))
+    fill_and_compare(json_file=rfp("data/multiple_refs_data.json"),
+                     template_file=rfp("data/multiple_refs_template.xlsx"),
+                     known_good=rfp("data/multiple_refs_data.xlsx"),
+                     schema=rfp("data/multiple_refs_schema.json"))
+    fill_and_compare(json_file=rfp("data/indirect_data.json"),
+                     template_file=rfp("data/indirect_template.xlsx"),
+                     known_good=rfp("data/indirect_data.xlsx"),
+                     schema=rfp("data/indirect_schema.json"))
+    fill_and_compare(json_file=rfp("data/simple_data_ascii_chars.json"),
+                     template_file=rfp("data/simple_template.xlsx"),
+                     known_good=rfp("data/simple_data_ascii_chars.xlsx"),
+                     schema=rfp("data/simple_schema.json"))
+
+
+def test_errors():
+    with pytest.raises(AssertionError) as exc:
+        fill_and_compare(json_file=rfp("data/error_simple_data.json"),
+                         template_file=rfp("data/simple_template.xlsx"),
+                         known_good=rfp("data/simple_data.xlsx"))
+    assert "Auric\nSteve" in str(exc.value)
+    with pytest.raises(schema_exc.ValidationError) as exc:
+        fill_and_compare(json_file=rfp("data/error_simple_data.json"),
+                         template_file=rfp("data/simple_template.xlsx"),
+                         known_good=rfp("data/simple_data.xlsx"),
+                         schema=rfp("data/simple_schema.json"))
+    assert exc.value.message == "0.5 is not of type 'integer'"
diff --git a/unittests/table_json_conversion/test_table_template_generator.py b/unittests/table_json_conversion/test_table_template_generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..8fc7b216d0eb2aa54ece6ace986cbeb227cc3e45
--- /dev/null
+++ b/unittests/table_json_conversion/test_table_template_generator.py
@@ -0,0 +1,279 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import json
+import os
+import tempfile
+from typing import Optional, Tuple
+
+import pytest
+from caosadvancedtools.table_json_conversion.table_generator import (
+    ColumnType, XLSXTemplateGenerator)
+from openpyxl import load_workbook
+
+from .utils import compare_workbooks
+
+
+def rfp(*pathcomponents):
+    """
+    Return full path.
+    Shorthand convenience function.
+    """
+    return os.path.join(os.path.dirname(__file__), *pathcomponents)
+
+
+def _compare_generated_to_known_good(schema_file: str, known_good: str,
+                                     foreign_keys: Optional[dict] = None,
+                                     outfile: Optional[str] = None) -> Tuple:
+    """Generate an XLSX from the schema, then compare to known good output.
+
+Returns
+-------
+out: tuple
+  The generated and known good workbook objects.
+    """
+    generator = XLSXTemplateGenerator()
+    if foreign_keys is None:
+        foreign_keys = {}
+    with open(schema_file, encoding="utf-8") as schema_input:
+        schema = json.load(schema_input)
+    with tempfile.TemporaryDirectory() as tmpdir:
+        if outfile is None:
+            outpath = os.path.join(tmpdir, 'generated.xlsx')
+        else:
+            outpath = outfile
+        assert not os.path.exists(outpath)
+        generator.generate(schema=schema,
+                           foreign_keys=foreign_keys,
+                           filepath=outpath)
+        assert os.path.exists(outpath)
+        generated = load_workbook(outpath)
+    good = load_workbook(known_good)
+    compare_workbooks(generated, good)
+    return generated, good
+
+
+def test_generate_sheets_from_schema():
+    # trivial case; we do not support this
+    schema = {}
+    generator = XLSXTemplateGenerator()
+    with pytest.raises(ValueError, match="Inappropriate JSON schema:.*"):
+        generator._generate_sheets_from_schema(schema)
+
+    # top level must be RT with Properties
+    schema = {
+        "type": "string"
+    }
+    with pytest.raises(ValueError, match="Inappropriate JSON schema:.*"):
+        generator._generate_sheets_from_schema(schema)
+
+    # bad type
+    schema = {
+        "type": "object",
+        "properties": {
+            "Training": {
+                "type": "object",
+                "properties": {
+                    "name": {
+                        "type": "str",
+                        "description": "The name of the Record to be created"
+                    },
+                }
+            }
+        }
+    }
+    with pytest.raises(ValueError,
+                       match="Inappropriate JSON schema: The following part "
+                       "should define an object.*"):
+        generator._generate_sheets_from_schema(schema, {'Training': ['a']})
+
+    # bad schema
+    schema = {
+        "type": "object",
+        "properties": {
+            "Training": {
+                "type": "object"
+            }
+        }
+    }
+    with pytest.raises(ValueError,
+                       match="Inappropriate JSON schema: The following part "
+                       "should define an object.*"):
+        generator._generate_sheets_from_schema(schema, {'Training': ['a']})
+
+    # minimal case: one RT with one P
+    schema = {
+        "type": "object",
+        "properties": {
+            "Training": {
+                "type": "object",
+                "properties": {
+                    "name": {
+                        "type": "string",
+                        "description": "The name of the Record to be created"
+                    },
+                }
+            }
+        }
+    }
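+    # Sheet definitions map column names to (ColumnType, description, path) tuples.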
+    sdef = generator._generate_sheets_from_schema(schema, {'Training': ['a']})
+    assert "Training" in sdef
+    tdef = sdef['Training']
+    assert 'name' in tdef
+    assert tdef['name'] == (ColumnType.SCALAR, "The name of the Record to be created", ["Training", 'name'])
+
+    # example case
+    with open(rfp("data/simple_schema.json")) as sfi:
+        schema = json.load(sfi)
+    with pytest.raises(ValueError, match="A foreign key definition is missing.*"):
+        generator._generate_sheets_from_schema(schema)
+    sdef = generator._generate_sheets_from_schema(
+        schema,
+        foreign_keys={'Training': {"__this__": ['date', 'url']}})
+    assert "Training" in sdef
+    tdef = sdef['Training']
+    assert tdef['date'] == (ColumnType.SCALAR, 'The date of the training.', ["Training", 'date'])
+    assert tdef['url'] == (ColumnType.SCALAR, 'The URL', ["Training", 'url'])
+    assert tdef['supervisor.family_name'] == (ColumnType.SCALAR, None, ["Training", 'supervisor',
+                                                                        'family_name'])
+    assert tdef['supervisor.given_name'] == (ColumnType.SCALAR, None, ["Training", 'supervisor',
+                                                                       'given_name'])
+    assert tdef['supervisor.Organisation'] == (ColumnType.SCALAR, None, ["Training", 'supervisor',
+                                                                         'Organisation'])
+    assert tdef['duration'] == (ColumnType.SCALAR, None, ["Training", 'duration'])
+    assert tdef['participants'] == (ColumnType.SCALAR, None, ["Training", 'participants'])
+    assert tdef['subjects'] == (ColumnType.LIST, None, ["Training", 'subjects'])
+    assert tdef['remote'] == (ColumnType.SCALAR, None, ["Training", 'remote'])
+    cdef = sdef['Training.coach']
+    assert cdef['family_name'] == (ColumnType.SCALAR, None, ["Training", 'coach', 'family_name'])
+    assert cdef['given_name'] == (ColumnType.SCALAR, None, ["Training", 'coach', 'given_name'])
+    assert cdef['Organisation'] == (ColumnType.SCALAR, None, ["Training", 'coach',
+                                                              'Organisation'])
+    assert cdef['Training.date'] == (ColumnType.FOREIGN, "see sheet 'Training'", ["Training", 'date'])
+    assert cdef['Training.url'] == (ColumnType.FOREIGN, "see sheet 'Training'", ["Training", 'url'])
+
+
+def test_get_foreign_keys():
+    generator = XLSXTemplateGenerator()
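+    # Foreign key definitions may be a plain list of keys, or a dict with a
+    # "__this__" entry for the current path level.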
+    fkd = {"Training": ['a']}
+    assert ['a'] == generator._get_foreign_keys(fkd, ['Training'])
+
+    fkd = {"Training": {"__this__": ['a']}}
+    assert ['a'] == generator._get_foreign_keys(fkd, ['Training'])
+
+    fkd = {"Training": {'hallo'}}
+    with pytest.raises(ValueError, match=r"A foreign key definition is missing for path:\n\["
+                       r"'Training'\]\nKeys are:\n{'Training': \{'hallo'\}\}"):
+        generator._get_foreign_keys(fkd, ['Training'])
+
+    fkd = {"Training": {"__this__": ['a'], 'b': ['c']}}
+    assert ['c'] == generator._get_foreign_keys(fkd, ['Training', 'b'])
+
+    with pytest.raises(ValueError, match=r"A foreign key definition is missing for .*"):
+        generator._get_foreign_keys({}, ['Training'])
+
+
+def test_get_max_path_length():
+    assert 4 == XLSXTemplateGenerator._get_max_path_length({'a': (1, 'desc', [1, 2, 3]),
+                                                            'b': (2, 'desc', [1, 2, 3, 4])})
+
+
+def test_template_generator():
+    generated, _ = _compare_generated_to_known_good(
+        schema_file=rfp("data/simple_schema.json"), known_good=rfp("data/simple_template.xlsx"),
+        foreign_keys={'Training': {"__this__": ['date', 'url']}},
+        outfile=None)
+    # The helper row and column (paths and row types) should be hidden.
+    ws = generated.active
+    assert ws.row_dimensions[1].hidden is True
+    assert ws.column_dimensions['A'].hidden is True
+
+    # TODO: remove the following after manual testing
+    di = '/home/henrik/CaosDB/management/external/dimr/eingabemaske/crawler/schemas'
+    if not os.path.exists(di):
+        return
+    for fi in os.listdir(di):
+        rp = os.path.join(di, fi)
+        if not fi.startswith("schema_"):
+            continue
+        with open(rp) as sfi:
+            schema = json.load(sfi)
+        fk_path = os.path.join(di, "foreign_keys"+fi[len('schema'):])
+        path = os.path.join(di, "template"+fi[len('schema'):-4]+"xlsx")
+        alreadydone = [
+            "Präventionsmaßnahmen",
+            "Beratungsstellen",
+            "Schutzeinrichtungen",
+            "Einzelfallversorgung",
+            "Strategiedokumente",
+            "Kooperationsvereinbarungen",
+            "Gremien",
+            "Verwaltungsvorschriften",
+            "Gewaltschutzkonzepte und -maßnahmen",
+            "Polizeilicher Opferschutz",
+            "Feedback",
+        ]
+        if any(os.path.basename(path).startswith("template_" + k) for k in alreadydone):
+            continue
+
+        assert os.path.exists(fk_path), f"No foreign keys file for:\n{fk_path}"
+        with open(fk_path) as sfi:
+            fk = json.load(sfi)
+        generator = XLSXTemplateGenerator()
+        if not os.path.exists(path):
+            generator.generate(schema=schema, foreign_keys=fk, filepath=path)
+            os.system(f'libreoffice {path}')
+        else:
+            print(f"Not creating template because it exists:\n{path}")
+
+    # TODO test collisions of sheet or colnames
+    # TODO test escaping of values
+
+    # TODO finish enum example
+
+
+def test_model_with_multiple_refs():
+    _compare_generated_to_known_good(
+        schema_file=rfp("data/multiple_refs_schema.json"),
+        known_good=rfp("data/multiple_refs_template.xlsx"),
+        foreign_keys={"Training": {"__this__": ["date", "url"],
+                                   "Organisation": ["name"]}},
+        outfile=None)
+
+
+def test_model_with_indirect_reference():
+    _compare_generated_to_known_good(
+        schema_file=rfp("data/indirect_schema.json"),
+        known_good=rfp("data/indirect_template.xlsx"),
+        foreign_keys={"Wrapper": ["Training.name", "Training.url"]},
+        outfile=None)
+
+
+def test_exceptions():
+    # Foreign keys must be lists
+    with pytest.raises(ValueError, match="Foreign keys must be a list of strings, but a single "
+                       r"string was given:\n\['Wrapper'\] -> name"):
+        _compare_generated_to_known_good(
+            schema_file=rfp("data/indirect_schema.json"),
+            known_good=rfp("data/multiple_refs_template.xlsx"),
+            foreign_keys={"Wrapper": {"__this__": "name"}},
+            outfile=None)
diff --git a/unittests/table_json_conversion/utils.py b/unittests/table_json_conversion/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c32117c1296e686290ad75bf5f704a1abfb2547
--- /dev/null
+++ b/unittests/table_json_conversion/utils.py
@@ -0,0 +1,54 @@
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""Utilities for the tests.
+"""
+
+from openpyxl import Workbook
+
+
+def compare_workbooks(wb1: Workbook, wb2: Workbook, hidden: bool = True):
+    """Compare two workbooks for equal content.
+
+Raises an error if differences are found.
+
+Parameters
+----------
+
+hidden: bool, optional
+  Test if the "hidden" status of rows and columns is the same.
+    """
+    assert wb1.sheetnames == wb2.sheetnames, (
+        f"Sheet names are different: \n{wb1.sheetnames}\n   !=\n{wb2.sheetnames}"
+    )
+    for sheetname in wb2.sheetnames:
+        sheet_1 = wb1[sheetname]
+        sheet_2 = wb2[sheetname]
+        # Note: openpyxl's row_dimensions are 1-based, so enumerate from 1.
+        for irow, (row1, row2) in enumerate(
+                zip(sheet_1.iter_rows(), sheet_2.iter_rows()), start=1):
+            if hidden:
+                assert (sheet_1.row_dimensions[irow].hidden
+                        == sheet_2.row_dimensions[irow].hidden), f"hidden row: {sheetname}, {irow}"
+            for icol, (cell1, cell2) in enumerate(zip(row1, row2)):
+                if hidden:
+                    assert (sheet_1.column_dimensions[cell1.column_letter].hidden
+                            == sheet_2.column_dimensions[cell2.column_letter].hidden), (
+                                f"hidden col: {sheetname}, {icol}")
+                assert cell1.value == cell2.value, (
+                    f"Sheet: {sheetname}, cell: {cell1.coordinate}, Values: \n"
+                    f"{cell1.value}\n{cell2.value}"
+                )
diff --git a/unittests/test_json_schema_exporter.py b/unittests/test_json_schema_exporter.py
index f0503385a25eb89e66dd3518d71a32b91d07bf88..1cea2f58e42d28545b8354d0f821ab2c61e7a4f8 100644
--- a/unittests/test_json_schema_exporter.py
+++ b/unittests/test_json_schema_exporter.py
@@ -58,6 +58,14 @@ RT21 = GLOBAL_MODEL.get_deep("RT21")
 RT31 = GLOBAL_MODEL.get_deep("RT31")
 
 
+def _make_unique(entity, unique: bool):
+    """Mock the `unique` behavior of execute_query().
+    """
+    if unique:
+        return entity
+    return db.Container().append(entity)
+
+
 def _mock_execute_query(query_string, unique=False, **kwargs):
     """Mock the response to queries for references."""
     all_records = db.Container()
@@ -83,20 +91,20 @@ def _mock_execute_query(query_string, unique=False, **kwargs):
 
     if query_string == "SELECT name, id FROM RECORD 'OtherType'":
         return other_type_records
-    elif query_string == "FIND RECORDTYPE WITH name='OtherType'" and unique is True:
-        return other_type_rt
+    elif query_string == "FIND RECORDTYPE WITH name='OtherType'":
+        return _make_unique(other_type_rt, unique)
     elif query_string == "SELECT name, id FROM RECORD 'ReferencingType'":
         return referencing_type_records
-    elif query_string == "FIND RECORDTYPE WITH name='ReferencingType'" and unique is True:
-        return referencing_type_rt
+    elif query_string == "FIND RECORDTYPE WITH name='ReferencingType'":
+        return _make_unique(referencing_type_rt, unique)
     elif query_string == "SELECT name, id FROM RECORD 'RT1'":
         return referencing_type_records  # wrong types, but who cares for the test?
-    elif query_string == "FIND RECORDTYPE WITH name='RT1'" and unique is True:
-        return RT1
-    elif query_string == "FIND RECORDTYPE WITH name='RT21'" and unique is True:
-        return RT21
-    elif query_string == "FIND RECORDTYPE WITH name='RT31'" and unique is True:
-        return RT31
+    elif query_string == "FIND RECORDTYPE WITH name='RT1'":
+        return _make_unique(RT1, unique)
+    elif query_string == "FIND RECORDTYPE WITH name='RT21'":
+        return _make_unique(RT21, unique)
+    elif query_string == "FIND RECORDTYPE WITH name='RT31'":
+        return _make_unique(RT31, unique)
     elif query_string == "SELECT name, id FROM RECORD":
         return all_records
     elif query_string == "SELECT name, id FROM FILE":
@@ -260,7 +268,7 @@ def test_units():
     assert "ListWithUnit" in props
     assert props["ListWithUnit"]["type"] == "array"
     assert "items" in props["ListWithUnit"]
-    assert props["ListWithUnit"]["items"]["type"] == "number"
+    assert props["ListWithUnit"]["items"]["type"] == ["number", "null"]
     assert "description" in props["ListWithUnit"]
     assert props["ListWithUnit"]["description"] == "This is a list. Unit is m."
     assert "unit" not in props["ListWithUnit"]
@@ -277,7 +285,7 @@ def test_units():
     assert "ListWithUnit" in props
     assert props["ListWithUnit"]["type"] == "array"
     assert "items" in props["ListWithUnit"]
-    assert props["ListWithUnit"]["items"]["type"] == "number"
+    assert props["ListWithUnit"]["items"]["type"] == ["number", "null"]
     assert "description" in props["ListWithUnit"]
     assert props["ListWithUnit"]["description"] == "This is a list."
     assert "unit" in props["ListWithUnit"]
@@ -298,14 +306,14 @@ def test_rt_with_list_props():
     assert "ListOfIntegers" in props
     assert props["ListOfIntegers"]["type"] == "array"
     assert "items" in props["ListOfIntegers"]
-    assert props["ListOfIntegers"]["items"]["type"] == "integer"
+    assert props["ListOfIntegers"]["items"]["type"] == ["integer", "null"]
     assert "description" not in props["ListOfIntegers"]["items"]
     assert props["ListOfIntegers"]["description"] == "List of integers"
 
     assert "ListOfPatterns" in props
     assert props["ListOfPatterns"]["type"] == "array"
     assert "items" in props["ListOfPatterns"]
-    assert props["ListOfPatterns"]["items"]["type"] == "string"
+    assert props["ListOfPatterns"]["items"]["type"] == ["string", "null"]
     assert props["ListOfPatterns"]["items"]["pattern"] == "[A-Z]+"
 
     # Validation
@@ -333,6 +341,7 @@ def test_rt_with_list_props():
 
 
 @patch("linkahead.execute_query", new=Mock(side_effect=_mock_execute_query))
+@patch("linkahead.cached.execute_query", new=Mock(side_effect=_mock_execute_query))
 def test_rt_with_references():
 
     rt = db.RecordType()
@@ -566,8 +575,7 @@ def test_rt_with_references():
     assert "items" in schema["properties"]["FileProp"]
     items = schema["properties"]["FileProp"]["items"]
     assert items["type"] == "object"
-    assert len(items["required"]) == 1
-    assert "file" in items["required"]
+    assert len(items["required"]) == 0
     assert items["additionalProperties"] is False
     assert len(items["properties"]) == 1
     assert "file" in items["properties"]
@@ -579,7 +587,7 @@ def test_rt_with_references():
 
     schema = rtjs(rt)
     assert schema["properties"]["FileProp"]["type"] == "array"
-    assert schema["properties"]["FileProp"]["items"]["type"] == "string"
+    assert schema["properties"]["FileProp"]["items"]["type"] == ["string", "null"]
     assert schema["properties"]["FileProp"]["items"]["format"] == "data-url"
 
     # wrap in array (cf. https://github.com/rjsf-team/react-jsonschema-form/issues/3957)
@@ -590,14 +598,45 @@ def test_rt_with_references():
     assert "items" in schema["properties"]["FileProp"]
     items = schema["properties"]["FileProp"]["items"]
     assert items["type"] == "object"
-    assert len(items["required"]) == 1
-    assert "file" in items["required"]
+    assert len(items["required"]) == 0
     assert items["additionalProperties"] is False
     assert len(items["properties"]) == 1
     assert "file" in items["properties"]
     assert items["properties"]["file"]["type"] == "string"
     assert items["properties"]["file"]["format"] == "data-url"
 
+    # Test reference property
+    model_string = """
+RT1:
+  description: Some recordtype
+RT2:
+  obligatory_properties:
+    prop1:
+      description: Some reference property
+      datatype: RT1
+    """
+    model = parse_model_from_string(model_string)
+    schema = rtjs(model.get_deep("RT2"), no_remote=True)
+    assert json.dumps(schema, indent=2) == """{
+  "type": "object",
+  "required": [
+    "prop1"
+  ],
+  "additionalProperties": true,
+  "title": "RT2",
+  "properties": {
+    "prop1": {
+      "type": "object",
+      "required": [],
+      "additionalProperties": true,
+      "description": "Some reference property",
+      "title": "prop1",
+      "properties": {}
+    }
+  },
+  "$schema": "https://json-schema.org/draft/2020-12/schema"
+}"""
+
 
 def test_broken():
 
@@ -621,6 +660,7 @@ def test_broken():
 
 
 @patch("linkahead.execute_query", new=Mock(side_effect=_mock_execute_query))
+@patch("linkahead.cached.execute_query", new=Mock(side_effect=_mock_execute_query))
 def test_reference_options():
     """Testing miscellaneous options.
     """
@@ -851,6 +891,7 @@ RT5:
 
 
 @patch("linkahead.execute_query", new=Mock(side_effect=_mock_execute_query))
+@patch("linkahead.cached.execute_query", new=Mock(side_effect=_mock_execute_query))
 def test_empty_retrieve():
     """Special case: ``do_not_retrieve`` is set, or the retrieve result is empty."""
     model_str = """
@@ -893,6 +934,7 @@ RT3:
 
 
 @patch("linkahead.execute_query", new=Mock(side_effect=_mock_execute_query))
+@patch("linkahead.cached.execute_query", new=Mock(side_effect=_mock_execute_query))
 def test_multiple_choice():
     """Multiple choice is mostyly a matter of UI."""
     model_str = """
@@ -933,6 +975,7 @@ RT4:
 
 
 @patch("linkahead.execute_query", new=Mock(side_effect=_mock_execute_query))
+@patch("linkahead.cached.execute_query", new=Mock(side_effect=_mock_execute_query))
 def test_uischema():
     model_str = """
 RT1:
@@ -983,6 +1026,7 @@ RT3:
 
 
 @patch("linkahead.execute_query", new=Mock(side_effect=_mock_execute_query))
+@patch("linkahead.cached.execute_query", new=Mock(side_effect=_mock_execute_query))
 def test_schema_customization_with_dicts():
     """Testing the ``additional_json_schema`` and ``additional_ui_schema`` parameters."""
     model_str = """
diff --git a/unittests/test_table_converter.py b/unittests/test_table_converter.py
index dbf593de4b63e031777c109c26b971171e660638..9b1ac11b7c9ed5251f05c921fb08f7c42079c91a 100644
--- a/unittests/test_table_converter.py
+++ b/unittests/test_table_converter.py
@@ -27,6 +27,7 @@ from tempfile import NamedTemporaryFile
 import caosdb as db
 import pandas as pd
 from caosdb.apiutils import compare_entities
+from numpy import nan
 
 from caosadvancedtools.table_converter import (from_table, from_tsv, to_table,
                                                to_tsv)
@@ -42,7 +43,8 @@ class TableTest(unittest.TestCase):
 
     def test_empty(self):
         c = db.Container()
-        self.assertRaises(ValueError, to_table, c)
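+        # to_table now returns an empty DataFrame instead of raising a ValueError.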
+        df = to_table(c)
+        assert df.shape == (0, 0)
 
     def test_different_props(self):
         r1 = db.Record()
@@ -65,6 +67,36 @@ class TableTest(unittest.TestCase):
         c.extend([r1, r2])
         self.assertRaises(ValueError, to_table, c)
 
+    def test_list(self):
+        r1 = db.Record()
+        r1.add_parent("no1")
+        r1.add_property("p1", value=1)
+        r1.add_property("p3", value=23)
+        r1.add_property("p4", value=[1])
+        r2 = db.Record()
+        r2.add_parent("no1")
+        r2.add_property("p1")
+        r2.add_property("p2", value=[20, 21])
+        r2.add_property("p3", value=[30, 31])
+        r2.add_property("p4", value=[40.0, 41.0])
+        r3 = db.Record()
+        r3.add_parent("no1")
+        r3.add_property("p5", value=[50, 51])
+        c = db.Container()
+        c.extend([r1, r2, r3])
+        result = to_table(c)
+        # NaN is hard to compare, so we replace it by -999
+        # autopep8: off
+        assert result.replace(to_replace=nan, value=-999).to_dict() == {
+            'p1': {0: 1,    1: -999,         2: -999},  # noqa: E202
+            'p3': {0: 23,   1: [30, 31],     2: -999},  # noqa: E202
+            'p4': {0: [1],  1: [40.0, 41.0], 2: -999},  # noqa: E202
+            'p2': {0: -999, 1: [20, 21],     2: -999},  # noqa: E202
+            'p5': {0: -999, 1: -999,         2: [50, 51]}
+        }
+        # autopep8: on
+        assert list(result.dtypes) == [float, object, object, object, object]
+
 
 class FromTsvTest(unittest.TestCase):
     def test_basic(self):
diff --git a/unittests/test_yaml_model_parser.py b/unittests/test_yaml_model_parser.py
index 1019a93a0aa4292cea75fe8fba57e19e55359baa..97c3450f654e7b836734335cafac37adc6e700bb 100644
--- a/unittests/test_yaml_model_parser.py
+++ b/unittests/test_yaml_model_parser.py
@@ -19,7 +19,7 @@
 import unittest
 from datetime import date
 from tempfile import NamedTemporaryFile
-from pytest import deprecated_call, raises, mark
+from pytest import raises, mark
 
 import linkahead as db
 from caosadvancedtools.models.parser import (TwiceDefinedException,
@@ -527,7 +527,7 @@ F:
 
 
 def test_issue_36():
-    """Test whether the `parent` keyword is deprecated.
+    """Test whether the `parent` keyword is removed.
 
     See https://gitlab.com/caosdb/caosdb-advanced-user-tools/-/issues/36.
 
@@ -550,17 +550,12 @@ R3:
   inherit_from_obligatory:
   - R1
 """
-    with deprecated_call():
-        # Check whether this is actually deprecated
+    with raises(ValueError) as ve:
+        # The keyword has been removed, so it should raise a regular ValueError.
         model = parse_model_from_string(model_string)
 
-    assert "R3" in model
-    r3 = model["R3"]
-    assert isinstance(r3, db.RecordType)
-    for par in ["R1", "R2"]:
-        # Until removal, both do the same
-        assert has_parent(r3, par)
-        assert r3.get_parent(par)._flags["inheritance"] == db.OBLIGATORY
+    assert "invalid keyword" in str(ve.value)
+    assert "parent" in str(ve.value)
 
 
 def test_yaml_error():
@@ -597,3 +592,50 @@ prop2:
     model = parse_model_from_string(model_string)
     prop2 = model["prop2"]
     assert prop2.role == "Property"
+
+
+def test_fancy_yaml():
+    """Testing aliases and other fancy YAML features."""
+    # Simple aliasing
+    model_string = """
+foo:
+  datatype: INTEGER
+RT1:
+  obligatory_properties: &RT1_oblig
+    foo:
+RT2:
+  obligatory_properties: *RT1_oblig
+    """
+    model = parse_model_from_string(model_string)
+    assert str(model) == """{'foo': <Property name="foo" datatype="INTEGER"/>
+, 'RT1': <RecordType name="RT1">
+  <Property name="foo" importance="OBLIGATORY" flag="inheritance:FIX"/>
+</RecordType>
+, 'RT2': <RecordType name="RT2">
+  <Property name="foo" importance="OBLIGATORY" flag="inheritance:FIX"/>
+</RecordType>
+}"""
+
+    # Aliasing with override
+    model_string = """
+foo:
+  datatype: INTEGER
+RT1:
+  obligatory_properties: &RT1_oblig
+    foo:
+RT2:
+  obligatory_properties:
+    <<: *RT1_oblig
+    bar:
+    """
+    model = parse_model_from_string(model_string)
+    assert str(model) == """{'foo': <Property name="foo" datatype="INTEGER"/>
+, 'RT1': <RecordType name="RT1">
+  <Property name="foo" importance="OBLIGATORY" flag="inheritance:FIX"/>
+</RecordType>
+, 'RT2': <RecordType name="RT2">
+  <Property name="foo" importance="OBLIGATORY" flag="inheritance:FIX"/>
+  <Property name="bar" importance="OBLIGATORY" flag="inheritance:FIX"/>
+</RecordType>
+, 'bar': <RecordType name="bar"/>
+}"""