diff --git a/.docker/Dockerfile b/.docker/Dockerfile
index 43e5eff1171da8d69eb8897bea678bf90572570a..876f252299991f2fa4410994b73259c3593c2198 100644
--- a/.docker/Dockerfile
+++ b/.docker/Dockerfile
@@ -1,4 +1,4 @@
-FROM debian:10
+FROM debian:11
 RUN apt-get update && \
     apt-get install \
     curl \
@@ -13,7 +13,7 @@ RUN apt-get update && \
     tox \
     git \
     openjdk-11-jdk-headless \
-    python-autopep8 \
+    python3-autopep8 \
     python3-pytest \
     libxml2 \
     -y
@@ -25,7 +25,7 @@ ADD https://gitlab.com/api/v4/projects/13656973/repository/branches/dev \
 RUN git clone https://gitlab.com/caosdb/caosdb-pylib.git && \
    cd caosdb-pylib && git checkout dev && pip3 install .
 # At least recommonmark 0.6 required.
-RUN pip3 install recommonmark sphinx-rtd-theme
+RUN pip3 install -U html2text pycodestyle pylint recommonmark sphinx-rtd-theme
 COPY . /git
 RUN rm -r /git/.git \
     && mv /git/.docker/pycaosdb.ini /git/integrationtests
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f620aeffd5146254bf630645eaded34d69f35f1c..8ebbefaa39650ddaff45b856a8a4d44a2ac495d1 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -64,6 +64,7 @@ test:
       - rc=`cat .docker/result`  
       - exit $rc
   dependencies: [cert]
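+  # "needs" lets this job start as soon as "cert" finishes instead of waiting for the whole previous stage.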
+  needs: [cert]
   artifacts:
     paths:
       - caosdb_log.txt
@@ -95,6 +96,7 @@ cert:
   tags: [docker]
   stage: cert
   image: $CI_REGISTRY_IMAGE
+  needs: [build-testenv]
   artifacts:
     paths:
       - .docker/cert/
@@ -102,18 +104,30 @@ cert:
   script:
       - cd .docker
       - CAOSHOSTNAME=caosdb-server ./cert.sh
+
 style:
   tags: [docker]
   stage: style
   image: $CI_REGISTRY_IMAGE
+  needs: [build-testenv]
+  script:
+      - make style
+  allow_failure: true
+
+linting:
+  tags: [docker]
+  stage: style
+  image: $CI_REGISTRY_IMAGE
+  needs: [build-testenv]
   script:
-      - autopep8 -ar --diff --exit-code --exclude swagger_client .
+      - make lint
   allow_failure: true
 
 unittest:
   tags: [docker]
   stage: unittest
   image: $CI_REGISTRY_IMAGE
+  needs: [build-testenv]
   script:
       - tox
 
diff --git a/.gitlab/merge_request_templates/Default.md b/.gitlab/merge_request_templates/Default.md
deleted file mode 100644
index 77a95da1cc40c815e4952a1283d345af56e80461..0000000000000000000000000000000000000000
--- a/.gitlab/merge_request_templates/Default.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Summary
-
-    Insert a meaningful description for this merge request here.  What is the
-    new/changed behavior? Which bug has been fixed? Are there related Issues?
-
-# Focus
-
-    Point the reviewer to the core of the code change. Where should they start
-    reading? What should they focus on (e.g. security, performance,
-    maintainability, user-friendliness, compliance with the specs, finding more
-    corner cases, concrete questions)?
-
-# Test Environment
-
-    How to set up a test environment for manual testing?
-
-# Check List for the Author
-
-Please, prepare your MR for a review. Be sure to write a summary and a
-focus and create gitlab comments for the reviewer. They should guide the
-reviewer through the changes, explain your changes and also point out open
-questions. For further good practices have a look at [our review
-guidelines](https://gitlab.com/caosdb/caosdb/-/blob/dev/REVIEW_GUIDELINES.md)
-
-- [ ] All automated tests pass
-- [ ] Reference related Issues
-- [ ] Up-to-date CHANGELOG.md
-- [ ] Annotations in code (Gitlab comments)
-  - Intent of new code
-  - Problems with old code
-  - Why this implementation?
-
-
-# Check List for the Reviewer
-
-
-- [ ] I understand the intent of this MR
-- [ ] All automated tests pass
-- [ ] Up-to-date CHANGELOG.md
-- [ ] The test environment setup works and the intended behavior is
-  reproducible in the test environment
-- [ ] In-code documentation and comments are up-to-date.
-- [ ] Check: Are there spezifications? Are they satisfied?
-
-For further good practices have a look at [our review guidelines](https://gitlab.com/caosdb/caosdb/-/blob/dev/REVIEW_GUIDELINES.md).
-
-
-/assign me
-/target_branch dev
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c5c1ba8566895139917cd0360f6c4ec48374523a..62105323a81f22594c92601a405e287dc76106ee 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -4,12 +4,42 @@ All notable changes to this project will be documented in this file.
 The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
 and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
+## [Unreleased] ##
+
+### Added ###
+
+- `ResultTableCFood`: a CFood that creates a Record for each line in a CSV file
+- `generic_analysis.py` makes it easy to call scripts that perform analyses via
+  server-side scripting [EXPERIMENTAL]
+- **EXPERIMENTAL:** The models parser can now import from JSON Schema files:
+  `models.parser.parse_model_from_json_schema(...)`. See the documentation of
+  `models.parser.JsonSchemaParser` for the limitations of the current
+  implementation, and the usage sketch below.
+- New keyword "role" in the YAML data model that allows the creation of Records and Files.
+- Values and default values of properties can now be set directly in the YAML
+  model.
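+
+A minimal usage sketch of the JSON Schema import mentioned above (the schema
+file name is just an example):
+
+```python
+from caosadvancedtools.models.parser import parse_model_from_json_schema
+
+model = parse_model_from_json_schema("my_datamodel.schema.json")
+model.sync_data_model(noquestion=True)
+```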
+
+### Changed ###
+
+- `TableConverter` now converts int to float and vice versa to match the desired dtype.
+
+### Deprecated ###
+
+### Removed ###
+
+### Fixed ###
+
+- CaosDB internal properties `name`, `unit` and `description` can now be used via the `extern`
+  keyword in YAML datamodel specifications.
+
+### Security ###
+
 ## [0.3.1] - 2021-12-06  ##
 
 ### Added ###
 - `check_reference_field` function to check whether entities with provided ids
   exits (for example when importing data from a table)
-- added the `datatypes` argument to `TableImporter` for columns that do not 
+- added the `datatypes` argument to `TableImporter` for columns that do not
   need a special conversion function
 
 ## [0.3.0] - 2021-11-02 ##
@@ -23,14 +53,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - New class to collect possible problems with the data model
 - New class for checking and importing tables
 - Function to get a file path to a shared resource directory
-- Function to setup logging appropriate for server side scripts with webui 
+- Function to setup logging appropriate for server side scripts with webui
   output
 - New class for collecting information for exporting tables, e.g., to
   metadata repositories
 - new name parsing
 - new test for software folder structure
 - new assure_name_is function
-- two utility functions when working with files: NameCollector and 
+- two utility functions when working with files: NameCollector and
   get_file_via_download
 - Automated documentation builds: `make doc`
 - Crawler documentation
@@ -43,8 +73,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Changed ###
 
-- identifiables of single CFoods are now treated one after the other. This 
-  allows them to have dependencies among each other if they are ordered 
+- identifiables of single CFoods are now treated one after the other. This
+  allows them to have dependencies among each other if they are ordered
   correctly
 - identifiables must have at least one property or a name
 - `caosadvancedtools.serverside.helper.init_data_model` also checks the role
@@ -72,9 +102,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   cause an `sqlite3.IntegrityError` if more than one change was cached
   for the same entity.
 - #40 Insertion of identifiables with missing obligatory properties
-- Before, a Property with the datatype "LIST(TEXT)" would lead to the creation 
+- Before, a Property with the datatype "LIST(TEXT)" would lead to the creation
   of a RecordType. This is fixed now.
-- #52 `XLSimporter.read_xls` throwed a wrong error when reading from a file with a wrong ending. 
+- #52 `XLSimporter.read_xls` throwed a wrong error when reading from a file with a wrong ending.
   Now, a `DataInconsistencyError` is raised instead of a ValueError.
 - List properties are no longer updated unnecessarily by the crawler.
 
diff --git a/Makefile b/Makefile
index 7609444bd4fd3a8ce980eca0bc3993b3cf2e168f..d9b182cbd0b17490e9d81b900d6ba8cefadb1b64 100644
--- a/Makefile
+++ b/Makefile
@@ -34,3 +34,12 @@ install:
 
 unittest:
 	pytest-3 unittests
+
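+# Check code style without modifying files; a non-zero exit code signals violations.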
+style:
+	pycodestyle --count src unittests --exclude=swagger_client
+	autopep8 -ar --diff --exit-code --exclude swagger_client .
+.PHONY: style
+
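+# Report only pylint errors (E) and fatal messages (F); all other checks are disabled.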
+lint:
+	pylint --unsafe-load-any-extension=y -d all -e E,F --ignore=swagger_client src/caosadvancedtools
+.PHONY: lint
diff --git a/README_SETUP.md b/README_SETUP.md
index 09a368a3d33f9fb2be799a5ba9961d293a2fd6c3..43047d554afbe8ffba11aef67b20dde44d29bdcf 100644
--- a/README_SETUP.md
+++ b/README_SETUP.md
@@ -38,7 +38,7 @@ Optional h5-crawler:
 
 1. Change directory to `integrationtests/`.
 2. Mount `extroot` to the folder that will be used as extroot. E.g. `sudo mount
-   -o bind extroot ../../caosdb-deploy/profiles/empty/paths/extroot` (or
+   -o bind extroot ../../caosdb-deploy/profiles/debug/paths/extroot` (or
-   whatever path the extroot of the empty profile to be used is located at).
+   whatever path the extroot of the profile to be used is located at).
 3. Start (or restart) an empty (!) CaosDB instance (with the mounted
    extroot). The database will be cleared during testing, so it's important to
@@ -48,7 +48,8 @@ Optional h5-crawler:
    `integrationtest/extroot/` directory.
 
 ## Code Formatting
-`autopep8 -i -r ./`
+
+`make style`
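+
+This essentially runs (see the `style` target in the [Makefile](Makefile)):
+
+```sh
+pycodestyle --count src unittests --exclude=swagger_client
+autopep8 -ar --diff --exit-code --exclude swagger_client .
+```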
 
 ## Documentation #
 
diff --git a/RELEASE_GUIDELINES.md b/RELEASE_GUIDELINES.md
new file mode 100644
index 0000000000000000000000000000000000000000..e71234b8e2bc95f954ffbebdc26acf6edd8e0b2d
--- /dev/null
+++ b/RELEASE_GUIDELINES.md
@@ -0,0 +1,43 @@
+# Release Guidelines for the CaosDB Python Client Library
+
+This document specifies release guidelines in addition to the general release
+guidelines of the CaosDB Project
+([RELEASE_GUIDELINES.md](https://gitlab.com/caosdb/caosdb/blob/dev/RELEASE_GUIDELINES.md)).
+
+## General Prerequisites
+
+* All tests are passing.
+* FEATURES.md is up-to-date and declares the public API.
+* CHANGELOG.md is up-to-date.
+* DEPENDENCIES.md is up-to-date.
+
+## Steps
+
+1. Create a release branch from the dev branch. This prevents further changes
+   to the code base and a never-ending release process. Naming: `release-<VERSION>`
+
+2. Update CHANGELOG.md
+
+3. Check all general prerequisites.
+
+4. Update the version:
+   - `version` variables in `src/doc/conf.py`
+   - Version on [setup.py](./setup.py): Check the `MAJOR`, `MINOR`, `MICRO`, `PRE` variables and set
+     `ISRELEASED` to `True`. Use the possibility to issue pre-release versions for testing.
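+
+   For illustration, a pre-release configuration in [setup.py](./setup.py)
+   might look like this (the values are examples only):
+
+   ```python
+   MAJOR = 0
+   MINOR = 3
+   MICRO = 2
+   PRE = "rc0"  # e.g. rc0, alpha.1, 0.beta-23
+   ISRELEASED = True
+   ```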
+
+5. Merge the release branch into the main branch.
+
+6. Tag the latest commit of the main branch with `v<VERSION>`.
+
+7. Delete the release branch.
+
+8. Remove possibly existing `./dist` directory with old release.
+
+9. Publish the release by executing `./release.sh`, which uploads the caosdb
+   module to the Python Package Index [pypi.org](https://pypi.org).
+
+10. Merge the main branch back into the dev branch.
+
+11. After the merge of main to dev, start a new development version by
+    setting `ISRELEASED` to `False`, increasing at least the `MICRO`
+    version in [setup.py](./setup.py), and preparing CHANGELOG.md.
diff --git a/integrationtests/crawl.py b/integrationtests/crawl.py
index 79ed3b5ffe52d276677e2a7914f70923e5c9e70c..defed2cb4f5fb0a0f349898e555c5d25924e2f9b 100755
--- a/integrationtests/crawl.py
+++ b/integrationtests/crawl.py
@@ -34,7 +34,7 @@ from caosadvancedtools.crawler import FileCrawler
 from caosadvancedtools.guard import INSERT, UPDATE
 from caosadvancedtools.scifolder import (AnalysisCFood, ExperimentCFood,
                                          PublicationCFood, SimulationCFood,
-                                         SoftwareCFood)
+                                         SoftwareCFood, ResultTableCFood)
 
 from example_hdf5cfood import ExampleH5CFood
 
@@ -91,6 +91,7 @@ if __name__ == "__main__":
                     interactive=False, hideKnown=False,
                     cfood_types=[ExperimentCFood, AnalysisCFood, SoftwareCFood,
                                  PublicationCFood, SimulationCFood,
+                                 ResultTableCFood,
                                  ExampleH5CFood
                                  ])
 
diff --git a/integrationtests/create_analysis.py b/integrationtests/create_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b7aa0d2d6671f14a3c65cf5ed135dfecb0aa69c
--- /dev/null
+++ b/integrationtests/create_analysis.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""
+A small script that creates an Analysis Record that can be used for testing the
+automated analysis pipeline.
+"""
+
+import sys
+from datetime import datetime
+
+import caosdb as db
+
+
+def main():
+    script = db.File(
+        file="../src/caosadvancedtools/serverside/examples/example_script.py",
+        path=("AutomatedAnalysis/scripts/"
+              + str(datetime.now()) + "example_script.py"),
+    )
+    script.insert()
+
+    da = db.Record()
+    da.add_parent("Analysis")
+    da.add_property("scripts", value=[script], datatype=db.LIST(db.FILE))
+    da.add_property("sources",
+                    value=db.execute_query(
+                        "FIND FILE which is stored at '**/timeseries.npy'",
+                        unique=True),
+                    )
+    da.add_property("date", "2020-01-01")
+    da.add_property("identifier", "TEST")
+    only = db.execute_query("FIND RECORD Person WITH firstname=Only",
+                            unique=True)
+    only.add_property(db.Property("Email").retrieve().id, "only@example.com")
+    only.update()
+    da.add_property("responsible", only)
+    da.insert()
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/integrationtests/example_script.py b/integrationtests/example_script.py
new file mode 120000
index 0000000000000000000000000000000000000000..f6e9b498ff97638cb4105e019424c0c677a7f414
--- /dev/null
+++ b/integrationtests/example_script.py
@@ -0,0 +1 @@
+../src/caosadvancedtools/serverside/examples/example_script.py
\ No newline at end of file
diff --git a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py
+++ b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..09157f2c0961d412efea36ea0e56db5aac03fd36 100644
Binary files a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf and b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf differ
diff --git a/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e29553fe01c8706e15a042e5ac6f85ed1a2cc8ce 100644
--- a/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat
+++ b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat
@@ -0,0 +1 @@
+datadatadata
diff --git a/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/result_table_DepthTest.csv b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/result_table_DepthTest.csv
new file mode 100644
index 0000000000000000000000000000000000000000..a29679afce78089f3cdd4e5e388262456668cd90
--- /dev/null
+++ b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/result_table_DepthTest.csv
@@ -0,0 +1,3 @@
+temperature [°C] ,depth 
+234.4,3.0
+344.6,5.1
diff --git a/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf b/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..09157f2c0961d412efea36ea0e56db5aac03fd36 100644
Binary files a/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf and b/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf differ
diff --git a/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy b/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..18da9b18cda23d411d0f2666629377dd7991ac8f 100644
Binary files a/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy and b/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy differ
diff --git a/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py b/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py
+++ b/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py b/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py
+++ b/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py b/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py
+++ b/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/model.yml b/integrationtests/model.yml
index cad7958803edf1a01f0649353443ffab350cc5e5..9f7a62d1d0befbc7225353380c79db2f368c969c 100644
--- a/integrationtests/model.yml
+++ b/integrationtests/model.yml
@@ -19,6 +19,14 @@ SoftwareVersion:
     binaries:
     sourceCode:
     Software:
+DepthTest:
+  obligatory_properties:
+    temperature:
+      datatype: DOUBLE
+      description: 'temp'
+    depth:
+      datatype: DOUBLE
+      description: 'temp'
 Person:
   obligatory_properties:
     firstName:
@@ -51,6 +59,9 @@ Analysis:
     date:
     identifier:
     responsible:
+  suggested_properties:
+    mean_value:
+      datatype: DOUBLE
 Publication:
 Thesis:
   inherit_from_suggested:
diff --git a/integrationtests/test.sh b/integrationtests/test.sh
index 71af543643a35cb082f10a24440c5ea87df946c9..a142d917215eb7469faab9c66a581539ce867e4e 100755
--- a/integrationtests/test.sh
+++ b/integrationtests/test.sh
@@ -57,14 +57,21 @@ then
     exit 1
 fi
 set -e
-echo "undo changes"
+echo "Undoing previous changes to extroot content..."
 cd extroot
 egrep -liRZ 'A description of this example' . | xargs -0 -l sed -i -e 's/A description of this example/A description of another example/g'
 cd ..
+echo "Done."
 python3 test_table.py
 # TODO the following test deletes lots of the data inserted by the crawler
 echo "Testing im and export"
 python3 test_im_und_export.py
+
+# automated analysis
+# For some reason, loadFiles has to be called again for the simulation data
+python3 -m caosadvancedtools.loadFiles /opt/caosdb/mnt/extroot/SimulationData
+python3 create_analysis.py
+
 # Better safe than sorry:
 python3 clear_database.py
 
@@ -76,5 +83,11 @@ python3 -m pytest test_crawl_with_datamodel_problems.py
 echo "Testing table export"
 python3 -m pytest test_base_table_exporter_integration.py
 
+echo "Testing json-schema datamodel parser"
+python3 -m pytest test_json_schema_datamodel_parser.py
+
+echo "Testing yaml datamodel parser"
+python3 -m pytest test_yaml_parser.py
+
 # Obsolete due to teardown in the above test.
 # echo "/n/n/n YOU NEED TO RESTART THE SERVER TO REDO TESTS!!!"
diff --git a/integrationtests/test_assure_functions.py b/integrationtests/test_assure_functions.py
index 56f9767a0f436201ab6003ffd88f631bdb089544..9f4e387d52f25382d18cfb21372a06346d2b5465 100644
--- a/integrationtests/test_assure_functions.py
+++ b/integrationtests/test_assure_functions.py
@@ -32,26 +32,25 @@ from caosadvancedtools.cfood import (assure_object_is_in_list)
 from caosadvancedtools.guard import (global_guard, RETRIEVE, UPDATE)
 
 
-def setup_module():
+def setup():
     """Delete all test entities."""
     db.execute_query("FIND Test*").delete(raise_exception_on_error=False)
 
 
-def setup():
+def setup_module():
     """Allow all updates and delete test data"""
     global_guard.level = UPDATE
-    setup_module()
+    setup()
 
 
-def teardown():
+def teardown_module():
     """Reset guard level and delete test data."""
     global_guard.level = RETRIEVE
-    setup_module()
+    setup()
 
 
 def test_assure_list_in_place():
     """Test an in-place update with `assure_object_is_in_list`."""
-
     int_list_prop = db.Property(name="TestIntListProperty",
                                 datatype=db.LIST(db.INTEGER)).insert()
     rt1 = db.RecordType(name="TestType1").add_property(
diff --git a/integrationtests/test_base_table_exporter_integration.py b/integrationtests/test_base_table_exporter_integration.py
index 1c9158bd1d9600884571957d4916939f82c1a9ca..9d79e857fe706d78103ade3b92ee38498a2a1607 100644
--- a/integrationtests/test_base_table_exporter_integration.py
+++ b/integrationtests/test_base_table_exporter_integration.py
@@ -23,6 +23,7 @@
 # ** end header
 #
 import caosdb as db
+import pytest
 from caosadvancedtools import table_export as te
 
 
@@ -85,8 +86,11 @@ def setup_module():
         pass
 
 
+@pytest.fixture(autouse=True)
 def setup():
-    """No further setup"""
+    """Same as module setup."""
+    setup_module()
+    yield None
     setup_module()
 
 
diff --git a/integrationtests/test_crawl_with_datamodel_problems.py b/integrationtests/test_crawl_with_datamodel_problems.py
index 61fec39026a1a3480ecc5b52551c712d7a459b08..0c6a145afdab682f82af09a17fb9aa0770769959 100644
--- a/integrationtests/test_crawl_with_datamodel_problems.py
+++ b/integrationtests/test_crawl_with_datamodel_problems.py
@@ -20,10 +20,11 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 #
 # ** end header
-"""Test whether the crawler correctly identifies the data model
-problems caused by a faulty model.
+
+"""Test whether the crawler correctly identifies the data model problems caused by a faulty model.
 
 """
+
 import caosdb as db
 from caosadvancedtools import loadFiles
 from caosadvancedtools.cfood import fileguide
diff --git a/integrationtests/test_crawler_with_cfoods.py b/integrationtests/test_crawler_with_cfoods.py
index 05bb581058a964d76ab78583cc290c348e8c4566..4efef87cef52e4a2a20a615afe210c32f52a276a 100755
--- a/integrationtests/test_crawler_with_cfoods.py
+++ b/integrationtests/test_crawler_with_cfoods.py
@@ -66,6 +66,17 @@ class CrawlerTest(unittest.TestCase):
                          datfile.description)
         assert os.path.basename(datfile.path) == "datafile.dat"
 
+        # There should be a DepthTest property with two values (one per CSV row)
+        depthtests = exp.get_property("DepthTest")
+        assert depthtests is not None
+        assert len(depthtests.value) == 2
+        depthtest = db.Record(id=depthtests.value[0])
+        depthtest.retrieve()
+        assert "DepthTest" in [p.name for p in depthtest.get_parents()]
+        assert 234.4 == depthtest.get_property("temperature").value
+        assert "°C" == depthtest.get_property("temperature").unit
+        assert 3.0 == depthtest.get_property("depth").value
+
         # Should have a responsible person
         self.assertIsNotNone(exp.get_property("responsible"))
         person = db.Record(id=exp.get_property("responsible").value[0])
diff --git a/integrationtests/test_datamodel.schema.json b/integrationtests/test_datamodel.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..356964702dd83a8c81edf1e8d72bf4a30468e6f2
--- /dev/null
+++ b/integrationtests/test_datamodel.schema.json
@@ -0,0 +1,85 @@
+[
+    {
+        "title": "TestTypeWithAtomicProps",
+        "description": "RecordType with scalar atomic properties",
+        "type": "object",
+        "properties": {
+            "simple_text_prop": { "type": "string" },
+            "int_prop_with_name": { "type": "integer", "title": "IntegerProperty" },
+            "double_prop": {
+                "type": "number",
+                "description": "Some generic double-valued property"
+            },
+            "bool_prop": { "type": "boolean" },
+            "datetime_prop": { "type": "string", "format": "date-time" },
+            "date_prop": { "type": "string", "format": "date" }
+        },
+        "required": [ "simple_text_prop", "double_prop" ]
+    },
+    {
+        "title": "TestTypeWithReferencesAndEnum",
+        "type": "object",
+        "properties": {
+            "TestTypeWithAtomicProps": {},
+            "OtherReference": {
+                "type": "object",
+                "description": "Some generic refernced RecordType",
+                "properties": {}
+            },
+            "named_refernce": {
+                "type": "object",
+                "title": "NamedReference",
+                "properties": {
+                    "simple_text_prop": {}
+                }
+            },
+            "string_enum": {
+                "type": "string",
+                "enum": [ "StringEnumA", "StringEnumB", "StringEnumC" ]
+            },
+            "named_enum": {
+                "type": "string",
+                "title": "NamedEnum",
+                "enum": [ "NameA", "NameB", "NameC" ]
+            }
+        }
+    },
+    {
+        "title": "TestTypeWithLists",
+        "type": "object",
+        "properties": {
+            "string_list": {
+                "type": "array",
+                "description": "A list of words",
+                "items": { "type": "string" }
+            },
+            "named_int_list": {
+                "type": "array",
+                "title": "NamedIntList",
+                "items": { "type": "integer" }
+            },
+            "ListRecordType": {
+                "type": "array",
+                "items": { "type": "object", "properties": {} }
+            },
+            "NamedReferenceList": {
+                "type": "array",
+                "items": {
+                    "title": "ReferencedListTypeWithName",
+                    "type": "object",
+                    "description": "Referenced by a named list-of-references property",
+                    "properties": {
+                        "double_prop": {}
+                    }
+                }
+            },
+            "ListNumberEnum": {
+                "type": "array",
+                "items": {
+                    "type": "number",
+                    "enum": [ 1.1, 2.2, 3.3 ]
+                }
+            }
+        }
+    }
+]
diff --git a/integrationtests/test_datamodel_problems.py b/integrationtests/test_datamodel_problems.py
index 7d56f4da8eea34604ed1c820e14555f087c353bd..3bca302dd2a337cee7fd023ee6a64c5185bc99f5 100644
--- a/integrationtests/test_datamodel_problems.py
+++ b/integrationtests/test_datamodel_problems.py
@@ -44,12 +44,15 @@ def setup_module():
         print(delete_exc)
 
 
+@pytest.fixture(autouse=True)
 def setup():
-    """No further setup"""
+    """Same as module setup."""
+    setup_module()
+    yield None
     setup_module()
 
 
-def teardown():
+def teardown_module():
     """Clear and delete again."""
     setup_module()
 
diff --git a/integrationtests/test_im_und_export.py b/integrationtests/test_im_und_export.py
index 27995080aa5cbeeb6f562226d4f0c0ca19c64d83..8ea45fd2cebbcb2c3be6c8cb79805204486f7862 100644
--- a/integrationtests/test_im_und_export.py
+++ b/integrationtests/test_im_und_export.py
@@ -12,10 +12,12 @@ if __name__ == "__main__":
     directory = TemporaryDirectory()
     export_related_to(rec.id, directory=directory.name)
     # delete everything
+    print("Clearing database")
     recs = db.execute_query("FIND entity with id>99")
     recs.delete()
     assert 0 == len(db.execute_query("FIND File which is stored at "
                                      "**/poster.pdf"))
+    print("Importing stored elements")
     import_xml(os.path.join(directory.name, "caosdb_data.xml"), interactive=False)
 
     # The following tests the existence of some required entities.
@@ -26,3 +28,4 @@ if __name__ == "__main__":
     db.execute_query("FIND RecordType Person", unique=True)
     db.execute_query("FIND Record Person with firstname=Only", unique=True)
     db.execute_query("FIND File which is stored at **/poster.pdf", unique=True)
+    print("Found all required Records and Files.")
diff --git a/integrationtests/test_json_schema_datamodel_parser.py b/integrationtests/test_json_schema_datamodel_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..21ae8d2d7bad5527a7a314220b38af8ff816475f
--- /dev/null
+++ b/integrationtests/test_json_schema_datamodel_parser.py
@@ -0,0 +1,174 @@
+#
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2022 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2022 Florian Spreckelsen <f.spreckelsen@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the Free
+# Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Affero General Public License along
+# with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import os
+
+import caosdb as db
+from caosadvancedtools.models.parser import parse_model_from_json_schema
+
+
+def _clear_db():
+    ents = db.execute_query("FIND ENTITY WITH ID>99")
+    if ents:
+        ents.delete()
+
+
+def setup_module():
+    _clear_db()
+
+
+def teardown_module():
+    _clear_db()
+
+
+def _load_and_sync(fname):
+    """Load datamodel from json schema in fname and synchronize it without asking.
+
+    """
+    # @author Florian Spreckelsen
+    # @date 2022-03-23
+    fpath = os.path.join(os.path.dirname(os.path.abspath(__file__)), fname)
+    model = parse_model_from_json_schema(fpath)
+    model.sync_data_model(noquestion=True)
+
+
+def test_json_parsed_datamodel():
+    # @author Florian Spreckelsen
+    # @date 2022-03-23
+
+    _load_and_sync("test_datamodel.schema.json")
+
+    # RecordType with atomic properties
+    rt1 = db.execute_query(
+        "FIND RECORDTYPE TestTypeWithAtomicProps", unique=True)
+    assert rt1.description == "RecordType with scalar atomic properties"
+    assert rt1.get_property("simple_text_prop") is not None
+    assert rt1.get_property("simple_text_prop").datatype == db.TEXT
+    assert rt1.get_importance("simple_text_prop") == db.OBLIGATORY
+
+    assert rt1.get_property("IntegerProperty") is not None
+    assert rt1.get_property("IntegerProperty").datatype == db.INTEGER
+    assert rt1.get_importance("IntegerProperty") == db.RECOMMENDED
+
+    assert rt1.get_property("double_prop") is not None
+    assert rt1.get_property("double_prop").datatype == db.DOUBLE
+    assert rt1.get_importance("double_prop") == db.OBLIGATORY
+    assert (db.Property(name="double_prop").retrieve().description ==
+            "Some generic double-valued property")
+
+    further_props = [
+        ("bool_prop", db.BOOLEAN),
+        ("datetime_prop", db.DATETIME),
+        ("date_prop", db.DATETIME)
+    ]
+    for name, dtype in further_props:
+        assert rt1.get_property(name) is not None
+        assert rt1.get_property(name).datatype == dtype
+        assert rt1.get_importance(name) == db.RECOMMENDED
+
+    # RecordType with references and enums
+    rt2 = db.execute_query(
+        "FIND RECORDTYPE TestTypeWithReferencesAndEnum", unique=True)
+    assert rt2.get_property(rt1.name) is not None
+    assert rt2.get_property(rt1.name).is_reference()
+    assert rt2.get_property(rt1.name).name == rt1.name
+    assert rt2.get_property(rt1.name).id == rt1.id
+
+    other_ref_type = db.execute_query(
+        "FIND RECORDTYPE OtherReference", unique=True)
+    assert rt2.get_property(other_ref_type.name) is not None
+    assert rt2.get_property(other_ref_type.name).is_reference()
+    assert rt2.get_property(other_ref_type.name).name == other_ref_type.name
+    assert rt2.get_property(other_ref_type.name).id == other_ref_type.id
+    assert other_ref_type.description == "Some generic referenced RecordType"
+    assert len(other_ref_type.properties) == 0
+
+    named_ref_type = db.execute_query(
+        "FIND RECORDTYPE NamedReference", unique=True)
+    assert rt2.get_property(named_ref_type.name) is not None
+    assert rt2.get_property(named_ref_type.name).is_reference()
+    assert rt2.get_property(named_ref_type.name).name == named_ref_type.name
+    assert rt2.get_property(named_ref_type.name).id == named_ref_type.id
+    assert named_ref_type.get_property("simple_text_prop") is not None
+    assert (named_ref_type.get_property("simple_text_prop").id ==
+            rt1.get_property("simple_text_prop").id)
+    assert (named_ref_type.get_property("simple_text_prop").datatype ==
+            rt1.get_property("simple_text_prop").datatype)
+
+    enums = {
+        "string_enum": ["StringEnumA", "StringEnumB", "StringEnumC"],
+        "NamedEnum": ["NameA", "NameB", "NameC"]
+    }
+    for enum_type_name, enum_names in enums.items():
+        enum_type = db.execute_query(
+            f"FIND RECORDTYPE {enum_type_name}", unique=True)
+        assert len(enum_type.properties) == 0
+        enum_records = db.execute_query(f"FIND RECORD {enum_type_name}")
+        assert len(enum_records) == len(enum_names)
+        for rec in enum_records:
+            assert rec.name in enum_names
+        assert rt2.get_property(enum_type_name) is not None
+        assert rt2.get_property(enum_type_name).is_reference()
+        assert rt2.get_property(enum_type_name).name == enum_type.name
+        assert rt2.get_property(enum_type_name).id == enum_type.id
+
+    # Recordtype with lists
+    rt3 = db.execute_query("FIND RECORDTYPE TestTypeWithLists", unique=True)
+    assert rt3.get_property("string_list") is not None
+    assert rt3.get_property("string_list").datatype == db.LIST(db.TEXT)
+    string_list_prop = db.Property(name="string_list").retrieve()
+    assert string_list_prop.description == "A list of words"
+    assert string_list_prop.datatype == db.LIST(db.TEXT)
+    assert string_list_prop.id == rt3.get_property("string_list").id
+
+    assert rt3.get_property("NamedIntList") is not None
+    assert rt3.get_property("NamedIntList").datatype == db.LIST(db.INTEGER)
+
+    # This is a list of plain references to a specific type
+    list_rt = db.execute_query("FIND RECORDTYPE ListRecordType", unique=True)
+    assert len(list_rt.properties) == 0
+    assert rt3.get_property(list_rt.name) is not None
+    assert rt3.get_property(list_rt.name).is_reference()
+    assert rt3.get_property(list_rt.name).datatype == db.LIST(list_rt)
+    assert rt3.get_property(list_rt.name).id == list_rt.id
+
+    # This is a list property of its own, referencing another separate RT
+    referenced_list_rt = db.execute_query(
+        "FIND RECORDTYPE ReferencedListTypeWithName", unique=True)
+    assert referenced_list_rt.description == "Referenced by a named list-of-references property"
+    assert referenced_list_rt.get_property("double_prop") is not None
+    assert (referenced_list_rt.get_property("double_prop").id ==
+            rt1.get_property("double_prop").id)
+    assert rt3.get_property("NamedReferenceList") is not None
+    assert rt3.get_property("NamedReferenceList").is_reference()
+    assert rt3.get_property(
+        "NamedReferenceList").datatype == db.LIST(referenced_list_rt)
+    assert rt3.get_property("NamedReferenceList").id != referenced_list_rt.id
+
+    enum_type = db.execute_query("FIND RECORDTYPE ListNumberEnum", unique=True)
+    assert len(enum_type.properties) == 0
+    enum_names = ["1.1", "2.2", "3.3"]
+    enum_records = db.execute_query("FIND RECORD ListNumberEnum")
+    assert len(enum_records) == len(enum_names)
+    for rec in enum_records:
+        assert rec.name in enum_names
+    assert rt3.get_property(enum_type.name) is not None
+    assert rt3.get_property(enum_type.name).datatype == db.LIST(enum_type)
+    assert rt3.get_property(enum_type.name).id == enum_type.id
diff --git a/integrationtests/test_yaml_parser.py b/integrationtests/test_yaml_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2a2c4c056ced56d2605d93914186c2cba97e137
--- /dev/null
+++ b/integrationtests/test_yaml_parser.py
@@ -0,0 +1,69 @@
+# encoding: utf-8
+#
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2022 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2022 Florian Spreckelsen <f.spreckelsen@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the Free
+# Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Affero General Public License along
+# with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import caosdb as db
+from caosadvancedtools.models.parser import parse_model_from_string
+
+
+def _delete_everything():
+    ents = db.execute_query("FIND ENTITY WITH ID > 99")
+    if ents:
+        ents.delete()
+
+
+def setup_module():
+    _delete_everything()
+
+
+def teardown_module():
+    _delete_everything()
+
+
+def test_internal_props_in_extern():
+    """Test adding the internal `name` property as a parent to an existing
+    property.
+
+    """
+
+    model = """
+extern:
+- name
+- test_name
+- description
+- unit
+test_name:
+  inherit_from_suggested:
+  - name
+  - description
+  - unit
+"""
+    db.Property(name="test_name", datatype=db.TEXT).insert()
+    ents = parse_model_from_string(model)
+    ents.sync_data_model(noquestion=True)
+
+    test_prop = db.Property(name="test_name").retrieve()
+    assert len(test_prop.parents) == 3
+    desc_prop = db.Property(name="description").retrieve()
+    name_prop = db.Property(name="name").retrieve()
+    unit_prop = db.Property(name="unit").retrieve()
+    assert test_prop.has_parent(desc_prop)
+    assert test_prop.has_parent(name_prop)
+    assert test_prop.has_parent(unit_prop)
diff --git a/integrationtests/update_analysis.py b/integrationtests/update_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd18ab375437bec02320dcfd269896c2ba7e2bb0
--- /dev/null
+++ b/integrationtests/update_analysis.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""Example update script. An anlysis Record is retrieved and passed to the
+generic run function which then calls the appropriate script based on the
+Record.
+
+The simple query here could be replaced with something that e.g. retrieves all
+entities that where changed within a certain period of time.
+
+"""
+
+import sys
+
+import caosdb as db
+from caosadvancedtools.serverside.generic_analysis import run
+
+
+def main():
+    da = db.execute_query("FIND Analysis with identifier=TEST", unique=True)
+    run(da)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/pylintrc b/pylintrc
new file mode 100644
index 0000000000000000000000000000000000000000..625f83ce950841f7a239538123ef7b5812fc5c5f
--- /dev/null
+++ b/pylintrc
@@ -0,0 +1,19 @@
+# -*- mode:conf; -*-
+
+[FORMAT]
+# Good variable names which should always be accepted, separated by a comma
+good-names=ii,rt,df
+
+[TYPECHECK]
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis
+ignored-modules=etree,h5py,labfolder
+
+[MASTER]
+# TODO: The max_inferred size is necessary for https://github.com/PyCQA/pylint/issues/4577,
+# otherwise pandas.read_csv's return value would be inferred as TextFileReader.
+init-hook=
+  import sys; sys.path.extend(["src/caosadvancedtools"]);
+  import astroid; astroid.context.InferenceContext.max_inferred = 500;
+
diff --git a/pytest.ini b/pytest.ini
index 211913fa06d4e0a46c9c9024e147c5313e4746e1..e65efaf9aaf061a8a1ec0040f87d682536fac4c2 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,4 +1,3 @@
 [pytest]
 testpaths = unittests
 addopts = -vv
-python_paths = src
diff --git a/setup.py b/setup.py
index 78040b9c2244ababdeafef5f59e31a42e3d723a6..98599d9a5ead13520726546c23cbe59c57242fc0 100755
--- a/setup.py
+++ b/setup.py
@@ -47,9 +47,9 @@ from setuptools import find_packages, setup
 
 MAJOR = 0
 MINOR = 3
-MICRO = 1
+MICRO = 2
 PRE = ""  # e.g. rc0, alpha.1, 0.beta-23
-ISRELEASED = True
+ISRELEASED = False
 
 if PRE:
     VERSION = "{}.{}.{}-{}".format(MAJOR, MINOR, MICRO, PRE)
@@ -154,10 +154,11 @@ def setup_package():
         long_description_content_type="text/markdown",
         author='Henrik tom Wörden',
         author_email='h.tomwoerden@indiscale.com',
-        install_requires=["caosdb>=0.6.0",
+        install_requires=["caosdb>=0.7.0",
+                          "jsonschema>=4.4.0",
+                          "numpy>=1.17.3",
                           "openpyxl>=3.0.0",
                           "pandas>=1.2.0",
-                          "numpy>=1.17.3",
                           "xlrd>=2.0",
                           ],
         extras_require={"h5-crawler": ["h5py>=3.3.0", ],
diff --git a/src/caosadvancedtools/cfood.py b/src/caosadvancedtools/cfood.py
index 2c56e171d8e70789bc4f0dd3881a7d8e716e75f7..0074d9a13ca8ab60314b59e2a52496cbacc441f7 100644
--- a/src/caosadvancedtools/cfood.py
+++ b/src/caosadvancedtools/cfood.py
@@ -391,7 +391,7 @@ class AbstractFileCFood(AbstractCFood):
 def assure_object_is_in_list(obj, containing_object, property_name,
                              to_be_updated=None, datatype=None):
     """Checks whether `obj` is one of the values in the list property
-    `property_name` of the supplied entity  containing_object`.
+    `property_name` of the supplied entity `containing_object`.
 
     If this is the case this function returns. Otherwise the entity is
     added to the property `property_name` and the entity
@@ -813,7 +813,7 @@ class RowCFood(AbstractCFood):
             rec.add_property(key, value)
 
 
-class CMeal(object):
+class CMeal():
     """
     CMeal groups equivalent items and allow their collected insertion.
 
@@ -841,12 +841,23 @@ class CMeal(object):
     matching_groups = []
 
     def __init__(self):
+        self.item = None
+        # FIXME Is this only necessary because of inconsistent use of super().__init__()?
+        if "match" not in self.__dict__:
+            self.match = None
         self.__class__.existing_instances.append(self)
 
+    @staticmethod
+    def get_re():
+        raise NotImplementedError("Subclasses must implement this function.")
+
     @classmethod
     def all_groups_equal(cls, m1, m2):
         equal = True
 
+        if m2 is None:
+            return False
+
         for group in cls.matching_groups:
             if (group not in m1.groupdict() or
                     group not in m2.groupdict() or
@@ -878,5 +889,5 @@ class CMeal(object):
 
         if match is None:
             return False
-        else:
-            return self.all_groups_equal(match, self.match)
+
+        return self.all_groups_equal(match, self.match)
diff --git a/src/caosadvancedtools/cfoods/h5.py b/src/caosadvancedtools/cfoods/h5.py
index 6c68edd3668fec957126aa3234a830aab98fcd25..cbf9d0baefa435b71eeaeefe63a9b018faabe7ea 100644
--- a/src/caosadvancedtools/cfoods/h5.py
+++ b/src/caosadvancedtools/cfoods/h5.py
@@ -124,6 +124,7 @@ class H5CFood(AbstractFileCFood):
         """CFood which consumes HDF5 files."""
         super().__init__(*args, **kwargs)
         self.h5file = None
+        self.identifiable_root = None
         self.root_name = "root"
         self.hdf5Container = db.Container()
         self.em = EntityMapping()
diff --git a/src/caosadvancedtools/converter/labfolder_api.py b/src/caosadvancedtools/converter/labfolder_api.py
index a29d965b1598285105a06871ee1017adfdf4e222..cf57c0155a3b3970834abb2fc1058215ef7ecba8 100644
--- a/src/caosadvancedtools/converter/labfolder_api.py
+++ b/src/caosadvancedtools/converter/labfolder_api.py
@@ -28,7 +28,7 @@ import time
 import html2text
 
 import caosdb as db
-from labfolder.connection import configure_connection
+from labfolder.connection import configure_connection  # pylint: disable=import-error
 
 
 class Importer(object):
diff --git a/src/caosadvancedtools/crawler.py b/src/caosadvancedtools/crawler.py
index 82ada2fa02cd3b87415493e1c1d5499fc3dc8a82..87b91a52a6034e906766a56ded787416e5c0027d 100644
--- a/src/caosadvancedtools/crawler.py
+++ b/src/caosadvancedtools/crawler.py
@@ -66,6 +66,82 @@ def separated(text):
     return "-"*60 + "\n" + text
 
 
+def apply_list_of_updates(to_be_updated, update_flags={},
+                          update_cache=None, run_id=None):
+    """Updates the `to_be_updated` Container, i.e., pushes the changes to CaosDB
+    after removing possible duplicates. If a chace is provided, uauthorized
+    updates can be cached for further authorization.
+
+    Parameters
+    ----------
+    to_be_updated : db.Container
+        Container with the entities that will be updated.
+    update_flags : dict, optional
+        Dictionary of CaosDB server flags that will be used for the
+        update. Default is an empty dict.
+    update_cache : UpdateCache or None, optional
+        Cache in which the intended updates will be stored so they can be
+        authorized afterwards. Default is None.
+    run_id : String or None, optional
+        Id with which the pending updates are cached. Only meaningful if
+        `update_cache` is provided. Default is None.
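+
+    A usage sketch, mirroring the call from within the `Crawler` class
+    (`cfood` being the CFood whose pending updates shall be applied)::
+
+        apply_list_of_updates(cfood.to_be_updated, cfood.update_flags,
+                              update_cache=self.update_cache,
+                              run_id=self.run_id)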
+    """
+
+    if len(to_be_updated) == 0:
+        return
+
+    get_ids_for_entities_with_names(to_be_updated)
+
+    # remove duplicates
+    tmp = db.Container()
+
+    for el in to_be_updated:
+        if el not in tmp:
+            tmp.append(el)
+
+    to_be_updated = tmp
+
+    info = "UPDATE: updating the following entities\n"
+
+    baseurl = db.configuration.get_config()["Connection"]["url"]
+
+    def make_clickable(txt, id):
+        return "<a href='{}/Entity/{}'>{}</a>".format(baseurl, id, txt)
+
+    for el in to_be_updated:
+        info += str("\t" + make_clickable(el.name, el.id)
+                    if el.name is not None
+                    else "\t" + make_clickable(str(el.id), el.id))
+        info += "\n"
+    logger.info(info)
+
+    logger.debug(to_be_updated)
+    try:
+        if len(to_be_updated) > 0:
+            logger.info(
+                "Updating {} Records...".format(
+                    len(to_be_updated)))
+        guard.safe_update(to_be_updated, unique=False,
+                          flags=update_flags)
+    except FileNotFoundError as e:
+        logger.info("Cannot access {}. However, it might be needed for"
+                    " the correct execution".format(e.filename))
+    except ProhibitedException:
+        try:
+            update_cache.insert(to_be_updated, run_id)
+        except IntegrityError as e:
+            logger.warning(
+                "There were problems with the update of {}.".format(
+                    to_be_updated),
+                extra={"identifier": str(to_be_updated),
+                       "category": "update-cache"}
+            )
+            logger.debug(traceback.format_exc())
+            logger.debug(e)
+    except Exception as e:
+        DataModelProblems.evaluate_exception(e)
+
+
 class Crawler(object):
     def __init__(self, cfood_types, use_cache=False,
                  abort_on_exception=True, interactive=True, hideKnown=False,
@@ -203,6 +279,8 @@ class Crawler(object):
                     except DataInconsistencyError as e:
                         logger.debug(traceback.format_exc())
                         logger.debug(e)
+                        # TODO: Generally: in which cases should exceptions be raised? When is
+                        # errors_occured set to True? The expected behavior must be documented.
                     except Exception as e:
                         try:
                             DataModelProblems.evaluate_exception(e)
@@ -318,7 +396,11 @@ class Crawler(object):
                 self._cached_find_or_insert_identifiables(cfood.identifiables)
 
                 cfood.update_identifiables()
-                self.push_identifiables_to_CaosDB(cfood)
+                apply_list_of_updates(
+                    cfood.to_be_updated,
+                    cfood.update_flags,
+                    update_cache=self.update_cache,
+                    run_id=self.run_id)
             except FileNotFoundError as e:
                 logger.info("Cannot access {}. However, it might be needed for"
                             " the correct execution".format(e.filename))
@@ -516,64 +598,8 @@ carefully and if the changes are ok, click on the following link:
             subject="Crawler Update",
             body=text)
 
-    def push_identifiables_to_CaosDB(self, cfood):
-        """
-        Updates the to_be_updated Container, i.e. pushes the changes to CaosDB
-        """
-
-        if len(cfood.to_be_updated) == 0:
-            return
-
-        get_ids_for_entities_with_names(cfood.to_be_updated)
-
-        # remove duplicates
-        tmp = db.Container()
-
-        for el in cfood.to_be_updated:
-            if el not in tmp:
-                tmp.append(el)
-
-        cfood.to_be_updated = tmp
-
-        info = "UPDATE: updating the following entities\n"
-
-        baseurl = db.configuration.get_config()["Connection"]["url"]
-        for el in cfood.to_be_updated:
-            def make_clickable(txt, id):
-                return "<a href='{}/Entity/{}'>{}</a>".format(baseurl, id, txt)
-            info += str("\t" + make_clickable(el.name, el.id)
-                        if el.name is not None
-                        else "\t" + make_clickable(str(el.id), el.id))
-            info += "\n"
-        logger.info(info)
-
-        logger.debug(cfood.to_be_updated)
-        try:
-            if len(cfood.to_be_updated) > 0:
-                logger.info(
-                    "Updating {} Records...".format(
-                        len(cfood.to_be_updated)))
-            guard.safe_update(cfood.to_be_updated, unique=False,
-                              flags=cfood.update_flags)
-        except FileNotFoundError as e:
-            logger.info("Cannot access {}. However, it might be needed for"
-                        " the correct execution".format(e.filename))
-        except ProhibitedException:
-            try:
-                self.update_cache.insert(cfood.to_be_updated, self.run_id)
-            except IntegrityError as e:
-                logger.warning(
-                    "There were problems with the update of {}.".format(
-                        cfood.to_be_updated),
-                    extra={"identifier": str(cfood.to_be_updated),
-                           "category": "update-cache"}
-                )
-                logger.debug(traceback.format_exc())
-                logger.debug(e)
-        except Exception as e:
-            DataModelProblems.evaluate_exception(e)
-
     # TODO remove static?
     @staticmethod
     def find_or_insert_identifiables(identifiables):
         """ Sets the ids of identifiables (that do not have already an id from the
diff --git a/src/caosadvancedtools/models/data_model.py b/src/caosadvancedtools/models/data_model.py
index a4804dd0fb0300af9b166717f41f341a57b677d4..d9079e6196b4751ca86ba41275108330b946d57c 100644
--- a/src/caosadvancedtools/models/data_model.py
+++ b/src/caosadvancedtools/models/data_model.py
@@ -23,11 +23,23 @@
 # ** end header
 #
 from copy import deepcopy
+# TODO(fspreck) `typing.List` is needed for backwards compatibility with
+# Python < 3.9, but it is actually
+# [deprecated](https://docs.python.org/3/library/typing.html#typing.List), so
+# remove this when we drop support for old Python versions.
+from typing import List
 
 import caosdb as db
 from caosdb.apiutils import compare_entities, describe_diff
 
 
+CAOSDB_INTERNAL_PROPERTIES = [
+    "description",
+    "name",
+    "unit",
+]
+
+
 class DataModel(dict):
     """Provides tools for managing a data model.
 
@@ -68,14 +80,14 @@ class DataModel(dict):
         else:
             super().__init__(args)
 
-    def append(self, entity):
+    def append(self, entity: db.Entity):
         self[entity.name] = entity
 
-    def extend(self, entities):
+    def extend(self, entities: List[db.Entity]):
         for entity in entities:
             self.append(entity)
 
-    def sync_data_model(self, noquestion=False):
+    def sync_data_model(self, noquestion: bool = False, verbose: bool = True):
         """Synchronize this DataModel with a CaosDB instance.
 
         Updates existing entities from the CaosDB instance and inserts
@@ -100,44 +112,57 @@ class DataModel(dict):
         self.sync_ids_by_name(tmp_exist)
 
         if len(non_existing_entities) > 0:
-            print("New entities:")
+            if verbose:
+                print("New entities:")
 
-            for ent in non_existing_entities:
-                print(ent.name)
+                for ent in non_existing_entities:
+                    print(ent.name)
 
             if noquestion or str(input("Do you really want to insert those "
                                        "entities? [y/N] ")).lower() == "y":
                 non_existing_entities.insert()
                 self.sync_ids_by_name(non_existing_entities)
-                print("Updated entities.")
+                if verbose:
+                    print("Inserted entities.")
             else:
                 return
         else:
-            print("No new entities.")
+            if verbose:
+                print("No new entities.")
 
         if len(existing_entities) > 0:
-            print("Inspecting changes that will be made...")
+            if verbose:
+                print("Inspecting changes that will be made...")
             any_change = False
 
             for ent in existing_entities:
-                q = db.Query("FIND * with id={}".format(ent.id))
-                ref = q.execute(unique=True)
+                if ent.name in CAOSDB_INTERNAL_PROPERTIES:
+                    # Workaround for the usage of internal properties like
+                    # name via the extern keyword:
+                    ref = db.Property(name=ent.name).retrieve()
+                else:
+                    query = db.Query(f"FIND * with id={ent.id}")
+                    ref = query.execute(unique=True)
                 diff = (describe_diff(*compare_entities(ent, ref
                                                         ), name=ent.name))
 
                 if diff != "":
-                    print(diff)
+                    if verbose:
+                        print(diff)
                     any_change = True
 
             if any_change:
                 if noquestion or input("Do you really want to apply the above "
                                        "changes? [y/N]") == "y":
                     existing_entities.update()
-                    print("Synchronized existing entities.")
+                    if verbose:
+                        print("Synchronized existing entities.")
             else:
-                print("No differences found. No update")
+                if verbose:
+                    print("No differences found. No update")
         else:
-            print("No existing entities updated.")
+            if verbose:
+                print("No existing entities updated.")
 
     @staticmethod
     def get_existing_entities(entities):
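As a usage sketch for the new ``verbose`` flag (not part of the patch; assumes
a configured CaosDB connection and a model file named model.yml):

    from caosadvancedtools.models.parser import parse_model_from_yaml

    model = parse_model_from_yaml("model.yml")
    # noquestion=True skips the interactive prompts; verbose=False suppresses
    # the progress messages that used to be printed unconditionally.
    model.sync_data_model(noquestion=True, verbose=False)
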
diff --git a/src/caosadvancedtools/models/parser.py b/src/caosadvancedtools/models/parser.py
index e56a492fa3e9199a312d374a622770e7836f42cb..48fc1e722c4ce9e888b8b80dbb5f29595c2f6b26 100644
--- a/src/caosadvancedtools/models/parser.py
+++ b/src/caosadvancedtools/models/parser.py
@@ -1,3 +1,22 @@
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2022 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2022 Florian Spreckelsen <f.spreckelsen@indiscale.com>
+# Copyright (C) 2022 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
 """
 This module (and script) provides methods to read a DataModel from a YAML file.
 
@@ -16,16 +35,21 @@ not defined, simply the name can be supplied with no value.
 Parents can be provided under the 'inherit_from_xxxx' keywords. The value needs
 to be a list with the names. Here, NO NEW entities can be defined.
 """
+import json
 import re
 import sys
+import yaml
+
+from typing import List, Union
 
+import jsonschema
 import caosdb as db
-import yaml
 
-from .data_model import DataModel
+from .data_model import CAOSDB_INTERNAL_PROPERTIES, DataModel
 
 # Keywords which are allowed in data model descriptions.
-KEYWORDS = ["parent",
+KEYWORDS = ["parent",  # deprecated, use inherit_from_* instead:
+                       # https://gitlab.com/caosdb/caosdb-advanced-user-tools/-/issues/36
             "importance",
             "datatype",  # for example TEXT, INTEGER or REFERENCE
             "unit",
@@ -35,13 +59,24 @@ KEYWORDS = ["parent",
             "suggested_properties",
             "inherit_from_recommended",
             "inherit_from_suggested",
-            "inherit_from_obligatory", ]
+            "inherit_from_obligatory",
+            "role",
+            "value",
+            ]
 
+# TODO: check whether it's really ignored
 # These KEYWORDS are not forbidden as properties, but merely ignored.
 KEYWORDS_IGNORED = [
     "unit",
 ]
 
+JSON_SCHEMA_ATOMIC_TYPES = [
+    "string",
+    "boolean",
+    "integer",
+    "number"
+]
+
 
 def _get_listdatatype(dtype):
     """matches a string to check whether the type definition is a list
@@ -93,6 +128,14 @@ class YamlDefinitionError(RuntimeError):
         super().__init__(template.format(line))
 
 
+class JsonSchemaDefinitionError(RuntimeError):
+    # @author Florian Spreckelsen
+    # @date 2022-02-17
+    # @review Daniel Hornung 2022-02-18
+    def __init__(self, msg):
+        super().__init__(msg)
+
+
 def parse_model_from_yaml(filename):
     """Shortcut if the Parser object is not needed."""
     parser = Parser()
@@ -107,8 +150,40 @@ def parse_model_from_string(string):
     return parser.parse_model_from_string(string)
 
 
+def parse_model_from_json_schema(filename: str):
+    """Return a datamodel parsed from a json schema definition.
+
+    Parameters
+    ----------
+    filename : str
+        The path of the json schema file that is to be parsed
+
+    Returns
+    -------
+    out : DataModel
+        The datamodel generated from the input schema which then can be used for
+        synchronizing with CaosDB.
+
+    Note
+    ----
+    This is an experimental feature, see ``JsonSchemaParser`` for information
+    about the limitations of the current implementation.
+
+    """
+    # @author Florian Spreckelsen
+    # @date 2022-02-17
+    # @review Daniel Hornung 2022-02-18
+    parser = JsonSchemaParser()
+
+    return parser.parse_model_from_json_schema(filename)
+
+
 class Parser(object):
     def __init__(self):
+        """Initialize an empty parser object with an empty dictionary of
+        entities and an empty list of treated elements.
+
+        """
         self.model = {}
         self.treated = []
 
@@ -177,13 +252,14 @@ class Parser(object):
             ymlmodel["extern"] = []
 
         for name in ymlmodel["extern"]:
-            if db.execute_query("COUNT Property {}".format(name)) > 0:
-                self.model[name] = db.execute_query(
-                    "FIND Property WITH name={}".format(name), unique=True)
-
-            elif db.execute_query("COUNT RecordType {}".format(name)) > 0:
-                self.model[name] = db.execute_query(
-                    "FIND RecordType WITH name={}".format(name), unique=True)
+            if name in CAOSDB_INTERNAL_PROPERTIES:
+                self.model[name] = db.Property(name=name).retrieve()
+                continue
+            for role in ("Property", "RecordType", "Record", "File"):
+                if db.execute_query("COUNT {} {}".format(role, name)) > 0:
+                    self.model[name] = db.execute_query(
+                        "FIND {} WITH name={}".format(role, name), unique=True)
+                    break
             else:
                 raise Exception("Did not find {}".format(name))
 
@@ -235,6 +311,8 @@ class Parser(object):
         """ adds names of Properties and RecordTypes to the model dictionary
 
         Properties are also initialized.
+
+        name is the key of the yaml element and definition the value.
         """
 
         if name == "__line__":
@@ -258,9 +336,29 @@ class Parser(object):
             # and create the new property
             self.model[name] = db.Property(name=name,
                                            datatype=definition["datatype"])
+        elif (self.model[name] is None and isinstance(definition, dict)
+              and "role" in definition):
+            if definition["role"] == "RecordType":
+                self.model[name] = db.RecordType(name=name)
+            elif definition["role"] == "Record":
+                self.model[name] = db.Record(name=name)
+            elif definition["role"] == "File":
+                # TODO(fspreck) Implement files at some later point in time
+                raise NotImplementedError(
+                    "The definition of file objects is not yet implemented.")
+
+                # self.model[name] = db.File(name=name)
+            elif definition["role"] == "Property":
+                self.model[name] = db.Property(name=name)
+            else:
+                raise RuntimeError("Unknown role {} in definition of entity.".format(
+                    definition["role"]))
 
-        # add other definitions recursively
+        # for setting values of properties directly:
+        if not isinstance(definition, dict):
+            return
 
+        # add other definitions recursively
         for prop_type in ["recommended_properties",
                           "suggested_properties", "obligatory_properties"]:
 
@@ -284,7 +382,25 @@ class Parser(object):
                     raise
 
     def _add_to_recordtype(self, ent_name, props, importance):
-        """Add properties to a RecordType."""
+        """Add properties to a RecordType.
+
+        Parameters
+        ----------
+        ent_name : str
+          The name of the entity to which the properties shall be added.
+
+        props : dict [str -> dict or :doc:`Entity`]
+          The properties, indexed by their names.  Properties may be given as :doc:`Entity` objects
+          or as dictionaries.
+
+        importance
+          The importance as used in :doc:`Entity.add_property`.
+
+        Returns
+        -------
+        None
+
+        """
 
         for n, e in props.items():
             if n in KEYWORDS:
@@ -297,15 +413,28 @@ class Parser(object):
                 continue
             n = self._stringify(n)
 
-            if (isinstance(e, dict) and "datatype" in e
-                    and (_get_listdatatype(e["datatype"]) is not None)):
-                self.model[ent_name].add_property(
-                    name=n,
-                    importance=importance,
-                    datatype=db.LIST(_get_listdatatype(e["datatype"])))
+            if isinstance(e, dict):
+                if "datatype" in e and _get_listdatatype(e["datatype"]) is not None:
+                    # Reuse the existing datatype for lists.
+                    datatype = db.LIST(_get_listdatatype(e["datatype"]))
+                else:
+                    # Ignore a possible e["datatype"] here if it's not a list
+                    # since it has been treated in the definition of the
+                    # property (entity) already
+                    datatype = None
+                if "value" in e:
+                    value = e["value"]
+                else:
+                    value = None
+
             else:
-                self.model[ent_name].add_property(name=n,
-                                                  importance=importance)
+                value = e
+                datatype = None
+
+            self.model[ent_name].add_property(name=n,
+                                              value=value,
+                                              importance=importance,
+                                              datatype=datatype)
 
     def _inherit(self, name, prop, inheritance):
         if not isinstance(prop, list):
@@ -328,6 +457,10 @@ class Parser(object):
             if definition is None:
                 return
 
+            # for setting values of properties directly:
+            if not isinstance(definition, dict):
+                return
+
             if ("datatype" in definition
                     and definition["datatype"].startswith("LIST")):
 
@@ -344,6 +477,9 @@ class Parser(object):
                 if prop_name == "unit":
                     self.model[name].unit = prop
 
+                elif prop_name == "value":
+                    self.model[name].value = prop
+
                 elif prop_name == "description":
                     self.model[name].description = prop
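The new ``value`` keyword handled here, together with the ``role`` keyword
from the hunk above, allows setting property values and entity roles directly
in YAML; a minimal sketch (entity names are invented):

    from caosadvancedtools.models.parser import parse_model_from_string

    yml = (
        "Experiment:\n"
        "  recommended_properties:\n"
        "    temperature:\n"
        "      datatype: DOUBLE\n"
        "      value: 23.5\n"
        "TestRecord:\n"
        "  role: Record\n"
    )
    model = parse_model_from_string(yml)  # Experiment gets temperature=23.5
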
 
@@ -372,6 +508,10 @@ class Parser(object):
                 elif prop_name == "datatype":
                     continue
 
+                # role has already been used
+                elif prop_name == "role":
+                    continue
+
                 elif prop_name == "inherit_from_obligatory":
                     self._inherit(name, prop, db.OBLIGATORY)
                 elif prop_name == "inherit_from_recommended":
@@ -426,13 +566,14 @@ class Parser(object):
                              db.BOOLEAN]:
 
                     if is_list:
-                        value.datatype = db.LIST(db.__getattribute__(dtype))
+                        value.datatype = db.LIST(db.__getattribute__(dtype))  # pylint: disable=no-member
                     else:
-                        value.datatype = db.__getattribute__(dtype)
+                        value.datatype = db.__getattribute__(dtype)  # pylint: disable=no-member
 
                     continue
 
-                raise ValueError("Property {} has an unknown datatype: {}".format(value.name, value.datatype))
+                raise ValueError("Property {} has an unknown datatype: {}".format(
+                    value.name, value.datatype))
 
     def _set_recordtypes(self):
         """ properties are defined in first iteration; set remaining as RTs """
@@ -442,6 +583,202 @@ class Parser(object):
                 self.model[key] = db.RecordType(name=key)
 
 
+class JsonSchemaParser(Parser):
+    """Extends the yaml parser to read in datamodels defined in a json schema.
+
+    **EXPERIMENTAL:** While this class can already be used to create data models
+    from basic json schemas, there are the following limitations and missing
+    features:
+
+    * Due to limitations of json-schema itself, we currently do not support
+      inheritance in the imported data models
+    * The same goes for suggested properties of RecordTypes
+    * Currently, ``$defs`` and ``$ref`` in the input schema are not resolved.
+    * Already defined RecordTypes and (scalar) Properties can't be re-used as
+      list properties
+    * Reference properties that are named differently from the referenced RT
+      (although this is possible for lists of references)
+    * Values
+    * Roles
+    * The extern keyword from the yaml parser
+    * Currently, a json-schema cannot be transformed into a data model if its
+      root element isn't a RecordType (or Property) with ``title`` and ``type``.
+
+    """
+    # @author Florian Spreckelsen
+    # @date 2022-02-17
+    # @review Timm Fitschen 2022-02-30
+
+    def parse_model_from_json_schema(self, filename: str):
+        """Return a datamodel created from the definition in the json schema in
+        `filename`.
+
+        Parameters
+        ----------
+        filename : str
+            The path to the json-schema file containing the datamodel definition
+
+        Returns
+        -------
+        out : DataModel
+            The created DataModel
+        """
+        # @author Florian Spreckelsen
+        # @date 2022-02-17
+        # @review Timm Fitschen 2022-02-30
+        with open(filename, 'r') as schema_file:
+            model_dict = json.load(schema_file)
+
+        return self._create_model_from_dict(model_dict)
+
+    def _create_model_from_dict(self, model_dict: Union[dict, List[dict]]):
+        """Parse a dictionary read in from the model definition in a json schema and
+        return the Datamodel created from it.
+
+        Parameters
+        ----------
+        model_dict : dict or list[dict]
+            One or several dictionaries read in from a json-schema file
+
+        Returns
+        -------
+        out : DataModel
+            The datamodel defined in `model_dict`
+        """
+        # @review Timm Fitschen 2022-02-30
+        if isinstance(model_dict, dict):
+            model_dict = [model_dict]
+
+        for ii, elt in enumerate(model_dict):
+            if "title" not in elt:
+                raise JsonSchemaDefinitionError(
+                    f"Object {ii+1} is lacking the `title` keyword")
+            if "type" not in elt:
+                raise JsonSchemaDefinitionError(
+                    f"Object {ii+1} is lacking the `type` keyword")
+            # Check if this is a valid Json Schema
+            try:
+                jsonschema.Draft202012Validator.check_schema(elt)
+            except jsonschema.SchemaError as err:
+                raise JsonSchemaDefinitionError(
+                    f"Json Schema error in {elt['title']}:\n{str(err)}") from err
+            name = self._stringify(elt["title"], context=elt)
+            self._treat_element(elt, name)
+
+        return DataModel(self.model.values())
+
+    def _get_atomic_datatype(self, elt):
+        # @review Timm Fitschen 2022-02-30
+        if elt["type"] == "string":
+            if "format" in elt and elt["format"] in ["date", "date-time"]:
+                return db.DATETIME
+            else:
+                return db.TEXT
+        elif elt["type"] == "integer":
+            return db.INTEGER
+        elif elt["type"] == "number":
+            return db.DOUBLE
+        elif elt["type"] == "boolean":
+            return db.BOOLEAN
+        else:
+            raise JsonSchemaDefinitionError(f"Unknown atomic type in {elt}.")
+
+    def _treat_element(self, elt: dict, name: str):
+        # @review Timm Fitschen 2022-02-30
+        force_list = False
+        if name in self.model:
+            return self.model[name], force_list
+        if "type" not in elt:
+            # Each element must have a specific type
+            raise JsonSchemaDefinitionError(
+                f"`type` is missing in element {name}.")
+        if "enum" in elt:
+            ent = self._treat_enum(elt, name)
+        elif elt["type"] in JSON_SCHEMA_ATOMIC_TYPES:
+            ent = db.Property(
+                name=name, datatype=self._get_atomic_datatype(elt))
+        elif elt["type"] == "object":
+            ent = self._treat_record_type(elt, name)
+        elif elt["type"] == "array":
+            ent, force_list = self._treat_list(elt, name)
+        else:
+            raise NotImplementedError(
+                f"Cannot parse items of type '{elt['type']}' (yet).")
+        if "description" in elt and ent.description is None:
+            # There is a description and it hasn't been set by another
+            # treat_something function
+            ent.description = elt["description"]
+
+        self.model[name] = ent
+        return ent, force_list
+
+    def _treat_record_type(self, elt: dict, name: str):
+        # @review Timm Fitschen 2022-02-30
+        rt = db.RecordType(name=name)
+        if "required" in elt:
+            required = elt["required"]
+        else:
+            required = []
+        if "properties" in elt:
+            for key, prop in elt["properties"].items():
+                if "title" in prop:
+                    name = self._stringify(prop["title"])
+                else:
+                    name = self._stringify(key)
+                prop_ent, force_list = self._treat_element(prop, name)
+                importance = db.OBLIGATORY if key in required else db.RECOMMENDED
+                if not force_list:
+                    rt.add_property(prop_ent, importance=importance)
+                else:
+                    # Special case of rt used as a list property
+                    rt.add_property(prop_ent, importance=importance,
+                                    datatype=db.LIST(prop_ent))
+
+        if "description" in elt:
+            rt.description = elt["description"]
+        return rt
+
+    def _treat_enum(self, elt: dict, name: str):
+        # @review Timm Fitschen 2022-02-30
+        if "type" in elt and elt["type"] == "integer":
+            raise NotImplementedError(
+                "Integer-enums are not allowed until "
+                "https://gitlab.indiscale.com/caosdb/src/caosdb-server/-/issues/224 "
+                "has been fixed."
+            )
+        rt = db.RecordType(name=name)
+        for enum_elt in elt["enum"]:
+            rec = db.Record(name=self._stringify(enum_elt))
+            rec.add_parent(rt)
+            self.model[enum_elt] = rec
+
+        return rt
+
+    def _treat_list(self, elt: dict, name: str):
+        # @review Timm Fitschen 2022-02-30
+
+        if "items" not in elt:
+            raise JsonSchemaDefinitionError(
+                f"The definition of the list items is missing in {elt}.")
+        items = elt["items"]
+        if "enum" in items:
+            return self._treat_enum(items, name), True
+        if items["type"] in JSON_SCHEMA_ATOMIC_TYPES:
+            datatype = db.LIST(self._get_atomic_datatype(items))
+            return db.Property(name=name, datatype=datatype), False
+        if items["type"] == "object":
+            if "title" not in items or self._stringify(items["title"]) == name:
+                # Property is RecordType
+                return self._treat_record_type(items, name), True
+            else:
+                # List property will be an entity of its own with a name
+                # different from the referenced RT
+                ref_rt = self._treat_record_type(
+                    items, self._stringify(items["title"]))
+                self.model[ref_rt.name] = ref_rt
+                return db.Property(name=name, datatype=db.LIST(ref_rt)), False
+
+
 if __name__ == "__main__":
     model = parse_model_from_yaml('data_model.yml')
     print(model)
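A hedged end-to-end sketch of the new JsonSchemaParser (schema content and
file name are invented; note the experimental limitations listed in the class
docstring):

    import json
    from caosadvancedtools.models.parser import parse_model_from_json_schema

    schema = {
        "title": "Person",
        "type": "object",
        "required": ["lastName"],
        "properties": {
            "firstName": {"type": "string"},
            "lastName": {"type": "string"},
            "birthdate": {"type": "string", "format": "date"},
        },
    }
    with open("person_schema.json", "w") as fi:
        json.dump(schema, fi)

    # Person becomes a RecordType with an obligatory lastName (TEXT), a
    # recommended firstName (TEXT) and a recommended birthdate (DATETIME).
    model = parse_model_from_json_schema("person_schema.json")
    print(model)
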
diff --git a/src/caosadvancedtools/pandoc_header_tools.py b/src/caosadvancedtools/pandoc_header_tools.py
index 262defd2e46ea1a6fbe80ab6c476bb8f311cc9a5..e746a26ac19c00de4ee7785399ef98478472340c 100644
--- a/src/caosadvancedtools/pandoc_header_tools.py
+++ b/src/caosadvancedtools/pandoc_header_tools.py
@@ -136,10 +136,10 @@ it is not at the beginning, it must be preceded by a blank line.
     # If a header section was found:
     if state == 2:
         headerlines = []
-        for l in textlines[found_1:found_2]:
-            l = l.replace("\t", "  ")
-            l = l.rstrip()
-            headerlines.append(l)
+        for line in textlines[found_1:found_2]:
+            line = line.replace("\t", "  ")
+            line = line.rstrip()
+            headerlines.append(line)
         # try:
         try:
             yaml_part = yaml.load("\n".join(headerlines), Loader=yaml.BaseLoader)
@@ -156,7 +156,7 @@ it is not at the beginning, it must be preceded by a blank line.
     else:
         print("Adding header in: {fn}".format(fn=filename))
         add_header(filename)
-        return _get_header(filename)
+        return get_header(filename)
 
 
 def save_header(filename, header_data):
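For context, the fixed recursive call above means that a file without a
header first gets one added and is then parsed again; a hedged usage sketch
(file name invented, return value as suggested by the surrounding code):

    from caosadvancedtools.pandoc_header_tools import get_header

    # Parses the YAML header of a markdown file; if none is found, a header
    # is added and the file is read again.
    header = get_header("notes.md")
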
diff --git a/src/caosadvancedtools/scifolder/__init__.py b/src/caosadvancedtools/scifolder/__init__.py
index d7d67937b42ca23173fc93d4e704411f33d80bc4..cf753cfc0b72bf95e34edea1301b96ed18f040d0 100644
--- a/src/caosadvancedtools/scifolder/__init__.py
+++ b/src/caosadvancedtools/scifolder/__init__.py
@@ -3,3 +3,4 @@ from .experiment_cfood import ExperimentCFood
 from .publication_cfood import PublicationCFood
 from .simulation_cfood import SimulationCFood
 from .software_cfood import SoftwareCFood
+from .result_table_cfood import ResultTableCFood
diff --git a/src/caosadvancedtools/scifolder/result_table_cfood.py b/src/caosadvancedtools/scifolder/result_table_cfood.py
new file mode 100644
index 0000000000000000000000000000000000000000..deaa2d00118659a9b177a05fe40b19a1793a16fb
--- /dev/null
+++ b/src/caosadvancedtools/scifolder/result_table_cfood.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# encoding: utf-8
+#
+# Copyright (C) 2019 Henrik tom Wörden
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import re
+
+import caosdb as db
+import pandas as pd
+from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_description,
+                                     assure_has_parent, assure_has_property,
+                                     assure_object_is_in_list, get_entity)
+from caosadvancedtools.read_md_header import get_header
+
+from ..cfood import assure_property_is, fileguide
+from .experiment_cfood import ExperimentCFood
+from .generic_pattern import date_pattern, date_suffix_pattern, project_pattern
+from .utils import parse_responsibles, reference_records_corresponding_to_files
+from .withreadme import DATAMODEL as dm
+from .withreadme import RESULTS, REVISIONOF, SCRIPTS, WithREADME, get_glob
+
+
+# TODO similarities with TableCrawler
+class ResultTableCFood(AbstractFileCFood):
+
+    # win_paths can be used to define fields that will contain windows style
+    # path instead of the default unix ones. Possible fields are:
+    # ["results", "revisionOf"]
+    win_paths = []
+    table_re = r"result_table_(?P<recordtype>.*).csv$"
+    property_name_re = re.compile(r"^(?P<pname>.+?)\s*(\[\s?(?P<unit>.*?)\s?\] *)?$")
+
+    @staticmethod
+    def name_beautifier(x): return x
+
+    def __init__(self,  *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.table = pd.read_csv(fileguide.access(self.crawled_path))
+
+    @staticmethod
+    def get_re():
+        return (".*/ExperimentalData/"+project_pattern + date_pattern +
+                date_suffix_pattern + ResultTableCFood.table_re)
+
+    def create_identifiables(self):
+        self.recs = []
+        self.experiment, self.project = (
+            ExperimentCFood.create_identifiable_experiment(self.match))
+
+        for idx, row in self.table.iterrows():
+            rec = db.Record()
+            rec.add_parent(self.match.group("recordtype"))
+
+            for col in self.table.columns[:2]:
+                match = re.match(ResultTableCFood.property_name_re, col)
+
+                if match.group("unit"):
+                    rec.add_property(match.group("pname"), row.loc[col], unit=match.group("unit"))
+                else:
+                    rec.add_property(match.group("pname"), row.loc[col])
+            self.identifiables.append(rec)
+            self.recs.append(rec)
+
+        self.identifiables.extend([self.project, self.experiment])
+
+    def update_identifiables(self):
+        for ii, (idx, row) in enumerate(self.table.iterrows()):
+            for col in row.index:
+                match = re.match(ResultTableCFood.property_name_re, col)
+                assure_property_is(self.recs[ii], match.group("pname"), row.loc[col], to_be_updated=self.to_be_updated)
+        assure_property_is(self.experiment, self.match.group("recordtype"),
+                           self.recs, to_be_updated=self.to_be_updated,
+                           datatype=db.LIST(self.match.group("recordtype")))
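A sketch of the naming conventions used by this new CFood (file and column
names are invented): the file name encodes the RecordType, and column headers
may carry a unit in square brackets.

    import re

    table_re = r"result_table_(?P<recordtype>.*).csv$"
    property_name_re = re.compile(
        r"^(?P<pname>.+?)\s*(\[\s?(?P<unit>.*?)\s?\] *)?$")

    print(re.search(table_re, "result_table_Measurement.csv")
          .group("recordtype"))                   # -> Measurement
    m = property_name_re.match("temperature [K]")
    print(m.group("pname"), m.group("unit"))      # -> temperature K
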
diff --git a/src/caosadvancedtools/scifolder/simulation_cfood.py b/src/caosadvancedtools/scifolder/simulation_cfood.py
index ae129e6a69ce25c6698b98124e81f8bc2921b472..c8f23f1485d7a1f64dcd940552051d2e1ec5bb07 100644
--- a/src/caosadvancedtools/scifolder/simulation_cfood.py
+++ b/src/caosadvancedtools/scifolder/simulation_cfood.py
@@ -88,22 +88,22 @@ class SimulationCFood(AbstractFileCFood, WithREADME):
                                  self.to_be_updated,
                                  datatype=db.LIST(db.REFERENCE))
 
-        if SOURCES.key in self.header:
+        if SOURCES.key in self.header:                         # pylint: disable=unsupported-membership-test
             reference_records_corresponding_to_files(
                     record=self.simulation,
                     recordtypes=["Experiment", "Publication", "Simulation",
                                  "Analysis"],
-                    globs=get_glob(self.header[SOURCES.key]),
+                    globs=get_glob(self.header[SOURCES.key]),  # pylint: disable=unsubscriptable-object
                     property_name=dm.sources,
                     path=self.crawled_path,
                     to_be_updated=self.to_be_updated)
         self.reference_files_from_header(record=self.simulation)
 
-        if REVISIONOF.key in self.header:
+        if REVISIONOF.key in self.header:                      # pylint: disable=unsupported-membership-test
             reference_records_corresponding_to_files(
                 record=self.simulation,
-                recordtypes=[dm.Software],
+                recordtypes=[dm.Software],                     # pylint: disable=no-member
                 property_name=dm.revisionOf,
-                globs=get_glob(self.header[dm.revisionOf]),
+                globs=get_glob(self.header[dm.revisionOf]),    # pylint: disable=unsubscriptable-object
                 path=self.crawled_path,
                 to_be_updated=self.to_be_updated)
diff --git a/src/caosadvancedtools/scifolder/withreadme.py b/src/caosadvancedtools/scifolder/withreadme.py
index 8a63e1f6d90ed4e78d01f76393cc72982cdc79d4..e1968ba49799827467c7ef93a7070b7f090010fb 100644
--- a/src/caosadvancedtools/scifolder/withreadme.py
+++ b/src/caosadvancedtools/scifolder/withreadme.py
@@ -121,12 +121,12 @@ class WithREADME(object):
     @property
     def header(self):
         if self._header is None:
-            if self.crawled_path.lower().endswith(".md"):
+            if self.crawled_path.lower().endswith(".md"):  # pylint: disable=no-member
                 self._header = get_md_header(
-                    fileguide.access(self.crawled_path))
-            elif self.crawled_path.lower().endswith(".xlsx"):
+                    fileguide.access(self.crawled_path))   # pylint: disable=no-member
+            elif self.crawled_path.lower().endswith(".xlsx"):  # pylint: disable=no-member
                 self._header = get_xls_header(
-                    fileguide.access(self.crawled_path))
+                    fileguide.access(self.crawled_path))       # pylint: disable=no-member
             else:
                 raise RuntimeError("Readme format not recognized.")
             self.convert_win_paths()
@@ -145,7 +145,7 @@ class WithREADME(object):
 
             globs = get_glob(self.header[field.key])
             files = get_files_referenced_by_field(
-                globs, prefix=os.path.dirname(self.crawled_path))
+                globs, prefix=os.path.dirname(self.crawled_path))  # pylint: disable=no-member
 
             description = [get_description(val) for val in
                            self.header[field.key]]
@@ -160,7 +160,7 @@ class WithREADME(object):
                 LOGGER.warn("ATTENTION: the field {} does not reference any "
                             "known files".format(field.key))
 
-            self.attached_filenames.extend(flat_list)
+            self.attached_filenames.extend(flat_list)  # pylint: disable=no-member
 
     def convert_path(self, el):
         """ converts the path in el to unix type
@@ -185,7 +185,7 @@ class WithREADME(object):
             return win_path_converter(el)
 
     def convert_win_paths(self):
-        for field in self.win_paths:
+        for field in self.win_paths:  # pylint: disable=no-member
             if field in self.header:
 
                 if isinstance(self.header[field], list):
@@ -245,7 +245,7 @@ class WithREADME(object):
                 references[ref_type],
                 record,
                 ref_type,
-                to_be_updated=self.to_be_updated,
+                to_be_updated=self.to_be_updated,  # pylint: disable=no-member
             )
 
     def reference_included_records(self, record, fields, to_be_updated):
@@ -255,16 +255,16 @@ class WithREADME(object):
 
         for field in fields:
 
-            if field.key not in self.header:
+            if field.key not in self.header:  # pylint: disable=no-member
                 continue
             included = []
 
-            for item in self.header[field.key]:
+            for item in self.header[field.key]:  # pylint: disable=no-member
                 if INCLUDE.key in item:
                     try:
                         included.extend(
                             get_entity_ids_from_include_file(
-                                os.path.dirname(self.crawled_path),
+                                os.path.dirname(self.crawled_path),  # pylint: disable=no-member
                                 item[INCLUDE.key]))
                     except ValueError:
                         al = logging.getLogger("caosadvancedtools")
diff --git a/src/caosadvancedtools/serverside/examples/example_script.py b/src/caosadvancedtools/serverside/examples/example_script.py
new file mode 100755
index 0000000000000000000000000000000000000000..d97d2d0d1f936b1c12e857d38fce043f0b514340
--- /dev/null
+++ b/src/caosadvancedtools/serverside/examples/example_script.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""An example script that illustrates how scripts can be used in conjunction
+with the generic_analysis module.
+
+The data model needed for this script is:
+
+Analysis:
+    sources: REFERENCE
+    scripts: FILE
+    results: REFERENCE
+    mean_value: DOUBLE
+
+Person:
+    Email: TEXT
+
+"""
+
+import argparse
+import logging
+import sys
+from argparse import RawTextHelpFormatter
+from datetime import datetime
+from typing import List
+
+import caosdb as db
+import matplotlib.pyplot as plt
+import numpy as np
+from caosadvancedtools.cfood import assure_property_is
+from caosadvancedtools.crawler import apply_list_of_updates
+from caosadvancedtools.guard import INSERT, UPDATE
+from caosadvancedtools.guard import global_guard as guard
+from caosadvancedtools.serverside.helper import send_mail as main_send_mail
+
+# logging should be done like this in order to allow the caller script to
+# direct the output.
+logger = logging.getLogger(__name__)
+
+# allow updates of existing entities
+guard.set_level(level=UPDATE)
+
+
+def send_mail(changes: List[db.Entity], recipient: str):
+    """ calls sendmail in order to send a mail to the curator about pending
+    changes
+
+    Parameters
+    ----------
+    changes: The CaosDB entities in the version after the update.
+    recipient: The person who shall receive the mail.
+    """
+
+    caosdb_config = db.configuration.get_config()
+    text = """Dear Curator,
+The following changes were done automatically.
+
+{changes}
+    """.format(changes="\n".join(changes))
+    try:
+        fro = caosdb_config["advancedtools"]["automated_updates.from_mail"]
+    except KeyError:
+        logger.error("Server Configuration is missing a setting for "
+                     "sending mails. The administrator should check "
+                     "'from_mail'.")
+        return
+
+    main_send_mail(
+        from_addr=fro,
+        to=recipient,
+        subject="Automated Update",
+        body=text)
+
+
+def main(args):
+
+    # auth_token is provided by the server side scripting API
+    # use this token for authentication when creating a new connection
+    if hasattr(args, "auth_token") and args.auth_token:
+        db.configure_connection(auth_token=args.auth_token)
+        logger.debug("Established connection")
+
+    try:
+        dataAnalysisRecord = db.Record(id=args.entityid).retrieve()
+    except db.TransactionError:
+        logger.error("Cannot retrieve Record with id={}".format(
+            args.entityid
+        ))
+        return 1
+
+    # The script may require certain information to exist. Here, we expect that
+    # a sources Property exists that references a numpy file.
+    # Similarly an InputDataSet could be used.
+
+    if (dataAnalysisRecord.get_property("sources") is None
+            or not db.apiutils.is_reference(
+                dataAnalysisRecord.get_property("sources"))):
+
+        raise RuntimeError("sources Reference must exist.")
+
+    logger.debug("Found required data.")
+
+    # ####### this core might be replaced by a call to another script ####### #
+    # Download the data
+    source_val = dataAnalysisRecord.get_property("sources").value
+    npobj = db.File(
+        id=(source_val[0]
+            if isinstance(source_val, list)
+            else source_val)).retrieve()
+    npfile = npobj.download()
+    logger.debug("Downloaded data.")
+    data = np.load(npfile)
+
+    # Plot data
+    filename = "hist.png"
+    plt.hist(data)
+    plt.savefig(filename)
+
+    mean = data.mean()
+    # ####################################################################### #
+
+    # Insert the result plot
+    fig = db.File(file=filename,
+                  path="/Analysis/results/"+str(datetime.now())+"/"+filename)
+    fig.insert()
+
+    # Add the mean value to the analysis Record
+    # If such a property existed before, it is changed if necessary. The old
+    # value will persist in the versioning of LinkAhead
+    to_be_updated = db.Container()
+    assure_property_is(
+        dataAnalysisRecord,
+        "mean_value",
+        mean,
+        to_be_updated=to_be_updated
+    )
+
+    # Add the file with the plot to the analysis Record
+    # If a file was already referenced, the new one will be referenced instead.
+    # The old file is being kept and is still referenced in an old version of
+    # the analysis Record.
+    assure_property_is(
+        dataAnalysisRecord,
+        "results",
+        [fig.id],
+        to_be_updated=to_be_updated
+    )
+
+    if len(to_be_updated) > 0:
+        print(to_be_updated)
+        apply_list_of_updates(to_be_updated, update_flags={})
+        logger.debug("Update successful.")
+        logger.info("The following Entities were changed:\n{}.".format(
+            [el.id for el in to_be_updated])
+        )
+
+        # Send mails to people that are referenced.
+        people = db.execute_query("FIND RECORD Person WHICH IS REFERENCED BY "
+                                  "{}".format(dataAnalysisRecord.id))
+        for person in people:
+            if person.get_property("Email") is not None:
+                send_mail([str(el) for el in to_be_updated],
+                          recipient=person.get_property("Email").value)
+        logger.debug("Mails sent.")
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description=__doc__,
+                                     formatter_class=RawTextHelpFormatter)
+    parser.add_argument("--auth-token",
+                        help="Token provided by the server for authentication")
+    parser.add_argument("entityid",
+                        help="The ID of the DataAnalysis Record.", type=int)
+
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    sys.exit(main(args))
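A hedged sketch of how this example script might be invoked through the
server-side scripting API (script path and record id are invented):

    from caosdb.utils.server_side_scripting import run_server_side_script

    ret = run_server_side_script("example_script.py", "2345")
    print(ret.code, ret.stdout, ret.stderr)
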
diff --git a/src/caosadvancedtools/serverside/generic_analysis.py b/src/caosadvancedtools/serverside/generic_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..85d0c860df75fce205c5eaad77731fc04eee9e40
--- /dev/null
+++ b/src/caosadvancedtools/serverside/generic_analysis.py
@@ -0,0 +1,213 @@
+# encoding: utf-8
+#
+# Copyright (C) 2021 Alexander Schlemmer <alexander.schlemmer@ds.mpg.de>
+# Copyright (C) 2021 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+#
+# See: https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/55
+
+# This source file is work in progress and currently untested.
+
+
+"""
+Variant I: A Python module implements a 'main' function which takes a Record
+as its argument, augments it (e.g. with 'results') and updates it.
+
+Variant II: A script receives an ID as its argument (e.g. on the command line)
+and updates the object on its own.
+
+Ideal case: idempotency; i.e. it does not matter whether the script has been
+called before. Another call may lead to an update (but only if e.g. parameters
+have changed).
+
+The called script can use and create arbitrary properties. BUT if the standard
+properties (InputDataSet, etc.) are used, the Record can be created easily.
+
+
+
+      "Analyze"       "Perform Analysis"
+   button on a Record   form in the WebUI
+   in the WebUI
+         |               |
+         |               |
+         v               v
+     tiny script which creates a
+     DataAnalysis stub
+          |
+          |
+          v
+    execute_script routine -->  analysis script
+    receives the stub and,      uses functions to perform updates
+    if given, the Python        where necessary, sends email
+    module name
+         ^
+         |
+         |
+    cron job finds outdated
+    DataAnalysis
+
+
+The analysis script performs the update:
+    - it is flexible which changes are made (e.g. several Records)
+    - dedicated functions should be used
+    - logging and notification must happen inside the script
+    - the script can be called with subprocess (alternatively, insert an
+      incomplete DataAnalysis)
+
+
+# Features
+    - send email on insert or update
+    - short info: "Create XY Analysis" can presumably be generated
+      automatically
+    - debug info: should be available optionally/in case of errors
+    - the script/software version should be stored
+
+
+Outlook: the part of the called scripts that interacts with LinkAhead might in
+future be replaced by the Crawler. The working directory would be copied to the
+file server and then crawled.
+"""
+
+import argparse
+import importlib
+import logging
+import os
+import sys
+
+import caosdb as db
+from caosdb.utils.server_side_scripting import run_server_side_script
+
+logger = logging.getLogger(__name__)
+
+
+def check_referenced_script(record: db.Record):
+    """ return the name of a referenced script
+
+    If the supplied record does not have an appropriate Property, warnings are
+    logged.
+    """
+
+    if record.get_property("scripts") is None:
+        logger.warning("The following changed Record is missing the 'scripts' "
+                       "Property:\n{}".format(str(record)))
+
+        return
+
+    script_prop = record.get_property("scripts")
+
+    if not db.apiutils.is_reference(script_prop):
+        logger.warning("The 'scripts' Property of the following Record should "
+                       "reference a File:\n{}".format(str(record)))
+
+        return
+
+    script = db.execute_query("FIND ENTITY WITH id={}".format(
+        script_prop.value[0] if isinstance(script_prop.value, list)
+        else script_prop.value), unique=True)
+
+    if (not isinstance(script, db.File)):
+        logger.warning("The 'scripts' Property of the Record {} should "
+                       "reference a File. Entity {} is not a File".format(
+                           record.id, script_prop.value))
+
+        return
+
+    script_name = os.path.basename(script.path)
+
+    return script_name
+
+
+def call_script(script_name: str, record_id: int):
+    ret = run_server_side_script(script_name, record_id)
+
+    if ret.code != 0:
+        logger.error("Script failed!")
+        logger.debug(ret.stdout)
+        logger.error(ret.stderr)
+    else:
+        logger.debug(ret.stdout)
+        logger.debug(ret.stderr)
+
+
+def run(dataAnalysisRecord: db.Record):
+    """run a data analysis script.
+
+    There are two options:
+    1. A python script installed as a pip package.
+    2. A generic script that can be executed on the command line.
+
+    Using a python package:
+    It should be located in a plugin package and implement at least
+    a main function that takes a DataAnalysisRecord as a single argument.
+    The script may perform changes to the Record and insert and update
+    Entities.
+
+    Using a generic script:
+    The only argument that is supplied to the script is the ID of the
+    dataAnalysisRecord. Apart from the different argument, everything that is
+    said about the python package holds here.
+    """
+
+    if dataAnalysisRecord.get_property("scripts") is not None:
+        script_name = check_referenced_script(dataAnalysisRecord)
+        logger.debug(
+            "Found 'scripts'. Call script '{}' in separate process".format(
+                script_name)
+            )
+        call_script(script_name, dataAnalysisRecord.id)
+        logger.debug(
+            "Script '{}' done.\n-----------------------------------".format(
+                script_name))
+
+    if dataAnalysisRecord.get_property("Software") is not None:
+        mod = dataAnalysisRecord.get_property("Software").value
+        logger.debug(
+            "Found 'Software'. Call '{}' as Python module".format(
+                mod)
+            )
+        m = importlib.import_module(mod)
+
+        m.main(dataAnalysisRecord)
+        logger.debug(
+            "'main' function of  Python module '{}' done"
+            ".\n-----------------------------------".format(mod))
+
+
+def _parse_arguments():
+    """ Parses the command line arguments.  """
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument("--module", help="The name of the Python module to run.")
+    parser.add_argument("--inputset", help="The id of an input dataset.")
+    parser.add_argument("--parameterset", help="The id of a parameter record.")
+
+    return parser.parse_args()
+
+
+def main():
+    """ This is for testing only. """
+    args = _parse_arguments()
+
+    dataAnalysisRecord = db.Record()
+    dataAnalysisRecord.add_property(name="InputDataSet", value=args.inputset)
+    dataAnalysisRecord.add_property(name="ParameterSet", value=args.parameterset)
+    dataAnalysisRecord.add_property(name="Software", value=args.module)
+
+    dataAnalysisRecord.insert()
+    run(dataAnalysisRecord)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
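A minimal usage sketch for the run() dispatcher above (the record id is
invented; the record should have a "scripts" File property or a "Software"
module-name property):

    import caosdb as db
    from caosadvancedtools.serverside.generic_analysis import run

    rec = db.Record(id=2345).retrieve()
    run(rec)
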
diff --git a/src/caosadvancedtools/serverside/helper.py b/src/caosadvancedtools/serverside/helper.py
index 19efc9ed2b3e99e17eb28f5c87b0a6dbc0c47499..ba75739e0fdc0a83f235db6920471afb196f4246 100644
--- a/src/caosadvancedtools/serverside/helper.py
+++ b/src/caosadvancedtools/serverside/helper.py
@@ -390,11 +390,11 @@ def send_mail(from_addr, to, subject, body, cc=None, bcc=None,
     else:
         caosdb_config = db.configuration.get_config()
 
-        if not "Misc" in caosdb_config or not "sendmail" in caosdb_config["Misc"]:
+        if "Misc" not in caosdb_config or "sendmail" not in caosdb_config["Misc"]:
             err_msg = ("No sendmail executable configured. "
                        "Please configure `Misc.sendmail` "
                        "in your pycaosdb.ini.")
-            raise db.ConfigurationException(err_msg)
+            raise db.ConfigurationError(err_msg)
         sendmail = caosdb_config["Misc"]["sendmail"]
 
     # construct sendmail command
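The renamed exception above is raised when ``Misc.sendmail`` is not
configured; a sketch of how the setting is read (the executable path in the
comment is only an example):

    import caosdb as db

    # pycaosdb.ini is expected to contain something like
    #   [Misc]
    #   sendmail = /usr/sbin/sendmail
    cfg = db.configuration.get_config()
    print(cfg["Misc"]["sendmail"])
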
diff --git a/src/caosadvancedtools/serverside/model.yml b/src/caosadvancedtools/serverside/model.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2f5a9634a97e39da4c5b3a6dfe1bf0c587863231
--- /dev/null
+++ b/src/caosadvancedtools/serverside/model.yml
@@ -0,0 +1,15 @@
+# Parent of all datasets which are used as input to or output from
+# analysis scripts
+Dataset:
+
+# Parent of all parametersets which are used as input for analysis scripts
+ParameterSet:
+
+DataAnalysis:
+  recommended_properties:
+    InputDataset:
+      datatype: Dataset
+    OutputDataset:
+      datatype: Dataset
+    ParameterSet:
+    date:
\ No newline at end of file
diff --git a/src/caosadvancedtools/serverside/sync.py b/src/caosadvancedtools/serverside/sync.py
new file mode 100755
index 0000000000000000000000000000000000000000..04283a15ba7919af6027b53217ffb69355ddfc6f
--- /dev/null
+++ b/src/caosadvancedtools/serverside/sync.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+# Sync data model for generic data analysis method
+# A. Schlemmer, 09/2021
+
+from caosadvancedtools.models import parser
+model = parser.parse_model_from_yaml("model.yml")
+model.sync_data_model()
diff --git a/src/caosadvancedtools/table_export.py b/src/caosadvancedtools/table_export.py
index bed0edc97a794dd83b2bdd7b1c0449c710c18d3f..056207a76fa01357e2269cd4cb8e9a09905d5d90 100644
--- a/src/caosadvancedtools/table_export.py
+++ b/src/caosadvancedtools/table_export.py
@@ -308,7 +308,7 @@ class BaseTableExporter(object):
                         " was specified but no record is given."
                     )
                 else:
-                    if not "selector" in d:
+                    if "selector" not in d:
                         d["selector"] = d[QUERY].strip().split(" ")[1]
             # guess find function and insert if existing
             else:
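For illustration, the fallback above derives the selector from the second
word of the query:

    query = "FIND Experiment WITH date=2022-02-17"
    selector = query.strip().split(" ")[1]
    print(selector)  # -> Experiment
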
diff --git a/src/caosadvancedtools/table_importer.py b/src/caosadvancedtools/table_importer.py
index fee39ced2ed47da9d85d9430cce2d18e9a4023ca..1f515e78e3ddbd198fa0336589a359ba9154f038 100755
--- a/src/caosadvancedtools/table_importer.py
+++ b/src/caosadvancedtools/table_importer.py
@@ -156,6 +156,9 @@ def win_path_converter(val):
     checks whether the value looks like a windows path and converts it to posix
     """
 
+    if val == "":
+        return val
+
     if not check_win_path(val):
         raise ValueError(
             "Field should be a Windows path, but is\n'{}'.".format(val))
@@ -202,27 +205,33 @@ def string_in_list(val, options, ignore_case=True):
     return val
 
 
-class TableImporter(object):
+class TableImporter():
+    """Abstract base class for importing data from tables.
+    """
+
     def __init__(self, converters, obligatory_columns=None, unique_keys=None,
                  datatypes=None):
         """
-        converters: dict with column names as keys and converter functions as
-                    values
-                    This dict also defines what columns are required to exist
-                    throught the existing keys. The converter functions are
-                    applied to the cell values. They should also check for
-                    ValueErrors, such that a separate value check is not
-                    necessary.
-        obligatory_columns: list of column names, optional
-                            each listed column must not have missing values
-        unique_columns : list of column names that in
-                            combination must be unique; i.e. each row has a
-                            unique combination of values in those columns.
-        datatypes: dict with column names as keys and datatypes as values
-                   All non-null values will be checked whether they have the
-                   provided datatype.
-                   This dict also defines what columns are required to exist
-                   throught the existing keys.
+        Parameters
+        ----------
+        converters : dict
+          Dict with column names as keys and converter functions as values. The keys of this dict
+          also define which columns are required to exist. The converter functions are applied to
+          the cell values. They should also check for ValueErrors, such that a separate value
+          check is not necessary.
+
+        obligatory_columns : list, optional
+          List of column names, each listed column must not have missing values.
+
+        unique_keys : list, optional
+          List of column names that in combination must be unique: each row has a unique
+          combination of values in those columns.
+
+        datatypes : dict, optional
+          Dict with column names as keys and datatypes as values.  All non-null values will be
+          checked whether they have the provided datatype.  The keys of this dict also define
+          which columns are required to exist.
+
         """
 
         if converters is None:
@@ -244,11 +253,14 @@ class TableImporter(object):
         raise NotImplementedError()
 
     def check_columns(self, df, filename=None):
-        """
-        checks whether all required columns, i.e. columns for which converters
-        were defined exist.
+        """Check whether all required columns exist.
+
+        Required columns are columns for which converters are defined.
+
+        Raises
+        ------
+        DataInconsistencyError
 
-        Raises: DataInconsistencyError
         """
 
         for col in self.required_columns:
@@ -264,12 +276,11 @@ class TableImporter(object):
                 raise DataInconsistencyError(errmsg)
 
     def check_unique(self, df, filename=None):
-        """
-        Check whether value combinations that shall be unique for each row are
-        unique.
+        """Check whether value combinations that shall be unique for each row are unique.
 
         If a second row is found, that uses the same combination of values as a
         previous one, the second one is removed.
+
         """
         df = df.copy()
         uniques = []
@@ -296,13 +307,32 @@ class TableImporter(object):
 
         return df
 
-    def check_datatype(self, df, filename=None):
-        """
-        Check for each column whether non-null fields are have the correct
-        datatype.
-        """
+    def check_datatype(self, df, filename=None, strict=False):
+        """Check for each column whether non-null fields have the correct datatype.
+
+        .. note::
 
+          If columns are integer, but should be float, this method converts the respective columns
+          in place.
+
+        Parameters
+        ----------
+
+        strict: boolean, optional
+          If False (the default), try to convert columns, otherwise raise an error.
+
+        """
         for key, datatype in self.datatypes.items():
+            # Check for castable numeric types first: We unconditionally cast int to the default
+            # float, because CaosDB does not have different sizes anyway.
+            col_dtype = df.dtypes[key]
+            if not strict and not np.issubdtype(col_dtype, datatype):
+                issub = np.issubdtype
+                #  These special cases should be fine.
+                if issub(col_dtype, np.integer) and issub(datatype, np.floating):
+                    df[key] = df[key].astype(datatype)
+
+            # Now check each element
             for idx, val in df.loc[
                     pd.notnull(df.loc[:, key]), key].iteritems():
 
@@ -323,6 +353,11 @@ class TableImporter(object):
         Check in each row whether obligatory fields are empty or null.
 
         Rows that have missing values are removed.
+
+        Returns
+        -------
+        out : pandas.DataFrame
+          The input DataFrame with incomplete rows removed.
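+
+        Examples
+        --------
+        A sketch: rows in which an obligatory column (here the hypothetical
+        ``name``) is empty or null are dropped.
+
+        >>> importer = TableImporter(converters={}, obligatory_columns=["name"])
+        >>> df = pd.DataFrame({"name": ["a", None]})
+        >>> importer.check_missing(df)  # doctest: +SKIP
+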
         """
         df = df.copy()
 
@@ -359,22 +394,39 @@ class TableImporter(object):
 
         return df
 
-    def check_dataframe(self, df, filename):
+    def check_dataframe(self, df, filename=None, strict=False):
+        """Check if the dataframe conforms to the restrictions.
+
+        Checked restrictions are: Columns, data types, uniqueness requirements.
+
+        Parameters
+        ----------
+
+        df: pandas.DataFrame
+          The dataframe to be checked.
+
+        filename: string, optional
+          The file name, only used for output in case of problems.
+
+        strict: boolean, optional
+          If False (the default), try to convert columns where possible; if True, raise an
+          error on any datatype mismatch instead.
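+
+        Returns
+        -------
+        out : pandas.DataFrame
+          The input dataframe, possibly with columns converted and offending rows removed.
+
+        Examples
+        --------
+        A sketch with a hypothetical importer and dataframe; note that the checked
+        dataframe is returned and should be used in place of the input:
+
+        >>> df = importer.check_dataframe(df, filename="data.xlsx")  # doctest: +SKIP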
+        """
         self.check_columns(df, filename=filename)
         df = self.check_missing(df, filename=filename)
-        self.check_datatype(df, filename=filename)
+        self.check_datatype(df, filename=filename, strict=strict)
 
         if len(self.unique_keys) > 0:
             df = self.check_unique(df, filename=filename)
 
+        return df
+
 
 class XLSImporter(TableImporter):
     def read_file(self, filename, **kwargs):
         return self.read_xls(filename=filename, **kwargs)
 
     def read_xls(self, filename, **kwargs):
-        """
-        converts an xls file into a Pandas DataFrame.
+        """Convert an xls file into a Pandas DataFrame.
 
         The converters of the XLSImporter object are used.
 
@@ -405,7 +457,7 @@ class XLSImporter(TableImporter):
                        'category': "inconsistency"})
             raise DataInconsistencyError(*e.args)
 
-        self.check_dataframe(df, filename)
+        df = self.check_dataframe(df, filename)
 
         return df
 
@@ -422,7 +474,7 @@ class CSVImporter(TableImporter):
                        'category': "inconsistency"})
             raise DataInconsistencyError(*ve.args)
 
-        self.check_dataframe(df, filename)
+        df = self.check_dataframe(df, filename)
 
         return df
 
@@ -439,6 +491,6 @@ class TSVImporter(TableImporter):
                        'category': "inconsistency"})
             raise DataInconsistencyError(*ve.args)
 
-        self.check_dataframe(df, filename)
+        df = self.check_dataframe(df, filename)
 
         return df
diff --git a/src/doc/conf.py b/src/doc/conf.py
index 4ff5e33e958436fd35f80c24247f88eab348bd95..1e07336628b696a95bc821a462f3d78f3ae11df0 100644
--- a/src/doc/conf.py
+++ b/src/doc/conf.py
@@ -27,9 +27,9 @@ copyright = '2021, IndiScale GmbH'
 author = 'Daniel Hornung'
 
 # The short X.Y version
-version = '0.3.1'
+version = '0.3.2'
 # The full version, including alpha/beta/rc tags
-release = '0.3.1'
+release = '0.3.2'
 
 
 # -- General configuration ---------------------------------------------------
diff --git a/src/doc/yaml_interface.rst b/src/doc/yaml_interface.rst
index dcf4c5d6c7a674bd8d32d92df0a509e511af26f5..476e92829238a0fc9dac851c61790c022e9fcde9 100644
--- a/src/doc/yaml_interface.rst
+++ b/src/doc/yaml_interface.rst
@@ -50,7 +50,7 @@ This example defines 3 ``RecordType``s:
 - A Person with a ``firstName`` and a ``lastName`` (as recommended properties)
 - A ``LabbookEntry`` with multiple recommended properties of different data types
 - It is assumed that the server knows a RecordType or Property with the name 
-  "Textfile".
+  ``Textfile``.
 
 
 One major advantage of using this interface (in contrast to the standard python interface) is that properties can be defined and added to record types "on-the-fly". E.g. the three lines for ``firstName`` as sub entries of ``Person`` have two effects on CaosDB:
@@ -66,7 +66,8 @@ Note the difference between the three property declarations of ``LabbookEntry``:
 - ``responsible``: This defines and adds a property with name "responsible" to ``LabbookEntry``, which has a datatype ``Person``. ``Person`` is defined above.
 - ``firstName``: This defines and adds a property with the standard data type ``TEXT`` to record type ``Person``.
 
-If the data model depends on already existing parts, those can be added using the ``extern`` keyword.
+If the data model depends on record types or properties which already exist in CaosDB, those can be
+added using the ``extern`` keyword: ``extern`` takes a list of names of entities that already exist
+on the server.
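+
+A minimal sketch (reusing the ``Textfile`` name from above, which is assumed to
+already exist on the server):
+
+.. code-block:: yaml
+
+   extern:
+   - Textfile
+
+   LabbookEntry:
+     recommended_properties:
+       Textfile: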
 
 Datatypes
 ---------
diff --git a/tox.ini b/tox.ini
index 1b3cd4ef0d39955197448ace9fdf5d26ea6749b4..dde34b987b9b08bfdfc51a06dd46a9a0e0494f28 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist=py36, py37, py38, py39
+envlist=py36, py37, py38, py39, py310
 skip_missing_interpreters = true
 [testenv]
 deps=nose
@@ -11,3 +11,6 @@ deps=nose
     xlrd == 1.2
     h5py
 commands=py.test --cov=caosadvancedtools -vv {posargs}
+
+[flake8]
+max-line-length=100
diff --git a/unittests/create_filetree.py b/unittests/create_filetree.py
index 6f95618dbc834c3bc140163efdc90aa51c8d5248..f80b9681163859027bb8f8c7cd6b1387bf2d378d 100644
--- a/unittests/create_filetree.py
+++ b/unittests/create_filetree.py
@@ -42,8 +42,6 @@ def main(folder, dry=True):
         if not dry:
             os.mkdir(series_path)
         for date in [datetime.today()-timedelta(days=i)-timedelta(weeks=50*ii) for i in range(10)]:
-            #import IPython
-            # IPython.embed()
             exp_path = os.path.join(series_path, "Exp_"+str(date.date()))
             print("Exp: "+os.path.basename(exp_path))
             if not dry:
diff --git a/unittests/data/datatypes.xlsx b/unittests/data/datatypes.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..34fc4cf43092a68b630e0e04ebc43609b8a0b17b
Binary files /dev/null and b/unittests/data/datatypes.xlsx differ
diff --git a/unittests/json-schema-models/datamodel_atomic_properties.schema.json b/unittests/json-schema-models/datamodel_atomic_properties.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..3828f131180a839d5c9b8bc5aa1a1285717da723
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_atomic_properties.schema.json
@@ -0,0 +1,24 @@
+[
+    {
+        "title": "Dataset1",
+        "description": "Some description",
+        "type": "object",
+        "properties": {
+            "title": { "type": "string", "description": "full dataset title" },
+            "campaign": { "type": "string", "description": "FIXME" },
+            "number_prop": { "type": "number", "description": "Some float property" }
+        },
+        "required": [ "title", "number_prop" ]
+    },
+    {
+        "title": "Dataset2",
+        "type": "object",
+        "properties": {
+            "date_time": { "type": "string", "format": "date-time" },
+            "date": { "type": "string", "format": "date" },
+            "integer": { "type": "integer", "description": "Some integer property" },
+            "boolean": { "type": "boolean" },
+            "number_prop": { "type": "number", "description": "Some float property" }
+        }
+    }
+]
diff --git a/unittests/json-schema-models/datamodel_enum_prop.schema.json b/unittests/json-schema-models/datamodel_enum_prop.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..a14008d141606368519c0caadc30b16a1dc9d16d
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_enum_prop.schema.json
@@ -0,0 +1,16 @@
+{
+    "title": "Dataset",
+    "description": "Some description",
+    "type": "object",
+    "properties": {
+        "license": {
+            "type": "string",
+            "enum": ["CC-BY", "CC-BY-SA", "CC0", "restricted access"]
+        },
+        "number_enum": {
+            "type": "number",
+            "enum": [1.1, 2.2, 3.3]
+        }
+    },
+    "required": ["license"]
+}
diff --git a/unittests/json-schema-models/datamodel_int_enum_broken.schema.json b/unittests/json-schema-models/datamodel_int_enum_broken.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..159b84ac36c26325b59cdd25d2830152c4acdaaa
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_int_enum_broken.schema.json
@@ -0,0 +1,11 @@
+{
+    "title": "Dataset",
+    "description": "Some description",
+    "type": "object",
+    "properties": {
+        "int_enum": {
+            "type": "integer",
+            "enum": [1, 2, 3]
+        }
+    }
+}
diff --git a/unittests/json-schema-models/datamodel_list_properties.schema.json b/unittests/json-schema-models/datamodel_list_properties.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..b95f468a1c13f1912266e65f029654077ce6a14e
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_list_properties.schema.json
@@ -0,0 +1,46 @@
+{
+    "title": "Dataset",
+    "description": "Dataset with list (array) properties",
+    "type": "object",
+    "properties": {
+        "keywords": {
+            "type": "array",
+            "items": { "type": "string" }
+        },
+        "booleans": {
+            "type": "array",
+            "items": { "type": "boolean" }
+        },
+        "integers": {
+            "type": "array",
+            "items": { "type": "integer" }
+        },
+        "floats": {
+            "type": "array",
+            "items": { "type": "number" }
+        },
+        "datetimes": {
+            "type": "array",
+            "items": { "type": "string", "format": "date-time" }
+        },
+        "dates": {
+            "type": "array",
+            "items": { "type": "string", "format": "date" }
+        },
+        "reference": {
+            "type": "array",
+            "items": { "type": "object", "properties": {} }
+        },
+        "reference_with_name": {
+            "type": "array",
+            "items": { "type": "object", "title": "event", "properties": {} }
+        },
+        "license": {
+            "type": "array",
+            "items": {
+                "type": "string",
+                "enum": ["CC-BY", "CC-BY-SA", "CC0", "restricted access"]
+            }
+        }
+    }
+}
diff --git a/unittests/json-schema-models/datamodel_missing_property_type.schema.json b/unittests/json-schema-models/datamodel_missing_property_type.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..eac3cc563df587568c4e9610d72618610566beef
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_missing_property_type.schema.json
@@ -0,0 +1,7 @@
+{
+    "title": "Dataset",
+    "type": "object",
+    "properties": {
+        "method": { "description": "Missing property type" }
+    }
+}
diff --git a/unittests/json-schema-models/datamodel_references.schema.json b/unittests/json-schema-models/datamodel_references.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..6b79a9bcdbbd8beaf9974a600e9c5ff30cb513f4
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_references.schema.json
@@ -0,0 +1,24 @@
+{
+    "title": "Dataset",
+    "description": "",
+    "type": "object",
+    "properties": {
+        "event": {
+            "type": "object",
+            "properties": {
+                "longitude": {
+                    "type": "number"
+                },
+                "latitude": {
+                    "type": "number"
+                },
+                "location": {
+                    "type": "string",
+                    "description": "geographical location (e.g., North Sea; Espoo, Finland)"
+                }
+            },
+            "required": ["longitude", "latitude"]
+        }
+    },
+    "required": ["event"]
+}
diff --git a/unittests/json-schema-models/datamodel_required_no_list.schema.json b/unittests/json-schema-models/datamodel_required_no_list.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..f3697a71320bc8baf05156bec2c71f3915378654
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_required_no_list.schema.json
@@ -0,0 +1,7 @@
+{
+    "title": "Dataset",
+    "description": "",
+    "type": "object",
+
+    "required": "Dataset"
+}
diff --git a/unittests/json-schema-models/datamodel_string_properties.schema.json b/unittests/json-schema-models/datamodel_string_properties.schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..62bc0a2a4250050e5433038bf61e7c9692bb0200
--- /dev/null
+++ b/unittests/json-schema-models/datamodel_string_properties.schema.json
@@ -0,0 +1,14 @@
+{
+    "title": "Dataset",
+    "description": "",
+    "type": "object",
+
+    "properties": {
+        "title": { "type": "string", "description": "full dataset title" },
+        "campaign": { "type": "string", "description": "FIXME" },
+        "method": { "type": "string", "description": "FIXME" },
+        "titled": { "title": "The title", "type": "string", "description": "None" }
+    },
+
+    "required": ["title"]
+}
diff --git a/unittests/test.csv b/unittests/test.csv
new file mode 100644
index 0000000000000000000000000000000000000000..a29679afce78089f3cdd4e5e388262456668cd90
--- /dev/null
+++ b/unittests/test.csv
@@ -0,0 +1,3 @@
+temperature [°C] ,depth 
+234.4,3.0
+344.6,5.1
diff --git a/unittests/test_cfood.py b/unittests/test_cfood.py
index f5125166106c4bace21121d58a025886f9b132b9..7055bc7c51962c0cbc487f29bcdacb391218a7d3 100644
--- a/unittests/test_cfood.py
+++ b/unittests/test_cfood.py
@@ -48,13 +48,14 @@ class ExampleCFoodMeal(AbstractFileCFood, CMeal):
         CMeal.__init__(self)
 
     @classmethod
-    def match_item(cls, item):
+    def match_item(cls, path):
         """ standard match_match, but returns False if a suitable cfood exists """
 
-        if cls.has_suitable_cfood(item):
+        if cls.has_suitable_cfood(path):
             return False
 
-        return re.match(cls.get_re(), item) is not None
+        return re.match(cls.get_re(), path) is not None
 
     def looking_for(self, crawled_file):
         """ standard looking_for, but returns True if the file matches all
diff --git a/unittests/test_generic_analysis.py b/unittests/test_generic_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1077b97ec58f80c8534c89d5fa5f57d8d815cb9
--- /dev/null
+++ b/unittests/test_generic_analysis.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""
+module description
+"""
+
+import caosdb as db
+from caosadvancedtools.serverside.generic_analysis import \
+    check_referenced_script
+
+from test_utils import BaseMockUpTest
+
+
+class TestGAnalysisNoFile(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.entities = (
+            '<Response><Record name="script.py" path="/some/path/script.py'
+            '" id="1234"/><Query string="find record" results="1">'
+            '</Query></Response>')
+
+    def test_check_referenced_script(self):
+        # missing scripts
+        self.assertIsNone(check_referenced_script(db.Record()))
+        # wrong datatype
+        self.assertIsNone(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.TEXT)))
+        # wrong value
+        self.assertIsNone(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.REFERENCE, value="hallo")))
+        # no file
+        self.assertIsNone(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.REFERENCE, value="1234")))
+
+
+class TestGAnalysisFile(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.entities = (
+            '<Response><File name="script.py" path="/some/path/script.py'
+            '" id="1234"/><Query string="find record" results="1">'
+            '</Query></Response>')
+
+    def test_check_referenced_script(self):
+        # all correct
+        self.assertEqual(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.REFERENCE, value="1234")), "script.py")
diff --git a/unittests/test_json_schema_model_parser.py b/unittests/test_json_schema_model_parser.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b44f6efa1cda19c04ee13a6a50b04cefbff9177
--- /dev/null
+++ b/unittests/test_json_schema_model_parser.py
@@ -0,0 +1,342 @@
+#
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2022 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2022 Florian Spreckelsen <f.spreckelsen@indiscale.com>
+# Copyright (C) 2022 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify it under
+# the terms of the GNU Affero General Public License as published by the Free
+# Software Foundation, either version 3 of the License, or (at your option) any
+# later version.
+#
+# This program is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
+# details.
+#
+# You should have received a copy of the GNU Affero General Public License along
+# with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+# @review Daniel Hornung 2022-02-18
+
+import os
+import pytest
+
+import caosdb as db
+from caosadvancedtools.models.parser import (parse_model_from_json_schema,
+                                             JsonSchemaDefinitionError)
+
+FILEPATH = os.path.join(os.path.dirname(
+    os.path.abspath(__file__)), 'json-schema-models')
+
+
+def test_rt_with_string_properties():
+    """Test datamodel parsing of datamodel_string_properties.schema.json"""
+    # @author Florian Spreckelsen
+    # @date 2022-02-17
+
+    model = parse_model_from_json_schema(
+        os.path.join(FILEPATH,
+                     "datamodel_string_properties.schema.json"))
+    assert "Dataset" in model
+    dataset_rt = model["Dataset"]
+    assert isinstance(dataset_rt, db.RecordType)
+    assert dataset_rt.name == "Dataset"
+    assert dataset_rt.description == ""
+    assert len(dataset_rt.get_properties()) == 4
+
+    assert dataset_rt.get_property("title") is not None
+    assert dataset_rt.get_property("campaign") is not None
+    assert dataset_rt.get_property("method") is not None
+
+    assert dataset_rt.get_property("The title") is not None
+    assert dataset_rt.get_property("titled") is None
+
+    title_prop = dataset_rt.get_property("title")
+    assert title_prop.datatype == db.TEXT
+    assert dataset_rt.get_importance(title_prop.name) == db.OBLIGATORY
+
+    campaign_prop = dataset_rt.get_property("campaign")
+    assert campaign_prop.datatype == db.TEXT
+    assert dataset_rt.get_importance(campaign_prop.name) == db.RECOMMENDED
+
+    method_prop = dataset_rt.get_property("method")
+    assert method_prop.datatype == db.TEXT
+    assert dataset_rt.get_importance(method_prop.name) == db.RECOMMENDED
+
+
+def test_datamodel_with_atomic_properties():
+    """Test read-in of two separate record types with atomic-typed properties."""
+    # @author Florian Spreckelsen
+    # @date 2022-02-18
+
+    model = parse_model_from_json_schema(os.path.join(
+        FILEPATH, "datamodel_atomic_properties.schema.json"))
+    assert "Dataset1" in model
+    assert "Dataset2" in model
+
+    rt1 = model["Dataset1"]
+    assert isinstance(rt1, db.RecordType)
+    assert rt1.name == "Dataset1"
+    assert rt1.description == "Some description"
+    assert len(rt1.get_properties()) == 3
+
+    assert rt1.get_property("title") is not None
+    assert rt1.get_property("campaign") is not None
+    assert rt1.get_property("number_prop") is not None
+
+    title_prop = rt1.get_property("title")
+    assert title_prop.datatype == db.TEXT
+    assert rt1.get_importance(title_prop.name) == db.OBLIGATORY
+
+    campaign_prop = rt1.get_property("campaign")
+    assert campaign_prop.datatype == db.TEXT
+    assert rt1.get_importance(campaign_prop.name) == db.RECOMMENDED
+
+    float_prop = rt1.get_property("number_prop")
+    assert float_prop.datatype == db.DOUBLE
+    assert rt1.get_importance(float_prop.name) == db.OBLIGATORY
+
+    rt2 = model["Dataset2"]
+    assert isinstance(rt2, db.RecordType)
+    assert rt2.name == "Dataset2"
+    assert not rt2.description
+    assert len(rt2.get_properties()) == 5
+
+    date_prop = rt2.get_property("date")
+    assert date_prop.datatype == db.DATETIME
+
+    datetime_prop = rt2.get_property("date_time")
+    assert datetime_prop.datatype == db.DATETIME
+
+    int_prop = rt2.get_property("integer")
+    assert int_prop.datatype == db.INTEGER
+    assert int_prop.description == "Some integer property"
+
+    bool_prop = rt2.get_property("boolean")
+    assert bool_prop.datatype == db.BOOLEAN
+
+    float_prop2 = rt2.get_property("number_prop")
+    assert float_prop.datatype == float_prop2.datatype
+
+
+def test_required_no_list():
+    """Exception must be raised when "required" is not a list."""
+    # @author Daniel Hornung
+    # @date 2022-02-18
+
+    with pytest.raises(JsonSchemaDefinitionError) as err:
+        parse_model_from_json_schema(
+            os.path.join(FILEPATH,
+                         "datamodel_required_no_list.schema.json"))
+    assert "'Dataset' is not of type 'array'" in str(err.value)
+
+
+def test_missing_property_type():
+    """Exception must be raised when "type" is missing."""
+    with pytest.raises(JsonSchemaDefinitionError) as err:
+        parse_model_from_json_schema(
+            os.path.join(FILEPATH,
+                         "datamodel_missing_property_type.schema.json"))
+    assert "`type` is missing" in str(err.value)
+
+
+def test_enum():
+    """Enums are represented in references to records of a specific type."""
+    # @author Florian Spreckelsen
+    # @date 2022-03-16
+
+    model = parse_model_from_json_schema(os.path.join(
+        FILEPATH, "datamodel_enum_prop.schema.json"))
+    licenses = ["CC-BY", "CC-BY-SA", "CC0", "restricted access"]
+    for name in ["Dataset", "license"] + licenses:
+        assert name in model
+
+    assert isinstance(model["Dataset"], db.RecordType)
+    assert model["Dataset"].get_property("license") is not None
+    assert model["Dataset"].get_property("license").is_reference()
+    assert model["Dataset"].get_property("license").datatype.name == "license"
+    assert isinstance(model["license"], db.RecordType)
+
+    for name in licenses:
+        assert isinstance(model[name], db.Record)
+        assert model[name].name == name
+        assert len(model[name].parents) == 1
+        assert model[name].has_parent(model["license"])
+
+    # Also allow enums with non-string types
+    number_enums = ["1.1", "2.2", "3.3"]
+    for name in ["number_enum"] + number_enums:
+        assert name in model
+
+    assert isinstance(model["number_enum"], db.RecordType)
+    assert model["Dataset"].get_property("number_enum") is not None
+    assert model["Dataset"].get_property("number_enum").is_reference()
+    assert model["Dataset"].get_property(
+        "number_enum").datatype.name == "number_enum"
+
+    for name in number_enums:
+        assert isinstance(model[name], db.Record)
+        assert model[name].name == name
+        assert len(model[name].parents) == 1
+        assert model[name].has_parent(model["number_enum"])
+
+
+@pytest.mark.xfail(reason="Don't allow integer enums until https://gitlab.indiscale.com/caosdb/src/caosdb-server/-/issues/224 has been fixed")
+def test_int_enum():
+    """Check an enum property with type: integer"""
+    # @author Florian Spreckelsen
+    # @date 2022-03-22
+
+    model = parse_model_from_json_schema(os.path.join(
+        FILEPATH, "datamodel_int_enum_broken.schema.json"))
+    int_enums = ["1", "2", "3"]
+    for name in ["Dataset", "int_enum"] + int_enums:
+        assert name in model
+
+    assert isinstance(model["Dataset"], db.RecordType)
+    assert model["Dataset"].get_property("int_enum") is not None
+    assert model["Dataset"].get_property("int_enum").is_reference
+    assert model["Dataset"].get_property(
+        "int_enum").datatype.name == "int_enum"
+    assert isinstance(model["int_enum"], db.RecordType)
+
+    for name in int_enums:
+        assert isinstance(model[name], db.Record)
+        assert model[name].name == name
+        assert len(model[name].parents) == 1
+        assert model[name].has_parent(model["int_enum"])
+
+
+def test_references():
+    """Test reference properties"""
+    # @author Florian Spreckelsen
+    # @date 2022-03-17
+
+    model = parse_model_from_json_schema(os.path.join(
+        FILEPATH, "datamodel_references.schema.json"))
+    for name in ["Dataset", "event", "longitude", "latitude", "location"]:
+        assert name in model
+
+    assert isinstance(model["Dataset"], db.RecordType)
+    assert model["Dataset"].get_property("event") is not None
+    assert model["Dataset"].get_importance("event") == db.OBLIGATORY
+    assert model["Dataset"].get_property("event").is_reference()
+    assert model["Dataset"].get_property("event").datatype.name == "event"
+
+    assert isinstance(model["event"], db.RecordType)
+    assert model["event"].get_property("longitude") is not None
+    assert model["event"].get_importance("longitude") == db.OBLIGATORY
+    assert model["event"].get_property("longitude").datatype == db.DOUBLE
+
+    assert model["event"].get_property("latitude") is not None
+    assert model["event"].get_importance("latitude") == db.OBLIGATORY
+    assert model["event"].get_property("latitude").datatype == db.DOUBLE
+
+    assert model["event"].get_property("location") is not None
+    assert model["event"].get_importance("location") == db.RECOMMENDED
+    assert model["event"].get_property("location").datatype == db.TEXT
+
+    assert isinstance(model["longitude"], db.Property)
+    assert model["longitude"].datatype == db.DOUBLE
+
+    assert isinstance(model["latitude"], db.Property)
+    assert model["latitude"].datatype == db.DOUBLE
+
+    assert isinstance(model["location"], db.Property)
+    assert model["location"].datatype == db.TEXT
+    assert model["location"].description == "geographical location (e.g., North Sea; Espoo, Finland)"
+
+
+def test_list():
+    """Test list properties with all possible datatypes."""
+    # @author Florian Spreckelsen
+    # @date 2022-03-17
+
+    model = parse_model_from_json_schema(os.path.join(
+        FILEPATH, "datamodel_list_properties.schema.json"))
+    licenses = ["CC-BY", "CC-BY-SA", "CC0", "restricted access"]
+    names = ["Dataset", "keywords", "booleans", "integers", "floats",
+             "datetimes", "dates", "reference", "reference_with_name", "event",
+             "license"]
+    for name in names + licenses:
+        assert name in model
+
+    dataset_rt = model["Dataset"]
+    assert dataset_rt.get_property("keywords") is not None
+    assert dataset_rt.get_property("keywords").datatype == db.LIST(db.TEXT)
+    assert isinstance(model["keywords"], db.Property)
+    assert model["keywords"].name == "keywords"
+    assert model["keywords"].datatype == db.LIST(db.TEXT)
+
+    assert dataset_rt.get_property("booleans") is not None
+    assert dataset_rt.get_property("booleans").datatype == db.LIST(db.BOOLEAN)
+    assert isinstance(model["booleans"], db.Property)
+    assert model["booleans"].name == "booleans"
+    assert model["booleans"].datatype == db.LIST(db.BOOLEAN)
+
+    assert dataset_rt.get_property("integers") is not None
+    assert dataset_rt.get_property("integers").datatype == db.LIST(db.INTEGER)
+    assert isinstance(model["integers"], db.Property)
+    assert model["integers"].name == "integers"
+    assert model["integers"].datatype == db.LIST(db.INTEGER)
+
+    assert dataset_rt.get_property("floats") is not None
+    assert dataset_rt.get_property("floats").datatype == db.LIST(db.DOUBLE)
+    assert isinstance(model["floats"], db.Property)
+    assert model["floats"].name == "floats"
+    assert model["floats"].datatype == db.LIST(db.DOUBLE)
+
+    assert dataset_rt.get_property("datetimes") is not None
+    assert dataset_rt.get_property(
+        "datetimes").datatype == db.LIST(db.DATETIME)
+    assert isinstance(model["datetimes"], db.Property)
+    assert model["datetimes"].name == "datetimes"
+    assert model["datetimes"].datatype == db.LIST(db.DATETIME)
+
+    assert dataset_rt.get_property("dates") is not None
+    assert dataset_rt.get_property(
+        "dates").datatype == db.LIST(db.DATETIME)
+    assert isinstance(model["dates"], db.Property)
+    assert model["dates"].name == "dates"
+    assert model["dates"].datatype == db.LIST(db.DATETIME)
+
+    # Simple reference list property
+    assert dataset_rt.get_property("reference") is not None
+    assert dataset_rt.get_property("reference").is_reference()
+    assert dataset_rt.get_property(
+        "reference").datatype == db.LIST("reference")
+    assert isinstance(model["reference"], db.RecordType)
+    assert model["reference"].name == "reference"
+    assert dataset_rt.get_property(
+        "reference").datatype == db.LIST(model["reference"])
+
+    # Reference list with name
+    assert dataset_rt.get_property("reference_with_name") is not None
+    assert dataset_rt.get_property("reference_with_name").is_reference()
+    assert dataset_rt.get_property(
+        "reference_with_name").datatype == db.LIST("event")
+    assert isinstance(model["event"], db.RecordType)
+    assert model["event"].name == "event"
+    assert dataset_rt.get_property(
+        "reference_with_name").datatype == db.LIST(model["event"])
+    assert isinstance(model["reference_with_name"], db.Property)
+    assert model["reference_with_name"].name == "reference_with_name"
+    assert model["reference_with_name"].datatype == db.LIST(model["event"])
+
+    # References to enum types
+    assert dataset_rt.get_property("license") is not None
+    assert dataset_rt.get_property("license").is_reference()
+    assert dataset_rt.get_property("license").datatype == db.LIST("license")
+    assert isinstance(model["license"], db.RecordType)
+    assert model["license"].name == "license"
+    assert dataset_rt.get_property(
+        "license").datatype == db.LIST(model["license"])
+
+    for name in licenses:
+        assert isinstance(model[name], db.Record)
+        assert model[name].name == name
+        assert len(model[name].parents) == 1
+        assert model[name].has_parent(model["license"])
diff --git a/unittests/test_result_table_cfood.py b/unittests/test_result_table_cfood.py
new file mode 100644
index 0000000000000000000000000000000000000000..3341a2394cc9ef15ae172bb8992445d87c60d063
--- /dev/null
+++ b/unittests/test_result_table_cfood.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2018 Research Group Biomedical Physics,
+# Max-Planck-Institute for Dynamics and Self-Organization Göttingen
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""
+test module for ResultTableCFood
+"""
+
+
+import os
+import re
+import unittest
+
+import caosdb as db
+from caosadvancedtools.scifolder.result_table_cfood import ResultTableCFood
+
+
+class CFoodTest(unittest.TestCase):
+    def test_re(self):
+        self.assertIsNotNone(re.match(ResultTableCFood.table_re, "result_table_Hallo.csv"))
+        self.assertEqual(re.match(ResultTableCFood.table_re, "result_table_Hallo.csv").group("recordtype"),
+                         "Hallo")
+        self.assertIsNotNone(re.match(ResultTableCFood.table_re,
+                                      "result_table_Cool RecordType.csv"))
+        self.assertEqual(re.match(ResultTableCFood.table_re, "result_table_Cool RecordType.csv").group("recordtype"),
+                         "Cool RecordType")
+        self.assertIsNone(re.match(ResultTableCFood.table_re, "result_tableCool RecordType.csv"))
+
+        self.assertIsNotNone(re.match(ResultTableCFood.property_name_re,
+                                      "temperature [C]"))
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature [C]").group("pname"),
+                         "temperature")
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature [C]").group("unit"), "C")
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature [ C ]").group("unit"), "C")
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature").group("pname"), "temperature")
+
+    def test_ident(self):
+        rtc = ResultTableCFood(os.path.join(os.path.dirname(__file__), "test.csv"))
+        rtc.match = re.match(ResultTableCFood.get_re(),
+                             "/ExperimentalData/2010_TestProject/2019-02-03_something/result_table_RT.csv")
+        rtc.create_identifiables()
+        rtc.update_identifiables()
diff --git a/unittests/test_table_importer.py b/unittests/test_table_importer.py
index 9c8a379d8c12def32c04cf82c5e09c0f5f6f175c..70f0f87f8706d72c386b18f54b7a9a10908eb477 100644
--- a/unittests/test_table_importer.py
+++ b/unittests/test_table_importer.py
@@ -23,7 +23,6 @@ import unittest
 from functools import partial
 from tempfile import NamedTemporaryFile
 
-import caosdb as db
 import numpy as np
 import pandas as pd
 import pytest
@@ -81,7 +80,6 @@ class ConverterTest(unittest.TestCase):
             r"\this\computer,\this\computer"),
             ["/this/computer", "/this/computer"])
 
-    @pytest.mark.xfail(reason="To be fixed, see Issue #34")
     def test_datetime(self):
         test_file = os.path.join(os.path.dirname(__file__), "date.xlsx")
         importer = XLSImporter(converters={'d': datetime_converter,
@@ -211,6 +209,20 @@ class XLSImporterTest(TableImporterTest):
         self.assertRaises(DataInconsistencyError, importer.read_xls,
                           tmp.name)
 
+    def test_datatypes(self):
+        """Test datataypes in columns."""
+        importer = XLSImporter(converters={},
+                               obligatory_columns=["float_as_float"],
+                               datatypes={
+                                   "float_as_float": float,
+                                   "int_as_float": float,
+                                   "int_as_int": int,
+        }
+        )
+        df = importer.read_xls(os.path.join(
+            os.path.dirname(__file__), "data", "datatypes.xlsx"))
+        assert np.issubdtype(df.loc[0, "int_as_float"], float)
+
 
 class CSVImporterTest(TableImporterTest):
     def test_full(self):
@@ -241,7 +253,7 @@ class CountQueryNoneConverterTest(BaseMockUpTest):
             '<Query string="count record" results="0">'
             '</Query>'
             '</Response>'
-            )
+        )
 
     def test_check_reference_field(self):
         self.assertRaises(ValueError, check_reference_field, "1232",  "Max")
@@ -256,7 +268,7 @@ class CountQuerySingleConverterTest(BaseMockUpTest):
             '<Query string="count record" results="1">'
             '</Query>'
             '</Response>'
-            )
+        )
 
     def test_check_reference_field(self):
         self.assertEqual(check_reference_field("1232",  "Max"),
diff --git a/unittests/test_parser.py b/unittests/test_yaml_model_parser.py
similarity index 63%
rename from unittests/test_parser.py
rename to unittests/test_yaml_model_parser.py
index 161e2873a9c01f9ce415818116b9e4cf9aeadb5c..a9f072b754618e38237cbf70e74c7944551f1045 100644
--- a/unittests/test_parser.py
+++ b/unittests/test_yaml_model_parser.py
@@ -1,5 +1,7 @@
 import unittest
+from datetime import date
 from tempfile import NamedTemporaryFile
+from pytest import raises
 
 import caosdb as db
 from caosadvancedtools.models.parser import (TwiceDefinedException,
@@ -15,6 +17,8 @@ def to_file(string):
 
     return f.name
 
+# TODO: check purpose of this function... add documentation
+
 
 def parse_str(string):
     parse_model_from_yaml(to_file(string))
@@ -68,7 +72,8 @@ RT2:
         a:
 """
 
-        self.assertRaises(TwiceDefinedException, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(TwiceDefinedException,
+                          lambda: parse_model_from_yaml(to_file(string)))
 
     def test_typical_case(self):
         string = """
@@ -103,7 +108,8 @@ RT5:
 - RT1:
 - RT2:
 """
-        self.assertRaises(ValueError, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(
+            ValueError, lambda: parse_model_from_yaml(to_file(string)))
 
     def test_unknown_kwarg(self):
         string = """
@@ -111,7 +117,8 @@ RT1:
   datetime:
     p1:
 """
-        self.assertRaises(ValueError, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(
+            ValueError, lambda: parse_model_from_yaml(to_file(string)))
 
     def test_definition_in_inheritance(self):
         string = """
@@ -121,7 +128,8 @@ RT2:
   - RT1:
     description: "tach"
 """
-        self.assertRaises(ValueError, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(
+            ValueError, lambda: parse_model_from_yaml(to_file(string)))
 
     def test_inheritance(self):
         string = """
@@ -283,12 +291,12 @@ A:
 """
         model = parse_model_from_string(modeldef)
         self.assertEqual(len(model), 2)
-        for key in model.keys():
+        for key, value in model.items():
             if key == "A":
-                self.assertTrue(isinstance(model[key], db.RecordType))
+                self.assertTrue(isinstance(value, db.RecordType))
             elif key == "ref":
-                self.assertTrue(isinstance(model[key], db.Property))
-                self.assertEqual(model[key].datatype, "LIST<A>")
+                self.assertTrue(isinstance(value, db.Property))
+                self.assertEqual(value.datatype, "LIST<A>")
 
 
 class ExternTest(unittest.TestCase):
@@ -301,6 +309,8 @@ class ExternTest(unittest.TestCase):
 class ErrorMessageTest(unittest.TestCase):
     """Tests for understandable error messages."""
 
+    # Note: This was changed with implementation of role keyword
+    @unittest.expectedFailure
     def test_non_dict(self):
         """When a value is given, where a list or mapping is expected."""
         recordtype_value = """
@@ -327,4 +337,140 @@ A:
             # parse_str(string)
             with self.assertRaises(YamlDefinitionError) as yde:
                 parse_str(string)
-            assert("line {}".format(line) in yde.exception.args[0])
+            assert "line {}".format(line) in yde.exception.args[0]
+
+
+def test_define_role():
+    model = """
+A:
+  role: Record
+"""
+    entities = parse_model_from_string(model)
+    assert "A" in entities
+    assert isinstance(entities["A"], db.Record)
+    assert entities["A"].role == "Record"
+
+    model = """
+A:
+  role: Record
+  inherit_from_obligatory:
+  - C
+  obligatory_properties:
+    b:
+b:
+  datatype: INTEGER
+C:
+  obligatory_properties:
+    b:
+D:
+  role: RecordType
+"""
+    entities = parse_model_from_string(model)
+    for name, ent in (("A", "Record"), ("b", "Property"),
+                      ("C", "RecordType"), ("D", "RecordType")):
+        assert name in entities
+        assert isinstance(entities[name], getattr(db, ent))
+        assert entities[name].role == ent
+
+    assert entities["A"].parents[0].name == "C"
+    assert entities["A"].name == "A"
+
+    assert entities["A"].properties[0].name == "b"
+    assert entities["A"].properties[0].value is None
+
+    assert entities["C"].properties[0].name == "b"
+    assert entities["C"].properties[0].value is None
+
+    model = """
+A:
+  role: Record
+  obligatory_properties:
+    b: 42
+b:
+  datatype: INTEGER
+"""
+
+    entities = parse_model_from_string(model)
+    assert entities["A"].get_property("b").value == 42
+    assert entities["b"].value is None
+
+    model = """
+b:
+  datatype: INTEGER
+  value: 18
+"""
+    entities = parse_model_from_string(model)
+    assert entities["b"].value == 18
+
+
+def test_issue_72():
+    """Tests for
+    https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/72
+
+    In some cases, faulty values would be read in for properties without a
+    specified value.
+
+    """
+    model = """
+Experiment:
+  obligatory_properties:
+    date:
+      datatype: DATETIME
+      description: 'date of the experiment'
+    identifier:
+      datatype: TEXT
+      description: 'identifier of the experiment'
+    temperature:
+      datatype: DOUBLE
+      description: 'temp'
+TestExperiment:
+  role: Record
+  inherit_from_obligatory:
+    - Experiment
+  obligatory_properties:
+    date: 2022-03-02
+    identifier: Test
+    temperature: 23
+  recommended_properties:
+    additional_prop:
+      datatype: INTEGER
+      value: 7
+"""
+    entities = parse_model_from_string(model)
+    assert "Experiment" in entities
+    assert "date" in entities
+    assert "identifier" in entities
+    assert "temperature" in entities
+    assert "TestExperiment" in entities
+    assert "additional_prop" in entities
+    assert isinstance(entities["Experiment"], db.RecordType)
+
+    assert entities["Experiment"].get_property("date") is not None
+    # No value is set, so this has to be None
+    assert entities["Experiment"].get_property("date").value is None
+
+    assert entities["Experiment"].get_property("identifier") is not None
+    assert entities["Experiment"].get_property("identifier").value is None
+
+    assert entities["Experiment"].get_property("temperature") is not None
+    assert entities["Experiment"].get_property("temperature").value is None
+
+    test_rec = entities["TestExperiment"]
+    assert isinstance(test_rec, db.Record)
+    assert test_rec.get_property("date").value == date(2022, 3, 2)
+    assert test_rec.get_property("identifier").value == "Test"
+    assert test_rec.get_property("temperature").value == 23
+    assert test_rec.get_property("additional_prop").value == 7
+
+
+def test_file_role():
+    """Not implemented for now, see
+    https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/74.
+
+    """
+    model = """
+F:
+  role: File
+"""
+    with raises(NotImplementedError):
+        parse_model_from_string(model)