diff --git a/.docker/Dockerfile b/.docker/Dockerfile
index 43e5eff1171da8d69eb8897bea678bf90572570a..7fa7fc1e3ba287611b84d22cd969ef50655f8a8f 100644
--- a/.docker/Dockerfile
+++ b/.docker/Dockerfile
@@ -1,4 +1,4 @@
-FROM debian:10
+FROM debian:11
 RUN apt-get update && \
     apt-get install \
     curl \
@@ -13,7 +13,7 @@ RUN apt-get update && \
     tox \
     git \
     openjdk-11-jdk-headless \
-    python-autopep8 \
+    python3-autopep8 \
     python3-pytest \
     libxml2 \
     -y
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index c9cd5b631cea84f44c5296edf4b789d83982d074..ea5eb78bd8323b1dd7199dc5eb91e899b1d98f81 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -46,7 +46,7 @@ test:
   image: $CI_REGISTRY_IMAGE_BASE
   script:
       - if [[ "$CAOSDB_TAG" == "" ]]; then
-            CAOSDB_TAG=dev-latest;
+            CAOSDB_TAG=dev;
         fi
       - echo $CAOSDB_TAG
       - time docker load < /image-cache/caosdb-advanced-testenv.tar || true
@@ -64,6 +64,7 @@ test:
       - rc=`cat .docker/result`  
       - exit $rc
   dependencies: [cert]
+  needs: [cert]
   artifacts:
     paths:
       - caosdb_log.txt
@@ -95,6 +96,7 @@ cert:
   tags: [docker]
   stage: cert
   image: $CI_REGISTRY_IMAGE
+  needs: [build-testenv]
   artifacts:
     paths:
       - .docker/cert/
@@ -106,28 +108,28 @@ style:
   tags: [docker]
   stage: style
   image: $CI_REGISTRY_IMAGE
+  needs: []
   script:
-      - autopep8 -ar --diff --exit-code --exclude swagger_client .
+      - make style
   allow_failure: true
 
 unittest:
   tags: [docker]
   stage: unittest
   image: $CI_REGISTRY_IMAGE
+  needs: []
   script:
       - tox
 
 # Build the sphinx documentation and make it ready for deployment by Gitlab Pages
-# documentation:
-#   stage: deploy
-
 # Special job for serving a static website. See https://docs.gitlab.com/ee/ci/yaml/README.html#pages
-pages:
-  stage: deploy
-  image: $CI_REGISTRY_IMAGE
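+# pages_prepare holds the shared deployment steps as a YAML anchor: it runs
+# on release branches, and the pages job below reuses it for version tags.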
+pages_prepare: &pages_prepare
   tags: [docker]
+  image: $CI_REGISTRY_IMAGE
+  stage: deploy
   only:
-    - dev
+    refs:
+      - /^release-.*$/
   script:
     - echo "Deploying"
     - make doc
@@ -135,3 +137,9 @@ pages:
   artifacts:
     paths:
       - public
+pages:
+  <<: *pages_prepare
+  only:
+    refs:
+      # version tags: v0.1.1
+      - /^v(\d+\.\d+\.\d+)$/
diff --git a/.gitlab/merge_request_templates/Default.md b/.gitlab/merge_request_templates/Default.md
deleted file mode 100644
index 77a95da1cc40c815e4952a1283d345af56e80461..0000000000000000000000000000000000000000
--- a/.gitlab/merge_request_templates/Default.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Summary
-
-    Insert a meaningful description for this merge request here.  What is the
-    new/changed behavior? Which bug has been fixed? Are there related Issues?
-
-# Focus
-
-    Point the reviewer to the core of the code change. Where should they start
-    reading? What should they focus on (e.g. security, performance,
-    maintainability, user-friendliness, compliance with the specs, finding more
-    corner cases, concrete questions)?
-
-# Test Environment
-
-    How to set up a test environment for manual testing?
-
-# Check List for the Author
-
-Please, prepare your MR for a review. Be sure to write a summary and a
-focus and create gitlab comments for the reviewer. They should guide the
-reviewer through the changes, explain your changes and also point out open
-questions. For further good practices have a look at [our review
-guidelines](https://gitlab.com/caosdb/caosdb/-/blob/dev/REVIEW_GUIDELINES.md)
-
-- [ ] All automated tests pass
-- [ ] Reference related Issues
-- [ ] Up-to-date CHANGELOG.md
-- [ ] Annotations in code (Gitlab comments)
-  - Intent of new code
-  - Problems with old code
-  - Why this implementation?
-
-
-# Check List for the Reviewer
-
-
-- [ ] I understand the intent of this MR
-- [ ] All automated tests pass
-- [ ] Up-to-date CHANGELOG.md
-- [ ] The test environment setup works and the intended behavior is
-  reproducible in the test environment
-- [ ] In-code documentation and comments are up-to-date.
-- [ ] Check: Are there spezifications? Are they satisfied?
-
-For further good practices have a look at [our review guidelines](https://gitlab.com/caosdb/caosdb/-/blob/dev/REVIEW_GUIDELINES.md).
-
-
-/assign me
-/target_branch dev
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e0ec0b6b630e0310ceb81b955ceed7f0089a0e6e..a6b2de738c79b3ad38c6bf77a2abb3611a6511eb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,10 +8,38 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ### Added ###
 
+- CFood that creates a Record for each line in a CSV file
+- `generic_analysis.py` makes it easy to call scripts that perform analyses
+  via server-side scripting [EXPERIMENTAL]
+
+### Changed ###
+
+- `TableConverter` now converts int to float and vice versa to match the desired dtype.
+
+### Deprecated ###
+
+### Removed ###
+
+### Fixed ###
+
+### Security ###
+
+## [0.3.1] - 2021-12-06  ##
+
+### Added ###
+- `check_reference_field` function to check whether entities with provided ids
+  exist (for example when importing data from a table)
+- added the `datatypes` argument to `TableImporter` for columns that do not
+  need a special conversion function
+
+## [0.3.0] - 2021-11-02 ##
+
+### Added ###
+
 - Error handling for missing files when using the crawler
 - included the scifolder module
 - included the caosmodels module
-* `send_mail` function in `caosadvancedtools.serverside.helper` module
+- `send_mail` function in `caosadvancedtools.serverside.helper` module
 - New class to collect possible problems with the data model
 - New class for checking and importing tables
 - Function to get a file path to a shared resource directory
@@ -29,6 +57,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Proof-of-concept integration with Bloxberg.
 - Introduce a cfood that can create a Record structure based on the contents of a hdf5 file
   h5py is now an optional dependency
+- table importer implementations for csv and tsv
+- string-in-list check for table imports
+- AbstractCFood has a new property, `update_flags`.
 
 ### Changed ###
 
@@ -36,9 +67,9 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   allows them to have dependencies among each other if they are ordered 
   correctly
 - identifiables must have at least one property or a name
-* `caosadvancedtools.serverside.helper.init_data_model` also checks the role
+- `caosadvancedtools.serverside.helper.init_data_model` also checks the role
   and data type of entities.
-* The `caosadvancedtools.table_importer.date_converter` now actually returns
+- The `caosadvancedtools.table_importer.date_converter` now actually returns
   `datetime.date` instance. A new
   `caosadvancedtools.table_importer.datetime_converter` replaces the old
   `date_converter` and returns a `datetime.datetime` instance.
@@ -54,23 +85,18 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - `caosadvancedtools.cfood.assure_object_is_in_list` conducts in-place
   updates if no `to_be_updated` object is supplied.
 
-### Deprecated ###
-
-### Removed ###
-
 ### Fixed ###
 - An exception in collect_information does no longer lead to a break down.
 - Removed dependency on discontiued xlrd version
-* Fixed an issue where `caosadvancedtools.cache.UpdateCache` would
+- Fixed an issue where `caosadvancedtools.cache.UpdateCache` would
   cause an `sqlite3.IntegrityError` if more than one change was cached
   for the same entity.
-* #40 Insertion of identifiables with missing obligatory properties
+- #40 Insertion of identifiables with missing obligatory properties
 - Before, a Property with the datatype "LIST(TEXT)" would lead to the creation 
   of a RecordType. This is fixed now.
-* #52 `XLSimporter.read_xls` throwed a wrong error when reading from a file with a wrong ending. 
+- #52 `XLSimporter.read_xls` threw a wrong error when reading from a file with a wrong ending.
   Now, a `DataInconsistencyError` is raised instead of a ValueError.
-
-### Security ###
+- List properties are no longer updated unnecessarily by the crawler.
 
 ## [0.2.0] - 2020-05-28 ##
 
diff --git a/Makefile b/Makefile
index 7609444bd4fd3a8ce980eca0bc3993b3cf2e168f..52ac04456cf59a24334003d4a0af9055dd3b11ec 100644
--- a/Makefile
+++ b/Makefile
@@ -34,3 +34,7 @@ install:
 
 unittest:
 	pytest-3 unittests
+
+style:
+	autopep8 -ar --diff --exit-code --exclude swagger_client .
+.PHONY: style
diff --git a/README.md b/README.md
index d4b0811f18eaea1e5bb6900744a1f9b72c61240e..83a767476286acba98d113b8fa7ab6b482751230 100644
--- a/README.md
+++ b/README.md
@@ -13,32 +13,38 @@ typically be used by a data curator.
 Please read the [README_SETUP.md](README_SETUP.md) for instructions on how to
 setup this code.
 
-
 ## Further Reading
 
-Please refer to the [official documentation](https://docs.indiscale.com/caosdb-advanced-user-tools/) for more information.
+Please refer to the [official
+documentation](https://docs.indiscale.com/caosdb-advanced-user-tools/) for more
+information.
 
 ## Contributing
 
-Thank you very much to all contributers—[past, present](https://gitlab.com/caosdb/caosdb/-/blob/dev/HUMANS.md), and prospective ones.
+Thank you very much to all contributors—[past,
+present](https://gitlab.com/caosdb/caosdb/-/blob/dev/HUMANS.md), and prospective
+ones.
 
 ### Code of Conduct
 
-By participating, you are expected to uphold our [Code of Conduct](https://gitlab.com/caosdb/caosdb/-/blob/dev/CODE_OF_CONDUCT.md).
+By participating, you are expected to uphold our [Code of
+Conduct](https://gitlab.com/caosdb/caosdb/-/blob/dev/CODE_OF_CONDUCT.md).
 
 ### How to Contribute
 
-* You found a bug, have a question, or want to request a feature? Please 
-[create an issue](https://gitlab.com/caosdb/caosdb-advanced-user-tools/-/issues).
-* You want to contribute code? Please fork the repository and create a merge 
-request in GitLab and choose this repository as target. Make sure to select
-"Allow commits from members who can merge the target branch" under Contribution
-when creating the merge request. This allows our team to work with you on your request.
-- If you have a suggestion for the [documentation](https://docs.indiscale.com/caosdb-advanced-user-tools/), 
-the preferred way is also a merge request as describe above (the documentation resides in `src/doc`).
-However, you can also create an issue for it. 
-- You can also contact us at **info (AT) caosdb.de** and join the
-  CaosDB community on
+- You found a bug, have a question, or want to request a feature? Please [create
+  an issue](https://gitlab.com/caosdb/caosdb-advanced-user-tools/-/issues).
+- You want to contribute code? Please fork the repository and create a merge
+  request in GitLab and choose this repository as target. Make sure to select
+  "Allow commits from members who can merge the target branch" under
+  Contribution when creating the merge request. This allows our team to work
+  with you on your request.
+- If you have a suggestion for the
+  [documentation](https://docs.indiscale.com/caosdb-advanced-user-tools/), the
+  preferred way is also a merge request as described above (the documentation
+  resides in `src/doc`). However, you can also create an issue for it.
+- You can also contact us at **info (AT) caosdb.de** and join the CaosDB
+  community on
   [#caosdb:matrix.org](https://matrix.to/#/!unwwlTfOznjEnMMXxf:matrix.org).
 
 ## License
diff --git a/README_SETUP.md b/README_SETUP.md
index e5ebd969462f7d2c28a329e2c6b6e1bab1252775..d33316612c1d2870c3b2c416e842df4220ecf858 100644
--- a/README_SETUP.md
+++ b/README_SETUP.md
@@ -8,14 +8,16 @@ git clone 'https://gitlab.com/caosdb/caosdb-advanced-user-tools'
 ```
 
 ## Dependencies
-Dependencies will be installed automatically if you use the below described procedure.
-- `caosdb>=0.4.0`                                      
+Dependencies will be installed automatically if you use the procedure
+described below.
+- `caosdb>=0.6.0`
 - `openpyxl>=3.0.0`
 - `xlrd>=1.2.0`
--  `pandas>=1.2.0`
+- `pandas>=1.2.0`
 - `numpy>=1.17.3`
 
-If you want to use the optional h5-crawler the following dependencies will be installed additionally:
+If you want to use the optional h5-crawler, the following additional
+dependencies will be installed:
 - `h5py>=3.3.0`
 
 For testing:
@@ -35,19 +37,19 @@ Optional h5-crawler:
 ## Run Integration Tests Locally
 
 1. Change directory to `integrationtests/`.
-2. Mount `extroot` to the folder that will be used as
-   extroot. E.g. `sudo mount -o bind extroot
-   ../../caosdb-deploy/profiles/empty/paths/extroot` (or whatever path
-   the extroot of the empty profile to be used is located at).
-3. Start (or restart) an empty (!) CaosDB instance (with the mounted extroot).
-   The database will be cleared during testing, so it's important to use
-   an empty instance.
-   Make sure your configuration for the python caosdb module is correct and
-   allows to connect to the server.
-4. Run `test.sh`.  Note that this may modify content of the `integrationtest/extroot/` directory.
+2. Mount `extroot` to the folder that will be used as extroot. E.g. `sudo mount
+   -o bind extroot ../../caosdb-deploy/profiles/empty/paths/extroot` (or
+   whatever path the extroot of the empty profile to be used is located at).
+3. Start (or restart) an empty (!) CaosDB instance (with the mounted
+   extroot). The database will be cleared during testing, so it's important to
+   use an empty instance. Make sure your configuration for the python caosdb
+   module is correct and allows to connect to the server.
+4. Run `test.sh`. Note that this may modify content of the
+   `integrationtest/extroot/` directory.
 
 ## Code Formatting
-`autopep8 -i -r ./`
+
+`make style`
 
 ## Documentation #
 
diff --git a/RELEASE_GUIDELINES.md b/RELEASE_GUIDELINES.md
new file mode 100644
index 0000000000000000000000000000000000000000..e71234b8e2bc95f954ffbebdc26acf6edd8e0b2d
--- /dev/null
+++ b/RELEASE_GUIDELINES.md
@@ -0,0 +1,43 @@
+# Release Guidelines for the CaosDB Advanced User Tools
+
+This document specifies release guidelines in addition to the general release
+guidelines of the CaosDB Project
+([RELEASE_GUIDELINES.md](https://gitlab.com/caosdb/caosdb/blob/dev/RELEASE_GUIDELINES.md)).
+
+## General Prerequisites
+
+* All tests are passing.
+* FEATURES.md is up-to-date and a public API is being declared in that document.
+* CHANGELOG.md is up-to-date.
+* DEPENDENCIES.md is up-to-date.
+
+## Steps
+
+1. Create a release branch from the dev branch. This prevents further changes
+   to the code base and a never-ending release process. Naming: `release-<VERSION>`
+
+2. Update CHANGELOG.md
+
+3. Check all general prerequisites.
+
+4. Update the version:
+   - `version` variables in `src/doc/conf.py`
+   - Version in [setup.py](./setup.py): Check the `MAJOR`, `MINOR`, `MICRO`, `PRE` variables and set
+     `ISRELEASED` to `True`. Use the possibility to issue pre-release versions for testing.
+
+5. Merge the release branch into the main branch.
+
+6. Tag the latest commit of the main branch with `v<VERSION>`.
+
+7. Delete the release branch.
+
+8. Remove possibly existing `./dist` directory with old release.
+
+9. Publish the release by executing `./release.sh`, which uploads the
+   caosadvancedtools module to the Python Package Index [pypi.org](https://pypi.org).
+
+10. Merge the main branch back into the dev branch.
+
+11. After the merge of main to dev, start a new development version by
+    setting `ISRELEASED` to `False` and by increasing at least the `MICRO`
+    version in [setup.py](./setup.py) and preparing CHANGELOG.md.
diff --git a/integrationtests/crawl.py b/integrationtests/crawl.py
index 79ed3b5ffe52d276677e2a7914f70923e5c9e70c..defed2cb4f5fb0a0f349898e555c5d25924e2f9b 100755
--- a/integrationtests/crawl.py
+++ b/integrationtests/crawl.py
@@ -34,7 +34,7 @@ from caosadvancedtools.crawler import FileCrawler
 from caosadvancedtools.guard import INSERT, UPDATE
 from caosadvancedtools.scifolder import (AnalysisCFood, ExperimentCFood,
                                          PublicationCFood, SimulationCFood,
-                                         SoftwareCFood)
+                                         SoftwareCFood, ResultTableCFood)
 
 from example_hdf5cfood import ExampleH5CFood
 
@@ -91,6 +91,7 @@ if __name__ == "__main__":
                     interactive=False, hideKnown=False,
                     cfood_types=[ExperimentCFood, AnalysisCFood, SoftwareCFood,
                                  PublicationCFood, SimulationCFood,
+                                 ResultTableCFood,
                                  ExampleH5CFood
                                  ])
 
diff --git a/integrationtests/create_analysis.py b/integrationtests/create_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b7aa0d2d6671f14a3c65cf5ed135dfecb0aa69c
--- /dev/null
+++ b/integrationtests/create_analysis.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""
+A small script that creates an Analysis Record that can be used for testing the
+automated analysis pipeline.
+"""
+
+import sys
+from datetime import datetime
+
+import caosdb as db
+
+
+def main():
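+    # Upload the example analysis script as a File entity; the timestamp in
+    # the path keeps repeated test runs from colliding.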
+    script = db.File(
+        file="../src/caosadvancedtools/serverside/examples/example_script.py",
+        path=("AutomatedAnalysis/scripts/"
+              + str(datetime.now())+"example_script.py"),
+    )
+    script.insert()
+
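+    # The Analysis Record references the script, the source data and, below,
+    # the responsible person.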
+    da = db.Record()
+    da.add_parent("Analysis")
+    da.add_property("scripts", value=[script], datatype=db.LIST(db.FILE))
+    da.add_property("sources",
+                    value=db.execute_query(
+                        "FIND FILE which is stored at '**/timeseries.npy'",
+                        unique=True),
+                    )
+    da.add_property("date", "2020-01-01")
+    da.add_property("identifier", "TEST")
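+    # Give the responsible Person an email address so that the analysis
+    # script can send a notification mail.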
+    only = db.execute_query(
+                        "FIND RECORD Person WITH firstname=Only",
+                        unique=True)
+    only.add_property(db.Property("Email").retrieve().id, "only@example.com")
+    only.update()
+    da.add_property("responsible", only)
+    da.insert()
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/integrationtests/example_script.py b/integrationtests/example_script.py
new file mode 120000
index 0000000000000000000000000000000000000000..f6e9b498ff97638cb4105e019424c0c677a7f414
--- /dev/null
+++ b/integrationtests/example_script.py
@@ -0,0 +1 @@
+../src/caosadvancedtools/serverside/examples/example_script.py
\ No newline at end of file
diff --git a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py
+++ b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..09157f2c0961d412efea36ea0e56db5aac03fd36 100644
Binary files a/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf and b/integrationtests/extroot/DataAnalysis/2010_TestProject/2019-02-03/results.pdf differ
diff --git a/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..e29553fe01c8706e15a042e5ac6f85ed1a2cc8ce 100644
--- a/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat
+++ b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/datafile.dat
@@ -0,0 +1 @@
+datadatadata
diff --git a/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/result_table_DepthTest.csv b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/result_table_DepthTest.csv
new file mode 100644
index 0000000000000000000000000000000000000000..a29679afce78089f3cdd4e5e388262456668cd90
--- /dev/null
+++ b/integrationtests/extroot/ExperimentalData/2010_TestProject/2019-02-03/result_table_DepthTest.csv
@@ -0,0 +1,3 @@
+temperature [°C] ,depth 
+234.4,3.0
+344.6,5.1
diff --git a/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf b/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..09157f2c0961d412efea36ea0e56db5aac03fd36 100644
Binary files a/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf and b/integrationtests/extroot/Publications/Posters/2019-02-03_really_cool_finding/poster.pdf differ
diff --git a/integrationtests/extroot/README.md b/integrationtests/extroot/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..4d45129ba23fffc825b2631e0eaa39f3d048427d
--- /dev/null
+++ b/integrationtests/extroot/README.md
@@ -0,0 +1,3 @@
+This directory is mounted into the LinkAhead docker container to allow the
+inclusion of external file systems.  For production use, please set the
+`paths:extroot` option in the profile.
diff --git a/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy b/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..18da9b18cda23d411d0f2666629377dd7991ac8f 100644
Binary files a/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy and b/integrationtests/extroot/SimulationData/2010_TestProject/2019-02-03_something/timeseries.npy differ
diff --git a/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py b/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py
+++ b/integrationtests/extroot/Software/2010_TestSoftware/2019-02-03_v0.1/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py b/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py
+++ b/integrationtests/extroot/Software/2020NewProject0X/2020-02-03/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py b/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2c99b82a33e496eb31cf7fdc354767fe31919033 100644
--- a/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py
+++ b/integrationtests/extroot/Software/2020NewProject0X/2020-02-04/plot.py
@@ -0,0 +1 @@
+import plot
diff --git a/integrationtests/insert_model.py b/integrationtests/insert_model.py
index ae3dd7701b44f5008bd976d81f8ecc8d9a02bf89..26bf478cdf0d3709e7c0c086fecf722b8c7f90fa 100755
--- a/integrationtests/insert_model.py
+++ b/integrationtests/insert_model.py
@@ -5,18 +5,29 @@ from caosadvancedtools.cfoods.h5 import H5CFood
 from caosadvancedtools.models.data_model import DataModel
 from caosadvancedtools.models.parser import parse_model_from_yaml
 
-model = parse_model_from_yaml("model.yml")
-model.sync_data_model(noquestion=True)
-
-if len(db.execute_query("FIND Property alias")) == 0:
-    al = db.Property(name="alias")
-    al.add_parent(name="name")
-    al.insert()
-
-h5model = db.Container()
-h5file = h5py.File('extroot/ExperimentalData/2010_TestProject/2019-02-03/hdf5_dummy_file.hdf5', 'r')
-H5CFood.create_structure(h5file, create_recordTypes=True, collection=h5model,
-                         root_name="ExampleH5")
-print(h5model)
-h5model = DataModel(h5model)
-h5model.sync_data_model(noquestion=True)
+
+def main():
+
+    # for testing existing data model parts with the YAML Interface
+    db.RecordType(name="TestRT1", datatype=db.TEXT).insert()
+    db.Property(name="TestP1", datatype=db.TEXT).insert()
+
+    model = parse_model_from_yaml("model.yml")
+    model.sync_data_model(noquestion=True)
+
+    if len(db.execute_query("FIND Property alias")) == 0:
+        al = db.Property(name="alias")
+        al.add_parent(name="name")
+        al.insert()
+
+    h5model = db.Container()
+    h5file = h5py.File(
+        'extroot/ExperimentalData/2010_TestProject/2019-02-03/hdf5_dummy_file.hdf5', 'r')
+    H5CFood.create_structure(h5file, create_recordTypes=True, collection=h5model,
+                             root_name="ExampleH5")
+    h5model = DataModel(h5model)
+    h5model.sync_data_model(noquestion=True)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/integrationtests/model.yml b/integrationtests/model.yml
index ab302f089de8ddc1782c075ad8ee26ea1259fee2..9f7a62d1d0befbc7225353380c79db2f368c969c 100644
--- a/integrationtests/model.yml
+++ b/integrationtests/model.yml
@@ -9,6 +9,7 @@ Experiment:
   # TODO empty  recommended_properties is a problem
   #recommended_properties:
     responsible:
+      datatype: LIST<Person>
 Project:
 SoftwareVersion:
   recommended_properties:
@@ -18,6 +19,14 @@ SoftwareVersion:
     binaries:
     sourceCode:
     Software:
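+# DepthTest Records are created by the ResultTableCFood from CSV files like
+# result_table_DepthTest.csv.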
+DepthTest:
+  obligatory_properties:
+    temperature:
+      datatype: DOUBLE 
+      description: 'temp'
+    depth:
+      datatype: DOUBLE 
+      description: 'temp'
 Person:
   obligatory_properties:
     firstName:
@@ -30,8 +39,6 @@ Person:
     email:
       datatype: TEXT 
       description: 'Email of a Person.'
-responsible:
-  datatype: REFERENCE
 revisionOf:
   datatype: REFERENCE
 results:
@@ -52,6 +59,9 @@ Analysis:
     date:
     identifier:
     responsible:
+  suggested_properties:
+    mean_value:
+      datatype: DOUBLE
 Publication:
 Thesis:
   inherit_from_suggested:
@@ -70,3 +80,6 @@ Report:
   - Publication
 hdf5File:
   datatype: REFERENCE
+extern:
+  - TestRT1
+  - TestP1
diff --git a/integrationtests/test.sh b/integrationtests/test.sh
index 71af543643a35cb082f10a24440c5ea87df946c9..1c0357e265eec770069166e614fc0a3aa6ecc548 100755
--- a/integrationtests/test.sh
+++ b/integrationtests/test.sh
@@ -65,6 +65,12 @@ python3 test_table.py
 # TODO the following test deletes lots of the data inserted by the crawler
 echo "Testing im and export"
 python3 test_im_und_export.py
+
+# automated analysis
+# For some reason, loadFiles has to be called again for the simulation data.
+python3 -m caosadvancedtools.loadFiles /opt/caosdb/mnt/extroot/SimulationData
+python3 create_analysis.py
+
 # Better safe than sorry:
 python3 clear_database.py
 
diff --git a/integrationtests/test_assure_functions.py b/integrationtests/test_assure_functions.py
index 56f9767a0f436201ab6003ffd88f631bdb089544..9f4e387d52f25382d18cfb21372a06346d2b5465 100644
--- a/integrationtests/test_assure_functions.py
+++ b/integrationtests/test_assure_functions.py
@@ -32,26 +32,25 @@ from caosadvancedtools.cfood import (assure_object_is_in_list)
 from caosadvancedtools.guard import (global_guard, RETRIEVE, UPDATE)
 
 
-def setup_module():
+def setup():
     """Delete all test entities."""
     db.execute_query("FIND Test*").delete(raise_exception_on_error=False)
 
 
-def setup():
+def setup_module():
     """Allow all updates and delete test data"""
     global_guard.level = UPDATE
-    setup_module()
+    setup()
 
 
-def teardown():
+def teardown_module():
     """Reset guard level and delete test data."""
     global_guard.level = RETRIEVE
-    setup_module()
+    setup()
 
 
 def test_assure_list_in_place():
     """Test an in-place update with `assure_object_is_in_list`."""
-
     int_list_prop = db.Property(name="TestIntListProperty",
                                 datatype=db.LIST(db.INTEGER)).insert()
     rt1 = db.RecordType(name="TestType1").add_property(
diff --git a/integrationtests/test_base_table_exporter_integration.py b/integrationtests/test_base_table_exporter_integration.py
index 1c9158bd1d9600884571957d4916939f82c1a9ca..9d79e857fe706d78103ade3b92ee38498a2a1607 100644
--- a/integrationtests/test_base_table_exporter_integration.py
+++ b/integrationtests/test_base_table_exporter_integration.py
@@ -23,6 +23,7 @@
 # ** end header
 #
 import caosdb as db
+import pytest
 from caosadvancedtools import table_export as te
 
 
@@ -85,8 +86,11 @@ def setup_module():
         pass
 
 
+@pytest.fixture(autouse=True)
 def setup():
-    """No further setup"""
+    """Same as module setup."""
+    setup_module()
+    yield None
     setup_module()
 
 
diff --git a/integrationtests/test_crawl_with_datamodel_problems.py b/integrationtests/test_crawl_with_datamodel_problems.py
index 2831bb67ec67f8c2b19519ef5457829712669a6e..0c6a145afdab682f82af09a17fb9aa0770769959 100644
--- a/integrationtests/test_crawl_with_datamodel_problems.py
+++ b/integrationtests/test_crawl_with_datamodel_problems.py
@@ -20,10 +20,11 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 #
 # ** end header
-"""Test whether the crawler correctly identifies the data model
-problems caused by a faulty model.
+
+"""Test whether the crawler correctly identifies the data model problems caused by a faulty model.
 
 """
+
 import caosdb as db
 from caosadvancedtools import loadFiles
 from caosadvancedtools.cfood import fileguide
@@ -34,6 +35,8 @@ from caosadvancedtools.models.parser import parse_model_from_yaml
 from caosadvancedtools.scifolder import (AnalysisCFood, ExperimentCFood,
                                          PublicationCFood, SimulationCFood)
 
+from insert_model import main as insert_model
+
 
 def setup_module():
     """Clear problems and remove all entities except for built-in ones."""
@@ -67,8 +70,7 @@ def test_crawler_with_data_model_problems():
                            prefix="", dryrun=False, forceAllowSymlinks=False)
 
     # load and damage the model
-    model = parse_model_from_yaml("model.yml")
-    model.sync_data_model(noquestion=True)
+    insert_model()
     deleted_entities = {"Experiment", "Poster", "results"}
 
     for ent in deleted_entities:
diff --git a/integrationtests/test_crawler_with_cfoods.py b/integrationtests/test_crawler_with_cfoods.py
index 05bb581058a964d76ab78583cc290c348e8c4566..4efef87cef52e4a2a20a615afe210c32f52a276a 100755
--- a/integrationtests/test_crawler_with_cfoods.py
+++ b/integrationtests/test_crawler_with_cfoods.py
@@ -66,6 +66,17 @@ class CrawlerTest(unittest.TestCase):
                          datfile.description)
         assert os.path.basename(datfile.path) == "datafile.dat"
 
+        # The experiment should reference two DepthTest Records.
+        depthtests = exp.get_property("DepthTest")
+        assert depthtests is not None
+        assert len(depthtests.value) == 2
+        depthtest = db.Record(id=depthtests.value[0])
+        depthtest.retrieve()
+        assert "DepthTest" in [p.name for p in depthtest.get_parents()]
+        assert 234.4 == depthtest.get_property("temperature").value
+        assert "°C" == depthtest.get_property("temperature").unit
+        assert 3.0 == depthtest.get_property("depth").value
+
         # Should have a responsible person
         self.assertIsNotNone(exp.get_property("responsible"))
         person = db.Record(id=exp.get_property("responsible").value[0])
diff --git a/integrationtests/test_datamodel_problems.py b/integrationtests/test_datamodel_problems.py
index 7d56f4da8eea34604ed1c820e14555f087c353bd..3bca302dd2a337cee7fd023ee6a64c5185bc99f5 100644
--- a/integrationtests/test_datamodel_problems.py
+++ b/integrationtests/test_datamodel_problems.py
@@ -44,12 +44,15 @@ def setup_module():
         print(delete_exc)
 
 
+@pytest.fixture(autouse=True)
 def setup():
-    """No further setup"""
+    """Same as module setup."""
+    setup_module()
+    yield None
     setup_module()
 
 
-def teardown():
+def teardown_module():
     """Clear and delete again."""
     setup_module()
 
diff --git a/integrationtests/test_im_und_export.py b/integrationtests/test_im_und_export.py
index 27995080aa5cbeeb6f562226d4f0c0ca19c64d83..8ea45fd2cebbcb2c3be6c8cb79805204486f7862 100644
--- a/integrationtests/test_im_und_export.py
+++ b/integrationtests/test_im_und_export.py
@@ -12,10 +12,12 @@ if __name__ == "__main__":
     directory = TemporaryDirectory()
     export_related_to(rec.id, directory=directory.name)
     # delete everything
+    print("Clearing database")
     recs = db.execute_query("FIND entity with id>99")
     recs.delete()
     assert 0 == len(db.execute_query("FIND File which is stored at "
                                      "**/poster.pdf"))
+    print("Importing stored elements")
     import_xml(os.path.join(directory.name, "caosdb_data.xml"), interactive=False)
 
     # The following tests the existence of some required entities.
@@ -26,3 +28,4 @@ if __name__ == "__main__":
     db.execute_query("FIND RecordType Person", unique=True)
     db.execute_query("FIND Record Person with firstname=Only", unique=True)
     db.execute_query("FIND File which is stored at **/poster.pdf", unique=True)
+    print("Found all required Records and Files.")
diff --git a/integrationtests/update_analysis.py b/integrationtests/update_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd18ab375437bec02320dcfd269896c2ba7e2bb0
--- /dev/null
+++ b/integrationtests/update_analysis.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""Example update script. An analysis Record is retrieved and passed to the
+generic run function which then calls the appropriate script based on the
+Record.
+
+The simple query here could be replaced with something that e.g. retrieves all
+entities that were changed within a certain period of time.
+
+"""
+
+import sys
+
+import caosdb as db
+from caosadvancedtools.serverside.generic_analysis import run
+
+
+def main():
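+    # The identifier TEST matches the Analysis Record created by
+    # create_analysis.py.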
+    da = db.execute_query("FIND Analysis with identifier=TEST", unique=True)
+    run(da)
+
+
+if __name__ == "__main__":
+    sys.exit(main())
diff --git a/pytest.ini b/pytest.ini
index 211913fa06d4e0a46c9c9024e147c5313e4746e1..e65efaf9aaf061a8a1ec0040f87d682536fac4c2 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,4 +1,3 @@
 [pytest]
 testpaths = unittests
 addopts = -vv
-python_paths = src
diff --git a/setup.py b/setup.py
index 772866537d02b71adddfab2a351a3e3372b05ab2..411a5c3dcd6ba362e7e7c8e6015e103acdf5bd31 100755
--- a/setup.py
+++ b/setup.py
@@ -46,8 +46,8 @@ from setuptools import find_packages, setup
 ########################################################################
 
 MAJOR = 0
-MINOR = 2
-MICRO = 0
+MINOR = 3
+MICRO = 2
 PRE = ""  # e.g. rc0, alpha.1, 0.beta-23
 ISRELEASED = False
 
@@ -154,10 +154,10 @@ def setup_package():
         long_description_content_type="text/markdown",
         author='Henrik tom Wörden',
         author_email='h.tomwoerden@indiscale.com',
-        install_requires=["caosdb>=0.4.0",
+        install_requires=["caosdb>=0.7.0",
+                          "numpy>=1.17.3",
                           "openpyxl>=3.0.0",
                           "pandas>=1.2.0",
-                          "numpy>=1.17.3",
                           "xlrd>=2.0",
                           ],
         extras_require={"h5-crawler": ["h5py>=3.3.0", ],
diff --git a/src/caosadvancedtools/cfood.py b/src/caosadvancedtools/cfood.py
index 48b423e01894220d2bd31dab5784932d601f9f62..3c2d5408ef4d857f62ce4e908f90c4ffccef4d19 100644
--- a/src/caosadvancedtools/cfood.py
+++ b/src/caosadvancedtools/cfood.py
@@ -47,6 +47,7 @@ from abc import ABCMeta, abstractmethod
 from datetime import datetime
 
 import caosdb as db
+from caosdb.common.models import Entity
 from caosdb.exceptions import (BadQueryError, EmptyUniqueQueryError,
                                QueryNotUniqueError, TransactionError)
 
@@ -169,6 +170,7 @@ class AbstractCFood(object, metaclass=ABCMeta):
         self.identifiables = db.Container()
         self.item = item
         self.attached_items = []
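+        # Server flags that will be passed along with updates by the crawler
+        # (see apply_list_of_updates in crawler.py).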
+        self.update_flags = {}
 
     @abstractmethod
     def create_identifiables(self):
@@ -389,7 +391,7 @@ class AbstractFileCFood(AbstractCFood):
 def assure_object_is_in_list(obj, containing_object, property_name,
                              to_be_updated=None, datatype=None):
     """Checks whether `obj` is one of the values in the list property
-    `property_name` of the supplied entity  containing_object`.
+    `property_name` of the supplied entity `containing_object`.
 
     If this is the case this function returns. Otherwise the entity is
     added to the property `property_name` and the entity
@@ -662,12 +664,18 @@ def assure_has_property(entity, name, value, to_be_updated=None,
     if isinstance(value, db.Entity):
         value = value.id
 
+    if isinstance(value, list):
+        value = [i.id if isinstance(i, db.Entity) else i for i in value]
+
     for el in possible_properties:
         tmp_value = el.value
 
         if isinstance(tmp_value, db.Entity):
             tmp_value = el.value.id
 
+        if isinstance(tmp_value, list):
+            tmp_value = [i.id if isinstance(i, db.Entity) else i for i in tmp_value]
+
         if tmp_value == value:
             contained = True
 
diff --git a/src/caosadvancedtools/cfoods/h5.py b/src/caosadvancedtools/cfoods/h5.py
index 9defe77115db7687d3a6c5f27bf7f3d268e605fc..6c68edd3668fec957126aa3234a830aab98fcd25 100644
--- a/src/caosadvancedtools/cfoods/h5.py
+++ b/src/caosadvancedtools/cfoods/h5.py
@@ -6,7 +6,7 @@
 # Copyright (C) 2020 Daniel Hornung <d.hornung@indiscale.com>
 # Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
 # Copyright (C) 2021 Alexander Kreft
-# Copyright (C) 2021 Laboratory for Fluid Physics and Biocomplexity, 
+# Copyright (C) 2021 Laboratory for Fluid Physics and Biocomplexity,
 # Max-Planck-Insitute für Dynamik und Selbstorganisation <www.lfpn.ds.mpg.de>
 #
 # This program is free software: you can redistribute it and/or modify
diff --git a/src/caosadvancedtools/crawler.py b/src/caosadvancedtools/crawler.py
index 5a8d428655791169557f5c292d30698f6ad69798..87b91a52a6034e906766a56ded787416e5c0027d 100644
--- a/src/caosadvancedtools/crawler.py
+++ b/src/caosadvancedtools/crawler.py
@@ -66,6 +66,82 @@ def separated(text):
     return "-"*60 + "\n" + text
 
 
+def apply_list_of_updates(to_be_updated, update_flags={},
+                          update_cache=None, run_id=None):
+    """Updates the `to_be_updated` Container, i.e., pushes the changes to CaosDB
+    after removing possible duplicates. If a cache is provided, unauthorized
+    updates can be cached for later authorization.
+
+    Parameters:
+    -----------
+    to_be_updated : db.Container
+        Container with the entities that will be updated.
+    update_flags : dict, optional
+        Dictionary of CaosDB server flags that will be used for the
+        update. Default is an empty dict.
+    update_cache : UpdateCache or None, optional
+        Cache in which the intended updates will be stored so they can be
+        authorized afterwards. Default is None.
+    run_id : String or None, optional
+        Id with which the pending updates are cached. Only meaningful if
+        `update_cache` is provided. Default is None.
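+
+    Example (this is how the crawler itself applies updates)::
+
+        apply_list_of_updates(cfood.to_be_updated, cfood.update_flags,
+                              update_cache=self.update_cache,
+                              run_id=self.run_id)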
+    """
+
+    if len(to_be_updated) == 0:
+        return
+
+    get_ids_for_entities_with_names(to_be_updated)
+
+    # remove duplicates
+    tmp = db.Container()
+
+    for el in to_be_updated:
+        if el not in tmp:
+            tmp.append(el)
+
+    to_be_updated = tmp
+
+    info = "UPDATE: updating the following entities\n"
+
+    baseurl = db.configuration.get_config()["Connection"]["url"]
+
+    def make_clickable(txt, id):
+        return "<a href='{}/Entity/{}'>{}</a>".format(baseurl, id, txt)
+
+    for el in to_be_updated:
+        info += str("\t" + make_clickable(el.name, el.id)
+                    if el.name is not None
+                    else "\t" + make_clickable(str(el.id), el.id))
+        info += "\n"
+    logger.info(info)
+
+    logger.debug(to_be_updated)
+    try:
+        if len(to_be_updated) > 0:
+            logger.info(
+                "Updating {} Records...".format(
+                    len(to_be_updated)))
+        guard.safe_update(to_be_updated, unique=False,
+                          flags=update_flags)
+    except FileNotFoundError as e:
+        logger.info("Cannot access {}. However, it might be needed for"
+                    " the correct execution".format(e.filename))
+    except ProhibitedException:
+        try:
+            update_cache.insert(to_be_updated, run_id)
+        except IntegrityError as e:
+            logger.warning(
+                "There were problems with the update of {}.".format(
+                    to_be_updated),
+                extra={"identifier": str(to_be_updated),
+                       "category": "update-cache"}
+            )
+            logger.debug(traceback.format_exc())
+            logger.debug(e)
+    except Exception as e:
+        DataModelProblems.evaluate_exception(e)
+
+
 class Crawler(object):
     def __init__(self, cfood_types, use_cache=False,
                  abort_on_exception=True, interactive=True, hideKnown=False,
@@ -203,6 +279,8 @@ class Crawler(object):
                     except DataInconsistencyError as e:
                         logger.debug(traceback.format_exc())
                         logger.debug(e)
+                        # TODO: Generally: in which cases should exceptions be raised? When is
+                        # errors_occured set to True? The expected behavior must be documented.
                     except Exception as e:
                         try:
                             DataModelProblems.evaluate_exception(e)
@@ -318,7 +396,11 @@ class Crawler(object):
                 self._cached_find_or_insert_identifiables(cfood.identifiables)
 
                 cfood.update_identifiables()
-                self.push_identifiables_to_CaosDB(cfood)
+                apply_list_of_updates(
+                    cfood.to_be_updated,
+                    cfood.update_flags,
+                    update_cache=self.update_cache,
+                    run_id=self.run_id)
             except FileNotFoundError as e:
                 logger.info("Cannot access {}. However, it might be needed for"
                             " the correct execution".format(e.filename))
@@ -516,59 +598,8 @@ carefully and if the changes are ok, click on the following link:
             subject="Crawler Update",
             body=text)
 
-    def push_identifiables_to_CaosDB(self, cfood):
-        """
-        Updates the to_be_updated Container, i.e. pushes the changes to CaosDB
-        """
-
-        if len(cfood.to_be_updated) == 0:
-            return
-
-        get_ids_for_entities_with_names(cfood.to_be_updated)
-
-        # remove duplicates
-        tmp = db.Container()
-
-        for el in cfood.to_be_updated:
-            if el not in tmp:
-                tmp.append(el)
-
-        cfood.to_be_updated = tmp
-
-        info = "UPDATE: updating the following entities\n"
-
-        for el in cfood.to_be_updated:
-            info += str("\t" + el.name if el.name is not None else "\t" +
-                        str(el.id))
-            info += "\n"
-        logger.info(info)
-
-        logger.debug(cfood.to_be_updated)
-        try:
-            if len(cfood.to_be_updated) > 0:
-                logger.info(
-                    "Updating {} Records...".format(
-                        len(cfood.to_be_updated)))
-            guard.safe_update(cfood.to_be_updated, unique=False)
-        except FileNotFoundError as e:
-            logger.info("Cannot access {}. However, it might be needed for"
-                        " the correct execution".format(e.filename))
-        except ProhibitedException:
-            try:
-                self.update_cache.insert(cfood.to_be_updated, self.run_id)
-            except IntegrityError as e:
-                logger.warning(
-                    "There were problems with the update of {}.".format(
-                        cfood.to_be_updated),
-                    extra={"identifier": str(cfood.to_be_updated),
-                           "category": "update-cache"}
-                )
-                logger.debug(traceback.format_exc())
-                logger.debug(e)
-        except Exception as e:
-            DataModelProblems.evaluate_exception(e)
-
     # TODO remove static?
+
     @staticmethod
     def find_or_insert_identifiables(identifiables):
         """ Sets the ids of identifiables (that do not have already an id from the
diff --git a/src/caosadvancedtools/models/data_model.py b/src/caosadvancedtools/models/data_model.py
index a4804dd0fb0300af9b166717f41f341a57b677d4..e198d15ca2c56eceec29c356cabdf28ac44895b2 100644
--- a/src/caosadvancedtools/models/data_model.py
+++ b/src/caosadvancedtools/models/data_model.py
@@ -23,6 +23,11 @@
 # ** end header
 #
 from copy import deepcopy
+# TODO(fspreck): `typing.List` is used for backwards compatibility with
+# Python < 3.9, but it is actually
+# [deprecated](https://docs.python.org/3/library/typing.html#typing.List), so
+# remove this when we drop support for old Python versions.
+from typing import List
 
 import caosdb as db
 from caosdb.apiutils import compare_entities, describe_diff
@@ -68,14 +73,14 @@ class DataModel(dict):
         else:
             super().__init__(args)
 
-    def append(self, entity):
+    def append(self, entity: db.Entity):
         self[entity.name] = entity
 
-    def extend(self, entities):
+    def extend(self, entities: List[db.Entity]):
         for entity in entities:
             self.append(entity)
 
-    def sync_data_model(self, noquestion=False):
+    def sync_data_model(self, noquestion: bool = False, verbose: bool = True):
         """Synchronize this DataModel with a CaosDB instance.
 
         Updates existing entities from the CaosDB instance and inserts
@@ -100,23 +105,27 @@ class DataModel(dict):
         self.sync_ids_by_name(tmp_exist)
 
         if len(non_existing_entities) > 0:
-            print("New entities:")
+            if verbose:
+                print("New entities:")
 
-            for ent in non_existing_entities:
-                print(ent.name)
+                for ent in non_existing_entities:
+                    print(ent.name)
 
             if noquestion or str(input("Do you really want to insert those "
                                        "entities? [y/N] ")).lower() == "y":
                 non_existing_entities.insert()
                 self.sync_ids_by_name(non_existing_entities)
-                print("Updated entities.")
+                if verbose:
+                    print("Updated entities.")
             else:
                 return
         else:
-            print("No new entities.")
+            if verbose:
+                print("No new entities.")
 
         if len(existing_entities) > 0:
-            print("Inspecting changes that will be made...")
+            if verbose:
+                print("Inspecting changes that will be made...")
             any_change = False
 
             for ent in existing_entities:
@@ -126,18 +135,22 @@ class DataModel(dict):
                                                         ), name=ent.name))
 
                 if diff != "":
-                    print(diff)
+                    if verbose:
+                        print(diff)
                     any_change = True
 
             if any_change:
                 if noquestion or input("Do you really want to apply the above "
                                        "changes? [y/N]") == "y":
                     existing_entities.update()
-                    print("Synchronized existing entities.")
+                    if verbose:
+                        print("Synchronized existing entities.")
             else:
-                print("No differences found. No update")
+                if verbose:
+                    print("No differences found. No update")
         else:
-            print("No existing entities updated.")
+            if verbose:
+                print("No existing entities updated.")
 
     @staticmethod
     def get_existing_entities(entities):
diff --git a/src/caosadvancedtools/models/parser.py b/src/caosadvancedtools/models/parser.py
index 28f8182c0722f8467d1781966d1bbeea406e92ce..d87bc9a4d22231f339e877bd80c63e602cb116ec 100644
--- a/src/caosadvancedtools/models/parser.py
+++ b/src/caosadvancedtools/models/parser.py
@@ -268,9 +268,8 @@ class Parser(object):
                 # is it a property
                 and "datatype" in definition
                 # but not simply an RT of the model
-                and not (definition["datatype"] in self.model
-                         or _get_listdatatype(
-                             definition["datatype"]) in self.model)):
+                and not (_get_listdatatype(definition["datatype"]) == name and
+                         _get_listdatatype(definition["datatype"]) in self.model)):
 
             # and create the new property
             self.model[name] = db.Property(name=name,
diff --git a/src/caosadvancedtools/scifolder/__init__.py b/src/caosadvancedtools/scifolder/__init__.py
index d7d67937b42ca23173fc93d4e704411f33d80bc4..cf753cfc0b72bf95e34edea1301b96ed18f040d0 100644
--- a/src/caosadvancedtools/scifolder/__init__.py
+++ b/src/caosadvancedtools/scifolder/__init__.py
@@ -3,3 +3,4 @@ from .experiment_cfood import ExperimentCFood
 from .publication_cfood import PublicationCFood
 from .simulation_cfood import SimulationCFood
 from .software_cfood import SoftwareCFood
+from .result_table_cfood import ResultTableCFood
diff --git a/src/caosadvancedtools/scifolder/result_table_cfood.py b/src/caosadvancedtools/scifolder/result_table_cfood.py
new file mode 100644
index 0000000000000000000000000000000000000000..deaa2d00118659a9b177a05fe40b19a1793a16fb
--- /dev/null
+++ b/src/caosadvancedtools/scifolder/result_table_cfood.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# encoding: utf-8
+#
+# Copyright (C) 2019 Henrik tom Wörden
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import re
+
+import caosdb as db
+import pandas as pd
+from caosadvancedtools.cfood import (AbstractFileCFood, assure_has_description,
+                                     assure_has_parent, assure_has_property,
+                                     assure_object_is_in_list, get_entity)
+from caosadvancedtools.read_md_header import get_header
+
+from ..cfood import assure_property_is, fileguide
+from .experiment_cfood import ExperimentCFood
+from .generic_pattern import date_pattern, date_suffix_pattern, project_pattern
+from .utils import parse_responsibles, reference_records_corresponding_to_files
+from .withreadme import DATAMODEL as dm
+from .withreadme import RESULTS, REVISIONOF, SCRIPTS, WithREADME, get_glob
+
+
+# TODO similarities with TableCrawler
+class ResultTableCFood(AbstractFileCFood):
+
+    # win_paths can be used to define fields that will contain windows style
+    # path instead of the default unix ones. Possible fields are:
+    # ["results", "revisionOf"]
+    win_paths = []
+    table_re = r"result_table_(?P<recordtype>.*).csv$"
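+    # Column headers like "temperature [°C]" are split into a property name
+    # and an optional unit.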
+    property_name_re = re.compile(r"^(?P<pname>.+?)\s*(\[\s?(?P<unit>.*?)\s?\] *)?$")
+
+    @staticmethod
+    def name_beautifier(x): return x
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
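+        # Read the result table once; each row will become one Record.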
+        self.table = pd.read_csv(fileguide.access(self.crawled_path))
+
+    @staticmethod
+    def get_re():
+        return (".*/ExperimentalData/"+project_pattern + date_pattern +
+                date_suffix_pattern + ResultTableCFood.table_re)
+
+    def create_identifiables(self):
+        self.recs = []
+        self.experiment, self.project = (
+            ExperimentCFood.create_identifiable_experiment(self.match))
+
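+        # One Record per table row; the parent RecordType name is taken from
+        # the file name (result_table_<RecordType>.csv).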
+        for idx, row in self.table.iterrows():
+            rec = db.Record()
+            rec.add_parent(self.match.group("recordtype"))
+
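+            # Only the first two columns enter the identifiable here;
+            # update_identifiables below handles all columns.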
+            for col in self.table.columns[:2]:
+                match = re.match(ResultTableCFood.property_name_re, col)
+
+                if match.group("unit"):
+                    rec.add_property(match.group("pname"), row.loc[col], unit=match.group("unit"))
+                else:
+                    rec.add_property(match.group("pname"), row.loc[col])
+            self.identifiables.append(rec)
+            self.recs.append(rec)
+
+        self.identifiables.extend([self.project, self.experiment])
+
+    def update_identifiables(self):
+        for ii, (idx, row) in enumerate(self.table.iterrows()):
+            for col in row.index:
+                match = re.match(ResultTableCFood.property_name_re, col)
+                assure_property_is(self.recs[ii], match.group("pname"),
+                                   row.loc[col],
+                                   to_be_updated=self.to_be_updated)
+        assure_property_is(self.experiment, self.match.group("recordtype"),
+                           self.recs, to_be_updated=self.to_be_updated,
+                           datatype=db.LIST(self.match.group("recordtype")))
diff --git a/src/caosadvancedtools/serverside/examples/example_script.py b/src/caosadvancedtools/serverside/examples/example_script.py
new file mode 100755
index 0000000000000000000000000000000000000000..d97d2d0d1f936b1c12e857d38fce043f0b514340
--- /dev/null
+++ b/src/caosadvancedtools/serverside/examples/example_script.py
@@ -0,0 +1,200 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""An example script that illustrates how scripts can be used in conjunction
+with the generic_analysis module.
+
+The data model needed for this script is:
+
+Analysis:
+    sources: REFERENCE
+    scripts: FILE
+    results: REFERENCE
+    mean_value: DOUBLE
+
+Person:
+    Email: TEXT
+
+"""
+
+import argparse
+import logging
+import sys
+from argparse import RawTextHelpFormatter
+from datetime import datetime
+from typing import List
+
+import caosdb as db
+import matplotlib.pyplot as plt
+import numpy as np
+from caosadvancedtools.cfood import assure_property_is
+from caosadvancedtools.crawler import apply_list_of_updates
+from caosadvancedtools.guard import INSERT, UPDATE
+from caosadvancedtools.guard import global_guard as guard
+from caosadvancedtools.serverside.helper import send_mail as main_send_mail
+
+# logging should be done like this in order to allow the caller script to
+# direct the output.
+logger = logging.getLogger(__name__)
+
+# allow updates of existing entities
+guard.set_level(level=UPDATE)
+
+
+def send_mail(changes: List[db.Entity], recipient: str):
+    """Call sendmail to notify the curator about pending changes.
+
+    Parameters
+    ----------
+    changes: The CaosDB entities in the version after the update.
+    recipient: The person who shall receive the mail.
+    """
+
+    caosdb_config = db.configuration.get_config()
+    text = """Dear Curator,
+The following changes were done automatically.
+
+{changes}
+    """.format(changes="\n".join(changes))
+    try:
+        fro = caosdb_config["advancedtools"]["automated_updates.from_mail"]
+    except KeyError:
+        logger.error("Server Configuration is missing a setting for "
+                     "sending mails. The administrator should check "
+                     "'from_mail'.")
+        return
+
+    main_send_mail(
+        from_addr=fro,
+        to=recipient,
+        subject="Automated Update",
+        body=text)
+
+
+def main(args):
+
+    # auth_token is provided by the server side scripting API
+    # use this token for authentication when creating a new connection
+    if hasattr(args, "auth_token") and args.auth_token:
+        db.configure_connection(auth_token=args.auth_token)
+        logger.debug("Established connection")
+
+    try:
+        dataAnalysisRecord = db.Record(id=args.entityid).retrieve()
+    except db.TransactionError:
+        logger.error("Cannot retrieve Record with id={}".format(
+            args.entityid
+        ))
+        raise
+
+    # The script may require certain information to exist. Here, we expect that
+    # a sources Property exists that references a numpy file.
+    # Similarly, an InputDataSet could be used.
+
+    if (dataAnalysisRecord.get_property("sources") is None
+            or not db.apiutils.is_reference(
+                dataAnalysisRecord.get_property("sources"))):
+
+        raise RuntimeError("sources Refenrence must exist.")
+
+    logger.debug("Found required data.")
+
+    # ####### this core might be replaced by a call to another script ####### #
+    # Download the data
+    source_val = dataAnalysisRecord.get_property("sources").value
+    npobj = db.File(
+        id=(source_val[0]
+            if isinstance(source_val, list)
+            else source_val)).retrieve()
+    npfile = npobj.download()
+    logger.debug("Downloaded data.")
+    data = np.load(npfile)
+
+    # Plot data
+    filename = "hist.png"
+    plt.hist(data)
+    plt.savefig(filename)
+
+    mean = data.mean()
+    # ####################################################################### #
+
+    # Insert the result plot
+    fig = db.File(file=filename,
+                  path="/Analysis/results/"+str(datetime.now())+"/"+filename)
+    fig.insert()
+
+    # Add the mean value to the analysis Record
+    # If such a property existed before, it is changed if necessary. The old
+    # value will persist in the versioning of LinkAhead
+    to_be_updated = db.Container()
+    assure_property_is(
+        dataAnalysisRecord,
+        "mean_value",
+        mean,
+        to_be_updated=to_be_updated
+    )
+
+    # Add the file with the plot to the analysis Record
+    # If a file was already referenced, the new one will be referenced instead.
+    # The old file is kept and is still referenced in an old version of
+    # the analysis Record.
+    assure_property_is(
+        dataAnalysisRecord,
+        "results",
+        [fig.id],
+        to_be_updated=to_be_updated
+    )
+
+    if len(to_be_updated) > 0:
+        logger.debug(to_be_updated)
+        apply_list_of_updates(to_be_updated, update_flags={})
+        logger.debug("Update sucessful.")
+        logger.info("The following Entities were changed:\n{}.".format(
+            [el.id for el in to_be_updated])
+        )
+
+        # Send mails to people that are referenced.
+        people = db.execute_query("FIND RECORD Person WHICH IS REFERENCED BY "
+                                  "{}".format(dataAnalysisRecord.id))
+        for person in people:
+            if person.get_property("Email") is not None:
+                send_mail([str(el) for el in to_be_updated],
+                          recipient=person.get_property("Email").value)
+        logger.debug("Mails send.")
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(description=__doc__,
+                                     formatter_class=RawTextHelpFormatter)
+    parser.add_argument("--auth-token",
+                        help="Token provided by the server for authentication")
+    parser.add_argument("entityid",
+                        help="The ID of the DataAnalysis Record.", type=int)
+
+    return parser.parse_args()
+
+
+if __name__ == "__main__":
+    args = parse_args()
+    sys.exit(main(args))
diff --git a/src/caosadvancedtools/serverside/generic_analysis.py b/src/caosadvancedtools/serverside/generic_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..66bec8a77e55709434b4285699e2cc2f8f804894
--- /dev/null
+++ b/src/caosadvancedtools/serverside/generic_analysis.py
@@ -0,0 +1,214 @@
+# encoding: utf-8
+#
+# Copyright (C) 2021 Alexander Schlemmer <alexander.schlemmer@ds.mpg.de>
+# Copyright (C) 2021 IndiScale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+#
+# See: https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/55
+
+# This source file is work in progress and currently untested.
+
+
+"""
+Variant I: A Python module implements a 'main' function which takes a Record
+as its argument, extends it (e.g. with 'results') and updates it.
+
+Variant II: A script receives an ID as its argument (e.g. on the command
+line) and updates the object on its own.
+
+Ideal case: idempotency; i.e. it does not matter whether the script has
+already been called before. A further call may lead to an update (but only
+if e.g. parameters have changed).
+
+The called script may use and create arbitrary Properties. BUT if the
+standard Properties (InputDataSet, etc.) are used, the Record can be created
+easily.
+
+
+
+      "Analyze"            "Perform Analysis"
+   button on a Record       form in the WebUI
+   in the WebUI
+         |               |
+         |               |
+         v               v
+     tiny script that creates a
+     DataAnalysis stub
+          |
+          |
+          v
+    execute_script routine -->  analysis script
+    receives the stub and,      uses functions to perform updates if
+    if given, the Python        necessary, sends emails
+    module name
+         ^
+         |
+         |
+    cron job finds outdated
+    DataAnalysis Records
+
+
+The analysis script performs the update:
+    - it is flexible which changes are made (e.g. several Records)
+    - the dedicated helper functions should be used
+    - logging and notification must happen inside the script
+    - the script can be called with subprocess (alternatively, insert an
+      incomplete DataAnalysis)
+
+
+# Features
+    - emails are sent on insert or update
+    - short info: "Create XY Analysis" can presumably be generated
+      automatically
+    - debug info: should be available optionally/in case of errors
+    - the script/software version should be stored
+
+
+Outlook: the part of the called scripts that interact with LinkAhead might in
+future be replaced by the Crawler. The working directory would be copied to the
+file server and then crawled.
+"""
+
+import argparse
+import importlib
+import logging
+import os
+import sys
+
+import caosdb as db
+from caosdb.utils.server_side_scripting import run_server_side_script
+
+logger = logging.getLogger(__name__)
+
+
+def check_referenced_script(record: db.Record):
+    """ return the name of a referenced script
+
+    If the supplied record does not have an appropriate Property warings are
+    logged.
+    """
+
+    if record.get_property("scripts") is None:
+        logger.warning("The follwing changed Record is missing the 'scripts' "
+                       "Property:\n{}".format(str(record)))
+
+        return
+
+    script_prop = record.get_property("scripts")
+
+    if not db.apiutils.is_reference(script_prop):
+        logger.warning("The 'scripts' Property of the following Record should "
+                       "reference a File:\n{}".format(str(record)))
+
+        return
+
+    script = db.execute_query("FIND ENTITY WITH id={}".format(
+        script_prop.value[0] if isinstance(script_prop.value, list)
+        else script_prop.value), unique=True)
+
+    if not isinstance(script, db.File):
+        logger.warning("The 'scripts' Property of the Record {} should "
+                       "reference a File. Entity {} is not a File".format(
+                           record.id, script_prop.value))
+
+        return
+
+    script_name = os.path.basename(script.path)
+
+    return script_name
+
+
+def call_script(script_name: str, record_id: int):
+    ret = run_server_side_script(script_name, record_id)
+
+    if ret.code != 0:
+        logger.error("Script failed!")
+        logger.debug(ret.stdout)
+        logger.error(ret.stderr)
+    else:
+        logger.debug(ret.stdout)
+        logger.debug(ret.stderr)
+
+
+def run(dataAnalysisRecord: db.Record):
+    """run a data analysis script.
+
+    There are two options:
+    1. A python script installed as a pip package.
+    2. A generic script that can be executed on the command line.
+
+    Using a python package:
+    It should be located in package plugin and implement at least
+    a main function that takes a DataAnalysisRecord as a single argument.
+    The script may perform changes to the Record and insert and update
+    Entities.
+
+    Using a generic script:
+    The only argument that is supplied to the script is the ID of the
+    dataAnalysisRecord. Apart from the different Argument everything that is
+    said for the python package holds here.
+    """
+
+    if dataAnalysisRecord.get_property("scripts") is not None:
+        script_name = check_referenced_script(dataAnalysisRecord)
+        logger.debug(
+            "Found 'scripts'. Call script '{}' in separate process".format(
+                script_name)
+            )
+        call_script(script_name, dataAnalysisRecord.id)
+        logger.debug(
+            "Script '{}' done.\n-----------------------------------".format(
+                script_name))
+
+    if dataAnalysisRecord.get_property("Software") is not None:
+        mod = dataAnalysisRecord.get_property("Software").value
+        logger.debug(
+            "Found 'Software'. Call '{}' as Python module".format(
+                mod)
+            )
+        m = importlib.import_module(mod)
+
+        m.main(dataAnalysisRecord)
+        logger.debug(
+            "'main' function of  Python module '{}' done"
+            ".\n-----------------------------------".format(mod))
+
+
+def _parse_arguments():
+    """ Parses the command line arguments.  """
+    parser = argparse.ArgumentParser(description=__doc__)
+    parser.add_argument("--module",
+                        help="The name of the Python module to be run.")
+    parser.add_argument("--inputset", help="An id of an input dataset.")
+    parser.add_argument("--parameterset", help="An id of a parameter record.")
+
+    return parser.parse_args()
+
+
+def main():
+    """ This is for testing only. """
+    args = _parse_arguments()
+
+    dataAnalysisRecord = db.Record()
+    dataAnalysisRecord.add_property(name="InputDataSet", value=args.entity)
+    dataAnalysisRecord.add_property(name="ParameterSet", value=args.parameter)
+    dataAnalysisRecord.add_property(name="Software", value=args.module)
+
+    dataAnalysisRecord.insert()
+    run(dataAnalysisRecord)
+
+
+if __name__ == "__main__":
+    args = _parse_arguments()
+    sys.exit(main(args))
diff --git a/src/caosadvancedtools/serverside/model.yml b/src/caosadvancedtools/serverside/model.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2f5a9634a97e39da4c5b3a6dfe1bf0c587863231
--- /dev/null
+++ b/src/caosadvancedtools/serverside/model.yml
@@ -0,0 +1,15 @@
+# Parent of all datasets which are used as input to or output from
+# analysis scripts
+Dataset:
+
+# Parent of all parametersets which are used as input for analysis scripts
+ParameterSet:
+
+DataAnalysis:
+  recommended_properties:
+    InputDataset:
+      datatype: Dataset
+    OutputDataset:
+      datatype: Dataset
+    ParameterSet:
+    date:
\ No newline at end of file
diff --git a/src/caosadvancedtools/serverside/sync.py b/src/caosadvancedtools/serverside/sync.py
new file mode 100755
index 0000000000000000000000000000000000000000..04283a15ba7919af6027b53217ffb69355ddfc6f
--- /dev/null
+++ b/src/caosadvancedtools/serverside/sync.py
@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+# Sync data model for generic data analysis method
+# A. Schlemmer, 09/2021
+
+from caosadvancedtools.models import parser
+model = parser.parse_model_from_yaml("model.yml")
+model.sync_data_model()
diff --git a/src/caosadvancedtools/table_importer.py b/src/caosadvancedtools/table_importer.py
index 8665eb30cf7162478472a9d31401732ed6edd87b..1f515e78e3ddbd198fa0336589a359ba9154f038 100755
--- a/src/caosadvancedtools/table_importer.py
+++ b/src/caosadvancedtools/table_importer.py
@@ -31,6 +31,7 @@ import logging
 import pathlib
 from datetime import datetime
 
+import caosdb as db
 import numpy as np
 import pandas as pd
 from xlrd import XLRDError
@@ -50,12 +51,27 @@ def assure_name_format(name):
     name = str(name)
 
     if len(name.split(",")) != 2:
-        raise ValueError("Name field should be 'LastName, FirstName'."
+        raise ValueError("The field value should be 'LastName, FirstName'. "
                          "The supplied value was '{}'.".format(name))
 
     return name
 
 
+def check_reference_field(ent_id, recordtype):
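+    """Check that an entity of type ``recordtype`` with id ``ent_id`` exists.
+
+    Returns the id unchanged if it does, raises a ValueError otherwise.
+    """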
+    if 1 != db.execute_query("COUNT {} WITH id={}".format(
+            recordtype,
+            ent_id),
+            unique=True):
+        raise ValueError(
+            "No {} with the supplied id={} exists. \n"
+            "Please supply a valid ID.".format(
+                recordtype,
+                ent_id
+            ))
+
+    return ent_id
+
+
 def yes_no_converter(val):
     """
     converts a string to True or False if possible.
@@ -88,7 +104,10 @@ def date_converter(val, fmt="%Y-%m-%d"):
     converts it using format string
     """
 
-    return datetime_converter(val, fmt=fmt).date()
+    if val is None:
+        return None
+    else:
+        return datetime_converter(val, fmt=fmt).date()
 
 
 def incomplete_date_converter(val, fmts={"%Y-%m-%d": "%Y-%m-%d",
@@ -137,6 +156,9 @@ def win_path_converter(val):
     checks whether the value looks like a windows path and converts it to posix
     """
 
+    if val == "":
+        return val
+
     if not check_win_path(val):
         raise ValueError(
             "Field should be a Windows path, but is\n'{}'.".format(val))
@@ -145,80 +167,100 @@ def win_path_converter(val):
     return path.as_posix()
 
 
-class TSVImporter(object):
-    def __init__(self, converters, obligatory_columns=[], unique_columns=[]):
-        raise NotImplementedError()
+def string_in_list(val, options, ignore_case=True):
+    """Return the given value if it is contained in options, raise an
+    error otherwise.
 
+    Parameters
+    ----------
+    val : str
+        String value to be checked.
+    options : list<str>
+        List of possible values that val may take.
+    ignore_case : bool, optional
+        Specify whether the comparison of val and the possible options
+        should ignore capitalization. Default is True.
+
+    Returns
+    -------
+    val : str
+       The original value if it is contained in options
 
-class XLSImporter(object):
-    def __init__(self, converters, obligatory_columns=None, unique_keys=None):
-        """
-        converters: dict with column names as keys and converter functions as
-                    values
-                    This dict also defines what columns are required to exist
-                    throught the existing keys. The converter functions are
-                    applied to the cell values. They should also check for
-                    ValueErrors, such that a separate value check is not
-                    necessary.
-        obligatory_columns: list of column names, optional
-                            each listed column must not have missing values
-        unique_columns : list of column names that in
-                            combination must be unique; i.e. each row has a
-                            unique combination of values in those columns.
-        """
-        self.sup = SuppressKnown()
-        self.required_columns = list(converters.keys())
-        self.obligatory_columns = [] if obligatory_columns is None else obligatory_columns
-        self.unique_keys = [] if unique_keys is None else unique_keys
-        self.converters = converters
+    Raises
+    ------
+    ValueError
+       If val is not contained in options.
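+
+    Examples
+    --------
+    A short sketch of the default, case-insensitive behavior:
+
+    >>> string_in_list("FALSE", ["false", "true"])
+    'false'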
+    """
 
-    def read_xls(self, filename, **kwargs):
-        """
-        converts an xls file into a Pandas DataFrame.
+    if ignore_case:
+        val = val.lower()
+        options = [o.lower() for o in options]
 
-        The converters of the XLSImporter object are used.
+    if val not in options:
+        raise ValueError(
+            "Field value is '{}', but it should be one of the following "
+            "values:  {}.".format(val, ", ".join(
+                ["'{}'".format(o) for o in options])))
 
-        Raises: DataInconsistencyError
+    return val
+
+
+class TableImporter():
+    """Abstract base class for importing data from tables.
+    """
+
+    def __init__(self, converters, obligatory_columns=None, unique_keys=None,
+                 datatypes=None):
         """
-        try:
-            xls_file = pd.io.excel.ExcelFile(filename)
-        except (XLRDError, ValueError) as e:
-            logger.warning(
-                "Cannot read \n{}.\nError:{}".format(filename,
-                                                     str(e)),
-                extra={'identifier': str(filename),
-                       'category': "inconsistency"})
-            raise DataInconsistencyError(*e.args)
+        Parameters
+        ----------
+        converters : dict
+          Dict with column names as keys and converter functions as values. This dict also defines
+          which columns are required to exist through its keys. The converter functions are
+          applied to the cell values. They should also check for ValueErrors, such that a separate
+          value check is not necessary.
+
+        obligatory_columns : list, optional
+          List of column names, each listed column must not have missing values.
+
+        unique_keys : list, optional
+          List of column names that in combination must be unique: each row has a unique
+          combination of values in those columns.
+
+        datatypes : dict, optional
+          Dict with column names as keys and datatypes as values.  All non-null values will be
+          checked whether they have the provided datatype.  This dict also defines which columns
+          are required to exist through its keys.
 
-        if len(xls_file.sheet_names) > 1:
-            # Multiple sheets is the default now. Only show in debug
-            logger.debug(
-                "Excel file {} contains multiple sheets. "
-                "All but the first are being ignored.".format(filename))
+        """
 
-        try:
-            df = xls_file.parse(converters=self.converters, **kwargs)
-        except Exception as e:
-            logger.warning(
-                "Cannot parse {}.".format(filename),
-                extra={'identifier': str(filename),
-                       'category': "inconsistency"})
-            raise DataInconsistencyError(*e.args)
+        if converters is None:
+            converters = {}
 
-        self.check_columns(df, filename=filename)
-        df = self.check_missing(df, filename=filename)
+        if datatypes is None:
+            datatypes = {}
 
-        if len(self.unique_keys) > 0:
-            df = self.check_unique(df, filename=filename)
+        self.sup = SuppressKnown()
+        self.required_columns = list(converters.keys()) + list(datatypes.keys())
+        self.obligatory_columns = ([]
+                                   if obligatory_columns is None
+                                   else obligatory_columns)
+        self.unique_keys = [] if unique_keys is None else unique_keys
+        self.converters = converters
+        self.datatypes = datatypes
 
-        return df
+    def read_file(self, filename, **kwargs):
+        raise NotImplementedError()
 
     def check_columns(self, df, filename=None):
-        """
-        checks whether all required columns, i.e. columns for which converters
-        were defined exist.
+        """Check whether all required columns exist.
+
+        Required columns are those for which converters or datatypes are defined.
+
+        Raises
+        ------
+        DataInconsistencyError
 
-        Raises: DataInconsistencyError
         """
 
         for col in self.required_columns:
@@ -234,12 +276,11 @@ class XLSImporter(object):
                 raise DataInconsistencyError(errmsg)
 
     def check_unique(self, df, filename=None):
-        """
-        Check whether value combinations that shall be unique for each row are
-        unique.
+        """Check whether value combinations that shall be unique for each row are unique.
 
         If a second row is found, that uses the same combination of values as a
         previous one, the second one is removed.
+
         """
         df = df.copy()
         uniques = []
@@ -266,11 +307,57 @@ class XLSImporter(object):
 
         return df
 
+    def check_datatype(self, df, filename=None, strict=False):
+        """Check for each column whether non-null fields have the correct datatype.
+
+        .. note::
+
+          If columns are integer, but should be float, this method converts the respective columns
+          in place.
+
+        Parameters
+        ----------
+
+        strict: boolean, optional
+          If False (the default), try to convert columns, otherwise raise an error.
+
+        """
+        for key, datatype in self.datatypes.items():
+            # Check for castable numeric types first: We unconditionally cast int to the default
+            # float, because CaosDB does not have different sizes anyway.
+            col_dtype = df.dtypes[key]
+            if not strict and not np.issubdtype(col_dtype, datatype):
+                issub = np.issubdtype
+                #  These special cases should be fine.
+                if issub(col_dtype, np.integer) and issub(datatype, np.floating):
+                    df[key] = df[key].astype(datatype)
+
+            # Now check each element
+            for idx, val in df.loc[
+                    pd.notnull(df.loc[:, key]), key].items():
+
+                if not isinstance(val, datatype):
+                    msg = (
+                        "In row no. {rn} and column '{c}' of file '{fi}' the "
+                        "datatype was {was} but it should be "
+                        "{expected}".format(rn=idx, c=key, fi=filename,
+                                            was=str(type(val)).strip("<>"),
+                                            expected=str(datatype).strip("<>"))
+                    )
+                    logger.warning(msg, extra={'identifier': filename,
+                                               'category': "inconsistency"})
+                    raise DataInconsistencyError(msg)
+
     def check_missing(self, df, filename=None):
         """
         Check in each row whether obligatory fields are empty or null.
 
         Rows that have missing values are removed.
+
+        Returns
+        -------
+        out : pandas.DataFrame
+          The input DataFrame with incomplete rows removed.
         """
         df = df.copy()
 
@@ -306,3 +393,104 @@ class XLSImporter(object):
                     okay = False
 
         return df
+
+    def check_dataframe(self, df, filename=None, strict=False):
+        """Check if the dataframe conforms to the restrictions.
+
+        Checked restrictions are: Columns, data types, uniqueness requirements.
+
+        Parameters
+        ----------
+
+        df: pandas.DataFrame
+          The dataframe to be checked.
+
+        filename: string, optional
+          The file name, only used for output in case of problems.
+
+        strict: boolean, optional
+          If False (the default), try to convert columns, otherwise raise an error.
+        """
+        self.check_columns(df, filename=filename)
+        df = self.check_missing(df, filename=filename)
+        self.check_datatype(df, filename=filename, strict=strict)
+
+        if len(self.unique_keys) > 0:
+            df = self.check_unique(df, filename=filename)
+
+        return df
+
+
+class XLSImporter(TableImporter):
+    def read_file(self, filename, **kwargs):
+        return self.read_xls(filename=filename, **kwargs)
+
+    def read_xls(self, filename, **kwargs):
+        """Convert an xls file into a Pandas DataFrame.
+
+        The converters of the XLSImporter object are used.
+
+        Raises: DataInconsistencyError
+        """
+        try:
+            xls_file = pd.io.excel.ExcelFile(filename)
+        except (XLRDError, ValueError) as e:
+            logger.warning(
+                "Cannot read \n{}.\nError:{}".format(filename,
+                                                     str(e)),
+                extra={'identifier': str(filename),
+                       'category': "inconsistency"})
+            raise DataInconsistencyError(*e.args)
+
+        if len(xls_file.sheet_names) > 1:
+            # Multiple sheets is the default now. Only show in debug
+            logger.debug(
+                "Excel file {} contains multiple sheets. "
+                "All but the first are being ignored.".format(filename))
+
+        try:
+            df = xls_file.parse(converters=self.converters, **kwargs)
+        except Exception as e:
+            logger.warning(
+                "Cannot parse {}.\n{}".format(filename, e),
+                extra={'identifier': str(filename),
+                       'category': "inconsistency"})
+            raise DataInconsistencyError(*e.args)
+
+        df = self.check_dataframe(df, filename)
+
+        return df
+
+
+class CSVImporter(TableImporter):
+    def read_file(self, filename, sep=",", **kwargs):
+        try:
+            df = pd.read_csv(filename, sep=sep, converters=self.converters,
+                             **kwargs)
+        except ValueError as ve:
+            logger.warning(
+                "Cannot parse {}.\n{}".format(filename, ve),
+                extra={'identifier': str(filename),
+                       'category': "inconsistency"})
+            raise DataInconsistencyError(*ve.args)
+
+        df = self.check_dataframe(df, filename)
+
+        return df
+
+
+class TSVImporter(TableImporter):
+    def read_file(self, filename, **kwargs):
+        try:
+            df = pd.read_csv(filename, sep="\t", converters=self.converters,
+                             **kwargs)
+        except ValueError as ve:
+            logger.warning(
+                "Cannot parse {}.\n{}".format(filename, ve),
+                extra={'identifier': str(filename),
+                       'category': "inconsistency"})
+            raise DataInconsistencyError(*ve.args)
+
+        df = self.check_dataframe(df, filename)
+
+        return df
diff --git a/src/doc/conf.py b/src/doc/conf.py
index fef2ee6760d71cdbe544c4c0c8dbabe34eb79475..1e07336628b696a95bc821a462f3d78f3ae11df0 100644
--- a/src/doc/conf.py
+++ b/src/doc/conf.py
@@ -27,9 +27,9 @@ copyright = '2021, IndiScale GmbH'
 author = 'Daniel Hornung'
 
 # The short X.Y version
-version = '0.X.Y'
+version = '0.3.2'
 # The full version, including alpha/beta/rc tags
-release = '0.x.y-beta-rc2'
+release = '0.3.2'
 
 
 # -- General configuration ---------------------------------------------------
diff --git a/src/doc/crawler.rst b/src/doc/crawler.rst
index c52bbf2fe9b9f5fd77805e45ec85d195f5aa95f3..4b99c97e6db16e5691f373fa5fb4903e4d078155 100644
--- a/src/doc/crawler.rst
+++ b/src/doc/crawler.rst
@@ -131,6 +131,10 @@ The behavior and rules of the crawler are defined in logical units
 called CFoods. In order to extend the crawler you need to extend an
 existing CFood or create new one.
 
+.. Note:: A crawler always needs a corresponding data model to exist on the
+          server. The following does not cover this aspect. Please refer, for
+          example, to the documentation of the YAML interface.
+
 .. _c-food-introduction:
 
 CFood -- Introduction
diff --git a/src/doc/yaml_interface.rst b/src/doc/yaml_interface.rst
index 06248f2b5c17f40b6f15f5f55664c5a4a5530a86..dcf4c5d6c7a674bd8d32d92df0a509e511af26f5 100644
--- a/src/doc/yaml_interface.rst
+++ b/src/doc/yaml_interface.rst
@@ -39,6 +39,9 @@ Let's start with an example taken from https://gitlab.indiscale.com/caosdb/src/c
           table:
              datatype: FILE
              description: 'A table document associated with this recording'
+    extern:
+       - Textfile
+
 
 
 This example defines 3 ``RecordType``s:
@@ -46,6 +49,9 @@ This example defines 3 ``RecordType``s:
 - A ``Project`` with one obligatory property ``datatype``
 - A Person with a ``firstName`` and a ``lastName`` (as recommended properties)
 - A ``LabbookEntry`` with multiple recommended properties of different data types
+- Finally, it is assumed that the server already knows a RecordType or
+  Property with the name ``Textfile`` (see the ``extern`` keyword below).
+
 
 One major advantage of using this interface (in contrast to the standard python interface) is that properties can be defined and added to record types "on-the-fly". E.g. the three lines for ``firstName`` as sub entries of ``Person`` have two effects on CaosDB:
 
@@ -60,6 +66,8 @@ Note the difference between the three property declarations of ``LabbookEntry``:
 - ``responsible``: This defines and adds a property with name "responsible" to ``LabbookEntry`, which has a datatype ``Person``. ``Person`` is defined above.
 - ``firstName``: This defines and adds a property with the standard data type ``TEXT`` to record type ``Person``.
 
+If the data model depends on already existing parts, those can be added using the ``extern`` keyword.
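+For example, a hypothetical RecordType ``Textdocument`` could use the
+``Textfile`` entity that is assumed to exist on the server:
+
+.. code-block:: yaml
+
+   extern:
+      - Textfile
+
+   Textdocument:
+      recommended_properties:
+         Textfile: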
+
 Datatypes
 ---------
 
diff --git a/unittests/data/datatypes.xlsx b/unittests/data/datatypes.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..34fc4cf43092a68b630e0e04ebc43609b8a0b17b
Binary files /dev/null and b/unittests/data/datatypes.xlsx differ
diff --git a/unittests/test.csv b/unittests/test.csv
new file mode 100644
index 0000000000000000000000000000000000000000..a29679afce78089f3cdd4e5e388262456668cd90
--- /dev/null
+++ b/unittests/test.csv
@@ -0,0 +1,3 @@
+temperature [°C] ,depth 
+234.4,3.0
+344.6,5.1
diff --git a/unittests/test_cfood.py b/unittests/test_cfood.py
index ab5cb11e9dc89faf26527d72e64459cae73b1d88..f5125166106c4bace21121d58a025886f9b132b9 100644
--- a/unittests/test_cfood.py
+++ b/unittests/test_cfood.py
@@ -190,6 +190,35 @@ class InsertionTest(unittest.TestCase):
                             value=new_int, to_be_updated=to_be_updated)
         assert to_be_updated[0] is entity
 
+        """Test properties with lists"""
+        rec1 = db.Record(id=12345)
+        rec1.add_property("Exp", value=[98765], datatype=db.LIST("Exp"))
+        rec2 = db.Record(id=98765)
+        update = []
+        # compare Entity with id
+        assure_has_property(rec1, "Exp", [rec2], to_be_updated=update)
+        assert len(update) == 0
+        update = []
+        # compare id with id
+        assure_has_property(rec1, "Exp", [98765], to_be_updated=update)
+        assert len(update) == 0
+        update = []
+        # compare id with different list of ids
+        assure_has_property(rec1, "Exp2", [98765, 444, 555],
+                            to_be_updated=update)
+        assert len(update) == 1
+
+        rec = db.Record(id=666666)
+        rec3 = db.Record(id=777777)
+        rec.add_property("Exp", value=[888888, rec3], datatype=db.LIST("Exp"))
+        rec2 = db.Record(id=888888)
+        update = []
+        # compare id and Entity with id and Entity
+        # i.e. check that conversion from Entity to id works in both
+        # directions.
+        assure_has_property(rec, "Exp", [rec2, 777777], to_be_updated=update)
+        assert len(update) == 0
+
     def test_property_is(self):
         """Test properties with string, int, float, and Boolean values"""
         entity = db.Record()
diff --git a/unittests/test_generic_analysis.py b/unittests/test_generic_analysis.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1077b97ec58f80c8534c89d5fa5f57d8d815cb9
--- /dev/null
+++ b/unittests/test_generic_analysis.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2021 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2021 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""
+Unit tests for the caosadvancedtools.serverside.generic_analysis module.
+"""
+
+import caosdb as db
+from caosadvancedtools.serverside.generic_analysis import \
+    check_referenced_script
+
+from test_utils import BaseMockUpTest
+
+
+class TestGAnalysisNoFile(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.entities = (
+            '<Response><Record name="script.py" path="/some/path/script.py'
+            '" id="1234"/><Query string="find record" results="1">'
+            '</Query></Response>')
+
+    def test_check_referenced_script(self):
+        # missing scripts
+        self.assertIsNone(check_referenced_script(db.Record()))
+        # wrong datatype
+        self.assertIsNone(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.TEXT)))
+        # wrong value
+        self.assertIsNone(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.REFERENCE, value="hallo")))
+        # no file
+        self.assertIsNone(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.REFERENCE, value="1234")))
+
+
+class TestGAnalysisFile(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.entities = (
+            '<Response><File name="script.py" path="/some/path/script.py'
+            '" id="1234"/><Query string="find record" results="1">'
+            '</Query></Response>')
+
+    def test_check_referenced_script(self):
+        # all correct
+        self.assertEqual(check_referenced_script(db.Record().add_property(
+            "scripts", datatype=db.REFERENCE, value="1234")), "script.py")
diff --git a/unittests/test_h5.py b/unittests/test_h5.py
index e5ae94686fe4542f6833e21e9a80f01e4257538d..360d4b28938492d0f2af6d696e39dffb1cc3fead 100644
--- a/unittests/test_h5.py
+++ b/unittests/test_h5.py
@@ -74,6 +74,9 @@ class H5CFoodTest(unittest.TestCase):
             self.assertEqual(i.name, "group_level2_aa")
 
     def test_collect_existing_structure(self):
+        # TODO this probably breaks the code: the original function will not
+        # be restored correctly.
+        # Change it to use the BaseMockUpTest
         real_retrieve = caosdb.apiutils.retrieve_entity_with_id
         caosdb.apiutils.retrieve_entity_with_id = dummy_get
 
diff --git a/unittests/test_parser.py b/unittests/test_parser.py
index 6af5cd1693f97726476713e22edd10e044c6d200..85e6b7e5fe5f0337ac1ae5a711f50484866d98b3 100644
--- a/unittests/test_parser.py
+++ b/unittests/test_parser.py
@@ -276,6 +276,22 @@ A:
             parse_model_from_string(yaml)
         self.assertIn("line 3", yde.exception.args[0])
 
+    def test_reference_property(self):
+        """Test correct creation of reference property using an RT."""
+        modeldef = """A:
+  recommended_properties:
+    ref:
+      datatype: LIST<A>
+"""
+        model = parse_model_from_string(modeldef)
+        self.assertEqual(len(model), 2)
+        for key in model.keys():
+            if key == "A":
+                self.assertTrue(isinstance(model[key], db.RecordType))
+            elif key == "ref":
+                self.assertTrue(isinstance(model[key], db.Property))
+                self.assertEqual(model[key].datatype, "LIST<A>")
+
 
 class ExternTest(unittest.TestCase):
     """TODO Testing the "extern" keyword in the YAML."""
diff --git a/unittests/test_result_table_cfood.py b/unittests/test_result_table_cfood.py
new file mode 100644
index 0000000000000000000000000000000000000000..3341a2394cc9ef15ae172bb8992445d87c60d063
--- /dev/null
+++ b/unittests/test_result_table_cfood.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python3
+# encoding: utf-8
+#
+# ** header v3.0
+# This file is a part of the CaosDB Project.
+#
+# Copyright (C) 2018 Research Group Biomedical Physics,
+# Max-Planck-Institute for Dynamics and Self-Organization Göttingen
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+# ** end header
+#
+
+"""
+test module for ResultTableCFood
+"""
+
+
+import os
+import re
+import unittest
+
+import caosdb as db
+from caosadvancedtools.scifolder.result_table_cfood import ResultTableCFood
+
+
+class CFoodTest(unittest.TestCase):
+    def test_re(self):
+        self.assertIsNotNone(re.match(ResultTableCFood.table_re, "result_table_Hallo.csv"))
+        self.assertEqual(re.match(ResultTableCFood.table_re, "result_table_Hallo.csv").group("recordtype"),
+                         "Hallo")
+        self.assertIsNotNone(re.match(ResultTableCFood.table_re,
+                                      "result_table_Cool RecordType.csv"))
+        self.assertEqual(re.match(ResultTableCFood.table_re, "result_table_Cool RecordType.csv").group("recordtype"),
+                         "Cool RecordType")
+        self.assertIsNone(re.match(ResultTableCFood.table_re, "result_tableCool RecordType.csv"))
+
+        self.assertIsNotNone(re.match(ResultTableCFood.property_name_re,
+                                      "temperature [C]"))
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature [C]").group("pname"),
+                         "temperature")
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature [C]").group("unit"), "C")
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature [ C ]").group("unit"), "C")
+        self.assertEqual(re.match(ResultTableCFood.property_name_re,
+                                  "temperature").group("pname"), "temperature")
+
+    def test_ident(self):
+        rtc = ResultTableCFood(os.path.join(os.path.dirname(__file__), "test.csv"))
+        rtc.match = re.match(ResultTableCFood.get_re(),
+                             "/ExperimentalData/2010_TestProject/2019-02-03_something/result_table_RT.csv")
+        rtc.create_identifiables()
+        rtc.update_identifiables()
diff --git a/unittests/test_table_importer.py b/unittests/test_table_importer.py
index bb26f5031f2ae781899ddc4354ba4527cf669c83..4c7d044ef1de877cf4072034c96aca7113f75cc0 100644
--- a/unittests/test_table_importer.py
+++ b/unittests/test_table_importer.py
@@ -27,14 +27,20 @@ import numpy as np
 import pandas as pd
 import pytest
 from caosadvancedtools.datainconsistency import DataInconsistencyError
-from caosadvancedtools.table_importer import (XLSImporter, assure_name_format,
+from caosadvancedtools.table_importer import (CSVImporter, TableImporter,
+                                              TSVImporter, XLSImporter,
+                                              assure_name_format,
+                                              check_reference_field,
                                               date_converter,
                                               datetime_converter,
                                               incomplete_date_converter,
+                                              string_in_list,
                                               win_path_converter,
                                               win_path_list_converter,
                                               yes_no_converter)
 
+from test_utils import BaseMockUpTest
+
 
 class ConverterTest(unittest.TestCase):
     def test_yes_no(self):
@@ -49,6 +55,16 @@ class ConverterTest(unittest.TestCase):
         self.assertRaises(ValueError, yes_no_converter, "True")
         self.assertRaises(ValueError, yes_no_converter, "true")
 
+    def test_string_in_list(self):
+        self.assertEqual("false", string_in_list("false",
+                                                 ["FALSE", "TRUE"]))
+        self.assertEqual("FALSE", string_in_list("FALSE",
+                                                 ["FALSE", "TRUE"], False))
+        self.assertRaises(ValueError, string_in_list, "FALSE", [])
+        self.assertRaises(ValueError, string_in_list, "FALSE", ["fals"])
+        self.assertRaises(ValueError, string_in_list,
+                          "FALSE", ["false"], False)
+
     def test_assure_name_format(self):
         self.assertEqual(assure_name_format("Müstermann, Max"),
                          "Müstermann, Max")
@@ -62,16 +78,17 @@ class ConverterTest(unittest.TestCase):
                          ["/this/computer"])
         self.assertEqual(win_path_list_converter(
             r"\this\computer,\this\computer"),
-                         ["/this/computer", "/this/computer"])
+            ["/this/computer", "/this/computer"])
 
+    @pytest.mark.xfail(reason="To be fixed, see Issue #34")
     def test_datetime(self):
         test_file = os.path.join(os.path.dirname(__file__), "date.xlsx")
-        self.importer = XLSImporter(converters={'d': datetime_converter,
-                                                }, obligatory_columns=['d'])
+        importer = XLSImporter(converters={'d': datetime_converter,
+                                           }, obligatory_columns=['d'])
 
         xls_file = pd.io.excel.ExcelFile(test_file)
         df = xls_file.parse()
-        df = self.importer.read_xls(test_file)
+        df = importer.read_xls(test_file)
         assert df.shape[0] == 2
         # TODO datatypes are different; fix it
         assert df.d.iloc[0] == datetime.datetime(1980, 12, 31, 13, 24, 23)
@@ -79,30 +96,30 @@ class ConverterTest(unittest.TestCase):
     def test_date_xlsx(self):
         """Test with .xlsx in order to check openpyxl engine."""
         test_file = os.path.join(os.path.dirname(__file__), "date.xlsx")
-        self.importer = XLSImporter(converters={'a': date_converter,
-                                                'b': date_converter,
-                                                'c': partial(date_converter,
-                                                             fmt="%d.%m.%y")
-                                                }, obligatory_columns=['a'])
+        importer = XLSImporter(converters={'a': date_converter,
+                                           'b': date_converter,
+                                           'c': partial(date_converter,
+                                                        fmt="%d.%m.%y")
+                                           }, obligatory_columns=['a'])
 
         xls_file = pd.io.excel.ExcelFile(test_file)
         df = xls_file.parse()
-        df = self.importer.read_xls(test_file)
+        df = importer.read_xls(test_file)
         assert df.shape[0] == 2
         assert df.a.iloc[0] == df.b.iloc[0] == df.c.iloc[0]
 
     def test_date_xls(self):
         """Test with .xls in order to check xlrd engine."""
         test_file = os.path.join(os.path.dirname(__file__), "date.xls")
-        self.importer = XLSImporter(converters={'a': date_converter,
-                                                'b': date_converter,
-                                                'c': partial(date_converter,
-                                                             fmt="%d.%m.%y")
-                                                }, obligatory_columns=['a'])
+        importer = XLSImporter(converters={'a': date_converter,
+                                           'b': date_converter,
+                                           'c': partial(date_converter,
+                                                        fmt="%d.%m.%y")
+                                           }, obligatory_columns=['a'])
 
         xls_file = pd.io.excel.ExcelFile(test_file)
         df = xls_file.parse()
-        df = self.importer.read_xls(test_file)
+        df = importer.read_xls(test_file)
         assert df.shape[0] == 2
         assert df.a.iloc[0] == df.b.iloc[0] == df.c.iloc[0]
 
@@ -125,48 +142,134 @@ class ConverterTest(unittest.TestCase):
                           fmts={"%Y": "%Y"})
 
 
-class XLSImporterTest(unittest.TestCase):
+class TableImporterTest(unittest.TestCase):
     def setUp(self):
-        self.importer = XLSImporter(
-            converters={'a': str, 'b': int, 'c': float, 'd': yes_no_converter},
+        self.importer_kwargs = dict(
+            converters={'c': float, 'd': yes_no_converter},
+            datatypes={'a': str, 'b': int},
             obligatory_columns=['a', 'b'], unique_keys=[('a', 'b')])
         self.valid_df = pd.DataFrame(
             [['a', 1, 2.0, 'yes']], columns=['a', 'b', 'c', 'd'])
 
     def test_missing_col(self):
-        df = pd.DataFrame(columns=['a', 'b'])
-        self.assertRaises(ValueError, self.importer.check_columns, df)
-        self.importer.check_columns(self.valid_df)
+        # check missing from converters
+        df = pd.DataFrame(columns=['a', 'b', 'c'])
+        importer = TableImporter(**self.importer_kwargs)
+        self.assertRaises(ValueError, importer.check_columns, df)
+        # check missing from datatypes
+        df = pd.DataFrame(columns=['a', 'd', 'c'])
+        importer = TableImporter(**self.importer_kwargs)
+        self.assertRaises(ValueError, importer.check_columns, df)
+        # check valid
+        importer.check_columns(self.valid_df)
 
     def test_missing_val(self):
-        self.importer.check_missing(self.valid_df)
+        importer = TableImporter(**self.importer_kwargs)
+        # check valid
+        importer.check_missing(self.valid_df)
+        # check invalid
         df = pd.DataFrame([[None, np.nan, 2.0, 'yes'],
                            [None, 1, 2.0, 'yes'],
                            ['a', np.nan, 2.0, 'yes'],
                            ['b', 5, 3.0, 'no']],
                           columns=['a', 'b', 'c', 'd'])
-        df_new = self.importer.check_missing(df)
+        df_new = importer.check_missing(df)
         self.assertEqual(df_new.shape[0], 1)
         self.assertEqual(df_new.shape[1], 4)
         self.assertEqual(df_new.iloc[0].b, 5)
 
-    def test_full(self):
-        """ test full run with example data """
-        tmp = NamedTemporaryFile(delete=False, suffix=".xlsx")
-        tmp.close()
-        self.valid_df.to_excel(tmp.name)
-        self.importer.read_xls(tmp.name)
+    def test_wrong_datatype(self):
+        importer = TableImporter(**self.importer_kwargs)
+        df = pd.DataFrame([[None, np.nan, 2.0, 'yes'],
+                           [5, 1, 2.0, 'yes']],
+                          columns=['a', 'b', 'c', 'd'])
+        self.assertRaises(DataInconsistencyError, importer.check_datatype, df)
 
     def test_unique(self):
-        self.importer.check_missing(self.valid_df)
+        importer = TableImporter(**self.importer_kwargs)
+        importer.check_missing(self.valid_df)
         df = pd.DataFrame([['b', 5, 3.0, 'no'], ['b', 5, 3.0, 'no']],
                           columns=['a', 'b', 'c', 'd'])
-        df_new = self.importer.check_unique(df)
+        df_new = importer.check_unique(df)
         self.assertEqual(df_new.shape[0], 1)
 
+
+class XLSImporterTest(TableImporterTest):
+    def test_full(self):
+        """ test full run with example data """
+        tmp = NamedTemporaryFile(delete=False, suffix=".xlsx")
+        tmp.close()
+        self.valid_df.to_excel(tmp.name)
+        importer = XLSImporter(**self.importer_kwargs)
+        importer.read_file(tmp.name)
+
     def test_raise(self):
+        importer = XLSImporter(**self.importer_kwargs)
         tmp = NamedTemporaryFile(delete=False, suffix=".lol")
         tmp.close()
-        # TODO ValueError is raised instead
-        self.assertRaises(DataInconsistencyError, self.importer.read_xls,
+        self.assertRaises(DataInconsistencyError, importer.read_xls,
                           tmp.name)
+
+    def test_datatypes(self):
+        """Test datataypes in columns."""
+        importer = XLSImporter(converters={},
+                               obligatory_columns=["float_as_float"],
+                               datatypes={
+                                   "float_as_float": float,
+                                   "int_as_float": float,
+                                   "int_as_int": int,
+                               }
+                               )
+        df = importer.read_xls(os.path.join(os.path.dirname(__file__), "data", "datatypes.xlsx"))
+        assert np.issubdtype(df.loc[0, "int_as_float"], float)
+
+
+class CSVImporterTest(TableImporterTest):
+    def test_full(self):
+        """ test full run with example data """
+        tmp = NamedTemporaryFile(delete=False, suffix=".csv")
+        tmp.close()
+        self.valid_df.to_csv(tmp.name)
+        importer = CSVImporter(**self.importer_kwargs)
+        importer.read_file(tmp.name)
+
+
+class TSVImporterTest(TableImporterTest):
+    def test_full(self):
+        """ test full run with example data """
+        tmp = NamedTemporaryFile(delete=False, suffix=".tsv")
+        tmp.close()
+        self.valid_df.to_csv(tmp.name, sep="\t")
+        importer = TSVImporter(**self.importer_kwargs)
+        importer.read_file(tmp.name)
+
+
+class CountQueryNoneConverterTest(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # simulate that 0 entity exists
+        self.entities = (
+            '<Response count="0">'
+            '<Query string="count record" results="0">'
+            '</Query>'
+            '</Response>'
+            )
+
+    def test_check_reference_field(self):
+        self.assertRaises(ValueError, check_reference_field, "1232",  "Max")
+
+
+class CountQuerySingleConverterTest(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        # simulate that 1 entity exists
+        self.entities = (
+            '<Response count="1">'
+            '<Query string="count record" results="1">'
+            '</Query>'
+            '</Response>'
+            )
+
+    def test_check_reference_field(self):
+        self.assertEqual(check_reference_field("1232",  "Max"),
+                         "1232")
diff --git a/unittests/test_utils.py b/unittests/test_utils.py
index 054d7c99069f294e9975742c1c0261fd7ebc768c..7369931799b00eba5a835458a6fad474de1d9039 100644
--- a/unittests/test_utils.py
+++ b/unittests/test_utils.py
@@ -32,14 +32,7 @@ from caosdb.connection.mockup import MockUpResponse, MockUpServerConnection
 from caosdb.exceptions import TransactionError
 
 
-class ReferencesBaseTest(unittest.TestCase):
-    def __init__(self, *args, **kwargs):
-        super().__init__(*args, **kwargs)
-        self.entities = (
-            '<Response><File name="test.npy" path="/some/path/test.npy'
-            '" id="1234"/><Query string="find record" results="1">'
-            '</Query></Response>')
-
+class BaseMockUpTest(unittest.TestCase):
     def setUp(self):
         conlogger = logging.getLogger("connection")
         conlogger.setLevel(level=logging.ERROR)
@@ -70,6 +63,15 @@ class ReferencesBaseTest(unittest.TestCase):
 
         return log
 
+
+class ReferencesBaseTest(BaseMockUpTest):
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.entities = (
+            '<Response><File name="test.npy" path="/some/path/test.npy'
+            '" id="1234"/><Query string="find record" results="1">'
+            '</Query></Response>')
+
     def test_ref(self):
         self.clear_log()
         files = get_referenced_files("test.npy", prefix=None, filename=None,