diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4fe401389b5201894f22ad7a829e820f01c8958f..e0589ec4e056a494e79762ef048cf2e644f4f40a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   variables to `int`, `float`, `str` and `bool`.
 - Transformer function definition in the cfood support variable
   substitutions now.
+- `crawler_main` and `scanner.scan_directory` now also accept a list
+  of directories to be crawled. Note that passing a list of
+  directories is currently incompatible with
+  `securityMode=SecurityMode.RETRIEVE` or
+  `securityMode=SecurityMode.INSERT` since the functionality to
+  authorize pending inserts or updates doesn't support path lists yet
+  and raises a NotImplementedError for now.
+- `match_newer_than_file` option for `DirectoryConverter`: A reference
+  file containing (only) an ISO-formatted datetime string can be
+  specified here. Directories with this option won't match if all
+  their contents were last modified before that datetime.
 
 ### Changed ###
 
@@ -33,6 +44,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Fixed ###
 
 - `spss_to_datamodel` script works again.
+- The cfood now supports bi-directional references when defining records on the same level.
+  (See: https://gitlab.indiscale.com/caosdb/src/caosdb-crawler/-/issues/175)
 
 ### Security ###
 
diff --git a/integrationtests/test_crawler_main.py b/integrationtests/test_crawler_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2eebf4f04e195754eaf71dc5e829b6a77a4cc4b
--- /dev/null
+++ b/integrationtests/test_crawler_main.py
@@ -0,0 +1,95 @@
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+#               2024 Florian Spreckelsen <f.spreckelsen@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import logging
+import tempfile
+
+from pathlib import Path
+
+import linkahead as db
+
+from caoscrawler import crawl
+from caoscrawler.crawl import (crawler_main, SecurityMode)
+from linkahead.utils.register_tests import clear_database, set_test_key
+
+set_test_key("10b128cf8a1372f30aa3697466bb55e76974e0c16a599bb44ace88f19c8f61e2")
+
+INTTESTDIR = Path(__file__).parent
+
+
+def test_list_of_paths(clear_database, monkeypatch):
+
+    # Mock the status record
+    dummy_status = {
+        "n_calls": 0
+    }
+
+    def _mock_update_status_record(run_id, n_inserts, n_updates, status):
+        print("Update mocked status")
+        dummy_status["run_id"] = run_id
+        dummy_status["n_inserts"] = n_inserts
+        dummy_status["n_updates"] = n_updates
+        dummy_status["status"] = status
+        dummy_status["n_calls"] += 1
+    monkeypatch.setattr(crawl, "_update_status_record", _mock_update_status_record)
+
+    # mock SSS environment
+    monkeypatch.setenv("SHARED_DIR", tempfile.gettempdir())
+
+    # We need only one dummy RT
+    rt = db.RecordType(name="TestType").insert()
+    basepath = INTTESTDIR / "test_data" / "crawler_main_with_list_of_dirs"
+    dirlist = [basepath / "dir1", basepath / "dir2"]
+    crawler_main(
+        dirlist,
+        cfood_file_name=basepath / "cfood.yml",
+        identifiables_definition_file=basepath / "identifiable.yml"
+    )
+    recs = db.execute_query("FIND TestType")
+    assert len(recs) == 2
+    assert "Test1" in [r.name for r in recs]
+    assert "Test2" in [r.name for r in recs]
+
+    assert dummy_status["n_inserts"] == 2
+    assert dummy_status["n_updates"] == 0
+    assert dummy_status["status"] == "OK"
+    assert dummy_status["n_calls"] == 1
+
+
+def test_not_implemented_list_with_authorization(caplog, clear_database):
+
+    rt = db.RecordType(name="TestType").insert()
+    basepath = INTTESTDIR / "test_data" / "crawler_main_with_list_of_dirs"
+    dirlist = [basepath / "dir1", basepath / "dir2"]
+
+    # This is not implemented yet, so check log for correct error.
+    ret = crawler_main(
+        dirlist,
+        cfood_file_name=basepath / "cfood.yml",
+        identifiables_definition_file=basepath / "identifiable.yml",
+        securityMode=SecurityMode.RETRIEVE
+    )
+    # crawler_main hides the error, but has a non-zero return code and
+    # errors in the log:
+    assert ret != 0
+    err_tuples = [t for t in caplog.record_tuples if t[1] == logging.ERROR]
+    assert len(err_tuples) == 1
+    assert "currently implemented only for single paths, not for lists of paths" in err_tuples[0][2]
+    # No inserts after the errors
+    assert len(db.execute_query("FIND TestType")) == 0
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/cfood.yml b/integrationtests/test_data/crawler_main_with_list_of_dirs/cfood.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c7f22ce07e9b401915aefde3bf7e3a78d92e2bd6
--- /dev/null
+++ b/integrationtests/test_data/crawler_main_with_list_of_dirs/cfood.yml
@@ -0,0 +1,10 @@
+---
+metadata:
+  crawler-version: 0.10.2
+---
+BaseDirElement:
+  type: Directory
+  match: ^dir(?P<dir_number>[0-9]+)$$
+  records:
+    TestType:
+      name: Test$dir_number
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/dir1/.gitkeep b/integrationtests/test_data/crawler_main_with_list_of_dirs/dir1/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/dir2/.gitkeep b/integrationtests/test_data/crawler_main_with_list_of_dirs/dir2/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/identifiable.yml b/integrationtests/test_data/crawler_main_with_list_of_dirs/identifiable.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6d608cece0ae7c2aa6461fb56025a8ac8e4faf6f
--- /dev/null
+++ b/integrationtests/test_data/crawler_main_with_list_of_dirs/identifiable.yml
@@ -0,0 +1,2 @@
+TestType:
+  - name
diff --git a/integrationtests/test_issues.py b/integrationtests/test_issues.py
index cb1e2e0925dd85b9f6cadf2b56b22aface4bb468..0506fa4db03e9b3638051e6ec4fa132bd348a988 100644
--- a/integrationtests/test_issues.py
+++ b/integrationtests/test_issues.py
@@ -1,4 +1,4 @@
-# This file is a part of the CaosDB Project.
+# This file is a part of the LinkAhead Project.
 #
 # Copyright (C) 2022 Indiscale GmbH <info@indiscale.com>
 #               2022 Florian Spreckelsen <f.spreckelsen@indiscale.com>
@@ -16,20 +16,22 @@
 # You should have received a copy of the GNU Affero General Public License
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 #
+import tempfile
+
 import linkahead as db
+import yaml
 from caosadvancedtools.models.parser import parse_model_from_string
 from caoscrawler.crawl import Crawler
 from caoscrawler.identifiable import Identifiable
 from caoscrawler.identifiable_adapters import CaosDBIdentifiableAdapter
-from caoscrawler.scanner import (create_converter_registry,
+from caoscrawler.scanner import (_load_definition_from_yaml_dict,
+                                 create_converter_registry,
                                  scan_structure_elements)
 from caoscrawler.structure_elements import DictElement
 from linkahead.cached import cache_clear
 from linkahead.utils.register_tests import clear_database, set_test_key
 from pytest import fixture, mark, raises
 
-import tempfile
-
 set_test_key("10b128cf8a1372f30aa3697466bb55e76974e0c16a599bb44ace88f19c8f61e2")
 
 
@@ -332,6 +334,64 @@ def test_indiscale_87(clear_database):
         print("---")
 
 
+def test_issue_16(clear_database):
+    """
+    This is another test for:
+    https://gitlab.indiscale.com/caosdb/src/caosdb-crawler/-/issues/16
+
+    In addition to the two unit tests for recursive definitions in `test_scanner.py`, this system
+    test checks whether recursively defined records can be synchronized correctly using the crawler.
+    """
+    recursive_yaml = """
+FirstConverter:
+  type: DictElement
+  records:
+    Experiment:
+  subtree:
+    Converter:
+      type: DictElement
+      records:
+        Block:
+          name: block 1
+          Experiment: $Experiment
+        Experiment:
+          name: experiment 1
+          Block: $Block
+    """
+
+    crawler_definition = _load_definition_from_yaml_dict(
+        [yaml.load(recursive_yaml, Loader=yaml.SafeLoader)])
+    converter_registry = create_converter_registry(crawler_definition)
+
+    # Nested DictElements matching the structure in recursive_yaml: the outer
+    # element matches FirstConverter, the empty inner dict the inner Converter.
+    data = {"data": {}}
+    records = scan_structure_elements(DictElement(name="", value=data), crawler_definition,
+                                      converter_registry)
+
+    rt_exp = db.RecordType(name="Experiment").insert()
+    rt_block = db.RecordType(name="Block").insert()
+
+    ident = CaosDBIdentifiableAdapter()
+    ident.load_from_yaml_object(yaml.safe_load("""
+Experiment:
+- name
+Block:
+- name
+"""))
+
+    crawler = Crawler(identifiableAdapter=ident)
+    crawler.synchronize(crawled_data=records)
+
+    exp_res = db.execute_query("FIND Experiment")
+    assert len(exp_res) == 1
+    exp_block = db.execute_query("FIND Block")
+    assert len(exp_block) == 1
+
+    assert exp_res[0].get_property("Block").value == exp_block[0].id
+    assert exp_block[0].get_property("Experiment").value == exp_res[0].id
+
+
 def test_issue_14(clear_database):
     """
     Issue title: Some parent updates are required before inserts
diff --git a/src/caoscrawler/cfood-schema.yml b/src/caoscrawler/cfood-schema.yml
index c5e0eaad092c12efbceb5f55b62b3d7cf8afdccf..d2e4cea24f0f2803499116420091b36e95b2c781 100644
--- a/src/caoscrawler/cfood-schema.yml
+++ b/src/caoscrawler/cfood-schema.yml
@@ -88,6 +88,12 @@ cfood:
         match_value:
           description: a regexp that is matched to the value of a key-value pair
           type: string
+        match_newer_than_file:
+          description: |
+            Only relevant for Directory. A path to a file containing
+            an ISO-formatted datetime. Only match if the Directory or at
+            least one of the files and subdirectories within it has been
+            modified since that datetime.
+          type: string
         record_from_dict:
           description: Only relevant for PropertiesFromDictElement.  Specify the root record which is generated from the contained dictionary.
           type: object
diff --git a/src/caoscrawler/converters/converters.py b/src/caoscrawler/converters/converters.py
index d06415f78df2949dfee5a7a352b631e4a0b0264f..09942918a3818978b1b4b0c3ded1635f5f9053fc 100644
--- a/src/caoscrawler/converters/converters.py
+++ b/src/caoscrawler/converters/converters.py
@@ -249,11 +249,35 @@ out: tuple
     return (propvalue, propunit, collection_mode)
 
 
-def create_records(values: GeneralStore, records: RecordStore, def_records: dict):
-    # list of keys to identify, which variables have been set by which paths:
-    # the items are tuples:
-    # 0: record name
-    # 1: property name
+def create_records(values: GeneralStore,
+                   records: RecordStore,
+                   def_records: dict) -> list[tuple[str, str]]:
+    """
+    Create records in GeneralStore `values` and RecordStore `records` as given
+    by the definition in `def_records`.
+
+    This function is called during scanning as specified by the cfood definition.
+    It should also be used by CustomConverters to create records, so that automatic
+    variable substitution and other crawler features are applied as well.
+
+    Parameters
+    ----------
+    values: GeneralStore
+      This GeneralStore is used to access the variables that are needed during
+      variable substitution when setting the properties of records and files.
+      Furthermore, the records generated by this function are stored in this
+      GeneralStore, in addition to being stored in the RecordStore given as the
+      second argument.
+
+    records: RecordStore
+      The RecordStore where the generated records will be stored.
+
+    def_records: dict
+      The `records` section of the converter definition in the cfood, mapping
+      record names to their parents, role and property definitions.
+
+    Returns
+    -------
+    : list[tuple[str, str]]
+      A list of tuples, each containing a record name (first element) and a
+      property name (second element). This list is used by the scanner to
+      build the debug tree.
+
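+    Example
+    -------
+    A custom converter could, for instance, create a record of a (hypothetical)
+    "Experiment" RecordType roughly like this; the definition dict has the same
+    structure as the ``records:`` section of a cfood::
+
+        keys_modified = create_records(
+            values, records,
+            {"Experiment": {"name": "Experiment 1"}})
+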
+    """
     keys_modified = []
 
     for name, record in def_records.items():
@@ -286,11 +310,22 @@ def create_records(values: GeneralStore, records: RecordStore, def_records: dict
             if (role == "Record" and "parents" not in record):
                 c_record.add_parent(name)
 
-        c_record = records[name]
-
         if isinstance(record, str):
             raise RuntimeError(
                 "dict expected, but found str: {}".format(record))
+
+    # We do a second pass over def_records here. Finishing the first pass, which
+    # creates the records in the variable and record stores, first ensures that
+    # records defined on this level can already be accessed during variable
+    # substitution when the properties are set in the next block.
+    for name, record in def_records.items():
+        # See above:
+        if record is None:
+            record = {}
+
+        c_record = records[name]
+
+        # Set the properties:
         for key, value in record.items():
             if key == "parents" or key == "role":
                 continue
@@ -320,7 +355,8 @@ def create_records(values: GeneralStore, records: RecordStore, def_records: dict
                         c_record.add_property(name=key, value=propvalue, unit=propunit)
                 else:
                     if collection_mode == "list":
-                        if propunit and c_record.get_property(key).unit and propunit != c_record.get_property(key).unit:
+                        if (propunit and c_record.get_property(key).unit
+                                and propunit != c_record.get_property(key).unit):
                             raise RuntimeError(
                                 f"Property '{key}' has contradictory units: "
                                 f"{propunit} and {c_record.get_property(key).unit}"
@@ -514,7 +550,7 @@ class Converter(object, metaclass=ABCMeta):
                 matched_m_prop = None
                 matched_m_prop_value = None
                 for prop_key, prop_value in properties.items():
-                    print("{} = {}".format(prop_key, prop_value))
+                    # print("{} = {}".format(prop_key, prop_value))
                     # TODO: automatic conversion to str ok?
                     m_prop = re.match(prop_def_key, str(prop_key))
                     if m_prop is not None:
@@ -769,6 +805,11 @@ class DirectoryConverter(Converter):
         m = re.match(self.definition["match"], element.name)
         if m is None:
             return None
+        if "match_newer_than_file" in self.definition:
+            last_modified = self._get_most_recent_change_in_dir(element)
+            reference = self._get_reference_file_timestamp()
+            if last_modified < reference:
+                return None
         return m.groupdict()
 
     @staticmethod
@@ -791,6 +832,49 @@ class DirectoryConverter(Converter):
 
         return children
 
+    @staticmethod
+    def _get_most_recent_change_in_dir(element: Directory) -> datetime.datetime:
+        """Return the datetime of the most recent change of any file
+        or directory in the given Directory element.
+
+        """
+        most_recent = os.path.getmtime(element.path)
+
+        for root, _, files in os.walk(element.path):
+            mtimes = [os.path.getmtime(root)] + \
+                [os.path.getmtime(os.path.join(root, fname)) for fname in files]
+            if max(mtimes) > most_recent:
+                most_recent = max(mtimes)
+
+        return datetime.datetime.fromtimestamp(most_recent)
+
+    def _get_reference_file_timestamp(self) -> datetime.datetime:
+        """Return a time stamp read from a reference file if it
+        exists. Otherwise return datetime.datetime.min, i.e., the
+        earliest datetime known to datetime.
+
+        """
+
+        if "match_newer_than_file" not in self.definition:
+            logger.debug("No reference file specified.")
+            return datetime.datetime.min
+
+        elif not os.path.isfile(self.definition["match_newer_than_file"]):
+            logger.debug("Reference file doesn't exist.")
+            return datetime.datetime.min
+
+        with open(self.definition["match_newer_than_file"]) as ref_file:
+            stamp_str = ref_file.readline().strip()
+            try:
+                return datetime.datetime.fromisoformat(stamp_str)
+            except ValueError as e:
+                logger.error(
+                    f"Reference file in {self.definition['match_newer_than_file']} "
+                    "doesn't contain a ISO formatted datetime in its first line. "
+                    "Match regardless of modification times."
+                )
+                raise e
+
 
 class SimpleFileConverter(Converter):
     """Just a file, ignore the contents."""
diff --git a/src/caoscrawler/crawl.py b/src/caoscrawler/crawl.py
index a79e4434ee8f58fd1cc2646ced85c0d02d3fb66b..e0d243979faee8f44cdcee3b0e49c15af640c378 100644
--- a/src/caoscrawler/crawl.py
+++ b/src/caoscrawler/crawl.py
@@ -531,8 +531,8 @@ one with the entities that need to be updated and the other with entities to be
                     prop.value = Crawler._get_property_id_for_datatype(
                         rtname=prop.datatype, name=prop.value)
                 except (db.EmptyUniqueQueryError, db.QueryNotUniqueError):
-                    logger.error("The Property {prop.name} with datatype={prop.datatype} has the "
-                                 "value {prop.value} and there is no appropriate Entity with such "
+                    logger.error(f"The Property {prop.name} with datatype={prop.datatype} has the "
+                                 f"value {prop.value} and there is no appropriate Entity with such "
                                  "a name.")
                     raise
         else:
@@ -548,8 +548,8 @@ one with the entities that need to be updated and the other with entities to be
                                                                              name=el))
                     except (db.EmptyUniqueQueryError, db.QueryNotUniqueError):
                         logger.error(
-                            "The Property {prop.name} with datatype={prop.datatype} has the "
-                            "value {prop.value} and there is no appropriate Entity with such "
+                            f"The Property {prop.name} with datatype={prop.datatype} has the "
+                            f"value {prop.value} and there is no appropriate Entity with such "
                             "a name.")
                         raise
                 else:
@@ -621,7 +621,7 @@ one with the entities that need to be updated and the other with entities to be
                     crawled_data: Optional[list[db.Record]] = None,
                     no_insert_RTs: Optional[list[str]] = None,
                     no_update_RTs: Optional[list[str]] = None,
-                    path_for_authorized_run: Optional[str] = "",
+                    path_for_authorized_run: Optional[Union[str, list[str]]] = "",
                     ):
         """
         This function applies several stages:
@@ -643,7 +643,7 @@ one with the entities that need to be updated and the other with entities to be
         no_update_RTs : list[str], optional
             list of RecordType names. Records that have one of those RecordTypes
             as parent will not be updated
-        path_for_authorized_run : str, optional
+        path_for_authorized_run : str or list[str], optional
             only used if there are changes that need authorization before being
             applied. The form for rerunning the crawler with the authorization
             of these changes will be generated with this path. See
@@ -661,6 +661,12 @@ one with the entities that need to be updated and the other with entities to be
                 "use for example the Scanner to create this data."))
             crawled_data = self.crawled_data
 
+        if isinstance(path_for_authorized_run, list) and self.securityMode != SecurityMode.UPDATE:
+            raise NotImplementedError(
+                "Authorization of inserts and updates is currently implemented only "
+                "for single paths, not for lists of paths."
+            )
+
         to_be_inserted, to_be_updated = self._split_into_inserts_and_updates(
             SyncGraph(crawled_data, self.identifiableAdapter))
 
@@ -1004,7 +1010,7 @@ def _store_dry_run_data(ins, upd):
             "update": updates}))
 
 
-def crawler_main(crawled_directory_path: str,
+def crawler_main(crawled_directory_path: Union[str, list[str]],
                  cfood_file_name: str,
                  identifiables_definition_file: Optional[str] = None,
                  debug: bool = False,
@@ -1022,8 +1028,8 @@ def crawler_main(crawled_directory_path: str,
 
     Parameters
     ----------
-    crawled_directory_path : str
-        path to be crawled
+    crawled_directory_path : str or list[str]
+        path(s) to be crawled
     cfood_file_name : str
         filename of the cfood to be used
     identifiables_definition_file : str
@@ -1115,42 +1121,28 @@ def crawler_main(crawled_directory_path: str,
                                                   crawler.run_id)
                 _update_status_record(crawler.run_id, len(inserts), len(updates), status="OK")
         return 0
-    except ForbiddenTransaction as err:
-        logger.debug(traceback.format_exc())
-        logger.error(err)
-        _update_status_record(crawler.run_id, 0, 0, status="FAILED")
-        return 1
-    except ConverterValidationError as err:
-        logger.debug(traceback.format_exc())
-        logger.error(err)
-        _update_status_record(crawler.run_id, 0, 0, status="FAILED")
-        return 1
-    except ImpossibleMergeError as err:
-        logger.debug(traceback.format_exc())
-        logger.error(
-            "Encountered conflicting information when creating Records from the crawled "
-            f"data:\n\n{err}"
-        )
-        _update_status_record(crawler.run_id, 0, 0, status="FAILED")
-        return 1
-    except TransactionError as err:
-        logger.debug(traceback.format_exc())
-        logger.error(err)
-        logger.error("Transaction error details:")
-        for suberr in err.errors:
-            logger.error("---")
-            logger.error(suberr.msg)
-            logger.error(suberr.entity)
-        return 1
     except Exception as err:
         logger.debug(traceback.format_exc())
         logger.error(err)
-
-        if "SHARED_DIR" in os.environ:
-            # pylint: disable=E0601
-            domain = get_config_setting("public_host_url")
-            logger.error("Unexpected Error: Please tell your administrator about this and provide "
-                         f"the following path.\n{get_shared_resource_link(domain, debuglog_public)}")
+        # Special treatment for known error types
+        if isinstance(err, ImpossibleMergeError):
+            logger.error(
+                "Encountered conflicting information when creating Records from the crawled "
+                f"data:\n\n{err}"
+            )
+        elif isinstance(err, TransactionError):
+            logger.error("Transaction error details:")
+            for suberr in err.errors:
+                logger.error("---")
+                logger.error(suberr.msg)
+                logger.error(suberr.entity)
+        # Unknown errors get a special message
+        elif not isinstance(err, (ConverterValidationError, ForbiddenTransaction)):
+            if "SHARED_DIR" in os.environ:
+                # pylint: disable=E0601
+                domain = get_config_setting("public_host_url")
+                logger.error("Unexpected Error: Please tell your administrator about this and provide "
+                             f"the following path.\n{get_shared_resource_link(domain, debuglog_public)}")
         _update_status_record(crawler.run_id, 0, 0, status="FAILED")
         return 1
 
@@ -1174,6 +1166,7 @@ def parse_args():
                         "This file will only be generated if this option is set.")
     parser.add_argument("--debug", required=False, action="store_true",
                         help="Path name of the cfood yaml file to be used.")
+    # TODO: allow providing multiple directories to be crawled on the command line
     parser.add_argument("crawled_directory_path",
                         help="The subtree of files below the given path will "
                         "be considered. Use '/' for everything.")
diff --git a/src/caoscrawler/scanner.py b/src/caoscrawler/scanner.py
index fa04e44f35c8670337ded6f15cbf00f95ae3a6ba..96eaaa8c8cd89acd113b773cb031fa602c33fab6 100644
--- a/src/caoscrawler/scanner.py
+++ b/src/caoscrawler/scanner.py
@@ -423,7 +423,7 @@ def scanner(items: list[StructureElement],
 # --------------------------------------------------------------------------------
 
 
-def scan_directory(dirname: str, crawler_definition_path: str,
+def scan_directory(dirname: Union[str, list[str]], crawler_definition_path: str,
                    restricted_path: Optional[list[str]] = None,
                    debug_tree: Optional[DebugTree] = None):
     """ Crawl a single directory.
@@ -436,10 +436,12 @@ def scan_directory(dirname: str, crawler_definition_path: str,
     Parameters
     ----------
 
+    dirname: str or list[str]
+        directory or list of directories to be scanned
     restricted_path: optional, list of strings
-            Traverse the data tree only along the given path. When the end of the given path
-            is reached, traverse the full tree as normal. See docstring of 'scanner' for
-            more details.
+        Traverse the data tree only along the given path. When the end
+        of the given path is reached, traverse the full tree as
+        normal. See docstring of 'scanner' for more details.
 
     Returns
     -------
@@ -457,26 +459,31 @@ def scan_directory(dirname: str, crawler_definition_path: str,
     if not dirname:
         raise ValueError(
             "You have to provide a non-empty path for crawling.")
-    dir_structure_name = os.path.basename(dirname)
-
-    # TODO: needs to be covered somewhere else
-    crawled_directory = dirname
-    if not dir_structure_name and dirname.endswith('/'):
-        if dirname == '/':
-            # Crawling the entire file system
-            dir_structure_name = "root"
-        else:
-            # dirname had a trailing '/'
-            dir_structure_name = os.path.basename(dirname[:-1])
-
-    return scan_structure_elements(Directory(dir_structure_name,
-                                             dirname),
-                                   crawler_definition,
-                                   converter_registry,
-                                   restricted_path=restricted_path,
-                                   debug_tree=debug_tree,
-                                   registered_transformer_functions=registered_transformer_functions
-                                   )
+    if not isinstance(dirname, list):
+        dirname = [dirname]
+    dir_element_list = []
+    for dname in dirname:
+        dir_structure_name = os.path.basename(dname)
+
+        # TODO: needs to be covered somewhere else
+        crawled_directory = dname
+        if not dir_structure_name and dname.endswith(os.path.sep):
+            if dname == os.path.sep:
+                # Crawling the entire file system
+                dir_structure_name = "root"
+            else:
+                # dname had a trailing path separator
+                dir_structure_name = os.path.basename(dname[:-1])
+        dir_element_list.append(Directory(dir_structure_name, dname))
+
+    return scan_structure_elements(
+        dir_element_list,
+        crawler_definition,
+        converter_registry,
+        restricted_path=restricted_path,
+        debug_tree=debug_tree,
+        registered_transformer_functions=registered_transformer_functions
+    )
 
 
 def scan_structure_elements(items: Union[list[StructureElement], StructureElement],
diff --git a/src/doc/converters/standard_converters.rst b/src/doc/converters/standard_converters.rst
index f7f18794496e5e658a8abdb5676b562d5e047675..5f86abb5b324e0cc1584e42e6abb2612acc8067f 100644
--- a/src/doc/converters/standard_converters.rst
+++ b/src/doc/converters/standard_converters.rst
@@ -6,9 +6,17 @@ These are the standard converters that exist in a default installation.  For wri
 
 Directory Converter
 ===================
-The Directory Converter creates StructureElements for each File and Directory
-inside the current Directory. You can match a regular expression against the
-directory name using the 'match' key.
+
+The Directory Converter creates StructureElements for each File and
+Directory inside the current Directory. You can match a regular
+expression against the directory name using the 'match' key.
+
+With the optional ``match_newer_than_file`` key, a path to a file
+containing only an ISO-formatted datetime string can be specified. If
+this is given, a directory will only match if it contains at least one
+file or directory that has been modified since that datetime. If the
+reference file doesn't exist, the directory is matched regardless of
+the modification times; if the file exists but doesn't contain a valid
+ISO datetime in its first line, an error is logged and a ``ValueError``
+is raised.
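+
+For example, a directory converter using this option might look like
+this (the path of the reference file is only an illustration):
+
+.. code-block:: yaml
+
+   ExperimentDir:
+     type: Directory
+     match: ^experiments$
+     match_newer_than_file: /path/to/last_crawl.txt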
 
 Simple File Converter
 =====================
diff --git a/unittests/test_cfood_metadata.py b/unittests/test_cfood_metadata.py
index c606a0a1afcc15d48164694768bae02adfb0fc0b..b123f98584ba99ed4fec412732cb2bf536034a91 100644
--- a/unittests/test_cfood_metadata.py
+++ b/unittests/test_cfood_metadata.py
@@ -18,7 +18,7 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 #
 from tempfile import NamedTemporaryFile
-from unittest.mock import MagicMock, Mock, patch
+from unittest.mock import patch
 
 import pytest
 import yaml
@@ -33,7 +33,7 @@ def _temp_file_load(txt: str):
     definition using load_definition from Crawler.
     """
     definition = None
-    with NamedTemporaryFile() as f:
+    with NamedTemporaryFile(delete=False) as f:
         f.write(txt.encode())
         f.flush()
         definition = load_definition(f.name)
diff --git a/unittests/test_converters.py b/unittests/test_converters.py
index 12285e463cdcab12f853931abc5f314ed6b20782..e4b442d91060c7ba98cb1a910156b1800f050be3 100644
--- a/unittests/test_converters.py
+++ b/unittests/test_converters.py
@@ -29,12 +29,15 @@ import importlib
 import json
 import logging
 import os
+import pytest
+import yaml
+
 from itertools import product
 from pathlib import Path
+from tempfile import NamedTemporaryFile
 
 import linkahead as db
-import pytest
-import yaml
+
 from caoscrawler.converters import (Converter, ConverterValidationError,
                                     DateElementConverter, DictElementConverter,
                                     DictIntegerElementConverter,
@@ -1070,3 +1073,59 @@ def test_dict_match_properties(converter_registry):
             "prop_d": 24  # duplicate matches
         })
         records = scan_structure_elements(root_dict_element, def_dict, converter_registry)
+
+
+def test_directory_converter_change_date(caplog, converter_registry):
+    """Test that only directories that were modified after a certain
+    date are crawled.
+
+    """
+    test_dir_element = Directory("test_directories", UNITTESTDIR / "test_directories")
+    date_of_dir_change = DirectoryConverter._get_most_recent_change_in_dir(test_dir_element)
+    past_date = date_of_dir_change - datetime.timedelta(days=1)
+    future_date = date_of_dir_change + datetime.timedelta(days=1)
+
+    tmpfi = NamedTemporaryFile(delete=False)
+
+    # Write the past datetime; the directory should still match
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"{past_date.isoformat()}\n")
+
+    converter_def = {
+        "type": "Directory",
+        "match": "^test_directories$",
+        "match_newer_than_file": tmpfi.name
+    }
+    dc = DirectoryConverter(name="DC1", definition=converter_def,
+                            converter_registry=converter_registry)
+    assert dc.match(test_dir_element) is not None
+
+    # Write the future datetime, so nothing should match
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"{future_date.isoformat()}\n")
+    assert dc.match(test_dir_element) is None
+
+    # Also match in the corner case of equality:
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"{date_of_dir_change.isoformat()}\n")
+    assert dc.match(test_dir_element) is not None
+
+    # Invalid content: an error is logged and a ValueError is raised
+    with open(tmpfi.name, "w") as fi:
+        fi.write("This is garbage.\n")
+    with pytest.raises(ValueError):
+        dc.match(test_dir_element)
+    assert len(caplog.record_tuples) == 1
+    assert caplog.record_tuples[0][1] == logging.ERROR
+    assert tmpfi.name in caplog.record_tuples[0][2]
+    assert "doesn't contain a ISO formatted datetime in its first line" in caplog.record_tuples[0][2]
+
+    # Match anything since file doesn't exist, inform in debug log.
+    os.remove(tmpfi.name)
+    # Clear log and enforce debug level.
+    caplog.clear()
+    caplog.set_level(logging.DEBUG)
+    assert dc.match(test_dir_element) is not None
+    assert len(caplog.record_tuples) == 1
+    assert caplog.record_tuples[0][1] == logging.DEBUG
+    assert "Reference file doesn't exist." == caplog.record_tuples[0][2]
diff --git a/unittests/test_crawler.py b/unittests/test_crawler.py
index e88ce454061fb268fa49e986f8392f71296beb07..ad69c6f57cbc8d48d194507d7c1aa79c9da7521b 100644
--- a/unittests/test_crawler.py
+++ b/unittests/test_crawler.py
@@ -824,9 +824,9 @@ def test_restricted_path(create_mock):
 
 
 def test_split_restricted_path():
-    assert ["el"] == split_restricted_path("/el")
-    assert ["el"] == split_restricted_path("/el/")
-    assert ["el", "el"] == split_restricted_path("/el/el")
+    assert ["el"] == split_restricted_path(os.path.sep + "el")
+    assert ["el"] == split_restricted_path(os.path.sep + "el" + os.path.sep)
+    assert ["el", "el"] == split_restricted_path(os.path.sep + "el" + os.path.sep + "el")
 
 
 # Filter the warning because we want to have it here and this way it does not hinder running
diff --git a/unittests/test_macros.py b/unittests/test_macros.py
index a87b633e8585a03431575426733cae6ba31b7acf..03fe0e665652bb12e204d76857771c1d064ec28a 100644
--- a/unittests/test_macros.py
+++ b/unittests/test_macros.py
@@ -50,10 +50,10 @@ def _temp_file_load(txt: str):
     definition using load_definition from Crawler.
     """
     definition = None
-    with NamedTemporaryFile() as f:
+    with NamedTemporaryFile(delete=False) as f:
         f.write(txt.encode())
         f.flush()
-        definition = load_definition(f.name)
+    definition = load_definition(f.name)
     return definition
 
 
diff --git a/unittests/test_scanner.py b/unittests/test_scanner.py
index 5cbbc63406ffb3f5ec1f9019ed7877d7880d7b69..c531f66fd38a714ba4f6f538d41c9fbaeb364d44 100644
--- a/unittests/test_scanner.py
+++ b/unittests/test_scanner.py
@@ -30,7 +30,7 @@ from functools import partial
 from pathlib import Path
 from tempfile import NamedTemporaryFile
 from unittest.mock import MagicMock, Mock, patch
-
+import os
 import linkahead as db
 import pytest
 import yaml
@@ -110,7 +110,7 @@ def test_record_structure_generation():
     assert len(subc[1]) == 0
 
     # The data analysis node creates one variable for the node itself:
-    assert subd[0]["DataAnalysis"] == "examples_article/DataAnalysis"
+    assert subd[0]["DataAnalysis"] == os.path.join("examples_article", "DataAnalysis")
     assert subc[0]["DataAnalysis"] is False
 
     subd = dbt.debug_tree[dircheckstr("DataAnalysis", "2020_climate-model-predict")]
@@ -128,9 +128,10 @@ def test_record_structure_generation():
     assert subd[0]["identifier"] == "climate-model-predict"
     assert subd[0]["Project"].__class__ == db.Record
 
-    assert subd[0]["DataAnalysis"] == "examples_article/DataAnalysis"
+    assert subd[0]["DataAnalysis"] == os.path.join("examples_article", "DataAnalysis")
     assert subc[0]["DataAnalysis"] is True
-    assert subd[0]["project_dir"] == "examples_article/DataAnalysis/2020_climate-model-predict"
+    assert subd[0]["project_dir"] == os.path.join(
+        "examples_article", "DataAnalysis", "2020_climate-model-predict")
     assert subc[0]["project_dir"] is False
 
     # Check the copy flags for the first level in the hierarchy:
@@ -405,3 +406,92 @@ def test_units():
     assert rec.get_property("may_be_overwritten") is not None
     assert rec.get_property("may_be_overwritten").value == "400"
     assert rec.get_property("may_be_overwritten").unit == "°C"
+
+
+def test_recursive_definition():
+    """
+    This is basically a test for:
+    https://gitlab.indiscale.com/caosdb/src/caosdb-crawler/-/issues/16
+    """
+
+    recursive_yaml = """
+Converter:
+  type: DictElement
+  records:
+    Block:
+      Experiment: $Experiment
+    Experiment:
+      Block: $Block
+    """
+
+    crawler_definition = _load_definition_from_yaml_dict(
+        [yaml.load(recursive_yaml, Loader=yaml.SafeLoader)])
+    converter_registry = create_converter_registry(crawler_definition)
+
+    data = {
+        "value_with_unit": "1.1 m",
+        "array_with_units": [
+            "1.1 cm",
+            "2.2 cm"
+        ]
+    }
+    records = scan_structure_elements(DictElement(name="", value=data), crawler_definition,
+                                      converter_registry)
+
+    assert len(records) == 2
+    assert len(records[0].parents) == 1
+    assert records[0].parents[0].name == "Block"
+    assert len(records[1].parents) == 1
+    assert records[1].parents[0].name == "Experiment"
+
+    assert records[0].get_property("Experiment").value == records[1]
+    assert records[1].get_property("Block").value == records[0]
+
+
+def test_recursive_definition_2():
+    """
+    This is another test for:
+    https://gitlab.indiscale.com/caosdb/src/caosdb-crawler/-/issues/16
+
+    It defines Experiment on a different level, which allows the recursive definition.
+    It is, however, not a workaround for test_recursive_definition, since a bidirectional link
+    on the same level is still not achieved.
+    """
+
+    recursive_yaml = """
+FirstConverter:
+  type: DictElement
+  records:
+    Experiment:
+  subtree:
+    Converter:
+      type: DictElement
+      records:
+        Block:
+          Experiment: $Experiment
+        Experiment:
+          Block: $Block
+    """
+
+    crawler_definition = _load_definition_from_yaml_dict(
+        [yaml.load(recursive_yaml, Loader=yaml.SafeLoader)])
+    converter_registry = create_converter_registry(crawler_definition)
+
+    data = {"data": {
+        "value_with_unit": "1.1 m",
+        "array_with_units": [
+            "1.1 cm",
+            "2.2 cm"
+        ]
+    }}
+    records = scan_structure_elements(DictElement(name="", value=data), crawler_definition,
+                                      converter_registry)
+
+    assert len(records) == 2
+    assert len(records[0].parents) == 1
+    assert records[0].parents[0].name == "Block"
+    assert len(records[1].parents) == 1
+    assert records[1].parents[0].name == "Experiment"
+
+    assert records[0].get_property("Experiment").value == records[1]
+    assert records[1].get_property("Block").value == records[0]
diff --git a/unittests/test_utilities.py b/unittests/test_utilities.py
index 463e304a99161f2294e5d202611dcf0b829e2045..a9b052524957b6f8c1e0378e3153fc06f4f36806 100644
--- a/unittests/test_utilities.py
+++ b/unittests/test_utilities.py
@@ -20,22 +20,23 @@
 #
 
 import pytest
-
+from os.path import sep
 from caoscrawler.crawl import split_restricted_path
 from caoscrawler.utils import MissingImport, get_shared_resource_link
 
 
 def test_split_restricted_path():
     assert split_restricted_path("") == []
-    assert split_restricted_path("/") == []
-    assert split_restricted_path("test/") == ["test"]
-    assert split_restricted_path("/test/") == ["test"]
-    assert split_restricted_path("test/bla") == ["test", "bla"]
-    assert split_restricted_path("/test/bla") == ["test", "bla"]
-    assert split_restricted_path("/test1/test2/bla") == ["test1", "test2", "bla"]
-    assert split_restricted_path("/test//bla") == ["test", "bla"]
-    assert split_restricted_path("//test/bla") == ["test", "bla"]
-    assert split_restricted_path("///test//bla////") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}") == []
+    assert split_restricted_path(f"test{sep}") == ["test"]
+    assert split_restricted_path(f"{sep}test{sep}") == ["test"]
+    assert split_restricted_path(f"test{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}test{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}test1{sep}test2{sep}bla") == ["test1", "test2", "bla"]
+    assert split_restricted_path(f"{sep}test{sep}{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}{sep}test{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(
+        f"{sep}{sep}{sep}test{sep}{sep}bla{sep}{sep}{sep}{sep}") == ["test", "bla"]
 
 
 def test_dummy_class():