diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4fe401389b5201894f22ad7a829e820f01c8958f..354024f9be37fc102f035a5d6562b6d522aaa915 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -23,6 +23,17 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   variables to `int`, `float`, `str` and `bool`.
-- Transformer function definition in the cfood support variable
-  substitutions now.
+- Transformer function definitions in the cfood now support variable
+  substitutions.
+- `crawler_main` and `scanner.scan_directory` now also support lists
+  of directories to be crawled. Note that giving a list of
+  directories is currently incompatible with
+  `securityMode=SecurityMode.RETRIEVE` or
+  `securityMode=SecurityMode.INSERT` since the functionality to
+  authorize pending inserts or updates doesn't support path lists yet
+  and will raise a NotImplementedError for now.
+- `match_newer_than_file` option for `DirectoryConverter`: A reference
+  file containing (only) an ISO-formatted datetime string can be
+  specified here. Directories with this option won't match if all
+  their contents were last modified before that datetime.
 
 ### Changed ###
 
diff --git a/integrationtests/test_crawler_main.py b/integrationtests/test_crawler_main.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2eebf4f04e195754eaf71dc5e829b6a77a4cc4b
--- /dev/null
+++ b/integrationtests/test_crawler_main.py
@@ -0,0 +1,95 @@
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+#               2024 Florian Spreckelsen <f.spreckelsen@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+#
+
+import logging
+import tempfile
+
+from pathlib import Path
+
+import linkahead as db
+
+from caoscrawler import crawl
+from caoscrawler.crawl import (crawler_main, SecurityMode)
+from linkahead.utils.register_tests import clear_database, set_test_key
+
+set_test_key("10b128cf8a1372f30aa3697466bb55e76974e0c16a599bb44ace88f19c8f61e2")
+
+INTTESTDIR = Path(__file__).parent
+
+
+def test_list_of_paths(clear_database, monkeypatch):
+
+    # Mock the status record
+    dummy_status = {
+        "n_calls": 0
+    }
+
+    def _mock_update_status_record(run_id, n_inserts, n_updates, status):
+        print("Update mocked status")
+        dummy_status["run_id"] = run_id
+        dummy_status["n_inserts"] = n_inserts
+        dummy_status["n_updates"] = n_updates
+        dummy_status["status"] = status
+        dummy_status["n_calls"] += 1
+    monkeypatch.setattr(crawl, "_update_status_record", _mock_update_status_record)
+
+    # mock SSS environment
+    monkeypatch.setenv("SHARED_DIR", tempfile.gettempdir())
+
+    # We need only one dummy RT
+    rt = db.RecordType(name="TestType").insert()
+    basepath = INTTESTDIR / "test_data" / "crawler_main_with_list_of_dirs"
+    dirlist = [basepath / "dir1", basepath / "dir2"]
+    crawler_main(
+        dirlist,
+        cfood_file_name=basepath / "cfood.yml",
+        identifiables_definition_file=basepath / "identifiable.yml"
+    )
+    recs = db.execute_query("FIND TestType")
+    assert len(recs) == 2
+    assert "Test1" in [r.name for r in recs]
+    assert "Test2" in [r.name for r in recs]
+
+    assert dummy_status["n_inserts"] == 2
+    assert dummy_status["n_updates"] == 0
+    assert dummy_status["status"] == "OK"
+    assert dummy_status["n_calls"] == 1
+
+
+def test_not_implemented_list_with_authorization(caplog, clear_database):
+
+    rt = db.RecordType(name="TestType").insert()
+    basepath = INTTESTDIR / "test_data" / "crawler_main_with_list_of_dirs"
+    dirlist = [basepath / "dir1", basepath / "dir2"]
+
+    # This is not implemented yet, so check log for correct error.
+    ret = crawler_main(
+        dirlist,
+        cfood_file_name=basepath / "cfood.yml",
+        identifiables_definition_file=basepath / "identifiable.yml",
+        securityMode=SecurityMode.RETRIEVE
+    )
+    # crawler_main hides the error, but has a non-zero return code and
+    # errors in the log:
+    assert ret != 0
+    err_tuples = [t for t in caplog.record_tuples if t[1] == logging.ERROR]
+    assert len(err_tuples) == 1
+    assert "currently implemented only for single paths, not for lists of paths" in err_tuples[0][2]
+    # No inserts after the errors
+    assert len(db.execute_query("FIND TestType")) == 0
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/cfood.yml b/integrationtests/test_data/crawler_main_with_list_of_dirs/cfood.yml
new file mode 100644
index 0000000000000000000000000000000000000000..c7f22ce07e9b401915aefde3bf7e3a78d92e2bd6
--- /dev/null
+++ b/integrationtests/test_data/crawler_main_with_list_of_dirs/cfood.yml
@@ -0,0 +1,10 @@
+---
+metadata:
+  crawler-version: 0.10.2
+---
+BaseDirElement:
+  type: Directory
+  match: ^dir(?P<dir_number>[0-9]+)$$
+  records:
+    TestType:
+      name: Test$dir_number
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/dir1/.gitkeep b/integrationtests/test_data/crawler_main_with_list_of_dirs/dir1/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/dir2/.gitkeep b/integrationtests/test_data/crawler_main_with_list_of_dirs/dir2/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/integrationtests/test_data/crawler_main_with_list_of_dirs/identifiable.yml b/integrationtests/test_data/crawler_main_with_list_of_dirs/identifiable.yml
new file mode 100644
index 0000000000000000000000000000000000000000..6d608cece0ae7c2aa6461fb56025a8ac8e4faf6f
--- /dev/null
+++ b/integrationtests/test_data/crawler_main_with_list_of_dirs/identifiable.yml
@@ -0,0 +1,2 @@
+TestType:
+  - name
diff --git a/integrationtests/test_issues.py b/integrationtests/test_issues.py
index cb1e2e0925dd85b9f6cadf2b56b22aface4bb468..c699e0ab84a0d928c1f84a1b421d97e1b2d848b6 100644
--- a/integrationtests/test_issues.py
+++ b/integrationtests/test_issues.py
@@ -1,4 +1,4 @@
-# This file is a part of the CaosDB Project.
+# This file is a part of the LinkAhead Project.
 #
 # Copyright (C) 2022 Indiscale GmbH <info@indiscale.com>
 #               2022 Florian Spreckelsen <f.spreckelsen@indiscale.com>
diff --git a/src/caoscrawler/cfood-schema.yml b/src/caoscrawler/cfood-schema.yml
index c5e0eaad092c12efbceb5f55b62b3d7cf8afdccf..d2e4cea24f0f2803499116420091b36e95b2c781 100644
--- a/src/caoscrawler/cfood-schema.yml
+++ b/src/caoscrawler/cfood-schema.yml
@@ -88,6 +88,12 @@ cfood:
         match_value:
           description: a regexp that is matched to the value of a key-value pair
           type: string
+        match_newer_than_file:
+          description: |
+            Only relevant for Directory. A path to a file containing
+            an ISO-formatted datetime. Only match if the contents of the
+            Directory have been modified since that datetime.
+          type: string
         record_from_dict:
           description: Only relevant for PropertiesFromDictElement.  Specify the root record which is generated from the contained dictionary.
           type: object
diff --git a/src/caoscrawler/converters/converters.py b/src/caoscrawler/converters/converters.py
index ef35f1b0fa956a85cc32f01b54bab8c68e10644b..f95862a900b46d9d92a2d3389d41487266a790dc 100644
--- a/src/caoscrawler/converters/converters.py
+++ b/src/caoscrawler/converters/converters.py
@@ -769,6 +769,11 @@ class DirectoryConverter(Converter):
         m = re.match(self.definition["match"], element.name)
         if m is None:
             return None
+        if "match_newer_than_file" in self.definition:
+            last_modified = self._get_most_recent_change_in_dir(element)
+            reference = self._get_reference_file_timestamp()
+            if last_modified < reference:
+                return None
         return m.groupdict()
 
     @staticmethod
@@ -791,6 +796,49 @@ class DirectoryConverter(Converter):
 
         return children
 
+    @staticmethod
+    def _get_most_recent_change_in_dir(element: Directory) -> datetime.datetime:
+        """Return the datetime of the most recent change of any file
+        or directory in the given Directory element.
+
+        """
+        most_recent = os.path.getmtime(element.path)
+
+        for root, _, files in os.walk(element.path):
+            mtimes = [os.path.getmtime(root)] + \
+                [os.path.getmtime(os.path.join(root, fname)) for fname in files]
+            if max(mtimes) > most_recent:
+                most_recent = max(mtimes)
+
+        return datetime.datetime.fromtimestamp(most_recent)
+
+    def _get_reference_file_timestamp(self) -> datetime.datetime:
+        """Return a time stamp read from a reference file if it
+        exists. Otherwise return datetime.datetime.min, i.e., the
+        earliest datetime known to datetime.
+
+        """
+
+        if "match_newer_than_file" not in self.definition:
+            logger.debug("No reference file specified.")
+            return datetime.datetime.min
+
+        elif not os.path.isfile(self.definition["match_newer_than_file"]):
+            logger.debug("Reference file doesn't exist.")
+            return datetime.datetime.min
+
+        with open(self.definition["match_newer_than_file"]) as ref_file:
+            stamp_str = ref_file.readline().strip()
+            try:
+                return datetime.datetime.fromisoformat(stamp_str)
+            except ValueError as e:
+                logger.error(
+                    f"Reference file in {self.definition['match_newer_than_file']} "
+                    "doesn't contain a ISO formatted datetime in its first line. "
+                    "Match regardless of modification times."
+                )
+                raise e
+
 
 class SimpleFileConverter(Converter):
     """Just a file, ignore the contents."""
diff --git a/src/caoscrawler/crawl.py b/src/caoscrawler/crawl.py
index a79e4434ee8f58fd1cc2646ced85c0d02d3fb66b..e0d243979faee8f44cdcee3b0e49c15af640c378 100644
--- a/src/caoscrawler/crawl.py
+++ b/src/caoscrawler/crawl.py
@@ -531,8 +531,8 @@ one with the entities that need to be updated and the other with entities to be
                     prop.value = Crawler._get_property_id_for_datatype(
                         rtname=prop.datatype, name=prop.value)
                 except (db.EmptyUniqueQueryError, db.QueryNotUniqueError):
-                    logger.error("The Property {prop.name} with datatype={prop.datatype} has the "
-                                 "value {prop.value} and there is no appropriate Entity with such "
+                    logger.error(f"The Property {prop.name} with datatype={prop.datatype} has the "
+                                 f"value {prop.value} and there is no appropriate Entity with such "
                                  "a name.")
                     raise
         else:
@@ -548,8 +548,8 @@ one with the entities that need to be updated and the other with entities to be
                                                                              name=el))
                     except (db.EmptyUniqueQueryError, db.QueryNotUniqueError):
                         logger.error(
-                            "The Property {prop.name} with datatype={prop.datatype} has the "
-                            "value {prop.value} and there is no appropriate Entity with such "
+                            f"The Property {prop.name} with datatype={prop.datatype} has the "
+                            f"value {prop.value} and there is no appropriate Entity with such "
                             "a name.")
                         raise
                 else:
@@ -621,7 +621,7 @@ one with the entities that need to be updated and the other with entities to be
                     crawled_data: Optional[list[db.Record]] = None,
                     no_insert_RTs: Optional[list[str]] = None,
                     no_update_RTs: Optional[list[str]] = None,
-                    path_for_authorized_run: Optional[str] = "",
+                    path_for_authorized_run: Optional[Union[str, list[str]]] = "",
                     ):
         """
         This function applies several stages:
@@ -643,7 +643,7 @@ one with the entities that need to be updated and the other with entities to be
         no_update_RTs : list[str], optional
             list of RecordType names. Records that have one of those RecordTypes
             as parent will not be updated
-        path_for_authorized_run : str, optional
+        path_for_authorized_run : str or list[str], optional
             only used if there are changes that need authorization before being
             applied. The form for rerunning the crawler with the authorization
             of these changes will be generated with this path. See
@@ -661,6 +661,12 @@ one with the entities that need to be updated and the other with entities to be
                 "use for example the Scanner to create this data."))
             crawled_data = self.crawled_data
 
+        if isinstance(path_for_authorized_run, list) and self.securityMode != SecurityMode.UPDATE:
+            raise NotImplementedError(
+                "Authorization of inserts and updates is currently implemented only "
+                "for single paths, not for lists of paths."
+            )
+
         to_be_inserted, to_be_updated = self._split_into_inserts_and_updates(
             SyncGraph(crawled_data, self.identifiableAdapter))
 
@@ -1004,7 +1010,7 @@ def _store_dry_run_data(ins, upd):
             "update": updates}))
 
 
-def crawler_main(crawled_directory_path: str,
+def crawler_main(crawled_directory_path: Union[str, list[str]],
                  cfood_file_name: str,
                  identifiables_definition_file: Optional[str] = None,
                  debug: bool = False,
@@ -1022,8 +1028,8 @@ def crawler_main(crawled_directory_path: str,
 
     Parameters
     ----------
-    crawled_directory_path : str
-        path to be crawled
+    crawled_directory_path : str or list[str]
+        path(s) to be crawled
     cfood_file_name : str
         filename of the cfood to be used
     identifiables_definition_file : str
@@ -1115,42 +1121,28 @@ def crawler_main(crawled_directory_path: str,
                                                   crawler.run_id)
                 _update_status_record(crawler.run_id, len(inserts), len(updates), status="OK")
         return 0
-    except ForbiddenTransaction as err:
-        logger.debug(traceback.format_exc())
-        logger.error(err)
-        _update_status_record(crawler.run_id, 0, 0, status="FAILED")
-        return 1
-    except ConverterValidationError as err:
-        logger.debug(traceback.format_exc())
-        logger.error(err)
-        _update_status_record(crawler.run_id, 0, 0, status="FAILED")
-        return 1
-    except ImpossibleMergeError as err:
-        logger.debug(traceback.format_exc())
-        logger.error(
-            "Encountered conflicting information when creating Records from the crawled "
-            f"data:\n\n{err}"
-        )
-        _update_status_record(crawler.run_id, 0, 0, status="FAILED")
-        return 1
-    except TransactionError as err:
-        logger.debug(traceback.format_exc())
-        logger.error(err)
-        logger.error("Transaction error details:")
-        for suberr in err.errors:
-            logger.error("---")
-            logger.error(suberr.msg)
-            logger.error(suberr.entity)
-        return 1
     except Exception as err:
         logger.debug(traceback.format_exc())
         logger.error(err)
-
-        if "SHARED_DIR" in os.environ:
-            # pylint: disable=E0601
-            domain = get_config_setting("public_host_url")
-            logger.error("Unexpected Error: Please tell your administrator about this and provide "
-                         f"the following path.\n{get_shared_resource_link(domain, debuglog_public)}")
+        # Special treatment for known error types
+        if isinstance(err, ImpossibleMergeError):
+            logger.error(
+                "Encountered conflicting information when creating Records from the crawled "
+                f"data:\n\n{err}"
+            )
+        elif isinstance(err, TransactionError):
+            logger.error("Transaction error details:")
+            for suberr in err.errors:
+                logger.error("---")
+                logger.error(suberr.msg)
+                logger.error(suberr.entity)
+        # Unknown errors get a special message
+        elif not isinstance(err, (ConverterValidationError, ForbiddenTransaction)):
+            if "SHARED_DIR" in os.environ:
+                # pylint: disable=E0601
+                domain = get_config_setting("public_host_url")
+                logger.error("Unexpected Error: Please tell your administrator about this and provide "
+                             f"the following path.\n{get_shared_resource_link(domain, debuglog_public)}")
         _update_status_record(crawler.run_id, 0, 0, status="FAILED")
         return 1
 
@@ -1174,6 +1166,7 @@ def parse_args():
                         "This file will only be generated if this option is set.")
     parser.add_argument("--debug", required=False, action="store_true",
                         help="Path name of the cfood yaml file to be used.")
+    # TODO allow to provide multiple directories to be crawled on the commandline
     parser.add_argument("crawled_directory_path",
                         help="The subtree of files below the given path will "
                         "be considered. Use '/' for everything.")
diff --git a/src/caoscrawler/scanner.py b/src/caoscrawler/scanner.py
index 89bd1c04411665bf4832d6bccce69bbe1b11cad1..af1f4173e95827606a02979ddd6d7fcd9f133271 100644
--- a/src/caoscrawler/scanner.py
+++ b/src/caoscrawler/scanner.py
@@ -421,7 +421,7 @@ def scanner(items: list[StructureElement],
 # --------------------------------------------------------------------------------
 
 
-def scan_directory(dirname: str, crawler_definition_path: str,
+def scan_directory(dirname: Union[str, list[str]], crawler_definition_path: str,
                    restricted_path: Optional[list[str]] = None,
                    debug_tree: Optional[DebugTree] = None):
     """ Crawl a single directory.
@@ -434,10 +434,12 @@ def scan_directory(dirname: str, crawler_definition_path: str,
     Parameters
     ----------
 
+    dirname: str or list[str]
+        directory or list of directories to be scanned
     restricted_path: optional, list of strings
-            Traverse the data tree only along the given path. When the end of the given path
-            is reached, traverse the full tree as normal. See docstring of 'scanner' for
-            more details.
+        Traverse the data tree only along the given path. When the end
+        of the given path is reached, traverse the full tree as
+        normal. See docstring of 'scanner' for more details.
 
     Returns
     -------
@@ -455,26 +457,31 @@ def scan_directory(dirname: str, crawler_definition_path: str,
     if not dirname:
         raise ValueError(
             "You have to provide a non-empty path for crawling.")
-    dir_structure_name = os.path.basename(dirname)
-
-    # TODO: needs to be covered somewhere else
-    crawled_directory = dirname
-    if not dir_structure_name and dirname.endswith('/'):
-        if dirname == '/':
-            # Crawling the entire file system
-            dir_structure_name = "root"
-        else:
-            # dirname had a trailing '/'
-            dir_structure_name = os.path.basename(dirname[:-1])
-
-    return scan_structure_elements(Directory(dir_structure_name,
-                                             dirname),
-                                   crawler_definition,
-                                   converter_registry,
-                                   restricted_path=restricted_path,
-                                   debug_tree=debug_tree,
-                                   registered_transformer_functions=registered_transformer_functions
-                                   )
+    if not isinstance(dirname, list):
+        dirname = [dirname]
+    dir_element_list = []
+    for dname in dirname:
+        dir_structure_name = os.path.basename(dname)
+
+        # TODO: needs to be covered somewhere else
+        crawled_directory = dname
+        if not dir_structure_name and dname.endswith(os.path.sep):
+            if dname == os.path.sep:
+                # Crawling the entire file system
+                dir_structure_name = "root"
+            else:
+                # dname had a trailing separator
+                dir_structure_name = os.path.basename(dname[:-1])
+        dir_element_list.append(Directory(dir_structure_name, dname))
+
+    return scan_structure_elements(
+        dir_element_list,
+        crawler_definition,
+        converter_registry,
+        restricted_path=restricted_path,
+        debug_tree=debug_tree,
+        registered_transformer_functions=registered_transformer_functions
+    )
 
 
 def scan_structure_elements(items: Union[list[StructureElement], StructureElement],
diff --git a/src/doc/converters/standard_converters.rst b/src/doc/converters/standard_converters.rst
index f7f18794496e5e658a8abdb5676b562d5e047675..5f86abb5b324e0cc1584e42e6abb2612acc8067f 100644
--- a/src/doc/converters/standard_converters.rst
+++ b/src/doc/converters/standard_converters.rst
@@ -6,9 +6,31 @@ These are the standard converters that exist in a default installation.  For wri
 
 Directory Converter
 ===================
-The Directory Converter creates StructureElements for each File and Directory
-inside the current Directory. You can match a regular expression against the
-directory name using the 'match' key.
+
+The Directory Converter creates StructureElements for each File and
+Directory inside the current Directory. You can match a regular
+expression against the directory name using the 'match' key.
+
+With the optional ``match_newer_than_file`` key, a path to a file
+containing only an ISO-formatted datetime string can be specified. If
+this is done, a directory will only match if it contains at least one
+file or directory that has been modified since that datetime. If the
+reference file doesn't exist, the directory will be matched regardless
+of the modification times; a file with an invalid datetime string
+causes an error.
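+
+A minimal sketch of how this can be used in a cfood definition; the
+converter name and the reference file path here are made up for this
+example:
+
+.. code-block:: yaml
+
+   DataDirElement:
+     type: Directory
+     match: ^data_.*
+     # Only match if something below this directory has changed since
+     # the datetime in this file, e.g. "2024-01-01T00:00:00".
+     match_newer_than_file: /path/to/last_crawl_timestamp.txt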
 
 Simple File Converter
 =====================
diff --git a/unittests/test_cfood_metadata.py b/unittests/test_cfood_metadata.py
index c606a0a1afcc15d48164694768bae02adfb0fc0b..b123f98584ba99ed4fec412732cb2bf536034a91 100644
--- a/unittests/test_cfood_metadata.py
+++ b/unittests/test_cfood_metadata.py
@@ -18,7 +18,7 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 #
 from tempfile import NamedTemporaryFile
-from unittest.mock import MagicMock, Mock, patch
+from unittest.mock import patch
 
 import pytest
 import yaml
@@ -33,7 +33,7 @@ def _temp_file_load(txt: str):
     definition using load_definition from Crawler.
     """
     definition = None
-    with NamedTemporaryFile() as f:
+    with NamedTemporaryFile(delete=False) as f:
         f.write(txt.encode())
         f.flush()
         definition = load_definition(f.name)
diff --git a/unittests/test_converters.py b/unittests/test_converters.py
index 12285e463cdcab12f853931abc5f314ed6b20782..e4b442d91060c7ba98cb1a910156b1800f050be3 100644
--- a/unittests/test_converters.py
+++ b/unittests/test_converters.py
@@ -29,12 +29,15 @@ import importlib
 import json
 import logging
 import os
+import pytest
+import yaml
+
 from itertools import product
 from pathlib import Path
+from tempfile import NamedTemporaryFile
 
 import linkahead as db
-import pytest
-import yaml
+
 from caoscrawler.converters import (Converter, ConverterValidationError,
                                     DateElementConverter, DictElementConverter,
                                     DictIntegerElementConverter,
@@ -1070,3 +1073,59 @@ def test_dict_match_properties(converter_registry):
             "prop_d": 24  # duplicate matches
         })
         records = scan_structure_elements(root_dict_element, def_dict, converter_registry)
+
+
+def test_directory_converter_change_date(caplog, converter_registry):
+    """Test that only directories that were modified after a certain
+    date are crawled.
+
+    """
+    test_dir_element = Directory("test_directories", UNITTESTDIR / "test_directories")
+    date_of_dir_change = DirectoryConverter._get_most_recent_change_in_dir(test_dir_element)
+    past_date = date_of_dir_change - datetime.timedelta(days=1)
+    future_date = date_of_dir_change + datetime.timedelta(days=1)
+
+    tmpfi = NamedTemporaryFile(delete=False)
+
+    # Write down past
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"{past_date.isoformat()}\n")
+
+    converter_def = {
+        "type": "Directory",
+        "match": "^test_directories$",
+        "match_newer_than_file": tmpfi.name
+    }
+    dc = DirectoryConverter(name="DC1", definition=converter_def,
+                            converter_registry=converter_registry)
+    assert dc.match(test_dir_element) is not None
+
+    # Write down future, so nothing should match
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"{future_date.isoformat()}\n")
+    assert dc.match(test_dir_element) is None
+
+    # Also match in the corner case of equality:
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"{date_of_dir_change.isoformat()}\n")
+    assert dc.match(test_dir_element) is not None
+
+    # Match but warn
+    with open(tmpfi.name, "w") as fi:
+        fi.write(f"This is garbage.\n")
+    with pytest.raises(ValueError):
+        dc.match(test_dir_element)
+    assert len(caplog.record_tuples) == 1
+    assert caplog.record_tuples[0][1] == logging.ERROR
+    assert tmpfi.name in caplog.record_tuples[0][2]
+    assert "doesn't contain a ISO formatted datetime in its first line" in caplog.record_tuples[0][2]
+
+    # Match anything since file doesn't exist, inform in debug log.
+    os.remove(tmpfi.name)
+    # Clear log and enforce debug level.
+    caplog.clear()
+    caplog.set_level(logging.DEBUG)
+    assert dc.match(test_dir_element) is not None
+    assert len(caplog.record_tuples) == 1
+    assert caplog.record_tuples[0][1] == logging.DEBUG
+    assert "Reference file doesn't exist." == caplog.record_tuples[0][2]
diff --git a/unittests/test_crawler.py b/unittests/test_crawler.py
index e88ce454061fb268fa49e986f8392f71296beb07..ad69c6f57cbc8d48d194507d7c1aa79c9da7521b 100644
--- a/unittests/test_crawler.py
+++ b/unittests/test_crawler.py
@@ -824,9 +824,9 @@ def test_restricted_path(create_mock):
 
 
 def test_split_restricted_path():
-    assert ["el"] == split_restricted_path("/el")
-    assert ["el"] == split_restricted_path("/el/")
-    assert ["el", "el"] == split_restricted_path("/el/el")
+    assert ["el"] == split_restricted_path(os.path.sep + "el")
+    assert ["el"] == split_restricted_path(os.path.sep + "el" + os.path.sep)
+    assert ["el", "el"] == split_restricted_path(os.path.sep + "el" + os.path.sep + "el")
 
 
 # Filter the warning because we want to have it here and this way it does not hinder running
diff --git a/unittests/test_macros.py b/unittests/test_macros.py
index a87b633e8585a03431575426733cae6ba31b7acf..03fe0e665652bb12e204d76857771c1d064ec28a 100644
--- a/unittests/test_macros.py
+++ b/unittests/test_macros.py
@@ -50,10 +50,10 @@ def _temp_file_load(txt: str):
     definition using load_definition from Crawler.
     """
     definition = None
-    with NamedTemporaryFile() as f:
+    with NamedTemporaryFile(delete=False) as f:
         f.write(txt.encode())
         f.flush()
-        definition = load_definition(f.name)
+    definition = load_definition(f.name)
     return definition
 
 
diff --git a/unittests/test_scanner.py b/unittests/test_scanner.py
index 5cbbc63406ffb3f5ec1f9019ed7877d7880d7b69..120d804c7895b8411b4f051b6ac8a08495f71359 100644
--- a/unittests/test_scanner.py
+++ b/unittests/test_scanner.py
@@ -30,7 +30,7 @@ from functools import partial
 from pathlib import Path
 from tempfile import NamedTemporaryFile
 from unittest.mock import MagicMock, Mock, patch
-
+import os
 import linkahead as db
 import pytest
 import yaml
@@ -110,7 +110,7 @@ def test_record_structure_generation():
     assert len(subc[1]) == 0
 
     # The data analysis node creates one variable for the node itself:
-    assert subd[0]["DataAnalysis"] == "examples_article/DataAnalysis"
+    assert subd[0]["DataAnalysis"] == os.path.join("examples_article", "DataAnalysis")
     assert subc[0]["DataAnalysis"] is False
 
     subd = dbt.debug_tree[dircheckstr("DataAnalysis", "2020_climate-model-predict")]
@@ -128,9 +128,10 @@ def test_record_structure_generation():
     assert subd[0]["identifier"] == "climate-model-predict"
     assert subd[0]["Project"].__class__ == db.Record
 
-    assert subd[0]["DataAnalysis"] == "examples_article/DataAnalysis"
+    assert subd[0]["DataAnalysis"] == os.path.join("examples_article", "DataAnalysis")
     assert subc[0]["DataAnalysis"] is True
-    assert subd[0]["project_dir"] == "examples_article/DataAnalysis/2020_climate-model-predict"
+    assert subd[0]["project_dir"] == os.path.join(
+        "examples_article", "DataAnalysis", "2020_climate-model-predict")
     assert subc[0]["project_dir"] is False
 
     # Check the copy flags for the first level in the hierarchy:
diff --git a/unittests/test_utilities.py b/unittests/test_utilities.py
index 463e304a99161f2294e5d202611dcf0b829e2045..a9b052524957b6f8c1e0378e3153fc06f4f36806 100644
--- a/unittests/test_utilities.py
+++ b/unittests/test_utilities.py
@@ -20,22 +20,23 @@
 #
 
 import pytest
-
+from os.path import sep
 from caoscrawler.crawl import split_restricted_path
 from caoscrawler.utils import MissingImport, get_shared_resource_link
 
 
 def test_split_restricted_path():
     assert split_restricted_path("") == []
-    assert split_restricted_path("/") == []
-    assert split_restricted_path("test/") == ["test"]
-    assert split_restricted_path("/test/") == ["test"]
-    assert split_restricted_path("test/bla") == ["test", "bla"]
-    assert split_restricted_path("/test/bla") == ["test", "bla"]
-    assert split_restricted_path("/test1/test2/bla") == ["test1", "test2", "bla"]
-    assert split_restricted_path("/test//bla") == ["test", "bla"]
-    assert split_restricted_path("//test/bla") == ["test", "bla"]
-    assert split_restricted_path("///test//bla////") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}") == []
+    assert split_restricted_path(f"test{sep}") == ["test"]
+    assert split_restricted_path(f"{sep}test{sep}") == ["test"]
+    assert split_restricted_path(f"test{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}test{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}test1{sep}test2{sep}bla") == ["test1", "test2", "bla"]
+    assert split_restricted_path(f"{sep}test{sep}{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(f"{sep}{sep}test{sep}bla") == ["test", "bla"]
+    assert split_restricted_path(
+        f"{sep}{sep}{sep}test{sep}{sep}bla{sep}{sep}{sep}{sep}") == ["test", "bla"]
 
 
 def test_dummy_class():