diff --git a/src/caosadvancedtools/table_json_conversion/fill_xlsx.py b/src/caosadvancedtools/table_json_conversion/fill_xlsx.py
index 585bc6bf283083b343c85cdbe3d0d9dbdeb9decc..8019fb4440b361a5ac8623322df0e388375c4ece 100644
--- a/src/caosadvancedtools/table_json_conversion/fill_xlsx.py
+++ b/src/caosadvancedtools/table_json_conversion/fill_xlsx.py
@@ -22,115 +22,27 @@
 
 from __future__ import annotations
 
-import json
 import pathlib
-from collections import OrderedDict
 from types import SimpleNamespace
 from typing import Any, Dict, List, Optional, TextIO, Union
 from warnings import warn
 
 from jsonschema import FormatChecker, validate
 from jsonschema.exceptions import ValidationError
-from openpyxl import Workbook, load_workbook
+from openpyxl import load_workbook, Workbook
 from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
-from openpyxl.worksheet.worksheet import Worksheet
 
-from .table_generator import ColumnType, RowType
-from .utils import p2s
-
-
-def _is_exploded_sheet(sheet: Worksheet) -> bool:
-    """Return True if this is a an "exploded" sheet.
-
-    An exploded sheet is a sheet whose data entries are LIST valued properties of entries in another
-    sheet.  A sheet is detected as exploded iff it has FOREIGN columns.
-    """
-    column_types = _get_column_types(sheet)
-    return ColumnType.FOREIGN.name in column_types.values()
-
-
-def _get_column_types(sheet: Worksheet) -> OrderedDict:
-    """Return an OrderedDict: column index -> column type for the sheet.
-    """
-    result = OrderedDict()
-    type_row_index = _get_row_type_column_index(sheet)
-    for idx, col in enumerate(sheet.columns):
-        type_cell = col[type_row_index]
-        result[idx] = type_cell.value if type_cell.value is not None else ColumnType.IGNORE.name
-        assert (hasattr(ColumnType, result[idx])
-                or result[idx] == RowType.COL_TYPE.name), (
-            f"Unexpected column type value ({idx}{type_row_index}): {type_cell.value}")
-    return result
-
-
-def _get_foreign_key_columns(sheet: Worksheet) -> Dict[str, SimpleNamespace]:
-    """Return the foreign keys of the worksheet.
-
-Returns
--------
-out: dict[str, SimpleNamespace]
-  The keys are the stringified paths.  The values are SimpleNamespace objects with ``index``,
-  ``path`` and ``column`` attributes.
-    """
-    column_types = _get_column_types(sheet)
-    path_rows = _get_path_rows(sheet)
-    result = OrderedDict()
-    for for_idx, name in column_types.items():
-        if name != ColumnType.FOREIGN.name:
-            continue
-        path = []
-        for row in path_rows:
-            component = sheet.cell(row=row+1, column=for_idx+1).value
-            if component is None:
-                break
-            assert isinstance(component, str), f"Expected string: {component}"
-            path.append(component)
-        result[p2s(path)] = SimpleNamespace(index=for_idx, path=path,
-                                            column=list(sheet.columns)[for_idx])
-    return result
-
-
-def _get_row_type_column_index(sheet: Worksheet):
-    """Return the column index (0-indexed) of the column which defines the row types.
-    """
-    for col in sheet.columns:
-        for cell in col:
-            if cell.value == RowType.COL_TYPE.name:
-                return cell.column - 1
-    raise ValueError("The column which defines row types (COL_TYPE, PATH, ...) is missing")
-
-
-def _get_path_rows(sheet: Worksheet):
-    """Return the 0-based indices of the rows which represent paths."""
-    rows = []
-    rt_col = _get_row_type_column_index(sheet)
-    for cell in list(sheet.columns)[rt_col]:
-        if cell.value == RowType.PATH.name:
-            rows.append(cell.row-1)
-    return rows
-
-
-def _next_row_index(sheet: Worksheet) -> int:
-    """Return the index for the next data row.
-
-    This is defined as the first row without any content.
-    """
-    return sheet.max_row
-
-
-def _read_or_dict(data: Union[dict, str, TextIO]) -> dict:
-    """If data is a json file name or input stream, read data from there."""
-    if isinstance(data, dict):
-        pass
-    elif isinstance(data, str):
-        with open(data, encoding="utf-8") as infile:
-            data = json.load(infile)
-    elif hasattr(data, "read"):
-        data = json.load(data)
-    else:
-        raise ValueError(f"I don't know how to handle the datatype of `data`: {type(data)}")
-    assert isinstance(data, dict)
-    return data
+from .xlsx_utils import (
+    array_schema_from_model_schema,
+    get_foreign_key_columns,
+    get_row_type_column_index,
+    is_exploded_sheet,
+    next_row_index,
+    p2s,
+    read_or_dict,
+    ColumnType,
+    RowType
+)
 
 
 class TemplateFiller:
@@ -143,6 +55,7 @@ class TemplateFiller:
 
     @property
     def workbook(self):
+        """Return the workbook of this TemplateFiller."""
         return self._workbook
 
     def fill_data(self, data: dict):
@@ -172,6 +85,7 @@ class TemplateFiller:
             return result
 
         def next_level(self, next_level: str) -> TemplateFiller.Context:
+            """Return a copy of this Context, with the path appended by ``next_level``."""
             result = self.copy()
             result._current_path.append(next_level)  # pylint: disable=protected-access
             return result
@@ -212,7 +126,7 @@ class TemplateFiller:
         for sheetname in self._workbook.sheetnames:
             sheet = self._workbook[sheetname]
             type_column = [x.value for x in list(sheet.columns)[
-                _get_row_type_column_index(sheet)]]
+                get_row_type_column_index(sheet)]]
             # 0-indexed, as everything outside of sheet.cell(...):
             coltype_idx = type_column.index(RowType.COL_TYPE.name)
             path_indices = [i for i, typ in enumerate(type_column) if typ == RowType.PATH.name]
@@ -342,14 +256,14 @@ out: union[dict, None]
             assert sheet is sheet_meta.sheet, "All entries must be in the same sheet."
             col_index = sheet_meta.col_index
             if insert_row is None:
-                insert_row = _next_row_index(sheet)
+                insert_row = next_row_index(sheet)
 
             sheet.cell(row=insert_row+1, column=col_index+1, value=value)
 
         # Insert foreign keys
-        if insert_row is not None and sheet is not None and _is_exploded_sheet(sheet):
+        if insert_row is not None and sheet is not None and is_exploded_sheet(sheet):
             try:
-                foreigns = _get_foreign_key_columns(sheet)
+                foreigns = get_foreign_key_columns(sheet)
             except ValueError:
                 print(f"Sheet: {sheet}")
                 raise
@@ -414,17 +328,17 @@ validation_schema: dict, optional
   fails.  If no validation schema is given, try to ignore more errors in the data when filling the
   XLSX template.
 """
-    data = _read_or_dict(data)
+    data = read_or_dict(data)
     assert isinstance(data, dict)
 
     # Validation
     if validation_schema is not None:
-        validation_schema = _read_or_dict(validation_schema)
+        validation_schema = array_schema_from_model_schema(read_or_dict(validation_schema))
         try:
             validate(data, validation_schema, format_checker=FormatChecker())
-        except ValidationError as ve:
-            print(ve.message)
-            raise ve
+        except ValidationError as verr:
+            print(verr.message)
+            raise verr
     else:
         print("No validation schema given, continue at your own risk.")
 
diff --git a/src/caosadvancedtools/table_json_conversion/table_generator.py b/src/caosadvancedtools/table_json_conversion/table_generator.py
index 857100ef2d1a36eccedcf118d2c59343c4c674c2..851173e2d51acec0da1e7a5f1f776bcef8db0f97 100644
--- a/src/caosadvancedtools/table_json_conversion/table_generator.py
+++ b/src/caosadvancedtools/table_json_conversion/table_generator.py
@@ -27,30 +27,13 @@ This module allows to generate template tables from JSON schemas.
 import pathlib
 import re
 from abc import ABC, abstractmethod
-from enum import Enum
 from typing import Dict, List, Optional, Tuple
 
 from openpyxl import Workbook
 from openpyxl.styles import PatternFill
 from openpyxl.workbook.child import INVALID_TITLE_REGEX
 
-from .utils import p2s
-
-
-class ColumnType(Enum):
-    """ column types enum """
-    SCALAR = 1
-    LIST = 2
-    FOREIGN = 3
-    MULTIPLE_CHOICE = 4
-    IGNORE = 5
-
-
-class RowType(Enum):
-    """ row types enum """
-    COL_TYPE = 1
-    PATH = 2
-    IGNORE = 3
+from .xlsx_utils import p2s, ColumnType, RowType
 
 
 class TableTemplateGenerator(ABC):
diff --git a/src/caosadvancedtools/table_json_conversion/utils.py b/src/caosadvancedtools/table_json_conversion/utils.py
deleted file mode 100644
index 15ae488d7cb8e142afba58424b49e8fc3a15e0d6..0000000000000000000000000000000000000000
--- a/src/caosadvancedtools/table_json_conversion/utils.py
+++ /dev/null
@@ -1,25 +0,0 @@
-# This file is a part of the LinkAhead Project.
-#
-# Copyright (C) 2024 IndiScale GmbH <info@indiscale.com>
-# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
-#
-# This program is free software: you can redistribute it and/or modify
-# it under the terms of the GNU Affero General Public License as
-# published by the Free Software Foundation, either version 3 of the
-# License, or (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-# GNU Affero General Public License for more details.
-#
-# You should have received a copy of the GNU Affero General Public License
-# along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-from typing import List
-
-
-def p2s(path: List[str]):
-    """Path to string: dot-separated.
-    """
-    return ".".join(path)
diff --git a/src/caosadvancedtools/table_json_conversion/xlsx_utils.py b/src/caosadvancedtools/table_json_conversion/xlsx_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..374cdefb70737907839ba0a3339fefad28949340
--- /dev/null
+++ b/src/caosadvancedtools/table_json_conversion/xlsx_utils.py
@@ -0,0 +1,319 @@
+# encoding: utf-8
+#
+# This file is a part of the LinkAhead Project.
+#
+# Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
+# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+"""General utilities to work with XLSX files with (hidden) column and row annotations and typing.
+
+The most prominent functions are:
+
+- ``p2s``: Path to string: ``["some", "path"] -> "some.path"``
+- ``read_or_dict``: Load JSON object from path, file or dict.
+
+This module also defines these enums:
+
+- ColumnType
+- RowType
+"""
+
+from __future__ import annotations
+
+import json
+
+from collections import OrderedDict
+from copy import deepcopy
+from enum import Enum
+from types import SimpleNamespace
+from typing import Dict, List, TextIO, Union
+
+from openpyxl import Workbook
+from openpyxl.worksheet.worksheet import Worksheet
+
+
+class ColumnType(Enum):
+    """ column types enum """
+    SCALAR = 1
+    LIST = 2
+    FOREIGN = 3
+    MULTIPLE_CHOICE = 4
+    IGNORE = 5
+
+
+class RowType(Enum):
+    """ row types enum """
+    COL_TYPE = 1
+    PATH = 2
+    IGNORE = 3
+
+
+def array_schema_from_model_schema(model_schema: dict) -> dict:
+    """Convert a *data model* schema to a *data array* schema.
+
+Practically, this means that the top level properties are converted into lists.  In a simplified
+notation, this can be expressed as:
+
+``array_schema = { elem: [elem typed data...] for elem in model_schema }``
+
+Parameters
+----------
+model_schema: dict
+  The schema description of the data model.  Must be a json schema *object*, with a number of
+  *object* typed properties.
+
+Returns
+-------
+array_schema: dict
+  A corresponding json schema, where the properties are arrays with the types of the input's
+  top-level properties.
+    """
+    assert model_schema["type"] == "object"
+    result = deepcopy(model_schema)
+    for name, prop in result["properties"].items():
+        assert prop["type"] == "object"
+        new_prop = {
+            "type": "array",
+            "items": prop
+        }
+        result["properties"][name] = new_prop
+    return result
+
+
+def get_defining_paths(workbook: Workbook) -> dict[str, list[list[str]]]:
+    """For all sheets in ``workbook``, list the paths which they define.
+
+A sheet is said to define a path, if it has data columns for properties inside that path.  For
+example, consider the following worksheet:
+
+| `COL_TYPE` | `SCALAR`       | `SCALAR`      | `LIST`       | `SCALAR`           |
+| `PATH`     | `Training`     | `Training`    | `Training`   | `Training`         |
+| `PATH`     | `url`          | `date`        | `subjects`   | `supervisor`       |
+| `PATH`     |                |               |              | `email`            |
+|------------|----------------|---------------|--------------|--------------------|
+|            | example.com/mp | 2024-02-27    | Math;Physics | steve@example.com  |
+|            | example.com/m  | 2024-02-27    | Math         | stella@example.com |
+
+This worksheet defines properties for the paths `["Training"]` and `["Training", "supervisor"]`, and
+thus these two path lists would be returned under the key of this sheet's name.
+
+Parameters
+----------
+workbook: Workbook
+  The workbook to analyze.
+
+Returns
+-------
+out: dict[str, list[list[str]]]
+  A dict with worksheet names as keys and lists of paths (represented as string lists) as values.
+"""
+    result: dict[str, list[list[str]]] = {}
+    for sheet in workbook.worksheets:
+        paths = []
+        added = set()
+        for col in get_data_columns(sheet).values():
+            rep = p2s(col.path[:-1])
+            if rep not in added:
+                paths.append(col.path[:-1])
+                added.add(rep)
+        result[sheet.title] = paths
+    return result
+
+
+def get_data_columns(sheet: Worksheet) -> Dict[str, SimpleNamespace]:
+    """Return the data paths of the worksheet.
+
+Returns
+-------
+out: dict[str, SimpleNamespace]
+  The keys are the stringified paths.  The values are SimpleNamespace objects with ``index``,
+  ``path`` and ``column`` attributes.
+    """
+    column_types = _get_column_types(sheet)
+    path_rows = get_path_rows(sheet)
+    result = OrderedDict()
+    for for_idx, name in column_types.items():
+        if name not in (
+                ColumnType.SCALAR.name,
+                ColumnType.LIST.name,
+                ColumnType.MULTIPLE_CHOICE.name,
+        ):
+            continue
+        path = []
+        for row in path_rows:
+            component = sheet.cell(row=row+1, column=for_idx+1).value
+            if component is None:
+                break
+            assert isinstance(component, str), f"Expected string: {component}"
+            path.append(component)
+        result[p2s(path)] = SimpleNamespace(index=for_idx, path=path,
+                                            column=list(sheet.columns)[for_idx])
+    return result
+
+
+def get_foreign_key_columns(sheet: Worksheet) -> Dict[str, SimpleNamespace]:
+    """Return the foreign keys of the worksheet.
+
+Returns
+-------
+out: dict[str, SimpleNamespace]
+  The keys are the stringified paths.  The values are SimpleNamespace objects with ``index``,
+  ``path`` and ``column`` attributes.
+    """
+    column_types = _get_column_types(sheet)
+    path_rows = get_path_rows(sheet)
+    result = OrderedDict()
+    for for_idx, name in column_types.items():
+        if name != ColumnType.FOREIGN.name:
+            continue
+        path = []
+        for row in path_rows:
+            component = sheet.cell(row=row+1, column=for_idx+1).value
+            if component is None:
+                break
+            assert isinstance(component, str), f"Expected string: {component}"
+            path.append(component)
+        result[p2s(path)] = SimpleNamespace(index=for_idx, path=path,
+                                            column=list(sheet.columns)[for_idx])
+    return result
+
+
+def get_path_position(sheet: Worksheet) -> tuple[list[str], str]:
+    """Return a path which represents the parent element, and the sheet's "proper name".
+
+For top-level sheets / entries (those without foreign columns), the path is an empty list.
+
+A sheet's "proper name" is detected from the data column paths: it is the first component after the
+parent components.
+
+Returns
+-------
+parent: list[str]
+  Path to the parent element.  Note that there may be list elements on the path which are **not**
+  represented in this return value.
+
+proper_name: str
+  The "proper name" of this sheet.  This defines an array where all the data lives, relative to the
+  parent path.
+    """
+    # Parent element: longest common path shared among any foreign column and all the data columns
+    parent: list[str] = []
+
+    # longest common path in data columns
+    data_paths = [el.path for el in get_data_columns(sheet).values()]
+    for ii in range(min([len(path) for path in data_paths])):
+        components_at_index = {path[ii] for path in data_paths}
+        if len(components_at_index) > 1:
+            break
+    longest_data_path = data_paths[0][:ii]
+
+    # longest common overall path
+    foreign_paths = [el.path for el in get_foreign_key_columns(sheet).values()]
+    ii = 0  # If no foreign_paths, proper name is the first element
+    for foreign_path in foreign_paths:
+        for ii in range(min([len(foreign_path), len(longest_data_path)])):
+            components_at_index = {foreign_path[ii], longest_data_path[ii]}
+            if len(components_at_index) > 1:
+                break
+        if ii > len(parent):
+            parent = foreign_path[:ii]
+
+    return parent, data_paths[0][ii]
+
+
+def get_path_rows(sheet: Worksheet):
+    """Return the 0-based indices of the rows which represent paths."""
+    rows = []
+    rt_col = get_row_type_column_index(sheet)
+    for cell in list(sheet.columns)[rt_col]:
+        if cell.value == RowType.PATH.name:
+            rows.append(cell.row-1)
+    return rows
+
+
+def get_row_type_column_index(sheet: Worksheet):
+    """Return the column index (0-indexed) of the column which defines the row types.
+    """
+    for col in sheet.columns:
+        for cell in col:
+            if cell.value == RowType.COL_TYPE.name:
+                return cell.column - 1
+    raise ValueError("The column which defines row types (COL_TYPE, PATH, ...) is missing")
+
+
+def get_worksheet_for_path(path: list[str], defining_path_index: dict[str, list[list[str]]]) -> str:
+    """Find the sheet name which corresponds to the given path."""
+    for sheetname, paths in defining_path_index.items():
+        if path in paths:
+            return sheetname
+    raise KeyError(f"Could not find defining worksheet for path: {path}")
+
+
+def is_exploded_sheet(sheet: Worksheet) -> bool:
+    """Return True if this is a an "exploded" sheet.
+
+    An exploded sheet is a sheet whose data entries are LIST valued properties of entries in another
+    sheet.  A sheet is detected as exploded iff it has FOREIGN columns.
+    """
+    column_types = _get_column_types(sheet)
+    return ColumnType.FOREIGN.name in column_types.values()
+
+
+def next_row_index(sheet: Worksheet) -> int:
+    """Return the index for the next data row.
+
+    This is defined as the first row without any content.
+    """
+    return sheet.max_row
+
+
+def p2s(path: List[str]) -> str:
+    """Path to string: dot-separated.
+    """
+    return ".".join(path)
+
+
+def read_or_dict(data: Union[dict, str, TextIO]) -> dict:
+    """If data is a json file name or input stream, read data from there.
+If it is a dict already, just return it."""
+    if isinstance(data, dict):
+        return data
+
+    if isinstance(data, str):
+        with open(data, encoding="utf-8") as infile:
+            data = json.load(infile)
+    elif hasattr(data, "read"):
+        data = json.load(data)
+    else:
+        raise ValueError(f"I don't know how to handle the datatype of `data`: {type(data)}")
+    assert isinstance(data, dict)
+    return data
+
+
+def _get_column_types(sheet: Worksheet) -> OrderedDict:
+    """Return an OrderedDict: column index -> column type for the sheet.
+    """
+    result = OrderedDict()
+    type_row_index = get_row_type_column_index(sheet)
+    for idx, col in enumerate(sheet.columns):
+        type_cell = col[type_row_index]
+        result[idx] = type_cell.value if type_cell.value is not None else (
+            ColumnType.IGNORE.name)
+        assert (hasattr(ColumnType, result[idx]) or result[idx] == RowType.COL_TYPE.name), (
+            f"Unexpected column type value ({idx}{type_row_index}): {type_cell.value}")
+    return result
diff --git a/src/doc/table-json-conversion/specs.md b/src/doc/table-json-conversion/specs.md
index b4fe5b7152e7b9b9c9273d4e829bd189dd2e4f50..5a5197473d82886fcb3ee54f8ac9c5865c456710 100644
--- a/src/doc/table-json-conversion/specs.md
+++ b/src/doc/table-json-conversion/specs.md
@@ -13,14 +13,16 @@ The data model in LinkAhead defines the types of records present in a LinkAhead
 structure. This data model can also be represented in a JSON Schema, which defines the structure of
 JSON files containing records pertaining to the data model.
 
-For example, the following JSON can describe a "Person" Record:
+For example, the following JSON can describe a single "Person" Record:
 
 ```JSON
 {
-    "Person": {
-        "family_name": "Steve",
-        "given_name": "Stevie"
-    }
+    "Person": [
+        {
+            "family_name": "Steve",
+            "given_name": "Stevie"
+        }
+    ]
 }
 ```
 
@@ -30,6 +32,43 @@ the storage of "Training" Records containing information about conducted trainin
 particularly valuable for data import and export. One could generate web forms from the JSON Schema
 or use it to export objects stored in LinkAhead as JSON.
 
+### Note: Data models and data arrays ###
+
+The schema as created by ``json_schema_exporter.recordtype_to_json_schema(...)`` is, from a broad
+view, a dict with all the top-level recordtypes (the recordtype names are the keys).  While this is
+appropriate for the generation of user input forms, data often consists of multiple entries of the
+same type.  XLSX files are no exception: users expect to be able to enter multiple rows of data.
+
+Since the data model schema does not describe multiple data sets, there is a utility function which
+creates a *data array* schema out of the *data model* schema: it replaces the top-level entries of
+the data model with arrays which may contain data.  A usage sketch is shown after the example below.
+
+A **short example** illustrates this well.  Consider a *data model* schema which matches this data
+content:
+
+```JSON
+{
+  "Person": {
+    "name": "Charly"
+  }
+}
+```
+
+Now the automatically generated *data array* schema would accept the following data:
+
+```JSON
+{
+  "Person": [
+    {
+      "name": "Charly"
+    },
+    {
+      "name": "Sam"
+    }
+  ]
+}
+```
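+
+This conversion is implemented in ``xlsx_utils.array_schema_from_model_schema``.  A minimal usage
+sketch (the file name here is illustrative only):
+
+```python
+from caosadvancedtools.table_json_conversion import xlsx_utils
+
+# Load the data model schema; a dict, a file name or a file-like object is accepted.
+model_schema = xlsx_utils.read_or_dict("model_schema.json")
+# Derive the data array schema: each top-level object becomes an array of such objects.
+array_schema = xlsx_utils.array_schema_from_model_schema(model_schema)
+```
+
+``fill_template`` applies the same conversion internally when it is given a validation schema.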
+
 ## From JSON to XLSX: Data Representation ##
 
 The following describes how JSON files representing LinkAhead records are converted into XLSX files,
@@ -67,33 +106,45 @@ Let's now consider these four cases in detail and with examples:
 
 ```JSON
 {
-    "Training": {
+    "Training": [
+      {
         "date": "2023-01-01",
         "url": "www.indiscale.com",
         "duration": 1.0,
         "participants": 1,
         "remote": false
-    }
+      },
+      {
+        "date": "2023-06-15",
+        "url": "www.indiscale.com/next",
+        "duration": 2.5,
+        "participants": None,
+        "remote": true
+      }
+    ]
 }
 ```
 
 This entry will be represented in an XLSX sheet with the following content:
 
-| date       | url               | duration | participants | remote |
-|------------|-------------------|----------|--------------|--------|
-| 2023-01-01 | www.indiscale.com | 1.0      | 1            | false  |
+| date       | url                    | duration | participants | remote |
+|------------|------------------------|----------|--------------|--------|
+| 2023-01-01 | www.indiscale.com      | 1.0      | 1            | false  |
+| 2023-06-15 | www.indiscale.com/next | 2.5      |              | true   |
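+
+Such a sheet can be created by filling a previously generated XLSX template with ``fill_template``.
+The following is a minimal sketch: the file names, as well as the ``template`` and ``result``
+parameter names, are illustrative assumptions here, while ``data`` and ``validation_schema`` are
+documented parameters.
+
+```python
+from caosadvancedtools.table_json_conversion.fill_xlsx import fill_template
+
+# Fill the JSON data shown above into an XLSX template for "Training" records.
+fill_template(data="training_data.json",
+              template="training_template.xlsx",
+              result="training_filled.xlsx",
+              validation_schema="model_schema.json")
+```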
 
 ### b. Property referencing a record ###
 
 ```JSON
 {
-    "Training": {
+    "Training": [
+      {
         "date": "2023-01-01",
         "supervisor": {
             "family_name": "Stevenson",
             "given_name": "Stevie",
         }
-    }
+      }
+    ]
 }
 ```
 
@@ -110,10 +161,12 @@ through the content of hidden rows.  (See below for the definition of hidden row
 
 ```JSON
 {
-    "Training": {
+    "Training": [
+      {
         "url": "www.indiscale.com",
         "subjects": ["Math", "Physics"],
-    }
+      }
+    ]
 }
 ```
 
@@ -130,13 +183,15 @@ the separator `;`, it is escaped with `\\`.
 
 ```JSON
 {
-    "Training": {
+    "Training": [
+      {
         "date": "2024-04-17",
         "skills": [
               "Planning",
               "Evaluation"
         ]
-    }
+      }
+    ]
 }
 ```
 
@@ -154,7 +209,8 @@ Note that this example assumes that the list of possible choices, as given in th
 
 ```JSON
 {
-    "Training": {
+    "Training": [
+      {
         "date": "2023-01-01",
         "coach": [
             {
@@ -166,7 +222,8 @@ Note that this example assumes that the list of possible choices, as given in th
               "given_name": "Min",
             }
         ]
-    }
+      }
+    ]
 }
 ```
 
@@ -281,6 +338,4 @@ These rows correspond to:
 
 The current implementation still lacks the following:
 
-- Lists of enum references are not yet implemented as columns where matching cell can simply be
-  ticked/crossed.
 - Files handling is not implemented yet.
diff --git a/unittests/table_json_conversion/data/error_simple_data.json b/unittests/table_json_conversion/data/error_simple_data.json
index bfea88b675ab2a6e0c1787fc401afec5c564c006..4d57b0335b4685ea82f1668d50b52a9d30ef1759 100644
--- a/unittests/table_json_conversion/data/error_simple_data.json
+++ b/unittests/table_json_conversion/data/error_simple_data.json
@@ -1,11 +1,11 @@
 {
-  "Training": {
+  "Training": [{
     "duration": 1.0,
     "participants": 0.5
-  },
-  "Person": {
+  }],
+  "Person": [{
     "family_name": "Auric",
     "given_name": "Goldfinger",
     "Organisation": "Federal Reserve"
-  }
+  }]
 }
diff --git a/unittests/table_json_conversion/data/indirect_data.json b/unittests/table_json_conversion/data/indirect_data.json
index c77dd1ff2a703af6b6b2a0db19f450ac10616d9b..76db75d97e1dafff223ea2b27ecca1086d6bc4af 100644
--- a/unittests/table_json_conversion/data/indirect_data.json
+++ b/unittests/table_json_conversion/data/indirect_data.json
@@ -1,5 +1,5 @@
 {
-  "Wrapper": {
+  "Wrapper": [{
     "Results": [
       {
         "year": 2022,
@@ -14,5 +14,5 @@
       "name": "Basic Training",
       "url": "www.example.com/training/basic"
     }
-  }
+  }]
 }
diff --git a/unittests/table_json_conversion/data/multiple_choice_data.json b/unittests/table_json_conversion/data/multiple_choice_data.json
index 1f14911ea79e8d78a452bb221f693d1a01cce744..ee24ef7adbd61abf22d47bb3d49f43f3e1e26501 100644
--- a/unittests/table_json_conversion/data/multiple_choice_data.json
+++ b/unittests/table_json_conversion/data/multiple_choice_data.json
@@ -1,5 +1,5 @@
 {
-  "Training": {
+  "Training": [{
     "name": "Super Skill Training",
     "date": "2024-04-17",
     "skills": [
@@ -7,5 +7,5 @@
       "Evaluation"
     ],
     "exam_types": []
-  }
+  }]
 }
diff --git a/unittests/table_json_conversion/data/multiple_refs_data.json b/unittests/table_json_conversion/data/multiple_refs_data.json
index 5b8ce9136635832111abb2206d8afe1bc7c58444..fa7c7af8e25096d15683bc924a41fb9572db3eb5 100644
--- a/unittests/table_json_conversion/data/multiple_refs_data.json
+++ b/unittests/table_json_conversion/data/multiple_refs_data.json
@@ -1,5 +1,5 @@
 {
-  "Training": {
+  "Training": [{
     "trainer": [],
     "participant": [
       {
@@ -44,5 +44,5 @@
     "date": "2024-03-21T14:12:00.000Z",
     "url": "www.indiscale.com",
     "name": "Example training with multiple organizations."
-  }
+  }]
 }
diff --git a/unittests/table_json_conversion/data/simple_data.json b/unittests/table_json_conversion/data/simple_data.json
index 9997f17e76a46d5e97d842fdee40626047e7a347..92a1661a7e975747fa346997c0a3309e740c7324 100644
--- a/unittests/table_json_conversion/data/simple_data.json
+++ b/unittests/table_json_conversion/data/simple_data.json
@@ -1,5 +1,5 @@
 {
-  "Training": {
+  "Training": [{
     "date": "2023-01-01",
     "url": "www.indiscale.com",
     "coach": [
@@ -23,10 +23,10 @@
     "participants": 1,
     "subjects": ["Math", "Physics"],
     "remote": false
-  },
-  "Person": {
+  }],
+  "Person": [{
     "family_name": "Steve",
     "given_name": "Stevie",
     "Organisation": "IMF"
-  }
+  }]
 }
diff --git a/unittests/table_json_conversion/data/simple_data_ascii_chars.json b/unittests/table_json_conversion/data/simple_data_ascii_chars.json
index b1d13ebee5d6e3949fa606a130e6f5819bfc4bc8..84e22b9bcbf3b5c053d955ed398b442379a99395 100644
--- a/unittests/table_json_conversion/data/simple_data_ascii_chars.json
+++ b/unittests/table_json_conversion/data/simple_data_ascii_chars.json
@@ -1,5 +1,5 @@
 {
-  "Training": {
+  "Training": [{
     "date": "2023-01-01",
     "url": "char: >\u0001\u0002\u0003\u0004\u0005\u0006\u0007\u0008\u0009<",
     "subjects": [
@@ -9,10 +9,10 @@
       ">\u0020\u0021\u0022\u0023\u0024\u0025\u0026\u0027<",
       ">\u0028\u0029\u002a\u002b\u002c\u002d\u002e\u002f<"
     ]
-  },
-  "Person": {
+  }],
+  "Person": [{
     "family_name": "Steve",
     "given_name": "Stevie",
     "Organisation": "IMF"
-  }
+  }]
 }
diff --git a/unittests/table_json_conversion/data/simple_data_schema.json b/unittests/table_json_conversion/data/simple_data_schema.json
new file mode 100644
index 0000000000000000000000000000000000000000..0a4d44f733b3a8301e2d053cd570c904ef02750f
--- /dev/null
+++ b/unittests/table_json_conversion/data/simple_data_schema.json
@@ -0,0 +1,145 @@
+{
+  "type": "object",
+  "properties": {
+    "Training": {
+      "type": "array",
+      "items": {
+        "type": "object",
+        "required": [],
+        "additionalProperties": false,
+        "title": "Training",
+        "properties": {
+          "name": {
+            "type": "string",
+            "description": "The name of the Record to be created"
+          },
+          "date": {
+            "description": "The date of the training.",
+            "anyOf": [
+              {
+                "type": "string",
+                "format": "date"
+              },
+              {
+                "type": "string",
+                "format": "date-time"
+              }
+            ]
+          },
+          "url": {
+            "type": "string",
+            "description": "The URL"
+          },
+          "subjects": {
+            "type": "array",
+            "items": {
+              "type": "string"
+            }
+          },
+          "coach": {
+            "type": "array",
+            "items": {
+              "type": "object",
+              "required": [],
+              "additionalProperties": false,
+              "title": "coach",
+              "properties": {
+                "name": {
+                  "type": "string",
+                  "description": "The name of the Record to be created"
+                },
+                "family_name": {
+                  "type": "string"
+                },
+                "given_name": {
+                  "type": "string"
+                },
+                "Organisation": {
+                  "enum": [
+                    "Federal Reserve",
+                    "IMF",
+                    "ECB"
+                  ]
+                }
+              }
+            }
+          },
+          "supervisor": {
+            "type": "object",
+            "required": [],
+            "additionalProperties": false,
+            "title": "supervisor",
+            "properties": {
+              "name": {
+                "type": "string",
+                "description": "The name of the Record to be created"
+              },
+              "family_name": {
+                "type": "string"
+              },
+              "given_name": {
+                "type": "string"
+              },
+              "Organisation": {
+                "enum": [
+                  "Federal Reserve",
+                  "IMF",
+                  "ECB"
+                ]
+              }
+            }
+          },
+          "duration": {
+            "type": "number"
+          },
+          "participants": {
+            "type": "integer"
+          },
+          "remote": {
+            "type": "boolean"
+          },
+          "slides": {
+            "type": "string",
+            "format": "data-url"
+          }
+        },
+        "$schema": "https://json-schema.org/draft/2020-12/schema"
+      }
+    },
+    "Person": {
+      "type": "array",
+      "items": {
+        "type": "object",
+        "required": [],
+        "additionalProperties": false,
+        "title": "Person",
+        "properties": {
+          "name": {
+            "type": "string",
+            "description": "The name of the Record to be created"
+          },
+          "family_name": {
+            "type": "string"
+          },
+          "given_name": {
+            "type": "string"
+          },
+          "Organisation": {
+            "enum": [
+              "Federal Reserve",
+              "IMF",
+              "ECB"
+            ]
+          }
+        },
+        "$schema": "https://json-schema.org/draft/2020-12/schema"
+      }
+    }
+  },
+  "required": [
+    "Training",
+    "Person"
+  ],
+  "additionalProperties": false,
+  "$schema": "https://json-schema.org/draft/2020-12/schema"
+}
diff --git a/unittests/table_json_conversion/test_fill_xlsx.py b/unittests/table_json_conversion/test_fill_xlsx.py
index 1315bd9fe06196ba5df31d34182293887d5a2bb1..f580fdbf867f08db0d72ade3537d4f2c1e8301d6 100644
--- a/unittests/table_json_conversion/test_fill_xlsx.py
+++ b/unittests/table_json_conversion/test_fill_xlsx.py
@@ -1,10 +1,10 @@
-#!/usr/bin/env python3
 # encoding: utf-8
 #
 # This file is a part of the LinkAhead Project.
 #
 # Copyright (C) 2024 Indiscale GmbH <info@indiscale.com>
 # Copyright (C) 2024 Henrik tom Wörden <h.tomwoerden@indiscale.com>
+# Copyright (C) 2024 Daniel Hornung <d.hornung@indiscale.com>
 #
 # This program is free software: you can redistribute it and/or modify
 # it under the terms of the GNU Affero General Public License as
@@ -26,10 +26,15 @@ import tempfile
 
 import jsonschema.exceptions as schema_exc
 import pytest
-from caosadvancedtools.table_json_conversion.fill_xlsx import (
-    _get_path_rows, _get_row_type_column_index, fill_template)
 from openpyxl import load_workbook
 
+from caosadvancedtools.table_json_conversion import xlsx_utils
+from caosadvancedtools.table_json_conversion.fill_xlsx import fill_template
+from caosadvancedtools.table_json_conversion.xlsx_utils import (
+    get_row_type_column_index,
+    get_path_rows,
+)
+
 from .utils import compare_workbooks
 
 
@@ -67,8 +72,8 @@ custom_output: str, optional
 
 def test_detect():
     example = load_workbook(rfp("data/simple_template.xlsx"))
-    assert 0 == _get_row_type_column_index(example['Person'])
-    assert [1, 2] == _get_path_rows(example['Person'])
+    assert 0 == get_row_type_column_index(example['Person'])
+    assert [1, 2] == get_path_rows(example['Person'])
 
 
 def test_temporary():
@@ -156,3 +161,10 @@ def test_errors():
                          known_good=rfp("data/simple_data.xlsx"),
                          schema=rfp("data/simple_schema.json"))
     assert exc.value.message == "0.5 is not of type 'integer'"
+
+
+def test_data_schema_generation():
+    model_schema = xlsx_utils.read_or_dict(rfp("data/simple_schema.json"))
+    array_schema = xlsx_utils.array_schema_from_model_schema(model_schema)
+    expected = xlsx_utils.read_or_dict(rfp("data/simple_data_schema.json"))
+    assert array_schema == expected
diff --git a/unittests/table_json_conversion/test_table_template_generator.py b/unittests/table_json_conversion/test_table_template_generator.py
index 61da2142d1965adf63fda54c1fbbde1ce1e2a060..070a7908dc3884a5a3f721140ff245617753d5e5 100644
--- a/unittests/table_json_conversion/test_table_template_generator.py
+++ b/unittests/table_json_conversion/test_table_template_generator.py
@@ -25,8 +25,8 @@ import tempfile
 from typing import Tuple
 
 import pytest
-from caosadvancedtools.table_json_conversion.table_generator import (
-    ColumnType, XLSXTemplateGenerator)
+from caosadvancedtools.table_json_conversion.table_generator import XLSXTemplateGenerator
+from caosadvancedtools.table_json_conversion.xlsx_utils import ColumnType
 from openpyxl import load_workbook
 
 from .utils import compare_workbooks