diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index f620aeffd5146254bf630645eaded34d69f35f1c..ea5eb78bd8323b1dd7199dc5eb91e899b1d98f81 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -64,6 +64,7 @@ test:
     - rc=`cat .docker/result`
     - exit $rc
   dependencies: [cert]
+  needs: [cert]
   artifacts:
     paths:
       - caosdb_log.txt
@@ -95,6 +96,7 @@ cert:
   tags: [docker]
   stage: cert
   image: $CI_REGISTRY_IMAGE
+  needs: [build-testenv]
   artifacts:
     paths:
       - .docker/cert/
@@ -106,14 +108,16 @@ style:
   tags: [docker]
   stage: style
   image: $CI_REGISTRY_IMAGE
+  needs: []
   script:
-    - autopep8 -ar --diff --exit-code --exclude swagger_client .
+    - make style
   allow_failure: true

unittest:
   tags: [docker]
   stage: unittest
   image: $CI_REGISTRY_IMAGE
+  needs: []
   script:
     - tox
diff --git a/.gitlab/merge_request_templates/Default.md b/.gitlab/merge_request_templates/Default.md
deleted file mode 100644
index 77a95da1cc40c815e4952a1283d345af56e80461..0000000000000000000000000000000000000000
--- a/.gitlab/merge_request_templates/Default.md
+++ /dev/null
@@ -1,49 +0,0 @@
-# Summary
-
-    Insert a meaningful description for this merge request here. What is the
-    new/changed behavior? Which bug has been fixed? Are there related Issues?
-
-# Focus
-
-    Point the reviewer to the core of the code change. Where should they start
-    reading? What should they focus on (e.g. security, performance,
-    maintainability, user-friendliness, compliance with the specs, finding more
-    corner cases, concrete questions)?
-
-# Test Environment
-
-    How to set up a test environment for manual testing?
-
-# Check List for the Author
-
-Please, prepare your MR for a review. Be sure to write a summary and a
-focus and create gitlab comments for the reviewer. They should guide the
-reviewer through the changes, explain your changes and also point out open
-questions. For further good practices have a look at [our review
-guidelines](https://gitlab.com/caosdb/caosdb/-/blob/dev/REVIEW_GUIDELINES.md)
-
-- [ ] All automated tests pass
-- [ ] Reference related Issues
-- [ ] Up-to-date CHANGELOG.md
-- [ ] Annotations in code (Gitlab comments)
-  - Intent of new code
-  - Problems with old code
-  - Why this implementation?
-
-
-# Check List for the Reviewer
-
-
-- [ ] I understand the intent of this MR
-- [ ] All automated tests pass
-- [ ] Up-to-date CHANGELOG.md
-- [ ] The test environment setup works and the intended behavior is
-  reproducible in the test environment
-- [ ] In-code documentation and comments are up-to-date.
-- [ ] Check: Are there spezifications? Are they satisfied?
-
-For further good practices have a look at [our review guidelines](https://gitlab.com/caosdb/caosdb/-/blob/dev/REVIEW_GUIDELINES.md).
-
-
-/assign me
-/target_branch dev
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 5cd85d320a265ec9b6a27742823a0c1a5e92fae1..bfc2e12f8c6722526028a5670197be1ace229d53 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,9 +13,14 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
   server side scripting [EXPERIMENTAL]
 - Models parser can import from Json Schema files now:
   `models.parser.parse_model_from_json_schema(...)`
+- New keyword "role" in the yaml data model that allows creation of Records and Files.
+- It is now possible to set values of properties and default values of properties
+  directly in the yaml model.

 ### Changed ###

+- `TableConverter` now converts int to float and vice versa to match the desired dtype.
+
 ### Deprecated ###

 ### Removed ###
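For illustration, a minimal sketch of what the two new "Added" entries enable, based on the parser changes and unit tests further down in this diff. The entity names `MyRecord` and `count` are made up for the example:

    # Hypothetical YAML model using the new "role" and "value" keywords.
    import caosdb as db
    from caosadvancedtools.models.parser import parse_model_from_string

    model = """
MyRecord:                  # made-up name
  role: Record             # "role" creates a Record instead of a RecordType
  obligatory_properties:
    count: 42              # property value set directly in the yaml model
count:
  datatype: INTEGER
"""
    entities = parse_model_from_string(model)
    assert isinstance(entities["MyRecord"], db.Record)
    assert entities["MyRecord"].get_property("count").value == 42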
diff --git a/Makefile b/Makefile
index bf90a5c51a61a25ba65693575a087f5d8bba2b11..52ac04456cf59a24334003d4a0af9055dd3b11ec 100644
--- a/Makefile
+++ b/Makefile
@@ -36,10 +36,5 @@ unittest:
 	pytest-3 unittests

 style:
-	pycodestyle --count src unittests
 	autopep8 -ar --diff --exit-code --exclude swagger_client .
 .PHONY: style
-
-lint:
-	pylint --unsafe-load-any-extension=y -d all -e E,F --ignore=swagger_client src/caosadvancedtools
-.PHONY: lint
diff --git a/README_SETUP.md b/README_SETUP.md
index 09a368a3d33f9fb2be799a5ba9961d293a2fd6c3..43047d554afbe8ffba11aef67b20dde44d29bdcf 100644
--- a/README_SETUP.md
+++ b/README_SETUP.md
@@ -38,7 +38,7 @@ Optional h5-crawler:
 1. Change directory to `integrationtests/`.
 2. Mount `extroot` to the folder that will be used as extroot. E.g. `sudo mount
-   -o bind extroot ../../caosdb-deploy/profiles/empty/paths/extroot` (or
+   -o bind extroot ../../caosdb-deploy/profiles/debug/paths/extroot` (or
    whatever path the extroot of the empty profile to be used is located at).
 3. Start (or restart) an empty (!) CaosDB instance (with the mounted extroot).
    The database will be cleared during testing, so it's important to
@@ -48,7 +48,8 @@ Optional h5-crawler:
    `integrationtest/extroot/` directory.

 ## Code Formatting
-`autopep8 -i -r ./`
+
+`make style`

 ## Documentation #
diff --git a/integrationtests/test_assure_functions.py b/integrationtests/test_assure_functions.py
index 56f9767a0f436201ab6003ffd88f631bdb089544..9f4e387d52f25382d18cfb21372a06346d2b5465 100644
--- a/integrationtests/test_assure_functions.py
+++ b/integrationtests/test_assure_functions.py
@@ -32,26 +32,25 @@ from caosadvancedtools.cfood import (assure_object_is_in_list)
 from caosadvancedtools.guard import (global_guard, RETRIEVE, UPDATE)


-def setup_module():
+def setup():
     """Delete all test entities."""
     db.execute_query("FIND Test*").delete(raise_exception_on_error=False)


-def setup():
+def setup_module():
     """Allow all updates and delete test data"""
     global_guard.level = UPDATE
-    setup_module()
+    setup()


-def teardown():
+def teardown_module():
     """Reset guard level and delete test data."""
     global_guard.level = RETRIEVE
-    setup_module()
+    setup()


 def test_assure_list_in_place():
     """Test an in-place update with `assure_object_is_in_list`."""
-
     int_list_prop = db.Property(name="TestIntListProperty",
                                 datatype=db.LIST(db.INTEGER)).insert()
     rt1 = db.RecordType(name="TestType1").add_property(
diff --git a/integrationtests/test_base_table_exporter_integration.py b/integrationtests/test_base_table_exporter_integration.py
index 1c9158bd1d9600884571957d4916939f82c1a9ca..9d79e857fe706d78103ade3b92ee38498a2a1607 100644
--- a/integrationtests/test_base_table_exporter_integration.py
+++ b/integrationtests/test_base_table_exporter_integration.py
@@ -23,6 +23,7 @@
 # ** end header
 #
 import caosdb as db
+import pytest
 from caosadvancedtools import table_export as te


@@ -85,8 +86,11 @@ def setup_module():
         pass


+@pytest.fixture(autouse=True)
 def setup():
-    """No further setup"""
+    """Same as module setup."""
+    setup_module()
+    yield None
     setup_module()
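The setup changes in the integration-test file above (and in test_datamodel_problems.py below) follow the same pattern: the free-standing nose-style `setup()`/`teardown()` functions are replaced by a pytest fixture that runs automatically around every test. A schematic sketch of the pattern, using nothing but plain pytest:

    # Schematic: an autouse fixture wraps each test in its module.
    import pytest

    def setup_module():
        """Module-level preparation, e.g. deleting leftover test entities."""

    @pytest.fixture(autouse=True)
    def setup():
        setup_module()  # runs before each test
        yield None      # the test itself runs here
        setup_module()  # runs after each test, even if it failed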
diff --git a/integrationtests/test_crawl_with_datamodel_problems.py b/integrationtests/test_crawl_with_datamodel_problems.py
index 61fec39026a1a3480ecc5b52551c712d7a459b08..0c6a145afdab682f82af09a17fb9aa0770769959 100644
--- a/integrationtests/test_crawl_with_datamodel_problems.py
+++ b/integrationtests/test_crawl_with_datamodel_problems.py
@@ -20,10 +20,11 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.
 #
 # ** end header
-"""Test whether the crawler correctly identifies the data model
-problems caused by a faulty model.
+
+"""Test whether the crawler correctly identifies the data model problems caused by a faulty model.
 """
+
 import caosdb as db
 from caosadvancedtools import loadFiles
 from caosadvancedtools.cfood import fileguide
diff --git a/integrationtests/test_datamodel_problems.py b/integrationtests/test_datamodel_problems.py
index 7d56f4da8eea34604ed1c820e14555f087c353bd..3bca302dd2a337cee7fd023ee6a64c5185bc99f5 100644
--- a/integrationtests/test_datamodel_problems.py
+++ b/integrationtests/test_datamodel_problems.py
@@ -44,12 +44,15 @@ def setup_module():
         print(delete_exc)


+@pytest.fixture(autouse=True)
 def setup():
-    """No further setup"""
+    """Same as module setup."""
+    setup_module()
+    yield None
     setup_module()


-def teardown():
+def teardown_module():
     """Clear and delete again."""
     setup_module()
diff --git a/setup.py b/setup.py
index 4b2b05b8689cd14b26abaa299646b85d685ab95c..98599d9a5ead13520726546c23cbe59c57242fc0 100755
--- a/setup.py
+++ b/setup.py
@@ -154,11 +154,11 @@ def setup_package():
         long_description_content_type="text/markdown",
         author='Henrik tom Wörden',
         author_email='h.tomwoerden@indiscale.com',
-        install_requires=["caosdb>=0.6.0",
+        install_requires=["caosdb>=0.7.0",
                           "jsonschema>=4.4.0",
+                          "numpy>=1.17.3",
                           "openpyxl>=3.0.0",
                           "pandas>=1.2.0",
-                          "numpy>=1.17.3",
                           "xlrd>=2.0",
                           ],
         extras_require={"h5-crawler": ["h5py>=3.3.0", ],
diff --git a/src/caosadvancedtools/cfood.py b/src/caosadvancedtools/cfood.py
index 2c56e171d8e70789bc4f0dd3881a7d8e716e75f7..3c2d5408ef4d857f62ce4e908f90c4ffccef4d19 100644
--- a/src/caosadvancedtools/cfood.py
+++ b/src/caosadvancedtools/cfood.py
@@ -391,7 +391,7 @@ class AbstractFileCFood(AbstractCFood):
 def assure_object_is_in_list(obj, containing_object, property_name,
                              to_be_updated=None, datatype=None):
     """Checks whether `obj` is one of the values in the list property
-    `property_name` of the supplied entity containing_object`.
+    `property_name` of the supplied entity `containing_object`.

     If this is the case this function returns. Otherwise the entity is
     added to the property `property_name` and the entity
diff --git a/src/caosadvancedtools/crawler.py b/src/caosadvancedtools/crawler.py
index 5d91d85cbbbff5b6f64ce9a9de1f29ca603d3b8a..87b91a52a6034e906766a56ded787416e5c0027d 100644
--- a/src/caosadvancedtools/crawler.py
+++ b/src/caosadvancedtools/crawler.py
@@ -279,6 +279,8 @@ class Crawler(object):
             except DataInconsistencyError as e:
                 logger.debug(traceback.format_exc())
                 logger.debug(e)
+            # TODO: Generally: in which cases should exceptions be raised? When is
+            # errors_occured set to True? The expected behavior must be documented.
             except Exception as e:
                 try:
                     DataModelProblems.evaluate_exception(e)
diff --git a/src/caosadvancedtools/models/parser.py b/src/caosadvancedtools/models/parser.py
index d542af686a1c3e76b8a87d5084d841bce910a151..37981f8a392a5b2dbd31b1bff1bb691854395bc6 100644
--- a/src/caosadvancedtools/models/parser.py
+++ b/src/caosadvancedtools/models/parser.py
@@ -48,7 +48,7 @@ import caosdb as db
 from .data_model import DataModel

 # Keywords which are allowed in data model descriptions.
-KEYWORDS = ["parent",
+KEYWORDS = ["parent",  # TODO: Can we remove this? See #36.
             "importance",
             "datatype",  # for example TEXT, INTEGER or REFERENCE
             "unit",
@@ -58,8 +58,11 @@ KEYWORDS = ["parent",
             "suggested_properties",
             "inherit_from_recommended",
             "inherit_from_suggested",
-            "inherit_from_obligatory", ]
+            "inherit_from_obligatory",
+            "role",
+            "value", ]

+# TODO: check whether it's really ignored
 # These KEYWORDS are not forbidden as properties, but merely ignored.
 KEYWORDS_IGNORED = [
     "unit",
@@ -150,6 +153,10 @@ def parse_model_from_json_schema(filename: str):

 class Parser(object):
     def __init__(self):
+        """Initialize an empty parser object.
+
+        Sets up the dictionary of entities and the list of treated elements.
+        """
         self.model = {}
         self.treated = []
@@ -218,13 +225,11 @@
             ymlmodel["extern"] = []

         for name in ymlmodel["extern"]:
-            if db.execute_query("COUNT Property {}".format(name)) > 0:
-                self.model[name] = db.execute_query(
-                    "FIND Property WITH name={}".format(name), unique=True)
-
-            elif db.execute_query("COUNT RecordType {}".format(name)) > 0:
-                self.model[name] = db.execute_query(
-                    "FIND RecordType WITH name={}".format(name), unique=True)
+            for role in ("Property", "RecordType", "Record", "File"):
+                if db.execute_query("COUNT {} {}".format(role, name)) > 0:
+                    self.model[name] = db.execute_query(
+                        "FIND {} WITH name={}".format(role, name), unique=True)
+                    break
             else:
                 raise Exception("Did not find {}".format(name))
@@ -276,6 +281,8 @@ class Parser(object):
         """
         adds names of Properties and RecordTypes to the model dictionary
         Properties are also initialized.
+
+        `name` is the key of the yaml element, and `definition` its value.
         """

         if name == "__line__":
@@ -299,9 +306,29 @@ class Parser(object):
                 # and create the new property
                 self.model[name] = db.Property(name=name,
                                                datatype=definition["datatype"])
+        elif (self.model[name] is None and isinstance(definition, dict)
+                and "role" in definition):
+            if definition["role"] == "RecordType":
+                self.model[name] = db.RecordType(name=name)
+            elif definition["role"] == "Record":
+                self.model[name] = db.Record(name=name)
+            elif definition["role"] == "File":
+                # TODO(fspreck) Implement files at some later point in time
+                raise NotImplementedError(
+                    "The definition of file objects is not yet implemented.")
+
+                # self.model[name] = db.File(name=name)
+            elif definition["role"] == "Property":
+                self.model[name] = db.Property(name=name)
+            else:
+                raise RuntimeError("Unknown role {} in definition of entity.".format(
+                    definition["role"]))

-        # add other definitions recursively
+        # for setting values of properties directly:
+        if not isinstance(definition, dict):
+            return

+        # add other definitions recursively
         for prop_type in ["recommended_properties",
                           "suggested_properties", "obligatory_properties"]:
@@ -338,15 +365,27 @@ class Parser(object):
                 continue
             n = self._stringify(n)

-            if (isinstance(e, dict) and "datatype" in e
-                    and (_get_listdatatype(e["datatype"]) is not None)):
-                self.model[ent_name].add_property(
-                    name=n,
-                    importance=importance,
-                    datatype=db.LIST(_get_listdatatype(e["datatype"])))
+            if isinstance(e, dict):
+                if "datatype" in e and _get_listdatatype(e["datatype"]) is not None:
+                    datatype = db.LIST(_get_listdatatype(e["datatype"]))
+                else:
+                    # ignore a possible e["datatype"] here if it's not a list
+                    # since it has been treated in the definition of the
+                    # property (entity) already
+                    datatype = None
+                if "value" in e:
+                    value = e["value"]
+                else:
+                    value = None
+
             else:
-                self.model[ent_name].add_property(name=n,
-                                                  importance=importance)
+                value = e
+                datatype = None
+
+            self.model[ent_name].add_property(name=n,
+                                              value=value,
+                                              importance=importance,
+                                              datatype=datatype)

     def _inherit(self, name, prop, inheritance):
         if not isinstance(prop, list):
@@ -369,6 +408,10 @@
         if definition is None:
             return

+        # for setting values of properties directly:
+        if not isinstance(definition, dict):
+            return
+
         if ("datatype" in definition
                 and definition["datatype"].startswith("LIST")):

@@ -385,6 +428,9 @@
             if prop_name == "unit":
                 self.model[name].unit = prop

+            elif prop_name == "value":
+                self.model[name].value = prop
+
             elif prop_name == "description":
                 self.model[name].description = prop

@@ -413,6 +459,10 @@
             elif prop_name == "datatype":
                 continue

+            # role has already been used
+            elif prop_name == "role":
+                continue
+
             elif prop_name == "inherit_from_obligatory":
                 self._inherit(name, prop, db.OBLIGATORY)
             elif prop_name == "inherit_from_recommended":
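The rewritten `extern` handling above collapses the per-role query branches into a loop over all four roles, using Python's `for`/`else`: the `else` block runs only if the loop finished without hitting `break`. A condensed, standalone sketch of that control flow, with hypothetical stand-ins for the two `db.execute_query` calls:

    # Sketch of the for/else lookup; count_matches and find_one are made-up
    # stand-ins for the COUNT and FIND queries in the real code.
    def resolve_extern(name, count_matches, find_one):
        for role in ("Property", "RecordType", "Record", "File"):
            if count_matches(role, name) > 0:
                entity = find_one(role, name)
                break                # a role matched, skip the else clause
        else:                        # no break: no role matched
            raise Exception("Did not find {}".format(name))
        return entity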
diff --git a/src/caosadvancedtools/table_importer.py b/src/caosadvancedtools/table_importer.py
index 0b55252bbf4d65cde1ffdf0711f396dda0f29546..1f515e78e3ddbd198fa0336589a359ba9154f038 100755
--- a/src/caosadvancedtools/table_importer.py
+++ b/src/caosadvancedtools/table_importer.py
@@ -205,27 +205,33 @@ def string_in_list(val, options, ignore_case=True):
     return val


-class TableImporter(object):
+class TableImporter():
+    """Abstract base class for importing data from tables.
+    """
+
     def __init__(self, converters, obligatory_columns=None, unique_keys=None,
                  datatypes=None):
         """
-        converters: dict with column names as keys and converter functions as
-                    values
-                    This dict also defines what columns are required to exist
-                    throught the existing keys. The converter functions are
-                    applied to the cell values. They should also check for
-                    ValueErrors, such that a separate value check is not
-                    necessary.
-        obligatory_columns: list of column names, optional
-                            each listed column must not have missing values
-        unique_columns : list of column names that in
-                         combination must be unique; i.e. each row has a
-                         unique combination of values in those columns.
-        datatypes: dict with column names as keys and datatypes as values
-                   All non-null values will be checked whether they have the
-                   provided datatype.
-                   This dict also defines what columns are required to exist
-                   throught the existing keys.
+        Parameters
+        ----------
+        converters : dict
+            Dict with column names as keys and converter functions as values. This dict also
+            defines which columns are required to exist through the existing keys. The converter
+            functions are applied to the cell values. They should also check for ValueErrors,
+            such that a separate value check is not necessary.
+
+        obligatory_columns : list, optional
+            List of column names; each listed column must not have missing values.
+
+        unique_keys : list, optional
+            List of column names that in combination must be unique: each row has a unique
+            combination of values in those columns.
+
+        datatypes : dict, optional
+            Dict with column names as keys and datatypes as values. All non-null values will be
+            checked whether they have the provided datatype. This dict also defines which columns
+            are required to exist through the existing keys.
+
         """

         if converters is None:
@@ -247,11 +253,14 @@
         raise NotImplementedError()

     def check_columns(self, df, filename=None):
-        """
-        checks whether all required columns, i.e. columns for which converters
-        were defined exist.
+        """Check whether all required columns exist.
+
+        Required columns are columns for which converters are defined.
+
+        Raises
+        ------
+        DataInconsistencyError

-        Raises: DataInconsistencyError
         """

         for col in self.required_columns:
@@ -267,12 +276,11 @@
                 raise DataInconsistencyError(errmsg)

     def check_unique(self, df, filename=None):
-        """
-        Check whether value combinations that shall be unique for each row are
-        unique.
+        """Check whether value combinations that shall be unique for each row are unique.

         If a second row is found, that uses the same combination of values as a
         previous one, the second one is removed.
+
         """
         df = df.copy()
         uniques = []
@@ -299,13 +307,32 @@

         return df

-    def check_datatype(self, df, filename=None):
-        """
-        Check for each column whether non-null fields are have the correct
-        datatype.
-        """
+    def check_datatype(self, df, filename=None, strict=False):
+        """Check for each column whether non-null fields have the correct datatype.
+
+        .. note::
+
+          If columns are integer, but should be float, this method converts the respective columns
+          in place.
+
+        Parameters
+        ----------
+
+        strict: boolean, optional
+          If False (the default), try to convert columns, otherwise raise an error.
+
+        """
         for key, datatype in self.datatypes.items():
+            # Check for castable numeric types first: We unconditionally cast int to the default
+            # float, because CaosDB does not have different sizes anyway.
+            col_dtype = df.dtypes[key]
+            if not strict and not np.issubdtype(col_dtype, datatype):
+                issub = np.issubdtype
+                # These special cases should be fine.
+                if issub(col_dtype, np.integer) and issub(datatype, np.floating):
+                    df[key] = df[key].astype(datatype)
+
+            # Now check each element
             for idx, val in df.loc[
                     pd.notnull(df.loc[:, key]), key].iteritems():

@@ -326,6 +353,11 @@
         Check in each row whether obligatory fields are empty or null.

         Rows that have missing values are removed.
+
+        Returns
+        -------
+        out : pandas.DataFrame
+          The input DataFrame with incomplete rows removed.
         """
         df = df.copy()

@@ -362,10 +394,26 @@

         return df

-    def check_dataframe(self, df, filename):
+    def check_dataframe(self, df, filename=None, strict=False):
+        """Check if the dataframe conforms to the restrictions.
+
+        Checked restrictions are: Columns, data types, uniqueness requirements.
+
+        Parameters
+        ----------
+
+        df: pandas.DataFrame
+          The dataframe to be checked.
+
+        filename: string, optional
+          The file name, only used for output in case of problems.
+
+        strict: boolean, optional
+          If False (the default), try to convert columns, otherwise raise an error.
+        """
         self.check_columns(df, filename=filename)
         df = self.check_missing(df, filename=filename)
-        self.check_datatype(df, filename=filename)
+        self.check_datatype(df, filename=filename, strict=strict)

         if len(self.unique_keys) > 0:
             df = self.check_unique(df, filename=filename)
@@ -378,8 +426,7 @@ class XLSImporter(TableImporter):
         return self.read_xls(filename=filename, **kwargs)

     def read_xls(self, filename, **kwargs):
-        """
-        converts an xls file into a Pandas DataFrame.
+        """Convert an xls file into a Pandas DataFrame.

         The converters of the XLSImporter object are used.
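A short sketch of the new lenient datatype check, assuming a TableImporter can be used directly (the project's own test below goes through XLSImporter and an xlsx file); the column name is made up:

    import numpy as np
    import pandas as pd
    from caosadvancedtools.table_importer import TableImporter

    importer = TableImporter(converters={},
                             datatypes={"reading": float})  # hypothetical column
    df = pd.DataFrame({"reading": [1, 2, 3]})               # int64 column
    importer.check_datatype(df)       # strict=False (default): casts int to float in place
    assert np.issubdtype(df["reading"].dtype, np.floating)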
diff --git a/unittests/data/datatypes.xlsx b/unittests/data/datatypes.xlsx
new file mode 100644
index 0000000000000000000000000000000000000000..34fc4cf43092a68b630e0e04ebc43609b8a0b17b
Binary files /dev/null and b/unittests/data/datatypes.xlsx differ
diff --git a/unittests/test_table_importer.py b/unittests/test_table_importer.py
index 9c8a379d8c12def32c04cf82c5e09c0f5f6f175c..4c7d044ef1de877cf4072034c96aca7113f75cc0 100644
--- a/unittests/test_table_importer.py
+++ b/unittests/test_table_importer.py
@@ -23,7 +23,6 @@ import unittest
 from functools import partial
 from tempfile import NamedTemporaryFile

-import caosdb as db
 import numpy as np
 import pandas as pd
 import pytest
@@ -211,6 +210,19 @@ class XLSImporterTest(TableImporterTest):
         self.assertRaises(DataInconsistencyError, importer.read_xls,
                           tmp.name)

+    def test_datatypes(self):
+        """Test datatypes in columns."""
+        importer = XLSImporter(converters={},
+                               obligatory_columns=["float_as_float"],
+                               datatypes={
+                                   "float_as_float": float,
+                                   "int_as_float": float,
+                                   "int_as_int": int,
+                               }
+                               )
+        df = importer.read_xls(os.path.join(os.path.dirname(__file__), "data", "datatypes.xlsx"))
+        assert np.issubdtype(df.loc[0, "int_as_float"], float)
+

 class CSVImporterTest(TableImporterTest):
     def test_full(self):
diff --git a/unittests/test_yaml_model_parser.py b/unittests/test_yaml_model_parser.py
index 161e2873a9c01f9ce415818116b9e4cf9aeadb5c..c3f09462fe0dbf67afa9c2bb9bf17224bd91eef6 100644
--- a/unittests/test_yaml_model_parser.py
+++ b/unittests/test_yaml_model_parser.py
@@ -1,5 +1,7 @@
 import unittest
+from datetime import date
 from tempfile import NamedTemporaryFile
+from pytest import raises

 import caosdb as db
 from caosadvancedtools.models.parser import (TwiceDefinedException,
@@ -15,6 +17,8 @@ def to_file(string):
     return f.name


+# TODO: Check the purpose of this function and add documentation.
+
 def parse_str(string):
     parse_model_from_yaml(to_file(string))

@@ -68,7 +72,8 @@ RT2:
   a:
 """

-        self.assertRaises(TwiceDefinedException, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(TwiceDefinedException,
+                          lambda: parse_model_from_yaml(to_file(string)))

     def test_typical_case(self):
         string = """
@@ -103,7 +108,8 @@ RT5:
 - RT1:
 - RT2:
 """
-        self.assertRaises(ValueError, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(
+            ValueError, lambda: parse_model_from_yaml(to_file(string)))

     def test_unknown_kwarg(self):
         string = """
@@ -111,7 +117,8 @@ RT1:
   datetime:
     p1:
 """
-        self.assertRaises(ValueError, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(
+            ValueError, lambda: parse_model_from_yaml(to_file(string)))

     def test_definition_in_inheritance(self):
         string = """
@@ -121,7 +128,8 @@ RT2:
   - RT1:
     description: "tach"
 """
-        self.assertRaises(ValueError, lambda: parse_model_from_yaml(to_file(string)))
+        self.assertRaises(
+            ValueError, lambda: parse_model_from_yaml(to_file(string)))

     def test_inheritance(self):
         string = """
@@ -301,6 +309,8 @@ class ExternTest(unittest.TestCase):
 class ErrorMessageTest(unittest.TestCase):
     """Tests for understandable error messages."""

+    # Note: This behavior changed with the implementation of the role keyword.
+    @unittest.expectedFailure
     def test_non_dict(self):
         """When a value is given, where a list or mapping is expected."""
         recordtype_value = """
@@ -328,3 +338,139 @@ A:
     with self.assertRaises(YamlDefinitionError) as yde:
         parse_str(string)
     assert("line {}".format(line) in yde.exception.args[0])
+
+
+def test_define_role():
+    model = """
+A:
+  role: Record
+"""
+    entities = parse_model_from_string(model)
+    assert "A" in entities
+    assert isinstance(entities["A"], db.Record)
+    assert entities["A"].role == "Record"
+
+    model = """
+A:
+  role: Record
+  inherit_from_obligatory:
+  - C
+  obligatory_properties:
+    b:
+b:
+  datatype: INTEGER
+C:
+  obligatory_properties:
+    b:
+D:
+  role: RecordType
+"""
+    entities = parse_model_from_string(model)
+    for l, ent in (("A", "Record"), ("b", "Property"),
+                   ("C", "RecordType"), ("D", "RecordType")):
+        assert l in entities
+        assert isinstance(entities[l], getattr(db, ent))
+        assert entities[l].role == ent
+
+    assert entities["A"].parents[0].name == "C"
+    assert entities["A"].name == "A"
+
+    assert entities["A"].properties[0].name == "b"
+    assert entities["A"].properties[0].value is None
+
+    assert entities["C"].properties[0].name == "b"
+    assert entities["C"].properties[0].value is None
+
+    model = """
+A:
+  role: Record
+  obligatory_properties:
+    b: 42
+b:
+  datatype: INTEGER
+"""
+
+    entities = parse_model_from_string(model)
+    assert entities["A"].get_property("b").value == 42
+    assert entities["b"].value is None
+
+    model = """
+b:
+  datatype: INTEGER
+  value: 18
+"""
+    entities = parse_model_from_string(model)
+    assert entities["b"].value == 18
+
+
+def test_issue_72():
+    """Tests for
+    https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/72
+
+    In some cases, faulty values would be read in for properties without a
+    specified value.
+
+    """
+    model = """
+Experiment:
+  obligatory_properties:
+    date:
+      datatype: DATETIME
+      description: 'date of the experiment'
+    identifier:
+      datatype: TEXT
+      description: 'identifier of the experiment'
+    temperature:
+      datatype: DOUBLE
+      description: 'temp'
+TestExperiment:
+  role: Record
+  inherit_from_obligatory:
+  - Experiment
+  obligatory_properties:
+    date: 2022-03-02
+    identifier: Test
+    temperature: 23
+  recommended_properties:
+    additional_prop:
+      datatype: INTEGER
+      value: 7
+"""
+    entities = parse_model_from_string(model)
+    assert "Experiment" in entities
+    assert "date" in entities
+    assert "identifier" in entities
+    assert "temperature" in entities
+    assert "TestExperiment" in entities
+    assert "additional_prop" in entities
+    assert isinstance(entities["Experiment"], db.RecordType)
+
+    assert entities["Experiment"].get_property("date") is not None
+    # No value is set, so this has to be None
+    assert entities["Experiment"].get_property("date").value is None
+
+    assert entities["Experiment"].get_property("identifier") is not None
+    assert entities["Experiment"].get_property("identifier").value is None
+
+    assert entities["Experiment"].get_property("temperature") is not None
+    assert entities["Experiment"].get_property("temperature").value is None
+
+    test_rec = entities["TestExperiment"]
+    assert isinstance(test_rec, db.Record)
+    assert test_rec.get_property("date").value == date(2022, 3, 2)
+    assert test_rec.get_property("identifier").value == "Test"
+    assert test_rec.get_property("temperature").value == 23
+    assert test_rec.get_property("additional_prop").value == 7
+
+
+def test_file_role():
+    """Not implemented for now, see
+    https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/74.
+
+    """
+    model = """
+F:
+  role: File
+"""
+    with raises(NotImplementedError):
+        parse_model_from_string(model)