Commit 839d0ea5 authored by Daniel Hornung

Merge branch 'f-fix-linter-errors' into 'dev'

Fix pylint errors

See merge request !126
parents ef70481e 522ec28e
2 merge requests: !128 MNT: Added a warning when column metadata is not configured, and a better..., !126 Fix pylint errors
Pipeline #59750 failed
Showing with 1182 additions and 1145 deletions
......@@ -20,6 +20,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `h5` instead of `h5-crawler`
- `dev`, `doc`, `test` and `all` are new; they install the dependencies for developing, testing,
documentation and everything.
- The `pandoc_header_tools.get_header()` parameter `add_header` has been renamed to `add_header_to_file`
to resolve a name collision.
### Deprecated ###
......
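Usage note (not part of this diff): for callers of `caosadvancedtools.pandoc_header_tools.get_header()` only the keyword changes; the file path below is a placeholder.
from caosadvancedtools.pandoc_header_tools import get_header
# before this change:  get_header("ExperimentalData/2023-01-01", add_header=True)
# after this change (same behaviour, renamed keyword):
start, end, header = get_header("ExperimentalData/2023-01-01", add_header_to_file=True)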
......@@ -41,5 +41,5 @@ style:
.PHONY: style
lint:
pylint --unsafe-load-any-extension=y --fail-under=9.72 -d R,C --ignore=swagger_client src/caosadvancedtools
pylint --unsafe-load-any-extension=y -d R,C --ignore=swagger_client src/caosadvancedtools
.PHONY: lint
......@@ -82,7 +82,7 @@ def setup_module():
"""Clear all test entities"""
try:
db.execute_query("FIND ENTITY Test*").delete()
except BaseException:
except Exception:
pass
......
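Side note on the `BaseException` to `Exception` changes throughout this MR: `except BaseException` also swallows `KeyboardInterrupt` and `SystemExit`, which should normally propagate. A minimal, self-contained sketch of the intended behaviour:
def cleanup():
    raise RuntimeError("cleanup failed")   # stands in for any ordinary error

try:
    cleanup()
except Exception:   # suppresses ordinary errors only;
    pass            # KeyboardInterrupt and SystemExit still propagate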
......@@ -67,7 +67,7 @@ class CacheTest(unittest.TestCase):
print(db.execute_query("FIND entity with id="+str(rec.id), unique=True))
try:
print(db.execute_query("FIND Record "+str(rec.id), unique=True))
except BaseException:
except Exception:
print("Query does not work as expected")
update.insert(cont, run_id)
assert len(update.get_updates(run_id)) == 1
......
......@@ -114,7 +114,7 @@ class CrawlerTest(unittest.TestCase):
for el in [self.rec1, self.rec2, self.rec3]:
try:
el.delete()
except BaseException:
except Exception:
pass
......
......@@ -19,4 +19,5 @@ init-hook=
disable=
fixme,
logging-format-interpolation,
logging-fstring-interpolation,
logging-not-lazy,
......@@ -94,7 +94,6 @@ class AbstractCache(ABC):
Increase this variable, when changes to the cache tables are made.
"""
pass
@abstractmethod
def create_cache(self):
......@@ -102,14 +101,12 @@ class AbstractCache(ABC):
Provide an overloaded function here that creates the cache in
the most recent version.
"""
pass
@abstractmethod
def get_default_file_name(self):
"""
Supply a default file name for the cache here.
"""
pass
def check_cache(self):
"""
......@@ -192,9 +189,6 @@ class IdentifiableCache(AbstractCache):
def get_default_file_name(self):
return "caosdb_identifiable_cache.db"
def __init__(self, db_file=None, force_creation=False):
super().__init__(db_file, force_creation)
def create_cache(self):
"""
Create a new SQLITE cache file in self.db_file.
......
......@@ -46,13 +46,17 @@ from abc import ABCMeta, abstractmethod
from datetime import datetime
import linkahead as db
from linkahead.common.models import Entity
from linkahead.exceptions import (BadQueryError, EmptyUniqueQueryError,
QueryNotUniqueError, TransactionError)
QueryNotUniqueError)
from .datamodel_problems import DataModelProblems
from .guard import global_guard as guard
# The pylint warnings triggered in this file are ignored, as this code is
# assumed to be deprecated in the near future. Should this change, they need
# to be reevaluated.
ENTITIES = {}
PROPERTIES = {}
RECORDS = {}
......@@ -184,7 +188,7 @@ class AbstractCFood(object, metaclass=ABCMeta):
"""
@classmethod
def match_item(cls, item):
def match_item(cls, item): # pylint: disable=unused-argument
""" Matches an item found by the crawler against this class. Returns
True if the item shall be treated by this class, i.e. if this class
matches the item.
......@@ -209,7 +213,6 @@ class AbstractCFood(object, metaclass=ABCMeta):
To be overwritten by subclasses
"""
pass
def attach(self, item):
self.attached_items.append(item)
......@@ -217,7 +220,7 @@ class AbstractCFood(object, metaclass=ABCMeta):
# TODO looking for should `attach` the files itself. This would allow to
# group them right away and makes it unnecessary to check matches later
# again.
def looking_for(self, item):
def looking_for(self, item): # pylint: disable=unused-argument
"""
returns True if item can be added to this CFood.
......@@ -353,7 +356,7 @@ class AbstractFileCFood(AbstractCFood):
raise NotImplementedError()
@classmethod
def match_item(cls, path):
def match_item(cls, path): # pylint: disable=arguments-renamed
""" Matches the regular expression of this class against file names
Parameters
......@@ -367,7 +370,7 @@ class AbstractFileCFood(AbstractCFood):
# TODO looking for should `attach` the files itself. This would allow to
# group them right away and makes it unnecessary to check matches later
# again.
def looking_for(self, crawled_file):
def looking_for(self, crawled_file): # pylint: disable=arguments-renamed
"""
returns True if crawled_file can be added to this CFood.
......@@ -569,6 +572,7 @@ def assure_parents_are(entity, parents, to_be_updated=None,
the new parents and the old ones are discarded.
Note that parent matching occurs based on names.
If a parent does not have a name, a ValueError is raised.
If the list to_be_updated is supplied, the entity is added to
the list in order to indicate that the entity `entity` should be updated.
......@@ -583,7 +587,7 @@ def assure_parents_are(entity, parents, to_be_updated=None,
for i, e in enumerate(parents):
if isinstance(e, db.Entity):
if e.name is None:
raise Exception("Entity should have name")
raise ValueError("Entity should have name")
else:
parents[i] = db.Entity(name=e)
......@@ -690,7 +694,7 @@ def assure_has_property(entity, name, value, to_be_updated=None,
try:
compare_time = datetime.fromisoformat(el.value)
except ValueError:
except ValueError as e:
# special case of wrong iso format
# time zone
tmp = el.value.split("+")
......@@ -708,7 +712,7 @@ def assure_has_property(entity, name, value, to_be_updated=None,
ms = '.' + tmp[1] + '0'*(6-len(tmp[1]))
else:
raise ValueError(
"invalid millisecond format in {}".format(el.value))
"invalid millisecond format in {}".format(el.value)) from e
else:
ms = ""
tmp = tmp[0] + ms + tz_str
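The added `from e` addresses pylint's raise-missing-from and keeps the original parsing error attached as `__cause__`. A standalone sketch of the pattern, independent of the crawler code:
from datetime import datetime

def parse_timestamp(text):
    try:
        return datetime.fromisoformat(text)
    except ValueError as e:
        # chain the new error to the original one so both appear in the traceback
        raise ValueError("invalid timestamp: {}".format(text)) from e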
......@@ -746,7 +750,7 @@ def assure_has_property(entity, name, value, to_be_updated=None,
def assure_property_is(entity, name, value, datatype=None, to_be_updated=None,
force=False):
force=False): # pylint: disable=unused-argument
"""
Checks whether `entity` has a Property `name` with the given value.
......
......@@ -62,9 +62,9 @@ def store(directory, xml=False):
rts, ps = get_dm()
os.makedirs(directory, exist_ok=True)
with open(os.path.join(directory, "recordtypes.txt"), "w") as fi:
with open(os.path.join(directory, "recordtypes.txt"), "w", encoding="utf-8") as fi:
fi.write(",".join([el[1] for el in rts]))
with open(os.path.join(directory, "properties.txt"), "w") as fi:
with open(os.path.join(directory, "properties.txt"), "w", encoding="utf-8") as fi:
fi.write(",".join([el[1] for el in ps]))
if xml:
......@@ -75,10 +75,10 @@ def store(directory, xml=False):
def load_dm(directory):
with open(os.path.join(directory, "recordtypes.txt"), "r") as fi:
with open(os.path.join(directory, "recordtypes.txt"), "r", encoding="utf-8") as fi:
text = fi.read()
rts = [el.strip() for el in text.split(",")]
with open(os.path.join(directory, "properties.txt"), "r") as fi:
with open(os.path.join(directory, "properties.txt"), "r", encoding="utf-8") as fi:
text = fi.read()
ps = [el.strip() for el in text.split(",")]
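The `encoding="utf-8"` additions silence pylint's unspecified-encoding warning: without an explicit encoding, `open()` falls back to the platform's locale encoding, so the same file may be read or written differently across machines. The pattern in isolation (directory and content are illustrative):
import os

def store_names(directory, names):
    os.makedirs(directory, exist_ok=True)
    # explicit encoding keeps the file byte-identical across platforms
    with open(os.path.join(directory, "names.txt"), "w", encoding="utf-8") as fi:
        fi.write(",".join(names))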
......@@ -112,7 +112,7 @@ def compare(directory):
print("{} is missing in the existing properties".format(p))
if __name__ == "__main__":
def main():
p = get_parser()
args = p.parse_args()
......@@ -121,3 +121,7 @@ if __name__ == "__main__":
if args.compare:
compare(args.compare)
if __name__ == "__main__":
main()
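Wrapping the argument handling in a `main()` function (here and in the other scripts below) keeps importing the module free of side effects and gives pylint a proper scope to check. A generic sketch of the shape, not the actual script:
import argparse

def get_parser():
    parser = argparse.ArgumentParser(description="example command line tool")
    parser.add_argument("--compare", help="directory to compare against")
    return parser

def main():
    args = get_parser().parse_args()
    if args.compare:
        print("comparing against", args.compare)

if __name__ == "__main__":
    main()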
......@@ -41,7 +41,6 @@ match. This occurs in basically three steps:
import logging
import os
import subprocess
import traceback
import uuid
from datetime import datetime
......@@ -60,6 +59,11 @@ from .serverside.helper import send_mail as main_send_mail
from .suppressKnown import SuppressKnown
from .utils import create_entity_link
# The pylint warnings triggered in this file are ignored, as this code is
# assumed to be deprecated in the near future. Should this change, they need
# to be reevaluated.
logger = logging.getLogger(__name__)
......@@ -67,7 +71,7 @@ def separated(text):
return "-"*60 + "\n" + text
def apply_list_of_updates(to_be_updated, update_flags={},
def apply_list_of_updates(to_be_updated, update_flags=None,
update_cache=None, run_id=None):
"""Updates the `to_be_updated` Container, i.e., pushes the changes to LinkAhead
after removing possible duplicates. If a cache is provided, unauthorized
......@@ -87,6 +91,8 @@ def apply_list_of_updates(to_be_updated, update_flags={},
Id with which the pending updates are cached. Only meaningful if
`update_cache` is provided. Default is None.
"""
if update_flags is None:
update_flags = {}
if len(to_be_updated) == 0:
return
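The `update_flags=None` change above fixes the shared-mutable-default pitfall (pylint: dangerous-default-value): a `{}` default is created once at function definition time and reused for every call. A minimal demonstration, unrelated to the crawler itself:
def add_flag(name, flags={}):          # BAD: the same dict object is reused on every call
    flags[name] = True
    return flags

def add_flag_fixed(name, flags=None):  # GOOD: a fresh dict per call
    if flags is None:
        flags = {}
    flags[name] = True
    return flags

print(add_flag("a"), add_flag("b"))              # both show {'a': True, 'b': True} - state leaked
print(add_flag_fixed("a"), add_flag_fixed("b"))  # {'a': True} {'b': True}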
......@@ -132,7 +138,7 @@ def apply_list_of_updates(to_be_updated, update_flags={},
)
logger.debug(traceback.format_exc())
logger.debug(e)
except Exception as e:
except Exception as e: # pylint: disable=broad-exception-caught
DataModelProblems.evaluate_exception(e)
......@@ -169,6 +175,7 @@ class Crawler(object):
self.abort_on_exception = abort_on_exception
self.update_cache = UpdateCache()
self.filterKnown = SuppressKnown()
self.run_id = None
advancedtoolslogger = logging.getLogger("caosadvancedtools")
# TODO this seems to be a bad idea. What if the handler was not added
......@@ -220,7 +227,7 @@ class Crawler(object):
new_cont = db.Container.from_xml(new)
ids = []
tmp = db.Container()
update_incomplete = False
update_incomplete = False # pylint: disable=unused-variable
# remove duplicate entities
for el in new_cont:
if el.id not in ids:
......@@ -229,13 +236,13 @@ class Crawler(object):
else:
update_incomplete = True
new_cont = tmp
if new_cont[0].version:
if new_cont[0].version: # pylint: disable=no-member
valids = db.Container()
nonvalids = db.Container()
for ent in new_cont:
remote_ent = db.Entity(id=ent.id).retrieve()
if ent.version == remote_ent.version:
if ent.version == remote_ent.version: # pylint: disable=no-member
valids.append(ent)
else:
update_incomplete = True
......@@ -317,10 +324,10 @@ class Crawler(object):
logger.debug(e)
# TODO: Generally: in which cases should exceptions be raised? When is
# errors_occured set to True? The expected behavior must be documented.
except Exception as e:
except Exception as e: # pylint: disable=broad-exception-caught
try:
DataModelProblems.evaluate_exception(e)
except BaseException:
except Exception: # pylint: disable=broad-exception-caught
pass
logger.debug("Failed during execution of {}!".format(
Cfood.__name__))
......@@ -349,13 +356,13 @@ class Crawler(object):
logger.info("Cannot access {}. However, it might be needed for"
" the correct execution".format(e.filename))
remove_cfoods.append(cfood)
except Exception as e:
except Exception as e: # pylint: disable=broad-exception-caught
try:
DataModelProblems.evaluate_exception(e)
except BaseException:
except Exception: # pylint: disable=broad-exception-caught
pass
logger.debug("Failed during execution of {}!".format(
Cfood.__name__))
cfood.__name__))
logger.debug(traceback.format_exc())
logger.debug(e)
remove_cfoods.append(cfood)
......@@ -442,10 +449,10 @@ class Crawler(object):
except DataInconsistencyError as e:
logger.debug(traceback.format_exc())
logger.debug(e)
except Exception as e:
except Exception as e: # pylint: disable=broad-exception-caught
try:
DataModelProblems.evaluate_exception(e)
except Exception:
except Exception: # pylint: disable=broad-exception-caught
pass
logger.info("Failed during execution of {}!".format(
cfood.__class__.__name__))
......@@ -488,8 +495,8 @@ ____________________\n""".format(i+1, len(pending_changes)) + str(el[3]))
logger.error(err_msg)
logger.error('Crawler finished with Datamodel Errors')
elif errors_occured:
msg = "There were fatal errors during execution, please "
"contact the system administrator!"
msg = ("There were fatal errors during execution, please contact "
"the system administrator!")
if self.debug_file:
msg += "\nPlease provide the following path:\n{}".format(
......@@ -600,7 +607,7 @@ ____________________\n""".format(i+1, len(pending_changes)) + str(el[3]))
randname = os.path.basename(os.path.abspath(directory))
filepath = os.path.abspath(os.path.join(directory, filename))
filename = os.path.join(randname, filename)
with open(filepath, "w") as f:
with open(filepath, "w", encoding="utf-8") as f:
f.write(form)
return filename
......@@ -680,7 +687,7 @@ carefully and if the changes are ok, click on the following link:
guard.safe_insert(missing, unique=False,
flags={"force-missing-obligatory": "ignore"})
inserted.append(ent)
except Exception as e:
except Exception as e: # pylint: disable=broad-exception-caught
DataModelProblems.evaluate_exception(e)
if len(existing) > 0:
info = "Identified the following existing entities:\n"
......
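One of the hunks above fixes an actual bug rather than a style issue: in the old code the second string literal after `msg = "There were fatal errors ..."` stood on its own line, so it was evaluated and discarded instead of being appended. Parenthesising both literals turns them into one implicitly concatenated string. A reduced example:
msg = "There were fatal errors during execution, please "
"contact the system administrator!"     # no-op statement: this literal is thrown away
print(repr(msg))                        # only the first half

msg = ("There were fatal errors during execution, please contact "
       "the system administrator!")
print(repr(msg))                        # the full sentence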
......@@ -31,6 +31,10 @@ class ExampleCFood(AbstractFileCFood):
return (r".*/(?P<species>[^/]+)/"
r"(?P<date>\d{4}-\d{2}-\d{2})/README.md")
def __init__(self, crawled_path, *args, **kwargs):
super().__init__(crawled_path, *args, **kwargs)
self.experiment = None
def create_identifiables(self):
self.experiment = db.Record()
self.experiment.add_parent(name="Experiment")
......
......@@ -118,7 +118,7 @@ def export(cont, directory="."):
try:
el.download(target)
print("Downloaded:", target)
except BaseException:
except Exception: # pylint: disable=broad-exception-caught
print("Failed download of:", target)
invert_ids(cont)
......@@ -128,7 +128,7 @@ def export(cont, directory="."):
xml = etree.tounicode(cont.to_xml(
local_serialization=True), pretty_print=True)
with open(os.path.join(directory, "linkahead_data.xml"), "w") as fi:
with open(os.path.join(directory, "linkahead_data.xml"), "w", encoding="utf-8") as fi:
fi.write(xml)
......@@ -149,8 +149,12 @@ def defineParser():
return parser
if __name__ == "__main__":
def main():
parser = defineParser()
args = parser.parse_args()
export_related_to(args.id, directory=args.directory)
if __name__ == "__main__":
main()
......@@ -39,7 +39,7 @@ from caosadvancedtools.models.data_model import DataModel
def create_dummy_file(text="Please ask the administrator for this file."):
tmpfile = NamedTemporaryFile(delete=False)
tmpfile.close()
with open(tmpfile.name, "w") as tm:
with open(tmpfile.name, "w", encoding="utf-8") as tm:
tm.write(text)
return tmpfile.name
......@@ -51,7 +51,7 @@ def import_xml(filename, rerun=False, interactive=True):
rerun: boolean; if true, files are not inserted as paths would conflict.
"""
cont = db.Container()
with open(filename) as fi:
with open(filename, encoding="utf-8") as fi:
cont = cont.from_xml(fi.read())
tmpfile = create_dummy_file()
......@@ -63,7 +63,7 @@ def import_xml(filename, rerun=False, interactive=True):
for el in cont:
if isinstance(el, db.File):
el._checksum = None
el._checksum = None # pylint: disable=protected-access
target = os.path.join("downloads", el.path[1:])
if os.path.exists(target):
......@@ -94,7 +94,7 @@ def import_xml(filename, rerun=False, interactive=True):
if not rerun:
for _, el in enumerate(files.values()):
r = el.insert(unique=False)
el.insert(unique=False)
else:
for _, el in enumerate(files.values()):
el.id = None
......@@ -122,8 +122,12 @@ def defineParser():
return parser
if __name__ == "__main__":
def main():
parser = defineParser()
args = parser.parse_args()
import_xml(args.file, args.rerun)
if __name__ == "__main__":
main()
......@@ -90,9 +90,9 @@ def combine_ignore_files(caosdbignore: str, localignore: str, dirname=None) -> s
tmp = NamedTemporaryFile(delete=False, mode="w",
dir=dirname, prefix=".caosdbignore")
with open(caosdbignore, "r") as base:
with open(caosdbignore, "r", encoding="utf-8") as base:
tmp.write(base.read())
with open(localignore, "r") as local:
with open(localignore, "r", encoding="utf-8") as local:
tmp.write(local.read())
tmp.close()
return tmp.name
......@@ -122,7 +122,7 @@ def compile_file_list(caosdbignore: str, localpath: str) -> list[str]:
current_ignore = caosdbignore
non_ignored_files = []
ignore_files: list[tuple[str, str]] = []
for root, dirs, files in os.walk(localpath):
for root, _, files in os.walk(localpath):
# remove local ignore files that do no longer apply to the current subtree (branch switch)
while len(ignore_files) > 0 and not root.startswith(ignore_files[-1][0]):
os.remove(ignore_files[-1][1])
......@@ -375,9 +375,7 @@ exclude is given preference over include.
logger.addHandler(logging.StreamHandler(stream=sys.stdout))
logger.setLevel(logging.INFO)
con = db.get_connection()
con.timeout = float(args.timeout)
con._login()
db.configure_connection(timeout=float(args.timeout))
loadpath(
path=args.path,
......
......@@ -27,7 +27,7 @@ from copy import deepcopy
# actually
# [deprecated](https://docs.python.org/3/library/typing.html#typing.List), so
# remove this, when we drop support for old Python versions.
from typing import List
from typing import List, Optional
import linkahead as db
import linkahead.common.models as models
......@@ -251,7 +251,7 @@ class DataModel(dict):
for par in entity.get_parents():
if par.name.lower() == valid_e.name.lower():
par._wrap(valid_e)
par.id = valid_e.id
def collect_entities(self):
""" Collects all entities: explicitly defined RecordTypes and
......@@ -267,7 +267,8 @@ class DataModel(dict):
return list(all_ents.values())
def get_deep(self, name: str, visited_props: dict = None, visited_parents: set = None):
def get_deep(self, name: str, visited_props: Optional[dict] = None,
visited_parents: Optional[set] = None):
"""Attempt to resolve references for the given ``name``.
The returned entity has all the properties it inherits from its ancestry and all properties
......@@ -296,7 +297,7 @@ class DataModel(dict):
if parent.name in visited_parents:
continue
visited_parents.add(parent.name)
parent_importance = importances.get(parent._flags.get("inheritance"), 999)
parent_importance = importances.get(parent.flags.get("inheritance"), 999)
if parent.name in self:
deep_parent = self.get_deep(parent.name, # visited_props=visited_props,
visited_parents=visited_parents
......@@ -305,7 +306,7 @@ class DataModel(dict):
for prop in deep_parent.properties:
importance = importances[deep_parent.get_importance(prop.name)]
if (importance <= parent_importance
and prop.name not in [prop.name for prop in entity.properties]):
and prop.name not in [p.name for p in entity.properties]):
entity.add_property(prop)
else:
print(f"Referenced parent \"{parent.name}\" not found in data model.")
......
......@@ -36,11 +36,8 @@ Parents can be provided under the 'inherit_from_xxxx' keywords. The value needs
to be a list with the names. Here, NO NEW entities can be defined.
"""
import argparse
import json
import re
import sys
from typing import List, Optional
from warnings import warn
from typing import List, Optional, Union
import jsonref
import jsonschema
......@@ -165,7 +162,7 @@ debug : bool, optional
def parse_model_from_json_schema(
filename: str,
top_level_recordtype: bool = True,
types_for_missing_array_items: dict = {},
types_for_missing_array_items: Optional[dict] = None,
ignore_unspecified_array_items: bool = False,
existing_model: Optional[dict] = None
):
......@@ -207,6 +204,9 @@ def parse_model_from_json_schema(
about the limitations of the current implementation.
"""
if types_for_missing_array_items is None:
types_for_missing_array_items = {}
if existing_model is not None:
raise NotImplementedError("Adding to an existing model is not implemented yet.")
......@@ -250,7 +250,7 @@ debug : bool, optional
out : data_model.DataModel
The created DataModel
"""
with open(filename, 'r') as outfile:
with open(filename, 'r', encoding="utf-8") as outfile:
ymlmodel = yaml.load(outfile, Loader=SafeLineLoader)
return self._create_model_from_dict(ymlmodel, existing_model=existing_model)
......@@ -286,6 +286,12 @@ debug : bool, optional
existing_model : dict, optional
An existing model to which the created model shall be added.
Raises
------
ValueError
If model_dict is not a dict, model_dict["extern"] contains an
unknown entry, or there is an unknown entry in model_dict.
Returns
-------
out : data_model.DataModel
......@@ -320,7 +326,7 @@ debug : bool, optional
f"FIND {role} WITH name=\"{name}\"", unique=True)
break
else:
raise Exception("Did not find {}".format(name))
raise ValueError("Did not find {}".format(name))
ymlmodel.pop("extern")
......@@ -636,7 +642,7 @@ debug : bool, optional
"""
for key, value in self.model.items():
for _, value in self.model.items():
if isinstance(value, db.Property):
dtype = value.datatype
......@@ -709,8 +715,11 @@ class JsonSchemaParser(Parser):
# @date 2022-02-17
# @review Timm Fitschen 2023-05-25
def __init__(self, types_for_missing_array_items={}, ignore_unspecified_array_items=False):
def __init__(self, types_for_missing_array_items=None,
ignore_unspecified_array_items=False):
super().__init__()
if types_for_missing_array_items is None:
types_for_missing_array_items = {}
self.types_for_missing_array_items = types_for_missing_array_items
self.ignore_unspecified_array_items = ignore_unspecified_array_items
......@@ -734,12 +743,14 @@ class JsonSchemaParser(Parser):
# @author Florian Spreckelsen
# @date 2022-02-17
# @review Timm Fitschen 2023-05-25
with open(filename, 'r') as schema_file:
with open(filename, 'r', encoding="utf-8") as schema_file:
model_dict = jsonref.load(schema_file)
return self._create_model_from_dict(model_dict, top_level_recordtype=top_level_recordtype)
def _create_model_from_dict(self, model_dict: [dict, List[dict]], top_level_recordtype: bool = True):
# ToDo: Fix https://gitlab.indiscale.com/caosdb/src/caosdb-advanced-user-tools/-/issues/139
# and remove pylint disable
def _create_model_from_dict(self, model_dict: Union[dict, List[dict]], top_level_recordtype: bool = True): # pylint: disable=arguments-renamed
"""Parse a dictionary and return the Datamodel created from it.
The dictionary was typically created from the model definition in a json schema file.
......@@ -1019,7 +1030,7 @@ class JsonSchemaParser(Parser):
return returns
if __name__ == "__main__":
def main():
parser = argparse.ArgumentParser(description=__doc__,
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("data_model",
......@@ -1042,3 +1053,7 @@ if __name__ == "__main__":
print(model)
if args.sync:
model.sync_data_model(noquestion=args.noquestion)
if __name__ == "__main__":
main()
......@@ -30,10 +30,7 @@
# D. Hornung 2019-02
# T. Fitschen 2019-02
import argparse
import glob
import os
import re
import yaml
......@@ -71,31 +68,30 @@ description:
"""
def get_header(filename, add_header=False):
"""Open an md file identified by filename and read out the yaml
header.
def get_header(filename, add_header_to_file=False):
"""Open an md file identified by filename and read out the yaml header.
filename can also be a folder. In this case folder/README.md will be used for
getting the header.
filename can also be a folder. In this case folder/README.md will be used
for getting the header.
If a header is found a tuple is returned: (first yaml header line index, last+1
yaml header line index, header)
If a header is found a tuple is returned: (first yaml header line index,
last+1 yaml header line index, header)
Otherwise, if `add_header` is True, a header is added and the function is called
again.
Otherwise, if `add_header_to_file` is True, a header is added and the
function is called again.
The header is normalized in the following way:
- If the value to a key is a string, a list with that string as only element is
returned.
- If the value to a key is a string, a list with that string as only
element is returned.
From https://pandoc.org/MANUAL.html:
A YAML metadata block is a valid YAML object, delimited by a line of three
hyphens (---) at the top and a line of three hyphens (---) or three dots (...)
at the bottom. A YAML metadata block may occur anywhere in the document, but if
it is not at the beginning, it must be preceded by a blank line.
hyphens (---) at the top and a line of three hyphens (---) or three
dots (...) at the bottom. A YAML metadata block may occur anywhere in the
document, but if it is not at the beginning, it must be preceded by a blank
line.
"""
if os.path.isdir(filename):
......@@ -106,16 +102,14 @@ it is not at the beginning, it must be preceded by a blank line.
if not os.path.exists(filename):
raise MetadataFileMissing(filename)
with open(filename) as f:
with open(filename, encoding="utf-8") as f:
textlines = f.readlines()
state = 0
found_0 = -1
found_1 = -1
found_2 = -1
for i, line in enumerate(textlines):
if len(line) == 1 and state in {-1, 0}:
found_0 = i
state = 0
continue
if line.rstrip() == "---" and state == 0:
......@@ -144,14 +138,14 @@ it is not at the beginning, it must be preceded by a blank line.
try:
yaml_part = yaml.load("\n".join(headerlines), Loader=yaml.BaseLoader)
except yaml.scanner.ScannerError as e:
raise ParseErrorsInHeader(filename, e)
raise ParseErrorsInHeader(filename, e) from e
# except yaml.error.MarkedYAMLError as e:
# raise NoValidHeader(filename)
if not isinstance(yaml_part, dict):
raise NoValidHeader(filename)
return (found_1, found_2, clean_header(yaml_part))
if not add_header:
if not add_header_to_file:
raise NoValidHeader(filename)
else:
print("Adding header in: {fn}".format(fn=filename))
......@@ -171,7 +165,7 @@ def save_header(filename, header_data):
if os.path.isdir(filename):
filename = os.path.join(filename, "README.md")
with open(filename) as f:
with open(filename, encoding="utf-8") as f:
textlines = f.readlines()
while textlines[header_data[0]] != "...\n":
......@@ -184,7 +178,7 @@ def save_header(filename, header_data):
default_flow_style=False,
allow_unicode=True))
with open(filename, "w") as f:
with open(filename, "w", encoding="utf-8") as f:
f.writelines(textlines)
......@@ -202,7 +196,7 @@ def add_header(filename, header_dict=None):
filename = os.path.join(filename, "README.md")
if os.path.exists(filename):
with open(filename) as f:
with open(filename, encoding="utf-8") as f:
textlines = f.readlines()
else:
textlines = ""
......@@ -214,7 +208,7 @@ def add_header(filename, header_dict=None):
default_flow_style=False,
allow_unicode=True) + "...\n"
with open(filename, "w") as f:
with open(filename, "w", encoding="utf-8") as f:
f.write(localheader)
f.writelines(textlines)
......
......@@ -156,8 +156,8 @@ class WithREADME(object):
for f in sublist]
if len(flat_list) == 0:
LOGGER.warn("ATTENTION: the field {} does not reference any "
"known files".format(field.key))
LOGGER.warning(f"ATTENTION: the field {field.key} does not"
" reference any known files")
self.attached_filenames.extend(flat_list) # pylint: disable=no-member
......
......@@ -25,6 +25,10 @@ from linkahead.common.utils import uuid
from .cfood import (assure_has_description, assure_has_parent,
assure_property_is)
# The pylint warnings triggered in this file are ignored, as this code is
# assumed to be deprecated in the near future. Should this change, they need
# to be reevaluated.
class EntityMapping(object):
"""
......@@ -39,9 +43,9 @@ class EntityMapping(object):
self.to_target = {}
def add(self, target, existing):
if target._cuid is None:
target._cuid = str(uuid())
self.to_existing[str(target._cuid)] = existing
if target.cuid is None:
target._cuid = str(uuid()) # pylint: disable=protected-access
self.to_existing[str(target.cuid)] = existing
self.to_target[existing.id] = target
......@@ -103,11 +107,11 @@ def update_structure(em, updating: db.Container, target_structure: db.Record):
A record which may have references to other records. Must be a DAG.
"""
if target_structure._cuid in em.to_existing:
if target_structure.cuid in em.to_existing:
update_matched_entity(em,
updating,
target_structure,
em.to_existing[target_structure._cuid])
em.to_existing[target_structure.cuid])
for prop in target_structure.get_properties():
if prop.is_reference(server_retrieval=True):
......@@ -134,8 +138,8 @@ def update_matched_entity(em, updating, target_record, existing_record):
# check for remaining property types
if isinstance(prop.value, db.Entity):
if prop.value._cuid in em.to_existing:
value = em.to_existing[prop.value._cuid].id
if prop.value.cuid in em.to_existing:
value = em.to_existing[prop.value.cuid].id
else:
value = prop.value.id
else:
......
......@@ -28,6 +28,7 @@ class SuppressKnown(logging.Filter):
"""
def __init__(self, db_file=None):
super().__init__()
if db_file:
self.db_file = db_file
else:
......