Skip to content
Snippets Groups Projects

ENH: set up logging and reporting for server-side execution

Files

@@ -22,6 +22,7 @@
@@ -22,6 +22,7 @@
# ** end header
# ** end header
#
#
 
import logging
import os
import os
import pytest
import pytest
from subprocess import run
from subprocess import run
@@ -38,7 +39,7 @@ DATADIR = os.path.join(os.path.dirname(__file__), "test_data",
@@ -38,7 +39,7 @@ DATADIR = os.path.join(os.path.dirname(__file__), "test_data",
"extroot", "use_case_simple_presentation")
"extroot", "use_case_simple_presentation")
def test_complete_crawler(clear_database):
def test_complete_crawler(clear_database, caplog):
# Setup the data model:
# Setup the data model:
model = parser.parse_model_from_yaml(os.path.join(DATADIR, "model.yml"))
model = parser.parse_model_from_yaml(os.path.join(DATADIR, "model.yml"))
model.sync_data_model(noquestion=True, verbose=False)
model.sync_data_model(noquestion=True, verbose=False)
@@ -56,16 +57,17 @@ def test_complete_crawler(clear_database):
@@ -56,16 +57,17 @@ def test_complete_crawler(clear_database):
forceAllowSymlinks=False)
forceAllowSymlinks=False)
# test that a bad value for "remove_prefix" leads to runtime error
# test that a bad value for "remove_prefix" leads to runtime error
with pytest.raises(RuntimeError) as re:
caplog.set_level(logging.DEBUG, logger="caoscrawler.crawl")
crawler_main(
assert 1 == crawler_main(
crawled_directory_path=os.path.join(DATADIR),
crawled_directory_path=os.path.join(DATADIR),
cfood_file_name=os.path.join(DATADIR, "cfood.yml"),
cfood_file_name=os.path.join(DATADIR, "cfood.yml"),
identifiables_definition_file=os.path.join(DATADIR, "identifiables.yml"),
identifiables_definition_file=os.path.join(DATADIR, "identifiables.yml"),
provenance_file=os.path.join(DATADIR, "provenance.yml"),
provenance_file=os.path.join(DATADIR, "provenance.yml"),
dry_run=False,
dry_run=False,
remove_prefix="sldkfjsldf",
remove_prefix="sldkfjsldf",
)
)
assert "path does not start with the prefix" in str(re.value)
assert "path does not start with the prefix" in caplog.text
 
caplog.clear()
crawler_main(
crawler_main(
crawled_directory_path=os.path.join(DATADIR),
crawled_directory_path=os.path.join(DATADIR),
Loading