diff --git a/src/caoscrawler/conv_impl/spss.py b/src/caoscrawler/conv_impl/spss.py
index 5bd980728b61cfc65818ffb833018ce036fc5a9d..5a4abd4f7230f6b87cb932e4e26d20911ea06e42 100644
--- a/src/caoscrawler/conv_impl/spss.py
+++ b/src/caoscrawler/conv_impl/spss.py
@@ -56,12 +56,6 @@ class SPSSConverter(converters.TableConverter):
         df = pd.io.spss.read_spss(element.path, dtype_backend="pyarrow")
         dtypes = read_column_types(element.path)
 
-        # if element.path.endswith(".sav"):
-        #     sav_df, meta = pyreadstat.read_sav(element.path, metadataonly=True)
-        # from IPython import embed
-        # embed()
-        df.drop(range(15, len(df.index)), inplace=True)
-
         # Fix datetime columns
         for name, dtype in dtypes.items():
             if dtype != "DATETIME":
@@ -70,9 +64,6 @@ class SPSSConverter(converters.TableConverter):
                 col.fillna(np.nan, inplace=True)
                 col.replace([np.nan], [None], inplace=True)
 
-        # from IPython import embed
-        # embed()
-
         return self._children_from_dataframe(df)
 
 
@@ -216,21 +207,6 @@ directory: # corresponds to the directory given to the crawler
 # python3 -m caosadvancedtools.models.parser datamodel.yaml --sync
 """
 
-# #
-# # Code for creating enum records:
-# #
-# """
-# for name, values in enums.items():
-#     for line in f"""
-# cont = db.Container()
-# for value in {repr(values)}:
-#     rec = db.Record(name=value).add_parent(name="{name}")
-#     cont.append(rec)
-# cont.insert()
-# """.splitlines(keepends=True):
-#         if line.strip():
-#             output += f"# {line}"
-#     output += "#\n"
-
     # Actual datamodel
     output += """