Skip to content

Commit

Permalink
Code formatted with black.
Browse files Browse the repository at this point in the history
  • Loading branch information
stefancoe committed Dec 12, 2023
1 parent c636cfb commit 45f74ea
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 13 deletions.
6 changes: 3 additions & 3 deletions activitysim/core/configuration/top.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ class OutputTables(PydanticBase):
h5_store: bool = False
"""Write tables into a single HDF5 store instead of individual CSVs."""

file_type: str = 'csv'
file_type: str = "csv"
"""
Specifies the file type for output tables. Options are limited to 'csv',
'h5' or 'parquet'. Only applied if h5_store is set to False."""
Expand Down Expand Up @@ -149,12 +149,12 @@ class OutputTables(PydanticBase):
If omitted, all tables are written out and no decoding will be
applied to any output tables.
"""

@validator("file_type")
def method_is_valid(cls, method: str) -> str:
    """Validate the ``file_type`` setting.

    Parameters
    ----------
    method : str
        Candidate value for the ``file_type`` setting.

    Returns
    -------
    str
        The validated value, returned unchanged.

    Raises
    ------
    ValueError
        If ``method`` is not one of the supported output formats.
    """
    # Only these formats are handled by the table-writing step.
    allowed_set = {"csv", "h5", "parquet"}
    if method not in allowed_set:
        raise ValueError(f"must be in {allowed_set}, got '{method}'")
    return method
Expand Down
20 changes: 10 additions & 10 deletions activitysim/core/steps/output.py
Original file line number Diff line number Diff line change
Expand Up @@ -227,13 +227,13 @@ def write_data_dictionary(state: workflow.State) -> None:
@workflow.step
def write_tables(state: workflow.State) -> None:
"""
Write pipeline tables as csv or parquet files (in output directory) as specified
by output_tables list in settings file. Output to parquet or a single h5 file is
also supported.
Write pipeline tables as csv or parquet files (in output directory) as specified
by output_tables list in settings file. Output to parquet or a single h5 file is
also supported.
'h5_store' defaults to False, which means the output will be written out to csv.
'file_type' defaults to 'csv' but can also be used to specify 'parquet' or 'h5'.
When 'h5_store' is set to True, 'file_type' is ignored and the outputs are written to h5.
'h5_store' defaults to False, which means the output will be written out to csv.
'file_type' defaults to 'csv' but can also be used to specify 'parquet' or 'h5'.
When 'h5_store' is set to True, 'file_type' is ignored and the outputs are written to h5.
'output_tables' can specify either a list of output tables to include or to skip.
If no output_tables list is specified, then all checkpointed tables will be written.
Expand Down Expand Up @@ -400,18 +400,18 @@ def map_func(x):
):
dt = dt.drop([f"_original_{lookup_col}"])

if h5_store or file_type == 'h5':
if h5_store or file_type == "h5":
file_path = state.get_output_file_path("%soutput_tables.h5" % prefix)
dt.to_pandas().to_hdf(
str(file_path), key=table_name, mode="a", format="fixed"
)
else:

else:
file_name = f"{prefix}{table_name}.{file_type}"
file_path = state.get_output_file_path(file_name)

# include the index if it has a name or is a MultiIndex
if file_type =='csv':
if file_type == "csv":
csv.write_csv(dt, file_path)
else:
parquet.write_table(dt, file_path)

0 comments on commit 45f74ea

Please sign in to comment.