Skip to content

Commit

Permalink
Update conf.py
Browse files Browse the repository at this point in the history
  • Loading branch information
romainsacchi committed Oct 6, 2023
1 parent 1c607f4 commit dfad6cb
Show file tree
Hide file tree
Showing 9 changed files with 780 additions and 106 deletions.
747 changes: 676 additions & 71 deletions dev/test_custom_scenarios.ipynb

Large diffs are not rendered by default.

6 changes: 3 additions & 3 deletions premise/activity_maps.py
Original file line number Diff line number Diff line change
Expand Up @@ -269,8 +269,8 @@ def generate_sets_from_filters(self, filtr: dict, database=None) -> dict:

# check if all keys have values
# if not, print warning
for key, val in mapping.items():
if not val:
print(f"Warning: No activities found for {key} -- revise mapping.")
#for key, val in mapping.items():
# if not val:
# print(f"Warning: No activities found for {key} -- revise mapping.")

return mapping
16 changes: 13 additions & 3 deletions premise/ecoinvent_modification.py
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,7 @@
print_version,
warning_about_biogenic_co2,
write_brightway2_database,
create_scenario_list
)

DIR_CACHED_DB = DATA_DIR / "cache"
Expand Down Expand Up @@ -1589,6 +1590,10 @@ def write_db_to_simapro(self, filepath: str = None):
self.generate_change_report()

def write_datapackage(self, name: str = f"datapackage_{date.today()}"):

if not isinstance(name, str):
raise TypeError("`name` should be a string.")

cached_inventories = self.__find_cached_inventories(self.source)

if not cached_inventories:
Expand Down Expand Up @@ -1624,17 +1629,22 @@ def write_datapackage(self, name: str = f"datapackage_{date.today()}"):
modified_datasets=self.modified_datasets,
)

if hasattr(self, "datapackages"):
list_scenarios = create_scenario_list(self.scenarios, self.datapackages)
else:
list_scenarios = create_scenario_list(self.scenarios)

df, extra_inventories = generate_scenario_factor_file(
origin_db=self.database,
scenarios=self.scenarios,
db_name=name,
version=self.version,
scenario_list=list_scenarios,
)

cached_inventories.extend(extra_inventories)
list_scenarios = ["original"] + [
f"{s['model']} - {s['pathway']} - {s['year']}" for s in self.scenarios
]

list_scenarios = ["original"] + list_scenarios

build_datapackage(
df=df,
Expand Down
2 changes: 2 additions & 0 deletions premise/electricity.py
Original file line number Diff line number Diff line change
Expand Up @@ -400,6 +400,7 @@ def create_new_markets_low_voltage(self) -> None:
names=ecoinvent_technologies[technology],
reference_prod="electricity",
unit="kilowatt hour",
exact_match=True,
)
)
counter += 1
Expand Down Expand Up @@ -932,6 +933,7 @@ def create_new_markets_high_voltage(self) -> None:
names=ecoinvent_technologies[technology],
reference_prod="electricity",
unit="kilowatt hour",
exact_match=True,
)
)
counter += 1
Expand Down
58 changes: 42 additions & 16 deletions premise/export.py
Original file line number Diff line number Diff line change
Expand Up @@ -546,16 +546,31 @@ def build_datapackage(df, inventories, list_scenarios, ei_version, name):
"name": "biosphere3",
},
]
package.descriptor["scenarios"] = [
{
"name": s,
"description": f"Prospective db, "
f"based on {s.split(' - ')[0].upper()}, "
f"pathway {s.split(' - ')[1].upper()}, "
f"for the year {s.split(' - ')[2]}.",
}
for s in list_scenarios[1:]
]

if len(list_scenarios[0]) == 3:
package.descriptor["scenarios"] = [
{
"name": s,
"description": f"Prospective db, "
f"based on {s.split(' - ')[0].upper()}, "
f"pathway {s.split(' - ')[1].upper()}, "
f"for the year {s.split(' - ')[-1]}.",
}
for s in list_scenarios[1:]
]
else:
package.descriptor["scenarios"] = [
{
"name": s,
"description": f"Prospective db, "
f"based on {s.split(' - ')[0].upper()}, "
f"pathway {s.split(' - ')[1].upper()}, "
f"for the year {s.split(' - ')[2]}, and "
f"external scenario {' '.join(s.split(' - ')[3:])}.",
}
for s in list_scenarios[1:]
]

package.descriptor["keywords"] = [
"ecoinvent",
"scenario",
Expand All @@ -580,19 +595,31 @@ def build_datapackage(df, inventories, list_scenarios, ei_version, name):
print(f"Data package saved at {DIR_DATAPACKAGE / f'{name}.zip'}")


def generate_scenario_factor_file(origin_db, scenarios, db_name, version):
def generate_scenario_factor_file(
origin_db: list,
scenarios: dict,
db_name: str,
version: str,
scenario_list: list = None,
):
"""
Generate a scenario factor file from a list of databases
:param origin_db: the original database
:param scenarios: a list of databases
:param db_name: the name of the database
:param version: the version of ecoinvent
:param scenario_list: a list of external scenarios
"""

print("Building scenario factor file...")

# create the dataframe
df, new_db, list_unique_acts = generate_scenario_difference_file(
origin_db=origin_db, scenarios=scenarios, db_name=db_name, version=version
origin_db=origin_db,
scenarios=scenarios,
db_name=db_name,
version=version,
scenario_list=scenario_list,
)

original = df["original"]
Expand Down Expand Up @@ -643,7 +670,7 @@ def generate_new_activities(args):


def generate_scenario_difference_file(
db_name, origin_db, scenarios, version
db_name,origin_db, scenarios, version, scenario_list
) -> tuple[DataFrame, list[dict], set[Any]]:
"""
Generate a scenario difference file for a given list of databases
Expand All @@ -666,9 +693,8 @@ def generate_scenario_difference_file(
acts_ind = dict(enumerate(list_acts))
acts_ind_rev = {v: k for k, v in acts_ind.items()}

list_scenarios = ["original"] + [
f"{s['model']} - {s['pathway']} - {s['year']}" for s in scenarios
]
list_scenarios = ["original"] + scenario_list

list_dbs = [origin_db] + [a["database"] for a in scenarios]

matrices = {
Expand Down
2 changes: 1 addition & 1 deletion premise/external_data_validation.py
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ def check_inventories(
and (i[0], i[1]) not in list_datasets
]

raise f"The following datasets are not in the inventory data: {list_missing_datasets}"
raise AssertionError(f"The following datasets are not in the inventory data: {list_missing_datasets}") from e

# flag imported inventories
for i, dataset in enumerate(inventory_data):
Expand Down
25 changes: 15 additions & 10 deletions premise/fuels.py
Original file line number Diff line number Diff line change
Expand Up @@ -441,17 +441,22 @@ def find_suppliers(

# while we do not find a result
while len(suppliers) == 0:
suppliers = list(
get_suppliers_of_a_region(
database=self.database,
locations=possible_locations[counter],
names=[name] if isinstance(name, str) else name,
reference_prod=ref_prod,
unit=unit,
exclude=exclude,
try:
suppliers = list(
get_suppliers_of_a_region(
database=self.database,
locations=possible_locations[counter],
names=[name] if isinstance(name, str) else name,
reference_prod=ref_prod,
unit=unit,
exclude=exclude,
)
)
)
counter += 1
counter += 1
except IndexError as err:
raise IndexError(
f"Could not find any supplier for {name} {ref_prod} in {possible_locations}."
) from err

suppliers = [s for s in suppliers if s] # filter out empty lists

Expand Down
13 changes: 11 additions & 2 deletions premise/transformation.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,7 @@ def get_suppliers_of_a_region(
reference_prod: str,
unit: str,
exclude: List[str] = None,
exact_match: bool = False,
) -> filter:
"""
Return a list of datasets, for which the location, name,
Expand All @@ -65,8 +66,16 @@ def get_suppliers_of_a_region(
:param exclude: list of terms to exclude
"""

filters = [
ws.either(*[ws.equals("name", supplier) for supplier in names]),
if exact_match:
filters = [
ws.either(*[ws.equals("name", supplier) for supplier in names]),
]
else:
filters = [
ws.either(*[ws.contains("name", supplier) for supplier in names]),
]

filters += [
ws.either(*[ws.equals("location", loc) for loc in locations]),
ws.contains("reference product", reference_prod),
ws.equals("unit", unit),
Expand Down
17 changes: 17 additions & 0 deletions premise/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -319,3 +319,20 @@ def delete_log():
log_path = Path.cwd() / "premise.log"
if log_path.exists():
log_path.unlink()


def create_scenario_list(scenarios: list, datapackages: list = None) -> list:
    """
    Build human-readable scenario labels of the form
    ``"{model} - {pathway} - {year}"``, optionally extended with
    ``" - {external model name} - {external scenario}"`` for each
    external scenario attached to a scenario.

    :param scenarios: list of scenario dicts, each with at least the keys
        ``model``, ``pathway`` and ``year``; may also carry an
        ``"external scenarios"`` list of scenario names.
    :param datapackages: optional list of datapackage objects, aligned with
        the external scenarios; ``datapackages[e].descriptor["name"]`` is
        used as the external model name when available.
    :return: list of label strings, one per entry in ``scenarios``.
    """
    list_scenarios = [
        f"{s['model']} - {s['pathway']} - {s['year']}" for s in scenarios
    ]

    # Preserve the original all-or-nothing behaviour: external suffixes are
    # only appended when the *first* scenario declares external scenarios.
    # Guard against an empty `scenarios` list (original raised IndexError).
    if scenarios and "external scenarios" in scenarios[0]:
        external_model_name = "External model"
        for idx, scenario in enumerate(scenarios):
            # `.get` avoids a KeyError if a later scenario lacks the key.
            for e, ext_scenario in enumerate(
                scenario.get("external scenarios", [])
            ):
                if datapackages is not None:
                    external_model_name = datapackages[e].descriptor.get(
                        "name", "External model"
                    )
                list_scenarios[idx] += f" - {external_model_name} - {ext_scenario}"

    return list_scenarios

0 comments on commit dfad6cb

Please sign in to comment.