From 44d1c052589b7741ba98a26f570a46d230c2d8f8 Mon Sep 17 00:00:00 2001 From: romainsacchi Date: Thu, 11 Jul 2024 12:49:38 +0000 Subject: [PATCH] Black reformating --- premise/data_collection.py | 28 +-- premise/report.py | 32 +++- premise/transport.py | 49 +++-- premise/transport_new.py | 380 +++++++++++++++++++++---------------- premise/validation.py | 17 +- 5 files changed, 294 insertions(+), 212 deletions(-) diff --git a/premise/data_collection.py b/premise/data_collection.py index afc46101..6aa4202b 100644 --- a/premise/data_collection.py +++ b/premise/data_collection.py @@ -24,7 +24,6 @@ from .geomap import Geomap from .marginal_mixes import consequential_method - IAM_ELEC_VARS = VARIABLES_DIR / "electricity_variables.yaml" IAM_FUELS_VARS = VARIABLES_DIR / "fuels_variables.yaml" IAM_BIOMASS_VARS = VARIABLES_DIR / "biomass_variables.yaml" @@ -216,6 +215,7 @@ def get_gains_EU_data() -> xr.DataArray: return array + def fix_efficiencies(data: xr.DataArray, min_year: int) -> xr.DataArray: """ Fix the efficiency data to ensure plausibility. @@ -377,11 +377,11 @@ def __init__( other_vars = self.__get_iam_variable_labels( IAM_OTHER_VARS, variable="iam_aliases" ) - + roadfreight_prod_vars = self.__get_iam_variable_labels( IAM_TRANS_ROADFREIGHT_VARS, variable="iam_aliases" ) - + roadfreight_energy_vars = self.__get_iam_variable_labels( IAM_TRANS_ROADFREIGHT_VARS, variable="energy_use_aliases" ) @@ -389,7 +389,7 @@ def __init__( railfreight_prod_vars = self.__get_iam_variable_labels( IAM_TRANS_RAILFREIGHT_VARS, variable="iam_aliases" ) - + railfreight_energy_vars = self.__get_iam_variable_labels( IAM_TRANS_RAILFREIGHT_VARS, variable="energy_use_aliases" ) @@ -463,7 +463,7 @@ def __init__( fuel_prod_vars if "liquid fossil fuels" in fuel_prod_vars else None ), ) - + self.data = data self.regions = data.region.values.tolist() @@ -613,39 +613,39 @@ def __init__( normalize=False, system_model="cutoff", ) - + self.roadfreight_markets = self.__fetch_market_data( data=data, input_vars=roadfreight_prod_vars, - system_model="cutoff", # TODO: check how to handle this for consequencial + system_model="cutoff", # TODO: check how to handle this for consequencial sector="transport", ) - + self.railfreight_markets = self.__fetch_market_data( data=data, input_vars=railfreight_prod_vars, - system_model="cutoff", # TODO: check how to handle this for consequencial + system_model="cutoff", # TODO: check how to handle this for consequencial sector="transport", ) self.passenger_car_markets = self.__fetch_market_data( data=data, input_vars=passenger_cars_prod_vars, - system_model="cutoff", # TODO: check how to handle this for consequencial + system_model="cutoff", # TODO: check how to handle this for consequencial sector="transport", ) self.bus_markets = self.__fetch_market_data( data=data, input_vars=bus_prod_vars, - system_model="cutoff", # TODO: check how to handle this for consequencial + system_model="cutoff", # TODO: check how to handle this for consequencial sector="transport", ) self.two_wheelers_markets = self.__fetch_market_data( data=data, input_vars=two_wheelers_prod_vars, - system_model="cutoff", # TODO: check how to handle this for consequencial + system_model="cutoff", # TODO: check how to handle this for consequencial sector="transport", ) @@ -761,13 +761,13 @@ def __init__( production_labels=dac_prod_vars, energy_labels=dac_electricity_vars, ) - + self.roadfreight_efficiencies = self.get_iam_efficiencies( data=data, production_labels=roadfreight_prod_vars, energy_labels=roadfreight_energy_vars, ) - + 
self.railfreight_efficiencies = self.get_iam_efficiencies( data=data, production_labels=railfreight_prod_vars, diff --git a/premise/report.py b/premise/report.py index 593c3b8c..5073cf15 100644 --- a/premise/report.py +++ b/premise/report.py @@ -188,16 +188,24 @@ def fetch_data( else None ), "Transport (two-wheelers)": ( - iam_data.production_volumes if hasattr(iam_data, "two_wheelers_markets") else None + iam_data.production_volumes + if hasattr(iam_data, "two_wheelers_markets") + else None ), "Transport (two-wheelers) - eff": ( - iam_data.two_wheelers_efficiencies if hasattr(iam_data, "two_wheelers_efficiencies") else None + iam_data.two_wheelers_efficiencies + if hasattr(iam_data, "two_wheelers_efficiencies") + else None ), "Transport (cars)": ( - iam_data.production_volumes if hasattr(iam_data, "passenger_car_markets") else None + iam_data.production_volumes + if hasattr(iam_data, "passenger_car_markets") + else None ), "Transport (cars) - eff": ( - iam_data.passenger_car_efficiencies if hasattr(iam_data, "passenger_car_efficiencies") else None + iam_data.passenger_car_efficiencies + if hasattr(iam_data, "passenger_car_efficiencies") + else None ), "Transport (buses)": ( iam_data.production_volumes if hasattr(iam_data, "bus_markets") else None @@ -206,16 +214,24 @@ def fetch_data( iam_data.bus_efficiencies if hasattr(iam_data, "bus_efficiencies") else None ), "Transport (trucks)": ( - iam_data.production_volumes if hasattr(iam_data, "roadfreight_markets") else None + iam_data.production_volumes + if hasattr(iam_data, "roadfreight_markets") + else None ), "Transport (trucks) - eff": ( - iam_data.roadfreight_efficiencies if hasattr(iam_data, "roadfreight_efficiencies") else None + iam_data.roadfreight_efficiencies + if hasattr(iam_data, "roadfreight_efficiencies") + else None ), "Transport (trains)": ( - iam_data.production_volumes if hasattr(iam_data, "railfreight_markets") else None + iam_data.production_volumes + if hasattr(iam_data, "railfreight_markets") + else None ), "Transport (trains) - eff": ( - iam_data.railfreight_efficiencies if hasattr(iam_data, "railfreight_efficiencies") else None + iam_data.railfreight_efficiencies + if hasattr(iam_data, "railfreight_efficiencies") + else None ), } diff --git a/premise/transport.py b/premise/transport.py index 2e4d8920..8eb36c08 100644 --- a/premise/transport.py +++ b/premise/transport.py @@ -11,12 +11,12 @@ import yaml from wurst import searching as ws +from .activity_maps import InventorySet from .filesystem_constants import DATA_DIR, IAM_OUTPUT_DIR +from .logger import create_logger from .transformation import BaseTransformation, IAMDataCollection from .utils import eidb_label, rescale_exchanges -from .activity_maps import InventorySet from .validation import CarValidation, TruckValidation -from .logger import create_logger logger = create_logger("transport") @@ -162,15 +162,10 @@ def create_fleet_vehicles( for region in regions: if year in arr.coords["year"].values: - region_size_fleet = arr.sel( - region=region, - year=year - ) + region_size_fleet = arr.sel(region=region, year=year) else: - region_size_fleet = arr.sel(region=region).interp( - year=year - ) + region_size_fleet = arr.sel(region=region).interp(year=year) total_km = region_size_fleet.sum() @@ -247,7 +242,9 @@ def create_fleet_vehicles( sizes = ["3.5t", "7.5t", "18t", "26t", "40t"] for size in sizes: total_size_km = region_size_fleet.sel( - variables=[v for v in arr.coords["variables"].values if size in v] + variables=[ + v for v in arr.coords["variables"].values if size 
in v + ] ).sum() if total_size_km > 0: @@ -279,7 +276,9 @@ def create_fleet_vehicles( "comment": f"Fleet-average vehicle for the year {year}, for the region {region}.", } - for pwt in [v for v in arr.coords["variables"].values if size in v]: + for pwt in [ + v for v in arr.coords["variables"].values if size in v + ]: indiv_km = region_size_fleet.sel( variables=pwt, ) @@ -366,7 +365,9 @@ def __init__( mapping = InventorySet(database=database, version=version, model=model) self.vehicle_map = mapping.generate_transport_map(transport_type=vehicle_type) self.rev_map = {next(iter(v)): k for k, v in self.vehicle_map.items()} - self.vehicle_fuel_map = mapping.generate_vehicle_fuel_map(transport_type=vehicle_type) + self.vehicle_fuel_map = mapping.generate_vehicle_fuel_map( + transport_type=vehicle_type + ) # check if vehicle map is empty for v in self.vehicle_map.values(): @@ -399,7 +400,9 @@ def create_vehicle_markets(self) -> list: new_datasets = [] - for ds in list(set([(v["name"], v["reference product"]) for v in vehicle_datasets])): + for ds in list( + set([(v["name"], v["reference product"]) for v in vehicle_datasets]) + ): new_datasets.extend( self.fetch_proxies( subset=vehicle_datasets, @@ -415,7 +418,9 @@ def create_vehicle_markets(self) -> list: self.database.append(new_ds) else: - print(f"Dataset {new_ds['name'], new_ds['location']} already in the database.") + print( + f"Dataset {new_ds['name'], new_ds['location']} already in the database." + ) fleet_act = [] @@ -445,7 +450,7 @@ def create_vehicle_markets(self) -> list: scenario=self.scenario, regions=self.regions, arr=arr, - mapping=self.vehicle_map + mapping=self.vehicle_map, ) ) @@ -529,12 +534,14 @@ def adjust_transport_efficiency(self, dataset): dataset = rescale_exchanges( dataset, scaling_factor, - technosphere_filters=[ws.either( - *[ - ws.contains("name", v) - for v in self.vehicle_fuel_map[variable] - ] - )], + technosphere_filters=[ + ws.either( + *[ + ws.contains("name", v) + for v in self.vehicle_fuel_map[variable] + ] + ) + ], ) if "log parameters" not in dataset: dataset["log parameters"] = {} diff --git a/premise/transport_new.py b/premise/transport_new.py index 29c539ea..25d25a4d 100644 --- a/premise/transport_new.py +++ b/premise/transport_new.py @@ -7,19 +7,20 @@ import copy import json -import numpy as np import uuid +from typing import Dict, List + +import numpy as np import xarray as xr import yaml -from typing import Dict, List +from wurst import searching as ws +from wurst.errors import NoResults from .filesystem_constants import DATA_DIR from .logger import create_logger from .transformation import BaseTransformation, IAMDataCollection from .utils import rescale_exchanges from .validation import TransportValidationNEW -from wurst import searching as ws -from wurst.errors import NoResults FILEPATH_VEHICLES_MAP = DATA_DIR / "transport" / "vehicles_map_NEW.yaml" @@ -28,6 +29,7 @@ # TODO: work on change report # TODO: work on scenario report + def _update_transport(scenario, version, system_model): transport = Transport( database=scenario["database"], @@ -39,18 +41,21 @@ def _update_transport(scenario, version, system_model): system_model=system_model, index=scenario.get("index"), ) - - if scenario["iam data"].roadfreight_markets is not None and scenario["iam data"].railfreight_markets is not None: + + if ( + scenario["iam data"].roadfreight_markets is not None + and scenario["iam data"].railfreight_markets is not None + ): transport.generate_datasets() transport.relink_datasets() 
transport.generate_transport_markets() transport.generate_unspecified_transport_vehicles() transport.relink_exchanges() transport.delete_inventory_datasets() - scenario["database"] = transport.database + scenario["database"] = transport.database scenario["cache"] = transport.cache scenario["index"] = transport.index - + validate = TransportValidationNEW( model=scenario["model"], scenario=scenario["pathway"], @@ -59,20 +64,26 @@ def _update_transport(scenario, version, system_model): database=transport.database, iam_data=scenario["iam data"], ) - + validate.run_transport_checks() - - elif scenario["iam data"].roadfreight_markets is not None and scenario["iam data"].railfreight_markets is None: + + elif ( + scenario["iam data"].roadfreight_markets is not None + and scenario["iam data"].railfreight_markets is None + ): print("No railfreight markets found in IAM data. Skipping freight transport.") - - elif scenario["iam data"].roadfreight_markets is None and scenario["iam data"].railfreight_markets is not None: + + elif ( + scenario["iam data"].roadfreight_markets is None + and scenario["iam data"].railfreight_markets is not None + ): print("No roadfreight markets found in IAM data. Skipping freight transport.") - + else: print("No transport markets found in IAM data. Skipping freight transport.") - + # TODO: if one transport market is not found the other one will still be updateable? - + return scenario @@ -81,9 +92,9 @@ def get_vehicles_mapping() -> Dict[str, dict]: Return a dictionary that contains mapping between `ecoinvent` terminology and `premise` terminology regarding size classes, powertrain types, etc. - + :return: dictionary to map terminology between carculator and ecoinvent - + """ with open(FILEPATH_VEHICLES_MAP, "r", encoding="utf-8") as stream: out = yaml.safe_load(stream) @@ -119,9 +130,9 @@ class Transport(BaseTransformation): It stores functions to generate transport datasets for all IAM regions, based on newly imported LCIs, incl. adjusting their efficiencies for a given year. It creates market processes for freight transport and relinks exchanges of - datasets using transport inventories. It deletes the old ecoinvent transport datasets. + datasets using transport inventories. It deletes the old ecoinvent transport datasets. """ - + def __init__( self, database: List[dict], @@ -143,112 +154,136 @@ def __init__( system_model, index, ) - + def generate_datasets(self): """ Function that creates inventories for IAM region based on additional imported inventories. 
""" - - roadfreight_dataset_names = self.iam_data.roadfreight_markets.coords["variables"].values.tolist() - railfreight_dataset_names = self.iam_data.railfreight_markets.coords["variables"].values.tolist() - freight_transport_dataset_names = roadfreight_dataset_names + railfreight_dataset_names - + + roadfreight_dataset_names = self.iam_data.roadfreight_markets.coords[ + "variables" + ].values.tolist() + railfreight_dataset_names = self.iam_data.railfreight_markets.coords[ + "variables" + ].values.tolist() + freight_transport_dataset_names = ( + roadfreight_dataset_names + railfreight_dataset_names + ) + new_datasets = [] old_datasets = [] changed_datasets_location = [] - + # change the location of the datasets to IAM regions for dataset in self.database: # only applicable for freight train datasets as road freight only exist for the region of RER if dataset["name"] in railfreight_dataset_names: if dataset["location"] != "RoW": - + region_mapping = self.region_to_proxy_dataset_mapping( name=dataset["name"], ref_prod=dataset["reference product"], ) - + ecoinv_region = dataset["location"] - + # change dataset location for IAM_reg, eco_reg in region_mapping.items(): if eco_reg == ecoinv_region: dataset["location"] = IAM_reg # change 'production' exchange location - for exchange in dataset['exchanges']: - if exchange['name'] == dataset['name']: - exchange['location'] = IAM_reg + for exchange in dataset["exchanges"]: + if exchange["name"] == dataset["name"]: + exchange["location"] = IAM_reg break - - changed_datasets_location.append([dataset["name"],dataset["location"]]) - + + changed_datasets_location.append( + [dataset["name"], dataset["location"]] + ) + # add to log self.write_log(dataset=dataset, status="updated") # add it to list of created datasets self.add_to_index(dataset) - + self.adjust_transport_efficiency(dataset) # create new datasets for IAM regions that are not covered yet, based on the "RoW" or "RER" dataset for region in self.iam_data.regions: for freight_transport_ds in freight_transport_dataset_names: - if [freight_transport_ds, region] not in changed_datasets_location and region != "World": - try: # RoW dataset to be used for other IAM regions - new_dataset = copy.deepcopy(ws.get_one(self.database, - ws.equals("name", freight_transport_ds), - ws.equals("location", "RoW") - ) - ) - except NoResults: # if no RoW dataset can be found use RER dataset - new_dataset = copy.deepcopy(ws.get_one(self.database, - ws.equals("name", freight_transport_ds), - ws.equals("location", "RER") - ) - ) - + if [ + freight_transport_ds, + region, + ] not in changed_datasets_location and region != "World": + try: # RoW dataset to be used for other IAM regions + new_dataset = copy.deepcopy( + ws.get_one( + self.database, + ws.equals("name", freight_transport_ds), + ws.equals("location", "RoW"), + ) + ) + except NoResults: # if no RoW dataset can be found use RER dataset + new_dataset = copy.deepcopy( + ws.get_one( + self.database, + ws.equals("name", freight_transport_ds), + ws.equals("location", "RER"), + ) + ) + # Create a list that stores the dataset used for copy to later delete them from the database - if not any(dataset["name"] == new_dataset["name"] and dataset["location"] == new_dataset["location"] for dataset in old_datasets): + if not any( + dataset["name"] == new_dataset["name"] + and dataset["location"] == new_dataset["location"] + for dataset in old_datasets + ): old_datasets.append(copy.deepcopy(new_dataset)) new_dataset["location"] = region new_dataset["code"] = str(uuid.uuid4().hex) 
- new_dataset["comment"] = f"Dataset for the region {region}. {new_dataset['comment']}" + new_dataset["comment"] = ( + f"Dataset for the region {region}. {new_dataset['comment']}" + ) for exchange in new_dataset["exchanges"]: if exchange["type"] == "production": exchange["location"] = region - + # add to log self.write_log(dataset=new_dataset, status="created") # add it to list of created datasets self.add_to_index(new_dataset) - + self.adjust_transport_efficiency(new_dataset) - + new_datasets.append(new_dataset) self.database.extend(new_datasets) for dataset in list(self.database): # Create a copy for iteration - if any(old_dataset["name"] == dataset["name"] and old_dataset["location"] == dataset["location"] for old_dataset in old_datasets): + if any( + old_dataset["name"] == dataset["name"] + and old_dataset["location"] == dataset["location"] + for old_dataset in old_datasets + ): self.database.remove(dataset) - def adjust_transport_efficiency(self, dataset): """ The function updates the efficiencies of transport datasets using the transport efficiencies, created in data_collection.py. """ - + vehicles_map = get_vehicles_mapping() - + # create a list that contains all energy carrier markets used in transport energy_carriers = vehicles_map["energy carriers"] - + # create a list that contains all biosphere flows that are related to the direct combustion of fuel fuel_combustion_emissions = vehicles_map["fuel combustion emissions"] - - # calculate scaling factor + + # calculate scaling factor if "lorry" in dataset["name"]: scaling_factor = 1 / self.find_iam_efficiency_change( data=self.iam_data.roadfreight_efficiencies, @@ -264,20 +299,24 @@ def adjust_transport_efficiency(self, dataset): if scaling_factor is None: scaling_factor = 1 - + # rescale exchanges if scaling_factor != 1 and scaling_factor > 0: rescale_exchanges( dataset, scaling_factor, technosphere_filters=[ - ws.either(*[ws.contains("name", x) for x in energy_carriers]) # TODO: apply diesel efficiency increase to diesel shunting for electricity and hydrogen datasets + ws.either( + *[ws.contains("name", x) for x in energy_carriers] + ) # TODO: apply diesel efficiency increase to diesel shunting for electricity and hydrogen datasets + ], + biosphere_filters=[ + ws.contains("name", x) for x in fuel_combustion_emissions ], - biosphere_filters=[ws.contains("name", x) for x in fuel_combustion_emissions], remove_uncertainty=False, ) ########## how can there be a scaling factor for hydrogen if FE and ES variables are 0? - + # Update the comments text = ( f"This dataset has been modified by `premise`, according to the energy transport " @@ -285,37 +324,45 @@ def adjust_transport_efficiency(self, dataset): f"region {dataset['location']} in {self.year}, following the scenario {self.scenario}. " f"The energy efficiency of the process has been improved by {int((1 - scaling_factor) * 100)}%." ) - dataset["comment"] = text + (dataset["comment"] if dataset["comment"] is not None else "") + dataset["comment"] = text + ( + dataset["comment"] if dataset["comment"] is not None else "" + ) if "log parameters" not in dataset: dataset["log parameters"] = {} - dataset["log parameters"].update({"efficiency change": scaling_factor,}) - + dataset["log parameters"].update( + { + "efficiency change": scaling_factor, + } + ) + def generate_transport_markets(self): """ Function that creates market processes and adds them to the database. 
- It calculates the share of inputs to each market process and - creates the process by multiplying the share with the amount of reference product, + It calculates the share of inputs to each market process and + creates the process by multiplying the share with the amount of reference product, assigning it to the respective input. Regional market processes then make up the world market processes. """ - + # regional transport markets to be created (keys) with inputs list (values) transport_markets_tbc = { - "market for transport, freight, lorry, unspecified powertrain": - self.iam_data.roadfreight_markets.coords["variables"].values.tolist(), - "market for transport, freight train, unspecified powertrain": - self.iam_data.railfreight_markets.coords["variables"].values.tolist(), + "market for transport, freight, lorry, unspecified powertrain": self.iam_data.roadfreight_markets.coords[ + "variables" + ].values.tolist(), + "market for transport, freight train, unspecified powertrain": self.iam_data.railfreight_markets.coords[ + "variables" + ].values.tolist(), } new_transport_markets = [] - + # create regional market processes for markets, vehicles in transport_markets_tbc.items(): for region in self.iam_data.regions: market = { - "name": markets, + "name": markets, "reference product": markets.replace("market for ", ""), "unit": "ton kilometer", "location": region, @@ -334,15 +381,25 @@ def generate_transport_markets(self): "comment": f"Fleet-average vehicle for the year {self.year}, " f"for the region {region}.", } - + # add exchanges if region != "World": for vehicle in vehicles: - if markets == "market for transport, freight, lorry, unspecified powertrain": - market_share = self.iam_data.roadfreight_markets.sel(region=region, variables=vehicle, year=self.year).item() - elif markets == "market for transport, freight train, unspecified powertrain": - market_share = self.iam_data.railfreight_markets.sel(region=region, variables=vehicle, year=self.year).item() - + if ( + markets + == "market for transport, freight, lorry, unspecified powertrain" + ): + market_share = self.iam_data.roadfreight_markets.sel( + region=region, variables=vehicle, year=self.year + ).item() + elif ( + markets + == "market for transport, freight train, unspecified powertrain" + ): + market_share = self.iam_data.railfreight_markets.sel( + region=region, variables=vehicle, year=self.year + ).item() + # determine the reference product if "lorry" in vehicle: product = "transport, freight, lorry" @@ -364,42 +421,39 @@ def generate_transport_markets(self): "type": "technosphere", "amount": market_share, } - ) - + ) + # add to log self.write_log(dataset=market, status="created") # add it to list of created datasets self.add_to_index(market) - + new_transport_markets.append(market) - - + # world markets to be created vehicles_map = get_vehicles_mapping() - dict_transport_ES_var = vehicles_map["energy service variables"][self.model]["mode"] - + dict_transport_ES_var = vehicles_map["energy service variables"][self.model][ + "mode" + ] + dict_regional_shares = {} - + # create world market transport datasets exchanges for market, var in dict_transport_ES_var.items(): for region in self.iam_data.regions: if region != "World": # calculate regional shares dict_regional_shares[region] = ( - ( - self.iam_data.data.sel( - region=region, - variables=var, - year=self.year).values - )/( self.iam_data.data.sel( - region="World", - variables=var, - year=self.year).item() - ) + region=region, variables=var, year=self.year + ).values + ) / ( + 
self.iam_data.data.sel( + region="World", variables=var, year=self.year + ).item() ) - - # add exchanges + + # add exchanges for ds in new_transport_markets: if ds["location"] == "World": for region in self.iam_data.regions: @@ -414,10 +468,9 @@ def generate_transport_markets(self): "amount": dict_regional_shares[region], } ) - + self.database.extend(new_transport_markets) - - + def generate_unspecified_transport_vehicles(self): """ This function generates unspecified transport vehicles for the IAM regions. @@ -425,18 +478,20 @@ def generate_unspecified_transport_vehicles(self): an average of powertrain technology for a specific region. This only applies to freight lorries so far. """ - + vehicles_map = get_vehicles_mapping() - dict_transport_ES_var = vehicles_map["energy service variables"][self.model]["size"] + dict_transport_ES_var = vehicles_map["energy service variables"][self.model][ + "size" + ] dict_vehicle_types = vehicles_map["vehicle types"] weight_specific_ds = [] - + # create regional size dependent technology-average markets for region in self.iam_data.regions: for market, var in dict_transport_ES_var.items(): vehicle_unspecified = { - "name": market, + "name": market, "reference product": market.replace("market for ", ""), "unit": "ton kilometer", "location": region, @@ -455,7 +510,7 @@ def generate_unspecified_transport_vehicles(self): "comment": f"Fleet-average vehicle for the year {self.year}, " f"for the region {region}.", } - + # add exchanges for regional datasets if region != "World": for vehicle_types, names in dict_vehicle_types.items(): @@ -463,17 +518,15 @@ def generate_unspecified_transport_vehicles(self): if variable_key in self.iam_data.data.variables: # calculate regional shares regional_weight_shares = ( - ( self.iam_data.data.sel( - region=region, - variables=variable_key, - year=self.year).values - )/( + region=region, + variables=variable_key, + year=self.year, + ).values + ) / ( self.iam_data.data.sel( - region=region, - variables=var, - year=self.year).item() - ) + region=region, variables=var, year=self.year + ).item() ) if regional_weight_shares > 0: @@ -481,10 +534,14 @@ def generate_unspecified_transport_vehicles(self): euro = ", EURO-VI" else: euro = "" - + vehicle_unspecified["exchanges"].append( { - "name": names + ", " + market.split(',')[3].strip() + euro + ", long haul", + "name": names + + ", " + + market.split(",")[3].strip() + + euro + + ", long haul", "product": "transport, freight, lorry" + euro, "unit": "ton kilometer", "location": region, @@ -492,25 +549,21 @@ def generate_unspecified_transport_vehicles(self): "amount": regional_weight_shares, } ) - - # add exchanges for global dataset + + # add exchanges for global dataset elif region == "World": for region in self.iam_data.regions: if region != "World": regional_weight_shares = ( - ( self.iam_data.data.sel( - region=region, - variables=var, - year=self.year).values - )/( + region=region, variables=var, year=self.year + ).values + ) / ( self.iam_data.data.sel( - region="World", - variables=var, - year=self.year).item() - ) + region="World", variables=var, year=self.year + ).item() ) - + if regional_weight_shares > 0: vehicle_unspecified["exchanges"].append( { @@ -522,51 +575,58 @@ def generate_unspecified_transport_vehicles(self): "amount": regional_weight_shares, } ) - + # only add markets that have inputs - if len(vehicle_unspecified["exchanges"]) > 1: + if len(vehicle_unspecified["exchanges"]) > 1: weight_specific_ds.append(vehicle_unspecified) - + # add to log 
self.write_log(dataset=vehicle_unspecified, status="created") # add it to list of created datasets self.add_to_index(vehicle_unspecified) - + self.database.extend(weight_specific_ds) - - #TODO: regional unspecified vehicles per driving cycle could have same shares but are not used for markets? - - + + # TODO: regional unspecified vehicles per driving cycle could have same shares but are not used for markets? + def relink_exchanges(self): """ This function goes through all datasets in the database that use transport, freight (lorry or train) as one of their exchanges. It replaced those ecoinvent transport exchanges with the new transport inventories and the newly created transport markets. """ - + vehicles_map = get_vehicles_mapping() - + for dataset in self.database: if "transport, freight" not in dataset["name"]: for exc in ws.technosphere( dataset, ws.contains("name", "transport, freight"), ws.equals("unit", "ton kilometer"), + ): + + if any( + key.lower() in exc["name"].lower() + for key in vehicles_map["freight transport"][self.model] ): - - if any(key.lower() in exc["name"].lower() for key in vehicles_map['freight transport'][self.model]): key = [ - k - for k in vehicles_map['freight transport'][self.model] + k + for k in vehicles_map["freight transport"][self.model] if k.lower() in exc["name"].lower() ][0] - + if "input" in exc: del exc["input"] - - exc["name"] = f"{vehicles_map['freight transport'][self.model][key]}" - exc["location"] = self.geo.ecoinvent_to_iam_location(dataset["location"]) - exc["product"] = (f"{vehicles_map['freight transport'][self.model][key]}").replace("market for ", "") - + + exc["name"] = ( + f"{vehicles_map['freight transport'][self.model][key]}" + ) + exc["location"] = self.geo.ecoinvent_to_iam_location( + dataset["location"] + ) + exc["product"] = ( + f"{vehicles_map['freight transport'][self.model][key]}" + ).replace("market for ", "") def delete_inventory_datasets(self): """ @@ -576,11 +636,7 @@ def delete_inventory_datasets(self): """ vehicles_map = get_vehicles_mapping() - + ds_to_delete = vehicles_map["ecoinvent freight transport"] - self.database = [ - ds - for ds in self.database - if ds["name"] not in ds_to_delete - ] + self.database = [ds for ds in self.database if ds["name"] not in ds_to_delete] diff --git a/premise/validation.py b/premise/validation.py index 946bee3c..fea1c5d6 100644 --- a/premise/validation.py +++ b/premise/validation.py @@ -1846,13 +1846,13 @@ def run_biomass_checks(self): self.check_biomass_markets() self.check_residual_biomass_share() self.save_log() - + class TransportValidationNEW(BaseDatasetValidator): def __init__(self, model, scenario, year, regions, database, iam_data): super().__init__(model, scenario, year, regions, database) self.iam_data = iam_data - + def check_transport_markets(self): # check that the transport markets inputs equal to 1 @@ -1877,23 +1877,26 @@ def check_transport_markets(self): message, issue_type="major", ) - + def check_vehicles(self): for act in [ a for a in self.database - if a["name"].startswith("transport, freight") and ", unspecified powertrain" in a["name"] + if a["name"].startswith("transport, freight") + and ", unspecified powertrain" in a["name"] ]: # check that all transport exchanges are differently named or are from a different location names_locations = [ - (exc["name"], exc["location"]) for exc in act["exchanges"] if exc["type"] == "technosphere" + (exc["name"], exc["location"]) + for exc in act["exchanges"] + if exc["type"] == "technosphere" ] if len(names_locations) != 
len(set(names_locations)): message = "Duplicate transport exchanges" self.log_issue( act, "duplicate transport exchanges", message, issue_type="major" ) - + def check_vehicle_efficiency( self, vehicle_name, @@ -2006,4 +2009,4 @@ def run_transport_checks(self): elec_minimum=0.0, elec_maximum=0.5, ) - self.save_log() \ No newline at end of file + self.save_log()
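
The core of `Transport.adjust_transport_efficiency` in the patch above is a single idea: scale the energy-carrier inputs of a transport dataset by the inverse of the IAM efficiency change for that vehicle class and region. Below is a minimal, self-contained sketch of that logic, assuming a toy dataset dict shaped like the ones in the diff; `toy_rescale`, `energy_carriers`, and the numeric efficiency value are illustrative stand-ins, not part of the `premise` or `wurst` APIs.

```python
# Sketch of the efficiency-scaling step in Transport.adjust_transport_efficiency().
# Assumption: a dataset is a dict with an "exchanges" list, as in the diff above.

energy_carriers = ["diesel", "electricity", "hydrogen"]  # illustrative subset


def toy_rescale(dataset: dict, scaling_factor: float) -> dict:
    """Scale only the energy-carrier technosphere inputs by `scaling_factor`."""
    for exc in dataset["exchanges"]:
        if exc["type"] == "technosphere" and any(
            carrier in exc["name"] for carrier in energy_carriers
        ):
            exc["amount"] *= scaling_factor
    return dataset


# An assumed IAM efficiency gain of +25% becomes a 1 / 1.25 = 0.8 scaling
# factor on the energy inputs, mirroring `1 / find_iam_efficiency_change(...)`.
efficiency_change = 1.25  # assumed value, for illustration only
scaling_factor = 1 / efficiency_change

truck = {
    "name": "transport, freight, lorry, 40t",
    "exchanges": [
        {"name": "market for diesel", "type": "technosphere", "amount": 0.05},
        {"name": "transport, freight, lorry, 40t", "type": "production", "amount": 1.0},
    ],
}

toy_rescale(truck, scaling_factor)
print(round(truck["exchanges"][0]["amount"], 3))  # 0.04 — diesel input scaled by 0.8
```

In the patch itself this is handled by `rescale_exchanges` from `wurst`, with technosphere filters restricted to the energy-carrier markets and biosphere filters covering the direct fuel-combustion emissions, so that tailpipe flows shrink together with the fuel input; the applied factor is then recorded under the dataset's "log parameters" as "efficiency change".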