diff --git a/conda/meta.yaml b/conda/meta.yaml index 072a4bda..b4164c36 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -20,7 +20,7 @@ requirements: run: - numpy - pandas - - bw2io==0.8.7 + - bw2io >=0.8.7, != 0.8.8 - bw2data - wurst - xarray diff --git a/dev/Untitled.ipynb b/dev/Untitled.ipynb index 8de806b2..5843c3ca 100644 --- a/dev/Untitled.ipynb +++ b/dev/Untitled.ipynb @@ -20,7 +20,7 @@ "outputs": [], "source": [ "scenarios = [\n", - " {\"model\":\"tiam\", \"pathway\":\"SSP2-RCP19\", \"year\":2030, \"filepath\": \"/Users/romain/Downloads/Scenario Files/Production volumes & efficiencies\"}, # optimistic renewable scenario, 2.3-2.8 °C\n", + " {\"model\":\"tiam\", \"pathway\":\"SSP2-RCP19\", \"year\":2020, \"filepath\": \"/Users/romain/Downloads/Scenario Files/Production volumes & efficiencies\"}, # optimistic renewable scenario, 2.3-2.8 °C\n", "]" ] }, @@ -34,7 +34,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "premise v.(1, 6, 6)\n", + "premise v.(1, 7, 1)\n", "+------------------------------------------------------------------+\n", "| Warning |\n", "+------------------------------------------------------------------+\n", @@ -66,372 +66,16 @@ "Keep uncertainty data?\n", "NewDatabase(..., keep_uncertainty_data=True)\n", "\n", + "Disable multiprocessing?\n", + "NewDatabase(..., use_multiprocessing=False)\n", + "\n", "Hide these messages?\n", "NewDatabase(..., quiet=True)\n", "\n", "//////////////////// EXTRACTING SOURCE DATABASE ////////////////////\n", - "Cannot find cached database. Will create one now for next time...\n", - "Getting activity data\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|█████████████████████████████████| 21238/21238 [00:00<00:00, 304494.43it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Adding exchange data to activities\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|████████████████████████████████| 674593/674593 [00:39<00:00, 17190.81it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Filling out exchange data\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|███████████████████████████████████| 21238/21238 [00:03<00:00, 6066.49it/s]\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Set missing location of datasets to global scope.\n", - "Set missing location of production exchanges to scope of dataset.\n", - "Correct missing location of technosphere exchanges.\n", - "Correct missing flow categories for biosphere exchanges\n", - "Remove empty exchanges.\n", - "Remove uncertainty data.\n", "Done!\n", "\n", "////////////////// IMPORTING DEFAULT INVENTORIES ///////////////////\n", - "Cannot find cached inventories. 
Will create them now for next time...\n", - "Importing default inventories...\n", - "\n", - "Extracted 1 worksheets in 0.12 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.03 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Remove uncertainty data.\n", - "Extracted 7 worksheets in 0.05 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.03 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.03 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.46 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "The following datasets to import already exist in the source database. 
They will not be imported\n", - "+----------------------------------------------------+--------------------------------+----------+-------------+\n", - "| Name | Reference product | Location | File |\n", - "+----------------------------------------------------+--------------------------------+----------+-------------+\n", - "| fluorspar production, 97% purity | fluorspar, 97% purity | GLO | lci-PV.xlsx |\n", - "| metallization paste production, back side | metallization paste, back side | RER | lci-PV.xlsx |\n", - "| metallization paste production, back side, alumini | metallization paste, back side | RER | lci-PV.xlsx |\n", - "| metallization paste production, front side | metallization paste, front sid | RER | lci-PV.xlsx |\n", - "| photovoltaic module production, building-integrate | photovoltaic module, building- | RER | lci-PV.xlsx |\n", - "| photovoltaic module production, building-integrate | photovoltaic module, building- | RER | lci-PV.xlsx |\n", - "| photovoltaic mounting system production, for facad | photovoltaic mounting system, | RER | lci-PV.xlsx |\n", - "| photovoltaic mounting system production, for flat- | photovoltaic mounting system, | RER | lci-PV.xlsx |\n", - "| photovoltaic mounting system production, for slant | photovoltaic mounting system, | RER | lci-PV.xlsx |\n", - "| photovoltaic panel factory construction | photovoltaic panel factory | GLO | lci-PV.xlsx |\n", - "| polyvinylfluoride production | polyvinylfluoride | US | lci-PV.xlsx |\n", - "| polyvinylfluoride production, dispersion | polyvinylfluoride, dispersion | US | lci-PV.xlsx |\n", - "| polyvinylfluoride, film production | polyvinylfluoride, film | US | lci-PV.xlsx |\n", - "| silicon production, metallurgical grade | silicon, metallurgical grade | NO | lci-PV.xlsx |\n", - "| vinyl fluoride production | vinyl fluoride | US | lci-PV.xlsx |\n", - "| wafer factory construction | wafer factory | DE | lci-PV.xlsx |\n", - "+----------------------------------------------------+--------------------------------+----------+-------------+\n", - "Extracted 1 worksheets in 0.05 seconds\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "The following datasets to import already exist in the source database. 
They will not be imported\n", - "+----------------------------------------------------+--------------------------------+----------+---------------------------------------------------+\n", - "| Name | Reference product | Location | File |\n", - "+----------------------------------------------------+--------------------------------+----------+---------------------------------------------------+\n", - "| carbon dioxide, captured at cement production plan | carbon dioxide, captured and r | RER | lci-synfuels-from-methanol-from-cement-plant.xlsx |\n", - "+----------------------------------------------------+--------------------------------+----------+---------------------------------------------------+\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "The following datasets to import already exist in the source database. They will not be imported\n", - "+----------------------------------------------------+----------------------+----------+-------------------------------------------+\n", - "| Name | Reference product | Location | File |\n", - "+----------------------------------------------------+----------------------+----------+-------------------------------------------+\n", - "| methanol distillation, hydrogen from coal gasifica | methanol, purified | RER | lci-synfuels-from-methanol-from-coal.xlsx |\n", - "| methanol synthesis, hydrogen from coal gasificatio | methanol, unpurified | RER | lci-synfuels-from-methanol-from-coal.xlsx |\n", - "+----------------------------------------------------+----------------------+----------+-------------------------------------------+\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: 
migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 4 worksheets in 0.19 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.04 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - 
"Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.10 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Migrating to 3.8 first\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Applying strategy: migrate_datasets\n", - "Applying strategy: migrate_exchanges\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.02 seconds\n", - "Remove uncertainty data.\n", - "Extracted 1 worksheets in 0.01 seconds\n", - "Remove uncertainty data.\n", - "Data cached. It is advised to restart your workflow at this point.\n", - "This allows premise to use the cached data instead, which results in\n", - "a faster workflow.\n", "Done!\n", "\n", "/////////////////////// EXTRACTING IAM DATA ////////////////////////\n", @@ -445,6 +89,7 @@ " scenarios=scenarios,\n", " source_db=\"ecoinvent 3.9.1 cutoff\",\n", " source_version=\"3.9.1\",\n", + " use_absolute_efficiency=True,\n", " #source_type=\"ecospold\",\n", " #source_file_path=f\"/Users/romain/Documents/ecoinvent 3.9.1_cutoff_ecoSpold02/datasets\", # <-- this is NEW\n", " #key='tUePmX_S5B8ieZkkM7WUU2CnO8SmShwmAeWK9x2rTFo=',\n", @@ -465,6 +110,106 @@ "text": [ "\n", "/////////////////////////// ELECTRICITY ////////////////////////////\n", + "Warning: No activities found for Biomass CHP (existing) -- revise mapping.\n", + "Warning: No activities found for Coal USC -- revise mapping.\n", + "Warning: No activities found for Coal USC CCS -- revise mapping.\n", + "Warning: No activities found for Gas GT -- revise mapping.\n", + "{'Biogas CHP': {'heat and power co-generation, biogas, gas engine',\n", + " 'heat and power co-generation, biogas, gas engine, renewable '\n", + " 'energy products'},\n", + " 'Biomass CHP': {'heat and power co-generation, wood chips, 2000 kW',\n", + " 'heat and power co-generation, wood chips, 2000 kW, '\n", + " 'state-of-the-art 2014',\n", + " 'heat and power co-generation, wood chips, 6667 kW',\n", + " 'heat and power co-generation, wood chips, 6667 kW, '\n", + " 'state-of-the-art 2014'},\n", + " 'Biomass CHP (existing)': {'heat and power co-generation, existing, wood '\n", + " 'chips'},\n", + " 'Biomass IGCC': {'electricity production, at biomass-fired IGCC power plant'},\n", + " 'Biomass IGCC CCS': {'electricity production, at biomass-fired IGCC power '\n", + " 'plant, pre, pipeline 200km, storage 1000m'},\n", + " 'Biomass MSW': {'treatment of municipal solid waste, incineration'},\n", + " 'Biomass ST': {'electricity production, at wood burning power plant'},\n", + " 'Biomass ST CCS': {'electricity production, at wood burning power plant, '\n", + " 'post, pipeline 200km, storage 1000m'},\n", + " 'Coal CF 50-50': {'electricity production, 
at co-firing wood and coal power '\n", + " 'plant, 50-50'},\n", + " 'Coal CF 80-20': {'electricity production, at co-firing wood and coal power '\n", + " 'plant, 80-20'},\n", + " 'Coal CHP': {'heat and power co-generation, hard coal',\n", + " 'heat and power co-generation, lignite'},\n", + " 'Coal IGCC': {'electricity production, at hard coal-fired IGCC power plant',\n", + " 'electricity production, at lignite-fired IGCC power plant'},\n", + " 'Coal IGCC CCS': {'electricity production, at hard coal-fired IGCC power '\n", + " 'plant, pre, pipeline 200km, storage 1000m',\n", + " 'electricity production, at lignite-fired IGCC power plant, '\n", + " 'pre, pipeline 200km, storage 1000m'},\n", + " 'Coal PC': {'electricity production, hard coal',\n", + " 'electricity production, hard coal, conventional',\n", + " 'electricity production, hard coal, subcritical',\n", + " 'electricity production, lignite'},\n", + " 'Coal SC': {'electricity production, hard coal, supercritical'},\n", + " 'Coal USC': {'electricity production, hard coal, ultra-supercritical'},\n", + " 'Coal USC CCS': {'electricity production, at hard coal-fired power plant, '\n", + " 'ultra-super critical, oxy, pipeline 200km, storage 1000m'},\n", + " 'Gas CC': {'electricity production, natural gas, combined cycle power plant'},\n", + " 'Gas CC CCS': {'electricity production, at natural gas-fired combined cycle '\n", + " 'power plant, post, pipeline 200km, storage 1000m'},\n", + " 'Gas CHP': {'heat and power co-generation, natural gas, combined cycle power '\n", + " 'plant, 400MW electrical',\n", + " 'heat and power co-generation, natural gas, conventional power '\n", + " 'plant, 100MW electrical'},\n", + " 'Gas GT': {'electricity production, natural gas, gas turbine, conventional '\n", + " 'power plant'},\n", + " 'Gas OC': {'electricity production, natural gas, conventional power plant'},\n", + " 'Geothermal': {'electricity production, deep geothermal'},\n", + " 'Hydro': {'electricity production, hydro, reservoir, alpine region',\n", + " 'electricity production, hydro, reservoir, non-alpine region',\n", + " 'electricity production, hydro, reservoir, tropical region',\n", + " 'electricity production, hydro, run-of-river'},\n", + " 'Hydro, run-of-river': {'electricity production, hydro, run-of-river'},\n", + " 'Nuclear': {'electricity production, nuclear, boiling water reactor',\n", + " 'electricity production, nuclear, pressure water reactor',\n", + " 'electricity production, nuclear, pressure water reactor, heavy '\n", + " 'water moderated'},\n", + " 'Oil CC': {'electricity production, oil'},\n", + " 'Oil CHP': {'heat and power co-generation, oil'},\n", + " 'Oil ST': {'electricity production, oil'},\n", + " 'Solar CSP': {'electricity production, solar thermal parabolic trough, 50 MW',\n", + " 'electricity production, solar tower power plant, 20 MW'},\n", + " 'Solar PV Centralized': {'electricity production, photovoltaic, commercial'},\n", + " 'Solar PV Residential': {'electricity production, photovoltaic, residential'},\n", + " 'Wave': {'electricity production, wave energy converter'},\n", + " 'Wind Offshore': {'electricity production, wind, 1-3MW turbine, offshore'},\n", + " 'Wind Onshore': {'electricity production, wind, 1-3MW turbine, onshore',\n", + " 'electricity production, wind, <1MW turbine, onshore',\n", + " 'electricity production, wind, >3MW turbine, onshore'}}\n", + "None\n", + "Gas CHP\n", + "Coal USC\n", + "Coal PC\n", + "Biomass CHP (existing)\n", + "Biomass ST\n", + "Biogas CHP\n", + "Biomass MSW\n", + "Biomass 
IGCC\n", + "Biomass IGCC CCS\n", + "Coal CF 80-20\n", + "Gas GT\n", + "Biomass CHP\n", + "Coal SC\n", + "Gas OC\n", + "Oil CC\n", + "Coal IGCC\n", + "Coal CF 50-50\n", + "Coal IGCC CCS\n", + "Gas CC CCS\n", + "Coal USC CCS\n", + "Oil CHP\n", + "Coal CHP\n", + "Biomass ST CCS\n", + "Gas CC\n", + "Oil ST\n", "Done!\n", "\n" ] @@ -476,54 +221,21 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "90f32b85-6451-419f-bab8-a99bb1fa65c9", + "execution_count": 5, + "id": "9cabccc2-a9c3-4056-bfe3-b3093850199b", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Write new database(s) to Brightway2.\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "Writing activities to SQLite3 database:\n", - "0% [## ] 100% | ETA: 00:01:35" - ] - } - ], - "source": [ - "ndb.write_db_to_brightway()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "id": "fd525328-d802-473f-8266-7315392cac23", - "metadata": {}, - "outputs": [ - { - "ename": "AssertionError", - "evalue": "('electricity production, at co-firing wood and coal power plant, 50-50', 'electricity production, at co-firing wood and coal power plant, 50-50')", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", - "Input \u001b[0;32mIn [8]\u001b[0m, in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m e \u001b[38;5;129;01min\u001b[39;00m ds[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mexchanges\u001b[39m\u001b[38;5;124m\"\u001b[39m]:\n\u001b[1;32m 4\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m e[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtype\u001b[39m\u001b[38;5;124m\"\u001b[39m] \u001b[38;5;129;01min\u001b[39;00m [\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mtechnosphere\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mproduction\u001b[39m\u001b[38;5;124m\"\u001b[39m]:\n\u001b[0;32m----> 5\u001b[0m \u001b[38;5;28;01massert\u001b[39;00m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mlocation\u001b[39m\u001b[38;5;124m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m e, (ds[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m], e[\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mname\u001b[39m\u001b[38;5;124m\"\u001b[39m])\n", - "\u001b[0;31mAssertionError\u001b[0m: ('electricity production, at co-firing wood and coal power plant, 50-50', 'electricity production, at co-firing wood and coal power plant, 50-50')" + "Generate change report.\n", + "Report saved under /Users/romain/GitHub/premise/dev.\n" ] } ], "source": [ - "for ds in ndb.scenarios[0][\"database\"]:\n", - " assert \"location\" in ds, ds[\"name\"]\n", - " for e in ds[\"exchanges\"]:\n", - " if e[\"type\"] in [\"technosphere\", \"production\"]:\n", - " assert \"location\" in e, (ds[\"name\"], e[\"name\"])" + "ndb.generate_change_report()" ] }, { @@ -2047,7 +1759,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.12" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/docs/extract.rst b/docs/extract.rst index 8dc8915e..87c4185b 100644 --- a/docs/extract.rst +++ b/docs/extract.rst @@ -112,7 +112,9 @@ indicate the database name in `source_db` and its version in `source_version`: ], source_db="ecoinvent 3.7 cutoff", # <-- this is NEW. 
source_version="3.7.1", # <-- this is NEW - key='xxxxxxxxxxxxxxxxxxxxxxxxx' + key='xxxxxxxxxxxxxxxxxxxxxxxxx', + use_multiprocessing=True, # True by default, set to False if multiprocessing is causing troubles + keep_uncertainty_data=False # False by default, set to True if you want to keep ecoinvent's uncertainty data ) Note that a cache of the database will be created the first time and diff --git a/docs/introduction.rst b/docs/introduction.rst index 4e518ae5..5158d3a2 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -74,9 +74,7 @@ Provided a decryption key (ask the maintainers_), the following IAM scenarios ar CarbonBrief_ wrote a good article explaining the meaning of the SSP/RCP system. -Additionally, we provided a summary of the main characteristics of each scenario in a spreadsheet_. - -.. _spreadsheet: https://github.com/polca/premise/raw/master/docs/scenario_report_2023-05-02.xlsx +Additionally, we provided a summary of the main characteristics of each scenario `here `_. .. _CarbonBrief: https://www.carbonbrief.org/explainer-how-shared-socioeconomic-pathways-explore-future-climate-change diff --git a/docs/load.rst b/docs/load.rst index 9f64beaf..255b996a 100644 --- a/docs/load.rst +++ b/docs/load.rst @@ -162,9 +162,12 @@ This is done as follows: ndb.write_db_to_datapackage() This creates a zip file that contains the all the data necessary for -other users to replicate teh databases, provided they have access +other users to replicate the databases, provided they have access to the source database locally. -See the library ``unfold`` for more information on data packages -for sharing LCA databases. +See the library <``unfold`` https://github.com/polca/unfold/tree/main>_ for more information on data packages +for sharing LCA databases. ``unfold`` can read these data packages and create +brightway2 databases (or superstructure databases) from them. +``unfold`` can also fold premise databases registered in your brightway2 project +into data packages, to be shared with and recreated by others. diff --git a/examples/examples.ipynb b/examples/examples.ipynb index b96bef1a..c5d84bae 100644 --- a/examples/examples.ipynb +++ b/examples/examples.ipynb @@ -1030,7 +1030,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.12" + "version": "3.10.12" } }, "nbformat": 4, diff --git a/premise/__init__.py b/premise/__init__.py index 8b7a3b1d..c0bab8f3 100644 --- a/premise/__init__.py +++ b/premise/__init__.py @@ -1,5 +1,5 @@ __all__ = ("NewDatabase", "clear_cache", "get_regions_definition") -__version__ = (1, 7, 0) +__version__ = (1, 7, 1) from pathlib import Path diff --git a/premise/activity_maps.py b/premise/activity_maps.py index b62fa740..467f290b 100644 --- a/premise/activity_maps.py +++ b/premise/activity_maps.py @@ -3,6 +3,7 @@ mapping between ``premise`` and ``ecoinvent`` terminology. """ +import sys from collections import defaultdict from pathlib import Path from typing import List, Union @@ -23,11 +24,12 @@ ) -def get_mapping(filepath: Path, var: str) -> dict: +def get_mapping(filepath: Path, var: str, model: str = None) -> dict: """ Loa a YAML file and return a dictionary given a variable. :param filepath: YAML file path :param var: variable to return the dictionary for. + :param model: if provided, only return the dictionary for this model. 
:return: a dictionary """ @@ -37,7 +39,11 @@ def get_mapping(filepath: Path, var: str) -> dict: mapping = {} for key, val in techs.items(): if var in val: - mapping[key] = val[var] + if model is None: + mapping[key] = val[var] + else: + if model in val.get("iam_aliases", {}): + mapping[key] = val[var] return mapping @@ -114,12 +120,13 @@ class InventorySet: These functions return the result of applying :func:`act_fltr` to the filter dictionaries. """ - def __init__(self, database: List[dict], version: str = None) -> None: + def __init__(self, database: List[dict], version: str = None, model: str = None) -> None: self.database = database self.version = version + self.model = model self.powerplant_filters = get_mapping( - filepath=POWERPLANT_TECHS, var="ecoinvent_aliases" + filepath=POWERPLANT_TECHS, var="ecoinvent_aliases", model=self.model ) self.powerplant_fuels_filters = get_mapping( @@ -253,4 +260,13 @@ def generate_sets_from_filters(self, filtr: dict, database=None) -> dict: tech: act_fltr(database, fltr.get("fltr"), fltr.get("mask")) for tech, fltr in filtr.items() } - return {tech: {act["name"] for act in actlst} for tech, actlst in techs.items()} + + mapping = {tech: {act["name"] for act in actlst} for tech, actlst in techs.items()} + + # check if all keys have values + # if not, print warning + for key, val in mapping.items(): + if not val: + print(f"Warning: No activities found for {key} -- revise mapping.") + + return mapping diff --git a/premise/data/additional_inventories/lci-biofuels.xlsx b/premise/data/additional_inventories/lci-biofuels.xlsx index 116f725a..3507a852 100644 Binary files a/premise/data/additional_inventories/lci-biofuels.xlsx and b/premise/data/additional_inventories/lci-biofuels.xlsx differ diff --git a/premise/data/additional_inventories/migration_map.csv b/premise/data/additional_inventories/migration_map.csv index 8c4b6764..3f9a90a6 100644 --- a/premise/data/additional_inventories/migration_map.csv +++ b/premise/data/additional_inventories/migration_map.csv @@ -616,4 +616,10 @@ from;to;name_from;ref_prod_from;location_from;name_to;ref_prod_to;location_to 36;39;transmission network construction, electricity, high voltage;transmission network, long-distance;CH;transmission network construction, electricity, high voltage;transmission network, electricity, high voltage;CH 37;39;transmission network construction, electricity, high voltage;transmission network, long-distance;CH;transmission network construction, electricity, high voltage;transmission network, electricity, high voltage;CH 38;39;transmission network construction, electricity, high voltage;transmission network, long-distance;CH;transmission network construction, electricity, high voltage;transmission network, electricity, high voltage;CH -39;38;market for battery cell, Li-ion, LFP;battery cell, Li-ion, LFP;GLO;market for battery cell, Li-ion;battery cell, Li-ion;GLO \ No newline at end of file +39;38;market for battery cell, Li-ion, LFP;battery cell, Li-ion, LFP;GLO;market for battery cell, Li-ion;battery cell, Li-ion;GLO +39;37;market for battery cell, Li-ion, LFP;battery cell, Li-ion, LFP;GLO;market for battery cell, Li-ion;battery cell, Li-ion;GLO +39;37;heat pump production, heat and power co-generation unit, 160kW electrical;heat pump, heat and power co-generation unit, 160kW electrical;RoW;heat pump production, for heat and power co-generation unit, 160kW electrical;heat pump, for heat and power co-generation unit, 160kW electrical;RoW +39;37;heat pump production, heat and power co-generation 
unit, 160kW electrical;heat pump, heat and power co-generation unit, 160kW electrical;RER;heat pump production, for heat and power co-generation unit, 160kW electrical;heat pump, for heat and power co-generation unit, 160kW electrical;RER +38;37;heat pump production, heat and power co-generation unit, 160kW electrical;heat pump, heat and power co-generation unit, 160kW electrical;RoW;heat pump production, for heat and power co-generation unit, 160kW electrical;heat pump, for heat and power co-generation unit, 160kW electrical;RoW +38;37;heat pump production, heat and power co-generation unit, 160kW electrical;heat pump, heat and power co-generation unit, 160kW electrical;RER;heat pump production, for heat and power co-generation unit, 160kW electrical;heat pump, for heat and power co-generation unit, 160kW electrical;RER +39;37;market for battery cell, Li-ion, NMC811;battery cell, Li-ion, NMC811;GLO;market for battery cell, Li-ion;battery cell, Li-ion;GLO \ No newline at end of file diff --git a/premise/data_collection.py b/premise/data_collection.py index ac7cbe55..e02e7f37 100644 --- a/premise/data_collection.py +++ b/premise/data_collection.py @@ -8,7 +8,7 @@ import csv import os from functools import lru_cache -from io import StringIO +from io import BytesIO, StringIO from itertools import chain from pathlib import Path from typing import Dict, List, Union @@ -717,6 +717,13 @@ def __get_iam_data( dataframe = dataframe.rename(columns={"variable": "variables"}) + # make a list of headers that are integer + + headers = [x for x in dataframe.columns if isinstance(x, int)] + + # convert the values in these columns to numeric + dataframe[headers] = dataframe[headers].apply(pd.to_numeric, errors="coerce") + array = ( dataframe.melt( id_vars=["region", "variables", "unit"], @@ -1087,9 +1094,11 @@ def get_external_data(self, datapackages): data[i] = {} resource = dp.get_resource("scenario_data") - scenario_data = resource.read() - scenario_headers = resource.headers - df = pd.DataFrame(scenario_data, columns=scenario_headers) + # getting scenario data in binary format + scenario_data = resource.raw_read() + df = pd.read_csv(BytesIO(scenario_data), encoding="latin1") + # set headers from first row + df.columns = resource.headers resource = dp.get_resource("config") config_file = yaml.safe_load(resource.raw_read()) diff --git a/premise/ecoinvent_modification.py b/premise/ecoinvent_modification.py index 10372d34..e2881d17 100644 --- a/premise/ecoinvent_modification.py +++ b/premise/ecoinvent_modification.py @@ -508,6 +508,7 @@ def __init__( keep_uncertainty_data=False, gains_scenario="CLE", use_absolute_efficiency=False, + use_multiprocessing=True, ) -> None: self.source = source_db self.version = check_db_version(source_version) @@ -515,6 +516,7 @@ def __init__( self.system_model = check_system_model(system_model) self.system_model_args = system_args self.use_absolute_efficiency = use_absolute_efficiency + self.multiprocessing = use_multiprocessing # if version is anything other than 3.8 or 3.9 # and system_model is "consequential" @@ -611,8 +613,12 @@ def _fetch_iam_data(scenario): scenario["database"] = copy.deepcopy(self.database) # use multiprocessing to speed up the process - with Pool(processes=multiprocessing.cpu_count()) as pool: - pool.map(_fetch_iam_data, self.scenarios) + if self.multiprocessing: + with Pool(processes=multiprocessing.cpu_count()) as pool: + pool.map(_fetch_iam_data, self.scenarios) + else: + for scenario in self.scenarios: + _fetch_iam_data(scenario) print("Done!") @@ 
-831,22 +837,32 @@ def update_electricity(self) -> None: print("\n/////////////////////////// ELECTRICITY ////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.modified_datasets, - self.use_absolute_efficiency, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_electricity, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.modified_datasets, + self.use_absolute_efficiency, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_electricity, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_electricity( + scenario=scenario, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + use_absolute_efficiency=self.use_absolute_efficiency, + ) print("Done!\n") @@ -860,21 +876,30 @@ def update_dac(self) -> None: print("\n//////////////////////// DIRECT AIR CAPTURE ////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_dac, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_dac, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_dac( + scenario=scenario, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -886,21 +911,30 @@ def update_fuels(self) -> None: print("\n////////////////////////////// FUELS ///////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_fuels, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_fuels, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, 
self.modified_datasets, _ = _update_fuels( + scenario=scenario, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -912,21 +946,30 @@ def update_cement(self) -> None: print("\n///////////////////////////// CEMENT //////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_cement, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_cement, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_cement( + scenario=scenario, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -938,21 +981,30 @@ def update_steel(self) -> None: print("\n////////////////////////////// STEEL //////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_steel, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_steel, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_steel( + scenario=scenario, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -964,22 +1016,32 @@ def update_cars(self) -> None: print("\n///////////////////////// PASSENGER CARS ///////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - "car", - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_vehicles, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + "car", + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_vehicles, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + 
self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_vehicles( + scenario=scenario, + vehicle_type="car", + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -991,22 +1053,32 @@ def update_two_wheelers(self) -> None: print("\n////////////////////////// TWO-WHEELERS ////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - "two wheeler", - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_vehicles, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + "two wheeler", + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_vehicles, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_vehicles( + scenario=scenario, + vehicle_type="two wheeler", + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -1030,12 +1102,22 @@ def update_trucks(self) -> None: ] # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - results = pool.starmap(_update_vehicles, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + results = pool.starmap(_update_vehicles, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = _update_vehicles( + scenario=scenario, + vehicle_type="truck", + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -1048,22 +1130,32 @@ def update_buses(self) -> None: print("\n////////////////////////////// BUSES ///////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - "bus", - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_vehicles, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + "bus", + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_vehicles, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets, _ = 
_update_vehicles( + scenario=scenario, + vehicle_type="bus", + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -1122,22 +1214,32 @@ def update_emissions(self) -> None: print("\n/////////////////////////// EMISSIONS //////////////////////////////") # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.gains_scenario, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_emissions, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.gains_scenario, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_emissions, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets = _update_emissions( + scenario=scenario, + version=self.version, + system_model=self.system_model, + gains_scenario=self.gains_scenario, + modified_datasets=self.modified_datasets, + ) print("Done!\n") @@ -1154,24 +1256,36 @@ def update_all(self) -> None: ) # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - self.version, - self.system_model, - self.modified_datasets, - self.use_absolute_efficiency, - "truck", - self.gains_scenario, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_update_all, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + self.version, + self.system_model, + self.modified_datasets, + self.use_absolute_efficiency, + "truck", + self.gains_scenario, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_update_all, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - self.modified_datasets.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + self.modified_datasets.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, self.modified_datasets = _update_all( + scenario=scenario, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + use_absolute_efficiency=self.use_absolute_efficiency, + vehicle_type="truck", + gains_scenario=self.gains_scenario, + ) self.update_external_scenario() @@ -1199,22 +1313,32 @@ def write_superstructure_db_to_brightway( cache = {} # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - cache, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_prepare_database, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + cache, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_prepare_database, args) - for s, scenario in 
enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - cache.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + cache.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, cache = _prepare_database( + scenario=scenario, + scenario_cache=cache, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) self.database = generate_superstructure_db( origin_db=self.database, @@ -1277,22 +1401,32 @@ def write_db_to_brightway(self, name: [str, List[str]] = None): cache = {} # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - cache, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_prepare_database, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + cache, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_prepare_database, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - cache.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + cache.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, cache = _prepare_database( + scenario=scenario, + scenario_cache=cache, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) for scen, scenario in enumerate(self.scenarios): write_brightway2_database( @@ -1345,29 +1479,42 @@ def write_db_to_matrices(self, filepath: str = None): # use multiprocessing to speed up the process # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - cache, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_prepare_database, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + cache, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_prepare_database, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - cache.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + cache.update(results[s][1]) - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - Export(scenario, filepath[scen], self.version) - for scen, scenario in enumerate(self.scenarios) - ] - pool.map(_export_to_matrices, args) + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + Export(scenario, filepath[scen], self.version) + for scen, scenario in enumerate(self.scenarios) + ] + pool.map(_export_to_matrices, args) + else: + for scenario in self.scenarios: + scenario, cache = _prepare_database( + scenario=scenario, + scenario_cache=cache, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) + + for scen, scenario in enumerate(self.scenarios): + Export(scenario, filepath[scen], self.version).export_db_to_matrices() # generate scenario report self.generate_scenario_report() @@ -1393,29 +1540,42 @@ def 
write_db_to_simapro(self, filepath: str = None): cache = {} # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - cache, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_prepare_database, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + cache, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_prepare_database, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - cache.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + cache.update(results[s][1]) - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - Export(scenario, filepath, self.version) - for scen, scenario in enumerate(self.scenarios) - ] - pool.map(_export_to_simapro, args) + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + Export(scenario, filepath, self.version) + for scen, scenario in enumerate(self.scenarios) + ] + pool.map(_export_to_simapro, args) + else: + for scenario in self.scenarios: + scenario, cache = _prepare_database( + scenario=scenario, + scenario_cache=cache, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) + + for scen, scenario in enumerate(self.scenarios): + Export(scenario, filepath, self.version).export_db_to_simapro() # generate scenario report self.generate_scenario_report() @@ -1431,22 +1591,32 @@ def write_datapackage(self, name: str = f"datapackage_{date.today()}"): cache = {} # use multiprocessing to speed up the process - with ProcessPool(processes=multiprocessing.cpu_count()) as pool: - args = [ - ( - scenario, - cache, - self.version, - self.system_model, - self.modified_datasets, - ) - for scenario in self.scenarios - ] - results = pool.starmap(_prepare_database, args) + if self.multiprocessing: + with ProcessPool(processes=multiprocessing.cpu_count()) as pool: + args = [ + ( + scenario, + cache, + self.version, + self.system_model, + self.modified_datasets, + ) + for scenario in self.scenarios + ] + results = pool.starmap(_prepare_database, args) - for s, scenario in enumerate(self.scenarios): - self.scenarios[s] = results[s][0] - cache.update(results[s][1]) + for s, scenario in enumerate(self.scenarios): + self.scenarios[s] = results[s][0] + cache.update(results[s][1]) + else: + for scenario in self.scenarios: + scenario, cache = _prepare_database( + scenario=scenario, + scenario_cache=cache, + version=self.version, + system_model=self.system_model, + modified_datasets=self.modified_datasets, + ) df, extra_inventories = generate_scenario_factor_file( origin_db=self.database, diff --git a/premise/electricity.py b/premise/electricity.py index 6b7103e3..ae0668e4 100644 --- a/premise/electricity.py +++ b/premise/electricity.py @@ -13,6 +13,7 @@ import re from collections import defaultdict from functools import lru_cache +from pprint import pprint import wurst import yaml @@ -255,7 +256,7 @@ def __init__( modified_datasets, cache, ) - mapping = InventorySet(self.database) + mapping = InventorySet(self.database, model=self.model) self.powerplant_map = mapping.generate_powerplant_map() # reverse dictionary of self.powerplant_map self.powerplant_map_rev = {} @@ -1622,9 +1623,13 @@ 
                 ):
                     exc["name"] = "market for biomass, used as fuel"
                     exc["product"] = "biomass, used as fuel"
-                    exc["location"] = self.ecoinvent_to_iam_loc[dataset["location"]]
 
-        mapping = InventorySet(self.database)
+                    if dataset["location"] in self.regions:
+                        exc["location"] = dataset["location"]
+                    else:
+                        exc["location"] = self.ecoinvent_to_iam_loc[dataset["location"]]
+
+        mapping = InventorySet(self.database, model=self.model)
         self.powerplant_fuels_map = mapping.generate_powerplant_fuels_map()
 
     def create_region_specific_power_plants(self):
@@ -1653,6 +1658,10 @@ def create_region_specific_power_plants(self):
             "Gas CHP CCS",
             "Gas CC CCS",
             "Oil CC CCS",
+            "Oil ST",
+            "Oil CC",
+            "Coal CF 80-20",
+            "Coal CF 50-50"
         ]
 
         list_datasets_to_duplicate = [
@@ -1761,7 +1770,7 @@ def update_electricity_efficiency(self) -> None:
 
         # print("Adjust efficiency of power plants...")
 
-        mapping = InventorySet(self.database)
+        mapping = InventorySet(self.database, model=self.model)
         self.fuel_map = mapping.generate_fuel_map()
         # reverse the fuel map to get a mapping from ecoinvent to premise
         self.fuel_map_reverse: Dict = {}
@@ -2021,7 +2030,7 @@ def create_missing_power_plant_datasets(self) -> None:
 
         self.database.extend(new_datasets.values())
 
-        mapping = InventorySet(self.database)
+        mapping = InventorySet(self.database, model=self.model)
         self.powerplant_map = mapping.generate_powerplant_map()
         # reverse dictionary of self.powerplant_map
         self.powerplant_map_rev = {}
diff --git a/premise/export.py b/premise/export.py
index 3b3b23e7..c38a8e4f 100644
--- a/premise/export.py
+++ b/premise/export.py
@@ -844,7 +844,7 @@ def generate_superstructure_db(
     :param db_name: the name of the new database
     :param filepath: the filepath of the new database
    :param version: the version of the new database
-    :param format: the format of the scenario difference file. Cna be "excel", "csv" or "feather".
+    :param format: the format of the scenario difference file. Can be "excel", "csv" or "feather".
     :return: a superstructure database
     """
diff --git a/premise/iam_variables_mapping/electricity_variables.yaml b/premise/iam_variables_mapping/electricity_variables.yaml
index fbadc5a9..fe29e2da 100644
--- a/premise/iam_variables_mapping/electricity_variables.yaml
+++ b/premise/iam_variables_mapping/electricity_variables.yaml
@@ -184,6 +184,7 @@ Coal PC:
       name:
         - mine
         - supercritical
+        - subcritical
   ecoinvent_fuel_aliases:
     fltr:
       - market for hard coal
diff --git a/premise/iam_variables_mapping/fuels_variables.yaml b/premise/iam_variables_mapping/fuels_variables.yaml
index 1765d922..a5a261d9 100644
--- a/premise/iam_variables_mapping/fuels_variables.yaml
+++ b/premise/iam_variables_mapping/fuels_variables.yaml
@@ -221,16 +221,10 @@ methane, fossil:
   lhv: 47.5
   co2: 0.058
   biogenic_share: 0
-  ecoinvent_aliases:
-    fltr:
-      - methane, fossil
 methane, biogenic:
   lhv: 47.5
   co2: 0.058
   biogenic_share: 1
-  ecoinvent_aliases:
-    fltr:
-      - methane, biogenic
 methane, synthetic:
   lhv: 47.5
   co2: 0.058
diff --git a/premise/iam_variables_mapping/mapping_overview.xlsx b/premise/iam_variables_mapping/mapping_overview.xlsx
index d5512913..8b935af0 100644
Binary files a/premise/iam_variables_mapping/mapping_overview.xlsx and b/premise/iam_variables_mapping/mapping_overview.xlsx differ
diff --git a/premise/inventory_imports.py b/premise/inventory_imports.py
index 069f677b..0049cd19 100644
--- a/premise/inventory_imports.py
+++ b/premise/inventory_imports.py
@@ -579,20 +579,34 @@ def lower_case_technosphere_exchanges(self) -> None:
 
         for ds in self.import_db.data:
             # lower case name and reference product
-            if not any([x in ds["name"] for x in blakclist]):
+            # only if they are not in the blacklist
+            # and if the first word is not an acronym
+            if (
+                not any([x in ds["name"] for x in blakclist])
+                and not ds["name"].split(" ")[0].isupper()
+            ):
                 ds["name"] = ds["name"][0].lower() + ds["name"][1:]
-            if not any([x in ds["reference product"] for x in blakclist]):
+            if (
+                not any([x in ds["reference product"] for x in blakclist])
+                and not ds["reference product"].split(" ")[0].isupper()
+            ):
                 ds["reference product"] = (
                     ds["reference product"][0].lower() + ds["reference product"][1:]
                 )
 
             for exc in ds["exchanges"]:
                 if exc["type"] in ["technosphere", "production"]:
-                    if not any([x in exc["name"] for x in blakclist]):
+                    if (
+                        not any([x in exc["name"] for x in blakclist])
+                        and not exc["name"].split(" ")[0].isupper()
+                    ):
                         exc["name"] = exc["name"][0].lower() + exc["name"][1:]
-                    if not any(
-                        [x in exc.get("reference product", "") for x in blakclist]
+                    if (
+                        not any(
+                            [x in exc.get("reference product", "") for x in blakclist]
+                        )
+                        and not exc.get("reference product", "").split(" ")[0].isupper()
                     ):
                         if exc.get("reference product") is not None:
                             exc["reference product"] = (
@@ -600,7 +614,10 @@ def lower_case_technosphere_exchanges(self) -> None:
                                 + exc["reference product"][1:]
                             )
 
-                    if not any([x in exc.get("product", "") for x in blakclist]):
+                    if (
+                        not any([x in exc.get("product", "") for x in blakclist])
+                        and not exc.get("product", "").split(" ")[0].isupper()
+                    ):
                         if exc.get("product") is not None:
                             exc["product"] = (
                                 exc["product"][0].lower() + exc["product"][1:]
diff --git a/premise/report.py b/premise/report.py
index dca1e044..dced5b6b 100644
--- a/premise/report.py
+++ b/premise/report.py
@@ -327,7 +327,7 @@ def generate_summary_report(scenarios: list, filename: Path) -> None:
 
         if scenario_idx > 0:
             col = last_col_used + metadata[sector]["offset"]
-            row += 2
+            row = 3
 
         worksheet.cell(
             column=col,
diff --git a/premise/utils.py b/premise/utils.py
index 56bb9115..4ec03d8e 100644
--- a/premise/utils.py
+++ b/premise/utils.py
@@ -261,6 +261,9 @@ def hide_messages():
     print("Keep uncertainty data?")
     print("NewDatabase(..., keep_uncertainty_data=True)")
     print("")
+    print("Disable multiprocessing?")
+    print("NewDatabase(..., use_multiprocessing=False)")
+    print("")
     print("Hide these messages?")
     print("NewDatabase(..., quiet=True)")
diff --git a/requirements.txt b/requirements.txt
index c2100521..b97502bf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 numpy
 pandas
-bw2io==0.8.7
+bw2io >=0.8.7, != 0.8.8
 bw2data
 wurst
 xarray
diff --git a/setup.py b/setup.py
index 762a7da8..f5f19d68 100644
--- a/setup.py
+++ b/setup.py
@@ -32,7 +32,7 @@ def package_files(directory):
 
 setup(
     name="premise",
-    version="1.7.0",
+    version="1.7.1",
     python_requires=">=3.9,<3.11",
     packages=packages,
     author="Romain Sacchi , Alois Dirnaichner , Chris Mutel "
@@ -43,7 +43,8 @@ def package_files(directory):
     install_requires=[
         "numpy",
         "wurst",
-        "bw2io==0.8.7",
+        # bw2io 0.8.7 or 0.8.10
+        "bw2io >=0.8.7, != 0.8.8",
         "pandas",
         "bw2data",
         "xarray",
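
A minimal usage sketch of the new use_multiprocessing switch (illustrative only, not part of the diff above): the scenario definition, source database name and output locations are placeholder assumptions, and a decryption key may additionally be needed when relying on the bundled IAM scenario files.

# Illustrative sketch, not part of the patch.
from premise import NewDatabase

ndb = NewDatabase(
    scenarios=[
        # placeholder scenario; any supported model/pathway/year combination works
        {"model": "remind", "pathway": "SSP2-Base", "year": 2030},
    ],
    source_db="ecoinvent 3.8 cutoff",  # placeholder name of the local ecoinvent database
    source_version="3.8",
    use_multiprocessing=False,  # new switch: prepare and export scenarios serially
)

# Both export methods touched by this patch check self.multiprocessing and,
# when it is False, fall back to a plain loop over self.scenarios.
ndb.write_db_to_simapro(filepath="./export/simapro")  # placeholder output folder
ndb.write_datapackage(name="example_datapackage")  # placeholder datapackage name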