diff --git a/dev/Untitled1.ipynb b/dev/Untitled1.ipynb
index c7ddfd8e..0b8a1a0f 100644
--- a/dev/Untitled1.ipynb
+++ b/dev/Untitled1.ipynb
@@ -2,31 +2,22 @@
"cells": [
{
"cell_type": "code",
- "execution_count": 5,
+ "execution_count": 1,
"id": "5018b7ee-0169-49d7-9455-2f1aea562e9e",
"metadata": {},
"outputs": [],
"source": [
"from premise import *\n",
"from datapackage import Package\n",
- "import bw2io as bw"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "id": "0ca27c06-948c-4584-bebe-d505cb72d4f9",
- "metadata": {},
- "outputs": [],
- "source": [
+ "import bw2io as bw\n",
"import bw2data\n",
"bw2data.projects.set_current(\"ei310\")"
]
},
{
"cell_type": "code",
- "execution_count": 7,
- "id": "e408d76b-8288-4661-9089-f8c3f6191743",
+ "execution_count": 2,
+ "id": "a7e06058-5dac-4823-9666-356705977382",
"metadata": {},
"outputs": [
{
@@ -43,7 +34,7 @@
},
{
"cell_type": "code",
- "execution_count": 8,
+ "execution_count": 2,
"id": "bee86950-ac96-49e0-8a9c-43920ae26096",
"metadata": {},
"outputs": [
@@ -51,7 +42,7 @@
"name": "stdout",
"output_type": "stream",
"text": [
- "premise v.(2, 1, 1, 'dev6')\n",
+ "premise v.(2, 1, 1, 'dev8')\n",
"+------------------------------------------------------------------+\n",
"| Warning |\n",
"+------------------------------------------------------------------+\n",
@@ -87,507 +78,6 @@
"NewDatabase(..., quiet=True)\n",
"- Extracting source database\n",
"- Extracting inventories\n",
- "Cannot find cached inventories. Will create them now for next time...\n",
- "Importing default inventories...\n",
- "\n",
- "Extracted 1 worksheets in 0.09 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 4 worksheets in 0.17 seconds\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 7 worksheets in 0.03 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.03 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.03 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.03 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 2 worksheets in 0.02 seconds\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.32 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "The following datasets to import already exist in the source database. They will not be imported\n",
- "+--------------------------------+--------------------------------+----------+-------------+\n",
- "| Name | Reference product | Location | File |\n",
- "+--------------------------------+--------------------------------+----------+-------------+\n",
- "| fluorspar production, 97% puri | fluorspar, 97% purity | GLO | lci-PV.xlsx |\n",
- "| metallization paste production | metallization paste, back side | RER | lci-PV.xlsx |\n",
- "| metallization paste production | metallization paste, back side | RER | lci-PV.xlsx |\n",
- "| metallization paste production | metallization paste, front sid | RER | lci-PV.xlsx |\n",
- "| photovoltaic module production | photovoltaic module, building- | RER | lci-PV.xlsx |\n",
- "| photovoltaic module production | photovoltaic module, building- | RER | lci-PV.xlsx |\n",
- "| photovoltaic mounting system p | photovoltaic mounting system, | RER | lci-PV.xlsx |\n",
- "| photovoltaic mounting system p | photovoltaic mounting system, | RER | lci-PV.xlsx |\n",
- "| photovoltaic mounting system p | photovoltaic mounting system, | RER | lci-PV.xlsx |\n",
- "| photovoltaic panel factory con | photovoltaic panel factory | GLO | lci-PV.xlsx |\n",
- "| polyvinylfluoride production | polyvinylfluoride | US | lci-PV.xlsx |\n",
- "| polyvinylfluoride production, | polyvinylfluoride, dispersion | US | lci-PV.xlsx |\n",
- "| polyvinylfluoride, film produc | polyvinylfluoride, film | US | lci-PV.xlsx |\n",
- "| silicon production, metallurgi | silicon, metallurgical grade | NO | lci-PV.xlsx |\n",
- "| vinyl fluoride production | vinyl fluoride | US | lci-PV.xlsx |\n",
- "| wafer factory construction | wafer factory | DE | lci-PV.xlsx |\n",
- "+--------------------------------+--------------------------------+----------+-------------+\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.04 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "The following datasets to import already exist in the source database. They will not be imported\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| Name | Reference product | Location | File |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| carbon dioxide, captured at ce | carbon dioxide, captured and r | RER | lci-synfuels-from-methanol-fro |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "The following datasets to import already exist in the source database. They will not be imported\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| Name | Reference product | Location | File |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| hydrogen production, coal gasi | hydrogen, gaseous, low pressur | RoW | lci-hydrogen-coal-gasification |\n",
- "| methanol production, coal gasi | methanol | RoW | lci-hydrogen-coal-gasification |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "The following datasets to import already exist in the source database. They will not be imported\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| Name | Reference product | Location | File |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| hydrogen production, steam met | hydrogen, gaseous, low pressur | RER | lci-hydrogen-smr-atr-natgas.xl |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "The following datasets to import already exist in the source database. They will not be imported\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| Name | Reference product | Location | File |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "| methanol production facility, | methanol production facility, | RER | lci-synfuels-from-methanol-fro |\n",
- "+--------------------------------+--------------------------------+----------+--------------------------------+\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 5 worksheets in 0.14 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.03 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.07 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.04 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.01 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 19 worksheets in 0.30 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.21 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.16 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 1.57 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.03 seconds\n",
- "Migrating to 3.8 first\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Applying strategy: migrate_datasets\n",
- "Applying strategy: migrate_exchanges\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Extracted 1 worksheets in 0.02 seconds\n",
- "Data cached. It is advised to restart your workflow at this point.\n",
- "This allows premise to use the cached data instead, which results in\n",
- "a faster workflow.\n",
"- Fetching IAM data\n",
"Done!\n"
]
@@ -599,7 +89,7 @@
" #{\"model\":\"remind\", \"pathway\":\"SSP2-Base\", \"year\":2050},\n",
" #{\"model\":\"remind\", \"pathway\":\"SSP2-Base\", \"year\":2010},\n",
" #{\"model\":\"image\", \"pathway\":\"SSP2-Base\", \"year\":2020},\n",
- " #{\"model\":\"image\", \"pathway\":\"SSP2-Base\", \"year\":2050},\n",
+ " {\"model\":\"image\", \"pathway\":\"SSP2-Base\", \"year\":2050},\n",
" {\"model\":\"remind\", \"pathway\":\"SSP2-Base\", \"year\":2050},\n",
" #{\"model\":\"image\", \"pathway\":\"SSP2-Base\", \"year\":2040},\n",
" #{\"model\":\"image\", \"pathway\":\"SSP2-Base\", \"year\":2050},\n",
@@ -619,7 +109,7 @@
},
{
"cell_type": "code",
- "execution_count": null,
+ "execution_count": 3,
"id": "0c80994c-cbac-4143-81ee-1de1531a6f95",
"metadata": {},
"outputs": [
@@ -627,36 +117,69 @@
"name": "stderr",
"output_type": "stream",
"text": [
- "Processing scenarios: 0%| | 0/1 [00:00, ?it/s]"
+ "Processing scenarios: 0%| | 0/2 [00:00, ?it/s]"
]
- }
- ],
- "source": [
- "ndb.update()"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 5,
- "id": "c790e095-ff02-4a19-ad26-a03089563d69",
- "metadata": {},
- "outputs": [
+ },
{
"name": "stdout",
"output_type": "stream",
"text": [
- "Generate change report.\n",
- "Report saved under /Users/romain/GitHub/premise/dev.\n"
+ "Warning: No activities found for CON_LOSSHF -- revise mapping.\n",
+ "Warning: No activities found for PP_ENGGAS -- revise mapping.\n",
+ "Warning: No activities found for PP_MOD_CCSBIOG -- revise mapping.\n",
+ "Warning: No activities found for PP_MOD_CCSHC1 -- revise mapping.\n",
+ "Warning: No activities found for TRA_OT_AIRGSL -- revise mapping.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Processing scenarios: 50%|██████ | 1/2 [02:24<02:24, 144.65s/it]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Warning: No activities found for Gas ST -- revise mapping.\n",
+ "Warning: No activities found for CON_LOSSHF -- revise mapping.\n",
+ "Warning: No activities found for PP_ENGGAS -- revise mapping.\n",
+ "Warning: No activities found for PP_MOD_CCSBIOG -- revise mapping.\n",
+ "Warning: No activities found for PP_MOD_CCSHC1 -- revise mapping.\n",
+ "Warning: No activities found for TRA_OT_AIRGSL -- revise mapping.\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Processing scenarios: 100%|████████████| 2/2 [04:14<00:00, 127.22s/it]"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Done!\n",
+ "\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "\n"
]
}
],
"source": [
- "ndb.generate_change_report()"
+ "ndb.update()"
]
},
{
"cell_type": "code",
- "execution_count": 10,
+ "execution_count": 4,
"id": "5e8438a5-44d8-46f7-8fda-a85a35c52912",
"metadata": {},
"outputs": [
@@ -675,7 +198,7 @@
"text": [
"Writing activities to SQLite3 database:\n",
"0% [##############################] 100% | ETA: 00:00:00\n",
- "Total time elapsed: 00:00:28\n"
+ "Total time elapsed: 00:00:34\n"
]
},
{
@@ -683,12 +206,12 @@
"output_type": "stream",
"text": [
"Title: Writing activities to SQLite3 database:\n",
- " Started: 08/04/2024 20:48:38\n",
- " Finished: 08/04/2024 20:49:06\n",
- " Total time elapsed: 00:00:28\n",
- " CPU %: 87.60\n",
- " Memory %: 16.15\n",
- "Created database: conseq 1\n",
+ " Started: 08/18/2024 12:28:18\n",
+ " Finished: 08/18/2024 12:28:53\n",
+ " Total time elapsed: 00:00:34\n",
+ " CPU %: 85.40\n",
+ " Memory %: 15.17\n",
+ "Created database: test h2 5\n",
"Running all checks...\n",
"Minor anomalies found: check the change report.\n"
]
@@ -699,7 +222,7 @@
"text": [
"Writing activities to SQLite3 database:\n",
"0% [##############################] 100% | ETA: 00:00:00\n",
- "Total time elapsed: 00:00:19\n"
+ "Total time elapsed: 00:00:22\n"
]
},
{
@@ -707,24 +230,940 @@
"output_type": "stream",
"text": [
"Title: Writing activities to SQLite3 database:\n",
- " Started: 08/04/2024 20:55:15\n",
- " Finished: 08/04/2024 20:55:34\n",
- " Total time elapsed: 00:00:19\n",
- " CPU %: 90.70\n",
- " Memory %: 11.09\n",
- "Created database: conseq 2\n",
+ " Started: 08/18/2024 12:35:55\n",
+ " Finished: 08/18/2024 12:36:18\n",
+ " Total time elapsed: 00:00:22\n",
+ " CPU %: 91.70\n",
+ " Memory %: 10.18\n",
+ "Created database: test h2 6\n",
"Generate scenario report.\n",
"Report saved under /Users/romain/GitHub/premise/dev/export/scenario_report.\n",
"Generate change report.\n",
- "ValueError: column names ['timestamp', 'module', 'level', 'status', 'model', 'pathway', 'year', 'dataset', 'region', 'battery input', 'old battery mass', 'new battery mass', 'NMC111 market share', 'NMC532 market share', 'NMC622 market share', 'NMC811 market share', 'NMC900-Si market share', 'LFP market share', 'NCA market share', 'LAB market share', 'LSB market share', 'SIB market share'] not found in /Users/romain/GitHub/premise/dev/export/logs/premise_battery.log.Instead, found: Int64Index([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,\n",
- " 17, 18, 19, 20, 21, 22, 23],\n",
- " dtype='int64')\n",
"Report saved under /Users/romain/GitHub/premise/dev.\n"
]
}
],
"source": [
- "ndb.write_db_to_brightway([\"conseq 1\", \"conseq 2\"])"
+ "ndb.write_db_to_brightway([\"test h2 5\", \"test h2 6\"])"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "c87ff6f7-8f43-48a0-b6cd-7615035d616b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from premise.electricity import get_efficiency_solar_photovoltaics"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "85fccdfb-2774-486d-8b0d-9c441508c237",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "arr = get_efficiency_solar_photovoltaics()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "54028544-81f2-4159-beb8-8191fdd23be3",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "
\n",
+ "
<xarray.DataArray 'efficiency' (year: 4, technology: 8, efficiency_type: 3)> Size: 768B\n",
+ "array([[[0.1375 , 0.11 , 0.0825 ],\n",
+ " [0.1375 , 0.11 , 0.0825 ],\n",
+ " [0.125 , 0.1 , 0.075 ],\n",
+ " [0.35 , 0.28 , 0.21 ],\n",
+ " [0.125 , 0.1 , 0.075 ],\n",
+ " [0.175 , 0.14 , 0.105 ],\n",
+ " [0.3125 , 0.25 , 0.1875 ],\n",
+ " [0.18875, 0.15 , 0.11325]],\n",
+ "\n",
+ " [[0.175 , 0.14 , 0.105 ],\n",
+ " [0.175 , 0.14 , 0.105 ],\n",
+ " [0.21 , 0.17 , 0.126 ],\n",
+ " [0.35 , 0.28 , 0.21 ],\n",
+ " [0.14875, 0.12 , 0.08925],\n",
+ " [0.21 , 0.17 , 0.126 ],\n",
+ " [0.3125 , 0.25 , 0.1875 ],\n",
+ " [0.22375, 0.18 , 0.13425]],\n",
+ "\n",
+ " [[0.19 , 0.15 , 0.11 ],\n",
+ " [0.18675, 0.149 , 0.11205],\n",
+ " [0.2 , 0.19 , 0.17 ],\n",
+ " [0.343 , 0.28 , 0.214 ],\n",
+ " [0.1495 , 0.121 , 0.0897 ],\n",
+ " [0.2195 , 0.177 , 0.1317 ],\n",
+ " [0.3125 , 0.2502 , 0.19075],\n",
+ " [0.235 , 0.22 , 0.174 ]],\n",
+ "\n",
+ " [[0.2925 , 0.23 , 0.1755 ],\n",
+ " [0.2925 , 0.23 , 0.1755 ],\n",
+ " [0.25 , 0.226 , 0.22 ],\n",
+ " [0.28 , 0.28 , 0.25 ],\n",
+ " [0.15625, 0.13 , 0.09375],\n",
+ " [0.305 , 0.24 , 0.183 ],\n",
+ " [0.3125 , 0.252 , 0.22 ],\n",
+ " [0.34125, 0.27 , 0.20475]]])\n",
+ "Coordinates:\n",
+ " * year (year) int64 32B 2010 2020 2023 2050\n",
+ " * technology (technology) object 64B 'CIGS' 'CIS' ... 'single-Si'\n",
+ " * efficiency_type (efficiency_type) object 24B 'max efficiency' ... 'min e...
0.1375 0.11 0.0825 0.1375 0.11 ... 0.252 0.22 0.3412 0.27 0.2047
array([[[0.1375 , 0.11 , 0.0825 ],\n",
+ " [0.1375 , 0.11 , 0.0825 ],\n",
+ " [0.125 , 0.1 , 0.075 ],\n",
+ " [0.35 , 0.28 , 0.21 ],\n",
+ " [0.125 , 0.1 , 0.075 ],\n",
+ " [0.175 , 0.14 , 0.105 ],\n",
+ " [0.3125 , 0.25 , 0.1875 ],\n",
+ " [0.18875, 0.15 , 0.11325]],\n",
+ "\n",
+ " [[0.175 , 0.14 , 0.105 ],\n",
+ " [0.175 , 0.14 , 0.105 ],\n",
+ " [0.21 , 0.17 , 0.126 ],\n",
+ " [0.35 , 0.28 , 0.21 ],\n",
+ " [0.14875, 0.12 , 0.08925],\n",
+ " [0.21 , 0.17 , 0.126 ],\n",
+ " [0.3125 , 0.25 , 0.1875 ],\n",
+ " [0.22375, 0.18 , 0.13425]],\n",
+ "\n",
+ " [[0.19 , 0.15 , 0.11 ],\n",
+ " [0.18675, 0.149 , 0.11205],\n",
+ " [0.2 , 0.19 , 0.17 ],\n",
+ " [0.343 , 0.28 , 0.214 ],\n",
+ " [0.1495 , 0.121 , 0.0897 ],\n",
+ " [0.2195 , 0.177 , 0.1317 ],\n",
+ " [0.3125 , 0.2502 , 0.19075],\n",
+ " [0.235 , 0.22 , 0.174 ]],\n",
+ "\n",
+ " [[0.2925 , 0.23 , 0.1755 ],\n",
+ " [0.2925 , 0.23 , 0.1755 ],\n",
+ " [0.25 , 0.226 , 0.22 ],\n",
+ " [0.28 , 0.28 , 0.25 ],\n",
+ " [0.15625, 0.13 , 0.09375],\n",
+ " [0.305 , 0.24 , 0.183 ],\n",
+ " [0.3125 , 0.252 , 0.22 ],\n",
+ " [0.34125, 0.27 , 0.20475]]])
PandasIndex
PandasIndex(Int64Index([2010, 2020, 2023, 2050], dtype='int64', name='year'))
PandasIndex
PandasIndex(Index(['CIGS', 'CIS', 'CdTe', 'GaAs', 'micro-Si', 'multi-Si', 'perovskite',\n",
+ " 'single-Si'],\n",
+ " dtype='object', name='technology'))
PandasIndex
PandasIndex(Index(['max efficiency', 'mean efficiency', 'min efficiency'], dtype='object', name='efficiency_type'))
"
+ ],
+ "text/plain": [
+ " Size: 768B\n",
+ "array([[[0.1375 , 0.11 , 0.0825 ],\n",
+ " [0.1375 , 0.11 , 0.0825 ],\n",
+ " [0.125 , 0.1 , 0.075 ],\n",
+ " [0.35 , 0.28 , 0.21 ],\n",
+ " [0.125 , 0.1 , 0.075 ],\n",
+ " [0.175 , 0.14 , 0.105 ],\n",
+ " [0.3125 , 0.25 , 0.1875 ],\n",
+ " [0.18875, 0.15 , 0.11325]],\n",
+ "\n",
+ " [[0.175 , 0.14 , 0.105 ],\n",
+ " [0.175 , 0.14 , 0.105 ],\n",
+ " [0.21 , 0.17 , 0.126 ],\n",
+ " [0.35 , 0.28 , 0.21 ],\n",
+ " [0.14875, 0.12 , 0.08925],\n",
+ " [0.21 , 0.17 , 0.126 ],\n",
+ " [0.3125 , 0.25 , 0.1875 ],\n",
+ " [0.22375, 0.18 , 0.13425]],\n",
+ "\n",
+ " [[0.19 , 0.15 , 0.11 ],\n",
+ " [0.18675, 0.149 , 0.11205],\n",
+ " [0.2 , 0.19 , 0.17 ],\n",
+ " [0.343 , 0.28 , 0.214 ],\n",
+ " [0.1495 , 0.121 , 0.0897 ],\n",
+ " [0.2195 , 0.177 , 0.1317 ],\n",
+ " [0.3125 , 0.2502 , 0.19075],\n",
+ " [0.235 , 0.22 , 0.174 ]],\n",
+ "\n",
+ " [[0.2925 , 0.23 , 0.1755 ],\n",
+ " [0.2925 , 0.23 , 0.1755 ],\n",
+ " [0.25 , 0.226 , 0.22 ],\n",
+ " [0.28 , 0.28 , 0.25 ],\n",
+ " [0.15625, 0.13 , 0.09375],\n",
+ " [0.305 , 0.24 , 0.183 ],\n",
+ " [0.3125 , 0.252 , 0.22 ],\n",
+ " [0.34125, 0.27 , 0.20475]]])\n",
+ "Coordinates:\n",
+ " * year (year) int64 32B 2010 2020 2023 2050\n",
+ " * technology (technology) object 64B 'CIGS' 'CIS' ... 'single-Si'\n",
+ " * efficiency_type (efficiency_type) object 24B 'max efficiency' ... 'min e..."
+ ]
+ },
+ "execution_count": 3,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "arr"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "76069a0c-6bf1-49ed-a57b-e042026aa2b4",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/html": [
+ "\n",
+ "
<xarray.DataArray 'efficiency' ()> Size: 8B\n",
+ "array(0.1375)\n",
+ "Coordinates:\n",
+ " year int64 8B 2010\n",
+ " technology <U4 16B 'CIGS'\n",
+ " efficiency_type <U14 56B 'max efficiency'
"
+ ],
+ "text/plain": [
+ " Size: 8B\n",
+ "array(0.1375)\n",
+ "Coordinates:\n",
+ " year int64 8B 2010\n",
+ " technology 3\u001B[0m scenario \u001B[38;5;241m=\u001B[39m \u001B[43mload_database\u001B[49m\u001B[43m(\u001B[49m\u001B[43mscenario\u001B[49m\u001B[43m)\u001B[49m\n",
- "File \u001B[0;32m~/GitHub/premise/premise/utils.py:388\u001B[0m, in \u001B[0;36mload_database\u001B[0;34m(scenario)\u001B[0m\n\u001B[1;32m 385\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m scenario\u001B[38;5;241m.\u001B[39mget(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mdatabase\u001B[39m\u001B[38;5;124m\"\u001B[39m) \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[1;32m 386\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m scenario\n\u001B[0;32m--> 388\u001B[0m filepath \u001B[38;5;241m=\u001B[39m \u001B[43mscenario\u001B[49m\u001B[43m[\u001B[49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[38;5;124;43mdatabase filepath\u001B[39;49m\u001B[38;5;124;43m\"\u001B[39;49m\u001B[43m]\u001B[49m\n\u001B[1;32m 390\u001B[0m \u001B[38;5;66;03m# load pickle\u001B[39;00m\n\u001B[1;32m 391\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28mopen\u001B[39m(filepath, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mrb\u001B[39m\u001B[38;5;124m\"\u001B[39m) \u001B[38;5;28;01mas\u001B[39;00m f:\n",
- "\u001B[0;31mKeyError\u001B[0m: 'database filepath'"
+ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[0;31mKeyError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[0;32mIn[12], line 3\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[38;5;28;01mfrom\u001b[39;00m \u001b[38;5;21;01mpremise\u001b[39;00m\u001b[38;5;21;01m.\u001b[39;00m\u001b[38;5;21;01mutils\u001b[39;00m \u001b[38;5;28;01mimport\u001b[39;00m load_database\n\u001b[1;32m 2\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m scenario \u001b[38;5;129;01min\u001b[39;00m ndb\u001b[38;5;241m.\u001b[39mscenarios:\n\u001b[0;32m----> 3\u001b[0m scenario \u001b[38;5;241m=\u001b[39m \u001b[43mload_database\u001b[49m\u001b[43m(\u001b[49m\u001b[43mscenario\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[0;32m~/GitHub/premise/premise/utils.py:388\u001b[0m, in \u001b[0;36mload_database\u001b[0;34m(scenario)\u001b[0m\n\u001b[1;32m 385\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m scenario\u001b[38;5;241m.\u001b[39mget(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mdatabase\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 386\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m scenario\n\u001b[0;32m--> 388\u001b[0m filepath \u001b[38;5;241m=\u001b[39m \u001b[43mscenario\u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mdatabase filepath\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\n\u001b[1;32m 390\u001b[0m \u001b[38;5;66;03m# load pickle\u001b[39;00m\n\u001b[1;32m 391\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28mopen\u001b[39m(filepath, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mrb\u001b[39m\u001b[38;5;124m\"\u001b[39m) \u001b[38;5;28;01mas\u001b[39;00m f:\n",
+ "\u001b[0;31mKeyError\u001b[0m: 'database filepath'"
]
}
],
@@ -1883,13 +2322,13 @@
"evalue": "",
"output_type": "error",
"traceback": [
- "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
- "\u001B[1;31mKeyboardInterrupt\u001B[0m Traceback (most recent call last)",
- "\u001B[1;32m~\\AppData\\Local\\Temp/ipykernel_20348/44398783.py\u001B[0m in \u001B[0;36m\u001B[1;34m\u001B[0m\n\u001B[0;32m 18\u001B[0m \u001B[0mprint\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;34m\"1\"\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mdataset\u001B[0m\u001B[1;33m[\u001B[0m\u001B[1;34m\"name\"\u001B[0m\u001B[1;33m]\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mi\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mj\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mtype\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mj\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mfloat\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mj\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 19\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m---> 20\u001B[1;33m \u001B[1;32mfor\u001B[0m \u001B[0me\u001B[0m \u001B[1;32min\u001B[0m \u001B[0mdataset\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mexchanges\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m 21\u001B[0m \u001B[1;32mfor\u001B[0m \u001B[0mk\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mv\u001B[0m \u001B[1;32min\u001B[0m \u001B[0me\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mitems\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 22\u001B[0m \u001B[1;32mif\u001B[0m \u001B[0misinstance\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mv\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mnp\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mfloat64\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
- "\u001B[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\main_env\\lib\\site-packages\\bw2data\\backends\\peewee\\proxies.py\u001B[0m in \u001B[0;36m__iter__\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m 74\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 75\u001B[0m \u001B[1;32mdef\u001B[0m \u001B[0m__iter__\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m---> 76\u001B[1;33m \u001B[1;32mfor\u001B[0m \u001B[0mobj\u001B[0m \u001B[1;32min\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0m_get_queryset\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m 77\u001B[0m \u001B[1;32myield\u001B[0m \u001B[0mExchange\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mobj\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 78\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n",
- "\u001B[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\main_env\\lib\\site-packages\\peewee.py\u001B[0m in \u001B[0;36mnext\u001B[1;34m(self)\u001B[0m\n\u001B[0;32m 4376\u001B[0m \u001B[0mobj\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mcursor_wrapper\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mrow_cache\u001B[0m\u001B[1;33m[\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mindex\u001B[0m\u001B[1;33m]\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 4377\u001B[0m \u001B[1;32melif\u001B[0m \u001B[1;32mnot\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mcursor_wrapper\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mpopulated\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m-> 4378\u001B[1;33m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mcursor_wrapper\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0miterate\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m 4379\u001B[0m \u001B[0mobj\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mcursor_wrapper\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mrow_cache\u001B[0m\u001B[1;33m[\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mindex\u001B[0m\u001B[1;33m]\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 4380\u001B[0m \u001B[1;32melse\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
- "\u001B[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\main_env\\lib\\site-packages\\peewee.py\u001B[0m in \u001B[0;36miterate\u001B[1;34m(self, cache)\u001B[0m\n\u001B[0;32m 4286\u001B[0m \u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 4287\u001B[0m \u001B[1;32mdef\u001B[0m \u001B[0miterate\u001B[0m\u001B[1;33m(\u001B[0m\u001B[0mself\u001B[0m\u001B[1;33m,\u001B[0m \u001B[0mcache\u001B[0m\u001B[1;33m=\u001B[0m\u001B[1;32mTrue\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[1;32m-> 4288\u001B[1;33m \u001B[0mrow\u001B[0m \u001B[1;33m=\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mcursor\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mfetchone\u001B[0m\u001B[1;33m(\u001B[0m\u001B[1;33m)\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0m\u001B[0;32m 4289\u001B[0m \u001B[1;32mif\u001B[0m \u001B[0mrow\u001B[0m \u001B[1;32mis\u001B[0m \u001B[1;32mNone\u001B[0m\u001B[1;33m:\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n\u001B[0;32m 4290\u001B[0m \u001B[0mself\u001B[0m\u001B[1;33m.\u001B[0m\u001B[0mpopulated\u001B[0m \u001B[1;33m=\u001B[0m \u001B[1;32mTrue\u001B[0m\u001B[1;33m\u001B[0m\u001B[1;33m\u001B[0m\u001B[0m\n",
- "\u001B[1;31mKeyboardInterrupt\u001B[0m: "
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)",
+ "\u001b[1;32m~\\AppData\\Local\\Temp/ipykernel_20348/44398783.py\u001b[0m in \u001b[0;36m\u001b[1;34m\u001b[0m\n\u001b[0;32m 18\u001b[0m \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"1\"\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdataset\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m\"name\"\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mi\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mj\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mtype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mfloat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 19\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 20\u001b[1;33m \u001b[1;32mfor\u001b[0m \u001b[0me\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mdataset\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mexchanges\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 21\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mv\u001b[0m \u001b[1;32min\u001b[0m \u001b[0me\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 22\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mv\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfloat64\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\main_env\\lib\\site-packages\\bw2data\\backends\\peewee\\proxies.py\u001b[0m in \u001b[0;36m__iter__\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 74\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 75\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0m__iter__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 76\u001b[1;33m \u001b[1;32mfor\u001b[0m \u001b[0mobj\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m_get_queryset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 77\u001b[0m \u001b[1;32myield\u001b[0m \u001b[0mExchange\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 78\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\main_env\\lib\\site-packages\\peewee.py\u001b[0m in \u001b[0;36mnext\u001b[1;34m(self)\u001b[0m\n\u001b[0;32m 4376\u001b[0m \u001b[0mobj\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcursor_wrapper\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrow_cache\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4377\u001b[0m \u001b[1;32melif\u001b[0m \u001b[1;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcursor_wrapper\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpopulated\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 4378\u001b[1;33m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcursor_wrapper\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0miterate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4379\u001b[0m \u001b[0mobj\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcursor_wrapper\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mrow_cache\u001b[0m\u001b[1;33m[\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mindex\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4380\u001b[0m \u001b[1;32melse\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;32m~\\AppData\\Local\\Continuum\\miniconda3\\envs\\main_env\\lib\\site-packages\\peewee.py\u001b[0m in \u001b[0;36miterate\u001b[1;34m(self, cache)\u001b[0m\n\u001b[0;32m 4286\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4287\u001b[0m \u001b[1;32mdef\u001b[0m \u001b[0miterate\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mcache\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m-> 4288\u001b[1;33m \u001b[0mrow\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcursor\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfetchone\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m 4289\u001b[0m \u001b[1;32mif\u001b[0m \u001b[0mrow\u001b[0m \u001b[1;32mis\u001b[0m \u001b[1;32mNone\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m 4290\u001b[0m \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpopulated\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
+ "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
]
}
],
@@ -3541,7 +3980,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
- "version": "3.10.13"
+ "version": "3.11.8"
}
},
"nbformat": 4,
diff --git a/premise/data/additional_inventories/lci-PV-GaAs.xlsx b/premise/data/additional_inventories/lci-PV-GaAs.xlsx
index 5f805384..3a304ee2 100644
Binary files a/premise/data/additional_inventories/lci-PV-GaAs.xlsx and b/premise/data/additional_inventories/lci-PV-GaAs.xlsx differ
diff --git a/premise/data/additional_inventories/lci-PV-perovskite.xlsx b/premise/data/additional_inventories/lci-PV-perovskite.xlsx
index 02db0dd9..ebf92a91 100644
Binary files a/premise/data/additional_inventories/lci-PV-perovskite.xlsx and b/premise/data/additional_inventories/lci-PV-perovskite.xlsx differ
diff --git a/premise/data/additional_inventories/lci-hydrogen-electrolysis.xlsx b/premise/data/additional_inventories/lci-hydrogen-electrolysis.xlsx
index 8c068c87..6a64394f 100644
Binary files a/premise/data/additional_inventories/lci-hydrogen-electrolysis.xlsx and b/premise/data/additional_inventories/lci-hydrogen-electrolysis.xlsx differ
diff --git a/premise/data/additional_inventories/lci-hydrogen-thermochemical-water-splitting.xlsx b/premise/data/additional_inventories/lci-hydrogen-thermochemical-water-splitting.xlsx
index c48c9f47..0a1cee59 100644
Binary files a/premise/data/additional_inventories/lci-hydrogen-thermochemical-water-splitting.xlsx and b/premise/data/additional_inventories/lci-hydrogen-thermochemical-water-splitting.xlsx differ
diff --git a/premise/data/fuels/fuel_groups.yaml b/premise/data/fuels/fuel_groups.yaml
index 0e440939..458dc10b 100644
--- a/premise/data/fuels/fuel_groups.yaml
+++ b/premise/data/fuels/fuel_groups.yaml
@@ -36,7 +36,7 @@ gas:
hydrogen:
- hydrogen, from petroleum
- - hydrogen, from electrolysis
+ - hydrogen, from electrolysis, PEM
- hydrogen, from solar
- hydrogen, from pyrolysis
- hydrogen, from biomass
diff --git a/premise/data/fuels/hydrogen_efficiency_parameters.yml b/premise/data/fuels/hydrogen_efficiency_parameters.yml
index 451bcb23..d4269255 100644
--- a/premise/data/fuels/hydrogen_efficiency_parameters.yml
+++ b/premise/data/fuels/hydrogen_efficiency_parameters.yml
@@ -1,6 +1,4 @@
-from electrolysis (PEM):
- name: hydrogen production, gaseous, 30 bar, from PEM electrolysis, from grid electricity
- var: hydrogen, from electrolysis
+hydrogen, from electrolysis, PEM:
feedstock name: electricity, low voltage
feedstock unit: kilowatt hour
efficiency:
@@ -14,63 +12,80 @@ from electrolysis (PEM):
maximum: 52.5
floor value: 45.3
-from SMR of biogas, with CCS:
- name: hydrogen production, steam methane reforming, from biomethane, with CCS
- var: hydrogen, from biogas, with CCS
+hydrogen, from electrolysis, AEC:
+ feedstock name: electricity, low voltage
+ feedstock unit: kilowatt hour
+ efficiency:
+ 2020:
+ mean: 51.8
+ minimum: 48.7
+ maximum: 54.9
+ 2050:
+ mean: 48.5
+ minimum: 47.1
+ maximum: 49.9
+ floor value: 47.1
+
+hydrogen, from electrolysis, SOEC:
+ feedstock name: electricity, low voltage
+ feedstock unit: kilowatt hour
+ efficiency:
+ 2020:
+ mean: 42.3
+ minimum: 41.2
+ maximum: 43.4
+ 2050:
+ mean: 40.6
+ minimum: 40.0
+ maximum: 41.2
+ floor value: 40.0
+
+hydrogen, from biogas, with CCS:
feedstock name: biomethane
feedstock unit: kilogram
floor value: 3.2
-from SMR of biogas:
- name: hydrogen production, steam methane reforming, from biomethane
- var: hydrogen, from biogas
+
+hydrogen, from biogas:
feedstock name: biomethane
feedstock unit: kilogram
floor value: 3.2
-from SMR of natural gas:
- name: hydrogen production, steam methane reforming
- var: hydrogen, from natural gas
+
+hydrogen, from natural gas:
feedstock name: natural gas
feedstock unit: cubic meter
floor value: 3.5
-from SMR of natural gas, with CCS:
- name: hydrogen production, steam methane reforming, with CCS
- var: hydrogen, from natural gas, with CCS
+
+hydrogen, from natural gas, with CCS:
feedstock name: natural gas
feedstock unit: cubic meter
floor value: 3.5
-from gasification of biomass, with CCS:
- name: hydrogen production, gaseous, 25 bar, from gasification of woody biomass in entrained flow gasifier, with CCS, at gasification plant
- var: hydrogen, from biomass, with CCS
+
+hydrogen, from biomass, with CCS:
feedstock name: wood chips
feedstock unit: kilogram
floor value: 7
-from gasification of biomass:
- name: hydrogen production, gaseous, 25 bar, from gasification of woody biomass in entrained flow gasifier, at gasification plant
- var: hydrogen, from biomass
+
+hydrogen, from biomass:
feedstock name: wood chips
feedstock unit: kilogram
floor value: 7
-from coal gasification:
- name: hydrogen production, coal gasification
- var: hydrogen, from coal
+
+hydrogen, from coal:
feedstock name: hard coal
feedstock unit: kilogram
floor value: 5
-from coal gasification, with CCS:
- name: hydrogen production, coal gasification, with CCS
- var: hydrogen, from coal, with CCS
+
+hydrogen, from coal, with CCS:
feedstock name: hard coal
feedstock unit: kilogram
floor value: 5
-from pyrolysis:
- name: hydrogen production, gaseous, 100 bar, from methane pyrolysis
- var: hydrogen, from pyrolysis
+
+hydrogen, from pyrolysis:
feedstock name: natural gas
feedstock unit: cubic meter
floor value: 6.5
-from thermochemical water splitting:
- name: hydrogen production, gaseous, 25 bar, from thermochemical water splitting, at solar tower
- var: hydrogen, from solar
+
+hydrogen, from solar:
feedstock name: Energy, solar, converted
feedstock unit: megajoule
floor value: 180
\ No newline at end of file
diff --git a/premise/data/renewables/efficiency_solar_PV.csv b/premise/data/renewables/efficiency_solar_PV.csv
index 30c4fe42..50621954 100644
--- a/premise/data/renewables/efficiency_solar_PV.csv
+++ b/premise/data/renewables/efficiency_solar_PV.csv
@@ -1,23 +1,28 @@
-technology;year;efficiency
-micro-Si;2010;0.1
-single-Si;2010;0.151
-multi-Si;2010;0.14
-CIGS;2010;0.11
-CIS;2010;0.11
-CdTe;2010;0.1
-micro-Si;2020;0.119
-single-Si;2020;0.179
-multi-Si;2020;0.168
-perovskite;2020;0.25
-GaAs;2020;0.28
-CIGS;2020;0.14
-CIS;2020;0.14
-CdTe;2020;0.168
-micro-Si;2050;0.125
-single-Si;2050;0.267
-multi-Si;2050;0.244
-CIGS;2050;0.234
-CIS;2050;0.234
-CdTe;2050;0.21
-perovskite;2050;0.25
-GaAs;2050;0.28
\ No newline at end of file
+technology,year,mean,min,max,source
+micro-Si,2010,0.1,0.075,0.125,Past eff: https://iea.blob.core.windows.net/assets/3a99654f-ffff-469f-b83c-bf0386ed8537/pv_roadmap.pdf; Uncertainty: Own assumption: -+25%
+single-Si,2010,0.15,0.11325,0.18875,Past eff: https://treeze.ch/fileadmin/user_upload/downloads/Publications/Case_Studies/Energy/Future-PV-LCA-IEA-PVPS-Task-12-March-2015.pdf; Uncertainty: Own assumption: -+25%
+multi-Si,2010,0.14,0.105,0.175,Past eff: https://iea.blob.core.windows.net/assets/3a99654f-ffff-469f-b83c-bf0386ed8537/pv_roadmap.pdf; Uncertainty: Own assumption: -+25%
+CIGS,2010,0.11,0.0825,0.1375,Past eff: https://iea.blob.core.windows.net/assets/3a99654f-ffff-469f-b83c-bf0386ed8537/pv_roadmap.pdf; Uncertainty: Own assumption: -+25%
+CIS,2010,0.11,0.0825,0.1375,Past eff: https://iea.blob.core.windows.net/assets/3a99654f-ffff-469f-b83c-bf0386ed8537/pv_roadmap.pdf; Uncertainty: Own assumption: -+25%
+CdTe,2010,0.1,0.075,0.125,Past eff: https://iea.blob.core.windows.net/assets/3a99654f-ffff-469f-b83c-bf0386ed8537/pv_roadmap.pdf; Uncertainty: Own assumption: -+25%
+GaAs,2010,0.28,0.21,0.35,Past efficiencies same as current.
+perovskite,2010,0.25,0.1875,0.3125,Past efficiencies same as current.
+micro-Si,2020,0.12,0.08925,0.14875,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+single-Si,2020,0.18,0.13425,0.22375,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+multi-Si,2020,0.17,0.126,0.21,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+perovskite,2020,0.25,0.1875,0.3125,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+GaAs,2020,0.28,0.21,0.35,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+CIGS,2020,0.14,0.105,0.175,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+CIS,2020,0.14,0.105,0.175,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+CdTe,2020,0.17,0.126,0.21,Current eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+single-Si,2023,0.22,0.174,0.235,https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf
+CdTe,2023,0.19,0.17,0.2,https://www.sciencedirect.com/science/article/pii/S0927024823001101; https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf
+CIGS,2023,0.15,0.11,0.19,https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf
+micro-Si,2050,0.13,0.09375,0.15625,https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf
+single-Si,2050,0.27,0.20475,0.34125,https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf
+multi-Si,2050,0.24,0.183,0.305,https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf
+CIGS,2050,0.23,0.1755,0.2925,"https://www.ise.fraunhofer.de/content/dam/ise/de/documents/publications/studies/Photovoltaics-Report.pdf. For future efficiency: own assumption, -+25%."
+CIS,2050,0.23,0.1755,0.2925,Future eff: Fraunhofer ISE Photovoltaics Report 2019; Uncertainty: Own assumption: -+25%.
+CdTe,2050,0.226,0.22,0.25,https://www.sciencedirect.com/science/article/pii/S0927024823001101
+perovskite,2050,0.252,0.22,0.3125,https://pubs.rsc.org/en/content/articlelanding/2022/se/d2se00096b; https://www.csem.ch/en/news/photovoltaic-technology-breakthrough-achieving-31.25-efficiency/
+GaAs,2050,0.28,0.25,0.28,https://link.springer.com/article/10.1007/s11367-020-01791-z
\ No newline at end of file
diff --git a/premise/electricity.py b/premise/electricity.py
index 134be54c..9a60fc05 100644
--- a/premise/electricity.py
+++ b/premise/electricity.py
@@ -1454,32 +1454,50 @@ def update_efficiency_of_solar_pv(self) -> None:
current_eff = power / max_power
if self.year in module_eff.coords["year"].values:
- new_eff = module_eff.sel(technology=pv_tech, year=self.year).values
+ new_mean_eff = module_eff.sel(technology=pv_tech, year=self.year, efficiency_type="mean").values
+ new_min_eff = module_eff.sel(technology=pv_tech, year=self.year, efficiency_type="min").values
+ new_max_eff = module_eff.sel(technology=pv_tech, year=self.year, efficiency_type="max").values
else:
- new_eff = (
- module_eff.sel(technology=pv_tech)
+ new_mean_eff = (
+ module_eff.sel(technology=pv_tech, efficiency_type="mean")
+ .interp(year=self.year, kwargs={"fill_value": "extrapolate"})
+ .values
+ )
+ new_min_eff = (
+ module_eff.sel(technology=pv_tech, efficiency_type="min")
+ .interp(year=self.year, kwargs={"fill_value": "extrapolate"})
+ .values
+ )
+ new_max_eff = (
+ module_eff.sel(technology=pv_tech, efficiency_type="max")
.interp(year=self.year, kwargs={"fill_value": "extrapolate"})
.values
)
# in case self.year <10 or >2050
- new_eff = np.clip(new_eff, 0.1, 0.27)
+ new_mean_eff = np.clip(new_mean_eff, 0.1, 0.30)
+ new_min_eff = np.clip(new_min_eff, 0.1, 0.30)
+ new_max_eff = np.clip(new_max_eff, 0.1, 0.30)
# We only update the efficiency if it is higher than the current one.
- if new_eff.sum() > current_eff:
- exc["amount"] *= float(current_eff / new_eff)
+ if new_mean_eff.sum() > current_eff:
+ exc["amount"] *= float(current_eff / new_mean_eff)
+ exc["uncertainty type"] = 5
+ exc["loc"] = exc["amount"]
+ exc["minimum"] = exc["amount"] * (new_min_eff / new_mean_eff)
+ exc["maximum"] = exc["amount"] * (new_max_eff / new_mean_eff)
dataset["comment"] = (
f"`premise` has changed the efficiency "
f"of this photovoltaic installation "
- f"from {int(current_eff * 100)} pct. to {int(new_eff * 100)} pt."
+ f"from {int(current_eff * 100)} pct. to {int(new_mean_eff * 100)} pt."
)
if "log parameters" not in dataset:
dataset["log parameters"] = {}
dataset["log parameters"].update({"old efficiency": current_eff})
- dataset["log parameters"].update({"new efficiency": new_eff})
+ dataset["log parameters"].update({"new efficiency": new_mean_eff})
# add to log
self.write_log(dataset=dataset, status="updated")
diff --git a/premise/fuels.py b/premise/fuels.py
index 92e7705a..54ec1f00 100644
--- a/premise/fuels.py
+++ b/premise/fuels.py
@@ -126,32 +126,50 @@ def get_pre_cooling_energy(
return energy_pre_cooling
-@lru_cache()
-def adjust_electrolysis_electricity_requirement(year: int) -> ndarray:
+def adjust_electrolysis_electricity_requirement(
+ year: int,
+ projected_efficiency: dict
+) -> [float, float, float]:
"""
Calculate the adjusted electricity requirement for hydrogen electrolysis
based on the given year.
:param year: the year for which to calculate the adjusted electricity requirement
- :return: the adjusted electricity requirement in kWh/kg H2
+    :param projected_efficiency: dict keyed by year (2020, 2050), each mapping
+        "mean"/"minimum"/"maximum" to electricity requirements in kWh/kg H2
+    :return: the adjusted mean, min and max electricity requirement in kWh/kg H2
"""
- # Constants
- MIN_ELECTRICITY_REQUIREMENT = 48
- MAX_ELECTRICITY_REQUIREMENT = 60 # no maximum
- # Calculate adjusted electricity requirement
- electricity_requirement = -0.3538 * (year - 2010) + 58.589
+ if year < 2020:
+ mean = projected_efficiency[2020]["mean"]
+ min = projected_efficiency[2020]["minimum"]
+ max = projected_efficiency[2020]["maximum"]
- # Clip to minimum and maximum values
- adjusted_requirement = np.clip(
- electricity_requirement,
- MIN_ELECTRICITY_REQUIREMENT,
- MAX_ELECTRICITY_REQUIREMENT,
- )
+ elif year > 2050:
+ mean = projected_efficiency[2050]["mean"]
+ min = projected_efficiency[2050]["minimum"]
+ max = projected_efficiency[2050]["maximum"]
+
+ else:
+ mean = np.interp(
+ year,
+ [2020, 2050],
+ [projected_efficiency[2020]["mean"], projected_efficiency[2050]["mean"]]
+ )
+ min = np.interp(
+ year,
+ [2020, 2050],
+ [projected_efficiency[2020]["minimum"], projected_efficiency[2050]["minimum"]]
+ )
+ max = np.interp(
+ year,
+ [2020, 2050],
+ [projected_efficiency[2020]["maximum"], projected_efficiency[2050]["maximum"]]
+ )
- return adjusted_requirement
+ return mean, min, max
def is_fuel_production(name):
@@ -285,7 +303,6 @@ def _update_fuels(scenario, version, system_model):
)
):
fuels.generate_fuel_markets()
- fuels.adjust_fuel_conversion_efficiency()
fuels.relink_datasets()
scenario["database"] = fuels.database
scenario["cache"] = fuels.cache
@@ -378,6 +395,16 @@ def __init__(
dim="variables",
)
+ # create fuel filters
+ mapping = InventorySet(self.database)
+ self.fuel_map = mapping.generate_fuel_map()
+ # reverse fuel map
+ self.rev_fuel_map = {}
+ for fuel, activities in self.fuel_map.items():
+ for activity in activities:
+ self.rev_fuel_map[activity] = fuel
+
+
def find_suppliers(
self,
name: str,
@@ -454,12 +481,12 @@ def generate_hydrogen_activities(self) -> None:
Defines regional variants for hydrogen production, but also different supply
chain designs:
* by truck (500 km), gaseous, liquid and LOHC
- * by reassigned CNG pipeline (500 km), gaseous, with and without inhibitors
* by dedicated H2 pipeline (500 km), gaseous
* by ship, liquid (2000 km)
- For truck and pipeline supply chains, we assume a transmission and a distribution part, for which
- we have specific pipeline designs. We also assume a means for regional storage in between (salt cavern).
+ For truck and pipeline supply chains, we assume a transmission and
+ a distribution part, for which we have specific pipeline designs.
+ We also assume a means for regional storage in between (salt cavern).
We apply distance-based losses along the way.
Most of these supply chain design options are based on the work of:
@@ -471,96 +498,130 @@ def generate_hydrogen_activities(self) -> None:
Sustain Energy Fuels 2020;4:2256–73. https://doi.org/10.1039/d0se00067a.
* Petitpas G. Boil-off losses along the LH2 pathway. US Dep Energy Off Sci Tech Inf 2018.
-
"""
- hydrogen_sources = fetch_mapping(HYDROGEN_SOURCES)
+ hydrogen_parameters = fetch_mapping(HYDROGEN_SOURCES)
- for hydrogen_type, hydrogen_vars in hydrogen_sources.items():
- hydrogen_activity_name = hydrogen_sources[hydrogen_type].get("name")
- hydrogen_efficiency_variable = hydrogen_sources[hydrogen_type].get("var")
- hydrogen_feedstock_name = hydrogen_sources[hydrogen_type].get(
- "feedstock name"
- )
- hydrogen_feedstock_unit = hydrogen_sources[hydrogen_type].get(
- "feedstock unit"
- )
- efficiency_floor_value = hydrogen_sources[hydrogen_type].get("floor value")
+ for fuel_type, dataset_name in self.fuel_map.items():
+ if fuel_type in hydrogen_parameters:
+ hydrogen_feedstock_name = hydrogen_parameters[fuel_type].get("feedstock name")
+ hydrogen_feedstock_unit = hydrogen_parameters[fuel_type].get("feedstock unit")
+ projected_efficiency = hydrogen_parameters[fuel_type].get("efficiency")
+ floor_value = hydrogen_parameters[fuel_type].get("floor value")
- new_ds = self.fetch_proxies(
- name=hydrogen_activity_name,
- ref_prod="hydrogen",
- production_variable=hydrogen_efficiency_variable,
- exact_name_match=True,
- )
+ if isinstance(dataset_name, set):
+ if len(dataset_name) > 1:
+ print(f"Multiple datasets found for {fuel_type}.")
- for region, dataset in new_ds.items():
- # find current energy consumption in dataset
- initial_energy_consumption = sum(
- exc["amount"]
- for exc in dataset["exchanges"]
- if exc["unit"] == hydrogen_feedstock_unit
- and hydrogen_feedstock_name in exc["name"]
- and exc["type"] != "production"
+ new_ds = self.fetch_proxies(
+ name=list(dataset_name)[0],
+ ref_prod="hydrogen",
+ production_variable=fuel_type,
+ exact_name_match=True,
)
- # add it to "log parameters"
- if "log parameters" not in dataset:
- dataset["log parameters"] = {}
+ for region, dataset in new_ds.items():
+ # find current energy consumption in dataset
+ initial_energy_use = sum(
+ exc["amount"]
+ for exc in dataset["exchanges"]
+ if exc["unit"] == hydrogen_feedstock_unit
+ and hydrogen_feedstock_name in exc["name"]
+ and exc["type"] != "production"
+ )
- dataset["log parameters"].update(
- {
- "initial energy input for hydrogen production": initial_energy_consumption
- }
- )
+ if initial_energy_use is None or initial_energy_use == 0:
+ print(f"Initial energy consumption for {fuel_type} in {region} is None.")
- # Fetch the efficiency change of the
- # electrolysis process over time,
- # according to the IAM scenario,
- # if available.
+ # add it to "log parameters"
+ if "log parameters" not in dataset:
+ dataset["log parameters"] = {}
- if (
- hydrogen_efficiency_variable
- in self.fuel_efficiencies.variables.values
- ):
- # Find scaling factor compared to 2020
- scaling_factor = 1 / self.find_iam_efficiency_change(
- data=self.fuel_efficiencies,
- variable=hydrogen_efficiency_variable,
- location=region,
+ dataset["log parameters"].update(
+ {
+ "initial energy input for hydrogen production": initial_energy_use
+ }
)
- # new energy consumption
- new_energy_consumption = scaling_factor * initial_energy_consumption
+ # Fetch the efficiency change of the
+ # electrolysis process over time,
+ # according to the IAM scenario,
+ # if available.
- # set a floor value/kg H2
- new_energy_consumption = max(
- new_energy_consumption, efficiency_floor_value
- )
+ new_energy_use, min_energy_use, max_energy_use = None, None, None
- else:
- if "from electrolysis" in hydrogen_type:
- # get the electricity consumption
- new_energy_consumption = (
- adjust_electrolysis_electricity_requirement(self.year)
+ if (
+ fuel_type
+ in self.fuel_efficiencies.variables.values
+ ):
+ # Find scaling factor compared to 2020
+ scaling_factor = 1 / self.find_iam_efficiency_change(
+ data=self.fuel_efficiencies,
+ variable=fuel_type,
+ location=region,
)
+
+ if scaling_factor != 1:
+ # new energy consumption
+ new_energy_use = scaling_factor * initial_energy_use
+
+ # set a floor value/kg H2
+ new_energy_use = max(
+ new_energy_use, floor_value
+ )
+
+ else:
+ if "from electrolysis" in fuel_type:
+
+ # get the electricity consumption
+ new_energy_use, min_energy_use, max_energy_use = (
+ adjust_electrolysis_electricity_requirement(
+ self.year,
+ projected_efficiency
+ )
+ )
+
else:
- new_energy_consumption = None
+ if "from electrolysis" in fuel_type:
+ # get the electricity consumption
+ new_energy_use, min_energy_use, max_energy_use = (
+ adjust_electrolysis_electricity_requirement(
+ self.year,
+ projected_efficiency
+ )
+ )
- if new_energy_consumption is not None:
# recalculate scaling factor
- scaling_factor = new_energy_consumption / initial_energy_consumption
-
- if not np.isnan(scaling_factor) and scaling_factor > 0.0:
- # rescale the fuel consumption exchange
- dataset = rescale_exchanges(
- dataset,
- scaling_factor,
- technosphere_filters=[
+ try:
+ scaling_factor = new_energy_use / initial_energy_use
+ except ZeroDivisionError:
+ scaling_factor = 1
+ except TypeError:
+ scaling_factor = 1
+
+ if scaling_factor != 1:
+ if min_energy_use is not None:
+ for exc in ws.technosphere(
+ dataset,
ws.contains("name", hydrogen_feedstock_name),
ws.equals("unit", hydrogen_feedstock_unit),
- ],
- )
+ ):
+ exc["amount"] = new_energy_use
+ exc["uncertainty type"] = 5
+ exc["loc"] = new_energy_use
+ exc["minimum"] = min_energy_use
+ exc["maximum"] = max_energy_use
+
+ else:
+ # rescale the fuel consumption exchange
+ rescale_exchanges(
+ dataset,
+ scaling_factor,
+ technosphere_filters=[
+ ws.contains("name", hydrogen_feedstock_name),
+ ws.equals("unit", hydrogen_feedstock_unit),
+ ],
+ )
# add it to "log parameters"
if "log parameters" not in dataset:
@@ -569,7 +630,7 @@ def generate_hydrogen_activities(self) -> None:
# add it to "log parameters"
dataset["log parameters"].update(
{
- "new energy input for hydrogen production": new_energy_consumption
+ "new energy input for hydrogen production": new_energy_use
}
)
@@ -586,8 +647,8 @@ def generate_hydrogen_activities(self) -> None:
"generated by `premise`. "
)
- self.database.extend(new_ds.values())
- self.add_to_index(new_ds.values())
+ self.database.extend(new_ds.values())
+ self.add_to_index(new_ds.values())
# loss coefficients for hydrogen supply
losses = fetch_mapping(HYDROGEN_SUPPLY_LOSSES)
@@ -618,21 +679,32 @@ def generate_hydrogen_activities(self) -> None:
# add it to list of created datasets
self.add_to_index(dataset)
- datasets = (
- [
+ datasets = [
"hydrogenation of hydrogen",
"dehydrogenation of hydrogen",
"market group for electricity, low voltage",
"hydrogen embrittlement inhibition",
"hydrogen refuelling station",
]
- + [
+
+ datasets.extend(
+ [
c.get("regional storage", {}).get("name")
for c in supply_chain_scenarios.values()
if c.get("regional storage", {}).get("name")
]
- + [x["name"] for x in hydrogen_sources.values()]
- + [
+ )
+
+ datasets.extend(
+ [
+ x for k, v in self.fuel_map.items()
+ for x in v
+ if k.startswith("hydrogen, from")
+ ]
+ )
+
+ datasets.extend(
+ [
v["name"]
for config in supply_chain_scenarios.values()
for v in config["vehicle"]
@@ -645,105 +717,98 @@ def generate_hydrogen_activities(self) -> None:
)
for region in self.regions:
- for hydrogen_type, hydrogen_vars in hydrogen_sources.items():
- for vehicle, config in supply_chain_scenarios.items():
- for state in config["state"]:
- for distance in config["distance"]:
- # dataset creation
- dataset: dict[
- str,
- Union[
- Union[
- str, list[dict[str, Union[int, str]]], ndarray
+ for hydrogen_type, hydrogen_datasets in self.fuel_map.items():
+ if "hydrogen, from" in hydrogen_type:
+ for vehicle, config in supply_chain_scenarios.items():
+ for state in config["state"]:
+ for distance in config["distance"]:
+ # dataset creation
+ dataset = {
+ "location": region,
+ "name": f"hydrogen supply, {hydrogen_type}, by {vehicle}, as {state}, over {distance} km",
+ "reference product": "hydrogen, 700 bar",
+ "unit": "kilogram",
+ "database": self.database[1]["database"],
+ "code": str(uuid.uuid4().hex),
+ "comment": "Dataset representing hydrogen supply, generated by `premise`.",
+ "exchanges": [
+ {
+ "uncertainty type": 0,
+ "loc": 1,
+ "amount": 1,
+ "type": "production",
+ "production volume": 1,
+ "product": "hydrogen, 700 bar",
+ "name": f"hydrogen supply, {hydrogen_type}, "
+ f"by {vehicle}, as {state}, over {distance} km",
+ "unit": "kilogram",
+ "location": region,
+ }
],
- Any,
- ],
- ] = {
- "location": region,
- "name": f"hydrogen supply, {hydrogen_type}, by {vehicle}, as {state}, over {distance} km",
- "reference product": "hydrogen, 700 bar",
- "unit": "kilogram",
- "database": self.database[1]["database"],
- "code": str(uuid.uuid4().hex),
- "comment": "Dataset representing hydrogen supply, generated by `premise`.",
- "exchanges": [
- {
- "uncertainty type": 0,
- "loc": 1,
- "amount": 1,
- "type": "production",
- "production volume": 1,
- "product": "hydrogen, 700 bar",
- "name": f"hydrogen supply, {hydrogen_type}, "
- f"by {vehicle}, as {state}, over {distance} km",
- "unit": "kilogram",
- "location": region,
- }
- ],
- }
-
- # transport
- dataset = self.add_hydrogen_transport(
- dataset, config, region, distance, vehicle, subset
- )
+ }
- # need for inhibitor and purification if CNG pipeline
- # electricity for purification: 2.46 kWh/kg H2
- if vehicle == "CNG pipeline":
- dataset = self.add_hydrogen_inhibitor(
- dataset, region, subset
+ # transport
+ dataset = self.add_hydrogen_transport(
+ dataset, config, region, distance, vehicle, subset
)
- if "regional storage" in config:
- dataset = self.add_hydrogen_regional_storage(
- dataset, region, config, subset
- )
+ # need for inhibitor and purification if CNG pipeline
+ # electricity for purification: 2.46 kWh/kg H2
+ if vehicle == "CNG pipeline":
+ dataset = self.add_hydrogen_inhibitor(
+ dataset, region, subset
+ )
- # electricity for compression
- if state in ["gaseous", "liquid"]:
- dataset = self.add_compression_electricity(
- state, vehicle, distance, region, dataset, subset
- )
+ if "regional storage" in config:
+ dataset = self.add_hydrogen_regional_storage(
+ dataset, region, config, subset
+ )
- # electricity for hydrogenation, dehydrogenation and
- # compression at delivery
- if state == "liquid organic compound":
- dataset = self.add_hydrogenation_energy(
- region, dataset, subset
- )
+ # electricity for compression
+ if state in ["gaseous", "liquid"]:
+ dataset = self.add_compression_electricity(
+ state, vehicle, distance, region, dataset, subset
+ )
- dataset = self.add_hydrogen_input_and_losses(
- hydrogen_vars,
- region,
- losses,
- vehicle,
- state,
- distance,
- dataset,
- subset,
- )
+ # electricity for hydrogenation, dehydrogenation and
+ # compression at delivery
+ if state == "liquid organic compound":
+ dataset = self.add_hydrogenation_energy(
+ region, dataset, subset
+ )
- # add fuelling station, including storage tank
- dataset["exchanges"].append(
- self.add_h2_fuelling_station(region, subset)
- )
+ dataset = self.add_hydrogen_input_and_losses(
+ list(hydrogen_datasets)[0],
+ region,
+ losses,
+ vehicle,
+ state,
+ distance,
+ dataset,
+ subset,
+ )
- # add pre-cooling
- dataset = self.add_pre_cooling_electricity(
- dataset, region, subset
- )
+ # add fuelling station, including storage tank
+ dataset["exchanges"].append(
+ self.add_h2_fuelling_station(region, subset)
+ )
- dataset = self.relink_technosphere_exchanges(
- dataset,
- )
+ # add pre-cooling
+ dataset = self.add_pre_cooling_electricity(
+ dataset, region, subset
+ )
- self.database.append(dataset)
+ dataset = self.relink_technosphere_exchanges(
+ dataset,
+ )
- # add to log
- self.write_log(dataset)
+ self.database.append(dataset)
- # add it to list of created datasets
- self.add_to_index(dataset)
+ # add to log
+ self.write_log(dataset)
+
+ # add it to list of created datasets
+ self.add_to_index(dataset)
def add_hydrogen_transport(
self,
@@ -821,7 +886,7 @@ def add_hydrogen_input_and_losses(
# fetch the H2 production activity
h2_ds = list(
self.find_suppliers(
- name=hydrogen_activity["name"],
+ name=hydrogen_activity,
ref_prod="hydrogen",
unit="kilogram",
loc=region,
@@ -1668,68 +1733,6 @@ def adjust_land_use_change_emissions(
return dataset
- def adjust_fuel_conversion_efficiency(self):
- """
- Adjust the input to output fuel conversion efficiency.
- """
-
- for fuel, activities in self.fuel_map.items():
- for activity in activities:
- for ds in ws.get_many(self.database, ws.equals("name", activity)):
- variable = self.rev_fuel_map.get(activity)
- scaling_factor = 1.0
- if variable in self.fuel_efficiencies.coords["variables"]:
- if ds["location"] in self.regions:
- region = ds["location"]
- else:
- region = self.ecoinvent_to_iam_loc[ds["location"]]
-
- if self.year in self.fuel_efficiencies.coords["year"].values:
- scaling_factor = self.fuel_efficiencies.sel(
- variables=variable,
- region=region,
- year=self.year,
- ).values
- else:
- scaling_factor = (
- self.fuel_efficiencies.sel(
- variables=variable,
- region=region,
- )
- .interp(year=self.year)
- .values
- )
- if (
- scaling_factor != 1.0
- and "market for" not in ds["name"]
- and "fuel conversion efficiency"
- not in ds.get("log parameters", {})
- ):
- rescale_exchanges(
- ds,
- 1 / scaling_factor,
- )
-
- if "log parameters" not in ds:
- ds["log parameters"] = {}
-
- ds["log parameters"].update(
- {
- "fuel conversion efficiency": 1 / scaling_factor,
- }
- )
-
- # update comment section
- txt = (
- f" The inputs of this dataset have been multiplied by {1 / scaling_factor}"
- f"to reflect changes in efficiency according to {self.model.upper()} - {self.scenario.upper()}."
- )
- if "comment" in ds:
- ds["comment"] += txt
- else:
- ds["comment"] = txt
- self.write_log(ds, status="updated")
-
def get_production_label(self, crop_type: str) -> [str, None]:
"""
Get the production label for the dataset.
@@ -1917,7 +1920,8 @@ def fetch_fuel_share(
else:
start_period = self.year
end_period = self.year + period
- # make sure end_period is not greater than the last year in the dataset
+ # make sure end_period is not greater than
+ # the last year in the dataset
end_period = min(
end_period, self.iam_fuel_markets.coords["year"].values[-1]
)
@@ -2419,8 +2423,10 @@ def generate_fuel_markets(self):
for activity in activities:
self.rev_fuel_map[activity] = fuel
+
d_fuels = self.get_fuel_mapping()
+
datasets = [
item
for key in d_fuels
diff --git a/premise/iam_variables_mapping/fuels_variables.yaml b/premise/iam_variables_mapping/fuels_variables.yaml
index d01793fd..0d5feb42 100644
--- a/premise/iam_variables_mapping/fuels_variables.yaml
+++ b/premise/iam_variables_mapping/fuels_variables.yaml
@@ -514,7 +514,7 @@ hydrogen, from petroleum:
fltr:
- hydrogen production, gaseous, petroleum refinery operation
-hydrogen, from electrolysis:
+hydrogen, from electrolysis, PEM:
lhv: 120
co2: 0
biogenic_share: 1
@@ -528,8 +528,28 @@ hydrogen, from electrolysis:
remind: Tech|Hydrogen|Electricity|Efficiency
tiam-ucl: Efficiency|Hydrogen|Electrolysis
ecoinvent_aliases:
- fltr:
- - hydrogen production, gaseous, 30 bar, from PEM electrolysis, from grid electricity
+ fltr: hydrogen production, gaseous, 30 bar, from PEM electrolysis, from grid electricity
+ mask:
+ - carbon dioxide
+
+hydrogen, from electrolysis, AEC:
+ lhv: 120
+ co2: 0
+ biogenic_share: 1
+ ecoinvent_aliases:
+ fltr: hydrogen production, gaseous, 20 bar, from AEC electrolysis, from grid electricity
+ mask:
+ - carbon dioxide
+
+hydrogen, from electrolysis, SOEC:
+ lhv: 120
+ co2: 0
+ biogenic_share: 1
+ ecoinvent_aliases:
+ fltr: hydrogen production, gaseous, 1 bar, from SOEC electrolysis, from grid electricity
+ mask:
+ - steam
+ - carbon dioxide
hydrogen, from solar:
lhv: 120
@@ -596,6 +616,7 @@ hydrogen, from coal:
remind: Tech|Hydrogen|Coal|w/o CC|Efficiency
ecoinvent_aliases:
fltr: hydrogen production, coal gasification
+ mask: CCS
hydrogen, from coal, with CCS:
lhv: 120
diff --git a/premise/utils.py b/premise/utils.py
index 1c7f7147..6d1569fe 100644
--- a/premise/utils.py
+++ b/premise/utils.py
@@ -164,7 +164,15 @@ def get_efficiency_solar_photovoltaics() -> xr.DataArray:
EFFICIENCY_RATIO_SOLAR_PV, sep=get_delimiter(filepath=EFFICIENCY_RATIO_SOLAR_PV)
)
- return dataframe.groupby(["technology", "year"]).mean()["efficiency"].to_xarray()
+ dataframe = dataframe.melt(id_vars=["technology", "year"],
+ value_vars=["mean", "min", "max"],
+ var_name="efficiency_type", value_name="efficiency")
+
+ # Convert the DataFrame to an xarray Dataset
+ array = dataframe.set_index(["year", "technology", "efficiency_type"])["efficiency"].to_xarray()
+ array = array.interpolate_na(dim="year", method="linear")
+
+ return array
def default_global_location(database):