diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 000000000..4f2eb32f3 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,58 @@ +name: "CI" + +on: {push: {branches: ['**']}, pull_request: {branches: [dev, master]}} + +jobs: + build: + name: ${{ matrix.os.name }} + runs-on: ${{ matrix.os.image }} + + strategy: + matrix: + os: + - {image: ubuntu-latest, name: Linux} + - {image: windows-latest, name: Windows} + - {image: macos-latest, name: macOS} + max-parallel: 4 + fail-fast: false + + steps: + - uses: "actions/checkout@main" + - uses: "actions/setup-python@main" + with: + python-version: | + 3 + 3.8 + 3.9 + 3.10 + 3.11 + - name: "Install dependencies" + run: | + python -mpip install --progress-bar=off nox + python --version + pip --version + nox --version + - name: "Run custom checks" + run: "python -m nox -s check" + env: + PLATFORM: ${{ matrix.os.image }} + - name: "Check with `black`" + run: "python -m nox -s black" + env: + PLATFORM: ${{ matrix.os.image }} + - name: "Check with `flake8`" + run: "python -m nox -s flake8" + env: + PLATFORM: ${{ matrix.os.image }} + - name: "Check with `isort`" + run: "python -m nox -s isort" + env: + PLATFORM: ${{ matrix.os.image }} + - name: "Build and check for packaging errors" + run: "python -m nox -s build" + env: + PLATFORM: ${{ matrix.os.image }} + - name: "Install the package" + run: "python -m nox -s install" + env: + PLATFORM: ${{ matrix.os.image }} diff --git a/.gitignore b/.gitignore index 9c1a04499..0dc1acd18 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ -etrago/cluster/__pycache__/networkclustering.cpython-36.pyc --eTraGo.egg-info/* --src/* +**/__pycache__/* +build/* +dist/* +eTraGo.egg-info/* diff --git a/.readthedocs.yaml b/.readthedocs.yaml new file mode 100644 index 000000000..629eb1fa8 --- /dev/null +++ b/.readthedocs.yaml @@ -0,0 +1,22 @@ +# .readthedocs.yaml +# Read the Docs configuration file +# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details + +# Required +version: 2 + +# Set the version of Python and other tools you might need +build: + os: ubuntu-22.04 + tools: + python: "3.8" + +# Build documentation in the docs/ directory with Sphinx +sphinx: + configuration: doc/conf.py + +# We recommend specifying your dependencies to enable reproducible builds: +# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html +python: + install: + - requirements: requirements-doc.txt diff --git a/README.rst b/README.rst index cecbb1d16..e16b97a32 100644 --- a/README.rst +++ b/README.rst @@ -1,6 +1,17 @@ -.. image:: https://readthedocs.org/projects/etrago/badge/?version=latest - :target: http://etrago.readthedocs.io/en/latest/?badge=latest +|ci| |docs| + +.. |ci| image:: + https://img.shields.io/github/actions/workflow/status + /openego/eTraGo/ci.yaml?branch=dev&event=push&label=ci + :alt: Continuous Integration Workflow Status + :target: https://github.com/openego/eTraGo/actions/workflows/ci.yaml + +.. |docs| image:: + https://readthedocs.org/projects/etrago/badge/?version=latest :alt: Documentation Status + :target: http://etrago.readthedocs.io/en/latest/?badge=latest + +.. end-header eTraGo ====== @@ -8,7 +19,7 @@ eTraGo Optimization of flexibility options for transmission grids based on PyPSA A speciality in this context is that transmission grids are described by the -380, 220 and 110 kV in Germany. Conventionally the 110kV grid is part of the +380, 220 and 110 kV in Germany. Conventionally, the 110kV grid is part of the distribution grid. 
The integration of the transmission and 'upper' distribution grid is part of eTraGo. @@ -16,6 +27,21 @@ The focus of optimization are flexibility options with a special focus on energy storages. Grid expansion measures are not part of this tool and will be instead part of 'eGo' https://github.com/openego/eGo +eTraGo is documented on `readthedocs `_. + + +Input data +========== +The grid model data for eTraGo was created with the open source tool +`eGon-data `_. The resulting data will +be published on the `OpenEnergyPlatform `_. +As long as the data is not published there, a local database is needed. +We published a backup of the required tables and instructions on how to use it +on Zenodo: + +.. image:: https://zenodo.org/badge/DOI/10.5281/zenodo.8376714.svg :target: https://doi.org/10.5281/zenodo.8376714 + Installation ============ @@ -48,6 +74,18 @@ Use the pip -e to install eTraGo directly from the cloned repository: .. code-block:: $ pip3 install -e /path/to/eTraGo/ +When you want to draw geographical features in the background of network plots, +please install cartopy: + +.. code-block:: + + $ pip3 install cartopy + +If you run into problems when using cartopy, try to install shapely without binaries: + +.. code-block:: + + $ pip3 install shapely --no-binary shapely Using a virtual environment =========================== @@ -57,7 +95,7 @@ you create a virtual environment (where you like it) and activate it: .. code-block:: bash - $ virtualenv venv --clear -p python3.7 + $ virtualenv venv --clear -p python3.8 $ source venv/bin/activate $ cd venv diff --git a/doc/about.rst b/doc/about.rst index 3bad066df..ad11ac9a7 100644 --- a/doc/about.rst +++ b/doc/about.rst @@ -15,15 +15,17 @@ energy storage and grid expansion measures. -The open_eGo project +Research projects ==================== -This software project is part of the research project +This software project was initially developed in the research project `open_eGo `_. +It is constantly further developed in different research projects, +e.g. `eGon `_ and `PoWerD `_. The OpenEnergy Platform ======================= -Within this project we developed the OpenEnergy Platform which this software +Within the open_eGo project we developed the OpenEnergy Platform which this software is using in order to get and store the in- and output data. Before you start to calculate a registration on the platform is needed. For more information see `openenergy-platform `_ and login. @@ -39,12 +41,12 @@ this platform. -Model overview -============== +Tool overview +============= -.. figure:: images/eTraGo_model.png +.. figure:: images/ego_tools.svg :align: center :scale: 75% @@ -63,24 +65,17 @@ eGo The python package eGo is a toolbox and application which connects the tool eTraGo (optimization of flexibility options at transmission grid level) and eDisGo (optimization of distribution grids). All those python -packages are part of the research project +packages were initially developed in the research project `open_eGo `_. `Learn more here `_. -Dataprocessing -============== +Data model creation +=================== +For the eGon project, the python tool `eGon-data `_ was implemented, which creates input data for the optimization tools `eTraGo `_, `ding0 `_ and `eDisGo `_ and delivers for example data on grid topologies, demands/demand curves and generation capacities in a high spatial resolution. The outputs of egon-data are published under open source and open data licenses.
-For the open_eGo project several python packages are developed which are feeded -by the input data of the data processing. The Dataprocessing is writen in -SQL and Python. `Learn more here `_. - -ego.io -====== - -The ego.io serves as a SQLAlchemy Interface to the OpenEnergy database (oedb). The -oedb table ORM objects are defined here and small helpers for io tasks are contained. -`Learn more here `_. +eGon-data is a further development of the `Data processing `_ developed in the former research project `open_eGo `_. It aims at an extension of the data models as well as a better replicability and manageability of the data preparation and processing. +The resulting data set serves as an input for the optimization tools `eTraGo `_, `ding0 `_ and `eDisGo `_. Dingo ===== The DIstribution Network GeneratOr (Dingo) is an open source tool that can be used to generate synthetic medium and low voltage power distribution grids based on open @@ -97,7 +92,7 @@ medium and low voltage power distribution grids based on open LICENSE ======= -© Copyright 2015-2018 +© Copyright 2015-2023 Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems and diff --git a/doc/api.rst b/doc/api.rst index 83b7a11a8..d65337975 100644 --- a/doc/api.rst +++ b/doc/api.rst @@ -1,8 +1,11 @@ -.. make doc-string generated documentation appear here +API +==== .. toctree:: :maxdepth: 7 :glob: :titlesonly: - API + api/etrago.cluster + api/etrago.tools + api/appl.rst diff --git a/doc/api/appl.rst b/doc/api/appl.rst new file mode 100644 index 000000000..fe45967b0 --- /dev/null +++ b/doc/api/appl.rst @@ -0,0 +1,7 @@ +etrago.appl module +------------------- + +.. automodule:: etrago.appl + :members: + :undoc-members: + :show-inheritance: \ No newline at end of file diff --git a/doc/api/etrago.cluster.rst b/doc/api/etrago.cluster.rst index 1e2fc858b..40fc8db3c 100644 --- a/doc/api/etrago.cluster.rst +++ b/doc/api/etrago.cluster.rst @@ -1,18 +1,31 @@ -etrago\.cluster package +etrago.cluster package ======================= -Submodules ---------- -etrago\.cluster\.networkclustering module +etrago.cluster.disaggregation module ----------------------------------------- +.. automodule:: etrago.cluster.disaggregation + :members: + :undoc-members: + :show-inheritance: + +etrago.cluster.electrical module ----------------------------------------- -.. automodule:: etrago.cluster.networkclustering +.. automodule:: etrago.cluster.electrical :members: :undoc-members: :show-inheritance: -etrago\.cluster\.snapshot module +etrago.cluster.gas module +----------------------------------------- + +.. automodule:: etrago.cluster.gas + :members: + :undoc-members: + :show-inheritance: + +etrago.cluster.snapshot module -------------------------------- .. automodule:: etrago.cluster.snapshot :members: :undoc-members: :show-inheritance: -Module contents ---------------- +etrago.cluster.spatial module +-------------------------------- -.. automodule:: etrago.cluster +.. automodule:: etrago.cluster.spatial :members: :undoc-members: :show-inheritance: - diff --git a/doc/api/etrago.rst b/doc/api/etrago.rst deleted file mode 100644 index a09ffc783..000000000 --- a/doc/api/etrago.rst +++ /dev/null @@ -1,30 +0,0 @@ -etrago package -============== -Subpackages ----------- -.. toctree:: - etrago.cluster - etrago.tools -Submodules ---------- -etrago\.appl module ------------------- -..
automodule:: etrago.appl - :members: - :undoc-members: - :show-inheritance: - - -Module contents ---------------- - -.. automodule:: etrago - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/api/etrago.tools.rst b/doc/api/etrago.tools.rst index 33586cbf9..2b5495d38 100644 --- a/doc/api/etrago.tools.rst +++ b/doc/api/etrago.tools.rst @@ -1,56 +1,66 @@ -etrago\.tools package +etrago.tools package ===================== -Submodules ----------- +etrago.tools.calc\_results module +---------------------------------- -etrago\.tools\.io module ------------------------- +.. automodule:: etrago.tools.calc_results + :members: + :undoc-members: + :show-inheritance: -.. automodule:: etrago.tools.io +etrago.tools.constraints module +---------------------------------- + +.. automodule:: etrago.tools.constraints :members: :undoc-members: :show-inheritance: +etrago.tools.execute module +---------------------------------- -etrago\.tools\.extendable module ------------------------- +.. automodule:: etrago.tools.execute + :members: + :undoc-members: + :show-inheritance: + +etrago.tools.extendable module +---------------------------------- .. automodule:: etrago.tools.extendable :members: :undoc-members: :show-inheritance: +etrago.tools.io module +------------------------ -etrago\.tools\.plot module --------------------------- - -.. automodule:: etrago.tools.plot +.. automodule:: etrago.tools.io :members: :undoc-members: :show-inheritance: -etrago\.tools\.snapshot\_clustering module ------------------------------------------- +etrago.tools.network module +----------------------------- -.. automodule:: etrago.tools.snapshot_clustering +.. automodule:: etrago.tools.network :members: :undoc-members: :show-inheritance: -etrago\.tools\.utilities module -------------------------------- +etrago.tools.plot module +--------------------------- -.. automodule:: etrago.tools.utilities +.. automodule:: etrago.tools.plot :members: :undoc-members: :show-inheritance: +etrago.tools.utilities module +------------------------------- -Module contents ---------------- - -.. automodule:: etrago.tools +.. automodule:: etrago.tools.utilities :members: :undoc-members: :show-inheritance: diff --git a/doc/api/modules.rst b/doc/api/modules.rst deleted file mode 100644 index fda8d67db..000000000 --- a/doc/api/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -etrago -====== - -.. toctree:: - :maxdepth: 4 - - etrago diff --git a/doc/conf.py b/doc/conf.py index 313e191cb..05f1f4625 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -105,7 +105,7 @@ # General information about the project. project = u'eTraGo' -copyright = u'2015-2018, Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems' +copyright = u'2015-2023, Flensburg University of Applied Sciences, Europa-Universität Flensburg, Centre for Sustainable Energy Systems, DLR-Institute for Networked Energy Systems' author = u'ulfmueller, lukasol, wolfbunke, mariusves, s3pp' # The version info for the project you're documenting, acts as replacement for @@ -113,16 +113,16 @@ # built documents. # # The short X.Y version. -version = '0.6.1' +version = '0.9' # The full version, including alpha/beta/rc tags. -release = '0.6.1' +release = '0.9.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
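Note on the ``language`` setting changed just below: Sphinx expects an IETF language tag here (such as ``en``) rather than a language name, which is why the value is corrected to ``"en"``. As the conf.py comment says, the language can also be set on the command line; a minimal sketch using ``sphinx-build``'s ``-D`` flag (output path illustrative):

.. code-block:: bash

   # Build the HTML docs with the language overridden at build time
   # instead of hard-coding it in doc/conf.py.
   $ sphinx-build -b html -D language=en doc doc/_build/html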
-language = None +language = "en" # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: diff --git a/doc/developer_notes.rst b/doc/developer_notes.rst index 37a3bc4ff..a4a3d036a 100644 --- a/doc/developer_notes.rst +++ b/doc/developer_notes.rst @@ -15,7 +15,7 @@ Installation for Developers .. code-block:: bash - $ virtualenv --clear -p python3.7 etrago`` + $ virtualenv --clear -p python3.8 etrago $ cd etrago/ $ source bin/activate diff --git a/doc/eTraGo_tutorial_release0.9.ipynb b/doc/eTraGo_tutorial_release0.9.ipynb new file mode 100644 index 000000000..e7cdc9e67 --- /dev/null +++ b/doc/eTraGo_tutorial_release0.9.ipynb @@ -0,0 +1,961 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "__copyright__ = (\n", + " \"Flensburg University of Applied Sciences, \"\n", + " \"Europa-Universität Flensburg, Centre for Sustainable Energy Systems, \"\n", + " \"DLR-Institute for Networked Energy Systems\"\n", + ")\n", + "__license__ = \"GNU Affero General Public License Version 3 (AGPL-3.0)\"\n", + "__author__ = (\n", + " \"ulfmueller, lukasol, wolfbunke, mariusves, s3pp, ClaraBuettner, \"\n", + " \"CarlosEpia, KathiEsterl, fwitte, gnn, pieterhexen, AmeliaNadal\"\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\"HSF\"\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Introduction to eTraGo" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Important links\n", + "\n", + "* __[eTraGo Source Code](https://github.com/openego/eTraGo)__\n", + "* __[eTraGo Documentation](http://etrago.readthedocs.io/)__\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Installation\n", + "The current eTraGo version as well as the python packages jupyterlab and contextily are required to use this notebook. Install these with\n", + "\n", + "`pip install eTraGo`\n", + "\n", + "`pip install jupyterlab contextily`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "## Import required general and eTraGo specific python packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%capture\n", + "# enable jupyter interactive plotting\n", + "%matplotlib widget\n", + "\n", + "# import Etrago API class\n", + "from etrago import Etrago\n", + "\n", + "# import plotting function\n", + "from etrago.tools.plot import plot_carrier\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define parameters to run eTraGo" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "args = {\n", + " # Setup:\n", + " \"db\": \"egon-data\", # database session\n", + " \"scn_name\": \"eGon2035\", # scenario: eGon2035, eGon100RE, eGon2035_lowflex, eGon100RE_lowflex\n", + " \"start_snapshot\": 1,\n", + " \"end_snapshot\": 15,\n", + " \"gridversion\": None, # Currently not needed\n", + " \"branch_capacity_factor\": {\"HV\": 0.5, \"eHV\": 0.7}, # p.u.
branch rating\n", + " \"foreign_lines\": {\n", + " \"carrier\": \"AC\", # 'DC' for modeling foreign lines as links\n", + " \"capacity\": \"osmTGmod\", # 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer'\n", + " },\n", + " \"scn_extension\": None, # None or array of extension scenarios (currently not provided, but needed once new lines from NEP are set up)\n", + " \"scn_decommissioning\": None, # None or decommissioning scenario (currently not provided)\n", + " \n", + " # Optimisation and powerflow:\n", + " \"method\": { # Choose method and settings for optimization\n", + " \"type\": \"lopf\", # type of optimization, currently only 'lopf'\n", + " \"n_iter\": 1, # abort criterion of iterative optimization, 'n_iter' or 'threshold'\n", + " \"pyomo\": True, # set if pyomo is used for model building\n", + " },\n", + " \"solver\": \"gurobi\", # glpk, cplex or gurobi\n", + " \"solver_options\": {\n", + " \"BarConvTol\": 1.0e-5,\n", + " \"FeasibilityTol\": 1.0e-5,\n", + " \"method\": 2,\n", + " \"crossover\": 0,\n", + " \"logFile\": \"solver_etrago.log\",\n", + " \"threads\": 4,\n", + " },\n", + " \"model_formulation\": \"kirchhoff\", # formulation of the LPF problem (all are equivalent)\n", + " \"extendable\": {\n", + " \"extendable_components\": [\n", + " \"as_in_db\"\n", + " ], # Array of components to optimize\n", + " \"upper_bounds_grid\": { # Set upper bounds for grid expansion\n", + " # lines in Germany\n", + " \"grid_max_D\": None, # relative to existing capacity\n", + " \"grid_max_abs_D\": { # absolute capacity per voltage level\n", + " \"380\": {\"i\": 1020, \"wires\": 4, \"circuits\": 4},\n", + " \"220\": {\"i\": 1020, \"wires\": 4, \"circuits\": 4},\n", + " \"110\": {\"i\": 1020, \"wires\": 4, \"circuits\": 2},\n", + " \"dc\": 0,\n", + " },\n", + " # border crossing lines\n", + " \"grid_max_foreign\": 4, # relative to existing capacity\n", + " \"grid_max_abs_foreign\": None, # absolute capacity per voltage level\n", + " },\n", + " },\n", + " \"generator_noise\": 789456, # a small random noise to the marginal costs of each generator in order to prevent an optima plateau\n", + " \"extra_functionality\": {}, # Choose function name (e.g. 
\"min_renewable_share\" or \"cross_border_flow\") or {} \n", + " \"load_shedding\": False, # helpful when debugging - a very expensive generator is added to each bus \n", + " \"lpfile\": False, # save pyomo's lp file: False or /path/to/lpfile.lp\n", + " \"csv_export\": \"results\", # save results as csv: False or /path/tofolder\n", + " \"pf_post_lopf\": {\n", + " \"active\": True, # choose if a pf should be performed after the lopf\n", + " \"add_foreign_lopf\": True, # keep results of lopf for foreign DC-links\n", + " \"q_allocation\": \"p_nom\", # allocate reactive power via 'p_nom' or 'p'\n", + " },\n", + " \n", + " # Spatial complexity reduction and disaggregation:\n", + " \"network_clustering_ehv\": False, # clustering of HV buses to EHV buses\n", + " \"network_clustering\": {\n", + " \"active\": True, # choose if clustering is activated\n", + " \"method\": \"kmedoids-dijkstra\", # choose clustering method: kmeans or kmedoids-dijkstra\n", + " \"n_clusters_AC\": 60, # total number of resulting AC nodes (DE+foreign)\n", + " \"cluster_foreign_AC\": False, # take foreign AC buses into account, True or False\n", + " \"method_gas\": \"kmedoids-dijkstra\", # choose clustering method: kmeans or kmedoids-dijkstra\n", + " \"n_clusters_gas\": 17, # total number of resulting CH4 nodes (DE+foreign)\n", + " \"cluster_foreign_gas\": False, # take foreign CH4 buses into account, True or False\n", + " \"k_elec_busmap\": False, # False or path/to/busmap.csv\n", + " \"k_gas_busmap\": False, # False or path/to/ch4_busmap.csv\n", + " \"bus_weight_tocsv\": None, # None or path/to/bus_weight.csv\n", + " \"bus_weight_fromcsv\": None, # None or path/to/bus_weight.csv\n", + " \"gas_weight_tocsv\": None, # None or path/to/gas_bus_weight.csv\n", + " \"gas_weight_fromcsv\": None, # None or path/to/gas_bus_weight.csv\n", + " \"line_length_factor\": 1, # Factor to multiply distance between new buses for new line lengths\n", + " \"remove_stubs\": False, # remove stubs bevore kmeans clustering\n", + " \"use_reduced_coordinates\": False, # If True, do not average cluster coordinates\n", + " \"random_state\": 42, # random state for replicability of clustering results\n", + " \"n_init\": 10, # affects clustering algorithm, only change when neccesary\n", + " \"max_iter\": 100, # affects clustering algorithm, only change when neccesary\n", + " \"tol\": 1e-6, # affects clustering algorithm, only change when neccesary\n", + " \"CPU_cores\": 4, # number of cores used during clustering, \"max\" for all cores available.\n", + " },\n", + " \"sector_coupled_clustering\": {\n", + " \"active\": True, # choose if clustering is activated\n", + " \"carrier_data\": { # select carriers affected by sector coupling\n", + " \"central_heat\": {\n", + " \"base\": [\"CH4\", \"AC\"],\n", + " \"strategy\": \"simultaneous\", # select strategy to cluster other sectors\n", + " },\n", + " },\n", + " },\n", + " \"disaggregation\": None, # None or 'uniform'\n", + " \n", + " # Temporal complexity reduction and disaggregation:\n", + " \"snapshot_clustering\": {\n", + " \"active\": False, # choose if clustering is activated\n", + " \"method\": \"segmentation\", # 'typical_periods' or 'segmentation'\n", + " \"extreme_periods\": None, # consideration of extreme timesteps; e.g. 
'append'\n", + " \"how\": \"daily\", # type of period - only relevant for 'typical_periods'\n", + " \"storage_constraints\": \"soc_constraints\", # additional constraints for storages - only relevant for 'typical_periods'\n", + " \"n_clusters\": 5, # number of periods - only relevant for 'typical_periods'\n", + " \"n_segments\": 5, # number of segments - only relevant for segmentation\n", + " },\n", + " \"skip_snapshots\": 5, # False or number of snapshots to skip\n", + " \"temporal_disaggregation\": {\n", + " \"active\": False, # choose if temporally full complex dispatch optimization should be conducted\n", + " \"no_slices\": 8, # number of subproblems optimization is divided into\n", + " },\n", + "\n", + " # Other\n", + " \"comments\": None,\n", + "}" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Import and export of the network and data structure\n", + "\n", + "The network can either be imported from a local database or from an online repository.\n", + "\n", + "Follow the instructions [here](https://github.com/openego/eTraGo/tree/features/release-0.9.0#input-data) to get the data-base." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago = Etrago(args, json_path=None)\n", + "etrago.build_network_from_db()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After importing the network from the database, call `adjust_network` to adjust the network imported from the database according to given input-parameters, e.g. add load shedding, set generator noise, set foreign lines to links." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "etrago.adjust_network()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Etrago uses pypsa's data structure:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# the pypsa network is stored in:\n", + "etrago.network" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.buses.head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.buses.carrier.value_counts()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"AC\", \"DC\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"CH4\"], carrier_buses=[\"CH4\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To export and import an Etrago network to csv files, you can do the following:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "path_export = \"etrago_network\"\n", + "\n", + "# export\n", + "etrago.export_to_csv(path_export)\n", + "\n", + "# import\n", + "path_import = \"etrago_network\"\n", + "etrago_import = Etrago(csv_folder_name=path_import)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [] + }, + "source": [ + "## Spatial clustering" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following arguments define the settings for the spatial clustering:\n", + "\n", + 
"```\n", + "args = { \n", + " # Spatial complexity reduction and disaggregation:\n", + " \"network_clustering_ehv\": False, # clustering of HV buses to EHV buses\n", + " \"network_clustering\": {\n", + " \"active\": True, # choose if clustering is activated\n", + " \"method\": \"kmedoids-dijkstra\", # choose clustering method: kmeans or kmedoids-dijkstra\n", + " \"n_clusters_AC\": 60, # total number of resulting AC nodes (DE+foreign)\n", + " \"cluster_foreign_AC\": False, # take foreign AC buses into account, True or False\n", + " \"exclusion_area\": [\"Cuxhaven\", \"Bremerhaven\", \"Wesermarsch\", \"Osterholz\", \"Bremen\"], # False, path to shapefile or list of nuts names of not cluster area\n", + " \"method_gas\": \"kmedoids-dijkstra\", # choose clustering method: kmeans or kmedoids-dijkstra\n", + " \"n_clusters_gas\": 17, # total number of resulting CH4 nodes (DE+foreign)\n", + " \"cluster_foreign_gas\": False, # take foreign CH4 buses into account, True or False\n", + " \"k_elec_busmap\": False, # False or path/to/busmap.csv\n", + " \"k_gas_busmap\": False, # False or path/to/ch4_busmap.csv\n", + " \"bus_weight_tocsv\": None, # None or path/to/bus_weight.csv\n", + " \"bus_weight_fromcsv\": None, # None or path/to/bus_weight.csv\n", + " \"gas_weight_tocsv\": None, # None or path/to/gas_bus_weight.csv\n", + " \"gas_weight_fromcsv\": None, # None or path/to/gas_bus_weight.csv\n", + " \"line_length_factor\": 1, # Factor to multiply distance between new buses for new line lengths\n", + " \"remove_stubs\": False, # remove stubs bevore kmeans clustering\n", + " \"use_reduced_coordinates\": False, # If True, do not average cluster coordinates\n", + " \"random_state\": 42, # random state for replicability of clustering results\n", + " \"n_init\": 10, # affects clustering algorithm, only change when neccesary\n", + " \"max_iter\": 100, # affects clustering algorithm, only change when neccesary\n", + " \"tol\": 1e-6, # affects clustering algorithm, only change when neccesary\n", + " \"CPU_cores\": 8, # number of cores used during clustering, \"max\" for all cores available.\n", + " },\n", + " \"sector_coupled_clustering\": {\n", + " \"active\": True, # choose if clustering is activated\n", + " \"carrier_data\": { # select carriers affected by sector coupling\n", + " \"central_heat\": {\n", + " \"base\": [\"CH4\", \"AC\"],\n", + " \"strategy\": \"simultaneous\", # select strategy to cluster other sectors\n", + " },\n", + " },\n", + " },\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### EHV clustering" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.args[\"network_clustering_ehv\"] = True\n", + "etrago.ehv_clustering()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"AC\", \"DC\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Network clustering" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run clustering of electrical network:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "%%capture\n", + "etrago.spatial_clustering()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"AC\", \"DC\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.plot_clusters()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"CH4\"], carrier_buses=[\"CH4\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Which bus in the original network corresponds to which bus in the clustered network as well as the original network is stored in `etrago.busmap`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# A copy of the main element of the network is stored in:\n", + "etrago.busmap[\"orig_network\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "pd.Series(etrago.busmap[\"busmap\"]).head()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Run clustering of the gas network and attached technologies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "%%capture\n", + "etrago.spatial_clustering_gas()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"CH4\"], carrier_buses=[\"CH4\"])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.plot_clusters(carrier=\"CH4\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.plot_carrier(carrier_links=[\"central_resistive_heater\", \"central_heat_pump\"], carrier_buses=[\"AC\", \"central_heat\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Reduce temporal complexity" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Implemented are:\n", + "\n", + "**Downsampling**\n", + "\n", + "* time-based method\n", + "* groups of consecutive time steps are represented by one time step\n", + "* for each group, one time step is assumed to be representative\n", + "* this representative time step is weighted according to the number of time steps in its group\n", + "\n", + "**Segmentation**\n", + "\n", + "* property-based determination of representative time steps\n", + "* time steps are divided into a certain number of clusters so that similar time steps belong to the same clusters\n", + "* clusters can have different sizes, i.e. 
represent segments of different length\n", + "* only consecutive time steps are placed in the same clusters\n", + "* for each cluster, a representative time step is defined and weighted based on the number of assigned time steps\n", + "\n", + "**Typical periods**\n", + "\n", + "* typical periods are identified based on time-dependent attributes\n", + "* first, the original time series is divided into time periods of equal length\n", + "* then, the time periods are clustered and representative time periods are selected, which are called typical periods\n", + "* to model storage behavior correctly, additional constraints are required\n", + "\n", + "In case of 'typical_periods' and 'segmentation' all load p_set time series as well as all renewables p_max_pu time series are used to determine clusters.\n", + "\n", + "The following arguments define the settings for the temporal complexity reduction:\n", + "\n", + "```\n", + "args = { \n", + " # Temporal complexity reduction and disaggregation:\n", + " \"snapshot_clustering\": {\n", + " \"active\": False, # choose if clustering is activated\n", + " \"method\": \"segmentation\", # 'typical_periods' or 'segmentation'\n", + " \"extreme_periods\": None, # consideration of extreme timesteps; e.g. 'append'\n", + " \"how\": \"daily\", # type of period - only relevant for 'typical_periods'\n", + " \"storage_constraints\": \"soc_constraints\", # additional constraints for storages - only relevant for 'typical_periods'\n", + " \"n_clusters\": 5, # number of periods - only relevant for 'typical_periods'\n", + " \"n_segments\": 5, # number of segments - only relevant for 'segmentation'\n", + " },\n", + " \"skip_snapshots\": 5, # Downsampling: False or number of snapshots to skip\n", + " \"temporal_disaggregation\": {\n", + " \"active\": False, # choose if temporally full complex dispatch optimization should be conducted\n", + " \"no_slices\": 8, # number of subproblems optimization is divided into\n", + " },\n", + "}\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# 'typical_periods' and 'segmentation' are called by the following function\n", + "#etrago.snapshot_clustering()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.loads_t.p_set.sum(axis=1).to_frame(\"load_p_set\").plot(figsize=(8, 3))\n", + "plt.tight_layout()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago.network.snapshots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# run downsampling\n", + "etrago.skip_snapshots()\n", + "etrago.network.snapshots" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.loads_t.p_set.sum(axis=1).to_frame(\"load_p_set\").plot(figsize=(8, 3))\n", + "plt.tight_layout()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Weight of each snapshot is given in:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.snapshot_weightings" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Run linear optimal power flow" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following arguments define the settings for the
optimisation:\n", + "\n", + "```\n", + "args = {\n", + " \"method\": { # Choose method and settings for optimization\n", + " \"type\": \"lopf\", # type of optimization, currently only 'lopf'\n", + " \"n_iter\": 1, # abort criterion of iterative optimization, 'n_iter' or 'threshold'\n", + " \"pyomo\": True, # set if pyomo is used for model building\n", + " },\n", + " \"solver\": \"glpk\", # glpk, cplex or gurobi\n", + " \"solver_options\": {},\n", + " \"model_formulation\": \"kirchhoff\", # formulation of the LPF problem (all are equivalent)\n", + " \"extendable\": {\n", + " \"extendable_components\": [\n", + " \"as_in_db\"\n", + " ], # Array of components to optimize\n", + " \"upper_bounds_grid\": { # Set upper bounds for grid expansion\n", + " # lines in Germany\n", + " \"grid_max_D\": None, # relative to existing capacity\n", + " \"grid_max_abs_D\": { # absolute capacity per voltage level\n", + " \"380\": {\"i\": 1020, \"wires\": 4, \"circuits\": 4},\n", + " \"220\": {\"i\": 1020, \"wires\": 4, \"circuits\": 4},\n", + " \"110\": {\"i\": 1020, \"wires\": 4, \"circuits\": 2},\n", + " \"dc\": 0,\n", + " },\n", + " # border crossing lines\n", + " \"grid_max_foreign\": 4, # relative to existing capacity\n", + " \"grid_max_abs_foreign\": None, # absolute capacity per voltage level\n", + " },\n", + " },\n", + " \"generator_noise\": 789456, # a small random noise to the marginal costs of each generator in order to prevent an optima plateau\n", + " \"extra_functionality\": {}, # Choose function name (e.g. \"min_renewable_share\" or \"cross_border_flow\") or {} \n", + " \"load_shedding\": False, # helpful when debugging - a very expensive generator is added to each bus \n", + " \"lpfile\": False, # save pyomo's lp file: False or /path/to/lpfile.lp\n", + " \"csv_export\": \"results\", # save results as csv: False or /path/tofolder\n", + " \"pf_post_lopf\": {\n", + " \"active\": True, # choose if a pf should be performed after the lopf\n", + " \"add_foreign_lopf\": True, # keep results of lopf for foreign DC-links\n", + " \"q_allocation\": \"p_nom\", # allocate reactive power via 'p_nom' or 'p'\n", + " },\n", + "}\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "#path = \"before_lopf\"\n", + "#etrago.export_to_csv(path)\n", + "#etrago = Etrago(csv_folder_name=path)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Extendable storage units:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.storage_units[etrago.network.storage_units.p_nom_extendable].loc[\n", + " :, [\"p_nom\", \"p_nom_min\", \"p_nom_max\", \"p_nom_extendable\", \"carrier\", \"marginal_cost\", \"capital_cost\"]].head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.storage_units[etrago.network.storage_units.p_nom_extendable].carrier.unique()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.storage_units.carrier.unique()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Extendable stores:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.stores[etrago.network.stores.e_nom_extendable].loc[\n", + " :, [\"e_nom\", \"e_nom_min\", \"e_nom_max\", \"e_nom_extendable\", \"carrier\", 
\"marginal_cost\", \"capital_cost\"]].head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.stores[etrago.network.stores.e_nom_extendable].carrier.unique()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.stores.carrier.unique()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Extendable lines:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.network.lines[etrago.network.lines.s_nom_extendable].loc[\n", + " :, [\"s_nom\", \"s_nom_min\", \"s_nom_max\", \"s_nom_extendable\", \"carrier\", \"v_nom\", \"capital_cost\", \"country\"]].head()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "etrago.lopf()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "After the optimisation you can run the following to:\n", + "\n", + "* conduct LOPF with full complex time series for dispatch disaggregation\n", + "\n", + "```\n", + " etrago.dispatch_disaggregation()\n", + "```\n", + "\n", + "* run power flow to obtain reactive power flows over lines\n", + "\n", + "```\n", + " etrago.pf_post_lopf()\n", + "```\n", + "\n", + "* conduct spatial disaggregation of clustered results\n", + "\n", + "```\n", + " etrago.disaggregation()\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago.plot_grid(line_colors=\"expansion_abs\", bus_colors=\"storage_expansion\", bus_sizes= 0.000001)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "etrago.calc_results()\n", + "etrago.results" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from etrago.tools.plot import curtailment, nodal_gen_dispatch, flexibility_usage" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "nodal_gen_dispatch(etrago.network)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "curtailment(etrago.network, carrier=\"wind_onshore\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "flexibility_usage(etrago, \"heat\")" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.11" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/doc/howToUse.rst b/doc/howToUse.rst index c1f782a4b..20f97f2c3 100644 --- a/doc/howToUse.rst +++ b/doc/howToUse.rst @@ -5,7 +5,7 @@ How to use eTraGo? After you installed eTraGo you would typically start optimization runs by executing the ‘appl.py’ which is situated in -``./eTrago/etrago/`` (e.g by ``python3 appl.py``). +``./eTrago/etrago/`` (e.g by ``python3 appl.py`` from the terminal). 
eTraGo doesn't have a graphical user interface, the ‘appl.py’ is used as a simple user interface which can be edited with @@ -13,7 +13,7 @@ the preferred python-editor. Here parameters, calculation methods and scenario settings are set in a python dictionary called 'args'. To run the desired calculation, it is crucial to understand these parameters. -In addition, some of them contradict the usage of others. +In addition, some of them contradict the usage of others. You find the documentation of all defined parameters from the 'args' here: :func:`etrago.appl.run_etrago`. @@ -35,7 +35,7 @@ For more specific or extensive changes you are highly invited to write code and add new functionalities. Once the calculation has finished the PyPSA network of the Etrago-object will -contain all results. Som main results (e.g. anuual system costs) are calculated +contain all results. Some main results (e.g. annual system costs) are calculated by :meth:`etrago.calc_results` and can be accessed via 'etrago.results'. You can use several plotting functions from the :meth:`etrago.tools.plot` in order to visualize the results. For example @@ -52,7 +52,8 @@ Examples and tutorial notebooks -.. toctree:: - :maxdepth: 7 +**eTraGo version 0.5.1:** +`etrago_OpenMod_Zuerich18 `_. - OpenMod +**eTraGo version 0.9:** +`eTraGo_eGon_final_workshop `_. diff --git a/doc/images/eTraGo_model.png b/doc/images/eTraGo_model.png deleted file mode 100644 index 1be7f3a97..000000000 Binary files a/doc/images/eTraGo_model.png and /dev/null differ diff --git a/doc/images/ego_tools.svg b/doc/images/ego_tools.svg new file mode 100644 index 000000000..37c6edb3b --- /dev/null +++ b/doc/images/ego_tools.svg @@ -0,0 +1,679 @@ + [679 lines of SVG markup, stripped in this excerpt, omitted] diff --git a/doc/images/modelling_concept.png b/doc/images/modelling_concept.png new file mode 100644 index 000000000..d0f756e8b Binary files /dev/null and b/doc/images/modelling_concept.png differ diff --git a/doc/installation.rst b/doc/installation.rst index 75128c3ca..f10ef9895 100644 --- a/doc/installation.rst +++ b/doc/installation.rst @@ -20,7 +20,7 @@ you create a virtual environment (where you like it) and activate it: ..
code-block:: bash - $ virtualenv venv --clear -p python3.7 + $ virtualenv venv --clear -p python3.8 $ source venv/bin/activate $ cd venv @@ -30,7 +30,7 @@ install eTraGo with the pip command, as previously explained. Linux and Ubuntu ================ -The Package eTraGo is tested with Ubuntu 16.04, 18.04 and 20.04 inside the virtual +The Package eTraGo is tested with Ubuntu 16.04, 18.04, 20.04 and 22.04 inside the virtual environments of `virtualenv `_. The installation is shown above. @@ -40,14 +40,15 @@ Windows or Mac OSX users ======================== For Windows and/or Mac OSX user we highly recommend to install and use Anaconda -for you Python3 installation. First install anaconda including python 3.5 or +for your Python3 installation. First install Conda including python 3.8 or higher version from https://www.anaconda.com/download/ and open an anaconda prompt as administrator and run: .. code-block:: bash $ conda config --add channels conda-forge - $ conda install shapely + $ conda create -n etrago_env python=3.8 + $ conda activate etrago_env $ pip install eTraGo @@ -62,9 +63,8 @@ environments. Setup database connection ========================= -The package `ego.io `_ will be installed -automatically when eTraGo is installed. The ``egoio`` -gives you python SQL-Alchemy representations of +The eTraGo module `db `_ +gives you python SQL-Alchemy representations of the `OpenEnergy-Database(oedb) `_ and access to it by using the `oedialect `_, which is a SQL-Alchemy binding Python package for the REST-API used by the OpenEnergy Platform (OEP). In order to connect eTraGo via the oedialect with the oedb you have to create an account at `openenergy-platform.org/login `_. -You can name the `'db' `_ +You can name the `'db' `_ argument of the 'args' of the :func:`etrago.appl.etrago` as you wish. Once the :func:`etrago.appl.etrago` is executed you will be asked to enter how you want to connect to which database. If you want to use the oedialect enter the following connection parameter. For and you have to take your credentials which you obtained by registering at `openenergy-platform.org/login `_. -Your API access / login data will be saved in the folder ``.egoio`` in the file +Your API access / login data will be saved in the folder ``.etrago_database`` in the file ``config.ini``. Consequently, in the config.ini you can also change your connection parameters or add new ones. In the following you can see how the config.ini looks like when you use the oedialect, a local postgresql database or the old psycopg2 developer connection. Once you have created a connection (which is saved in the config.ini) you do not have to enter the connection parameter again. The software will take the connection parameter -which corresponds to the entry at the `'db' `_ argument. +which corresponds to the entry at the `'db' `_ argument. oedialect connection diff --git a/doc/theoretical_background.rst b/doc/theoretical_background.rst index 73e4d96ce..62c8e2d9e 100644 --- a/doc/theoretical_background.rst +++ b/doc/theoretical_background.rst @@ -7,158 +7,112 @@ Theoretical Background Definitions and Units ===================== -eTraGo executes the Open Source software PyPSA to perform power flow -simulations and uses its definitions and -`units `_. - +eTraGo is based on the open source tool `PyPSA `_ and uses its definitions and units.
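For readers unfamiliar with PyPSA's data structures, a minimal, self-contained sketch of the kind of linear optimal power flow problem eTraGo builds on is shown below. All component names and numbers are purely illustrative and not part of the eTraGo data model; depending on the PyPSA version, ``network.optimize()`` replaces ``network.lopf()``.

.. code-block:: python

   import pypsa

   network = pypsa.Network()
   network.set_snapshots(range(3))
   # Two 380 kV buses connected by a line with limited thermal capacity.
   network.add("Bus", "bus0", v_nom=380)
   network.add("Bus", "bus1", v_nom=380)
   network.add("Line", "line01", bus0="bus0", bus1="bus1", x=0.1, s_nom=200)
   # A generator at one end and a load at the other.
   network.add("Generator", "gen0", bus="bus0", p_nom=300, marginal_cost=25)
   network.add("Load", "load1", bus="bus1", p_set=150)
   # Minimise dispatch costs subject to the linearised power flow constraints.
   network.lopf(network.snapshots)
   print(network.generators_t.p)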
Assumptions on Data =================== -eTraGo fetches its necessary input data from the OpenEnergy Platform including -load, generation, grid and scenario-related data. More details can be found in -the `Data-Processing `_. +eTraGo fetches the input data from the `OpenEnergy Platform `_. The data includes electricity and gas grid topology as well as data on energy supply and load for the considered sectors (electricity, gas, heat and e-mobility) plus data on flexibility potentials deriving from those sectors, e.g. Dynamic Line Rating, Demand Side Management and flexibility potentials arising from e-mobility. More details on the data model can be found in the documentation of `eGon-data `_. + +At the moment, there are two scenarios available based on scenario C2035 of the network expansion plan ([NEP]_), version 2021. The base one is called eGon2035. To analyse the effect of flexibility options, there is an eGon2035_lowflex scenario available which depicts a lower penetration of flexibilities. More scenarios are being developed. The eGon100RE scenario, which is characterised by 100% renewable generation, is being implemented. Analogous to the scenario above, an eGon100RE_lowflex scenario will be available. + +You can see the modeling concepts of the scenarios in the figure below. The components marked in green have exogenous capacity and endogenous dispatch whereas the components marked in red are optimised endogenously in capacity and dispatch. -As overview, the Open Source grid structure is developed by processing data -from `OpenStreetMap (OSM) `_ to obtain -geo-referenced locations of substations and links equal or above the 110 kV -voltage level. OSM also provides information about residential, retail, -industrial and agricultural areas which is used with standardized profiles to -obtain load data. Generation data of solar and wind rely on weather data from -[coastdat-2]_. Both, load and generation data, match the annual amount for the -year 2011. eTraGo enables the investigation of three scenarios - Status Quo, -NEP 2035 and eGo100. Status Quo corresponds to the actual grid, NEP2035 -follows assumptions for the year 2035 by [NEP2015]_ and eGo100 assumes to -operate the future energy system completely by renewables [ehighway2050]_. +.. figure:: images/modelling_concept.png + :align: center + :scale: 75% Methods =========== -PyPSA ----- -The power flow simulations are performed by the Open Source tool -`PyPSA `_ with a linear approximation for the -optimization of power flows in general. Expecting that eTraGo fulfills the -assumptions to perfom a LOPF (small voltage angle differences, branch -resistances negligible to their reactances, voltage magnitudes can be kept at -nominal values) since it focuses on the extra-high and high voltage levels. As -objective value of the optimization, the overall system costs are considered. - - -Clustering approaches --------------- -EHV-Clustering ^^^^^^^^^^^^^^ -This method maps an input network to an output network with the nodes of the -extra-high voltage level. All nodes with a voltage level below the extra-high -voltage level are mapped to their nearest neighboring node in the extra-high -voltage level with the -`dijkstra algorithm `_ -(110 kV ---> 220,380 kV). -K-Means Clustering ^^^^^^^^^^^^^^^^^^ -This `method `_ maps an input -network to a new output network with an adjustable number of nodes and new -coordinates. The algorithm sets these coordinates randomly and minimizes a
The algorithm sets these coordinates randomly and minimizes a -certain parameter like for example the distances between old coordinates and -their nearest neighbor in the set of new coordinates. The method was -implemented by `Hoersch et al. `_ within -PyPSA. - -Snapshot skipping -^^^^^^^^^^^^^^^^^ -This method simplifies the simulation temporally by considering every n-th -snapshot of a given time series. The regarded snapshots are weighted by the -number of neglected snapshots to ensure a comparable calculation of costs. -This method assumes the chosen snapshots to be represenative for the next n-th -snapshots in the time series. - -Snapshot-Clustering -^^^^^^^^^^^^^^^^^^^ -This method aggregate given time series for various time intervals like i.e. -days using the `tsam `_ package. Contrary to -snapshot skipping, this approach averages a certain period of snapshots -instead of choosing a representative snapshot. - - -Storage expansion ------------------ -To evaluate the amount of storage units in future energy systems, the possible -installation of new storage units at every node in the network is allowed. The -size and operation of these storages are part of the optimization problem. - -Two types of storage technologies are considered - batteries and hydrogen in -underground caverns. Li-Ion battery storages as representatives for short-term -(several hours) technologies, which can be installed at every node. -Underground hydrogen storages represent long-term or seasonal (weeks) -technologies and can be build at every node with appropriate salt formations -in the underground. The storage parameters for both types are reached by -[Acatech2015]_, the information about salt formations are given by [BGR]_. - - -Grid expansion --------------- -The grid expansion is realized by extending the capacities of existing -lines and substations. These capacities are regarded as part of the -optimization problem, whereby the possible extension is unlimited. With respect -to the different voltage levels and lengths MVA-specific costs are considered -in the linear optimization of the power flow. Besides, several planned grid -expansion scenarios from the German grid development plan can be considered as -possible additional power lines by using the 'scn_extension' argument. +======= + + +Optimisation with PyPSA +----------------------- + +Within eTraGo, the fetched data model is translated into a `PyPSA `_-network. The optimisation is performed with a linear approximation assuming eTraGo to fulfill the assumptions to perfom a LOPF (as those are small voltage angle differences, branch resistances negligible to their reactances, voltage magnitudes can be kept at nominal values) since it focuses on the extra-high and high voltage levels. As objective value of the optimisation, the overall system costs are considered. + +With the argument ‘pf_post_lopf’, after the LOPF a non-linear power flow simulation can be conducted. + + +Complexity Reduction +--------------------- + +The data model is characterised by a high spatial (about 8,000 electrical and 600 gas nodes) and temporal resolution (8,760 timesteps). To reduce the complexity of the resulting optimisation problem, several methods can be applied. + + +Reduction in spatial dimension: +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The **ehv clustering** maps all electrical nodes with a voltage level below the extra-high voltage level to their nearest neighboring node in the extra-high voltage level with the Dijkstra’s algorithm (110 kV —> 220 / 380 kV). 
+
+The **k-means Clustering** reduces the electrical or gas network to an
+adjustable number of nodes by considering the geographical positions of the
+respective nodes. This method has been implemented within PyPSA by [Hoersch]_.
+
+The **k-medoids Dijkstra Clustering** aggregates nodes considering the
+network topology. First, a k-medoids clustering divides the original nodes of
+the network into groups by their geographical positions and identifies the
+geographical medoid node of each cluster. Afterwards, the original nodes are
+assigned to the previously identified medoids by applying Dijkstra's
+algorithm to the original network topology, taking the line lengths into
+account. Finally, the original nodes of each cluster are represented by one
+aggregated node at the position of the medoid node.
+
+In general, the clustering of the **sector-coupled system** is divided into
+two steps: First, the electrical and gas grids are clustered independently
+using one of the methods described above. Afterwards, the nodes of the other
+sectors (hydrogen, heat, e-mobility and DSM nodes) are mapped according to
+their connection to electricity or gas buses and aggregated to one node per
+carrier.
+
+After optimising the spatially reduced network, a **spatial disaggregation**
+can be conducted.
+
+
+Reduction in temporal dimension:
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The method **Skip Snapshots** implies a downsampling to every n-th time step.
+The considered snapshots are weighted accordingly to account for the analysis
+of one whole year (see the small sketch at the end of the Methods section).
+
+By using the method called **Segmentation**, a hierarchical clustering of
+consecutive timesteps to segments of variable length is applied [Pineda]_.
+
+The **Snapshot Clustering on Typical Periods** implies a hierarchical
+clustering of time periods of a predefined length (e.g. days or weeks) to
+typical periods. Those typical periods are weighted according to the number
+of periods in their cluster. This method optionally includes linking the
+typical periods in a second time layer to account for intertemporal
+dependencies, following [Kotzur]_.
+
+By applying a 2-level-approach, a **temporal disaggregation** can be
+conducted. This means optimising the dispatch using the fully complex time
+series in a second step, after grid and storage expansion have been optimised
+using the complexity-reduced time series in the first step.
+
+
+Grid and Storage / Store expansion
+----------------------------------
+
+The grid expansion is realized by extending the capacities of existing lines
+and substations. These capacities are considered as part of the optimisation
+problem, whereby the possible extension is unlimited. With respect to the
+different voltage levels and lengths, MVA-specific costs are considered in
+the optimisation.
+
+As shown in the figure above, several options to store energy are part of the
+modeling concept. Extendable batteries (modeled as storage units) are
+assigned to every node in the electrical grid. A minimum installed capacity
+is considered to account for home batteries ([NEP]_). Their expansion and
+operation are part of the optimisation. Furthermore, two types of hydrogen
+stores (modeled as stores) are available. Overground stores are optimised in
+operation and dispatch without limitations, whereas underground stores
+representing salt caverns are limited by geographical conditions ([BGR]_).
+Additionally, heat stores are part of the optimisation in terms of power and
+energy, without upper limits.
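+
+To illustrate the snapshot weighting used by **Skip Snapshots** (a minimal,
+self-contained sketch with toy data, not the eTraGo implementation):
+
+.. code-block:: python
+
+    import pandas as pd
+
+    # toy hourly load series for one year
+    load = pd.Series(
+        1.0, index=pd.date_range("2035-01-01", periods=8760, freq="h")
+    )
+
+    n = 5  # keep every 5th snapshot
+    kept = load.index[::n]
+    # each kept snapshot is weighted to stand in for n hours
+    weights = pd.Series(n, index=kept)
+
+    # the weighted, downsampled series still covers the whole year
+    assert (load.loc[kept] * weights).sum() == load.sum()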
Miscellaneous Features
---------
-Several features were developed to enhance the functionality of eTraGo. As
-appropriate computer setting, the 'solver_options' and a 'generator_noise' are
-possible arguments. The latter adds a reproducible small random noise to the
-marginal costs of each generator in order to prevent an optima plateau. The
-specific solver options depend on the applied solver like for example Gurobi,
-CPLEX or GLPK. Considering reproducibility, the 'load_cluster' argument
-enables to load a former calculated clustered network. Besides,
-'line_grouping' provides a grouping of lines which connect the same buses.
-The 'branch_capacity_factor' adds a factor to adapt all line capacities in order
-to consider (n-1) security. Because the average number of HV systems is much
-smaller than the one of eHV lines, you can choose factors for 'HV' and 'eHV'.
-The 'load_shedding' argument is used for debugging complex grids in order to avoid
-infeasibilities. It introduces a very expensive generator at each bus to meet
-the demand. When optimizing storage units and grid expansion without limiting
-constraints, the need for load shedding should not be existent. The
-'minimize_loading' argument forces to minimize the loading of the lines next
-to the costs. 'Parallelization' provides the opportunity to devide the
-optimization problem into a given number of sub-problems. For a group of
-snapshots the problem will be solved separately. This functionality can
-only be used for problems which do not have dependencies from one snapshot
-to another. Therefore this option can not be used
-with the optimization of storage units due to their state of charge.
+----------------------
+Several features were developed to enhance the functionality of eTraGo.

-References
-==========
+To customize computation settings, ‘solver_options’ and ‘generator_noise’
+should be adapted. The latter adds a reproducible small random noise to the
+marginal costs of each generator in order to prevent an optima plateau. The
+specific solver options depend on the applied solver (e.g. Gurobi, CPLEX or
+GLPK).
+
+In ‘extendable’ you can adapt the types of components you want to be
+optimised in capacity and set upper limits for grid expansion inside Germany
+and for lines to foreign countries.

-.. [NEP2015] Übertragungsnetzbetreiber Deutschland. (2015).:
-    *Netzentwicklungsplan Strom 2025*, Version 2015, 1. Entwurf, 2015.
-    (``_)
+The ‘extra_functionality’ argument allows you to consider extra constraints
+like limits for energy import and export or minimal renewable shares in
+generation.

-.. [coastdat-2] coastDat-2 (2017).:
-    Hindcast model ``_
+‘branch_capacity_factor’ adds a factor to adapt all line capacities in order
+to consider (n-1) security. Because the average number of HV systems is much
+smaller than that of eHV lines, you can choose factors for ‘HV’ and ‘eHV’
+separately.

-.. [ehighway2050] e-Highway2050. (2015).:
-    e-HIGHWAY 2050 Modular Development Plan of the Pan-European Transmission
-    System 2050 - database per country. Retrieved from
-    (``_)
+The ‘load_shedding’ argument is used for debugging complex grids in order to
+avoid infeasibilities. It introduces a very expensive generator at each bus
+to meet the demand. When optimising storage units and grid expansion without
+limiting constraints, load shedding should not be needed.

-.. [Acatech2015] 'Flexibilitätskonzepte für die Stromversorgung 2050
-    ``_'
+With ‘foreign_lines’ you can adapt the foreign lines to be modeled as
+DC-links (e.g.
to avoid loop flows). + + +References +========== -.. [BGR] 'Salzstruktur in Norddeutschland <>'_. 2015.: - Data provided by the Federal Institute for Geosciences and Natural - Resources (Bundesanstalt für Geowissenschaften und Rohstoffe, BGR) +.. [NEP] Übertragungsnetzbetreiber Deutschland (2021): + *Netzentwicklungsplan Strom 2035*, Version 2021, 1. Entwurf. 2021. + +.. [Hoersch] Jonas Hoersch et al. (2017): + *The role of spatial scale in joint optimisations of generation and transmission for European highly renewable scenarios*. 2017. + ``_ + +.. [Pineda] Salvador Pineda et al. (2018): + *Chronological Time-Period Clustering for Optimal Capacity Expansion Planning With Storage*. 2018. + ``_ + +.. [Kotzur] Leander Kotzur et al. (2018): + *Time series aggregation for energy system design: Modeling seasonal storage*. 2018. + ``_ + +.. [BGR] Bundesanstalt fuer Geowissenschaften und Rohstoffe et al. (2020): + *nSpEE-DS - Teilprojekt Bewertungskriterien und Potenzialabschätzung*. 2020. + ``_ diff --git a/doc/whatsnew.rst b/doc/whatsnew.rst index c7894943c..87ed81be6 100644 --- a/doc/whatsnew.rst +++ b/doc/whatsnew.rst @@ -8,6 +8,7 @@ These are new features and improvements of note in each release. :local: :backlinks: top +.. include:: whatsnew/v0_9_0.rst .. include:: whatsnew/v0_8_0.rst .. include:: whatsnew/v0_7_2.rst .. include:: whatsnew/v0_7_1.rst diff --git a/doc/whatsnew/v0_5_1.rst b/doc/whatsnew/v0_5_1.rst index ac1f83a9f..ad37004f1 100644 --- a/doc/whatsnew/v0_5_1.rst +++ b/doc/whatsnew/v0_5_1.rst @@ -1,5 +1,5 @@ Release 0.5.1 (February 01, 2018) -++++++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++ eTraGo works with ego.io 0.3.0 diff --git a/doc/whatsnew/v0_6_1.rst b/doc/whatsnew/v0_6_1.rst index 5b9ec69d5..630828a3b 100644 --- a/doc/whatsnew/v0_6_1.rst +++ b/doc/whatsnew/v0_6_1.rst @@ -1,5 +1,5 @@ Release 0.6.1 (Juli 18, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++ eTraGo works with pypi and is suitable for eGo 0.2.0 Added features diff --git a/doc/whatsnew/v0_7_0.rst b/doc/whatsnew/v0_7_0.rst index e690bd5e2..abedaaf9d 100644 --- a/doc/whatsnew/v0_7_0.rst +++ b/doc/whatsnew/v0_7_0.rst @@ -1,9 +1,9 @@ Release 0.7.0 (September 6, 2018) -++++++++++++++++++++++++++++ +++++++++++++++++++++++++++++++++++ eTraGo is able to produce feasible non-linear power flows based on optimization results and allows the disaggregation of clustered results to original spatial complexities. Added features --------------- +--------------- * The pf_post_lopf function was improved. Due to changes in the data set now the non-linear power flow (pf) creates feasible solutions. If network optimization is turned on, a second lopf which regards the updated reactances and optimizes only dispatch is performed before the pf is executed. * The disaggregation method was included. When using a network clustering method to reduce the spatial complexity of the given network, a disaggregation method can be used afterwards to distribute the nodal results (generation and storage timeseries) to the original complexity. The method 'disaggregation': 'uniform' can be used as an interface functionality for distribution grid planning tools like eDisGo. 
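As an illustration of the ``'disaggregation': 'uniform'`` interface mentioned
in the release notes above, a hypothetical excerpt of the settings dictionary
passed to eTraGo (cf. ``etrago/appl.py``) could look as follows; surrounding
keys are omitted, and the exact key name may differ between releases:

.. code-block:: python

    args = {
        # ... other eTraGo settings ...
        # distribute clustered nodal results uniformly back to the
        # original, unclustered buses after the optimization
        "disaggregation": "uniform",
    }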
diff --git a/doc/whatsnew/v0_7_1.rst b/doc/whatsnew/v0_7_1.rst
index 53eebb24c..d7e692677 100644
--- a/doc/whatsnew/v0_7_1.rst
+++ b/doc/whatsnew/v0_7_1.rst
@@ -1,5 +1,5 @@
 Release 0.7.1 (October 25, 2018)
-++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++++
 A minor release adding new options for additional constraints, modelling
 assumptions and plotting.
 
 Added features
diff --git a/doc/whatsnew/v0_7_2.rst b/doc/whatsnew/v0_7_2.rst
index 58ee28ca6..c972f76bc 100644
--- a/doc/whatsnew/v0_7_2.rst
+++ b/doc/whatsnew/v0_7_2.rst
@@ -1,5 +1,5 @@
 Release 0.7.2 (Juni 15, 2020)
-++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++
 A minor release adding the following features.
 
 Added features
diff --git a/doc/whatsnew/v0_8_0.rst b/doc/whatsnew/v0_8_0.rst
index 420ce9e08..b4f28df3e 100644
--- a/doc/whatsnew/v0_8_0.rst
+++ b/doc/whatsnew/v0_8_0.rst
@@ -1,5 +1,5 @@
 Release 0.8.0 (April 8, 2021)
-++++++++++++++++++++++++++++
+++++++++++++++++++++++++++++++
 eTraGo has now a more object-oriented programming design.
 
 Added features
diff --git a/doc/whatsnew/v0_9_0.rst b/doc/whatsnew/v0_9_0.rst
new file mode 100644
index 000000000..469187656
--- /dev/null
+++ b/doc/whatsnew/v0_9_0.rst
@@ -0,0 +1,28 @@
+Release 0.9.0 (November 21, 2023)
++++++++++++++++++++++++++++++++++
+
+Added features
+--------------
+
+* eTraGo is now compatible with Python 3.8
+* eTraGo can now import and optimize networks that include other energy sectors such as gas, heating and mobility
+* Various flexibility options from different energy sectors can be considered in the optimization:
+
+  - Weather-dependent capacity of transmission lines (Dynamic Line Rating)
+  - Demand Side Management
+  - Flexible charging of electric vehicles
+  - Heat and hydrogen stores
+  - Power2Hydrogen, Hydrogen2Power
+  - Methanation and Steam Methane Reforming
+
+* eTraGo arguments can now be partially provided and updated
+* eTraGo can now import data models from databases without using ego.io
+* Existing clustering methods were adapted to be able to reduce the complexity of non-electrical sectors
+* Improvement of the ehv clustering (much faster now)
+* A new clustering method named "k-medoids Dijkstra Clustering" (can be called by "kmedoids-dijkstra") was implemented. This method considers the electrical distance between the buses in the network. It is also available for the methane grid.
+* It is possible to select if foreign buses are considered or not during the clustering process.
+* The number of CPUs used to perform the clustering can be provided by the user.
+* Some more options are available to conduct a reduction in temporal dimension:
+
+  - segmentation: clustering of adjacent hours to segments of variable length
+  - clustering to typical periods extended to cluster on weeks and months
+
+* A temporal disaggregation is available through a 2-level-approach including a dispatch optimization on the temporally fully complex model. To limit the RAM usage, you can optionally divide the optimisation problem into a chosen number of slices.
+* New plotting functions to visualize the optimization results from all the included energy sectors were implemented +* Functions to analyze results were updated to consider new sectors diff --git a/etrago/appl.py b/etrago/appl.py old mode 100755 new mode 100644 index 587150f2e..95ba59b65 --- a/etrago/appl.py +++ b/etrago/appl.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -21,7 +21,7 @@ """ This is the application file for the tool eTraGo. Define your connection parameters and power flow settings before executing -the function etrago. +the function run_etrago. """ @@ -29,15 +29,19 @@ import os import os.path + __copyright__ = ( "Flensburg University of Applied Sciences, " "Europa-Universität Flensburg, Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, lukasol, wolfbunke, mariusves, s3pp" - +__author__ = ( + "ulfmueller, lukasol, wolfbunke, mariusves, s3pp, ClaraBuettner, " + "CarlosEpia, KathiEsterl, fwitte, gnn, pieterhexen, AmeliaNadal" +) -if 'READTHEDOCS' not in os.environ: +if "READTHEDOCS" not in os.environ: # Sphinx does not run this code. # Do not import internal packages directly @@ -45,337 +49,677 @@ args = { # Setup and Configuration: - 'db': 'oedb', # database session - 'gridversion': 'v0.4.6', # None for model_draft or Version number - 'method': { # Choose method and settings for optimization - 'type': 'lopf', # type of optimization, currently only 'lopf' - 'n_iter': 2, # abort criterion of iterative optimization, 'n_iter' or 'threshold' - 'pyomo': True}, # set if pyomo is used for model building - 'pf_post_lopf': { - 'active': False, # choose if perform a pf after a lopf simulation - 'add_foreign_lopf': True, # keep results of lopf for foreign DC-links - 'q_allocation': 'p_nom'}, # allocate reactive power via 'p_nom' or 'p' - 'start_snapshot': 1, - 'end_snapshot': 3, - 'solver': 'gurobi', # glpk, cplex or gurobi - 'solver_options': { # {} for default options, specific for solver - 'BarConvTol': 1.e-5, - 'FeasibilityTol': 1.e-5, - 'method':2, - 'crossover':0, - 'logFile': 'solver.log'}, - 'model_formulation': 'kirchhoff', # angles or kirchhoff - 'scn_name': 'NEP 2035', # a scenario: Status Quo, NEP 2035, eGo 100 + "db": "egon-data", # database session + "gridversion": None, # None for model_draft or Version number + "method": { # Choose method and settings for optimization + "type": "lopf", # type of optimization, currently only 'lopf' + "n_iter": 4, # abort criterion of iterative optimization, 'n_iter' or 'threshold' + "pyomo": True, # set if pyomo is used for model building + }, + "pf_post_lopf": { + "active": True, # choose if perform a pf after lopf + "add_foreign_lopf": True, # keep results of lopf for foreign DC-links + "q_allocation": "p_nom", # allocate reactive power via 'p_nom' or 'p' + }, + "start_snapshot": 1, + "end_snapshot": 10, + "solver": "gurobi", # glpk, cplex or gurobi + "solver_options": { + "BarConvTol": 1.0e-5, + "FeasibilityTol": 1.0e-5, + "method": 2, + "crossover": 0, + "logFile": "solver_etrago.log", + "threads": 4, + }, + "model_formulation": "kirchhoff", # angles or kirchhoff + "scn_name": "eGon2035", # scenario: 
eGon2035, eGon100RE or status2019 # Scenario variations: - 'scn_extension': None, # None or array of extension scenarios - 'scn_decommissioning': None, # None or decommissioning scenario + "scn_extension": None, # None or array of extension scenarios + "scn_decommissioning": None, # None or decommissioning scenario # Export options: - 'lpfile': False, # save pyomo's lp file: False or /path/tofolder - 'csv_export': 'results', # save results as csv: False or /path/tofolder + "lpfile": False, # save pyomo's lp file: False or /path/to/lpfile.lp + "csv_export": "results", # save results as csv: False or /path/tofolder # Settings: - 'extendable': ['network', 'storage'], # Array of components to optimize - 'generator_noise': 789456, # apply generator noise, False or seed number - 'extra_functionality':{}, # Choose function name or {} - # Clustering: - 'network_clustering_kmeans': { - 'active': True, # choose if clustering is activated - 'n_clusters': 10, # number of resulting nodes - 'kmeans_busmap': False, # False or path/to/busmap.csv - 'line_length_factor': 1, # - 'remove_stubs': False, # remove stubs bevore kmeans clustering - 'use_reduced_coordinates': False, # - 'bus_weight_tocsv': None, # None or path/to/bus_weight.csv - 'bus_weight_fromcsv': None, # None or path/to/bus_weight.csv - 'n_init': 10, # affects clustering algorithm, only change when neccesary - 'max_iter': 100, # affects clustering algorithm, only change when neccesary - 'tol': 1e-6, # affects clustering algorithm, only change when neccesary - 'n_jobs': -1}, # affects clustering algorithm, only change when neccesary - 'network_clustering_ehv': False, # clustering of HV buses to EHV buses. - 'disaggregation': None, # None, 'mini' or 'uniform' - 'snapshot_clustering': { - 'active': False, # choose if clustering is activated - 'n_clusters': 2, # number of periods - 'how': 'daily', # type of period, currently only 'daily' - 'storage_constraints': 'soc_constraints'}, # additional constraints for storages + "extendable": { + "extendable_components": [ + "as_in_db" + ], # Array of components to optimize + "upper_bounds_grid": { # Set upper bounds for grid expansion + # lines in Germany + "grid_max_D": None, # relative to existing capacity + "grid_max_abs_D": { # absolute capacity per voltage level + "380": {"i": 1020, "wires": 4, "circuits": 4}, + "220": {"i": 1020, "wires": 4, "circuits": 4}, + "110": {"i": 1020, "wires": 4, "circuits": 2}, + "dc": 0, + }, + # border crossing lines + "grid_max_foreign": 4, # relative to existing capacity + "grid_max_abs_foreign": None, # absolute capacity per voltage level + }, + }, + "generator_noise": 789456, # apply generator noise, False or seed number + "extra_functionality": {}, # Choose function name or {} + # Spatial Complexity: + "delete_dispensable_ac_buses": True, # bool. 
Find and delete expendable buses
+    "network_clustering_ehv": {
+        "active": False,  # choose if clustering of HV buses to EHV buses is activated
+        "busmap": False,  # False or path to stored busmap
+    },
+    "network_clustering": {
+        "active": True,  # choose if clustering is activated
+        "method": "kmedoids-dijkstra",  # choose clustering method: kmeans or kmedoids-dijkstra
+        "n_clusters_AC": 30,  # total number of resulting AC nodes (DE+foreign)
+        "cluster_foreign_AC": False,  # take foreign AC buses into account, True or False
+        "method_gas": "kmedoids-dijkstra",  # choose clustering method: kmeans or kmedoids-dijkstra
+        "n_clusters_gas": 17,  # total number of resulting CH4 nodes (DE+foreign)
+        "cluster_foreign_gas": False,  # take foreign CH4 buses into account, True or False
+        "k_elec_busmap": False,  # False or path/to/busmap.csv
+        "k_gas_busmap": False,  # False or path/to/ch4_busmap.csv
+        "bus_weight_tocsv": None,  # None or path/to/bus_weight.csv
+        "bus_weight_fromcsv": None,  # None or path/to/bus_weight.csv
+        "gas_weight_tocsv": None,  # None or path/to/gas_bus_weight.csv
+        "gas_weight_fromcsv": None,  # None or path/to/gas_bus_weight.csv
+        "line_length_factor": 1,  # Factor to multiply distance between new buses for new line lengths
+        "remove_stubs": False,  # remove stubs before kmeans clustering
+        "use_reduced_coordinates": False,  # If True, do not average cluster coordinates
+        "random_state": 42,  # random state for replicability of clustering results
+        "n_init": 10,  # affects clustering algorithm, only change when necessary
+        "max_iter": 100,  # affects clustering algorithm, only change when necessary
+        "tol": 1e-6,  # affects clustering algorithm, only change when necessary
+        "CPU_cores": 4,  # number of cores used during clustering, "max" for all cores available.
+    },
+    "sector_coupled_clustering": {
+        "active": True,  # choose if clustering is activated
+        "carrier_data": {  # select carriers affected by sector coupling
+            "central_heat": {
+                "base": ["CH4", "AC"],
+                "strategy": "simultaneous",  # select strategy to cluster other sectors
+            },
+        },
+    },
+    "spatial_disaggregation": None,  # None or 'uniform'
+    # Temporal Complexity:
+    "snapshot_clustering": {
+        "active": False,  # choose if clustering is activated
+        "method": "segmentation",  # 'typical_periods' or 'segmentation'
+        "extreme_periods": None,  # consideration of extreme timesteps; e.g. 'append'
+        "how": "daily",  # type of period - only relevant for 'typical_periods'
+        "storage_constraints": "soc_constraints",  # additional constraints for storages - only relevant for 'typical_periods'
+        "n_clusters": 5,  # number of periods - only relevant for 'typical_periods'
+        "n_segments": 5,  # number of segments - only relevant for segmentation
+    },
+    "skip_snapshots": 5,  # False or number of snapshots to skip
+    "temporal_disaggregation": {
+        "active": False,  # choose if temporally full complex dispatch optimization should be conducted
+        "no_slices": 8,  # number of subproblems optimization is divided into
+    },
     # Simplifications:
-    'skip_snapshots': False, # False or number of snapshots to skip
-    'branch_capacity_factor': {'HV': 0.5, 'eHV' : 0.7},  # p.u. branch derating
-    'load_shedding': False, # meet the demand at value of loss load cost
-    'foreign_lines': {'carrier': 'AC', # 'DC' for modeling foreign lines as links
-                      'capacity': 'osmTGmod'}, # 'osmTGmod', 'ntc_acer' or 'thermal_acer'
-    'comments': None}
+    "branch_capacity_factor": {"HV": 0.5, "eHV": 0.7},  # p.u.
branch derating + "load_shedding": True, # meet the demand at value of loss load cost + "foreign_lines": { + "carrier": "AC", # 'DC' for modeling foreign lines as links + "capacity": "osmTGmod", # 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer' + }, + "comments": None, +} def run_etrago(args, json_path): - """The etrago function works with following arguments: - + """Function to conduct optimization considering the following arguments. Parameters ---------- - db : str - ``'oedb'``, - Name of Database session setting stored in *config.ini* of *.egoio* - - gridversion : NoneType or str - ``'v0.4.6'``, + Name of Database session setting stored in *config.ini* of *.egoio*, + e.g. ``'oedb'``. + gridversion : None or str Name of the data version number of oedb: state ``'None'`` for model_draft (sand-box) or an explicit version number (e.g. 'v0.4.6') for the grid schema. - method : dict - {'type': 'lopf', 'n_iter': 5, 'pyomo': True}, - Choose 'lopf' for 'type'. In case of extendable lines, several lopfs - have to be performed. Choose either 'n_init' and a fixed number of - iterations or 'thershold' and a threashold of the objective function as - abort criteria. - Set 'pyomo' to False for big optimization problems, currently only - possible when solver is 'gurobi'. - - pf_post_lopf :dict - {'active': True, 'add_foreign_lopf': True, 'q_allocation': 'p_nom'}, - Option to run a non-linear power flow (pf) directly after the - linear optimal power flow (and thus the dispatch) has finished. - If foreign lines are modeled as DC-links (see foreign_lines), results - of the lopf can be added by setting 'add_foreign_lopf'. - Reactive power can be distributed either by 'p_nom' or 'p'. + Choose method and settings for optimization. + The provided dictionary can have the following entries: + + * "lopf" : str + Type of optimization, currently only "lopf". Default: "lopf". + * "n_iter" : int + In case of extendable lines, several LOPFs have to be performed. + You can either set "n_iter" and specify a fixed number of + iterations or set "threshold" and specify a threshold of the + objective function as abort criteria of the iterative optimization. + Default: 4. + * "threshold" : int + In case of extendable lines, several LOPFs have to be performed. + You can either set "n_iter" and specify a fixed number of + iterations or set "threshold" and specify a threshold of the + objective function as abort criteria of the iterative optimization. + Per default, "n_iter" of 4 is set. + * "pyomo" : bool + Set to True, if pyomo is used for model building. + Set to False for big optimization problems - currently only + possible when solver is "gurobi". + + pf_post_lopf : dict + Settings for option to run a non-linear power flow (PF) directly after + the linear optimal power flow (LOPF), and thus the dispatch + optimisation, has finished. + The provided dictionary can have the following entries: + + * "active" : bool + If True, a PF is performed after the LOPF. Default: True. + * "add_foreign_lopf" : bool + If foreign lines are modeled as DC-links (see parameter + `foreign_lines`), results of the LOPF can be added by setting + "add_foreign_lopf" to True. Default: True. + * "q_allocation" : bool + Allocate reactive power to all generators at the same bus either + by "p_nom" or "p". + Default: "p_nom". start_snapshot : int - 1, - Start hour of the scenario year to be calculated. - + Start hour of the scenario year to be calculated. Default: 1. end_snapshot : int - 2, - End hour of the scenario year to be calculated. 
-
+        End hour of the scenario year to be calculated. If snapshot clustering
+        is used (see parameter `snapshot_clustering`), the selected snapshots
+        should cover the number of periods / segments. Default: 2.
     solver : str
-        'glpk',
-        Choose your preferred solver. Current options: 'glpk' (open-source),
-        'cplex' or 'gurobi'.
-
-    solver_options: dict
+        Choose your preferred solver. Current options: "glpk" (open-source),
+        "cplex" or "gurobi". Default: "gurobi".
+    solver_options : dict
         Choose settings of solver to improve simulation time and result.
-        Options are described in documentation of choosen solver.
-
-    model_formulation: str
-        'angles'
+        Options are described in documentation of chosen solver. Per default,
+        the following dictionary is set:
+
+        {
+            "BarConvTol": 1.0e-5,
+            "FeasibilityTol": 1.0e-5,
+            "method": 2,
+            "crossover": 0,
+            "logFile": "solver_etrago.log",
+            "threads": 4,
+        }
+
+        Make sure to reset or adapt these settings when using another solver!
+        Otherwise, you may run into errors.
+    model_formulation : str
         Choose formulation of pyomo-model.
-        Current options: angles, cycles, kirchhoff, ptdf
-
+        Current options are: "angles", "cycles", "kirchhoff", "ptdf".
+        "angles" works best for small networks, while "kirchhoff" works best
+        for larger networks.
+        Default: "kirchhoff".
     scn_name : str
-        'Status Quo',
-        Choose your scenario. Currently, there are three different
-        scenarios: 'Status Quo', 'NEP 2035', 'eGo100'. If you do not
-        want to use the full German dataset, you can use the excerpt of
-        Schleswig-Holstein by adding the acronym SH to the scenario
-        name (e.g. 'SH Status Quo').
-
-    scn_extension : NoneType or list
-        None,
-        Choose extension-scenarios which will be added to the existing
-        network container. Data of the extension scenarios are located in
-        extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus)
-        with the prefix 'extension_'.
-        Currently there are three overlay networks:
-        'nep2035_confirmed' includes all planed new lines confirmed by the
-        Bundesnetzagentur
-        'nep2035_b2' includes all new lines planned by the
-        Netzentwicklungsplan 2025 in scenario 2035 B2
-        'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and
-        adds BE and NO as electrical neighbours
-
-    scn_decommissioning : str
-        None,
-        Choose an extra scenario which includes lines you want to decommise
+        Choose your scenario. Currently, there are two different
+        scenarios: "eGon2035", "eGon100RE". Default: "eGon2035".
+    scn_extension : None or str
+        This option currently does not work!
+
+        Choose extension-scenarios which will be added to the existing
+        network container. Data of the extension scenarios are located in
+        extension-tables (e.g. model_draft.ego_grid_pf_hv_extension_bus)
+        with the prefix 'extension\_'.
+        There are three overlay networks:
+
+        * 'nep2035_confirmed' includes all planned new lines confirmed by the
+          Bundesnetzagentur
+        * 'nep2035_b2' includes all new lines planned by the
+          Netzentwicklungsplan 2025 in scenario 2035 B2
+        * 'BE_NO_NEP 2035' includes planned lines to Belgium and Norway and
+          adds BE and NO as electrical neighbours
+
+        Default: None.
+    scn_decommissioning : NoneType or str
+        This option currently does not work!
+
+        Choose an extra scenario which includes lines you want to decommission
         from the existing network. Data of the decommissioning scenarios are
         located in extension-tables
         (e.g. model_draft.ego_grid_pf_hv_extension_bus) with the prefix
-        'decommissioning_'.
+        'decommissioning\_'.
Currently, there are two decommissioning_scenarios which are linked to extension-scenarios: - 'nep2035_confirmed' includes all lines that will be replaced in - confirmed projects - 'nep2035_b2' includes all lines that will be replaced in - NEP-scenario 2035 B2 - lpfile : obj - False, - State if and where you want to save pyomo's lp file. Options: - False or '/path/tofolder'.import numpy as np - - csv_export : obj - False, - State if and where you want to save results as csv files.Options: - False or '/path/tofolder'. - - extendable : list - ['network', 'storages'], - Choose components you want to optimize. - Settings can be added in /tools/extendable.py. - The most important possibilities: - 'network': set all lines, links and transformers extendable - 'german_network': set lines and transformers in German grid - extendable - 'foreign_network': set foreign lines and transformers extendable - 'transformers': set all transformers extendable - 'overlay_network': set all components of the 'scn_extension' - extendable - 'storages': allow to install extendable storages - (unlimited in size) at each grid node in order to meet - the flexibility demand. - 'network_preselection': set only preselected lines extendable, - method is chosen in function call + * 'nep2035_confirmed' includes all lines that will be replaced in + confirmed projects + * 'nep2035_b2' includes all lines that will be replaced in + NEP-scenario 2035 B2 + Default: None. + lpfile : bool or str + State if and where you want to save pyomo's lp file. Options: + False or '/path/tofile.lp'. Default: False. + csv_export : bool or str + State if and where you want to save results as csv files. Options: + False or '/path/tofolder'. Default: False. + + extendable : dict + Choose components you want to optimize and set upper bounds for grid + expansion. The provided dictionary can have the following entries: + + * "extendable_components" : list(str) + The list defines a set of components to optimize. + Settings can be added in /tools/extendable.py. + The most important possibilities: + + * 'as_in_db' + leaves everything as it is defined in the data coming from the + database + * 'network' + set all lines, links and transformers in electrical grid + extendable + * 'german_network' + set lines and transformers in German electrical grid extendable + * 'foreign_network' + set foreign lines and transformers in electrical grid + extendable + * 'transformers' + set all transformers extendable + * 'storages' / 'stores' + allow to install extendable storages (unlimited in size) at + each grid node in order to meet the flexibility demand + + Default: "as_in_db". + + * "upper_bounds_grid" : dict + Dictionary can have the following entries: + + * 'grid_max_D' + Upper bounds for electrical grid expansion can be defined for + lines in Germany relative to the existing capacity. + Alternatively, 'grid_max_abs_D' can be used. Per default, this + is set to None and 'grid_max_abs_D' is set. + + * 'grid_max_abs_D' + Upper bounds for electrical grid expansion can be defined for + lines in Germany as absolute maximum capacities between two + electrical buses per voltage level. Per default the following + dictionary is set: + + { + "380": {"i": 1020, "wires": 4, "circuits": 4}, + "220": {"i": 1020, "wires": 4, "circuits": 4}, + "110": {"i": 1020, "wires": 4, "circuits": 2}, + "dc": 0, + } + * 'grid_max_foreign' + Upper bounds for border-crossing electrical lines can be + defined relative to the existing capacity. 
Alternatively, + 'grid_max_abs_foreign' can be set. + Default: 4. + * 'grid_max_abs_foreign' + Upper bounds for border-crossing electrical lines can be + defined equally to 'grid_max_abs_D' as absolute capacity per + voltage level. + Default: None. generator_noise : bool or int State if you want to apply a small random noise to the marginal costs of each generator in order to prevent an optima plateau. To reproduce - a noise, choose the same integer (seed number). - + a noise, choose the same integer (seed number). Default: 789456. extra_functionality : dict or None - None, - Choose extra functionalities and their parameters for PyPSA-model. + Choose extra functionalities and their parameters. Settings can be added in /tools/constraints.py. Current options are: - 'max_line_ext': float - Maximal share of network extension in p.u. - 'min_renewable_share': float - Minimal share of renewable generation in p.u. - 'cross_border_flow': array of two floats - Limit cross-border-flows between Germany and its neigbouring - countries, set values in p.u. of german loads in snapshots - for all countries - (positiv: export from Germany) - 'cross_border_flows_per_country': dict of cntr and array of floats - Limit cross-border-flows between Germany and its neigbouring - countries, set values in p.u. of german loads in snapshots - for each country - (positiv: export from Germany) - 'max_curtailment_per_gen': float - Limit curtailment of all wind and solar generators in Germany, - values set in p.u. of generation potential. - 'max_curtailment_per_gen': float - Limit curtailment of each wind and solar generator in Germany, - values set in p.u. of generation potential. - 'capacity_factor': dict of arrays - Limit overall energy production for each carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_gen': dict of arrays - Limit overall energy production for each generator by carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_cntr': dict of dict of arrays - Limit overall energy production country-wise for each carrier, - set upper/lower limit in p.u. - 'capacity_factor_per_gen_cntr': dict of dict of arrays - Limit overall energy production country-wise for each generator - by carrier, set upper/lower limit in p.u. - - network_clustering_kmeans :dict - {'active': True, 'n_clusters': 10, 'kmeans_busmap': False, - 'line_length_factor': 1.25, 'remove_stubs': False, - 'use_reduced_coordinates': False, 'bus_weight_tocsv': None, - 'bus_weight_fromcsv': None, 'n_init': 10, 'max_iter': 300, - 'tol': 1e-4, 'n_jobs': 1}, - State if you want to apply a clustering of all network buses down to - only ``'n_clusters'`` buses. The weighting takes place considering - generation and load at each node. - With ``'kmeans_busmap'`` you can choose if you want to load cluster - coordinates from a previous run. - Option ``'remove_stubs'`` reduces the overestimating of line meshes. - The other options affect the kmeans algorithm and should only be - changed carefully, documentation and possible settings are described - in sklearn-package (sklearn/cluster/k_means_.py). - This function doesn't work together with ``'line_grouping = True'``. - - network_clustering_ehv : bool - False, + + * 'max_line_ext' : float + Maximal share of network extension in p.u. + * 'min_renewable_share' : float + Minimal share of renewable generation in p.u. + * 'cross_border_flow' : array of two floats + Limit AC cross-border-flows between Germany and its neighbouring + countries. Set values in MWh for all snapshots, e.g. 
[-x, y] + (with x Import, y Export, positive: export from Germany). + * 'cross_border_flows_per_country' : dict of cntr and array of floats + Limit AC cross-border-flows between Germany and its neighbouring + countries. Set values in MWh for each country, e.g. [-x, y] + (with x Import, y Export, positive: export from Germany). + * 'capacity_factor' : dict of arrays + Limit overall energy production for each carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_gen' : dict of arrays + Limit overall energy production for each generator by carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_cntr': dict of dict of arrays + Limit overall energy production country-wise for each carrier. + Set upper/lower limit in p.u. + * 'capacity_factor_per_gen_cntr': dict of dict of arrays + Limit overall energy production country-wise for each generator + by carrier. Set upper/lower limit in p.u. + + delete_dispensable_ac_buses: bool + Choose if electrical buses that are only connecting two lines should be + removed. These buses have no other components attached to them. The + connected lines are merged. This reduces the spatial complexity without + losing any accuracy. + Default: True. + network_clustering_ehv : dict + Choose if you want to apply an extra high voltage clustering to the + electrical network. + The provided dictionary can have the following entries: + + * "active" : bool Choose if you want to cluster the full HV/EHV dataset down to only the EHV buses. In that case, all HV buses are assigned to their closest EHV - sub-station, taking into account the shortest distance on power lines. - + substation, taking into account the shortest distance on power lines. + Default: False. + * "busmap" : str + Choose if an stored busmap can be used to make the process quicker, or + a new busmap must be calculated. False or path to the busmap in csv + format should be given. + Default: False + + network_clustering : dict + Choose if you want to apply a clustering of all network buses and + specify settings. + The provided dictionary can have the following entries: + + * "active" : bool + If True, the AC buses are clustered down to ``'n_clusters_AC'`` + and the gas buses are clustered down to``'n_clusters_gas'``. + Default: True. + * "method" : str + Method used for AC clustering. You can choose between two + clustering methods: + * "kmeans": considers geographical locations of buses + * "kmedoids-dijkstra": considers electrical distances between + buses + + Default: "kmedoids-dijkstra". + * "n_clusters_AC" : int + Defines total number of resulting AC nodes including DE and foreign + nodes if `cluster_foreign_AC` is set to True, otherwise only DE + nodes. + Default: 30. + * "cluster_foreign_AC" : bool + If set to False, the AC buses outside Germany are not clustered + and the buses inside Germany are clustered to complete + ``'n_clusters_AC'``. If set to True, foreign AC buses are clustered + as well and included in number of clusters specified through + ``'n_clusters_AC'``. + Default: False. + * "method_gas" : str + Method used for gas clustering. You can choose between two + clustering methods: + * "kmeans": considers geographical locations of buses + * "kmedoids-dijkstra": considers 'electrical' distances between + buses + + Default: "kmedoids-dijkstra". + * "n_clusters_gas" : int + Defines total number of resulting CH4 nodes including DE and + foreign nodes if `cluster_foreign_gas` is set to True, otherwise + only DE nodes. + Default: 17. 
+ * "cluster_foreign_gas" : bool + If set to False, the gas buses outside Germany are not clustered + and the buses inside Germany are clustered to complete + ``'n_clusters_gas'``. If set to True, foreign gas buses are + clustered as well and included in number of clusters specified + through ``'n_clusters_gas'``. + Default: False. + * "k_elec_busmap" : bool or str + With this option you can load cluster coordinates from a previous + AC clustering run. Options are False, in which case no previous + busmap is loaded, and path/to/busmap.csv in which case the busmap + is loaded from the specified file. Please note, that when a path is + provided, the set number of clusters will be ignored. + Default: False. + * "k_gas_busmap" : bool or str + With this option you can load cluster coordinates from a previous + gas clustering run. Options are False, in which case no previous + busmap is loaded, and path/to/busmap.csv in which case the busmap + is loaded from the specified file. Please note, that when a path is + provided, the set number of clusters will be ignored. + Default: False. + * "bus_weight_fromcsv" : None or str + In general, the weighting of AC buses takes place considering + generation and load at each node. With this option, you can load an + own weighting for the AC buses by providing a path to a csv file. + If None, weighting is conducted as described above. + Default: None. + * "bus_weight_tocsv" : None or str + Specifies whether to store the weighting of AC buses to csv or not. + If None, it is not stored. Otherwise, it is stored to the provided + path/to/bus_weight.csv. + Default: None. + * "gas_weight_fromcsv" : None or str + In general, the weighting of CH4 nodes takes place considering + generation and load at each node, as well as non-transport + capacities at each node. With this option, you can load an own + weighting for the CH4 buses by providing a path to a csv file. If + None, weighting is conducted as described above. + Default: None. + * "gas_weight_tocsv" : None or str + Specifies whether to store the weighting of gas buses to csv or + not. If None, it is not stored. Otherwise, it is stored to the + provided path/to/gas_bus_weight.csv. + Default: None. + * "line_length_factor" : float + Defines the factor to multiply the crow-flies distance + between new buses by, in order to get new line lengths. + Default: 1. + * "remove_stubs" : bool + If True, remove stubs before k-means clustering, which reduces the + overestimating of line meshes. + This option is only used within the k-means clustering. + Default: False. + * "use_reduced_coordinates" : bool + If True, do not average cluster coordinates, but take from busmap. + This option is only used within the k-means clustering. + Default: False. + * "random_state" : int + Random state for replicability of clustering results. Default: 42. + * "n_init" : int + Affects clustering algorithm, only change when necessary! + Documentation and possible settings are described in + sklearn-package (sklearn/cluster/kmeans.py). + Default: 10. + * "max_iter" : int + Affects clustering algorithm, only change when necessary! + Documentation and possible settings are described in + sklearn-package (sklearn/cluster/kmeans.py). + Default: 100. + * "tol" : float + Affects clustering algorithm, only change when necessary! + Documentation and possible settings are described in + sklearn-package (sklearn/cluster/kmeans.py). + Default: 1e-6. + * "CPU_cores" : int or str + Number of cores used in clustering. 
Specify a concrete number or + "max" to use all cores available. + Default: 4. + + sector_coupled_clustering : dict + Choose if you want to apply a clustering of sector coupled carriers, + such as central_heat, and specify settings. + The provided dictionary can have the following entries: + + * "active" : bool + State if you want to apply clustering of sector coupled carriers, + such as central_heat. + Default: True. + * "carrier_data" : dict[str, dict] + Keys of the dictionary specify carriers affected by sector + coupling, e.g. "central_heat". The corresponding dictionaries + specify, how the carrier should be clustered. This dictionary must + contain the following entries: + + * "base" : list(str) + The approach bases on already clustered buses (AC and CH4) and + builds clusters around the topology of those buses. With this + option, you can specify the carriers to use as base. See + `strategy` for more information. + * "strategy" : str + Strategy to use in the clustering. Possible options are: + + * "consecutive" + This strategy clusters around the buses of the first + carrier in the `'base'`` list. The links to other buses are + preserved. All buses, that have no connection to the first + carrier will then be clustered around the buses of the + second carrier in the list. + * "simultaneous" + This strategy looks for links connecting the buses of the + carriers in the ``'base'`` list and aggregates buses in + case they have the same set of links connected. For + example, a heat bus connected to CH4 via gas boiler and to + AC via heat pump will only form a cluster with other buses, + if these have the same links to the same clusters of CH4 + and AC. + + Per default, the following dictionary is set: + { + "central_heat": { + "base": ["CH4", "AC"], + "strategy": "simultaneous", + }, + } + + disaggregation : None or str + Specify None, in order to not perform a spatial disaggregation, or the + method you want to use for the spatial disaggregation. Only possible + option is currently "uniform". snapshot_clustering : dict - {'active': True, 'n_clusters': 2, 'how': 'daily', - 'storage_constraints': 'soc_constraints'}, - State if you want to cluster the snapshots and run the optimization - only on a subset of snapshot periods. The 'n_clusters' value defines - the number of periods which will be clustered to. - With 'how' you can choose the period, currently 'daily' is the only - option. Choose 'daily_bounds' or 'soc_constraints' to add extra - contraints for the SOC of storage units. - - branch_capacity_factor : dict - {'HV': 0.5, 'eHV' : 0.7}, + State if you want to apply a temporal clustering and run the + optimization only on a subset of snapshot periods, and specify + settings. The provided dictionary can have the following entries: + + * "active" : bool + Choose, if clustering is activated or not. If True, it is + activated. + Default: False. + * "method" : str + Method to apply. Possible options are "typical_periods" and + "segmentation". + Default: "segmentation". + * "extreme_periods" : None or str + Method used to consider extreme snapshots (time steps with extreme + residual load) in reduced timeseries. + Possible options are None, "append", "new_cluster_center", and + "replace_cluster_center". The default is None, in which case + extreme periods are not considered. + * "how" : str + Definition of period in case `method` is set to "typical_periods". + Possible options are "daily", "weekly", and "monthly". + Default: "daily". 
+ * "storage_constraints" : str + Defines additional constraints for storage units in case `method` + is set to "typical_periods". Possible options are "daily_bounds", + "soc_constraints" and "soc_constraints_simplified". + Default: "soc_constraints". + * "n_clusters" : int + Number of clusters in case `method` is set to "typical_periods". + Default: 5. + * "n_segments" : int + Number of segments in case `method` is set to "segmentation". + Default: 5. + + skip_snapshots : bool or int + State None, if you want to use all time steps, or provide a number, + if you only want to consider every n-th timestep to reduce + temporal complexity. Default: 5. + temporal_disaggregation : dict + State if you want to apply a second LOPF considering dispatch only + (no capacity optimization) to disaggregate the dispatch to the whole + temporal complexity. Be aware that a load shedding will be applied in + this optimization. The provided dictionary must have the following + entries: + + * "active" : bool + Choose, if temporal disaggregation is activated or not. If True, + it is activated. + Default: False. + * "no_slices" : int + With "no_slices" the optimization problem will be calculated as a + given number of sub-problems while using some information on the + state of charge of storage units and stores from the former + optimization (at the moment only possible with skip_snapshots and + extra_functionalities are disregarded). + Default: 8. + + branch_capacity_factor : dict[str, float] Add a factor here if you want to globally change line capacities (e.g. to "consider" an (n-1) criterion or for debugging purposes). - + The factor specifies the p.u. branch rating, e.g. 0.5 to allow half the + line capacity. Per default, it is set to {'HV': 0.5, 'eHV' : 0.7}. load_shedding : bool - False, State here if you want to make use of the load shedding function which is helpful when debugging: a very expensive generator is set to each - bus and meets the demand when regular - generators cannot do so. - + bus and meets the demand when regular generators cannot do so. + Default: False. foreign_lines : dict - {'carrier':'AC', 'capacity': 'osmTGmod}' Choose transmission technology and capacity of foreign lines: - 'carrier': 'AC' or 'DC' - 'capacity': 'osmTGmod', 'ntc_acer' or 'thermal_acer' + + * 'carrier': 'AC' or 'DC' + * 'capacity': 'osmTGmod', 'tyndp2020', 'ntc_acer' or 'thermal_acer' + + Per default, it is set to {'carrier':'AC', 'capacity': 'osmTGmod'}. comments : str - None + Can be any comment you wish to make. Returns ------- - network : `pandas.DataFrame` - eTraGo result network based on `PyPSA network + etrago : etrago object + eTraGo containing all network information and a PyPSA network `_ + """ - etrago = Etrago(args, json_path) + etrago = Etrago(args, json_path=json_path) # import network from database etrago.build_network_from_db() - # adjust network, e.g. 
set (n-1)-security factor + # adjust network regarding eTraGo setting etrago.adjust_network() # ehv network clustering etrago.ehv_clustering() - # k-mean clustering - etrago.kmean_clustering() - - # skip snapshots - etrago.skip_snapshots() + # spatial clustering + etrago.spatial_clustering() + etrago.spatial_clustering_gas() # snapshot clustering etrago.snapshot_clustering() + # skip snapshots + etrago.skip_snapshots() + # start linear optimal powerflow calculations etrago.lopf() - # TODO: check if should be combined with etrago.lopf() + # conduct lopf with full complex timeseries for dispatch disaggregation + etrago.temporal_disaggregation() + + # start power flow based on lopf results etrago.pf_post_lopf() - # spaital disaggregation - etrago.disaggregation() + # spatial disaggregation + # needs to be adjusted for new sectors + etrago.spatial_disaggregation() # calculate central etrago results - # etrago.calc_results() + etrago.calc_results() return etrago -if __name__ == '__main__': +if __name__ == "__main__": # execute etrago function print(datetime.datetime.now()) etrago = run_etrago(args, json_path=None) print(datetime.datetime.now()) etrago.session.close() - # plots + # plots: more in tools/plot.py # make a line loading plot - # plot_line_loading(network) - # plot stacked sum of nominal power for each generator type and timestep - # plot_stacked_gen(network, resolution="MW") - # plot to show extendable storages - # storage_distribution(network) - # extension_overlay_network(network) + # etrago.plot_grid( + # line_colors='line_loading', bus_sizes=0.0001, timesteps=range(2)) + # network and storage + # etrago.plot_grid( + # line_colors='expansion_abs', + # bus_colors='storage_expansion', + # bus_sizes=0.0001) + # flexibility usage + # etrago.flexibility_usage('DSM') diff --git a/etrago/args.json b/etrago/args.json index 28efd1ed1..80a9f30a1 100644 --- a/etrago/args.json +++ b/etrago/args.json @@ -1,63 +1,119 @@ { - "db": "oedb", - "gridversion": "v0.4.6", - "method": {"type": "lopf", - "n_iter":5, - "pyomo":true}, + "db": "egon-data", + "gridversion": null, + "method": { + "type": "lopf", + "n_iter": 4, + "pyomo": true + }, "pf_post_lopf": { - "active": true, - "add_foreign_lopf": true, - "q_allocation": "p_nom", - "calc_losses": true}, + "active": false, + "add_foreign_lopf": true, + "q_allocation": "p_nom" + }, "start_snapshot": 1, - "end_snapshot" : 72, + "end_snapshot": 2, "solver": "gurobi", - "solver_options":{"threads":2, - "method":2, - "BarHomogeneous":1, - "NumericFocus": 3, - "BarConvTol":"1.e-5", - "FeasibilityTol":"1.e-6", - "logFile":"gurobi_eTraGo.log"}, + "solver_options": {}, "model_formulation": "kirchhoff", - "scn_name": "eGo 100", + "scn_name": "eGon2035", "scn_extension": null, "scn_decommissioning": null, - "lpfile": false, - "csv_export": "./results/", - "db_export": false, - "extendable": ["network", "storage"], + "lpfile": false, + "csv_export": "results", + "extendable": { + "extendable_components": [ + "as_in_db" + ], + "upper_bounds_grid": { + "grid_max_D": null, + "grid_max_abs_D": { + "380": { + "i": 1020, + "wires": 4, + "circuits": 4 + }, + "220": { + "i": 1020, + "wires": 4, + "circuits": 4 + }, + "110": { + "i": 1020, + "wires": 4, + "circuits": 2 + }, + "dc": 0 + }, + "grid_max_foreign": 4, + "grid_max_abs_foreign": null + } + }, "generator_noise": 789456, - "minimize_loading": false, - "ramp_limits": false, "extra_functionality": {}, - "network_clustering_kmeans": { - "active": true, - "n_clusters": 10, - "kmeans_busmap": false, - 
"line_length_factor": 1.25, - "remove_stubs": false, - "use_reduced_coordinates": false, - "bus_weight_tocsv": null, - "bus_weight_fromcsv": null, - "n_init": 10, - "max_iter": 300, - "tol": 1e-4, - "n_jobs": 1}, - "network_clustering_ehv": false, - "disaggregation": "uniform", + "delete_dispensable_ac_buses": true, + "network_clustering_ehv": { + "active": false, + "busmap": false + }, + "network_clustering": { + "active": true, + "method": "kmedoids-dijkstra", + "n_clusters_AC": 30, + "cluster_foreign_AC": false, + "method_gas": "kmedoids-dijkstra", + "n_clusters_gas": 17, + "cluster_foreign_gas": false, + "k_elec_busmap": false, + "k_gas_busmap": false, + "bus_weight_tocsv": null, + "bus_weight_fromcsv": null, + "gas_weight_tocsv": null, + "gas_weight_fromcsv": null, + "line_length_factor": 1, + "remove_stubs": false, + "use_reduced_coordinates": false, + "random_state": 42, + "n_init": 10, + "max_iter": 100, + "tol": 1e-6, + "CPU_cores": 4 + }, + "sector_coupled_clustering": { + "active": true, + "carrier_data": { + "central_heat": { + "base": [ + "CH4", + "AC" + ], + "strategy": "simultaneous" + } + } + }, + "spatial_disaggregation": null, "snapshot_clustering": { - "active": true, - "n_clusters": 2, - "how": "daily", - "storage_constraints": "soc_constraints"}, - "parallelisation": false, - "skip_snapshots": false, - "line_grouping": false, - "branch_capacity_factor": {"HV": 0.5, - "eHV" : 0.7}, + "active": false, + "method": "segmentation", + "extreme_periods": null, + "how": "daily", + "storage_constraints": "soc_constraints", + "n_clusters": 5, + "n_segments": 5 + }, + "skip_snapshots": 5, + "temporal_disaggregation": { + "active": false, + "no_slices": 8 + }, + "branch_capacity_factor": { + "HV": 0.5, + "eHV": 0.7 + }, "load_shedding": false, - "foreign_lines": {"carrier": "AC", - "capacity": "osmTGmod"}, - "comments": "" - } + "foreign_lines": { + "carrier": "AC", + "capacity": "osmTGmod" + }, + "comments": null +} diff --git a/etrago/cluster/__init__.py b/etrago/cluster/__init__.py index cc95f41d1..5dd475b7e 100644 --- a/etrago/cluster/__init__.py +++ b/etrago/cluster/__init__.py @@ -5,5 +5,3 @@ __copyright__ = "tba" __license__ = "tba" __author__ = "tba" - - diff --git a/etrago/cluster/disaggregation.py b/etrago/cluster/disaggregation.py index a23e2a40f..d1f5f84b3 100644 --- a/etrago/cluster/disaggregation.py +++ b/etrago/cluster/disaggregation.py @@ -1,19 +1,20 @@ from functools import reduce -from itertools import count, product +from itertools import product from operator import methodcaller as mc, mul as multiply import cProfile -import random -import string import time -import pandas as pd +from loguru import logger as log from pyomo.environ import Constraint from pypsa import Network +import pandas as pd + +from etrago.tools import noops +from etrago.tools.utilities import residual_load class Disaggregation: - def __init__(self, original_network, clustered_network, clustering, - skip=()): + def __init__(self, original_network, clustered_network, busmap, skip=()): """ :param original_network: Initial (unclustered) network structure :param clustered_network: Clustered network used for the optimization @@ -22,15 +23,18 @@ def __init__(self, original_network, clustered_network, clustering, """ self.original_network = original_network self.clustered_network = clustered_network - self.clustering = clustering + self.busmap = busmap - self.buses = pd.merge(original_network.buses, - clustering.busmap.to_frame(name='cluster'), - left_index=True, right_index=True) + self.buses = 
pd.merge( + original_network.buses, + busmap.to_frame(name="cluster"), + left_index=True, + right_index=True, + ) self.skip = skip - self.idx_prefix = '_' + self.idx_prefix = "_" def add_constraints(self, cluster, extra_functionality=None): """ @@ -48,9 +52,8 @@ def reindex_with_prefix(self, dataframe, prefix=None): prefix = self.idx_prefix dataframe.set_index( dataframe.index.map(lambda x: self.idx_prefix + x), - inplace=True) - - + inplace=True, + ) def construct_partial_network(self, cluster, scenario): """ @@ -62,126 +65,165 @@ def construct_partial_network(self, cluster, scenario): :param cluster: Index of the cluster to disaggregate :return: Tuple of (partial_network, external_buses) where - `partial_network` is the result of the partial decomposition - and `external_buses` represent clusters adjacent to `cluster` that may - be influenced by calculations done on the partial network. + `partial_network` is the result of the partial decomposition + and `external_buses` represent clusters adjacent to `cluster` that + may be influenced by calculations done on the partial network. """ - #Create an empty network + # Create an empty network partial_network = Network() # find all lines that have at least one bus inside the cluster - busflags = (self.buses['cluster'] == cluster) + busflags = self.buses["cluster"] == cluster - def is_bus_in_cluster(conn): + def is_bus_in_cluster(conn, busflags=busflags): return busflags[conn] # Copy configurations to new network partial_network.snapshots = self.original_network.snapshots - partial_network.snapshot_weightings = (self.original_network - .snapshot_weightings) + partial_network.snapshot_weightings = ( + self.original_network.snapshot_weightings + ) partial_network.carriers = self.original_network.carriers # Collect all connectors that have some node inside the cluster external_buses = pd.DataFrame() - line_types = ['lines', 'links', 'transformers'] + line_types = ["lines", "links", "transformers"] for line_type in line_types: + rows: pd.DataFrame = getattr(self.original_network, line_type) + timeseries: dict[str, pd.DataFrame] = getattr( + self.original_network, line_type + "_t" + ) # Copy all lines that reside entirely inside the cluster ... - setattr(partial_network, line_type, - filter_internal_connector( - getattr(self.original_network, line_type), - is_bus_in_cluster)) + setattr( + partial_network, + line_type, + filter_internal_connector(rows, is_bus_in_cluster), + ) # ... and their time series # TODO: These are all time series, not just the ones from lines - # residing entirely in side the cluster. + # residing entirely inside the cluster. # Is this a problem? - setattr(partial_network, line_type + '_t', - getattr(self.original_network, line_type + '_t')) + # I hope not, because neither is `rows.index` a subset + # of the columns of one of the values of `timeseries`, + # nor the other way around, so it's not clear how to + # align both. 
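+            # (Editorial sketch, hypothetical and not part of the project
+            # API: if this copy ever needs to be restricted after all, the
+            # columns could in principle be intersected with `rows.index`,
+            #     subset = {
+            #         attr: df.loc[:, df.columns.intersection(rows.index)]
+            #         for attr, df in timeseries.items()
+            #     }
+            # mirroring the commented-out per-component subsetting further
+            # below, which reportedly broke the disaggregation sums.)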
+ setattr(partial_network, line_type + "_t", timeseries) # Copy all lines whose `bus0` lies within the cluster left_external_connectors = filter_left_external_connector( - getattr(self.original_network, line_type), - is_bus_in_cluster) + rows, is_bus_in_cluster + ) + + def from_busmap(x): + return self.idx_prefix + self.buses.loc[x, "cluster"] if not left_external_connectors.empty: - f = lambda x: self.idx_prefix + self.clustering.busmap.loc[x] - ca_option = pd.get_option('mode.chained_assignment') - pd.set_option('mode.chained_assignment', None) - left_external_connectors.loc[:, 'bus0'] = ( - left_external_connectors.loc[:, 'bus0'].apply(f)) - pd.set_option('mode.chained_assignment', ca_option) - external_buses = pd.concat((external_buses, - left_external_connectors.bus0)) + ca_option = pd.get_option("mode.chained_assignment") + pd.set_option("mode.chained_assignment", None) + left_external_connectors.loc[ + :, "bus0" + ] = left_external_connectors.loc[:, "bus0"].apply(from_busmap) + pd.set_option("mode.chained_assignment", ca_option) + external_buses = pd.concat( + (external_buses, left_external_connectors.bus0) + ) # Copy all lines whose `bus1` lies within the cluster right_external_connectors = filter_right_external_connector( - getattr(self.original_network, line_type), - is_bus_in_cluster) + rows, is_bus_in_cluster + ) if not right_external_connectors.empty: - f = lambda x: self.idx_prefix + self.clustering.busmap.loc[x] - ca_option = pd.get_option('mode.chained_assignment') - pd.set_option('mode.chained_assignment', None) - right_external_connectors.loc[:, 'bus1'] = ( - right_external_connectors.loc[:, 'bus1'].apply(f)) - pd.set_option('mode.chained_assignment', ca_option) - external_buses = pd.concat((external_buses, - right_external_connectors.bus1)) + ca_option = pd.get_option("mode.chained_assignment") + pd.set_option("mode.chained_assignment", None) + right_external_connectors.loc[ + :, "bus1" + ] = right_external_connectors.loc[:, "bus1"].apply(from_busmap) + pd.set_option("mode.chained_assignment", ca_option) + external_buses = pd.concat( + (external_buses, right_external_connectors.bus1) + ) # Collect all buses that are contained in or somehow connected to the # cluster buses_in_lines = self.buses[busflags].index - bus_types = ['loads', 'generators', 'stores', 'storage_units', - 'shunt_impedances'] + bus_types = [ + "loads", + "generators", + "stores", + "storage_units", + "shunt_impedances", + ] # Copy all values that are part of the cluster partial_network.buses = self.original_network.buses[ - self.original_network.buses.index.isin(buses_in_lines)] + self.original_network.buses.index.isin(buses_in_lines) + ] # Collect all buses that are external, but connected to the cluster ... externals_to_insert = self.clustered_network.buses[ self.clustered_network.buses.index.isin( - map(lambda x: x[0][len(self.idx_prefix):], - external_buses.values))] + map( + lambda x: x[0][len(self.idx_prefix) :], + external_buses.values, + ) + ) + ] # ... prefix them to avoid name clashes with buses from the original # network ... self.reindex_with_prefix(externals_to_insert) # .. and insert them as well as their time series - partial_network.buses = (partial_network.buses - .append(externals_to_insert)) + partial_network.buses = pd.concat( + [partial_network.buses, externals_to_insert] + ) partial_network.buses_t = self.original_network.buses_t # TODO: Rename `bustype` to on_bus_type for bustype in bus_types: # Copy loads, generators, ... 
from original network to network copy - setattr(partial_network, bustype, - filter_buses(getattr(self.original_network, bustype), - buses_in_lines)) + setattr( + partial_network, + bustype, + filter_buses( + getattr(self.original_network, bustype), buses_in_lines + ), + ) # Collect on-bus components from external, connected clusters buses_to_insert = filter_buses( getattr(self.clustered_network, bustype), - map(lambda x: x[0][len(self.idx_prefix):], - external_buses.values)) + map( + lambda x: x[0][len(self.idx_prefix) :], + external_buses.values, + ), + ) # Prefix their external bindings - buses_to_insert.loc[:, 'bus'] = ( - self.idx_prefix + - buses_to_insert.loc[:, 'bus']) - - setattr(partial_network, bustype, - getattr(partial_network, bustype).append(buses_to_insert)) + buses_to_insert.loc[:, "bus"] = ( + self.idx_prefix + buses_to_insert.loc[:, "bus"] + ) + + setattr( + partial_network, + bustype, + pd.concat( + [getattr(partial_network, bustype), buses_to_insert] + ), + ) # Also copy their time series - setattr(partial_network, - bustype + '_t', - getattr(self.original_network, bustype + '_t')) + setattr( + partial_network, + bustype + "_t", + getattr(self.original_network, bustype + "_t"), + ) # Note: The code above copies more than necessary, because it # copies every time series for `bustype` from the original # network and not only the subset belonging to the partial @@ -189,24 +231,29 @@ def is_bus_in_cluster(conn): # series accordingly, but there must be bug somewhere because # using it, the time series in the clusters and sums of the # time series after disaggregation don't match up. - """ - series = getattr(self.original_network, bustype + '_t') - partial_series = type(series)() - for s in series: - partial_series[s] = series[s].loc[ - :, - getattr(partial_network, bustype) - .index.intersection(series[s].columns)] - setattr(partial_network, bustype + '_t', partial_series) - """ + + # series = getattr(self.original_network, bustype + '_t') + # partial_series = type(series)() + # for s in series: + # partial_series[s] = series[s].loc[ + # :, + # getattr(partial_network, bustype) + # .index.intersection(series[s].columns)] + # setattr(partial_network, bustype + '_t', partial_series) # Just a simple sanity check # TODO: Remove when sure that disaggregation will not go insane anymore for line_type in line_types: - assert (getattr(partial_network, line_type).bus0.isin( - partial_network.buses.index).all()) - assert (getattr(partial_network, line_type).bus1.isin( - partial_network.buses.index).all()) + rows = getattr(partial_network, line_type) + + left = rows.bus0.isin(partial_network.buses.index) + right = rows.bus1.isin(partial_network.buses.index) + assert rows.loc[~(left | right), :].empty, ( + f"Not all `partial_network.{line_type}` have an endpoint," + " i.e. `bus0` or `bus1`," + f" contained in `partial_network.buses.index`." 
+ f" Spurious additional rows:\nf{rows.loc[~(left | right), :]}" + ) return partial_network, external_buses @@ -217,133 +264,203 @@ def solve(self, scenario, solver): """ Decompose each cluster into separate units and try to optimize them separately + :param scenario: :param solver: Solver that may be used to optimize partial networks """ - clusters = set(self.clustering.busmap.values) + clusters = set(self.buses.loc[:, "cluster"].values) n = len(clusters) - self.stats = {'clusters': pd.DataFrame( - index=sorted(clusters), - columns=["decompose", "spread", "transfer"])} + self.stats = { + "clusters": pd.DataFrame( + index=sorted(clusters), + columns=["decompose", "spread", "transfer"], + ) + } profile = cProfile.Profile() + profile = noops + for i, cluster in enumerate(sorted(clusters)): - print('---') - print('Decompose cluster %s (%d/%d)' % (cluster, i+1, n)) + log.info(f"Decompose {cluster=} ({i + 1}/{n})") profile.enable() t = time.time() partial_network, externals = self.construct_partial_network( - cluster, - scenario) + cluster, scenario + ) + profile.disable() - self.stats['clusters'].loc[cluster, 'decompose'] = time.time() - t - print('Decomposed in ', - self.stats['clusters'].loc[cluster, 'decompose']) + self.stats["clusters"].loc[cluster, "decompose"] = time.time() - t + log.info( + "Decomposed in " + f'{self.stats["clusters"].loc[cluster, "decompose"]}' + ) t = time.time() profile.enable() - self.solve_partial_network(cluster, partial_network, scenario, - solver) + self.solve_partial_network( + cluster, partial_network, scenario, solver + ) profile.disable() - self.stats['clusters'].loc[cluster, 'spread'] = time.time() - t - print('Result distributed in ', - self.stats['clusters'].loc[cluster, 'spread']) + self.stats["clusters"].loc[cluster, "spread"] = time.time() - t + log.info( + "Result distributed in " + f'{self.stats["clusters"].loc[cluster, "spread"]}' + ) profile.enable() t = time.time() self.transfer_results(partial_network, externals) profile.disable() - self.stats['clusters'].loc[cluster, 'transfer'] = time.time() - t - print('Results transferred in ', - self.stats['clusters'].loc[cluster, 'transfer']) + self.stats["clusters"].loc[cluster, "transfer"] = time.time() - t + log.info( + "Results transferred in " + f'{self.stats["clusters"].loc[cluster, "transfer"]}' + ) profile.enable() t = time.time() - print('---') fs = (mc("sum"), mc("sum")) for bt, ts in ( - ('generators', {'p': fs, 'q': fs}), - ('storage_units', {'p': fs, 'state_of_charge': fs, 'q': fs})): - print("Attribute sums, {}, clustered - disaggregated:" .format(bt)) + ("generators", {"p": fs, "q": fs}), + ("storage_units", {"p": fs, "state_of_charge": fs, "q": fs}), + ("links", {"p0": fs, "p1": fs}), + ): + log.info(f"Attribute sums, {bt}, clustered - disaggregated:") cnb = getattr(self.clustered_network, bt) + cnb = cnb[cnb.carrier != "DC"] onb = getattr(self.original_network, bt) - print("{:>{}}: {}".format('p_nom_opt', 4 + len('state_of_charge'), - reduce(lambda x, f: f(x), fs[:-1], cnb['p_nom_opt']) - - - reduce(lambda x, f: f(x), fs[:-1], onb['p_nom_opt']))) - - print("Series sums, {}, clustered - disaggregated:" .format(bt)) - cnb = getattr(self.clustered_network, bt + '_t') - onb = getattr(self.original_network, bt + '_t') + onb = onb[onb.carrier != "DC"] + log.info( + "{:>{}}: {}".format( + "p_nom_opt", + 4 + len("state_of_charge"), + reduce(lambda x, f: f(x), fs[:-1], cnb["p_nom_opt"]) + - reduce(lambda x, f: f(x), fs[:-1], onb["p_nom_opt"]), + ) + ) + + log.info(f"Series sums, {bt}, clustered - 
disaggregated:") + cnb = getattr(self.clustered_network, bt + "_t") + onb = getattr(self.original_network, bt + "_t") for s in ts: - print("{:>{}}: {}".format(s, 4 + len('state_of_charge'), - reduce(lambda x, f: f(x), ts[s], cnb[s]) - - - reduce(lambda x, f: f(x), ts[s], onb[s]))) + log.info( + "{:>{}}: {}".format( + s, + 4 + len("state_of_charge"), + reduce(lambda x, f: f(x), ts[s], cnb[s]) + - reduce(lambda x, f: f(x), ts[s], onb[s]), + ) + ) profile.disable() - self.stats['check'] = time.time() - t - print('Checks computed in ', self.stats['check']) - - # profile.print_stats(sort='cumtime') - - def transfer_results(self, partial_network, externals, - bustypes=['loads', 'generators', 'stores', - 'storage_units', 'shunt_impedances'], - series=None): + self.stats["check"] = time.time() - t + log.info(f"Checks computed in {self.stats['check']}s.") + + profile.print_stats(sort="cumtime") + + def transfer_results( + self, + partial_network, + externals, + bustypes=[ + "loads", + "generators", + "stores", + "storage_units", + "shunt_impedances", + ], + series=None, + ): for bustype in bustypes: - orig_buses = getattr(self.original_network, bustype + '_t') - part_buses = getattr(partial_network, bustype + '_t') - for key in (orig_buses.keys() - if series is None - else (k for k in orig_buses.keys() - if k in series.get(bustype, {}))): + orig_buses = getattr(self.original_network, bustype + "_t") + part_buses = getattr(partial_network, bustype + "_t") + for key in ( + orig_buses.keys() + if series is None + else ( + k + for k in orig_buses.keys() + if k in series.get(bustype, {}) + ) + ): for snap in partial_network.snapshots: orig_buses[key].loc[snap].update(part_buses[key].loc[snap]) - - def solve_partial_network(self, cluster, partial_network, scenario, - solver=None): + def solve_partial_network( + self, cluster, partial_network, scenario, solver=None + ): extras = self.add_constraints(cluster) - partial_network.lopf(scenario.timeindex, - solver_name=solver, - extra_functionality=extras) + partial_network.lopf( + scenario.timeindex, solver_name=solver, extra_functionality=extras + ) + + def residual_load(self, sector="electricity"): + """ + Calculates the residual load for the specified sector. + + See :attr:`~.tools.utilities.residual_load` for more information. + + Parameters + ----------- + sector : str + Sector to determine residual load for. Possible options are + 'electricity' and 'central_heat'. Default: 'electricity'. + + Returns + -------- + pd.DataFrame + Dataframe with residual load for each bus in the network. + Columns of the dataframe contain the corresponding bus name + and index of the dataframe is a datetime index with the + corresponding time step. 
+ + """ + return residual_load(self.original_network, sector) + class MiniSolverDisaggregation(Disaggregation): - def add_constraints(self, cluster, extra_functionality=None): - if extra_functionality is None: - extra_functionality = lambda network, snapshots: None + def add_constraints( + self, cluster, extra_functionality=lambda network, snapshots: None + ): extra_functionality = self._validate_disaggregation_generators( - cluster, - extra_functionality) + cluster, extra_functionality + ) return extra_functionality def _validate_disaggregation_generators(self, cluster, f): def extra_functionality(network, snapshots): f(network, snapshots) generators = self.original_network.generators.assign( - bus=lambda df: df.bus.map(self.clustering.busmap)) - grouper = [generators.carrier] + bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"]) + ) + def construct_constraint(model, snapshot, carrier): # TODO: Optimize - generator_p = [model.generator_p[(x, snapshot)] - for x in generators.loc[ - (generators.bus == cluster) & - (generators.carrier == carrier)].index] + generator_p = [ + model.generator_p[(x, snapshot)] + for x in generators.loc[ + (generators.bus == cluster) + & (generators.carrier == carrier) + ].index + ] if not generator_p: return Constraint.Feasible sum_generator_p = sum(generator_p) cluster_generators = self.clustered_network.generators[ - (self.clustered_network.generators.bus == cluster) & - (self.clustered_network.generators.carrier == carrier)] + (self.clustered_network.generators.bus == cluster) + & (self.clustered_network.generators.carrier == carrier) + ] sum_clustered_p = sum( - self.clustered_network.generators_t['p'].loc[snapshot, c] - for c in cluster_generators.index) + self.clustered_network.generators_t["p"].loc[snapshot, c] + for c in cluster_generators.index + ) return sum_generator_p == sum_clustered_p # TODO: Generate a better name network.model.validate_generators = Constraint( - list(snapshots), - set(generators.carrier), - rule=construct_constraint) + list(snapshots), + set(generators.carrier), + rule=construct_constraint, + ) + return extra_functionality # TODO: This function is never used. 
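Aside (editor's note): `_validate_disaggregation_generators` above pins, for every snapshot and carrier, the summed dispatch of a cluster's original generators to the clustered solution. A minimal standalone sketch of that bookkeeping with hypothetical data, using pandas only and deliberately independent of the project's API:

    import pandas as pd

    # Hypothetical dispatch of three original generators over two snapshots.
    dispatch = pd.DataFrame(
        {"gen_a": [1.0, 2.0], "gen_b": [3.0, 1.0], "gen_c": [2.0, 2.0]}
    )
    # Cluster and carrier assignment of each original generator.
    assignment = pd.DataFrame(
        {"cluster": ["c1", "c1", "c2"], "carrier": ["wind", "wind", "solar"]},
        index=["gen_a", "gen_b", "gen_c"],
    )

    # Sum the disaggregated dispatch per (cluster, carrier) ...
    per_cluster = (
        dispatch.T.groupby([assignment["cluster"], assignment["carrier"]])
        .sum()
        .T
    )

    # ... and require it to match the clustered dispatch.
    clustered = pd.DataFrame(
        {("c1", "wind"): [4.0, 3.0], ("c2", "solar"): [2.0, 2.0]}
    )
    assert (per_cluster - clustered).abs().max().max() < 1e-9

The Pyomo version expresses this equality as a constraint inside the partial-network LOPF instead of checking it after the fact.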
@@ -353,164 +470,321 @@ def extra_functionality(network, snapshots): f(network, snapshots) for bustype, bustype_pypsa, suffixes in [ - ('storage', 'storage_units', ['_dispatch', '_spill', '_store']), - ('store', 'stores',[''])]: - generators = getattr(self.original_network, - bustype_pypsa).assign( - bus=lambda df: df.bus.map(self.clustering.busmap)) + ( + "storage", + "storage_units", + ["_dispatch", "_spill", "_store"], + ), + ("store", "stores", [""]), + ]: + generators = getattr( + self.original_network, bustype_pypsa + ).assign( + bus=lambda df: df.bus.map(self.buses.loc[:, "cluster"]) + ) for suffix in suffixes: + def construct_constraint(model, snapshot): # TODO: Optimize - buses_p = [getattr( - model, - bustype + '_p' + suffix - )[(x, snapshot)] for x in - generators.loc[( - generators.bus == cluster - )].index] + buses_p = [ + getattr(model, bustype + "_p" + suffix)[ + (x, snapshot) + ] + for x in generators.loc[ + (generators.bus == cluster) + ].index + ] if not buses_p: return Constraint.Feasible sum_bus_p = sum(buses_p) cluster_buses = getattr( - self.clustered_network, bustype_pypsa)[ - (getattr(self.clustered_network, - bustype_pypsa).bus == cluster)] + self.clustered_network, bustype_pypsa + )[ + ( + getattr( + self.clustered_network, bustype_pypsa + ).bus + == cluster + ) + ] sum_clustered_p = sum( - getattr(self.clustered_network, bustype_pypsa + - '_t')['p'].loc[snapshot,c] for - c in cluster_buses.index) + getattr( + self.clustered_network, bustype_pypsa + "_t" + )["p"].loc[snapshot, c] + for c in cluster_buses.index + ) return sum_bus_p == sum_clustered_p # TODO: Generate a better name network.model.add_component( - 'validate_' + bustype + suffix, - Constraint(list(snapshots), - rule=construct_constraint)) + "validate_" + bustype + suffix, + Constraint(list(snapshots), rule=construct_constraint), + ) + return extra_functionality + class UniformDisaggregation(Disaggregation): - def solve_partial_network(self, cluster, partial_network, scenario, - solver=None): + def solve_partial_network( + self, cluster, partial_network, scenario, solver=None + ): + log.debug("Solving partial network.") bustypes = { - 'generators': { - 'group_by': ('carrier',), - 'series': ('p', 'q')}, - 'storage_units': { - 'group_by': ('carrier', 'max_hours'), - 'series': ('p', 'state_of_charge', 'q')}} - weights = {'p': ('p_nom_opt', 'p_max_pu'), - 'q': (('p_nom_opt',) - if (getattr(self.clustered_network, 'allocation', None) - == - 'p_nom') - else ('p_nom_opt', 'p_max_pu')), - 'state_of_charge': ('p_nom_opt',)} - filters = {'q': lambda o: o.control == "PV"} + "links": { + "group_by": ("carrier", "bus1"), + "series": ("p0", "p1"), + }, + "generators": {"group_by": ("carrier",), "series": ("p", "q")}, + "storage_units": { + "group_by": ("carrier", "max_hours"), + "series": ("p", "state_of_charge", "q"), + }, + "stores": { + "group_by": ("carrier",), + "series": ("e", "p"), + }, + } + weights = { + "p": ("p_nom_opt", "p_max_pu"), + "q": ( + ("p_nom_opt",) + if ( + getattr(self.clustered_network, "allocation", None) + == "p_nom" + ) + else ("p_nom_opt", "p_max_pu") + ), + "p0": ("p_nom_opt",), + "p1": ("p_nom_opt",), + "state_of_charge": ("p_nom_opt",), + "e": ("e_nom_opt",), + } + filters = {"q": lambda o: o.control == "PV"} + for bustype in bustypes: - pn_t = getattr(partial_network, bustype + '_t') - cl_t = getattr(self.clustered_network, bustype + '_t') + # Define attributeof components which are available + if bustype == "stores": + extendable_flag = "e_nom_extendable" + nominal_capacity = "e_nom" 
+ optimal_capacity = "e_nom_opt" + maximal_capacity = "e_nom_max" + weights["p"] = ("e_nom_opt", "e_max_pu") + else: + extendable_flag = "p_nom_extendable" + nominal_capacity = "p_nom" + optimal_capacity = "p_nom_opt" + maximal_capacity = "p_nom_max" + weights["p"] = ("p_nom_opt", "p_max_pu") + + log.debug(f"Decomposing {bustype}.") + pn_t = getattr(partial_network, bustype + "_t") + cl_t = getattr(self.clustered_network, bustype + "_t") pn_buses = getattr(partial_network, bustype) - cl_buses = getattr(self.clustered_network, bustype) - groups = product(* - [ [ {'key': key, 'value': value} - for value in set(pn_buses.loc[:, key])] - for key in bustypes[bustype]['group_by']]) + cl_buses = getattr(self.clustered_network, bustype)[ + lambda df: df.loc[:, "bus" if "bus" in df.columns else "bus0"] + == cluster + ] + groups = product( + *[ + [ + {"key": key, "value": value} + for value in set(cl_buses.loc[:, key]) + ] + for key in bustypes[bustype]["group_by"] + ] + ) for group in groups: - clb = cl_buses[cl_buses.bus == cluster] - query = " & ".join(["({key} == {value!r})".format(**axis) - for axis in group]) - clb = clb.query(query) + query = " & ".join( + ["({key} == {value!r})".format(**axis) for axis in group] + ) + clb = cl_buses.query(query) if len(clb) == 0: continue assert len(clb) == 1, ( - "Cluster {} has {} buses for group {}.\n" - .format(cluster, len(clb), group) + - "Should be exactly one.") + f"Cluster {cluster} has {len(clb)} buses for {group=}." + "\nShould be exactly one." + ) # Remove buses not belonging to the partial network pnb = pn_buses.iloc[ - [i for i, row in enumerate(pn_buses.itertuples()) - if not row.bus.startswith(self.idx_prefix) ]] + [ + i + for i, row in enumerate(pn_buses.itertuples()) + for bus in [ + row.bus if hasattr(row, "bus") else row.bus0 + ] + if not bus.startswith(self.idx_prefix) + ] + ] + if bustype == "links": + index = self.buses[ + self.buses.loc[:, "cluster"] == group[1]["value"] + ].index.tolist() + query = ( + f"(carrier == {group[0]['value']!r})" + f" & (bus1 in {index})" + ) pnb = pnb.query(query) - assert not pnb.empty, ( - "Cluster has a bus for:" + - "\n ".join(["{key}: {value!r}".format(**axis) - for axis in group]) + - "\nbut no matching buses in its corresponding " + - "partial network.") - - if not (pnb.loc[:, 'p_nom_extendable'].all() or - not pnb.loc[:, 'p_nom_extendable'].any()): - raise NotImplemented( - "The `'p_nom_extendable'` flag for buses in the" + - " partial network with:" + - "\n ".join(["{key}: {value!r}".format(**axis) - for axis in group]) + - "\ndoesn't have the same value." + - "\nThis is not supported.") - else: - assert (pnb.loc[:, 'p_nom_extendable'] == - clb.iloc[0].at['p_nom_extendable']).all(), ( - "The `'p_nom_extendable'` flag for the current " + - "cluster's bus does not have the same value " + - "it has on the buses of it's partial network.") - if clb.iloc[0].at['p_nom_extendable']: + assert not pnb.empty or ( + # In some cases, a district heating grid is connected to a + # substation only via a resistive_heater but not e.g. by a + # heat_pump or one of the other listed `carrier`s. + # In the clustered network, there are both. + # In these cases, the `pnb` can actually be empty. 
group[0]["value"]
+                    in [
+                        "central_gas_boiler",
+                        "central_heat_pump",
+                        "central_gas_CHP_heat",
+                        "central_gas_CHP",
+                        "CH4",
+                        "DC",
+                        "OCGT",
+                    ]
+                ), (
+                    "Cluster has a bus for:"
+                    + "\n    ".join(
+                        ["{key}: {value!r}".format(**axis) for axis in group]
+                    )
+                    + "\nbut no matching buses in its corresponding "
+                    + "partial network."
+                )
+                if pnb.empty:
+                    continue
+
+                # Exclude DC links from the disaggregation because it does
+                # not make sense to disaggregate them uniformly.
+                # A new power flow calculation in the high resolution would
+                # be required.
+                if pnb.carrier.iloc[0] == "DC":
+                    continue
+
+                if not (
+                    pnb.loc[:, extendable_flag].all()
+                    or not pnb.loc[:, extendable_flag].any()
+                ):
+                    raise NotImplementedError(
+                        "The `'p_nom_extendable'` flag for buses in the"
+                        + " partial network with:"
+                        + "\n    ".join(
+                            [
+                                "{key}: {value!r}".format(**axis)
+                                for axis in group
+                            ]
+                        )
+                        + "\ndoesn't have the same value."
+                        + "\nThis is not supported."
+                    )
+                else:
+                    assert (
+                        pnb.loc[:, extendable_flag]
+                        == clb.iloc[0].at[extendable_flag]
+                    ).all(), (
+                        "The `'p_nom_extendable'` flag for the current"
+                        " cluster's bus does not have the same value"
+                        " it has on the buses of its partial network."
+                    )
+
+                if clb.iloc[0].at[extendable_flag]:
+                    # That means `p_nom` got computed via optimization and
+                    # we have to distribute it into the subnetwork first.
+                    pnb_p_nom_max = pnb.loc[:, maximal_capacity]
+
+                    # If the upper limit is infinite, replace it by a very
+                    # large number to avoid NaN values in the calculation
+                    pnb_p_nom_max.replace(float("inf"), 10000000, inplace=True)
+
+                    p_nom_max_global = pnb_p_nom_max.sum(axis="index")
+
+                    pnb.loc[:, optimal_capacity] = (
+                        clb.iloc[0].at[optimal_capacity]
+                        * pnb_p_nom_max
+                        / p_nom_max_global
+                    )
+                    getattr(self.original_network, bustype).loc[
+                        pnb.index, optimal_capacity
+                    ] = pnb.loc[:, optimal_capacity]
+                    pnb.loc[:, nominal_capacity] = pnb.loc[
+                        :, optimal_capacity
+                    ]
+                else:
+                    # That means 'p_nom_opt' didn't get computed and is
+                    # potentially not present in the dataframe. But we want
+                    # to always use 'p_nom_opt' in the remaining code, so
+                    # save a view of the computed 'p_nom' values under
+                    # 'p_nom_opt'.
+                    pnb.loc[:, optimal_capacity] = pnb.loc[
+                        :, nominal_capacity
+                    ]
+
+                # This probably shouldn't be here, but rather in
+                # `transfer_results`, but it's easier to do it this way
+                # right now.
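+                # (Editorial sketch of the uniform split performed below,
+                # with hypothetical numbers rather than project data: a
+                # clustered series clt = [8.0, 4.0] split over two original
+                # buses with static weights 3.0 and 1.0 yields
+                #     bus_1: [8.0, 4.0] * 3.0 / 4.0 = [6.0, 3.0]
+                #     bus_2: [8.0, 4.0] * 1.0 / 4.0 = [2.0, 1.0]
+                # so the disaggregated columns always sum back to the
+                # clustered series, which the `delta < epsilon` assertion
+                # further below double-checks.)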
getattr(self.original_network, bustype).loc[ - pnb.index, - 'p_nom_opt'] = pnb.loc[:, 'p_nom_opt'] - timed = lambda key, series=set(s - for s in cl_t - if not cl_t[s].empty - if not pn_t[s].columns.intersection(pnb.index).empty - ): key in series - - for s in bustypes[bustype]['series']: + pnb.index, optimal_capacity + ] = pnb.loc[:, optimal_capacity] + timed = lambda key, series={ # noqa: 731 + s + for s in cl_t + if not cl_t[s].empty + if not pn_t[s].columns.intersection(pnb.index).empty + }: (key in series) + + for s in bustypes[bustype]["series"]: if s in self.skip: continue + filtered = pnb.loc[filters.get(s, slice(None))] - clt = cl_t[s].loc[:, next(clb.itertuples()).Index] - weight = reduce(multiply, - (filtered.loc[:, key] - if not timed(key) - else pn_t[key].loc[:, filtered.index] - for key in weights[s]), - 1) - loc = ((slice(None),) - if any(timed(w) for w in weights[s]) - else ()) - ws = weight.sum(axis=len(loc)) - for bus_id in filtered.index: - values = clt * weight.loc[loc + (bus_id,)] / ws - pn_t[s].insert(len(pn_t[s].columns), bus_id, values) + if filtered.empty: + continue + + clt = cl_t[s].loc[:, clb.index[0]] + weight = reduce( + multiply, + ( + filtered.loc[:, key] + if not timed(key) + else pn_t[key].loc[:, filtered.index] + for key in weights[s] + ), + 1, + ) + loc = ( + (slice(None),) + if any(timed(w) for w in weights[s]) + else () + ) + ws = weight.sum(axis=len(loc)) + new_columns = pd.DataFrame( + { + bus_id: clt * weight.loc[loc + (bus_id,)] / ws + for bus_id in filtered.index + } + ) + delta = abs((new_columns.sum(axis=1) - clt).sum()) + epsilon = 1e-5 + + assert delta < epsilon, ( + "Sum of disaggregated time series does not match" + f" aggregated timeseries: {delta=} > {epsilon=}." + ) + pn_t[s].loc[:, new_columns.columns] = new_columns def transfer_results(self, *args, **kwargs): - kwargs['bustypes'] = ['generators', 'storage_units'] - kwargs['series'] = {'generators': {'p'}, - 'storage_units': {'p', 'state_of_charge'}} + kwargs["bustypes"] = ["generators", "links", "storage_units", "stores"] + + # Only disaggregate reactive power (q) if a pf_post_lopf was performed + # and there is data in resulting q time series + if self.original_network.generators_t.q.empty: + kwargs["series"] = { + "generators": {"p"}, + "links": {"p0", "p1"}, + "storage_units": {"p", "state_of_charge"}, + "stores": {"e", "p"}, + } + else: + kwargs["series"] = { + "generators": {"p", "q"}, + "links": {"p0", "p1"}, + "storage_units": {"p", "q", "state_of_charge"}, + "stores": {"e", "p"}, + } return super().transfer_results(*args, **kwargs) @@ -519,18 +793,23 @@ def swap_series(s): def filter_internal_connector(conn, is_bus_in_cluster): - return conn[conn.bus0.apply(is_bus_in_cluster) - & conn.bus1.apply(is_bus_in_cluster)] + return conn[ + conn.bus0.apply(is_bus_in_cluster) | conn.bus1.apply(is_bus_in_cluster) + ] def filter_left_external_connector(conn, is_bus_in_cluster): - return conn[~ conn.loc[:, 'bus0'].apply(is_bus_in_cluster) - & conn.loc[:, 'bus1'].apply(is_bus_in_cluster)] + return conn[ + ~conn.loc[:, "bus0"].apply(is_bus_in_cluster) + & conn.loc[:, "bus1"].apply(is_bus_in_cluster) + ] def filter_right_external_connector(conn, is_bus_in_cluster): - return conn[conn.bus0.apply(is_bus_in_cluster) - & ~conn.bus1.apply(is_bus_in_cluster)] + return conn[ + conn.bus0.apply(is_bus_in_cluster) + & ~conn.bus1.apply(is_bus_in_cluster) + ] def filter_buses(bus, buses): @@ -544,37 +823,44 @@ def filter_on_buses(connecitve, buses): def update_constraints(network, externals): pass + def 
run_disaggregation(self):
-    if self.clustering:
-        disagg = self.args.get('disaggregation')
-        skip = () if self.args['pf_post_lopf']['active'] else ('q',)
+    log.debug("Running disaggregation.")
+    if self.args["network_clustering"]["active"]:
+        disagg = self.args.get("spatial_disaggregation")
+        skip = () if self.args["pf_post_lopf"]["active"] else ("q",)
         t = time.time()
         if disagg:
-            if disagg == 'mini':
+            if disagg == "mini":
                 disaggregation = MiniSolverDisaggregation(
                     self.disaggregated_network,
                     self.network,
-                    self.clustering,
-                    skip=skip)
-            elif disagg == 'uniform':
-                disaggregation = UniformDisaggregation(self.disaggregated_network,
-                                                       self.network,
-                                                       self.clustering,
-                                                       skip=skip)
+                    self.busmap,
+                    skip=skip,
+                )
+            elif disagg == "uniform":
+                disaggregation = UniformDisaggregation(
+                    original_network=self.disaggregated_network,
+                    clustered_network=self.network,
+                    busmap=pd.Series(self.busmap["busmap"]),
+                    skip=skip,
+                )
             else:
-                raise Exception('Invalid disaggregation command: ' + disagg)
+                raise Exception("Invalid disaggregation command: " + disagg)

-            disaggregation.execute(self.scenario, solver=self.args['solver'])
+            disaggregation.execute(self.scenario, solver=self.args["solver"])
             # temporary bug fix for solar generators which are nan instead
             # of 0 during night time
             self.disaggregated_network.generators_t.p.fillna(0, inplace=True)
             self.disaggregated_network.generators_t.q.fillna(0, inplace=True)

-            self.disaggregated_network.results = self.network.results
-            print("Time for overall desaggregation [min]: {:.2}"
-                  .format((time.time() - t) / 60))
+            log.info(
+                "Time for overall disaggregation [min]: {:.2}".format(
+                    (time.time() - t) / 60
+                )
+            )

-        if self.args['csv_export'] != False:
-            path = self.args['csv_export'] + '/disaggregated_network'
-            self.disaggregated_network.export_to_csv_folder(path)
\ No newline at end of file
+        if self.args["csv_export"]:
+            path = self.args["csv_export"] + "/disaggregated_network"
+            self.disaggregated_network.export_to_csv_folder(path)
diff --git a/etrago/cluster/electrical.py b/etrago/cluster/electrical.py
new file mode 100755
index 000000000..6660031b9
--- /dev/null
+++ b/etrago/cluster/electrical.py
@@ -0,0 +1,1077 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
+# Europa-Universität Flensburg,
+# Centre for Sustainable Energy Systems,
+# DLR-Institute for Networked Energy Systems
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation; either version 3 of the
+# License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
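Stepping back to `run_disaggregation` in disaggregation.py above: the branch taken is driven entirely by the settings dictionary. A sketch of the relevant keys (values are illustrative only; the key names mirror etrago/args.json above):

    # Hypothetical args excerpt that sends run_disaggregation down the
    # "uniform" branch; disaggregation only runs on a clustered network.
    args = {
        "network_clustering": {"active": True},
        "spatial_disaggregation": "uniform",  # or "mini", or None to skip
        "pf_post_lopf": {"active": False},
        "solver": "gurobi",
        "csv_export": "results",  # any falsy value disables the CSV export
    }

    # Without a post-LOPF power flow there is no reactive power to spread,
    # so the 'q' series is skipped, exactly as in run_disaggregation.
    skip = () if args["pf_post_lopf"]["active"] else ("q",)
    assert skip == ("q",)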
+ +# File description for read-the-docs +""" electrical.py defines the methods to cluster power grid networks +spatially for applications within the tool eTraGo.""" + +import os + +if "READTHEDOCS" not in os.environ: + import logging + + from pypsa import Network + from pypsa.networkclustering import ( + aggregatebuses, + aggregategenerators, + aggregateoneport, + get_clustering_from_busmap, + ) + from six import iteritems + import numpy as np + import pandas as pd + import pypsa.io as io + + from etrago.cluster.spatial import ( + busmap_ehv_clustering, + group_links, + kmean_clustering, + kmedoids_dijkstra_clustering, + strategies_generators, + strategies_one_ports, + ) + from etrago.tools.utilities import set_control_strategies + + logger = logging.getLogger(__name__) + +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = ( + "MGlauer, MarlonSchlemminger, mariusves, BartelsJ, gnn, lukasoldi, " + "ulfmueller, lukasol, ClaraBuettner, CarlosEpia, KathiEsterl, " + "pieterhexen, fwitte, AmeliaNadal, cjbernal071421" +) + + +# TODO: Workaround because of agg + + +def _leading(busmap, df): + """ + Returns a function that computes the leading bus_id for a given mapped + list of buses. + + Parameters + ----------- + busmap : dict + A dictionary that maps old bus_ids to new bus_ids. + df : pandas.DataFrame + A DataFrame containing network.buses data. Each row corresponds + to a unique bus + + Returns + -------- + leader : function + A function that returns the leading bus_id for the argument `x`. + """ + + def leader(x): + ix = busmap[x.index[0]] + return df.loc[ix, x.name] + + return leader + + +def adjust_no_electric_network(etrago, busmap, cluster_met): + """ + Adjusts the non-electric network based on the electrical network + (esp. eHV network), adds the gas buses to the busmap, and creates the + new buses for the non-electric network. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class. + busmap : dict + A dictionary that maps old bus_ids to new bus_ids. + cluster_met : str + A string indicating the clustering method to be used. + + Returns + ------- + network : pypsa.Network + Container for all network components of the clustered network. + busmap : dict + Maps old bus_ids to new bus_ids including all sectors. 
+ + """ + + def find_de_closest(network, bus_ne): + ac_ehv = network.buses[ + (network.buses.v_nom > 110) + & (network.buses.carrier == "AC") + & (network.buses.country == "DE") + ] + + bus_ne_x = network.buses.loc[bus_ne, "x"] + bus_ne_y = network.buses.loc[bus_ne, "y"] + + ac_ehv["dist"] = ac_ehv.apply( + lambda x: ((x.x - bus_ne_x) ** 2 + (x.y - bus_ne_y) ** 2) + ** (1 / 2), + axis=1, + ) + + new_ehv_bus = ac_ehv.dist.idxmin() + + return new_ehv_bus + + network = etrago.network + # network2 is supposed to contain all the not electrical or gas buses + # and links + network2 = network.copy(with_time=False) + network2.buses = network2.buses[ + (network2.buses["carrier"] != "AC") + & (network2.buses["carrier"] != "CH4") + & (network2.buses["carrier"] != "H2_grid") + & (network2.buses["carrier"] != "rural_heat_store") + & (network2.buses["carrier"] != "central_heat") + & (network2.buses["carrier"] != "central_heat_store") + ] + map_carrier = { + "H2_saltcavern": "power_to_H2", + "dsm": "dsm", + "Li ion": "BEV charger", + "Li_ion": "BEV_charger", + "rural_heat": "rural_heat_pump", + } + + no_elec_conex = [] + # busmap2 defines how the no electrical buses directly connected to AC + # are going to be clustered + busmap2 = {} + + # Map crossborder AC buses in case that they were not part of the k-mean + # clustering + if (not etrago.args["network_clustering"]["cluster_foreign_AC"]) & ( + cluster_met in ["kmeans", "kmedoids-dijkstra"] + ): + buses_orig = network.buses.copy() + ac_buses_out = buses_orig[ + (buses_orig["country"] != "DE") & (buses_orig["carrier"] == "AC") + ].dropna(subset=["country", "carrier"]) + + for bus_out in ac_buses_out.index: + busmap2[bus_out] = bus_out + + foreign_hv = network.buses[ + (network.buses.country != "DE") + & (network.buses.carrier == "AC") + & (network.buses.v_nom > 110) + ].index + busmap3 = pd.DataFrame(columns=["elec_bus", "carrier", "cluster"]) + for bus_ne in network2.buses.index: + carry = network2.buses.loc[bus_ne, "carrier"] + busmap3.at[bus_ne, "carrier"] = carry + try: + df = network2.links[ + (network2.links["bus1"] == bus_ne) + & (network2.links["carrier"] == map_carrier[carry]) + ].copy() + df["elec"] = df["bus0"].isin(busmap.keys()) + bus_hv = df[df["elec"]]["bus0"][0] + bus_ehv = busmap[bus_hv] + if bus_ehv not in foreign_hv: + busmap3.at[bus_ne, "elec_bus"] = bus_ehv + else: + busmap3.at[bus_ne, "elec_bus"] = find_de_closest( + network, bus_ne + ) + except: + no_elec_conex.append(bus_ne) + busmap3.at[bus_ne, "elec_bus"] = bus_ne + + for a, df in busmap3.groupby(["elec_bus", "carrier"]): + busmap3.loc[df.index, "cluster"] = df.index[0] + + busmap3 = busmap3["cluster"].to_dict() + + if no_elec_conex: + logger.info( + f"""There are {len(no_elec_conex)} buses that have no direct + connection to the electric network: {no_elec_conex}""" + ) + + # rural_heat_store buses are clustered based on the AC buses connected to + # their corresponding rural_heat buses. 
Results saved in busmap4 + links_rural_store = etrago.network.links[ + etrago.network.links.carrier == "rural_heat_store_charger" + ].copy() + + busmap4 = {} + links_rural_store["to_ac"] = links_rural_store["bus0"].map(busmap3) + for rural_heat_bus, df in links_rural_store.groupby("to_ac"): + cluster_bus = df.bus1.iat[0] + for rural_store_bus in df.bus1: + busmap4[rural_store_bus] = cluster_bus + + # Add the gas buses to the busmap and map them to themself + for gas_bus in network.buses[ + (network.buses["carrier"] == "H2_grid") + | (network.buses["carrier"] == "CH4") + | (network.buses["carrier"] == "central_heat") + | (network.buses["carrier"] == "central_heat_store") + ].index: + busmap2[gas_bus] = gas_bus + + busmap = {**busmap, **busmap2, **busmap3, **busmap4} + + return network, busmap + + +def cluster_on_extra_high_voltage(etrago, busmap, with_time=True): + """ + Main function of the EHV-Clustering approach. Creates a new clustered + pypsa.Network given a busmap mapping all bus_ids to other bus_ids of the + same network. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + busmap : dict + Maps old bus_ids to new bus_ids. + with_time : bool + If true time-varying data will also be aggregated. + + Returns + ------- + network : pypsa.Network + Container for all network components of the clustered network. + busmap : dict + Maps old bus_ids to new bus_ids including all sectors. + """ + + network_c = Network() + + network, busmap = adjust_no_electric_network( + etrago, busmap, cluster_met="ehv" + ) + + buses = aggregatebuses( + network, + busmap, + { + "x": _leading(busmap, network.buses), + "y": _leading(busmap, network.buses), + }, + ) + + # keep attached lines + lines = network.lines.copy() + mask = lines.bus0.isin(buses.index) + lines = lines.loc[mask, :] + + # keep attached transformer + transformers = network.transformers.copy() + mask = transformers.bus0.isin(buses.index) + transformers = transformers.loc[mask, :] + + io.import_components_from_dataframe(network_c, buses, "Bus") + io.import_components_from_dataframe(network_c, lines, "Line") + io.import_components_from_dataframe(network_c, transformers, "Transformer") + + # Dealing with links + links = network.links.copy() + dc_links = links[links["carrier"] == "DC"] + # Discard links connected to buses under 220 kV + dc_links = dc_links[dc_links.bus0.isin(buses.index)] + links = links[links["carrier"] != "DC"] + + new_links = ( + links.assign(bus0=links.bus0.map(busmap), bus1=links.bus1.map(busmap)) + .dropna(subset=["bus0", "bus1"]) + .loc[lambda df: df.bus0 != df.bus1] + ) + + new_links = pd.concat([new_links, dc_links]) + new_links["topo"] = np.nan + io.import_components_from_dataframe(network_c, new_links, "Link") + + if with_time: + network_c.snapshots = network.snapshots + network_c.set_snapshots(network.snapshots) + network_c.snapshot_weightings = network.snapshot_weightings.copy() + + for attr, df in network.lines_t.items(): + mask = df.columns[df.columns.isin(lines.index)] + df = df.loc[:, mask] + if not df.empty: + io.import_series_from_dataframe(network_c, df, "Line", attr) + + for attr, df in network.links_t.items(): + mask = df.columns[df.columns.isin(links.index)] + df = df.loc[:, mask] + if not df.empty: + io.import_series_from_dataframe(network_c, df, "Link", attr) + + # dealing with generators + network.generators["weight"] = 1 + + new_df, new_pnl = aggregategenerators( + network, busmap, with_time, custom_strategies=strategies_generators() + ) + 
io.import_components_from_dataframe(network_c, new_df, "Generator") + for attr, df in iteritems(new_pnl): + io.import_series_from_dataframe(network_c, df, "Generator", attr) + + # dealing with all other components + aggregate_one_ports = network.one_port_components.copy() + aggregate_one_ports.discard("Generator") + + for one_port in aggregate_one_ports: + one_port_strategies = strategies_one_ports() + new_df, new_pnl = aggregateoneport( + network, + busmap, + component=one_port, + with_time=with_time, + custom_strategies=one_port_strategies.get(one_port, {}), + ) + io.import_components_from_dataframe(network_c, new_df, one_port) + for attr, df in iteritems(new_pnl): + io.import_series_from_dataframe(network_c, df, one_port, attr) + + network_c.links, network_c.links_t = group_links(network_c) + network_c.determine_network_topology() + + return (network_c, busmap) + + +def delete_ehv_buses_no_lines(network): + """ + When there are AC buses totally isolated, this function deletes them in + order to make possible the creation of busmaps based on electrical + connections and other purposes. Additionally, it throws a warning to + inform the user in case that any correction should be done. + + Parameters + ---------- + network : pypsa.network + + Returns + ------- + None + """ + lines = network.lines + buses_ac = network.buses[ + (network.buses.carrier == "AC") & (network.buses.country == "DE") + ] + buses_in_lines = set(list(lines.bus0) + list(lines.bus1)) + buses_ac["with_line"] = buses_ac.index.isin(buses_in_lines) + buses_ac["with_load"] = buses_ac.index.isin(network.loads.bus) + buses_in_links = list(network.links.bus0) + list(network.links.bus1) + buses_ac["with_link"] = buses_ac.index.isin(buses_in_links) + buses_ac["with_gen"] = buses_ac.index.isin(network.generators.bus) + + delete_buses = buses_ac[ + (~buses_ac["with_line"]) + & (~buses_ac["with_load"]) + & (~buses_ac["with_link"]) + & (~buses_ac["with_gen"]) + ].index + + if len(delete_buses): + logger.info( + f""" + + ----------------------- WARNING --------------------------- + THE FOLLOWING BUSES WERE DELETED BECAUSE THEY WERE ISOLATED: + {delete_buses.to_list()}. + IT IS POTENTIALLY A SIGN OF A PROBLEM IN THE DATASET + ----------------------- WARNING --------------------------- + + """ + ) + + network.mremove("Bus", delete_buses) + + delete_trafo = network.transformers[ + (network.transformers.bus0.isin(delete_buses)) + | (network.transformers.bus1.isin(delete_buses)) + ].index + + network.mremove("Transformer", delete_trafo) + + delete_sto_units = network.storage_units[ + network.storage_units.bus.isin(delete_buses) + ].index + + network.mremove("StorageUnit", delete_sto_units) + + return + + +def ehv_clustering(self): + """ + Cluster the network based on Extra High Voltage (EHV) grid. + + If 'active' in the `network_clustering_ehv` argument is True, the function + clusters the network based on the EHV grid. + + Parameters + ---------- + self: Etrago object pointer + The object pointer for an Etrago object. 
+ + Returns + ------- + None + """ + + if self.args["network_clustering_ehv"]["active"]: + logger.info("Start ehv clustering") + + delete_ehv_buses_no_lines(self.network) + + busmap = busmap_ehv_clustering(self) + + self.network, busmap = cluster_on_extra_high_voltage( + self, busmap, with_time=True + ) + + self.update_busmap(busmap) + self.buses_by_country() + + logger.info("Network clustered to EHV-grid") + + +def select_elec_network(etrago): + """ + Selects the electric network based on the clustering settings specified + in the Etrago object. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + + Returns + ------- + Tuple containing: + elec_network : pypsa.Network + Contains the electric network + n_clusters : int + number of clusters used in the clustering process. + """ + elec_network = etrago.network.copy() + settings = etrago.args["network_clustering"] + if settings["cluster_foreign_AC"]: + elec_network.buses = elec_network.buses[ + elec_network.buses.carrier == "AC" + ] + elec_network.links = elec_network.links[ + (elec_network.links.carrier == "AC") + | (elec_network.links.carrier == "DC") + ] + n_clusters = settings["n_clusters_AC"] + else: + AC_filter = elec_network.buses.carrier.values == "AC" + + foreign_buses = elec_network.buses[ + (elec_network.buses.country != "DE") + & (elec_network.buses.carrier == "AC") + ] + + num_neighboring_country = len( + foreign_buses[foreign_buses.index.isin(elec_network.loads.bus)] + ) + + elec_network.buses = elec_network.buses[ + AC_filter & (elec_network.buses.country.values == "DE") + ] + n_clusters = settings["n_clusters_AC"] - num_neighboring_country + + # Dealing with generators + elec_network.generators = elec_network.generators[ + elec_network.generators.bus.isin(elec_network.buses.index) + ] + + for attr in elec_network.generators_t: + elec_network.generators_t[attr] = elec_network.generators_t[attr].loc[ + :, + elec_network.generators_t[attr].columns.isin( + elec_network.generators.index + ), + ] + + # Dealing with loads + elec_network.loads = elec_network.loads[ + elec_network.loads.bus.isin(elec_network.buses.index) + ] + + for attr in elec_network.loads_t: + elec_network.loads_t[attr] = elec_network.loads_t[attr].loc[ + :, + elec_network.loads_t[attr].columns.isin(elec_network.loads.index), + ] + + # Dealing with storage_units + elec_network.storage_units = elec_network.storage_units[ + elec_network.storage_units.bus.isin(elec_network.buses.index) + ] + + for attr in elec_network.storage_units_t: + elec_network.storage_units_t[attr] = elec_network.storage_units_t[ + attr + ].loc[ + :, + elec_network.storage_units_t[attr].columns.isin( + elec_network.storage_units.index + ), + ] + + # Dealing with stores + elec_network.stores = elec_network.stores[ + elec_network.stores.bus.isin(elec_network.buses.index) + ] + + for attr in elec_network.stores_t: + elec_network.stores_t[attr] = elec_network.stores_t[attr].loc[ + :, + elec_network.stores_t[attr].columns.isin( + elec_network.stores.index + ), + ] + + return elec_network, n_clusters + + +def unify_foreign_buses(etrago): + """ + Unifies foreign AC buses into clusters using the k-medoids algorithm with + Dijkstra distance as a similarity measure. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + + Returns + ------- + busmap_foreign : pd.Series + A pandas series that maps the foreign buses to their respective + clusters. The series index is the bus ID and the values are the + corresponding cluster medoid IDs. 
+ """ + network = etrago.network.copy(with_time=False) + + foreign_buses = network.buses[ + (network.buses.country != "DE") & (network.buses.carrier == "AC") + ] + foreign_buses_load = foreign_buses[ + (foreign_buses.index.isin(network.loads.bus)) + & (foreign_buses.carrier == "AC") + ] + + lines_col = network.lines.columns + # The Dijkstra clustering works using the shortest electrical path between + # buses. In some cases, a bus has just DC connections, which are considered + # links. Therefore it is necessary to include temporarily the DC links + # into the lines table. + dc = network.links[network.links.carrier == "DC"] + str1 = "DC_" + dc.index = f"{str1}" + dc.index + lines_plus_dc = lines_plus_dc = pd.concat([network.lines, dc]) + lines_plus_dc = lines_plus_dc[lines_col] + lines_plus_dc["carrier"] = "AC" + + busmap_foreign = pd.Series(dtype=str) + + for country, df in foreign_buses.groupby(by="country"): + weight = df.apply( + lambda x: 1 if x.name in foreign_buses_load.index else 0, + axis=1, + ) + n_clusters = (foreign_buses_load.country == country).sum() + + if n_clusters < len(df): + ( + busmap_country, + medoid_idx_country, + ) = kmedoids_dijkstra_clustering( + etrago, df, lines_plus_dc, weight, n_clusters + ) + medoid_idx_country.index = medoid_idx_country.index.astype(str) + busmap_country = busmap_country.map(medoid_idx_country) + busmap_foreign = pd.concat([busmap_foreign, busmap_country]) + else: + for bus in df.index: + busmap_foreign[bus] = bus + + busmap_foreign.name = "foreign" + busmap_foreign.index.name = "bus" + + return busmap_foreign + + +def preprocessing(etrago): + """ + Preprocesses an Etrago object to prepare it for network clustering. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + + Returns + ------- + network_elec : pypsa.Network + Container for all network components of the electrical network. + weight : pandas.Series + A pandas.Series with the bus weighting data. + n_clusters : int + The number of clusters to use for network clustering. + busmap_foreign : pandas.Series + The Series object with the foreign bus mapping data. + """ + + network = etrago.network + settings = etrago.args["network_clustering"] + + # problem our lines have no v_nom. this is implicitly defined by the + # connected buses: + network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) + + # adjust the electrical parameters of the lines which are not 380. 
+    lines_v_nom_b = network.lines.v_nom != 380
+
+    voltage_factor = (network.lines.loc[lines_v_nom_b, "v_nom"] / 380.0) ** 2
+
+    network.lines.loc[lines_v_nom_b, "x"] *= 1 / voltage_factor
+
+    network.lines.loc[lines_v_nom_b, "r"] *= 1 / voltage_factor
+
+    network.lines.loc[lines_v_nom_b, "b"] *= voltage_factor
+
+    network.lines.loc[lines_v_nom_b, "g"] *= voltage_factor
+
+    network.lines.loc[lines_v_nom_b, "v_nom"] = 380.0
+
+    trafo_index = network.transformers.index
+    transformer_voltages = pd.concat(
+        [
+            network.transformers.bus0.map(network.buses.v_nom),
+            network.transformers.bus1.map(network.buses.v_nom),
+        ],
+        axis=1,
+    )
+
+    network.import_components_from_dataframe(
+        network.transformers.loc[
+            :,
+            [
+                "bus0",
+                "bus1",
+                "x",
+                "s_nom",
+                "capital_cost",
+                "sub_network",
+                "s_max_pu",
+                "lifetime",
+            ],
+        ]
+        .assign(
+            x=network.transformers.x
+            * (380.0 / transformer_voltages.max(axis=1)) ** 2,
+            length=1,
+        )
+        .set_index("T" + trafo_index),
+        "Line",
+    )
+    network.lines.carrier = "AC"
+
+    network.transformers.drop(trafo_index, inplace=True)
+
+    for attr in network.transformers_t:
+        network.transformers_t[attr] = network.transformers_t[attr].reindex(
+            columns=[]
+        )
+
+    network.buses.loc[network.buses.carrier.values == "AC", "v_nom"] = 380.0
+
+    if network.buses.country.isna().any():
+        logger.info(
+            f"""
+
+            ----------------------- WARNING ---------------------------
+            THE FOLLOWING BUSES HAVE NO COUNTRY DATA:
+            {network.buses[network.buses.country.isna()].index.to_list()}.
+            THEY WILL BE ASSIGNED TO GERMANY, BUT IT IS POTENTIALLY A
+            SIGN OF A PROBLEM IN THE DATASET.
+            ----------------------- WARNING ---------------------------
+
+            """
+        )
+        network.buses.loc[network.buses.country.isna(), "country"] = "DE"
+
+    if settings["k_elec_busmap"] is False:
+        busmap_foreign = unify_foreign_buses(etrago)
+    else:
+        busmap_foreign = pd.Series(name="foreign", dtype=str)
+
+    network_elec, n_clusters = select_elec_network(etrago)
+
+    if settings["method"] == "kmedoids-dijkstra":
+        lines_col = network_elec.lines.columns
+
+        # The Dijkstra clustering works on the shortest electrical path
+        # between buses. In some cases, a bus has only DC connections,
+        # which are modelled as links. Therefore the DC links are
+        # temporarily included in the lines table.
+        dc = network.links[network.links.carrier == "DC"]
+        dc.index = "DC_" + dc.index
+        lines_plus_dc = pd.concat([network_elec.lines, dc])
+        lines_plus_dc = lines_plus_dc[lines_col]
+        network_elec.lines = lines_plus_dc.copy()
+        network_elec.lines["carrier"] = "AC"
+
+    # Decide whether to create a bus weighting and save it, create one
+    # without saving it, or read a bus weighting from a csv file.
+    if settings["bus_weight_tocsv"] is not None:
+        weight = weighting_for_scenario(
+            network=network_elec, save=settings["bus_weight_tocsv"]
+        )
+    elif settings["bus_weight_fromcsv"] is not None:
+        weight = pd.read_csv(
+            settings["bus_weight_fromcsv"], index_col="Bus", squeeze=True
+        )
+        weight.index = weight.index.astype(str)
+    else:
+        weight = weighting_for_scenario(network=network_elec, save=False)
+
+    return network_elec, weight, n_clusters, busmap_foreign
+
+
+def postprocessing(etrago, busmap, busmap_foreign, medoid_idx=None):
+    """
+    Postprocessing function for network clustering.
+ + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + busmap : pandas.Series + mapping between buses and clusters + busmap_foreign : pandas.DataFrame + mapping between foreign buses and clusters + medoid_idx : pandas.DataFrame + mapping between cluster indices and medoids + + Returns + ------- + Tuple containing: + clustering : pypsa.network + Network object containing the clustered network + busmap : pandas.Series + Updated mapping between buses and clusters + """ + settings = etrago.args["network_clustering"] + method = settings["method"] + num_clusters = settings["n_clusters_AC"] + + if not settings["k_elec_busmap"]: + busmap.name = "cluster" + busmap_elec = pd.DataFrame(busmap.copy(), dtype="string") + busmap_elec.index.name = "bus" + busmap_elec = busmap_elec.join(busmap_foreign, how="outer") + busmap_elec = busmap_elec.join( + pd.Series( + medoid_idx.index.values.astype(str), + medoid_idx, + name="medoid_idx", + ) + ) + + busmap_elec.to_csv( + f"{method}_elecgrid_busmap_{num_clusters}_result.csv" + ) + + else: + logger.info("Import Busmap for spatial clustering") + busmap_foreign = pd.read_csv( + settings["k_elec_busmap"], + dtype={"bus": str, "foreign": str}, + usecols=["bus", "foreign"], + index_col="bus", + ).dropna()["foreign"] + busmap = pd.read_csv( + settings["k_elec_busmap"], + usecols=["bus", "cluster"], + dtype={"bus": str, "cluster": str}, + index_col="bus", + ).dropna()["cluster"] + medoid_idx = pd.read_csv( + settings["k_elec_busmap"], + usecols=["bus", "medoid_idx"], + index_col="bus", + ).dropna()["medoid_idx"] + + medoid_idx = pd.Series( + medoid_idx.index.values.astype(str), medoid_idx.values.astype(int) + ) + + network, busmap = adjust_no_electric_network( + etrago, busmap, cluster_met=method + ) + + # merge busmap for foreign buses with the German buses + if not settings["cluster_foreign_AC"]: + for bus in busmap_foreign.index: + busmap[bus] = busmap_foreign[bus] + if bus == busmap_foreign[bus]: + medoid_idx[bus] = bus + medoid_idx.index = medoid_idx.index.astype("int") + + network.generators["weight"] = network.generators["p_nom"] + aggregate_one_ports = network.one_port_components.copy() + aggregate_one_ports.discard("Generator") + + clustering = get_clustering_from_busmap( + network, + busmap, + aggregate_generators_weighted=True, + one_port_strategies=strategies_one_ports(), + generator_strategies=strategies_generators(), + aggregate_one_ports=aggregate_one_ports, + line_length_factor=settings["line_length_factor"], + ) + + if method == "kmedoids-dijkstra": + for i in clustering.network.buses[ + clustering.network.buses.carrier == "AC" + ].index: + cluster = int(i) + if cluster in medoid_idx.index: + medoid = str(medoid_idx.loc[cluster]) + + clustering.network.buses.at[i, "x"] = etrago.network.buses[ + "x" + ].loc[medoid] + clustering.network.buses.at[i, "y"] = etrago.network.buses[ + "y" + ].loc[medoid] + + clustering.network.links, clustering.network.links_t = group_links( + clustering.network + ) + + return (clustering, busmap) + + +def weighting_for_scenario(network, save=None): + """ + define bus weighting based on generation, load and storage + + Parameters + ---------- + network : pypsa.network + Each bus in this network will receive a weight based on the + generator, load and storages also available in the network object. + save : str or bool, optional + If defined, the result of the weighting will be saved in the path + supplied here. The default is None. 
+
+
+def weighting_for_scenario(network, save=None):
+    """
+    Define a bus weighting based on generation, load and storage
+
+    Parameters
+    ----------
+    network : pypsa.Network
+        Each bus in this network will receive a weight based on the
+        generators, loads and storage units attached to it in the
+        network object.
+    save : str or bool, optional
+        If defined, the result of the weighting will be saved to the path
+        supplied here. The default is None.
+
+    Returns
+    -------
+    weight : pandas.Series
+        Series with the weight assigned to each bus for the k-means
+        clustering.
+
+    """
+
+    def calc_availability_factor(gen):
+        """
+        Calculate the availability factor for a given generator.
+
+        Parameters
+        ----------
+        gen : pandas.Series
+            A single row of the `pypsa.Network.generators` DataFrame.
+
+        Returns
+        -------
+        cf : float
+            The availability factor of the generator.
+
+        Notes
+        -----
+        The availability factor is defined as the ratio of the average
+        power output of the generator over its maximum power output
+        capacity. If the generator's output is time-dependent, its average
+        power output is calculated using the `network.generators_t`
+        DataFrame. Otherwise, its availability factor is obtained from the
+        `fixed_capacity_fac` dictionary, which contains pre-defined factors
+        for fixed capacity generators. If the generator's availability
+        factor cannot be found in the dictionary, it is assumed to be 1.
+
+        """
+
+        if gen["carrier"] in time_dependent:
+            cf = network.generators_t["p_max_pu"].loc[:, gen.name].mean()
+        else:
+            try:
+                cf = fixed_capacity_fac[gen["carrier"]]
+            except KeyError:
+                cf = 1
+        return cf
+
+    time_dependent = [
+        "solar_rooftop",
+        "solar",
+        "wind_onshore",
+        "wind_offshore",
+    ]
+    fixed_capacity_fac = {
+        # A value of 1 is assigned to power plants whose availability
+        # does not depend on the weather
+        "industrial_gas_CHP": 1,
+        "industrial_biomass_CHP": 1,
+        "biomass": 1,
+        "central_biomass_CHP": 1,
+        "central_gas_CHP": 1,
+        "OCGT": 1,
+        "other_non_renewable": 1,
+        "run_of_river": 0.50,
+        "reservoir": 1,
+        "gas": 1,
+        "oil": 1,
+        "others": 1,
+        "coal": 1,
+        "lignite": 1,
+        "nuclear": 1,
+    }
+
+    gen = network.generators[network.generators.carrier != "load shedding"][
+        ["bus", "carrier", "p_nom"]
+    ].copy()
+    gen["cf"] = gen.apply(calc_availability_factor, axis=1)
+    gen["weight"] = gen["p_nom"] * gen["cf"]
+
+    gen = (
+        gen.groupby("bus")
+        .weight.sum()
+        .reindex(network.buses.index, fill_value=0.0)
+    )
+
+    storage = (
+        network.storage_units.groupby("bus")
+        .p_nom.sum()
+        .reindex(network.buses.index, fill_value=0.0)
+    )
+
+    load = (
+        network.loads_t.p_set.mean()
+        .groupby(network.loads.bus)
+        .sum()
+        .reindex(network.buses.index, fill_value=0.0)
+    )
+
+    w = gen + storage + load
+    weight = ((w * (100000.0 / w.max())).astype(int)).reindex(
+        network.buses.index, fill_value=1
+    )
+
+    weight[weight == 0] = 1
+
+    if save:
+        weight.to_csv(save)
+
+    return weight
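A toy calculation (made-up numbers) walking through the weighting above: the bus score is generator capacity times availability factor, plus installed storage and mean load, and is then rescaled so the heaviest bus receives 100000.

```python
# One bus with a wind park, a biomass plant, some storage and a load.
p_nom_wind, cf_wind = 200.0, 0.35       # MW, cf = mean p_max_pu
p_nom_biomass, cf_biomass = 50.0, 1.0   # fixed availability factor
storage_p_nom = 30.0                    # MW
mean_load = 120.0                       # MW

w_bus = (
    p_nom_wind * cf_wind
    + p_nom_biomass * cf_biomass
    + storage_p_nom
    + mean_load
)                                        # = 270.0
w_max = 900.0                            # assumed heaviest bus in the grid
weight = int(w_bus * (100000.0 / w_max)) # = 30000, integer k-means weight
```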
+
+
+def run_spatial_clustering(self):
+    """
+    Main method for running spatial clustering on the electrical network.
+    Allows for clustering based on k-means and k-medoids Dijkstra.
+
+    Parameters
+    ----------
+    self
+        The object pointer for an Etrago object containing all relevant
+        parameters and data
+
+    Returns
+    -------
+    None
+    """
+    if self.args["network_clustering"]["active"]:
+        if self.args["spatial_disaggregation"] is not None:
+            self.disaggregated_network = self.network.copy()
+        else:
+            self.disaggregated_network = self.network.copy(with_time=False)
+
+        elec_network, weight, n_clusters, busmap_foreign = preprocessing(self)
+
+        if self.args["network_clustering"]["method"] == "kmeans":
+            if not self.args["network_clustering"]["k_elec_busmap"]:
+                logger.info("Start k-means Clustering")
+
+                busmap = kmean_clustering(
+                    self, elec_network, weight, n_clusters
+                )
+                medoid_idx = pd.Series(dtype=str)
+            else:
+                busmap = pd.Series(dtype=str)
+                medoid_idx = pd.Series(dtype=str)
+
+        elif self.args["network_clustering"]["method"] == "kmedoids-dijkstra":
+            if not self.args["network_clustering"]["k_elec_busmap"]:
+                logger.info("Start k-medoids Dijkstra Clustering")
+
+                busmap, medoid_idx = kmedoids_dijkstra_clustering(
+                    self,
+                    elec_network.buses,
+                    elec_network.lines,
+                    weight,
+                    n_clusters,
+                )
+
+            else:
+                busmap = pd.Series(dtype=str)
+                medoid_idx = pd.Series(dtype=str)
+
+        clustering, busmap = postprocessing(
+            self, busmap, busmap_foreign, medoid_idx
+        )
+        self.update_busmap(busmap)
+
+        self.network = clustering.network
+
+        self.buses_by_country()
+
+        self.geolocation_buses()
+
+        # The control parameter is overwritten in pypsa's clustering.
+        # The function network.determine_network_topology is called,
+        # which sets slack bus(es).
+        set_control_strategies(self.network)
+
+        logger.info(
+            "Network clustered to {} buses with ".format(
+                self.args["network_clustering"]["n_clusters_AC"]
+            )
+            + self.args["network_clustering"]["method"]
+        )
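For orientation, a plausible excerpt (not the complete or authoritative default set) of the `args` dictionary consumed by the electrical clustering above; only keys that actually appear in the code are shown, the values are illustrative.

```python
# Illustrative settings for run_spatial_clustering()/preprocessing();
# values are assumptions, not eTraGo defaults.
args = {
    "network_clustering": {
        "active": True,
        "method": "kmedoids-dijkstra",  # or "kmeans"
        "n_clusters_AC": 30,
        "cluster_foreign_AC": False,
        "k_elec_busmap": False,         # or path to a previously saved csv
        "bus_weight_tocsv": None,       # path to save the bus weighting
        "bus_weight_fromcsv": None,     # path to load a bus weighting
        "line_length_factor": 1,
        "random_state": 42,
    },
    "spatial_disaggregation": None,
}
```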
diff --git a/etrago/cluster/gas.py b/etrago/cluster/gas.py
new file mode 100644
index 000000000..cca92bbf6
--- /dev/null
+++ b/etrago/cluster/gas.py
@@ -0,0 +1,1025 @@
+# -*- coding: utf-8 -*-
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
+# Europa-Universität Flensburg,
+# Centre for Sustainable Energy Systems,
+# DLR-Institute for Networked Energy Systems
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation; either version 3 of the
+# License, or (at your option) any later version.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+# File description for read-the-docs
+""" gas.py defines the methods to cluster gas grid networks
+spatially for applications within the tool eTraGo."""
+
+import logging
+import os
+
+if "READTHEDOCS" not in os.environ:
+    from pypsa import Network
+    from pypsa.networkclustering import (
+        aggregatebuses,
+        aggregateoneport,
+        busmap_by_kmeans,
+    )
+    from six import iteritems
+    import numpy as np
+    import pandas as pd
+    import pypsa.io as io
+
+    from etrago.cluster.spatial import (
+        group_links,
+        kmedoids_dijkstra_clustering,
+        sum_with_inf,
+    )
+    from etrago.tools.utilities import set_control_strategies
+
+logger = logging.getLogger(__name__)
+
+__copyright__ = (
+    "Flensburg University of Applied Sciences, "
+    "Europa-Universität Flensburg, "
+    "Centre for Sustainable Energy Systems, "
+    "DLR-Institute for Networked Energy Systems"
+)
+__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
+__author__ = (
+    "MGlauer, MarlonSchlemminger, mariusves, BartelsJ, gnn, lukasoldi, "
+    "ulfmueller, lukasol, ClaraBuettner, CarlosEpia, KathiEsterl, "
+    "pieterhexen, fwitte, AmeliaNadal, cjbernal071421"
+)
+
+
+def preprocessing(etrago):
+    """
+    Preprocesses the gas network data from the given Etrago object for the
+    spatial clustering process of the CH4 grid.
+
+    Parameters
+    ----------
+    etrago : Etrago
+        An instance of the Etrago class
+
+    Returns
+    -------
+    network_ch4 : pypsa.Network
+        Network containing only the CH4 buses and links relevant for the
+        clustering.
+    weight_ch4 : pandas.Series
+        Weighting for each CH4 bus.
+    n_clusters : int
+        Number of clusters to be created for the German CH4 buses.
+
+    Raises
+    ------
+    ValueError
+        If `settings["n_clusters_gas"]` is less than or equal to the number
+        of neighboring country gas buses.
+    """
+
+    # Create network_ch4 (grid nodes in order to create the busmap basis)
+    network_ch4 = Network()
+
+    buses_ch4 = etrago.network.buses
+    links_ch4 = etrago.network.links
+    io.import_components_from_dataframe(network_ch4, buses_ch4, "Bus")
+    io.import_components_from_dataframe(network_ch4, links_ch4, "Link")
+
+    # Cluster ch4 buses
+    settings = etrago.args["network_clustering"]
+
+    ch4_filter = network_ch4.buses["carrier"].values == "CH4"
+
+    num_neighboring_country = (
+        ch4_filter & (network_ch4.buses["country"] != "DE")
+    ).sum()
+
+    network_ch4.links = network_ch4.links.loc[
+        network_ch4.links["bus0"].isin(network_ch4.buses.loc[ch4_filter].index)
+        & network_ch4.links["bus1"].isin(
+            network_ch4.buses.loc[ch4_filter].index
+        )
+    ]
+
+    # select buses depending on whether foreign buses should be clustered
+    # as well (only DE or DE+foreign)
+    if not settings["cluster_foreign_gas"]:
+        network_ch4.buses = network_ch4.buses.loc[
+            ch4_filter & (network_ch4.buses["country"].values == "DE")
+        ]
+
+        if settings["n_clusters_gas"] <= num_neighboring_country:
+            msg = (
+                "The number of clusters for the gas sector ("
+                + str(settings["n_clusters_gas"])
+                + ") must be higher than the number of neighboring country "
+                + "gas buses ("
+                + str(num_neighboring_country)
+                + ")."
+            )
+            raise ValueError(msg)
+        n_clusters = settings["n_clusters_gas"] - num_neighboring_country
+    else:
+        network_ch4.buses = network_ch4.buses.loc[ch4_filter]
+        n_clusters = settings["n_clusters_gas"]
+
+    def weighting_for_scenario(ch4_buses, save=None):
+        """
+        Calculate CH4-bus weightings dependent on the connected
+        CH4 loads, CH4 generators and non-transport link capacities.
+        Stores are not considered for the clustering.
+
+        Parameters
+        ----------
+        ch4_buses : pandas.DataFrame
+            DataFrame with the CH4 buses of etrago.network to weight.
+        save : str or bool
+            Path to save weightings to as csv.
+
+        Returns
+        -------
+        weightings : pandas.DataFrame
+            Integer weighting for each bus in ch4_buses.index
+        """
+
+        MAX_WEIGHT = 1e5  # relevant only for foreign nodes with extra
+        # high CH4 generation capacity
+
+        to_neglect = [
+            "CH4",
+            "H2_to_CH4",
+            "CH4_to_H2",
+            "H2_feedin",
+        ]
+
+        # get all non-transport and non-H2 related links for each bus
+        rel_links = {}
+        for i in ch4_buses.index:
+            rel_links[i] = etrago.network.links.loc[
+                (
+                    etrago.network.links.bus0.isin([i])
+                    | etrago.network.links.bus1.isin([i])
+                )
+                & ~etrago.network.links.carrier.isin(to_neglect)
+            ].index
+        # get all generators and loads related to ch4_buses
+        generators_ = pd.Series(
+            etrago.network.generators[
+                etrago.network.generators.carrier != "load shedding"
+            ].index,
+            index=etrago.network.generators[
+                etrago.network.generators.carrier != "load shedding"
+            ].bus,
+        )
+        buses_CH4_gen = generators_.index.intersection(rel_links.keys())
+        loads_ = pd.Series(
+            etrago.network.loads.index, index=etrago.network.loads.bus
+        )
+        buses_CH4_load = loads_.index.intersection(rel_links.keys())
+
+        # sum up all relevant entities and cast to integer
+        # Note: rel_links will hold the weightings for each bus afterwards
+        for i in rel_links:
+            rel_links[i] = etrago.network.links.loc[rel_links[i]].p_nom.sum()
+            if i in buses_CH4_gen:
+                rel_links[i] += etrago.network.generators.loc[
+                    generators_.loc[i]
+                ].p_nom.sum()
+            if i in buses_CH4_load:
+                rel_links[i] += (
+                    etrago.network.loads_t.p_set.loc[:, loads_.loc[i]]
+                    .mean()
+                    .sum()
+                )
+            rel_links[i] = min(int(rel_links[i]), MAX_WEIGHT)
+        weightings = pd.DataFrame.from_dict(rel_links, orient="index")
+
+        if save:
+            weightings.to_csv(save)
+        return weightings
+
+    # Create a bus weighting and optionally save it to csv, or load an
+    # existing bus weighting from a csv file
+    if settings["gas_weight_tocsv"] is not None:
+        weight_ch4 = weighting_for_scenario(
+            network_ch4.buses,
+            settings["gas_weight_tocsv"],
+        )
+    elif settings["gas_weight_fromcsv"] is not None:
+        # create DataFrame with uniform weightings for all ch4_buses
+        weight_ch4 = pd.DataFrame([1] * len(buses_ch4), index=buses_ch4.index)
+        loaded_weights = pd.read_csv(
+            settings["gas_weight_fromcsv"], index_col=0
+        )
+        # load weights into previously created DataFrame
+        loaded_weights.index = loaded_weights.index.astype(str)
+        weight_ch4.loc[loaded_weights.index] = loaded_weights
+    else:
+        weight_ch4 = weighting_for_scenario(network_ch4.buses, save=False)
+    return network_ch4, weight_ch4.squeeze(), n_clusters
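Hypothetical numbers illustrating the CH4 bus weighting above: the capacities of all non-transport links touching the bus are summed up, generator capacity and mean load are added, and the result is capped at `MAX_WEIGHT`.

```python
# Toy CH4 bus: one power-to-gas and one CHP link, CH4 production and a load.
link_p_nom_sum = 400.0   # MW, non-transport links at the bus
gen_p_nom_sum = 250.0    # MW, CH4 generation capacity
mean_load = 80.0         # MW, mean of the load time series
MAX_WEIGHT = 1e5

weight = min(int(link_p_nom_sum + gen_p_nom_sum + mean_load), MAX_WEIGHT)
# -> 730; a foreign node with, say, 3e6 MW of generation capacity would be
#    capped at 100000 so it cannot dominate the k-means objective.
```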
+
+
+def kmean_clustering_gas(etrago, network_ch4, weight, n_clusters):
+    """
+    Performs K-means clustering on the gas network data in the given
+    `network_ch4` pypsa.Network object.
+
+    Parameters
+    ----------
+    etrago : Etrago
+        An instance of the Etrago class
+    network_ch4 : pypsa.Network
+        A Network object containing the gas network data.
+    weight : pandas.Series
+        Weighting per bus used in the k-means clustering.
+    n_clusters : int
+        The number of clusters to create.
+
+    Returns
+    -------
+    busmap : pandas.Series
+        A pandas.Series object mapping each bus in the CH4 network to its
+        corresponding cluster ID
+    None
+        None is returned because k-means clustering makes no use of medoids
+    """
+    settings = etrago.args["network_clustering"]
+
+    busmap = busmap_by_kmeans(
+        network_ch4,
+        bus_weightings=weight,
+        n_clusters=n_clusters,
+        n_init=settings["n_init"],
+        max_iter=settings["max_iter"],
+        tol=settings["tol"],
+        random_state=settings["random_state"],
+    )
+
+    return busmap, None
+
+
+def get_h2_clusters(etrago, busmap_ch4):
+    """
+    Maps H2 buses to CH4 cluster IDs and creates unique H2 cluster IDs.
+
+    Parameters
+    ----------
+    etrago : Etrago
+        An instance of the Etrago class
+    busmap_ch4 : pd.Series
+        A Pandas Series mapping each bus in the CH4 network to its
+        corresponding cluster ID.
+
+    Returns
+    -------
+    busmap : pd.Series
+        A Pandas Series mapping each bus in the combined CH4 and H2 network
+        to its corresponding cluster ID.
+    """
+    # Mapping of H2 buses to new CH4 cluster IDs
+    busmap_h2 = pd.Series(
+        busmap_ch4.loc[etrago.ch4_h2_mapping.index].values,
+        index=etrago.ch4_h2_mapping.values,
+    )
+
+    # Create unique H2 cluster IDs by offsetting with the number of gas
+    # clusters
+    n_gas = etrago.args["network_clustering"]["n_clusters_gas"]
+    busmap_h2 = (busmap_h2.astype(int) + n_gas).astype(str)
+
+    busmap_h2 = busmap_h2.squeeze()
+
+    busmap = pd.concat([busmap_ch4, busmap_h2])
+
+    return busmap
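A minimal sketch (made-up bus IDs) of the ID-offset trick in `get_h2_clusters()`: H2 buses inherit the cluster of their CH4 partner bus, shifted by `n_clusters_gas` so that CH4 and H2 cluster IDs can never collide. `ch4_h2_mapping` is assumed to map CH4 buses to H2 buses as in the code above.

```python
import pandas as pd

n_clusters_gas = 14
busmap_ch4 = pd.Series({"101": "0", "102": "0", "103": "5"})
ch4_h2_mapping = pd.Series({"101": "201", "103": "203"})  # CH4 bus -> H2 bus

busmap_h2 = pd.Series(
    busmap_ch4.loc[ch4_h2_mapping.index].values, index=ch4_h2_mapping.values
)
busmap_h2 = (busmap_h2.astype(int) + n_clusters_gas).astype(str)
print(busmap_h2.to_dict())  # {'201': '14', '203': '19'}
```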
+
+
+def gas_postprocessing(etrago, busmap, medoid_idx=None):
+    """
+    Performs the postprocessing for the gas grid clustering based on the
+    provided busmap and returns the clustered network.
+
+    Parameters
+    ----------
+    etrago : Etrago
+        An instance of the Etrago class
+    busmap : pd.Series
+        A Pandas Series mapping each bus to its corresponding cluster ID.
+    medoid_idx : pd.Series
+        A pandas.Series object containing the medoid indices for the gas
+        network.
+
+    Returns
+    -------
+    network_gasgrid_c : pypsa.Network
+        A pypsa.Network containing the clustered network.
+    busmap : pd.Series
+        A Pandas Series mapping each bus to its corresponding cluster ID.
+    """
+    settings = etrago.args["network_clustering"]
+
+    if settings["k_gas_busmap"] is False:
+        if settings["method_gas"] == "kmeans":
+            busmap.index.name = "bus_id"
+            busmap.name = "cluster"
+            busmap.to_csv(
+                "kmeans_gasgrid_busmap_"
+                + str(settings["n_clusters_gas"])
+                + "_result.csv"
+            )
+
+        else:
+            busmap.name = "cluster"
+            busmap_ind = pd.Series(
+                medoid_idx[busmap.values.astype(int)].values,
+                index=busmap.index,
+                dtype=pd.StringDtype(),
+            )
+            busmap_ind.name = "medoid_idx"
+
+            export = pd.concat([busmap, busmap_ind], axis=1)
+            export.index.name = "bus_id"
+            export.to_csv(
+                "kmedoids-dijkstra_gasgrid_busmap_"
+                + str(settings["n_clusters_gas"])
+                + "_result.csv"
+            )
+
+    if "H2" in etrago.network.buses.carrier.unique():
+        busmap = get_h2_clusters(etrago, busmap)
+
+    # Add all other buses to busmap
+    missing_idx = list(
+        etrago.network.buses[
+            (~etrago.network.buses.index.isin(busmap.index))
+        ].index
+    )
+    next_bus_id = highestInteger(etrago.network.buses.index) + 1
+    new_gas_buses = [str(int(x) + next_bus_id) for x in busmap]
+
+    busmap_idx = list(busmap.index) + missing_idx
+    busmap_values = new_gas_buses + missing_idx
+    busmap = pd.Series(busmap_values, index=busmap_idx)
+
+    if etrago.args["sector_coupled_clustering"]["active"]:
+        for name, data in etrago.args["sector_coupled_clustering"][
+            "carrier_data"
+        ].items():
+            strategy = data["strategy"]
+            if strategy == "consecutive":
+                busmap_sector_coupling = consecutive_sector_coupling(
+                    etrago.network,
+                    busmap,
+                    data["base"],
+                    name,
+                )
+            elif strategy == "simultaneous":
+                if len(data["base"]) < 2:
+                    msg = (
+                        "To apply simultaneous clustering for the "
+                        + name
+                        + " buses, at least 2 base buses must be selected."
+                    )
+                    raise ValueError(msg)
+                busmap_sector_coupling = simultaneous_sector_coupling(
+                    etrago.network,
+                    busmap,
+                    data["base"],
+                    name,
+                )
+            else:
+                msg = (
+                    "Strategy for sector coupled clustering must be either "
+                    "'consecutive' or 'simultaneous'."
+                )
+                raise ValueError(msg)
+            for key, value in busmap_sector_coupling.items():
+                busmap.loc[key] = value
+    busmap = busmap.astype(str)
+    busmap.index = busmap.index.astype(str)
+
+    network_gasgrid_c = get_clustering_from_busmap(
+        etrago.network,
+        busmap,
+        bus_strategies={
+            "country": "first",
+        },
+        one_port_strategies={
+            "Generator": {
+                "marginal_cost": np.mean,
+                "capital_cost": np.mean,
+                "p_nom_max": np.sum,
+                "p_nom_min": np.sum,
+                "e_nom_max": np.sum,
+            },
+            "Store": {
+                "marginal_cost": np.mean,
+                "capital_cost": np.mean,
+                "e_nom": np.sum,
+                "e_nom_max": sum_with_inf,
+            },
+            "Load": {
+                "p_set": np.sum,
+            },
+        },
+    )
+
+    # aggregation of the links and links time series
+    network_gasgrid_c.links, network_gasgrid_c.links_t = group_links(
+        network_gasgrid_c
+    )
+
+    # Overwrite p_nom of links with carrier "H2_feedin" (eGon2035 only)
+    if etrago.args["scn_name"] == "eGon2035":
+        H2_energy_share = 0.05053  # energy share of H2 corresponding to a
+        # volumetric share of 15 % H2 in a mixture of H2 and CH4
+        feed_in = network_gasgrid_c.links.loc[
+            network_gasgrid_c.links.carrier == "H2_feedin"
+        ]
+        pipeline_capacities = network_gasgrid_c.links.loc[
+            network_gasgrid_c.links.carrier == "CH4"
+        ]
+
+        for bus in feed_in["bus1"].values:
+            # calculate the total pipeline capacity connected to a specific
+            # bus
+            nodal_capacity = pipeline_capacities.loc[
+                (pipeline_capacities["bus0"] == bus)
+                | (pipeline_capacities["bus1"] == bus),
+                "p_nom",
+            ].sum()
+            # multiply the total pipeline capacity with the H2 energy share
+            # corresponding to the volumetric share
+            network_gasgrid_c.links.loc[
+                (network_gasgrid_c.links["bus1"].values == bus)
+                & (network_gasgrid_c.links["carrier"].values == "H2_feedin"),
+                "p_nom",
+            ] = (
+                nodal_capacity * H2_energy_share
+            )
+    # Insert components not related to the gas clustering
+    other_components = ["Line", "StorageUnit", "ShuntImpedance", "Transformer"]
+
+    for c in etrago.network.iterate_components(other_components):
+        io.import_components_from_dataframe(
+            network_gasgrid_c,
+            c.df,
+            c.name,
+        )
+        for attr, df in c.pnl.items():
+            if not df.empty:
+                io.import_series_from_dataframe(
+                    network_gasgrid_c,
+                    df,
+                    c.name,
+                    attr,
+                )
+    io.import_components_from_dataframe(
+        network_gasgrid_c, etrago.network.carriers, "Carrier"
+    )
+
+    network_gasgrid_c.determine_network_topology()
+
+    # Adjust x and y coordinates of 'CH4' and 'H2_grid' medoids
+    if settings["method_gas"] == "kmedoids-dijkstra" and len(medoid_idx) > 0:
+        for i in network_gasgrid_c.buses[
+            network_gasgrid_c.buses.carrier == "CH4"
+        ].index:
+            cluster = str(i)
+            if cluster in busmap[medoid_idx].values:
+                medoid = busmap[medoid_idx][
+                    busmap[medoid_idx] == cluster
+                ].index
+                h2_idx = network_gasgrid_c.buses.loc[
+                    (network_gasgrid_c.buses.carrier == "H2_grid")
+                    & (
+                        network_gasgrid_c.buses.y
+                        == network_gasgrid_c.buses.at[i, "y"]
+                    )
+                    & (
+                        network_gasgrid_c.buses.x
+                        == network_gasgrid_c.buses.at[i, "x"]
+                    )
+                ]
+                if len(h2_idx) > 0:
+                    h2_idx = h2_idx.index.tolist()[0]
+                    network_gasgrid_c.buses.at[
+                        h2_idx, "x"
+                    ] = etrago.network.buses["x"].loc[medoid]
+                    network_gasgrid_c.buses.at[
+                        h2_idx, "y"
+                    ] = etrago.network.buses["y"].loc[medoid]
+                network_gasgrid_c.buses.at[i, "x"] = etrago.network.buses[
+                    "x"
+                ].loc[medoid]
+                network_gasgrid_c.buses.at[i, "y"] = etrago.network.buses[
+                    "y"
+                ].loc[medoid]
+    return (network_gasgrid_c, busmap)
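Worked toy numbers (assumed capacities) for the `H2_feedin` overwrite above: the feed-in link capacity is set to the total CH4 pipeline capacity at the bus, multiplied by the energetic share of a 15 vol-% hydrogen admixture.

```python
# Two CH4 pipelines touch the hypothetical feed-in bus.
pipeline_p_nom = [3000.0, 2500.0]   # MW, made-up pipeline capacities
H2_energy_share = 0.05053           # value used in the code above

p_nom_feedin = sum(pipeline_p_nom) * H2_energy_share
print(round(p_nom_feedin, 1))       # 277.9 (MW)
```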
+
+
+def highestInteger(potentially_numbers):
+    """Fetch the highest number of a series with mixed types
+
+    Parameters
+    ----------
+    potentially_numbers : pandas.Series
+        Series with mixed dtypes, potentially containing numbers.
+
+    Returns
+    -------
+    highest : int
+        Highest integer found in series.
+    """
+    highest = 0
+    for number in potentially_numbers:
+        try:
+            num = int(number)
+            if num > highest:
+                highest = num
+        except ValueError:
+            pass
+    return highest
+
+
+def simultaneous_sector_coupling(
+    network, busmap, carrier_based, carrier_to_cluster
+):
+    """
+    Cluster sector coupling technology based on multiple connected carriers.
+
+    The topology of the sector coupling technology must be such that the
+    links connected to other sectors only point inwards, e.g. for the heat
+    sector, heat generating technologies from electricity or gas only point
+    to the heat sector and not vice versa.
+
+    Parameters
+    ----------
+    network : pypsa.Network
+        PyPSA network instance.
+    busmap : pandas.Series
+        Series with lookup table for clustered buses.
+    carrier_based : list
+        Carriers on which the clustering of the sector coupling is based.
+    carrier_to_cluster : str
+        Name of the carrier which should be clustered
+
+    Returns
+    -------
+    dict
+        Busmap for the sector coupling cluster.
+    """
+    next_bus_id = highestInteger(busmap.values) + 1
+    buses_clustered = network.buses[
+        network.buses["carrier"].isin(carrier_based)
+    ]
+    buses_to_cluster = network.buses[
+        network.buses["carrier"] == carrier_to_cluster
+    ]
+    buses_to_skip = network.buses[
+        network.buses["carrier"] == carrier_to_cluster + "_store"
+    ]
+
+    connected_links = network.links.loc[
+        network.links["bus0"].isin(buses_clustered.index)
+        & network.links["bus1"].isin(buses_to_cluster.index)
+        & ~network.links["bus1"].isin(buses_to_skip.index)
+        & ~network.links["bus0"].isin(buses_to_skip.index)
+    ].copy()
+
+    busmap = busmap.to_dict()
+    connected_links["bus0_clustered"] = (
+        connected_links["bus0"].map(busmap).fillna(connected_links["bus0"])
+    )
+    connected_links["bus1_clustered"] = (
+        connected_links["bus1"].map(busmap).fillna(connected_links["bus1"])
+    )
+
+    # cluster sector coupling technologies
+    busmap = sc_multi_carrier_based(buses_to_cluster, connected_links)
+    busmap = {
+        bus_id: bus_num + next_bus_id for bus_id, bus_num in busmap.items()
+    }
+
+    # cluster appendices
+    skipped_links = network.links.loc[
+        (
+            network.links["bus1"].isin(buses_to_skip.index)
+            & network.links["bus0"].isin(buses_to_cluster.index)
+        )
+        | (
+            network.links["bus0"].isin(buses_to_cluster.index)
+            & network.links["bus1"].isin(buses_to_skip.index)
+        )
+    ].copy()
+
+    # map skipped buses after clustering
+    skipped_links["bus0_clustered"] = (
+        skipped_links["bus0"].map(busmap).fillna(skipped_links["bus0"])
+    )
+    skipped_links["bus1_clustered"] = (
+        skipped_links["bus1"].map(busmap).fillna(skipped_links["bus1"])
+    )
+
+    busmap_series = pd.Series(busmap)
+    next_bus_id = highestInteger(busmap_series.values) + 1
+
+    # create clusters for skipped buses
+    clusters = busmap_series.unique()
+    for i in range(len(clusters)):
+        buses = skipped_links.loc[
+            skipped_links["bus0_clustered"] == clusters[i], "bus1_clustered"
+        ]
+        for bus_id in buses:
+            busmap[bus_id] = next_bus_id + i
+        buses = skipped_links.loc[
+            skipped_links["bus1_clustered"] == clusters[i], "bus0_clustered"
+        ]
+        for bus_id in buses:
+            busmap[bus_id] = next_bus_id + i
+    return busmap
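A tiny illustration (made-up IDs) of the grouping rule that the simultaneous strategy delegates to `sc_multi_carrier_based()` below: each heat bus is keyed by the sorted tuple of base clusters (e.g. electricity and gas) feeding it, and buses with identical tuples end up in the same cluster. The `groupby` here is just a compact equivalent of the loop in the code.

```python
import pandas as pd

connected_links = pd.DataFrame(
    {
        "bus0_clustered": ["el_0", "gas_3", "el_0", "gas_3", "el_1"],
        "bus1_clustered": ["heat_a", "heat_a", "heat_b", "heat_b", "heat_c"],
    }
)
keys = connected_links.groupby("bus1_clustered")["bus0_clustered"].apply(
    lambda s: tuple(sorted(s.unique()))
)
print(keys.to_dict())
# {'heat_a': ('el_0', 'gas_3'), 'heat_b': ('el_0', 'gas_3'),
#  'heat_c': ('el_1',)}  -> heat_a and heat_b share one cluster
```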
+
+
+def consecutive_sector_coupling(
+    network, busmap, carrier_based, carrier_to_cluster
+):
+    """
+    Cluster sector coupling technology based on single connected carriers.
+
+    The topology of the sector coupling technology must be such that the
+    links connected to other sectors only point inwards, e.g. for the heat
+    sector, heat generating technologies from electricity or gas only point
+    to the heat sector and not vice versa.
+
+    Parameters
+    ----------
+    network : pypsa.Network
+        PyPSA network instance.
+    busmap : pandas.Series
+        Series with lookup table for clustered buses.
+    carrier_based : list
+        Carriers on which the clustering of the sector coupling is based.
+    carrier_to_cluster : str
+        Name of the carrier which should be clustered
+
+    Returns
+    -------
+    busmap_sc : dict
+        Busmap for the sector coupled cluster.
+    """
+    next_bus_id = highestInteger(busmap.values) + 1
+    buses_to_skip = network.buses[
+        network.buses["carrier"] == carrier_to_cluster + "_store"
+    ]
+    buses_to_cluster = network.buses[
+        network.buses["carrier"] == carrier_to_cluster
+    ]
+    buses_clustered = network.buses[
+        network.buses["carrier"] == carrier_based[0]
+    ]
+    busmap_sc = {}
+
+    for base in carrier_based:
+        # remove already clustered buses
+        buses_to_cluster = buses_to_cluster[
+            ~buses_to_cluster.index.isin(busmap_sc.keys())
+        ]
+        buses_clustered = network.buses[network.buses["carrier"] == base]
+
+        connected_links = network.links.loc[
+            network.links["bus0"].isin(buses_clustered.index)
+            & network.links["bus1"].isin(buses_to_cluster.index)
+            & ~network.links["bus1"].isin(buses_to_skip.index)
+            & ~network.links["bus0"].isin(buses_to_skip.index)
+        ].copy()
+
+        connected_links["bus0_clustered"] = (
+            connected_links["bus0"].map(busmap).fillna(connected_links["bus0"])
+        )
+        connected_links["bus1_clustered"] = (
+            connected_links["bus1"].map(busmap).fillna(connected_links["bus1"])
+        )
+
+        # cluster sector coupling technologies
+        busmap_by_base = sc_single_carrier_based(connected_links)
+        bus_num = 0
+        for bus_id, bus_num in busmap_by_base.items():
+            busmap_by_base[bus_id] = bus_num + next_bus_id
+        next_bus_id = bus_num + next_bus_id + 1
+        busmap_sc.update(busmap_by_base)
+        buses_to_cluster = buses_to_cluster[
+            ~buses_to_cluster.index.isin(busmap_sc.keys())
+        ]
+
+    if len(buses_to_cluster) > 0:
+        msg = "The following buses are not added to any cluster: " + str(
+            buses_to_cluster.index
+        )
+        logger.warning(msg)
+    # cluster appendices
+    skipped_links = network.links.loc[
+        (
+            network.links["bus1"].isin(buses_to_skip.index)
+            & network.links["bus0"].isin(busmap_sc.keys())
+        )
+        | (
+            network.links["bus0"].isin(busmap_sc.keys())
+            & network.links["bus1"].isin(buses_to_skip.index)
+        )
+    ].copy()
+
+    # map skipped buses after clustering
+    skipped_links["bus0_clustered"] = (
+        skipped_links["bus0"].map(busmap_sc).fillna(skipped_links["bus0"])
+    )
+    skipped_links["bus1_clustered"] = (
+        skipped_links["bus1"].map(busmap_sc).fillna(skipped_links["bus1"])
+    )
+
+    busmap_series = pd.Series(busmap_sc)
+    next_bus_id = highestInteger(busmap_series.values) + 1
+
+    # create clusters for skipped buses
+    clusters = busmap_series.unique()
+    for i in range(len(clusters)):
+        buses = skipped_links.loc[
+            skipped_links["bus0_clustered"] == clusters[i], "bus1_clustered"
+        ]
+        for bus_id in buses:
+            busmap_sc[bus_id] = next_bus_id + i
+        buses = skipped_links.loc[
+            skipped_links["bus1_clustered"] == clusters[i], "bus0_clustered"
+        ]
+        for bus_id in buses:
+            busmap_sc[bus_id] = next_bus_id + i
+    return busmap_sc
+
+
+def sc_multi_carrier_based(buses_to_cluster, connected_links):
+    """
+    Create busmap for a sector coupled carrier based on multiple other
+    carriers.
+
+    Parameters
+    ----------
+    buses_to_cluster : pandas.Series
+        Series containing the buses of the sector coupled carrier which are
+        to be clustered.
+    connected_links : pandas.DataFrame
+        Links that connect from the buses with other carriers to the
+        buses of the sector coupled carrier.
+
+    Returns
+    -------
+    busmap : dict
+        Busmap for the sector coupled carrier.
+    """
+    clusters = pd.Series(dtype=object)
+    for bus_id in buses_to_cluster.index:
+        clusters.loc[bus_id] = tuple(
+            sorted(
+                connected_links.loc[
+                    connected_links["bus1_clustered"] == bus_id,
+                    "bus0_clustered",
+                ].unique()
+            )
+        )
+    duplicates = clusters.unique()
+
+    busmap = {}
+    for i in range(len(duplicates)):
+        cluster = clusters[clusters == duplicates[i]].index.tolist()
+        if len(cluster) > 1:
+            busmap.update({bus: i for bus in cluster})
+    return busmap
+
+
+def sc_single_carrier_based(connected_links):
+    """
+    Create busmap for a sector coupled carrier based on a single other
+    carrier.
+
+    Parameters
+    ----------
+    connected_links : pandas.DataFrame
+        Links that connect from the buses with the other carrier to the
+        buses of the sector coupled carrier.
+
+    Returns
+    -------
+    busmap : dict
+        Busmap for the sector coupled carrier.
+    """
+    busmap = {}
+    clusters = connected_links["bus0_clustered"].unique()
+    for i in range(len(clusters)):
+        buses = connected_links.loc[
+            connected_links["bus0_clustered"] == clusters[i], "bus1_clustered"
+        ].unique()
+        busmap.update({bus: i for bus in buses})
+    return busmap
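A sketch of the bus-pair normalisation applied to CH4 pipelines in `get_clustering_from_busmap()` below: sorting `(bus0, bus1)` row-wise makes antiparallel pipelines between the same pair of clusters identical, so the subsequent link grouping can merge them into one bidirectional pipeline.

```python
import numpy as np
import pandas as pd

links = pd.DataFrame(
    {"bus0": ["1", "12", "4"], "bus1": ["12", "1", "7"], "carrier": "CH4"}
)
mask = links["carrier"] == "CH4"
links.loc[mask, ["bus0", "bus1"]] = np.sort(
    links.loc[mask, ["bus0", "bus1"]].values, axis=1
)
print(links[["bus0", "bus1"]].values.tolist())
# [['1', '12'], ['1', '12'], ['4', '7']] -> the first two rows now describe
# the same connection and aggregate to a single pipeline
```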
+
+
+def get_clustering_from_busmap(
+    network,
+    busmap,
+    line_length_factor=1.0,
+    with_time=True,
+    bus_strategies=dict(),
+    one_port_strategies=dict(),
+):
+    """
+    Aggregates components of the given network based on a bus mapping and
+    returns a clustered gas grid pypsa.Network.
+
+    Parameters
+    ----------
+    network : pypsa.Network
+        The input pypsa.Network object
+    busmap : pandas.Series
+        A mapping of buses to clusters
+    line_length_factor : float
+        A factor used to adjust the length of new links created during
+        aggregation. Default is 1.0.
+    with_time : bool
+        Determines whether to copy the time-dependent properties of the
+        input network to the output network. Default is True.
+    bus_strategies : dict
+        A dictionary of custom strategies to use during the aggregation
+        step. Default is an empty dictionary.
+    one_port_strategies : dict
+        A dictionary of custom strategies to use during the one-port
+        component aggregation step. Default is an empty dictionary.
+
+    Returns
+    -------
+    network_gasgrid_c : pypsa.Network
+        A new gas grid pypsa.Network object with aggregated components based
+        on the bus mapping.
+    """
+    network_gasgrid_c = Network()
+
+    # Aggregate buses
+    new_buses = aggregatebuses(
+        network,
+        busmap,
+        custom_strategies=bus_strategies,
+    )
+    new_buses.index.name = "bus_id"
+
+    io.import_components_from_dataframe(network_gasgrid_c, new_buses, "Bus")
+
+    if with_time:
+        network_gasgrid_c.set_snapshots(network.snapshots)
+        network_gasgrid_c.snapshot_weightings = (
+            network.snapshot_weightings.copy()
+        )
+    # Aggregate one port components
+    one_port_components = ["Generator", "Load", "Store"]
+
+    for one_port in one_port_components:
+        new_df, new_pnl = aggregateoneport(
+            network,
+            busmap,
+            component=one_port,
+            with_time=with_time,
+            custom_strategies=one_port_strategies.get(one_port, {}),
+        )
+        io.import_components_from_dataframe(
+            network_gasgrid_c, new_df, one_port
+        )
+        for attr, df in iteritems(new_pnl):
+            io.import_series_from_dataframe(
+                network_gasgrid_c, df, one_port, attr
+            )
+    # Aggregate links
+    new_links = (
+        network.links.assign(
+            bus0=network.links.bus0.map(busmap),
+            bus1=network.links.bus1.map(busmap),
+        )
+        .dropna(subset=["bus0", "bus1"])
+        .loc[lambda df: df.bus0 != df.bus1]
+    )
+
+    # Preparation for CH4 pipeline aggregation: pipelines are treated
+    # differently from other links, since all of them are considered
+    # bidirectional. If two pipelines connect the same pair of clusters in
+    # opposite directions (e.g. bus0=1, bus1=12 and bus0=12, bus1=1), they
+    # are aggregated to a single pipeline. Therefore, the order of
+    # bus0/bus1 is normalized.
+    pipeline_mask = new_links["carrier"] == "CH4"
+    sorted_buses = np.sort(
+        new_links.loc[pipeline_mask, ["bus0", "bus1"]].values, 1
+    )
+    new_links.loc[pipeline_mask, ["bus0", "bus1"]] = sorted_buses
+
+    # import the links and the respective time series with the bus0 and
+    # bus1 values updated from the busmap
+    io.import_components_from_dataframe(network_gasgrid_c, new_links, "Link")
+
+    if with_time:
+        for attr, df in network.links_t.items():
+            if not df.empty:
+                io.import_series_from_dataframe(
+                    network_gasgrid_c, df, "Link", attr
+                )
+    return network_gasgrid_c
+
+
+def run_spatial_clustering_gas(self):
+    """
+    Performs spatial clustering on the gas network using either the k-means
+    or the k-medoids Dijkstra algorithm. Updates the network topology by
+    aggregating buses and links, and then performs postprocessing to
+    finalize the changes.
+
+    Returns
+    -------
+    None
+
+    Raises
+    ------
+    ValueError: If the selected method is not "kmeans" or
+    "kmedoids-dijkstra".
+ + """ + if "CH4" in self.network.buses.carrier.values: + settings = self.args["network_clustering"] + + if settings["active"]: + method = settings["method_gas"] + logger.info(f"Start {method} clustering GAS") + + gas_network, weight, n_clusters = preprocessing(self) + + if method == "kmeans": + if settings["k_gas_busmap"]: + busmap = pd.read_csv( + settings["k_gas_busmap"], + index_col="bus_id", + dtype=pd.StringDtype(), + ).squeeze() + medoid_idx = None + else: + busmap, medoid_idx = kmean_clustering_gas( + self, gas_network, weight, n_clusters + ) + + elif method == "kmedoids-dijkstra": + if settings["k_gas_busmap"]: + busmap = pd.read_csv( + settings["k_gas_busmap"], + index_col="bus_id", + dtype=pd.StringDtype(), + ) + medoid_idx = pd.Series( + busmap["medoid_idx"].unique(), + index=busmap["cluster"].unique(), + dtype=pd.StringDtype(), + ) + busmap = busmap["cluster"] + + else: + busmap, medoid_idx = kmedoids_dijkstra_clustering( + self, + gas_network.buses, + gas_network.links, + weight, + n_clusters, + ) + + else: + msg = ( + 'Please select "kmeans" or "kmedoids-dijkstra" as ' + "spatial clustering method for the gas network" + ) + raise ValueError(msg) + self.network, busmap = gas_postprocessing(self, busmap, medoid_idx) + + self.update_busmap(busmap) + + # The control parameter is overwritten in pypsa's clustering. + # The function network.determine_network_topology is called, + # which sets slack bus(es). + set_control_strategies(self.network) + + logger.info( + """GAS Network clustered to {} DE-buses and {} foreign buses + with {} algorithm.""".format( + len( + self.network.buses.loc[ + (self.network.buses.carrier == "CH4") + & (self.network.buses.country == "DE") + ] + ), + len( + self.network.buses.loc[ + (self.network.buses.carrier == "CH4") + & (self.network.buses.country != "DE") + ] + ), + method, + ) + ) diff --git a/etrago/cluster/networkclustering.py b/etrago/cluster/networkclustering.py deleted file mode 100644 index 7e1b64853..000000000 --- a/etrago/cluster/networkclustering.py +++ /dev/null @@ -1,656 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, -# Europa-Universität Flensburg, -# Centre for Sustainable Energy Systems, -# DLR-Institute for Networked Energy Systems - -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. - -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . 
- -# File description for read-the-docs -""" Networkclustering.py defines the methods to cluster power grid networks -spatially for applications within the tool eTraGo.""" - -import os -if 'READTHEDOCS' not in os.environ: - from etrago.tools.utilities import * - from pypsa.networkclustering import (aggregatebuses, aggregateoneport, - aggregategenerators, - get_clustering_from_busmap, - busmap_by_kmeans, busmap_by_stubs) - from egoio.db_tables.model_draft import EgoGridPfHvBusmap - - from itertools import product - import networkx as nx - import multiprocessing as mp - from math import ceil - import pandas as pd - from networkx import NetworkXNoPath - from pickle import dump - from pypsa import Network - import pypsa.io as io - import pypsa.components as components - from six import iteritems - from sqlalchemy import or_, exists - import numpy as np - import logging - - logger = logging.getLogger(__name__) - -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") -__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "s3pp, wolfbunke, ulfmueller, lukasol" - -# TODO: Workaround because of agg - - -def _leading(busmap, df): - """ - """ - def leader(x): - ix = busmap[x.index[0]] - return df.loc[ix, x.name] - return leader - - -def cluster_on_extra_high_voltage(network, busmap, with_time=True): - """ Main function of the EHV-Clustering approach. Creates a new clustered - pypsa.Network given a busmap mapping all bus_ids to other bus_ids of the - same network. - - Parameters - ---------- - network : pypsa.Network - Container for all network components. - - busmap : dict - Maps old bus_ids to new bus_ids. - - with_time : bool - If true time-varying data will also be aggregated. - - Returns - ------- - network : pypsa.Network - Container for all network components of the clustered network. 
- """ - - network_c = Network() - - buses = aggregatebuses( - network, busmap, { - 'x': _leading( - busmap, network.buses), 'y': _leading( - busmap, network.buses)}) - - # keep attached lines - lines = network.lines.copy() - mask = lines.bus0.isin(buses.index) - lines = lines.loc[mask, :] - - # keep attached links - links = network.links.copy() - mask = links.bus0.isin(buses.index) - links = links.loc[mask, :] - - # keep attached transformer - transformers = network.transformers.copy() - mask = transformers.bus0.isin(buses.index) - transformers = transformers.loc[mask, :] - - io.import_components_from_dataframe(network_c, buses, "Bus") - io.import_components_from_dataframe(network_c, lines, "Line") - io.import_components_from_dataframe(network_c, links, "Link") - io.import_components_from_dataframe(network_c, transformers, "Transformer") - - if with_time: - network_c.snapshots = network.snapshots - network_c.set_snapshots(network.snapshots) - network_c.snapshot_weightings = network.snapshot_weightings.copy() - - # dealing with generators - network.generators.control = "PV" - network.generators['weight'] = 1 - new_df, new_pnl = aggregategenerators(network, busmap, with_time, - custom_strategies={'p_nom_min':np.min,'p_nom_max': np.min, - 'weight': np.sum, 'p_nom': np.sum, - 'p_nom_opt': np.sum, 'marginal_cost': - np.mean, 'capital_cost': np.mean}) - io.import_components_from_dataframe(network_c, new_df, 'Generator') - for attr, df in iteritems(new_pnl): - io.import_series_from_dataframe(network_c, df, 'Generator', attr) - - # dealing with all other components - aggregate_one_ports = network.one_port_components.copy() - aggregate_one_ports.discard('Generator') - - for one_port in aggregate_one_ports: - one_port_strategies = {'StorageUnit': {'marginal_cost': np.mean, 'capital_cost': np.mean, 'efficiency': np.mean, - 'efficiency_dispatch': np.mean, 'standing_loss': np.mean, 'efficiency_store': np.mean, - 'p_min_pu': np.min}} - new_df, new_pnl = aggregateoneport( - network, busmap, component=one_port, with_time=with_time, - custom_strategies=one_port_strategies.get(one_port, {})) - io.import_components_from_dataframe(network_c, new_df, one_port) - for attr, df in iteritems(new_pnl): - io.import_series_from_dataframe(network_c, df, one_port, attr) - - network_c.determine_network_topology() - - return network_c - - -def graph_from_edges(edges): - """ Constructs an undirected multigraph from a list containing data on - weighted edges. - - Parameters - ---------- - edges : list - List of tuples each containing first node, second node, weight, key. - - Returns - ------- - M : :class:`networkx.classes.multigraph.MultiGraph - """ - - M = nx.MultiGraph() - - for e in edges: - - n0, n1, weight, key = e - - M.add_edge(n0, n1, weight=weight, key=key) - - return M - - -def gen(nodes, n, graph): - # TODO There could be a more convenient way of doing this. This generators - # single purpose is to prepare data for multiprocessing's starmap function. - """ Generator for applying multiprocessing. - - Parameters - ---------- - nodes : list - List of nodes in the system. - - n : int - Number of desired multiprocessing units. - - graph : :class:`networkx.classes.multigraph.MultiGraph - Graph representation of an electrical grid. - - Returns - ------- - None - """ - - g = graph.copy() - - for i in range(0, len(nodes), n): - yield (nodes[i:i + n], g) - - -def shortest_path(paths, graph): - """ Finds the minimum path lengths between node pairs defined in paths. 
- - Parameters - ---------- - paths : list - List of pairs containing a source and a target node - - graph : :class:`networkx.classes.multigraph.MultiGraph - Graph representation of an electrical grid. - - Returns - ------- - df : pd.DataFrame - DataFrame holding source and target node and the minimum path length. - """ - - idxnames = ['source', 'target'] - idx = pd.MultiIndex.from_tuples(paths, names=idxnames) - df = pd.DataFrame(index=idx, columns=['path_length']) - df.sort_index(inplace=True) - - for s, t in paths: - - try: - df.loc[(s, t), 'path_length'] = \ - nx.dijkstra_path_length(graph, s, t) - - except NetworkXNoPath: - continue - - return df - - -def busmap_by_shortest_path(etrago, scn_name, fromlvl, tolvl, cpu_cores=4): - """ Creates a busmap for the EHV-Clustering between voltage levels based - on dijkstra shortest path. The result is automatically written to the - `model_draft` on the [www.openenergy-platform.org] - database with the name `ego_grid_pf_hv_busmap` and the attributes scn_name - (scenario name), bus0 (node before clustering), bus1 (node after - clustering) and path_length (path length). - An AssertionError occurs if buses with a voltage level are not covered by - the input lists 'fromlvl' or 'tolvl'. - - Parameters - ---------- - network : pypsa.Network object - Container for all network components. - - session : sqlalchemy.orm.session.Session object - Establishes interactions with the database. - - scn_name : str - Name of the scenario. - - fromlvl : list - List of voltage-levels to cluster. - - tolvl : list - List of voltage-levels to remain. - - cpu_cores : int - Number of CPU-cores. - - Returns - ------- - None - """ - - # cpu_cores = mp.cpu_count() - - # data preperation - s_buses = buses_grid_linked(etrago.network, fromlvl) - lines = connected_grid_lines(etrago.network, s_buses) - transformer = connected_transformer(network, s_buses) - mask = transformer.bus1.isin(buses_of_vlvl(etrago.network, tolvl)) - - # temporary end points, later replaced by bus1 pendant - t_buses = transformer[mask].bus0 - - # create all possible pathways - ppaths = list(product(s_buses, t_buses)) - - # graph creation - edges = [(row.bus0, row.bus1, row.length, ix) for ix, row - in lines.iterrows()] - M = graph_from_edges(edges) - - # applying multiprocessing - p = mp.Pool(cpu_cores) - chunksize = ceil(len(ppaths) / cpu_cores) - container = p.starmap(shortest_path, gen(ppaths, chunksize, M)) - df = pd.concat(container) - dump(df, open('df.p', 'wb')) - - # post processing - df.sortlevel(inplace=True) - mask = df.groupby(level='source')['path_length'].idxmin() - df = df.loc[mask, :] - - # rename temporary endpoints - df.reset_index(inplace=True) - df.target = df.target.map(dict(zip(etrago.network.transformers.bus0, - etrago.network.transformers.bus1))) - - # append to busmap buses only connected to transformer - transformer = etrago.network.transformers - idx = list(set(buses_of_vlvl(network, fromlvl)). 
- symmetric_difference(set(s_buses))) - mask = transformer.bus0.isin(idx) - - toappend = pd.DataFrame(list(zip(transformer[mask].bus0, - transformer[mask].bus1)), - columns=['source', 'target']) - toappend['path_length'] = 0 - - df = pd.concat([df, toappend], ignore_index=True, axis=0) - - # append all other buses - buses = etrago.network.buses - mask = buses.index.isin(df.source) - - assert set(buses[~mask].v_nom) == set(tolvl) - - tofill = pd.DataFrame([buses.index[~mask]] * 2).transpose() - tofill.columns = ['source', 'target'] - tofill['path_length'] = 0 - - df = pd.concat([df, tofill], ignore_index=True, axis=0) - - # prepare data for export - - df['scn_name'] = scn_name - df['version'] = etrago.args['gridversion'] - - df.rename(columns={'source': 'bus0', 'target': 'bus1'}, inplace=True) - df.set_index(['scn_name', 'bus0', 'bus1'], inplace=True) - - for i, d in df.reset_index().iterrows(): - etrago.session.add(EgoGridPfHvBusmap(**d.to_dict())) - - etrago.session.commit() - - return - - -def busmap_from_psql(etrago): - """ Retrieves busmap from `model_draft.ego_grid_pf_hv_busmap` on the - [www.openenergy-platform.org] by a given scenario - name. If this busmap does not exist, it is created with default values. - - Parameters - ---------- - network : pypsa.Network object - Container for all network components. - - session : sqlalchemy.orm.session.Session object - Establishes interactions with the database. - - scn_name : str - Name of the scenario. - - Returns - ------- - busmap : dict - Maps old bus_ids to new bus_ids. - """ - scn_name=(etrago.args['scn_name'] if etrago.args['scn_extension']==None - else etrago.args['scn_name']+'_ext_'+'_'.join( - etrago.args['scn_extension'])) - def fetch(): - - query = etrago.session.query( - EgoGridPfHvBusmap.bus0, EgoGridPfHvBusmap.bus1).\ - filter(EgoGridPfHvBusmap.scn_name == scn_name).\ - filter(EgoGridPfHvBusmap.version == etrago.args['gridversion']) - - return dict(query.all()) - - busmap = fetch() - - # TODO: Or better try/except/finally - if not busmap: - print('Busmap does not exist and will be created.\n') - - cpu_cores = input('cpu_cores (default 4): ') or '4' - - busmap_by_shortest_path(etrago, scn_name, - fromlvl=[110], tolvl=[220, 380, 400, 450], - cpu_cores=int(cpu_cores)) - busmap = fetch() - - return busmap - -def ehv_clustering(self): - - - if self.args['network_clustering_ehv']: - - logger.info('Start ehv clustering') - - self.network.generators.control = "PV" - busmap = busmap_from_psql(self) - self.network = cluster_on_extra_high_voltage( - self.network, busmap, with_time=True) - - logger.info('Network clustered to EHV-grid') - - -def kmean_clustering(etrago): - """ Main function of the k-mean clustering approach. Maps an original - network to a new one with adjustable number of nodes and new coordinates. - - Parameters - ---------- - network : :class:`pypsa.Network - Container for all network components. - - n_clusters : int - Desired number of clusters. - - load_cluster : boolean - Loads cluster coordinates from a former calculation. - - line_length_factor : float - Factor to multiply the crow-flies distance between new buses in order - to get new line lengths. - - remove_stubs: boolean - Removes stubs and stubby trees (i.e. sequentially reducing dead-ends). - - use_reduced_coordinates: boolean - If True, do not average cluster coordinates, but take from busmap. - - bus_weight_tocsv : str - Creates a bus weighting based on conventional generation and load - and save it to a csv file. 
- - bus_weight_fromcsv : str - Loads a bus weighting from a csv file to apply it to the clustering - algorithm. - - Returns - ------- - network : pypsa.Network object - Container for all network components. - """ - - - network = etrago.network - kmean_settings = etrago.args['network_clustering_kmeans'] - def weighting_for_scenario(x, save=None): - """ - """ - # define weighting based on conventional 'old' generator spatial - # distribution - non_conv_types = { - 'biomass', - 'wind_onshore', - 'wind_offshore', - 'solar', - 'geothermal', - 'load shedding', - 'extendable_storage'} - # Attention: network.generators.carrier.unique() - gen = (network.generators.loc[(network.generators.carrier - .isin(non_conv_types) == False)] - .groupby('bus').p_nom.sum() - .reindex(network.buses.index, fill_value=0.) + - network.storage_units - .loc[(network.storage_units.carrier - .isin(non_conv_types) == False)] - .groupby('bus').p_nom.sum() - .reindex(network.buses.index, fill_value=0.)) - - load = network.loads_t.p_set.mean().groupby(network.loads.bus).sum() - - b_i = x.index - g = normed(gen.reindex(b_i, fill_value=0)) - l = normed(load.reindex(b_i, fill_value=0)) - - w = g + l - weight = ((w * (100000. / w.max())).astype(int) - ).reindex(network.buses.index, fill_value=1) - - if save: - weight.to_csv(save) - - return weight - - def normed(x): - return (x / x.sum()).fillna(0.) - - # prepare k-mean - # k-means clustering (first try) - network.generators.control = "PV" - network.storage_units.control[network.storage_units.carrier == \ - 'extendable_storage'] = "PV" - - # problem our lines have no v_nom. this is implicitly defined by the - # connected buses: - network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) - - # adjust the electrical parameters of the lines which are not 380. - lines_v_nom_b = network.lines.v_nom != 380 - - voltage_factor = (network.lines.loc[lines_v_nom_b, 'v_nom'] / 380.)**2 - - network.lines.loc[lines_v_nom_b, 'x'] *= 1/voltage_factor - - network.lines.loc[lines_v_nom_b, 'r'] *= 1/voltage_factor - - network.lines.loc[lines_v_nom_b, 'b'] *= voltage_factor - - network.lines.loc[lines_v_nom_b, 'g'] *= voltage_factor - - network.lines.loc[lines_v_nom_b, 'v_nom'] = 380. - - trafo_index = network.transformers.index - transformer_voltages = \ - pd.concat([network.transformers.bus0.map(network.buses.v_nom), - network.transformers.bus1.map(network.buses.v_nom)], axis=1) - - network.import_components_from_dataframe( - network.transformers.loc[:, [ - 'bus0', 'bus1', 'x', 's_nom', 'capital_cost', 'sub_network', 's_max_pu']] - .assign(x=network.transformers.x * (380. / - transformer_voltages.max(axis=1))**2, length = 1) - .set_index('T' + trafo_index), - 'Line') - network.transformers.drop(trafo_index, inplace=True) - - for attr in network.transformers_t: - network.transformers_t[attr] = network.transformers_t[attr]\ - .reindex(columns=[]) - - network.buses['v_nom'] = 380. 
- - # State whether to create a bus weighting and save it, create or not save - # it, or use a bus weighting from a csv file - if kmean_settings['bus_weight_tocsv'] is not None: - weight = weighting_for_scenario( - x=network.buses, - save=kmean_settings['bus_weight_tocsv']) - elif kmean_settings['bus_weight_fromcsv'] is not None: - weight = pd.Series.from_csv(kmean_settings['bus_weight_fromcsv']) - weight.index = weight.index.astype(str) - else: - weight = weighting_for_scenario(x=network.buses, save=False) - - - # remove stubs - if kmean_settings['remove_stubs']: - network.determine_network_topology() - busmap = busmap_by_stubs(network) - network.generators['weight'] = network.generators['p_nom'] - aggregate_one_ports = network.one_port_components.copy() - aggregate_one_ports.discard('Generator') - - # reset coordinates to the new reduced guys, rather than taking an - # average (copied from pypsa.networkclustering) - if kmean_settings['use_reduced_coordinates']: - # TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS, - # i.e. network is changed in place!! - network.buses.loc[busmap.index, ['x', 'y'] - ] = network.buses.loc[busmap, ['x', 'y']].values - - clustering = get_clustering_from_busmap( - network, - busmap, - aggregate_generators_weighted=True, - one_port_strategies={'StorageUnit': {'marginal_cost': np.mean, - 'capital_cost': np.mean, - 'efficiency': np.mean, - 'efficiency_dispatch': np.mean, - 'standing_loss': np.mean, - 'efficiency_store': np.mean, - 'p_min_pu': np.min}}, - generator_strategies={'p_nom_min':np.min, - 'p_nom_opt': np.sum, - 'marginal_cost': np.mean, - 'capital_cost': np.mean}, - aggregate_one_ports=aggregate_one_ports, - line_length_factor=kmean_settings['line_length_factor']) - network = clustering.network - - weight = weight.groupby(busmap.values).sum() - - # k-mean clustering - if not kmean_settings['kmeans_busmap']: - busmap = busmap_by_kmeans( - network, - bus_weightings=pd.Series(weight), - n_clusters=kmean_settings['n_clusters'], - n_init=kmean_settings['n_init'], - max_iter=kmean_settings['max_iter'], - tol=kmean_settings['tol'], - n_jobs=kmean_settings['n_jobs']) - busmap.to_csv('kmeans_busmap_' + str(kmean_settings['n_clusters']) + '_result.csv') - else: - df = pd.read_csv(kmean_settings['kmeans_busmap']) - df=df.astype(str) - df = df.set_index('bus_id') - busmap = df.squeeze('columns') - - network.generators['weight'] = network.generators['p_nom'] - aggregate_one_ports = network.one_port_components.copy() - aggregate_one_ports.discard('Generator') - clustering = get_clustering_from_busmap( - network, - busmap, - aggregate_generators_weighted=True, - one_port_strategies={'StorageUnit': {'marginal_cost': np.mean, - 'capital_cost': np.mean, - 'efficiency': np.mean, - 'efficiency_dispatch': np.mean, - 'standing_loss': np.mean, - 'efficiency_store': np.mean, - 'p_min_pu': np.min}}, - generator_strategies={'p_nom_min':np.min, - 'p_nom_opt': np.sum, - 'marginal_cost': np.mean, - 'capital_cost': np.mean}, - aggregate_one_ports=aggregate_one_ports, - line_length_factor=kmean_settings['line_length_factor']) - - return clustering - -def run_kmeans_clustering(self): - - if self.args['network_clustering_kmeans']['active']: - - self.network.generators.control = "PV" - - logger.info('Start k-mean clustering') - - self.clustering = kmean_clustering(self) - - if self.args['disaggregation'] != None: - self.disaggregated_network = self.network.copy() - - self.network = self.clustering.network.copy() - - self.geolocation_buses() - - 
self.network.generators.control[self.network.generators.control == ''] = 'PV'
-
-        logger.info("Network clustered to {} buses with k-means algorithm."
-                    .format(self.args['network_clustering_kmeans']['n_clusters']))
\ No newline at end of file
diff --git a/etrago/cluster/snapshot.py b/etrago/cluster/snapshot.py
index 49c387753..7ceee4741 100644
--- a/etrago/cluster/snapshot.py
+++ b/etrago/cluster/snapshot.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016-2018 Flensburg University of Applied Sciences,
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
 # Europa-Universität Flensburg,
 # Centre for Sustainable Energy Systems
@@ -18,108 +18,245 @@
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 # File description for read-the-docs
-""" This module contains functions for calculating representative days/weeks
-based on a pyPSA network object. It is designed to be used for the `lopf`
-method. Essentially the tsam package
-( https://github.com/FZJ-IEK3-VSA/tsam ), which is developed by
-Leander Kotzur is used.
-
-Remaining questions/tasks:
-
-- Does it makes sense to cluster normed values?
-- Include scaling method for yearly sums
+""" This module contains functions for reducing the complexity of a PyPSA
+network in the temporal dimension by
+a) downsampling to every n-th snapshot
+b) clustering to typical periods (e.g. days, weeks)
+c) clustering to segments of variable length
+Essentially, the tsam package ( https://github.com/FZJ-IEK3-VSA/tsam ),
+developed by Leander Kotzur et al., is used.
 """
-import pandas as pd
 import os
-if 'READTHEDOCS' not in os.environ:
-    import pyomo.environ as po
+
+import pandas as pd
+
+if "READTHEDOCS" not in os.environ:
     import tsam.timeseriesaggregation as tsam
 
-__copyright__ = ("Flensburg University of Applied Sciences, "
-                 "Europa-Universität Flensburg, "
-                 "Centre for Sustainable Energy Systems")
+__copyright__ = (
+    "Flensburg University of Applied Sciences, "
+    "Europa-Universität Flensburg, "
+    "Centre for Sustainable Energy Systems"
+)
 __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
-__author__ = "Simon Hilpert"
+__author__ = """ClaraBuettner, ulfmueller, KathiEsterl, simnh, wheitkoetter,
+    BartelsJ, AmeliaNadal"""
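A minimal, self-contained sketch (synthetic data, not eTraGo's real input) of the tsam calls that the rewritten module wraps: clustering one year of hourly values to typical days, analogous to what `tsam_cluster()` below does internally.

```python
import pandas as pd
import tsam.timeseriesaggregation as tsam

# synthetic hourly profile for one year
idx = pd.date_range("2011-01-01", periods=8760, freq="h")
raw = pd.DataFrame({"load": 50 + 10 * (idx.hour > 7)}, index=idx)

aggregation = tsam.TimeSeriesAggregation(
    raw,
    noTypicalPeriods=10,        # 10 typical days
    hoursPerPeriod=24,          # "daily" clustering
    clusterMethod="hierarchical",
)
typical = aggregation.createTypicalPeriods()  # 10 x 24 h profiles
weights = aggregation.clusterPeriodNoOccur    # occurrences per typical day
```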
 
 
 def snapshot_clustering(self):
     """
-    """
-    if self.args['snapshot_clustering']['active']:
+    Function to call the snapshot clustering function with the respective
+    method and settings.
 
-        self.network = run(network=self.network.copy(),
-                           n_clusters=self.args['snapshot_clustering']['n_clusters'],
-                           how=self.args['snapshot_clustering']['how'],
-                           normed=False)
+    Raises
+    ------
+    ValueError
+        When calling a non-available method.
+
+    Returns
+    -------
+    None.
+
+    """
+
+    if self.args["snapshot_clustering"]["active"]:
+        # save second network for optional dispatch disaggregation
+        if self.args["temporal_disaggregation"]["active"]:
+            self.network_tsa = self.network.copy()
+
+        if self.args["snapshot_clustering"]["method"] == "segmentation":
+            self.network = run(
+                network=self.network.copy(),
+                n_clusters=1,
+                segmented_to=self.args["snapshot_clustering"]["n_segments"],
+                extreme_periods=self.args["snapshot_clustering"][
+                    "extreme_periods"
+                ],
+            )
+
+        elif self.args["snapshot_clustering"]["method"] == "typical_periods":
+            self.network = run(
+                network=self.network.copy(),
+                n_clusters=self.args["snapshot_clustering"]["n_clusters"],
+                how=self.args["snapshot_clustering"]["how"],
+                extreme_periods=self.args["snapshot_clustering"][
+                    "extreme_periods"
+                ],
+            )
+        else:
+            raise ValueError(
+                """Type of clustering should be 'typical_periods' or
+                'segmentation'"""
+            )
+
+
-def tsam_cluster(timeseries_df,
-                 typical_periods=10,
-                 how='daily',
-                 extremePeriodMethod = 'None'):
+def tsam_cluster(
+    timeseries_df,
+    typical_periods=10,
+    how="daily",
+    extremePeriodMethod="None",
+    segmentation=False,
+    segment_no=10,
+    segm_hoursperperiod=24,
+):
     """
+    Conducts the clustering of the snapshots for temporal aggregation with
+    the respective method.
+
     Parameters
     ----------
-    df : pd.DataFrame
-        DataFrame with timeseries to cluster
-    extremePeriodMethod: {'None','append','new_cluster_center',
-        'replace_cluster_center'}, default: 'None'
-        Method how to integrate extreme Periods
-        into to the typical period profiles.
-        None: No integration at all.
-        'append': append typical Periods to cluster centers
-        'new_cluster_center': add the extreme period as additional cluster
-            center. It is checked then for all Periods if they fit better
-            to the this new center or their original cluster center.
-        'replace_cluster_center': replaces the cluster center of the
-            cluster where the extreme period belongs to with the periodly
-            profile of the extreme period. (Worst case system design)
+    timeseries_df : pd.DataFrame
+        DataFrame with timeseries to cluster.
+    typical_periods : int, optional
+        Number of clusters for typical_periods. The default is 10.
+    how : {'daily', 'weekly', 'monthly'}, optional
+        Definition of period for typical_periods. The default is 'daily'.
+    extremePeriodMethod : {'None','append','new_cluster_center',
+        'replace_cluster_center'}, optional
+        Method to consider extreme snapshots in the reduced timeseries.
+        The default is 'None'.
+    segmentation : boolean, optional
+        Argument to activate the segmentation method. The default is False.
+    segment_no : int, optional
+        Number of segments for segmentation. The default is 10.
+    segm_hoursperperiod : int, optional
+        Only used for segmentation; ensures that all snapshots are
+        considered when clustering to segments. The default is 24.
 
     Returns
     -------
+    df_cluster : pd.DataFrame
+        Information on clusters after clustering to typical periods.
+    cluster_weights : dict
+        Weightings per cluster after clustering to typical periods.
+    dates : DatetimeIndex
+        Dates of clusters after clustering to typical periods.
+    hours : int
+        Hours per typical period.
+    df_i_h : pd.DataFrame
+        Information on clusters after clustering to typical periods.
     timeseries : pd.DataFrame
-        Clustered timeseries
+        Information on segments after segmentation.
+ """ - if how == 'daily': + if how == "daily": hours = 24 - period = ' days' - if how == 'weekly': + period = " days" + + elif how == "weekly": hours = 168 - period = ' weeks' + period = " weeks" + + elif how == "monthly": + hours = 720 + period = " months" - print('Snapshot clustering to ' + str(typical_periods) + period + - ' using extreme period method: ' + extremePeriodMethod) + elif how == "hourly": + hours = 1 + period = " hours" + + if segmentation: + hoursPerPeriod = segm_hoursperperiod + hours = 1 + else: + hoursPerPeriod = hours + + # define weight for weightDict: + # residual load should not impact cluster findings, + # but only be the optional parameter to choose an extreme period + weight = pd.Series(data=1, index=timeseries_df.columns) + weight["residual_load"] = 0 + weight = weight.to_dict() aggregation = tsam.TimeSeriesAggregation( timeseries_df, noTypicalPeriods=typical_periods, - extremePeriodMethod = extremePeriodMethod, - addPeakMin = ['residual_load'], - addPeakMax = ['residual_load'], + extremePeriodMethod=extremePeriodMethod, + addPeakMin=["residual_load"], + addPeakMax=["residual_load"], rescaleClusterPeriods=False, - hoursPerPeriod=hours, - clusterMethod='hierarchical') + hoursPerPeriod=hoursPerPeriod, + clusterMethod="hierarchical", + segmentation=segmentation, + noSegments=segment_no, + weightDict=weight, + ) + + if segmentation: + print( + "Snapshot clustering to " + + str(segment_no) + + " segments" + + "\n" + + "Using extreme period method: " + + extremePeriodMethod + ) + else: + print( + "Snapshot clustering to " + + str(typical_periods) + + period + + "\n" + + "Using extreme period method: " + + extremePeriodMethod + ) + + timeseries_creator = aggregation.createTypicalPeriods() + timeseries = timeseries_creator.copy() + + # If Segmentation is True, insert 'Dates' and 'SegmentNo' column in + # timeseries + if segmentation: + weights = timeseries.index.get_level_values(2) + dates_df = timeseries_df.index.get_level_values(0) + dates = [] + segmentno = [] + wcount = 0 + count = 0 + for weight in weights: + dates.append(dates_df[wcount]) + wcount = wcount + weight + segmentno.append(count) + count = count + 1 + timeseries.insert(0, "dates", dates, True) + timeseries.insert(1, "SegmentNo", segmentno, True) + timeseries.insert(2, "SegmentDuration", weights, True) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + + if "Unnamed: 0" in timeseries.columns: + del timeseries["Unnamed: 0"] + if "Segment Step" in timeseries.columns: + del timeseries["Segment Step"] + # print(timeseries) - timeseries = aggregation.createTypicalPeriods() cluster_weights = aggregation.clusterPeriodNoOccur - clusterOrder =aggregation.clusterOrder - clusterCenterIndices= aggregation.clusterCenterIndices + clusterOrder = aggregation.clusterOrder + clusterCenterIndices = aggregation.clusterCenterIndices - if extremePeriodMethod == 'new_cluster_center': - for i in aggregation.extremePeriods.keys(): - clusterCenterIndices.insert( - aggregation.extremePeriods[i]['newClusterNo'], - aggregation.extremePeriods[i]['stepNo']) + if segmentation: + if extremePeriodMethod != "None": + timeseries = segmentation_extreme_periods( + timeseries_df, timeseries, extremePeriodMethod + ) - if extremePeriodMethod == 'append': - for i in aggregation.extremePeriods.keys(): - clusterCenterIndices.insert( - aggregation.extremePeriods[i]['clusterNo'], - aggregation.extremePeriods[i]['stepNo']) + else: + if extremePeriodMethod == "new_cluster_center": + for i in 
aggregation.extremePeriods.keys(): + clusterCenterIndices.insert( + aggregation.extremePeriods[i]["newClusterNo"], + aggregation.extremePeriods[i]["stepNo"], + ) + + if extremePeriodMethod == "append": + for i in aggregation.extremePeriods.keys(): + clusterCenterIndices.insert( + aggregation.extremePeriods[i]["clusterNo"], + aggregation.extremePeriods[i]["stepNo"], + ) # get all index for every hour of that day of the clusterCenterIndices start = [] @@ -140,149 +277,570 @@ def tsam_cluster(timeseries_df, # get the origial Datetimeindex dates = timeseries_df.iloc[nrhours].index - #get list of representative days - representative_day=[] + # get list of representative days + representative_day = [] - #cluster:medoid des jeweiligen Clusters + # cluster:medoid des jeweiligen Clusters dic_clusterCenterIndices = dict(enumerate(clusterCenterIndices)) for i in clusterOrder: representative_day.append(dic_clusterCenterIndices[i]) - #get list of last hour of representative days - last_hour_datetime=[] + # get list of last and first hour of representative days + last_hour_datetime = [] for i in representative_day: last_hour = i * hours + hours - 1 last_hour_datetime.append(timeseries_df.index[last_hour]) - #create a dataframe (index=nr. of day in a year/candidate) - df_cluster = pd.DataFrame({ - 'Cluster': clusterOrder, #Cluster of the day - 'RepresentativeDay': representative_day, #representative day of the cluster - 'last_hour_RepresentativeDay': last_hour_datetime}) #last hour of the cluster + # create a dataframe (index=nr. of day in a year/candidate) + df_cluster = pd.DataFrame( + { + "Cluster": clusterOrder, # Cluster of the day + "RepresentativeDay": representative_day, # representative day of + # the cluster + "last_hour_RepresentativeDay": last_hour_datetime, + } + ) # last hour of the cluster df_cluster.index = df_cluster.index + 1 - df_cluster.index.name = 'Candidate' + df_cluster.index.name = "Candidate" - #create a dataframe each timeseries (h) and its candiddate day (i) df_i_h + # create a dataframe each timeseries (h) and its candiddate day (i) df_i_h nr_day = [] - x = len(timeseries_df.index)/hours+1 + x = len(timeseries_df.index) / hours + 1 - for i in range(1,int(x)): - j=1 + for i in range(1, int(x)): + j = 1 while j <= hours: nr_day.append(i) - j=j+1 - df_i_h = pd.DataFrame({'Timeseries': timeseries_df.index, - 'Candidate_day': nr_day}) - df_i_h.set_index('Timeseries',inplace=True) + j = j + 1 + df_i_h = pd.DataFrame( + {"Timeseries": timeseries_df.index, "Candidate_day": nr_day} + ) + df_i_h.set_index("Timeseries", inplace=True) + + return df_cluster, cluster_weights, dates, hours, df_i_h, timeseries - return df_cluster, cluster_weights, dates, hours, df_i_h +def segmentation_extreme_periods( + timeseries_df, timeseries, extremePeriodMethod +): + """ + Function to consider extreme snapshots while using segmentation. + + Parameters + ---------- + timeseries_df : pd.DataFrame + Dataframe wit timeseries to cluster. + timeseries : pd.DataFrame + Information on segments after segmentation. + extremePeriodMethod : {'None','append','new_cluster_center', + 'replace_cluster_center'}, optional method to consider extreme + snapshots in reduced timeseries. The default is 'None'. + + Raises + ------ + ValueError + When calling wrong method to consider extreme values. -def run(network, n_clusters=None, how='daily', - normed=False): + Returns + ------- + timeseries : pd.DataFrame + Information on segments including extreme snapshots after segmentation. 
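+
+    Examples
+    --------
+    A sketch of the intended call, assuming ``ts`` was returned by
+    :func:`tsam_cluster` with ``segmentation=True``::
+
+        ts = segmentation_extreme_periods(
+            timeseries_df, ts, extremePeriodMethod="append"
+        )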
""" + + # find maximum / minimum value in residual load + maxi = timeseries_df["residual_load"].idxmax() + mini = timeseries_df["residual_load"].idxmin() + + # add timestep if it is not already calculated + if maxi not in timeseries.index.get_level_values("dates"): + # identifiy timestep, adapt it to timeseries-df and add it + max_val = timeseries_df.loc[maxi].copy() + max_val["SegmentNo"] = len(timeseries) + max_val["SegmentDuration"] = 1 + max_val["dates"] = max_val.name + max_val = pd.DataFrame(max_val).transpose() + + if extremePeriodMethod == "append": + max_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries = timeseries.append(max_val) + timeseries = timeseries.sort_values(by="dates") + + # split up segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < maxi: + i = i + 1 + else: + timeseries[ + "SegmentDuration_Extreme" + ] = timeseries.index.get_level_values("SegmentDuration") + old_row = timeseries.iloc[i].copy() + old_row = pd.DataFrame(old_row).transpose() + + delta_t = ( + timeseries.index.get_level_values("dates")[i + 1] + - timeseries.index.get_level_values("dates")[i] + ) + delta_t = delta_t.total_seconds() / 3600 + timeseries["SegmentDuration_Extreme"].iloc[i] = delta_t + + timeseries_df["row_no"] = range(0, len(timeseries_df)) + new_row = int(timeseries_df.loc[maxi]["row_no"]) + 1 + new_date = timeseries_df[ + timeseries_df.row_no == new_row + ].index + + if new_date.isin( + timeseries.index.get_level_values("dates") + ): + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + break + else: + new_row = timeseries_df.iloc[new_row].copy() + new_row.drop("row_no", inplace=True) + new_row["SegmentNo"] = len(timeseries) + new_row["SegmentDuration"] = ( + old_row["SegmentDuration_Extreme"][0] - delta_t - 1 + ) + new_row["dates"] = new_row.name + new_row = pd.DataFrame(new_row).transpose() + new_row.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + for col in new_row.columns: + new_row[col][0] = old_row[col][0] + + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + timeseries = timeseries.append(new_row) + timeseries = timeseries.sort_values(by="dates") + break + + elif extremePeriodMethod == "replace_cluster_center": + # replace segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < maxi: + i = i + 1 + else: + if i == -1: + i = 0 + max_val[ + "SegmentDuration" + ] = timeseries.index.get_level_values("SegmentDuration")[i] + max_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries.drop(timeseries.index[i], inplace=True) + timeseries = timeseries.append(max_val) + timeseries = timeseries.sort_values(by="dates") + break + + else: + raise ValueError( + """Choose 'append' 
or 'replace_cluster_center' for + consideration of extreme periods with segmentation method""" + ) + + # add timestep if it is not already calculated + if mini not in timeseries.index.get_level_values("dates"): + # identifiy timestep, adapt it to timeseries-df and add it + min_val = timeseries_df.loc[mini].copy() + min_val["SegmentNo"] = len(timeseries) + 1 + min_val["SegmentDuration"] = 1 + min_val["dates"] = min_val.name + min_val = pd.DataFrame(min_val).transpose() + + if extremePeriodMethod == "append": + min_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries = timeseries.append(min_val) + timeseries = timeseries.sort_values(by="dates") + + # split up segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < mini: + i = i + 1 + else: + timeseries[ + "SegmentDuration_Extreme" + ] = timeseries.index.get_level_values("SegmentDuration") + old_row = timeseries.iloc[i].copy() + old_row = pd.DataFrame(old_row).transpose() + + delta_t = ( + timeseries.index.get_level_values("dates")[i + 1] + - timeseries.index.get_level_values("dates")[i] + ) + delta_t = delta_t.total_seconds() / 3600 + timeseries["SegmentDuration_Extreme"].iloc[i] = delta_t + + timeseries_df["row_no"] = range(0, len(timeseries_df)) + new_row = int(timeseries_df.loc[mini]["row_no"]) + 1 + new_date = timeseries_df[ + timeseries_df.row_no == new_row + ].index + + if new_date.isin( + timeseries.index.get_level_values("dates") + ): + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + break + else: + new_row = timeseries_df.iloc[new_row].copy() + new_row.drop("row_no", inplace=True) + new_row["SegmentNo"] = len(timeseries) + 1 + new_row["SegmentDuration"] = ( + old_row["SegmentDuration_Extreme"][0] - delta_t - 1 + ) + new_row["dates"] = new_row.name + new_row = pd.DataFrame(new_row).transpose() + new_row.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + for col in new_row.columns: + new_row[col][0] = old_row[col][0] + timeseries[ + "dates" + ] = timeseries.index.get_level_values("dates") + timeseries[ + "SegmentNo" + ] = timeseries.index.get_level_values("SegmentNo") + timeseries["SegmentDuration"] = timeseries[ + "SegmentDuration_Extreme" + ] + timeseries.drop( + "SegmentDuration_Extreme", axis=1, inplace=True + ) + timeseries.set_index( + ["dates", "SegmentNo", "SegmentDuration"], + inplace=True, + ) + timeseries = timeseries.append(new_row) + timeseries = timeseries.sort_values(by="dates") + break + + elif extremePeriodMethod == "replace_cluster_center": + # replace segment in which the extreme timestep was added + i = -1 + for date in timeseries.index.get_level_values("dates"): + if date < mini: + i = i + 1 + else: + if i == -1: + i = 0 + min_val[ + "SegmentDuration" + ] = timeseries.index.get_level_values("SegmentDuration")[i] + min_val.set_index( + ["dates", "SegmentNo", "SegmentDuration"], inplace=True + ) + timeseries.drop(timeseries.index[i], inplace=True) + timeseries = timeseries.append(min_val) + timeseries = timeseries.sort_values(by="dates") + break + + else: + raise ValueError( + """Choose 'append' or 'replace_cluster_center' for + consideration 
of extreme periods with segmentation method""" + ) + + if "row_no" in timeseries.columns: + timeseries.drop("row_no", axis=1, inplace=True) + + return timeseries + + +def run( + network, + n_clusters=None, + how="daily", + segmented_to=False, + extreme_periods="None", +): """ + Function to call the respecting snapshot clustering function and export the + result to a csv-file. + + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + n_clusters : int, optional + Number of clusters for typical_periods. The default is None. + how : {'daily', 'weekly', 'monthly'}, optional + Definition of period for typical_periods. The default is 'daily'. + segmented_to : int, optional + Number of segments for segmentation. The default is False. + extremePeriodMethod : {'None','append','new_cluster_center', + 'replace_cluster_center'}, optional + Method to consider extreme snapshots in reduced timeseries. + The default is 'None'. + + Returns + ------- + network : pypsa.Network object + Container for all network components. + + """ + + if segmented_to is not False: + segment_no = segmented_to + segmentation = True + + else: + segment_no = 24 + segmentation = False + + if not extreme_periods: + extreme_periods = "None" # calculate clusters - df_cluster, cluster_weights, dates, hours, df_i_h= tsam_cluster( - prepare_pypsa_timeseries(network), - typical_periods=n_clusters, - how='daily', - extremePeriodMethod = 'None') + ( + df_cluster, + cluster_weights, + dates, + hours, + df_i_h, + timeseries, + ) = tsam_cluster( + prepare_pypsa_timeseries(network), + typical_periods=n_clusters, + how="daily", + extremePeriodMethod=extreme_periods, + segmentation=segmentation, + segment_no=segment_no, + segm_hoursperperiod=network.snapshots.size, + ) + + if segmentation: + pd.DataFrame( + timeseries.reset_index(), + columns=["dates", "SegmentNo", "SegmentDuration"], + ).set_index("SegmentNo").to_csv( + "timeseries_segmentation=" + str(segment_no) + ".csv" + ) + else: + if how == "daily": + howie = "days" + elif how == "weekly": + howie = "weeks" + elif how == "monthly": + howie = "months" + elif how == "hourly": + howie = "hours" + df_cluster.to_csv( + "cluster_typical-periods=" + str(n_clusters) + howie + ".csv" + ) + network.cluster = df_cluster network.cluster_ts = df_i_h - update_data_frames(network, cluster_weights, dates, hours) + update_data_frames( + network, cluster_weights, dates, hours, timeseries, segmentation + ) return network -def prepare_pypsa_timeseries(network, normed=False): +def prepare_pypsa_timeseries(network): """ + Prepares timeseries and residual load timeseries for clustering. + + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + + Returns + ------- + df : pd.DataFrame + Timeseries to be considered when clustering. 
+
+    """
-    if normed:
-        normed_loads = network.loads_t.p_set / network.loads_t.p_set.max()
-        normed_loads.columns = 'L' + normed_loads.columns
-        normed_renewables = network.generators_t.p_max_pu
-        normed_renewables.columns = 'G' + normed_renewables.columns
-
-        df = pd.concat([normed_renewables,
-                        normed_loads], axis=1)
-    else:
-        loads = network.loads_t.p_set.copy()
-        loads.columns = 'L' + loads.columns
-        renewables = network.generators_t.p_max_pu.mul(
-            network.generators.p_nom[
-                network.generators_t.p_max_pu.columns], axis = 1).copy()
-        renewables.columns = 'G' + renewables.columns
-        residual_load=pd.DataFrame()
-        residual_load['residual_load']=loads.sum(axis=1)-renewables.sum(axis=1)
-        df = pd.concat([renewables, loads, residual_load], axis=1)
+
+    loads = network.loads_t.p_set.copy()
+    loads.columns = "L" + loads.columns
+
+    renewables = network.generators_t.p_max_pu.mul(
+        network.generators.p_nom[network.generators_t.p_max_pu.columns], axis=1
+    ).copy()
+    renewables.columns = "G" + renewables.columns
+
+    residual_load = pd.DataFrame()
+    residual_load["residual_load"] = loads.sum(axis=1) - renewables.sum(axis=1)
+    df = pd.concat([renewables, loads, residual_load], axis=1)

     return df


-def update_data_frames(network, cluster_weights, dates, hours):
-    """ Updates the snapshots, snapshots weights and the dataframes based on
+def update_data_frames(
+    network, cluster_weights, dates, hours, timeseries, segmentation
+):
+    """
+    Updates the snapshots, snapshot weightings and the dataframes based on
     the original data in the network and the medoids created by clustering
     these original data.

     Parameters
-    -----------
-    network : pyPSA network object
-    cluster_weights: dictionary
-    dates: Datetimeindex
-
+    ----------
+    network : pypsa.Network object
+        Container for all network components.
+    cluster_weights : dict
+        Weightings per cluster after clustering to typical periods.
+    dates : DatetimeIndex
+        Dates of clusters after clustering to typical periods.
+    hours : int
+        Hours per typical period.
+    timeseries : pd.DataFrame
+        Information on segments after segmentation.
+    segmentation : boolean
+        States whether segmentation (True) or clustering to typical
+        periods (False) has been used.
+
     Returns
     -------
-    network
+    network : pypsa.Network object
+        Container for all network components.
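+
+    Examples
+    --------
+    A sketch of the call as issued from :func:`run` after clustering to
+    typical periods (``segmentation=False``; the ``timeseries`` argument
+    is only read in the segmentation branch)::
+
+        update_data_frames(
+            network, cluster_weights, dates, hours, timeseries, False
+        )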
""" - network.snapshot_weightings = network.snapshot_weightings.loc[dates] - network.snapshots = network.snapshot_weightings.index - - # set new snapshot weights from cluster_weights - snapshot_weightings = [] - for i in cluster_weights.values(): - x = 0 - while x < hours: - snapshot_weightings.append(i) - x += 1 - for i in range(len(network.snapshot_weightings)): - network.snapshot_weightings[i] = snapshot_weightings[i] + if segmentation: + network.snapshots = timeseries.index.get_level_values(0) + network.snapshot_weightings["objective"] = pd.Series( + data=timeseries.index.get_level_values(2).values, + index=timeseries.index.get_level_values(0), + ) + network.snapshot_weightings["stores"] = pd.Series( + data=timeseries.index.get_level_values(2).values, + index=timeseries.index.get_level_values(0), + ) + network.snapshot_weightings["generators"] = pd.Series( + data=timeseries.index.get_level_values(2).values, + index=timeseries.index.get_level_values(0), + ) - # put the snapshot in the right order - network.snapshots.sort_values() - network.snapshot_weightings.sort_index() + else: + network.snapshots = dates + network.snapshot_weightings = network.snapshot_weightings.loc[dates] + + snapshot_weightings = [] + for i in cluster_weights.values(): + x = 0 + while x < hours: + snapshot_weightings.append(i) + x += 1 + for i in range(len(network.snapshot_weightings)): + network.snapshot_weightings["objective"][i] = snapshot_weightings[ + i + ] + network.snapshot_weightings["stores"][i] = snapshot_weightings[i] + network.snapshot_weightings["generators"][i] = snapshot_weightings[ + i + ] + + # put the snapshot in the right order + network.snapshots.sort_values() + network.snapshot_weightings.sort_index() + + print(network.snapshots) return network def skip_snapshots(self): - n_skip = self.args['skip_snapshots'] + """ + Conducts the downsapling to every n-th snapshot. - if n_skip: - self.network.snapshots = self.network.snapshots[::n_skip] + Returns + ------- + None. 
- self.network.snapshot_weightings = \ - self.network.snapshot_weightings[::n_skip] * n_skip + """ -#################################### -def manipulate_storage_invest(network, costs=None, wacc=0.05, lifetime=15): - # default: 4500 € / MW, high 300 €/MW - crf = (1 / wacc) - (wacc / ((1 + wacc) ** lifetime)) - network.storage_units.capital_cost = costs / crf + # save second network for optional dispatch disaggregation + if ( + self.args["temporal_disaggregation"]["active"] + and not self.args["snapshot_clustering"]["active"] + ): + self.network_tsa = self.network.copy() + n_skip = self.args["skip_snapshots"] -def write_lpfile(network=None, path=None): - network.model.write(path, - io_options={'symbolic_solver_labels': True}) + if n_skip: + last_weight = ( + int( + ( + self.network.snapshots[-1] + - self.network.snapshots[::n_skip][-1] + ).seconds + / 3600 + ) + + 1 + ) + self.network.snapshots = self.network.snapshots[::n_skip] -def fix_storage_capacity(network, resultspath, n_clusters): # "network" added - path = resultspath.strip('daily') - values = pd.read_csv(path + 'storage_capacity.csv')[n_clusters].values - network.storage_units.p_nom_max = values - network.storage_units.p_nom_min = values - resultspath = 'compare-' + resultspath + self.network.snapshot_weightings["objective"] = n_skip + self.network.snapshot_weightings["stores"] = n_skip + self.network.snapshot_weightings["generators"] = n_skip + + if last_weight < n_skip: + self.network.snapshot_weightings.loc[ + self.network.snapshot_weightings.index[-1] + ]["objective"] = last_weight + self.network.snapshot_weightings.loc[ + self.network.snapshot_weightings.index[-1] + ]["stores"] = last_weight + self.network.snapshot_weightings.loc[ + self.network.snapshot_weightings.index[-1] + ]["generators"] = last_weight diff --git a/etrago/cluster/spatial.py b/etrago/cluster/spatial.py new file mode 100755 index 000000000..139f948ee --- /dev/null +++ b/etrago/cluster/spatial.py @@ -0,0 +1,767 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2023 Flensburg University of Applied Sciences, +# Europa-Universität Flensburg, +# Centre for Sustainable Energy Systems, +# DLR-Institute for Networked Energy Systems + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +# File description for read-the-docs +""" spatial.py defines the methods to run spatial clustering on networks.""" + +import os + +if "READTHEDOCS" not in os.environ: + from itertools import product + from math import ceil + import logging + import multiprocessing as mp + + from networkx import NetworkXNoPath + from pypsa.networkclustering import ( + _flatten_multiindex, + busmap_by_kmeans, + busmap_by_stubs, + get_clustering_from_busmap, + ) + from sklearn.cluster import KMeans + from threadpoolctl import threadpool_limits + import networkx as nx + import numpy as np + import pandas as pd + import pypsa + + from etrago.tools.utilities import ( + buses_grid_linked, + buses_of_vlvl, + connected_grid_lines, + connected_transformer, + ) + + logger = logging.getLogger(__name__) + +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = ( + "MGlauer, MarlonSchlemminger, mariusves, BartelsJ, gnn, lukasoldi, " + "ulfmueller, lukasol, ClaraBuettner, CarlosEpia, KathiEsterl, " + "pieterhexen, fwitte, AmeliaNadal, cjbernal071421" +) + +# TODO: Workaround because of agg + + +def _make_consense_links(x): + """ + Ensure that all elements in the input Series `x` are identical, or that + they are all NaN. + + Parameters + ---------- + x : pandas.Series + A Series containing the values to be checked for consensus. + + Returns + ------- + object + The value of the first element in the Series `x`. + """ + + v = x.iat[0] + assert ( + x == v + ).all() or x.isnull().all(), ( + f"No consense in table links column {x.name}: \n {x}" + ) + return v + + +def nan_links(x): + return np.nan + + +def ext_storage(x): + v = any(x[x]) + return v + + +def sum_with_inf(x): + if (x == np.inf).any(): + return np.inf + else: + return x.sum() + + +def strategies_one_ports(): + return { + "StorageUnit": { + "marginal_cost": np.mean, + "capital_cost": np.mean, + "efficiency_dispatch": np.mean, + "standing_loss": np.mean, + "efficiency_store": np.mean, + "p_min_pu": np.min, + "p_nom_extendable": ext_storage, + "p_nom_max": sum_with_inf, + }, + "Store": { + "marginal_cost": np.mean, + "capital_cost": np.mean, + "standing_loss": np.mean, + "e_nom": np.sum, + "e_nom_min": np.sum, + "e_nom_max": sum_with_inf, + "e_initial": np.sum, + }, + } + + +def strategies_generators(): + return { + "p_nom_min": np.min, + "p_nom_max": sum_with_inf, + "weight": np.sum, + "p_nom": np.sum, + "p_nom_opt": np.sum, + "marginal_cost": np.mean, + "capital_cost": np.mean, + "e_nom_max": sum_with_inf, + } + + +def strategies_links(): + return { + "scn_name": _make_consense_links, + "bus0": _make_consense_links, + "bus1": _make_consense_links, + "carrier": _make_consense_links, + "p_nom": np.sum, + "p_nom_extendable": _make_consense_links, + "p_nom_max": sum_with_inf, + "capital_cost": np.mean, + "length": np.mean, + "geom": nan_links, + "topo": nan_links, + "type": nan_links, + "efficiency": np.mean, + "p_nom_min": np.sum, + "p_set": np.mean, + "p_min_pu": np.min, + "p_max_pu": np.max, + "marginal_cost": np.mean, + "terrain_factor": _make_consense_links, + "p_nom_opt": np.mean, + "country": nan_links, + "build_year": np.mean, + "lifetime": np.mean, + } + + +def group_links(network, with_time=True, carriers=None, cus_strateg=dict()): + """ + Aggregate network.links and network.links_t after any kind of clustering + + Parameters + 
---------- + network : pypsa.Network object + Container for all network components. + with_time : bool + says if the network object contains timedependent series. + carriers : list of strings + Describe which type of carriers should be aggregated. The default is + None. + strategies : dictionary + custom strategies to perform the aggregation + + Returns + ------- + new_df : + links aggregated based on bus0, bus1 and carrier + new_pnl : + links time series aggregated + """ + + def normed_or_uniform(x): + return ( + x / x.sum() + if x.sum(skipna=False) > 0 + else pd.Series(1.0 / len(x), x.index) + ) + + def arrange_dc_bus0_bus1(network): + dc_links = network.links[network.links.carrier == "DC"].copy() + dc_links["n0"] = dc_links.apply( + lambda x: x.bus0 if x.bus0 < x.bus1 else x.bus1, axis=1 + ) + dc_links["n1"] = dc_links.apply( + lambda x: x.bus0 if x.bus0 > x.bus1 else x.bus1, axis=1 + ) + dc_links["bus0"] = dc_links["n0"] + dc_links["bus1"] = dc_links["n1"] + dc_links.drop(columns=["n0", "n1"], inplace=True) + + network.links.drop(index=dc_links.index, inplace=True) + network.links = pd.concat([network.links, dc_links]) + + return network + + network = arrange_dc_bus0_bus1(network) + + if carriers is None: + carriers = network.links.carrier.unique() + + links_agg_b = network.links.carrier.isin(carriers) + links = network.links.loc[links_agg_b] + grouper = [links.bus0, links.bus1, links.carrier] + + weighting = links.p_nom.groupby(grouper, axis=0).transform( + normed_or_uniform + ) + strategies = strategies_links() + strategies.update(cus_strateg) + new_df = links.groupby(grouper, axis=0).agg(strategies) + new_df.index = _flatten_multiindex(new_df.index).rename("name") + new_df = pd.concat( + [new_df, network.links.loc[~links_agg_b]], axis=0, sort=False + ) + new_df["new_id"] = np.arange(len(new_df)).astype(str) + cluster_id = new_df["new_id"].to_dict() + new_df.set_index("new_id", inplace=True) + new_df.index = new_df.index.rename("Link") + + new_pnl = dict() + if with_time: + for attr, df in network.links_t.items(): + pnl_links_agg_b = df.columns.to_series().map(links_agg_b) + df_agg = df.loc[:, pnl_links_agg_b].astype(float) + if not df_agg.empty: + if attr in ["efficiency", "p_max_pu", "p_min_pu"]: + df_agg = df_agg.multiply( + weighting.loc[df_agg.columns], axis=1 + ) + pnl_df = df_agg.groupby(grouper, axis=1).sum() + pnl_df.columns = _flatten_multiindex(pnl_df.columns).rename( + "name" + ) + new_pnl[attr] = pd.concat( + [df.loc[:, ~pnl_links_agg_b], pnl_df], axis=1, sort=False + ) + new_pnl[attr].columns = new_pnl[attr].columns.map(cluster_id) + else: + new_pnl[attr] = network.links_t[attr] + + new_pnl = pypsa.descriptors.Dict(new_pnl) + + return new_df, new_pnl + + +def graph_from_edges(edges): + """ + Constructs an undirected multigraph from a list containing data on + weighted edges. + + Parameters + ---------- + edges : list + List of tuples each containing first node, second node, weight, key. + + Returns + ------- + M : :class:`networkx.classes.multigraph.MultiGraph` + """ + + M = nx.MultiGraph() + + for e in edges: + n0, n1, weight, key = e + + M.add_edge(n0, n1, weight=weight, key=key) + + return M + + +def gen(nodes, n, graph): + # TODO There could be a more convenient way of doing this. This generators + # single purpose is to prepare data for multiprocessing's starmap function. + """ + Generator for applying multiprocessing. + + Parameters + ---------- + nodes : list + List of nodes in the system. + n : int + Number of desired multiprocessing units. 
+ graph : :class:`networkx.classes.multigraph.MultiGraph` + Graph representation of an electrical grid. + + Returns + ------- + None + """ + + g = graph.copy() + + for i in range(0, len(nodes), n): + yield (nodes[i : i + n], g) + + +def shortest_path(paths, graph): + """ + Finds the minimum path lengths between node pairs defined in paths. + + Parameters + ---------- + paths : list + List of pairs containing a source and a target node + graph : :class:`networkx.classes.multigraph.MultiGraph` + Graph representation of an electrical grid. + + Returns + ------- + df : pd.DataFrame + DataFrame holding source and target node and the minimum path length. + """ + + idxnames = ["source", "target"] + idx = pd.MultiIndex.from_tuples(paths, names=idxnames) + df = pd.DataFrame(index=idx, columns=["path_length"]) + df.sort_index(inplace=True) + + df_isna = df.isnull() + for s, t in paths: + while df_isna.loc[(s, t), "path_length"]: + try: + s_to_other = nx.single_source_dijkstra_path_length(graph, s) + for t in idx.levels[1]: + if t in s_to_other: + df.loc[(s, t), "path_length"] = s_to_other[t] + else: + df.loc[(s, t), "path_length"] = np.inf + except NetworkXNoPath: + continue + df_isna = df.isnull() + + return df + + +def busmap_by_shortest_path(etrago, fromlvl, tolvl, cpu_cores=4): + """ + Creates a busmap for the EHV-Clustering between voltage levels based + on dijkstra shortest path. The result is automatically written to the + `model_draft` on the [www.openenergy-platform.org] + database with the name `ego_grid_pf_hv_busmap` and the attributes scn_name + (scenario name), bus0 (node before clustering), bus1 (node after + clustering) and path_length (path length). + An AssertionError occurs if buses with a voltage level are not covered by + the input lists 'fromlvl' or 'tolvl'. + + Parameters + ---------- + network : pypsa.Network + Container for all network components. + session : sqlalchemy.orm.session.Session object + Establishes interactions with the database. + fromlvl : list + List of voltage-levels to cluster. + tolvl : list + List of voltage-levels to remain. + cpu_cores : int + Number of CPU-cores. 
+ + Returns + ------- + None + """ + + # data preperation + s_buses = buses_grid_linked(etrago.network, fromlvl) + lines = connected_grid_lines(etrago.network, s_buses) + transformer = connected_transformer(etrago.network, s_buses) + mask = transformer.bus1.isin(buses_of_vlvl(etrago.network, tolvl)) + + dc = etrago.network.links[etrago.network.links.carrier == "DC"] + dc.index = "DC_" + dc.index + lines_plus_dc = pd.concat([lines, dc]) + lines_plus_dc = lines_plus_dc[etrago.network.lines.columns] + lines_plus_dc["carrier"] = "AC" + + # temporary end points, later replaced by bus1 pendant + t_buses = transformer[mask].bus0 + + # create all possible pathways + ppaths = list(product(s_buses, t_buses)) + + # graph creation + edges = [ + (row.bus0, row.bus1, row.length, ix) + for ix, row in lines_plus_dc.iterrows() + ] + M = graph_from_edges(edges) + + # applying multiprocessing + p = mp.Pool(cpu_cores) + + chunksize = ceil(len(ppaths) / cpu_cores) + container = p.starmap(shortest_path, gen(ppaths, chunksize, M)) + df = pd.concat(container) + + # post processing + df.sort_index(inplace=True) + df = df.fillna(10000000) + + mask = df.groupby(level="source")["path_length"].idxmin() + df = df.loc[mask, :] + + # rename temporary endpoints + df.reset_index(inplace=True) + df.target = df.target.map( + dict( + zip( + etrago.network.transformers.bus0, + etrago.network.transformers.bus1, + ) + ) + ) + + # append to busmap buses only connected to transformer + transformer = etrago.network.transformers + idx = list( + set(buses_of_vlvl(etrago.network, fromlvl)).symmetric_difference( + set(s_buses) + ) + ) + mask = transformer.bus0.isin(idx) + + toappend = pd.DataFrame( + list(zip(transformer[mask].bus0, transformer[mask].bus1)), + columns=["source", "target"], + ) + toappend["path_length"] = 0 + + df = pd.concat([df, toappend], ignore_index=True, axis=0) + + # append all other buses + buses = etrago.network.buses[etrago.network.buses.carrier == "AC"] + mask = buses.index.isin(df.source) + + assert (buses[~mask].v_nom.astype(int).isin(tolvl)).all() + + tofill = pd.DataFrame([buses.index[~mask]] * 2).transpose() + tofill.columns = ["source", "target"] + tofill["path_length"] = 0 + + df = pd.concat([df, tofill], ignore_index=True, axis=0) + df.drop_duplicates(inplace=True) + + df.rename(columns={"source": "bus0", "target": "bus1"}, inplace=True) + + busmap = pd.Series(df.bus1.values, index=df.bus0).to_dict() + + return busmap + + +def busmap_ehv_clustering(etrago): + """ + Generates a busmap that can be used to cluster an electrical network to + only extra high voltage buses. If a path to a busmap in a csv file is + passed in the arguments, it loads the csv file and returns it. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + + Returns + ------- + busmap : dict + Maps old bus_ids to new bus_ids. 
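+
+    Examples
+    --------
+    A sketch of the intended use; the bus id is illustrative. If
+    ``etrago.args["network_clustering_ehv"]["busmap"]`` is ``False``, the
+    busmap is computed (and dumped to ``ehv_elecgrid_busmap_result.csv``),
+    otherwise it is read from the given csv file::
+
+        busmap = busmap_ehv_clustering(etrago)
+        busmap["17"]  # id of the EHV bus that the 110 kV bus "17" maps to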
+ """ + + if etrago.args["network_clustering_ehv"]["busmap"] is False: + cpu_cores = etrago.args["network_clustering"]["CPU_cores"] + if cpu_cores == "max": + cpu_cores = mp.cpu_count() + else: + cpu_cores = int(cpu_cores) + + busmap = busmap_by_shortest_path( + etrago, + fromlvl=[110], + tolvl=[220, 380, 400, 450], + cpu_cores=cpu_cores, + ) + pd.DataFrame(busmap.items(), columns=["bus0", "bus1"]).to_csv( + "ehv_elecgrid_busmap_result.csv", + index=False, + ) + else: + busmap = pd.read_csv(etrago.args["network_clustering_ehv"]["busmap"]) + busmap = pd.Series( + busmap.bus1.apply(str).values, index=busmap.bus0.apply(str) + ).to_dict() + + return busmap + + +def kmean_clustering(etrago, selected_network, weight, n_clusters): + """ + Main function of the k-mean clustering approach. Maps an original + network to a new one with adjustable number of nodes and new coordinates. + + Parameters + ---------- + network : pypsa.Network + Container for all network components. + n_clusters : int + Desired number of clusters. + load_cluster : boolean + Loads cluster coordinates from a former calculation. + line_length_factor : float + Factor to multiply the crow-flies distance between new buses in order + to get new line lengths. + remove_stubs: boolean + Removes stubs and stubby trees (i.e. sequentially reducing dead-ends). + use_reduced_coordinates: boolean + If True, do not average cluster coordinates, but take from busmap. + bus_weight_tocsv : str + Creates a bus weighting based on conventional generation and load + and save it to a csv file. + bus_weight_fromcsv : str + Loads a bus weighting from a csv file to apply it to the clustering + algorithm. + + Returns + ------- + network : pypsa.Network + Container for all network components. + """ + network = etrago.network + kmean_settings = etrago.args["network_clustering"] + + with threadpool_limits(limits=kmean_settings["CPU_cores"], user_api=None): + # remove stubs + if kmean_settings["remove_stubs"]: + network.determine_network_topology() + busmap = busmap_by_stubs(network) + network.generators["weight"] = network.generators["p_nom"] + aggregate_one_ports = network.one_port_components.copy() + aggregate_one_ports.discard("Generator") + + # reset coordinates to the new reduced guys, rather than taking an + # average (copied from pypsa.networkclustering) + if kmean_settings["use_reduced_coordinates"]: + # TODO : FIX THIS HACK THAT HAS UNEXPECTED SIDE-EFFECTS, + # i.e. network is changed in place!! + network.buses.loc[ + busmap.index, ["x", "y"] + ] = network.buses.loc[busmap, ["x", "y"]].values + + clustering = get_clustering_from_busmap( + network, + busmap, + aggregate_generators_weighted=True, + one_port_strategies=strategies_one_ports(), + generator_strategies=strategies_generators(), + aggregate_one_ports=aggregate_one_ports, + line_length_factor=kmean_settings["line_length_factor"], + ) + etrago.network = clustering.network + + weight = weight.groupby(busmap.values).sum() + + # k-mean clustering + busmap = busmap_by_kmeans( + selected_network, + bus_weightings=pd.Series(weight), + n_clusters=n_clusters, + n_init=kmean_settings["n_init"], + max_iter=kmean_settings["max_iter"], + tol=kmean_settings["tol"], + random_state=kmean_settings["random_state"], + ) + + return busmap + + +def dijkstras_algorithm(buses, connections, medoid_idx, cpu_cores): + """ + Function for combination of k-medoids Clustering and Dijkstra's algorithm. 
+ Creates a busmap assigning the nodes of a original network to the nodes of + a clustered network considering the electrical distances based on + Dijkstra's shortest path. + + Parameters + ---------- + network : pypsa.Network + Container for all network components. + medoid_idx : pandas.Series + Indices of k-medoids + busmap_kmedoid: pandas.Series + Busmap based on k-medoids clustering + cpu_cores: string + numbers of cores used during multiprocessing + + Returns + ------- + busmap : pandas.Series + Mapping from bus ids to medoids ids + """ + + # original data + o_buses = buses.index + # k-medoids centers + medoid_idx = medoid_idx.astype("str") + c_buses = medoid_idx.tolist() + + # list of all possible pathways + ppathss = list(product(o_buses, c_buses)) + + # graph creation + edges = [ + (row.bus0, row.bus1, row.length, ix) + for ix, row in connections.iterrows() + ] + M = graph_from_edges(edges) + + # processor count + if cpu_cores == "max": + cpu_cores = mp.cpu_count() + else: + cpu_cores = int(cpu_cores) + + # calculation of shortest path between original points and k-medoids + # centers using multiprocessing + p = mp.Pool(cpu_cores) + chunksize = ceil(len(ppathss) / cpu_cores) + container = p.starmap(shortest_path, gen(ppathss, chunksize, M)) + df = pd.concat(container) + + # assignment of data points to closest k-medoids centers + df["path_length"] = pd.to_numeric(df["path_length"]) + mask = df.groupby(level="source")["path_length"].idxmin() + df_dijkstra = df.loc[mask, :] + df_dijkstra.reset_index(inplace=True) + + # delete double entries in df due to multiprocessing + df_dijkstra.drop_duplicates(inplace=True) + df_dijkstra.index = df_dijkstra["source"] + + # creation of new busmap with final assignment (format: medoids indices) + busmap_ind = pd.Series(df_dijkstra["target"], dtype=object).rename( + "final_assignment", inplace=True + ) + busmap_ind.index = df_dijkstra["source"] + + # adaption of busmap to format with labels (necessary for aggregation) + busmap = busmap_ind.copy() + mapping = pd.Series(index=medoid_idx, data=medoid_idx.index) + busmap = busmap_ind.map(mapping).astype(str) + busmap.index = list(busmap.index.astype(str)) + + return busmap + + +def kmedoids_dijkstra_clustering( + etrago, buses, connections, weight, n_clusters +): + """ + Applies a k-medoids clustering on the given network and calls the function + to conduct a Dijkstra's algorithm afterwards for the consideration of the + network's topology in the spatial clustering. + + Parameters + ---------- + etrago : Etrago + An instance of the Etrago class + buses : pandas.DataFrame + DataFrame with information about the buses of the network. + connections : pandas.DataFrame + DataFrame with information about the connections of the network + (links or lines). + weight : pandas.Series + Series with the weight for each bus. + n_clusters : int + The number of clusters to create. + + Returns + ------- + Tuple containing: + busmap : pandas.Series + Series containing the mapping of buses to their resp. medoids + medoid_idx : pandas.Series + Series containing the medoid indeces + """ + + settings = etrago.args["network_clustering"] + + # n_jobs was deprecated for the function fit(). 
scikit-learn recommends + # to use threadpool_limits: + # https://scikit-learn.org/stable/computing/parallelism.html + with threadpool_limits(limits=settings["CPU_cores"], user_api=None): + # remove stubs + if settings["remove_stubs"]: + logger.info( + """options remove_stubs and use_reduced_coordinates not + reasonable for k-medoids Dijkstra Clustering""" + ) + + bus_weightings = pd.Series(weight) + buses_i = buses.index + points = buses.loc[buses_i, ["x", "y"]].values.repeat( + bus_weightings.reindex(buses_i).astype(int), axis=0 + ) + + kmeans = KMeans( + init="k-means++", + n_clusters=n_clusters, + n_init=settings["n_init"], + max_iter=settings["max_iter"], + tol=settings["tol"], + random_state=settings["random_state"], + ) + kmeans.fit(points) + + busmap = pd.Series( + data=kmeans.predict(buses.loc[buses_i, ["x", "y"]]), + index=buses_i, + dtype=object, + ) + + # identify medoids per cluster -> k-medoids clustering + + distances = pd.DataFrame( + data=kmeans.transform(buses.loc[buses_i, ["x", "y"]].values), + index=buses_i, + dtype=object, + ) + distances = distances.apply(pd.to_numeric) + + medoid_idx = distances.idxmin() + + # dijkstra's algorithm + busmap = dijkstras_algorithm( + buses, + connections, + medoid_idx, + etrago.args["network_clustering"]["CPU_cores"], + ) + busmap.index.name = "bus_id" + + return busmap, medoid_idx diff --git a/etrago/sectors/heat.py b/etrago/sectors/heat.py deleted file mode 100755 index 6633ca984..000000000 --- a/etrago/sectors/heat.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, -# Europa-Universität Flensburg, -# Centre for Sustainable Energy Systems, -# DLR-Institute for Networked Energy Systems -# -# This program is free software; you can redistribute it and/or -# modify it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation; either version 3 of the -# License, or (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. -# -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . 
- -# File description -import numpy as np -import pandas as pd - -def _add_heat_sector(etrago): - - # Import heat buses, loads, links and generators from db and add to electrical network - pass - -def _try_add_heat_sector(etrago): - - for i in etrago.network.buses.index: - etrago.network.add("Bus", - "heat bus {}".format(i), - carrier="heat", - x = etrago.network.buses.x[i]+0.5, - y = etrago.network.buses.y[i]+0.5) - - etrago.network.add("Bus", - "heat store bus {}".format(i), - carrier="heat", - x = etrago.network.buses.x[i]+0.5, - y = etrago.network.buses.y[i]+0.5) - - etrago.network.add("Link", - "heat pump {}".format(i), - bus0 = i, - bus1 = "heat bus {}".format(i), - efficiency = 3, - p_nom = 4) - - etrago.network.add("Load", - "heat load {}".format(i), - bus = "heat bus {}".format(i), - p_set = np.random.rand(len(etrago.network.snapshots))*10) - - - etrago.network.add("Generator", - "solar thermal {}".format(i), - bus = "heat bus {}".format(i), - p_max_pu = np.random.rand(len(etrago.network.snapshots)), - p_nom = 2) - - etrago.network.add("Link", - "heat store charger {}".format(i), - bus0 = "heat bus {}".format(i), - bus1 = "heat store bus {}".format(i), - efficiency = 1, - p_nom = 4) - - etrago.network.add("Link", - "heat store discharger {}".format(i), - bus0 = "heat store bus {}".format(i), - bus1 = "heat bus {}".format(i), - efficiency = 1, - p_nom = 4) - - etrago.network.add("Store", - "heat store {}".format(i), - bus ="heat store bus {}".format(i), - e_nom = 40) \ No newline at end of file diff --git a/etrago/tools/__init__.py b/etrago/tools/__init__.py index 497ec4c5a..170b87144 100644 --- a/etrago/tools/__init__.py +++ b/etrago/tools/__init__.py @@ -1,8 +1,38 @@ +"""Multi purpose tools that don't fit anywhere else in eTraGo. """ -""" -__copyright__ = "tba" -__license__ = "tba" -__author__ = "tba" +__copyright__ = ( + "Copyright (C) 2023" + " Otto-von-Guericke-University Magdeburg," + " Research group for theoretical computer science" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = "gnn " + + +def noop(*ignored_arguments, **ignored_keyword_arguments): + """Do nothing. + + Accept all kinds of arguments, ignore them and do nothing. + """ + pass + +class Noops: + """Provide arbitrarily named methods that do nothing. + Any attribute access will return a method that does nothing, i.e. + all methods of this object are :py:func:`noop`s. Normally you don't + need to instantiate this class. All instances behave the same, so + the containing module provides one called :py:obj:`noops` which you + can import and use. + """ + + @classmethod + def __getattribute__(cls, ignored_name): + return noop + + +noops = Noops() +"""A default :py:class:`Noops` instance so you don't have to create one. 
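+
+A sketch of the intended use: every attribute lookup returns
+:py:func:`noop`, so any "method call" on it is accepted and silently
+ignored::
+
+    from etrago.tools import noops
+
+    noops.anything("positional", keyword=42)  # does nothing, returns None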
+""" diff --git a/etrago/tools/calc_results.py b/etrago/tools/calc_results.py index 7f4acc782..bf1edac83 100755 --- a/etrago/tools/calc_results.py +++ b/etrago/tools/calc_results.py @@ -22,149 +22,752 @@ calc_results.py defines methods to calculate results of eTraGo """ import os -if 'READTHEDOCS' not in os.environ: - import time + +if "READTHEDOCS" not in os.environ: import logging import pandas as pd - import numpy as np logger = logging.getLogger(__name__) -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" __author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" def _calc_storage_expansion(self): - """ Function that calulates storage expansion in MW + """Function that calulates storage expansion in MW + + + Returns + ------- + float + storage expansion in MW + + """ + return ( + ( + self.network.storage_units.p_nom_opt + - self.network.storage_units.p_nom_min + )[self.network.storage_units.p_nom_extendable] + .groupby(self.network.storage_units.carrier) + .sum() + ) + + +def _calc_store_expansion(self): + """Function that calulates store expansion in MW + + Returns + ------- + float + store expansion in MW + + """ + return (self.network.stores.e_nom_opt - self.network.stores.e_nom_min)[ + self.network.stores.e_nom_extendable + ] + + +def _calc_sectorcoupling_link_expansion(self): + """Function that calulates expansion of sectorcoupling links in MW + + Returns + ------- + float + link expansion in MW (differentiating between technologies) + + """ + ext_links = self.network.links[self.network.links.p_nom_extendable] + + links = [0, 0, 0, 0] + + l1 = ext_links[ext_links.carrier == "H2_to_power"] + l2 = ext_links[ext_links.carrier == "power_to_H2"] + l3 = ext_links[ext_links.carrier == "H2_to_CH4"] + l4 = ext_links[ext_links.carrier == "CH4_to_H2"] + + links[0] = (l1.p_nom_opt - l1.p_nom_min).sum() + links[1] = (l2.p_nom_opt - l2.p_nom_min).sum() + links[2] = (l3.p_nom_opt - l3.p_nom_min).sum() + links[3] = (l4.p_nom_opt - l4.p_nom_min).sum() + + return links + + +def _calc_network_expansion(self): + """Function that calulates electrical network expansion in MW + + Returns + ------- + float + network expansion (AC lines and DC links) in MW + + """ + + network = self.network + + lines = (network.lines.s_nom_opt - network.lines.s_nom_min)[ + network.lines.s_nom_extendable + ] + ext_links = network.links[network.links.p_nom_extendable] + ext_dc_lines = ext_links[ext_links.carrier == "DC"] - Returns - ------- - float - storage expansion in MW + dc_links = ext_dc_lines.p_nom_opt - ext_dc_lines.p_nom_min - """ - return (self.network.storage_units.p_nom_opt - - self.network.storage_units.p_nom_min - )[self.network.storage_units.p_nom_extendable]\ - .groupby(self.network.storage_units.carrier).sum() + return lines, dc_links def calc_investment_cost(self): - """ Function that calulates overall annualized investment costs. 
- - Returns - ------- - network_costs : float - Investments in line expansion - storage_costs : float - Investments in storage expansion - - """ - network = self.network - ext_storage = network.storage_units[network.storage_units.p_nom_extendable] - ext_lines = network.lines[network.lines.s_nom_extendable] - ext_links = network.links[network.links.p_nom_extendable] - ext_trafos = network.transformers[network.transformers.s_nom_extendable] - storage_costs = 0 - network_costs = [0, 0] - if not ext_storage.empty: - storage_costs = (ext_storage.p_nom_opt* - ext_storage.capital_cost).sum() + """Function that calulates overall annualized investment costs. + + Returns + ------- + network_costs : float + Investments in line expansion (AC+DC) + link_costs : float + Investments in sectorcoupling link expansion + stor_costs : float + Investments in storage and store expansion + + """ + network = self.network + + # electrical grid: AC lines, DC lines + + network_costs = [0, 0] + + ext_lines = network.lines[network.lines.s_nom_extendable] + ext_trafos = network.transformers[network.transformers.s_nom_extendable] + ext_links = network.links[network.links.p_nom_extendable] + ext_dc_lines = ext_links[ext_links.carrier == "DC"] + + if not ext_lines.empty: + network_costs[0] = ( + (ext_lines.s_nom_opt - ext_lines.s_nom_min) + * ext_lines.capital_cost + ).sum() + + if not ext_trafos.empty: + network_costs[0] = ( + network_costs[0] + + ( + (ext_trafos.s_nom_opt - ext_trafos.s_nom) + * ext_trafos.capital_cost + ).sum() + ) + + if not ext_dc_lines.empty: + network_costs[1] = ( + (ext_dc_lines.p_nom_opt - ext_dc_lines.p_nom_min) + * ext_dc_lines.capital_cost + ).sum() + + # links in other sectors / coupling different sectors - if not ext_lines.empty: - network_costs[0] = ((ext_lines.s_nom_opt-ext_lines.s_nom_min - )*ext_lines.capital_cost).sum() + link_costs = 0 - if not ext_links.empty: - network_costs[1] = ((ext_links.p_nom_opt-ext_links.p_nom_min - )*ext_links.capital_cost).sum() + ext_links = ext_links[ext_links.carrier != "DC"] - if not ext_trafos.empty: - network_costs[0] = network_costs[0]+(( - ext_trafos.s_nom_opt-ext_trafos.s_nom - )*ext_trafos.capital_cost).sum() + if not ext_links.empty: + link_costs = ( + (ext_links.p_nom_opt - ext_links.p_nom_min) + * ext_links.capital_cost + ).sum() + + # storage and store costs + + sto_costs = [0, 0] + + ext_storage = network.storage_units[network.storage_units.p_nom_extendable] + ext_store = network.stores[network.stores.e_nom_extendable] + + if not ext_storage.empty: + sto_costs[0] = (ext_storage.p_nom_opt * ext_storage.capital_cost).sum() + + if not ext_store.empty: + sto_costs[1] = (ext_store.e_nom_opt * ext_store.capital_cost).sum() + + return network_costs, link_costs, sto_costs - return network_costs, storage_costs def calc_marginal_cost(self): - """ - Function that caluclates and returns marginal costs, considering - generation and storage dispatch costs - - Returns - ------- - marginal_cost : float - Annual marginal cost in EUR - - """ - network = self.network - gen = network.generators_t.p.mul( - network.snapshot_weightings, axis=0).sum(axis=0).mul( - network.generators.marginal_cost).sum() - stor = network.storage_units_t.p.mul( - network.snapshot_weightings, axis=0).sum(axis=0).mul( - network.storage_units.marginal_cost).sum() - marginal_cost = gen + stor - return marginal_cost + """ + Function that caluclates and returns marginal costs, considering + generation and link and storage dispatch costs + + Returns + ------- + marginal_cost : float + Annual 
marginal cost in EUR + + """ + network = self.network + gen = ( + network.generators_t.p.mul( + network.snapshot_weightings.objective, axis=0 + ) + .sum(axis=0) + .mul(network.generators.marginal_cost) + .sum() + ) + link = ( + abs(network.links_t.p0) + .mul(network.snapshot_weightings.objective, axis=0) + .sum(axis=0) + .mul(network.links.marginal_cost) + .sum() + ) + stor = ( + network.storage_units_t.p.mul( + network.snapshot_weightings.objective, axis=0 + ) + .sum(axis=0) + .mul(network.storage_units.marginal_cost) + .sum() + ) + marginal_cost = gen + link + stor + return marginal_cost + + +def german_network(self): + """Cut out all network components in Germany + + Returns + ------- + new_network : pypsa.Network + Network with all components in Germany + + """ + keep_cntr = ["DE"] + new_idx = self.network.buses[ + self.network.buses.country.isin(keep_cntr) + ].index + + new_network = self.network.copy() + + # drop components of other countries + new_network.mremove( + "Bus", new_network.buses[~new_network.buses.index.isin(new_idx)].index + ) + + new_network.mremove( + "Line", + new_network.lines[ + ~new_network.lines.index.isin( + new_network.lines[ + ( + new_network.lines.bus0.isin(new_idx) + & new_network.lines.bus1.isin(new_idx) + ) + ].index + ) + ].index, + ) + new_network.mremove( + "Link", + new_network.links[ + ~new_network.links.index.isin( + new_network.links[ + ( + new_network.links.bus0.isin(new_idx) + & new_network.links.bus1.isin(new_idx) + ) + ].index + ) + ].index, + ) + + new_network.mremove( + "Transformer", + new_network.transformers[ + ~new_network.transformers.index.isin( + new_network.transformers[ + ( + new_network.transformers.bus0.isin(new_idx) + & new_network.transformers.bus1.isin(new_idx) + ) + ].index + ) + ].index, + ) + + new_network.mremove( + "Generator", + new_network.generators[ + ~new_network.generators.index.isin( + new_network.generators[ + new_network.generators.bus.isin(new_idx) + ].index + ) + ].index, + ) + + new_network.mremove( + "Load", + new_network.loads[ + ~new_network.loads.index.isin( + new_network.loads[new_network.loads.bus.isin(new_idx)].index + ) + ].index, + ) + + new_network.mremove( + "Store", + new_network.stores[ + ~new_network.stores.index.isin( + new_network.stores[new_network.stores.bus.isin(new_idx)].index + ) + ].index, + ) + + new_network.mremove( + "StorageUnit", + new_network.storage_units[ + ~new_network.storage_units.index.isin( + new_network.storage_units[ + new_network.storage_units.bus.isin(new_idx) + ].index + ) + ].index, + ) + + return new_network + + +def system_costs_germany(self): + """Calculte system costs for Germany + + Returns + ------- + marginal_cost : float + Marginal costs for dispatch in Germany + invest_cost : float + Annualized investment costs for components in Germany + import_costs : float + Costs for energy imported to Germany minus costs for exports + + """ + + network_de = self.german_network() + + marginal_cost = 0 + invest_cost = 0 + + for c in network_de.iterate_components(): + if c.name in ["Store"]: + value = "e" + elif c.name in ["Line", "Transformer"]: + value = "s" + else: + value = "p" + if c.name in network_de.one_port_components: + if "marginal_cost" in c.df.columns: + marginal_cost += ( + c.pnl.p.mul(c.df.marginal_cost) + .mul(network_de.snapshot_weightings.generators, axis=0) + .sum() + .sum() + ) + + else: + if "marginal_cost" in c.df.columns: + marginal_cost += ( + c.pnl.p0.mul(c.df.marginal_cost) + .mul(network_de.snapshot_weightings.generators, axis=0) + .sum() + .sum() + ) 
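+
+        # annualized investment costs: capacity built beyond the minimum
+        # ((p/s/e)_nom_opt - (p/s/e)_nom_min) multiplied with the capital
+        # cost, summed over all extendable components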
+        if c.name not in [
+            "Bus",
+            "Load",
+            "LineType",
+            "TransformerType",
+            "Carrier",
+        ]:
+            invest_cost += (
+                (
+                    c.df[c.df[f"{value}_nom_extendable"]][f"{value}_nom_opt"]
+                    - c.df[c.df[f"{value}_nom_extendable"]][f"{value}_nom_min"]
+                )
+                * c.df[c.df[f"{value}_nom_extendable"]]["capital_cost"]
+            ).sum()
+
+    # imports and exports and their costs
+    links_export = self.network.links[
+        (
+            self.network.links.bus0.isin(network_de.buses.index.values)
+            & ~(self.network.links.bus1.isin(network_de.buses.index.values))
+        )
+    ]
+
+    export_positive = (
+        self.network.links_t.p0[links_export.index]
+        .clip(lower=0)
+        .mul(self.network.snapshot_weightings.generators, axis=0)
+        .mul(
+            self.network.buses_t.marginal_price[links_export.bus1].values,
+        )
+        .sum()
+        .sum()
+    )
+
+    export_negative = (
+        self.network.links_t.p0[links_export.index]
+        .clip(upper=0)
+        .mul(self.network.snapshot_weightings.generators, axis=0)
+        .mul(
+            self.network.buses_t.marginal_price[links_export.bus1].values,
+        )
+        .mul(-1)
+        .sum()
+        .sum()
+    )
+
+    links_import = self.network.links[
+        (
+            self.network.links.bus1.isin(network_de.buses.index.values)
+            & ~(self.network.links.bus0.isin(network_de.buses.index.values))
+        )
+    ]
+
+    import_positive = (
+        self.network.links_t.p0[links_import.index]
+        .clip(lower=0)
+        .mul(self.network.snapshot_weightings.generators, axis=0)
+        .mul(
+            self.network.buses_t.marginal_price[links_import.bus1].values,
+        )
+        .sum()
+        .sum()
+    )
+
+    import_negative = (
+        self.network.links_t.p0[links_import.index]
+        .clip(upper=0)
+        .mul(self.network.snapshot_weightings.generators, axis=0)
+        .mul(
+            self.network.buses_t.marginal_price[links_import.bus1].values,
+        )
+        .mul(-1)
+        .sum()
+        .sum()
+    )
+
+    import_costs = (
+        export_negative + import_positive - export_positive - import_negative
+    )
+
+    return marginal_cost, invest_cost, import_costs
+
+
+def ac_export(self):
+    """Calculate electricity exports and imports over AC lines
+
+    Returns
+    -------
+    float
+        Electricity export (if negative: import) from Germany
+
+    """
+    de_buses = self.network.buses[self.network.buses.country == "DE"]
+    for_buses = self.network.buses[self.network.buses.country != "DE"]
+    exp = self.network.lines[
+        (self.network.lines.bus0.isin(de_buses.index))
+        & (self.network.lines.bus1.isin(for_buses.index))
+    ]
+    imp = self.network.lines[
+        (self.network.lines.bus1.isin(de_buses.index))
+        & (self.network.lines.bus0.isin(for_buses.index))
+    ]
+
+    return (
+        self.network.lines_t.p0[exp.index]
+        .sum(axis=1)
+        .mul(self.network.snapshot_weightings.generators)
+        .sum()
+        + self.network.lines_t.p1[imp.index]
+        .sum(axis=1)
+        .mul(self.network.snapshot_weightings.generators)
+        .sum()
+    )
+
+
+def ac_export_per_country(self):
+    """Calculate electricity exports and imports over AC lines per country
+
+    Returns
+    -------
+    pd.Series
+        Electricity export (if negative: import) from Germany per
+        country in TWh
+
+    """
+    de_buses = self.network.buses[self.network.buses.country == "DE"]
+
+    for_buses = self.network.buses[self.network.buses.country != "DE"]
+
+    result = pd.Series(index=for_buses.country.unique())
+
+    for c in for_buses.country.unique():
+        exp = self.network.lines[
+            (self.network.lines.bus0.isin(de_buses.index))
+            & (
+                self.network.lines.bus1.isin(
+                    for_buses[for_buses.country == c].index
+                )
+            )
+        ]
+        imp = self.network.lines[
+            (self.network.lines.bus1.isin(de_buses.index))
+            & (
+                self.network.lines.bus0.isin(
+                    for_buses[for_buses.country == c].index
+                )
+            )
+        ]
+
+        result[c] = (
+            self.network.lines_t.p0[exp.index]
+            .sum(axis=1)
+            .mul(self.network.snapshot_weightings.generators)
+            .sum()
+            + self.network.lines_t.p1[imp.index]
+            .sum(axis=1)
+            .mul(self.network.snapshot_weightings.generators)
+            .sum()
+        ) * 1e-6
+
+    return result
+
+
+def dc_export(self):
+    """Calculate electricity exports and imports over DC lines
+
+    Returns
+    -------
+    float
+        Electricity export (if negative: import) from Germany
+
+    """
+    de_buses = self.network.buses[self.network.buses.country == "DE"]
+    for_buses = self.network.buses[self.network.buses.country != "DE"]
+    exp = self.network.links[
+        (self.network.links.carrier == "DC")
+        & (self.network.links.bus0.isin(de_buses.index))
+        & (self.network.links.bus1.isin(for_buses.index))
+    ]
+    imp = self.network.links[
+        (self.network.links.carrier == "DC")
+        & (self.network.links.bus1.isin(de_buses.index))
+        & (self.network.links.bus0.isin(for_buses.index))
+    ]
+    return (
+        self.network.links_t.p0[exp.index]
+        .sum(axis=1)
+        .mul(self.network.snapshot_weightings.generators)
+        .sum()
+        + self.network.links_t.p1[imp.index]
+        .sum(axis=1)
+        .mul(self.network.snapshot_weightings.generators)
+        .sum()
+    )
+
+
+def dc_export_per_country(self):
+    """Calculate electricity exports and imports over DC lines per country
+
+    Returns
+    -------
+    pd.Series
+        Electricity export (if negative: import) from Germany per
+        country in TWh
+
+    """
+    de_buses = self.network.buses[self.network.buses.country == "DE"]
+
+    for_buses = self.network.buses[self.network.buses.country != "DE"]
+
+    result = pd.Series(index=for_buses.country.unique())
+
+    for c in for_buses.country.unique():
+        exp = self.network.links[
+            (self.network.links.carrier == "DC")
+            & (self.network.links.bus0.isin(de_buses.index))
+            & (
+                self.network.links.bus1.isin(
+                    for_buses[for_buses.country == c].index
+                )
+            )
+        ]
+        imp = self.network.links[
+            (self.network.links.carrier == "DC")
+            & (self.network.links.bus1.isin(de_buses.index))
+            & (
+                self.network.links.bus0.isin(
+                    for_buses[for_buses.country == c].index
+                )
+            )
+        ]
+
+        result[c] = (
+            self.network.links_t.p0[exp.index]
+            .sum(axis=1)
+            .mul(self.network.snapshot_weightings.generators)
+            .sum()
+            + self.network.links_t.p1[imp.index]
+            .sum(axis=1)
+            .mul(self.network.snapshot_weightings.generators)
+            .sum()
+        ) * 1e-6
+
+    return result
+
 def calc_etrago_results(self):
-    """ Function that calculates main results of grid optimization
-    and adds them to Etrago object.
-
-    Returns
-    -------
-    None.
-
-    """
-    self.results = pd.DataFrame(columns=['unit', 'value'],
-                                index=['annual system costs',
-                                       'annual_investment_costs',
-                                       'annual_marginal_costs',
-                                       'annual_grid_investment_costs',
-                                       'ac_annual_grid_investment_costs',
-                                       'dc_annual_grid_investment_costs',
-                                       'annual_storage_investment_costs',
-                                       'storage_expansion',
-                                       'battery_storage_expansion',
-                                       'hydrogen_storage_expansion',
-                                       'abs_network_expansion',
-                                       'rel_network_expansion'])
-
-    self.results.unit[self.results.index.str.contains('cost')] = 'EUR/a'
-    self.results.unit[self.results.index.isin([
-        'storage_expansion', 'abs_network_expansion',
-        'battery_storage_expansion', 'hydrogen_storage_expansion'])] = 'MW'
-    self.results.unit['abs_network_expansion'] = 'MW'
-    self.results.unit['rel_network_expansion'] = 'p.u.'
- - - - self.results.value['ac_annual_grid_investment_costs'] = calc_investment_cost(self)[0][0] - self.results.value['dc_annual_grid_investment_costs'] = calc_investment_cost(self)[0][1] - self.results.value['annual_grid_investment_costs'] = sum(calc_investment_cost(self)[0]) - - self.results.value['annual_storage_investment_costs'] = calc_investment_cost(self)[1] - - self.results.value['annual_investment_costs'] = \ - calc_investment_cost(self)[1] + sum(calc_investment_cost(self)[0]) - self.results.value['annual_marginal_costs'] = calc_marginal_cost(self) - - self.results.value['annual system costs'] = \ - self.results.value['annual_investment_costs'] + calc_marginal_cost(self) - - if 'storage' in self.args['extendable']: - self.results.value['storage_expansion'] = \ - _calc_storage_expansion(self).sum() - self.results.value['battery_storage_expansion'] = \ - _calc_storage_expansion(self)['extendable_battery_storage'] - self.results.value['hydrogen_storage_expansion'] = \ - _calc_storage_expansion(self)['extendable_hydrogen_storage'] - - if 'network' in self.args['extendable']: - self.results.value['abs_network_expansion'] + """Function that calculates main results of grid optimization + and adds them to Etrago object. + + Returns + ------- + None. + + """ + self.results = pd.DataFrame( + columns=["unit", "value"], + index=[ + "annual system costs", + "annual investment costs", + "annual marginal costs", + "annual electrical grid investment costs", + "annual ac grid investment costs", + "annual dc grid investment costs", + "annual links investment costs", + "annual storage+store investment costs", + "annual electrical storage investment costs", + "annual store investment costs", + "battery storage expansion", + "store expansion", + "H2 store expansion", + "CH4 store expansion", + "heat store expansion", + "storage+store expansion", + "fuel cell links expansion", + "electrolyzer links expansion", + "methanisation links expansion", + "Steam Methane Reformation links expansion", + "abs. electrical grid expansion", + "abs. electrical ac grid expansion", + "abs. electrical dc grid expansion", + "rel. electrical ac grid expansion", + "rel. electrical dc grid expansion", + ], + ) + + self.results.unit[self.results.index.str.contains("cost")] = "EUR/a" + self.results.unit[self.results.index.str.contains("expansion")] = "MW" + self.results.unit[self.results.index.str.contains("rel.")] = "p.u." 
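+
+    # 'rel.' entries relate the absolute expansion to the total
+    # s_nom / p_nom of the extendable AC lines and DC links, hence p.u.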
+ + # system costs + + self.results.value[ + "annual ac grid investment costs" + ] = calc_investment_cost(self)[0][0] + self.results.value[ + "annual dc grid investment costs" + ] = calc_investment_cost(self)[0][1] + self.results.value["annual electrical grid investment costs"] = sum( + calc_investment_cost(self)[0] + ) + + self.results.value["annual links investment costs"] = calc_investment_cost( + self + )[1] + + self.results.value[ + "annual electrical storage investment costs" + ] = calc_investment_cost(self)[2][0] + self.results.value["annual store investment costs"] = calc_investment_cost( + self + )[2][1] + self.results.value["annual storage+store investment costs"] = sum( + calc_investment_cost(self)[2] + ) + + self.results.value["annual investment costs"] = ( + sum(calc_investment_cost(self)[0]) + + calc_investment_cost(self)[1] + + sum(calc_investment_cost(self)[2]) + ) + self.results.value["annual marginal costs"] = calc_marginal_cost(self) + + self.results.value["annual system costs"] = ( + self.results.value["annual investment costs"] + + self.results.value["annual marginal costs"] + ) + + # storage and store expansion + + network = self.network + + if not network.storage_units[network.storage_units.p_nom_extendable].empty: + self.results.value[ + "battery storage expansion" + ] = _calc_storage_expansion(self).sum() + + store = _calc_store_expansion(self) + self.results.value["store expansion"] = store.sum() + self.results.value["H2 store expansion"] = store[ + store.index.str.contains("H2") + ].sum() + self.results.value["CH4 store expansion"] = store[ + store.index.str.contains("CH4") + ].sum() + self.results.value["heat store expansion"] = store[ + store.index.str.contains("heat") + ].sum() + + self.results.value["storage+store expansion"] = ( + self.results.value["battery storage expansion"] + + self.results.value["store expansion"] + ) + + # links expansion + + if not network.links[network.links.p_nom_extendable].empty: + links = _calc_sectorcoupling_link_expansion(self) + self.results.value["fuel cell links expansion"] = links[0] + self.results.value["electrolyzer links expansion"] = links[1] + self.results.value["methanisation links expansion"] = links[2] + self.results.value[ + "Steam Methane Reformation links expansion" + ] = links[3] + + # grid expansion + + if not network.lines[network.lines.s_nom_extendable].empty: + self.results.value[ + "abs. electrical ac grid expansion" + ] = _calc_network_expansion(self)[0].sum() + self.results.value[ + "abs. electrical dc grid expansion" + ] = _calc_network_expansion(self)[1].sum() + self.results.value["abs. electrical grid expansion"] = ( + self.results.value["abs. electrical ac grid expansion"] + + self.results.value["abs. electrical dc grid expansion"] + ) + + ext_lines = network.lines[network.lines.s_nom_extendable] + ext_links = network.links[network.links.p_nom_extendable] + ext_dc_lines = ext_links[ext_links.carrier == "DC"] + + self.results.value["rel. electrical ac grid expansion"] = ( + _calc_network_expansion(self)[0].sum() / ext_lines.s_nom.sum() + ) + self.results.value["rel. 
electrical dc grid expansion"] = ( + _calc_network_expansion(self)[1].sum() / ext_dc_lines.p_nom.sum() + ) diff --git a/etrago/tools/config.json b/etrago/tools/config.json deleted file mode 100644 index e340f62b9..000000000 --- a/etrago/tools/config.json +++ /dev/null @@ -1,45 +0,0 @@ -{ - "lopf": - { - "Bus": null, - "Generator": - { - "GeneratorPqSet": ["p_set", "p_max_pu"] - }, - "Line": null, - "Link": null, - "Transformer": null, - "Load": - { - "LoadPqSet": ["p_set", "q_set"] - }, - "Storage": - { - "StoragePqSet": ["p_set"] - } - }, - "pf": - { - "Bus": - { - "BusVMagSet":["v_mag_pu_set"] - }, - "Generator": - { - "GeneratorPqSet": ["p_set", "q_set"] - }, - "Line": null, - "Link": null, - "Transformer": null, - "Load": - { - "LoadPqSet": ["p_set", "q_set"] - }, - "Storage": - { - "StoragePqSet": ["p_set", "q_set"] - } - } -} - - diff --git a/etrago/tools/constraints.py b/etrago/tools/constraints.py index 57f1c0917..0893523bd 100755 --- a/etrago/tools/constraints.py +++ b/etrago/tools/constraints.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -21,23 +21,34 @@ """ Constraints.py includes additional constraints for eTraGo-optimizations """ +import os import logging + from pyomo.environ import Constraint +from pypsa.descriptors import expand_series +from pypsa.linopt import define_constraints, define_variables, get_var, linexpr +from pypsa.pf import get_switchable_as_dense as get_as_dense +import numpy as np import pandas as pd import pyomo.environ as po -from pypsa.linopt import get_var, linexpr, define_constraints + +if "READTHEDOCS" not in os.environ: + from etrago.tools import db logger = logging.getLogger(__name__) -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" +__author__ = """ulfmueller, s3pp, wolfbunke, mariusves, lukasol, AmeliaNadal, +CarlosEpia, ClaraBuettner, KathiEsterl""" -def _get_crossborder_components(network, cntr='all'): +def _get_crossborder_components(network, cntr="all"): """ Identifies foreign buses and crossborder ac- and dc-lines for all foreign countries or only one specific. 
@@ -65,27 +76,38 @@ def _get_crossborder_components(network, cntr='all'): Index of dc-lines from Germany to foreign country """ - buses_de = network.buses.index[network.buses.country_code == 'DE'] + buses_de = network.buses.index[network.buses.country == "DE"] - if cntr == 'all': - buses_for = network.buses.index[network.buses.country_code != 'DE'] + if cntr == "all": + buses_for = network.buses.index[network.buses.country != "DE"] else: - buses_for = network.buses.index[network.buses.country_code == cntr] - - cb0 = network.lines.index[(network.lines.bus0.isin(buses_for)) - & (network.lines.bus1.isin(buses_de))] - - cb1 = network.lines.index[(network.lines.bus1.isin(buses_for)) - & (network.lines.bus0.isin(buses_de))] - - cb0_link = network.links.index[(network.links.bus0.isin(buses_for)) - & (network.links.bus1.isin(buses_de))] - - cb1_link = network.links.index[(network.links.bus0.isin(buses_de)) - & (network.links.bus1.isin(buses_for))] + buses_for = network.buses.index[network.buses.country == cntr] + + cb0 = network.lines.index[ + (network.lines.bus0.isin(buses_for)) + & (network.lines.bus1.isin(buses_de)) + ] + + cb1 = network.lines.index[ + (network.lines.bus1.isin(buses_for)) + & (network.lines.bus0.isin(buses_de)) + ] + + cb0_link = network.links.index[ + (network.links.bus0.isin(buses_for)) + & (network.links.bus1.isin(buses_de)) + & (network.links.carrier == "DC") + ] + + cb1_link = network.links.index[ + (network.links.bus0.isin(buses_de)) + & (network.links.bus1.isin(buses_for)) + & (network.links.carrier == "DC") + ] return buses_de, buses_for, cb0, cb1, cb0_link, cb1_link + def _max_line_ext(self, network, snapshots): """ Extra-functionality that limits overall line expansion to a multiple @@ -103,21 +125,28 @@ def _max_line_ext(self, network, snapshots): None """ - lines_snom = network.lines.s_nom.sum() - links_pnom = network.links.p_nom.sum() + + lines_snom = network.lines.s_nom_min.sum() + + links_elec = network.links[network.links.carrier == "DC"] + links_index = links_elec.index + links_pnom = links_elec.p_nom_min.sum() def _rule(m): - lines_opt = sum(m.passive_branch_s_nom[index] - for index in m.passive_branch_s_nom_index) + lines_opt = sum( + m.passive_branch_s_nom[index] + for index in m.passive_branch_s_nom_index + ) - links_opt = sum(m.link_p_nom[index] - for index in m.link_p_nom_index) + links_opt = sum(m.link_p_nom[index] for index in links_index) - return (lines_opt + links_opt) <= (lines_snom + links_pnom)\ - * self.args['extra_functionality']['max_line_ext'] + return (lines_opt + links_opt) <= ( + lines_snom + links_pnom + ) * self.args["extra_functionality"]["max_line_ext"] network.model.max_line_ext = Constraint(rule=_rule) + def _max_line_ext_nmp(self, network, snapshots): """ Extra-functionality that limits overall line expansion to a multiple @@ -137,21 +166,28 @@ def _max_line_ext_nmp(self, network, snapshots): """ lines_snom = network.lines.s_nom.sum() - links_pnom = network.links.p_nom.sum() - get_var(network, 'Line', 's_nom') + + links_elec = network.links[network.links.carrier == "DC"] + links_index = links_elec.index + links_pnom = links_elec.p_nom_min.sum() + + get_var(network, "Line", "s_nom") def _rule(m): - lines_opt = sum(m.passive_branch_s_nom[index] - for index in m.passive_branch_s_nom_index) + lines_opt = sum( + m.passive_branch_s_nom[index] + for index in m.passive_branch_s_nom_index + ) - links_opt = sum(m.link_p_nom[index] - for index in m.link_p_nom_index) + links_opt = sum(m.link_p_nom[index] for index in links_index) - return 
(lines_opt + links_opt) <= (lines_snom + links_pnom)\ - * self.args['extra_functionality']['max_line_ext'] + return (lines_opt + links_opt) <= ( + lines_snom + links_pnom + ) * self.args["extra_functionality"]["max_line_ext"] network.model.max_line_ext = Constraint(rule=_rule) + def _min_renewable_share_nmp(self, network, snapshots): """ Extra-functionality that limits the minimum share of renewable generation. @@ -171,24 +207,45 @@ def _min_renewable_share_nmp(self, network, snapshots): """ - renewables = ['wind_onshore', 'wind_offshore', - 'biomass', 'solar', 'run_of_river'] + renewables = [ + "biomass", + "central_biomass_CHP", + "industrial_biomass_CHP", + "solar", + "solar_rooftop", + "wind_offshore", + "wind_onshore", + "run_of_river", + "other_renewable", + "central_biomass_CHP_heat", + "solar_thermal_collector", + "geo_thermal", + ] res = network.generators.index[network.generators.carrier.isin(renewables)] - renew = get_var(network, 'Generator', 'p').loc[network.snapshots, res].mul( - network.snapshot_weightings, axis=0) - total = get_var(network, 'Generator', 'p').mul( - network.snapshot_weightings, axis=0) + renew = ( + get_var(network, "Generator", "p") + .loc[network.snapshots, res] + .mul(network.snapshot_weightings.generators, axis=0) + ) + total = get_var(network, "Generator", "p").mul( + network.snapshot_weightings.generators, axis=0 + ) renew_production = linexpr((1, renew)).sum().sum() - total_production = linexpr(( - -self.args['extra_functionality']['min_renewable_share'], - total)).sum().sum() + total_production = ( + linexpr( + (-self.args["extra_functionality"]["min_renewable_share"], total) + ) + .sum() + .sum() + ) expr = renew_production + total_production - define_constraints(network, expr, '>=', 0, 'Generator', 'min_renew_share') + define_constraints(network, expr, ">=", 0, "Generator", "min_renew_share") + def _min_renewable_share(self, network, snapshots): """ @@ -209,280 +266,411 @@ def _min_renewable_share(self, network, snapshots): """ - renewables = ['wind_onshore', 'wind_offshore', - 'biomass', 'solar', 'run_of_river'] - - res = list(network.generators.index[ - network.generators.carrier.isin(renewables)]) + renewables = [ + "biomass", + "central_biomass_CHP", + "industrial_biomass_CHP", + "solar", + "solar_rooftop", + "wind_offshore", + "wind_onshore", + "run_of_river", + "other_renewable", + "CH4_biogas", + "central_biomass_CHP_heat", + "solar_thermal_collector", + "geo_thermal", + ] + + res = list( + network.generators.index[network.generators.carrier.isin(renewables)] + ) total = list(network.generators.index) def _rule(m): - - renewable_production = sum(m.generator_p[gen, sn] * \ - network.snapshot_weightings[sn] - for gen in res - for sn in snapshots) - total_production = sum(m.generator_p[gen, sn] * \ - network.snapshot_weightings[sn] - for gen in total - for sn in snapshots) - - return renewable_production >= total_production *\ - self.args['extra_functionality']['min_renewable_share'] + renewable_production = sum( + m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + for gen in res + for sn in snapshots + ) + total_production = sum( + m.generator_p[gen, sn] * network.snapshot_weightings.generators[sn] + for gen in total + for sn in snapshots + ) + + return ( + renewable_production + >= total_production + * self.args["extra_functionality"]["min_renewable_share"] + ) network.model.min_renewable_share = Constraint(rule=_rule) + def _cross_border_flow(self, network, snapshots): """ - Extra_functionality that limits overall 
crossborder flows from/to Germany. - Add key 'cross_border_flow' and array with minimal and maximal percent of - im- and exports as a fraction of loads in Germany. - Example: {'cross_border_flow': [-0.1, 0.1]} + Extra_functionality that limits overall AC crossborder flows from/to + Germany. Add key 'cross_border_flow' and array with minimal and maximal + import/export + Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. """ - buses_de, buses_for, cb0, cb1, cb0_link, cb1_link = \ - _get_crossborder_components(network) + ( + buses_de, + buses_for, + cb0, + cb1, + cb0_link, + cb1_link, + ) = _get_crossborder_components(network) export = pd.Series( - data=self.args['extra_functionality']['cross_border_flow'])*\ - network.loads_t.p_set.mul(network.snapshot_weightings, axis=0)\ - [network.loads.index[network.loads.bus.isin(buses_de)]].sum().sum() + data=self.args["extra_functionality"]["cross_border_flow"] + ) def _rule_min(m): - cb_flow = - sum(m.passive_branch_p['Line', line, sn] *\ - network.snapshot_weightings[sn] - for line in cb0 - for sn in snapshots) \ - + sum(m.passive_branch_p['Line', line, sn] *\ - network.snapshot_weightings[sn] - for line in cb1 - for sn in snapshots)\ - - sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb0_link - for sn in snapshots)\ - + sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb1_link - for sn in snapshots) + cb_flow = ( + -sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb0 + for sn in snapshots + ) + + sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb1 + for sn in snapshots + ) + - sum( + m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + for link in cb0_link + for sn in snapshots + ) + + sum( + m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + for link in cb1_link + for sn in snapshots + ) + ) return cb_flow >= export[0] def _rule_max(m): - cb_flow = - sum(m.passive_branch_p['Line', line, sn] * \ - network.snapshot_weightings[sn] - for line in cb0 - for sn in snapshots)\ - + sum(m.passive_branch_p['Line', line, sn] * \ - network.snapshot_weightings[sn] - for line in cb1 - for sn in snapshots)\ - - sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb0_link - for sn in snapshots)\ - + sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb1_link - for sn in snapshots) + cb_flow = ( + -sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb0 + for sn in snapshots + ) + + sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb1 + for sn in snapshots + ) + - sum( + m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + for link in cb0_link + for sn in snapshots + ) + + sum( + m.link_p[link, sn] * network.snapshot_weightings.objective[sn] + for link in cb1_link + for sn in snapshots + ) + ) return cb_flow <= export[1] network.model.cross_border_flows_min = Constraint(rule=_rule_min) network.model.cross_border_flows_max = Constraint(rule=_rule_max) + def _cross_border_flow_nmp(self, network, snapshots): """ Extra_functionality that limits overall crossborder flows from/to Germany. 
- Add key 'cross_border_flow' and array with minimal and maximal percent of - im- and exports as a fraction of loads in Germany. - Example: {'cross_border_flow': [-0.1, 0.1]} + Add key 'cross_border_flow' and array with minimal and maximal + import/export + Example: {'cross_border_flow': [-x, y]} (with x Import, y Export) + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. """ - buses_de, buses_for, cb0, cb1, cb0_link, cb1_link = \ - _get_crossborder_components(network) + ( + buses_de, + buses_for, + cb0, + cb1, + cb0_link, + cb1_link, + ) = _get_crossborder_components(network) export = pd.Series( - data=self.args['extra_functionality']['cross_border_flow'])*\ - network.loads_t.p_set.mul(network.snapshot_weightings, axis=0)\ - [network.loads.index[network.loads.bus.isin(buses_de)]].sum().sum() - - cb0_flow = get_var(network, 'Line', 's').loc[snapshots, cb0].mul( - network.snapshot_weightings, axis=0) - - cb1_flow = get_var(network, 'Line', 's').loc[snapshots, cb1].mul( - network.snapshot_weightings, axis=0) - - cb0_link_flow = get_var(network, 'Link', 'p').loc[snapshots, cb0_link].mul( - network.snapshot_weightings, axis=0) - - cb1_link_flow = get_var(network, 'Link', 'p').loc[snapshots, cb1_link].mul( - network.snapshot_weightings, axis=0) + data=self.args["extra_functionality"]["cross_border_flow"] + ) + + cb0_flow = ( + get_var(network, "Line", "s") + .loc[snapshots, cb0] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + cb1_flow = ( + get_var(network, "Line", "s") + .loc[snapshots, cb1] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + cb0_link_flow = ( + get_var(network, "Link", "p") + .loc[snapshots, cb0_link] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + cb1_link_flow = ( + get_var(network, "Link", "p") + .loc[snapshots, cb1_link] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + expr = ( + linexpr((-1, cb0_flow)).sum().sum() + + linexpr((1, cb1_flow)).sum().sum() + + linexpr((-1, cb0_link_flow)).sum().sum() + + linexpr((1, cb1_link_flow)).sum().sum() + ) + + define_constraints(network, expr, ">=", export[0], "Line", "min_cb_flow") + define_constraints(network, expr, "<=", export[1], "Line", "max_cb_flow") - expr = linexpr((-1, cb0_flow)).sum().sum() + \ - linexpr((1, cb1_flow)).sum().sum() +\ - linexpr((-1, cb0_link_flow)).sum().sum() +\ - linexpr((1, cb1_link_flow)).sum().sum() - - define_constraints(network, expr, '>=', export[0], 'Line', 'min_cb_flow') - define_constraints(network, expr, '<=', export[1], 'Line', 'max_cb_flow') def _cross_border_flow_per_country_nmp(self, network, snapshots): """ - Extra_functionality that limits crossborder flows for each given + Extra_functionality that limits AC crossborder flows for each given foreign country from/to Germany. Add key 'cross_border_flow_per_country' to args.extra_functionality and - define dictionary of country keys and desired limitations of im/exports as - a fraction of load in Germany. - Example: {'cross_border_flow_per_country': {'DK':[-0.05, 0.1], 'FR':[0,0]}} + define dictionary of country keys and desired limitations of im/exports + in MWh + Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. 
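+
+    A minimal sketch of the expected entry in ``args`` (country codes
+    and numbers are illustrative only; values are interpreted in MWh)::
+
+        args["extra_functionality"] = {
+            "cross_border_flow_per_country": {
+                "DK": [-50000, 50000],  # [max. import, max. export]
+                "FR": [0, 0],  # no exchange allowed
+            }
+        }
+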
""" - buses_de = network.buses.index[network.buses.country_code == 'DE'] + buses_de = network.buses.index[network.buses.country == "DE"] - countries = network.buses.country_code.unique() + countries = network.buses.country.unique() export_per_country = pd.DataFrame( - data=self.args['extra_functionality']['cross_border_flow_per_country'] - ).transpose()*network.loads_t.p_set.mul( - network.snapshot_weightings, axis=0)[network.loads.index[ - network.loads.bus.isin(buses_de)]].sum().sum() + data=self.args["extra_functionality"]["cross_border_flow_per_country"] + ).transpose() for cntr in export_per_country.index: if cntr in countries: - buses_de, buses_for, cb0, cb1, cb0_link, cb1_link = \ - _get_crossborder_components(network, cntr) - - cb0_flow = get_var(network, 'Line', 's').loc[snapshots, cb0].mul( - network.snapshot_weightings, axis=0) - - cb1_flow = get_var(network, 'Line', 's').loc[snapshots, cb1].mul( - network.snapshot_weightings, axis=0) + ( + buses_de, + buses_for, + cb0, + cb1, + cb0_link, + cb1_link, + ) = _get_crossborder_components(network, cntr) + + cb0_flow = ( + get_var(network, "Line", "s") + .loc[snapshots, cb0] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + cb1_flow = ( + get_var(network, "Line", "s") + .loc[snapshots, cb1] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + cb0_link_flow = ( + get_var(network, "Link", "p") + .loc[snapshots, cb0_link] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + cb1_link_flow = ( + get_var(network, "Link", "p") + .loc[snapshots, cb1_link] + .mul(network.snapshot_weightings.objective, axis=0) + ) + + expr = ( + linexpr((-1, cb0_flow)).sum().sum() + + linexpr((1, cb1_flow)).sum().sum() + + linexpr((-1, cb0_link_flow)).sum().sum() + + linexpr((1, cb1_link_flow)).sum().sum() + ) + + define_constraints( + network, + expr, + ">=", + export_per_country[cntr][0], + "Line", + "min_cb_flow_" + cntr, + ) + define_constraints( + network, + expr, + "<=", + export_per_country[cntr][1], + "Line", + "max_cb_flow_" + cntr, + ) - cb0_link_flow = get_var(network, 'Link', 'p').loc[ - snapshots, cb0_link].mul(network.snapshot_weightings, axis=0) - - cb1_link_flow = get_var(network, 'Link', 'p').loc[ - snapshots, cb1_link].mul(network.snapshot_weightings, axis=0) - - expr = linexpr((-1, cb0_flow)).sum().sum() + \ - linexpr((1, cb1_flow)).sum().sum() +\ - linexpr((-1, cb0_link_flow)).sum().sum() +\ - linexpr((1, cb1_link_flow)).sum().sum() - - define_constraints(network, expr, - '>=', export_per_country[cntr][0], - 'Line', 'min_cb_flow_' + cntr) - define_constraints(network, expr, - '<=', export_per_country[cntr][1], - 'Line', 'max_cb_flow_' + cntr) def _cross_border_flow_per_country(self, network, snapshots): """ - Extra_functionality that limits crossborder flows for each given + Extra_functionality that limits AC crossborder flows for each given foreign country from/to Germany. Add key 'cross_border_flow_per_country' to args.extra_functionality and - define dictionary of country keys and desired limitations of im/exports as - a fraction of load in Germany. - Example: {'cross_border_flow_per_country': {'DK':[-0.05, 0.1], 'FR':[0,0]}} + define dictionary of country keys and desired limitations of im/exports + in MWh + Example: {'cross_border_flow_per_country': {'DK':[-X, Y], 'FR':[0,0]}} + + Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA snapshots : pandas.DatetimeIndex List of timesteps considered in the optimization + Returns ------- None. 
""" - buses_de = network.buses.index[network.buses.country_code == 'DE'] + buses_de = network.buses.index[network.buses.country == "DE"] - countries = network.buses.country_code.unique() + countries = network.buses.country.unique() export_per_country = pd.DataFrame( - data=self.args['extra_functionality']['cross_border_flow_per_country'] - ).transpose()*network.loads_t.p_set.mul( - network.snapshot_weightings, axis=0)[network.loads.index[ - network.loads.bus.isin(buses_de)]].sum().sum() - + data=self.args["extra_functionality"]["cross_border_flow_per_country"] + ).transpose() for cntr in export_per_country.index: if cntr in countries: - buses_de, buses_for, cb0, cb1, cb0_link, cb1_link = \ - _get_crossborder_components(network, cntr) + ( + buses_de, + buses_for, + cb0, + cb1, + cb0_link, + cb1_link, + ) = _get_crossborder_components(network, cntr) def _rule_min(m): - cb_flow = - sum(m.passive_branch_p['Line', line, sn] *\ - network.snapshot_weightings[sn] - for line in cb0 - for sn in snapshots)\ - + sum(m.passive_branch_p['Line', line, sn] *\ - network.snapshot_weightings[sn] - for line in cb1 - for sn in snapshots)\ - - sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb0_link - for sn in snapshots)\ - + sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb1_link - for sn in snapshots) - + cb_flow = ( + -sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb0 + for sn in snapshots + ) + + sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb1 + for sn in snapshots + ) + - sum( + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] + for link in cb0_link + for sn in snapshots + ) + + sum( + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] + for link in cb1_link + for sn in snapshots + ) + ) return cb_flow >= export_per_country[0][cntr] - setattr(network.model, "min_cross_border" + cntr, - Constraint(cntr, rule=_rule_min)) + setattr( + network.model, + "min_cross_border-" + cntr, + Constraint(rule=_rule_min), + ) def _rule_max(m): - cb_flow = - sum(m.passive_branch_p['Line', line, sn] *\ - network.snapshot_weightings[sn] - for line in cb0 - for sn in snapshots)\ - + sum(m.passive_branch_p['Line', line, sn] *\ - network.snapshot_weightings[sn] - for line in cb1 - for sn in snapshots)\ - - sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb0_link - for sn in snapshots)\ - + sum(m.link_p[link, sn] * \ - network.snapshot_weightings[sn] - for link in cb1_link - for sn in snapshots) + cb_flow = ( + -sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb0 + for sn in snapshots + ) + + sum( + m.passive_branch_p["Line", line, sn] + * network.snapshot_weightings.objective[sn] + for line in cb1 + for sn in snapshots + ) + - sum( + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] + for link in cb0_link + for sn in snapshots + ) + + sum( + m.link_p[link, sn] + * network.snapshot_weightings.objective[sn] + for link in cb1_link + for sn in snapshots + ) + ) return cb_flow <= export_per_country[1][cntr] - setattr(network.model, "max_cross_border" + cntr, - Constraint(cntr, rule=_rule_max)) + setattr( + network.model, + "max_cross_border-" + cntr, + Constraint(rule=_rule_max), + ) + -def _generation_potential(network, carrier, cntr='all'): +def _generation_potential(network, carrier, cntr="all"): """ Function that calculates the 
generation potential for chosen carriers and countries. @@ -505,21 +693,33 @@ def _generation_potential(network, carrier, cntr='all'): """ - if cntr == 'all': + if cntr == "all": gens = network.generators.index[network.generators.carrier == carrier] else: gens = network.generators.index[ - (network.generators.carrier == carrier) & - (network.generators.bus.astype(str).isin( - network.buses.index[network.buses.country_code == cntr]))] - if carrier in ['wind_onshore', 'wind_offshore', 'solar']: - potential = (network.generators.p_nom[gens]*\ - network.generators_t.p_max_pu[gens].mul( - network.snapshot_weightings, axis=0) - ).sum().sum() + (network.generators.carrier == carrier) + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country == cntr] + ) + ) + ] + if carrier in ["wind_onshore", "wind_offshore", "solar"]: + potential = ( + ( + network.generators.p_nom[gens] + * network.generators_t.p_max_pu[gens].mul( + network.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .sum() + ) else: - potential = network.snapshot_weightings.sum() \ - * network.generators.p_nom[gens].sum() + potential = ( + network.snapshot_weightings.generators.sum() + * network.generators.p_nom[gens].sum() + ) return gens, potential @@ -531,7 +731,6 @@ def _capacity_factor(self, network, snapshots): a dictonary as a fraction of generation potential. Example: 'capacity_factor': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -544,35 +743,38 @@ def _capacity_factor(self, network, snapshots): None. """ - arg = self.args['extra_functionality']['capacity_factor'] + arg = self.args["extra_functionality"]["capacity_factor"] carrier = arg.keys() for c in carrier: factor = arg[c] - gens, potential = _generation_potential(network, c, cntr='all') + gens, potential = _generation_potential(network, c, cntr="all") def _rule_max(m): - - dispatch = sum(m.generator_p[gen, sn] * \ - network.snapshot_weightings[sn] - for gen in gens - for sn in snapshots) + dispatch = sum( + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] + for gen in gens + for sn in snapshots + ) return dispatch <= factor[1] * potential setattr(network.model, "max_flh_" + c, Constraint(rule=_rule_max)) def _rule_min(m): - - dispatch = sum(m.generator_p[gen, sn] * \ - network.snapshot_weightings[sn] - for gen in gens - for sn in snapshots) + dispatch = sum( + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] + for gen in gens + for sn in snapshots + ) return dispatch >= factor[0] * potential setattr(network.model, "min_flh_" + c, Constraint(rule=_rule_min)) + def _capacity_factor_nmp(self, network, snapshots): """ Extra-functionality that limits overall dispatch of generators with chosen @@ -581,7 +783,6 @@ def _capacity_factor_nmp(self, network, snapshots): a dictonary as a fraction of generation potential. Example: 'capacity_factor': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -594,21 +795,35 @@ def _capacity_factor_nmp(self, network, snapshots): None. 
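+
+    Notes
+    -----
+    The generation potential is computed by
+    :func:`_generation_potential`: for 'wind_onshore', 'wind_offshore'
+    and 'solar' it is ``p_nom * p_max_pu`` summed over the weighted
+    snapshots, for all other carriers it is
+    ``p_nom * snapshot_weightings.generators.sum()``.
+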
""" - arg = self.args['extra_functionality']['capacity_factor'] + arg = self.args["extra_functionality"]["capacity_factor"] carrier = arg.keys() for c in carrier: - gens, potential = _generation_potential(network, c, cntr='all') + gens, potential = _generation_potential(network, c, cntr="all") + + generation = ( + get_var(network, "Generator", "p") + .loc[snapshots, gens] + .mul(network.snapshot_weightings.generators, axis=0) + ) + + define_constraints( + network, + linexpr((1, generation)).sum().sum(), + ">=", + arg[c][0] * potential, + "Generator", + "min_flh_" + c, + ) + define_constraints( + network, + linexpr((1, generation)).sum().sum(), + "<=", + arg[c][1] * potential, + "Generator", + "max_flh_" + c, + ) - generation = get_var(network, 'Generator', 'p').loc[snapshots, gens].\ - mul(network.snapshot_weightings, axis=0) - - define_constraints(network, linexpr((1, generation)).sum().sum(), - '>=', arg[c][0] * potential, 'Generator', - 'min_flh_' + c) - define_constraints(network, linexpr((1, generation)).sum().sum(), - '<=', arg[c][1] * potential, 'Generator', - 'max_flh_' + c) def _capacity_factor_per_cntr(self, network, snapshots): """ @@ -633,36 +848,56 @@ def _capacity_factor_per_cntr(self, network, snapshots): ------- None. """ - arg = self.args['extra_functionality']['capacity_factor_per_cntr'] + arg = self.args["extra_functionality"]["capacity_factor_per_cntr"] for cntr in arg.keys(): carrier = arg[cntr].keys() for c in carrier: factor = arg[cntr][c] gens, potential = _generation_potential(network, c, cntr) - def _rule_max(m): + if len(gens) > 0: - dispatch = sum(m.generator_p[gen, sn] * \ - network.snapshot_weightings[sn] - for gen in gens - for sn in snapshots) + def _rule_max(m): + dispatch = sum( + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] + for gen in gens + for sn in snapshots + ) - return dispatch <= factor[1] * potential + return dispatch <= factor[1] * potential - setattr(network.model, "max_flh_" + cntr + '_'+ c, - Constraint(rule=_rule_max)) + setattr( + network.model, + "max_flh_" + cntr + "_" + c, + Constraint(rule=_rule_max), + ) - def _rule_min(m): + def _rule_min(m): + dispatch = sum( + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] + for gen in gens + for sn in snapshots + ) - dispatch = sum(m.generator_p[gen, sn] * \ - network.snapshot_weightings[sn] - for gen in gens - for sn in snapshots) + return dispatch >= factor[0] * potential - return dispatch >= factor[0] * potential + setattr( + network.model, + "min_flh_" + cntr + "_" + c, + Constraint(rule=_rule_min), + ) + + else: + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) - setattr(network.model, "min_flh_" + cntr + '_'+ c, - Constraint(rule=_rule_min)) def _capacity_factor_per_cntr_nmp(self, network, snapshots): """ @@ -687,21 +922,45 @@ def _capacity_factor_per_cntr_nmp(self, network, snapshots): ------- None. 
""" - arg = self.args['extra_functionality']['capacity_factor_per_cntr'] + arg = self.args["extra_functionality"]["capacity_factor_per_cntr"] for cntr in arg.keys(): carrier = arg[cntr].keys() for c in carrier: gens, potential = _generation_potential(network, c, cntr) - generation = get_var(network, 'Generator', 'p').loc[ - snapshots, gens].mul(network.snapshot_weightings, axis=0) + if len(gens) > 0: + generation = ( + get_var(network, "Generator", "p") + .loc[snapshots, gens] + .mul(network.snapshot_weightings.generators, axis=0) + ) + + define_constraints( + network, + linexpr((1, generation)).sum().sum(), + ">=", + arg[cntr][c][0] * potential, + "Generator", + "min_flh_" + c + "_" + cntr, + ) + define_constraints( + network, + linexpr((1, generation)).sum().sum(), + "<=", + arg[cntr][c][1] * potential, + "Generator", + "max_flh_" + c + "_" + cntr, + ) + + else: + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) - define_constraints(network, linexpr((1, generation)).sum().sum(), - '>=', arg[cntr][c][0] * potential, 'Generator', - 'min_flh_' + c + '_' + cntr) - define_constraints(network, linexpr((1, generation)).sum().sum(), - '<=', arg[cntr][c][1] * potential, 'Generator', - 'max_flh_' + c + '_' + cntr) def _capacity_factor_per_gen(self, network, snapshots): """ @@ -712,7 +971,6 @@ def _capacity_factor_per_gen(self, network, snapshots): Example: 'capacity_factor_per_gen': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -725,43 +983,52 @@ def _capacity_factor_per_gen(self, network, snapshots): None. """ - arg = self.args['extra_functionality']['capacity_factor_per_gen'] + arg = self.args["extra_functionality"]["capacity_factor_per_gen"] carrier = arg.keys() snapshots = network.snapshots for c in carrier: factor = arg[c] gens = network.generators.index[network.generators.carrier == c] for g in gens: - if c in ['wind_onshore', 'wind_offshore', 'solar']: - potential = (network.generators.p_nom[g]* - network.generators_t.p_max_pu[g].mul( - network.snapshot_weightings, axis=0) - ).sum().sum() + if c in ["wind_onshore", "wind_offshore", "solar"]: + potential = ( + ( + network.generators.p_nom[g] + * network.generators_t.p_max_pu[g].mul( + network.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .sum() + ) else: - potential = network.snapshot_weightings.sum() \ - * network.generators.p_nom[g].sum() + potential = ( + network.snapshot_weightings.generators.sum() + * network.generators.p_nom[g].sum() + ) def _rule_max(m): - - dispatch = sum(m.generator_p[g, sn] * \ - network.snapshot_weightings[sn] - for sn in snapshots) + dispatch = sum( + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] + for sn in snapshots + ) return dispatch <= factor[1] * potential - setattr(network.model, "max_flh_" + g, - Constraint(gens, rule=_rule_max)) + setattr(network.model, "max_flh_" + g, Constraint(rule=_rule_max)) def _rule_min(m): - - dispatch = sum(m.generator_p[g, sn] * \ - network.snapshot_weightings[sn] - for sn in snapshots) + dispatch = sum( + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] + for sn in snapshots + ) return dispatch >= factor[0] * potential - setattr(network.model, "min_flh_" + g, - Constraint(gens, rule=_rule_min)) + setattr(network.model, "min_flh_" + g, Constraint(rule=_rule_min)) + def _capacity_factor_per_gen_nmp(self, network, snapshots): """ @@ -772,7 +1039,6 @@ def _capacity_factor_per_gen_nmp(self, network, snapshots): Example: 
'capacity_factor_per_gen': {'run_of_river': [0, 0.5], 'solar': [0.1, 1]} - Parameters ---------- network : :class:`pypsa.Network @@ -785,30 +1051,52 @@ def _capacity_factor_per_gen_nmp(self, network, snapshots): None. """ - arg = self.args['extra_functionality']['capacity_factor_per_gen'] + arg = self.args["extra_functionality"]["capacity_factor_per_gen"] carrier = arg.keys() snapshots = network.snapshots for c in carrier: gens = network.generators.index[network.generators.carrier == c] for g in gens: - if c in ['wind_onshore', 'wind_offshore', 'solar']: - potential = (network.generators.p_nom[g]* - network.generators_t.p_max_pu[g].mul( - network.snapshot_weightings, axis=0) - ).sum().sum() + if c in ["wind_onshore", "wind_offshore", "solar"]: + potential = ( + ( + network.generators.p_nom[g] + * network.generators_t.p_max_pu[g].mul( + network.snapshot_weightings.generators, axis=0 + ) + ) + .sum() + .sum() + ) else: - potential = network.snapshot_weightings.sum() \ - * network.generators.p_nom[g].sum() - - generation = get_var(network, 'Generator', 'p').loc[ - snapshots, g].mul(network.snapshot_weightings, axis=0) + potential = ( + network.snapshot_weightings.generators.sum() + * network.generators.p_nom[g].sum() + ) + + generation = ( + get_var(network, "Generator", "p") + .loc[snapshots, g] + .mul(network.snapshot_weightings.generators, axis=0) + ) + + define_constraints( + network, + linexpr((1, generation)).sum(), + ">=", + arg[c][0] * potential, + "Generator", + "min_flh_" + g, + ) + define_constraints( + network, + linexpr((1, generation)).sum(), + "<=", + arg[c][1] * potential, + "Generator", + "max_flh_" + g, + ) - define_constraints(network, linexpr((1, generation)).sum(), - '>=', arg[c][0]*potential, 'Generator', - 'min_flh_' + g) - define_constraints(network, linexpr((1, generation)).sum(), - '<=', arg[c][1]*potential, 'Generator', - 'max_flh_' + g) def _capacity_factor_per_gen_cntr(self, network, snapshots): """ @@ -833,49 +1121,78 @@ def _capacity_factor_per_gen_cntr(self, network, snapshots): ------- None. 
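+
+    A minimal sketch of the expected entry in ``args``; the limits are
+    applied to every single generator of the given carrier and country
+    (values are illustrative only)::
+
+        args["extra_functionality"] = {
+            "capacity_factor_per_gen_cntr": {
+                "DE": {"run_of_river": [0.1, 0.9]}
+            }
+        }
+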
""" - arg = self.args['extra_functionality']\ - ['capacity_factor_per_gen_cntr'] + arg = self.args["extra_functionality"]["capacity_factor_per_gen_cntr"] for cntr in arg.keys(): - carrier = arg[cntr].keys() snapshots = network.snapshots for c in carrier: factor = arg[cntr][c] gens = network.generators.index[ - (network.generators.carrier == c) & - (network.generators.bus.astype(str).isin( - network.buses.index[network.buses.country_code == cntr]))] - for g in gens: - if c in ['wind_onshore', 'wind_offshore', 'solar']: - potential = (network.generators.p_nom[g]* - network.generators_t.p_max_pu[g].mul( - network.snapshot_weightings, axis=0) - ).sum().sum() - else: - potential = network.snapshot_weightings.sum() \ - * network.generators.p_nom[g].sum() + (network.generators.carrier == c) + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country == cntr] + ) + ) + ] + + if len(gens) > 0: + for g in gens: + if c in ["wind_onshore", "wind_offshore", "solar"]: + potential = ( + ( + network.generators.p_nom[g] + * network.generators_t.p_max_pu[g].mul( + network.snapshot_weightings.generators, + axis=0, + ) + ) + .sum() + .sum() + ) + else: + potential = ( + network.snapshot_weightings.generators.sum() + * network.generators.p_nom[g].sum() + ) + + def _rule_max(m): + dispatch = sum( + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] + for sn in snapshots + ) + return dispatch <= factor[1] * potential + + setattr( + network.model, + "max_flh_" + cntr + "_" + g, + Constraint(rule=_rule_max), + ) + + def _rule_min(m): + dispatch = sum( + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] + for sn in snapshots + ) + return dispatch >= factor[0] * potential + + setattr( + network.model, + "min_flh_" + cntr + "_" + g, + Constraint(rule=_rule_min), + ) - def _rule_max(m): - - dispatch = sum(m.generator_p[g, sn] * \ - network.snapshot_weightings[sn] - for sn in snapshots) - - return dispatch <= factor[1] * potential - - setattr(network.model, "max_flh_" + cntr + '_'+ g, - Constraint(gens, rule=_rule_max)) - - def _rule_min(m): - - dispatch = sum(m.generator_p[g, sn] * \ - network.snapshot_weightings[sn] - for sn in snapshots) - - return dispatch >= factor[0] * potential + else: + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) - setattr(network.model, "min_flh_" + cntr + '_'+ g, - Constraint(rule=_rule_min)) def _capacity_factor_per_gen_cntr_nmp(self, network, snapshots): """ @@ -900,298 +1217,1830 @@ def _capacity_factor_per_gen_cntr_nmp(self, network, snapshots): ------- None. 
""" - arg = self.args['extra_functionality']['capacity_factor_per_gen_cntr'] + arg = self.args["extra_functionality"]["capacity_factor_per_gen_cntr"] for cntr in arg.keys(): - carrier = arg[cntr].keys() for c in carrier: gens = network.generators.index[ - (network.generators.carrier == c) & - (network.generators.bus.astype(str).isin( - network.buses.index[network.buses.country_code == cntr]))] - for g in gens: - if c in ['wind_onshore', 'wind_offshore', 'solar']: - potential = (network.generators.p_nom[g]* - network.generators_t.p_max_pu[g].mul( - network.snapshot_weightings, axis=0) - ).sum().sum() + (network.generators.carrier == c) + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country == cntr] + ) + ) + ] + + if len(gens) > 0: + for g in gens: + if c in ["wind_onshore", "wind_offshore", "solar"]: + potential = ( + ( + network.generators.p_nom[g] + * network.generators_t.p_max_pu[g].mul( + network.snapshot_weightings.generators, + axis=0, + ) + ) + .sum() + .sum() + ) + else: + potential = ( + network.snapshot_weightings.generators.sum() + * network.generators.p_nom[g].sum() + ) + + generation = ( + get_var(network, "Generator", "p") + .loc[snapshots, g] + .mul(network.snapshot_weightings.generators, axis=0) + ) + + define_constraints( + network, + linexpr((1, generation)).sum(), + ">=", + arg[cntr][c][0] * potential, + "Generator", + "min_flh_" + g, + ) + define_constraints( + network, + linexpr((1, generation)).sum(), + "<=", + arg[cntr][c][1] * potential, + "Generator", + "max_flh_" + g, + ) + + else: + print( + "Carrier " + + c + + " is not available in " + + cntr + + ". Skipping this constraint." + ) + + +def read_max_gas_generation(self): + """Return the values limiting the gas production in Germany + + Read max_gas_generation_overtheyear from + scenario.egon_scenario_parameters if the table is available in the + database and return the dictionnary containing the values needed + for the constraints to limit the gas production in Germany, + depending of the scenario. + + Returns + ------- + arg: dict + + """ + scn_name = self.args["scn_name"] + arg_def = { + "eGon2035": { + "CH4": 36000000, + "biogas": 10000000, + }, # [MWh] Netzentwicklungsplan Gas 2020–2030 + "eGon2035_lowflex": { + "CH4": 36000000, + "biogas": 10000000, + }, # [MWh] Netzentwicklungsplan Gas 2020–2030 + "eGon100RE": { + "biogas": 14450103 + }, # [MWh] Value from reference p-e-s run used in eGon-data + } + engine = db.connection(section=self.args["db"]) + try: + sql = f""" + SELECT gas_parameters + FROM scenario.egon_scenario_parameters + WHERE name = '{scn_name}';""" + df = pd.read_sql(sql, engine) + arg = df["max_gas_generation_overtheyear"] + except: + arg = arg_def[scn_name] + + return arg + + +def add_ch4_constraints(self, network, snapshots): + """ + Add CH4 constraints for optimization with pyomo + + Functionality that limits the dispatch of CH4 generators. In + Germany, there is one limitation specific for biogas and one + limitation specific for natural gas (natural gas only in eGon2035). + Abroad, each generator has its own limitation contains in the + column e_nom_max. + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : pandas.DatetimeIndex + List of timesteps considered in the optimization + + Returns + ------- + None. 
+ """ + scn_name = self.args["scn_name"] + n_snapshots = self.args["end_snapshot"] - self.args["start_snapshot"] + 1 + + # Add constraint for Germany + arg = read_max_gas_generation(self) + gas_carrier = arg.keys() + + carrier_names = { + "eGon2035": {"CH4": "CH4_NG", "biogas": "CH4_biogas"}, + "eGon2035_lowflex": {"CH4": "CH4_NG", "biogas": "CH4_biogas"}, + "eGon100RE": {"biogas": "CH4"}, + } + + for c in gas_carrier: + gens = network.generators.index[ + (network.generators.carrier == carrier_names[scn_name][c]) + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country == "DE"] + ) + ) + ] + if not gens.empty: + factor = arg[c] + + def _rule_max(m): + dispatch = sum( + m.generator_p[gen, sn] + * network.snapshot_weightings.generators[sn] + for gen in gens + for sn in snapshots + ) + + return dispatch <= factor * (n_snapshots / 8760) + + setattr( + network.model, "max_flh_DE_" + c, Constraint(rule=_rule_max) + ) + + # Add contraints for neigbouring countries + gen_abroad = network.generators[ + (network.generators.carrier == "CH4") + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country != "DE"] + ) + ) + & (network.generators.e_nom_max != np.inf) + ] + for g in gen_abroad.index: + factor = network.generators.e_nom_max[g] + + def _rule_max(m): + dispatch = sum( + m.generator_p[g, sn] + * network.snapshot_weightings.generators[sn] + for sn in snapshots + ) + + return dispatch <= factor * (n_snapshots / 8760) + + setattr( + network.model, + "max_flh_abroad_" + str(g).replace(" ", "_"), + Constraint(rule=_rule_max), + ) + + +def add_ch4_constraints_nmp(self, network, snapshots): + """ + Add CH4 constraints for optimization without pyomo + + Functionality that limits the dispatch of CH4 generators. In + Germany, there is one limitation specific for biogas and one + limitation specific for natural gas (natural gas only in eGon2035). + Abroad, each generator has its own limitation contains in the + column e_nom_max. + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : pandas.DatetimeIndex + List of timesteps considered in the optimization + + Returns + ------- + None. 
+ """ + + scn_name = self.args["scn_name"] + n_snapshots = self.args["end_snapshot"] - self.args["start_snapshot"] + 1 + + # Add constraint for Germany + arg = read_max_gas_generation(self) + gas_carrier = arg.keys() + + carrier_names = { + "eGon2035": {"CH4": "CH4_NG", "biogas": "CH4_biogas"}, + "eGon100RE": {"biogas": "CH4"}, + } + + for c in gas_carrier: + gens = network.generators.index[ + (network.generators.carrier == carrier_names[scn_name][c]) + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country == "DE"] + ) + ) + ] + if not gens.empty: + factor = arg[c] + + generation = ( + get_var(network, "Generator", "p") + .loc[snapshots, gens] + .mul(network.snapshot_weightings.generators, axis=0) + ) + + define_constraints( + network, + linexpr((1, generation)).sum().sum(), + "<=", + factor * (n_snapshots / 8760), + "Generator", + "max_flh_DE_" + c, + ) + + # Add contraints for neigbouring countries + gen_abroad = network.generators[ + (network.generators.carrier == "CH4") + & ( + network.generators.bus.astype(str).isin( + network.buses.index[network.buses.country != "DE"] + ) + ) + & (network.generators.e_nom_max != np.inf) + ] + for g in gen_abroad.index: + factor = network.generators.e_nom_max[g] + + generation = ( + get_var(network, "Generator", "p") + .loc[snapshots, g] + .mul(network.snapshot_weightings.generators, axis=0) + ) + + define_constraints( + network, + linexpr((1, generation)).sum(), + "<=", + factor * (n_snapshots / 8760), + "Generator", + "max_flh_DE_" + str(g).replace(" ", "_"), + ) + + +def snapshot_clustering_daily_bounds(self, network, snapshots): + """ + Bound the storage level to 0.5 max_level every 24th hour. + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : pandas.DatetimeIndex + List of timesteps that will be constrained + + Returns + ------- + None + + """ + sus = network.storage_units + # take every first hour of the clustered days + network.model.period_starts = network.snapshot_weightings.index[0::24] + + network.model.storages = sus.index + + print("Setting daily_bounds constraint") + + def day_rule(m, s, p): + """ + Sets the soc of the every first hour to the + soc of the last hour of the day (i.e. + 23 hours) + """ + return ( + m.state_of_charge[s, p] + == m.state_of_charge[s, p + pd.Timedelta(hours=23)] + ) + + network.model.period_bound = Constraint( + network.model.storages, network.model.period_starts, rule=day_rule + ) + + +def snapshot_clustering_daily_bounds_nmp(self, network, snapshots): + """ + Bound the storage level to 0.5 max_level every 24th hour. 
+
+
+def snapshot_clustering_daily_bounds_nmp(self, network, snapshots):
+    """
+    Bound the storage level to 0.5 max_level every 24th hour.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    snapshots : pandas.DatetimeIndex
+        List of timesteps that will be constrained
+
+    Returns
+    -------
+    None
+
+    """
+
+    c = "StorageUnit"
+
+    period_starts = snapshots[0::24]
+    period_ends = period_starts + pd.Timedelta(hours=23)
+
+    eh = expand_series(
+        network.snapshot_weightings.objective[period_ends],
+        network.storage_units.index,
+    )  # elapsed hours
+
+    eff_stand = expand_series(1 - network.df(c).standing_loss, period_ends).T
+    eff_dispatch = expand_series(
+        network.df(c).efficiency_dispatch, period_ends
+    ).T
+    eff_store = expand_series(network.df(c).efficiency_store, period_ends).T
+
+    soc = get_var(network, c, "state_of_charge").loc[period_ends, :]
+
+    soc_period_start = get_var(network, c, "state_of_charge").loc[
+        period_starts
+    ]
+
+    coeff_var = [
+        (-1, soc),
+        (
+            -1 / eff_dispatch * eh,
+            get_var(network, c, "p_dispatch").loc[period_ends, :],
+        ),
+        (eff_store * eh, get_var(network, c, "p_store").loc[period_ends, :]),
+    ]
+
+    lhs, *axes = linexpr(*coeff_var, return_axes=True)
+
+    def masked_term(coeff, var, cols):
+        return (
+            linexpr((coeff[cols], var[cols]))
+            .reindex(index=axes[0], columns=axes[1], fill_value="")
+            .values
+        )
+
+    lhs += masked_term(
+        eff_stand, soc_period_start, network.storage_units.index
+    )
+
+    rhs = -get_as_dense(network, c, "inflow", period_ends).mul(eh)
+
+    define_constraints(network, lhs, "==", rhs, "daily_bounds")
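The seasonal-storage constraints that follow implement the decomposition from L. Kotzur et al. (2018): the total state of charge is split into an intra-period part, which is forced to zero at the start of each representative period, and an inter-period part, which links the candidate periods and carries energy across seasons. A minimal sketch of the decomposition (all names are illustrative, not part of this module):

.. code-block:: python

    # Sketch: Kotzur-style SOC decomposition, soc = soc_intra + soc_inter.
    def total_soc(soc_intra, soc_inter, candidate_of, s, h):
        """Total SOC of storage s at hour h within its candidate period."""
        return soc_intra[(s, h)] + soc_inter[(s, candidate_of[h])]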
+
+
+def snapshot_clustering_seasonal_storage(
+    self, network, snapshots, simplified=False
+):
+    """
+    Depicts intertemporal dependencies of storage units and stores when using
+    snapshot clustering to typical periods for temporal complexity reduction.
+
+    According to:
+        L. Kotzur et al: 'Time series aggregation for energy
+        system design:
+        Modeling seasonal storage', 2018
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    snapshots : list
+        A list of datetime objects representing the timestamps of the
+        snapshots to be clustered.
+    simplified : bool, optional
+        A flag indicating whether to use a simplified version of the model
+        that does not include intra-temporal constraints and variables.
+
+    Returns
+    -------
+    None
+    """
+
+    sus = network.storage_units
+    sto = network.stores
+
+    if self.args["snapshot_clustering"]["how"] == "weekly":
+        network.model.period_starts = network.snapshot_weightings.index[0::168]
+    elif self.args["snapshot_clustering"]["how"] == "monthly":
+        network.model.period_starts = network.snapshot_weightings.index[0::720]
+    else:
+        network.model.period_starts = network.snapshot_weightings.index[0::24]
+
+    network.model.storages = sus.index
+    network.model.stores = sto.index
+
+    candidates = network.cluster.index.get_level_values(0).unique()
+
+    # create set for inter-temp constraints and variables
+    network.model.candidates = po.Set(initialize=candidates, ordered=True)
+
+    if not simplified:
+        # create intra soc variable for each storage/store and each hour
+        network.model.state_of_charge_intra = po.Var(
+            sus.index, network.snapshots
+        )
+        network.model.state_of_charge_intra_store = po.Var(
+            sto.index, network.snapshots
+        )
+
+    else:
+        network.model.state_of_charge_intra_max = po.Var(
+            sus.index, network.model.candidates
+        )
+        network.model.state_of_charge_intra_min = po.Var(
+            sus.index, network.model.candidates
+        )
+        network.model.state_of_charge_intra_store_max = po.Var(
+            sto.index, network.model.candidates
+        )
+        network.model.state_of_charge_intra_store_min = po.Var(
+            sto.index, network.model.candidates
+        )
+
+        # create intra soc variable for each storage and each hour
+        network.model.state_of_charge_intra = po.Var(
+            sus.index, network.snapshots
+        )
+        network.model.state_of_charge_intra_store = po.Var(
+            sto.index, network.snapshots
+        )
+
+        def intra_max(model, st, h):
+            cand = network.cluster_ts["Candidate_day"][h]
+            return (
+                model.state_of_charge_intra_max[st, cand]
+                >= model.state_of_charge_intra[st, h]
+            )
+
+        network.model.soc_intra_max = Constraint(
+            network.model.storages, network.snapshots, rule=intra_max
+        )
+
+        def intra_min(model, st, h):
+            cand = network.cluster_ts["Candidate_day"][h]
+            return (
+                model.state_of_charge_intra_min[st, cand]
+                <= model.state_of_charge_intra[st, h]
+            )
+
+        network.model.soc_intra_min = Constraint(
+            network.model.storages, network.snapshots, rule=intra_min
+        )
+
+        def intra_max_store(model, st, h):
+            cand = network.cluster_ts["Candidate_day"][h]
+            return (
+                model.state_of_charge_intra_store_max[st, cand]
+                >= model.state_of_charge_intra_store[st, h]
+            )
+
+        network.model.soc_intra_store_max = Constraint(
+            network.model.stores, network.snapshots, rule=intra_max_store
+        )
+
+        def intra_min_store(model, st, h):
+            cand = network.cluster_ts["Candidate_day"][h]
+            return (
+                model.state_of_charge_intra_store_min[st, cand]
+                <= model.state_of_charge_intra_store[st, h]
+            )
+
+        network.model.soc_intra_store_min = Constraint(
+            network.model.stores, network.snapshots, rule=intra_min_store
+        )
+
+    def intra_soc_rule(m, s, h):
+        """
+        Sets soc_intra of the first hour of every period to 0. Other hours
+        are set by technical coherences of storage units
+
+        According to:
+        L. Kotzur et al: 'Time series aggregation for energy
+        system design:
+        Modeling seasonal storage', 2018, equation no. 18
+        """
+
+        if (
+            self.args["snapshot_clustering"]["how"] == "weekly"
+            and h in network.snapshot_weightings[0::168].index
+        ):
+            expr = m.state_of_charge_intra[s, h] == 0
+        elif (
+            self.args["snapshot_clustering"]["how"] == "monthly"
+            and h in network.snapshot_weightings[0::720].index
+        ):
+            expr = m.state_of_charge_intra[s, h] == 0
+        elif (
+            self.args["snapshot_clustering"]["how"] == "daily" and h.hour == 0
+        ):
+            expr = m.state_of_charge_intra[s, h] == 0
+        else:
+            expr = m.state_of_charge_intra[s, h] == m.state_of_charge_intra[
+                s, h - pd.DateOffset(hours=1)
+            ] * (1 - network.storage_units.at[s, "standing_loss"]) - (
+                m.storage_p_dispatch[s, h - pd.DateOffset(hours=1)]
+                / network.storage_units.at[s, "efficiency_dispatch"]
+                - network.storage_units.at[s, "efficiency_store"]
+                * m.storage_p_store[s, h - pd.DateOffset(hours=1)]
+            )
+        return expr
+
+    def intra_soc_rule_store(m, s, h):
+        if (
+            self.args["snapshot_clustering"]["how"] == "weekly"
+            and h in network.snapshot_weightings[0::168].index
+        ):
+            expr = m.state_of_charge_intra_store[s, h] == 0
+        elif (
+            self.args["snapshot_clustering"]["how"] == "monthly"
+            and h in network.snapshot_weightings[0::720].index
+        ):
+            expr = m.state_of_charge_intra_store[s, h] == 0
+        elif (
+            self.args["snapshot_clustering"]["how"] == "daily" and h.hour == 0
+        ):
+            expr = m.state_of_charge_intra_store[s, h] == 0
+        else:
+            expr = (
+                m.state_of_charge_intra_store[s, h]
+                == m.state_of_charge_intra_store[s, h - pd.DateOffset(hours=1)]
+                * (1 - network.stores.at[s, "standing_loss"])
+                + m.store_p[s, h - pd.DateOffset(hours=1)]
+            )
+        return expr
+
+    network.model.soc_intra = po.Constraint(
+        network.model.storages, network.snapshots, rule=intra_soc_rule
+    )
+    network.model.soc_intra_store = po.Constraint(
+        network.model.stores, network.snapshots, rule=intra_soc_rule_store
+    )
+
+    # create inter soc variable for each storage/store and each candidate
+    network.model.state_of_charge_inter = po.Var(
+        sus.index, network.model.candidates, within=po.NonNegativeReals
+    )
+    network.model.state_of_charge_inter_store = po.Var(
+        sto.index, network.model.candidates, within=po.NonNegativeReals
+    )
+
+    def inter_storage_soc_rule(m, s, i):
+        """
+        Define the state_of_charge_inter as the state_of_charge_inter of
+        the day before minus the storage losses plus the state_of_charge_intra
+        of one hour after the last hour of the representative day.
+        For the last representative day, the soc_inter is the same as
+        the first day due to the cyclic soc condition.
+
+        According to:
+        L. Kotzur et al: 'Time series aggregation for energy system design:
+        Modeling seasonal storage', 2018, equation no.
19 + """ + + if i == network.model.candidates[-1]: + last_hour = network.cluster["last_hour_RepresentativeDay"][i] + expr = po.Constraint.Skip + else: + last_hour = network.cluster["last_hour_RepresentativeDay"][i] + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + else: + hrs = 24 + expr = m.state_of_charge_inter[ + s, i + 1 + ] == m.state_of_charge_inter[s, i] * ( + 1 - network.storage_units.at[s, "standing_loss"] + ) ** hrs + m.state_of_charge_intra[ + s, last_hour + ] * ( + 1 - network.storage_units.at[s, "standing_loss"] + ) - ( + m.storage_p_dispatch[s, last_hour] + / network.storage_units.at[s, "efficiency_dispatch"] + - network.storage_units.at[s, "efficiency_store"] + * m.storage_p_store[s, last_hour] + ) + return expr + + def inter_store_soc_rule(m, s, i): + if i == network.model.candidates[-1]: + last_hour = network.cluster["last_hour_RepresentativeDay"][i] + expr = po.Constraint.Skip + else: + last_hour = network.cluster["last_hour_RepresentativeDay"][i] + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + else: + hrs = 24 + expr = ( + m.state_of_charge_inter_store[s, i + 1] + == m.state_of_charge_inter_store[s, i] + * (1 - network.stores.at[s, "standing_loss"]) ** hrs + + m.state_of_charge_intra_store[s, last_hour] + * (1 - network.stores.at[s, "standing_loss"]) + + m.store_p[s, last_hour] + ) + return expr + + network.model.inter_storage_soc_constraint = po.Constraint( + sus.index, network.model.candidates, rule=inter_storage_soc_rule + ) + network.model.inter_store_soc_constraint = po.Constraint( + sto.index, network.model.candidates, rule=inter_store_soc_rule + ) + + # new definition of the state_of_charge used in pypsa + + network.model.del_component("state_of_charge_constraint") + network.model.del_component("state_of_charge_constraint_index") + network.model.del_component("state_of_charge_constraint_index_0") + network.model.del_component("state_of_charge_constraint_index_1") + + network.model.del_component("store_constraint") + network.model.del_component("store_constraint_index") + network.model.del_component("store_constraint_index_0") + network.model.del_component("store_constraint_index_1") + + def total_state_of_charge(m, s, h): + """ + Define the state_of_charge as the sum of state_of_charge_inter + and state_of_charge_intra + + According to: + L. 
Kotzur et al: 'Time series aggregation for energy system design: + Modeling seasonal storage', 2018 + """ + + return ( + m.state_of_charge[s, h] + == m.state_of_charge_intra[s, h] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] + ) + + def total_state_of_charge_store(m, s, h): + return ( + m.store_e[s, h] + == m.state_of_charge_intra_store[s, h] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] + ) + + network.model.total_storage_constraint = po.Constraint( + sus.index, network.snapshots, rule=total_state_of_charge + ) + network.model.total_store_constraint = po.Constraint( + sto.index, network.snapshots, rule=total_state_of_charge_store + ) + + network.model.del_component("state_of_charge_lower") + network.model.del_component("state_of_charge_lower_index") + network.model.del_component("state_of_charge_lower_index_0") + network.model.del_component("state_of_charge_lower_index_1") + + network.model.del_component("store_e_lower") + network.model.del_component("store_e_lower_index") + network.model.del_component("store_e_lower_index_0") + network.model.del_component("store_e_lower_index_1") + + def state_of_charge_lower(m, s, h): + """ + Define the state_of_charge as the sum of state_of_charge_inter + and state_of_charge_intra + + According to: + L. Kotzur et al: 'Time series aggregation for energy system design: + Modeling seasonal storage', 2018 + """ + + # Choose datetime of representive day + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=167) + period_start = network.cluster_ts.index[0::168][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=719) + period_start = network.cluster_ts.index[0::720][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + else: + hrs = 24 + date = str( + network.snapshots[ + network.snapshots.dayofyear - 1 + == network.cluster["RepresentativeDay"][h.dayofyear] + ][0] + ).split(" ")[0] + hour = str(h).split(" ")[1] + intra_hour = pd.to_datetime(date + " " + hour) + + return ( + m.state_of_charge_intra[s, intra_hour] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] + * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs + >= 0 + ) + + def state_of_charge_lower_store(m, s, h): + # Choose datetime of representive day + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=167) + period_start = network.cluster_ts.index[0::168][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=719) + period_start = network.cluster_ts.index[0::720][candidate - 1] + delta_t = h - period_start + intra_hour = 
first_hour + delta_t + else: + hrs = 24 + date = str( + network.snapshots[ + network.snapshots.dayofyear - 1 + == network.cluster["RepresentativeDay"][h.dayofyear] + ][0] + ).split(" ")[0] + hour = str(h).split(" ")[1] + intra_hour = pd.to_datetime(date + " " + hour) + + if "DSM" in s: + low = ( + network.stores.e_nom[s] + * network.stores_t.e_min_pu.at[intra_hour, s] + ) + else: + low = 0 + + return ( + m.state_of_charge_intra_store[s, intra_hour] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] + * (1 - network.stores.at[s, "standing_loss"]) ** hrs + >= low + ) + + def state_of_charge_lower_simplified(m, s, h): + """ + Define the state_of_charge as the sum of state_of_charge_inter + and state_of_charge_intra + + According to: + L. Kotzur et al: 'Time series aggregation for energy system design: + Modeling seasonal storage', 2018 + """ + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + else: + hrs = 24 + + return ( + m.state_of_charge_intra_min[ + s, network.cluster_ts["Candidate_day"][h] + ] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] + * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs + >= 0 + ) + + def state_of_charge_lower_store_simplified(m, s, h): + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + else: + hrs = 24 + + if "DSM" in s: + if self.args["snapshot_clustering"]["how"] == "weekly": + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=167) + period_start = network.cluster_ts.index[0::168][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + elif self.args["snapshot_clustering"]["how"] == "monthly": + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=719) + period_start = network.cluster_ts.index[0::720][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + else: + date = str( + network.snapshots[ + network.snapshots.dayofyear - 1 + == network.cluster["RepresentativeDay"][h.dayofyear] + ][0] + ).split(" ")[0] + hour = str(h).split(" ")[1] + intra_hour = pd.to_datetime(date + " " + hour) + low = ( + network.stores.e_nom[s] + * network.stores_t.e_min_pu.at[intra_hour, s] + ) + else: + low = 0 + + return ( + m.state_of_charge_intra_store_min[ + s, network.cluster_ts["Candidate_day"][h] + ] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] + * (1 - network.stores.at[s, "standing_loss"]) ** hrs + >= low + ) + + if simplified: + network.model.state_of_charge_lower = po.Constraint( + sus.index, + network.cluster_ts.index, + rule=state_of_charge_lower_simplified, + ) + network.model.state_of_charge_lower_store = po.Constraint( + sto.index, + network.cluster_ts.index, + rule=state_of_charge_lower_store_simplified, + ) + + else: + network.model.state_of_charge_lower = po.Constraint( + sus.index, network.cluster_ts.index, rule=state_of_charge_lower + ) + network.model.state_of_charge_lower_store = po.Constraint( + sto.index, + network.cluster_ts.index, + rule=state_of_charge_lower_store, + ) + + network.model.del_component("state_of_charge_upper") + 
network.model.del_component("state_of_charge_upper_index") + network.model.del_component("state_of_charge_upper_index_0") + network.model.del_component("state_of_charge_upper_index_1") + + network.model.del_component("store_e_upper") + network.model.del_component("store_e_upper_index") + network.model.del_component("store_e_upper_index_0") + network.model.del_component("store_e_upper_index_1") + + def state_of_charge_upper(m, s, h): + # Choose datetime of representive day + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=167) + period_start = network.cluster_ts.index[0::168][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=719) + period_start = network.cluster_ts.index[0::720][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + else: + hrs = 24 # 0 + date = str( + network.snapshots[ + network.snapshots.dayofyear - 1 + == network.cluster["RepresentativeDay"][h.dayofyear] + ][0] + ).split(" ")[0] + hour = str(h).split(" ")[1] + intra_hour = pd.to_datetime(date + " " + hour) + + if network.storage_units.p_nom_extendable[s]: + p_nom = m.storage_p_nom[s] + else: + p_nom = network.storage_units.p_nom[s] + + return ( + m.state_of_charge_intra[s, intra_hour] + + m.state_of_charge_inter[ + s, network.cluster_ts["Candidate_day"][h] + ] + * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs + <= p_nom * network.storage_units.at[s, "max_hours"] + ) + + def state_of_charge_upper_store(m, s, h): + # Choose datetime of representive day + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 168 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=167) + period_start = network.cluster_ts.index[0::168][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 720 + candidate = network.cluster_ts["Candidate_day"][h] + last_hour = network.cluster.loc[candidate][ + "last_hour_RepresentativeDay" + ] + first_hour = last_hour - pd.DateOffset(hours=719) + period_start = network.cluster_ts.index[0::720][candidate - 1] + delta_t = h - period_start + intra_hour = first_hour + delta_t + else: + hrs = 24 # 0 + date = str( + network.snapshots[ + network.snapshots.dayofyear - 1 + == network.cluster["RepresentativeDay"][h.dayofyear] + ][0] + ).split(" ")[0] + hour = str(h).split(" ")[1] + intra_hour = pd.to_datetime(date + " " + hour) + + if network.stores.e_nom_extendable[s]: + e_nom = m.store_e_nom[s] + else: + if "DSM" in s: + e_nom = ( + network.stores.e_nom[s] + * network.stores_t.e_max_pu.at[intra_hour, s] + ) + else: + e_nom = network.stores.e_nom[s] + + return ( + m.state_of_charge_intra_store[s, intra_hour] + + m.state_of_charge_inter_store[ + s, network.cluster_ts["Candidate_day"][h] + ] + * (1 - network.stores.at[s, "standing_loss"]) ** hrs + <= e_nom + ) + + def state_of_charge_upper_simplified(m, s, h): + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 
168
+        elif self.args["snapshot_clustering"]["how"] == "monthly":
+            hrs = 720
+        else:
+            hrs = 24  # 0
+
+        if network.storage_units.p_nom_extendable[s]:
+            p_nom = m.storage_p_nom[s]
+        else:
+            p_nom = network.storage_units.p_nom[s]
+
+        return (
+            m.state_of_charge_intra_max[
+                s, network.cluster_ts["Candidate_day"][h]
+            ]
+            + m.state_of_charge_inter[
+                s, network.cluster_ts["Candidate_day"][h]
+            ]
+            * (1 - network.storage_units.at[s, "standing_loss"]) ** hrs
+            <= p_nom * network.storage_units.at[s, "max_hours"]
+        )
+
+    def state_of_charge_upper_store_simplified(m, s, h):
+        if self.args["snapshot_clustering"]["how"] == "weekly":
+            hrs = 168
+        elif self.args["snapshot_clustering"]["how"] == "monthly":
+            hrs = 720
+        else:
+            hrs = 24  # 0
+
+        if network.stores.e_nom_extendable[s]:
+            e_nom = m.store_e_nom[s]
+        else:
+            if "DSM" in s:
+                if self.args["snapshot_clustering"]["how"] == "weekly":
+                    candidate = network.cluster_ts["Candidate_day"][h]
+                    last_hour = network.cluster.loc[candidate][
+                        "last_hour_RepresentativeDay"
+                    ]
+                    first_hour = last_hour - pd.DateOffset(hours=167)
+                    period_start = network.cluster_ts.index[0::168][
+                        candidate - 1
+                    ]
+                    delta_t = h - period_start
+                    intra_hour = first_hour + delta_t
+
+                elif self.args["snapshot_clustering"]["how"] == "monthly":
+                    candidate = network.cluster_ts["Candidate_day"][h]
+                    last_hour = network.cluster.loc[candidate][
+                        "last_hour_RepresentativeDay"
+                    ]
+                    first_hour = last_hour - pd.DateOffset(hours=719)
+                    period_start = network.cluster_ts.index[0::720][
+                        candidate - 1
+                    ]
+                    delta_t = h - period_start
+                    intra_hour = first_hour + delta_t
+                else:
-            potential = network.snapshot_weightings.sum() \
-                * network.generators.p_nom[g].sum()
+                    date = str(
+                        network.snapshots[
+                            network.snapshots.dayofyear - 1
+                            == network.cluster["RepresentativeDay"][
+                                h.dayofyear
+                            ]
+                        ][0]
+                    ).split(" ")[0]
+                    hour = str(h).split(" ")[1]
+                    intra_hour = pd.to_datetime(date + " " + hour)
+                e_nom = (
+                    network.stores.e_nom[s]
+                    * network.stores_t.e_max_pu.at[intra_hour, s]
+                )
-            generation = get_var(network, 'Generator', 'p').loc[
-                snapshots, g].mul(network.snapshot_weightings, axis=0)
+            else:
+                e_nom = network.stores.e_nom[s]
+
+        return (
+            m.state_of_charge_intra_store_max[
+                s, network.cluster_ts["Candidate_day"][h]
+            ]
+            + m.state_of_charge_inter_store[
+                s, network.cluster_ts["Candidate_day"][h]
+            ]
+            * (1 - network.stores.at[s, "standing_loss"]) ** hrs
+            <= e_nom
+        )
+
+    if simplified:
+        network.model.state_of_charge_upper = po.Constraint(
+            sus.index,
+            network.cluster_ts.index,
+            rule=state_of_charge_upper_simplified,
+        )
+        network.model.state_of_charge_upper_store = po.Constraint(
+            sto.index,
+            network.cluster_ts.index,
+            rule=state_of_charge_upper_store_simplified,
+        )
-            define_constraints(network, linexpr((1, generation)).sum(),
-                               '>=', arg[cntr][c][0]*potential,
-                               'Generator', 'min_flh_' + g)
-            define_constraints(network, linexpr((1, generation)).sum(),
-                               '<=', arg[cntr][c][1]*potential,
-                               'Generator', 'max_flh_' + g)
+    else:
+        network.model.state_of_charge_upper = po.Constraint(
+            sus.index, network.cluster_ts.index, rule=state_of_charge_upper
+        )
+        network.model.state_of_charge_upper_store = po.Constraint(
+            sto.index,
+            network.cluster_ts.index,
+            rule=state_of_charge_upper_store,
+        )
+
+    def cyclic_state_of_charge(m, s):
+        """
+        Defines a cyclic condition like PyPSA's 'state_of_charge_constraint'.
+        There are small differences to original results.
+ """ + last_day = network.cluster.index[-1] + last_calc_hour = network.cluster["last_hour_RepresentativeDay"][ + last_day + ] + last_inter = m.state_of_charge_inter[s, last_day] + last_intra = m.state_of_charge_intra[s, last_calc_hour] + first_day = network.cluster.index[0] + + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 167 + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 719 + else: + hrs = 23 + + first_calc_hour = network.cluster["last_hour_RepresentativeDay"][ + first_day + ] - pd.DateOffset(hours=hrs) + first_inter = m.state_of_charge_inter[s, first_day] + first_intra = m.state_of_charge_intra[s, first_calc_hour] + + return first_intra + first_inter == ( + (last_intra + last_inter) + * (1 - network.storage_units.at[s, "standing_loss"]) + - ( + m.storage_p_dispatch[s, last_calc_hour] + / network.storage_units.at[s, "efficiency_dispatch"] + - m.storage_p_store[s, last_calc_hour] + * network.storage_units.at[s, "efficiency_store"] + ) + ) + + def cyclic_state_of_charge_store(m, s): + last_day = network.cluster.index[-1] + last_calc_hour = network.cluster["last_hour_RepresentativeDay"][ + last_day + ] + last_inter = m.state_of_charge_inter_store[s, last_day] + last_intra = m.state_of_charge_intra_store[s, last_calc_hour] + first_day = network.cluster.index[0] + + if self.args["snapshot_clustering"]["how"] == "weekly": + hrs = 167 + elif self.args["snapshot_clustering"]["how"] == "monthly": + hrs = 719 + else: + hrs = 23 + + first_calc_hour = network.cluster["last_hour_RepresentativeDay"][ + first_day + ] - pd.DateOffset(hours=hrs) + first_inter = m.state_of_charge_inter_store[s, first_day] + first_intra = m.state_of_charge_intra_store[s, first_calc_hour] + + expr = first_intra + first_inter == ( + (last_intra + last_inter) + * (1 - network.stores.at[s, "standing_loss"]) + + m.store_p[s, last_calc_hour] + ) + + return expr + + network.model.cyclic_storage_constraint = po.Constraint( + sus.index, rule=cyclic_state_of_charge + ) + network.model.cyclic_store_constraint = po.Constraint( + sto.index, rule=cyclic_state_of_charge_store + ) + + +def snapshot_clustering_seasonal_storage_hourly(self, network, snapshots): + """ + Depicts intertemporal dependencies of storage units and stores when using + snapshot clustering to typical periods for temporal complexity reduction. -class Constraints: + According to: + L. Kotzur et al: 'Time series aggregation for energy + system design: + Modeling seasonal storage', 2018 + + Parameters + ---------- + network : :class:`pypsa.Network` + Overall container of PyPSA + snapshots : list + A list of datetime objects representing the timestamps of the snapshots + to be clustered. 
+
+    Returns
+    -------
+    None
+    """
+
+    # TODO: update to include stores (sector coupling)
+
+    network.model.del_component("state_of_charge_all")
+    network.model.del_component("state_of_charge_all_index")
+    network.model.del_component("state_of_charge_all_index_0")
+    network.model.del_component("state_of_charge_all_index_1")
+    network.model.del_component("state_of_charge_constraint")
+    network.model.del_component("state_of_charge_constraint_index")
+    network.model.del_component("state_of_charge_constraint_index_0")
+    network.model.del_component("state_of_charge_constraint_index_1")
+
+    candidates = network.cluster.index.get_level_values(0).unique()
+    network.model.state_of_charge_all = po.Var(
+        network.storage_units.index,
+        candidates - 1 + self.args["start_snapshot"],
+        within=po.NonNegativeReals,
+    )
+    network.model.storages = network.storage_units.index
+
+    def set_soc_all(m, s, h):
+        if h == self.args["start_snapshot"]:
+            prev = (
+                network.cluster.index.get_level_values(0)[-1]
+                - 1
+                + self.args["start_snapshot"]
+            )
+
+        else:
+            prev = h - 1
+
+        cluster_hour = network.cluster["Hour"][
+            h + 1 - self.args["start_snapshot"]
+        ]
+
+        expr = m.state_of_charge_all[s, h] == m.state_of_charge_all[
+            s, prev
+        ] * (1 - network.storage_units.at[s, "standing_loss"]) - (
+            m.storage_p_dispatch[s, cluster_hour]
+            / network.storage_units.at[s, "efficiency_dispatch"]
+            - network.storage_units.at[s, "efficiency_store"]
+            * m.storage_p_store[s, cluster_hour]
+        )
+        return expr
+
+    network.model.soc_all = po.Constraint(
+        network.model.storages,
+        candidates - 1 + self.args["start_snapshot"],
+        rule=set_soc_all,
+    )
+
+    def soc_equals_soc_all(m, s, h):
+        hour = (h.dayofyear - 1) * 24 + h.hour
+
+        return m.state_of_charge_all[s, hour] == m.state_of_charge[s, h]
+
+    network.model.soc_equals_soc_all = po.Constraint(
+        network.model.storages, network.snapshots, rule=soc_equals_soc_all
+    )
+
+    network.model.del_component("state_of_charge_upper")
+    network.model.del_component("state_of_charge_upper_index")
+    network.model.del_component("state_of_charge_upper_index_0")
+    network.model.del_component("state_of_charge_upper_index_1")
+
+    def state_of_charge_upper(m, s, h):
+        if network.storage_units.p_nom_extendable[s]:
+            p_nom = m.storage_p_nom[s]
+        else:
+            p_nom = network.storage_units.p_nom[s]
+
+        return (
+            m.state_of_charge_all[s, h]
+            <= p_nom * network.storage_units.at[s, "max_hours"]
+        )
+
+    network.model.state_of_charge_upper = po.Constraint(
+        network.storage_units.index,
+        candidates - 1 + self.args["start_snapshot"],
+        rule=state_of_charge_upper,
+    )
+
+
+def snapshot_clustering_seasonal_storage_nmp(self, n, sns, simplified=False):
+    """
+    Depicts intertemporal dependencies of storage units and stores when using
+    snapshot clustering to typical periods for temporal complexity reduction.
+
+    According to:
+        L. Kotzur et al: 'Time series aggregation for energy
+        system design:
+        Modeling seasonal storage', 2018
+
+    Parameters
+    ----------
+    n : :class:`pypsa.Network`
+        Overall container of PyPSA
+    sns : list
+        A list of datetime objects representing the timestamps of the
+        snapshots to be clustered.
+    simplified : bool, optional
+        A flag indicating whether to use a simplified version of the model
+        that does not include intra-temporal constraints and variables.
+
+    Returns
+    -------
+    None
+    """
+
+    # TODO: not yet correct this way...
+    # TODO: update to include stores (sector coupling)
+    # TODO: add the simplified variant
+
+    sus = n.storage_units
-    def __init__(self, args):
+    c = "StorageUnit"
+
+    period_starts = sns[0::24]
+
+    candidates = n.cluster.index.get_level_values(0).unique()
+
+    soc_total = get_var(n, c, "state_of_charge")
+
+    # inter_soc
+    # Set lower and upper bound for soc_inter
+    lb = pd.DataFrame(index=candidates, columns=sus.index, data=0)
+    ub = pd.DataFrame(index=candidates, columns=sus.index, data=np.inf)
+
+    # Create soc_inter variable for each storage and each day
+    define_variables(n, lb, ub, "StorageUnit", "soc_inter")
+
+    # Define soc_intra
+    # Set lower and upper bound for soc_intra
+    lb = pd.DataFrame(index=sns, columns=sus.index, data=-np.inf)
+    ub = pd.DataFrame(index=sns, columns=sus.index, data=np.inf)
+
+    # Set soc_intra to 0 at first hour of every day
+    lb.loc[period_starts, :] = 0
+    ub.loc[period_starts, :] = 0
+
+    # Create intra soc variable for each storage and each hour
+    define_variables(n, lb, ub, "StorageUnit", "soc_intra")
+    soc_intra = get_var(n, c, "soc_intra")
+
+    last_hour = n.cluster["last_hour_RepresentativeDay"].values
+
+    soc_inter = get_var(n, c, "soc_inter")
+    next_soc_inter = soc_inter.shift(-1).fillna(soc_inter.loc[candidates[0]])
+
+    last_soc_intra = soc_intra.loc[last_hour].set_index(candidates)
+
+    eff_stand = expand_series(1 - n.df(c).standing_loss, candidates).T
+    eff_dispatch = expand_series(n.df(c).efficiency_dispatch, candidates).T
+    eff_store = expand_series(n.df(c).efficiency_store, candidates).T
+
+    dispatch = get_var(n, c, "p_dispatch").loc[last_hour].set_index(candidates)
+    store = get_var(n, c, "p_store").loc[last_hour].set_index(candidates)
+
+    coeff_var = [
+        (-1, next_soc_inter),
+        (eff_stand.pow(24), soc_inter),
+        (eff_stand, last_soc_intra),
+        (-1 / eff_dispatch, dispatch),
+        (eff_store, store),
+    ]
+
+    lhs, *axes = linexpr(*coeff_var, return_axes=True)
+
+    define_constraints(n, lhs, "==", 0, c, "soc_inter_constraints")
+
+    coeff_var = [
+        (-1, soc_total),
+        (1, soc_intra),
+        (
+            1,
+            soc_inter.loc[n.cluster_ts.loc[sns, "Candidate_day"]].set_index(
+                sns
+            ),
+        ),
+    ]
+    lhs, *axes = linexpr(*coeff_var, return_axes=True)
+
+    define_constraints(n, lhs, "==", 0, c, "soc_intra_constraints")
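All of the pyomo-free variants above follow the same ``pypsa.linopt`` pattern: fetch the solver variables with ``get_var``, combine coefficients and variables into linear-expression strings with ``linexpr``, and register the result via ``define_constraints``. A hedged sketch of that pattern; the helper, its generator subset, and the limit are hypothetical, only the API calls come from ``pypsa.linopt``/``pypsa.descriptors``:

.. code-block:: python

    from pypsa.descriptors import expand_series
    from pypsa.linopt import define_constraints, get_var, linexpr

    def cap_dispatch_nmp(n, sns, gens, limit):
        """Hypothetical helper: cap the summed weighted dispatch of `gens`."""
        p = get_var(n, "Generator", "p").loc[sns, gens]
        w = expand_series(n.snapshot_weightings.generators.loc[sns], gens)
        define_constraints(
            n,
            linexpr((w, p)).sum().sum(),
            "<=",
            limit,
            "Generator",
            "hypothetical_dispatch_cap",
        )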
+
+
+def snapshot_clustering_seasonal_storage_hourly_nmp(self, n, sns):
+    """
+    Depicts intertemporal dependencies of storage units and stores when using
+    snapshot clustering to typical periods for temporal complexity reduction.
+
+    According to:
+        L. Kotzur et al: 'Time series aggregation for energy
+        system design:
+        Modeling seasonal storage', 2018
+
+    Parameters
+    ----------
+    n : :class:`pypsa.Network`
+        Overall container of PyPSA
+    sns : list
+        A list of datetime objects representing the timestamps of the
+        snapshots to be clustered.
+
+    Returns
+    -------
+    None
+    """
+
+    print("TODO")
+
+    # TODO: implement
+
+
+def split_dispatch_disaggregation_constraints(self, n, sns):
+    """
+    Add constraints for the state of charge of storage units and stores
+    when separating the optimization into smaller subproblems
+    while conducting the dispatch disaggregation in a temporally fully
+    resolved network.
+
+    The state of charge at the end of each slice is set to the value
+    calculated in the optimization with the temporally reduced network
+    to ensure compatibility and to reproduce seasonality.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    snapshots : pandas.DatetimeIndex
+        List of timesteps considered in the optimization
+
+    Returns
+    -------
+    None.
+    """
+    tsa_hour = sns[sns.isin(self.conduct_dispatch_disaggregation.index)]
+    if len(tsa_hour) > 1:
+        tsa_hour = tsa_hour[-1]
+    else:
+        tsa_hour = tsa_hour[0]
+    n.model.soc_values = self.conduct_dispatch_disaggregation.loc[tsa_hour]
+
+    sus = n.storage_units.index
+    # for stores, exclude emob and dsm because of their special constraints
+    sto = n.stores[
+        ~n.stores.carrier.isin(["battery storage", "battery_storage", "dsm"])
+    ].index
+
+    def disaggregation_sus_soc(m, s, h):
+        """
+        Sets the soc at the end of the time slice in dispatch_disaggregation
+        to the value calculated in the temporally reduced lopf without slices.
+        """
+        return m.state_of_charge[s, h] == m.soc_values[s]
+
+    n.model.split_dispatch_sus_soc = po.Constraint(
+        sus, sns[-1:], rule=disaggregation_sus_soc
+    )
+
+    def disaggregation_sto_soc(m, s, h):
+        """
+        Sets the soc at the end of the time slice in dispatch_disaggregation
+        to the value calculated in the temporally reduced lopf without slices.
+        """
+        return m.store_e[s, h] == m.soc_values[s]
+
+    n.model.split_dispatch_sto_soc = po.Constraint(
+        sto, sns[-1:], rule=disaggregation_sto_soc
+    )
+
+
+def split_dispatch_disaggregation_constraints_nmp(self, n, sns):
+    print("TODO")
+
+    # TODO: implement
+
+
+class Constraints:
+    def __init__(self, args, conduct_dispatch_disaggregation):
         self.args = args
+        self.conduct_dispatch_disaggregation = conduct_dispatch_disaggregation
 
     def functionality(self, network, snapshots):
-        """ Add constraints to pypsa-model using extra-functionality.
+        """Add constraints to pypsa-model using extra-functionality.
         Serveral constraints can be choosen at once. Possible constraints are
         set and described in the above functions.
 
         Parameters
         ----------
-        network : :class:`pypsa.Network
+        network : :class:`pypsa.Network`
             Overall container of PyPSA
         snapshots : pandas.DatetimeIndex
-            List of timesteps considered in the optimization
+            List of timesteps considered in the optimization
 
         """
+        if "CH4" in network.buses.carrier.values:
+            if self.args["method"]["pyomo"]:
+                add_chp_constraints(network, snapshots)
+                if self.args["scn_name"] != "status2019":
+                    add_ch4_constraints(self, network, snapshots)
+            else:
+                add_chp_constraints_nmp(network)
+                if self.args["scn_name"] != "status2019":
+                    add_ch4_constraints_nmp(self, network, snapshots)
 
-        for constraint in self.args['extra_functionality'].keys():
+        for constraint in self.args["extra_functionality"].keys():
             try:
                 type(network.model)
                 try:
-                    eval('_'+constraint+'(self, network, snapshots)')
-                    logger.info("Added extra_functionality {}".format(
-                        constraint))
+                    eval("_" + constraint + "(self, network, snapshots)")
+                    logger.info(
+                        "Added extra_functionality {}".format(constraint)
+                    )
                 except:
-                    logger.warning("Constraint {} not defined".format(
-                        constraint)+\
-                        ".
New constraints can be defined in" + - " etrago/tools/constraint.py.") + logger.warning( + "Constraint {} not defined".format(constraint) + + ". New constraints can be defined in" + + " etrago/tools/constraint.py." + ) except: try: - eval('_'+constraint+'_nmp(self, network, snapshots)') - logger.info("Added extra_functionality {} without pyomo". - format(constraint)) + eval("_" + constraint + "_nmp(self, network, snapshots)") + logger.info( + "Added extra_functionality {} without pyomo".format( + constraint + ) + ) except: - logger.warning("Constraint {} not defined".format(constraint)) - - - if self.args['snapshot_clustering']['active']: - # This will bound the storage level to 0.5 max_level every 24th hour. - sus = network.storage_units - # take every first hour of the clustered days - network.model.period_starts = \ - network.snapshot_weightings.index[0::24] - - network.model.storages = sus.index - - if self.args['snapshot_clustering']['storage_constraints'] \ - == 'daily_bounds': - - print('Setting daily_bounds constraint') - - def day_rule(m, s, p): - """ - Sets the soc of the every first hour to the - soc of the last hour of the day (i.e. + 23 hours) - """ - return (m.state_of_charge[s, p] == - m.state_of_charge[s, p + pd.Timedelta(hours=23 - )]) - - network.model.period_bound = Constraint( - network.model.storages, - network.model.period_starts, rule=day_rule) - - elif self.args['snapshot_clustering']['storage_constraints'] \ - == 'soc_constraints': - candidates = \ - network.cluster.index.get_level_values(0).unique() - - # create set for inter-temp constraints and variables - network.model.candidates = po.Set( - initialize=candidates, ordered=True) - - # create intra soc variable for each storage and each hour - network.model.state_of_charge_intra = po.Var( - sus.index, network.snapshots) - - def intra_soc_rule(m, s, h): - """ - Sets soc_inter of first hour of every day to 0. Other hours - are set by technical coherences of storage units - - According to: - L. Kotzur et al: 'Time series aggregation for energy - system design: - Modeling seasonal storage', 2018, equation no. 18 - """ - - if h.hour == 0: - expr = (m.state_of_charge_intra[s, h] == 0) - else: - expr = ( - m.state_of_charge_intra[s, h] == - m.state_of_charge_intra[s, h-pd.DateOffset(hours=1)] - * (1 - network.storage_units.at[s, 'standing_loss']) - -(m.storage_p_dispatch[s, h-pd.DateOffset(hours=1)]/ - network.storage_units.at[s, 'efficiency_dispatch'] - - network.storage_units.at[s, 'efficiency_store'] * - m.storage_p_store[s, h-pd.DateOffset(hours=1)])) - return expr - - network.model.soc_intra = po.Constraint( - network.model.storages, network.snapshots, - rule=intra_soc_rule) - - # create inter soc variable for each storage and each candidate - network.model.state_of_charge_inter = po.Var( - sus.index, network.model.candidates, - within=po.NonNegativeReals) - - def inter_storage_soc_rule(m, s, i): - """ - Define the state_of_charge_inter as the state_of_charge_inter of - the day before minus the storage losses plus the state_of_charge_intra - of one hour after the last hour of the representative day. - For the last reperesentive day, the soc_inter is the same as - the first day due to cyclic soc condition - - According to: - L. Kotzur et al: 'Time series aggregation for energy system design: - Modeling seasonal storage', 2018, equation no. 
19 - """ - - if i == network.model.candidates[-1]: - last_hour = network.cluster["last_hour_RepresentativeDay"][i] - expr = po.Constraint.Skip - - else: - last_hour = network.cluster["last_hour_RepresentativeDay"][i] - expr = ( - m.state_of_charge_inter[s, i+1] == - m.state_of_charge_inter[s, i] - * (1 - network.storage_units.at[s, 'standing_loss'])**24 - + m.state_of_charge_intra[s, last_hour]\ - * (1 - network.storage_units.at[s, 'standing_loss'])\ - -(m.storage_p_dispatch[s, last_hour]/\ - network.storage_units.at[s, 'efficiency_dispatch'] - - network.storage_units.at[s, 'efficiency_store'] * - m.storage_p_store[s, last_hour])) - - return expr - - network.model.inter_storage_soc_constraint = po.Constraint( - sus.index, network.model.candidates, - rule=inter_storage_soc_rule) - - #new definition of the state_of_charge used in pypsa - network.model.del_component('state_of_charge_constraint') - network.model.del_component('state_of_charge_constraint_index') - network.model.del_component('state_of_charge_constraint_index_0') - network.model.del_component('state_of_charge_constraint_index_1') - - def total_state_of_charge(m, s, h): - """ - Define the state_of_charge as the sum of state_of_charge_inter - and state_of_charge_intra - - According to: - L. Kotzur et al: 'Time series aggregation for energy system design: - Modeling seasonal storage', 2018 - """ - - return(m.state_of_charge[s, h] == - m.state_of_charge_intra[s, h] + m.state_of_charge_inter[ - s, network.cluster_ts['Candidate_day'][h]]) - - network.model.total_storage_constraint = po.Constraint( - sus.index, network.snapshots, rule=total_state_of_charge) - - def state_of_charge_lower(m, s, h): - """ - Define the state_of_charge as the sum of state_of_charge_inter - and state_of_charge_intra - - According to: - L. 
Kotzur et al: 'Time series aggregation for energy system design: - Modeling seasonal storage', 2018 - """ - - # Choose datetime of representive day - date = str(network.snapshots[ - network.snapshots.dayofyear -1 == - network.cluster['RepresentativeDay'][h.dayofyear]][0]).split(' ')[0] - hour = str(h).split(' ')[1] - - intra_hour = pd.to_datetime(date + ' ' + hour) - - return(m.state_of_charge_intra[s, intra_hour] + - m.state_of_charge_inter[s, network.cluster_ts['Candidate_day'][h]] - # * (1 - network.storage_units.at - # [s, 'standing_loss']*elapsed_hours)**24 - >= 0) - - network.model.state_of_charge_lower = po.Constraint( - sus.index, network.cluster_ts.index, rule=state_of_charge_lower) - - network.model.del_component('state_of_charge_upper') - network.model.del_component('state_of_charge_upper_index') - network.model.del_component('state_of_charge_upper_index_0') - network.model.del_component('state_of_charge_upper_index_1') - - def state_of_charge_upper(m, s, h): - date = str(network.snapshots[ - network.snapshots.dayofyear -1 == - network.cluster['RepresentativeDay'][h.dayofyear]][0]).split(' ')[0] - - hour = str(h).split(' ')[1] - - intra_hour = pd.to_datetime(date + ' ' + hour) - - if network.storage_units.p_nom_extendable[s]: - p_nom = m.storage_p_nom[s] - else: - p_nom = network.storage_units.p_nom[s] - - return (m.state_of_charge_intra[s, intra_hour] + - m.state_of_charge_inter[s, network.cluster_ts['Candidate_day'][h]] - # * (1 - network.storage_units.at[s, - # 'standing_loss']*elapsed_hours)**24 - <= p_nom * network.storage_units.at[s, 'max_hours']) - - network.model.state_of_charge_upper = po.Constraint( - sus.index, network.cluster_ts.index, - rule=state_of_charge_upper) - - def cyclic_state_of_charge(m, s): - """ - Defines cyclic condition like pypsas 'state_of_charge_contraint'. - There are small differences to original results. 
- """ - last_day = network.cluster.index[-1] - - last_calc_hour = network.cluster[ - 'last_hour_RepresentativeDay'][last_day] + logger.warning( + "Constraint {} not defined".format(constraint) + ) + + if ( + self.args["snapshot_clustering"]["active"] + and self.args["snapshot_clustering"]["method"] == "typical_periods" + ): + if ( + self.args["snapshot_clustering"]["storage_constraints"] + == "daily_bounds" + ): + if self.args["method"]["pyomo"]: + snapshot_clustering_daily_bounds(self, network, snapshots) + else: + snapshot_clustering_daily_bounds_nmp( + self, network, snapshots + ) + + elif ( + self.args["snapshot_clustering"]["storage_constraints"] + == "soc_constraints" + ): + if self.args["snapshot_clustering"]["how"] == "hourly": + if self.args["method"]["pyomo"]: + snapshot_clustering_seasonal_storage_hourly( + self, network, snapshots + ) + else: + snapshot_clustering_seasonal_storage_hourly_nmp( + self, network, snapshots + ) + else: + if self.args["method"]["pyomo"]: + snapshot_clustering_seasonal_storage( + self, network, snapshots + ) + else: + snapshot_clustering_seasonal_storage_nmp( + self, network, snapshots + ) + + elif ( + self.args["snapshot_clustering"]["storage_constraints"] + == "soc_constraints_simplified" + ): + if self.args["snapshot_clustering"]["how"] == "hourly": + logger.info( + """soc_constraints_simplified not possible while hourly + clustering -> changed to soc_constraints""" + ) + + if self.args["method"]["pyomo"]: + snapshot_clustering_seasonal_storage_hourly( + self, network, snapshots + ) + else: + snapshot_clustering_seasonal_storage_hourly_nmp( + self, network, snapshots + ) + + if self.args["method"]["pyomo"]: + snapshot_clustering_seasonal_storage( + self, network, snapshots, simplified=True + ) + else: + snapshot_clustering_seasonal_storage_nmp( + self, network, snapshots, simplified=True + ) - last_inter = m.state_of_charge_inter[s, last_day] + else: + logger.error( + """If you want to use constraints considering the storage + behaviour, snapshot clustering constraints must be in + [daily_bounds, soc_constraints, + soc_constraints_simplified]""" + ) + + if self.conduct_dispatch_disaggregation is not False: + if self.args["method"]["pyomo"]: + split_dispatch_disaggregation_constraints( + self, network, snapshots + ) + else: + split_dispatch_disaggregation_constraints_nmp( + self, network, snapshots + ) - last_intra = m.state_of_charge_intra[s, last_calc_hour] - first_day = network.cluster.index[0] +def add_chp_constraints_nmp(n): + """ + Limits the dispatch of combined heat and power links based on + T.Brown et. al : Synergies of sector coupling and transmission + reinforcement in a cost-optimised, highly renewable European energy system, + 2018 - first_calc_hour = network.cluster[ - 'last_hour_RepresentativeDay'][first_day] - pd.DateOffset(hours=23) + Parameters + ---------- + n : pypsa.Network + Network container - first_inter = m.state_of_charge_inter[s, first_day] + Returns + ------- + None. 
- first_intra = m.state_of_charge_intra[s, first_calc_hour] + """ + # backpressure limit + c_m = 0.75 + + # marginal loss for each additional generation of heat + c_v = 0.15 + electric_bool = n.links.carrier == "central_gas_CHP" + heat_bool = n.links.carrier == "central_gas_CHP_heat" + + electric = n.links.index[electric_bool] + heat = n.links.index[heat_bool] + + n.links.loc[heat, "efficiency"] = ( + n.links.loc[electric, "efficiency"] / c_v + ).values.mean() + + ch4_nodes_with_chp = n.buses.loc[ + n.links.loc[electric, "bus0"].values + ].index.unique() + + for i in ch4_nodes_with_chp: + elec_chp = n.links[ + (n.links.carrier == "central_gas_CHP") & (n.links.bus0 == i) + ].index + + heat_chp = n.links[ + (n.links.carrier == "central_gas_CHP_heat") & (n.links.bus0 == i) + ].index + + link_p = get_var(n, "Link", "p") + # backpressure + + lhs_1 = sum( + c_m * n.links.at[h_chp, "efficiency"] * link_p[h_chp] + for h_chp in heat_chp + ) + + lhs_2 = sum( + n.links.at[e_chp, "efficiency"] * link_p[e_chp] + for e_chp in elec_chp + ) + + lhs = linexpr((1, lhs_1), (-1, lhs_2)) + + define_constraints( + n, lhs, "<=", 0, "chplink_" + str(i), "backpressure" + ) + + # top_iso_fuel_line + lhs, *ax = linexpr( + (1, sum(link_p[h_chp] for h_chp in heat_chp)), + (1, sum(link_p[h_e] for h_e in elec_chp)), + return_axes=True, + ) + + define_constraints( + n, + lhs, + "<=", + n.links.loc[elec_chp].p_nom.sum(), + "chplink_" + str(i), + "top_iso_fuel_line_fix", + axes=ax, + ) + + +def add_chp_constraints(network, snapshots): + """ + Limits the dispatch of combined heat and power links based on + T.Brown et. al : Synergies of sector coupling and transmission + reinforcement in a cost-optimised, highly renewable European energy system, + 2018 + + Parameters + ---------- + network : pypsa.Network + Network container + snapshots : pandas.DataFrame + Timesteps to optimize - return (first_intra + first_inter == \ - ((last_intra + last_inter) - * (1 - network.storage_units.at[s, 'standing_loss']) - -(m.storage_p_dispatch[s, last_calc_hour]/ - network.storage_units.at[s, 'efficiency_dispatch'] - -m.storage_p_store[s, last_calc_hour] * - network.storage_units.at[s, 'efficiency_store']))) + Returns + ------- + None. 
- network.model.cyclic_storage_constraint = po.Constraint( - sus.index, rule=cyclic_state_of_charge) + """ - else: - logger.error('snapshot clustering constraints must be in' + - ' [daily_bounds, soc_constraints]') + # backpressure limit + c_m = 0.75 + + # marginal loss for each additional generation of heat + c_v = 0.15 + electric_bool = network.links.carrier == "central_gas_CHP" + heat_bool = network.links.carrier == "central_gas_CHP_heat" + + electric = network.links.index[electric_bool] + heat = network.links.index[heat_bool] + + network.links.loc[heat, "efficiency"] = ( + network.links.loc[electric, "efficiency"] / c_v + ).values.mean() + + ch4_nodes_with_chp = network.buses.loc[ + network.links.loc[electric, "bus0"].values + ].index.unique() + + for i in ch4_nodes_with_chp: + elec_chp = network.links[ + (network.links.carrier == "central_gas_CHP") + & (network.links.bus0 == i) + ].index + + heat_chp = network.links[ + (network.links.carrier == "central_gas_CHP_heat") + & (network.links.bus0 == i) + ].index + + # Guarantees c_m p_b1 \leq p_g1 + def backpressure(model, snapshot): + lhs = sum( + c_m + * network.links.at[h_chp, "efficiency"] + * model.link_p[h_chp, snapshot] + for h_chp in heat_chp + ) + + rhs = sum( + network.links.at[e_chp, "efficiency"] + * model.link_p[e_chp, snapshot] + for e_chp in elec_chp + ) + + return lhs <= rhs + + setattr( + network.model, + "backpressure_" + str(i), + Constraint(list(snapshots), rule=backpressure), + ) + + # Guarantees p_g1 +c_v p_b1 \leq p_g1_nom + def top_iso_fuel_line(model, snapshot): + lhs = sum( + model.link_p[h_chp, snapshot] for h_chp in heat_chp + ) + sum(model.link_p[e_chp, snapshot] for e_chp in elec_chp) + + rhs = network.links[ + (network.links.carrier == "central_gas_CHP") + & (network.links.bus0 == i) + ].p_nom.sum() + + return lhs <= rhs + + setattr( + network.model, + "top_iso_fuel_line_" + str(i), + Constraint(list(snapshots), rule=top_iso_fuel_line), + ) diff --git a/etrago/tools/db.py b/etrago/tools/db.py new file mode 100644 index 000000000..3f9ca2a5a --- /dev/null +++ b/etrago/tools/db.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Europa-Universität Flensburg, +# Centre for Sustainable Energy Systems, +# DLR-Institute for Networked Energy Systems + +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation; either version 3 of the +# License, or (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Affero General Public License for more details. + +# You should have received a copy of the GNU Affero General Public License +# along with this program. If not, see . + +import configparser as cp +import getpass +import os + +from sqlalchemy import create_engine +import keyring +import oedialect + + +def readcfg(filepath, section): + """ + Reads the configuration file. If section is not available, calls + create_oedb_config_file to add the new section to an existing config.ini. 
+
+    Parameters
+    ----------
+    filepath : str
+        Absolute path of config file including the filename itself
+    section : str
+        Section in config file which contains connection details
+
+    Returns
+    -------
+    cfg : configparser.ConfigParser
+        Used for configuration file parser language.
+    """
+
+    cfg = cp.ConfigParser()
+    cfg.read(filepath)
+
+    if not cfg.has_section(section):
+        print(
+            'The section "{sec}" is not in the config file {file}.'.format(
+                sec=section, file=filepath
+            )
+        )
+        cfg = create_oedb_config_file(filepath, section)
+
+    return cfg
+
+
+def get_connection_details(section):
+    """
+    Asks the user for the database connection details and returns them as a
+    ConfigParser-object.
+
+    Parameters
+    ----------
+    None
+
+    Returns
+    -------
+    cfg : configparser.ConfigParser
+        Used for configuration file parser language.
+    """
+    print("Please enter your connection details:")
+    dialect = (
+        input("Enter input value for `dialect` (default: psycopg2): ")
+        or "psycopg2"
+    )
+    username = input("Enter value for `username`: ")
+    database = input("Enter value for `database`: ")
+    host = input("Enter value for `host`: ")
+    port = input("Enter value for `port` (default: 5432): ") or "5432"
+
+    cfg = cp.ConfigParser()
+    cfg.add_section(section)
+    cfg.set(section, "dialect", dialect)
+    cfg.set(section, "username", username)
+    cfg.set(section, "host", host)
+    cfg.set(section, "port", port)
+    cfg.set(section, "database", database)
+    pw = getpass.getpass(
+        prompt="Enter your password/token for {database} to "
+        "store it in keyring: ".format(database=database)
+    )
+    keyring.set_password(section, cfg.get(section, "username"), pw)
+
+    return cfg
+
+
+def create_oedb_config_file(filepath, section="oep"):
+    """
+
+    Parameters
+    ----------
+    filepath : str
+        Absolute path of config file including the filename itself
+    section : str
+        Section in config file which contains connection details
+
+    Returns
+    -------
+    cfg : configparser.ConfigParser
+        Used for configuration file parser language.
+    """
+
+    cfg = get_connection_details(section)
+
+    print(
+        "Do you want to store the connection details in the config file"
+        " {file}?".format(file=filepath)
+    )
+    choice = ""
+    while choice not in ["y", "n"]:
+        choice = input("(y/n): ")
+
+    if choice == "y":
+        # create egoio dir if not existent
+        base_path = os.path.split(filepath)[0]
+        if not os.path.isdir(base_path):
+            os.mkdir(base_path)
+            print("The directory {path} was created.".format(path=base_path))
+
+        with open(filepath, "a") as configfile:
+            cfg.write(configfile)
+
+        print(
+            'Template {0} with section "{1}" created.\nYou can manually edit'
+            " the config file.".format(filepath, section)
+        )
+    else:
+        pass
+
+    return cfg
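For context, a possible use of the ``connection`` helper defined just below: open an engine against the database configured in ``config.ini``, or the passwordless read-only engine for the OpenEnergy Platform, and hand it to pandas. The query is a placeholder, and connectivity is assumed:

.. code-block:: python

    import pandas as pd

    from etrago.tools.db import connection

    engine = connection(section="oep", readonly=True)  # read-only OEP engine
    # df = pd.read_sql("SELECT 1;", engine)  # placeholder sanity-check query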
+
+
+def connection(filepath=None, section="oep", readonly=False):
+    """
+    Instantiate a database connection (for the use with SQLAlchemy).
+
+    The keyword argument `filepath` specifies the location of the config file
+    that contains database connection information. If not given, the default
+    of `~/.etrago_database/config.ini` applies.
+
+    Parameters
+    ----------
+    filepath : str
+        Absolute path of config file including the filename itself
+    section : str
+        Section in config file containing database connection parameters.
+        Default: 'oep'.
+    readonly : bool
+        Set this option to True for creating a read-only and passwordless
+        engine for accessing the open energy platform.
+        Default: False.
+
+    Returns
+    -------
+    conn : sqlalchemy.engine
+        SQLalchemy engine object containing the connection details
+    """
+
+    if readonly:
+        conn = create_engine("postgresql+oedialect://openenergy-platform.org")
+    else:
+        # define default filepath if not provided
+        if filepath is None:
+            filepath = os.path.join(
+                os.path.expanduser("~"), ".etrago_database", "config.ini"
+            )
+
+        # does the file exist?
+        if not os.path.isfile(filepath):
+            print(
+                "DB config file {file} not found. "
+                "This might be the first run of the tool. ".format(
+                    file=filepath
+                )
+            )
+            cfg = create_oedb_config_file(filepath, section=section)
+        else:
+            cfg = readcfg(filepath, section)
+
+        try:
+            pw = cfg.get(section, "password")
+        except:
+            pw = keyring.get_password(section, cfg.get(section, "username"))
+            if pw is None:
+                pw = getpass.getpass(
+                    prompt='No password found for database "{db}". '
+                    "Enter your password to "
+                    "store it in keyring: ".format(
+                        db=cfg.get(section, "database")
+                    )
+                )
+                keyring.set_password(section, cfg.get(section, "username"), pw)
+
+        # establish connection and return it
+        conn = create_engine(
+            "postgresql+{dialect}://{user}:{password}@{host}:{port}/{db}".format(
+                dialect=cfg.get(section, "dialect", fallback="psycopg2"),
+                user=cfg.get(section, "username"),
+                password=pw,
+                host=cfg.get(section, "host"),
+                port=cfg.get(section, "port"),
+                db=cfg.get(section, "database"),
+            )
+        )
+
+    return conn
diff --git a/etrago/tools/execute.py b/etrago/tools/execute.py
index 6e008676f..b4d70d4c6 100755
--- a/etrago/tools/execute.py
+++ b/etrago/tools/execute.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016-2018 Flensburg University of Applied Sciences,
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
 # Europa-Universität Flensburg,
 # Centre for Sustainable Energy Systems,
 # DLR-Institute for Networked Energy Systems
@@ -19,69 +19,89 @@
 # File description
 """
-execute.py defines optimization and simulation methods for Etrago object.
+execute.py defines optimization and simulation methods for the etrago object.
 """
 import os
 
-if 'READTHEDOCS' not in os.environ:
-    import time
+
+if "READTHEDOCS" not in os.environ:
     import logging
-    import pandas as pd
-    import numpy as np
+    import time
+
     from pypsa.linopf import network_lopf
+    from pypsa.networkclustering import aggregategenerators
+    from pypsa.pf import sub_network_pf
+    import numpy as np
+    import pandas as pd
+
+    from etrago.cluster.spatial import strategies_generators
     from etrago.tools.constraints import Constraints
 
 logger = logging.getLogger(__name__)
 
-__copyright__ = ("Flensburg University of Applied Sciences, "
-                 "Europa-Universität Flensburg, "
-                 "Centre for Sustainable Energy Systems, "
-                 "DLR-Institute for Networked Energy Systems")
+__copyright__ = (
+    "Flensburg University of Applied Sciences, "
+    "Europa-Universität Flensburg, "
+    "Centre for Sustainable Energy Systems, "
+    "DLR-Institute for Networked Energy Systems"
+)
 __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
-__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol"
+__author__ = (
+    "ulfmueller, s3pp, wolfbunke, mariusves, lukasol, KathiEsterl, "
+    "ClaraBuettner, CarlosEpia, AmeliaNadal"
+)
 
 
-def update_electrical_parameters(network, l_snom_pre, t_snom_pre):
+def update_electrical_parameters(network, l_snom_pre, t_snom_pre):
     """
     Update electrical parameters of active branch components
-    considering s_nom of previous iteration
+    considering s_nom of previous iteration.
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
     l_snom_pre: pandas.Series
-        s_nom of ac-lines in previous iteration
+        s_nom of ac-lines in previous iteration.
     t_snom_pre: pandas.Series
-        s_nom of transformers in previous iteration
+        s_nom of transformers in previous iteration.
+
+    Returns
+    -------
+    None.
+
     """
 
-    network.lines.x[network.lines.s_nom_extendable] = \
-        network.lines.x * l_snom_pre / network.lines.s_nom_opt
+    network.lines.x[network.lines.s_nom_extendable] = (
+        network.lines.x * l_snom_pre / network.lines.s_nom_opt
+    )
 
-    network.transformers.x[network.transformers.s_nom_extendable] = \
-        network.transformers.x * t_snom_pre /\
-        network.transformers.s_nom_opt
+    network.transformers.x[network.transformers.s_nom_extendable] = (
+        network.transformers.x * t_snom_pre / network.transformers.s_nom_opt
+    )
 
-    network.lines.r[network.lines.s_nom_extendable] = \
-        network.lines.r * l_snom_pre / network.lines.s_nom_opt
+    network.lines.r[network.lines.s_nom_extendable] = (
+        network.lines.r * l_snom_pre / network.lines.s_nom_opt
+    )
 
-    network.transformers.r[network.transformers.s_nom_extendable] = \
-        network.transformers.r * t_snom_pre /\
-        network.transformers.s_nom_opt
+    network.transformers.r[network.transformers.s_nom_extendable] = (
+        network.transformers.r * t_snom_pre / network.transformers.s_nom_opt
+    )
 
-    network.lines.g[network.lines.s_nom_extendable] = \
-        network.lines.g * network.lines.s_nom_opt / l_snom_pre
+    network.lines.g[network.lines.s_nom_extendable] = (
+        network.lines.g * network.lines.s_nom_opt / l_snom_pre
+    )
 
-    network.transformers.g[network.transformers.s_nom_extendable] = \
-        network.transformers.g * network.transformers.s_nom_opt /\
-        t_snom_pre
+    network.transformers.g[network.transformers.s_nom_extendable] = (
+        network.transformers.g * network.transformers.s_nom_opt / t_snom_pre
+    )
 
-    network.lines.b[network.lines.s_nom_extendable] = \
-        network.lines.b * network.lines.s_nom_opt / l_snom_pre
+    network.lines.b[network.lines.s_nom_extendable] = (
+        network.lines.b * network.lines.s_nom_opt / l_snom_pre
+    )
 
-    network.transformers.b[network.transformers.s_nom_extendable] = \
-        network.transformers.b * network.transformers.s_nom_opt /\
-        t_snom_pre
+    network.transformers.b[network.transformers.s_nom_extendable] = (
+        network.transformers.b * network.transformers.s_nom_opt / t_snom_pre
+    )
 
     # Set snom_pre to s_nom_opt for next iteration
     l_snom_pre = network.lines.s_nom_opt.copy()
@@ -91,19 +111,19 @@ def update_electrical_parameters(network, l_snom_pre, t_snom_pre):
 
 
 def run_lopf(etrago, extra_functionality, method):
-    """ Function that performs lopf with or without pyomo
-
+    """
+    Function that performs LOPF with or without pyomo.
+
     Parameters
     ----------
-    etrago : :class:`etrago.Etrago
-        Transmission grid object
-    extra_functionality: str
-        Define extra constranits.
+    etrago : etrago object
+        eTraGo containing all network information and a PyPSA network.
+    extra_functionality: callable
+        Function that adds extra constraints to the optimization problem.
     method: dict
         Choose 'n_iter' and integer for fixed number of iterations or
-        'threshold' and derivation of objective in percent for variable number
-        of iteration until the threshold of the objective function is reached
+        'threshold' and deviation of objective in percent for variable number
+        of iterations until the threshold of the objective function is
+        reached.
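+
+        For illustration, the two supported shapes of `method` (the values
+        here are only examples)::
+
+            method = {"n_iter": 4, "pyomo": True}
+            method = {"threshold": 0.5, "pyomo": True}
+
+        The first runs exactly four LOPF iterations; the second iterates
+        until the objective changes by less than 0.5 percent between runs.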
     Returns
     -------
 
@@ -112,131 +132,244 @@
     """
     x = time.time()
 
-    if method['pyomo']:
-        etrago.network.lopf(
-            etrago.network.snapshots,
-            solver_name=etrago.args['solver'],
-            solver_options=etrago.args['solver_options'],
-            extra_functionality=extra_functionality,
-            formulation=etrago.args['model_formulation'])
 
-        if etrago.network.results["Solver"][0]["Status"] != 'ok':
-            raise Exception('LOPF not solved.')
+    if etrago.conduct_dispatch_disaggregation is not False:
+        # parameters defining the start and end per slices
+        no_slices = etrago.args["temporal_disaggregation"]["no_slices"]
+        skipped = etrago.network.snapshot_weightings.iloc[0].objective
+        transits = np.where(
+            etrago.network_tsa.snapshots.isin(
+                etrago.conduct_dispatch_disaggregation.index
+            )
+        )[0]
+
+        if method["pyomo"]:
+            # repeat the optimization for all slices
+            for i in range(0, no_slices):
+                # keep information on the initial state of charge for the
+                # respective slice
+                initial = transits[i - 1]
+                soc_initial = etrago.conduct_dispatch_disaggregation.loc[
+                    [etrago.network_tsa.snapshots[initial]]
+                ].transpose()
+                etrago.network_tsa.storage_units.state_of_charge_initial = (
+                    soc_initial
+                )
+                etrago.network_tsa.stores.e_initial = soc_initial
+                etrago.network_tsa.stores.e_initial.fillna(0, inplace=True)
+                # the state of charge at the end of each slice is set within
+                # split_dispatch_disaggregation_constraints in constraints.py
+
+                # adapt start and end snapshot of the respective slice
+                start = transits[i - 1] + skipped
+                end = transits[i] + (skipped - 1)
+                if i == 0:
+                    start = 0
+                if i == no_slices - 1:
+                    end = len(etrago.network_tsa.snapshots)
+
+                etrago.network_tsa.lopf(
+                    etrago.network_tsa.snapshots[start : end + 1],
+                    solver_name=etrago.args["solver"],
+                    solver_options=etrago.args["solver_options"],
+                    pyomo=True,
+                    extra_functionality=extra_functionality,
+                    formulation=etrago.args["model_formulation"],
+                )
+
+                if etrago.network_tsa.results["Solver"][0]["Status"] != "ok":
+                    raise Exception("LOPF not solved.")
+
+        else:
+            for i in range(0, no_slices):
+                # set the initial state of charge and the slice bounds in
+                # the same way as in the pyomo branch above; without this,
+                # start and end would be undefined here
+                initial = transits[i - 1]
+                soc_initial = etrago.conduct_dispatch_disaggregation.loc[
+                    [etrago.network_tsa.snapshots[initial]]
+                ].transpose()
+                etrago.network_tsa.storage_units.state_of_charge_initial = (
+                    soc_initial
+                )
+                etrago.network_tsa.stores.e_initial = soc_initial
+                etrago.network_tsa.stores.e_initial.fillna(0, inplace=True)
+
+                start = transits[i - 1] + skipped
+                end = transits[i] + (skipped - 1)
+                if i == 0:
+                    start = 0
+                if i == no_slices - 1:
+                    end = len(etrago.network_tsa.snapshots)
+
+                status, termination_condition = network_lopf(
+                    etrago.network_tsa,
+                    etrago.network_tsa.snapshots[start : end + 1],
+                    solver_name=etrago.args["solver"],
+                    solver_options=etrago.args["solver_options"],
+                    extra_functionality=extra_functionality,
+                    formulation=etrago.args["model_formulation"],
+                )
+
+                if status != "ok":
+                    raise Exception("LOPF not solved.")
+
+        etrago.network_tsa.storage_units.state_of_charge_initial = 0
+        etrago.network_tsa.stores.e_initial = 0
 
     else:
-        status, termination_condition = network_lopf(
-            etrago.network,
-            solver_name=etrago.args['solver'],
-            solver_options=etrago.args['solver_options'],
-            extra_functionality=extra_functionality,
-            formulation=etrago.args['model_formulation'])
-
-        if status != 'ok':
-            raise Exception('LOPF not solved.')
+        if method["pyomo"]:
+            etrago.network.lopf(
+                etrago.network.snapshots,
+                solver_name=etrago.args["solver"],
+                solver_options=etrago.args["solver_options"],
+                pyomo=True,
+                extra_functionality=extra_functionality,
+                formulation=etrago.args["model_formulation"],
+            )
+
+            if etrago.network.results["Solver"][0]["Status"] != "ok":
+                raise Exception("LOPF not solved.")
+
+        else:
+            status, termination_condition = network_lopf(
+                etrago.network,
+                solver_name=etrago.args["solver"],
+                solver_options=etrago.args["solver_options"],
+                extra_functionality=extra_functionality,
+                formulation=etrago.args["model_formulation"],
+            )
+
+            if status != "ok":
+                raise Exception("LOPF not solved.")
 
     y = time.time()
     z = (y - x) / 60
     print("Time for LOPF [min]:", round(z, 2))
 
 
-def iterate_lopf(etrago, extra_functionality, method={'n_iter':4, 'pyomo':True},
-                 ):
+def iterate_lopf(
+    etrago,
+    extra_functionality,
+    method={"n_iter": 4, "pyomo": True},
+):
     """
     Run optimization of lopf. If network extension is included, the specified
     number of iterations is calculated to consider reactance changes.
 
     Parameters
     ----------
-    etrago : :class:`etrago.Etrago
-        Transmission grid object
-    extra_functionality: str
-        Define extra constranits.
+    etrago : etrago object
+        eTraGo containing all network information and a PyPSA network.
+    extra_functionality: callable
+        Function that adds extra constraints to the optimization problem.
     method: dict
         Choose 'n_iter' and integer for fixed number of iterations or
-        'threshold' and derivation of objective in percent for variable number
-        of iteration until the threshold of the objective function is reached
+        'threshold' and deviation of objective in percent for variable number
+        of iterations until the threshold of the objective function is
+        reached.
 
     """
-    network = etrago.network
     args = etrago.args
 
+    path = args["csv_export"]
+    lp_path = args["lpfile"]
+
+    if (
+        args["temporal_disaggregation"]["active"] is True
+        and etrago.conduct_dispatch_disaggregation is False
+    ):
+        if args["csv_export"]:
+            path = path + "/temporally_reduced"
+
+        if args["lpfile"]:
+            lp_path = lp_path[0:-3] + "_temporally_reduced.lp"
+
+    if etrago.conduct_dispatch_disaggregation is not False:
+        if args["lpfile"]:
+            lp_path = lp_path[0:-3] + "_dispatch_disaggregation.lp"
+
+        etrago.network_tsa.lines["s_nom"] = etrago.network.lines["s_nom_opt"]
+        etrago.network_tsa.lines["s_nom_extendable"] = False
+
+        etrago.network_tsa.links["p_nom"] = etrago.network.links["p_nom_opt"]
+        etrago.network_tsa.links["p_nom_extendable"] = False
+
+        etrago.network_tsa.transformers["s_nom"] = etrago.network.transformers[
+            "s_nom_opt"
+        ]
+        etrago.network_tsa.transformers.s_nom_extendable = False
+
+        etrago.network_tsa.storage_units[
+            "p_nom"
+        ] = etrago.network.storage_units["p_nom_opt"]
+        etrago.network_tsa.storage_units["p_nom_extendable"] = False
+
+        etrago.network_tsa.stores["e_nom"] = etrago.network.stores["e_nom_opt"]
+        etrago.network_tsa.stores["e_nom_extendable"] = False
+
+        etrago.network_tsa.storage_units.cyclic_state_of_charge = False
+        etrago.network_tsa.stores.e_cyclic = False
+
+        args["snapshot_clustering"]["active"] = False
+        args["skip_snapshots"] = False
+        args["extendable"] = []
+
+        network = etrago.network_tsa
+
+    else:
+        network = etrago.network
+
     # if network is extendable, iterate lopf
     # to include changes of electrical parameters
     if network.lines.s_nom_extendable.any():
-
         # Initialise s_nom_pre (s_nom_opt of previous iteration)
         # to s_nom for first lopf:
         l_snom_pre = network.lines.s_nom.copy()
         t_snom_pre = network.transformers.s_nom.copy()
 
         # calculate fixed number of iterations
-        if 'n_iter' in method:
-            n_iter = method['n_iter']
-
-            for i in range(1, (1+n_iter)):
+        if "n_iter" in method:
+            n_iter = method["n_iter"]
 
+            for i in range(1, (1 + n_iter)):
                 run_lopf(etrago, extra_functionality, method)
 
-                if args['csv_export'] != False:
-                    path = args['csv_export'] + '/lopf_iteration_'+ str(i)
-                    etrago.export_to_csv(path)
+                if args["csv_export"]:
+                    path_it = path + "/lopf_iteration_" + str(i)
+                    etrago.export_to_csv(path_it)
 
                 if i < n_iter:
-                    l_snom_pre, t_snom_pre = \
-                        update_electrical_parameters(network,
-                                                     l_snom_pre, t_snom_pre)
+                    l_snom_pre, t_snom_pre = update_electrical_parameters(
+                        network, l_snom_pre, t_snom_pre
+                    )
 
         # Calculate variable number of iterations until threshold of objective
         # function is reached
-        if 'threshold' in method:
-
+        if "threshold" in method:
             run_lopf(etrago, extra_functionality, method)
 
-            diff_obj = network.objective*method['threshold']/100
+            diff_obj = network.objective * method["threshold"] / 100
 
             i = 1
 
             # Stop after 100 iterations to avoid an unending loop
             while i <= 100:
-
                 if i == 100:
-                    print('Maximum number of iterations reached.')
+                    print("Maximum number of iterations reached.")
                     break
 
-                l_snom_pre, t_snom_pre = \
-                    update_electrical_parameters(network,
-                                                 l_snom_pre, t_snom_pre)
+                l_snom_pre, t_snom_pre = update_electrical_parameters(
+                    network, l_snom_pre, t_snom_pre
+                )
                 pre = network.objective
 
                 run_lopf(etrago, extra_functionality, method)
 
                 i += 1
 
-                if args['csv_export'] != False:
-                    path = args['csv_export'] + '/lopf_iteration_'+ str(i)
-                    etrago.export_to_csv(path)
+                if args["csv_export"]:
+                    path_it = path + "/lopf_iteration_" + str(i)
+                    etrago.export_to_csv(path_it)
 
-                if abs(pre-network.objective) <= diff_obj:
-                    print('Threshold reached after ' + str(i) + ' iterations.')
+                if abs(pre - network.objective) <= diff_obj:
+                    print("Threshold reached after " + str(i) + " iterations.")
                     break
 
     else:
        run_lopf(etrago, extra_functionality, method)
 
-
-    if args['csv_export'] != False:
-        path = args['csv_export']
+    if args["csv_export"]:
        etrago.export_to_csv(path)
 
-    if not args['lpfile'] is False:
-        network.model.write(
-            args['lpfile'], io_options={
-                'symbolic_solver_labels': True})
+    if args["lpfile"]:
+        network.model.write(lp_path)
 
     return network
 
+
 def lopf(self):
-    """ Functions that runs lopf accordning to arguments
+    """
+    Function that runs LOPF according to the arguments.
 
     Returns
     -------
@@ -246,18 +379,34 @@ def lopf(self):
 
     x = time.time()
 
-    iterate_lopf(self,
-                 Constraints(self.args).functionality,
-                 method=self.args['method'])
+    self.conduct_dispatch_disaggregation = False
+
+    iterate_lopf(
+        self,
+        Constraints(
+            self.args, self.conduct_dispatch_disaggregation
+        ).functionality,
+        method=self.args["method"],
+    )
 
     y = time.time()
     z = (y - x) / 60
     logger.info("Time for LOPF [min]: {}".format(round(z, 2)))
 
+    if self.args["csv_export"]:
+        path = self.args["csv_export"]
+        if self.args["temporal_disaggregation"]["active"] is True:
+            path = path + "/temporally_reduced"
+        self.export_to_csv(path)
 
-def run_pf_post_lopf(self):
-    """ Functions that runs pf_post_lopf accordning to arguments
+
+def dispatch_disaggregation(self):
+    """
+    Function running the temporal disaggregation, i.e. the optimization of
+    dispatch in the temporally fully resolved network. To keep the problem
+    tractable, it is reduced to smaller subproblems by slicing the whole
+    considered time span while keeping information on the state of charge
+    of storage units and stores to ensure compatibility and to reproduce
+    seasonality.
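+
+    As an illustration of the slicing (the numbers are only an example):
+    with 8760 snapshots and ``no_slices = 4``, the slice length is
+    ``int(8760 / 4) = 2190``, so the state of charge is passed between
+    slices at the transition snapshots 2190, 4380 and 6570, plus the very
+    last snapshot.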
 
     Returns
     -------
 
     """
 
-    if self.args['pf_post_lopf']['active']:
+    if self.args["temporal_disaggregation"]["active"]:
+        x = time.time()
+
+        if self.args["temporal_disaggregation"]["no_slices"]:
+            # split dispatch_disaggregation into subproblems
+            # keep some information on soc in beginning and end of slices
+            # to ensure compatibility and to reproduce seasonality
+
+            # define number of slices and corresponding slice length
+            no_slices = self.args["temporal_disaggregation"]["no_slices"]
+            slice_len = int(len(self.network.snapshots) / no_slices)
+            # transition snapshots defining start and end of slices
+            transits = self.network.snapshots[0::slice_len]
+            if len(transits) > 1:
+                transits = transits[1:]
+            if transits[-1] != self.network_tsa.snapshots[-1]:
+                transits = transits.insert(
+                    (len(transits)), self.network.snapshots[-1]
+                )
+            # for stores, exclude emob and dsm because of their special
+            # constraints
+            sto = self.network.stores[
+                ~self.network.stores.carrier.isin(
+                    ["battery_storage", "battery storage", "dsm"]
+                )
+            ]
+
+            # save state of charge of storage units and stores at those
+            # transition snapshots
+            self.conduct_dispatch_disaggregation = pd.DataFrame(
+                columns=self.network.storage_units.index.append(sto.index),
+                index=transits,
+            )
+            for storage in self.network.storage_units.index:
+                self.conduct_dispatch_disaggregation[
+                    storage
+                ] = self.network.storage_units_t.state_of_charge[storage]
+            for store in sto.index:
+                self.conduct_dispatch_disaggregation[
+                    store
+                ] = self.network.stores_t.e[store]
+
+            extra_func = self.args["extra_functionality"]
+            self.args["extra_functionality"] = {}
+
+            load_shedding = self.args["load_shedding"]
+            if not load_shedding:
+                self.args["load_shedding"] = True
+                self.load_shedding(temporal_disaggregation=True)
+
+        iterate_lopf(
+            self,
+            Constraints(
+                self.args, self.conduct_dispatch_disaggregation
+            ).functionality,
+            method=self.args["method"],
+        )
+
+        # switch to temporally fully resolved network as standard network,
+        # temporally reduced network is stored in network_tsa
+        network1 = self.network.copy()
+        self.network = self.network_tsa.copy()
+        self.network_tsa = network1.copy()
+        network1 = 0
+
+        # restore original settings
+        if self.args["temporal_disaggregation"]["no_slices"]:
+            self.args["extra_functionality"] = extra_func
+            self.args["load_shedding"] = load_shedding
+
+        self.network.lines["s_nom_extendable"] = self.network_tsa.lines[
+            "s_nom_extendable"
+        ]
+        self.network.links["p_nom_extendable"] = self.network_tsa.links[
+            "p_nom_extendable"
+        ]
+        self.network.transformers.s_nom_extendable = (
+            self.network_tsa.transformers.s_nom_extendable
+        )
+        self.network.storage_units[
+            "p_nom_extendable"
+        ] = self.network_tsa.storage_units["p_nom_extendable"]
+        self.network.stores["e_nom_extendable"] = self.network_tsa.stores[
+            "e_nom_extendable"
+        ]
+        self.network.storage_units.cyclic_state_of_charge = (
+            self.network_tsa.storage_units.cyclic_state_of_charge
+        )
+        self.network.stores.e_cyclic = self.network_tsa.stores.e_cyclic
+
+        if self.args["csv_export"]:
+            path = self.args["csv_export"]
+            self.export_to_csv(path)
+            self.export_to_csv(path + "/temporal_disaggregation")
+
+        y = time.time()
+        z = (y - x) / 60
+        logger.info("Time for LOPF [min]: {}".format(round(z, 2)))
+
+
+def import_gen_from_links(network, drop_small_capacities=True):
+    """
+    Create gas generators from links in order to not lose them when
+    dropping non-electric carriers.
+    """
 
-        pf_post_lopf(self)
+    if drop_small_capacities:
+        # Discard all generators < 1kW
+        discard_gen = network.links[network.links["p_nom"] <= 0.001].index
+        network.links.drop(discard_gen, inplace=True)
+        for df in network.links_t:
+            if not network.links_t[df].empty:
+                network.links_t[df].drop(
+                    columns=discard_gen.values, inplace=True, errors="ignore"
+                )
+
+    gas_to_add = network.links[
+        network.links.carrier.isin(
+            [
+                "central_gas_CHP",
+                "OCGT",
+                "H2_to_power",
+                "industrial_gas_CHP",
+            ]
+        )
+    ].copy()
+
+    # Drop generators from the links table
+    network.links.drop(gas_to_add.index, inplace=True)
+
+    gas_to_add.rename(columns={"bus1": "bus"}, inplace=True)
+
+    # Create generators' names like in network.generators
+    gas_to_add["Generator"] = (
+        gas_to_add["bus"] + " " + gas_to_add.index + gas_to_add["carrier"]
+    )
+    gas_to_add_orig = gas_to_add.copy()
+    gas_to_add.set_index("Generator", drop=True, inplace=True)
+    gas_to_add = gas_to_add[
+        gas_to_add.columns[gas_to_add.columns.isin(network.generators.columns)]
+    ]
+
+    network.import_components_from_dataframe(gas_to_add, "Generator")
+
+    # Dealing with generators_t
+    columns_new = network.links_t.p1.columns[
+        network.links_t.p1.columns.isin(gas_to_add_orig.index)
+    ]
+
+    new_gen_t = network.links_t.p1[columns_new] * -1
+    new_gen_t.rename(columns=gas_to_add_orig["Generator"], inplace=True)
+    network.generators_t.p = network.generators_t.p.join(new_gen_t)
+
+    # Drop generators from the links_t table
+    for df in network.links_t:
+        if not network.links_t[df].empty:
+            network.links_t[df].drop(
+                columns=gas_to_add_orig.index,
+                inplace=True,
+                errors="ignore",
+            )
+
+    # Group generators per bus if needed
+    if not (
+        network.generators.groupby(["bus", "carrier"]).p_nom.count() == 1
+    ).all():
+        network.generators["weight"] = network.generators.p_nom
+        df, df_t = aggregategenerators(
+            network,
+            busmap=pd.Series(
+                index=network.buses.index, data=network.buses.index
+            ),
+            custom_strategies=strategies_generators(),
+        )
+
+        # Keep control arguments from generators
+        control = network.generators.groupby(
+            ["bus", "carrier"]
+        ).control.first()
+        control.index = (
+            control.index.get_level_values(0)
+            + " "
+            + control.index.get_level_values(1)
+        )
+        df.control = control
+
+        # Drop non-aggregated generators
+        network.mremove("Generator", network.generators.index)
+
+        # Insert aggregated generators and time series
+        network.import_components_from_dataframe(df, "Generator")
+
+        for attr, data in df_t.items():
+            if not data.empty:
+                network.import_series_from_dataframe(data, "Generator", attr)
+
     return
 
 
-def pf_post_lopf(etrago, calc_losses = True):
+def run_pf_post_lopf(self):
     """
-    Function that prepares and runs non-linar load flow using PyPSA pf.
+    Function that runs pf_post_lopf according to arguments.
+
+    Returns
+    -------
+    None.
+
+    """
+
+    if self.args["pf_post_lopf"]["active"]:
+        pf_post_lopf(self)
+
+
+def pf_post_lopf(etrago, calc_losses=False):
+    """
+    Function that prepares and runs non-linear load flow using PyPSA pf.
 
     If cross-border lines are DC-links, pf is only applied to the German
     network. Cross-border flows are still considered due to the active
     behavior of links.
-    To return a network containing the whole grid, the optimised solution of the
-    foreign components can be added afterwards.
+    To return a network containing the whole grid, the optimised solution of
+    the foreign components can be added afterwards.
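+
+    For orientation, the relevant settings in ``args`` look roughly like
+    this (the values are only examples)::
+
+        args["pf_post_lopf"] = {
+            "active": True,            # run the non-linear pf after lopf
+            "add_foreign_lopf": True,  # re-add foreign lopf results
+            "q_allocation": "p_nom",   # how reactive power is distributed
+        }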
Parameters ---------- - etrago : :class:`etrago.Etrago - Transmission grid object + etrago : etrago object + eTraGo containing all network information and a PyPSA network. add_foreign_lopf: boolean Choose if foreign results of lopf should be added to the network when - foreign lines are DC + foreign lines are DC. q_allocation: str Choose allocation of reactive power. Possible settings are listed in distribute_q function. @@ -298,78 +653,112 @@ def pf_post_lopf(etrago, calc_losses = True): ------- """ + def drop_foreign_components(network): """ Function that drops foreign components which are only connected via - DC-links and saves their optimization results in pandas.DataFrame - + DC-links and saves their optimization results in pd.DataFrame. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. Returns ------- None. """ + + # Create series for constant loads + constant_loads = network.loads[network.loads.p_set != 0]["p_set"] + for load in constant_loads.index: + network.loads_t.p_set[load] = constant_loads[load] + network.loads.p_set = 0 + n_bus = pd.Series(index=network.sub_networks.index) - for i in range(0, len(network.sub_networks.index)-1): - n_bus[i] = len(network.buses.index[ - network.buses.sub_network.astype(int) == i]) + for i in network.sub_networks.index: + n_bus[i] = len(network.buses.index[network.buses.sub_network == i]) sub_network_DE = n_bus.index[n_bus == n_bus.max()] - foreign_bus = network.buses[network.buses.sub_network != - sub_network_DE.values[0]] + foreign_bus = network.buses[ + (network.buses.sub_network != sub_network_DE.values[0]) + & (network.buses.country != "DE") + ] foreign_comp = { - 'Bus': network.buses[ - network.buses.sub_network != sub_network_DE.values[0]], - 'Generator': network.generators[ - network.generators.bus.isin(foreign_bus.index)], - 'Load': network.loads[ - network.loads.bus.isin(foreign_bus.index)], - 'Transformer': network.transformers[ - network.transformers.bus0.isin(foreign_bus.index)], - 'StorageUnit': network.storage_units[ - network.storage_units.bus.isin(foreign_bus.index)]} + "Bus": network.buses[network.buses.index.isin(foreign_bus.index)], + "Generator": network.generators[ + network.generators.bus.isin(foreign_bus.index) + ], + "Load": network.loads[network.loads.bus.isin(foreign_bus.index)], + "Transformer": network.transformers[ + network.transformers.bus0.isin(foreign_bus.index) + ], + "StorageUnit": network.storage_units[ + network.storage_units.bus.isin(foreign_bus.index) + ], + "Store": network.stores[ + network.stores.bus.isin(foreign_bus.index) + ], + } foreign_series = { - 'Bus': network.buses_t.copy(), - 'Generator': network.generators_t.copy(), - 'Load': network.loads_t.copy(), - 'Transformer': network.transformers_t.copy(), - 'StorageUnit': network.storage_units_t.copy()} + "Bus": network.buses_t.copy(), + "Generator": network.generators_t.copy(), + "Load": network.loads_t.copy(), + "Transformer": network.transformers_t.copy(), + "StorageUnit": network.storage_units_t.copy(), + "Store": network.stores_t.copy(), + } for comp in sorted(foreign_series): attr = sorted(foreign_series[comp]) for a in attr: if not foreign_series[comp][a].empty: - if a != 'p_max_pu': - foreign_series[comp][a] = foreign_series[comp][a][ - foreign_comp[comp].index] + if a != "p_max_pu": + if a in ["q_set", "e_max_pu", "e_min_pu"]: + g_in_q_set = foreign_comp[comp][ + foreign_comp[comp].index.isin( + foreign_series[comp][a].columns + ) + ] + 
foreign_series[comp][a] = foreign_series[comp][a][ + g_in_q_set.index + ] + else: + foreign_series[comp][a] = foreign_series[comp][a][ + foreign_comp[comp].index + ] else: - foreign_series[comp][a] = \ - foreign_series[comp][a][ - foreign_comp[comp][ - foreign_comp[comp].index.isin( - network.generators_t.p_max_pu.columns) - ].index] - - # Drop compoenents + foreign_series[comp][a] = foreign_series[comp][a][ + foreign_comp[comp][ + foreign_comp[comp].index.isin( + network.generators_t.p_max_pu.columns + ) + ].index + ] + + # Drop components network.buses = network.buses.drop(foreign_bus.index) network.generators = network.generators[ - network.generators.bus.isin(network.buses.index)] + network.generators.bus.isin(network.buses.index) + ] network.loads = network.loads[ - network.loads.bus.isin(network.buses.index)] + network.loads.bus.isin(network.buses.index) + ] network.transformers = network.transformers[ - network.transformers.bus0.isin(network.buses.index)] + network.transformers.bus0.isin(network.buses.index) + ] network.storage_units = network.storage_units[ - network.storage_units.bus.isin(network.buses.index)] + network.storage_units.bus.isin(network.buses.index) + ] + network.stores = network.stores[ + network.stores.bus.isin(network.buses.index) + ] return foreign_bus, foreign_comp, foreign_series @@ -379,138 +768,242 @@ def drop_foreign_components(network): network.lines.s_nom = network.lines.s_nom_opt - # For the PF, set the P to the optimised P + # generators modeled as links are imported to the generators table + import_gen_from_links(network) + + if args["spatial_disaggregation"]: + import_gen_from_links( + etrago.disaggregated_network, drop_small_capacities=False + ) + + # For the PF, set the P to be the optimised P network.generators_t.p_set = network.generators_t.p_set.reindex( - columns=network.generators.index) + columns=network.generators.index + ) network.generators_t.p_set = network.generators_t.p - network.storage_units_t.p_set = network.storage_units_t.p_set\ - .reindex(columns=network.storage_units.index) + network.storage_units_t.p_set = network.storage_units_t.p_set.reindex( + columns=network.storage_units.index + ) network.storage_units_t.p_set = network.storage_units_t.p + network.stores_t.p_set = network.stores_t.p_set.reindex( + columns=network.stores.index + ) + network.stores_t.p_set = network.stores_t.p + network.links_t.p_set = network.links_t.p_set.reindex( - columns=network.links.index) + columns=network.links.index + ) network.links_t.p_set = network.links_t.p0 - # if foreign lines are DC, execute pf only on sub_network in Germany - if (args['foreign_lines']['carrier'] == 'DC')\ - or ((args['scn_extension'] != None) - and ('BE_NO_NEP 2035' in args['scn_extension'])): - foreign_bus, foreign_comp, foreign_series = \ - drop_foreign_components(network) + network.determine_network_topology() - # Set slack bus - network = set_slack(network) + # if foreign lines are DC, execute pf only on sub_network in Germany + if (args["foreign_lines"]["carrier"] == "DC") or ( + (args["scn_extension"] is not None) + and ("BE_NO_NEP 2035" in args["scn_extension"]) + ): + foreign_bus, foreign_comp, foreign_series = drop_foreign_components( + network + ) + + # Find out the name of the main subnetwork + main_subnet = str(network.buses.sub_network.value_counts().argmax()) + + # Delete very small p_set and q_set values to avoid problems when solving + network.generators_t["p_set"][ + np.abs(network.generators_t["p_set"]) < 0.001 + ] = 0 + network.generators_t["q_set"][ + 
np.abs(network.generators_t["q_set"]) < 0.001
+    ] = 0
+    network.loads_t["p_set"][np.abs(network.loads_t["p_set"]) < 0.001] = 0
+    network.loads_t["q_set"][np.abs(network.loads_t["q_set"]) < 0.001] = 0
+    network.storage_units_t["p_set"][
+        np.abs(network.storage_units_t["p_set"]) < 0.001
+    ] = 0
+    network.storage_units_t["q_set"][
+        np.abs(network.storage_units_t["q_set"]) < 0.001
+    ] = 0
 
     # execute non-linear pf
-    pf_solution = network.pf(network.snapshots, use_seed=True)
-
-    # if selected, copy lopf results of neighboring countries to network
-    if ((args['foreign_lines']['carrier'] == 'DC')
-        or ((args['scn_extension'] != None)
-            and ('BE_NO_NEP 2035' in args['scn_extension']))
-       ) and etrago.args['pf_post_lopf']['add_foreign_lopf']:
-        for comp in sorted(foreign_series):
-            network.import_components_from_dataframe(foreign_comp[comp], comp)
-
-            for attr in sorted(foreign_series[comp]):
-                network.import_series_from_dataframe(foreign_series
-                                                     [comp][attr], comp, attr)
-
-    pf_solve = pd.DataFrame(index=pf_solution['converged'].index)
-    pf_solve['converged'] = pf_solution['converged'].values
-    pf_solve['error'] = pf_solution['error'].values
-    pf_solve['n_iter'] = pf_solution['n_iter'].values
+    pf_solution = sub_network_pf(
+        sub_network=network.sub_networks["obj"][main_subnet],
+        snapshots=network.snapshots,
+        use_seed=True,
+        distribute_slack=True,
+    )
+
+    pf_solve = pd.DataFrame(index=pf_solution[0].index)
+    pf_solve["converged"] = pf_solution[2].values
+    pf_solve["error"] = pf_solution[1].values
+    pf_solve["n_iter"] = pf_solution[0].values
 
     if not pf_solve[~pf_solve.converged].count().max() == 0:
-        logger.warning("PF of %d snapshots not converged.",
-                       pf_solve[~pf_solve.converged].count().max())
+        logger.warning(
+            "PF of %d snapshots not converged.",
+            pf_solve[~pf_solve.converged].count().max(),
+        )
     if calc_losses:
-        calc_line_losses(network)
+        calc_line_losses(network, pf_solve["converged"])
 
-    network = distribute_q(network,
-                           etrago.args['pf_post_lopf']['q_allocation'])
+    network = distribute_q(
+        network, etrago.args["pf_post_lopf"]["q_allocation"]
+    )
 
     y = time.time()
     z = (y - x) / 60
     print("Time for PF [min]:", round(z, 2))
 
-    if args['csv_export'] != False:
-        path = args['csv_export'] + '/pf_post_lopf'
+    # if selected, copy lopf results of neighboring countries to network
+    if (
+        (args["foreign_lines"]["carrier"] == "DC")
+        or (
+            (args["scn_extension"] is not None)
+            and ("BE_NO_NEP 2035" in args["scn_extension"])
+        )
+    ) and etrago.args["pf_post_lopf"]["add_foreign_lopf"]:
+        for comp in sorted(foreign_series):
+            network.import_components_from_dataframe(foreign_comp[comp], comp)
+
+            for attr in sorted(foreign_series[comp]):
+                network.import_series_from_dataframe(
+                    foreign_series[comp][attr], comp, attr
+                )
+
+    if args["csv_export"]:
+        path = args["csv_export"] + "/pf_post_lopf"
         etrago.export_to_csv(path)
-        pf_solve.to_csv(os.path.join(path, 'pf_solution.csv'),
-                        index=True)
+        pf_solve.to_csv(os.path.join(path, "pf_solution.csv"), index=True)
 
-    return network
+        # the disaggregated network is exported below the csv_export path,
+        # so this is only done when csv_export is set
+        if args["spatial_disaggregation"]:
+            etrago.disaggregated_network.export_to_csv_folder(
+                path + "/disaggregated_network"
+            )
 
+    return network
 
-def distribute_q(network, allocation='p_nom'):
-    """ Function that distributes reactive power at bus to all installed
+
+def distribute_q(network, allocation="p_nom"):
+    """
+    Function that distributes reactive power at bus to all installed
     generators and storages.
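+
+    With ``allocation='p_nom'``, the reactive power collected per bus is
+    shared among the units connected to that bus in proportion to their
+    rated power. In illustrative notation (not code from this module)::
+
+        q_unit = q_bus * p_nom_unit / p_nom_total_at_bus
+
+    where ``p_nom_total_at_bus`` sums the rated power of all generators
+    (except load shedding) and storage units at the bus.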
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
     allocation: str
         Choose key to distribute reactive power:
-        'p_nom' to dirstribute via p_nom
-        'p' to distribute via p_set
+        'p_nom' to distribute via p_nom
+        'p' to distribute via p_set.
 
     Returns
     -------
-
+    network : pypsa.Network object
+        Container for all network components, with reactive power assigned
+        to generators and storage units.
     """
+
+    ac_bus = network.buses[network.buses.carrier == "AC"]
+
+    gen_elec = network.generators[
+        (network.generators.bus.isin(ac_bus.index))
+        & (network.generators.carrier != "load shedding")
+    ].carrier.unique()
+
     network.allocation = allocation
-    if allocation == 'p':
-        p_sum = network.generators_t['p'].\
-            groupby(network.generators.bus, axis=1).sum().\
-            add(network.storage_units_t['p'].abs().groupby(
-                network.storage_units.bus, axis=1).sum(), fill_value=0)
-        q_sum = network.generators_t['q'].\
-            groupby(network.generators.bus, axis=1).sum()
-
-        q_distributed = network.generators_t.p / \
-            p_sum[network.generators.bus.sort_index()].values * \
-            q_sum[network.generators.bus.sort_index()].values
-
-        q_storages = network.storage_units_t.p / \
-            p_sum[network.storage_units.bus.sort_index()].values *\
-            q_sum[network.storage_units.bus.sort_index()].values
-
-    if allocation == 'p_nom':
-
-        q_bus = network.generators_t['q'].\
-            groupby(network.generators.bus, axis=1).sum().add(
+    if allocation == "p":
+        if (network.buses.carrier == "AC").all():
+            p_sum = (
+                network.generators_t["p"]
+                .groupby(network.generators.bus, axis=1)
+                .sum()
+                .add(
+                    network.storage_units_t["p"]
+                    .abs()
+                    .groupby(network.storage_units.bus, axis=1)
+                    .sum(),
+                    fill_value=0,
+                )
+            )
+            q_sum = (
+                network.generators_t["q"]
+                .groupby(network.generators.bus, axis=1)
+                .sum()
+            )
+
+            q_distributed = (
+                network.generators_t.p
+                / p_sum[network.generators.bus.sort_index()].values
+                * q_sum[network.generators.bus.sort_index()].values
+            )
+
+            q_storages = (
+                network.storage_units_t.p
+                / p_sum[network.storage_units.bus.sort_index()].values
+                * q_sum[network.storage_units.bus.sort_index()].values
+            )
+        else:
+            print(
+                """WARNING: Distribution of reactive power based on active
+                power is currently outdated for sector coupled models. This
+                process will continue with the option allocation = 'p_nom'"""
+            )
+            allocation = "p_nom"
+
+    if allocation == "p_nom":
+        q_bus = (
+            network.generators_t["q"]
+            .groupby(network.generators.bus, axis=1)
+            .sum()
+            .add(
                 network.storage_units_t.q.groupby(
-                network.storage_units.bus, axis=1).sum(), fill_value=0)
-
-        p_nom_dist = network.generators.p_nom_opt.sort_index()
-        p_nom_dist[p_nom_dist.index.isin(network.generators.index
-                                         [network.generators.carrier ==
-                                          'load shedding'])] = 0
-
-        q_distributed = q_bus[
-            network.generators.bus].multiply(p_nom_dist.values) /\
-            (network.generators.p_nom_opt[
-                network.generators.carrier != 'load shedding']
-             .groupby(network.generators.bus).sum().add(
-                 network.storage_units.p_nom_opt
-                 .groupby(network.storage_units.bus).sum(), fill_value=0))[
-                     network.generators.bus.sort_index()].values
-
-        q_distributed.columns = network.generators.index
-
-        q_storages = q_bus[network.storage_units.bus]\
-            .multiply(network.storage_units.p_nom_opt.values) /\
-            ((network.generators.p_nom_opt[
-                network.generators.carrier != 'load shedding']
-              .groupby(network.generators.bus).sum()
-              .add(network.storage_units.p_nom_opt
-                   .groupby(network.storage_units.bus).sum(), fill_value=0))[
-                       network.storage_units.bus].values)
+                    network.storage_units.bus, axis=1
+                ).sum(),
+                fill_value=0,
+            )
+        )
+
+        total_q1 = q_bus.sum().sum()
+        ac_bus = network.buses[network.buses.carrier == "AC"]
+
+        gen_elec = network.generators[
+            (network.generators.bus.isin(ac_bus.index))
+            & (network.generators.carrier != "load shedding")
+            & (network.generators.p_nom > 0)
+        ].sort_index()
+
+        q_distributed = q_bus[gen_elec.bus].multiply(gen_elec.p_nom.values) / (
+            (
+                gen_elec.p_nom.groupby(network.generators.bus)
+                .sum()
+                .reindex(network.generators.bus.unique(), fill_value=0)
+                .add(
+                    network.storage_units.p_nom_opt.groupby(
+                        network.storage_units.bus
+                    ).sum(),
+                    fill_value=0,
+                )
+            )[gen_elec.bus.sort_index()].values
+        )
+
+        q_distributed.columns = gen_elec.index
+
+        q_storages = q_bus[network.storage_units.bus].multiply(
+            network.storage_units.p_nom_opt.values
+        ) / (
+            (
+                gen_elec.p_nom.groupby(network.generators.bus)
+                .sum()
+                .add(
+                    network.storage_units.p_nom_opt.groupby(
+                        network.storage_units.bus
+                    ).sum(),
+                    fill_value=0,
+                )
+            )[network.storage_units.bus].values
+        )
 
         q_storages.columns = network.storage_units.index
 
@@ -521,37 +1014,49 @@ def distribute_q(network, allocation='p_nom'):
     network.generators_t.q = q_distributed
     network.storage_units_t.q = q_storages
 
+    total_q2 = q_distributed.sum().sum() + q_storages.sum().sum()
+    print(
+        f"Relative error in q distribution: "
+        f"{(total_q2 - total_q1) / total_q1:.4%}"
+    )
+
     return network
 
 
-def calc_line_losses(network):
-    """ Calculate losses per line with PF result data
+def calc_line_losses(network, converged):
+    """
+    Calculate losses per line with PF result data.
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
-    s0 : series
-        apparent power of line
-    i0 : series
-        current of line
+    network : pypsa.Network object
+        Container for all network components.
+    converged : pd.Series
+        Convergence status per snapshot (True where the PF converged).
+
+    Returns
     -------
+    None.
 
     """
-    # Line losses
     # calculate apparent power S = sqrt(p² + q²) [in MW]
-    s0_lines = ((network.lines_t.p0**2 + network.lines_t.q0**2).
-                apply(np.sqrt))
+    s0_lines = (network.lines_t.p0**2 + network.lines_t.q0**2).apply(
+        np.sqrt
+    )
+    # in case some snapshots did not converge, discard them from the
+    # calculation
+    s0_lines.loc[converged[~converged].index, :] = np.nan
 
     # calculate current I = S / U [in A]
-    i0_lines = np.multiply(s0_lines, 1000000) / \
-        np.multiply(network.lines.v_nom, 1000)
+    i0_lines = np.multiply(s0_lines, 1000000) / np.multiply(
+        network.lines.v_nom, 1000
+    )
 
    # calculate losses per line and timestep:
    # network.lines_t.losses = I² * R [in MW]
-    network.lines_t.losses = np.divide(i0_lines**2 * network.lines.r, 1000000)
+    network.lines_t.losses = np.divide(
+        i0_lines**2 * network.lines.r, 1000000
+    )
 
     # calculate total losses per line [in MW]
     network.lines = network.lines.assign(
-        losses=np.sum(network.lines_t.losses).values)
+        losses=np.sum(network.lines_t.losses).values
+    )
 
     # Transformer losses
     # https://books.google.de/books?id=0glcCgAAQBAJ&pg=PA151&lpg=PA151&dq=
@@ -562,71 +1067,91 @@ def calc_line_losses(network):
     # Crastan, Elektrische Energieversorgung, p.151
     # trafo 1000 MVA: 99.8 %
     network.transformers = network.transformers.assign(
-        losses=np.multiply(network.transformers.s_nom, (1 - 0.998)).values)
+        losses=np.multiply(network.transformers.s_nom, (1 - 0.998)).values
+    )
+
+    main_subnet = str(network.buses.sub_network.value_counts().argmax())
+    price_per_bus = network.buses_t.marginal_price[
+        network.buses.sub_network[
+            network.buses.sub_network == main_subnet
+        ].index
+    ]
 
     # calculate total losses (possibly enhance with adding these values
     # to network container)
     losses_total = sum(network.lines.losses) + sum(network.transformers.losses)
     print("Total line losses for all snapshots [MW]:", round(losses_total, 2))
-    losses_costs = losses_total * np.average(network.buses_t.marginal_price)
+    losses_costs = losses_total * np.average(price_per_bus)
     print("Total costs for these losses [EUR]:", round(losses_costs, 2))
-
-    return
+    if (~converged).sum() > 0:
+        print(
+            f"Note: {(~converged).sum()} snapshot(s) were excluded "
+            + "because the PF did not converge"
+        )
 
 
 def set_slack(network):
-    """
-    Function that chosses the bus with the maximum installed power as slack
+    """
+    Function that chooses the bus with the maximum installed power as slack.
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
 
     Returns
     -------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
-
-
+    network : pypsa.Network object
+        Container for all network components.
     """
 
-    old_slack = network.generators.index[network.
-                                         generators.control == 'Slack'][0]
+    old_slack = network.generators.index[
+        network.generators.control == "Slack"
+    ][0]
     # check if old slack was PV or PQ control:
-    if network.generators.p_nom[old_slack] > 50 and network.generators.\
-            carrier[old_slack] in ('solar', 'wind'):
-        old_control = 'PQ'
-    elif network.generators.p_nom[old_slack] > 50 and network.generators.\
-            carrier[old_slack] not in ('solar', 'wind'):
-        old_control = 'PV'
-    elif network.generators.p_nom[old_slack] < 50:
-        old_control = 'PQ'
+    if network.generators.p_nom[old_slack] > 50 and network.generators.carrier[
+        old_slack
+    ] in ("solar", "wind"):
+        old_control = "PQ"
+    elif network.generators.p_nom[
+        old_slack
+    ] > 50 and network.generators.carrier[old_slack] not in ("solar", "wind"):
+        old_control = "PV"
+    else:
+        # covers p_nom <= 50, so old_control is always defined
+        old_control = "PQ"
 
     old_gens = network.generators
     gens_summed = network.generators_t.p.sum()
-    old_gens['p_summed'] = gens_summed
-    max_gen_buses_index = old_gens.groupby(['bus']).agg(
-        {'p_summed': np.sum}).p_summed.sort_values().index
+    old_gens["p_summed"] = gens_summed
+    max_gen_buses_index = (
+        old_gens.groupby(["bus"])
+        .agg({"p_summed": np.sum})
+        .p_summed.sort_values()
+        .index
+    )
 
     for bus_iter in range(1, len(max_gen_buses_index) - 1):
-        if old_gens[(network.
-                     generators['bus'] == max_gen_buses_index[-bus_iter]) &
-                    (network.generators['control'] != 'PQ')].empty:
+        if old_gens[
+            (network.generators["bus"] == max_gen_buses_index[-bus_iter])
+            & (network.generators["control"] != "PQ")
+        ].empty:
             continue
         else:
            new_slack_bus = max_gen_buses_index[-bus_iter]
           break
 
-    network.generators = network.generators.drop('p_summed', 1)
-    new_slack_gen = network.generators.\
-        p_nom[(network.generators['bus'] == new_slack_bus) & (
-            network.generators['control'] == 'PV')].sort_values().index[-1]
-
-    network.generators.at[old_slack, 'control'] = old_control
-    network.generators.at[new_slack_gen, 'control'] = 'Slack'
+    network.generators = network.generators.drop(columns=["p_summed"])
+    new_slack_gen = (
+        network.generators.p_nom[
+            (network.generators["bus"] == new_slack_bus)
+            & (network.generators["control"] == "PV")
+        ]
+        .sort_values()
+        .index[-1]
+    )
 
+    network.generators.at[old_slack, "control"] = old_control
+    network.generators.at[new_slack_gen, "control"] = "Slack"
 
-    return network
\ No newline at end of file
+    return network
diff --git a/etrago/tools/extendable.py b/etrago/tools/extendable.py
index b8c080484..1139b5849 100644
--- a/etrago/tools/extendable.py
+++ b/etrago/tools/extendable.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016-2018 Flensburg University of Applied Sciences,
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
 #  Europa-Universität Flensburg,
 #  Centre for Sustainable Energy Systems,
 #  DLR-Institute for Networked Energy Systems
@@ -19,329 +19,661 @@
 # File description
 """
-Extendable.py defines function to set PyPSA-components extendable.
+Extendable.py defines functions to set PyPSA components extendable.
""" -from etrago.tools.utilities import ( - set_line_costs, - set_trafo_costs, - convert_capital_costs, - find_snapshots, - buses_by_country) - -from etrago.cluster.snapshot import snapshot_clustering +from math import sqrt +import time import numpy as np +import pandas as pd -import time - -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") +from etrago.cluster.snapshot import snapshot_clustering +from etrago.tools.utilities import convert_capital_costs, find_snapshots + +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" - - -def extendable(self, line_max): - +__author__ = """ulfmueller, s3pp, wolfbunke, mariusves, lukasol, ClaraBuettner, + KathiEsterl, CarlosEpia""" + + +def extendable( + self, + grid_max_D=None, + grid_max_abs_D={ + "380": {"i": 1020, "wires": 4, "circuits": 4}, + "220": {"i": 1020, "wires": 4, "circuits": 4}, + "110": {"i": 1020, "wires": 4, "circuits": 2}, + "dc": 0, + }, + grid_max_foreign=4, + grid_max_abs_foreign=None, +): """ - Function that sets selected components extendable - - 'network' for all lines, links and transformers - 'german_network' for all lines, links and transformers located in Germany - 'foreign_network' for all foreign lines, links and transformers - 'transformers' for all transformers - 'storages' for extendable storages - 'overlay_network' for lines, links and trafos in extension scenerio(s) + Function that sets selected components extendable. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA - args : dict - Arguments set in appl.py - + grid_max_D : int, optional + Upper bounds for electrical grid expansion relative to existing + capacity. The default is None. + grid_max_abs_D : dict, optional + Absolute upper bounds for electrical grid expansion in Germany. + grid_max_foreign : int, optional + Upper bounds for expansion of electrical foreign lines relative to the + existing capacity. The default is 4. + grid_max_abs_foreign : dict, optional + Absolute upper bounds for expansion of foreign electrical grid. + The default is None. Returns ------- - network : :class:`pypsa.Network - Overall container of PyPSA + None. 
+ """ - network = self.network - if 'network' in self.args['extendable']: + network = self.network + extendable_settings = self.args["extendable"] + + if "as_in_db" not in extendable_settings["extendable_components"]: + network.lines.s_nom_extendable = False + network.transformers.s_nom_extendable = False + network.links.p_nom_extendable = False + network.storage_units.p_nom_extendable = False + network.stores.e_nom_extendable = False + network.generators.p_nom_extendable = False + + if "H2_feedin" not in extendable_settings["extendable_components"]: + network.mremove( + "Link", network.links[network.links.carrier == "H2_feedin"].index + ) + + if "network" in extendable_settings["extendable_components"]: network.lines.s_nom_extendable = True network.lines.s_nom_min = network.lines.s_nom - if not line_max == None: - network.lines.s_nom_max = line_max * network.lines.s_nom - - else: - network.lines.s_nom_max = float("inf") - if not network.transformers.empty: network.transformers.s_nom_extendable = True network.transformers.s_nom_min = network.transformers.s_nom - - if not line_max == None: - network.transformers.s_nom_max =\ - line_max * network.transformers.s_nom - - else: - network.transformers.s_nom_max = float("inf") + network.transformers.s_nom_max = float("inf") if not network.links.empty: - network.links.p_nom_extendable = True - network.links.p_nom_min = network.links.p_nom - network.links.p_nom_max = float("inf") - if not line_max == None: - network.links.p_nom_max =\ - line_max * network.links.p_nom - - else: - network.links.p_nom_max = float("inf") - - network = set_line_costs(network) - network = set_trafo_costs(network) - - if 'german_network' in self.args['extendable']: - buses = network.buses[~network.buses.index.isin( - buses_by_country(network).index)] - network.lines.loc[(network.lines.bus0.isin(buses.index)) & - (network.lines.bus1.isin(buses.index)), - 's_nom_extendable'] = True - network.lines.loc[(network.lines.bus0.isin(buses.index)) & - (network.lines.bus1.isin(buses.index)), - 's_nom_min'] = network.lines.s_nom - network.lines.loc[(network.lines.bus0.isin(buses.index)) & - (network.lines.bus1.isin(buses.index)), - 's_nom_max'] = float("inf") - - if not line_max == None: - network.lines.loc[ - (network.lines.bus0.isin(buses.index)) & - (network.lines.bus1.isin(buses.index)), - 's_nom_max'] = line_max * network.lines.s_nom - - else: - network.lines.loc[ - (network.lines.bus0.isin(buses.index)) & - (network.lines.bus1.isin(buses.index)), - 's_nom_max'] = float("inf") + network.links.loc[ + network.links.carrier == "DC", "p_nom_extendable" + ] = True + network.links.loc[ + network.links.carrier == "DC", "p_nom_min" + ] = network.links.p_nom + network.links.loc[ + network.links.carrier == "DC", "p_nom_max" + ] = float("inf") + + if "german_network" in extendable_settings["extendable_components"]: + buses = network.buses[network.buses.country == "DE"] + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)), + "s_nom_extendable", + ] = True + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)), + "s_nom_min", + ] = network.lines.s_nom + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)), + "s_nom_max", + ] = float("inf") if not network.transformers.empty: - network.transformers.loc[network.transformers.bus0.isin( - buses.index), 's_nom_extendable'] = True - network.transformers.loc[network.transformers.bus0.isin( - 
buses.index), 's_nom_min'] = network.transformers.s_nom - - if not line_max == None: - network.transformers.loc[network.transformers.bus0.isin( - buses.index), 's_nom_max'] = \ - line_max * network.transformers.s_nom - - else: - network.transformers.loc[network.transformers.bus0.isin( - buses.index), 's_nom_max'] = float("inf") - - if not network.links.empty: - network.links.loc[(network.links.bus0.isin(buses.index)) & - (network.links.bus1.isin(buses.index)), - 'p_nom_extendable'] = True - network.links.loc[(network.links.bus0.isin(buses.index)) & - (network.links.bus1.isin(buses.index)), - 'p_nom_min'] = network.links.p_nom - - if not line_max == None: - network.links.loc[ - (network.links.bus0.isin(buses.index)) & - (network.links.bus1.isin(buses.index)), - 'p_nom_max'] = line_max * network.links.p_nom - - else: - network.links.loc[ - (network.links.bus0.isin(buses.index)) & - (network.links.bus1.isin(buses.index)), - 'p_nom_max'] = float("inf") - - network = set_line_costs(network) - network = set_trafo_costs(network) - - if 'foreign_network' in self.args['extendable']: - buses = network.buses[network.buses.index.isin( - buses_by_country(network).index)] - network.lines.loc[network.lines.bus0.isin(buses.index) | - network.lines.bus1.isin(buses.index), - 's_nom_extendable'] = True - network.lines.loc[network.lines.bus0.isin(buses.index) | - network.lines.bus1.isin(buses.index), - 's_nom_min'] = network.lines.s_nom - - if not line_max == None: - network.lines.loc[network.lines.bus0.isin(buses.index) | - network.lines.bus1.isin(buses.index), - 's_nom_max'] = line_max * network.lines.s_nom - - else: - network.lines.loc[network.lines.bus0.isin(buses.index) | - network.lines.bus1.isin(buses.index), - 's_nom_max'] = float("inf") + network.transformers.loc[ + network.transformers.bus0.isin(buses.index), "s_nom_extendable" + ] = True + network.transformers.loc[ + network.transformers.bus0.isin(buses.index), "s_nom_min" + ] = network.transformers.s_nom + network.transformers.loc[ + network.transformers.bus0.isin(buses.index), "s_nom_max" + ] = float("inf") + + if not network.links[network.links.carrier == "DC"].empty: + network.links.loc[ + (network.links.bus0.isin(buses.index)) + & (network.links.bus1.isin(buses.index)) + & (network.links.carrier == "DC"), + "p_nom_extendable", + ] = True + network.links.loc[ + (network.links.bus0.isin(buses.index)) + & (network.links.bus1.isin(buses.index)) + & (network.links.carrier == "DC"), + "p_nom_min", + ] = network.links.p_nom + network.links.loc[ + (network.links.bus0.isin(buses.index)) + & (network.links.bus1.isin(buses.index)) + & (network.links.carrier == "DC"), + "p_nom_max", + ] = float("inf") + + if "foreign_network" in extendable_settings["extendable_components"]: + buses = network.buses[network.buses.country != "DE"] + network.lines.loc[ + network.lines.bus0.isin(buses.index) + | network.lines.bus1.isin(buses.index), + "s_nom_extendable", + ] = True + network.lines.loc[ + network.lines.bus0.isin(buses.index) + | network.lines.bus1.isin(buses.index), + "s_nom_min", + ] = network.lines.s_nom + network.lines.loc[ + network.lines.bus0.isin(buses.index) + | network.lines.bus1.isin(buses.index), + "s_nom_max", + ] = float("inf") if not network.transformers.empty: - network.transformers.loc[network.transformers.bus0.isin( - buses.index) | network.transformers.bus1.isin( - buses.index), 's_nom_extendable'] = True - network.transformers.loc[network.transformers.bus0.isin( - buses.index) | network.transformers.bus1.isin( - buses.index), 's_nom_min'] = 
network.transformers.s_nom - - if not line_max == None: - network.transformers.loc[network.transformers.bus0.isin( - buses.index) | network.transformers.bus1.isin( - buses.index), 's_nom_max'] = \ - line_max * network.transformers.s_nom - - else: - network.transformers.loc[network.transformers.bus0.isin( - buses.index) | network.transformers.bus1.isin( - buses.index), 's_nom_max'] = float("inf") - - if not network.links.empty: - network.links.loc[(network.links.bus0.isin(buses.index)) | - (network.links.bus1.isin(buses.index)), - 'p_nom_extendable'] = True - network.links.loc[(network.links.bus0.isin(buses.index)) | - (network.links.bus1.isin(buses.index)), - 'p_nom_min'] = network.links.p_nom - - if not line_max == None: - network.links.loc[ - (network.links.bus0.isin(buses.index)) | - (network.links.bus1.isin(buses.index)), - 'p_nom_max'] = line_max * network.links.p_nom - - else: - network.links.loc[ - (network.links.bus0.isin(buses.index)) | - (network.links.bus1.isin(buses.index)), - 'p_nom_max'] = float("inf") - - network = set_line_costs(network) - network = set_trafo_costs(network) - - if 'transformers' in self.args['extendable']: + network.transformers.loc[ + network.transformers.bus0.isin(buses.index) + | network.transformers.bus1.isin(buses.index), + "s_nom_extendable", + ] = True + network.transformers.loc[ + network.transformers.bus0.isin(buses.index) + | network.transformers.bus1.isin(buses.index), + "s_nom_min", + ] = network.transformers.s_nom + network.transformers.loc[ + network.transformers.bus0.isin(buses.index) + | network.transformers.bus1.isin(buses.index), + "s_nom_max", + ] = float("inf") + + if not network.links[network.links.carrier == "DC"].empty: + network.links.loc[ + ( + (network.links.bus0.isin(buses.index)) + | (network.links.bus1.isin(buses.index)) + ) + & (network.links.carrier == "DC"), + "p_nom_extendable", + ] = True + network.links.loc[ + ( + (network.links.bus0.isin(buses.index)) + | (network.links.bus1.isin(buses.index)) + ) + & (network.links.carrier == "DC"), + "p_nom_min", + ] = network.links.p_nom + network.links.loc[ + ( + (network.links.bus0.isin(buses.index)) + | (network.links.bus1.isin(buses.index)) + ) + & (network.links.carrier == "DC"), + "p_nom_max", + ] = float("inf") + + if "transformers" in extendable_settings["extendable_components"]: network.transformers.s_nom_extendable = True network.transformers.s_nom_min = network.transformers.s_nom network.transformers.s_nom_max = float("inf") - network = set_trafo_costs(network) - - if 'storages' in self.args['extendable'] or 'storage' in self.args['extendable']: - if not network.storage_units.carrier[ - network.storage_units.carrier.str.contains( - 'extendable')].empty: - network.storage_units.loc[network.storage_units.carrier.str.\ - contains('extendable'), - 'p_nom_extendable'] = True - - # Split extendable carriers to keep them seperated in clustering - self.network.storage_units.carrier[ - (self.network.storage_units.carrier == 'extendable_storage')& - (self.network.storage_units.max_hours == 6)] =\ - 'extendable_battery_storage' - - self.network.storage_units.carrier[ - (self.network.storage_units.carrier == 'extendable_storage')& - (self.network.storage_units.max_hours == 168)] = \ - 'extendable_hydrogen_storage' - - if 'foreign_storage' in self.args['extendable']: - network.storage_units.p_nom_extendable[(network.storage_units.bus.isin( - network.buses.index[network.buses.country_code != 'DE'])) & ( - network.storage_units.carrier.isin( - ['battery_storage', 'hydrogen_storage']))] = 
True + if ( + "storages" in extendable_settings["extendable_components"] + or "storage" in extendable_settings["extendable_components"] + or "store" in extendable_settings["extendable_components"] + or "stores" in extendable_settings["extendable_components"] + ): network.storage_units.loc[ - network.storage_units.p_nom_max.isnull(), 'p_nom_max'] = \ - network.storage_units.p_nom - + network.storage_units.carrier == "battery", + "p_nom_extendable", + ] = True + network.storage_units.loc[ + network.storage_units.carrier == "battery", + "p_nom_min", + ] = 0 + network.storage_units.loc[ + network.storage_units.carrier == "battery", + "p_nom_max", + ] = float("inf") network.storage_units.loc[ - (network.storage_units.carrier == 'battery_storage'), - 'capital_cost'] = network.storage_units.loc[ - (network.storage_units.carrier == 'extendable_battery_storage') - & (network.storage_units.max_hours == 6), 'capital_cost'].max() + (network.storage_units.carrier == "battery") + & (network.storage_units.capital_cost == 0), + "capital_cost", + ] = network.storage_units.loc[ + network.storage_units.carrier == "battery", "capital_cost" + ].max() + + ext_stores = [ + "H2_overground", + "H2_underground", + "central_heat_store", + "rural_heat_store", + ] + network.stores.loc[ + network.stores.carrier.isin(ext_stores), + "e_nom_extendable", + ] = True + network.stores.loc[ + network.stores.carrier.isin(ext_stores), + "e_nom_min", + ] = 0 + network.stores.loc[ + network.stores.carrier.isin(ext_stores), + "e_nom_max", + ] = float("inf") + if ( + len( + network.stores.loc[ + (network.stores.carrier.isin(ext_stores)) + & (network.stores.capital_cost == 0) + ] + ) + > 0 + ): + for c in ext_stores: + network.stores.loc[ + (network.stores.carrier == c) + & (network.stores.capital_cost == 0), + "capital_cost", + ] = network.stores.loc[ + (network.stores.carrier == c), "capital_cost" + ].max() + + if "foreign_storage" in extendable_settings["extendable_components"]: + foreign_battery = network.storage_units[ + ( + network.storage_units.bus.isin( + network.buses.index[network.buses.country != "DE"] + ) + ) + & (network.storage_units.carrier == "battery") + ].index + + de_battery = network.storage_units[ + ( + network.storage_units.bus.isin( + network.buses.index[network.buses.country == "DE"] + ) + ) + & (network.storage_units.carrier == "battery") + ].index + + network.storage_units.loc[foreign_battery, "p_nom_extendable"] = True network.storage_units.loc[ - (network.storage_units.carrier == 'hydrogen_storage'), - 'capital_cost'] = network.storage_units.loc[ - (network.storage_units.carrier == 'extendable_hydrogen_storage') & - (network.storage_units.max_hours == 168), 'capital_cost'].max() + foreign_battery, "p_nom_max" + ] = network.storage_units.loc[foreign_battery, "p_nom"] network.storage_units.loc[ - (network.storage_units.carrier == 'battery_storage'), - 'marginal_cost'] = network.storage_units.loc[ - (network.storage_units.carrier == 'extendable_battery_storage') - & (network.storage_units.max_hours == 6), 'marginal_cost'].max() + foreign_battery, "p_nom" + ] = network.storage_units.loc[foreign_battery, "p_nom_min"] network.storage_units.loc[ - (network.storage_units.carrier == 'hydrogen_storage'), - 'marginal_cost'] = network.storage_units.loc[ - (network.storage_units.carrier == 'extendable_hydrogen_storage') & - (network.storage_units.max_hours == 168), 'marginal_cost'].max() + foreign_battery, "capital_cost" + ] = network.storage_units.loc[de_battery, "capital_cost"].max() + network.storage_units.loc[ + 
foreign_battery, "marginal_cost" + ] = network.storage_units.loc[de_battery, "marginal_cost"].max() # Extension settings for extension-NEP 2035 scenarios - if 'overlay_network' in self.args['extendable']: - for i in range(len(self.args['scn_extension'])): - network.lines.loc[network.lines.scn_name == ( - 'extension_' + self.args['scn_extension'][i] - ), 's_nom_extendable'] = True + if "overlay_network" in extendable_settings["extendable_components"]: + for i in range(len(self.args["scn_extension"])): + network.lines.loc[ + network.lines.scn_name + == ("extension_" + self.args["scn_extension"][i]), + "s_nom_extendable", + ] = True - network.lines.loc[network.lines.scn_name == ( - 'extension_' + self.args['scn_extension'][i] - ), 's_nom_max'] = network.lines.s_nom[network.lines.scn_name == ( - 'extension_' + self.args['scn_extension'][i])] + network.lines.loc[ + network.lines.scn_name + == ("extension_" + self.args["scn_extension"][i]), + "s_nom_max", + ] = network.lines.s_nom[ + network.lines.scn_name + == ("extension_" + self.args["scn_extension"][i]) + ] + + network.links.loc[ + network.links.scn_name + == ("extension_" + self.args["scn_extension"][i]), + "p_nom_extendable", + ] = True + + network.transformers.loc[ + network.transformers.scn_name + == ("extension_" + self.args["scn_extension"][i]), + "s_nom_extendable", + ] = True - network.links.loc[network.links.scn_name == ( - 'extension_' + self.args['scn_extension'][i] - ), 'p_nom_extendable'] = True + network.lines.loc[ + network.lines.scn_name + == ("extension_" + self.args["scn_extension"][i]), + "capital_cost", + ] = network.lines.capital_cost + + # constrain network expansion to maximum + + if grid_max_abs_D is not None: + buses = network.buses[ + (network.buses.country == "DE") & (network.buses.carrier == "AC") + ] + + line_max_abs(network=network, buses=buses, line_max_abs=grid_max_abs_D) + + transformer_max_abs(network=network, buses=buses) + + network.links.loc[ + (network.links.bus0.isin(buses.index)) + & (network.links.bus1.isin(buses.index)) + & (network.links.carrier == "DC"), + "p_nom_max", + ] = grid_max_abs_D["dc"] + + if grid_max_abs_foreign is not None: + foreign_buses = network.buses[ + (network.buses.country != "DE") & (network.buses.carrier == "AC") + ] + + line_max_abs( + network=network, + buses=foreign_buses, + line_max_abs=grid_max_abs_foreign, + ) + + transformer_max_abs(network=network, buses=foreign_buses) + + network.links.loc[ + ( + (network.links.bus0.isin(foreign_buses.index)) + | (network.links.bus1.isin(foreign_buses.index)) + ) + & (network.links.carrier == "DC"), + "p_nom_max", + ] = grid_max_abs_foreign["dc"] + + if grid_max_D is not None: + buses = network.buses[ + (network.buses.country == "DE") & (network.buses.carrier == "AC") + ] + + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)), + "s_nom_max", + ] = ( + grid_max_D * network.lines.s_nom + ) + + network.transformers.loc[ + network.transformers.bus0.isin(buses.index), "s_nom_max" + ] = (grid_max_D * network.transformers.s_nom) + + network.links.loc[ + (network.links.bus0.isin(buses.index)) + & (network.links.bus1.isin(buses.index)) + & (network.links.carrier == "DC"), + "p_nom_max", + ] = ( + grid_max_D * network.links.p_nom + ) + + if grid_max_foreign is not None: + foreign_buses = network.buses[ + (network.buses.country != "DE") & (network.buses.carrier == "AC") + ] + + network.lines.loc[ + network.lines.bus0.isin(foreign_buses.index) + | network.lines.bus1.isin(foreign_buses.index), + 
"s_nom_max", + ] = ( + grid_max_foreign * network.lines.s_nom + ) + + network.links.loc[ + ( + (network.links.bus0.isin(foreign_buses.index)) + | (network.links.bus1.isin(foreign_buses.index)) + ) + & (network.links.carrier == "DC"), + "p_nom_max", + ] = ( + grid_max_foreign * network.links.p_nom + ) + + network.transformers.loc[ + network.transformers.bus0.isin(foreign_buses.index) + | network.transformers.bus1.isin(foreign_buses.index), + "s_nom_max", + ] = ( + grid_max_foreign * network.transformers.s_nom + ) + + +def snommax(i=1020, u=380, wires=4, circuits=4): + """ + Function to calculate limitation for capacity expansion. - network.transformers.loc[network.transformers.scn_name == ( - 'extension_' + self.args['scn_extension'][i] - ), 's_nom_extendable'] = True + Parameters + ---------- + i : int, optional + Current. The default is 1020. + u : int, optional + Voltage level. The default is 380. + wires : int, optional + Number of wires per line. The default is 4. + circuits : int, optional + Number of circuits. The default is 4. - network.lines.loc[network.lines.scn_name == ( - 'extension_' + self.args['scn_extension'][i] - ), 'capital_cost'] = network.lines.capital_cost + Returns + ------- + s_nom_max : float + Limitation for capacity expansion. - network.lines.s_nom_min[network.lines.s_nom_extendable == False] =\ - network.lines.s_nom + """ + s_nom_max = (i * u * sqrt(3) * wires * circuits) / 1000 + return s_nom_max + + +def line_max_abs( + network, + buses, + line_max_abs={ + "380": {"i": 1020, "wires": 4, "circuits": 4}, + "220": {"i": 1020, "wires": 4, "circuits": 4}, + "110": {"i": 1020, "wires": 4, "circuits": 2}, + "dc": 0, + }, +): + """ + Function to calculate limitation for capacity expansion of lines in + network. - network.transformers.s_nom_min[network.transformers.s_nom_extendable == \ - False] = network.transformers.s_nom + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + buses : pypsa.Network buses + Considered buses in network. + line_max_abs : dict, optional + Line parameters considered to calculate maximum capacity. - network.lines.s_nom_max[network.lines.s_nom_extendable == False] =\ - network.lines.s_nom + Returns + ------- + None. 
- network.transformers.s_nom_max[network.transformers.s_nom_extendable == \ - False] = network.transformers.s_nom + """ + # calculate the cables of the route between two buses + cables = network.lines.groupby(["bus0", "bus1"]).cables.sum() + cables2 = network.lines.groupby(["bus1", "bus0"]).cables.sum() + doubles_idx = cables.index == cables2.index + cables3 = cables[doubles_idx] + cables2[doubles_idx] + cables4 = cables3.swaplevel() + cables[cables3.index] = cables3 + cables[cables4.index] = cables4 + network.lines["total_cables"] = network.lines.apply( + lambda x: cables[(x.bus0, x.bus1)], axis=1 + ) + s_nom_max_110 = snommax( + u=110, + i=line_max_abs["110"]["i"], + wires=line_max_abs["110"]["wires"], + circuits=line_max_abs["110"]["circuits"], + ) * (network.lines["cables"] / network.lines["total_cables"]) + s_nom_max_220 = snommax( + u=220, + i=line_max_abs["220"]["i"], + wires=line_max_abs["220"]["wires"], + circuits=line_max_abs["220"]["circuits"], + ) * (network.lines["cables"] / network.lines["total_cables"]) + s_nom_max_380 = snommax( + u=380, + i=line_max_abs["380"]["i"], + wires=line_max_abs["380"]["wires"], + circuits=line_max_abs["380"]["circuits"], + ) * (network.lines["cables"] / network.lines["total_cables"]) + # set the s_nom_max depending on the voltage level + # and the share of the route + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)) + & (network.lines.v_nom == 110.0) + & (network.lines.s_nom < s_nom_max_110), + "s_nom_max", + ] = s_nom_max_110 + + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)) + & (network.lines.v_nom == 110.0) + & (network.lines.s_nom >= s_nom_max_110), + "s_nom_max", + ] = network.lines.s_nom + + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)) + & (network.lines.v_nom == 220.0) + & (network.lines.s_nom < s_nom_max_220), + "s_nom_max", + ] = s_nom_max_220 + + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)) + & (network.lines.v_nom == 220.0) + & (network.lines.s_nom >= s_nom_max_220), + "s_nom_max", + ] = network.lines.s_nom + + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)) + & (network.lines.v_nom == 380.0) + & (network.lines.s_nom < s_nom_max_380), + "s_nom_max", + ] = s_nom_max_380 + + network.lines.loc[ + (network.lines.bus0.isin(buses.index)) + & (network.lines.bus1.isin(buses.index)) + & (network.lines.v_nom == 380.0) + & (network.lines.s_nom >= s_nom_max_380), + "s_nom_max", + ] = network.lines.s_nom + + +def transformer_max_abs(network, buses): + """ + Function to calculate limitation for capacity expansion of transformers in + network. - self.convert_capital_costs() + Parameters + ---------- + network : pypsa.Network object + Container for all network components. + buses : pypsa.Network buses + Considered buses in network. - return network + Returns + ------- + None. + """ -def extension_preselection(etrago, method, days=3): + # To determine the maximum extendable capacity of a transformer, the sum of + # the maximum capacities of the lines connected to it is calculated for + # each of its 2 sides. The smallest one is selected. 
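+    # Illustrative example with made-up numbers: if the lines at bus0 of a
+    # transformer allow at most 1000 + 500 = 1500 MVA in total and the single
+    # line at bus1 allows 1200 MVA, the transformer is limited to
+    # s_nom_max = min(1500, 1200) = 1200 MVA.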
+    smax_bus0 = network.lines.s_nom_max.groupby(network.lines.bus0).sum()
+    smax_bus1 = network.lines.s_nom_max.groupby(network.lines.bus1).sum()
+    smax_bus = pd.concat([smax_bus0, smax_bus1], axis=1)
+    smax_bus.columns = ["s_nom_max_0", "s_nom_max_1"]
+    smax_bus = smax_bus.fillna(0)
+    smax_bus["s_nom_max_bus"] = smax_bus.apply(
+        lambda x: x["s_nom_max_0"] + x["s_nom_max_1"], axis=1
+    )
+
+    pmax_links_bus0 = network.links.p_nom_max.groupby(network.links.bus0).sum()
+    pmax_links_bus1 = network.links.p_nom_max.groupby(network.links.bus1).sum()
+    pmax_links_bus = pd.concat([pmax_links_bus0, pmax_links_bus1], axis=1)
+    pmax_links_bus.columns = ["p_nom_max_0", "p_nom_max_1"]
+    pmax_links_bus = pmax_links_bus.fillna(0)
+    pmax_links_bus["p_nom_max_bus"] = pmax_links_bus.apply(
+        lambda x: x["p_nom_max_0"] + x["p_nom_max_1"], axis=1
+    )
+
+    trafo_smax_0 = network.transformers.bus0.map(smax_bus["s_nom_max_bus"])
+    trafo_smax_1 = network.transformers.bus1.map(smax_bus["s_nom_max_bus"])
+    trafo_pmax_0 = (
+        network.transformers.bus0.map(pmax_links_bus["p_nom_max_bus"]) / 2
+    )
+    trafo_pmax_1 = (
+        network.transformers.bus1.map(pmax_links_bus["p_nom_max_bus"]) / 2
+    )
+    trafo_smax = pd.concat(
+        [trafo_smax_0, trafo_smax_1, trafo_pmax_0, trafo_pmax_1], axis=1
+    )
+    trafo_smax = trafo_smax.fillna(0)
+    trafo_smax.columns = ["bus0", "bus1", "dcbus0", "dcbus1"]
+    trafo_smax["s_nom_max"] = trafo_smax[trafo_smax.gt(0)].min(axis=1)
+    network.transformers.loc[
+        network.transformers.bus0.isin(buses.index), "s_nom_max"
+    ] = trafo_smax["s_nom_max"]
+
+    # Since the previous calculation does not depend on the min capacity of
+    # the transformer, there are a few cases where the min capacity is greater
+    # than the calculated maximum. For these cases, the max capacity is set
+    # equal to the min capacity.
+    network.transformers["s_nom_max"] = network.transformers.apply(
+        lambda x: x["s_nom_max"]
+        if float(x["s_nom_max"]) > float(x["s_nom_min"])
+        else x["s_nom_min"],
+        axis=1,
+    )
+
+
+def extension_preselection(etrago, method, days=3):
     """
     Function that preselects lines which are extended in snapshots
     leading to overloading to reduce the number of extension variables.
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
     args : dict
-        Arguments set in appl.py
+        Arguments set in appl.py.
     method: str
         Choose method of selection:
         'extreme_situations' for remarkable timesteps
@@ -352,80 +684,85 @@ def extension_preselection(etrago, method, days=3):
 
     Returns
     -------
-    network : :class:`pypsa.Network
-        Overall container of PyPSA
+    network : pypsa.Network object
+        Container for all network components.
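+
+    Examples
+    --------
+    A minimal sketch, assuming an already initialized Etrago object
+    ``etrago``:
+
+    >>> network = extension_preselection(etrago, "extreme_situations", days=3)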
""" network = etrago.network args = etrago.args weighting = network.snapshot_weightings - if method == 'extreme_situations': - snapshots = find_snapshots(network, 'residual load') - snapshots = snapshots.append(find_snapshots(network, 'wind_onshore')) - snapshots = snapshots.append(find_snapshots(network, 'solar')) + if method == "extreme_situations": + snapshots = find_snapshots(network, "residual load") + snapshots = snapshots.append(find_snapshots(network, "wind_onshore")) + snapshots = snapshots.append(find_snapshots(network, "solar")) snapshots = snapshots.drop_duplicates() snapshots = snapshots.sort_values() - if method == 'snapshot_clustering': - network_cluster = snapshot_clustering(etrago, how='daily') + if method == "snapshot_clustering": + network_cluster = snapshot_clustering(etrago, how="daily") snapshots = network_cluster.snapshots network.snapshot_weightings = network_cluster.snapshot_weightings # Set all lines and trafos extendable in network - network.lines.loc[:, 's_nom_extendable'] = True - network.lines.loc[:, 's_nom_min'] = network.lines.s_nom - network.lines.loc[:, 's_nom_max'] = np.inf + network.lines.loc[:, "s_nom_extendable"] = True + network.lines.loc[:, "s_nom_min"] = network.lines.s_nom + network.lines.loc[:, "s_nom_max"] = np.inf - network.links.loc[:, 'p_nom_extendable'] = True - network.links.loc[:, 'p_nom_min'] = network.links.p_nom - network.links.loc[:, 'p_nom_max'] = np.inf + network.links.loc[:, "p_nom_extendable"] = True + network.links.loc[:, "p_nom_min"] = network.links.p_nom + network.links.loc[:, "p_nom_max"] = np.inf - network.transformers.loc[:, 's_nom_extendable'] = True - network.transformers.loc[:, 's_nom_min'] = network.transformers.s_nom - network.transformers.loc[:, 's_nom_max'] = np.inf + network.transformers.loc[:, "s_nom_extendable"] = True + network.transformers.loc[:, "s_nom_min"] = network.transformers.s_nom + network.transformers.loc[:, "s_nom_max"] = np.inf - network = set_line_costs(network) - network = set_trafo_costs(network) network = convert_capital_costs(network, 1, 1) - extended_lines = network.lines.index[network.lines.s_nom_opt > - network.lines.s_nom] - extended_links = network.links.index[network.links.p_nom_opt > - network.links.p_nom] + extended_lines = network.lines.index[ + network.lines.s_nom_opt > network.lines.s_nom + ] + extended_links = network.links.index[ + network.links.p_nom_opt > network.links.p_nom + ] x = time.time() for i in range(int(snapshots.value_counts().sum())): if i > 0: - network.lopf(snapshots[i], solver_name=args['solver']) + network.lopf(snapshots[i], solver_name=args["solver"]) extended_lines = extended_lines.append( - network.lines.index[network.lines.s_nom_opt > - network.lines.s_nom]) + network.lines.index[ + network.lines.s_nom_opt > network.lines.s_nom + ] + ) extended_lines = extended_lines.drop_duplicates() extended_links = extended_links.append( - network.links.index[network.links.p_nom_opt > - network.links.p_nom]) + network.links.index[ + network.links.p_nom_opt > network.links.p_nom + ] + ) extended_links = extended_links.drop_duplicates() print("Number of preselected lines: ", len(extended_lines)) - network.lines.loc[~network.lines.index.isin(extended_lines), - 's_nom_extendable'] = False - network.lines.loc[network.lines.s_nom_extendable, 's_nom_min']\ - = network.lines.s_nom - network.lines.loc[network.lines.s_nom_extendable, 's_nom_max']\ - = np.inf - - network.links.loc[~network.links.index.isin(extended_links), - 'p_nom_extendable'] = False - 
network.links.loc[network.links.p_nom_extendable, 'p_nom_min']\ - = network.links.p_nom - network.links.loc[network.links.p_nom_extendable, 'p_nom_max']\ - = np.inf + network.lines.loc[ + ~network.lines.index.isin(extended_lines), "s_nom_extendable" + ] = False + network.lines.loc[ + network.lines.s_nom_extendable, "s_nom_min" + ] = network.lines.s_nom + network.lines.loc[network.lines.s_nom_extendable, "s_nom_max"] = np.inf + + network.links.loc[ + ~network.links.index.isin(extended_links), "p_nom_extendable" + ] = False + network.links.loc[ + network.links.p_nom_extendable, "p_nom_min" + ] = network.links.p_nom + network.links.loc[network.links.p_nom_extendable, "p_nom_max"] = np.inf network.snapshot_weightings = weighting - network = set_line_costs(network) - network = set_trafo_costs(network) - network = convert_capital_costs(network, args['start_snapshot'], - args['end_snapshot']) + network = convert_capital_costs( + network, args["start_snapshot"], args["end_snapshot"] + ) y = time.time() z1st = (y - x) / 60 @@ -434,14 +771,14 @@ def extension_preselection(etrago, method, days=3): return network -def print_expansion_costs(network): - """ Function that prints network and storage investment costs +def print_expansion_costs(network): + """Function that prints network and storage investment costs. Parameters ---------- - network : :class:`pypsa.Network - Overall container of PyPSA + network : pypsa.Network object + Container for all network components. Returns ------- @@ -455,25 +792,38 @@ def print_expansion_costs(network): ext_trafos = network.transformers[network.transformers.s_nom_extendable] if not ext_storage.empty: - storage_costs = (ext_storage.p_nom_opt*ext_storage.capital_cost).sum() + storage_costs = ( + ext_storage.p_nom_opt * ext_storage.capital_cost + ).sum() if not ext_lines.empty: - network_costs = (( - (ext_lines.s_nom_opt-ext_lines.s_nom)*ext_lines.capital_cost - ).sum() + - (ext_links.p_nom_opt-ext_links.p_nom)*ext_links.capital_cost).sum() + network_costs = ( + ( + (ext_lines.s_nom_opt - ext_lines.s_nom) + * ext_lines.capital_cost + ).sum() + + (ext_links.p_nom_opt - ext_links.p_nom) * ext_links.capital_cost + ).sum() if not ext_trafos.empty: - network_costs = network_costs+( - (ext_trafos.s_nom_opt-ext_trafos.s_nom - )*ext_trafos.capital_cost).sum() + network_costs = ( + network_costs + + ( + (ext_trafos.s_nom_opt - ext_trafos.s_nom) + * ext_trafos.capital_cost + ).sum() + ) if not ext_storage.empty: print( - "Investment costs for all storage units in selected snapshots [EUR]:", - round(storage_costs, 2)) + """Investment costs for all storage units in selected snapshots + [EUR]:""", + round(storage_costs, 2), + ) if not ext_lines.empty: print( - "Investment costs for all lines and transformers in selected snapshots [EUR]:", - round(network_costs, 2)) + """Investment costs for all lines and transformers in selected + snapshots [EUR]:""", + round(network_costs, 2), + ) diff --git a/etrago/tools/io.py b/etrago/tools/io.py index 073dd2490..c87bda98b 100644 --- a/etrago/tools/io.py +++ b/etrago/tools/io.py @@ -24,10 +24,8 @@ Input/output operations between powerflow schema in the oedb and PyPSA. Additionally oedb wrapper classes to instantiate PyPSA network objects. 
- Attributes ----------- - +----------- packagename: str Package containing orm class definitions temp_ormclass: str @@ -36,112 +34,60 @@ Orm class name of table with carrier id to carrier name datasets Notes ------ +------- A configuration file connecting the chosen optimization method with components to be queried is needed for NetworkScenario class. """ -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, mariusves" +__author__ = "ulfmueller, mariusves, pieterhexen, ClaraBuettner" -import pypsa from importlib import import_module -import pandas as pd -from collections import OrderedDict -import re -import json import os -import numpy as np -if 'READTHEDOCS' not in os.environ: - from geoalchemy2.shape import to_shape - from sqlalchemy.orm.exc import NoResultFound - from sqlalchemy import and_, func, or_ -#from etrago.tools.nep import add_by_scenario, add_series_by_scenario +import numpy as np +import pandas as pd +import pypsa -packagename = 'egoio.db_tables' -temp_ormclass = 'TempResolution' -carr_ormclass = 'Source' +if "READTHEDOCS" not in os.environ: + import logging + from sqlalchemy.orm.exc import NoResultFound + import saio -def load_config_file(filename='config.json'): - dirname = os.path.dirname(__file__) - path = os.path.join(dirname, filename) - return json.load(open(path), object_pairs_hook=OrderedDict) + logger = logging.getLogger(__name__) +carr_ormclass = "Source" -class ScenarioBase(): - """ Base class to address the dynamic provision of orm classes representing - powerflow components from egoio based on a configuration file +class ScenarioBase: + """Base class to address the dynamic provision of orm classes representing + powerflow components from egoio Parameters ---------- - - config : OrderedDict - Dictionary with orm class names that should be accessable via _mapped. session : sqla.orm.session.Session Handles conversations with the database. version : str Version number of data version control in grid schema of the oedb. - prefix : str - Common prefix of component orm classnames in egoio. """ - def __init__( - self, session, method='lopf', configpath='config.json', version=None, - prefix='EgoGridPfHv'): - - global packagename - global temp_ormclass + def __init__(self, engine, session, version=None): global carr_ormclass - schema = 'grid' if version else 'model_draft' - - self.config = load_config_file(configpath)[method] + saio.register_schema("grid", engine) self.session = session self.version = version - self._prefix = prefix - #: module: Providing orm class definitions to oedb - self._pkg = import_module(packagename + '.' 
+ schema) - #: dict: Container for orm classes corresponding to configuration file - self._mapped = {} - - # Populate _mapped with orm classes according to config - for k, v in self.config.items(): - self.map_ormclass(k) - if isinstance(v, dict): - for kk in v.keys(): - self.map_ormclass(kk) - - # map temporal resolution table - self.map_ormclass(temp_ormclass) - - # map carrier id to carrier table - self.map_ormclass(carr_ormclass) - - def map_ormclass(self, name): - """ Populate _mapped attribute with orm class - - Parameters - ---------- - name : str - Component part of orm class name. Concatenated with _prefix. - """ - - try: - self._mapped[name] = getattr(self._pkg, self._prefix + name) - - except AttributeError: - print('Warning: Relation %s does not exist.' % name) class NetworkScenario(ScenarioBase): - """ Adapter class between oedb powerflow data and PyPSA. Provides the + """Adapter class between oedb powerflow data and PyPSA. Provides the method build_network to generate a pypsa.Network. Parameters @@ -159,60 +105,76 @@ class NetworkScenario(ScenarioBase): """ def __init__( - self, session, scn_name='Status Quo', method='lopf', - start_snapshot=1, end_snapshot=20, temp_id=1, **kwargs): - + self, + engine, + session, + scn_name="Status Quo", + start_snapshot=1, + end_snapshot=20, + temp_id=1, + **kwargs, + ): self.scn_name = scn_name - self.method = method self.start_snapshot = start_snapshot self.end_snapshot = end_snapshot self.temp_id = temp_id - super().__init__(session, **kwargs) + super().__init__(engine, session, **kwargs) # network: pypsa.Network self.network = None + saio.register_schema("grid", engine) + self.configure_timeindex() def __repr__(self): - r = ('NetworkScenario: %s' % self.scn_name) + r = "NetworkScenario: %s" % self.scn_name if not self.network: - r += "\nTo create a PyPSA network call .build_network()." + r += """ + \nTo create a PyPSA network call .build_network(). + """ return r def configure_timeindex(self): - """ Construct a DateTimeIndex with the queried temporal resolution, - start- and end_snapshot. """ + """Construct a DateTimeIndex with the queried temporal resolution, + start- and end_snapshot.""" - try: + from saio.grid import egon_etrago_temp_resolution - ormclass = self._mapped['TempResolution'] + try: if self.version: - tr = self.session.query(ormclass).filter( - ormclass.temp_id == self.temp_id).filter( - ormclass.version == self.version).one() + tr = saio.as_pandas( + self.session.query(egon_etrago_temp_resolution) + .filter( + egon_etrago_temp_resolution.version == self.version + ) + .filter( + egon_etrago_temp_resolution.temp_id == self.temp_id + ) + ).squeeze() else: - tr = self.session.query(ormclass).filter( - ormclass.temp_id == self.temp_id).one() + tr = saio.as_pandas( + self.session.query(egon_etrago_temp_resolution).filter( + egon_etrago_temp_resolution.temp_id == self.temp_id + ) + ).squeeze() except (KeyError, NoResultFound): - print('temp_id %s does not exist.' % self.temp_id) + print("temp_id %s does not exist." % self.temp_id) - timeindex = pd.DatetimeIndex(data=pd.date_range( - start=tr.start_time, - periods=tr.timesteps, - freq=tr.resolution)) + timeindex = pd.DatetimeIndex( + data=pd.date_range( + start=tr.start_time, periods=tr.timesteps, freq=tr.resolution + ) + ) - self.timeindex = timeindex[self.start_snapshot - 1: self.end_snapshot] - """ pandas.tseries.index.DateTimeIndex : - Index of snapshots or timesteps. 
""" + self.timeindex = timeindex[self.start_snapshot - 1 : self.end_snapshot] def id_to_source(self): - - ormclass = self._mapped['Source'] + ormclass = self._mapped["Source"] query = self.session.query(ormclass) if self.version: @@ -222,7 +184,7 @@ def id_to_source(self): return {k.source_id: k.name for k in query.all()} def fetch_by_relname(self, name): - """ Construct DataFrame with component data from filtered table data. + """Construct DataFrame with component data from filtered table data. Parameters ---------- @@ -234,36 +196,47 @@ def fetch_by_relname(self, name): pd.DataFrame Component data. """ + from saio.grid import ( # noqa: F401 + egon_etrago_bus, + egon_etrago_generator, + egon_etrago_line, + egon_etrago_link, + egon_etrago_load, + egon_etrago_storage, + egon_etrago_store, + egon_etrago_transformer, + ) + + index = f"{name.lower()}_id" + + if name == "Transformer": + index = "trafo_id" - ormclass = self._mapped[name] - query = self.session.query(ormclass) - - if name != carr_ormclass: - - query = query.filter( - ormclass.scn_name == self.scn_name) + query = self.session.query( + vars()[f"egon_etrago_{name.lower()}"] + ).filter( + vars()[f"egon_etrago_{name.lower()}"].scn_name == self.scn_name + ) if self.version: - query = query.filter(ormclass.version == self.version) + query = query.filter( + vars()[f"egon_etrago_{name.lower()}"].version == self.version + ) - # TODO: Naming is not consistent. Change in database required. - if name == 'Transformer': - name = 'Trafo' + df = saio.as_pandas(query, crs=4326).set_index(index) - df = pd.read_sql(query.statement, - self.session.bind, - index_col=name.lower() + '_id') - if name == 'Link': - df['bus0'] = df.bus0.astype(int) - df['bus1'] = df.bus1.astype(int) + if name == "Transformer": + df.tap_side = 0 + df.tap_position = 0 + df = df.drop_duplicates() - if 'source' in df: + if "source" in df: df.source = df.source.map(self.id_to_source()) return df - def series_fetch_by_relname(self, name, column): - """ Construct DataFrame with component timeseries data from filtered + def series_fetch_by_relname(self, network, name, pypsa_name): + """Construct DataFrame with component timeseries data from filtered table data. Parameters @@ -278,127 +251,139 @@ def series_fetch_by_relname(self, name, column): pd.DataFrame Component data. """ + from saio.grid import ( # noqa: F401 + egon_etrago_bus_timeseries, + egon_etrago_generator_timeseries, + egon_etrago_line_timeseries, + egon_etrago_link_timeseries, + egon_etrago_load_timeseries, + egon_etrago_storage_timeseries, + egon_etrago_store_timeseries, + egon_etrago_transformer_timeseries, + ) + + # Select index column + if name == "Transformer": + index_col = "trafo_id" - ormclass = self._mapped[name] + else: + index_col = f"{name.lower()}_id" - # TODO: This is implemented in a not very robust way. - id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id' - id_column = id_column.lower() + # Select columns with time series data + query_columns = self.session.query( + vars()[f"egon_etrago_{name.lower()}_timeseries"] + ).limit(1) - query = self.session.query( - getattr(ormclass, id_column), - getattr(ormclass, column)[self.start_snapshot: self.end_snapshot]. 
-            label(column)).filter(and_(
-            ormclass.scn_name == self.scn_name,
-            ormclass.temp_id == self.temp_id))
+        key_columns = ["scn_name", index_col, "temp_id"]
 
         if self.version:
-            query = query.filter(ormclass.version == self.version)
-
-        df = pd.io.sql.read_sql(query.statement,
-                                self.session.bind,
-                                columns=[column],
-                                index_col=id_column)
-
-        df.index = df.index.astype(str)
-
-        # change of format to fit pypsa
-        df = df[column].apply(pd.Series).transpose()
-
-        try:
-            assert not df.empty
-            df.index = self.timeindex
-        except AssertionError:
-            print("No data for %s in column %s." % (name, column))
+            key_columns.append("version")
+
+        columns = saio.as_pandas(query_columns).columns.drop(
+            key_columns, errors="ignore"
+        )
+
+        # Query and import time series data
+        for col in columns:
+            query = self.session.query(
+                getattr(
+                    vars()[f"egon_etrago_{name.lower()}_timeseries"], index_col
+                ),
+                getattr(vars()[f"egon_etrago_{name.lower()}_timeseries"], col)[
+                    self.start_snapshot : self.end_snapshot
+                ],
+            ).filter(
+                vars()[f"egon_etrago_{name.lower()}_timeseries"].scn_name
+                == self.scn_name
+            )
+            if self.version:
+                query = query.filter(
+                    vars()[f"egon_etrago_{name.lower()}_timeseries"].version
+                    == self.version
+                )
+
+            df_all = saio.as_pandas(query)
+
+            # Rename index
+            df_all.set_index(index_col, inplace=True)
+
+            df_all.index = df_all.index.astype(str)
+
+            if not df_all.isnull().all().all():
+                # Fill empty lists with default values from pypsa
+                if col in network.component_attrs[pypsa_name].index:
+                    df_all.loc[df_all.anon_1.isnull(), "anon_1"] = df_all.loc[
+                        df_all.anon_1.isnull(), "anon_1"
+                    ].apply(
+                        lambda x: [
+                            float(
+                                network.component_attrs[pypsa_name].default[
+                                    col
+                                ]
+                            )
+                        ]
+                        * len(network.snapshots)
+                    )
+
+                df = df_all.anon_1.apply(pd.Series).transpose()
+
+                df.index = self.timeindex
+
+                pypsa.io.import_series_from_dataframe(
+                    network, df, pypsa_name, col
+                )
 
-        return df
+        return network
 
     def build_network(self, network=None, *args, **kwargs):
-        """ Core method to construct PyPSA Network object.
-        """
-        # TODO: build_network takes care of divergences in database design and
-        # future PyPSA changes from PyPSA's v0.6 on. This concept should be
-        # replaced, when the oedb has a revision system in place, because
-        # sometime this will break!!!
-
-        if network != None:
+        """Core method to construct PyPSA Network object."""
+        if network is not None:
             network = network
 
         else:
             network = pypsa.Network()
             network.set_snapshots(self.timeindex)
 
-        timevarying_override = False
-
-        if pypsa.__version__ == '0.17.1':
-            old_to_new_name = {'Generator':
-                               {'p_min_pu_fixed': 'p_min_pu',
-                                'p_max_pu_fixed': 'p_max_pu',
-                                'source': 'carrier',
-                                'dispatch': 'former_dispatch'},
-                               'Bus':
-                               {'current_type': 'carrier'},
-                               'Transformer':
-                               {'trafo_id': 'transformer_id'},
-                               'Storage':
-                               {'p_min_pu_fixed': 'p_min_pu',
-                                'p_max_pu_fixed': 'p_max_pu',
-                                'soc_cyclic': 'cyclic_state_of_charge',
-                                'soc_initial': 'state_of_charge_initial',
-                                'source': 'carrier'}}
-
-            timevarying_override = True
-
-        else:
-
-            old_to_new_name = {'Storage':
-                               {'soc_cyclic': 'cyclic_state_of_charge',
-                                'soc_initial': 'state_of_charge_initial'}}
-
-        for comp, comp_t_dict in self.config.items():
-
-            # TODO: This is confusing, should be fixed in db
-            pypsa_comp_name = 'StorageUnit' if comp == 'Storage' else comp
+        for comp in [
+            "Bus",
+            "Line",
+            "Transformer",
+            "Link",
+            "Load",
+            "Generator",
+            "Storage",
+            "Store",
+        ]:
+            pypsa_comp = "StorageUnit" if comp == "Storage" else comp
+
+            if comp[-1] == "s":
+                logger.info(f"Importing {comp}es from database")
+            else:
+                logger.info(f"Importing {comp}s from database")
 
             df = self.fetch_by_relname(comp)
 
-            if comp in old_to_new_name:
-
-                tmp = old_to_new_name[comp]
-                df.rename(columns=tmp, inplace=True)
+            # Drop columns with only NaN values
+            df = df.drop(df.isnull().all()[df.isnull().all()].index, axis=1)
 
-            network.import_components_from_dataframe(df, pypsa_comp_name)
+            # Replace NaN values with default values from pypsa
+            for c in df.columns:
+                if c in network.component_attrs[pypsa_comp].index:
+                    df[c].fillna(
+                        network.component_attrs[pypsa_comp].default[c],
+                        inplace=True,
+                    )
 
-            if comp_t_dict:
+            if pypsa_comp == "Generator":
+                df.sign = 1
 
-                for comp_t, columns in comp_t_dict.items():
+            network.import_components_from_dataframe(df, pypsa_comp)
 
-                    for col in columns:
-
-                        df_series = self.series_fetch_by_relname(comp_t, col)
-
-                        # TODO: VMagPuSet is not implemented.
-                        if timevarying_override and comp == 'Generator' \
-                            and not df_series.empty:
-                            idx = df[df.former_dispatch == 'flexible'].index
-                            idx = [i for i in idx if i in df_series.columns]
-                            df_series.drop(idx, axis=1, inplace=True)
-
-                        try:
-
-                            pypsa.io.import_series_from_dataframe(
-                                network,
-                                df_series,
-                                pypsa_comp_name,
-                                col)
-
-                        except (ValueError, AttributeError):
-                            print("Series %s of component %s could not be "
-                                  "imported" % (col, pypsa_comp_name))
+            network = self.series_fetch_by_relname(network, comp, pypsa_comp)
 
         # populate carrier attribute in PyPSA network
-        network.import_components_from_dataframe(
-            self.fetch_by_relname(carr_ormclass), 'Carrier')
+        # network.import_components_from_dataframe(
+        #     self.fetch_by_relname(carr_ormclass), 'Carrier')
 
         self.network = network
 
@@ -406,33 +391,36 @@ def build_network(self, network=None, *args, **kwargs):
 
 
 def clear_results_db(session):
-    '''Used to clear the result tables in the OEDB. Caution!
- This deletes EVERY RESULT SET!''' - - from egoio.db_tables.model_draft import EgoGridPfHvResultBus as BusResult,\ - EgoGridPfHvResultBusT as BusTResult,\ - EgoGridPfHvResultStorage as StorageResult,\ - EgoGridPfHvResultStorageT as StorageTResult,\ - EgoGridPfHvResultGenerator as GeneratorResult,\ - EgoGridPfHvResultGeneratorT as GeneratorTResult,\ - EgoGridPfHvResultLine as LineResult,\ - EgoGridPfHvResultLineT as LineTResult,\ - EgoGridPfHvResultLoad as LoadResult,\ - EgoGridPfHvResultLoadT as LoadTResult,\ - EgoGridPfHvResultTransformer as TransformerResult,\ - EgoGridPfHvResultTransformerT as TransformerTResult,\ - EgoGridPfHvResultMeta as ResultMeta - print('Are you sure that you want to clear all results in the OEDB?') - choice = '' - while choice not in ['y', 'n']: - choice = input('(y/n): ') - if choice == 'y': - print('Are you sure?') - choice2 = '' - while choice2 not in ['y', 'n']: - choice2 = input('(y/n): ') - if choice2 == 'y': - print('Deleting all results...') + """Used to clear the result tables in the OEDB. Caution! + This deletes EVERY RESULT SET!""" + + from egoio.db_tables.model_draft import ( + EgoGridPfHvResultBus as BusResult, + EgoGridPfHvResultBusT as BusTResult, + EgoGridPfHvResultGenerator as GeneratorResult, + EgoGridPfHvResultGeneratorT as GeneratorTResult, + EgoGridPfHvResultLine as LineResult, + EgoGridPfHvResultLineT as LineTResult, + EgoGridPfHvResultLoad as LoadResult, + EgoGridPfHvResultLoadT as LoadTResult, + EgoGridPfHvResultMeta as ResultMeta, + EgoGridPfHvResultStorage as StorageResult, + EgoGridPfHvResultStorageT as StorageTResult, + EgoGridPfHvResultTransformer as TransformerResult, + EgoGridPfHvResultTransformerT as TransformerTResult, + ) + + print("Are you sure that you want to clear all results in the OEDB?") + choice = "" + while choice not in ["y", "n"]: + choice = input("(y/n): ") + if choice == "y": + print("Are you sure?") + choice2 = "" + while choice2 not in ["y", "n"]: + choice2 = input("(y/n): ") + if choice2 == "y": + print("Deleting all results...") session.query(BusResult).delete() session.query(BusTResult).delete() session.query(StorageResult).delete() @@ -448,12 +436,12 @@ def clear_results_db(session): session.query(ResultMeta).delete() session.commit() else: - print('Deleting aborted!') + print("Deleting aborted!") else: - print('Deleting aborted!') + print("Deleting aborted!") -def results_to_oedb(session, network, args, grid='hv', safe_results=False): +def results_to_oedb(session, network, args, grid="hv", safe_results=False): """Return results obtained from PyPSA to oedb Parameters @@ -473,44 +461,47 @@ def results_to_oedb(session, network, args, grid='hv', safe_results=False): """ # Update generator_ids when k_means clustering to get integer ids - if args['network_clustering_kmeans'] != False: - new_index=pd.DataFrame(index = network.generators.index) - new_index['new']=range(len(network.generators)) + if args["network_clustering_kmeans"]: + new_index = pd.DataFrame(index=network.generators.index) + new_index["new"] = range(len(network.generators)) - for col in (network.generators_t): + for col in network.generators_t: if not network.generators_t[col].empty: - network.generators_t[col].columns =\ - new_index.new[network.generators_t[col].columns] + network.generators_t[col].columns = new_index.new[ + network.generators_t[col].columns + ] network.generators.index = range(len(network.generators)) # moved this here to prevent error when not using the mv-schema import datetime - if grid.lower() == 'mv': - print('MV currently 
not implemented') - elif grid.lower() == 'hv': - from egoio.db_tables.model_draft import\ - EgoGridPfHvResultBus as BusResult,\ - EgoGridPfHvResultBusT as BusTResult,\ - EgoGridPfHvResultStorage as StorageResult,\ - EgoGridPfHvResultStorageT as StorageTResult,\ - EgoGridPfHvResultGenerator as GeneratorResult,\ - EgoGridPfHvResultGeneratorT as GeneratorTResult,\ - EgoGridPfHvResultLine as LineResult,\ - EgoGridPfHvResultLineT as LineTResult,\ - EgoGridPfHvResultLoad as LoadResult,\ - EgoGridPfHvResultLoadT as LoadTResult,\ - EgoGridPfHvResultTransformer as TransformerResult,\ - EgoGridPfHvResultTransformerT as TransformerTResult,\ - EgoGridPfHvResultMeta as ResultMeta,\ - EgoGridPfHvSource as Source + + if grid.lower() == "mv": + print("MV currently not implemented") + elif grid.lower() == "hv": + from egoio.db_tables.model_draft import ( + EgoGridPfHvResultBus as BusResult, + EgoGridPfHvResultBusT as BusTResult, + EgoGridPfHvResultGenerator as GeneratorResult, + EgoGridPfHvResultGeneratorT as GeneratorTResult, + EgoGridPfHvResultLine as LineResult, + EgoGridPfHvResultLineT as LineTResult, + EgoGridPfHvResultLoad as LoadResult, + EgoGridPfHvResultLoadT as LoadTResult, + EgoGridPfHvResultMeta as ResultMeta, + EgoGridPfHvResultStorage as StorageResult, + EgoGridPfHvResultStorageT as StorageTResult, + EgoGridPfHvResultTransformer as TransformerResult, + EgoGridPfHvResultTransformerT as TransformerTResult, + EgoGridPfHvSource as Source, + ) else: - print('Please enter mv or hv!') + print("Please enter mv or hv!") - print('Uploading results to db...') + print("Uploading results to db...") # get last result id and get new one - last_res_id = session.query(func.max(ResultMeta.result_id)).scalar() - if last_res_id == None: + last_res_id = session.query(max(ResultMeta.result_id)).scalar() + if last_res_id is None: new_res_id = 1 else: new_res_id = last_res_id + 1 @@ -519,20 +510,24 @@ def results_to_oedb(session, network, args, grid='hv', safe_results=False): res_meta = ResultMeta() meta_misc = [] for arg, value in args.items(): - if arg not in dir(res_meta) and arg not in ['db', 'lpfile', - 'results', 'export']: + if arg not in dir(res_meta) and arg not in [ + "db", + "lpfile", + "results", + "export", + ]: meta_misc.append([arg, str(value)]) res_meta.result_id = new_res_id - res_meta.scn_name = args['scn_name'] + res_meta.scn_name = args["scn_name"] res_meta.calc_date = datetime.datetime.now() - res_meta.user_name = args['user_name'] - res_meta.method = args['method'] - res_meta.start_snapshot = args['start_snapshot'] - res_meta.end_snapshot = args['end_snapshot'] + res_meta.user_name = args["user_name"] + res_meta.method = args["method"] + res_meta.start_snapshot = args["start_snapshot"] + res_meta.end_snapshot = args["end_snapshot"] res_meta.safe_results = safe_results res_meta.snapshots = network.snapshots.tolist() - res_meta.solver = args['solver'] + res_meta.solver = args["solver"] res_meta.settings = meta_misc session.add(res_meta) @@ -543,107 +538,145 @@ def results_to_oedb(session, network, args, grid='hv', safe_results=False): for gen in network.generators.index: if network.generators.carrier[gen] not in sources.name.values: new_source = Source() - new_source.source_id = session.query( - func.max(Source.source_id)).scalar()+1 + new_source.source_id = ( + session.query(max(Source.source_id)).scalar() + 1 + ) new_source.name = network.generators.carrier[gen] session.add(new_source) session.commit() sources = pd.read_sql( - session.query(Source).statement, session.bind) + 
session.query(Source).statement, session.bind + ) try: old_source_id = int( sources.source_id[ - sources.name == network.generators.carrier[gen]]) - network.generators.set_value(gen, 'source', int(old_source_id)) + sources.name == network.generators.carrier[gen] + ] + ) + network.generators.set_value(gen, "source", int(old_source_id)) except: print( - 'Source ' + network.generators.carrier[gen] + - ' is not in the source table!') + "Source " + + network.generators.carrier[gen] + + " is not in the source table!" + ) for stor in network.storage_units.index: if network.storage_units.carrier[stor] not in sources.name.values: new_source = Source() - new_source.source_id = session.query( - func.max(Source.source_id)).scalar()+1 + new_source.source_id = ( + session.query(max(Source.source_id)).scalar() + 1 + ) new_source.name = network.storage_units.carrier[stor] session.add(new_source) session.commit() sources = pd.read_sql( - session.query(Source).statement, session.bind) + session.query(Source).statement, session.bind + ) try: old_source_id = int( sources.source_id[ - sources.name == network.storage_units.carrier[stor]]) - network.storage_units.set_value(stor, 'source', int(old_source_id)) + sources.name == network.storage_units.carrier[stor] + ] + ) + network.storage_units.set_value(stor, "source", int(old_source_id)) except: print( - 'Source ' + network.storage_units.carrier[stor] + - ' is not in the source table!') - - whereismyindex = {BusResult: network.buses.index, - LoadResult: network.loads.index, - LineResult: network.lines.index, - TransformerResult: network.transformers.index, - StorageResult: network.storage_units.index, - GeneratorResult: network.generators.index, - BusTResult: network.buses.index, - LoadTResult: network.loads.index, - LineTResult: network.lines.index, - TransformerTResult: network.transformers.index, - StorageTResult: network.storage_units.index, - GeneratorTResult: network.generators.index} - - whereismydata = {BusResult: network.buses, - LoadResult: network.loads, - LineResult: network.lines, - TransformerResult: network.transformers, - StorageResult: network.storage_units, - GeneratorResult: network.generators, - BusTResult: network.buses_t, - LoadTResult: network.loads_t, - LineTResult: network.lines_t, - TransformerTResult: network.transformers_t, - StorageTResult: network.storage_units_t, - GeneratorTResult: network.generators_t} - - new_to_old_name = {'p_min_pu_fixed': 'p_min_pu', - 'p_max_pu_fixed': 'p_max_pu', - 'dispatch': 'former_dispatch', - 'current_type': 'carrier', - 'soc_cyclic': 'cyclic_state_of_charge', - 'soc_initial': 'state_of_charge_initial'} - - ormclasses = [BusResult, LoadResult, LineResult, TransformerResult, - GeneratorResult, StorageResult, BusTResult, LoadTResult, - LineTResult, TransformerTResult, GeneratorTResult, - StorageTResult] + "Source " + + network.storage_units.carrier[stor] + + " is not in the source table!" 
+ ) + + whereismyindex = { + BusResult: network.buses.index, + LoadResult: network.loads.index, + LineResult: network.lines.index, + TransformerResult: network.transformers.index, + StorageResult: network.storage_units.index, + GeneratorResult: network.generators.index, + BusTResult: network.buses.index, + LoadTResult: network.loads.index, + LineTResult: network.lines.index, + TransformerTResult: network.transformers.index, + StorageTResult: network.storage_units.index, + GeneratorTResult: network.generators.index, + } + + whereismydata = { + BusResult: network.buses, + LoadResult: network.loads, + LineResult: network.lines, + TransformerResult: network.transformers, + StorageResult: network.storage_units, + GeneratorResult: network.generators, + BusTResult: network.buses_t, + LoadTResult: network.loads_t, + LineTResult: network.lines_t, + TransformerTResult: network.transformers_t, + StorageTResult: network.storage_units_t, + GeneratorTResult: network.generators_t, + } + + new_to_old_name = { + "p_min_pu_fixed": "p_min_pu", + "p_max_pu_fixed": "p_max_pu", + "dispatch": "former_dispatch", + "current_type": "carrier", + "soc_cyclic": "cyclic_state_of_charge", + "soc_initial": "state_of_charge_initial", + } + + ormclasses = [ + BusResult, + LoadResult, + LineResult, + TransformerResult, + GeneratorResult, + StorageResult, + BusTResult, + LoadTResult, + LineTResult, + TransformerTResult, + GeneratorTResult, + StorageTResult, + ] for ormclass in ormclasses: for index in whereismyindex[ormclass]: myinstance = ormclass() columns = ormclass.__table__.columns.keys() - columns.remove('result_id') + columns.remove("result_id") myinstance.result_id = new_res_id for col in columns: - if '_id' in col: + if "_id" in col: class_id_name = col else: continue setattr(myinstance, class_id_name, index) columns.remove(class_id_name) - if str(ormclass)[:-2].endswith('T'): + if str(ormclass)[:-2].endswith("T"): for col in columns: - if col == 'soc_set': + if col == "soc_set": try: - setattr(myinstance, col, getattr( - whereismydata[ormclass], - 'state_of_charge_set')[index].tolist()) + setattr( + myinstance, + col, + getattr( + whereismydata[ormclass], + "state_of_charge_set", + )[index].tolist(), + ) except: pass else: try: - setattr(myinstance, col, getattr( - whereismydata[ormclass], col)[index].tolist()) + setattr( + myinstance, + col, + getattr(whereismydata[ormclass], col)[ + index + ].tolist(), + ) except: pass session.add(myinstance) @@ -651,112 +684,145 @@ def results_to_oedb(session, network, args, grid='hv', safe_results=False): else: for col in columns: if col in new_to_old_name: - if col == 'soc_cyclic': + if col == "soc_cyclic": try: - setattr(myinstance, col, bool( - whereismydata[ormclass].loc[index, - new_to_old_name[col]])) + setattr( + myinstance, + col, + bool( + whereismydata[ormclass].loc[ + index, new_to_old_name[col] + ] + ), + ) except: pass - elif 'Storage' in str(ormclass) and col == 'dispatch': + elif "Storage" in str(ormclass) and col == "dispatch": try: - setattr(myinstance, col, - whereismydata[ormclass].loc[index, col]) + setattr( + myinstance, + col, + whereismydata[ormclass].loc[index, col], + ) except: pass else: try: setattr( - myinstance, col, whereismydata[ormclass].\ - loc[index, new_to_old_name[col]]) + myinstance, + col, + whereismydata[ormclass].loc[ + index, new_to_old_name[col] + ], + ) except: pass - elif col in ['s_nom_extendable', 'p_nom_extendable']: + elif col in ["s_nom_extendable", "p_nom_extendable"]: try: - setattr(myinstance, col, bool( - 
whereismydata[ormclass].loc[index, col]))
+                        setattr(
+                            myinstance,
+                            col,
+                            bool(whereismydata[ormclass].loc[index, col]),
+                        )
                     except:
                         pass
                 else:
                     try:
-                        setattr(myinstance, col,
-                                whereismydata[ormclass].loc[index, col])
+                        setattr(
+                            myinstance,
+                            col,
+                            whereismydata[ormclass].loc[index, col],
+                        )
                     except:
                         pass
             session.add(myinstance)
 
         session.commit()
-    print('Upload finished!')
+    print("Upload finished!")
 
     return
 
 
-def run_sql_script(conn, scriptname='results_md2grid.sql'):
-    """This function runs .sql scripts in the folder 'sql_scripts' """
+def run_sql_script(conn, scriptname="results_md2grid.sql"):
+    """This function runs .sql scripts in the folder 'sql_scripts'"""
 
     script_dir = os.path.abspath(
-        os.path.join(os.path.dirname(__file__), 'sql_scripts'))
+        os.path.join(os.path.dirname(__file__), "sql_scripts")
+    )
     script_str = open(os.path.join(script_dir, scriptname)).read()
     conn.execution_options(autocommit=True).execute(script_str)
 
     return
 
-
 def extension(self, **kwargs):
     """
     Function that adds an additional network to the existing network
     container.
-    The new network can include every PyPSA-component (e.g. buses, lines, links).
+    The new network can include every PyPSA-component (e.g. buses, lines,
+    links).
    To connect it to the existing network, transformers are needed.
 
-    All components and its timeseries of the additional scenario need to be inserted in the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
-    The scn_name in the tables have to be labled with 'extension_' + scn_name (e.g. 'extension_nep2035').
+    All components and their timeseries of the additional scenario need to be
+    inserted in the fitting 'model_draft.ego_grid_pf_hv_extension\_' table.
+    The scn_name in the tables has to be labelled with 'extension\_' + scn_name
+    (e.g. 'extension_nep2035').
 
    Until now, the tables include three additional scenarios:
-    'nep2035_confirmed': all new lines and needed transformers planed in the 'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the Bundesnetzagentur (BNetzA)
-
-    'nep2035_b2': all new lines and needed transformers planned in the NEP 2035 in the scenario 2035 B2
+    'nep2035_confirmed': all new lines and needed transformers planned in the
+    'Netzentwicklungsplan 2035' (NEP2035) that have been confirmed by the
+    Bundesnetzagentur (BNetzA)
 
-    'BE_NO_NEP 2035': DC-lines and transformers to connect the upcomming electrical-neighbours Belgium and Norway
-    Generation, loads and its timeseries in Belgium and Norway for scenario 'NEP 2035'
+    'nep2035_b2': all new lines and needed transformers planned in the NEP 2035
+    in the scenario 2035 B2
+    'BE_NO_NEP 2035': DC-lines and transformers to connect the upcoming
+    electrical neighbours Belgium and Norway
+    Generation, loads and their timeseries in Belgium and Norway for scenario
+    'NEP 2035'
 
-    Parameters
-    -----
-    network : The existing network container (e.g. scenario 'NEP 2035')
-    session : session-data
-    overlay_scn_name : Name of the additional scenario (WITHOUT 'extension_')
-    start_snapshot, end_snapshot: Simulation time
+    Parameters
+    -----------
+    network :
+        The existing network container (e.g.
scenario 'NEP 2035')
+    session :
+        session-data
+    overlay_scn_name :
+        Name of the additional scenario (WITHOUT 'extension\_')
+    start_snapshot :
+    end_snapshot:
+        Simulation time
 
     Returns
-    ------
+    -------
-    network : Network container including existing and additional network
+    Network container including existing and additional network
 
     """
-    if self.args['scn_extension'] is not None:
-
-        if self.args['gridversion'] is None:
-            ormcls_prefix = 'EgoGridPfHvExtension'
+    if self.args["scn_extension"] is not None:
+        if self.args["gridversion"] is None:
+            ormcls_prefix = "EgoGridPfHvExtension"
         else:
-            ormcls_prefix = 'EgoPfHvExtension'
+            ormcls_prefix = "EgoPfHvExtension"
 
-        for i in range(len(self.args['scn_extension'])):
-            scn_extension = self.args['scn_extension'][i]
+        for i in range(len(self.args["scn_extension"])):
+            scn_extension = self.args["scn_extension"][i]
 
             # Adding overlay-network to existing network
             scenario = NetworkScenario(
                 self.session,
-                version = self.args['gridversion'],
+                version=self.args["gridversion"],
                 prefix=ormcls_prefix,
-                method=kwargs.get('method', 'lopf'),
-                start_snapshot=self.args['start_snapshot'],
-                end_snapshot=self.args['end_snapshot'],
-                scn_name='extension_' + scn_extension)
+                method=kwargs.get("method", "lopf"),
+                start_snapshot=self.args["start_snapshot"],
+                end_snapshot=self.args["end_snapshot"],
+                scn_name="extension_" + scn_extension,
+            )
 
             self.network = scenario.build_network(self.network)
 
             # Allow lossless links to conduct bidirectionally
             self.network.links.loc[
-                self.network.links.efficiency == 1.0, 'p_min_pu'] = -1
+                self.network.links.efficiency == 1.0, "p_min_pu"
+            ] = -1
+
 
 def decommissioning(self, **kwargs):
     """
@@ -765,96 +831,103 @@ def decommissioning(self, **kwargs):
     Currently, only lines can be decommissioned.
 
     All components of the decommissioning scenario need to be inserted in
-    the fitting 'model_draft.ego_grid_pf_hv_extension_' table.
-    The scn_name in the tables have to be labled with 'decommissioning_'
+    the fitting 'model_draft.ego_grid_pf_hv_extension\_' table.
+    The scn_name in the tables has to be labelled with 'decommissioning\_'
     + scn_name (e.g. 'decommissioning_nep2035').
 
-
     Parameters
-    -----
-    network : The existing network container (e.g. scenario 'NEP 2035')
-    session : session-data
-    overlay_scn_name : Name of the decommissioning scenario
-
+    -----------
+    network :
+        The existing network container (e.g.
scenario 'NEP 2035') + session : + session-data + overlay_scn_name : + Name of the decommissioning scenario Returns ------ - network : Network container including decommissioning + Network container including decommissioning """ - if self.args['scn_decommissioning'] is not None: - if self.args['gridversion'] == None: - ormclass = getattr(import_module('egoio.db_tables.model_draft'), - 'EgoGridPfHvExtensionLine') + if self.args["scn_decommissioning"] is not None: + if self.args["gridversion"] is None: + ormclass = getattr( + import_module("egoio.db_tables.model_draft"), + "EgoGridPfHvExtensionLine", + ) else: - ormclass = getattr(import_module('egoio.db_tables.grid'), - 'EgoPfHvExtensionLine') + ormclass = getattr( + import_module("egoio.db_tables.grid"), "EgoPfHvExtensionLine" + ) query = self.session.query(ormclass).filter( - ormclass.scn_name == 'decommissioning_' + - self.args['scn_decommissioning']) + ormclass.scn_name + == "decommissioning_" + self.args["scn_decommissioning"] + ) - df_decommisionning = pd.read_sql(query.statement, - self.session.bind, - index_col='line_id') + df_decommisionning = pd.read_sql( + query.statement, self.session.bind, index_col="line_id" + ) df_decommisionning.index = df_decommisionning.index.astype(str) for idx, row in self.network.lines.iterrows(): - if ((row['s_nom_min'] !=0) - & (row['scn_name'] == 'extension_' + - self.args['scn_decommissioning'])): - v_nom_dec = df_decommisionning['v_nom'][( - df_decommisionning.project == row['project']) - & (df_decommisionning.project_id == row['project_id'])] - - self.network.lines.s_nom_min[self.network.lines.index == idx] = self.network.lines.s_nom_min - - ### Drop decommissioning-lines from existing network + if (row["s_nom_min"] != 0) & ( + row["scn_name"] + == "extension_" + self.args["scn_decommissioning"] + ): + self.network.lines.s_nom_min[ + self.network.lines.index == idx + ] = self.network.lines.s_nom_min + + # Drop decommissioning-lines from existing network self.network.lines = self.network.lines[ - ~self.network.lines.index.isin(df_decommisionning.index)] + ~self.network.lines.index.isin(df_decommisionning.index) + ] def distance(x0, x1, y0, y1): """ Function that calculates the square of the distance between two points. - Parameters - ----- - x0: x - coordinate of point 0 - x1: x - coordinate of point 1 - y0: y - coordinate of point 0 - y1: y - coordinate of point 1 - + --------- + x0 : + x - coordinate of point 0 + x1 : + x - coordinate of point 1 + y0 : + y - coordinate of point 0 + y1 : + y - coordinate of point 1 Returns - ------ - distance : float + -------- + distance : float square of distance """ # Calculate square of the distance between two points (Pythagoras) - distance = (x1.values- x0.values)*(x1.values- x0.values)\ - + (y1.values- y0.values)*(y1.values- y0.values) + distance = (x1.values - x0.values) * (x1.values - x0.values) + ( + y1.values - y0.values + ) * (y1.values - y0.values) return distance def calc_nearest_point(bus1, network): """ - Function that finds the geographical nearest point in a network from a given bus. - + Function that finds the geographical nearest point in a network from a + given bus. 
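+
+    Buses that are already connected to the given bus by a line or link are
+    excluded from the search, so the result can serve as the endpoint of a
+    new connection.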
Parameters - ----- - bus1: float + ----------- + bus1 : float id of bus - network: Pypsa network container + network : Pypsa network container network including the comparable buses - Returns - ------ - bus0 : float + ------- + bus0 : float bus_id of nearest point """ @@ -862,43 +935,76 @@ def calc_nearest_point(bus1, network): bus1_index = network.buses.index[network.buses.index == bus1] forbidden_buses = np.append( - bus1_index.values, network.lines.bus1[ - network.lines.bus0 == bus1].values) + bus1_index.values, + network.lines.bus1[network.lines.bus0 == bus1].values, + ) forbidden_buses = np.append( - forbidden_buses, network.lines.bus0[network.lines.bus1 == bus1].values) + forbidden_buses, network.lines.bus0[network.lines.bus1 == bus1].values + ) forbidden_buses = np.append( - forbidden_buses, network.links.bus0[network.links.bus1 == bus1].values) + forbidden_buses, network.links.bus0[network.links.bus1 == bus1].values + ) forbidden_buses = np.append( - forbidden_buses, network.links.bus1[network.links.bus0 == bus1].values) + forbidden_buses, network.links.bus1[network.links.bus0 == bus1].values + ) x0 = network.buses.x[network.buses.index.isin(bus1_index)] y0 = network.buses.y[network.buses.index.isin(bus1_index)] - comparable_buses = network.buses[~network.buses.index.isin( - forbidden_buses)] + comparable_buses = network.buses[ + ~network.buses.index.isin(forbidden_buses) + ] x1 = comparable_buses.x y1 = comparable_buses.y - distance = (x1.values - x0.values)*(x1.values - x0.values) + \ - (y1.values - y0.values)*(y1.values - y0.values) + distance = (x1.values - x0.values) * (x1.values - x0.values) + ( + y1.values - y0.values + ) * (y1.values - y0.values) min_distance = distance.min() - bus0 = comparable_buses[(((x1.values - x0.values)*(x1.values - x0.values - ) + (y1.values - y0.values)*(y1.values - y0.values)) == min_distance)] + bus0 = comparable_buses[ + ( + ( + (x1.values - x0.values) * (x1.values - x0.values) + + (y1.values - y0.values) * (y1.values - y0.values) + ) + == min_distance + ) + ] bus0 = bus0.index[bus0.index == bus0.index.max()] - bus0 = ''.join(bus0.values) + bus0 = "".join(bus0.values) return bus0 -if __name__ == '__main__': - if pypsa.__version__ not in ['0.6.2', '0.11.0']: - print('Pypsa version %s not supported.' % pypsa.__version__) +def add_ch4_h2_correspondence(self): + """ + Method adding the database table grid.egon_etrago_ch4_h2 to self. + It contains the mapping from H2 buses to their corresponding CH4 buses. + + """ + + sql = """ + SELECT "bus_H2", "bus_CH4", scn_name FROM grid.egon_etrago_ch4_h2; + """ + + table = pd.read_sql(sql, self.engine) + + self.ch4_h2_mapping = pd.Series( + table.bus_H2.values, index=table.bus_CH4.values.astype(str) + ) + self.ch4_h2_mapping.index.name = "CH4_bus" + self.ch4_h2_mapping = self.ch4_h2_mapping.astype(str) + + +if __name__ == "__main__": + if pypsa.__version__ not in ["0.6.2", "0.11.0"]: + print("Pypsa version %s not supported." 
% pypsa.__version__) pass diff --git a/etrago/tools/network.py b/etrago/tools/network.py index 5af6a5e22..6a3f3c3a8 100644 --- a/etrago/tools/network.py +++ b/etrago/tools/network.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -23,50 +23,107 @@ """ import logging -import json -import pandas as pd +import os + from pypsa.components import Network -from egoio.tools import db from sqlalchemy.orm import sessionmaker +import pandas as pd + +if "READTHEDOCS" not in os.environ: + from etrago.tools import db + from etrago import __version__ -from etrago.tools.io import (NetworkScenario, - extension, - decommissioning) -from etrago.tools.utilities import (set_branch_capacity, - add_missing_components, - set_random_noise, - geolocation_buses, - check_args, - load_shedding, - set_q_foreign_loads, - foreign_links, - crossborder_capacity, - set_line_voltages, - convert_capital_costs, - get_args_setting, - export_to_csv) -from etrago.tools.plot import (add_coordinates, - plot_grid) -from etrago.tools.extendable import extendable -from etrago.cluster.networkclustering import (run_kmeans_clustering, - ehv_clustering) -from etrago.cluster.snapshot import (skip_snapshots, - snapshot_clustering) from etrago.cluster.disaggregation import run_disaggregation -from etrago.tools.execute import lopf, run_pf_post_lopf -from etrago.tools.calc_results import calc_etrago_results +from etrago.cluster.electrical import ehv_clustering, run_spatial_clustering +from etrago.cluster.gas import run_spatial_clustering_gas +from etrago.cluster.snapshot import skip_snapshots, snapshot_clustering +from etrago.tools.calc_results import ( + ac_export, + ac_export_per_country, + calc_etrago_results, + dc_export, + dc_export_per_country, + german_network, + system_costs_germany, +) +from etrago.tools.execute import ( + dispatch_disaggregation, + lopf, + run_pf_post_lopf, +) +from etrago.tools.extendable import extendable +from etrago.tools.io import ( + NetworkScenario, + add_ch4_h2_correspondence, + decommissioning, + extension, +) +from etrago.tools.plot import ( + bev_flexibility_potential, + demand_side_management, + flexibility_usage, + heat_stores, + hydrogen_stores, + plot_carrier, + plot_clusters, + plot_gas_generation, + plot_gas_summary, + plot_grid, + plot_h2_generation, + plot_h2_summary, + plot_heat_loads, + plot_heat_summary, + shifted_energy, +) +from etrago.tools.utilities import ( + add_missing_components, + adjust_CH4_gen_carriers, + buses_by_country, + check_args, + convert_capital_costs, + crossborder_capacity, + delete_dispensable_ac_buses, + drop_sectors, + export_to_csv, + filter_links_by_carrier, + foreign_links, + geolocation_buses, + get_args_setting, + get_clustering_data, + load_shedding, + manual_fixes_datamodel, + set_branch_capacity, + set_control_strategies, + set_line_costs, + set_q_foreign_loads, + set_q_national_loads, + set_random_noise, + set_trafo_costs, + update_busmap, +) logger = logging.getLogger(__name__) -class Etrago(): +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) +__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" +__author__ = """AmeliaNadal, CarlosEpia, ClaraBuettner, 
KathiEsterl, gnn,
+    fwitte, ulfmueller, pieterhexen"""
+
+
+class Etrago:
     """
-    Object containing pypsa.Network including the transmission grid,
-    input parameters and optimization results.
+    Object containing pypsa.Network including the transmission grid, the
+    non-electric sectors, input parameters and optimization results.

     Parameters
     ----------
     args : dict
-        Dictionary including all inpu parameters.
+        Dictionary including all input parameters.
     csv_folder_name : string
         Name of folder from which to import CSVs of network data.
     name : string, default ""
@@ -84,14 +141,16 @@ class Etrago():
     Examples
     --------
     """

-    def __init__(self,
-                 args=None,
-                 json_path=None,
-                 csv_folder_name=None,
-                 name="",
-                 ignore_standard_types=False,
-                 **kwargs):
+    def __init__(
+        self,
+        args=None,
+        csv_folder_name=None,
+        ignore_standard_types=False,
+        json_path=None,
+        name="",
+        **kwargs,
+    ):
         self.tool_version = __version__

         self.clustering = None
@@ -100,52 +159,63 @@ def __init__(self,

         self.network = Network()

+        self.network_tsa = Network()
+
         self.disaggregated_network = Network()

-        self.__re_carriers = ['wind_onshore', 'wind_offshore', 'solar',
-                              'biomass', 'run_of_river', 'reservoir']
-        self.__vre_carriers = ['wind_onshore', 'wind_offshore', 'solar']
+        self.__re_carriers = [
+            "wind_onshore",
+            "wind_offshore",
+            "solar",
+            "biomass",
+            "run_of_river",
+            "reservoir",
+        ]
+        self.__vre_carriers = ["wind_onshore", "wind_offshore", "solar"]

-        if args is not None:
+        self.busmap = {}
+
+        self.ch4_h2_mapping = {}

+        if args is not None:
             self.args = args

             self.get_args_setting(json_path)

-            conn = db.connection(section=args['db'])
+            conn = db.connection(section=self.args["db"])

             session = sessionmaker(bind=conn)

+            self.engine = conn
+
             self.session = session()

             self.check_args()

         elif csv_folder_name is not None:
+            self.get_args_setting(csv_folder_name + "/args.json")

-            self.get_args_setting(csv_folder_name + '/args.json')
-
-            self.network = Network(csv_folder_name,
-                                   name,
-                                   ignore_standard_types)
-
-            if self.args['disaggregation'] is not None:
+            self.network = Network(
+                csv_folder_name, name, ignore_standard_types
+            )

+            if self.args["disaggregation"] is not None:
                 self.disaggregated_network = Network(
-                    csv_folder_name + '/disaggregated_network',
+                    csv_folder_name + "/disaggregated_network",
                     name,
-                    ignore_standard_types)
+                    ignore_standard_types,
+                )

-        else:
-            logger.error('Set args or csv_folder_name')
+            self.get_clustering_data(csv_folder_name)

+        else:
+            logger.error("Set args or csv_folder_name")

     # Add functions
     get_args_setting = get_args_setting

     check_args = check_args

-    add_coordinates = add_coordinates
-
     geolocation_buses = geolocation_buses

     add_missing_components = add_missing_components

@@ -154,7 +224,7 @@ def __init__(self,

     set_random_noise = set_random_noise

-    set_line_voltages = set_line_voltages
+    set_q_national_loads = set_q_national_loads

     set_q_foreign_loads = set_q_foreign_loads

@@ -172,9 +242,11 @@ def __init__(self,

     decommissioning = decommissioning

-    plot_grid = plot_grid
+    add_ch4_h2_correspondence = add_ch4_h2_correspondence

-    kmean_clustering = run_kmeans_clustering
+    spatial_clustering = run_spatial_clustering
+
+    spatial_clustering_gas = run_spatial_clustering_gas

     skip_snapshots = skip_snapshots

@@ -184,33 +256,97 @@ def __init__(self,

     lopf = lopf

+    temporal_disaggregation = dispatch_disaggregation
+
     pf_post_lopf = run_pf_post_lopf

-    disaggregation = run_disaggregation
+    spatial_disaggregation = run_disaggregation

     calc_results = calc_etrago_results

+    calc_ac_export = ac_export
+
+    calc_ac_export_per_country = ac_export_per_country
+
+    calc_dc_export = dc_export
+
+    calc_dc_export_per_country = dc_export_per_country
+
     export_to_csv = export_to_csv

+    filter_links_by_carrier = filter_links_by_carrier

-    def build_network_from_db(self):
+    german_network = german_network
+
+    set_line_costs = set_line_costs
+
+    set_trafo_costs = set_trafo_costs
+
+    system_costs_germany = system_costs_germany
+
+    drop_sectors = drop_sectors
+
+    buses_by_country = buses_by_country

-        """ Function that imports transmission grid from chosen database
+    update_busmap = update_busmap
+
+    plot_grid = plot_grid
+
+    plot_clusters = plot_clusters
+
+    plot_carrier = plot_carrier
+
+    plot_gas_generation = plot_gas_generation
+
+    plot_gas_summary = plot_gas_summary
+
+    plot_h2_generation = plot_h2_generation
+
+    plot_h2_summary = plot_h2_summary
+
+    plot_heat_loads = plot_heat_loads
+
+    plot_heat_summary = plot_heat_summary
+
+    plot_flexibility_usage = flexibility_usage
+
+    demand_side_management = demand_side_management
+
+    bev_flexibility_potential = bev_flexibility_potential
+
+    heat_stores = heat_stores
+
+    hydrogen_stores = hydrogen_stores
+
+    delete_dispensable_ac_buses = delete_dispensable_ac_buses
+
+    get_clustering_data = get_clustering_data
+
+    adjust_CH4_gen_carriers = adjust_CH4_gen_carriers
+
+    manual_fixes_datamodel = manual_fixes_datamodel
+
+    shifted_energy = shifted_energy
+
+    def dc_lines(self):
+        return self.filter_links_by_carrier("DC", like=False)
+
+    def build_network_from_db(self):
+        """Function that imports the transmission grid from the chosen database

         Returns
         -------
         None.
         """
-        self.scenario = NetworkScenario(self.session,
-                                        version=self.args['gridversion'],
-                                        prefix=('EgoGridPfHv' if
-                                                self.args['gridversion'] is None
-                                                else 'EgoPfHv'),
-                                        method=self.args['method'],
-                                        start_snapshot=self.args['start_snapshot'],
-                                        end_snapshot=self.args['end_snapshot'],
-                                        scn_name=self.args['scn_name'])
+        self.scenario = NetworkScenario(
+            self.engine,
+            self.session,
+            version=self.args["gridversion"],
+            start_snapshot=self.args["start_snapshot"],
+            end_snapshot=self.args["end_snapshot"],
+            scn_name=self.args["scn_name"],
+        )

         self.network = self.scenario.build_network()

@@ -218,7 +354,10 @@ def build_network_from_db(self):

         self.decommissioning()

-        logger.info('Imported network from db')
+        if "H2" in self.network.buses.carrier.values:
+            self.add_ch4_h2_correspondence()
+
+        logger.info("Imported network from db")

     def adjust_network(self):
         """
@@ -230,19 +369,20 @@ def adjust_network(self):
         None.
""" - self.add_coordinates() - self.geolocation_buses() + self.manual_fixes_datamodel() - self.add_missing_components() + self.geolocation_buses() self.load_shedding() + self.adjust_CH4_gen_carriers() + self.set_random_noise(0.01) - self.set_line_voltages() + self.set_q_national_loads(cos_phi=0.9) - self.set_q_foreign_loads(cos_phi=1) + self.set_q_foreign_loads(cos_phi=0.9) self.foreign_links() @@ -250,8 +390,26 @@ def adjust_network(self): self.set_branch_capacity() - self.extendable(line_max=4) + self.extendable( + grid_max_D=self.args["extendable"]["upper_bounds_grid"][ + "grid_max_D" + ], + grid_max_abs_D=self.args["extendable"]["upper_bounds_grid"][ + "grid_max_abs_D" + ], + grid_max_foreign=self.args["extendable"]["upper_bounds_grid"][ + "grid_max_foreign" + ], + grid_max_abs_foreign=self.args["extendable"]["upper_bounds_grid"][ + "grid_max_abs_foreign" + ], + ) + + self.convert_capital_costs() + + self.delete_dispensable_ac_buses() + + set_control_strategies(self.network) def _ts_weighted(self, timeseries): return timeseries.mul(self.network.snapshot_weightings, axis=0) - diff --git a/etrago/tools/plot.py b/etrago/tools/plot.py index 174b0d139..f3d8ff406 100644 --- a/etrago/tools/plot.py +++ b/etrago/tools/plot.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016-2018 Flensburg University of Applied Sciences, +# Copyright 2016-2023 Flensburg University of Applied Sciences, # Europa-Universität Flensburg, # Centre for Sustainable Energy Systems, # DLR-Institute for Networked Energy Systems @@ -21,47 +21,48 @@ """ Plot.py defines functions necessary to plot results of eTraGo. """ +from math import log10, sqrt import logging import os + from matplotlib import pyplot as plt -import matplotlib.patches as mpatches +from matplotlib.legend_handler import HandlerPatch +from matplotlib.patches import Circle, Ellipse +from pyproj import Proj, transform import matplotlib -import pandas as pd +import matplotlib.patches as mpatches import numpy as np -from math import sqrt, log10 -from pyproj import Proj, transform -import tilemapbase +import pandas as pd + +from etrago.tools.execute import import_gen_from_links + +cartopy_present = True +try: + import cartopy.crs as ccrs +except ImportError: + cartopy_present = False +from pypsa.plot import draw_map_cartopy logger = logging.getLogger(__name__) -if 'READTHEDOCS' not in os.environ: +if "READTHEDOCS" not in os.environ: from geoalchemy2.shape import to_shape - -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") + from pyproj import Proj, transform + from shapely.geometry import LineString, MultiPoint, Point, Polygon + import geopandas as gpd + import tilemapbase + +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, MarlonSchlemminger, mariusves, lukasol" +__author__ = """ulfmueller, MarlonSchlemminger, mariusves, lukasol, ClaraBuettner, +CarlosEpia, pieterhexen, gnn, fwitte, lukasol, KathiEsterl, BartelsJ""" -def add_coordinates(self): - """ - Add coordinates to nodes based on provided geom - - Parameters - ---------- - network : PyPSA network container - - Returns - ------- - Altered PyPSA network container ready for plotting - """ - for idx, row in 
self.network.buses.iterrows():
-        wkt_geom = to_shape(row['geom'])
-        self.network.buses.loc[idx, 'x'] = wkt_geom.x
-        self.network.buses.loc[idx, 'y'] = wkt_geom.y
-

 def set_epsg_network(network):
     """
     Change EPSG from 4326 to 3857. Needed when using osm-background.
@@ -74,8 +75,8 @@ def set_epsg_network(network):
     -------
     """
-    inProj = Proj(init='epsg:4326')
-    outProj = Proj(init='epsg:3857')
+    inProj = Proj(init="epsg:4326")
+    outProj = Proj(init="epsg:3857")
     x1, y1 = network.buses.x.values, network.buses.y.values
     x2, y2 = transform(inProj, outProj, x1, y1)
     network.buses.x, network.buses.y = x2, y2
@@ -109,38 +110,100 @@ def plot_osm(x, y, zoom, alpha=0.4):
     extent = extent.to_project_3857()

     fig, ax = plt.subplots()
-    plt.axis('off')
-    plotter = tilemapbase.Plotter(extent, tilemapbase.tiles.build_OSM(),
-                                  zoom=zoom)
+    plt.axis("off")
+    plotter = tilemapbase.Plotter(
+        extent, tilemapbase.tiles.build_OSM(), zoom=zoom
+    )
     plotter.plot(ax, alpha=alpha)
-    #ax.plot(x, y, "ro-")
-    return fig, ax
+    # ax.plot(x, y, "ro-")
+    return fig, ax, extent.xrange, extent.yrange
+

 def coloring():
-    colors = {'biomass': 'green',
-              'coal': 'k',
-              'gas': 'orange',
-              'eeg_gas': 'olive',
-              'geothermal': 'purple',
-              'lignite': 'brown',
-              'oil': 'darkgrey',
-              'other_non_renewable': 'pink',
-              'reservoir': 'navy',
-              'run_of_river': 'aqua',
-              'pumped_storage': 'steelblue',
-              'solar': 'yellow',
-              'uranium': 'lime',
-              'waste': 'sienna',
-              'wind': 'blue',
-              'wind_onshore': 'skyblue',
-              'wind_offshore': 'cornflowerblue',
-              'slack': 'pink',
-              'load shedding': 'red',
-              'nan': 'm',
-              'imports': 'salmon',
-              '': 'm'}
+    """
+    Return a dictionary with a color assigned to each kind of carrier used in
+    etrago.network. This is used for plotting purposes.
+
+    Returns
+    -------
+    colors : dict
+        Color for each kind of carrier.
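+
+    Examples
+    --------
+    A minimal usage sketch:
+
+    >>> coloring()["solar"]
+    'gold'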
+
+    """
+
+    colors = {
+        "load": "red",
+        "DC": "blue",
+        "power_to_H2": "cyan",
+        "H2_overground": "cyan",
+        "H2_underground": "cyan",
+        "H2": "cyan",
+        "dsm-cts": "dodgerblue",
+        "dsm-ind-osm": "dodgerblue",
+        "dsm-ind-sites": "dodgerblue",
+        "dsm": "dodgerblue",
+        "central_heat_pump": "mediumpurple",
+        "central_resistive_heater": "blueviolet",
+        "rural_heat_pump": "violet",
+        "CH4": "yellow",
+        "CH4_biogas": "yellow",
+        "CH4_NG": "yellow",
+        "CH4_to_H2": "yellowgreen",
+        "industrial_gas_CHP": "olive",
+        "rural_gas_boiler": "sandybrown",
+        "central_gas_CHP": "darkorange",
+        "central_gas_CHP_heat": "darkorange",
+        "central_gas_boiler": "saddlebrown",
+        "OCGT": "seagreen",
+        "H2_to_power": "darkcyan",
+        "H2_feedin": "lime",
+        "H2_to_CH4": "seagreen",
+        "central_heat_store_charger": "firebrick",
+        "central_heat_store": "firebrick",
+        "heat": "firebrick",
+        "rural_heat_store_charger": "salmon",
+        "rural_heat_store": "salmon",
+        "central_heat_store_discharger": "firebrick",
+        "rural_heat_store_discharger": "salmon",
+        "rural_heat": "orange",
+        "central_heat": "orangered",
+        "H2_grid": "green",
+        "H2_saltcavern": "darkgreen",
+        "AC": "blue",
+        "nuclear": "palegreen",
+        "oil": "silver",
+        "other_non_renewable": "dimgrey",
+        "other_renewable": "lightsteelblue",
+        "reservoir": "indigo",
+        "run_of_river": "slateblue",
+        "solar": "gold",
+        "wind_offshore": "lightblue",
+        "wind_onshore": "blue",
+        "coal": "grey",
+        "lignite": "brown",
+        "biomass": "olive",
+        "solar_thermal_collector": "wheat",
+        "geo thermal": "peru",
+        "load shedding": "black",
+        "central_biomass_CHP": "darkorange",
+        "industrial_biomass_CHP": "darkorange",
+        "solar_rooftop": "goldenrod",
+        "gas": "yellow",
+        "central_biomass_CHP_heat": "darkorange",
+        "geo_thermal": "peru",
+        "battery": "blue",
+        "pumped_hydro": "indigo",
+        "BEV charger": "indigo",
+        "BEV_charger": "indigo",
+        "others": "dimgrey",
+    }
+
+    return colors
+

 def plot_line_loading_diff(networkA, networkB, timestep=0, osm=False):
     """
     Plot difference in line loading between two networks
@@ -165,66 +228,68 @@ def plot_line_loading_diff(networkA, networkB, timestep=0, osm=False):
     osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6}
         If not False, osm is set as background
         with the following settings as dict:
-                'x': array of two floats, x axis boundaries (lat)
-                'y': array of two floats, y axis boundaries (long)
-                'zoom' : resolution of osm
+
+        * 'x': array of two floats, x axis boundaries (lat)
+        * 'y': array of two floats, y axis boundaries (long)
+        * 'zoom' : resolution of osm
+
     """
-    if osm != False:
+    if osm is not False:
         if set_epsg_network.counter == 0:
             set_epsg_network(networkA)
             set_epsg_network(networkB)
-        plot_osm(osm['x'], osm['y'], osm['zoom'])
+        plot_osm(osm["x"], osm["y"], osm["zoom"])

+    # new colormap to make sure 0% difference has the same color in every plot
     def shiftedColorMap(
-            cmap,
-            start=0,
-            midpoint=0.5,
-            stop=1.0,
-            name='shiftedcmap'):
-        '''
+        cmap, start=0, midpoint=0.5, stop=1.0, name="shiftedcmap"
+    ):
+        """
         Function to offset the "center" of a colormap. Useful for
         data with a negative min and positive max and you want the
         middle of the colormap's dynamic range to be at zero

-        Input
-        -----
-        cmap : The matplotlib colormap to be altered
-        start : Offset from lowest point in the colormap's range.
-            Defaults to 0.0 (no lower ofset). Should be between
-            0.0 and `midpoint`.
-        midpoint : The new center of the colormap. Defaults to
-            0.5 (no shift).
Should be between 0.0 and 1.0. In
-            general, this should be 1 - vmax/(vmax + abs(vmin))
-            For example if your data range from -15.0 to +5.0 and
-            you want the center of the colormap at 0.0, `midpoint`
-            should be set to 1 - 5/(5 + 15)) or 0.75
-        stop : Offset from highets point in the colormap's range.
-            Defaults to 1.0 (no upper ofset). Should be between
-            `midpoint` and 1.0.
-        '''
-        cdict = {
-            'red': [],
-            'green': [],
-            'blue': [],
-            'alpha': []
-        }
+        Parameters
+        ----------
+        cmap :
+            The matplotlib colormap to be altered
+        start :
+            Offset from lowest point in the colormap's range.
+            Defaults to 0.0 (no lower offset). Should be between
+            0.0 and `midpoint`.
+        midpoint :
+            The new center of the colormap. Defaults to
+            0.5 (no shift). Should be between 0.0 and 1.0. In
+            general, this should be 1 - vmax/(vmax + abs(vmin))
+            For example, if your data ranges from -15.0 to +5.0 and
+            you want the center of the colormap at 0.0, `midpoint`
+            should be set to 1 - 5/(5 + 15) or 0.75
+        stop :
+            Offset from highest point in the colormap's range.
+            Defaults to 1.0 (no upper offset). Should be between
+            `midpoint` and 1.0.
+
+        """
+        cdict = {"red": [], "green": [], "blue": [], "alpha": []}

         # regular index to compute the colors
         reg_index = np.linspace(start, stop, 257)

         # shifted index to match the data
-        shift_index = np.hstack([
-            np.linspace(0.0, midpoint, 128, endpoint=False),
-            np.linspace(midpoint, 1.0, 129, endpoint=True)
-        ])
+        shift_index = np.hstack(
+            [
+                np.linspace(0.0, midpoint, 128, endpoint=False),
+                np.linspace(midpoint, 1.0, 129, endpoint=True),
+            ]
+        )

         for ri, si in zip(reg_index, shift_index):
             r, g, b, a = cmap(ri)

-            cdict['red'].append((si, r, r))
-            cdict['green'].append((si, g, g))
-            cdict['blue'].append((si, b, b))
-            cdict['alpha'].append((si, a, a))
+            cdict["red"].append((si, r, r))
+            cdict["green"].append((si, g, g))
+            cdict["blue"].append((si, b, b))
+            cdict["alpha"].append((si, a, a))

         newcmap = matplotlib.colors.LinearSegmentedColormap(name, cdict)
         plt.register_cmap(cmap=newcmap)

@@ -233,44 +298,58 @@ def shiftedColorMap(

     # calculate difference in loading between both networks
     loading_switches = abs(
-        networkA.lines_t.p0.mul(networkA.snapshot_weightings, axis=0).\
-        loc[networkA.snapshots[timestep]].to_frame())
-    loading_switches.columns = ['switch']
+        networkA.lines_t.p0.mul(networkA.snapshot_weightings, axis=0)
+        .loc[networkA.snapshots[timestep]]
+        .to_frame()
+    )
+    loading_switches.columns = ["switch"]
     loading_noswitches = abs(
-        networkB.lines_t.p0.mul(networkB.snapshot_weightings, axis=0).\
-        loc[networkB.snapshots[timestep]].to_frame())
-    loading_noswitches.columns = ['noswitch']
+        networkB.lines_t.p0.mul(networkB.snapshot_weightings, axis=0)
+        .loc[networkB.snapshots[timestep]]
+        .to_frame()
+    )
+    loading_noswitches.columns = ["noswitch"]
     diff_network = loading_switches.join(loading_noswitches)
-    diff_network['noswitch'] = diff_network['noswitch'].fillna(
-        diff_network['switch'])
-    diff_network[networkA.snapshots[timestep]] \
-        = diff_network['switch'] - diff_network['noswitch']
+    diff_network["noswitch"] = diff_network["noswitch"].fillna(
+        diff_network["switch"]
+    )
+    diff_network[networkA.snapshots[timestep]] = (
+        diff_network["switch"] - diff_network["noswitch"]
+    )

     # get switches
     new_buses = pd.Series(index=networkA.buses.index.values)
-    new_buses.loc[set(networkA.buses.index.values) -
-                  set(networkB.buses.index.values)] = 0.1
+    new_buses.loc[
+        set(networkA.buses.index.values) - set(networkB.buses.index.values)
+    ] = 0.1
     new_buses = new_buses.fillna(0)

    #
plot network with difference in loading and shifted colormap - loading = (diff_network.loc[:, networkA.snapshots[timestep]] / - (networkA.lines.s_nom)) * 100 + loading = ( + diff_network.loc[:, networkA.snapshots[timestep]] + / (networkA.lines.s_nom) + ) * 100 midpoint = 1 - max(loading) / (max(loading) + abs(min(loading))) shifted_cmap = shiftedColorMap( - plt.cm.jet, midpoint=midpoint, name='shifted') - ll = networkA.plot(line_colors=loading, line_cmap=shifted_cmap, - title="Line loading", bus_sizes=new_buses, - bus_colors='blue', line_widths=0.55, - geomap=False) + plt.cm.jet, midpoint=midpoint, name="shifted" + ) + ll = networkA.plot( + line_colors=loading, + line_cmap=shifted_cmap, + title="Line loading", + bus_sizes=new_buses, + bus_colors="blue", + line_widths=0.55, + geomap=False, + ) cb = plt.colorbar(ll[1]) - cb.set_label('Difference in line loading in % of s_nom') + cb.set_label("Difference in line loading in % of s_nom") + -def network_expansion_diff(networkA, - networkB, - filename=None, - boundaries=[], - osm=False): +def network_expansion_diff( + networkA, networkB, filename=None, boundaries=[], osm=False +): """Plot relative network expansion derivation of AC- and DC-lines. Parameters @@ -286,58 +365,66 @@ def network_expansion_diff(networkA, osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm """ - if osm != False: + if osm is not False: if set_epsg_network.counter == 0: set_epsg_network(networkA) set_epsg_network(networkB) - plot_osm(osm['x'], osm['y'], osm['zoom']) + plot_osm(osm["x"], osm["y"], osm["zoom"]) cmap = plt.cm.jet - extension_lines = 100 *((networkA.lines.s_nom_opt - - networkB.lines.s_nom_opt)/\ - networkA.lines.s_nom_opt) - - extension_links = 100 * ((networkA.links.p_nom_opt -\ - networkB.links.p_nom_opt)/\ - networkA.links.p_nom_opt) + extension_lines = 100 * ( + (networkA.lines.s_nom_opt - networkB.lines.s_nom_opt) + / networkA.lines.s_nom_opt + ) + extension_links = 100 * ( + (networkA.links.p_nom_opt - networkB.links.p_nom_opt) + / networkA.links.p_nom_opt + ) ll = networkA.plot( line_colors=extension_lines, - link_colors = extension_links, + link_colors=extension_links, line_cmap=cmap, bus_sizes=0, title="Derivation of AC- and DC-line extension", line_widths=2, - geomap=False) + geomap=False, + ) if not boundaries: - v = np.linspace(min(extension_lines.min(), extension_links.min()), - max(extension_lines.max(), extension_links.max()), 101) - boundaries = [min(extension_lines.min(), extension_links.min()).round(0), - max(extension_lines.max(), extension_links.max()).round(0)] + v = np.linspace( + min(extension_lines.min(), extension_links.min()), + max(extension_lines.max(), extension_links.max()), + 101, + ) + boundaries = [ + min(extension_lines.min(), extension_links.min()).round(0), + max(extension_lines.max(), extension_links.max()).round(0), + ] else: v = np.linspace(boundaries[0], boundaries[1], 101) if not extension_links.empty: - cb_Link = plt.colorbar(ll[2], boundaries=v, - ticks=v[0:101:10]) + cb_Link = plt.colorbar(ll[2], boundaries=v, ticks=v[0:101:10]) cb_Link.set_clim(vmin=boundaries[0], vmax=boundaries[1]) cb_Link.remove() - cb = plt.colorbar(ll[1], boundaries=v, - 
ticks=v[0:101:10], fraction=0.046, pad=0.04)
+    cb = plt.colorbar(
+        ll[1], boundaries=v, ticks=v[0:101:10], fraction=0.046, pad=0.04
+    )
     cb.set_clim(vmin=boundaries[0], vmax=boundaries[1])
-    cb.set_label('line extension derivation in %')
+    cb.set_label("line extension deviation in %")

     if filename is None:
         plt.show()
@@ -345,44 +432,57 @@ def network_expansion_diff(networkA,
         plt.savefig(filename)
         plt.close()

+
 def plot_residual_load(network):
-    """ Plots residual load summed of all exisiting buses.
+    """Plots residual load summed over all existing buses.

     Parameters
     ----------
     network : PyPSA network containter
+
+    Returns
+    -------
+    Plot
     """

     renewables = network.generators[
-        network.generators.carrier.isin(['wind_onshore', 'wind_offshore',
-                                         'solar', 'run_of_river',
-                                         'wind'])]
-    renewables_t = network.generators.p_nom[renewables.index] * \
-        network.generators_t.p_max_pu[renewables.index].mul(
-            network.snapshot_weightings, axis=0)
-    load = network.loads_t.p_set.mul(network.snapshot_weightings, axis=0).\
-        sum(axis=1)
+        network.generators.carrier.isin(
+            ["wind_onshore", "wind_offshore", "solar", "run_of_river", "wind"]
+        )
+    ]
+    renewables_t = network.generators.p_nom[
+        renewables.index
+    ] * network.generators_t.p_max_pu[renewables.index].mul(
+        network.snapshot_weightings, axis=0
+    )
+    load = network.loads_t.p_set.mul(network.snapshot_weightings, axis=0).sum(
+        axis=1
+    )
     all_renew = renewables_t.sum(axis=1)
     residual_load = load - all_renew
     plot = residual_load.plot(
-        title='Residual load',
-        drawstyle='steps',
+        title="Residual load",
+        drawstyle="steps",
         lw=2,
-        color='red',
-        legend=False)
+        color="red",
+        legend=False,
+    )
     plot.set_ylabel("MW")
     # sorted curve
     sorted_residual_load = residual_load.sort_values(
-        ascending=False).reset_index()
+        ascending=False
+    ).reset_index()
     plot1 = sorted_residual_load.plot(
-        title='Sorted residual load',
-        drawstyle='steps',
+        title="Sorted residual load",
+        drawstyle="steps",
         lw=2,
-        color='red',
-        legend=False)
+        color="red",
+        legend=False,
+    )
     plot1.set_ylabel("MW")

-def plot_stacked_gen(network, bus=None, resolution='GW', filename=None):
+
+def plot_stacked_gen(network, bus=None, resolution="GW", filename=None):
     """
     Plot stacked sum of generation grouped by carrier type

@@ -400,41 +500,60 @@ def plot_stacked_gen(network, bus=None, resolution='GW', filename=None):
     -------
     Plot
     """
-    if resolution == 'GW':
+    if resolution == "GW":
         reso_int = 1e3
-    elif resolution == 'MW':
+    elif resolution == "MW":
         reso_int = 1
-    elif resolution == 'KW':
+    elif resolution == "KW":
         reso_int = 0.001

     # sum for all buses
     if bus is None:
-        p_by_carrier = pd.concat(
-            [network.generators_t.p
-             [network.generators[network.generators.control != 'Slack'].index],
-             network.generators_t.p.mul(network.snapshot_weightings, axis=0)
-             [network.generators[network.generators.control ==
-                                 'Slack'].index].iloc[:, 0].apply(
-                лlambda x: x if x > 0 else 0)],
-            axis=1).groupby(network.generators.carrier, axis=1).sum()
+        p_by_carrier = (
+            pd.concat(
+                [
+                    network.generators_t.p[
+                        network.generators[
+                            network.generators.control != "Slack"
+                        ].index
+                    ],
+                    network.generators_t.p.mul(
+                        network.snapshot_weightings, axis=0
+                    )[
+                        network.generators[
+                            network.generators.control == "Slack"
+                        ].index
+                    ]
+                    .iloc[:, 0]
+                    .apply(lambda x: x if x > 0 else 0),
+                ],
+                axis=1,
+            )
+            .groupby(network.generators.carrier, axis=1)
+            .sum()
+        )

         load = network.loads_t.p.sum(axis=1)
-        if hasattr(network, 'foreign_trade'):
+        if hasattr(network, "foreign_trade"):
             trade_sum =
network.foreign_trade.sum(axis=1) - p_by_carrier['imports'] = trade_sum[trade_sum > 0] - p_by_carrier['imports'] = p_by_carrier['imports'].fillna(0) + p_by_carrier["imports"] = trade_sum[trade_sum > 0] + p_by_carrier["imports"] = p_by_carrier["imports"].fillna(0) # sum for a single bus elif bus is not None: - filtered_gens = network.generators[network.generators['bus'] == bus] - p_by_carrier = network.generators_t.p.mul( - network.snapshot_weightings, axis=0).groupby( - filtered_gens.carrier, axis=1).abs().sum() - filtered_load = network.loads[network.loads['bus'] == bus] - load = network.loads_t.p.mul(network.snapshot_weightings, axis=0)\ - [filtered_load.index] + filtered_gens = network.generators[network.generators["bus"] == bus] + p_by_carrier = ( + network.generators_t.p.mul(network.snapshot_weightings, axis=0) + .groupby(filtered_gens.carrier, axis=1) + .abs() + .sum() + ) + filtered_load = network.loads[network.loads["bus"] == bus] + load = network.loads_t.p.mul(network.snapshot_weightings, axis=0)[ + filtered_load.index + ] colors = coloring() -# TODO: column reordering based on available columns + # TODO: column reordering based on available columns fig, ax = plt.subplots(1, 1) @@ -442,19 +561,18 @@ def plot_stacked_gen(network, bus=None, resolution='GW', filename=None): colors = [colors[col] for col in p_by_carrier.columns] if len(colors) == 1: colors = colors[0] - (p_by_carrier / reso_int).plot(kind="area", ax=ax, linewidth=0, - color=colors) - (load / reso_int).plot(ax=ax, legend='load', lw=2, color='darkgrey', - style='--') + (p_by_carrier / reso_int).plot( + kind="area", ax=ax, linewidth=0, color=colors + ) + (load / reso_int).plot( + ax=ax, legend="load", lw=2, color="darkgrey", style="--" + ) ax.legend(ncol=4, loc="upper left") ax.set_ylabel(resolution) ax.set_xlabel("") - - matplotlib.rcParams.update({'font.size': 22}) - - + matplotlib.rcParams.update({"font.size": 22}) if filename is None: plt.show() @@ -464,34 +582,57 @@ def plot_stacked_gen(network, bus=None, resolution='GW', filename=None): def plot_gen_diff( - networkA, - networkB, - leave_out_carriers=['geothermal', 'oil', 'other_non_renewable', - 'reservoir', 'waste']): + networkA, + networkB, + leave_out_carriers=[ + "geothermal", + "oil", + "other_non_renewable", + "reservoir", + "waste", + ], +): """ Plot difference in generation between two networks grouped by carrier type - Parameters ---------- networkA : PyPSA network container with switches networkB : PyPSA network container without switches - leave_out_carriers : list of carriers to leave out (default to all small - carriers) + leave_out_carriers : + list of carriers to leave out (default to all small carriers) Returns ------- Plot """ + def gen_by_c(network): - gen = pd.concat( - [network.generators_t.p.mul(etwork.snapshot_weightings, axis=0) - [network.generators[network.generators.control != 'Slack'].index], - network.generators_t.p.mul( - network.snapshot_weightings, axis=0) - [network.generators[network. 
generators.control == 'Slack'].index] - .iloc[:, 0].apply(lambda x: x if x > 0 else 0)], axis=1)\ - .groupby(network.generators.carrier, axis=1).sum() + gen = ( + pd.concat( + [ + network.generators_t.p.mul( + network.snapshot_weightings, axis=0 + )[ + network.generators[ + network.generators.control != "Slack" + ].index + ], + network.generators_t.p.mul( + network.snapshot_weightings, axis=0 + )[ + network.generators[ + network.generators.control == "Slack" + ].index + ] + .iloc[:, 0] + .apply(lambda x: x if x > 0 else 0), + ], + axis=1, + ) + .groupby(network.generators.carrier, axis=1) + .sum() + ) return gen gen = gen_by_c(networkB) @@ -502,15 +643,15 @@ def gen_by_c(network): diff.drop(leave_out_carriers, axis=1, inplace=True) colors = [colors[col] for col in diff.columns] - plot = diff.plot(kind='line', color=colors, use_index=False) - plot.legend(loc='upper left', ncol=5, prop={'size': 8}) + plot = diff.plot(kind="line", color=colors, use_index=False) + plot.legend(loc="upper left", ncol=5, prop={"size": 8}) x = [] for i in range(0, len(diff)): x.append(i) plt.xticks(x, x) - plot.set_xlabel('Timesteps') - plot.set_ylabel('Difference in Generation in MW') - plot.set_title('Difference in Generation') + plot.set_xlabel("Timesteps") + plot.set_ylabel("Difference in Generation in MW") + plot.set_title("Difference in Generation") plt.tight_layout() @@ -518,7 +659,6 @@ def plot_voltage(network, boundaries=[], osm=False): """ Plot voltage at buses as hexbin - Parameters ---------- network : PyPSA network container @@ -526,21 +666,22 @@ def plot_voltage(network, boundaries=[], osm=False): osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm Returns ------- Plot """ - if osm != False: + if osm is not False: if set_epsg_network.counter == 0: set_epsg_network(network) - plot_osm(osm['x'], osm['y'], osm['zoom']) + plot_osm(osm["x"], osm["y"], osm["zoom"]) - x = np.array(network.buses['x']) - y = np.array(network.buses['y']) + x = np.array(network.buses["x"]) + y = np.array(network.buses["y"]) alpha = np.array(network.buses_t.v_mag_pu.loc[network.snapshots[0]]) @@ -556,14 +697,15 @@ def plot_voltage(network, boundaries=[], osm=False): plt.hexbin(x, y, C=alpha, cmap=cmap, gridsize=100, norm=norm) cb = plt.colorbar(boundaries=v, ticks=v[0:101:10], norm=norm) cb.set_clim(vmin=boundaries[0], vmax=boundaries[1]) - cb.set_label('Voltage Magnitude per unit of v_nom') + cb.set_label("Voltage Magnitude per unit of v_nom") network.plot( - ax=ax, line_widths=pd.Series(0.5, network.lines.index), bus_sizes=0) + ax=ax, line_widths=pd.Series(0.5, network.lines.index), bus_sizes=0 + ) plt.show() -def curtailment(network, carrier='solar', filename=None): +def curtailment(network, carrier="solar", filename=None): """ Plot curtailment of selected carrier @@ -582,30 +724,37 @@ def curtailment(network, carrier='solar', filename=None): ------- Plot """ - p_by_carrier = network.generators_t.p.groupby\ - (network.generators.carrier, axis=1).sum() + p_by_carrier = network.generators_t.p.groupby( + network.generators.carrier, axis=1 + ).sum() capacity = network.generators.groupby("carrier").sum().at[carrier, "p_nom"] p_available = 
network.generators_t.p_max_pu.multiply( - network.generators["p_nom"]) + network.generators["p_nom"] + ) p_available_by_carrier = p_available.groupby( - network.generators.carrier, axis=1).sum() + network.generators.carrier, axis=1 + ).sum() p_curtailed_by_carrier = p_available_by_carrier - p_by_carrier print(p_curtailed_by_carrier.sum()) - p_df = pd.DataFrame({carrier + - " available": p_available_by_carrier[carrier], - carrier + - " dispatched": p_by_carrier[carrier], carrier + - " curtailed": p_curtailed_by_carrier[carrier]}) + p_df = pd.DataFrame( + { + carrier + " available": p_available_by_carrier[carrier], + carrier + " dispatched": p_by_carrier[carrier], + carrier + " curtailed": p_curtailed_by_carrier[carrier], + } + ) p_df[carrier + " capacity"] = capacity - p_df[carrier + " curtailed"][p_df[carrier + " curtailed"] < 0.] = 0. + p_df[carrier + " curtailed"][p_df[carrier + " curtailed"] < 0.0] = 0.0 fig, ax = plt.subplots(1, 1) fig.set_size_inches(12, 6) - p_df[[carrier + " dispatched", carrier + " curtailed"] - ].plot(kind="area", ax=ax, linewidth=3) - p_df[[carrier + " available", carrier + " capacity"] - ].plot(ax=ax, linewidth=3) + p_df[[carrier + " dispatched", carrier + " curtailed"]].plot( + kind="area", ax=ax, linewidth=3 + ) + p_df[[carrier + " available", carrier + " capacity"]].plot( + ax=ax, linewidth=3 + ) ax.set_xlabel("") ax.set_ylabel("Power [MW]") @@ -617,8 +766,9 @@ def curtailment(network, carrier='solar', filename=None): plt.savefig(filename) plt.close() + def calc_dispatch_per_carrier(network, timesteps): - """ Function that calculates dispatch per carrier in given timesteps + """Function that calculates dispatch per carrier in given timesteps Parameters ---------- @@ -634,23 +784,55 @@ def calc_dispatch_per_carrier(network, timesteps): """ - index = [(network.generators.bus[idx], - network.generators.carrier[idx]) - for idx in network.generators.index] + import_gen_from_links(network) - dist = pd.Series(index=pd.MultiIndex.from_tuples( - index, names=['bus', 'carrier']), dtype=float) + ac_buses = network.buses[network.buses.carrier == "AC"].index + network.generators = network.generators[ + network.generators.bus.isin(ac_buses) + ] + network.generators_t.p = network.generators_t.p.loc[ + :, network.generators_t.p.columns.isin(network.generators.index) + ] + + index = [ + (network.generators.bus[idx], network.generators.carrier[idx]) + for idx in network.generators.index + ] + + dist = pd.Series( + index=pd.MultiIndex.from_tuples(index, names=["bus", "carrier"]), + dtype=float, + ) for i in dist.index: - gens = network.generators[(network.generators.bus == i[0]) - & (network.generators.carrier == i[1])].index - dist[i] = (network.generators_t.p[gens].transpose()\ - [network.snapshots[timesteps]]).sum().sum() + gens = network.generators[ + (network.generators.bus == i[0]) + & (network.generators.carrier == i[1]) + ].index + dist[i] = ( + ( + network.generators_t.p[gens].transpose()[ + network.snapshots[timesteps] + ] + ) + .sum() + .sum() + ) return dist -def calc_storage_expansion_per_bus(network): - """ Function that calculates storage expansion per bus and technology + +def calc_storage_expansion_per_bus( + network, + carriers=[ + "battery", + "H2_overground", + "H2_underground", + "rural_heat_store", + "central_heat_store", + ], +): + """Function that calculates storage expansion per bus and technology Parameters ---------- @@ -663,43 +845,132 @@ def calc_storage_expansion_per_bus(network): storage expansion per bus and technology """ - - batteries = 
network.storage_units[network.storage_units.carrier ==
-                                 'extendable_battery_storage']
-    hydrogen = network.storage_units[network.storage_units.carrier ==
-                                     'extendable_hydrogen_storage']
-    battery_distribution =\
-        network.storage_units.p_nom_opt[batteries.index].groupby(
-            network.storage_units.bus).sum().reindex(
-                network.buses.index, fill_value=0.)
-    hydrogen_distribution =\
-        network.storage_units.p_nom_opt[hydrogen.index].groupby(
-            network.storage_units.bus).sum().reindex(
-                network.buses.index, fill_value=0.)
-    index = [(idx, 'battery_storage') for idx in network.buses.index]
-    index.extend([(idx, 'hydrogen_storage') for idx in network.buses.index])
-
-    dist = pd.Series(index=pd.MultiIndex.from_tuples(
-        index, names=['bus', 'carrier']), dtype=float)
-
-    dist.iloc[dist.index.get_level_values('carrier') == 'battery_storage'] = \
-        battery_distribution.sort_index().values
-    dist.iloc[dist.index.get_level_values('carrier') == 'hydrogen_storage'] = \
-        hydrogen_distribution.sort_index().values
-    network.carriers.color['hydrogen_storage'] = 'orange'
-    network.carriers.color['battery_storage'] = 'blue'
+    index = [(idx, "battery") for idx in network.buses.index]
+    for c in carriers:
+        if c != "battery":
+            index.extend([(idx, c) for idx in network.buses.index])
+    # index.extend([(idx, 'hydrogen_storage') for idx in network.buses.index])
+
+    dist = pd.Series(
+        index=pd.MultiIndex.from_tuples(index, names=["bus", "carrier"]),
+        dtype=float,
+    )
+
+    if "battery" in carriers:
+        batteries = network.storage_units[
+            network.storage_units.carrier == "battery"
+        ]
+        battery_distribution = (
+            (
+                network.storage_units.p_nom_opt[batteries.index]
+                - network.storage_units.p_nom_min[batteries.index]
+            )
+            .groupby(network.storage_units.bus)
+            .sum()
+            .reindex(network.buses.index, fill_value=0.0)
+        ).mul(6)
+
+        battery_distribution.index = pd.MultiIndex.from_tuples(
+            [(idx, "battery") for idx in battery_distribution.index]
+        )
+
+        dist.loc[
+            dist.index.get_level_values("carrier") == "battery"
+        ] = battery_distribution
+    if "H2_overground" in carriers:
+        h2_overground = network.stores[
+            network.stores.carrier == "H2_overground"
+        ]
+        h2_over_distribution = (
+            network.stores.e_nom_opt[h2_overground.index]
+            .groupby(network.stores.bus)
+            .sum()
+            .reindex(network.buses.index, fill_value=0.0)
+        )
+
+        h2_over_distribution.index = pd.MultiIndex.from_tuples(
+            [(idx, "H2_overground") for idx in h2_over_distribution.index]
+        )
+
+        dist.loc[
+            dist.index.get_level_values("carrier") == "H2_overground"
+        ] = h2_over_distribution
+
+    if "H2_underground" in carriers:
+        h2_underground = network.stores[
+            network.stores.carrier == "H2_underground"
+        ]
+        h2_under_distribution = (
+            network.stores.e_nom_opt[h2_underground.index]
+            .groupby(network.stores.bus)
+            .sum()
+            .reindex(network.buses.index, fill_value=0.0)
+        )
+
+        h2_under_distribution.index = pd.MultiIndex.from_tuples(
+            [(idx, "H2_underground") for idx in h2_under_distribution.index]
+        )
+
+        dist.loc[
+            dist.index.get_level_values("carrier") == "H2_underground"
+        ] = h2_under_distribution
+
+    if "rural_heat_store" in carriers:
+        rural_heat = network.stores[
+            network.stores.carrier == "rural_heat_store"
+        ]
+        rural_heat_distribution = (
+            network.stores.e_nom_opt[rural_heat.index]
+            .groupby(network.stores.bus)
+            .sum()
+            .reindex(network.buses.index, fill_value=0.0)
+        )
+
+        rural_heat_distribution.index = pd.MultiIndex.from_tuples(
+            [
+                (idx, "rural_heat_store")
+                for idx in rural_heat_distribution.index
+            ]
+        )
+
+        dist.loc[
+
dist.index.get_level_values("carrier") == "rural_heat_store" + ] = rural_heat_distribution + if "central_heat_store" in carriers: + central_heat = network.stores[ + network.stores.carrier == "central_heat_store" + ] + central_heat_distribution = ( + network.stores.e_nom_opt[central_heat.index] + .groupby(network.stores.bus) + .sum() + .reindex(network.buses.index, fill_value=0.0) + ) + + central_heat_distribution.index = pd.MultiIndex.from_tuples( + [ + (idx, "central_heat_store") + for idx in central_heat_distribution.index + ] + ) + + dist.loc[ + dist.index.get_level_values("carrier") == "central_heat_store" + ] = central_heat_distribution return dist + def gen_dist_diff( - networkA, - networkB, - techs=None, - snapshot=0, - n_cols=3, - gen_size=0.2, - filename=None, - buscmap=plt.cm.jet): + networkA, + networkB, + techs=None, + snapshot=0, + n_cols=3, + gen_size=0.2, + filename=None, + buscmap=plt.cm.jet, +): """ Difference in generation distribution Green/Yellow/Red colors mean that the generation at a location @@ -726,6 +997,10 @@ def gen_dist_diff( filename : str Specify filename If not given, figure will be show directly + + Returns + ------- + None. """ if techs is None: techs = networkA.generators.carrier.unique() @@ -755,22 +1030,31 @@ def gen_dist_diff( gensA = networkA.generators[networkA.generators.carrier == tech] gensB = networkB.generators[networkB.generators.carrier == tech] - gen_distribution =\ - networkA.generators_t.p.mul(networkA.snapshot_weightings, axis=0)\ - [gensA.index].loc[networkA.snapshots[snapshot]].groupby( - networkA.generators.bus).sum().reindex( - networkA.buses.index, fill_value=0.) -\ - networkB.generators_t.p.mul(networkB.snapshot_weightings, axis=0)\ - [gensB.index].loc[networkB.snapshots[snapshot]].groupby( - networkB.generators.bus).sum().reindex( - networkB.buses.index, fill_value=0.) + gen_distribution = networkA.generators_t.p.mul( + networkA.snapshot_weightings, axis=0 + )[gensA.index].loc[networkA.snapshots[snapshot]].groupby( + networkA.generators.bus + ).sum().reindex( + networkA.buses.index, fill_value=0.0 + ) - networkB.generators_t.p.mul( + networkB.snapshot_weightings, axis=0 + )[ + gensB.index + ].loc[ + networkB.snapshots[snapshot] + ].groupby( + networkB.generators.bus + ).sum().reindex( + networkB.buses.index, fill_value=0.0 + ) networkA.plot( ax=ax, bus_sizes=gen_size * abs(gen_distribution), bus_colors=gen_distribution, line_widths=0.1, - bus_cmap=buscmap) + bus_cmap=buscmap, + ) ax.set_title(tech) @@ -780,15 +1064,17 @@ def gen_dist_diff( plt.savefig(filename) plt.close() + def nodal_gen_dispatch( - network, - networkB=None, - techs=['wind_onshore', 'solar'], - item='energy', - direction=None, - scaling=1, - filename=None, - osm=False): + network, + networkB=None, + techs=["wind_onshore", "solar"], + item="energy", + direction=None, + scaling=1, + filename=None, + osm=False, +): """ Plot nodal dispatch or capacity. If networkB is given, difference in dispatch is plotted. @@ -825,76 +1111,98 @@ def nodal_gen_dispatch( osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm + + * 'x': array of two floats, x axis boundaries (lat) + * 'y': array of two floats, y axis boundaries (long) + * 'zoom' : resolution of osm + + Returns + ------- + None. 
""" - if osm != False: + if osm is not False: if set_epsg_network.counter == 0: set_epsg_network(network) - fig, ax = plot_osm(osm['x'], osm['y'], osm['zoom']) + fig, ax, xrange, yrange = plot_osm(osm["x"], osm["y"], osm["zoom"]) + elif (osm is False) and cartopy_present: + fig, ax = plt.subplots( + subplot_kw={"projection": ccrs.PlateCarree()}, figsize=(5, 5) + ) + else: - fig, ax = plt.subplots(1, 1) + fig, ax = plt.subplots(figsize=(5, 5)) if techs: gens = network.generators[network.generators.carrier.isin(techs)] elif techs is None: gens = network.generators techs = gens.carrier.unique() - if item == 'capacity': - dispatch = gens.p_nom.groupby([network.generators.bus, - network.generators.carrier]).sum() - elif item == 'energy': + if item == "capacity": + dispatch = gens.p_nom.groupby( + [network.generators.bus, network.generators.carrier] + ).sum() + elif item == "energy": if networkB: - dispatch_network =\ - network.generators_t.p[gens.index].mul( - network.snapshot_weightings, axis=0).groupby( - [network.generators.bus, - network.generators.carrier], axis=1).sum() - dispatch_networkB =\ - networkB.generators_t.p[gens.index].mul( - networkB.snapshot_weightings, axis=0).groupby( - [networkB.generators.bus, - networkB.generators.carrier], - axis=1).sum() + dispatch_network = ( + network.generators_t.p[gens.index] + .mul(network.snapshot_weightings.generators, axis=0) + .groupby( + [network.generators.bus, network.generators.carrier], + axis=1, + ) + .sum() + ) + dispatch_networkB = ( + networkB.generators_t.p[gens.index] + .mul(networkB.snapshot_weightings.generators, axis=0) + .groupby( + [networkB.generators.bus, networkB.generators.carrier], + axis=1, + ) + .sum() + ) dispatch = dispatch_network - dispatch_networkB - if direction == 'positive': + if direction == "positive": dispatch = dispatch[dispatch > 0].fillna(0) - elif direction == 'negative': + elif direction == "negative": dispatch = dispatch[dispatch < 0].fillna(0) - elif direction == 'absolute': + elif direction == "absolute": pass else: - return 'No valid direction given.' + return "No valid direction given." 
             dispatch = dispatch.sum()

         elif networkB is None:
-            dispatch =\
-                network.generators_t.p[gens.index].mul(
-                    network.snapshot_weightings, axis=0).sum().groupby(
-                        [network.generators.bus,
-                         network.generators.carrier]).sum()
-
-    scaling = 1/(max(abs(dispatch.groupby(level=0).sum())))*scaling
-    if direction != 'absolute':
+            dispatch = (
+                network.generators_t.p[gens.index]
+                .mul(network.snapshot_weightings.generators, axis=0)
+                .sum()
+                .groupby([network.generators.bus, network.generators.carrier])
+                .sum()
+            )

+    scaling = 1 / (max(abs(dispatch.groupby(level=0).sum()))) * scaling
+    if direction != "absolute":
         colors = coloring()
         subcolors = {a: colors[a] for a in techs}
         dispatch = dispatch.abs() + 1e-9
     else:
         dispatch = dispatch.sum(level=0)
-        colors = {s[0]: 'green' if s[1] > 0 else 'red'
-                  for s in dispatch.iteritems()}
+        colors = {
+            s[0]: "green" if s[1] > 0 else "red" for s in dispatch.iteritems()
+        }
         dispatch = dispatch.abs()
-        subcolors = {'negative': 'red', 'positive': 'green'}
+        subcolors = {"negative": "red", "positive": "green"}

     network.plot(
+        geomap=(cartopy_present or osm is not False),
         bus_sizes=dispatch * scaling,
         bus_colors=colors,
         line_widths=0.2,
         margin=0.01,
-        ax=ax)
+        ax=ax,
+    )

     fig.subplots_adjust(right=0.8)
     plt.subplots_adjust(wspace=0, hspace=0.001)
@@ -904,7 +1212,7 @@ def nodal_gen_dispatch(
         data_key = mpatches.Patch(color=subcolors[key], label=key)
         patchList.append(data_key)

-    ax.legend(handles=patchList, loc='upper left')
+    ax.legend(handles=patchList, loc="upper left")
     ax.autoscale()

     if filename is None:
@@ -917,7 +1225,7 @@ def nodal_gen_dispatch(


 def nodal_production_balance(network, timesteps, scaling=0.00001):
-    """ Function that calculates residual load per node in given timesteps
+    """Function that calculates residual load per node in given timesteps

     Parameters
     ----------
@@ -937,23 +1245,48 @@ def nodal_production_balance(network, timesteps, scaling=0.00001):

     """

-    gen = mul_weighting(network, network.generators_t.p).\
-        groupby(network.generators.bus, axis=1).sum().loc[
-            network.snapshots[timesteps]]
-    load = mul_weighting(network, network.loads_t.p).\
-        groupby(network.loads.bus, axis=1).sum().loc[
-            network.snapshots[timesteps]]
+    import_gen_from_links(network)
+
+    ac_buses = network.buses[network.buses.carrier == "AC"].index
+    network.generators = network.generators[
+        network.generators.bus.isin(ac_buses)
+    ]
+    network.generators_t.p = network.generators_t.p.loc[
+        :, network.generators_t.p.columns.isin(network.generators.index)
+    ]
+
+    gen = (
+        mul_weighting(network, network.generators_t.p)
+        .groupby(network.generators.bus, axis=1)
+        .sum()
+        .loc[network.snapshots[timesteps]]
+    )
+    load = (
+        mul_weighting(network, network.loads_t.p)
+        .groupby(network.loads.bus, axis=1)
+        .sum()
+        .loc[network.snapshots[timesteps]]
+    )

     residual_load = (gen - load).sum()

-    bus_colors = pd.Series({s[0]: 'green' if s[1] > 0 else 'red'
-                            for s in residual_load.iteritems()})
+    bus_colors = pd.Series(
+        {
+            s[0]: "green" if s[1] > 0 else "red"
+            for s in residual_load.iteritems()
+        }
+    )

     bus_sizes = residual_load.abs() * scaling

+    bus_sizes = pd.Series(data=bus_sizes, index=network.buses.index).fillna(0)
+    bus_colors = pd.Series(data=bus_colors, index=network.buses.index).fillna(
+        "grey"
+    )
+
     return bus_sizes, bus_colors

-def storage_p_soc(network, mean='1H', filename=None):
+
+def storage_p_soc(network, mean="1H", filename=None):
     """
     Plots the dispatch and state of charge (SOC) of extendable storages.
@@ -965,66 +1298,113 @@ def storage_p_soc(network, mean='1H', filename=None): Defines over how many snapshots the p and soc values will averaged. filename : path to folder + Returns + ------- + None. + """ sbatt = network.storage_units.index[ (network.storage_units.p_nom_opt > 1) - & (network.storage_units.capital_cost > 10) & - (network.storage_units.max_hours == 6)] + & (network.storage_units.capital_cost > 10) + & (network.storage_units.max_hours == 6) + ] shydr = network.storage_units.index[ (network.storage_units.p_nom_opt > 1) - & (network.storage_units.capital_cost > 10) & - (network.storage_units.max_hours == 168)] - - cap_batt = (network.storage_units.max_hours[sbatt] * - network.storage_units.p_nom_opt[sbatt]).sum() - cap_hydr = (network.storage_units.max_hours[shydr] * - network.storage_units.p_nom_opt[shydr]).sum() + & (network.storage_units.capital_cost > 10) + & (network.storage_units.max_hours == 168) + ] + + cap_batt = ( + network.storage_units.max_hours[sbatt] + * network.storage_units.p_nom_opt[sbatt] + ).sum() + cap_hydr = ( + network.storage_units.max_hours[shydr] + * network.storage_units.p_nom_opt[shydr] + ).sum() fig, ax = plt.subplots(1, 1) - if network.storage_units.p_nom_opt[sbatt].sum() < 1 and \ - network.storage_units.p_nom_opt[shydr].sum() < 1: + if ( + network.storage_units.p_nom_opt[sbatt].sum() < 1 + and network.storage_units.p_nom_opt[shydr].sum() < 1 + ): print("No storage unit to plot") - elif network.storage_units.p_nom_opt[sbatt].sum() > 1 and \ - network.storage_units.p_nom_opt[shydr].sum() < 1: - - (network.storage_units_t.p[sbatt].resample(mean).mean().sum(axis=1) / \ - network.storage_units.p_nom_opt[sbatt].sum()).plot( - ax=ax, label="Battery dispatch", color='orangered') + elif ( + network.storage_units.p_nom_opt[sbatt].sum() > 1 + and network.storage_units.p_nom_opt[shydr].sum() < 1 + ): + ( + network.storage_units_t.p[sbatt].resample(mean).mean().sum(axis=1) + / network.storage_units.p_nom_opt[sbatt].sum() + ).plot(ax=ax, label="Battery dispatch", color="orangered") # instantiate a second axes that shares the same x-axis ax2 = ax.twinx() - ((network.storage_units_t.state_of_charge[sbatt].resample(mean).\ - mean().sum(axis=1) / cap_batt)*100).plot( - ax=ax2, label="Battery state of charge", color='blue') - elif network.storage_units.p_nom_opt[sbatt].sum() < 1 and\ - network.storage_units.p_nom_opt[shydr].sum() > 1: - (network.storage_units_t.p[shydr].resample(mean).mean().sum(axis=1) /\ - network.storage_units.p_nom_opt[shydr].sum()).plot( - ax=ax, label="Hydrogen dispatch", color='teal') + ( + ( + network.storage_units_t.state_of_charge[sbatt] + .resample(mean) + .mean() + .sum(axis=1) + / cap_batt + ) + * 100 + ).plot(ax=ax2, label="Battery state of charge", color="blue") + elif ( + network.storage_units.p_nom_opt[sbatt].sum() < 1 + and network.storage_units.p_nom_opt[shydr].sum() > 1 + ): + ( + network.storage_units_t.p[shydr].resample(mean).mean().sum(axis=1) + / network.storage_units.p_nom_opt[shydr].sum() + ).plot(ax=ax, label="Hydrogen dispatch", color="teal") # instantiate a second axes that shares the same x-axis ax2 = ax.twinx() - ((network.storage_units_t.state_of_charge[shydr].resample(mean).\ - mean().sum(axis=1) / cap_hydr)*100).plot( - ax=ax2, label="Hydrogen state of charge", color='green') + ( + ( + network.storage_units_t.state_of_charge[shydr] + .resample(mean) + .mean() + .sum(axis=1) + / cap_hydr + ) + * 100 + ).plot(ax=ax2, label="Hydrogen state of charge", color="green") else: - 
(network.storage_units_t.p[sbatt].resample(mean).mean().sum(axis=1) / \ - network.storage_units.p_nom_opt[sbatt].sum()).plot( - ax=ax, label="Battery dispatch", color='orangered') - - (network.storage_units_t.p[shydr].resample(mean).mean().sum(axis=1) /\ - network.storage_units.p_nom_opt[shydr].sum()).plot( - ax=ax, label="Hydrogen dispatch", color='teal') + ( + network.storage_units_t.p[sbatt].resample(mean).mean().sum(axis=1) + / network.storage_units.p_nom_opt[sbatt].sum() + ).plot(ax=ax, label="Battery dispatch", color="orangered") + + ( + network.storage_units_t.p[shydr].resample(mean).mean().sum(axis=1) + / network.storage_units.p_nom_opt[shydr].sum() + ).plot(ax=ax, label="Hydrogen dispatch", color="teal") # instantiate a second axes that shares the same x-axis ax2 = ax.twinx() - ((network.storage_units_t.state_of_charge[shydr].resample(mean).\ - mean().sum(axis=1) / cap_hydr)*100).plot( - ax=ax2, label="Hydrogen state of charge", color='green') - - ((network.storage_units_t.state_of_charge[sbatt].resample(mean).\ - mean().sum(axis=1) / cap_batt)*100).plot( - ax=ax2, label="Battery state of charge", color='blue') + ( + ( + network.storage_units_t.state_of_charge[shydr] + .resample(mean) + .mean() + .sum(axis=1) + / cap_hydr + ) + * 100 + ).plot(ax=ax2, label="Hydrogen state of charge", color="green") + + ( + ( + network.storage_units_t.state_of_charge[sbatt] + .resample(mean) + .mean() + .sum(axis=1) + / cap_batt + ) + * 100 + ).plot(ax=ax2, label="Battery state of charge", color="blue") ax.set_xlabel("") ax.set_ylabel("Storage dispatch in p.u. \n <- charge - discharge ->") @@ -1035,7 +1415,6 @@ def storage_p_soc(network, mean='1H', filename=None): ax2.legend(loc=1) ax.set_title("Storage dispatch and state of charge") - if filename is None: plt.show() else: @@ -1056,47 +1435,66 @@ def storage_soc_sorted(network, filename=None): filename : path to folder + Returns + ------- + None. 
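+
+    Examples
+    --------
+    A hypothetical call on an optimized network (``etrago`` stands in for
+    a solved Etrago instance):
+
+    >>> storage_soc_sorted(etrago.network, filename=None)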
+ """ sbatt = network.storage_units.index[ (network.storage_units.p_nom_opt > 1) & (network.storage_units.capital_cost > 10) - & (network.storage_units.max_hours == 6)] + & (network.storage_units.max_hours == 6) + ] shydr = network.storage_units.index[ (network.storage_units.p_nom_opt > 1) & (network.storage_units.capital_cost > 10) - & (network.storage_units.max_hours == 168)] - - cap_batt = (network.storage_units.max_hours[sbatt] * - network.storage_units.p_nom_opt[sbatt]).sum() - cap_hydr = (network.storage_units.max_hours[shydr] * - network.storage_units.p_nom_opt[shydr]).sum() + & (network.storage_units.max_hours == 168) + ] fig, ax = plt.subplots(1, 1) - if network.storage_units.p_nom_opt[sbatt].sum() < 1 and \ - network.storage_units.p_nom_opt[shydr].sum() < 1: + if ( + network.storage_units.p_nom_opt[sbatt].sum() < 1 + and network.storage_units.p_nom_opt[shydr].sum() < 1 + ): print("No storage unit to plot") - elif network.storage_units.p_nom_opt[sbatt].sum() > 1 and \ - network.storage_units.p_nom_opt[shydr].sum() < 1: - (network.storage_units_t.p[sbatt].sum(axis=1).sort_values( - ascending=False).reset_index() / \ - network.storage_units.p_nom_opt[sbatt].sum())[0].plot( - ax=ax, label="Battery storage", color='orangered') - elif network.storage_units.p_nom_opt[sbatt].sum() < 1 and \ - network.storage_units.p_nom_opt[shydr].sum() > 1: - (network.storage_units_t.p[shydr].sum(axis=1).sort_values( - ascending=False).reset_index() / \ - network.storage_units.p_nom_opt[shydr].sum())[0].plot( - ax=ax, label="Hydrogen storage", color='teal') + elif ( + network.storage_units.p_nom_opt[sbatt].sum() > 1 + and network.storage_units.p_nom_opt[shydr].sum() < 1 + ): + ( + network.storage_units_t.p[sbatt] + .sum(axis=1) + .sort_values(ascending=False) + .reset_index() + / network.storage_units.p_nom_opt[sbatt].sum() + )[0].plot(ax=ax, label="Battery storage", color="orangered") + elif ( + network.storage_units.p_nom_opt[sbatt].sum() < 1 + and network.storage_units.p_nom_opt[shydr].sum() > 1 + ): + ( + network.storage_units_t.p[shydr] + .sum(axis=1) + .sort_values(ascending=False) + .reset_index() + / network.storage_units.p_nom_opt[shydr].sum() + )[0].plot(ax=ax, label="Hydrogen storage", color="teal") else: - (network.storage_units_t.p[sbatt].sum(axis=1).sort_values( - ascending=False).reset_index() / \ - network.storage_units.p_nom_opt[sbatt].sum())[0].plot( - ax=ax, label="Battery storage", color='orangered') - (network.storage_units_t.p[shydr].sum(axis=1).sort_values( - ascending=False).reset_index() / \ - network.storage_units.p_nom_opt[shydr].sum())[0].plot( - ax=ax, label="Hydrogen storage", color='teal') + ( + network.storage_units_t.p[sbatt] + .sum(axis=1) + .sort_values(ascending=False) + .reset_index() + / network.storage_units.p_nom_opt[sbatt].sum() + )[0].plot(ax=ax, label="Battery storage", color="orangered") + ( + network.storage_units_t.p[shydr] + .sum(axis=1) + .sort_values(ascending=False) + .reset_index() + / network.storage_units.p_nom_opt[shydr].sum() + )[0].plot(ax=ax, label="Hydrogen storage", color="teal") ax.set_xlabel("") ax.set_ylabel("Storage dispatch in p.u. 
\n <- charge - discharge ->") @@ -1107,13 +1505,14 @@ def storage_soc_sorted(network, filename=None): if filename is None: plt.show() else: - plt.savefig(filename, figsize=(3, 4), bbox_inches='tight') + plt.savefig(filename, figsize=(3, 4), bbox_inches="tight") plt.close() return + def mul_weighting(network, timeseries): - """ Returns timeseries considering snapshot_weighting + """Returns timeseries considering snapshot_weighting Parameters ---------- @@ -1128,10 +1527,11 @@ def mul_weighting(network, timeseries): timeseries considering snapshot_weightings """ - return timeseries.mul(network.snapshot_weightings, axis=0) + return timeseries.mul(network.snapshot_weightings.generators, axis=0) + def calc_ac_loading(network, timesteps): - """ Calculates loading of AC-lines + """Calculates loading of AC-lines Parameters ---------- @@ -1144,24 +1544,31 @@ def calc_ac_loading(network, timesteps): Returns ------- pandas.Series - ACC line loading in MVA + AC line loading in MVA """ - loading_lines = mul_weighting(network, network.lines_t.p0).loc[ - network.snapshots[timesteps]].sum() + loading_lines = ( + mul_weighting(network, network.lines_t.p0) + .loc[network.snapshots[timesteps]] + .sum() + ) if not network.lines_t.q0.empty: + loading_lines = ( + loading_lines**2 + + mul_weighting(network, network.lines_t.q0) + .loc[network.snapshots[timesteps]] + .abs() + .sum() + ** 2 + ).apply(sqrt) - loading_lines = (loading_lines ** 2 +\ - mul_weighting(network, network.lines_t.q0).loc[ - network.snapshots[timesteps]].abs().sum() ** 2).\ - apply(sqrt) + return loading_lines / network.lines.s_nom_opt - return loading_lines/network.lines.s_nom_opt def calc_dc_loading(network, timesteps): - """ Calculates loading of DC-lines + """Calculates loading of DC-lines Parameters @@ -1178,44 +1585,30 @@ def calc_dc_loading(network, timesteps): DC line loading in MW """ - # Aviod covering of bidirectional links - network.links['linked_to'] = 0 - for i, row in network.links.iterrows(): - if not (network.links.index[ - (network.links.bus0 == row['bus1']) & - (network.links.bus1 == row['bus0']) & - (network.links.length == row['length'])]).empty: - - l = network.links.index[(network.links.bus0 == row['bus1']) & - (network.links.bus1 == row['bus0']) & - (network.links.length == row['length'])] - - network.links.at[i, 'linked_to'] = l.values[0] - - network.links.linked_to = network.links.linked_to.astype(str) - # Set p_nom_max and line_loading for one directional links - link_load = network.links_t.p0[network.links.index[ - network.links.linked_to == '0']] - - p_nom_opt_max = network.links.p_nom_opt[network.links.linked_to == '0'] - - # Set p_nom_max and line_loading for bidirectional links - for i, row in network.links[network.links.linked_to != '0'].iterrows(): - load = pd.DataFrame(index=network.links_t.p0.index, - columns=['to', 'from']) - load['to'] = network.links_t.p0[row['linked_to']] - load['from'] = network.links_t.p0[i] - link_load[i] = load.abs().max(axis=1) - p_nom_opt_max[i] = max(row.p_nom_opt, - network.links.p_nom_opt[ - network.links.index == row['linked_to']] - .values[0]) - - return (mul_weighting(network, link_load).loc[network.snapshots[timesteps]] - .abs().sum()[network.links.index]/p_nom_opt_max).dropna() + dc_links = network.links.loc[network.links.carrier == "DC", :] + + link_load = network.links_t.p0[ + network.links.index[network.links.carrier == "DC"] + ] + + dc_load = pd.Series(index=network.links.index, data=0.0) + dc_load.loc[dc_links.index] = ( + ( + mul_weighting(network, link_load) + 
.loc[network.snapshots[timesteps]] + .abs() + .sum()[dc_links.index] + / dc_links.p_nom_opt + ) + .fillna(0) + .values + ) + + return dc_load + def plotting_colors(network): - """ Add color values to network.carriers + """Add color values to network.carriers Parameters ---------- @@ -1227,19 +1620,21 @@ def plotting_colors(network): None. """ - if network.carriers.columns[1] != 'co2_emissions': - network.carriers = network.carriers.set_index( - network.carriers.columns[1]) + # if network.carriers.columns[1] != 'co2_emissions': + # network.carriers = network.carriers.set_index( + # network.carriers.columns[1]) colors = coloring() - for i in network.carriers.index: - if i in colors.keys(): - network.carriers.color[i] = colors[i] - network.carriers.color['hydrogen_storage'] = 'sandybrown' - network.carriers.color['battery_storage'] = 'blue' - network.carriers.color[network.carriers.color == ''] = 'grey' + for i in colors.keys(): + network.carriers.loc[i, "color"] = colors[i] + # if i in colors.keys(): + # network.carriers.color[i] = colors[i] + # network.carriers.color['hydrogen_storage'] = 'sandybrown' + # network.carriers.color['battery_storage'] = 'blue' + # network.carriers.color[network.carriers.color == ''] = 'grey' + -def calc_network_expansion(network, method='abs', ext_min=0.1): - """ Calculates absolute or relative expansion per AC- and DC-line +def calc_network_expansion(network, method="abs", ext_min=0.1): + """Calculates absolute or relative expansion per AC- and DC-line Parameters ---------- @@ -1252,7 +1647,7 @@ def calc_network_expansion(network, method='abs', ext_min=0.1): Returns ------- - all_network : :class:`pypsa.Network + network : :class:`pypsa.Network Whole network including not extended lines extension_lines : pandas.Series AC-line expansion @@ -1260,40 +1655,74 @@ def calc_network_expansion(network, method='abs', ext_min=0.1): DC-line expansion """ - all_network = network.copy() - network.lines = network.lines[network.lines.s_nom_extendable & - ((network.lines.s_nom_opt - - network.lines.s_nom_min) / - network.lines.s_nom >= ext_min)] - network.links = network.links[network.links.p_nom_extendable & ( - (network.links.p_nom_opt -network.links.p_nom_min)/ - network.links.p_nom >= ext_min)] - - for i, row in network.links.iterrows(): - linked = network.links[(row['bus1'] == network.links.bus0) & - (row['bus0'] == network.links.bus1)] + + network_c = network.copy() + + network_c.lines = network_c.lines[ + network_c.lines.s_nom_extendable + & ( + (network_c.lines.s_nom_opt - network_c.lines.s_nom_min) + / network_c.lines.s_nom + >= ext_min + ) + ] + network_c.links = network_c.links[ + network_c.links.p_nom_extendable + & (network_c.links.carrier == "DC") + & ( + (network_c.links.p_nom_opt - network_c.links.p_nom_min) + / network_c.links.p_nom + >= ext_min + ) + ] + + for i, row in network_c.links.iterrows(): + linked = network_c.links[ + (row["bus1"] == network_c.links.bus0) + & (row["bus0"] == network_c.links.bus1) + ] if not linked.empty: - if row['p_nom_opt'] < linked.p_nom_opt.values[0]: - network.links.p_nom_opt[i] = linked.p_nom_opt.values[0] + if row["p_nom_opt"] < linked.p_nom_opt.values[0]: + network_c.links.p_nom_opt[i] = linked.p_nom_opt.values[0] + + if method == "rel": + extension_lines = ( + 100 + * (network_c.lines.s_nom_opt - network_c.lines.s_nom_min) + / network_c.lines.s_nom + ) + + extension_links = pd.DataFrame( + data=network_c.links, index=network_c.links.index + ) - if method == 'rel': + extension_links = ( + 100 + * (network_c.links.p_nom_opt - 
network_c.links.p_nom_min) + / (network_c.links.p_nom) + ) + extension_links = extension_links.fillna(0) - extension_lines = (100 *(network.lines.s_nom_opt - - network.lines.s_nom_min) / - network.lines.s_nom) + if method == "abs": + extension_lines = network_c.lines.s_nom_opt - network_c.lines.s_nom_min - extension_links = (100 *(network.links.p_nom_opt - - network.links.p_nom_min)/ - (network.links.p_nom)) - if method == 'abs': - extension_lines = network.lines.s_nom_opt - network.lines.s_nom_min + extension_links = pd.DataFrame( + data=network_c.links, index=network_c.links.index + ) - extension_links = network.links.p_nom_opt - network.links.p_nom_min + extension_links = network_c.links.p_nom_opt - network_c.links.p_nom_min - return all_network, extension_lines, extension_links + extension_lines = pd.Series( + data=extension_lines, index=network.lines.index + ).fillna(0) + extension_links = pd.Series( + data=extension_links, index=network.links.index + ).fillna(0) + return network, extension_lines, extension_links -def plot_background_grid(network, ax): - """ Plots grid topology in background of other network.plot + +def plot_background_grid(network, ax, geographical_boundaries, osm): + """Plots grid topology in background of other network.plot Parameters ---------- @@ -1301,57 +1730,627 @@ network : :class:`pypsa.Network Overall container of PyPSA ax : matplotlib.axes._subplots.AxesSubplot axes of plot + geographical_boundaries : list + Set geographical boundaries for the plots + osm : False or dict + False if no osm background map is required, or a dictionary with + x, y and zoom information. Returns ------- None. """ - network.plot(ax=ax, line_colors='grey', link_colors='grey', - bus_sizes=0, line_widths=0.5, link_widths=0.55, - geomap=False) - -def plot_grid(self, - line_colors, - bus_sizes=0.02, - bus_colors='grey', - timesteps=range(2), - osm=False, - boundaries=None, - filename=None, - disaggregated=False, - ext_min=0.1, - ext_width=False): - """ Function that plots etrago.network and results for lines and buses + link_widths = pd.Series(index=network.links.index, data=0) + link_widths.loc[network.links.carrier == "DC"] = 0.3 + + if osm is not False: + network.plot( + ax=ax, + line_colors="grey", + link_colors="grey", + bus_sizes=0, + line_widths=0.5, + link_widths=link_widths, + geomap=False, + boundaries=geographical_boundaries, + ) + else: + if cartopy_present: + network.plot( + ax=ax, + line_colors="grey", + link_colors="grey", + bus_sizes=0, + line_widths=0.5, + link_widths=link_widths, + geomap=True, + projection=ccrs.PlateCarree(), + color_geomap=True, + boundaries=geographical_boundaries, + ) + else: + network.plot( + ax=ax, + line_colors="grey", + link_colors="grey", + bus_sizes=0, + line_widths=0.5, + link_widths=link_widths, + geomap=False, + ) + + +def demand_side_management(self, buses, snapshots, agg="5h", used=False): + """Calculate shifting potential of demand side management + + Parameters + ---------- + buses : array + List of electricity buses. + snapshots : array + List of snapshots. + agg : str, optional + Temporal resolution. The default is '5h'. + used : boolean, optional + State if usage should be included in the results. The default is False.
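The shifting band is read straight off the dsm components: the per-unit limits of the dsm links and stores are scaled by their nominal power and energy and summed over the selected buses. A condensed sketch of the power band (``n`` is an assumed solved network, ``dsm_links`` a hypothetical index of dsm links):

.. code-block:: python

    import pandas as pd

    def dsm_power_band(n, dsm_links, agg="5h"):
        # Nominal power of the dsm links, indexed by link name.
        p_nom = n.links.loc[dsm_links, "p_nom"]
        band = pd.DataFrame(
            {
                # Lower and upper shifting potential in MW per snapshot.
                "p_min": n.links_t.p_min_pu[dsm_links].mul(p_nom, axis=1).sum(axis=1),
                "p_max": n.links_t.p_max_pu[dsm_links].mul(p_nom, axis=1).sum(axis=1),
            }
        )
        # Average into the requested temporal resolution.
        return band.resample(agg).mean()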
+ + Returns + ------- + df : pandas.DataFrame + Shifting potential (and usage) of power (MW) and energy (MWh) + + """ + df = pd.DataFrame(index=self.network.snapshots[snapshots]) + + link = self.network.links[ + (self.network.links.carrier == "dsm") + & (self.network.links.bus0.isin(buses)) + ] + s = self.network.stores[ + (self.network.stores.carrier == "dsm") + & (self.network.stores.bus.isin(link.bus1.values)) + ] + + df["p_min"] = ( + self.network.links_t.p_min_pu[link.index] + .mul(link.p_nom, axis=1) + .sum(axis=1) + .resample(agg) + .mean() + .iloc[snapshots] + ) + df["p_max"] = ( + self.network.links_t.p_max_pu[link.index] + .mul(link.p_nom, axis=1) + .sum(axis=1) + .resample(agg) + .mean() + .iloc[snapshots] + ) + + df["e_min"] = ( + self.network.stores_t.e_min_pu[s.index] + .mul(s.e_nom, axis=1) + .sum(axis=1) + .iloc[snapshots] + ) + df["e_max"] = ( + self.network.stores_t.e_max_pu[s.index] + .mul(s.e_nom, axis=1) + .sum(axis=1) + .iloc[snapshots] + ) + + if used: + df["p"] = ( + self.network.links_t.p0[link.index] + .clip(lower=0) + .sum(axis=1) + .resample(agg) + .mean()[snapshots] + ) + df["e"] = self.network.stores_t.e[s.index].sum(axis=1).iloc[snapshots] + + return df + + +def bev_flexibility_potential( + self, + buses, + snapshots, + agg="5h", + used=False, +): + """Calculate shifting potential of electric vehicles + + Parameters + ---------- + buses : array + List of electricity buses. + snapshots : array + List of snapshots. + agg : str, optional + Temporal resolution. The default is '5h'. + used : boolean, optional + State if usage should be included in the results. The default is False. + + Returns + ------- + df : pandas.DataFrame + Shifting potential (and usage) of power (MW) and energy (MWh) + + """ + + # Initialize DataFrame + df = pd.DataFrame(index=self.network.snapshots[snapshots]) + + # Select BEV buses and links + bev_buses = self.network.buses[ + self.network.buses.carrier.str.contains("Li ion") + ] + bev_links = self.network.links[ + (self.network.links.bus1.isin(bev_buses.index.values)) + & (self.network.links.bus0.isin(buses)) + ] + bev_buses = bev_links.bus1.values + + # Maximum loading of BEV charger in MW per BEV bus + bev_links_t = ( + self.network.links_t.p_max_pu[bev_links.index] + .mul(bev_links.p_nom, axis=1) + .iloc[snapshots] + ) + bev_links_t.columns = bev_links_t.columns.map(bev_links.bus1) + + # BEV loads per bus + bev_loads = self.network.loads[self.network.loads.bus.isin(bev_buses)] + bev_loads_t = self.network.loads_t.p_set[bev_loads.index].iloc[snapshots] + bev_loads_t.columns = bev_loads_t.columns.map(bev_loads.bus) + + # Maximal positive shifting df is max. 
loading of charger minus fixed loads + df["p_max"] = (bev_links_t - bev_loads_t).sum(axis=1).resample(agg).mean() + + # Maximal negative shifting is minus fixed loads + df["p_min"] = bev_loads_t.mul(-1).sum(axis=1).resample(agg).mean() + + # Select BEV stores (batteries of vehicles) + bev_stores = self.network.stores[self.network.stores.bus.isin(bev_buses)] + + # Calculate maximum and minimum state of charge of the batteries + df["e_max"] = ( + self.network.stores_t.e_max_pu[bev_stores.index] + .mul(bev_stores.e_nom, axis=1) + .iloc[snapshots] + .sum(axis=1) + .resample(agg) + .mean() + ) + df["e_min"] = ( + self.network.stores_t.e_min_pu[bev_stores.index] + .mul(bev_stores.e_nom, axis=1) + .iloc[snapshots] + .sum(axis=1) + .resample(agg) + .mean() + ) + + if used: + bev_links_t_used = self.network.links_t.p0[bev_links.index].iloc[ + snapshots + ] + + bev_links_t_used.columns = bev_links_t_used.columns.map(bev_links.bus1) + + bev_usage = bev_links_t_used - bev_loads_t + + df["p"] = ( + bev_usage.clip(lower=0).sum(axis=1).resample(agg).mean() # always > 0 + + bev_usage.clip(upper=0) + .sum(axis=1) + .resample(agg) + .mean() # always < 0 + ) + df["e"] = ( + self.network.stores_t.e[bev_stores.index] + .sum(axis=1) + .resample(agg) + .mean() + .iloc[snapshots] + ) + + return df + + +def heat_stores( + self, + buses, + snapshots, + agg="5h", + used=False, +): + """Calculate shifting potential (and usage) of heat stores + + Parameters + ---------- + buses : array + List of electricity buses. + snapshots : array + List of snapshots. + agg : str, optional + Temporal resolution. The default is '5h'. + used : boolean, optional + State if usage should be included in the results. The default is False. + + Returns + ------- + df : pandas.DataFrame + Shifting potential (and usage) of power (MW) and energy (MWh) + + """ + df = pd.DataFrame(index=self.network.snapshots[snapshots]) + + heat_buses = self.network.links[ + self.network.links.bus0.isin( + self.network.buses[ + (self.network.buses.carrier == "AC") + & (self.network.buses.index.isin(buses)) + ].index + ) + & self.network.links.bus1.isin( + self.network.buses[ + self.network.buses.carrier.str.contains("heat") + ].index + ) + ].bus1.unique() + + l_charge = self.network.links[ + (self.network.links.carrier.str.contains("heat_store_charger")) + & (self.network.links.bus0.isin(heat_buses)) + ] + l_discharge = self.network.links[ + (self.network.links.carrier.str.contains("heat_store_discharger")) + & (self.network.links.bus1.isin(heat_buses)) + ] + + s = self.network.stores[ + (self.network.stores.carrier.str.contains("heat_store")) + & (self.network.stores.bus.isin(l_charge.bus1.values)) + ] + + df["p_min"] = l_discharge.p_nom_opt.mul(-1 * l_discharge.efficiency).sum() + df["p_max"] = l_charge.p_nom_opt.mul(l_charge.efficiency).sum() + + df["e_min"] = 0 + df["e_max"] = s.e_nom_opt.sum() + + if used: + df["p"] = ( + self.network.links_t.p1[l_charge.index] + .mul(-1) + .sum(axis=1) + .resample(agg) + .mean()[snapshots] + + self.network.links_t.p0[l_discharge.index] + .mul(-1) + .sum(axis=1) + .resample(agg) + .mean()[snapshots] + ) + df["e"] = self.network.stores_t.e[s.index].sum(axis=1).iloc[snapshots] + + return df + + +def hydrogen_stores( + self, + buses, + snapshots, + agg="5h", + used=False, +): + """Calculate shifting potential (and usage) of hydrogen stores + + Parameters + ---------- + buses : array + List of electricity buses. + snapshots : array + List of snapshots. + agg : str, optional + Temporal resolution. The default is '5h'.
+ used : boolean, optional + State if usage should be included in the results. The default is False. + + Returns + ------- + df : pandas.DataFrame + Shifting potential (and usage) of power (MW) and energy (MWh) + + """ + df = pd.DataFrame(index=self.network.snapshots[snapshots]) + + h2_buses = self.network.links[ + self.network.links.bus0.isin( + self.network.buses[ + (self.network.buses.carrier == "AC") + & (self.network.buses.index.isin(buses)) + ].index + ) + & self.network.links.bus1.isin( + self.network.buses[ + self.network.buses.carrier.str.contains("H2") + ].index + ) + ].bus1.unique() + + s = self.network.stores[self.network.stores.bus.isin(h2_buses)] + + df["p_min"] = self.network.stores_t.p[s.index].sum(axis=1).min() + df["p_max"] = self.network.stores_t.p[s.index].sum(axis=1).max() + + df["e_min"] = 0 + df["e_max"] = s.e_nom_opt.sum() + + if used: + df["p"] = self.network.stores_t.p[s.index].sum(axis=1).iloc[snapshots] + df["e"] = self.network.stores_t.e[s.index].sum(axis=1).iloc[snapshots] + + return df + + +def flexibility_usage( + self, flexibility, agg="5h", snapshots=[], buses=[], pre_path=None +): + """Plots temporal distribution of potential and usage for flexibilities + + Parameters + ---------- + flexibility : str + Name of flexibility option. + agg : str, optional + Temporal resolution. The default is "5h". + snapshots : list, optional + Considered snapshots, if empty all are considered. The default is []. + buses : list, optional + Considered components at AC buses, if empty all are considered. + The default is []. + pre_path : str, optional + Path prefix under which the generated figures are stored. If None, + the figures are not saved. The default is None. + + Returns + ------- + None. + + """ + colors = coloring() + colors["dlr"] = "orange" + colors["h2_store"] = colors["H2_underground"] + colors["heat"] = colors["central_heat_store"] + + if not buses: + buses = self.network.buses.index + + if len(snapshots) == 0: + snapshots = range(1, len(self.network.snapshots)) + + if flexibility == "dsm": + df = demand_side_management( + self, + buses, + snapshots, + agg, + used=True, + ) + + elif flexibility == "BEV charger": + df = bev_flexibility_potential( + self, + buses, + snapshots, + agg, + used=True, + ) + + elif flexibility == "heat": + df = heat_stores( + self, + buses, + snapshots, + agg, + used=True, + ) + + elif flexibility == "battery": + df = pd.DataFrame(index=self.network.snapshots[snapshots]) + + su = self.network.storage_units[ + (self.network.storage_units.carrier == "battery") + & (self.network.storage_units.bus.isin(buses)) + ] + + df["p_min"] = su.p_nom_opt.sum() * (-1) + df["p_max"] = su.p_nom_opt.sum() + df["p"] = ( + self.network.storage_units_t.p[su.index] + .sum(axis=1) + .iloc[snapshots] + ) + + df["e_min"] = 0 + df["e_max"] = su.p_nom_opt.mul(su.max_hours).sum() + df["e"] = ( + self.network.storage_units_t.state_of_charge[su.index] + .sum(axis=1) + .iloc[snapshots] + ) + + elif flexibility == "h2_store": + df = hydrogen_stores( + self, + buses, + snapshots, + agg, + used=True, + ) + + fig, ax = plt.subplots(figsize=(15, 5)) + ax.fill_between( + df.index, df.p_min, df.p_max, color=colors[flexibility], alpha=0.2 + ) + ax.plot(df.index, df.p, color=colors[flexibility]) + ax.set_ylabel("shifted power in MW") + ax.set_xlim(df.index[0], df.index[-1]) + if pre_path: + fig.savefig(pre_path + f"shifted_p_{flexibility}") + + fig_e, ax_e = plt.subplots(figsize=(15, 5)) + ax_e.fill_between( + df.index, df.e_min, df.e_max, color=colors[flexibility], alpha=0.2 + ) + ax_e.plot(df.index, df.e,
color=colors[flexibility]) + ax_e.set_ylabel("stored energy in MWh") + ax_e.set_xlim(df.index[0], df.index[-1]) + if pre_path: + fig_e.savefig(pre_path + f"stored_e_{flexibility}") + + +def plot_carrier(etrago, carrier_links=["AC"], carrier_buses=["AC"]): + """Plot the links and buses of the selected carriers. + + Parameters + ---------- + etrago : :class:`Etrago + Overall container of eTraGo + carrier_links : list + List of links to be plotted. The default is ["AC"]. + carrier_buses : list + List of buses to be plotted. The default is ["AC"]. + + Returns + ------- + None. + + """ + network = etrago.network + colors = coloring() + line_colors = "lightblue" + + # Set background + if cartopy_present: + plt.rcParams["figure.autolayout"] = True + fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) + draw_map_cartopy(ax, color_geomap=True) + else: + fig, ax = plt.subplots() + + link_width = pd.Series(index=network.links.index, data=2) + + if len(carrier_links) > 0: + link_width.loc[~network.links.carrier.isin(carrier_links)] = 0 + + bus_sizes = pd.Series(index=network.buses.index, data=0.0005) + + if len(carrier_buses) > 0: + bus_sizes.loc[~network.buses.carrier.isin(carrier_buses)] = 0 + + link_colors = network.links.carrier.map(colors) + + bus_colors = network.buses.carrier.map(colors) + + if "AC" in carrier_links: + line_widths = 1 + else: + line_widths = 0 + + title = "" + + network.plot( + geomap=cartopy_present, + bus_sizes=bus_sizes, + link_widths=link_width, + line_widths=line_widths, + title=title, + link_colors=link_colors, + line_colors=line_colors, + bus_colors=bus_colors, + ax=ax, + ) + + patchList = [] + for key in carrier_links: + if key != "AC": + data_key = mpatches.Patch(color=colors[key], label=f"Link {key}") + else: + data_key = mpatches.Patch(color=line_colors, label=f"Line {key}") + patchList.append(data_key) + for key in carrier_buses: + data_key = mpatches.Patch(color=colors[key], label=f"Bus {key}") + patchList.append(data_key) + ax.legend(handles=patchList, loc="lower left", ncol=1) + ax.autoscale() + + +def plot_grid( + self, + line_colors, + bus_sizes=0.001, + bus_colors="grey", + timesteps=range(2), + osm=False, + boundaries=None, + filename=None, + disaggregated=False, + ext_min=0.1, + ext_width=False, + legend_entries="all", + scaling_store_expansion=False, + geographical_boundaries=[-2.5, 16, 46.8, 58], +): + """Function that plots etrago.network and results for lines and buses Parameters ---------- line_colors : str Set static line color or attribute to plot e.g. 'expansion_abs' Current options: - 'line_loading': mean line loading in p.u. in selected timesteps - 'v_nom': nominal voltage of lines - 'expansion_abs': absolute network expansion in MVA - 'expansion_rel': network expansion in p.u. of existing capacity - 'q_flow_max': maximal reactive flows + + * 'line_loading': mean line loading in p.u. in selected timesteps + * 'v_nom': nominal voltage of lines + * 'expansion_abs': absolute network expansion in MVA + * 'expansion_rel': network expansion in p.u. of existing capacity + * 'q_flow_max': maximal reactive flows + * 'dlr': energy above nominal capacity + * 'grey': plot all lines and DC links grey colored + bus_sizes : float, optional - Size of buses. The default is 0.02. + Size of buses. The default is 0.001. bus_colors : str, optional Set static bus color or attribute to plot. The default is 'grey'.
Current options: - 'nodal_production_balance': net producer/consumer in selected timeteps - 'storage_expansion': storage expansion per bus and technology - 'storage_distribution': installed storage units per bus - 'gen_dist': dispatch per carrier in selected timesteps + + * 'nodal_production_balance': net producer/consumer in selected timesteps + * 'storage_expansion': storage expansion per bus and technology + * 'storage_distribution': installed storage units per bus + * 'h2_battery_storage_expansion': storage expansion per bus and + technology for underground and overground H2 and batteries. + * 'gen_dist': dispatch per carrier in selected timesteps + * 'PowerToH2': location and sizes of electrolyzers + * 'flexibility_usage': use of DSM and BEV charger + timesteps : array, optional - Timesteps consideredd in time depended plots. The default is range(2). + Timesteps considered in time-dependent plots. The default + is range(2). osm : bool or dict, e.g. {'x': [1,20], 'y': [47, 56], 'zoom' : 6} If not False, osm is set as background with the following settings as dict: - 'x': array of two floats, x axis boundaries (lat) - 'y': array of two floats, y axis boundaries (long) - 'zoom' : resolution of osm. The default is False. + + * 'x': array of two floats, x axis boundaries (long) + * 'y': array of two floats, y axis boundaries (lat) + * 'zoom' : resolution of osm. The default is False. + boundaries: array Set fixed boundaries of heatmap axis. The default is None. filename: str or None @@ -1361,9 +2360,19 @@ def plot_grid(self, ext_min: float Choose minimum relative line extension shown in plot in p.u.. ext_width: float or bool - Choose if line_width respects line extension. Turn off with 'False' or - set linear factor to decremise extension line_width. + Choose if line_width respects line extension. Turn off with + 'False' or set linear factor to decrease extension line_width. The default is False. + legend_entries : list, optional + Set the legends for buses to be plotted. The default is 'all'. + scaling_store_expansion : dict, optional + Set scaling values to be used per technology for the plots + storage_expansion and h2_battery_storage_expansion. The default is + False, it can be assigned like this: + {"H2": 50, "heat": 0.1, "battery": 10} + geographical_boundaries : list, optional + Set geographical boundaries for the plots. This parameter is overwritten + when osm is used.
The default is [-2.5, 16, 46.8, 58] Returns ------- @@ -1381,192 +2390,1474 @@ def plot_grid(self, # Set default values flow = None + title = "" line_widths = 2 - link_widths = 2 + link_widths = 0 # Plot osm map in background - if osm != False: + if osm is not False: if network.srid == 4326: set_epsg_network(network) - fig, ax = plot_osm(osm['x'], osm['y'], osm['zoom']) + fig, ax, xrange, yrange = plot_osm(osm["x"], osm["y"], osm["zoom"]) + geographical_boundaries = [xrange[0], xrange[1], yrange[0], yrange[1]] + + elif (osm is False) and cartopy_present: + fig, ax = plt.subplots( + subplot_kw={"projection": ccrs.PlateCarree()}, figsize=(5, 5) + ) else: - fig, ax = plt.subplots(1, 1) + fig, ax = plt.subplots(figsize=(5, 5)) + + fig.set_tight_layout(True) # Set line colors - if line_colors == 'line_loading': - title = 'Mean loading from ' + str(network.snapshots[timesteps[0]])+\ - ' to ' + str(network.snapshots[timesteps[-1]]) - rep_snapshots = network.snapshot_weightings\ - [network.snapshots[timesteps]].sum() - line_colors = calc_ac_loading(network, timesteps).abs()/rep_snapshots - link_colors = calc_dc_loading(network, timesteps).abs()/rep_snapshots - label = 'line loading in p.u.' + if line_colors == "line_loading": + title = "Mean line loading" + rep_snapshots = network.snapshot_weightings["objective"][ + network.snapshots[timesteps] + ].sum() + line_colors = calc_ac_loading(network, timesteps).abs() / rep_snapshots + link_colors = calc_dc_loading(network, timesteps).abs() / rep_snapshots + if ext_width is not False: + link_widths = link_colors.apply( + lambda x: 10 + (x / ext_width) if x != 0 else 0 + ) + line_widths = 10 + (line_colors / ext_width) + else: + link_widths = link_colors.apply(lambda x: 10 if x != 0 else 0) + line_widths = 10 + label = "line loading in p.u." + plot_background_grid(network, ax, geographical_boundaries, osm) # Only active flow direction is displayed! 
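# The block below builds a branch-indexed flow series that is clipped to
# -1/0/+1 at the end: the arrows only show the prevailing direction of the
# mean weighted flow, never its magnitude. The same idea as a standalone
# sketch, assuming a solved network ``n`` and the selected ``timesteps``:
#
#     import numpy as np
#
#     def mean_line_flow_direction(n, timesteps):
#         weights = n.snapshot_weightings.objective[n.snapshots[timesteps]]
#         mean_pu = (
#             n.lines_t.p0.loc[n.snapshots[timesteps]].mul(weights, axis=0).sum()
#             / n.lines.s_nom
#             / weights.sum()
#         )
#         return np.sign(mean_pu)  # -1, 0 or +1 per AC line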
- flow = pd.Series(index=network.branches().index, dtype='float64') - flow.iloc[flow.index.get_level_values('component') == 'Line'] = \ - (mul_weighting(network, network.lines_t.p0).loc[ - network.snapshots[timesteps]].sum()/\ - network.lines.s_nom/rep_snapshots).values - flow.iloc[flow.index.get_level_values('component') == 'Link'] = \ - (calc_dc_loading(network, timesteps)/rep_snapshots).values - elif line_colors == 'v_nom': - title = 'Voltage levels' - label = 'v_nom in kV' + flow = pd.Series(1, index=network.branches().index, dtype="float64") + flow.iloc[flow.index.get_level_values("component") == "Line"] = ( + mul_weighting(network, network.lines_t.p0) + .loc[network.snapshots[timesteps]] + .sum() + / network.lines.s_nom + / rep_snapshots + ).values + + dc_loading = calc_dc_loading(network, timesteps) / rep_snapshots + dc_loading.index = pd.MultiIndex.from_tuples( + [("Link", name) for name in dc_loading.index], + names=["component", "name"], + ) + flow.loc["Link", :] = dc_loading + + flow = flow[ + (flow.index.get_level_values("component") == "Line") + | ( + flow.index.isin( + link_widths[ + link_widths.index.isin( + network.links[network.links.carrier == "DC"].index + ) + ].index, + level=1, + ) + ) + ] + flow[flow < 0] = -1 + flow[flow > 0] = 1 + + elif line_colors == "v_nom": + title = "Voltage levels" + label = "v_nom in kV" line_colors = network.lines.v_nom - link_colors = network.links.v_nom - elif line_colors == 'expansion_abs': - title = 'Network expansion' - label = 'network expansion in MW' - all_network, line_colors, link_colors =\ - calc_network_expansion(network, method='abs', ext_min=ext_min) - plot_background_grid(all_network, ax) - if ext_width != False: + link_colors = pd.Series(data=0, index=network.links.index) + plot_background_grid(network, ax, geographical_boundaries, osm) + elif line_colors == "expansion_abs": + title = "Network expansion" + label = "network expansion in GVA" + all_network, line_colors, link_colors = calc_network_expansion( + network, method="abs", ext_min=ext_min + ) + plot_background_grid(all_network, ax, geographical_boundaries, osm) + + if ext_width is not False: + line_widths = line_colors / ext_width + link_widths = link_colors.apply( + lambda x: x / ext_width if x != 0 else 0 + ) + else: + dc_link = network.links.index[network.links.carrier == "DC"] + link_widths = pd.Series(0, index=network.links.index) + link_widths.loc[dc_link] = 1.5 + line_widths = line_colors.apply(lambda x: 1.5 if x != 0 else 0) + + link_colors = link_colors.mul(1e-3) + line_colors = line_colors.mul(1e-3) + + elif line_colors == "expansion_rel": + title = "Network expansion" + label = "network expansion in %" + all_network, line_colors, link_colors = calc_network_expansion( + network, method="rel", ext_min=ext_min + ) + plot_background_grid(all_network, ax, geographical_boundaries, osm) + if ext_width is not False: line_widths = 0.5 + (line_colors / ext_width) - link_widths = 0.5 + (link_colors / ext_width) - elif line_colors == 'expansion_rel': - title = 'Network expansion' - label = 'network expansion in %' - all_network, line_colors, link_colors =\ - calc_network_expansion(network, method='rel', ext_min=ext_min) - plot_background_grid(all_network, ax) - if ext_width != False: + link_widths = link_colors.apply( + lambda x: 0.5 + x / ext_width if x != 0 else 0 + ) + else: + dc_link = network.links.index[network.links.carrier == "DC"] + link_widths = pd.Series(0, index=network.links.index) + link_widths.loc[dc_link] = 2 + line_widths = line_colors.apply(lambda x: 
1.5 if x != 0 else 0) + elif line_colors == "q_flow_max": + title = "Maximum reactive power flows" + label = "flow in pu" + line_colors = abs( + network.lines_t.q0.abs().max() / (network.lines.s_nom) + ) + if ext_width is not False: line_widths = 0.5 + (line_colors / ext_width) - link_widths = 0.5 + (link_colors / ext_width) - elif line_colors == 'q_flow_max': - title = 'Maximmal reactive power flows' - label = 'flow in Mvar' - line_colors = abs(network.lines_t.q0.abs().max()/(network.lines.s_nom)) link_colors = pd.Series(data=0, index=network.links.index) + plot_background_grid(network, ax, geographical_boundaries, osm) + elif line_colors == "dlr": + title = "Dynamic line rating" + label = "TWh above nominal capacity" + plot_background_grid(network, ax, geographical_boundaries, osm) + + # calc min capacity per line in the given period: Since lines with + # different original voltage level could be aggregated during the + # clustering, the security factors can be values in between the values + # provided in the args for branch_capacity_factor. + network.lines.s_max_pu = network.lines_t.s_max_pu.min() + line_loading = network.lines_t.p0.mul( + 1 / (network.lines.s_nom_opt * network.lines.s_max_pu) + ).abs() + line_loading = line_loading.iloc[timesteps, :] + # keep only the capacity allowed by dlr + line_loading = line_loading - 1 + dlr_usage = ( + line_loading[line_loading > 0] + .fillna(0) + .mul(network.snapshot_weightings.generators, axis=0) + .sum() + ) + dlr_usage = ( + dlr_usage * network.lines.s_nom * network.lines.s_max_pu / 1000000 + ) + dlr_usage = dlr_usage.round(decimals=0) + line_colors = dlr_usage + if ext_width is not False: + line_widths = 0.2 + (line_colors / ext_width) + link_colors = pd.Series(data=0, index=network.links.index) + + elif line_colors == "grey": + title = "" + label = "" + line_colors = "grey" + link_colors = "grey" + plot_background_grid(network, ax, geographical_boundaries, osm) + link_widths = 0 + line_widths = 0 + else: logger.warning("line_color {} undefined".format(line_colors)) # Set bus colors - if bus_colors == 'nodal_production_balance': + bus_legend = False + + if bus_colors == "nodal_production_balance": bus_scaling = bus_sizes bus_sizes, bus_colors = nodal_production_balance( - network, timesteps, scaling=bus_scaling) - bus_legend = 'Nodal production balance' - bus_unit = 'TWh' - elif bus_colors == 'storage_expansion': + network, timesteps, scaling=bus_scaling + ) + bus_legend = "Nodal production balance" + bus_unit = "TWh" + elif bus_colors == "storage_expansion": + if not isinstance(scaling_store_expansion, dict): + raise Exception( + """To plot storage_expansion, the argument\ + scaling_store_expansion must be a dictionary like: + {"H2": 50, + "heat": 0.1, + "battery": 10}""" + ) bus_scaling = bus_sizes bus_sizes = bus_scaling * calc_storage_expansion_per_bus(network) - bus_legend = 'Storage expansion' - bus_unit = 'TW' - elif bus_colors == 'storage_distribution': + for store_carrier in scaling_store_expansion.keys(): + bus_sizes[ + bus_sizes.index.get_level_values("carrier").str.contains( + store_carrier + ) + ] *= scaling_store_expansion[store_carrier] + bus_legend = "Storage expansion" + bus_unit = "GW" + elif bus_colors == "h2_battery_storage_expansion": + bus_scaling = bus_sizes + bus_sizes = bus_scaling * calc_storage_expansion_per_bus( + network, carriers=["battery", "H2_overground", "H2_underground"] + ) + if ( + ("battery" not in scaling_store_expansion.keys()) + | ("H2_overground" not in scaling_store_expansion.keys()) + | 
("H2_underground" not in scaling_store_expansion.keys()) + ): + raise Exception( + """To plot h2_battery_storage_expansion, the argument\ + scaling_store_expansion must be a dictionary like: + {"H2_overground": 1, + "H2_underground": 1, + "battery": 1,}""" + ) + + for store_carrier in ["battery", "H2_overground", "H2_underground"]: + bus_sizes[ + bus_sizes.index.get_level_values("carrier").str.contains( + store_carrier + ) + ] *= scaling_store_expansion[store_carrier] + bus_legend = "Battery and H2 storage expansion" + bus_unit = "GW" + elif bus_colors == "storage_distribution": bus_scaling = bus_sizes - bus_sizes = bus_scaling * network.storage_units.p_nom_opt\ - .groupby(network.storage_units.bus)\ - .sum().reindex(network.buses.index, fill_value=0.) - bus_legend = 'Storage distribution' - bus_unit = 'TW' - elif bus_colors == 'gen_dist': + bus_sizes = ( + network.storage_units.groupby(["bus", "carrier"]).p_nom_opt.sum() + * bus_scaling + ) + bus_legend = "Storage distribution" + bus_unit = "TW" + elif bus_colors == "gen_dist": bus_scaling = bus_sizes bus_sizes = bus_scaling * calc_dispatch_per_carrier(network, timesteps) - bus_legend = 'Dispatch' - bus_unit = 'TW' + bus_legend = "Dispatch" + bus_unit = "TW" + elif bus_colors == "flexibility_usage": + bus_scaling = bus_sizes + flex_links = network.links[ + network.links.carrier.isin( + [ + "dsm", + "BEV charger", + ] + ) + ] + flex_links["p0_sum"] = ( + network.links_t.p0[flex_links.index] + .mul(network.snapshot_weightings.generators, axis=0) + .abs() + .sum() + ) + flex_links["p1_sum"] = ( + network.links_t.p1[flex_links.index] + .mul(network.snapshot_weightings.generators, axis=0) + .sum() + ) + bus_sizes = ( + bus_scaling * flex_links.groupby(["bus0", "carrier"]).p0_sum.sum() + ) + bus_unit = "TWh" + bus_legend = "flexibility_usage" + elif bus_colors == "h2_storage_expansion": + bus_scaling = bus_sizes + bus_sizes = bus_scaling * calc_storage_expansion_per_bus(network) + bus_sizes = bus_sizes.reset_index() + bus_sizes = bus_sizes[bus_sizes.carrier.str.contains("H2")] + bus_sizes.set_index(["bus", "carrier"], inplace=True) + bus_legend = "Storage expansion" + bus_unit = "GW" + elif ( + bus_colors == "PowerToH2" + ): # PowerToH2 plots p_nom_opt of links with carrier=power to H2 + bus_scaling = bus_sizes + bus_sizes = ( + bus_scaling + * network.links[(network.links.carrier == "power_to_H2")] + .groupby("bus0") + .sum() + .p_nom_opt + ) + if len(bus_sizes) == 0: + print("There is no PowerToH2 to plot") + bus_colors = coloring()["power_to_H2"] + bus_legend = "PowerToH2" + bus_unit = "TW" + elif bus_colors == "grey": + bus_scaling = bus_sizes + bus_sizes = pd.Series( + data=network.buses.carrier, index=network.buses.index + ) + bus_sizes[bus_sizes != "AC"] = 0 + bus_sizes[bus_sizes == "AC"] = 1 * bus_scaling + bus_scaling = bus_sizes else: logger.warning("bus_color {} undefined".format(bus_colors)) - ll = network.plot(line_colors=line_colors, link_colors=link_colors, - line_cmap=plt.cm.jet, link_cmap=plt.cm.jet, - bus_sizes=bus_sizes, - bus_colors=bus_colors, - line_widths=line_widths, link_widths=link_widths, - flow=flow, - title=title, - geomap=False) + if cartopy_present & (osm is False): + ll = network.plot( + line_colors=line_colors, + link_colors=link_colors, + line_cmap=plt.cm.jet, + link_cmap=plt.cm.jet, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + line_widths=line_widths, + link_widths=link_widths, + flow=flow, + title=title, + geomap=False, + projection=ccrs.PlateCarree(), + color_geomap=True, + 
boundaries=geographical_boundaries, + ) + else: + ll = network.plot( + line_colors=line_colors, + link_colors=link_colors, + line_cmap=plt.cm.jet, + link_cmap=plt.cm.jet, + bus_sizes=bus_sizes, + bus_colors=bus_colors, + line_widths=line_widths, + link_widths=link_widths, + flow=flow, + title=title, + geomap=False, + boundaries=geographical_boundaries, + ) + l3 = None # legends for bus sizes and colors - if type(bus_sizes) != float: - handles = make_legend_circles_for( - [bus_sizes.min(), bus_sizes.max()], scale=1, facecolor="gray") - labels = [("{} "+ bus_unit).format(s) for s in ( - round(bus_sizes.min()/bus_scaling/1000, 0), - round(bus_sizes.max()/bus_scaling/1000, 0))] - - l2 = ax.legend(handles, labels, - loc="upper left", bbox_to_anchor=(0.01, 1.01), - labelspacing=1.0, - framealpha=1., - title=bus_legend, - handler_map=make_handler_map_to_scale_circles_as_in(ax)) - ax.add_artist(l2) - + if bus_legend: handles = [] - if bus_legend == 'Nodal production balance': - positive = mpatches.Patch(color='green', label='generation') - negative = mpatches.Patch(color='red', label='consumption') - handles = [positive, negative] + labels = [] + if scaling_store_expansion: + if not isinstance(legend_entries, list): + if bus_legend == "Storage expansion": + legend_entries = list(scaling_store_expansion.keys()) + if bus_legend == "Battery and H2 storage expansion": + legend_entries = [ + "battery", + "H2_overground", + "H2_underground", + ] + for i in legend_entries: + try: + max_value = bus_sizes[ + bus_sizes.index.get_level_values( + "carrier" + ).str.contains(i) + ].max() + except KeyError: + max_value = bus_sizes.max() + handles.append( + make_legend_circles_for( + [max_value], + scale=1, + facecolor=network.carriers.color[i], + )[0] + ) + labels.append( + f""" + {round(max_value/bus_scaling/scaling_store_expansion[i]/ + 1000, 0).astype(int)} {bus_unit} """ + + i + ) else: - for i in network.carriers.color.index: - patch = mpatches.Patch(color=network.carriers.color[i], - label=i) - handles.append(patch) - - l3 = plt.legend(handles=handles, loc='upper left', ncol=3, - bbox_to_anchor=(-0.1, 0)) - ax.add_artist(l3) - - # Set fixed boundaries if selected in parameters - if not boundaries: - boundaries = [min(line_colors.min(), link_colors.min()), - max(line_colors.max(), link_colors.max())] - - # Create ticks for legend - v = np.linspace(boundaries[0], boundaries[1], 101) + if len(bus_sizes) > 0: + max_value = bus_sizes.max() + else: + max_value = 0 + labels.append(f"{round(max_value / bus_scaling /1000, 0)} GWh ") + handles.append( + make_legend_circles_for( + [max_value], + scale=1, + facecolor="grey", + )[0] + ) + + l2 = ax.legend( + handles, + labels, + loc="upper left", + bbox_to_anchor=(0.01, 1.01), + labelspacing=1.0, + framealpha=1.0, + title=bus_legend, + handler_map=make_handler_map_to_scale_circles_as_in(ax), + prop={"size": 8}, + ) + ax.add_artist(l2) - # colorbar for line heatmap - cb = plt.colorbar(ll[1], boundaries=v, ticks=v[0:101:10], - fraction=0.046, pad=0.04) - # Set legend label - cb.set_label(label) + plt.setp(l2.get_title(), fontsize="9") + + if not scaling_store_expansion: + handles = [] + if bus_legend == "Nodal production balance": + positive = mpatches.Patch(color="green", label="generation") + negative = mpatches.Patch(color="red", label="consumption") + handles = [positive, negative] + + elif bus_legend == "PowerToH2": + pth = mpatches.Patch(color="cyan", label="PowerToH2") + handles = [pth] + elif legend_entries != "all": + for i in legend_entries: + patch = 
mpatches.Patch( + color=network.carriers.color[i], label=i + ) + handles.append(patch) + else: + for i in bus_sizes.index.get_level_values("carrier").unique(): + patch = mpatches.Patch( + color=network.carriers.color[i], label=i + ) + handles.append(patch) + + l3 = plt.legend( + handles=handles, + loc="upper left", + ncol=2, + bbox_to_anchor=(0, 0), + ) + ax.add_artist(l3) + + if type(line_colors) != str: + # Set fixed boundaries if selected in parameters + if not boundaries: + boundaries = [ + min(round(line_colors.min(), 1), round(link_colors.min(), 1)), + max(round(line_colors.max()), round(link_colors.max())), + ] + + # Create ticks for legend + v = [ + round(x, 1) for x in np.linspace(boundaries[0], boundaries[1], 101) + ] + for l_collection in ll: + l_collection.set_clim(boundaries[0], boundaries[1]) + + # colorbar for line heatmap + cb = plt.colorbar( + ll[1], + values=v, + ticks=v[0:101:10], + fraction=0.028, + pad=0.04, + ) + # Set legend label + cb.set_label(label) # Show plot or save to file if filename is None: - if type(bus_sizes) != float: + if not isinstance(bus_sizes, (pd.Series, float)): logger.warning("Legend of bus sizes will change when zooming") + plt.tight_layout() plt.show() else: from matplotlib import pylab - pylab.savefig(filename, dpi=300, bbox_inches="tight") + + if l3 is None: + pylab.savefig(filename, dpi=300, bbox_inches="tight") + else: + pylab.savefig( + filename, dpi=300, bbox_inches="tight", bbox_extra_artists=[l3] + ) plt.close() + set_epsg_network.counter = 0 -### the following functions are copied from pypsa-eur-sec ### -### see here: https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/plot_network.py -from matplotlib.legend_handler import HandlerPatch -from matplotlib.patches import Circle, Ellipse +# the following functions are copied from pypsa-eur-sec. see: +# https://github.com/PyPSA/pypsa-eur-sec/blob/master/scripts/plot_network.py + + def make_handler_map_to_scale_circles_as_in(ax, dont_resize_actively=False): fig = ax.get_figure() def axes2pt(): - return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[ - 0] * (72. / fig.dpi) + return np.diff(ax.transData.transform([(0, 0), (1, 1)]), axis=0)[0] * ( + 72.0 / fig.dpi + ) ellipses = [] if not dont_resize_actively: + def update_width_height(event): dist = axes2pt() for e, radius in ellipses: - e.width, e.height = 2. * radius * dist - fig.canvas.mpl_connect('resize_event', update_width_height) - ax.callbacks.connect('xlim_changed', update_width_height) - ax.callbacks.connect('ylim_changed', update_width_height) - - def legend_circle_handler(legend, orig_handle, xdescent, ydescent, - width, height, fontsize): - w, h = 2. 
* orig_handle.get_radius() * axes2pt() - e = Ellipse(xy=(0.5 * width - 0.5 * xdescent, 0.5 * - height - 0.5 * ydescent), width=w, height=w) + e.width, e.height = 2.0 * radius * dist + + fig.canvas.mpl_connect("resize_event", update_width_height) + ax.callbacks.connect("xlim_changed", update_width_height) + ax.callbacks.connect("ylim_changed", update_width_height) + + def legend_circle_handler( + legend, orig_handle, xdescent, ydescent, width, height, fontsize + ): + w, h = 2.0 * orig_handle.get_radius() * axes2pt() + e = Ellipse( + xy=(0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent), + width=w, + height=w, + ) + ellipses.append((e, orig_handle.get_radius())) + return e + + return {Circle: HandlerPatch(patch_func=legend_circle_handler)} + + +def make_legend_circles_for(sizes, scale=1.0, **kw): - return [Circle((0, 0), radius=(s / scale)**0.5, **kw) for s in sizes] + return [Circle((0, 0), radius=(s / scale) ** 0.5, **kw) for s in sizes] + + ### -if __name__ == '__main__': +if __name__ == "__main__": pass + + +def plot_clusters( + self, + carrier="AC", + save_path=False, + transmission_lines=False, + gas_pipelines=False, +): + """ + Plot the original buses and their mapping to the clustered buses. + + Parameters + ---------- + carrier : str, optional + This variable sets the carrier of the buses that will be plotted. The + default is "AC". + save_path : bool, optional + Path to save the generated plot. The default is False. + transmission_lines : bool, optional + The default is False. Define if the original transmission lines are + plotted or not. + gas_pipelines : bool, optional + The default is False. Define if the original gas pipelines are + plotted or not. + + Returns + ------- + None.
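Each original bus is joined to its cluster representative via ``self.busmap["busmap"]``, and a straight line between the two coordinates visualises the assignment. The geometric core is a two-point shapely line; a sketch with purely illustrative coordinates:

.. code-block:: python

    from shapely.geometry import LineString, Point

    def assignment_line(orig_xy, cluster_xy):
        # Line from an original bus to the bus it was clustered into.
        return LineString([Point(orig_xy), Point(cluster_xy)])

    assignment_line((9.99, 53.55), (10.20, 53.10))  # hypothetical bus pair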
+ """ + new_geom = self.network.buses[ + [ + "carrier", + "x", + "y", + ] + ] + new_geom = new_geom[new_geom["carrier"] == carrier] + new_geom["geom"] = new_geom.apply(lambda x: Point(x["x"], x["y"]), axis=1) + map_buses = self.busmap["orig_network"].buses[ + [ + "carrier", + "x", + "y", + ] + ] + map_buses = map_buses[map_buses["carrier"] == carrier] + map_buses["geom"] = map_buses.apply( + lambda x: Point(x["x"], x["y"]), axis=1 + ) + map_buses["cluster"] = map_buses.index.map(self.busmap["busmap"]) + map_buses["cluster_geom"] = map_buses["cluster"].map(new_geom.geom) + map_buses["line"] = map_buses.apply( + lambda x: LineString((x["geom"], x["cluster_geom"])), axis=1 + ) + + # Set background + if cartopy_present: + plt.rcParams["figure.autolayout"] = True + fig, ax = plt.subplots(subplot_kw={"projection": ccrs.PlateCarree()}) + draw_map_cartopy(ax, color_geomap=True) + else: + fig, ax = plt.subplots() + + ax.set_title(f'Clustering {self.args["network_clustering"]["method"]}') + + # Draw original transmission lines + if transmission_lines: + # AC lines + lines = self.busmap["orig_network"].lines + if ( + self.busmap["orig_network"] + .lines["geom"] + .apply(lambda x: isinstance(x, str)) + .any() + ): + lines["geom"] = gpd.GeoSeries.from_wkt(lines["geom"]) + lines = gpd.GeoDataFrame( + self.busmap["orig_network"].lines, geometry="geom" + ) + lines = lines[ + lines["bus0"].isin(map_buses.index) + & lines["bus1"].isin(map_buses.index) + ] + lines["geom"] = lines.apply( + lambda x: x["geom"] + if not pd.isna(x["geom"]) + else LineString( + [map_buses["geom"][x["bus0"]], map_buses["geom"][x["bus1"]]] + ), + axis=1, + ) + lines.plot(ax=ax, color="grey", linewidths=0.8, zorder=1) + # DC lines + dc_lines = self.busmap["orig_network"].links + dc_lines = dc_lines[dc_lines["carrier"] == "DC"] + dc_lines["point0"] = dc_lines["bus0"].map(map_buses["geom"]) + dc_lines["point1"] = dc_lines["bus1"].map(map_buses["geom"]) + dc_lines["line_geom"] = dc_lines.apply( + lambda x: LineString([x["point0"], x["point1"]]), axis=1 + ) + dc_lines = gpd.GeoDataFrame(dc_lines, geometry="line_geom") + dc_lines.plot(ax=ax, color="grey", linewidths=0.8, zorder=1) + + if gas_pipelines: + # CH4 pipelines + pipelines = self.busmap["orig_network"].links + if ( + self.busmap["orig_network"] + .links["geom"] + .apply(lambda x: isinstance(x, str)) + .any() + ): + pipelines["geom"] = gpd.GeoSeries.from_wkt(pipelines["geom"]) + pipelines = pipelines[pipelines["carrier"] == "CH4"] + pipelines = gpd.GeoDataFrame(pipelines, geometry="geom") + pipelines.plot(ax=ax, color="grey", linewidths=0.8, zorder=1) + + # Assign a random color to each cluster + colors = { + color: np.random.rand( + 3, + ) + for color in map_buses.cluster.unique() + } + map_buses["color"] = map_buses["cluster"].map(colors) + + # Draw original and clustered buses + map_buses = gpd.GeoDataFrame(map_buses, geometry="line") + map_buses.plot(ax=ax, color=map_buses["color"], linewidths=0.25, zorder=2) + map_buses = gpd.GeoDataFrame(map_buses, geometry="geom") + map_buses.plot( + ax=ax, color=map_buses["color"], markersize=0.8, marker="o", zorder=3 + ) + map_buses = gpd.GeoDataFrame(map_buses, geometry="cluster_geom") + map_buses.plot( + ax=ax, + color=map_buses["color"], + markersize=10, + marker="o", + edgecolor="black", + zorder=3, + ) + + if save_path: + plt.savefig(save_path, dpi=800) + + return + + +def plot_gas_generation( + self, t_resolution="20H", save_path=False +): + """ + Plots timeseries data for gas generation + + Parameters +
---------- + self : :class:`Etrago + Overall container of Etrago + t_resolution : str, optional + sets the resampling rate of timeseries data to allow for smoother + line plots + save_path : bool, optional + Path to save the generated plot. The default is False. + + Returns + ------- + None. + + """ + fig, ax = plt.subplots(figsize=(20, 10), dpi=300) + + colors = coloring() + + ch4_gens_feedin = self.network.generators_t.p[ + [col for col in self.network.generators_t.p.columns if "CH4" in col] + ] # active power at bus + ch4_links_feedin = -self.network.links_t.p1[ + self.network.links.loc[self.network.links.carrier == "H2_to_CH4"].index + ] # p1 is output p of H2_to_CH4 + h2_links_feedin = -self.network.links_t.p1[ + self.network.links.loc[self.network.links.carrier == "H2_feedin"].index + ] + + total_gen_per_t = ch4_gens_feedin.sum(axis=1) / 1e3 + total_link_per_t = ch4_links_feedin.sum(axis=1) / 1e3 + total_h2_per_t = h2_links_feedin.sum(axis=1) / 1e3 + + (total_gen_per_t + total_link_per_t + total_h2_per_t).resample( + t_resolution + ).mean().plot( + ax=ax, + title="Gas Generation", + ylabel="[GW]", + legend=True, + label="Total Gas Dispatch", + ) + total_gen_per_t.plot( + ax=ax, label="CH4 Generator Dispatch", legend=True, color=colors["CH4"] + ) + total_h2_per_t.resample(t_resolution).mean().plot( + ax=ax, + label="H2_feedin Dispatch", + legend=True, + color=colors["H2_feedin"], + ) + total_link_per_t.resample(t_resolution).mean().plot( + ax=ax, + label="H2_to_CH4 Link Dispatch", + legend=True, + color=colors["H2_to_CH4"], + ) + + if save_path: + plt.savefig(save_path, dpi=300) + + +def plot_gas_summary(self, t_resolution="20H", stacked=True, save_path=False): + """ + Plots timeseries data for gas loads (and generation) + + Parameters + ---------- + self : :class:`Etrago + Overall container of Etrago + t_resolution : str, optional + sets the resampling rate of timeseries data to allow for smoother + line plots + stacked : bool, optional + If True all TS data will be shown as stacked area plot. Total gas + generation will then also be plotted to check for matching demand and + generation. + save_path : bool, optional + Path to save the generated plot. The default is False. + + Returns + ------- + None. 
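Methane supply is assembled from the CH4 generators plus the outputs of the H2_to_CH4 and H2_feedin links; since ``p1`` is the active power at a link's output bus and negative when the link delivers, feed-in enters with a sign flip. A condensed sketch of that aggregation for an assumed solved network ``n``:

.. code-block:: python

    def total_gas_supply_gw(n):
        # Dispatch of all CH4 generators in MW.
        gens = n.generators_t.p[
            [col for col in n.generators_t.p.columns if "CH4" in col]
        ].sum(axis=1)
        links = n.links.index[n.links.carrier.isin(["H2_to_CH4", "H2_feedin"])]
        # p1 is negative when a link feeds bus1, hence the minus sign.
        feedin = -n.links_t.p1[links].sum(axis=1)
        return (gens + feedin) / 1e3  # MW -> GW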
+ + """ + colors = coloring() + + ch4_load_carrier = ["rural_gas_boiler", "CH4_for_industry", "CH4"] + + rel_ch4_loads = self.network.links.loc[ + self.network.links.bus0.isin( + self.network.buses.loc[self.network.buses.carrier == "CH4"].index + ) + ].carrier.unique() + rel_ch4_loads = np.delete(rel_ch4_loads, np.where(rel_ch4_loads == "CH4")) + + data = self.network.links_t.p0[ + self.network.links.loc[ + self.network.links.carrier == rel_ch4_loads[0] + ].index.to_list() + ] + + if stacked: + data = ( + pd.DataFrame(data.sum(axis=1)).resample(t_resolution).mean() / 1e3 + ) + data = data.rename(columns={0: rel_ch4_loads[0]}) + + for i in rel_ch4_loads[1:]: + loads = self.network.links_t.p0[ + self.network.links.loc[ + self.network.links.carrier == i + ].index.to_list() + ] + data[i] = loads.sum(axis=1).resample(t_resolution).mean() / 1e3 + + for i in ch4_load_carrier: + loads = self.network.loads_t.p[ + self.network.loads.loc[ + self.network.loads.carrier == i + ].index.to_list() + ] + data[i] = loads.sum(axis=1).resample(t_resolution).mean() / 1e3 + + fig, ax = plt.subplots(figsize=(20, 10), dpi=300) + data.plot.area( + ax=ax, + title="Stacked Gas Loads and Generation by carrier", + ylabel="[GW]", + legend=True, + stacked=True, + ) + + ch4_gens_feedin = self.network.generators_t.p[ + [ + col + for col in self.network.generators_t.p.columns + if "CH4" in col + ] + ] # active power at bus + ch4_links_feedin = -self.network.links_t.p1[ + self.network.links.loc[ + self.network.links.carrier == "H2_to_CH4" + ].index + ] # p1 is output p of H2_to_CH4 + h2_links_feedin = -self.network.links_t.p1[ + self.network.links.loc[ + self.network.links.carrier == "H2_feedin" + ].index + ] + + total_gen_per_t = ch4_gens_feedin.sum(axis=1) / 1e3 + total_link_per_t = ch4_links_feedin.sum(axis=1) / 1e3 + total_h2_per_t = h2_links_feedin.sum(axis=1) / 1e3 + + (total_gen_per_t + total_link_per_t + total_h2_per_t).resample( + t_resolution + ).mean().plot.line( + ax=ax, + legend=True, + label="Total_Gas_generation", + color=colors["CH4"], + linestyle="dashed", + ) + + stores = self.network.stores.loc[self.network.stores.carrier == "CH4"] + a = self.network.stores_t.p[stores.index].sum(axis=1) / 1e3 + (total_gen_per_t + total_link_per_t + total_h2_per_t + a).resample( + t_resolution + ).mean().plot.line( + ax=ax, + legend=True, + label="Total_Gas_generation + Gas Storage dispatch", + color="black", + linestyle="dashed", + ) + + else: + data = data.sum(axis=1).resample(t_resolution).mean() / 1e3 + fig, ax = plt.subplots(figsize=(20, 10), dpi=300) + data.plot( + ax=ax, + title="Gas Loads by carrier", + label=rel_ch4_loads[0], + ylabel="[GW]", + legend=True, + ) + + for i in rel_ch4_loads[1:]: + data = self.network.links_t.p0[ + self.network.links.loc[ + self.network.links.carrier == i + ].index.to_list() + ] + data = data.sum(axis=1).resample(t_resolution).mean() / 1e3 + data.plot(ax=ax, label=i, legend=True) + + data = self.network.loads_t.p[ + self.network.loads.loc[ + self.network.loads.carrier == ch4_load_carrier[0] + ].index.to_list() + ] + data = data.sum(axis=1).resample(t_resolution).mean() / 1e3 + data.plot(ax=ax, label=ch4_load_carrier[0], ylabel="[GW]", legend=True) + + for i in ch4_load_carrier[1:]: + data = self.network.loads_t.p[ + self.network.loads.loc[ + self.network.loads.carrier == i + ].index.to_list() + ] + data = data.sum(axis=1).resample(t_resolution).mean() / 1e3 + data.plot(ax=ax, label=i, legend=True) + + if save_path: + plt.savefig(save_path, dpi=300) + + +def plot_h2_generation(self, 
t_resolution="20H", save_path=False): + """ + Plots timeseries data for H2 generation + + Parameters + ---------- + self : :class:`Etrago + Overall container of Etrago + t_resolution : str, optional + sets the resampling rate of timeseries data to allow for smoother + line plots + save_path : bool, optional + Path to save the generated plot. The default is False. + + Returns + ------- + None. + + """ + fig, ax = plt.subplots(figsize=(20, 10), dpi=300) + + colors = coloring() + + h2_CH4_gen = -self.network.links_t.p1[ + self.network.links.loc[self.network.links.carrier == "CH4_to_H2"].index + ] + h2_power_gen = -self.network.links_t.p1[ + self.network.links.loc[ + self.network.links.carrier == "power_to_H2" + ].index + ] + + (h2_CH4_gen.sum(axis=1) / 1e3 + h2_power_gen.sum(axis=1) / 1e3).resample( + t_resolution + ).mean().plot( + ax=ax, + title="H2 Generation", + legend=True, + ylabel="[GW]", + label="Total dispatch", + lw=5, + ) + (h2_CH4_gen.sum(axis=1) / 1e3).resample(t_resolution).mean().plot( + ax=ax, + label="CH4_to_H2 Dispatch", + legend=True, + color=colors["CH4_to_H2"], + ) + (h2_power_gen.sum(axis=1) / 1e3).resample(t_resolution).mean().plot( + ax=ax, + label="power_to_H2 Dispatch", + legend=True, + color=colors["power_to_H2"], + ) + + if save_path: + plt.savefig(save_path, dpi=300) + + +def plot_h2_summary(self, t_resolution="20H", stacked=True, save_path=False): + """ + Plots timeseries data for H2 loads (and generation) + + Parameters + ---------- + self : :class:`Etrago + Overall container of Etrago + t_resolution : str, optional + sets the resampling rate of timeseries data to allow for smoother + line plots + stacked : bool, optional + If True all TS data will be shown as stacked area plot. Total H2 + generation will then also be plotted to check for matching demand and + generation. + save_path : bool, optional + Path to save the generated plot. The default is False. + + Returns + ------- + None. 
+
+    """
+
+    rel_h2_links = ["H2_feedin", "H2_to_CH4", "H2_to_power"]
+    rel_h2_loads = ["H2_for_industry", "H2_hgv_load"]
+
+    data = self.network.links_t.p0[
+        self.network.links.loc[
+            self.network.links.carrier == rel_h2_links[0]
+        ].index.to_list()
+    ]
+
+    if stacked:
+        data = (
+            pd.DataFrame(data.sum(axis=1)).resample(t_resolution).mean() / 1e3
+        )
+        data = data.rename(columns={0: rel_h2_links[0]})
+
+        for i in rel_h2_links[1:]:
+            loads = self.network.links_t.p0[
+                self.network.links.loc[
+                    self.network.links.carrier == i
+                ].index.to_list()
+            ]
+            data[i] = loads.sum(axis=1).resample(t_resolution).mean() / 1e3
+
+        DE_loads = self.network.loads.loc[
+            self.network.loads.bus.isin(
+                self.network.buses.loc[
+                    self.network.buses.country == "DE"
+                ].index
+            )
+        ]
+        for i in rel_h2_loads:
+            loads = self.network.loads_t.p[
+                DE_loads.loc[DE_loads.carrier == i].index.to_list()
+            ]
+            data[i] = loads.sum(axis=1).resample(t_resolution).mean() / 1e3
+
+        fig, ax = plt.subplots(figsize=(20, 10), dpi=300)
+        data.plot.area(
+            ax=ax,
+            title="Stacked H2 Loads by carrier",
+            ylabel="[GW]",
+            legend=True,
+            stacked=True,
+        )
+
+        h2_CH4_gen = -self.network.links_t.p1[
+            self.network.links.loc[
+                self.network.links.carrier == "CH4_to_H2"
+            ].index
+        ]
+        h2_power_gen = -self.network.links_t.p1[
+            self.network.links.loc[
+                self.network.links.carrier == "power_to_H2"
+            ].index
+        ]
+        (
+            h2_CH4_gen.sum(axis=1) / 1e3 + h2_power_gen.sum(axis=1) / 1e3
+        ).resample(t_resolution).mean().plot(
+            ax=ax,
+            legend=True,
+            label="H2 Generation",
+            color="black",
+            linestyle="dashed",
+        )
+
+    else:
+        data = data.sum(axis=1).resample(t_resolution).mean() / 1e3
+        fig, ax = plt.subplots(figsize=(20, 10), dpi=300)
+        data.plot(
+            ax=ax,
+            title="H2 Loads by carrier",
+            label=rel_h2_links[0],
+            ylabel="[GW]",
+            legend=True,
+        )
+
+        for i in rel_h2_links[1:]:
+            data = self.network.links_t.p0[
+                self.network.links.loc[
+                    self.network.links.carrier == i
+                ].index.to_list()
+            ]
+            data = data.sum(axis=1).resample(t_resolution).mean() / 1e3
+            data.plot(ax=ax, label=i, legend=True)
+
+        DE_loads = self.network.loads.loc[
+            self.network.loads.bus.isin(
+                self.network.buses.loc[
+                    self.network.buses.country == "DE"
+                ].index
+            )
+        ]
+        data = self.network.loads_t.p[
+            DE_loads.loc[DE_loads.carrier == rel_h2_loads[0]].index.to_list()
+        ]
+        data = data.sum(axis=1).resample(t_resolution).mean() / 1e3
+        data.plot(ax=ax, label=rel_h2_loads[0], ylabel="[GW]", legend=True)
+
+        for i in rel_h2_loads[1:]:
+            data = self.network.loads_t.p[
+                DE_loads.loc[DE_loads.carrier == i].index.to_list()
+            ]
+            data = data.sum(axis=1).resample(t_resolution).mean() / 1e3
+            data.plot(ax=ax, label=i, legend=True)
+
+    if save_path:
+        plt.savefig(save_path, dpi=300)
+
+
+def plot_heat_loads(self, t_resolution="20H", save_path=False):
+    """
+    Plots timeseries data for heat loads
+
+    Parameters
+    ----------
+    self : :class:`Etrago`
+        Overall container of Etrago
+    t_resolution : str, optional
+        Sets the resampling rate of the timeseries data to allow for
+        smoother line plots. The default is "20H".
+    save_path : str or False, optional
+        Path to save the generated plot. The default is False, in which
+        case no file is written.
+
+    Returns
+    -------
+    None.
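+
+    Examples
+    --------
+    ``t_resolution`` accepts any pandas offset alias. A daily resampled
+    plot could, for instance, be saved with (sketch, file name made up):
+
+    >>> etrago.plot_heat_loads(t_resolution="1D", save_path="heat_loads.png")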
+
+    """
+    fig, ax = plt.subplots(figsize=(20, 10), dpi=300)
+
+    central_h = self.network.loads.loc[
+        self.network.loads.carrier == "central_heat"
+    ]
+    rural_h = self.network.loads.loc[
+        self.network.loads.carrier == "rural_heat"
+    ]
+    central_h_loads = self.network.loads_t.p[central_h.index].sum(axis=1)
+    rural_h_loads = self.network.loads_t.p[rural_h.index].sum(axis=1)
+
+    ((central_h_loads + rural_h_loads) / 1e3).resample(
+        t_resolution
+    ).mean().plot(
+        ax=ax,
+        title="Central and rural heat loads",
+        label="central_heat + rural_heat",
+        legend=True,
+        ylabel="[GW]",
+    )
+    (central_h_loads / 1e3).resample(t_resolution).mean().plot(
+        ax=ax, label="central_heat", legend=True
+    )
+    (rural_h_loads / 1e3).resample(t_resolution).mean().plot(
+        ax=ax, label="rural_heat", legend=True
+    )
+
+    if save_path:
+        plt.savefig(save_path, dpi=300)
+
+
+def plot_heat_summary(self, t_resolution="20H", stacked=True, save_path=False):
+    """
+    Plots timeseries data for heat generation (and demand)
+
+    Parameters
+    ----------
+    self : :class:`Etrago`
+        Overall container of Etrago
+    t_resolution : str, optional
+        Sets the resampling rate of the timeseries data to allow for
+        smoother line plots. The default is "20H".
+    stacked : bool, optional
+        If True all TS data will be shown as stacked area plot. Total heat
+        demand will then also be plotted to check for matching generation and
+        demand.
+    save_path : str or False, optional
+        Path to save the generated plot. The default is False, in which
+        case no file is written.
+
+    Returns
+    -------
+    None.
+
+    """
+
+    heat_gen_techs = [
+        "central_resistive_heater",
+        "central_heat_pump",
+        "rural_heat_pump",
+        "central_gas_CHP_heat",
+        "central_gas_boiler",
+        "rural_gas_boiler",
+    ]
+
+    heat_gen_ids = self.network.generators.loc[
+        self.network.generators.carrier.isin(
+            [
+                "solar_thermal_collector",
+                "geo_thermal",
+                "central_biomass_CHP_heat",
+            ]
+        )
+    ].index
+    heat_gen_dispatch = (
+        self.network.generators_t.p.T.loc[heat_gen_ids].sum(axis=0) / 1e3
+    )
+
+    links_id_hc = self.network.links.loc[
+        self.network.links.carrier.isin(
+            ["central_heat_store_charger", "rural_heat_store_charger"]
+        )
+    ].index
+    heat_store_charger_dispatch = (
+        self.network.links_t.p0.T.loc[links_id_hc].sum(axis=0) / 1e3
+    )
+
+    links_id_hdc = self.network.links.loc[
+        self.network.links.carrier.isin(
+            ["central_heat_store_discharger", "rural_heat_store_discharger"]
+        )
+    ].index
+    heat_store_discharger_dispatch = (
+        self.network.links_t.p1.T.loc[links_id_hdc].sum(axis=0) / 1e3
+    )
+
+    heat_store_dispatch_hb = (
+        -heat_store_discharger_dispatch - heat_store_charger_dispatch
+    )
+
+    central_h = self.network.loads.loc[
+        self.network.loads.carrier == "central_heat"
+    ]
+    rural_h = self.network.loads.loc[
+        self.network.loads.carrier == "rural_heat"
+    ]
+    central_h_loads = self.network.loads_t.p[central_h.index].sum(axis=1) / 1e3
+    rural_h_loads = self.network.loads_t.p[rural_h.index].sum(axis=1) / 1e3
+
+    data = (
+        self.network.links_t.p1[
+            self.network.links.loc[
+                self.network.links.carrier == heat_gen_techs[0]
+            ].index.to_list()
+        ]
+        / 1e3
+    )
+
+    if stacked:
+        data = pd.DataFrame(-(data.sum(axis=1)))
+        data = data.rename(columns={0: heat_gen_techs[0]})
+
+        for i in heat_gen_techs[1:]:
+            loads = self.network.links_t.p1[
+                self.network.links.loc[
+                    self.network.links.carrier == i
+                ].index.to_list()
+            ]
+            data[i] = -(loads).sum(axis=1) / 1e3
+
+        fig, ax = plt.subplots(figsize=(20, 10), dpi=300)
+        data.resample(t_resolution).mean().plot.area(
+            ax=ax,
+            title="Stacked heat generation and demand",
+            ylabel="[GW]",
+            legend=True,
+            stacked=True,
+        )
+
+        (
+            data.sum(axis=1) + heat_store_dispatch_hb + heat_gen_dispatch
+        ).resample(t_resolution).mean().plot.line(
+            ax=ax,
+            legend=True,
+            label="Total heat generation + heat store dispatch",
+            color="yellow",
+        )
+
+    else:
+        data = -data.sum(axis=1)
+
+        fig, ax = plt.subplots(figsize=(20, 10), dpi=300)
+
+        data.resample(t_resolution).mean().plot(
+            ax=ax,
+            title="Heat generation and demand",
+            label=heat_gen_techs[0],
+            ylabel="[GW]",
+            legend=True,
+        )
+
+        for i in heat_gen_techs[1:]:
+            data = self.network.links_t.p1[
+                self.network.links.loc[
+                    self.network.links.carrier == i
+                ].index.to_list()
+            ]
+            data = -data.sum(axis=1) / 1e3
+            data.resample(t_resolution).mean().plot(
+                ax=ax, label=i, legend=True
+            )
+
+        heat_store_dispatch_hb.resample(t_resolution).mean().plot.line(
+            ax=ax,
+            legend=True,
+            label="Heat store dispatch",
+            color="yellow",
+            linestyle="dashed",
+        )
+
+    (central_h_loads + rural_h_loads).resample(t_resolution).mean().plot.line(
+        ax=ax,
+        legend=True,
+        label="Total heat demand",
+        color="black",
+        linestyle="dashed",
+    )
+
+    if save_path:
+        plt.savefig(save_path, dpi=300)
+
+
+def shifted_energy(self, carrier, buses):
+    """Calculate shifted energy for a specific carrier
+
+    Parameters
+    ----------
+    carrier : str
+        Name of energy carrier
+    buses : list
+        List of considered bus indices
+
+    Returns
+    -------
+    shifted : pandas.Series
+        Shifted energy per time step
+
+    """
+
+    buses = self.network.links[
+        self.network.links.bus0.isin(
+            self.network.buses[
+                (self.network.buses.carrier == "AC")
+                & (self.network.buses.index.isin(buses))
+            ].index
+        )
+        & self.network.links.bus1.isin(
+            self.network.buses[
+                self.network.buses.carrier.str.contains(carrier)
+            ].index
+        )
+    ].bus1.unique()
+
+    supply = self.network.links_t.p1[
+        self.network.links[
+            (self.network.links.bus1.isin(buses))
+            & ~(self.network.links.carrier.str.contains("charger"))
+        ].index
+    ].mul(-1).sum(axis=1) + (
+        self.network.generators_t.p[
+            self.network.generators[
+                self.network.generators.bus.isin(buses)
+            ].index
+        ].sum(axis=1)
+    )
+
+    demand = self.network.loads_t.p[
+        self.network.loads[self.network.loads.bus.isin(buses)].index
+    ].sum(axis=1) + (
+        self.network.links_t.p0[
+            self.network.links[
+                (self.network.links.bus0.isin(buses))
+                & ~(self.network.links.carrier.str.contains("charger"))
+            ].index
+        ].sum(axis=1)
+    )
+
+    shifted = supply - demand
+    return shifted
+
+
+def flexibility_duration_curve(etrago, etrago_lowflex, filename=None):
+    """Plot duration curves of flexibility options
+
+    Parameters
+    ----------
+    etrago : Etrago
+        Object including network with flexibility options
+    etrago_lowflex : Etrago
+        Object including network with less flexibility options
+    filename : str, optional
+        Name of file to save plot. The default is None.
+
+    Returns
+    -------
+    None.
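+
+    Notes
+    -----
+    The curves are built pointwise: for each percentage p of the maximum
+    shifted power of a flexibility option, the share of time steps in
+    which more than p % of that maximum is used is computed, yielding a
+    duration curve per carrier.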
+
+    """
+    colors = coloring()
+
+    value = "p"
+
+    df = pd.DataFrame()
+
+    dsm_stores = etrago.network.stores[
+        etrago.network.stores.carrier.str.contains("dsm")
+    ]
+    df["dsm_positive"] = (
+        etrago.network.stores_t[value][dsm_stores.index]
+        .clip(lower=0)
+        .sum(axis=1)
+    )
+    df["dsm_negative"] = (
+        etrago.network.stores_t[value][dsm_stores.index]
+        .clip(upper=0)
+        .sum(axis=1)
+    )
+
+    emob_static = etrago_lowflex.network.loads[
+        etrago_lowflex.network.loads.carrier == "land transport EV"
+    ]
+
+    emob_static_t = etrago_lowflex.network.loads_t.p_set[emob_static.index]
+
+    emob_static_t = emob_static_t.loc[:, emob_static.index]
+
+    emob_static_t.columns = emob_static.bus.values
+
+    emob_flex = etrago.network.links[
+        etrago.network.links.carrier.str.contains("BEV")
+    ]
+
+    emob_flex_t = etrago.network.links_t.p0[emob_flex.index]
+
+    emob_flex_t = emob_flex_t.loc[:, emob_flex.index]
+
+    emob_flex_t.columns = emob_flex.bus0.values
+
+    df["BEV charger_positive"] = (
+        (emob_flex_t - emob_static_t).clip(lower=0).sum(axis=1)
+    )
+    df["BEV charger_negative"] = (
+        (emob_flex_t - emob_static_t).clip(upper=0).sum(axis=1)
+    )
+
+    heat_stores = etrago.network.stores[
+        etrago.network.stores.carrier.str.contains("heat")
+    ]
+    df["heat_positive"] = (
+        etrago.network.stores_t[value][heat_stores.index]
+        .clip(lower=0)
+        .sum(axis=1)
+    )
+    df["heat_negative"] = (
+        etrago.network.stores_t[value][heat_stores.index]
+        .clip(upper=0)
+        .sum(axis=1)
+    )
+
+    h2_stores = etrago.network.stores[
+        etrago.network.stores.carrier.str.contains("H2")
+    ]
+    df["H2_positive"] = (
+        etrago.network.stores_t[value][h2_stores.index]
+        .clip(lower=0)
+        .sum(axis=1)
+    )
+    df["H2_negative"] = (
+        etrago.network.stores_t[value][h2_stores.index]
+        .clip(upper=0)
+        .sum(axis=1)
+    )
+
+    fig, ax = plt.subplots(figsize=(15, 8))
+    for c in df.columns:
+        result = pd.Series(dtype=float)
+        color = colors[c.split("_")[0]]
+        for p in range(0, 100):
+            result[p * df[c].abs().max() * np.sign(df[c].sum()) / 100] = (
+                df[c][df[c].abs() > p * 0.01 * df[c].abs().max()].size
+                / df[c].size
+            ) * 100
+
+        data_to_plot = pd.DataFrame(
+            index=result.values, data=result.index * 1e-3
+        )
+        data_to_plot.columns = [c.split("_")[0]]
+        data_to_plot.plot(ax=ax, color=color, linewidth=3.0)
+    plt.axhline(y=0.0, color="grey", linestyle="dotted")
+    ax.set_xlim(0, 80)
+    ax.set_xlabel("time in %")
+    ax.set_ylabel("flexibility usage in GW")
+
+    handles, labels = plt.gca().get_legend_handles_labels()
+    by_label = dict(zip(labels, handles))
+    plt.legend(by_label.values(), by_label.keys())
+
+    if filename:
+        fig.savefig(filename, dpi=600)
+        plt.close()
diff --git a/etrago/tools/utilities.py b/etrago/tools/utilities.py
old mode 100644
new mode 100755
index 2786d5bae..e6435a901
--- a/etrago/tools/utilities.py
+++ b/etrago/tools/utilities.py
@@ -1,5 +1,5 @@
 # -*- coding: utf-8 -*-
-# Copyright 2016-2018 Flensburg University of Applied Sciences,
+# Copyright 2016-2023 Flensburg University of Applied Sciences,
 # Europa-Universität Flensburg,
 # Centre for Sustainable Energy Systems,
 # DLR-Institute for Networked Energy Systems
@@ -22,39 +22,74 @@ Utilities.py includes a wide range of useful functions.
""" +from collections.abc import Mapping +from copy import deepcopy +import json +import logging +import math import os + +from pyomo.environ import Constraint, PositiveReals, Var import numpy as np import pandas as pd import pypsa -import json -import logging -import math -from pyomo.environ import (Var, Constraint, PositiveReals) -from importlib import import_module +import sqlalchemy.exc -geopandas = True -try: - import geopandas as gpd +if "READTHEDOCS" not in os.environ: from shapely.geometry import Point - import geoalchemy2 - from egoio.db_tables.model_draft import RenpassGisParameterRegion + import geopandas as gpd -except: - geopandas = False + from etrago.tools import db logger = logging.getLogger(__name__) -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = "ulfmueller, s3pp, wolfbunke, mariusves, lukasol" +__author__ = """ulfmueller, s3pp, wolfbunke, mariusves, lukasol, ClaraBuettner, +CarlosEpia, gnn, pieterhexen, fwitte, KathiEsterl, MGlauer, birgits, + AmeliaNadal, MarlonSchlemminger, wheitkoetter, jankaeh""" + + +def filter_links_by_carrier(self, carrier, like=True): + """ + + Parameters + ---------- + carrier : list or str + name of the carriers of interest. Can be a list of carriers or single + sting. + like : bool, optional + When like set to True, the links with carrier names that includes the + carrier(s) supplied are returned, Not just exact matches. + The default is True. + + Returns + ------- + df : pandas.DataFrame object + Dataframe that contains just links with carriers of the types given + in the argument carrier. + + """ + if isinstance(carrier, str): + if like: + df = self.network.links[ + self.network.links.carrier.str.contains(carrier) + ] + else: + df = self.network.links[self.network.links.carrier == carrier] + elif isinstance(carrier, list): + df = self.network.links[self.network.links.carrier.isin(carrier)] + return df def buses_of_vlvl(network, voltage_level): - """ Get bus-ids of given voltage level(s). + """Get bus-ids of given voltage level(s). Parameters ---------- @@ -75,7 +110,7 @@ def buses_of_vlvl(network, voltage_level): def buses_grid_linked(network, voltage_level): - """ Get bus-ids of a given voltage level connected to the grid. + """Get bus-ids of a given voltage level connected to the grid. Parameters ---------- @@ -89,9 +124,20 @@ def buses_grid_linked(network, voltage_level): List containing bus-ids. 
""" - mask = ((network.buses.index.isin(network.lines.bus0) | - (network.buses.index.isin(network.lines.bus1))) & - (network.buses.v_nom.isin(voltage_level))) + mask = ( + network.buses.index.isin(network.lines.bus0) + | (network.buses.index.isin(network.lines.bus1)) + | ( + network.buses.index.isin( + network.links.loc[network.links.carrier == "DC", "bus0"] + ) + ) + | ( + network.buses.index.isin( + network.links.loc[network.links.carrier == "DC", "bus1"] + ) + ) + ) & (network.buses.v_nom.isin(voltage_level)) df = network.buses[mask] @@ -100,215 +146,153 @@ def buses_grid_linked(network, voltage_level): def geolocation_buses(self): """ - If geopandas is installed: - Use geometries of buses x/y(lon/lat) and polygons - of countries from RenpassGisParameterRegion - in order to locate the buses + If geopandas is installed: + Use geometries of buses x/y(lon/lat) and polygons + of countries from RenpassGisParameterRegion + in order to locate the buses - Else: - Use coordinats of buses to locate foreign buses, which is less accurate. + Else: + Use coordinats of buses to locate foreign buses, which is less accurate. - TODO: Why not alway use geopandas?? + TODO: Why not alway use geopandas?? - Parameters - ---------- - etrago : :class:`etrago.Etrago - Transmission grid object + Parameters + ---------- + etrago : :class:`etrago.Etrago` + Transmission grid object """ network = self.network - if geopandas: - # Start db connetion - # get renpassG!S scenario data - - RenpassGISRegion = RenpassGisParameterRegion - - # Define regions - region_id = ['DE', 'DK', 'FR', 'BE', 'LU', 'AT', - 'NO', 'PL', 'CH', 'CZ', 'SE', 'NL'] - - query = self.session.query(RenpassGISRegion.gid, - RenpassGISRegion.u_region_id, - RenpassGISRegion.stat_level, - RenpassGISRegion.geom, - RenpassGISRegion.geom_point) - - # get regions by query and filter - Regions = [(gid, u_region_id, stat_level, geoalchemy2.shape.to_shape( - geom), geoalchemy2.shape.to_shape(geom_point)) - for gid, u_region_id, stat_level, - geom, geom_point in query.filter(RenpassGISRegion.u_region_id. 
- in_(region_id)).all()] - - crs = {'init': 'epsg:4326'} - # transform lon lat to shapely Points and create GeoDataFrame - points = [Point(xy) for xy in zip(network.buses.x, network.buses.y)] - bus = gpd.GeoDataFrame(network.buses, crs=crs, geometry=points) - # Transform Countries Polygons as Regions - region = pd.DataFrame( - Regions, columns=['id', 'country', 'stat_level', 'Polygon', - 'Point']) - re = gpd.GeoDataFrame(region, crs=crs, geometry=region['Polygon']) - # join regions and buses by geometry which intersects - busC = gpd.sjoin(bus, re, how='inner', op='intersects') - # busC - # Drop non used columns - busC = busC.drop(['index_right', 'Point', 'id', 'Polygon', - 'stat_level', 'geometry'], axis=1) - # add busC to eTraGo.buses - network.buses['country_code'] = busC['country'] - network.buses.country_code[network.buses.country_code.isnull()] = 'DE' - # close session - self.session.close() + transborder_lines_0 = network.lines[ + network.lines["bus0"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index + transborder_lines_1 = network.lines[ + network.lines["bus1"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index - else: + # set country tag for lines + network.lines.loc[transborder_lines_0, "country"] = network.buses.loc[ + network.lines.loc[transborder_lines_0, "bus0"].values, "country" + ].values - buses_by_country(network) + network.lines.loc[transborder_lines_1, "country"] = network.buses.loc[ + network.lines.loc[transborder_lines_1, "bus1"].values, "country" + ].values + network.lines["country"].fillna("DE", inplace=True) + doubles = list(set(transborder_lines_0.intersection(transborder_lines_1))) + for line in doubles: + c_bus0 = network.buses.loc[network.lines.loc[line, "bus0"], "country"] + c_bus1 = network.buses.loc[network.lines.loc[line, "bus1"], "country"] + network.lines.loc[line, "country"] = "{}{}".format(c_bus0, c_bus1) - transborder_lines_0 = network.lines[network.lines['bus0'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - transborder_lines_1 = network.lines[network.lines['bus1'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index + transborder_links_0 = network.links[ + network.links["bus0"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index + transborder_links_1 = network.links[ + network.links["bus1"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index - #set country tag for lines - network.lines.loc[transborder_lines_0, 'country'] = \ - network.buses.loc[network.lines.loc[transborder_lines_0, 'bus0'].\ - values, 'country_code'].values + # set country tag for links + network.links.loc[transborder_links_0, "country"] = network.buses.loc[ + network.links.loc[transborder_links_0, "bus0"].values, "country" + ].values - network.lines.loc[transborder_lines_1, 'country'] = \ - network.buses.loc[network.lines.loc[transborder_lines_1, 'bus1'].\ - values, 'country_code'].values - network.lines['country'].fillna('DE', inplace=True) - doubles = list(set(transborder_lines_0.intersection(transborder_lines_1))) - for line in doubles: - c_bus0 = network.buses.loc[network.lines.loc[line, 'bus0'], - 'country_code'] - c_bus1 = network.buses.loc[network.lines.loc[line, 'bus1'], - 'country_code'] - network.lines.loc[line, 'country'] = '{}{}'.format(c_bus0, c_bus1) - - transborder_links_0 = network.links[network.links['bus0'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - transborder_links_1 = 
network.links[network.links['bus1'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - - #set country tag for links - network.links.loc[transborder_links_0, 'country'] = \ - network.buses.loc[network.links.loc[transborder_links_0, 'bus0'].\ - values, 'country_code'].values - - network.links.loc[transborder_links_1, 'country'] = \ - network.buses.loc[network.links.loc[transborder_links_1, 'bus1'].\ - values, 'country_code'].values - network.links['country'].fillna('DE', inplace=True) + network.links.loc[transborder_links_1, "country"] = network.buses.loc[ + network.links.loc[transborder_links_1, "bus1"].values, "country" + ].values + network.links["country"].fillna("DE", inplace=True) doubles = list(set(transborder_links_0.intersection(transborder_links_1))) for link in doubles: - c_bus0 = network.buses.loc[ - network.links.loc[link, 'bus0'], 'country_code'] - c_bus1 = network.buses.loc[ - network.links.loc[link, 'bus1'], 'country_code'] - network.links.loc[link, 'country'] = '{}{}'.format(c_bus0, c_bus1) + c_bus0 = network.buses.loc[network.links.loc[link, "bus0"], "country"] + c_bus1 = network.buses.loc[network.links.loc[link, "bus1"], "country"] + network.links.loc[link, "country"] = "{}{}".format(c_bus0, c_bus1) return network -def buses_by_country(network): +def buses_by_country(self): """ Find buses of foreign countries using coordinates and return them as Pandas Series Parameters ---------- - network : :class:`pypsa.Network + self : Etrago object Overall container of PyPSA Returns ------- - foreign_buses: Series containing buses by country - """ - - poland = pd.Series(index=network. - buses[(network.buses['x'] > 17)].index, - data="PL") - czech = pd.Series(index=network. - buses[(network.buses['x'] < 17) & - (network.buses['x'] > 15.1)].index, - data="CZ") - denmark = pd.Series(index=network. - buses[((network.buses['y'] < 60) & - (network.buses['y'] > 55.2)) | - ((network.buses['x'] > 11.95) & - (network.buses['x'] < 11.97) & - (network.buses['y'] > 54.5))]. - index, - data="DK") - sweden = pd.Series(index=network.buses[(network.buses['y'] > 60)].index, - data="SE") - austria = pd.Series(index=network. - buses[(network.buses['y'] < 47.33) & - (network.buses['x'] > 9) | - ((network.buses['x'] > 9.65) & - (network.buses['x'] < 9.9) & - (network.buses['y'] < 47.5) & - (network.buses['y'] > 47.3)) | - ((network.buses['x'] > 12.14) & - (network.buses['x'] < 12.15) & - (network.buses['y'] > 47.57) & - (network.buses['y'] < 47.58)) | - (network.buses['y'] < 47.6) & - (network.buses['x'] > 14.1)].index, - data="AT") - switzerland = pd.Series(index=network. - buses[((network.buses['x'] > 8.1) & - (network.buses['x'] < 8.3) & - (network.buses['y'] < 46.8)) | - ((network.buses['x'] > 7.82) & - (network.buses['x'] < 7.88) & - (network.buses['y'] > 47.54) & - (network.buses['y'] < 47.57)) | - ((network.buses['x'] > 10.91) & - (network.buses['x'] < 10.92) & - (network.buses['y'] > 49.91) & - (network.buses['y'] < 49.92))].index, - data="CH") - netherlands = pd.Series(index=network. - buses[((network.buses['x'] < 6.96) & - (network.buses['y'] < 53.15) & - (network.buses['y'] > 53.1)) | - ((network.buses['x'] < 5.4) & - (network.buses['y'] > 52.1))].index, - data="NL") - luxembourg = pd.Series(index=network. - buses[((network.buses['x'] < 6.15) & - (network.buses['y'] < 49.91) & - (network.buses['y'] > 49.65))].index, - data="LU") - france = pd.Series(index=network. 
- buses[(network.buses['x'] < 4.5) | - ((network.buses['x'] > 7.507) & - (network.buses['x'] < 7.508) & - (network.buses['y'] > 47.64) & - (network.buses['y'] < 47.65)) | - ((network.buses['x'] > 6.2) & - (network.buses['x'] < 6.3) & - (network.buses['y'] > 49.1) & - (network.buses['y'] < 49.2)) | - ((network.buses['x'] > 6.7) & - (network.buses['x'] < 6.76) & - (network.buses['y'] > 49.13) & - (network.buses['y'] < 49.16))].index, - data="FR") - foreign_buses = pd.Series() - foreign_buses = foreign_buses.append([poland, czech, denmark, sweden, - austria, switzerland, - netherlands, luxembourg, france]) - - network.buses['country_code'] = foreign_buses[foreign_buses.index.isin( - network.buses.index)] - network.buses['country_code'].fillna('DE', inplace=True) - - return foreign_buses + None + """ + + countries = { + "Poland": "PL", + "Czechia": "CZ", + "Denmark": "DK", + "Sweden": "SE", + "Austria": "AT", + "Switzerland": "CH", + "Netherlands": "NL", + "Luxembourg": "LU", + "France": "FR", + "Belgium": "BE", + "United Kingdom": "GB", + "Norway": "NO", + "Finland": "FI", + "Germany": "DE", + "Russia": "RU", + } + + # read Germany borders from egon-data + query = "SELECT * FROM boundaries.vg250_lan" + con = self.engine + germany_sh = gpd.read_postgis(query, con, geom_col="geometry") + + path = gpd.datasets.get_path("naturalearth_lowres") + shapes = gpd.read_file(path) + shapes = shapes[shapes.name.isin([*countries])].set_index(keys="name") + + # Use Germany borders from egon-data if not using the SH test case + if len(germany_sh.gen.unique()) > 1: + shapes.at["Germany", "geometry"] = germany_sh.geometry.unary_union + + geobuses = self.network.buses.copy() + geobuses["geom"] = geobuses.apply( + lambda x: Point([x["x"], x["y"]]), axis=1 + ) + + geobuses = gpd.GeoDataFrame( + data=geobuses, geometry="geom", crs="EPSG:4326" + ) + geobuses["country"] = np.nan + + for country in countries: + geobuses["country"][ + self.network.buses.index.isin( + geobuses.clip(shapes[shapes.index == country]).index + ) + ] = countries[country] + + shapes = shapes.to_crs(3035) + geobuses = geobuses.to_crs(3035) + + for bus in geobuses[geobuses["country"].isna()].index: + distances = shapes.distance(geobuses.loc[bus, "geom"]) + closest = distances.idxmin() + geobuses.loc[bus, "country"] = countries[closest] + + self.network.buses = geobuses.drop(columns="geom") + + return def clip_foreign(network): @@ -330,58 +314,83 @@ def clip_foreign(network): # get foreign buses by country - foreign_buses = network.buses[network.buses.country_code != 'DE'] + foreign_buses = network.buses[network.buses.country != "DE"] network.buses = network.buses.drop( - network.buses.loc[foreign_buses.index].index) + network.buses.loc[foreign_buses.index].index + ) if not network.lines_t.p0.empty: # identify transborder lines - #TODO: Add links! + # TODO: Add links! 
transborder_lines = network.lines.query("country != 'DE'") - transborder_lines['bus0'] = network.lines['bus0'] - transborder_lines['bus1'] = network.lines['bus1'] - transborder_lines['country'] = network.lines.country + transborder_lines["bus0"] = network.lines["bus0"] + transborder_lines["bus1"] = network.lines["bus1"] + transborder_lines["country"] = network.lines.country # identify amount of flows per line and group to get flow per country transborder_flows = network.lines_t.p0[transborder_lines.index] for i in transborder_flows.columns: - if network.lines.loc[str(i)]['bus1'] in foreign_buses.index: - transborder_flows.loc[:, str( - i)] = transborder_flows.loc[:, str(i)]*-1 - - network.foreign_trade = transborder_flows.\ - groupby(transborder_lines['country'], axis=1).sum() + if network.lines.loc[str(i)]["bus1"] in foreign_buses.index: + transborder_flows.loc[:, str(i)] = ( + transborder_flows.loc[:, str(i)] * -1 + ) + network.foreign_trade = transborder_flows.groupby( + transborder_lines["country"], axis=1 + ).sum() # drop foreign components - network.lines = network.lines.drop(network.lines[ - (network.lines['bus0'].isin(network.buses.index) == False) | - (network.lines['bus1'].isin(network.buses.index) == False)].index) - - network.links = network.links.drop(network.links[ - (network.links['bus0'].isin(network.buses.index) == False) | - (network.links['bus1'].isin(network.buses.index) == False)].index) - - network.transformers = network.transformers.drop(network.transformers[ - (network.transformers['bus0'].isin(network.buses.index) == False) | - (network.transformers['bus1'].isin(network. - buses.index) == False)].index) - network.generators = network.generators.drop(network.generators[ - (network.generators['bus'].isin(network.buses.index) == False)].index) - network.loads = network.loads.drop(network.loads[ - (network.loads['bus'].isin(network.buses.index) == False)].index) - network.storage_units = network.storage_units.drop(network.storage_units[ - (network.storage_units['bus'].isin(network. 
- buses.index) == False)].index) - - components = ['loads', 'generators', 'lines', 'buses', 'transformers', - 'links'] + network.lines = network.lines.drop( + network.lines[ + ~(network.lines["bus0"].isin(network.buses.index)) + | ~(network.lines["bus1"].isin(network.buses.index)) + ].index + ) + + network.links = network.links.drop( + network.links[ + ~(network.links["bus0"].isin(network.buses.index)) + | ~(network.links["bus1"].isin(network.buses.index)) + ].index + ) + + network.transformers = network.transformers.drop( + network.transformers[ + ~(network.transformers["bus0"].isin(network.buses.index)) + | ~(network.transformers["bus1"].isin(network.buses.index)) + ].index + ) + network.generators = network.generators.drop( + network.generators[ + ~(network.generators["bus"].isin(network.buses.index)) + ].index + ) + network.loads = network.loads.drop( + network.loads[~(network.loads["bus"].isin(network.buses.index))].index + ) + network.storage_units = network.storage_units.drop( + network.storage_units[ + ~(network.storage_units["bus"].isin(network.buses.index)) + ].index + ) + + components = [ + "loads", + "generators", + "lines", + "buses", + "transformers", + "links", + ] for g in components: # loads_t - h = g + '_t' + h = g + "_t" nw = getattr(network, h) # network.loads_t for i in nw.keys(): # network.loads_t.p - cols = [j for j in getattr( - nw, i).columns if j not in getattr(network, g).index] + cols = [ + j + for j in getattr(nw, i).columns + if j not in getattr(network, g).index + ] for k in cols: del getattr(nw, i)[k] @@ -402,75 +411,147 @@ def foreign_links(self): Overall container of PyPSA """ - if self.args['foreign_lines']['carrier'] == 'DC': + if self.args["foreign_lines"]["carrier"] == "DC": network = self.network - foreign_buses = network.buses[network.buses.country_code != 'DE'] - - foreign_lines = network.lines[network.lines.bus0.astype(str).isin( - foreign_buses.index) | network.lines.bus1.astype(str).isin( - foreign_buses.index)] + foreign_buses = network.buses[ + (network.buses.country != "DE") + & (network.buses.carrier.isin(["AC", "DC"])) + ] - foreign_links = network.links[network.links.bus0.astype(str).isin( - foreign_buses.index) | network.links.bus1.astype(str).isin( - foreign_buses.index)] - - network.links = network.links.drop( - network.links.index[ - network.links.index.isin(foreign_links.index) - & network.links.bus0.isin(network.links.bus1) - & (network.links.bus0 > network.links.bus1)]) + foreign_lines = network.lines[ + network.lines.bus0.astype(str).isin(foreign_buses.index) + | network.lines.bus1.astype(str).isin(foreign_buses.index) + ] foreign_links = network.links[ - network.links.bus0.astype(str).isin(foreign_buses.index) | - network.links.bus1.astype(str).isin(foreign_buses.index)] + ( + network.links.bus0.astype(str).isin(foreign_buses.index) + | network.links.bus1.astype(str).isin(foreign_buses.index) + ) + & (network.links.carrier == "DC") + ] + + network.links.loc[foreign_links.index, "p_min_pu"] = -1 - network.links.loc[foreign_links.index, 'p_min_pu'] = -1 + network.links.loc[foreign_links.index, "efficiency"] = 1 - network.links.loc[foreign_links.index, 'efficiency'] = 1 + network.links.loc[foreign_links.index, "carrier"] = "DC" network.import_components_from_dataframe( - foreign_lines.loc[:, ['bus0', 'bus1', 'capital_cost', 'length']] - .assign(p_nom=foreign_lines.s_nom).assign(p_min_pu=-1) - .set_index('N' + foreign_lines.index), - 'Link') + foreign_lines.loc[:, ["bus0", "bus1", "capital_cost", "length"]] + 
.assign(p_nom=foreign_lines.s_nom)
+            .assign(p_nom_min=foreign_lines.s_nom_min)
+            .assign(p_nom_max=foreign_lines.s_nom_max)
+            .assign(p_nom_extendable=foreign_lines.s_nom_extendable)
+            .assign(p_max_pu=foreign_lines.s_max_pu)
+            .assign(p_min_pu=-1)
+            .assign(carrier="DC")
+            .set_index("N" + foreign_lines.index),
+            "Link",
+        )
 
         network.lines = network.lines.drop(foreign_lines.index)
 
         self.geolocation_buses()
 
 
-def set_q_foreign_loads(self, cos_phi=1):
-    """Set reative power timeseries of loads in neighbouring countries
+def set_q_national_loads(self, cos_phi):
+    """
+    Set q component of national loads based on the p component and cos_phi
 
     Parameters
     ----------
-    network : :class:`pypsa.Network
+    network : :class:`pypsa.Network`
         Overall container of PyPSA
-    cos_phi: float
-        Choose ration of active and reactive power of foreign loads
+    cos_phi : float
+        Ratio of active to reactive power of national loads
 
     Returns
    -------
-    network : :class:`pypsa.Network
+    network : :class:`pypsa.Network`
         Overall container of PyPSA
 
     """
     network = self.network
 
-    foreign_buses = network.buses[network.buses.country_code != 'DE']
+    national_buses = network.buses[
+        (network.buses.country == "DE") & (network.buses.carrier == "AC")
+    ]
+
+    # Calculate q national loads based on p and cos_phi
+    new_q_loads = network.loads_t["p_set"].loc[
+        :,
+        network.loads.index[
+            (network.loads.bus.astype(str).isin(national_buses.index))
+            & (network.loads.carrier.astype(str) == "AC")
+        ],
+    ] * math.tan(math.acos(cos_phi))
+
+    # insert the calculated q in loads_t. Only loads without previous
+    # assignment are affected
+    network.loads_t.q_set = pd.merge(
+        network.loads_t.q_set,
+        new_q_loads,
+        how="inner",
+        right_index=True,
+        left_index=True,
+        suffixes=("", "delete_"),
+    )
+    network.loads_t.q_set.drop(
+        [i for i in network.loads_t.q_set.columns if "delete" in i],
+        axis=1,
+        inplace=True,
+    )
+
+
+def set_q_foreign_loads(self, cos_phi):
+    """Set reactive power timeseries of loads in neighbouring countries
 
-    network.loads_t['q_set'][network.loads.index[
-        network.loads.bus.astype(str).isin(foreign_buses.index)]] = \
-        network.loads_t['p_set'][network.loads.index[
-            network.loads.bus.astype(str).isin(
-                foreign_buses.index)]] * math.tan(math.acos(cos_phi))
+    Parameters
+    ----------
+    etrago : :class:`etrago.Etrago`
+        Transmission grid object
+    cos_phi : float
+        Ratio of active to reactive power of foreign loads
+
+    Returns
+    -------
+    None
+
+    """
+    network = self.network
 
-    network.generators.control[network.generators.control == 'PQ'] = 'PV'
+    foreign_buses = network.buses[
+        (network.buses.country != "DE") & (network.buses.carrier == "AC")
+    ]
+
+    network.loads_t["q_set"].loc[
+        :,
+        network.loads.index[
+            (network.loads.bus.astype(str).isin(foreign_buses.index))
+            & (network.loads.carrier != "H2_for_industry")
+        ].astype(int),
+    ] = network.loads_t["p_set"].loc[
+        :,
+        network.loads.index[
+            (network.loads.bus.astype(str).isin(foreign_buses.index))
+            & (network.loads.carrier != "H2_for_industry")
+        ],
+    ].values * math.tan(
+        math.acos(cos_phi)
+    )
+
+    # To avoid a problem when the index of the load is the weather year,
+    # the column names were temporarily set to `int` and changed back to
+    # `str`.
+    network.loads_t["q_set"].columns = network.loads_t["q_set"].columns.astype(
+        str
+    )
 
 
 def connected_grid_lines(network, busids):
-    """ Get grid lines connected to given buses.
+    """Get grid lines connected to given buses.
 
     Parameters
     ----------
@@ -485,14 +566,13 @@ def connected_grid_lines(network, busids):
         PyPSA lines.
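+
+    Examples
+    --------
+    A sketch with made-up bus ids:
+
+    >>> connected_grid_lines(etrago.network, busids=["101", "102"])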
""" - mask = network.lines.bus1.isin(busids) |\ - network.lines.bus0.isin(busids) + mask = network.lines.bus1.isin(busids) | network.lines.bus0.isin(busids) return network.lines[mask] def connected_transformer(network, busids): - """ Get transformer connected to given buses. + """Get transformer connected to given buses. Parameters ---------- @@ -507,13 +587,13 @@ def connected_transformer(network, busids): PyPSA transformer. """ - mask = (network.transformers.bus0.isin(busids)) + mask = network.transformers.bus0.isin(busids) return network.transformers[mask] -def load_shedding(self, **kwargs): - """ Implement load shedding in existing network to identify +def load_shedding(self, temporal_disaggregation=False, **kwargs): + """Implement load shedding in existing network to identify feasibility problems Parameters @@ -524,149 +604,423 @@ def load_shedding(self, **kwargs): Marginal costs for load shedding p_nom : int Installed capacity of load shedding generator + Returns ------- """ - if self.args['load_shedding']: + logger.debug("Shedding the load.") + if self.args["load_shedding"]: + if temporal_disaggregation: + network = self.network_tsa + else: + network = self.network + marginal_cost_def = 10000 # network.generators.marginal_cost.max()*2 - p_nom_def = self.network.loads_t.p_set.max().max() + p_nom_def = network.loads_t.p_set.max().max() + + marginal_cost = kwargs.get("marginal_cost", marginal_cost_def) + p_nom = kwargs.get("p_nom", p_nom_def) + + network.add("Carrier", "load") + start = ( + network.generators.index.to_series() + .str.rsplit(" ") + .str[0] + .astype(int) + .sort_values() + .max() + + 1 + ) - marginal_cost = kwargs.get('marginal_cost', marginal_cost_def) - p_nom = kwargs.get('p_nom', p_nom_def) + if start != start: + start = 0 - self.network.add("Carrier", "load") - start = self.network.generators.index.to_series().str.rsplit( - ' ').str[0].astype(int).sort_values().max() + 1 - index = list(range(start, start + len(self.network.buses.index))) - self.network.import_components_from_dataframe( + index = list(range(start, start + len(network.buses.index))) + network.import_components_from_dataframe( pd.DataFrame( - dict(marginal_cost=marginal_cost, - p_nom=p_nom, - carrier='load shedding', - bus=self.network.buses.index), - index=index), - "Generator" + dict( + marginal_cost=marginal_cost, + p_nom=p_nom, + carrier="load shedding", + bus=network.buses.index, + control="PQ", + ), + index=index, + ), + "Generator", ) -def data_manipulation_sh(network): - """ Adds missing components to run calculations with SH scenarios. +def set_control_strategies(network): + """Sets control strategies for AC generators and storage units Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA + Returns + ------- + None. + + """ + # Assign generators control strategy + network.generators.loc[:, "control"] = "PV" + + network.generators.loc[ + network.generators.carrier.isin( + [ + "load shedding", + "CH4", + "CH4_biogas", + "CH4_NG", + "central_biomass_CHP_heat", + "geo_thermal", + "solar_thermal_collector", + ] + ), + "control", + ] = "PQ" + + # Assign storage units control strategy + network.storage_units.loc[:, "control"] = "PV" + + +def data_manipulation_sh(network): + """Adds missing components to run calculations with SH scenarios. 
+ + Parameters + ---------- + network : :class:`pypsa.Network + Overall container of PyPSA + Returns + ------- + None """ - from shapely.geometry import Point, LineString, MultiLineString from geoalchemy2.shape import from_shape, to_shape + from shapely.geometry import LineString, MultiLineString, Point # add connection from Luebeck to Siems new_bus = str(network.buses.index.astype(np.int64).max() + 1) new_trafo = str(network.transformers.index.astype(np.int64).max() + 1) new_line = str(network.lines.index.astype(np.int64).max() + 1) - network.add("Bus", new_bus, carrier='AC', - v_nom=220, x=10.760835, y=53.909745) - network.add("Transformer", new_trafo, bus0="25536", - bus1=new_bus, x=1.29960, tap_ratio=1, s_nom=1600) - network.add("Line", new_line, bus0="26387", - bus1=new_bus, x=0.0001, s_nom=1600) - network.lines.loc[new_line, 'cables'] = 3.0 + network.add( + "Bus", new_bus, carrier="AC", v_nom=220, x=10.760835, y=53.909745 + ) + network.add( + "Transformer", + new_trafo, + bus0="25536", + bus1=new_bus, + x=1.29960, + tap_ratio=1, + s_nom=1600, + ) + network.add( + "Line", new_line, bus0="26387", bus1=new_bus, x=0.0001, s_nom=1600 + ) + network.lines.loc[new_line, "cables"] = 3.0 # bus geom point_bus1 = Point(10.760835, 53.909745) - network.buses.set_value(new_bus, 'geom', from_shape(point_bus1, 4326)) + network.buses.set_value(new_bus, "geom", from_shape(point_bus1, 4326)) # line geom/topo - network.lines.set_value(new_line, 'geom', from_shape(MultiLineString( - [LineString([to_shape(network. - buses.geom['26387']), point_bus1])]), 4326)) - network.lines.set_value(new_line, 'topo', from_shape(LineString( - [to_shape(network.buses.geom['26387']), point_bus1]), 4326)) + network.lines.set_value( + new_line, + "geom", + from_shape( + MultiLineString( + [ + LineString( + [to_shape(network.buses.geom["26387"]), point_bus1] + ) + ] + ), + 4326, + ), + ) + network.lines.set_value( + new_line, + "topo", + from_shape( + LineString([to_shape(network.buses.geom["26387"]), point_bus1]), + 4326, + ), + ) # trafo geom/topo - network.transformers.set_value(new_trafo, - 'geom', from_shape(MultiLineString( - [LineString( - [to_shape(network - .buses.geom['25536']), - point_bus1])]), 4326)) - network.transformers.set_value(new_trafo, 'topo', from_shape( - LineString([to_shape(network.buses.geom['25536']), point_bus1]), 4326)) + network.transformers.set_value( + new_trafo, + "geom", + from_shape( + MultiLineString( + [ + LineString( + [to_shape(network.buses.geom["25536"]), point_bus1] + ) + ] + ), + 4326, + ), + ) + network.transformers.set_value( + new_trafo, + "topo", + from_shape( + LineString([to_shape(network.buses.geom["25536"]), point_bus1]), + 4326, + ), + ) def _enumerate_row(row): - row['name'] = row.name + row["name"] = row.name return row def export_to_csv(self, path): - """ Function the writes the calaculation results - in csv-files in the desired directory. + """Write calculation results to csv-files in `path`. Parameters ---------- - network : :class:`pypsa.Network + network : :class:`pypsa.Network` Overall container of PyPSA args: dict Contains calculation settings of appl.py - path: str - Choose path for csv-files + path: str or False or None + Choose path for csv-files. Specify `""`, `False` or `None` to + not do anything. 
+
+    Returns
+    -------
+    None
+
     """
-    if path == False:
-        pass
-
+    if not path:
+        return
+
     if not os.path.exists(path):
         os.makedirs(path, exist_ok=True)
 
     self.network.export_to_csv_folder(path)
-    data = pd.read_csv(os.path.join(path, 'network.csv'))
-    #data['time'] = network.results['Solver'].Time
+    data = pd.read_csv(os.path.join(path, "network.csv"))
+    # data['time'] = network.results['Solver'].Time
     data = data.apply(_enumerate_row, axis=1)
-    data.to_csv(os.path.join(path, 'network.csv'), index=False)
+    data.to_csv(os.path.join(path, "network.csv"), index=False)
 
-    with open(os.path.join(path, 'args.json'), 'w') as fp:
-        json.dump(self.args, fp)
+    with open(os.path.join(path, "args.json"), "w") as fp:
+        json.dump(self.args, fp, indent=4)
 
-    if hasattr(self.network, 'Z'):
-        file = [i for i in os.listdir(
-            path.strip('0123456789')) if i == 'Z.csv']
+    if hasattr(self.network, "Z"):
+        file = [
+            i for i in os.listdir(path.strip("0123456789")) if i == "Z.csv"
+        ]
         if file:
-            print('Z already calculated')
+            print("Z already calculated")
         else:
-            self.network.Z.to_csv(
-                path.strip('0123456789') + '/Z.csv', index=False)
+            self.network.Z.to_csv(
+                path.strip("0123456789") + "/Z.csv", index=False
+            )
+
+    if bool(self.busmap):
+        path_clus = os.path.join(path, "clustering")
+        if not os.path.exists(path_clus):
+            os.makedirs(path_clus, exist_ok=True)
+
+        with open(os.path.join(path_clus, "busmap.json"), "w") as d:
+            json.dump(self.busmap["busmap"], d, indent=4)
+        self.busmap["orig_network"].export_to_csv_folder(path_clus)
+        data = pd.read_csv(os.path.join(path_clus, "network.csv"))
+        data = data.apply(_enumerate_row, axis=1)
+        data.to_csv(os.path.join(path_clus, "network.csv"), index=False)
+
+    if isinstance(self.ch4_h2_mapping, pd.Series):
+        path_clus = os.path.join(path, "clustering")
+        if not os.path.exists(path_clus):
+            os.makedirs(path_clus, exist_ok=True)
+        with open(os.path.join(path_clus, "ch4_h2_mapping.json"), "w") as d:
+            self.ch4_h2_mapping.to_json(d, indent=4)
 
     return
 
 
 def loading_minimization(network, snapshots):
+    """
+    Minimizes the absolute flows of the passive branches of the model by
+    adding a small penalty term to the objective function.
+
+    Parameters
+    ----------
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+    snapshots : pandas.core.indexes.datetimes.DatetimeIndex
+        Snapshots to perform the minimization on
+
+    Returns
+    -------
+    None
+
+    """
     network.model.number1 = Var(
-        network.model.passive_branch_p_index, within=PositiveReals)
+        network.model.passive_branch_p_index, within=PositiveReals
+    )
     network.model.number2 = Var(
-        network.model.passive_branch_p_index, within=PositiveReals)
+        network.model.passive_branch_p_index, within=PositiveReals
+    )
 
-    def cRule(model, c, l, t):
-        return (model.number1[c, l, t] - model.number2[c, l, t] == model.
-                passive_branch_p[c, l, t])
+    def cRule(model, c, l0, t):
+        return (
+            model.number1[c, l0, t] - model.number2[c, l0, t]
+            == model.passive_branch_p[c, l0, t]
+        )
 
     network.model.cRule = Constraint(
-        network.model.passive_branch_p_index, rule=cRule)
+        network.model.passive_branch_p_index, rule=cRule
+    )
+
+    network.model.objective.expr += 0.00001 * sum(
+        network.model.number1[i] + network.model.number2[i]
+        for i in network.model.passive_branch_p_index
+    )
+
+
+def _make_consense(component, attr):
+    """
+    Returns a function `consense` that will be used to generate a consensus
+    value for the attribute `attr` of the given `component`. This consensus
+    value is derived from the input DataFrame `x`. If all values in the
+    DataFrame are equal, the consensus value will be that common value.
+    If all values are missing (NaN), the consensus value will be NaN.
+    Otherwise, an assertion error will be raised.
+
+    Parameters
+    ----------
+    component : str
+        Specify the name of the component being clustered.
+    attr : str
+        Specify the name of the attribute of the component being considered.
+
+    Returns
+    -------
+    function
+        A function that takes a DataFrame as input and returns a single value
+        as output when all the elements of the component attribute are the
+        same.
+
+    """
+
+    def consense(x):
+        v = x.iat[0]
+        assert (x == v).all() or x.isnull().all(), (
+            f"In {component} cluster {x.name} the values"
+            f" of attribute {attr} do not agree:\n{x}"
+        )
+        return v
+
+    return consense
 
-    network.model.objective.expr += 0.00001 * \
-        sum(network.model.number1[i] + network.model.number2[i]
-            for i in network.model.passive_branch_p_index)
+
+def _normed(s):
+    """
+    Given a pandas Series `s`, normalizes the series by dividing each element
+    by the sum of the series. If the sum of the series is zero, returns 1.0 to
+    avoid division by zero errors.
+
+    Parameters
+    ----------
+    s : pandas.Series
+        A pandas Series.
+
+    Returns
+    -------
+    pandas.Series
+        A normalized pandas Series.
+
+    """
+    tot = s.sum()
+    if tot == 0:
+        return 1.0
+    else:
+        return s / tot
+
+
+def agg_series_lines(l0, network):
+    """
+    Given a pandas DataFrame `l0` containing information about lines in a
+    network and a network object, aggregates the data in `l0` over all of
+    its attributes. Returns a pandas Series containing the aggregated data.
+
+    Parameters
+    ----------
+    l0 : pandas.DataFrame
+        Contains information about lines in a network.
+    network : :class:`pypsa.Network`
+        Overall container of PyPSA
+
+    Returns
+    -------
+    pandas.Series
+        A pandas Series containing aggregated data for the lines in the
+        network.
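+
+    Notes
+    -----
+    The segments are combined as series elements: ``r`` and ``x`` are
+    summed, the shunt values ``g`` and ``b`` are combined as reciprocals
+    of summed reciprocals, and the thermal limits keep the most
+    restrictive bounds (maximum of ``s_nom_min``, minimum of
+    ``s_nom_max``).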
+ + """ + attrs = network.components["Line"]["attrs"] + columns = set( + attrs.index[attrs.static & attrs.status.str.startswith("Input")] + ).difference(("name", "bus0", "bus1")) + consense = { + attr: _make_consense("Bus", attr) + for attr in ( + columns + # | {"sub_network"} + - { + "r", + "x", + "g", + "b", + "terrain_factor", + "s_nom", + "s_nom_min", + "s_nom_max", + "s_nom_extendable", + "length", + "v_ang_min", + "v_ang_max", + } + ) + } + + Line = l0["Line"].iloc[0] + data = dict( + r=l0["r"].sum(), + x=l0["x"].sum(), + g=1.0 / (1.0 / l0["g"]).sum(), + b=1.0 / (1.0 / l0["b"]).sum(), + terrain_factor=l0["terrain_factor"].mean(), + s_max_pu=(l0["s_max_pu"] * _normed(l0["s_nom"])).sum(), + s_nom=l0["s_nom"].iloc[0], + s_nom_min=l0["s_nom_min"].max(), + s_nom_max=l0["s_nom_max"].min(), + s_nom_extendable=l0["s_nom_extendable"].any(), + num_parallel=l0["num_parallel"].max(), + capital_cost=(_normed(l0["s_nom"]) * l0["capital_cost"]).sum(), + length=l0["length"].sum(), + v_ang_min=l0["v_ang_min"].max(), + v_ang_max=l0["v_ang_max"].min(), + ) + data.update((f, consense[f](l0[f])) for f in columns.difference(data)) + return pd.Series( + data, index=[f for f in l0.columns if f in columns], name=Line + ) def group_parallel_lines(network): """ - TODO: Will be improved when merging feature/sclopf - Functions that groups parallel lines of the same voltage level to one + Function that groups parallel lines of the same voltage level to one line component representing all parallel lines Parameters @@ -680,145 +1034,419 @@ def group_parallel_lines(network): """ - # ordering of buses: (not sure if still necessary, remaining from SQL code) - old_lines = network.lines - - for line in old_lines.index: - bus0_new = str(old_lines.loc[line, ['bus0', 'bus1']].astype(int).min()) - bus1_new = str(old_lines.loc[line, ['bus0', 'bus1']].astype(int).max()) - old_lines.set_value(line, 'bus0', bus0_new) - old_lines.set_value(line, 'bus1', bus1_new) - - # saving the old index - for line in old_lines: - old_lines['old_index'] = network.lines.index - - grouped = old_lines.groupby(['bus0', 'bus1']) - - # calculating electrical properties for parallel lines - grouped_agg = grouped.\ - agg({'b': np.sum, - 'b_pu': np.sum, - 'cables': np.sum, - 'capital_cost': np.min, - 'frequency': np.mean, - 'g': np.sum, - 'g_pu': np.sum, - 'geom': lambda x: x[0], - 'length': lambda x: x.min(), - 'num_parallel': np.sum, - 'r': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 'r_pu': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 's_nom': np.sum, - 's_nom_extendable': lambda x: x.min(), - 's_nom_max': np.sum, - 's_nom_min': np.sum, - 's_nom_opt': np.sum, - 'scn_name': lambda x: x.min(), - 'sub_network': lambda x: x.min(), - 'terrain_factor': lambda x: x.min(), - 'topo': lambda x: x[0], - 'type': lambda x: x.min(), - 'v_ang_max': lambda x: x.min(), - 'v_ang_min': lambda x: x.min(), - 'x': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 'x_pu': lambda x: np.reciprocal(np.sum(np.reciprocal(x))), - 'old_index': np.min}) - - for i in range(0, len(grouped_agg.index)): - grouped_agg.set_value( - grouped_agg.index[i], 'bus0', grouped_agg.index[i][0]) - grouped_agg.set_value( - grouped_agg.index[i], 'bus1', grouped_agg.index[i][1]) - - new_lines = grouped_agg.set_index(grouped_agg.old_index) - new_lines = new_lines.drop('old_index', 1) - network.lines = new_lines + def agg_parallel_lines(l0): + attrs = network.components["Line"]["attrs"] + columns = set( + attrs.index[attrs.static & attrs.status.str.startswith("Input")] + 
).difference(("name", "bus0", "bus1"))
+        columns.add("Line")
+        columns.add("geom")
+        consense = {
+            attr: _make_consense("Bus", attr)
+            for attr in (
+                (columns | {"sub_network"})
+                - {
+                    "Line",
+                    "r",
+                    "x",
+                    "g",
+                    "b",
+                    "terrain_factor",
+                    "s_nom",
+                    "s_nom_min",
+                    "s_nom_max",
+                    "s_nom_extendable",
+                    "length",
+                    "v_ang_min",
+                    "v_ang_max",
+                    "geom",
+                }
+            )
+        }
+
+        data = dict(
+            Line=l0["Line"].iloc[0],
+            r=1.0 / (1.0 / l0["r"]).sum(),
+            x=1.0 / (1.0 / l0["x"]).sum(),
+            g=l0["g"].sum(),
+            b=l0["b"].sum(),
+            terrain_factor=l0["terrain_factor"].mean(),
+            s_max_pu=(l0["s_max_pu"] * _normed(l0["s_nom"])).sum(),
+            s_nom=l0["s_nom"].sum(),
+            s_nom_min=l0["s_nom_min"].sum(),
+            s_nom_max=l0["s_nom_max"].sum(),
+            s_nom_extendable=l0["s_nom_extendable"].any(),
+            num_parallel=l0["num_parallel"].sum(),
+            capital_cost=(_normed(l0["s_nom"]) * l0["capital_cost"]).sum(),
+            length=l0["length"].mean(),
+            sub_network=consense["sub_network"](l0["sub_network"]),
+            v_ang_min=l0["v_ang_min"].max(),
+            v_ang_max=l0["v_ang_max"].min(),
+            geom=l0["geom"].iloc[0],
+        )
+        data.update((f, consense[f](l0[f])) for f in columns.difference(data))
+        return pd.Series(data, index=[f for f in l0.columns if f in columns])
+
+    # Make bus0 always the greatest to identify repeated lines
+    lines_2 = network.lines.copy()
+    bus_max = lines_2.apply(lambda x: max(x.bus0, x.bus1), axis=1)
+    bus_min = lines_2.apply(lambda x: min(x.bus0, x.bus1), axis=1)
+    lines_2["bus0"] = bus_max
+    lines_2["bus1"] = bus_min
+    lines_2.reset_index(inplace=True)
+    lines_2["geom"] = lines_2.apply(
+        lambda x: None if x.geom is None else x.geom.wkt, axis=1
+    )
+    network.lines = (
+        lines_2.groupby(["bus0", "bus1"])
+        .apply(agg_parallel_lines)
+        .reset_index()
+        .set_index("Line", drop=True)
+    )
+    network.lines["geom"] = gpd.GeoSeries.from_wkt(network.lines["geom"])
 
     return
 
 
-def set_line_costs(network, cost110=230, cost220=290, cost380=85, costDC=375):
-    """ Set capital costs for extendable lines in respect to PyPSA [€/MVA]
+def delete_dispensable_ac_buses(etrago):
+    """
+    Function that identifies and deletes AC buses without links,
+    transformers, generators, loads, stores or storage_units, which are
+    also connected to just one or two other buses
+
+    Parameters
+    ----------
+    etrago : etrago object
+
+    Returns
+    -------
+    None.
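+
+    Notes
+    -----
+    Chains of lines that meet at such a dispensable bus with equal
+    ``s_nom`` are merged into one equivalent series line via
+    ``agg_series_lines``, and the bus itself is dropped together with
+    its superseded lines.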
+ + """ + if etrago.args["delete_dispensable_ac_buses"] is False: + return + + def delete_buses(delete_buses, network): + drop_buses = delete_buses.index.to_list() + network.buses.drop(labels=drop_buses, inplace=True) + drop_lines = network.lines.index[ + (network.lines.bus0.isin(drop_buses)) + | (network.lines.bus1.isin(drop_buses)) + ].to_list() + network.lines.drop(labels=drop_lines, inplace=True) + drop_storage_units = network.storage_units.index[ + (network.storage_units.bus.isin(drop_buses)) + ].to_list() + network.storage_units.drop(drop_storage_units, inplace=True) + drop_generators = network.generators.index[ + (network.generators.bus.isin(drop_buses)) + ].to_list() + network.generators.drop(drop_generators, inplace=True) + return ( + network.buses, + network.lines, + network.storage_units, + network.generators, + ) + + def count_lines(lines): + buses_in_lines = lines[["bus0", "bus1"]].drop_duplicates() + + def count(bus): + total = ( + (buses_in_lines["bus0"] == bus.name) + | (buses_in_lines["bus1"] == bus.name) + ).sum() + return total + + return count + + network = etrago.network + + # Group the parallel transmission lines to reduce the complexity + group_parallel_lines(etrago.network) + + # ordering of buses + bus0_new = network.lines.apply(lambda x: max(x.bus0, x.bus1), axis=1) + bus1_new = network.lines.apply(lambda x: min(x.bus0, x.bus1), axis=1) + network.lines["bus0"] = bus0_new + network.lines["bus1"] = bus1_new + + # Find the buses without any other kind of elements attached to them + # more than transmission lines. + ac_buses = network.buses[network.buses.carrier == "AC"][ + ["geom", "country"] + ] + b_links = pd.concat([network.links.bus0, network.links.bus1]).unique() + b_trafo = pd.concat( + [network.transformers.bus0, network.transformers.bus1] + ).unique() + b_gen = network.generators[ + network.generators.carrier != "load shedding" + ].bus.unique() + b_load = network.loads.bus.unique() + b_store = network.stores[network.stores.e_nom > 0].bus.unique() + b_store_unit = network.storage_units[ + network.storage_units.p_nom > 0 + ].bus.unique() + + ac_buses["links"] = ac_buses.index.isin(b_links) + ac_buses["trafo"] = ac_buses.index.isin(b_trafo) + ac_buses["gen"] = ac_buses.index.isin(b_gen) + ac_buses["load"] = ac_buses.index.isin(b_load) + ac_buses["store"] = ac_buses.index.isin(b_store) + ac_buses["storage_unit"] = ac_buses.index.isin(b_store_unit) + + ac_buses = ac_buses[ + ~(ac_buses.links) + & ~(ac_buses.trafo) + & ~(ac_buses.gen) + & ~(ac_buses.load) + & ~(ac_buses.store) + & ~(ac_buses.storage_unit) + ][[]] + + # count how many lines are connected to each bus + number_of_lines = count_lines(network.lines) + ac_buses["n_lines"] = 0 + ac_buses["n_lines"] = ac_buses.apply(number_of_lines, axis=1) + + # Keep the buses with two or less transmission lines + ac_buses = ac_buses[ac_buses["n_lines"] <= 2] + + # Keep only the buses connecting 2 lines with the same capacity + lines_cap = network.lines[ + (network.lines.bus0.isin(ac_buses.index)) + | (network.lines.bus1.isin(ac_buses.index)) + ][["bus0", "bus1", "s_nom"]] + + delete_bus = [] + for bus in ac_buses[ac_buses["n_lines"] == 2].index: + l0 = lines_cap[(lines_cap.bus0 == bus) | (lines_cap.bus1 == bus)][ + "s_nom" + ].unique() + if len(l0) != 1: + delete_bus.append(bus) + ac_buses.drop(delete_bus, inplace=True) + + # create groups of lines to join + buses_2 = ac_buses[ac_buses["n_lines"] == 2] + lines = network.lines[ + (network.lines.bus0.isin(buses_2.index)) + | (network.lines.bus1.isin(buses_2.index)) + 
][["bus0", "bus1"]].copy() + lines_index = lines.index + new_lines = pd.DataFrame(columns=["bus0", "bus1", "lines"]) + group = 0 + + for line in lines_index: + if line not in lines.index: + continue + bus0 = lines.at[line, "bus0"] + bus1 = lines.at[line, "bus1"] + lines_group = [line] + lines.drop(line, inplace=True) + + # Determine bus0 new group + end_search = False + + while not end_search: + if bus0 not in ac_buses.index: + end_search = True + continue + lines_b = lines[(lines.bus0 == bus0) | (lines.bus1 == bus0)] + if len(lines_b) > 0: + lines_group.append(lines_b.index[0]) + if lines_b.iat[0, 0] == bus0: + bus0 = lines_b.iat[0, 1] + else: + bus0 = lines_b.iat[0, 0] + lines.drop(lines_b.index[0], inplace=True) + else: + end_search = True + + # Determine bus1 new group + end_search = False + while not end_search: + if bus1 not in ac_buses.index: + end_search = True + continue + lines_b = lines[(lines.bus0 == bus1) | (lines.bus1 == bus1)] + if len(lines_b) > 0: + lines_group.append(lines_b.index[0]) + if lines_b.iat[0, 0] == bus1: + bus1 = lines_b.iat[0, 1] + else: + bus1 = lines_b.iat[0, 0] + lines.drop(lines_b.index[0], inplace=True) + else: + end_search = True + + # Define the parameters of the new lines to be inserted into + # `network.lines`. + new_lines.loc[group] = [bus0, bus1, lines_group] + group = group + 1 + + # Create the new lines as result of aggregating series lines + lines = network.lines[ + (network.lines.bus0.isin(buses_2.index)) + | (network.lines.bus1.isin(buses_2.index)) + ] + + new_lines_df = pd.DataFrame(columns=lines.columns).rename_axis("Lines") + + for l0 in new_lines.index: + lines_group = ( + lines[lines.index.isin(new_lines.at[l0, "lines"])] + .copy() + .reset_index() + ) + l_new = agg_series_lines(lines_group, network) + l_new["bus0"] = new_lines.at[l0, "bus0"] + l_new["bus1"] = new_lines.at[l0, "bus1"] + new_lines_df["s_nom_extendable"] = new_lines_df[ + "s_nom_extendable" + ].astype(bool) + new_lines_df.loc[l_new.name] = l_new + + # Delete all the dispensable buses + ( + network.buses, + network.lines, + network.storage_units, + network.generators, + ) = delete_buses(ac_buses, network) + + # exclude from the new lines the ones connected to deleted buses + new_lines_df = new_lines_df[ + (~new_lines_df.bus0.isin(ac_buses.index)) + & (~new_lines_df.bus1.isin(ac_buses.index)) + ] + + etrago.network.lines = pd.concat([etrago.network.lines, new_lines_df]) + + # Drop s_max_pu timeseries for deleted lines + etrago.network.lines_t.s_max_pu = ( + etrago.network.lines_t.s_max_pu.transpose()[ + etrago.network.lines_t.s_max_pu.columns.isin( + etrago.network.lines.index + ) + ].transpose() + ) + + return + + +def set_line_costs(self, cost110=230, cost220=290, cost380=85, costDC=375): + """Set capital costs for extendable lines in respect to PyPSA [€/MVA] Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA - args: dict containing settings from appl.py - cost110 : capital costs per km for 110kV lines and cables - default: 230€/MVA/km, source: costs for extra circuit in - dena Verteilnetzstudie, p. 
146) - cost220 : capital costs per km for 220kV lines and cables - default: 280€/MVA/km, source: costs for extra circuit in - NEP 2025, capactity from most used 220 kV lines in model - cost380 : capital costs per km for 380kV lines and cables - default: 85€/MVA/km, source: costs for extra circuit in - NEP 2025, capactity from most used 380 kV lines in NEP - costDC : capital costs per km for DC-lines - default: 375€/MVA/km, source: costs for DC transmission line - in NEP 2035 - ------- + args: dict + containing settings from appl.py + cost110 : + capital costs per km for 110kV lines and cables + default: 230€/MVA/km, source: costs for extra circuit in + dena Verteilnetzstudie, p. 146) + cost220 : + capital costs per km for 220kV lines and cables + default: 290€/MVA/km, source: costs for extra circuit in + NEP 2025, capacity from most used 220 kV lines in model + cost380 : + capital costs per km for 380kV lines and cables + default: 85€/MVA/km, source: costs for extra circuit in + NEP 2025, capacity from most used 380 kV lines in NEP + costDC : + capital costs per km for DC-lines + default: 375€/MVA/km, source: costs for DC transmission line + in NEP 2035 """ - network.lines.loc[(network.lines.v_nom == 110), - 'capital_cost'] = cost110 * network.lines.length + network = self.network + + network.lines.loc[(network.lines.v_nom == 110), "capital_cost"] = ( + cost110 * network.lines.length + ) - network.lines.loc[(network.lines.v_nom == 220), - 'capital_cost'] = cost220 * network.lines.length + network.lines.loc[(network.lines.v_nom == 220), "capital_cost"] = ( + cost220 * network.lines.length + ) - network.lines.loc[(network.lines.v_nom == 380), - 'capital_cost'] = cost380 * network.lines.length + network.lines.loc[(network.lines.v_nom == 380), "capital_cost"] = ( + cost380 * network.lines.length + ) - network.links.loc[network.links.p_nom_extendable, - 'capital_cost'] = costDC * network.links.length + network.links.loc[ + (network.links.p_nom_extendable) + & (network.links.index.isin(self.dc_lines().index)), + "capital_cost", + ] = ( + costDC * network.links.length + ) return network -def set_trafo_costs(network, cost110_220=7500, cost110_380=17333, - cost220_380=14166): - """ Set capital costs for extendable transformers in respect +def set_trafo_costs( + self, cost110_220=7500, cost110_380=17333, cost220_380=14166 +): + """Set capital costs for extendable transformers with respect to PyPSA [€/MVA] Parameters ---------- network : :class:`pypsa.Network Overall container of PyPSA - cost110_220 : capital costs for 110/220kV transformer - default: 7500€/MVA, source: costs for extra trafo in - dena Verteilnetzstudie, p.
146; S of trafo used in osmTGmod + cost110_380 : + capital costs for 110/380kV transformer + default: 17333€/MVA, source: NEP 2025 + cost220_380 : + capital costs for 220/380kV transformer + default: 14166€/MVA, source: NEP 2025 """ + + network = self.network network.transformers["v_nom0"] = network.transformers.bus0.map( - network.buses.v_nom) + network.buses.v_nom + ) network.transformers["v_nom1"] = network.transformers.bus1.map( - network.buses.v_nom) + network.buses.v_nom + ) - network.transformers.loc[(network.transformers.v_nom0 == 110) & ( - network.transformers.v_nom1 == 220), 'capital_cost'] = cost110_220 + network.transformers.loc[ + (network.transformers.v_nom0 == 110) + & (network.transformers.v_nom1 == 220), + "capital_cost", + ] = cost110_220 - network.transformers.loc[(network.transformers.v_nom0 == 110) & ( - network.transformers.v_nom1 == 380), 'capital_cost'] = cost110_380 + network.transformers.loc[ + (network.transformers.v_nom0 == 110) + & (network.transformers.v_nom1 == 380), + "capital_cost", + ] = cost110_380 - network.transformers.loc[(network.transformers.v_nom0 == 220) & ( - network.transformers.v_nom1 == 380), 'capital_cost'] = cost220_380 + network.transformers.loc[ + (network.transformers.v_nom0 == 220) + & (network.transformers.v_nom1 == 380), + "capital_cost", + ] = cost220_380 return network def add_missing_components(self): - # Munich - """ TODO: Manualy adds lines between hard-coded buses. Has to be changed - for the next dataversion and should be moved to data processing - - Add missing transformer at Heizkraftwerk Nord in Munich and missing - transformer in Stuttgart + """ + Add a missing transformer at Heizkraftwerk Nord in Munich and a missing + transformer in Stuttgart. Parameters ---------- @@ -832,8 +1460,14 @@ def add_missing_components(self): """ + # Munich + # TODO: Manually adds lines between hard-coded buses. 
Has to be + # changed for the next dataversion and should be moved to data + # processing + """ - https://www.swm.de/privatkunden/unternehmen/energieerzeugung/heizkraftwerke.html?utm_medium=301 + "https://www.swm.de/privatkunden/unternehmen/energieerzeugung" + + "/heizkraftwerke.html?utm_medium=301" to bus 25096: 25369 (86) @@ -851,19 +1485,27 @@ def add_missing_components(self): 28335 to 28139 (28) Overhead lines: 16573 to 24182 (part of 4) - """ - """ + Installierte Leistung der Umspannungsebene Höchst- zu Hochspannung (380 kV / 110 kV): 2.750.000 kVA - https://www.swm-infrastruktur.de/strom/netzstrukturdaten/strukturmerkmale.html + + "https://www.swm-infrastruktur.de/strom/netzstrukturdaten" + + "/strukturmerkmale.html """ network = self.network new_trafo = str(network.transformers.index.astype(int).max() + 1) - network.add("Transformer", new_trafo, bus0="16573", bus1="23648", - x=0.135 / (2750 / 2), - r=0.0, tap_ratio=1, s_nom=2750 / 2) + network.add( + "Transformer", + new_trafo, + bus0="16573", + bus1="23648", + x=0.135 / (2750 / 2), + r=0.0, + tap_ratio=1, + s_nom=2750 / 2, + ) def add_110kv_line(bus0, bus1, overhead=False): new_line = str(network.lines.index.astype(int).max() + 1) @@ -876,36 +1518,39 @@ def add_110kv_line(bus0, bus1, overhead=False): network.lines.loc[new_line, "version"] = "added_manually" network.lines.loc[new_line, "frequency"] = 50 network.lines.loc[new_line, "cables"] = 3.0 - network.lines.loc[new_line, "country"] = 'DE' + network.lines.loc[new_line, "country"] = "DE" network.lines.loc[new_line, "length"] = ( - pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]], - network.buses.loc[bus1, ["x", "y"]]) - [0][0] * 1.2) + pypsa.geo.haversine( + network.buses.loc[bus0, ["x", "y"]], + network.buses.loc[bus1, ["x", "y"]], + )[0][0] + * 1.2 + ) if not overhead: - network.lines.loc[new_line, "r"] = (network.lines. - loc[new_line, "length"] * - 0.0177) + network.lines.loc[new_line, "r"] = ( + network.lines.loc[new_line, "length"] * 0.0177 + ) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*78e-9) - network.lines.loc[new_line, "x"] = (network.lines. - loc[new_line, "length"] * - 0.3e-3) - network.lines.loc[new_line, "b"] = (network.lines. - loc[new_line, "length"] * - 250e-9) + network.lines.loc[new_line, "x"] = ( + network.lines.loc[new_line, "length"] * 0.3e-3 + ) + network.lines.loc[new_line, "b"] = ( + network.lines.loc[new_line, "length"] * 250e-9 + ) elif overhead: - network.lines.loc[new_line, "r"] = (network.lines. - loc[new_line, "length"] * - 0.05475) + network.lines.loc[new_line, "r"] = ( + network.lines.loc[new_line, "length"] * 0.05475 + ) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*40e-9) - network.lines.loc[new_line, "x"] = (network.lines. - loc[new_line, "length"] * - 1.2e-3) - network.lines.loc[new_line, "b"] = (network.lines. 
- loc[new_line, "length"] * - 9.5e-9) + network.lines.loc[new_line, "x"] = ( + network.lines.loc[new_line, "length"] * 1.2e-3 + ) + network.lines.loc[new_line, "b"] = ( + network.lines.loc[new_line, "length"] * 9.5e-9 + ) add_110kv_line("16573", "28353") add_110kv_line("16573", "28092") @@ -929,8 +1574,16 @@ def add_110kv_line(bus0, bus1, overhead=False): Heizkraftwerk Heilbronn: """ # new_trafo = str(network.transformers.index.astype(int).max()1) - network.add("Transformer", '99999', bus0="18967", bus1="25766", - x=0.135 / 300, r=0.0, tap_ratio=1, s_nom=300) + network.add( + "Transformer", + "99999", + bus0="18967", + bus1="25766", + x=0.135 / 300, + r=0.0, + tap_ratio=1, + s_nom=300, + ) """ According to: https://assets.ctfassets.net/xytfb1vrn7of/NZO8x4rKesAcYGGcG4SQg/b780d6a3ca4c2600ab51a30b70950bb1/netzschemaplan-110-kv.pdf @@ -975,99 +1628,103 @@ def add_220kv_line(bus0, bus1, overhead=False): network.lines.loc[new_line, "version"] = "added_manually" network.lines.loc[new_line, "frequency"] = 50 network.lines.loc[new_line, "cables"] = 3.0 - network.lines.loc[new_line, "country"] = 'DE' + network.lines.loc[new_line, "country"] = "DE" network.lines.loc[new_line, "length"] = ( - pypsa.geo.haversine(network.buses.loc[bus0, ["x", "y"]], - network.buses.loc[bus1, ["x", "y"]])[0][0] * - 1.2) + pypsa.geo.haversine( + network.buses.loc[bus0, ["x", "y"]], + network.buses.loc[bus1, ["x", "y"]], + )[0][0] + * 1.2 + ) if not overhead: - network.lines.loc[new_line, "r"] = (network.lines. - loc[new_line, "length"] * - 0.0176) + network.lines.loc[new_line, "r"] = ( + network.lines.loc[new_line, "length"] * 0.0176 + ) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*67e-9) - network.lines.loc[new_line, "x"] = (network.lines. - loc[new_line, "length"] * - 0.3e-3) - network.lines.loc[new_line, "b"] = (network.lines. - loc[new_line, "length"] * - 210e-9) + network.lines.loc[new_line, "x"] = ( + network.lines.loc[new_line, "length"] * 0.3e-3 + ) + network.lines.loc[new_line, "b"] = ( + network.lines.loc[new_line, "length"] * 210e-9 + ) elif overhead: - network.lines.loc[new_line, "r"] = (network.lines. - loc[new_line, "length"] * - 0.05475) + network.lines.loc[new_line, "r"] = ( + network.lines.loc[new_line, "length"] * 0.05475 + ) network.lines.loc[new_line, "g"] = 0 # or: (network.lines.loc[new_line, "length"]*30e-9) - network.lines.loc[new_line, "x"] = (network.lines. - loc[new_line, "length"] * 1e-3) - network.lines.loc[new_line, "b"] = (network.lines. 
- loc[new_line, "length"] * 11e-9 - ) + network.lines.loc[new_line, "x"] = ( + network.lines.loc[new_line, "length"] * 1e-3 + ) + network.lines.loc[new_line, "b"] = ( + network.lines.loc[new_line, "length"] * 11e-9 + ) add_220kv_line("266", "24633", overhead=True) - # temporary turn buses of transformers network.transformers["v_nom0"] = network.transformers.bus0.map( - network.buses.v_nom) + network.buses.v_nom + ) network.transformers["v_nom1"] = network.transformers.bus1.map( - network.buses.v_nom) + network.buses.v_nom + ) new_bus0 = network.transformers.bus1[ - network.transformers.v_nom0 > network.transformers.v_nom1] + network.transformers.v_nom0 > network.transformers.v_nom1 + ] new_bus1 = network.transformers.bus0[ - network.transformers.v_nom0 > network.transformers.v_nom1] - network.transformers.bus0[network.transformers.v_nom0 > - network.transformers.v_nom1] = new_bus0.values - network.transformers.bus1[network.transformers.v_nom0 > - network.transformers.v_nom1] = new_bus1.values + network.transformers.v_nom0 > network.transformers.v_nom1 + ] + network.transformers.bus0[ + network.transformers.v_nom0 > network.transformers.v_nom1 + ] = new_bus0.values + network.transformers.bus1[ + network.transformers.v_nom0 > network.transformers.v_nom1 + ] = new_bus1.values return network -def convert_capital_costs(self, p=0.05, T=40): - """ Convert capital_costs to fit to pypsa and caluculated time +def convert_capital_costs(self): + """Convert capital_costs to fit the considered timesteps Parameters ---------- etrago : :class:`etrago.Etrago Transmission grid object - p : interest rate, default 0.05 - T : number of periods, default 40 years (source: StromNEV Anlage 1) - ------- """ network = self.network - start_snapshot = self.args['start_snapshot'] - end_snapshot = self.args['end_snapshot'] - # Add costs for DC-converter - network.links.capital_cost = network.links.capital_cost + 400000 - - # Calculate present value of an annuity (PVA) - PVA = (1 / p) - (1 / (p * (1 + p) ** T)) - - # Apply function on lines, links, trafos and storages - # Storage costs are already annuized yearly - network.lines.loc[network.lines.s_nom_extendable == True, - 'capital_cost'] = ( - network.lines.capital_cost / - (PVA * (8760 / (end_snapshot - start_snapshot + 1)))) - network.links.loc[network.links.p_nom_extendable == True, - 'capital_cost'] = network.links.capital_cost /\ - (PVA * (8760 / (end_snapshot - start_snapshot + 1))) + n_snapshots = self.args["end_snapshot"] - self.args["start_snapshot"] + 1 + + # Costs are already annualized per year in the data model; + # adjust them to the number of considered snapshots + + network.lines.loc[network.lines.s_nom_extendable, "capital_cost"] *= ( + n_snapshots / 8760 + ) + + network.links.loc[network.links.p_nom_extendable, "capital_cost"] *= ( + n_snapshots / 8760 + ) + network.transformers.loc[ - network.transformers.s_nom_extendable == True, 'capital_cost'] = \ - network.transformers.capital_cost /( - PVA * (8760 / (end_snapshot - start_snapshot + 1))) + network.transformers.s_nom_extendable, "capital_cost" + ] *= (n_snapshots / 8760) + network.storage_units.loc[ - network.storage_units.p_nom_extendable == True, 'capital_cost'] = \ - network.storage_units.capital_cost / \ - (8760 / (end_snapshot - start_snapshot + 1)) + network.storage_units.p_nom_extendable, "capital_cost" + ] *= (n_snapshots / 8760) + network.stores.loc[network.stores.e_nom_extendable, "capital_cost"] *= ( + n_snapshots / 8760 + ) -def
find_snapshots(network, carrier, maximum=True, minimum=True, n=3): """ Function that returns snapshots with maximum and/or minimum feed-in of selected carrier. @@ -1091,22 +1748,33 @@ def find_snapshots(network, carrier, maximum=True, minimum=True, n=3): List containing snapshots """ - if carrier == 'residual load': + if carrier == "residual load": power_plants = network.generators[ - network.generators.carrier.isin(['solar', 'wind', 'wind_onshore'])] - power_plants_t = network.generators.p_nom[power_plants.index] * \ - network.generators_t.p_max_pu[power_plants.index] + network.generators.carrier.isin(["solar", "wind", "wind_onshore"]) + ] + power_plants_t = ( + network.generators.p_nom[power_plants.index] + * network.generators_t.p_max_pu[power_plants.index] + ) load = network.loads_t.p_set.sum(axis=1) all_renew = power_plants_t.sum(axis=1) all_carrier = load - all_renew - if carrier in ('solar', 'wind', 'wind_onshore', - 'wind_offshore', 'run_of_river'): - power_plants = network.generators[network.generators.carrier - == carrier] + if carrier in ( + "solar", + "wind", + "wind_onshore", + "wind_offshore", + "run_of_river", + ): + power_plants = network.generators[ + network.generators.carrier == carrier + ] - power_plants_t = network.generators.p_nom[power_plants.index] * \ - network.generators_t.p_max_pu[power_plants.index] + power_plants_t = ( + network.generators.p_nom[power_plants.index] + * network.generators_t.p_max_pu[power_plants.index] + ) all_carrier = power_plants_t.sum(axis=1) if maximum and not minimum: @@ -1117,7 +1785,7 @@ def find_snapshots(network, carrier, maximum=True, minimum=True, n=3): if maximum and minimum: times = all_carrier.sort_values().head(n=n) - times = times.append(all_carrier.sort_values().tail(n=n)) + times = pd.concat([times, all_carrier.sort_values().tail(n=n)]) calc_snapshots = all_carrier.index[all_carrier.index.isin(times.index)] @@ -1125,7 +1793,7 @@ def find_snapshots(network, carrier, maximum=True, minimum=True, n=3): def ramp_limits(network): - """ Add ramping constraints to thermal power plants. + """Add ramping constraints to thermal power plants. 
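+ + Start-up costs, minimum up and down times and minimum part loads are set per carrier, and the affected generators are marked as committable; the start-up fuel demand is converted into a cost component using the mean marginal cost per carrier, see the data table below.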
Parameters ---------- @@ -1136,34 +1804,46 @@ def ramp_limits(network): ------- """ - carrier = ['coal', 'biomass', 'gas', 'oil', 'waste', 'lignite', - 'uranium', 'geothermal'] - data = {'start_up_cost':[77, 57, 42, 57, 57, 77, 50, 57], #€/MW - 'start_up_fuel':[4.3, 2.8, 1.45, 2.8, 2.8, 4.3, 16.7, 2.8], #MWh/MW - 'min_up_time':[5, 2, 3, 2, 2, 5, 12, 2], - 'min_down_time':[7, 2, 2, 2, 2, 7, 17, 2], -# ============================================================================= -# 'ramp_limit_start_up':[0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.5, 0.4], -# 'ramp_limit_shut_down':[0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.5, 0.4] -# ============================================================================= - 'p_min_pu':[0.33, 0.38, 0.4, 0.38, 0.38, 0.5, 0.45, 0.38] - } + carrier = [ + "coal", + "biomass", + "gas", + "oil", + "waste", + "lignite", + "uranium", + "geothermal", + ] + data = { + "start_up_cost": [77, 57, 42, 57, 57, 77, 50, 57], # €/MW + "start_up_fuel": [4.3, 2.8, 1.45, 2.8, 2.8, 4.3, 16.7, 2.8], # MWh/MW + "min_up_time": [5, 2, 3, 2, 2, 5, 12, 2], + "min_down_time": [7, 2, 2, 2, 2, 7, 17, 2], + # =================================================================== + # 'ramp_limit_start_up':[0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.5, 0.4], + # 'ramp_limit_shut_down':[0.4, 0.4, 0.4, 0.4, 0.4, 0.6, 0.5, 0.4] + # =================================================================== + "p_min_pu": [0.33, 0.38, 0.4, 0.38, 0.38, 0.5, 0.45, 0.38], + } df = pd.DataFrame(data, index=carrier) fuel_costs = network.generators.marginal_cost.groupby( - network.generators.carrier).mean()[carrier] - df['start_up_fuel'] = df['start_up_fuel'] * fuel_costs - df['start_up_cost'] = df['start_up_cost'] + df['start_up_fuel'] - df.drop('start_up_fuel', axis=1, inplace=True) + network.generators.carrier + ).mean()[carrier] + df["start_up_fuel"] = df["start_up_fuel"] * fuel_costs + df["start_up_cost"] = df["start_up_cost"] + df["start_up_fuel"] + df.drop("start_up_fuel", axis=1, inplace=True) for tech in df.index: for limit in df.columns: - network.generators.loc[network.generators.carrier == tech, - limit] = df.loc[tech, limit] - network.generators.start_up_cost = network.generators.start_up_cost\ - *network.generators.p_nom + network.generators.loc[ + network.generators.carrier == tech, limit + ] = df.loc[tech, limit] + network.generators.start_up_cost = ( + network.generators.start_up_cost * network.generators.p_nom + ) network.generators.committable = True -def get_args_setting(self, jsonpath='scenario_setting.json'): +def get_args_setting(self, jsonpath="scenario_setting.json"): """ Get and open json file with scenario settings of eTraGo ``args``. The settings include all eTraGo specific settings of arguments and @@ -1181,9 +1861,97 @@ def get_args_setting(self, jsonpath='scenario_setting.json'): Dictionary of json file """ - if not jsonpath == None: + if jsonpath is not None: with open(jsonpath) as f: - self.args = json.load(f) + if hasattr(self, "args"): + self.args = merge_dicts(self.args, json.load(f)) + else: + self.args = json.load(f) + + +def merge_dicts(dict1, dict2): + """ + Return a new dictionary by merging two dictionaries recursively. + + Parameters + ---------- + dict1 : dict + dictionary 1. + dict2 : dict + dictionary 2.
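+ + Values from dict2 take precedence on conflicting keys; e.g. merging {"a": {"x": 1, "y": 2}} with {"a": {"y": 3}} yields {"a": {"x": 1, "y": 3}} (hypothetical keys, for illustration only).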
+ + Returns + ------- + result : dict + Union of dict1 and dict2 + + """ + + result = deepcopy(dict1) + + for key, value in dict2.items(): + if isinstance(value, Mapping): + result[key] = merge_dicts(result.get(key, {}), value) + else: + result[key] = deepcopy(dict2[key]) + + return result + + +def get_clustering_data(self, path): + """ + Import the final busmap and the initial buses, lines and links + + Parameters + ---------- + path : str + Name of folder from which to import CSVs of network data. + + Returns + ------- + None + + """ + + if (self.args["network_clustering_ehv"]["active"]) | ( + self.args["network_clustering"]["active"] + ): + path_clus = os.path.join(path, "clustering") + if os.path.exists(path_clus): + ch4_h2_mapping_path = os.path.join( + path_clus, "ch4_h2_mapping.json" + ) + if os.path.exists(ch4_h2_mapping_path): + with open(ch4_h2_mapping_path) as f: + self.ch4_h2_mapping = pd.read_json(f, typ="series").astype( + str + ) + self.ch4_h2_mapping.index.name = "CH4_bus" + self.ch4_h2_mapping.index = ( + self.ch4_h2_mapping.index.astype(str) + ) + else: + logger.info( + """There is no CH4 to H2 bus mapping data + available in the loaded object.""" + ) + + busmap_path = os.path.join(path_clus, "busmap.json") + if os.path.exists(busmap_path): + with open(busmap_path) as f: + self.busmap["busmap"] = json.load(f) + self.busmap["orig_network"] = pypsa.Network( + path_clus, name="orig" + ) + else: + logger.info( + "There is no busmap data available in the loaded object." + ) + + else: + logger.info( + "There is no clustering data available in the loaded object." + ) def set_random_noise(self, sigma=0.01): @@ -1194,31 +1962,54 @@ def set_random_noise(self, sigma=0.01): ---------- etrago : :class:`etrago.Etrago Transmission grid object - seed: int seed number, needed to reproduce results - sigma: float Default: 0.01 standard deviation, small values reduce impact on dispatch but might lead to numerical instability """ - if self.args['generator_noise'] != False: + if self.args["generator_noise"]: network = self.network - seed = self.args['generator_noise'] + seed = self.args["generator_noise"] s = np.random.RandomState(seed) - network.generators.marginal_cost[network.generators.bus.isin( - network.buses.index[network.buses.country_code == 'DE'])] += \ - abs(s.normal(0, sigma, len(network.generators.marginal_cost[ - network.generators.bus.isin(network.buses.index[ - network.buses.country_code == 'DE'])]))) - - network.generators.marginal_cost[network.generators.bus.isin( - network.buses.index[network.buses.country_code != 'DE'])] += \ - abs(s.normal(0, sigma, len(network.generators.marginal_cost[ - network.generators.bus.isin(network.buses.index[ - network.buses.country_code == 'DE'])]))).max() + network.generators.marginal_cost[ + network.generators.bus.isin( + network.buses.index[network.buses.country == "DE"] + ) + ] += abs( + s.normal( + 0, + sigma, + len( + network.generators.marginal_cost[ + network.generators.bus.isin( + network.buses.index[network.buses.country == "DE"] + ) + ] + ), + ) + ) + + network.generators.marginal_cost[ + network.generators.bus.isin( + network.buses.index[network.buses.country != "DE"] + ) + ] += abs( + s.normal( + 0, + sigma, + len( + network.generators.marginal_cost[ + network.generators.bus.isin( + network.buses.index[network.buses.country == "DE"] + ) + ] + ), + ) + ).max() + def set_line_country_tags(network): """ @@ -1229,47 +2020,172 @@ def set_line_country_tags(network): network : :class:`pypsa.Network Overall container of PyPSA - """ - 
transborder_lines_0 = network.lines[network.lines['bus0'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - transborder_lines_1 = network.lines[network.lines['bus1'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - #set country tag for lines - network.lines.loc[transborder_lines_0, 'country'] = \ - network.buses.loc[network.lines.loc[transborder_lines_0, 'bus0']\ - .values, 'country_code'].values - - network.lines.loc[transborder_lines_1, 'country'] = \ - network.buses.loc[network.lines.loc[transborder_lines_1, 'bus1']\ - .values, 'country_code'].values - network.lines['country'].fillna('DE', inplace=True) + transborder_lines_0 = network.lines[ + network.lines["bus0"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index + transborder_lines_1 = network.lines[ + network.lines["bus1"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index + # set country tag for lines + network.lines.loc[transborder_lines_0, "country"] = network.buses.loc[ + network.lines.loc[transborder_lines_0, "bus0"].values, "country" + ].values + + network.lines.loc[transborder_lines_1, "country"] = network.buses.loc[ + network.lines.loc[transborder_lines_1, "bus1"].values, "country" + ].values + network.lines["country"].fillna("DE", inplace=True) doubles = list(set(transborder_lines_0.intersection(transborder_lines_1))) for line in doubles: - c_bus0 = network.buses.loc[network.lines.loc[line, 'bus0'], 'country'] - c_bus1 = network.buses.loc[network.lines.loc[line, 'bus1'], 'country'] - network.lines.loc[line, 'country'] = '{}{}'.format(c_bus0, c_bus1) - - transborder_links_0 = network.links[network.links['bus0'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - transborder_links_1 = network.links[network.links['bus1'].isin( - network.buses.index[network.buses['country_code'] != 'DE'])].index - - #set country tag for links - network.links.loc[transborder_links_0, 'country'] = \ - network.buses.loc[network.links.loc[transborder_links_0, 'bus0']\ - .values, 'country_code'].values - - network.links.loc[transborder_links_1, 'country'] = \ - network.buses.loc[network.links.loc[transborder_links_1, 'bus1']\ - .values, 'country_code'].values - network.links['country'].fillna('DE', inplace=True) + c_bus0 = network.buses.loc[network.lines.loc[line, "bus0"], "country"] + c_bus1 = network.buses.loc[network.lines.loc[line, "bus1"], "country"] + network.lines.loc[line, "country"] = "{}{}".format(c_bus0, c_bus1) + + transborder_links_0 = network.links[ + network.links["bus0"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index + transborder_links_1 = network.links[ + network.links["bus1"].isin( + network.buses.index[network.buses["country"] != "DE"] + ) + ].index + + # set country tag for links + network.links.loc[transborder_links_0, "country"] = network.buses.loc[ + network.links.loc[transborder_links_0, "bus0"].values, "country" + ].values + + network.links.loc[transborder_links_1, "country"] = network.buses.loc[ + network.links.loc[transborder_links_1, "bus1"].values, "country" + ].values + network.links["country"].fillna("DE", inplace=True) doubles = list(set(transborder_links_0.intersection(transborder_links_1))) for link in doubles: - c_bus0 = network.buses.loc[network.links.loc[link, 'bus0'], 'country'] - c_bus1 = network.buses.loc[network.links.loc[link, 'bus1'], 'country'] - network.links.loc[link, 'country'] = '{}{}'.format(c_bus0, c_bus1) + c_bus0 = 
network.buses.loc[network.links.loc[link, "bus0"], "country"] + c_bus1 = network.buses.loc[network.links.loc[link, "bus1"], "country"] + network.links.loc[link, "country"] = "{}{}".format(c_bus0, c_bus1) + + +def crossborder_capacity_tyndp2020(): + """ + This function downloads and extracts a scenario datafile for the TYNDP 2020 + (Ten-Year Network Development Plan), reads a specific sheet from the file, + filters it based on certain criteria, and then calculates the minimum + cross-border capacities for a list of European countries. The minimum + cross-border capacity is the minimum of the export and import capacities + between two countries. + + Returns + ------- + dict + Dictionary with crossborder capacities. + + """ + from urllib.request import urlretrieve + import zipfile + + path = "TYNDP-2020-Scenario-Datafile.xlsx" + + urlretrieve( + "https://www.entsos-tyndp2020-scenarios.eu/wp-content/uploads" + "/2020/06/TYNDP-2020-Scenario-Datafile.xlsx.zip", + path, + ) + + file = zipfile.ZipFile(path) + + df = pd.read_excel( + file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(), + sheet_name="Line", + ) + + df = df[ + (df.Scenario == "Distributed Energy") + & (df.Case == "Reference Grid") + & (df.Year == 2040) + & (df["Climate Year"] == 1984) + & ( + (df.Parameter == "Import Capacity") + | (df.Parameter == "Export Capacity") + ) + ] + + df["country0"] = df["Node/Line"].str[:2] + + df["country1"] = df["Node/Line"].str[5:7] + + c_export = ( + df[df.Parameter == "Export Capacity"] + .groupby(["country0", "country1"]) + .Value.sum() + ) + + c_import = ( + df[df.Parameter == "Import Capacity"] + .groupby(["country0", "country1"]) + .Value.sum() + ) + + capacities = pd.DataFrame( + index=c_export.index, + data={"export": c_export.abs(), "import": c_import.abs()}, + ).reset_index() + + with_de = capacities[ + (capacities.country0 == "DE") & (capacities.country1 != "DE") + ].set_index("country1")[["export", "import"]] + + with_de = pd.concat( + [ + with_de, + capacities[ + (capacities.country0 != "DE") & (capacities.country1 == "DE") + ].set_index("country0")[["export", "import"]], + ] + ) + + countries = [ + "DE", + "DK", + "NL", + "CZ", + "PL", + "AT", + "CH", + "FR", + "LU", + "BE", + "GB", + "NO", + "SE", + ] + + without_de = capacities[ + (capacities.country0 != "DE") + & (capacities.country1 != "DE") + & (capacities.country0.isin(countries)) + & (capacities.country1.isin(countries)) + & (capacities.country1 != capacities.country0) + ] + + without_de["country"] = without_de.country0 + without_de.country1 + + without_de.set_index("country", inplace=True) + + without_de = without_de[["export", "import"]].fillna(0.0) + + return { + **without_de.min(axis=1).to_dict(), + **with_de.min(axis=1).to_dict(), + } def crossborder_capacity(self): @@ -1288,105 +2204,126 @@ likely overestimates the thermal capacity.
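+ + The capacity chosen for a country (or country pair) is distributed over the corresponding cross-border AC lines and DC links in proportion to their s_nom (lines) and p_nom (links), as implemented below.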
""" - if self.args['foreign_lines']['capacity'] != 'osmTGmod': + if self.args["foreign_lines"]["capacity"] != "osmTGmod": network = self.network - if self.args['foreign_lines']['capacity'] == 'ntc_acer': - cap_per_country = {'AT': 4900, - 'CH': 2695, - 'CZ': 1301, - 'DK': 913, - 'FR': 3593, - 'LU': 2912, - 'NL': 2811, - 'PL': 280, - 'SE': 217, - 'CZAT': 574, - 'ATCZ': 574, - 'CZPL': 312, - 'PLCZ': 312, - 'ATCH': 979, - 'CHAT': 979, - 'CHFR': 2087, - 'FRCH': 2087, - 'FRLU': 364, - 'LUFR': 364, - 'SEDK': 1928, - 'DKSE': 1928} - - elif self.args['foreign_lines']['capacity'] == 'thermal_acer': - cap_per_country = {'CH': 12000, - 'DK': 4000, - 'SEDK': 3500, - 'DKSE': 3500} + if self.args["foreign_lines"]["capacity"] == "ntc_acer": + cap_per_country = { + "AT": 4900, + "CH": 2695, + "CZ": 1301, + "DK": 913, + "FR": 3593, + "LU": 2912, + "NL": 2811, + "PL": 280, + "SE": 217, + "CZAT": 574, + "ATCZ": 574, + "CZPL": 312, + "PLCZ": 312, + "ATCH": 979, + "CHAT": 979, + "CHFR": 2087, + "FRCH": 2087, + "FRLU": 364, + "LUFR": 364, + "SEDK": 1928, + "DKSE": 1928, + } - else: - logger.info("args['foreign_lines']['capacity'] has to be " - "in ['osmTGmod', 'ntc_acer', 'thermal_acer']") + elif self.args["foreign_lines"]["capacity"] == "thermal_acer": + cap_per_country = { + "CH": 12000, + "DK": 4000, + "SEDK": 3500, + "DKSE": 3500, + } - if not network.lines[network.lines.country != 'DE'].empty: - weighting = network.lines.loc[network.lines.country != 'DE', 's_nom'].\ - groupby(network.lines.country).transform(lambda x: x/x.sum()) + elif self.args["foreign_lines"]["capacity"] == "tyndp2020": + cap_per_country = crossborder_capacity_tyndp2020() - weighting_links = network.links.loc[network.links.country != 'DE', 'p_nom'].\ - groupby(network.links.country).transform(lambda x: x/x.sum()) - network.lines["v_nom"] = network.lines.bus0.map(network.buses.v_nom) - for country in cap_per_country: + else: + logger.info( + "args['foreign_lines']['capacity'] has to be " + "in ['osmTGmod', 'ntc_acer', 'thermal_acer', 'tyndp2020']" + ) + + if not network.lines[network.lines.country != "DE"].empty: + weighting = ( + network.lines.loc[network.lines.country != "DE", "s_nom"] + .groupby(network.lines.country) + .transform(lambda x: x / x.sum()) + ) + + dc_lines = self.dc_lines() + + weighting_links = ( + dc_lines.loc[dc_lines.country != "DE", "p_nom"] + .groupby(dc_lines.country) + .transform(lambda x: x / x.sum()) + .fillna(0.0) + ) - index_HV = network.lines[(network.lines.country == country) &( - network.lines.v_nom == 110)].index - index_eHV = network.lines[(network.lines.country == country) &( - network.lines.v_nom > 110)].index - index_links = network.links[network.links.country == country].index + for country in cap_per_country: + index_HV = network.lines[ + (network.lines.country == country) + & (network.lines.v_nom == 110) + ].index + index_eHV = network.lines[ + (network.lines.country == country) + & (network.lines.v_nom > 110) + ].index + index_links = dc_lines[dc_lines.country == country].index if not network.lines[network.lines.country == country].empty: - network.lines.loc[index_HV, 's_nom'] = weighting[index_HV] * \ - cap_per_country[country] - - network.lines.loc[index_eHV, 's_nom'] = \ - weighting[index_eHV] * cap_per_country[country] - - if not network.links[network.links.country == country].empty: - network.links.loc[index_links, 'p_nom'] = \ - weighting_links[index_links] * cap_per_country\ - [country] - if country == 'SE': - network.links.loc[network.links.country == country, 'p_nom'] =\ - 
cap_per_country[country] - - if not network.lines[network.lines.country == (country+country)].empty: - i_HV = network.lines[(network.lines.v_nom == 110)&( - network.lines.country == country+country)].index - - i_eHV = network.lines[(network.lines.v_nom == 110)&( - network.lines.country == country+country)].index - - network.lines.loc[i_HV, 's_nom'] = \ - weighting[i_HV] * cap_per_country[country] - network.lines.loc[i_eHV, 's_nom'] = \ - weighting[i_eHV] * cap_per_country[country] - - if not network.links[network.links.country == (country+country)].empty: - i_links = network.links[network.links.country == - (country+country)].index - network.links.loc[i_links, 'p_nom'] = \ + network.lines.loc[index_HV, "s_nom"] = ( + weighting[index_HV] * cap_per_country[country] + ) + + network.lines.loc[index_eHV, "s_nom"] = ( + weighting[index_eHV] * cap_per_country[country] + ) + + if not dc_lines[dc_lines.country == country].empty: + network.links.loc[index_links, "p_nom"] = ( + weighting_links[index_links] * cap_per_country[country] + ) + if country == "SE": + network.links.loc[ + dc_lines[dc_lines.country == country].index, "p_nom" + ] = cap_per_country[country] + + if not network.lines[ + network.lines.country == (country + country) + ].empty: + i_HV = network.lines[ + (network.lines.v_nom == 110) + & (network.lines.country == country + country) + ].index + + i_eHV = network.lines[ + (network.lines.v_nom > 110) + & (network.lines.country == country + country) + ].index + + network.lines.loc[i_HV, "s_nom"] = ( + weighting[i_HV] * cap_per_country[country] + ) + network.lines.loc[i_eHV, "s_nom"] = ( + weighting[i_eHV] * cap_per_country[country] + ) + + if not dc_lines[dc_lines.country == (country + country)].empty: + i_links = dc_lines[ + dc_lines.country == (country + country) + ].index + network.links.loc[i_links, "p_nom"] = ( + weighting_links[i_links] * cap_per_country[country] + ) -def set_line_voltages(self): - """ - Adds voltage level to AC-lines - Returns - ------- - None. - - """ - self.network.lines['v_nom'] = self.network.lines.bus0.map( - self.network.buses.v_nom) - self.network.links['v_nom'] = self.network.links.bus0.map( - self.network.buses.v_nom) def set_branch_capacity(etrago): - """ Set branch capacity factor of lines and transformers, different factors for HV (110kV) and eHV (220kV, 380kV).
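+ If any line carries a time-dependent s_max_pu (e.g. from dynamic line rating), the factors are applied to the s_max_pu time series of all lines instead of to the static values, see the implementation below.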
@@ -1401,19 +2338,44 @@ def set_branch_capacity(etrago): args = etrago.args network.transformers["v_nom0"] = network.transformers.bus0.map( - network.buses.v_nom) - - network.lines.s_max_pu[network.lines.v_nom == 110] = \ - args['branch_capacity_factor']['HV'] + network.buses.v_nom + ) + + # If any line has a time-dependent s_max_pu, use the time-dependent + # factor for all lines, to avoid problems in the clustering + if not network.lines_t.s_max_pu.empty: + # Set time-dependent s_max_pu for + # lines without dynamic line rating to 1.0 + network.lines_t.s_max_pu[ + network.lines[ + ~network.lines.index.isin(network.lines_t.s_max_pu.columns) + ].index + ] = 1.0 + + # Multiply time-dependent s_max_pu with the static branch capacity factor + network.lines_t.s_max_pu[ + network.lines[network.lines.v_nom == 110].index + ] *= args["branch_capacity_factor"]["HV"] + + network.lines_t.s_max_pu[ + network.lines[network.lines.v_nom > 110].index + ] *= args["branch_capacity_factor"]["eHV"] + else: + network.lines.s_max_pu[network.lines.v_nom == 110] = args[ + "branch_capacity_factor" + ]["HV"] - network.lines.s_max_pu[network.lines.v_nom > 110] = \ - args['branch_capacity_factor']['eHV'] + network.lines.s_max_pu[network.lines.v_nom > 110] = args[ + "branch_capacity_factor" + ]["eHV"] - network.transformers.s_max_pu[network.transformers.v_nom0 == 110]\ - = args['branch_capacity_factor']['HV'] + network.transformers.s_max_pu[network.transformers.v_nom0 == 110] = args[ + "branch_capacity_factor" + ]["HV"] - network.transformers.s_max_pu[network.transformers.v_nom0 > 110]\ - = args['branch_capacity_factor']['eHV'] + network.transformers.s_max_pu[network.transformers.v_nom0 > 110] = args[ + "branch_capacity_factor" + ]["eHV"] def check_args(etrago): @@ -1431,50 +2393,467 @@ """ + names = [ + "eGon2035", + "eGon100RE", + "eGon2035_lowflex", + "eGon100RE_lowflex", + "status2019", + ] + + assert ( + etrago.args["scn_name"] in names + ), f"'scn_name' has to be in {names} but is {etrago.args['scn_name']}." + + assert ( + etrago.args["start_snapshot"] <= etrago.args["end_snapshot"] + ), "start_snapshot after end_snapshot" + + if etrago.args["gridversion"] is not None: + from saio.grid import egon_etrago_bus + + assert ( + etrago.args["gridversion"] + in pd.read_sql( + etrago.session.query(egon_etrago_bus).statement, + etrago.session.bind, + ).version.unique() + ), "gridversion does not exist" + + if etrago.args["snapshot_clustering"]["active"]: + # Assert that skip_snapshots and snapshot_clustering are not combined + # more information: https://github.com/openego/eTraGo/issues/691 + assert etrago.args["skip_snapshots"] is False, ( + "eTraGo does not support combining snapshot_clustering and" + " skip_snapshots. Please update your settings and choose either" + " snapshot_clustering or skip_snapshots." + ) + # typical periods + if etrago.args["snapshot_clustering"]["method"] == "typical_periods": + # typical days + + if etrago.args["snapshot_clustering"]["how"] == "daily": + assert ( + etrago.args["end_snapshot"] + / etrago.args["start_snapshot"] + % 24 + == 0 + ), ( + "Please select snapshots covering whole days when" + " choosing clustering to typical days." + ) + + if ( + etrago.args["snapshot_clustering"]["method"] + == "typical_periods" + ): + assert etrago.args["end_snapshot"] - etrago.args[ + "start_snapshot" + ] + 1 >= ( + 24 * etrago.args["snapshot_clustering"]["n_clusters"] + ), ( + "The number of selected snapshots is too small" + " for the chosen number of typical days."
+ ) + + # typical weeks + + if etrago.args["snapshot_clustering"]["how"] == "weekly": + assert ( + etrago.args["end_snapshot"] + / etrago.args["start_snapshot"] + % 168 + == 0 + ), ( + "Please select snapshots covering whole weeks when" + " choosing clustering to typical weeks." + ) + + if ( + etrago.args["snapshot_clustering"]["method"] + == "typical_periods" + ): + assert etrago.args["end_snapshot"] - etrago.args[ + "start_snapshot" + ] + 1 >= ( + 168 * etrago.args["snapshot_clustering"]["n_clusters"] + ), ( + "The number of selected snapshots is too small" + " for the chosen number of typical weeks." + ) + # typical months + + if etrago.args["snapshot_clustering"]["how"] == "monthly": + assert ( + etrago.args["end_snapshot"] + / etrago.args["start_snapshot"] + % 720 + == 0 + ), ( + "Please select snapshots covering whole months when" + " choosing clustering to typical months." + ) + + if ( + etrago.args["snapshot_clustering"]["method"] + == "typical_periods" + ): + assert etrago.args["end_snapshot"] - etrago.args[ + "start_snapshot" + ] + 1 >= ( + 720 * etrago.args["snapshot_clustering"]["n_clusters"] + ), ( + "The number of selected snapshots is too small" + " for the chosen number of typical months." + ) + + # segmentation + + elif etrago.args["snapshot_clustering"]["method"] == "segmentation": + assert etrago.args["end_snapshot"] - etrago.args[ + "start_snapshot" + ] + 1 >= ( + etrago.args["snapshot_clustering"]["n_segments"] + ), "Number of segments is higher than number of snapshots" + + if not etrago.args["method"]["pyomo"]: + logger.warning( + "Snapshot clustering constraints are" + " not yet correctly implemented without pyomo." + " Setting `args['method']['pyomo']` to `True`." + ) + etrago.args["method"]["pyomo"] = True + + if not etrago.args["method"]["pyomo"]: + try: + # The import isn't used, but just here to test for Gurobi. + # So we can make `flake8` stop complaining about the "unused + # import" via the appropriate `noqa` comment. + import gurobipy # noqa: F401 + except ModuleNotFoundError: + print( + "If you want to use nomopyomo you need to use the" + " solver gurobi and the package gurobipy." + " You can find more information and installation" + " instructions for gurobi here:" + " https://support.gurobi.com/hc/en-us/articles" + "/360044290292-How-do-I-install-Gurobi-for-Python-" + " For installation of gurobipy use pip." + ) + raise + + +def drop_sectors(self, drop_carriers): + """ + Manually drop sectors from the network. + Makes sure the network can be calculated without the dropped sectors. + + Parameters + ---------- + drop_carriers : array + List of sectors that will be dropped. + e.g. ['dsm', 'CH4', 'H2_saltcavern', 'H2_grid', + 'central_heat', 'rural_heat', 'central_heat_store', + 'rural_heat_store', 'Li ion'] means everything but AC - assert etrago.args['scn_name'] in ['Status Quo', 'NEP 2035', 'eGo 100'],\ - ("'scn_name' has to be in ['Status Quo', 'NEP 2035', 'eGo 100'] " - "but is " + etrago.args['scn_name']) + Returns + ------- + None.
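+ + Examples + -------- + As a usage sketch (assuming this function is bound as a method of a loaded Etrago instance called ``etrago``), keeping only the electrical sector could look like: + + >>> etrago.drop_sectors( + ... ['dsm', 'CH4', 'H2_saltcavern', 'H2_grid', + ... 'central_heat', 'rural_heat', 'central_heat_store', + ... 'rural_heat_store', 'Li ion'])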
- assert etrago.args['start_snapshot'] < etrago.args['end_snapshot'],\ - ("start_snapshot after end_snapshot") + """ - if etrago.args['gridversion'] != None: - ormclass = getattr(import_module('egoio.db_tables.grid'), - 'EgoPfHvTempResolution') + if self.scenario.scn_name == "eGon2035": + if "CH4" in drop_carriers: + # create gas generators from links + # in order to not lose them when dropping non-electric carriers + gas_to_add = ["central_gas_CHP", "industrial_gas_CHP", "OCGT"] + gen = self.network.generators + + for i in gas_to_add: + gen_empty = gen.drop(gen.index) + gen_empty.bus = self.network.links[ + self.network.links.carrier == i + ].bus1 + gen_empty.p_nom = ( + self.network.links[self.network.links.carrier == i].p_nom + * self.network.links[ + self.network.links.carrier == i + ].efficiency + ) + gen_empty.marginal_cost = ( + self.network.links[ + self.network.links.carrier == i + ].marginal_cost + + 35.851 + ) # add fuel costs (source: NEP) + gen_empty.efficiency = 1 + gen_empty.carrier = i + gen_empty.scn_name = "eGon2035" + gen_empty.p_nom_extendable = False + gen_empty.sign = 1 + gen_empty.p_min_pu = 0 + gen_empty.p_max_pu = 1 + gen_empty.control = "PV" + gen_empty.fillna(0, inplace=True) + self.network.import_components_from_dataframe( + gen_empty, "Generator" + ) + + self.network.mremove( + "Bus", + self.network.buses[ + self.network.buses.carrier.isin(drop_carriers) + ].index, + ) + + for one_port in self.network.iterate_components( + ["Load", "Generator", "Store", "StorageUnit"] + ): + self.network.mremove( + one_port.name, + one_port.df[~one_port.df.bus.isin(self.network.buses.index)].index, + ) - assert etrago.args['gridversion'] in pd.read_sql( - etrago.session.query(ormclass).statement, etrago.session.bind - ).version.unique(), ("gridversion does not exist") + for two_port in self.network.iterate_components( + ["Line", "Link", "Transformer"] + ): + self.network.mremove( + two_port.name, + two_port.df[ + ~two_port.df.bus0.isin(self.network.buses.index) + ].index, + ) - if etrago.args['snapshot_clustering']['active']: + self.network.mremove( + two_port.name, + two_port.df[ + ~two_port.df.bus1.isin(self.network.buses.index) + ].index, + ) - assert etrago.args['end_snapshot']/\ - etrago.args['start_snapshot'] % 24 == 0,\ - ("Please select snapshots covering whole days when choosing " - "snapshot clustering") + logger.info("The following sectors are dropped: " + str(drop_carriers)) - assert etrago.args['end_snapshot']-etrago.args['start_snapshot'] > \ - (24 *etrago.args['snapshot_clustering']['n_clusters']),\ - ("Number of selected days is smaller than number of " - "representitive snapshots") - if not etrago.args['method']['pyomo']: - logger.warning("Snapshot clustering constraints are " - "not yet implemented without pyomo. " - "args['method']['pyomo'] is set to True.") - etrago.args['method']['pyomo'] = True +def update_busmap(self, new_busmap): + """ + Update busmap after any clustering process + + Parameters + ---------- + new_busmap : dictionary + busmap used to cluster the network. - if not etrago.args['method']['pyomo']: + Returns + ------- + None.
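+ + Notes + ----- + If a busmap from an earlier clustering step already exists, the two maps are composed so that every original bus points to its final cluster, i.e. pd.Series(busmap).map(new_busmap), as implemented below.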
+ """ + if "busmap" not in self.busmap.keys(): + self.busmap["busmap"] = new_busmap + self.busmap["orig_network"] = pypsa.Network() + pypsa.io.import_components_from_dataframe( + self.busmap["orig_network"], self.network.buses, "Bus" + ) + pypsa.io.import_components_from_dataframe( + self.busmap["orig_network"], self.network.lines, "Line" + ) + pypsa.io.import_components_from_dataframe( + self.busmap["orig_network"], self.network.links, "Link" + ) + + else: + self.busmap["busmap"] = ( + pd.Series(self.busmap["busmap"]).map(new_busmap).to_dict() + ) + + +def adjust_CH4_gen_carriers(self): + """Precise the carrier for the generators with CH4 carrier + + For the eGon2035 scenario, the generators with carrier CH4 + represent the prodution od biogas and methan. In the data model, + these two differents types are differenciated only by the + marginal cost of the generator. This function introduces a + carrier distion (CH4_biogas and CH4_NG) in order to avoid the + clustering of these two types of generator together and facilitate + the contraint applying differently to each of them. + """ + + if self.args["scn_name"] == "eGon2035": + # Define marginal cost + marginal_cost_def = {"CH4": 40.9765, "biogas": 25.6} + + engine = db.connection(section=self.args["db"]) try: - import gurobipy - except ModuleNotFoundError: - print( - "If you want to use nomopyomo you need to use the " - "solver gurobi and the package gurobipy. " - "You can find more information and installation " - "instructions for gurobi here: " - "https://support.gurobi.com/hc/en-us/articles/360044290292-How-do-I-install-Gurobi-for-Python- " - "For installation of gurobipy use pip.") - raise + sql = f""" + SELECT gas_parameters + FROM scenario.egon_scenario_parameters + WHERE name = '{self.args["scn_name"]}';""" + df = pd.read_sql(sql, engine) + marginal_cost = df["gas_parameters"][0]["marginal_cost"] + except sqlalchemy.exc.ProgrammingError: + marginal_cost = marginal_cost_def + + self.network.generators.loc[ + self.network.generators[ + (self.network.generators.carrier == "CH4") + & ( + self.network.generators.marginal_cost + == marginal_cost["CH4"] + ) + & ( + self.network.generators.bus.astype(str).isin( + self.network.buses.index[ + self.network.buses.country == "DE" + ] + ) + ) + ].index, + "carrier", + ] = "CH4_NG" + + self.network.generators.loc[ + self.network.generators[ + (self.network.generators.carrier == "CH4") + & ( + self.network.generators.marginal_cost + == marginal_cost["biogas"] + ) + & ( + self.network.generators.bus.astype(str).isin( + self.network.buses.index[ + self.network.buses.country == "DE" + ] + ) + ) + ].index, + "carrier", + ] = "CH4_biogas" + + +def residual_load(network, sector="electricity"): + """ + Calculate the residual load for the specified sector. + In case of the electricity sector residual load is calculated using + all AC loads and all renewable generators with carriers + 'wind_onshore', 'wind_offshore', 'solar', 'solar_rooftop', + 'biomass', 'run_of_river', and 'reservoir'. + + In case of the central heat sector residual load is calculated using + all central heat loads and all renewable generators with carriers + 'solar_thermal_collector' and 'geo_thermal'. + + Parameters + ----------- + network : PyPSA network + Network to retrieve load and generation time series from, needed + to determine residual load. + sector : str + Sector to determine residual load for. Possible options are + 'electricity' and 'central_heat'. Default: 'electricity'. 
+ + Returns + -------- + pd.DataFrame + Dataframe with residual load for each bus in the network. + Columns of the dataframe contain the corresponding bus name and + index of the dataframe is a datetime index with the + corresponding time step. + + """ + + if sector == "electricity": + carrier_gen = [ + "wind_onshore", + "wind_offshore", + "solar", + "solar_rooftop", + "biomass", + "run_of_river", + "reservoir", + ] + carrier_load = ["AC"] + elif sector == "central_heat": + carrier_gen = ["solar_thermal_collector", "geo_thermal"] + carrier_load = ["central_heat"] + else: + raise ValueError( + f"Specified sector {sector} is not a valid option." + " Valid options are 'electricity' and 'central_heat'." + ) + # Calculate loads per bus and timestep + loads = network.loads[network.loads.carrier.isin(carrier_load)] + loads_per_bus = ( + network.loads_t.p_set[loads.index].groupby(loads.bus, axis=1).sum() + ) + + # Calculate dispatch of renewable generators per load bus and timestep + renewable_dispatch = pd.DataFrame( + index=loads_per_bus.index, columns=loads_per_bus.columns, data=0 + ) + + renewable_generators = network.generators[ + network.generators.carrier.isin(carrier_gen) + ] + + renewable_dispatch[renewable_generators.bus.unique()] = ( + network.generators_t.p[renewable_generators.index] + .groupby(renewable_generators.bus, axis=1) + .sum() + ) + + return loads_per_bus - renewable_dispatch + + +def manual_fixes_datamodel(etrago): + """Apply temporary fixes to the data model until a new egon-data run + is available + + Parameters + ---------- + etrago : :class:`Etrago + Overall container of Etrago + + Returns + ------- + None. + + """ + # Set line type + etrago.network.lines.type = "" + + # Set lifetime of storage_units, transformers and lines + etrago.network.storage_units.lifetime = 27.5 + etrago.network.transformers.lifetime = 40 + etrago.network.lines.lifetime = 40 + + # Set efficiencies of CHP + etrago.network.links.loc[ + etrago.network.links[ + etrago.network.links.carrier.str.contains("CHP") + ].index, + "efficiency", + ] = 0.43 + + # Enlarge gas boilers as backup heat supply + etrago.network.links.loc[ + etrago.network.links[ + etrago.network.links.carrier.str.contains("gas_boiler") + ].index, + "p_nom", + ] *= 1000 + + # Set p_max_pu for run of river and reservoir + etrago.network.generators.loc[ + etrago.network.generators[ + etrago.network.generators.carrier.isin( + ["run_of_river", "reservoir"] + ) + ].index, + "p_max_pu", + ] = 0.65 + + # Set costs for CO2 from DAC needed for methanation + etrago.network.links.loc[ + etrago.network.links.carrier == "H2_to_CH4", "marginal_cost" + ] = 25 diff --git a/noxfile.py b/noxfile.py new file mode 100644 index 000000000..07366ba12 --- /dev/null +++ b/noxfile.py @@ -0,0 +1,78 @@ +from pathlib import Path +from pprint import pformat + +import nox + +cleaned = [ + "etrago/cluster/disaggregation.py", + "etrago/cluster/electrical.py", + "etrago/cluster/gas.py", + "etrago/cluster/snapshot.py", + "etrago/cluster/spatial.py", + "etrago/tools/calc_results.py", + "etrago/tools/execute.py", + "etrago/tools/extendable.py", + "etrago/tools/io.py", + "etrago/tools/network.py", + "etrago/tools/utilities.py", + "noxfile.py", + "setup.py", +] + + +def setdefaults(session): + session.env["PYTHONUNBUFFERED"] = "yes" + + +@nox.session(python="3") +def check(session): + """Run custom checks.""" + setdefaults(session) + assert cleaned == sorted(set(cleaned)), ( + "The list of cleaned files contains duplicates and/or isn't sorted" + " alphabetically."
+ f"\nExpected:\n{pformat(sorted(set(cleaned)))}" + f"\nGot:\n{pformat(cleaned)}" + ) + + +@nox.session(python="3") +def black(session): + """Check for happy little style accidents with `black`.""" + setdefaults(session) + session.install("black") + session.run("black", "--check", "--diff", *cleaned) + + +@nox.session(python="3") +def isort(session): + """Check import ordering with `isort`.""" + setdefaults(session) + session.install("isort >= 5") + session.run("isort", "--check-only", "--diff", *cleaned) + + +@nox.session(python="3") +def flake8(session): + """Check for happy little style accidents with `flake8`.""" + setdefaults(session) + session.install("Flake8-pyproject", "flake8") + session.run("flake8", "--ignore=E722, W605", *cleaned) + + +@nox.session(python=["3", "3.8", "3.9", "3.10", "3.11"]) +def build(session): + """Build the package and check for packaging errors.""" + setdefaults(session) + session.install("twine") + session.run("python", "setup.py", "bdist", "bdist_wheel") + session.run("twine", "check", "dist/eTraGo*") + + +@nox.session(python=["3", "3.8", "3.9", "3.10", "3.11"]) +def install(session): + """Install the package.""" + setdefaults(session) + session.env["SKLEARN_ALLOW_DEPRECATED_SKLEARN_PACKAGE_INSTALL"] = "False" + session.run("python", "-mpip", "install", "--upgrade", "pip") + session.run("python", "-mpip", "install", *Path("dist").glob("*.whl")) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..d53c0e841 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,22 @@ +[tool.black] +exclude = ''' +/( + \.git + | build + | dist +)/ +''' +include = '\.pyi?$' +line-length = 79 +target-version = ["py38", "py39", "py310", "py311"] + +[tool.flake8] +max-line-length = 79 +extend-ignore = ["E203", "E741", "W503"] +exclude = ["build", "dist"] + +[tool.isort] +combine_as_imports = true +from_first = true +line_length = 79 +profile = "black" diff --git a/requirements-doc.txt b/requirements-doc.txt index abb50f991..864c63277 100644 --- a/requirements-doc.txt +++ b/requirements-doc.txt @@ -1,12 +1,12 @@ # Packages for read the docs # Using single requirments for docs, see: # https://github.com/rtfd/readthedocs.org/issues/2070 -sphinx_rtd_theme -pandas >= 0.25, <= 0.25 -pypsa >= 0.11.0, <= 0.11.0 -numpy == 1.16.2 -numpydoc == 0.7.0 +sphinx_rtd_theme > 1.2.2 +pypsa == 0.20.1 +numpydoc sqlalchemy geoalchemy2 matplotlib nbsphinx +saio +pyomo != 6.4.3 \ No newline at end of file diff --git a/setup.py b/setup.py old mode 100644 new mode 100755 index 66b08c52a..efd38cd85 --- a/setup.py +++ b/setup.py @@ -1,45 +1,83 @@ -import os +from os.path import dirname, join +import io +import re + from setuptools import find_packages, setup -__copyright__ = ("Flensburg University of Applied Sciences, " - "Europa-Universität Flensburg, " - "Centre for Sustainable Energy Systems, " - "DLR-Institute for Networked Energy Systems") +__copyright__ = ( + "Flensburg University of Applied Sciences, " + "Europa-Universität Flensburg, " + "Centre for Sustainable Energy Systems, " + "DLR-Institute for Networked Energy Systems" +) __license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)" -__author__ = ("ulfmueller, wolfbunke, BartelsJ, ClaraBuettner, gnn, " - "simnh, lukasol, s3pp, MGlauer, kimvk, MarlonSchlemminger, " - "mariusves") +__author__ = ( + "ulfmueller, wolfbunke, BartelsJ, ClaraBuettner, gnn, " + "simnh, lukasol, s3pp, MGlauer, kimvk, MarlonSchlemminger, " + "mariusves", + "CarlosEpia", + "KathiEsterl", + "pieterhexen", + "fwitte", + "AmeliaNadal", +) + + 
+def read(*names, **kwargs): + with io.open( + join(dirname(__file__), *names), + encoding=kwargs.get("encoding", "utf8"), + ) as fh: + return fh.read() setup( - name='eTraGo', - author='DLR VE, ZNES Flensburg', - author_email='', + name="eTraGo", + author="DLR VE, ZNES Flensburg", + author_email="", description="electric transmission grid optimization", - version='0.8.0', - url='https://github.com/openego/eTraGo', + long_description="{}".format( + re.compile("^.. start-badges.*^.. end-header", re.M | re.S).sub( + "", read("README.rst") + ) + ), + long_description_content_type="text/x-rst", + version="0.9.0", + url="https://github.com/openego/eTraGo", license="GNU Affero General Public License Version 3 (AGPL-3.0)", packages=find_packages(), include_package_data=True, - install_requires=['egoio == 0.4.7', - 'scikit-learn', - 'sqlalchemy == 1.3.16', - 'geoalchemy2 >= 0.3.0, <=0.4.0', - 'matplotlib == 3.0.3', - 'tsam == 0.9.9', - 'shapely', - 'oedialect', - 'pyproj == 2.0.2', - 'tilemapbase == 0.4.5', - 'pypsa == 0.17.1', - 'setuptools >= 54.2.0'], + install_requires=[ + "geoalchemy2 >= 0.3.0", + "geopandas", + "keyring", + "loguru", + "matplotlib >= 3.0.3", + "oedialect", + # Fix upper version limits for pyomo and pandas + # Related to problems with old pypsa version + "pandas < 2", + "pyomo>6.4, <6.6, !=6.4.3", + "pypsa == 0.20.1", + "rtree", + "saio", + "scikit-learn", + "setuptools >= 54.2.0", + "shapely", + "sqlalchemy < 2", + "tables < 3.9", + "tilemapbase == 0.4.5", + "tsam", + ], extras_require={ - 'docs': [ - 'sphinx >= 1.4', - 'sphinx_rtd_theme'], - 'gurobipy':['gurobipy']}, - package_data={ - 'etrago': [ - os.path.join('tools', '*.json')] - } + "docs": [ + "nbsphinx", + "numpydoc", + "sphinx >= 1.4", + "sphinx_rtd_theme", + ], + "gurobipy": ["gurobipy"], + "cartopy": ["cartopy", "requests"], + }, + package_data={"etrago": [join("tools", "*.json")]}, )