
Commit

Merge pull request #12 from PowerGridModel/feature/pgm-tpf
New benchmark tpf-pgm migrated from internal repo
Jerry-Jinfeng-Guo authored Jul 5, 2024
2 parents 2677d9d + 39b9b11 commit c1bc96a
Showing 4 changed files with 372 additions and 1 deletion.
195 changes: 195 additions & 0 deletions PGM-TPF.ipynb
@@ -0,0 +1,195 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Introduction\n",
"\n",
"This notebook presents a benchmark of Power Grid Model (PGM) against TensorPowerFlow (TPF). Note that the performance gain of TPF comes purely from its data formulation; no advantage is taken of Intel MKL or CUDA."
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import warnings\n",
"from time import time\n",
"from tqdm import tqdm\n",
"\n",
"import numpy as np\n",
"import power_grid_model as pgm\n",
"from generate_fictional_dataset import generate_fictional_grid_pgm_tpf\n",
"from plotter import BenchmarkPlotter\n",
"from power_grid_model.validation import errors_to_string, validate_batch_data, validate_input_data\n",
"from tensorpowerflow import GridTensor\n",
"\n",
"warnings.filterwarnings(\"ignore\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Grid configuration: a dictionary mapping the exact PGM calculation methods to their display names\n"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"pgm_method_dict = {\n",
" \"iterative_current\": \"PGM Iterative Current\",\n",
" \"newton_raphson\": \"PGM Newton-Raphson\",\n",
"}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Benchmark experiment function: "
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"def experiment(n_feeder=20, n_node_per_feeder=50, n_step=1000, log=False):\n",
" def log_print(*args):\n",
" if log:\n",
" print(*args)\n",
"\n",
" # fictional grid parameters\n",
" cable_length_km_min = 0.8\n",
" cable_length_km_max = 1.2\n",
" load_p_w_min = 0.4e6 * 0.8\n",
" load_p_w_max = 0.4e6 * 1.2\n",
" pf = 0.95\n",
"\n",
" load_scaling_min = 0.5\n",
" load_scaling_max = 1.5\n",
"\n",
" # gen grid data\n",
" fictional_dataset = generate_fictional_grid_pgm_tpf(\n",
" n_node_per_feeder=n_node_per_feeder,\n",
" n_feeder=n_feeder,\n",
" cable_length_km_min=cable_length_km_min,\n",
" cable_length_km_max=cable_length_km_max,\n",
" load_p_w_max=load_p_w_max,\n",
" load_p_w_min=load_p_w_min,\n",
" pf=pf,\n",
" n_step=n_step,\n",
" load_scaling_min=load_scaling_min,\n",
" load_scaling_max=load_scaling_max,\n",
" )\n",
" # unpack data\n",
" pgm_dataset = fictional_dataset[\"pgm_dataset\"]\n",
" pgm_update_dataset = fictional_dataset[\"pgm_update_dataset\"]\n",
" tpf_node_data = fictional_dataset[\"tpf_grid_nodes\"]\n",
" tpf_line_data = fictional_dataset[\"tpf_grid_lines\"]\n",
" tpf_time_series_p = fictional_dataset[\"tpf_time_series_p\"]\n",
" tpf_time_series_q = fictional_dataset[\"tpf_time_series_q\"]\n",
"\n",
" # validate data\n",
" log_print(errors_to_string(validate_input_data(pgm_dataset)))\n",
" log_print(errors_to_string(validate_batch_data(pgm_dataset, pgm_update_dataset)))\n",
"\n",
" res_pgm = []\n",
" # create grids, run pf's and time them\n",
" # pgm - all 4 methods\n",
" pgm_methods = [\"iterative_current\", \"newton_raphson\"]\n",
" for method in pgm_methods:\n",
" pgm_start_time = time()\n",
" model_instance = pgm.PowerGridModel(pgm_dataset)\n",
" start = time()\n",
" _ = model_instance.calculate_power_flow(\n",
" symmetric=True,\n",
" calculation_method=method,\n",
" update_data=pgm_update_dataset,\n",
" output_component_types=[\"node\", \"line\"],\n",
" max_iterations=10000,\n",
" )\n",
" end = time()\n",
" pgm_end_time = time()\n",
" res_pgm.append(end - start)\n",
" log_print(f\"{pgm_method_dict[method]}: {end - start}\")\n",
" log_print(f\"Total time{pgm_method_dict[method]}: {pgm_end_time - pgm_start_time}\")\n",
"\n",
" # tpf\n",
" tpf_time_start = time()\n",
" tpf_instance = GridTensor(\n",
" node_file_path=\"\",\n",
" lines_file_path=\"\",\n",
" from_file=False,\n",
" nodes_frame=tpf_node_data,\n",
" lines_frame=tpf_line_data,\n",
" gpu_mode=False,\n",
" )\n",
" tpf_time_end = time()\n",
"\n",
" tpf_functions = [\"run_pf_tensor\"]\n",
" res_tpf = []\n",
" for function_name in tpf_functions:\n",
" start = time()\n",
" _ = getattr(tpf_instance, function_name)(active_power=tpf_time_series_p, reactive_power=tpf_time_series_q)\n",
" end = time()\n",
" res_tpf.append(end - start)\n",
" log_print(f\"TensorPowerFlow.{function_name}: {end - start}\")\n",
"\n",
" log_print(f\"TensorPowerFlow instancing: {tpf_time_end - tpf_time_start}\")\n",
"\n",
" return {\"entry pgm\": pgm_methods, \"result pgm\": res_pgm, \"entry tpf\": tpf_functions, \"result tpf\": res_tpf}"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Now run the different experiment configurations."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"exp_options = [[20, 1], [20, 5], [20, 10], [20, 25], [40, 25], [40, 50]]\n",
"for n in tqdm([10, 100, 1000, 10000], desc=\"Overall Progress\"):\n",
" plotter = BenchmarkPlotter(n_steps=n)\n",
" for option in tqdm(exp_options, desc=f\"Processing n={n}\"):\n",
" res = experiment(option[0], option[1], n)\n",
" plotter.add(res, option[0] * option[1])\n",
" plotter.plot()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.0"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
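For a quick sanity check outside the full benchmark sweep, the experiment function above can be run once with logging enabled. The sketch below is illustrative only: it assumes the notebook's imports and the experiment definition are in scope, and the grid size and step count are placeholder values.

# Single illustrative run: a 20-feeder grid with 10 nodes per feeder and 100 time steps.
# Assumes experiment() and its imports from PGM-TPF.ipynb are available in the current session.
single_run = experiment(n_feeder=20, n_node_per_feeder=10, n_step=100, log=True)

# The returned dictionary pairs method names with their measured batch runtimes in seconds.
for name, runtime in zip(single_run["entry pgm"], single_run["result pgm"]):
    print(f"{name}: {runtime:.3f} s")
for name, runtime in zip(single_run["entry tpf"], single_run["result tpf"]):
    print(f"{name}: {runtime:.3f} s")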
3 changes: 3 additions & 0 deletions PGM-TPF.ipynb.license
@@ -0,0 +1,3 @@
SPDX-FileCopyrightText: Contributors to the Power Grid Model project <[email protected]>

SPDX-License-Identifier: MPL-2.0
129 changes: 128 additions & 1 deletion generate_fictional_dataset.py
@@ -135,8 +135,18 @@ def generate_fictional_grid(
for line, line_length in zip(pgm_dataset["line"], length)
}

# add loads
n_load = n_node - 1
# add sym load
pgm_dataset["sym_load"] = pgm.initialize_array("input", "sym_load", n_load)
pgm_dataset["sym_load"]["id"] = np.arange(n_node + n_line, n_node + n_line + n_load, dtype=np.int32)
pgm_dataset["sym_load"]["node"] = pgm_dataset["node"]["id"][1:]
pgm_dataset["sym_load"]["status"] = 1
pgm_dataset["sym_load"]["type"] = pgm.LoadGenType.const_power
pgm_dataset["sym_load"]["p_specified"] = np.random.uniform(
low=load_p_w_min / 3.0, high=load_p_w_max / 3.0, size=n_load
)
pgm_dataset["sym_load"]["q_specified"] = pgm_dataset["sym_load"]["p_specified"] * np.sqrt(1 - pf**2) / pf
# add asym load
# pgm
pgm_dataset["asym_load"] = pgm.initialize_array("input", "asym_load", n_load)
pgm_dataset["asym_load"]["id"] = np.arange(n_node + n_line, n_node + n_line + n_load, dtype=np.int32)
@@ -283,3 +293,120 @@ def generate_fictional_grid(
"pp_time_series_dataset": pp_dataset,
"dss_file": output_path,
}


def generate_fictional_grid_pgm_tpf(
n_feeder: int,
n_node_per_feeder: int,
cable_length_km_min: float,
cable_length_km_max: float,
load_p_w_max: float,
load_p_w_min: float,
pf: float,
n_step: int,
load_scaling_min: float,
load_scaling_max: float,
seed=0,
):
rng = np.random.default_rng(seed)

n_node = n_feeder * n_node_per_feeder + 1
pgm_dataset = dict()
tpf_grid_nodes = pd.DataFrame()
tpf_grid_lines = pd.DataFrame()

# node
# pgm
pgm_dataset["node"] = pgm.initialize_array("input", "node", n_node)
pgm_dataset["node"]["id"] = np.arange(n_node, dtype=np.int32)
pgm_dataset["node"]["u_rated"] = u_rated
# tpf
tpf_grid_nodes["NODES"] = np.arange(1, n_node + 1, dtype=np.int32)
tpf_grid_nodes["Tb"] = np.zeros_like(pgm_dataset["node"]["id"])
tpf_grid_nodes["Tb"][0] = 1
tpf_grid_nodes["PD"] = np.zeros_like(pgm_dataset["node"]["id"])
tpf_grid_nodes["QD"] = np.zeros_like(pgm_dataset["node"]["id"])
tpf_grid_nodes["Pct"] = np.ones_like(pgm_dataset["node"]["id"])
tpf_grid_nodes["Ict"] = np.zeros_like(pgm_dataset["node"]["id"])
tpf_grid_nodes["Zct"] = np.zeros_like(pgm_dataset["node"]["id"])

# line
n_line = n_node - 1
to_node_feeder = np.arange(1, n_node_per_feeder + 1, dtype=np.int32)
to_node_feeder = to_node_feeder.reshape(1, -1) + np.arange(0, n_feeder).reshape(-1, 1) * n_node_per_feeder
to_node = to_node_feeder.ravel()
from_node_feeder = np.arange(1, n_node_per_feeder, dtype=np.int32)
from_node_feeder = from_node_feeder.reshape(1, -1) + np.arange(0, n_feeder).reshape(-1, 1) * n_node_per_feeder
from_node_feeder = np.concatenate((np.zeros(shape=(n_feeder, 1), dtype=np.int32), from_node_feeder), axis=1)
from_node = from_node_feeder.ravel()
length = rng.uniform(low=cable_length_km_min, high=cable_length_km_max, size=n_line)
# pgm
pgm_dataset["line"] = pgm.initialize_array("input", "line", n_line)
pgm_dataset["line"]["id"] = np.arange(n_node, n_node + n_line, dtype=np.int32)
pgm_dataset["line"]["from_node"] = from_node
pgm_dataset["line"]["to_node"] = to_node
pgm_dataset["line"]["from_status"] = 1
pgm_dataset["line"]["to_status"] = 1
for attr_name, attr in cable_param.items():
if attr_name in ["i_n", "tan1", "tan0"]:
pgm_dataset["line"][attr_name] = attr
else:
pgm_dataset["line"][attr_name] = attr * length
# tpf
tpf_grid_lines["FROM"] = from_node + np.ones_like(from_node)
tpf_grid_lines["TO"] = to_node + np.ones_like(to_node)
tpf_grid_lines["R"] = pgm_dataset["line"]["r1"]
tpf_grid_lines["X"] = pgm_dataset["line"]["x1"]
tpf_grid_lines["B"] = np.zeros_like(to_node)
tpf_grid_lines["STATUS"] = np.ones_like(to_node)
tpf_grid_lines["TAP"] = np.ones_like(to_node)

# add load
n_load = n_node - 1
# pgm
pgm_dataset["sym_load"] = pgm.initialize_array("input", "sym_load", n_load)
pgm_dataset["sym_load"]["id"] = np.arange(n_node + n_line, n_node + n_line + n_load, dtype=np.int32)
pgm_dataset["sym_load"]["node"] = pgm_dataset["node"]["id"][1:]
pgm_dataset["sym_load"]["status"] = 1
pgm_dataset["sym_load"]["type"] = pgm.LoadGenType.const_power
pgm_dataset["sym_load"]["p_specified"] = rng.uniform(low=load_p_w_min / 3.0, high=load_p_w_max / 3.0, size=n_load)
pgm_dataset["sym_load"]["q_specified"] = pgm_dataset["sym_load"]["p_specified"] * np.sqrt(1 - pf**2) / pf
# tpf
tpf_grid_nodes["PD"][1:] = pgm_dataset["sym_load"]["p_specified"]
tpf_grid_nodes["QD"][1:] = pgm_dataset["sym_load"]["q_specified"]

# source
# pgm
source_id = n_node + n_line + n_load
pgm_dataset["source"] = pgm.initialize_array("input", "source", 1)
pgm_dataset["source"]["id"] = source_id
pgm_dataset["source"]["node"] = source_node
pgm_dataset["source"]["status"] = 1
pgm_dataset["source"]["u_ref"] = source_u_ref
pgm_dataset["source"]["sk"] = source_sk
pgm_dataset["source"]["rx_ratio"] = source_rx
pgm_dataset["source"]["z01_ratio"] = source_01
# tpf

# generate time series
rng = np.random.default_rng(seed)

# pgm
n_load = pgm_dataset["sym_load"].size
scaling = rng.uniform(low=load_scaling_min, high=load_scaling_max, size=(n_step, n_load))
sym_load_profile = pgm.initialize_array("update", "sym_load", (n_step, n_load))
sym_load_profile["id"] = pgm_dataset["sym_load"]["id"].reshape(1, -1)
sym_load_profile["p_specified"] = pgm_dataset["sym_load"]["p_specified"].reshape(1, -1) * scaling
sym_load_profile["q_specified"] = pgm_dataset["sym_load"]["q_specified"].reshape(1, -1) * scaling
# tpf - in kW
tpf_time_series_p = sym_load_profile["p_specified"] * 0.001
tpf_time_series_q = sym_load_profile["q_specified"] * 0.001

return {
"pgm_dataset": pgm_dataset,
"pgm_update_dataset": {"sym_load": sym_load_profile},
"tpf_grid_nodes": tpf_grid_nodes,
"tpf_grid_lines": tpf_grid_lines,
"tpf_time_series_p": tpf_time_series_p,
"tpf_time_series_q": tpf_time_series_q,
}
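In generate_fictional_grid_pgm_tpf, reactive power is derived from active power through the power factor, q = p * sqrt(1 - pf**2) / pf, and the same load profiles are handed to TPF scaled by 0.001 (kW/kvar instead of W/var). A minimal check of that arithmetic, using example numbers rather than values produced by the generator:

import numpy as np

# Example loads in W; the power factor matches the notebook's default of 0.95.
pf = 0.95
p_w = np.array([320e3, 400e3, 480e3])

# Reactive power per the relation used above: q = p * sqrt(1 - pf^2) / pf.
q_var = p_w * np.sqrt(1 - pf**2) / pf

# TPF consumes the profiles in kW/kvar, hence the 0.001 scaling in the generator.
p_kw = p_w * 0.001
q_kvar = q_var * 0.001

# The implied power factor round-trips to 0.95 for every load.
assert np.allclose(p_w / np.sqrt(p_w**2 + q_var**2), pf)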
46 changes: 46 additions & 0 deletions plotter.py
@@ -0,0 +1,46 @@
# SPDX-FileCopyrightText: Contributors to the Power Grid Model project <[email protected]>
#
# SPDX-License-Identifier: MPL-2.0

from matplotlib import pyplot as plt


class BenchmarkPlotter:
def __init__(self, n_steps=1000):
self.data_pgm_ic = []
self.data_pgm_nr = []
self.data_tpf_tf = []
self.n_nodes = []
self.n_steps = n_steps

def add(self, res, n_nodes):
res_pgm = res["result pgm"]
res_tpf = res["result tpf"]
self.data_pgm_ic.append(res_pgm[0])
self.data_pgm_nr.append(res_pgm[1])
self.data_tpf_tf.append(res_tpf[0])
self.n_nodes.append(n_nodes)

def plot(self, log_scale=False):
_, ax = plt.subplots(figsize=(8, 5))
data_lists = [
self.data_pgm_ic,
self.data_pgm_nr,
self.data_tpf_tf,
]
labels = ["pgm ic", "pgm nr", "tpf"]
styles = ["--", "--", "-"]
for data_list, label, style in zip(data_lists, labels, styles):
if log_scale:
ax.semilogy(self.n_nodes, data_list, label=label, linestyle=style)
else:
ax.plot(self.n_nodes, data_list, label=label, linestyle=style)

ax.set_title(f"PGM vs TPF {self.n_steps} steps")
ax.set_xlabel("Number of Nodes")
ax.set_ylabel("Execution Time (s)")

ax.legend()
# plt.savefig(f"data/benchmark_{self.n_steps}.pdf")
plt.show()

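A minimal usage sketch for BenchmarkPlotter, assuming result dictionaries shaped like the one returned by the notebook's experiment function; the timings and node counts below are placeholders, not measured values:

from plotter import BenchmarkPlotter

# Placeholder results in the shape produced by experiment().
res_small = {
    "entry pgm": ["iterative_current", "newton_raphson"],
    "result pgm": [0.42, 0.55],
    "entry tpf": ["run_pf_tensor"],
    "result tpf": [0.08],
}
res_large = {
    "entry pgm": ["iterative_current", "newton_raphson"],
    "result pgm": [1.9, 2.4],
    "entry tpf": ["run_pf_tensor"],
    "result tpf": [0.31],
}

plotter = BenchmarkPlotter(n_steps=1000)
plotter.add(res_small, n_nodes=500)    # n_nodes = n_feeder * n_node_per_feeder
plotter.add(res_large, n_nodes=2000)
plotter.plot(log_scale=True)           # log y-axis; omit the flag for a linear scale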