benchmark.py
from time import perf_counter
import shutil
import pandas as pd
import tempfile
from pathlib import Path
from contextlib import contextmanager
from distutils.dir_util import copy_tree
import subprocess

TOOLCHAINS = [
    # name, command, cairo version
    ("protostar", ["protostar", "test"], 1),
    ("forge", ["snforge"], 2),
    ("cairo_test", ["scarb", "cairo-test"], 2),
]

# (unit, integration)
TESTS = [(x, x) for x in range(1, 8)]

CASES_PER_UNIT_TEST = 25
CASES_PER_INTEGRATION_TEST = 15
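# Note: configuration (u, i) from TESTS produces u + i test files containing
# u * 25 unit cases and i * 15 integration cases; the largest configuration (7, 7)
# is 14 files with 175 unit and 105 integration cases.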


def log(x):
    print(f"[BENCHMARK] {x}")


@contextmanager
def benchmark_dir(name: str, cairo_version: int, unit: int, integration: int) -> Path:
    """Set up a temporary copy of the template project, populate it with `unit`
    unit-test files and `integration` integration-test files for the given
    toolchain, and yield the project path."""
    with tempfile.TemporaryDirectory() as tmp:
        tmp = Path(tmp)
        data = Path(__file__).parent / "data"
        copy_tree(str(data / "project"), str(tmp))
        src = tmp / "src"
        # For cairo_test the test files live under src/tests; the other
        # toolchains read them from the top-level tests/ directory.
        tests = tmp / "tests" if name != "cairo_test" else src / "tests"
        for i in range(unit):
            shutil.copy(
                data / "unit_test_template.cairo", tests / f"unit{i}_test.cairo"
            )
        for i in range(integration):
            shutil.copy(
                data / f"{name}.cairo",
                tests / f"integration{i}_test.cairo",
            )
        shutil.copy(data / f"hello_starknet_{cairo_version}.cairo", src / "lib.cairo")

        if name == "cairo_test":
            # cairo_test discovers tests through modules, so register each
            # generated test file in src/tests.cairo and expose it from lib.cairo.
            with open(src / "tests.cairo", "w") as f:
                for i in range(unit):
                    f.write(f"mod unit{i}_test;\n")
                for i in range(integration):
                    f.write(f"mod integration{i}_test;\n")
            with open(src / "lib.cairo", "a") as f:
                f.write("\n")
                f.write("mod tests;\n")

        try:
            log("Creating test directory")
            yield tmp
        finally:
            pass


def benchmark():
    data = {
        "n_files": [],
        "n_unit": [],
        "n_integration": [],
    } | {name: [] for name, _, _ in TOOLCHAINS}

    for unit, integration in TESTS:
        data["n_files"].append(unit + integration)
        data["n_unit"].append(unit * CASES_PER_UNIT_TEST)
        data["n_integration"].append(integration * CASES_PER_INTEGRATION_TEST)

        for name, cmd, ver in TOOLCHAINS:
            with benchmark_dir(name, ver, unit, integration) as project_path:
                log(f"Running {name}")

                # Time the full test run: stderr is silenced, stdout is captured
                # but unused, and a non-zero exit code does not abort the
                # benchmark (check=False).
                start = perf_counter()
                subprocess.run(
                    cmd,
                    stderr=subprocess.DEVNULL,
                    stdout=subprocess.PIPE,
                    check=False,
                    cwd=project_path,
                )
                data[name].append(perf_counter() - start)

    df = pd.DataFrame(data)
    df.to_csv("benchmarks.csv")
    print("", df, "", sep="\n")


if __name__ == "__main__":
    benchmark()
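# Usage sketch (assumes the benchmarked toolchains, i.e. protostar, snforge, and
# scarb, are installed and on PATH, and that pandas is available):
#
#   python benchmark.py
#
# Each run writes the per-toolchain wall-clock times to benchmarks.csv in the
# current directory and prints the resulting DataFrame.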