Commit f5b9763

Setup benchmark suite
Signed-off-by: Tim Paine <[email protected]>
1 parent: 323122e

File tree: 10 files changed, +156 -0 lines changed

.gitignore

Lines changed: 5 additions & 0 deletions

@@ -104,6 +104,11 @@ csp/lib/
 *.so
 *.tsbuildinfo
 
+# Benchmarks
+.asv
+ci/benchmarks/*
+!ci/benchmarks/benchmarks.json
+
 # Jupyter / Editors
 .ipynb_checkpoints
 .autoversion

Makefile

Lines changed: 22 additions & 0 deletions

@@ -118,6 +118,28 @@ dockerps: ## spin up docker compose services for adapter testing
 dockerdown: ## tear down docker compose services for adapter testing
 	$(DOCKER) compose -f ci/$(ADAPTER)/docker-compose.yml down
 
+##############
+# BENCHMARKS #
+##############
+.PHONY: benchmark benchmarks benchmark-regen benchmark-view benchmarks-regen benchmarks-view
+benchmark: ## run benchmarks
+	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose `git rev-parse --abbrev-ref HEAD`
+
+# https://github.com/airspeed-velocity/asv/issues/1027
+# https://github.com/airspeed-velocity/asv/issues/488
+benchmark-regen:
+	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.4^!
+	python -m asv run --config csp/benchmarks/asv.conf.jsonc --verbose v0.0.5^!
+
+benchmark-view: ## generate viewable website of benchmark results
+	python -m asv publish --config csp/benchmarks/asv.conf.jsonc
+	python -m asv preview --config csp/benchmarks/asv.conf.jsonc
+
+# Aliases
+benchmarks: benchmark
+benchmarks-regen: benchmark-regen
+benchmarks-view: benchmark-view
+
 ###########
 # VERSION #
 ###########
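
For context, the `benchmark` target resolves the current branch name and hands it to asv. A minimal Python sketch of the same invocation, assuming asv is installed in the active environment (equivalent to running the Make recipe by hand):

import subprocess

# Resolve the current branch name, as `git rev-parse --abbrev-ref HEAD` does.
branch = subprocess.check_output(
    ["git", "rev-parse", "--abbrev-ref", "HEAD"], text=True
).strip()

# Benchmark only that branch; --config points asv at the in-repo config file.
subprocess.run(
    ["python", "-m", "asv", "run",
     "--config", "csp/benchmarks/asv.conf.jsonc",
     "--verbose", branch],
    check=True,
)

The `v0.0.4^!` and `v0.0.5^!` arguments in `benchmark-regen` use git's single-commit range syntax, so asv regenerates results for exactly those two tags (the workaround discussed in the linked asv issues).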

ci/benchmarks/benchmarks.json

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+{
+    "stats.basic.StatsBenchmarkSuite.time_stats_qtl": {
+        "code": "class StatsBenchmarkSuite:\n def time_stats_qtl(self):\n def g_qtl():\n data = csp.curve(typ=np.ndarray, data=self.DATA)\n median = csp.stats.median(data, interval=self.INTERVAL)\n csp.add_graph_output(\"final_median\", median, tick_count=1)\n \n qtl_times = []\n \n for _ in range(self.NUM_SAMPLES):\n start = time.time()\n csp.run(g_qtl, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))\n post_qtl = time.time()\n qtl_times.append(post_qtl - start)\n \n avg_med = sum(qtl_times) / self.NUM_SAMPLES\n print(\n f\"Average time in {self.NUM_SAMPLES} tests for median with {self.N=}, {self.ARRAY_SIZE=}, {self.INTERVAL=}: {round(avg_med, 2)} s\"\n )\n return avg_med\n\n def setup(self):\n self.st = datetime(2020, 1, 1)\n self.N = 1_000\n self.ARRAY_SIZE = 100\n self.TEST_TIMES = [self.st + timedelta(seconds=i) for i in range(self.N)]\n self.RANDOM_VALUES = [np.random.normal(size=(self.ARRAY_SIZE,)) for i in range(self.N)] # 100 element np array\n self.DATA = list(zip(self.TEST_TIMES, self.RANDOM_VALUES))\n self.INTERVAL = 500\n self.NUM_SAMPLES = 100",
+        "min_run_count": 2,
+        "name": "stats.basic.StatsBenchmarkSuite.time_stats_qtl",
+        "number": 0,
+        "param_names": [],
+        "params": [],
+        "repeat": 0,
+        "rounds": 2,
+        "sample_time": 0.01,
+        "type": "time",
+        "unit": "seconds",
+        "version": "21f280e4eeceac0ca2172bed432939c57f2b2618bd26bd27d15d4ca177e2ab26",
+        "warmup_time": -1
+    },
+    "stats.basic.StatsBenchmarkSuite.time_stats_rank": {
+        "code": "class StatsBenchmarkSuite:\n def time_stats_rank(self):\n def g_rank():\n data = csp.curve(typ=np.ndarray, data=self.DATA)\n rank = csp.stats.rank(data, interval=self.INTERVAL)\n csp.add_graph_output(\"final_rank\", rank, tick_count=1)\n \n rank_times = []\n \n for _ in range(self.NUM_SAMPLES):\n start = time.time()\n csp.run(g_rank, realtime=False, starttime=self.st, endtime=timedelta(seconds=self.N))\n post_rank = time.time()\n rank_times.append(post_rank - start)\n \n avg_rank = sum(rank_times) / self.NUM_SAMPLES\n print(\n f\"Average time in {self.NUM_SAMPLES} tests for rank with {self.N=}, {self.ARRAY_SIZE=}, {self.INTERVAL=}: {round(avg_rank, 2)} s\"\n )\n return avg_rank\n\n def setup(self):\n self.st = datetime(2020, 1, 1)\n self.N = 1_000\n self.ARRAY_SIZE = 100\n self.TEST_TIMES = [self.st + timedelta(seconds=i) for i in range(self.N)]\n self.RANDOM_VALUES = [np.random.normal(size=(self.ARRAY_SIZE,)) for i in range(self.N)] # 100 element np array\n self.DATA = list(zip(self.TEST_TIMES, self.RANDOM_VALUES))\n self.INTERVAL = 500\n self.NUM_SAMPLES = 100",
+        "min_run_count": 2,
+        "name": "stats.basic.StatsBenchmarkSuite.time_stats_rank",
+        "number": 0,
+        "param_names": [],
+        "params": [],
+        "repeat": 0,
+        "rounds": 2,
+        "sample_time": 0.01,
+        "type": "time",
+        "unit": "seconds",
+        "version": "4c302ccf942084ac2367999fc84b2ba882c2ff74cddd80a3c27c8f8a1aee333d",
+        "warmup_time": -1
+    },
+    "version": 2
+}
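
This `benchmarks.json` is the metadata index asv writes alongside results; it is checked in (note the `.gitignore` exception above) so published results stay interpretable across runs. A minimal sketch for inspecting it, assuming it is read from the repository root:

import json

with open("ci/benchmarks/benchmarks.json") as f:
    meta = json.load(f)

# The top-level "version" key describes the schema; every other key is one benchmark.
for name, spec in meta.items():
    if name == "version":
        continue
    print(name, spec["type"], spec["unit"])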

conda/dev-environment-unix.yml

Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@ channels:
   - conda-forge
   - nodefaults
 dependencies:
+  - asv
   - bison
   - brotli
   - build

conda/dev-environment-win.yml

Lines changed: 1 addition & 0 deletions

@@ -3,6 +3,7 @@ channels:
   - conda-forge
   - nodefaults
 dependencies:
+  - asv
   - brotli
   - build
   - bump2version>=1

csp/benchmarks/__init__.py

Whitespace-only changes.

csp/benchmarks/asv.conf.jsonc

Lines changed: 33 additions & 0 deletions

@@ -0,0 +1,33 @@
+// https://asv.readthedocs.io/en/v0.6.3/asv.conf.json.html
+{
+    "version": 1,
+    "project": "csp",
+    "project_url": "https://github.com/Point72/csp",
+    "repo": "../..",
+    "branches": ["main"],
+    "dvcs": "git",
+
+    "install_command": ["in-dir={env_dir} python -mpip install {wheel_file}"],
+    "uninstall_command": ["return-code=any python -mpip uninstall -y {project}"],
+    "build_command": [
+        "python -m pip install build",
+        "python -m build --wheel -o {build_cache_dir} {build_dir}"
+    ],
+    "environment_type": "virtualenv",
+    "install_timeout": 600,
+    "show_commit_url": "http://github.com/point72/csp/commit/",
+
+    "pythons": ["3.11"],
+
+    // "environment_type": "mamba",
+    // "conda_channels": ["conda-forge"],
+    // "conda_environment_file": "conda/dev-environment-unix.yml",
+
+    "benchmark_dir": "../../csp/benchmarks",
+    "env_dir": "../../.asv/env",
+    "results_dir": "../../ci/benchmarks",
+    "html_dir": "../../.asv/html",
+
+    "hash_length": 8,
+    "build_cache_size": 2
+}
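
Two details worth noting: asv resolves the relative paths ("repo", "benchmark_dir", "env_dir", "results_dir", "html_dir") against the config file's own directory, which is why everything is prefixed with `../..`; and the file is JSONC, so the stdlib `json` module cannot read it directly. A minimal sketch, assuming only full-line `//` comments as in the file above:

import json
import re

with open("csp/benchmarks/asv.conf.jsonc") as f:
    # Drop full-line // comments so the remainder is plain JSON.
    text = re.sub(r"^\s*//.*$", "", f.read(), flags=re.M)

conf = json.loads(text)
print(conf["benchmark_dir"], conf["results_dir"])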

csp/benchmarks/stats/__init__.py

Whitespace-only changes.

csp/benchmarks/stats/basic.py

Lines changed: 59 additions & 0 deletions

@@ -0,0 +1,59 @@
+import numpy as np
+import time
+from datetime import datetime, timedelta
+from timeit import Timer
+
+import csp
+
+
+class StatsBenchmarkSuite:
+    def setup(self):
+        self.start_date = datetime(2020, 1, 1)
+        self.num_rows = 1_000
+        self.array_size = 100
+        self.test_times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]
+        self.random_values = [
+            np.random.normal(size=(self.array_size,)) for i in range(self.num_rows)
+        ]  # 100 element np array
+        self.data = list(zip(self.test_times, self.random_values))
+        self.interval = 500
+        self.num_samples = 100
+
+    def time_stats_qtl(self):
+        def g_qtl():
+            data = csp.curve(typ=np.ndarray, data=self.data)
+            median = csp.stats.median(data, interval=self.interval)
+            csp.add_graph_output("final_median", median, tick_count=1)
+
+        timer = Timer(
+            lambda: csp.run(g_qtl, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))
+        )
+        elapsed = timer.timeit(self.num_samples)
+        avg_med = elapsed / self.num_samples
+        print(
+            f"Average time in {self.num_samples} tests for median with {self.num_rows=}, {self.array_size=}, {self.interval=}: {round(avg_med, 2)} s"
+        )
+        return avg_med
+
+    def time_stats_rank(self):
+        def g_rank():
+            data = csp.curve(typ=np.ndarray, data=self.data)
+            rank = csp.stats.rank(data, interval=self.interval)
+            csp.add_graph_output("final_rank", rank, tick_count=1)
+
+        timer = Timer(
+            lambda: csp.run(g_rank, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))
+        )
+        elapsed = timer.timeit(self.num_samples)
+        avg_rank = elapsed / self.num_samples
+        print(
+            f"Average time in {self.num_samples} tests for rank with {self.num_rows=}, {self.array_size=}, {self.interval=}: {round(avg_rank, 2)} s"
+        )
+        return avg_rank
+
+
+if __name__ == "__main__":
+    sbs = StatsBenchmarkSuite()
+    sbs.setup()
+    sbs.time_stats_qtl()
+    sbs.time_stats_rank()
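
One caveat on the suite's shape: asv itself times `time_*` methods, controlling `number`, `repeat`, and `rounds` from the metadata above, so the internal `Timer` loop and returned average mean asv ends up timing the whole 100-sample loop rather than a single run. A minimal sketch of the same median benchmark in asv's conventional style, where the method body is one run and asv handles repetition (illustrative only, not the committed code):

from datetime import datetime, timedelta

import numpy as np

import csp


class StatsMedianSuite:
    def setup(self):
        self.start_date = datetime(2020, 1, 1)
        self.num_rows = 1_000
        times = [self.start_date + timedelta(seconds=i) for i in range(self.num_rows)]
        values = [np.random.normal(size=(100,)) for _ in range(self.num_rows)]
        self.data = list(zip(times, values))

    def time_median(self):
        def g_qtl():
            data = csp.curve(typ=np.ndarray, data=self.data)
            median = csp.stats.median(data, interval=500)
            csp.add_graph_output("final_median", median, tick_count=1)

        # One graph run per timing sample; asv repeats this as needed.
        csp.run(g_qtl, realtime=False, starttime=self.start_date, endtime=timedelta(seconds=self.num_rows))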

pyproject.toml

Lines changed: 2 additions & 0 deletions

@@ -83,6 +83,8 @@ develop = [
     "sqlalchemy", # db
     "threadpoolctl", # test_random
     "tornado", # profiler, perspective, websocket
+    # benchmarks
+    "asv",
 ]
 showgraph = [
     "graphviz",
