Benchmarking suite for the toqito software package.
# Checks if the Python virtual environment exists at env/python-env.
make check-env-python
# Creates a Python 3.10 virtual environment in env/python-env, upgrades pip.
make setup-python
# Ensures the Python environment exists, creating it if missing.
make ensure-python
# Installs/updates pytest-benchmark in the virtual environment and runs setup/test.py (see the sketch after this command list).
make test-python
# Removes the Python virtual environment.
make clean-python
# Cleans and reinstalls the Python environment from scratch.
make reinstall-python
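As a reference for what the setup test exercises, here is a minimal pytest-benchmark smoke test in the spirit of `setup/test.py` (an illustrative sketch, not the script's actual contents):

```python
# Hypothetical smoke test: confirms the pytest-benchmark fixture works.
def test_benchmark_fixture_smoke(benchmark):
    # pytest-benchmark injects the `benchmark` fixture; timing a trivial
    # callable verifies the plugin is installed and functional.
    result = benchmark(sum, range(1000))
    assert result == 499500
```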
# Performs a fresh Julia installation, removing previous installations and setting up a new environment with BenchmarkTools.
make install-julia-fresh
# Checks for Julia installation; installs or updates as needed. Ensures BenchmarkTools is available in the project environment.
make setup-julia
# Runs Julia tests via setup/test.jl in the configured environment.
make test-julia
# Removes all Julia installations and environments.
make clean-julia
# Checks if a Poetry environment for toqito exists at env/toqito-env.
make check-env-toqito
# Initializes a Poetry environment for toqito in env/toqito-env.
make setup-toqito
# Ensures the toqito Poetry environment exists, creating it if missing.
make ensure-toqito
# Runs a setup test script (setup/test_toqito.py) inside the toqito environment.
make test-toqito-setup
# Displays installed packages and environment info for toqito.
make toqito-info
# Removes the toqito Poetry environment.
make clean-toqito
# Reinstalls the toqito Poetry environment from scratch.
make reinstall-toqito
- Runs all the tests in `scripts/benchmark_toqito.py` with `--benchmark-warmup=on` and `--benchmark-verbose`. The JSON report is stored in `results/toqito/full`.
make benchmark-full-toqito
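The stored report follows pytest-benchmark's JSON schema, so it can be inspected programmatically. A minimal sketch, assuming a report file named `report.json` (the actual file name depends on how the target invokes `--benchmark-json`):

```python
# Hypothetical inspection of a saved pytest-benchmark JSON report.
import json
from pathlib import Path

# The file name is an assumption; point this at whatever JSON the run produced.
report = json.loads(Path("results/toqito/full/report.json").read_text())
for bench in report["benchmarks"]:
    stats = bench["stats"]
    print(f"{bench['name']}: mean={stats['mean']:.6f}s ops={stats['ops']:.1f}/s")
```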
- Runs a simple benchmark with optional filtering and saving. Results are saved in `results/toqito/<filter>/<function>`, with only the required columns selected via `--benchmark-columns=mean,median,stddev,ops`. As an example,
# Runs benchmarks only for the partial_trace function, varying only dim; displays the results but does not save them.
make benchmark-simple-toqito FILTER="TestPartialTraceBenchmarks" FUNCTION="test_bench__partial_trace__vary__dim" SAVE=false
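`FILTER` matches a benchmark class and `FUNCTION` a test within it. A sketch of the naming convention they select on, assuming toqito's `partial_trace` lives under `toqito.channels` (the benchmark body is illustrative, not the suite's actual code):

```python
# Hypothetical excerpt in the style of scripts/benchmark_toqito.py.
import numpy as np
import pytest
from toqito.channels import partial_trace  # assumed import path


class TestPartialTraceBenchmarks:
    """Selected with FILTER="TestPartialTraceBenchmarks"."""

    @pytest.mark.parametrize("dim", [4, 8, 16])
    def test_bench__partial_trace__vary__dim(self, benchmark, dim):
        # Maximally mixed state on a dim x dim bipartite system.
        rho = np.eye(dim**2) / dim**2
        benchmark(partial_trace, rho)
```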
- Generates histogram visualizations from benchmark results, or runs `make benchmark-simple-toqito` first if none exist and then generates them. Histogram SVG files are saved to `results/toqito/<filter>/<function>/images/`. As an example,
# If benchmark results for the partial_trace function (varying only dim) already exist in isolation, builds the histogram from them; otherwise runs the simple toqito benchmarks with the specified arguments and then builds it.
make benchmark-histogram-toqito FILTER="TestPartialTraceBenchmarks" FUNCTION="test_bench__partial_trace__vary__dim"
- Runs benchmarks with memory profiling using `cProfile` (currently in development).
make test-toqito-benchmark-memory
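For context, `cProfile` is Python's built-in deterministic profiler; out of the box it reports call counts and timings, which is presumably why the memory side of this target is still in development. A minimal standalone sketch, independent of the make target:

```python
# Profile a toy workload and print the five most expensive calls.
import cProfile
import pstats


def workload():
    return sum(i * i for i in range(100_000))


profiler = cProfile.Profile()
profiler.enable()
workload()
profiler.disable()
pstats.Stats(profiler).sort_stats("cumulative").print_stats(5)
```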
# Checks if a Poetry environment for qutipy exists at env/qutipy-env.
make check-env-qutipy
# Initializes a Poetry environment for qutipy in env/qutipy-env.
make setup-qutipy
# Ensures the qutipy Poetry environment exists, creating it if missing.
make ensure-qutipy
# Runs a setup test script (setup/test_qutipy.py) inside the qutipy environment.
make test-qutipy-setup
# Removes the qutipy Poetry environment.
make clean-qutipy
# Reinstalls the qutipy Poetry environment from scratch.
make reinstall-qutipy
- Runs all the tests in `scripts/benchmark_qutipy.py` with `--benchmark-warmup=on` and `--benchmark-verbose`. The JSON report is stored in `results/qutipy/full`.
make benchmark-full-qutipy
- Runs a simple benchmark with optional filtering and saving. Results are saved in `results/qutipy/<filter>/<function>`, with only the required columns selected via `--benchmark-columns=mean,median,stddev,ops`. As an example,
# Runs benchmarks only for the partial_trace function, varying only dim; displays the results but does not save them.
make benchmark-simple-qutipy FILTER="TestPartialTraceBenchmarks" FUNCTION="test_bench__partial_trace__vary__dim" SAVE=false
- Generates histogram visualizations from benchmark results, or runs `make benchmark-simple-qutipy` first if none exist and then generates them. Histogram SVG files are saved to `results/qutipy/<filter>/<function>/images/`. As an example,
# If benchmark results for the partial_trace function (varying only dim) already exist in isolation, builds the histogram from them; otherwise runs the simple qutipy benchmarks with the specified arguments and then builds it.
make benchmark-histogram-qutipy FILTER="TestPartialTraceBenchmarks" FUNCTION="test_bench__partial_trace__vary__dim"
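Because the toqito and qutipy results land in parallel directory trees, saved runs can be compared across the two libraries. A sketch, assuming the saved results are pytest-benchmark JSON reports (the trees are simply globbed for whatever JSON files the runs produced):

```python
# Hypothetical cross-library comparison of saved pytest-benchmark reports.
import json
from pathlib import Path


def mean_times(results_dir: str) -> dict:
    """Map benchmark name -> mean time across every JSON report in a tree."""
    times = {}
    for path in Path(results_dir).rglob("*.json"):
        for bench in json.loads(path.read_text())["benchmarks"]:
            times[bench["name"]] = bench["stats"]["mean"]
    return times


toqito_times = mean_times("results/toqito")
qutipy_times = mean_times("results/qutipy")
for name in sorted(toqito_times.keys() & qutipy_times.keys()):
    print(f"{name}: toqito={toqito_times[name]:.6f}s qutipy={qutipy_times[name]:.6f}s")
```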
# Checks if an environment for ketjl exists at env/ketjl-env.
make check-env-ketjl
# Initializes an environment for ketjl in env/ketjl-env.
make setup-ketjl
# Ensures the ketjl environment exists, creating it if missing.
make ensure-ketjl
# Runs a setup test script (setup/test_ketjl.jl) inside the ketjl environment.
make test-ketjl-setup
# Removes the ketjl environment.
make clean-ketjl
# Reinstalls the ketjl environment from scratch.
make reinstall-ketjl
- Runs all the tests in `scripts/benchmark_ketjl.jl`. The JSON report is stored in `results/ketjl/full`.
make benchmark-full-ketjl
- Runs a simple benchmark with optional filtering and saving. Results are saved in `results/ketjl/<filter>/<function>` with all the required data. As an example,
# Runs benchmarks only for the partial_trace function, varying only dim, and saves the results.
make benchmark-simple-ketjl FILTER="TestPartialTraceBenchmarks" FUNCTION="test_bench__partial_trace__vary__dim" SAVE=true
- TODO: Generates histogram visualizations from benchmark results, or runs `make benchmark-simple-ketjl` first if none exist and then generates them. Histogram SVG files are saved to `results/ketjl/<filter>/<function>/images/`. As an example,
# If benchmark results for the partial_trace function (varying only dim) already exist in isolation, builds the histogram from them; otherwise runs the simple ketjl benchmarks with the specified arguments and then builds it.
make benchmark-histogram-ketjl FILTER="TestPartialTraceBenchmarks" FUNCTION="test_bench__partial_trace__vary__dim"