
pytest based synthesis tests #1257


Draft: wants to merge 34 commits into main.

Commits (34), all authored by marco66colombo:
f1c952a  add conftest.py, utils.py, update test_keras_api.py (Jan 15, 2025)
4fb8bfe  Use asserts in check_synthesis (Jan 15, 2025)
38f9199  update utils.py (Mar 14, 2025)
d3038bd  Merge branch 'main' into add-conftest (Mar 14, 2025)
109195d  fix baselines dir path (Mar 19, 2025)
d25ea90  refactor synthesis_helpers.py (Mar 26, 2025)
f2abbeb  fix comment (Mar 26, 2025)
4b05026  Merge branch 'main' into add-conftest (Mar 26, 2025)
bddf2c2  Merge pull request #1 from marco66colombo/add-conftest (Mar 26, 2025)
a7bbc04  pytest synthesis tests refactor (Apr 1, 2025)
343a4e1  update synthesis_helpers, confetst.py (Apr 1, 2025)
08abec3  update default vitis version (Apr 1, 2025)
3dae27b  fix typo test_keras_api.py (Apr 1, 2025)
76b16fc  Merge branch 'main' into add-conftest (Apr 1, 2025)
e6b09fd  Merge pull request #2 from marco66colombo/add-conftest (Apr 1, 2025)
9528c8f  Merge branch 'fastmachinelearning:main' into main (Apr 1, 2025)
3195bcb  clean imports test_keras_api (Apr 1, 2025)
5e8990b  clean new lines test_keras_api (Apr 1, 2025)
c1436a9  Trigger pre-commit hook (Apr 1, 2025)
943dc74  update after precommit (Apr 1, 2025)
19f653d  Merge branch 'fastmachinelearning:main' into main (Apr 2, 2025)
34b28ea  enable vivado and vitis (Apr 3, 2025)
8a2771f  Merge branch 'fastmachinelearning:main' into main (Apr 8, 2025)
2908052  add oneAPI report support, bug fix (Apr 9, 2025)
c51c180  add temp test file (Apr 11, 2025)
e423d16  update generate_ci_template.py (Apr 11, 2025)
d9c613a  update temp test file (Apr 11, 2025)
97f3fe6  install libidn (Apr 11, 2025)
1ed33b4  test cmake version (Apr 14, 2025)
d39d7dc  fix cmake error (Apr 14, 2025)
1280aa7  fix cmake error (Apr 14, 2025)
aaf865f  update baselines, clean ci-template (Apr 14, 2025)
db21ed4  remove test_keras_api_temp.py (Apr 15, 2025)
4b9ceac  Merge remote-tracking branch 'upstream/main' (Apr 15, 2025)
3 changes: 3 additions & 0 deletions .gitmodules
@@ -10,3 +10,6 @@
[submodule "hls4ml/templates/catapult/ac_math"]
path = hls4ml/templates/catapult/ac_math
url = https://github.com/hlslibs/ac_math.git
[submodule "test/pytest/baselines"]
path = test/pytest/baselines
url = https://github.com/marco66colombo/baselines.git
1 change: 1 addition & 0 deletions test/pytest/baselines
Submodule baselines added at 18144e
8 changes: 8 additions & 0 deletions test/pytest/ci-template.yml
@@ -9,8 +9,15 @@
- source /opt/intel/oneapi/setvars.sh --force
- git config --global --add safe.directory /builds/fastmachinelearning/hls4ml
- git submodule update --init --recursive hls4ml/templates/catapult/
- git submodule update --init --recursive test/pytest/
- if [ $EXAMPLEMODEL == 1 ]; then git submodule update --init example-models; fi
- pip install .[testing,sr,optimization]
- sudo yum install libtinfo.so.6 -y
- sudo ln -s /lib64/libtinfo.so.6 /lib64/libtinfo.so.5
- sudo ln -s /cvmfs/projects.cern.ch/hls4ml/vivado/2020.1_v1/vivado-2020.1_v1/opt/Xilinx /opt/Xilinx
- source /opt/Xilinx/Vivado/2020.1/settings64.sh
- export RUN_SYNTHESIS=true
- export PATH=/usr/bin:$PATH
script:
- cd test/pytest
- pytest $PYTESTFILE -rA --cov-report xml --cov-report term --cov=hls4ml --junitxml=report.xml --randomly-seed=42 --randomly-dont-reorganize --randomly-dont-reset-seed
@@ -24,3 +31,4 @@
path: test/pytest/coverage.xml
paths:
- test/pytest/hls4mlprj*.tar.gz
- test/pytest/synthesis_report_*.json
35 changes: 35 additions & 0 deletions test/pytest/conftest.py
@@ -0,0 +1,35 @@
import os

import pytest


def str_to_bool(val):
    return str(val).lower() in ("1", "true")


@pytest.fixture(scope="module")
def synthesis_config():
    """
    Fixture that provides synthesis configuration for tests.

    It gathers:
    - Whether synthesis should be run (from the RUN_SYNTHESIS env var)
    - Tool versions for each supported backend (from env vars)
    - Build arguments specific to each backend toolchain
    """
    return {
        "run_synthesis": str_to_bool(os.getenv("RUN_SYNTHESIS", "false")),
        "tools_version": {
            "Vivado": os.getenv("VIVADO_VERSION", "2020.1"),
            "Vitis": os.getenv("VITIS_VERSION", "2020.1"),
            "Quartus": os.getenv("QUARTUS_VERSION", "latest"),
            "oneAPI": os.getenv("ONEAPI_VERSION", "2025.0.1"),
        },
        "build_args": {
            "Vivado": {"csim": False, "synth": True, "export": False},
            "Vitis": {"csim": False, "synth": True, "export": False},
            "Quartus": {"synth": True, "fpgasynth": False},
            "oneAPI": {"build_type": "report", "run": False},
        },
    }
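
For orientation, any test module under test/pytest can request this fixture directly. The following minimal sketch is not part of this diff; the test name and its assertion are illustrative only:

import pytest


def test_synthesis_config_shape(synthesis_config):
    # Hypothetical sketch: consume the fixture the way the synthesis tests do.
    if not synthesis_config["run_synthesis"]:
        pytest.skip("RUN_SYNTHESIS is not set, so synthesis checks are disabled")
    # Vivado builds request synthesis only, skipping C simulation and export.
    assert synthesis_config["build_args"]["Vivado"] == {"csim": False, "synth": True, "export": False}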
179 changes: 179 additions & 0 deletions test/pytest/synthesis_helpers.py
@@ -0,0 +1,179 @@
import json
from pathlib import Path

import pytest


def get_baseline_path(baseline_file_name, backend, version):
    """
    Construct the full path to a baseline synthesis report file.

    Args:
        baseline_file_name (str): The name of the baseline report file.
        backend (str): The backend used (e.g., 'Vivado', 'Vitis').
        version (str): The tool version (e.g., '2020.1').

    Returns:
        Path: A pathlib.Path object pointing to the baseline file location.
    """
    return Path(__file__).parent / "baselines" / backend / version / baseline_file_name


def save_report(data, filename):
    """
    Save synthesis data to a JSON file in the same directory as this script.

    Args:
        data (dict): The synthesis output data to be saved.
        filename (str): The filename to write to (e.g., 'synthesis_report_test_x.json').

    Raises:
        OSError: If the file cannot be written.
    """
    out_path = Path(__file__).parent / filename
    with open(out_path, "w") as fp:
        json.dump(data, fp, indent=4)


def compare_dicts(data, baseline, tolerances):
    """
    Compare two flat dictionaries with tolerances.

    Args:
        data (dict): The generated report dictionary.
        baseline (dict): The expected/baseline dictionary.
        tolerances (dict): Dictionary of tolerances per key.

    Raises:
        AssertionError: If values differ outside the allowed tolerance.
    """
    for key, expected in baseline.items():
        actual = data.get(key)
        tolerance = tolerances.get(key, 0)

        try:
            actual = float(actual)
            expected = float(expected)
            assert actual == pytest.approx(
                expected, rel=tolerance
            ), f"{key}: expected {expected}, got {actual} (tolerance={tolerance*100}%)"
        except (TypeError, ValueError):
            # Non-numeric values (or a missing key, where actual is None and
            # float(None) raises TypeError) fall back to exact comparison.
            assert actual == expected, f"{key}: expected '{expected}', got '{actual}'"


def compare_vitis_backend(data, baseline):
    """
    Compare reports from Vivado/Vitis backends.

    Args:
        data (dict): The current synthesis report.
        baseline (dict): The expected synthesis report.
    """
    tolerances = {
        "EstimatedClockPeriod": 0.01,
        "FF": 0.05,
        "LUT": 0.10,
        "BRAM_18K": 0.0,
        "DSP": 0.0,
        "URAM": 0.0,
        "AvailableBRAM_18K": 0.0,
        "AvailableDSP": 0.0,
        "AvailableFF": 0.0,
        "AvailableLUT": 0.0,
        "AvailableURAM": 0.0,
    }

    compare_dicts(data["CSynthesisReport"], baseline["CSynthesisReport"], tolerances)


def compare_oneapi_backend(data, baseline):
    """
    Compare reports from the oneAPI backend.

    Args:
        data (dict): The current synthesis report.
        baseline (dict): The expected synthesis report.
    """
    tolerances = {
        "HLS": {
            "total": {"alut": 0.01, "reg": 0.1, "ram": 0.01, "dsp": 0.01, "mlab": 0.01},
            "available": {"alut": 0.01, "reg": 0.01, "ram": 0.01, "dsp": 0.01, "mlab": 0.01},
        },
        "Loop": {"worstFrequency": 0.01, "worstII": 0.01, "worstLatency": 0.01},
    }

    data = data["report"]
    baseline = baseline["report"]

    compare_dicts(data["HLS"]["total"], baseline["HLS"]["total"], tolerances["HLS"]["total"])
    compare_dicts(data["HLS"]["available"], baseline["HLS"]["available"], tolerances["HLS"]["available"])
    compare_dicts(data["Loop"], baseline["Loop"], tolerances["Loop"])


COMPARE_FUNCS = {
    "Vivado": compare_vitis_backend,
    "Vitis": compare_vitis_backend,
    "oneAPI": compare_oneapi_backend,
}


EXPECTED_REPORT_KEYS = {
    "Vivado": {"CSynthesisReport"},
    "Vitis": {"CSynthesisReport"},
    "oneAPI": {"report"},
}


def run_synthesis_test(config, hls_model, baseline_file_name, backend):
    """
    Run HLS synthesis and compare the output with a stored baseline report.

    If synthesis is disabled via the configuration (`run_synthesis=False`),
    the function returns immediately without running synthesis.

    Args:
        config (dict): Test-wide synthesis configuration fixture.
        hls_model (object): hls4ml model instance to build and synthesize.
        baseline_file_name (str): The name of the baseline file for comparison.
        backend (str): The synthesis backend used (e.g., 'Vivado', 'Vitis').
    """
    if not config.get("run_synthesis", False):
        return

    # The Quartus backend is not synthesized by these tests yet
    if backend == 'Quartus':
        return

    # Run synthesis
    build_args = config["build_args"]
    try:
        data = hls_model.build(**build_args.get(backend, {}))
    except Exception as e:
        pytest.skip(f"hls_model.build failed: {e}")

    # Save synthesis report
    save_report(data, f"synthesis_report_{baseline_file_name}")

    # Check synthesis report keys
    expected_keys = EXPECTED_REPORT_KEYS.get(backend, set())
    assert data and expected_keys.issubset(
        data.keys()
    ), f"Synthesis failed: Missing expected keys in synthesis report: expected {expected_keys}, got {set(data.keys())}"

    # Load baseline report
    version = config["tools_version"].get(backend)
    baseline_path = get_baseline_path(baseline_file_name, backend, version)
    try:
        with open(baseline_path) as fp:
            baseline = json.load(fp)
    except FileNotFoundError:
        pytest.skip(f"Baseline file '{baseline_path}' not found.")

    # Compare report against baseline using backend-specific rules
    compare_func = COMPARE_FUNCS.get(backend)
    if compare_func is None:
        raise AssertionError(f"No comparison function defined for backend: {backend}")

    compare_func(data, baseline)
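
To show how these helpers fit together, here is a minimal sketch of a synthesis-enabled test. It is not part of this diff: the Keras model, project directory, and baseline file name are illustrative assumptions, while synthesis_config and run_synthesis_test come from the files above.

from pathlib import Path

import pytest
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

import hls4ml
from synthesis_helpers import run_synthesis_test

test_root_path = Path(__file__).parent


@pytest.mark.parametrize("backend", ["Vivado", "Vitis", "oneAPI"])
def test_dense_synthesis(synthesis_config, backend):
    # Build a small Keras model and convert it with hls4ml.
    model = Sequential([Dense(4, input_shape=(8,), activation="relu")])
    hls_config = hls4ml.utils.config_from_keras_model(model, granularity="model")
    hls_model = hls4ml.converters.convert_from_keras_model(
        model,
        hls_config=hls_config,
        output_dir=str(test_root_path / f"hls4mlprj_dense_{backend}"),
        backend=backend,
    )
    hls_model.compile()

    # Synthesize (only when RUN_SYNTHESIS is set) and compare the report
    # against the stored baseline; the baseline file name is hypothetical.
    run_synthesis_test(synthesis_config, hls_model, f"test_dense_{backend}.json", backend)

Under the Vivado/Vitis tolerances defined above, such a test tolerates up to 5% relative drift in FF and 10% in LUT counts, while BRAM_18K, DSP, and URAM must match the baseline exactly.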