diff --git a/.gitignore b/.gitignore index 2900b6074..d19638d07 100644 --- a/.gitignore +++ b/.gitignore @@ -26,3 +26,4 @@ __pycache__/ # bug: This file is created in repo root on test discovery. /consumer_test.log +.clwb diff --git a/docs/how-to/test_to_doc_links.rst b/docs/how-to/test_to_doc_links.rst index d7c3677f3..5a0503f76 100644 --- a/docs/how-to/test_to_doc_links.rst +++ b/docs/how-to/test_to_doc_links.rst @@ -53,3 +53,43 @@ Limitations - Partial properties will lead to no Testlink creation. If you want a test to be linked, please ensure all requirement properties are provided. - Tests must be executed by Bazel first so `test.xml` files exist. + + +CI/CD Gate for Linkage Percentage +--------------------------------- + +To enforce traceability in CI: + +1. Run tests. +2. Generate ``needs.json``. +3. Execute the traceability checker. + +.. code-block:: bash + + bazel test //... + bazel build //:needs_json + bazel run //scripts_bazel:traceability_coverage -- \ + --needs-json bazel-bin/needs_json/_build/needs/needs.json \ + --min-req-code 100 \ + --min-req-test 100 \ + --min-req-fully-linked 100 \ + --min-tests-linked 100 \ + --fail-on-broken-test-refs + +The checker reports: + +- Percentage of implemented requirements with ``source_code_link`` +- Percentage of implemented requirements with ``testlink`` +- Percentage of implemented requirements with both links (fully linked) +- Percentage of test cases linked to at least one requirement +- Broken testcase references to unknown requirement IDs + +To check only unit tests, filter testcase types: + +.. code-block:: bash + + bazel run //scripts_bazel:traceability_coverage -- \ + --needs-json bazel-bin/needs_json/_build/needs/needs.json \ + --test-types unit-test + +Use lower thresholds during rollout and tighten towards 100% over time. 
diff --git a/docs/internals/requirements/implementation_state.rst b/docs/internals/requirements/implementation_state.rst index 580e090fc..47eaa539a 100644 --- a/docs/internals/requirements/implementation_state.rst +++ b/docs/internals/requirements/implementation_state.rst @@ -20,12 +20,9 @@ Overview -------- .. needpie:: Requirements Status - :labels: not implemented, implemented but not tested, implemented and tested + :labels: not implemented, implemented but incomplete docs, fully documented :colors: red,yellow, green - - type == 'tool_req' and implemented == 'NO' - type == 'tool_req' and testlink == '' and (implemented == 'YES' or implemented == 'PARTIAL') - type == 'tool_req' and testlink != '' and (implemented == 'YES' or implemented == 'PARTIAL') + :filter-func: src.extensions.score_metamodel.checks.traceability_dashboard.pie_requirements_status(tool_req) In Detail --------- @@ -48,9 +45,7 @@ In Detail .. needpie:: Requirements with Codelinks :labels: no codelink, with codelink :colors: red, green - - type == 'tool_req' and source_code_link == '' - type == 'tool_req' and source_code_link != '' + :filter-func: src.extensions.score_metamodel.checks.traceability_dashboard.pie_requirements_with_code_links(tool_req) .. 
grid-item-card:: diff --git a/docs/reference/commands.md b/docs/reference/commands.md index 01bc8d15d..5bc2b959b 100644 --- a/docs/reference/commands.md +++ b/docs/reference/commands.md @@ -5,6 +5,7 @@ | `bazel run //:docs` | Builds documentation | | `bazel run //:docs_check` | Verifies documentation correctness | | `bazel run //:docs_combo` | Builds combined documentation with all external dependencies included | +| `bazel run //scripts_bazel:traceability_coverage -- --needs-json bazel-bin/needs_json/needs.json --min-req-code 100 --min-req-test 100 --min-req-fully-linked 100 --min-tests-linked 100 --fail-on-broken-test-refs` | Calculates requirement/test traceability percentages and fails if thresholds are not met | | `bazel run //:live_preview` | Creates a live_preview of the documentation viewable in a local server | | `bazel run //:live_preview_combo_experimental` | Creates a live_preview of the full documentation with all dependencies viewable in a local server | | `bazel run //:ide_support` | Sets up a Python venv for esbonio (Remember to restart VS Code!) 
| diff --git a/scripts_bazel/BUILD b/scripts_bazel/BUILD index befe51730..49ac766cc 100644 --- a/scripts_bazel/BUILD +++ b/scripts_bazel/BUILD @@ -37,3 +37,11 @@ py_binary( main = "merge_sourcelinks.py", visibility = ["//visibility:public"], ) + +py_binary( + name = "traceability_coverage", + srcs = ["traceability_coverage.py"], + main = "traceability_coverage.py", + visibility = ["//visibility:public"], + deps = all_requirements + ["//src/extensions/score_metamodel:score_metamodel"], +) diff --git a/scripts_bazel/tests/BUILD b/scripts_bazel/tests/BUILD index 7ff48c428..70b7703ac 100644 --- a/scripts_bazel/tests/BUILD +++ b/scripts_bazel/tests/BUILD @@ -32,3 +32,12 @@ score_pytest( ] + all_requirements, pytest_config = "//:pyproject.toml", ) + +score_pytest( + name = "traceability_coverage_test", + srcs = ["traceability_coverage_test.py"], + deps = [ + "//scripts_bazel:traceability_coverage", + ] + all_requirements, + pytest_config = "//:pyproject.toml", +) diff --git a/scripts_bazel/tests/traceability_coverage_test.py b/scripts_bazel/tests/traceability_coverage_test.py new file mode 100644 index 000000000..9e1267bc3 --- /dev/null +++ b/scripts_bazel/tests/traceability_coverage_test.py @@ -0,0 +1,233 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +"""Tests for traceability_coverage.py.""" + +import json +import os +import subprocess +import sys +from pathlib import Path + +_MY_PATH = Path(__file__).parent + + +def _write_needs_json(tmp_path: Path) -> Path: + needs_json = tmp_path / "needs.json" + payload = { + "current_version": "main", + "versions": { + "main": { + "needs": { + "REQ_1": { + "id": "REQ_1", + "type": "tool_req", + "implemented": "YES", + "source_code_link": "src/foo.py:10", + "testlink": "", + }, + "REQ_2": { + "id": "REQ_2", + "type": "tool_req", + "implemented": "PARTIAL", + "source_code_link": "", + "testlink": "tests/test_foo.py::test_bar", + }, + "REQ_3": { + "id": "REQ_3", + "type": "tool_req", + "implemented": "NO", + "source_code_link": "", + "testlink": "", + }, + "TC_1": { + "id": "TC_1", + "type": "testcase", + "partially_verifies": "REQ_1, REQ_2", + "fully_verifies": "", + }, + "TC_2": { + "id": "TC_2", + "type": "testcase", + "partially_verifies": "", + "fully_verifies": "", + }, + "TC_3": { + "id": "TC_3", + "type": "testcase", + "partially_verifies": "", + "fully_verifies": "REQ_UNKNOWN", + }, + } + } + }, + } + needs_json.write_text(json.dumps(payload), encoding="utf-8") + return needs_json + + +def test_traceability_coverage_thresholds_pass(tmp_path: Path) -> None: + needs_json = _write_needs_json(tmp_path) + output_json = tmp_path / "summary.json" + + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "traceability_coverage.py", + "--needs-json", + str(needs_json), + "--min-req-code", + "50", + "--min-req-test", + "50", + "--min-req-fully-linked", + "0", + "--min-tests-linked", + "60", + "--json-output", + str(output_json), + ], + 
capture_output=True, + text=True, + ) + + assert result.returncode == 0 + assert "Threshold check passed." in result.stdout + assert output_json.exists() + + summary = json.loads(output_json.read_text(encoding="utf-8")) + assert summary["requirements"]["total"] == 2 + assert summary["requirements"]["with_code_link"] == 1 + assert summary["requirements"]["with_test_link"] == 1 + assert summary["requirements"]["fully_linked"] == 0 + assert summary["tests"]["total"] == 3 + assert summary["tests"]["linked_to_requirements"] == 2 + assert len(summary["tests"]["broken_references"]) == 1 + + +def test_traceability_coverage_thresholds_fail(tmp_path: Path) -> None: + needs_json = _write_needs_json(tmp_path) + + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "traceability_coverage.py", + "--needs-json", + str(needs_json), + "--min-req-code", + "80", + "--min-req-test", + "80", + "--min-req-fully-linked", + "80", + "--min-tests-linked", + "80", + ], + capture_output=True, + text=True, + ) + + assert result.returncode == 2 + assert "Threshold check failed:" in result.stdout + + +def test_traceability_coverage_fails_on_broken_refs(tmp_path: Path) -> None: + needs_json = _write_needs_json(tmp_path) + + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "traceability_coverage.py", + "--needs-json", + str(needs_json), + "--min-req-code", + "0", + "--min-req-test", + "0", + "--min-req-fully-linked", + "0", + "--min-tests-linked", + "0", + "--fail-on-broken-test-refs", + ], + capture_output=True, + text=True, + ) + + assert result.returncode == 2 + assert "broken testcase references found:" in result.stdout + + +def test_traceability_coverage_prints_unlinked_requirements(tmp_path: Path) -> None: + needs_json = _write_needs_json(tmp_path) + + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "traceability_coverage.py", + "--needs-json", + str(needs_json), + "--min-req-code", + "0", + "--min-req-test", + "0", + 
"--min-req-fully-linked", + "0", + "--min-tests-linked", + "0", + "--print-unlinked-requirements", + ], + capture_output=True, + text=True, + ) + + assert result.returncode == 0 + assert "Unlinked requirement details:" in result.stdout + assert "Missing source_code_link: REQ_2" in result.stdout + assert "Missing testlink: REQ_1" in result.stdout + assert "Not fully linked: REQ_1, REQ_2" in result.stdout + + +def test_traceability_coverage_accepts_workspace_relative_needs_json( + tmp_path: Path, +) -> None: + workspace = tmp_path / "workspace" + workspace.mkdir() + needs_json = _write_needs_json(workspace) + + env = dict(os.environ) + env["BUILD_WORKSPACE_DIRECTORY"] = str(workspace) + + result = subprocess.run( + [ + sys.executable, + _MY_PATH.parent / "traceability_coverage.py", + "--needs-json", + "needs.json", + "--min-req-code", + "0", + "--min-req-test", + "0", + "--min-req-fully-linked", + "0", + "--min-tests-linked", + "0", + ], + capture_output=True, + text=True, + cwd=tmp_path, + env=env, + ) + + assert result.returncode == 0 + assert f"Traceability input: {needs_json}" in result.stdout diff --git a/scripts_bazel/traceability_coverage.py b/scripts_bazel/traceability_coverage.py new file mode 100644 index 000000000..ef2dc15b8 --- /dev/null +++ b/scripts_bazel/traceability_coverage.py @@ -0,0 +1,401 @@ +# ******************************************************************************* +# Copyright (c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. 
+# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +"""Compute requirement and test traceability coverage from sphinx-needs output.""" + +from __future__ import annotations + +import argparse +import importlib.util +import json +import os +import sys +from pathlib import Path +from typing import Any + +# Ensure shared metric code under src/ is importable when executed directly. +_REPO_ROOT = Path(__file__).resolve().parent.parent +if str(_REPO_ROOT) not in sys.path: + sys.path.insert(0, str(_REPO_ROOT)) + +# Import only the metrics module, avoid heavy __init__.py +_metrics_path = _REPO_ROOT / "src/extensions/score_metamodel/traceability_metrics.py" +_spec = importlib.util.spec_from_file_location("traceability_metrics", _metrics_path) +if _spec is None or _spec.loader is None: + raise ImportError(f"Failed to load metrics module from {_metrics_path}") +traceability_metrics = importlib.util.module_from_spec(_spec) +_spec.loader.exec_module(traceability_metrics) + +compute_traceability_summary = traceability_metrics.compute_traceability_summary + + +def _load_needs(needs_json: Path) -> list[dict[str, Any]]: + raw = json.loads(needs_json.read_text(encoding="utf-8")) + + if isinstance(raw, list): + return [item for item in raw if isinstance(item, dict)] + + if isinstance(raw, dict): + if "needs" in raw and isinstance(raw["needs"], dict): + return [v for v in raw["needs"].values() if isinstance(v, dict)] + + versions = raw.get("versions") + if isinstance(versions, dict) and versions: + current_version = raw.get("current_version") + selected: Any = None + if isinstance(current_version, str) and current_version in versions: + selected = versions[current_version] + else: + selected = next(iter(versions.values())) 
+ if isinstance(selected, dict): + needs = selected.get("needs") + if isinstance(needs, dict): + return [v for v in needs.values() if isinstance(v, dict)] + + raise ValueError(f"Unsupported needs.json format in {needs_json}") + + +def _default_needs_json_candidates() -> list[Path]: + return [ + Path("_build/needs/needs.json"), + Path("bazel-bin/needs_json/needs.json"), + ] + + +def _find_needs_json(explicit: str | None) -> Path: + if explicit: + raw_path = Path(explicit) + candidates: list[Path] = [raw_path] + + # Under `bazel run` the working directory may be a runfiles tree, so + # also resolve relative paths from the workspace root when available. + workspace_dir = os.environ.get("BUILD_WORKSPACE_DIRECTORY", "").strip() + if not raw_path.is_absolute() and workspace_dir: + candidates.append(Path(workspace_dir) / raw_path) + + for path in candidates: + if path.exists(): + return path + + raise FileNotFoundError(f"needs.json not found: {raw_path}") + + for candidate in _default_needs_json_candidates(): + if candidate.exists(): + return candidate + + raise FileNotFoundError( + "Could not locate needs.json automatically. Use --needs-json with a valid path." 
+ ) + + +def _apply_argument_shortcuts(args: argparse.Namespace) -> None: + """Apply shortcut arguments like --require-all-links.""" + if args.require_all_links: + args.min_req_code = 100.0 + args.min_req_test = 100.0 + args.min_req_fully_linked = 100.0 + args.min_tests_linked = 100.0 + args.fail_on_broken_test_refs = True + + +def _print_summary( + needs_json: Path, + req_total: int, + req_with_code: int, + req_code_pct: float, + req_with_test: int, + req_test_pct: float, + req_fully_linked: int, + req_fully_linked_pct: float, + req_missing_code: list[str], + req_missing_test: list[str], + req_not_fully_linked: list[str], + print_unlinked: bool, + tests_total: int, + tests_linked: int, + tests_linked_pct: float, + broken_test_references: list[dict[str, str]], +) -> None: + """Print human-readable summary.""" + print(f"Traceability input: {needs_json}") + print("-" * 72) + print( + "Requirements with source links: " + f"{req_with_code}/{req_total} ({req_code_pct:.2f}%)" + ) + print( + "Requirements with test links: " + f"{req_with_test}/{req_total} ({req_test_pct:.2f}%)" + ) + print( + "Requirements fully linked: " + f"{req_fully_linked}/{req_total} ({req_fully_linked_pct:.2f}%)" + ) + if print_unlinked: + print("Unlinked requirement details:") + print( + " Missing source_code_link: " + + (", ".join(sorted(req_missing_code)) if req_missing_code else "") + ) + print( + " Missing testlink: " + + (", ".join(sorted(req_missing_test)) if req_missing_test else "") + ) + print( + " Not fully linked: " + + ( + ", ".join(sorted(req_not_fully_linked)) + if req_not_fully_linked + else "" + ) + ) + print( + "Tests linked to requirements: " + f"{tests_linked}/{tests_total} ({tests_linked_pct:.2f}%)" + ) + print(f"Broken test references: {len(broken_test_references)}") + + if broken_test_references: + print("Broken reference details:") + for item in broken_test_references: + print(f" - {item['testcase']} -> {item['missing_need']}") + + +def _check_thresholds( + req_code_pct: 
float, + min_req_code: float, + req_test_pct: float, + min_req_test: float, + req_fully_linked_pct: float, + min_req_fully_linked: float, + tests_linked_pct: float, + min_tests_linked: float, + broken_test_references: list[dict[str, str]], + fail_on_broken_test_refs: bool, +) -> list[str]: + """Check threshold violations and return failures.""" + failures: list[str] = [] + if req_code_pct < float(min_req_code): + failures.append( + f"requirements with code links {req_code_pct:.2f}% < {min_req_code:.2f}%" + ) + if req_test_pct < float(min_req_test): + failures.append( + f"requirements with test links {req_test_pct:.2f}% < {min_req_test:.2f}%" + ) + if req_fully_linked_pct < float(min_req_fully_linked): + failures.append( + "requirements fully linked " + f"{req_fully_linked_pct:.2f}% < {min_req_fully_linked:.2f}%" + ) + if tests_linked_pct < float(min_tests_linked): + failures.append( + f"tests linked to requirements {tests_linked_pct:.2f}% < {min_tests_linked:.2f}%" + ) + if fail_on_broken_test_refs and broken_test_references: + failures.append( + f"broken testcase references found: {len(broken_test_references)}" + ) + return failures + + +def main() -> int: + parser = argparse.ArgumentParser( + description=( + "Compute requirement/test traceability coverage from sphinx-needs output " + "and optionally fail on threshold violations." + ) + ) + parser.add_argument( + "--needs-json", + default=None, + help=( + "Path to needs.json. If omitted, tries _build/needs/needs.json and " + "bazel-bin/needs_json/needs.json" + ), + ) + parser.add_argument( + "--requirement-types", + default="tool_req", + help="Comma separated need types treated as requirements (default: tool_req)", + ) + parser.add_argument( + "--test-types", + default="", + help=( + "Optional comma separated testcase test_type filter (for example unit-test). " + "If empty, all testcase types are included." 
+ ), + ) + parser.add_argument( + "--include-not-implemented", + action="store_true", + help=( + "Include requirements with implemented == NO in requirement denominator. " + "By default only YES/PARTIAL are counted." + ), + ) + parser.add_argument( + "--min-req-code", + type=float, + default=0.0, + help="Minimum required percentage for requirements with source code links", + ) + parser.add_argument( + "--min-req-test", + type=float, + default=0.0, + help="Minimum required percentage for requirements with test links", + ) + parser.add_argument( + "--min-req-fully-linked", + type=float, + default=0.0, + help=( + "Minimum required percentage for requirements with both source code " + "and test links" + ), + ) + parser.add_argument( + "--min-tests-linked", + type=float, + default=0.0, + help="Minimum required percentage for testcases linked to requirements", + ) + parser.add_argument( + "--require-all-links", + action="store_true", + help="Shortcut that enforces 100%% for all three minimum percentages", + ) + parser.add_argument( + "--fail-on-broken-test-refs", + action="store_true", + help="Fail if a testcase references an unknown requirement ID", + ) + parser.add_argument( + "--json-output", + default=None, + help="Optional path to write machine-readable JSON summary", + ) + parser.add_argument( + "--print-unlinked-requirements", + action="store_true", + help=( + "Print IDs of requirements missing source_code_link and/or testlink. " + "Useful when coverage thresholds fail." 
+ ), + ) + + args = parser.parse_args() + _apply_argument_shortcuts(args) + + requirement_types = { + item.strip() for item in str(args.requirement_types).split(",") if item.strip() + } + if not requirement_types: + raise ValueError("--requirement-types must not be empty") + + filtered_test_types = { + item.strip() for item in str(args.test_types).split(",") if item.strip() + } + + needs_json = _find_needs_json(args.needs_json) + all_needs = _load_needs(needs_json) + + summary = compute_traceability_summary( + all_needs=all_needs, + requirement_types=requirement_types, + include_not_implemented=args.include_not_implemented, + filtered_test_types=filtered_test_types, + ) + + req_total = int(summary["requirements"]["total"]) + req_with_code = int(summary["requirements"]["with_code_link"]) + req_with_test = int(summary["requirements"]["with_test_link"]) + req_fully_linked = int(summary["requirements"]["fully_linked"]) + req_code_pct = float(summary["requirements"]["with_code_link_pct"]) + req_test_pct = float(summary["requirements"]["with_test_link_pct"]) + req_fully_linked_pct = float(summary["requirements"]["fully_linked_pct"]) + req_missing_code = list(summary["requirements"]["missing_code_link_ids"]) + req_missing_test = list(summary["requirements"]["missing_test_link_ids"]) + req_not_fully_linked = list(summary["requirements"]["not_fully_linked_ids"]) + + tests_total = int(summary["tests"]["total"]) + tests_linked = int(summary["tests"]["linked_to_requirements"]) + tests_linked_pct = float(summary["tests"]["linked_to_requirements_pct"]) + broken_test_references = list(summary["tests"]["broken_references"]) + + summary_output = { + "needs_json": str(needs_json), + "requirement_types": summary["requirement_types"], + "include_not_implemented": summary["include_not_implemented"], + "requirements": summary["requirements"], + "tests": summary["tests"], + "thresholds": { + "min_req_code": float(args.min_req_code), + "min_req_test": float(args.min_req_test), + 
"min_req_fully_linked": float(args.min_req_fully_linked), + "min_tests_linked": float(args.min_tests_linked), + "fail_on_broken_test_refs": bool(args.fail_on_broken_test_refs), + }, + } + + _print_summary( + needs_json, + req_total, + req_with_code, + req_code_pct, + req_with_test, + req_test_pct, + req_fully_linked, + req_fully_linked_pct, + req_missing_code, + req_missing_test, + req_not_fully_linked, + args.print_unlinked_requirements, + tests_total, + tests_linked, + tests_linked_pct, + broken_test_references, + ) + + if args.json_output: + out_file = Path(args.json_output) + out_file.write_text(json.dumps(summary_output, indent=2), encoding="utf-8") + print(f"JSON summary written to: {out_file}") + + failures = _check_thresholds( + req_code_pct, + args.min_req_code, + req_test_pct, + args.min_req_test, + req_fully_linked_pct, + args.min_req_fully_linked, + tests_linked_pct, + args.min_tests_linked, + broken_test_references, + args.fail_on_broken_test_refs, + ) + + if failures: + print("Threshold check failed:") + for msg in failures: + print(f" - {msg}") + return 2 + + print("Threshold check passed.") + return 0 + + +if __name__ == "__main__": + raise SystemExit(main()) diff --git a/src/extensions/score_metamodel/checks/standards.py b/src/extensions/score_metamodel/checks/standards.py index 7d27f5bff..8460b267b 100644 --- a/src/extensions/score_metamodel/checks/standards.py +++ b/src/extensions/score_metamodel/checks/standards.py @@ -14,6 +14,13 @@ from sphinx_needs.need_item import NeedItem +from ..sphinx_filters import ( + generic_pie_items_by_tag, + generic_pie_items_in_relationships, + generic_pie_linked_items, + generic_pie_workproducts_by_type, +) + # from score_metamodel import ( # CheckLogger, # graph_check, @@ -186,27 +193,8 @@ def my_pie_linked_standard_requirements( Passed arguments can be accessed via kwargs['arg'] See: https://sphinx-needs.readthedocs.io/en/latest/filter.html#arguments """ - cnt_connected = 0 - cnt_not_connected = 0 - - standard = 
kwargs["arg1"] - - all_standards_needs = get_standards_needs(needs) - standards_needs = { - k: v - for k, v in all_standards_needs.items() - if k.startswith(f"std_req__{standard}__") - } - compliance_req_needs = get_compliance_req_needs(needs) - - for need in standards_needs.values(): - if need["id"] in compliance_req_needs: - cnt_connected += 1 - else: - cnt_not_connected += 1 - - results.append(cnt_connected) - results.append(cnt_not_connected) + standard = str(kwargs["arg1"]) + generic_pie_linked_items(needs, results, arg1=f"std_req__{standard}__", arg2="gd_") def my_pie_linked_standard_requirements_by_tag( @@ -238,23 +226,10 @@ def my_pie_linked_standard_requirements_by_tag( the mutated `results`list, and use this to display/generate the piechart. """ - count_linked = 0 - count_non_linked = 0 - - tag = str(kwargs["arg1"]) assert len(kwargs) == 1, ( "Can only provide one tag to `my_pie_linked_standard_requirements_by_tag`" ) - - compliance_req_needs = get_compliance_req_needs(needs) - for need in needs: - if tag in need["tags"]: - if need["id"] in compliance_req_needs: - count_linked += 1 - else: - count_non_linked += 1 - results.append(count_linked) - results.append(count_non_linked) + generic_pie_items_by_tag(needs, results, arg1=kwargs["arg1"], arg2="gd_") def my_pie_linked_standard_workproducts( @@ -267,28 +242,10 @@ def my_pie_linked_standard_workproducts( Passed arguments can be accessed via kwargs['arg'] See: https://sphinx-needs.readthedocs.io/en/latest/filter.html#arguments """ - cwp_connected = 0 - cwp_not_connected = 0 - - standard = kwargs["arg1"] - - all_standard_workproducts = get_standards_workproducts(needs) - standard_workproducts = { - k: v - for k, v in all_standard_workproducts.items() - if k.startswith(f"std_wp__{standard}__") - } - - compliance_wp_needs = get_compliance_wp_needs(needs) - - for need in standard_workproducts.values(): - if need["id"] in compliance_wp_needs: - cwp_connected += 1 - else: - cwp_not_connected += 1 - - 
results.append(cwp_connected) - results.append(cwp_not_connected) + standard = str(kwargs["arg1"]) + generic_pie_workproducts_by_type( + needs, results, arg1=f"std_wp__{standard}__", arg2="workproduct" + ) def my_pie_workproducts_contained_in_exactly_one_workflow( @@ -299,33 +256,6 @@ def my_pie_workproducts_contained_in_exactly_one_workflow( in exactly one workflow, the not connected once and the once that are connected to multiple workflows. """ - all_workflows = get_workflows(needs) - all_workproducts = get_workproducts(needs) - - # Map to track counts for each workproduct and their associated workflows - workproduct_analysis = {wp["id"]: {"count": 0} for wp in all_workproducts.values()} - - # Iterate over workflows and update the counts and workflows - for workflow in all_workflows.values(): - for output in workflow.get("output", []): - # Increment count and add workflow_id if workproduct is in analysis - if output in workproduct_analysis: - workproduct_analysis[output]["count"] += 1 - - not_connected_wp = 0 - nb_wp_connected_to_one_workflow = 0 - nb_wp_connected_to_more_than_one_workflow = 0 - - for analysis in workproduct_analysis.values(): - count = analysis["count"] - - if count == 0: - not_connected_wp += 1 - elif count == 1: - nb_wp_connected_to_one_workflow += 1 - else: - nb_wp_connected_to_more_than_one_workflow += 1 - - results.append(not_connected_wp) - results.append(nb_wp_connected_to_one_workflow) - results.append(nb_wp_connected_to_more_than_one_workflow) + generic_pie_items_in_relationships( + needs, results, arg1="workflow", arg2="output", arg3="workproduct" + ) diff --git a/src/extensions/score_metamodel/checks/traceability_dashboard.py b/src/extensions/score_metamodel/checks/traceability_dashboard.py new file mode 100644 index 000000000..5d109030c --- /dev/null +++ b/src/extensions/score_metamodel/checks/traceability_dashboard.py @@ -0,0 +1,77 @@ +# ******************************************************************************* +# Copyright 
(c) 2026 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +"""Needpie filter functions backed by shared traceability metric calculations.""" + +from __future__ import annotations + +from sphinx_needs.need_item import NeedItem + +from ..traceability_metrics import compute_traceability_summary, filter_requirements + + +def _requirement_types(kwargs: dict[str, str | int | float]) -> set[str]: + raw = str(kwargs.get("arg1", "tool_req")).strip() + values = {value.strip() for value in raw.split(",") if value.strip()} + return values or {"tool_req"} + + +def pie_requirements_status( + needs: list[NeedItem], results: list[int], **kwargs: str | int | float +) -> None: + """Dashboard status split: not implemented, implemented/incomplete, fully linked.""" + req_types = _requirement_types(kwargs) + + all_requirements = filter_requirements( + needs, + requirement_types=req_types, + include_not_implemented=True, + ) + implemented_requirements = filter_requirements( + needs, + requirement_types=req_types, + include_not_implemented=False, + ) + summary = compute_traceability_summary( + all_needs=needs, + requirement_types=req_types, + include_not_implemented=False, + filtered_test_types=set(), + ) + + not_implemented = len(all_requirements) - len(implemented_requirements) + fully_linked = int(summary["requirements"]["fully_linked"]) + implemented_incomplete = len(implemented_requirements) - fully_linked + + results.append(not_implemented) + results.append(implemented_incomplete) + results.append(fully_linked) + + +def pie_requirements_with_code_links( + needs: 
list[NeedItem], results: list[int], **kwargs: str | int | float +) -> None: + """Dashboard split: requirements with and without source code links.""" + req_types = _requirement_types(kwargs) + summary = compute_traceability_summary( + all_needs=needs, + requirement_types=req_types, + include_not_implemented=True, + filtered_test_types=set(), + ) + + total = int(summary["requirements"]["total"]) + with_code = int(summary["requirements"]["with_code_link"]) + + results.append(total - with_code) + results.append(with_code) diff --git a/src/extensions/score_metamodel/sphinx_filters.py b/src/extensions/score_metamodel/sphinx_filters.py new file mode 100644 index 000000000..0a5f4fc1b --- /dev/null +++ b/src/extensions/score_metamodel/sphinx_filters.py @@ -0,0 +1,188 @@ +# ******************************************************************************* +# Copyright (c) 2025 Contributors to the Eclipse Foundation +# +# See the NOTICE file(s) distributed with this work for additional +# information regarding copyright ownership. +# +# This program and the accompanying materials are made available under the +# terms of the Apache License Version 2.0 which is available at +# https://www.apache.org/licenses/LICENSE-2.0 +# +# SPDX-License-Identifier: Apache-2.0 +# ******************************************************************************* + +"""Generic sphinx-needs filter functions for ``needpie`` directives. + +These functions are fully parameterizable and designed to be called directly +by consumers of docs-as-code (e.g. reference-integration repos) when they +pull in the ``score_docs_as_code`` Bazel module. All functions follow the +sphinx-needs ``filter-func`` signature convention: + +.. code-block:: python + + def func(needs: list[NeedItem], results: list[int], **kwargs) -> None: ... + +Arguments are injected from the ``:filter-func:`` call-site as positional +``arg1``, ``arg2``, … keyword arguments. + +Example usage in RST:: + + .. 
from __future__ import annotations

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Type-only import: keeps annotations precise without requiring
    # sphinx-needs at import time.
    from sphinx_needs.need_item import NeedItem


def _complying_ids(needs: list[NeedItem], type_prefix: str) -> set[str]:
    """Collect every need id referenced via ``complies`` by needs whose
    ``type`` starts with *type_prefix*. Empty references are ignored."""
    referenced: set[str] = set()
    for need in needs:
        if str(need.get("type", "")).startswith(type_prefix):
            for ref in need.get("complies", []):
                if ref:
                    referenced.add(ref)
    return referenced


def generic_pie_linked_items(
    needs: list[NeedItem], results: list[int], **kwargs: str | int | float
) -> None:
    """Count items matching an ID prefix, split by compliance linkage.

    :filter-func: arguments:

    - ``arg1`` – id prefix of the items to count
      (e.g. ``std_req__iso26262__``)
    - ``arg2`` – type prefix of the source needs whose ``complies``
      lists are scanned (e.g. ``gd_``)

    Appends to *results*: ``[linked_count, not_linked_count]``
    """
    id_prefix = str(kwargs.get("arg1", ""))
    covered = _complying_ids(needs, str(kwargs.get("arg2", "")))

    total = 0
    linked = 0
    for need in needs:
        need_id = str(need.get("id", ""))
        if not need_id.startswith(id_prefix):
            continue
        total += 1
        if need_id in covered:
            linked += 1

    results.extend((linked, total - linked))


def generic_pie_items_by_tag(
    needs: list[NeedItem], results: list[int], **kwargs: str | int | float
) -> None:
    """Count items carrying a given tag, split by compliance linkage.

    :filter-func: arguments:

    - ``arg1`` – tag to filter by (e.g. ``aspice40_man5``).
      Note: tag values must not contain dots.
    - ``arg2`` – type prefix of the source needs whose ``complies``
      lists are scanned (e.g. ``gd_``)

    Appends to *results*: ``[linked_count, not_linked_count]``
    """
    wanted_tag = str(kwargs.get("arg1", ""))
    covered = _complying_ids(needs, str(kwargs.get("arg2", "")))

    tagged = [need for need in needs if wanted_tag in need.get("tags", [])]
    linked = sum(1 for need in tagged if str(need.get("id", "")) in covered)

    results.extend((linked, len(tagged) - linked))


def generic_pie_workproducts_by_type(
    needs: list[NeedItem], results: list[int], **kwargs: str | int | float
) -> None:
    """Count work-product items matching an ID prefix, split by linkage.

    Work-product variant of :func:`generic_pie_linked_items`: ``arg2`` is
    typically an exact type (e.g. ``workproduct``) rather than a prefix,
    and since an exact string is its own prefix the shared logic applies
    unchanged.

    :filter-func: arguments:

    - ``arg1`` – id prefix of the work-product items to count
      (e.g. ``std_wp__iso26262__``)
    - ``arg2`` – type (or type prefix) of source needs whose ``complies``
      lists are scanned (e.g. ``workproduct``)

    Appends to *results*: ``[linked_count, not_linked_count]``
    """
    generic_pie_linked_items(needs, results, **kwargs)


def generic_pie_items_in_relationships(
    needs: list[NeedItem], results: list[int], **kwargs: str | int | float
) -> None:
    """Bucket items of one type by how many containers reference them.

    For every need of type *arg3*, counts how many needs of type *arg1*
    list its id in their *arg2* field, then splits the items into three
    buckets: never referenced, referenced exactly once, referenced more
    than once.

    :filter-func: arguments:

    - ``arg1`` – type of the container needs (e.g. ``workflow``)
    - ``arg2`` – field on the container that holds references
      (e.g. ``output``)
    - ``arg3`` – type of the items to count (e.g. ``workproduct``)

    Appends to *results*:
    ``[not_referenced_count, referenced_once_count, referenced_multiple_count]``
    """
    container_type = str(kwargs.get("arg1", ""))
    ref_field = str(kwargs.get("arg2", ""))
    item_type = str(kwargs.get("arg3", ""))

    reference_counts: dict[str, int] = {
        str(need.get("id", "")): 0
        for need in needs
        if need.get("type") == item_type
    }

    for need in needs:
        if need.get("type") != container_type:
            continue
        for ref in need.get(ref_field, []):
            if ref in reference_counts:
                reference_counts[ref] += 1

    # buckets: [never, exactly once, more than once]
    buckets = [0, 0, 0]
    for count in reference_counts.values():
        buckets[min(count, 2)] += 1
    results.extend(buckets)
#
# This program and the accompanying materials are made available under the
# terms of the Apache License Version 2.0 which is available at
# https://www.apache.org/licenses/LICENSE-2.0
#
# SPDX-License-Identifier: Apache-2.0
# *******************************************************************************

"""Shared traceability metric calculations for CI checks and dashboards."""

from __future__ import annotations

from collections.abc import Sequence
from typing import Any


def is_non_empty(value: Any) -> bool:
    """Return True if value should be treated as present for traceability checks."""
    # Whitespace-only strings count as absent; everything else uses truthiness.
    if isinstance(value, str):
        return value.strip() != ""
    return bool(value)


def parse_need_id_list(value: Any) -> list[str]:
    """Normalize need-id lists encoded as CSV strings or string lists."""
    if isinstance(value, str):
        candidates: list[Any] = value.split(",")
    elif isinstance(value, list):
        candidates = value
    else:
        # None and any other shape normalize to an empty list.
        return []
    return [
        token.strip()
        for token in candidates
        if isinstance(token, str) and token.strip()
    ]


def safe_percent(numerator: int, denominator: int) -> float:
    """Return percentage in range [0, 100], treating empty denominator as 100%."""
    if denominator == 0:
        # Vacuously satisfied: nothing to check means full coverage.
        return 100.0
    return (numerator / denominator) * 100.0


def filter_requirements(
    all_needs: Sequence[Any],
    requirement_types: set[str],
    include_not_implemented: bool,
) -> list[Any]:
    """Extract requirements by type and (optionally) implementation state."""
    selected: list[Any] = []
    for need in all_needs:
        if str(need.get("type", "")).strip() not in requirement_types:
            continue
        if not include_not_implemented:
            state = str(need.get("implemented", "")).upper().strip()
            if state not in ("YES", "PARTIAL"):
                continue
        selected.append(need)
    return selected


def calculate_requirement_metrics(
    requirements: Sequence[Any],
) -> dict[str, Any]:
    """Calculate requirement traceability statistics for links and completeness.

    Returns counts, percentages, and sorted id lists for requirements
    missing a code link, missing a test link, or missing either.
    """
    with_code = 0
    with_test = 0
    fully_linked = 0
    missing_code_ids: list[str] = []
    missing_test_ids: list[str] = []
    not_fully_linked_ids: list[str] = []

    for need in requirements:
        has_code = is_non_empty(need.get("source_code_link"))
        has_test = is_non_empty(need.get("testlink"))
        if has_code:
            with_code += 1
        if has_test:
            with_test += 1
        if has_code and has_test:
            fully_linked += 1
        if not need.get("id"):
            # Needs without an id are counted in totals but never listed.
            continue
        need_id = str(need.get("id", ""))
        if not has_code:
            missing_code_ids.append(need_id)
        if not has_test:
            missing_test_ids.append(need_id)
        if not (has_code and has_test):
            not_fully_linked_ids.append(need_id)

    total = len(requirements)
    return {
        "total": total,
        "with_code_link": with_code,
        "with_test_link": with_test,
        "fully_linked": fully_linked,
        "with_code_link_pct": safe_percent(with_code, total),
        "with_test_link_pct": safe_percent(with_test, total),
        "fully_linked_pct": safe_percent(fully_linked, total),
        "missing_code_link_ids": sorted(missing_code_ids),
        "missing_test_link_ids": sorted(missing_test_ids),
        "not_fully_linked_ids": sorted(not_fully_linked_ids),
    }


def calculate_test_metrics(
    all_needs: Sequence[Any],
    requirement_ids: set[str],
    filtered_test_types: set[str],
) -> dict[str, Any]:
    """Calculate testcase linkage and broken testcase-reference statistics.

    A testcase counts as linked when it references at least one requirement
    (via ``partially_verifies``/``fully_verifies`` or their legacy
    ``PartiallyVerifies``/``FullyVerifies`` spellings); references to ids
    outside *requirement_ids* are reported as broken.
    """

    def _is_testcase(need: Any) -> bool:
        return str(need.get("type", "")).strip() == "testcase"

    def _test_type(need: Any) -> str:
        # Legacy key "TestType" is the fallback for a missing "test_type".
        return str(need.get("test_type", need.get("TestType", ""))).strip()

    selected = [need for need in all_needs if _is_testcase(need)]
    if filtered_test_types:
        selected = [
            need for need in selected if _test_type(need) in filtered_test_types
        ]

    linked = 0
    broken: list[dict[str, str]] = []
    for need in selected:
        refs = parse_need_id_list(
            need.get("partially_verifies", need.get("PartiallyVerifies"))
        ) + parse_need_id_list(
            need.get("fully_verifies", need.get("FullyVerifies"))
        )
        if refs:
            linked += 1
        test_id = str(need.get("id", ""))
        for ref in refs:
            if ref not in requirement_ids:
                broken.append({"testcase": test_id, "missing_need": ref})

    return {
        "total": len(selected),
        "filtered_test_types": sorted(filtered_test_types),
        "linked_to_requirements": linked,
        "linked_to_requirements_pct": safe_percent(linked, len(selected)),
        "broken_references": broken,
    }


def compute_traceability_summary(
    all_needs: Sequence[Any],
    requirement_types: set[str],
    include_not_implemented: bool,
    filtered_test_types: set[str],
) -> dict[str, Any]:
    """Return full CI/dashboard summary using one shared metric implementation."""
    requirements = filter_requirements(
        all_needs,
        requirement_types=requirement_types,
        include_not_implemented=include_not_implemented,
    )
    known_ids = {
        str(need.get("id", "")).strip() for need in requirements if need.get("id")
    }

    return {
        "requirement_types": sorted(requirement_types),
        "include_not_implemented": include_not_implemented,
        "requirements": calculate_requirement_metrics(requirements),
        "tests": calculate_test_metrics(
            all_needs,
            requirement_ids=known_ids,
            filtered_test_types=filtered_test_types,
        ),
    }