diff --git a/.github/workflows/sycl-ur-perf-benchmarking.yml b/.github/workflows/sycl-ur-perf-benchmarking.yml index 83ff32bfc65b9..e20a9c439e53d 100644 --- a/.github/workflows/sycl-ur-perf-benchmarking.yml +++ b/.github/workflows/sycl-ur-perf-benchmarking.yml @@ -65,6 +65,7 @@ on: options: - '["PVC_PERF"]' - '["BMG_PERF"]' + - '["TEST_PERF"]' backend: description: Backend to use type: choice @@ -257,8 +258,8 @@ jobs: if: github.event_name == 'pull_request' uses: ./.github/workflows/sycl-linux-run-tests.yml with: - name: 'Framework test: PVC_PERF, L0, Minimal preset' - runner: '["PVC_PERF"]' + name: 'Framework test only: L0, Minimal preset, dry-run' + runner: '["TEST_PERF"]' image: ghcr.io/intel/llvm/sycl_ubuntu2404_nightly:latest image_options: -u 1001 --device=/dev/dri -v /dev/dri/by-path:/dev/dri/by-path --privileged --cap-add SYS_ADMIN target_devices: 'level_zero:gpu' diff --git a/devops/actions/run-tests/benchmark/action.yml b/devops/actions/run-tests/benchmark/action.yml index 443cbb9386f10..b3e1555967bd4 100644 --- a/devops/actions/run-tests/benchmark/action.yml +++ b/devops/actions/run-tests/benchmark/action.yml @@ -67,16 +67,15 @@ runs: case "$RUNNER_TAG" in '["PVC_PERF"]') GPU_TYPE="PVC" ;; '["BMG_PERF"]') GPU_TYPE="BMG" ;; - *) - # Best effort at matching if not known runners - # TODO: should we drop it and just exit instead? - GPU_TYPE="${RUNNER_TAG#[\"}" - GPU_TYPE="${GPU_TYPE%_PERF=\"]}" - echo "#" - echo "# WARNING: Only specific tuned runners are fully supported." - echo "# This workflow is not guaranteed to work with other runners." - echo "#" ;; + '["TEST_PERF"]') + # Benchmarks' framework test can be executed on any of the supported runners. + gpu_name=$(printf '%s' "$RUNNER_NAME" | grep -Eo 'BMG|PVC' || true) + [ -n "$gpu_name" ] && GPU_TYPE="$gpu_name" ;; esac + if [ -z "$GPU_TYPE" ]; then + echo "# ERROR: Only specific tuned runners are fully supported."
+ exit 1 + fi case "$TARGET_DEVICE" in level_zero:*) @@ -235,12 +234,13 @@ runs: # Install perf in version matching the host kernel. # Linux tools installed during docker creation may not match the self-hosted # kernel version, so we need to install the correct version here. - if [ "$RUNNER_TAG" = '["BMG_PERF"]' ]; then - echo "Adding repositories for Ubuntu 25.10 (Questing) on BMG_PERF runner" + if [ "$GPU_TYPE" = "BMG" ]; then + echo "Adding repositories for Ubuntu 25.10 (Questing) for BMG runners" echo "deb http://archive.ubuntu.com/ubuntu/ questing main restricted universe multiverse" | sudo tee /etc/apt/sources.list.d/questing.list echo "deb http://archive.ubuntu.com/ubuntu/ questing-updates main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list.d/questing.list echo "deb http://security.ubuntu.com/ubuntu/ questing-security main restricted universe multiverse" | sudo tee -a /etc/apt/sources.list.d/questing.list fi + sudo apt-get update sudo apt-get install -y linux-tools-$(uname -r) diff --git a/devops/scripts/benchmarks/tests/test_integration.py b/devops/scripts/benchmarks/tests/test_integration.py index adb4f2d2a7a7d..ac74d0eb15d12 100644 --- a/devops/scripts/benchmarks/tests/test_integration.py +++ b/devops/scripts/benchmarks/tests/test_integration.py @@ -189,8 +189,8 @@ def test_record_and_replay(self): def test_submit_kernel(self): self._checkCase( - "api_overhead_benchmark_l0 SubmitKernel out of order with measure completion KernelExecTime=20", - "SubmitKernel out of order with completion using events long kernel", + "api_overhead_benchmark_l0 SubmitKernel out of order with measure completion", + "SubmitKernel out of order with completion using events", {"L0", "latency", "micro", "submit"}, ) diff --git a/devops/scripts/benchmarks/utils/utils.py b/devops/scripts/benchmarks/utils/utils.py index a39b40d727281..b2c36406bc0bc 100644 --- a/devops/scripts/benchmarks/utils/utils.py +++ b/devops/scripts/benchmarks/utils/utils.py @@ -195,7 +195,7 
@@ def get_device_architecture(additional_env_vars): if len(architectures) != 1: raise ValueError( - f"Expected exactly one device architecture, but found {len(architectures)}: {architectures}." + f"Expected exactly one device architecture, but found {len(architectures)}: {architectures}. " "Set ONEAPI_DEVICE_SELECTOR=backend:device_id to specify a single device." )