Bump version to 3.5.1 for PyPI re-release #11

Workflow file for this run

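# Benchmark workflow: builds the kalign C binary and Python package, runs
# BAliBASE alignment benchmarks, and tracks results over time with
# github-action-benchmark.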
name: Benchmark
on:
  push:
    branches: [main]
    paths:
      - 'lib/**'
      - 'python-kalign/**'
      - 'benchmarks/**'
      - 'CMakeLists.txt'
      - '.github/workflows/benchmark.yml'
  pull_request:
    branches: [main]
    paths:
      - 'lib/**'
      - 'python-kalign/**'
      - 'benchmarks/**'
      - 'CMakeLists.txt'
      - '.github/workflows/benchmark.yml'
  workflow_dispatch:
permissions:
  contents: write
  deployments: write
jobs:
  benchmark:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
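      # libomp-dev provides an OpenMP runtime for the C build
      # (assumption: the CMake build enables OpenMP for multithreading).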
      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y libomp-dev cmake
      - name: Build C binary
        run: |
          mkdir -p build
          cd build
          cmake ..
          make -j$(nproc)
      - name: Install Python package
        run: |
          python -m pip install --upgrade pip
          python -m pip install -e .
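      # actions/cache entries are immutable per key: bump the version
      # suffix in the key below to force a fresh dataset download.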
      - name: Cache BAliBASE dataset
        uses: actions/cache@v4
        with:
          path: benchmarks/data/downloads
          key: benchmark-datasets-balibase-v1
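      # Benchmark both the Python API and the CLI binary on BAliBASE;
      # per-method summaries land in latest.json.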
      - name: Run benchmarks
        run: |
          python -m benchmarks \
            --dataset balibase \
            --method python_api cli \
            --binary build/src/kalign \
            --output benchmarks/results/latest.json \
            -v
      - name: Check if results were produced
        id: check_results
        run: |
          if [ -f benchmarks/results/latest.json ]; then
            echo "has_results=true" >> "$GITHUB_OUTPUT"
          else
            echo "::warning::No benchmark results produced (dataset download may have failed)"
            echo "has_results=false" >> "$GITHUB_OUTPUT"
          fi
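      # Flatten the per-method summary into the JSON array that
      # github-action-benchmark's 'customBiggerIsBetter' tool consumes
      # (objects with name, unit, value, and an optional range). Note that
      # this tool treats higher values as better, which fits SP scores but
      # inverts the sense of the time series.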
      - name: Convert results for github-action-benchmark
        if: steps.check_results.outputs.has_results == 'true'
        run: |
          python -c "
          import json
          with open('benchmarks/results/latest.json') as f:
              data = json.load(f)
          entries = []
          for method, stats in data.get('summary', {}).items():
              entries.append({
                  'name': f'SP Score Mean ({method})',
                  'unit': 'score',
                  'value': round(stats['sp_mean'], 2),
                  'range': f\"{stats['sp_min']:.1f}-{stats['sp_max']:.1f}\",
              })
              entries.append({
                  'name': f'Total Time ({method})',
                  'unit': 'seconds',
                  'value': round(stats['total_time'], 2),
              })
          with open('benchmarks/results/benchmark_output.json', 'w') as f:
              json.dump(entries, f, indent=2)
          "
      - name: Store benchmark result
        if: github.ref == 'refs/heads/main' && steps.check_results.outputs.has_results == 'true'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: 'customBiggerIsBetter'
          output-file-path: benchmarks/results/benchmark_output.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          gh-pages-branch: gh-pages
          benchmark-data-dir-path: dev/bench
          auto-push: true
          alert-threshold: '95%'
          comment-on-alert: true
          fail-on-alert: false
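      # The step above pushes benchmark history to gh-pages (dev/bench) on
      # main only; on pull requests the step below compares against that
      # baseline without pushing, and fails the job on a regression past
      # the alert threshold.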
      - name: Compare with baseline (PRs only)
        if: github.event_name == 'pull_request' && steps.check_results.outputs.has_results == 'true'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          tool: 'customBiggerIsBetter'
          output-file-path: benchmarks/results/benchmark_output.json
          github-token: ${{ secrets.GITHUB_TOKEN }}
          gh-pages-branch: gh-pages
          benchmark-data-dir-path: dev/bench
          auto-push: false
          alert-threshold: '95%'
          comment-on-alert: true
          fail-on-alert: true
      - name: Upload results artifact
        if: steps.check_results.outputs.has_results == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results
          path: benchmarks/results/