Add method to check if benchmarks are mixed (#1977)
Currently, FuzzBench fails to generate a report if the selected benchmarks
are a mix of bug and coverage benchmarks.

This PR adds a method that checks whether the selected benchmarks mix bug
and coverage benchmarks before the experiment starts, so the problem is
caught early and no experiment time is wasted.
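
As an illustration, here is a minimal sketch (not part of this commit) of how the new check can reject a mixed selection up front. It assumes it runs inside a FuzzBench checkout so that benchmark_utils can read each benchmark's config, and the two benchmark names are taken from the unit tests added below, which treat this pair as mixed.

from common import benchmark_utils

# A pair the new tests treat as mixed: one bug and one coverage benchmark.
selected = ['mbedtls_fuzz_dtlsclient_7c6b0e', 'mbedtls_fuzz_dtlsclient']

# are_benchmarks_mixed() returns True when the list contains more than one
# benchmark type, so a mixed selection can be rejected before any build work.
if benchmark_utils.are_benchmarks_mixed(selected):
    # run_experiment.py raises a ValidationError at this point; a plain
    # exception keeps this sketch self-contained.
    raise ValueError('Bug and coverage benchmarks cannot be mixed in one '
                     'experiment.')

In run_experiment.py below, the same check runs during argument validation, so a mixed selection fails fast instead of after an expensive experiment run.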

---------

Co-authored-by: gustavogaldinoo <[email protected]>
gustavogaldinoo authored May 8, 2024
1 parent 162ca0c commit 6228338
Showing 3 changed files with 44 additions and 4 deletions.
20 changes: 16 additions & 4 deletions common/benchmark_utils.py
@@ -133,18 +133,18 @@ def get_all_benchmarks():
     return sorted(all_benchmarks)


-def get_coverage_benchmarks():
+def get_coverage_benchmarks(benchmarks=get_all_benchmarks()):
     """Returns the list of all coverage benchmarks."""
     return [
-        benchmark for benchmark in get_all_benchmarks()
+        benchmark for benchmark in benchmarks
         if get_type(benchmark) == BenchmarkType.CODE.value
     ]


-def get_bug_benchmarks():
+def get_bug_benchmarks(benchmarks=get_all_benchmarks()):
     """Returns the list of standard bug benchmarks."""
     return [
-        benchmark for benchmark in get_all_benchmarks()
+        benchmark for benchmark in benchmarks
         if get_type(benchmark) == BenchmarkType.BUG.value
     ]

@@ -163,3 +163,15 @@ def get_language(benchmark):
"""Returns the prorgamming language the benchmark was written in."""
config = benchmark_config.get_config(benchmark)
return config.get('language', 'c++')


def are_benchmarks_mixed(benchmarks=None):
"""Returns True if benchmarks list is a mix of bugs and coverage
benchmarks."""
if benchmarks is None:
benchmarks = get_all_benchmarks()

unique_benchmarks_types = set(
get_type(benchmark) for benchmark in benchmarks)

return len(unique_benchmarks_types) > 1
19 changes: 19 additions & 0 deletions common/test_benchmark_utils.py
@@ -81,3 +81,22 @@ def test_validate_type_valid(benchmark_type):
 def test_get_default_type(_):
     """Tests that get_type returns the correct default value."""
     assert benchmark_utils.get_type('benchmark') == 'code'
+
+
+@pytest.mark.parametrize(
+    ('benchmarks'),
+    [['mbedtls_fuzz_dtlsclient_7c6b0e', 'mbedtls_fuzz_dtlsclient'],
+     ['bloaty_fuzz_target', 'bloaty_fuzz_target_52948c']])
+def test_are_benchmarks_mixed_valid(benchmarks):
+    """Tests that are_benchmarks_mixed returns True
+    for a list that has both bug and coverage benchmarks."""
+    assert benchmark_utils.are_benchmarks_mixed(benchmarks)
+
+
+@pytest.mark.parametrize(
+    ('benchmarks'),
+    [['mbedtls_fuzz_dtlsclient_7c6b0e'], ['mbedtls_fuzz_dtlsclient'], []])
+def test_are_benchmarks_mixed_invalid(benchmarks):
+    """Tests that are_benchmarks_mixed returns False
+    for a list that has only bug or only coverage benchmarks."""
+    assert not benchmark_utils.are_benchmarks_mixed(benchmarks)
9 changes: 9 additions & 0 deletions experiment/run_experiment.py
@@ -625,6 +625,7 @@ def run_experiment_main(args=None):

     all_benchmarks = benchmark_utils.get_all_benchmarks()
     coverage_benchmarks = benchmark_utils.get_coverage_benchmarks()
+
     parser.add_argument('-b',
                         '--benchmarks',
                         help=('Benchmark names. '
@@ -743,6 +744,14 @@
         parser.error('Cannot enable options "custom_seed_corpus_dir" and '
                      '"oss_fuzz_corpus" at the same time')

+    if benchmark_utils.are_benchmarks_mixed(args.benchmarks):
+        benchmark_types = ';'.join(
+            [f'{b}: {benchmark_utils.get_type(b)}' for b in args.benchmarks])
+        raise ValidationError(
+            'Selected benchmarks are a mix between coverage '
+            'and bug benchmarks. This is currently not supported. '
+            f'Selected benchmarks: {benchmark_types}')
+
     start_experiment(args.experiment_name,
                      args.experiment_config,
                      args.benchmarks,