From d979a81a79eacc982790400e3a3d64e7bd6662da Mon Sep 17 00:00:00 2001
From: Aleksandr Nogikh
Date: Mon, 27 May 2024 16:42:53 +0200
Subject: [PATCH] all: do comparison substitution for extra coverage

Collect comparison arguments for extra coverage. For that, we now need
to start remote coverage collection for every forked program.
Substitute the arguments into all calls that have remote_cover set.
---
 executor/executor.cc | 69 ++++++++++++++++++--------------------------
 pkg/fuzzer/job.go    | 35 +++++++++++++++-------
 pkg/ipc/ipc.go       |  3 ++
 3 files changed, 55 insertions(+), 52 deletions(-)

diff --git a/executor/executor.cc b/executor/executor.cc
index 71a3768700ac..55d1722b888b 100644
--- a/executor/executor.cc
+++ b/executor/executor.cc
@@ -508,10 +508,6 @@ int main(int argc, char** argv)
 	cover_open(&extra_cov, true);
 	cover_mmap(&extra_cov);
 	cover_protect(&extra_cov);
-	if (flag_extra_coverage) {
-		// Don't enable comps because we don't use them in the fuzzer yet.
-		cover_enable(&extra_cov, false, true);
-	}
 	char sep = '/';
 #if GOOS_windows
 	sep = '\\';
@@ -753,7 +749,7 @@ void execute_one()
 		if (!flag_threaded)
 			cover_enable(&threads[0].cov, flag_comparisons, false);
 		if (flag_extra_coverage)
-			cover_reset(&extra_cov);
+			cover_enable(&extra_cov, flag_comparisons, true);
 	}
 
 	int call_index = 0;
@@ -1110,20 +1106,11 @@ void copyout_call_results(thread_t* th)
 	}
 }
 
-void write_call_output(thread_t* th, bool finished)
+void write_call_shmem_output(int call_index, int call_num, uint32 reserrno, uint32 call_flags, cover_t* cov)
 {
-	uint32 reserrno = ENOSYS;
-	const bool blocked = finished && th != last_scheduled;
-	uint32 call_flags = call_flag_executed | (blocked ? call_flag_blocked : 0);
-	if (finished) {
-		reserrno = th->res != -1 ? 0 : th->reserrno;
-		call_flags |= call_flag_finished |
-			      (th->fault_injected ? call_flag_fault_injected : 0);
-	}
-#if SYZ_EXECUTOR_USES_SHMEM
 	write_output(kOutMagic);
-	write_output(th->call_index);
-	write_output(th->call_num);
+	write_output(call_index);
+	write_output(call_num);
 	write_output(reserrno);
 	write_output(call_flags);
 	uint32* signal_count_pos = write_output(0); // filled in later
@@ -1132,15 +1119,15 @@ void write_call_output(thread_t* th, bool finished)
 
 	if (flag_comparisons) {
 		// Collect only the comparisons
-		uint32 ncomps = th->cov.size;
-		kcov_comparison_t* start = (kcov_comparison_t*)(th->cov.data + sizeof(uint64));
+		uint32 ncomps = cov->size;
+		kcov_comparison_t* start = (kcov_comparison_t*)(cov->data + sizeof(uint64));
 		kcov_comparison_t* end = start + ncomps;
-		if ((char*)end > th->cov.data_end)
+		if ((char*)end > cov->data_end)
 			failmsg("too many comparisons", "ncomps=%u", ncomps);
-		cover_unprotect(&th->cov);
+		cover_unprotect(cov);
 		std::sort(start, end);
 		ncomps = std::unique(start, end) - start;
-		cover_protect(&th->cov);
+		cover_protect(cov);
 		uint32 comps_size = 0;
 		for (uint32 i = 0; i < ncomps; ++i) {
 			if (start[i].ignore())
@@ -1152,15 +1139,29 @@ void write_call_output(thread_t* th, bool finished)
 		*comps_count_pos = comps_size;
 	} else if (flag_collect_signal || flag_collect_cover) {
 		if (is_kernel_64_bit)
-			write_coverage_signal<uint64>(&th->cov, signal_count_pos, cover_count_pos);
+			write_coverage_signal<uint64>(cov, signal_count_pos, cover_count_pos);
 		else
-			write_coverage_signal<uint32>(&th->cov, signal_count_pos, cover_count_pos);
+			write_coverage_signal<uint32>(cov, signal_count_pos, cover_count_pos);
 	}
 	debug_verbose("out #%u: index=%u num=%u errno=%d finished=%d blocked=%d sig=%u cover=%u comps=%u\n",
-		      completed, th->call_index, th->call_num, reserrno, finished, blocked,
+		      completed, call_index, call_num, reserrno, finished, blocked,
 		      *signal_count_pos, *cover_count_pos, *comps_count_pos);
 	completed++;
 	write_completed(completed);
+}
+
+void write_call_output(thread_t* th, bool finished)
+{
+	uint32 reserrno = ENOSYS;
+	const bool blocked = finished && th != last_scheduled;
+	uint32 call_flags = call_flag_executed | (blocked ? call_flag_blocked : 0);
+	if (finished) {
+		reserrno = th->res != -1 ? 0 : th->reserrno;
+		call_flags |= call_flag_finished |
+			      (th->fault_injected ? call_flag_fault_injected : 0);
+	}
+#if SYZ_EXECUTOR_USES_SHMEM
+	write_call_shmem_output(th->call_index, th->call_num, reserrno, call_flags, &th->cov);
 #else
 	call_reply reply;
 	reply.header.magic = kOutMagic;
@@ -1184,27 +1185,13 @@ void write_call_output(thread_t* th, bool finished)
 void write_extra_output()
 {
 #if SYZ_EXECUTOR_USES_SHMEM
-	if (!cover_collection_required() || !flag_extra_coverage || flag_comparisons)
+	if (!cover_collection_required() || !flag_extra_coverage)
 		return;
 	cover_collect(&extra_cov);
 	if (!extra_cov.size)
 		return;
-	write_output(kOutMagic);
-	write_output(-1); // call index
-	write_output(-1); // call num
-	write_output(999); // errno
-	write_output(0); // call flags
-	uint32* signal_count_pos = write_output(0); // filled in later
-	uint32* cover_count_pos = write_output(0); // filled in later
-	write_output(0); // comps_count_pos
-	if (is_kernel_64_bit)
-		write_coverage_signal<uint64>(&extra_cov, signal_count_pos, cover_count_pos);
-	else
-		write_coverage_signal<uint32>(&extra_cov, signal_count_pos, cover_count_pos);
+	write_call_shmem_output(-1, -1, 999, 0, &extra_cov);
 	cover_reset(&extra_cov);
-	debug_verbose("extra: sig=%u cover=%u\n", *signal_count_pos, *cover_count_pos);
-	completed++;
-	write_completed(completed);
 #endif // if SYZ_EXECUTOR_USES_SHMEM
 }
 
diff --git a/pkg/fuzzer/job.go b/pkg/fuzzer/job.go
index 00c7c034b3d4..a94fe5c89b13 100644
--- a/pkg/fuzzer/job.go
+++ b/pkg/fuzzer/job.go
@@ -259,7 +259,7 @@ type smashJob struct {
 
 func (job *smashJob) run(fuzzer *Fuzzer) {
 	fuzzer.Logf(2, "smashing the program %s (call=%d):", job.p, job.call)
-	if fuzzer.Config.Comparisons && job.call >= 0 {
+	if fuzzer.Config.Comparisons {
 		fuzzer.startJob(fuzzer.statJobsHints, &hintsJob{
 			p:    job.p.Clone(),
 			call: job.call,
@@ -360,8 +360,15 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
 	if result.Stop() || result.Info == nil {
 		return
 	}
+	call := result.Info.Extra
+	if job.call >= 0 {
+		call = result.Info.Calls[job.call]
+	}
+	if call == nil {
+		return
+	}
 	got := make(prog.CompMap)
-	for _, cmp := range result.Info.Calls[job.call].Comps {
+	for _, cmp := range call.Comps {
 		got.AddComp(cmp.Op1, cmp.Op2)
 	}
 	if len(got) == 0 {
@@ -377,13 +384,19 @@ func (job *hintsJob) run(fuzzer *Fuzzer) {
 	// Then mutate the initial program for every match between
 	// a syscall argument and a comparison operand.
 	// Execute each of such mutants to check if it gives new coverage.
-	p.MutateWithHints(job.call, comps,
-		func(p *prog.Prog) bool {
-			result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
-				Prog:     p,
-				ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
-				Stat:     fuzzer.statExecHint,
-			})
-			return !result.Stop()
-		})
+
+	for i := 0; i < len(p.Calls); i++ {
+		// For .extra coverage, substitute arguments to all calls with remote_cover.
+		if i == job.call || job.call < 0 && job.p.Calls[i].Meta.Attrs.RemoteCover {
+			p.MutateWithHints(i, comps,
+				func(p *prog.Prog) bool {
+					result := fuzzer.execute(fuzzer.smashQueue, &queue.Request{
+						Prog:     p,
+						ExecOpts: setFlags(flatrpc.ExecFlagCollectSignal),
+						Stat:     fuzzer.statExecHint,
+					})
+					return !result.Stop()
+				})
+		}
+	}
 }
diff --git a/pkg/ipc/ipc.go b/pkg/ipc/ipc.go
index 84a7b9541e72..895671130a0c 100644
--- a/pkg/ipc/ipc.go
+++ b/pkg/ipc/ipc.go
@@ -403,6 +403,9 @@ func convertExtra(extraParts []flatrpc.CallInfo, dedupCover bool) *flatrpc.CallI
 		extra.Signal[i] = uint64(s)
 		i++
 	}
+	for _, part := range extraParts {
+		extra.Comps = append(extra.Comps, part.Comps...)
+	}
 	return &extra
 }
 
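Note, not part of the patch: the pkg/ipc/ipc.go hunk above concatenates the comparison tuples from every extra-coverage part into the single synthetic "extra" call, which the fuzzer then reads through result.Info.Extra when job.call < 0. The stand-alone Go sketch below illustrates only that aggregation step under simplified assumptions; Comp, CallInfo and mergeExtraComps are illustrative stand-ins rather than the real flatrpc types, and the signal/cover deduplication that convertExtra also performs is omitted.

// Illustrative sketch only, not part of the patch.
package main

import "fmt"

// Comp is a single recorded comparison (two operands observed by the kernel
// comparison tracing). Simplified stand-in for the flatrpc comparison type.
type Comp struct {
	Op1, Op2 uint64
}

// CallInfo is a stripped-down stand-in for one extra-coverage part.
type CallInfo struct {
	Comps []Comp
}

// mergeExtraComps mirrors the new loop in convertExtra: comparisons from
// every remote-coverage part are appended into one synthetic "extra" call,
// so that hint mutation can later substitute their operands into calls
// that have remote_cover set.
func mergeExtraComps(parts []CallInfo) CallInfo {
	var extra CallInfo
	for _, part := range parts {
		extra.Comps = append(extra.Comps, part.Comps...)
	}
	return extra
}

func main() {
	parts := []CallInfo{
		{Comps: []Comp{{Op1: 0xdead, Op2: 0xbeef}}},
		{Comps: []Comp{{Op1: 42, Op2: 1337}}},
	}
	extra := mergeExtraComps(parts)
	fmt.Printf("merged %d comparisons from extra coverage\n", len(extra.Comps))
}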