fix(bench): Result interpretation problems (#5798)
Co-authored-by: Lucas Nogueira <[email protected]>
Beanow and lucasfernog authored Dec 14, 2022
1 parent bca09f7 commit f7a080a
Showing 4 changed files with 49 additions and 37 deletions.
1 change: 1 addition & 0 deletions .github/workflows/bench.yml
@@ -10,6 +10,7 @@ on:
 env:
   RUST_BACKTRACE: 1
   CARGO_PROFILE_DEV_DEBUG: 0 # This would add unnecessary bloat to the target folder, decreasing cache efficiency.
+  LC_ALL: en_US.UTF-8 # This prevents strace from changing its number format to use commas.
 
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
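For context, not part of the commit: the benchmark harness feeds strace's numeric columns to str::parse, which only accepts plain digits, so pinning LC_ALL keeps the summary free of locale-dependent grouping separators. A minimal sketch of the failure mode the env var guards against:

fn main() {
  // Plain digits, as emitted under the pinned locale, parse fine.
  assert_eq!("1000000".parse::<u64>().unwrap(), 1_000_000);

  // A grouped number, as some locales would make strace print, fails to parse.
  assert!("1,000,000".parse::<u64>().is_err());
}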
15 changes: 7 additions & 8 deletions tooling/bench/src/build_benchmark_jsons.rs
@@ -22,20 +22,19 @@ fn main() {
 
   // all data's
   let all_data_buffer =
-    BufReader::new(File::open(&tauri_data).expect("Unable to read all data file"));
+    BufReader::new(File::open(tauri_data).expect("Unable to read all data file"));
   let mut all_data: Vec<utils::BenchResult> =
     serde_json::from_reader(all_data_buffer).expect("Unable to read all data buffer");
 
   // add current data to alls data
   all_data.push(current_data);
 
   // use only latest 20 elements from alls data
-  let recent: Vec<utils::BenchResult>;
-  if all_data.len() > 20 {
-    recent = all_data[all_data.len() - 20..].to_vec();
+  let recent: Vec<utils::BenchResult> = if all_data.len() > 20 {
+    all_data[all_data.len() - 20..].to_vec()
   } else {
-    recent = all_data.clone();
-  }
+    all_data.clone()
+  };
 
   // write json's
   utils::write_json(
@@ -44,13 +43,13 @@ fn main() {
       .expect("Something wrong with tauri_data"),
     &serde_json::to_value(&all_data).expect("Unable to build final json (alls)"),
   )
-  .expect(format!("Unable to write {:?}", tauri_data).as_str());
+  .unwrap_or_else(|_| panic!("Unable to write {:?}", tauri_data));
 
   utils::write_json(
     tauri_recent
       .to_str()
       .expect("Something wrong with tauri_recent"),
     &serde_json::to_value(&recent).expect("Unable to build final json (recent)"),
   )
-  .expect(format!("Unable to write {:?}", tauri_recent).as_str());
+  .unwrap_or_else(|_| panic!("Unable to write {:?}", tauri_recent));
 }
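As background, and not part of the diff itself: expect(format!(...).as_str()) builds the panic message String on every call, even when the write succeeds, whereas unwrap_or_else(|_| panic!(...)) only formats the message on the error path. A minimal sketch of the two styles, using a hypothetical stand-in for utils::write_json:

use std::io;

// Hypothetical stand-in; the real utils::write_json also takes the JSON value.
fn write_json(path: &str) -> io::Result<()> {
  std::fs::write(path, "{}")
}

fn main() {
  let path = "bench.json";

  // Eager: the message String is allocated even on success.
  write_json(path).expect(format!("Unable to write {:?}", path).as_str());

  // Lazy: the message is only formatted if the call actually fails.
  write_json(path).unwrap_or_else(|_| panic!("Unable to write {:?}", path));
}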
21 changes: 12 additions & 9 deletions tooling/bench/src/run_benchmark.rs
@@ -49,7 +49,7 @@ fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
     let mut file = tempfile::NamedTempFile::new()?;
 
     Command::new("strace")
-      .args(&[
+      .args([
         "-c",
         "-f",
         "-o",
@@ -64,7 +64,10 @@ fn run_strace_benchmarks(new_data: &mut utils::BenchResult) -> Result<()> {
     file.as_file_mut().read_to_string(&mut output)?;
 
     let strace_result = utils::parse_strace_output(&output);
-    let clone = strace_result.get("clone").map(|d| d.calls).unwrap_or(0) + 1;
+    // Note, we always have 1 thread. Use cloneX calls as counter for additional threads created.
+    let clone = 1
+      + strace_result.get("clone").map(|d| d.calls).unwrap_or(0)
+      + strace_result.get("clone3").map(|d| d.calls).unwrap_or(0);
     let total = strace_result.get("total").unwrap().calls;
     thread_count.insert(name.to_string(), clone);
     syscall_count.insert(name.to_string(), total);
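For context, outside the diff: newer glibc versions spawn threads through the clone3 syscall, so counting only clone under-reports threads on newer systems, and the main thread never shows up in strace's per-syscall counters, hence the leading 1. A simplified sketch of the counting logic, using a plain map instead of the StraceOutput structs the real code stores:

use std::collections::HashMap;

// 1 for the main thread, plus one thread per observed clone/clone3 call.
fn thread_count(calls: &HashMap<String, u64>) -> u64 {
  1 + calls.get("clone").copied().unwrap_or(0) + calls.get("clone3").copied().unwrap_or(0)
}

fn main() {
  let mut calls = HashMap::new();
  calls.insert("clone3".to_string(), 4u64);
  assert_eq!(thread_count(&calls), 5);
}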
@@ -84,7 +87,7 @@ fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
     let benchmark_file = benchmark_file.to_str().unwrap();
 
     let proc = Command::new("mprof")
-      .args(&[
+      .args([
         "run",
         "-C",
         "-o",
@@ -99,7 +102,7 @@ fn run_max_mem_benchmark() -> Result<HashMap<String, u64>> {
     println!("{:?}", proc_result);
     results.insert(
       name.to_string(),
-      utils::parse_max_mem(&benchmark_file).unwrap(),
+      utils::parse_max_mem(benchmark_file).unwrap(),
     );
   }
 
@@ -132,7 +135,7 @@ fn rlib_size(target_dir: &std::path::Path, prefix: &str) -> u64 {
 fn get_binary_sizes(target_dir: &Path) -> Result<HashMap<String, u64>> {
   let mut sizes = HashMap::<String, u64>::new();
 
-  let wry_size = rlib_size(&target_dir, "libwry");
+  let wry_size = rlib_size(target_dir, "libwry");
   println!("wry {} bytes", wry_size);
   sizes.insert("wry_rlib".to_string(), wry_size);
 
@@ -174,9 +177,9 @@ fn cargo_deps() -> HashMap<String, usize> {
   let mut cmd = Command::new("cargo");
   cmd.arg("tree");
   cmd.arg("--no-dedupe");
-  cmd.args(&["--edges", "normal"]);
-  cmd.args(&["--prefix", "none"]);
-  cmd.args(&["--target", target]);
+  cmd.args(["--edges", "normal"]);
+  cmd.args(["--prefix", "none"]);
+  cmd.args(["--target", target]);
   cmd.current_dir(&utils::tauri_root_path());
 
   let full_deps = cmd.output().expect("failed to run cargo tree").stdout;
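Not part of the change, but for illustration: dropping the & works because arrays implement IntoIterator by value since Rust 1.53, which is all Command::args needs; the borrowed form still compiles but iterates references. A minimal sketch, assuming a Unix-like system where echo is available:

use std::process::Command;

fn main() {
  // Arrays implement IntoIterator by value, so no borrow is needed for .args([...]).
  let out = Command::new("echo")
    .args(["hello", "world"])
    .output()
    .expect("failed to run echo");
  println!("{}", String::from_utf8_lossy(&out.stdout));
}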
@@ -268,7 +271,7 @@ fn main() -> Result<()> {
     time::format_description::parse("[year]-[month]-[day]T[hour]:[minute]:[second]Z").unwrap();
   let now = time::OffsetDateTime::now_utc();
   let mut new_data = utils::BenchResult {
-    created_at: format!("{}", now.format(&format).unwrap()),
+    created_at: now.format(&format).unwrap(),
     sha1: utils::run_collect(&["git", "rev-parse", "HEAD"])
       .0
       .trim()
49 changes: 29 additions & 20 deletions tooling/bench/src/utils.rs
@@ -45,12 +45,11 @@ pub fn get_target() -> &'static str {
 }
 
 pub fn target_dir() -> PathBuf {
-  let target_dir = bench_root_path()
+  bench_root_path()
     .join("tests")
     .join("target")
     .join(get_target())
-    .join("release");
-  target_dir.into()
+    .join("release")
 }
 
 pub fn bench_root_path() -> PathBuf {
@@ -105,16 +104,14 @@ pub fn parse_max_mem(file_path: &str) -> Option<u64> {
   let output = BufReader::new(file);
   let mut highest: u64 = 0;
   // MEM 203.437500 1621617192.4123
-  for line in output.lines() {
-    if let Ok(line) = line {
-      // split line by space
-      let split = line.split(" ").collect::<Vec<_>>();
-      if split.len() == 3 {
-        // mprof generate result in MB
-        let current_bytes = str::parse::<f64>(split[1]).unwrap() as u64 * 1024 * 1024;
-        if current_bytes > highest {
-          highest = current_bytes;
-        }
-      }
+  for line in output.lines().flatten() {
+    // split line by space
+    let split = line.split(' ').collect::<Vec<_>>();
+    if split.len() == 3 {
+      // mprof generate result in MB
+      let current_bytes = str::parse::<f64>(split[1]).unwrap() as u64 * 1024 * 1024;
+      if current_bytes > highest {
+        highest = current_bytes;
+      }
     }
   }
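For context, not part of the diff: BufRead::lines() yields io::Result<String>, so .flatten() keeps the Ok lines and silently skips Err ones, which is exactly what the removed `if let Ok(line) = line` block did. A minimal sketch (not from the repo) using an in-memory reader:

use std::io::{BufRead, BufReader};

fn main() {
  // mprof-style line: "MEM <megabytes> <timestamp>"
  let reader = BufReader::new("MEM 203.437500 1621617192.4123\n".as_bytes());

  // lines() yields io::Result<String>; flatten() drops any Err values.
  for line in reader.lines().flatten() {
    let fields = line.split(' ').collect::<Vec<_>>();
    assert_eq!(fields.len(), 3);
  }
}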
@@ -169,14 +166,26 @@ pub fn parse_strace_output(output: &str) -> HashMap<String, StraceOutput> {
   }
 
   let total_fields = total_line.split_whitespace().collect::<Vec<_>>();
+
   summary.insert(
     "total".to_string(),
-    StraceOutput {
-      percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
-      seconds: str::parse::<f64>(total_fields[1]).unwrap(),
-      usecs_per_call: None,
-      calls: str::parse::<u64>(total_fields[2]).unwrap(),
-      errors: str::parse::<u64>(total_fields[3]).unwrap(),
+    match total_fields.len() {
+      // Old format, has no usecs/call
+      5 => StraceOutput {
+        percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
+        seconds: str::parse::<f64>(total_fields[1]).unwrap(),
+        usecs_per_call: None,
+        calls: str::parse::<u64>(total_fields[2]).unwrap(),
+        errors: str::parse::<u64>(total_fields[3]).unwrap(),
+      },
+      6 => StraceOutput {
+        percent_time: str::parse::<f64>(total_fields[0]).unwrap(),
+        seconds: str::parse::<f64>(total_fields[1]).unwrap(),
+        usecs_per_call: Some(str::parse::<u64>(total_fields[2]).unwrap()),
+        calls: str::parse::<u64>(total_fields[3]).unwrap(),
+        errors: str::parse::<u64>(total_fields[4]).unwrap(),
+      },
+      _ => panic!("Unexpected total field count: {}", total_fields.len()),
     },
   );

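To illustrate, outside the diff: the match on total_fields.len() distinguishes the two summary layouts strace can print for the total row: older releases leave the usecs/call column empty there (5 whitespace-separated fields), while newer ones fill it in (6 fields). A small check with made-up numbers in those two shapes:

fn main() {
  // Older strace: no usecs/call on the total row -> 5 fields.
  let old_total = "100.00    0.001671                   839        10 total";
  // Newer strace: usecs/call present on the total row -> 6 fields.
  let new_total = "100.00    0.001671           2       839        10 total";

  assert_eq!(old_total.split_whitespace().count(), 5);
  assert_eq!(new_total.split_whitespace().count(), 6);
}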
@@ -222,7 +231,7 @@ pub fn download_file(url: &str, filename: PathBuf) {
     .arg("-s")
     .arg("-o")
     .arg(&filename)
-    .arg(&url)
+    .arg(url)
     .status()
     .unwrap();
 
