From 901f9556cf6a1411bc78d0cf8094a8985397ef63 Mon Sep 17 00:00:00 2001
From: zapashcanon
Date: Sun, 27 Oct 2024 14:06:15 +0100
Subject: [PATCH] update scripts and bench

---
 wasm/test/bench.sh |  2 +-
 wasm/test/plot.py  | 48 +++++++++++++++++++++++++++++++++-------------
 wasm/test/soli.ml  |  7 ++-----
 3 files changed, 38 insertions(+), 19 deletions(-)

diff --git a/wasm/test/bench.sh b/wasm/test/bench.sh
index 7a8788da6..bb8c902db 100755
--- a/wasm/test/bench.sh
+++ b/wasm/test/bench.sh
@@ -32,7 +32,7 @@ bench() {
 
   # bench
   # TODO: use ../../ocamlrun when it'll be produced
-  hyperfine --warmup 2 --runs 5 --export-json ${2}.json \
+  hyperfine --warmup 2 --runs 10 --export-json ${2}.json \
     -n "OCaml native" "./a.out" \
    -n "Wasocaml" "${NODE} ./main_node.mjs" \
    -n "Wsoo" "${NODE} ./a.wsoo.bc.js" \
diff --git a/wasm/test/plot.py b/wasm/test/plot.py
index 30218c63e..5ca732e50 100755
--- a/wasm/test/plot.py
+++ b/wasm/test/plot.py
@@ -2,6 +2,7 @@
 import json
 import matplotlib.pyplot as plt
 import numpy as np
+from matplotlib.ticker import LogLocator, FormatStrFormatter
 
 # Directory containing JSON files
 json_dir = './'
@@ -9,6 +10,9 @@
 # Dictionary to store benchmark data
 data = {}
 
+# Desired order of tools
+commands = ["OCaml native", "Wasocaml", "Wsoo", "Jsoo", "Bytecode"]
+
 # Read each JSON file in the directory
 for filename in os.listdir(json_dir):
     if filename.endswith('.json'):
@@ -16,17 +20,23 @@
         with open(os.path.join(json_dir, filename), 'r') as f:
             json_data = json.load(f)
         # Extract command names and mean execution times
-        commands = [result['command'] for result in json_data['results']]
-        means = [result['mean'] for result in json_data['results']]
-        data[benchmark_name] = (commands, means)
+        commands_data = {result['command']: result['mean'] for result in json_data['results']}
+        # Calculate ratios with respect to OCaml native if it exists
+        if "OCaml native" in commands_data:
+            native_mean = commands_data["OCaml native"]
+            ratios = {command: commands_data.get(command, np.nan) / native_mean for command in commands}
+        else:
+            ratios = {command: np.nan for command in commands} # If "OCaml native" is missing, set all to NaN
+        data[benchmark_name] = ratios
 
-# Prepare the plot
-fig, ax = plt.subplots(figsize=(12, 6))
-benchmarks = list(data.keys())
-commands = data[benchmarks[0]][0] # Get the list of tools from the first benchmark
+# Sort benchmarks lexicographically
+benchmarks = sorted(data.keys())
 num_benchmarks = len(benchmarks)
 num_commands = len(commands)
 
+# Prepare the plot
+fig, ax = plt.subplots(figsize=(12, 6))
+
 # Set bar width and positions with additional space between groups
 bar_width = 0.15
 group_gap = 0.4 # Gap between benchmark groups
@@ -34,18 +44,30 @@
 
 # Plot bars for each command
 for i, command in enumerate(commands):
-    # Get means for each benchmark
-    means = [data[benchmark][1][i] for benchmark in benchmarks]
-    ax.bar(x + i * bar_width, means, bar_width, label=command)
+    # Get ratios for each benchmark
+    ratios = [data[benchmark].get(command, np.nan) for benchmark in benchmarks]
+    ax.bar(x + i * bar_width, ratios, bar_width, label=command)
 
 # Customize plot
+ax.set_yscale('log') # Set y-axis to logarithmic scale
+
+# Use base-2 major ticks on the logarithmic y-axis
+ax.yaxis.set_major_locator(LogLocator(base=2.0, numticks=64)) # Major ticks at powers of 2 (1, 2, 4, ...)
+ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
+#ax.yaxis.set_minor_locator(LogLocator(base=2.0, subs=np.arange(2, 10) * 0.1, numticks=10)) # Minor ticks at 2, 3, ..., 9
+#ax.yaxis.set_minor_formatter(FormatStrFormatter('%d'))
+
+# Set y-axis tick labels to avoid the repeated 0
+#ax.set_ylim(0.9, 70) # Set limits slightly below 10^0 to avoid duplication
+#ax.yaxis.get_minor_ticks()[0].label1.set_visible(False) # Hide the extra "0" tick
+
 ax.set_xticks(x + bar_width * (num_commands - 1) / 2)
 ax.set_xticklabels(benchmarks, rotation=45, ha='right')
-ax.set_ylabel('Execution Time (s)')
+ax.set_ylabel('Execution Time Ratio (Relative to OCaml Native)')
 ax.legend(title="Tool")
-plt.title("Benchmark Execution Times for Various OCaml compilers")
+plt.title("Benchmark Execution Time Ratios for Various Compilers (Relative to OCaml Native)")
 plt.tight_layout()
 
 # Save as SVG
-plt.savefig('benchmark_execution_times.svg', format='svg')
+plt.savefig('benchmark_execution_time_ratios.svg', format='svg')
 plt.show()
diff --git a/wasm/test/soli.ml b/wasm/test/soli.ml
index 162ea9359..7e4a57470 100644
--- a/wasm/test/soli.ml
+++ b/wasm/test/soli.ml
@@ -89,11 +89,8 @@ let rec solve board m =
 
   with Found -> true
 
-let array_copy a =
-  Sys.opaque_identity (Array.init (Array.length a) (Sys.opaque_identity (fun i -> a.(i))))
-
 let rec runbench n =
-  let board = Array.map array_copy initial_board in
+  let board = Array.map Array.copy initial_board in
   if n > 1 then begin
     ignore (solve board 0);
     runbench (n - 1)
@@ -101,6 +98,6 @@ let rec runbench n =
     if solve board 0 then (print_string "\n"; print_board board)
   end
 
-let _ =
+let () =
   let nruns = 500 in
   runbench nruns