
Commit

Rename the grade mode option to tournament mode. Move the previous grading script from the tests folder to dragon-runner's first-class scripts.
JustinMeimar committed Nov 17, 2024
1 parent 8533b03 commit d99e251
Showing 8 changed files with 117 additions and 31 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -9,7 +9,7 @@
<br>


## What
## What is Dragon-Runner

`dragon-runner` is a successor to the [415-tester](https://github.com/cmput415/Tester). Its name derives from being a test runner for a compiler class that likes dragon iconography. `dragon-runner` serves dual functions for students and graders. For students it is a unit tester, versatile enough to run over arbitrary toolchains through a generic JSON configuration language. For graders, dragon-runner is a Swiss Army knife of sorts: it wraps scripts for building submissions, gathering tests, and generating meta JSON configurations, which are useful for herding an arbitrary number of compiler submissions into place. It can also run tests in a tournament mode, where each submitted compiler and test-suite pair is run in a cross product with every other submission. The tournament mode produces a CSV output according to the CMPUT 415 grading scheme.
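To make the cross-product idea concrete, the sketch below shows the shape of a tournament run. It is illustrative only and not dragon-runner's actual API; the names `run_tournament`, `compilers`, and `test_suites` are hypothetical.

```python
# Hypothetical sketch of tournament mode's cross product (not the real API).
from typing import Callable, Dict, List

def run_tournament(compilers: Dict[str, Callable[[str], bool]],
                   test_suites: Dict[str, List[str]]) -> Dict[str, Dict[str, float]]:
    """table[attacker][defender] = fraction of the attacker's tests the defender passes."""
    table: Dict[str, Dict[str, float]] = {}
    for attacker, tests in test_suites.items():
        table[attacker] = {}
        for defender, run_test in compilers.items():
            passed = sum(1 for t in tests if run_test(t))
            table[attacker][defender] = passed / len(tests) if tests else 0.0
    return table
```

The real tournament mode additionally writes each toolchain table to a CSV in the CMPUT 415 grading format.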

17 changes: 11 additions & 6 deletions dragon_runner/cli.py
@@ -22,6 +22,10 @@ class CLIArgs(NamedTuple):
verbosity: int
verify: bool
script_args: List[str]

def is_regular_mode(self): return self.mode == "regular"
def is_tournament_mode(self): return self.mode == "tournament"
def is_script_mode(self): return self.mode not in ["regular", "tournament"]

def __repr__(self) -> str:
return (
@@ -43,10 +47,10 @@ def parse_cli_args() -> CLIArgs:

# Make config_file optional
parser.add_argument("config_file", nargs="?", default=None,
help="Path to the tester JSON configuration file (required for regular and grade modes).")
help="Path to the tester JSON configuration file (required for regular and tournament modes).")

parser.add_argument("--mode", dest="mode", default="regular",
help="run in regular, grade or script mode")
help="run in regular, tournament or script mode")

parser.add_argument("--script-args", type=parse_script_args, default=[],
help='Arguments to pass to the script (quote the entire string, e.g. --script-args="arg1 arg2")')
@@ -75,19 +79,19 @@ def parse_cli_args() -> CLIArgs:
args = parser.parse_args()

# Check if config file is required based on mode
if args.mode in ["regular", "grade"]:
if args.mode in ["regular", "tournament"]:
if not args.config_file:
parser.error(f"Config file is required for {args.mode} mode")
if not os.path.isfile(args.config_file):
parser.error(f"The config file {args.config_file} does not exist.")
if args.mode == "grade" and (not bool(args.failure_log) or not bool(args.output)):
parser.error("Failure log and ouput file must be supplied when using grade mode.")
if args.mode == "tournament" and (not bool(args.failure_log) or not bool(args.output)):
parser.error("Failure log and ouput file must be supplied when using tournament mode.")

if args.verbosity > 0:
os.environ["DEBUG"] = str(args.verbosity)

return CLIArgs(
config_file = args.config_file,
config_file = args.config_file,
mode = args.mode,
failure_log = args.failure_log,
timeout = args.timeout,
Expand All @@ -98,3 +102,4 @@ def parse_cli_args() -> CLIArgs:
verify = args.verify,
script_args = args.script_args
)

16 changes: 9 additions & 7 deletions dragon_runner/harness.py
@@ -11,7 +11,7 @@ class TestHarness:
__test__ = False
def __init__(self, config: Config, cli_args: CLIArgs):
self.config = config
self.cli_args = cli_args
self.cli_args: CLIArgs = cli_args
self.failures: List[TestResult] = []

def log_failures(self):
@@ -73,13 +73,15 @@ def run(self) -> bool:
"""
Decide whether to run in regular, tournament, or script mode based on the CLI args.
"""
if self.cli_args.mode == "grade":
assert self.cli_args.failure_log is not None, "Need to supply failure log!"
print("Running Dragon Runner in grade mode")
return self.run_grader_json()
else:
if self.cli_args.is_tournament_mode():
return self.run_tournament()

elif self.cli_args.is_regular_mode():
return self.run_regular()

else:
return False

def log_failure_to_file(self, file, result: TestResult):
"""
Give full feedback to a defender for all the tests they failed
@@ -125,7 +127,7 @@ def create_timing_dataframe() -> Dict[str, Dict[str, float]]:
"""
return {}

def run_grader_json(self) -> bool:
def run_tournament(self) -> bool:
"""
Run the tester in tournament mode. Run all test packages for each tested executable.
Write each toolchain table to the CSV file as it's completed.
2 changes: 1 addition & 1 deletion dragon_runner/main.py
@@ -14,7 +14,7 @@ def main():
log(args, level=1)

# dragon-runner can also be used as a frontend for grading scripts
if args.mode not in ["regular", "grade"]:
if args.mode not in ["regular", "tournament"]:
print(f"Use dragon-runner as a loader for script: {args.mode}")
loader = Loader(args.mode, args.script_args)
loader.run()
8 changes: 8 additions & 0 deletions dragon_runner/scripts/build.py
@@ -1,3 +1,11 @@
"""
============================== 415 Grading Script ==============================
Author: Justin Meimar
Name: build.py
Desc:   Build each compiler submission.
================================================================================
"""

import os
import subprocess
import shutil
8 changes: 8 additions & 0 deletions dragon_runner/scripts/gather.py
@@ -1,3 +1,11 @@
"""
============================== 415 Grading Script ==============================
Author: Justin Meimar
Name: gather.py
Desc:   Gather test suites from each submission.
================================================================================
"""

import shutil
import argparse
from pathlib import Path
9 changes: 9 additions & 0 deletions dragon_runner/scripts/gen_config.py
@@ -1,3 +1,12 @@
"""
============================== 415 Grading Script ==============================
Author: Justin Meimar
Name: gen_config.py
Desc:   Generate meta JSON configurations for grading.
================================================================================
"""


import json
import argparse
from typing import Optional
86 changes: 70 additions & 16 deletions dragon_runner/scripts/grade.py
@@ -1,16 +1,28 @@

"""
============================== 415 Grading Script ==============================
Author: Justin Meimar
Name: grade.py
Desc:   Compute final grades from one or more tournament CSVs.
================================================================================
"""
import argparse
import csv
from fractions import Fraction
from pathlib import Path
from fractions import Fraction
from typing import List

# These are hard-coded so as not to bloat the CLI. Probably easier to change in place.
DEFENSIVE_PTS = 2
OFFENSIVE_PTS = 1
COHERENCE_PTS = 10
COMPETITIVE_WEIGHT = 0.2
TA_WEIGHT = 0.5
SOLUTION = "solution"
SOLUTION = "solution" # the EXE of the solution

def to_float(to_convert) -> float:
"""Helper function to convert fraction strings to floating point."""
"""
Helper function to convert fraction strings to floating point.
"""
try:
return float(Fraction(to_convert))
except ValueError:
@@ -21,9 +33,8 @@ def normalize_competetive_scores(tc_table):
Normalize the competitive scores of a table relative to the max score.
By convention the last row contains the total score for the toolchain
"""
n_rows = len(tc_table)
n_cols = len(tc_table[0])

# n_rows = len(tc_table)
# n_cols = len(tc_table[0])

print("TC_TABLE", tc_table)
print("COMPETITIVE ROW:", tc_table[-2][1:])
@@ -95,14 +106,13 @@ def add_competitive_rows(table):
o_score += (OFFENSIVE_PTS * (1 - to_float(table[i][j])))
d_score += (DEFENSIVE_PTS * to_float(table[j][i]))

# print(f"attacker: {attacker}\n oscore: {o_score} \ndscore: {d_score}\n cscore: {c_score}")

# Populate the new rows
ta_points_row[j] = str(round(ta_score * TA_WEIGHT, 3))
defensive_row[j] = str(round(d_score, 2))
offensive_row[j] = str(round(o_score, 2))
coherence_row[j] = round(c_score, 3)
total_row[j] = str(float(defensive_row[j]) + float(offensive_row[j]) + float(coherence_row[j]))
total_row[j] = str(
float(defensive_row[j]) + float(offensive_row[j]) + float(coherence_row[j]))

# Append new rows to the table
table.append(defensive_row)
@@ -113,11 +123,14 @@ def add_competitive_rows(table):

return table
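
As a worked numeric illustration of the accumulation above, with invented pass rates and assuming `table[j][i]` holds the fraction of attacker `i`'s tests that compiler `j` passes:

```python
# Invented pass rates for illustration only; constants match those defined above.
OFFENSIVE_PTS = 1
DEFENSIVE_PTS = 2

defended_rates = [1.0, 0.5]    # to_float(table[j][i]): j's pass rate on each attacker's tests
attacked_rates = [0.75, 0.25]  # to_float(table[i][j]): each defender's pass rate on j's tests

d_score = sum(DEFENSIVE_PTS * r for r in defended_rates)        # 2*1.0  + 2*0.5  = 3.0
o_score = sum(OFFENSIVE_PTS * (1 - r) for r in attacked_rates)  # 1*0.25 + 1*0.75 = 1.0
print(d_score, o_score)                                         # 3.0 1.0
```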

if __name__ == "__main__":

input_files = ['Grades.csv']
def tournament(tournament_csv_paths: List[str], grade_path: str):
"""
Run the tournament for each tournament CSV, then average all the
toolchain tables. Write all the tables, including the average, to
the final grade_path.
"""
tc_tables = []
for file in input_files:
for file in tournament_csv_paths:
with open(file, 'r') as f:
reader = csv.reader(f)
tc_table = list(reader)
@@ -128,10 +141,51 @@ def add_competitive_rows(table):
normalize_competetive_scores(tc_avg)
print(tc_avg)

output_file = './vcalc-grades.csv'
with open(output_file, 'w') as f:
with open(grade_path, 'w') as f:
writer = csv.writer(f)
for table in tc_tables:
writer.writerows(table)
writer.writerow([]) # newline
writer.writerows(tc_avg)

if __name__ == "__main__":

parser = argparse.ArgumentParser()
parser.add_argument(
"tournament_csvs",
type=Path,
nargs="+",
help="Path to one or more csv files generated from grade mode"
)
parser.add_argument(
"output_csv",
type=Path,
help="Path to final output csv with grades"
)

args = parser.parse_args()
tournament(args.tournament_csvs, args.output_csv)
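
For reference, the same entry point can be invoked programmatically; the file names here are hypothetical.

```python
# Hypothetical file names: tournament CSVs in, a single grade CSV out.
tournament(["tournament-run-1.csv", "tournament-run-2.csv"], "grades.csv")
```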

#
# input_files = ['Grades.csv']
# tc_tables = []
# for file in input_files:
# with open(file, 'r') as f:
# reader = csv.reader(f)
# tc_table = list(reader)
# tc_tables.append(add_competitive_rows(tc_table))
#
# print(tc_tables)
# tc_avg = average_toolchain_tables(tc_tables)
# normalize_competetive_scores(tc_avg)
# print(tc_avg)
#
# output_file = './vcalc-grades.csv'
# with open(output_file, 'w') as f:
# writer = csv.writer(f)
# for table in tc_tables:
# writer.writerows(table)
# writer.writerow([]) # newline
# writer.writerows(tc_avg)

