Skip to content

Commit

Permalink
Allowed scoring multiple model/benchmark pairs per run via the endpoint (optional)
Browse files Browse the repository at this point in the history
  • Loading branch information
shehadak committed Nov 2, 2023
1 parent f71ddf7 commit 2307893
Showing 1 changed file with 8 additions and 6 deletions.
14 changes: 8 additions & 6 deletions brainscore_language/submission/endpoints.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,13 +102,15 @@ def get_models_and_benchmarks(args_dict: Dict[str, Union[str, List]]) -> Tuple[L

def run_scoring(args_dict: Dict[str, Union[str, List]]):
    """ prepares parameters for the `run_scoring_endpoint`. """
    models = _get_ids(args_dict, 'new_models')
    benchmarks = _get_ids(args_dict, 'new_benchmarks')

    # Parameters identical for every scoring invocation in this run.
    shared_kwargs = dict(domain="language",
                         jenkins_id=args_dict["jenkins_id"],
                         user_id=args_dict["user_id"],
                         model_type="artificialsubject",
                         public=args_dict["public"],
                         competition=args_dict["competition"])

    # Score every (model, benchmark) pair submitted in this run.
    for model_id in models:
        for benchmark_id in benchmarks:
            run_scoring_endpoint(model_identifier=model_id,
                                 benchmark_identifier=benchmark_id,
                                 **shared_kwargs)


def parse_args() -> argparse.Namespace:
Expand Down

0 comments on commit 2307893

Please sign in to comment.