
Commit 0f1f0f8

Authored by keshavramji (Keshav Ramji <Keshav.Ramji@ibm.com>) and avinash2692
feat: Test-based Evaluation with LLM-as-a-judge (#225)
* v1 working
* Update v1 data format and judge call
* Pre-commit fixes
* PR update: jinja, pydantic
* Revert .gitignore to main

---------

Co-authored-by: Keshav Ramji <[email protected]>
Co-authored-by: Avinash Balakrishnan <[email protected]>
1 parent 6b2a527 commit 0f1f0f8

File tree

7 files changed: +587 -6 lines


cli/eval/__init__.py

Lines changed: 1 addition & 0 deletions
@@ -0,0 +1 @@
"""CLI for test-based evaluation"""

cli/eval/commands.py

Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
"""Use the eval command for LLM-as-a-judge evaluation, given a (set of) test file(s) consisting of prompts, instructions, and, optionally, targets.
Instantiate a generator model to produce candidate responses and a judge model to determine whether the instructions have been followed."""

import typer

eval_app = typer.Typer(name="eval")


def eval_run(
    test_files: list[str] = typer.Argument(
        ..., help="List of paths to json/jsonl files containing test cases"
    ),
    backend: str = typer.Option("ollama", "--backend", "-b", help="Generation backend"),
    model: str = typer.Option(None, "--model", help="Generation model name"),
    max_gen_tokens: int = typer.Option(
        256, "--max-gen-tokens", help="Max tokens to generate for responses"
    ),
    judge_backend: str = typer.Option(
        None, "--judge-backend", "-jb", help="Judge backend"
    ),
    judge_model: str = typer.Option(None, "--judge-model", help="Judge model name"),
    max_judge_tokens: int = typer.Option(
        256, "--max-judge-tokens", help="Max tokens for the judge model's judgement"
    ),
    output_path: str = typer.Option(
        "eval_results", "--output-path", "-o", help="Output path for results"
    ),
    output_format: str = typer.Option(
        "json", "--output-format", help="Either json or jsonl format for results"
    ),
    continue_on_error: bool = typer.Option(True, "--continue-on-error"),
):
    # Imported lazily so the CLI stays fast to load when eval is unused.
    from cli.eval.runner import run_evaluations

    run_evaluations(
        test_files=test_files,
        backend=backend,
        model=model,
        max_gen_tokens=max_gen_tokens,
        judge_backend=judge_backend,
        judge_model=judge_model,
        max_judge_tokens=max_judge_tokens,
        output_path=output_path,
        output_format=output_format,
        continue_on_error=continue_on_error,
    )


eval_app.command("run")(eval_run)
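
For orientation, the sketch below shows one way the new command might be exercised end to end. It is a minimal, hypothetical example: the test-case field names (prompt, instruction, target) are inferred from the module docstring rather than confirmed by this diff, granite3.3 is a placeholder model name, and the snippet assumes the cli package is importable from the working directory.

import json

# Hypothetical test case. The field names ("prompt", "instruction", "target")
# are assumed from the module docstring above; the actual v1 data format is
# defined elsewhere in this commit.
case = {
    "prompt": "Summarize the following article in two sentences.",
    "instruction": "The summary must be at most two sentences long.",
    "target": None,  # targets are optional
}

with open("cases.jsonl", "w") as f:
    f.write(json.dumps(case) + "\n")

from cli.eval.commands import eval_app

# Invoke the registered "run" subcommand programmatically; roughly equivalent
# to a shell call such as: eval run cases.jsonl --model granite3.3 --judge-model granite3.3
eval_app(
    ["run", "cases.jsonl", "--model", "granite3.3", "--judge-model", "granite3.3"],
    standalone_mode=False,  # propagate errors instead of calling sys.exit()
)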
