Skip to content

Commit

Permalink
Fix entrypoint, uncomment exec.
Browse files Browse the repository at this point in the history
  • Loading branch information
ricklamers committed Sep 21, 2023
1 parent 312c5e5 commit fbcc4ca
Show file tree
Hide file tree
Showing 3 changed files with 4 additions and 4 deletions.
4 changes: 2 additions & 2 deletions human_eval/evaluate_functional_correctness.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@

def entry_point(
    sample_file: str,
    k: tuple = (1, 10, 100),
    n_workers: int = 4,
    timeout: float = 3.0,
    problem_file: str = HUMAN_EVAL,
):
    """
    Evaluates the functional correctness of generated samples, and writes
    results to f"{sample_file}_results.jsonl.gz"

    :param sample_file: path to the JSONL file of generated samples.
    :param k: pass@k values to compute. Accepts either an iterable of ints
        (e.g. ``(1, 10, 100)``) or, for backward compatibility, the legacy
        comma-separated string form (e.g. ``"1,10,100"``).
    :param n_workers: number of parallel execution workers.
    :param timeout: per-sample execution timeout in seconds.
    :param problem_file: path to the HumanEval problem file.
    """
    # Backward compatibility: the old CLI passed k as "1,10,100". Without
    # this guard, list("1,10,100") would split into single characters.
    if isinstance(k, str):
        k = list(map(int, k.split(",")))
    else:
        k = list(k)
    results = evaluate_functional_correctness(sample_file, k, n_workers, timeout, problem_file)
    print(results)

Expand Down
2 changes: 1 addition & 1 deletion human_eval/execution.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ def unsafe_execute():
# information on how OpenAI sandboxes its code, see the accompanying paper.
# Once you have read this disclaimer and taken appropriate precautions,
# uncomment the following line and proceed at your own risk:
# exec(check_program, exec_globals)
exec(check_program, exec_globals)
result.append("passed")
except TimeoutException:
result.append("timed out")
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@
],
entry_points={
"console_scripts": [
"evaluate_functional_correctness = human_eval.evaluate_functional_correctness",
"evaluate_functional_correctness = human_eval.evaluate_functional_correctness:main",
]
}
)

0 comments on commit fbcc4ca

Please sign in to comment.