prompt_agent_en.yml
task_name: word_sorting # bigbench | ncbi | ... | or your own task
search_algo_name: beam_search # mcts | beam_search
world_model_name: beam_search
language: en
print_log: true
instruction: Sort these words alphabetically and return them as a single string with words separated by single spaces
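# Illustrative example of the expected behavior (made up here, not taken from the dataset):
#   input:  "cherry apple banana"  ->  output: "apple banana cherry"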
train_size: 50
eval_size: 0 # data split for reward calculation
test_size: 200 # if test_size is not 0, the optimized nodes are tested at the end
seed: 0 # set to fix the dataset shuffle
data_dir: Path/to/PromptScope/prompt_scope/datasets/benchmarks/bbh/word_sorting.json # if data is downloaded
# Note: currently supported bigbench tasks are specified via data_dir with the
# same task_name (bigbench) when no task-specific .py class is implemented in
# the tasks folder.
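# For reference, the upstream BIG-bench Hard release stores each task as JSON of
# the form {"examples": [{"input": ..., "target": ...}]}; whether the local
# word_sorting.json follows exactly that layout is an assumption, so check the
# copy under datasets/benchmarks/bbh.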
post_instruction: false # false: prompt + task question | true: task question + prompt
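# Assembly illustration (the exact separator is assumed, not read from the code):
#   false -> "Sort these words alphabetically ...\n<word list>"
#   true  -> "<word list>\nSort these words alphabetically ..."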
eval_type: custom
iteration_num: 10
expand_width: 3 # number of branches expanded from each node
depth_limit: 5 # the max depth of mcts
# mcts setting
min_depth: 2 # minimum mcts depth before early stopping
w_exp: 2.5 # exploration weight that balances exploration and exploitation
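# w_exp is presumably the exploration constant c of a UCT-style selection score,
#   UCT(node) = Q(node) + c * sqrt(ln(N_parent) / N(node)),
# with Q the average reward and N the visit count; the exact form used by the
# mcts implementation here is an assumption.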
# beam search setting
beam_width: 3
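# Assumed interplay with expand_width: each of the beam_width surviving prompts
# is expanded into expand_width candidates, and the top beam_width candidates
# are kept for the next iteration.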
# mcts world model setting
train_shuffle: true
num_new_prompts: 1 # number of new prompts per expansion (3 if beam search)
train_batch_size: 5
prompt_length_limit: 200
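# A minimal loading sketch (hypothetical usage with PyYAML; the actual
# PromptScope entry point may consume this file differently):
#   import yaml
#   with open("prompt_agent_en.yml") as f:
#       cfg = yaml.safe_load(f)
#   print(cfg["task_name"], cfg["search_algo_name"])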