#!/bin/bash
# run_predict.sh: run predict.py over CLIP image embeddings with different
# decoder checkpoints. Earlier runs are kept below, commented out.

# Commented-out run: base gpt2 decoder, CLIP embeddings
# CUDA_VISIBLE_DEVICES=0 python predict.py \
# --model_name_or_path="gpt2" \
# --cache_dir_path="./cache/openai/clip-vit-base-patch32" \
# --preprocessing_num_workers=8 --image_column_name=image_id --text_column_name=caption \
# --per_device_train_batch_size=16 --per_device_eval_batch_size=1 \
# --dataloader_num_workers=8 --dataloader_pin_memory --group_by_length \
# --seed=14045 --num_train_epochs=100 --learning_rate=5e-5 \
# --fp16 --fp16_backend=amp \
# --logging_strategy=steps --logging_steps=10 --report_to=tensorboard \
# --evaluation_strategy=epoch --eval_steps=1 --eval_accumulation_steps=1 \
# --save_strategy=epoch --save_steps=1 --save_total_limit=3 --load_best_model_at_end \
# --gradient_checkpointing=True \
# --do_eval=True
# Commented-out run: decoder-finetuned gpt2 checkpoint, CLIP embeddings
# CUDA_VISIBLE_DEVICES=0 python predict.py \
# --model_name_or_path="save/save/decoder_finetuning/gpt2/checkpoint-1751175" \
# --cache_dir_path="./cache/openai/clip-vit-base-patch32" \
# --preprocessing_num_workers=8 --image_column_name=image_id --text_column_name=caption \
# --per_device_train_batch_size=16 --per_device_eval_batch_size=1 \
# --dataloader_num_workers=8 --dataloader_pin_memory --group_by_length \
# --seed=14045 --num_train_epochs=100 --learning_rate=5e-5 \
# --fp16 --fp16_backend=amp \
# --logging_strategy=steps --logging_steps=10 --report_to=tensorboard \
# --evaluation_strategy=epoch --eval_steps=1 --eval_accumulation_steps=1 \
# --save_strategy=epoch --save_steps=1 --save_total_limit=3 --load_best_model_at_end \
# --gradient_checkpointing=True \
# --do_eval=True
# Commented-out run: Romance-genre adapter checkpoint, CLIP embeddings
# CUDA_VISIBLE_DEVICES=0 python predict.py \
# --model_name_or_path="save/save/with-adapters/GPT2small_adapterid0_genreRomance_matched3_sampleNone_maxseqlen512_bs8_lr5e-05_2.0epoch_wd0.0_ws0/checkpoint-15000" \
# --cache_dir_path="./cache/openai/clip-vit-base-patch32" \
# --preprocessing_num_workers=8 --image_column_name=image_id --text_column_name=caption \
# --per_device_train_batch_size=16 --per_device_eval_batch_size=1 \
# --dataloader_num_workers=8 --dataloader_pin_memory --group_by_length \
# --seed=14045 --num_train_epochs=100 --learning_rate=5e-5 \
# --fp16 --fp16_backend=amp \
# --logging_strategy=steps --logging_steps=10 --report_to=tensorboard \
# --evaluation_strategy=epoch --eval_steps=1 --eval_accumulation_steps=1 \
# --save_strategy=epoch --save_steps=1 --save_total_limit=3 --load_best_model_at_end \
# --gradient_checkpointing=True \
# --do_eval=True
# Active run: Action-genre adapter checkpoint, CLIP embeddings
CUDA_VISIBLE_DEVICES=4 python predict.py \
--model_name_or_path="save/save/with-adapters/GPT2small_adapterid0_genreAction_matched3_sampleNone_maxseqlen512_bs8_lr5e-05_10.0epoch_wd0.0_ws0/checkpoint-15000" \
--cache_dir_path="./cache/openai/clip-vit-base-patch32" \
--preprocessing_num_workers=8 --image_column_name=image_id --text_column_name=caption \
--per_device_train_batch_size=16 --per_device_eval_batch_size=1 \
--dataloader_num_workers=8 --dataloader_pin_memory --group_by_length \
--seed=14045 --num_train_epochs=100 --learning_rate=5e-5 \
--fp16 --fp16_backend=amp \
--logging_strategy=steps --logging_steps=10 --report_to=tensorboard \
--evaluation_strategy=epoch --eval_steps=1 --eval_accumulation_steps=1 \
--save_strategy=epoch --save_steps=1 --save_total_limit=3 --load_best_model_at_end \
--gradient_checkpointing=True \
--do_eval=True
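
# The four invocations above differ only in the checkpoint path and GPU id.
# Below is a minimal sketch of a loop over genre adapter checkpoints. The
# checkpoint directory pattern is an assumption: the Romance run above used
# "2.0epoch" in its path while the Action run used "10.0epoch", so adjust
# the pattern to match the actual save/ layout before uncommenting.
#
# for GENRE in Action Romance; do
#     CKPT="save/save/with-adapters/GPT2small_adapterid0_genre${GENRE}_matched3_sampleNone_maxseqlen512_bs8_lr5e-05_10.0epoch_wd0.0_ws0/checkpoint-15000"
#     CUDA_VISIBLE_DEVICES=4 python predict.py \
#         --model_name_or_path="${CKPT}" \
#         --cache_dir_path="./cache/openai/clip-vit-base-patch32" \
#         --preprocessing_num_workers=8 --image_column_name=image_id --text_column_name=caption \
#         --per_device_train_batch_size=16 --per_device_eval_batch_size=1 \
#         --dataloader_num_workers=8 --dataloader_pin_memory --group_by_length \
#         --seed=14045 --num_train_epochs=100 --learning_rate=5e-5 \
#         --fp16 --fp16_backend=amp \
#         --logging_strategy=steps --logging_steps=10 --report_to=tensorboard \
#         --evaluation_strategy=epoch --eval_steps=1 --eval_accumulation_steps=1 \
#         --save_strategy=epoch --save_steps=1 --save_total_limit=3 --load_best_model_at_end \
#         --gradient_checkpointing=True \
#         --do_eval=True
# done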