-
Notifications
You must be signed in to change notification settings - Fork 0
/
54.S2.30_BM_gamma-10.0_seed-54.sh
56 lines (49 loc) · 1.19 KB
/
54.S2.30_BM_gamma-10.0_seed-54.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#!/bin/bash
# SLURM batch script: fine-tune BERT on MNLI with focal loss (gamma=10.0, seed=54).
# Experiment S2.30; logs go to /scratch/izar/rajic/nli/logs/slurm_logs/.
#SBATCH --chdir /scratch/izar/rajic/nli
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=1
#SBATCH --cpus-per-task=20
#SBATCH --mem=90G
#SBATCH --partition=gpu
#SBATCH --qos=gpu
#SBATCH --gres=gpu:1
#SBATCH --time=8:00:00
#SBATCH -o /scratch/izar/rajic/nli/logs/slurm_logs/%x-%j.out

# Fail fast: abort on errors, unset variables, and mid-pipeline failures;
# trace every command for post-mortem debugging of the SLURM log.
set -e
set -u
set -o pipefail
set -o xtrace

# Quote the substitutions so word-splitting doesn't collapse date's
# double spaces (e.g. "Jan  1").
echo "PWD: $(pwd)"
echo "STARTING AT $(date)"

# Modules
module purge
module load gcc/9.3.0-cuda
module load cuda/11.0.2

# Environment
source ~/miniconda3/etc/profile.d/conda.sh
conda activate optml

# Run
date
printf "Run configured and environment setup. Gonna run now.\n\n"

# NOTE(fix): the original script had a trailing '\' after the last
# argument, which made the final `echo FINISHED ...` line part of the
# python command's argv. The continuation now ends at the last flag.
python -m src.main \
  --wandb_entity epfl-optml \
  --experiment_name nli \
  --experiment_version \
  'S2.30_model-bert_dataset-mnli_gamma-10.0_seed-54' \
  --model_name bert \
  --dataset mnli \
  --seed 54 \
  --optimizer_name adamw \
  --scheduler_name polynomial \
  --adam_epsilon 1e-06 \
  --weight_decay 0.01 \
  --warmup_ratio 0.1 \
  --gradient_clip_val 1.0 \
  --tokenizer_model_max_length 128 \
  --focal_loss_gamma 10.0 \
  --accumulate_grad_batches 1 \
  --lr 2e-05 \
  --batch_size 32 \
  --n_epochs 10 \
  --early_stopping_patience 30 \
  --precision 16 \
  --num_hans_train_examples 0

echo "FINISHED at $(date)"