llama2-full.py
from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    DataCollatorWithPadding,
    AutoModelForSequenceClassification,
    TrainingArguments,
    Trainer,
)
import evaluate
import numpy as np
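# Load the locally cached IMDB parquet shards as train/validation splits.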
data_files = {
"train": "/mnt/sdb/benchmark/xiangrui/dfss_new/imdb/train-00000-of-00001.parquet",
"validation": "/mnt/sdb/benchmark/xiangrui/dfss_new/imdb/test-00000-of-00001.parquet"
}
train_dataset_ = load_dataset("parquet", data_files=data_files, split="train")
eval_dataset_ = load_dataset("parquet", data_files=data_files, split="validation")
train_dataset = train_dataset_.shuffle(seed=43)
eval_dataset = eval_dataset_.shuffle(seed=66)
train_len = 500
eval_len = 1500
train_dataset = train_dataset.select(range(train_len))
eval_dataset = eval_dataset.select(range(eval_len))
print(train_dataset)
print(eval_dataset)
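# Count positive/negative labels to sanity-check class balance in the sampled subsets.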
t1_num = 0
f1_num = 0
t2_num = 0
f2_num = 0
for i in range(train_len):
    if train_dataset[i]["label"] == 0:
        f1_num += 1
    else:
        t1_num += 1
for i in range(eval_len):
    if eval_dataset[i]["label"] == 0:
        f2_num += 1
    else:
        t2_num += 1
print("label in train_dataset: true_num =", t1_num, "false_num =", f1_num)
print("label in eval_dataset: true_num =", t2_num, "false_num =", f2_num)
print("="*20,"loaded dataset","="*20)
# Base model to fine-tune (a local snapshot of Llama-2-7b-hf from the Hugging Face hub)
model_name = "/data2/share/llama/Llama-2-7b-hf"
# Output directory for the fine-tuned model checkpoints
new_model = "./trained_model/test"
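# Llama-2's tokenizer ships without a pad token, so reuse EOS to allow padded batches.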
tokenizer = AutoTokenizer.from_pretrained(model_name)
tokenizer.pad_token = tokenizer.eos_token
tokenizer.padding_side = "right" # Fix weird overflow issue with fp16 training
print("="*20,"loaded tokenizer","="*20)
def preprocess_function(examples):
    return tokenizer(examples["text"], truncation=True)
train_dataset = train_dataset.map(preprocess_function, batched=True)
eval_dataset = eval_dataset.map(preprocess_function, batched=True)
print("="*20,"preprocessed dataset","="*20)
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
accuracy = evaluate.load("accuracy")
print("="*20,"loaded accuracy","="*20)
def compute_metrics(eval_pred):
    predictions, labels = eval_pred
    predictions = np.argmax(predictions, axis=1)
    return accuracy.compute(predictions=predictions, references=labels)
id2label = {0: "NEGATIVE", 1: "POSITIVE"}
label2id = {"NEGATIVE": 0, "POSITIVE": 1}
model = AutoModelForSequenceClassification.from_pretrained(
    model_name, num_labels=2, id2label=id2label, label2id=label2id
)
model.config.pad_token_id = model.config.eos_token_id
print("="*20,"loaded model","="*20)
training_args = TrainingArguments(
    output_dir=new_model,
    learning_rate=2e-5,
    per_device_train_batch_size=4,
    per_device_eval_batch_size=4,
    num_train_epochs=2,
    weight_decay=0.01,
    evaluation_strategy="steps",
    eval_steps=0.1,
    save_strategy="steps",
    save_steps=0.1,
    save_only_model=True,
    load_best_model_at_end=True,
    save_total_limit=1,
    report_to="tensorboard",
    fp16=True,
    # deepspeed="./ds_config_unoffload.json",
    deepspeed="./ds_config.json",
    gradient_checkpointing=True,
    # deepspeed="./ds_config_zero3.json",
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    tokenizer=tokenizer,
    data_collator=data_collator,
    compute_metrics=compute_metrics,
)
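# With deepspeed set in TrainingArguments, launch this script through the DeepSpeed
# launcher (e.g. `deepspeed llama2-full.py`) so the ZeRO config in ds_config.json is applied.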
trainer.train()