-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
80 lines (57 loc) · 2.54 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
from modules.data import TextDataset, load_data, split_dataset
from modules.model import LiteGPT
from modules.train import train
from modules.eval import generate_text, evaluate
from modules.utils import count_parameters, loss_curve, load_configuration
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
import tiktoken
import os
import math
import warnings
def main() -> None:
    """Train LiteGPT on the configured corpus, sample from it, save weights, and evaluate.

    Pipeline: load YAML config -> load & tokenize data -> build model ->
    train -> plot the loss curve -> generate a text sample -> persist the
    model state dict -> report test loss, top-5 accuracy, and perplexity.
    """
    warnings.filterwarnings("ignore")
    torch.manual_seed(42)  # reproducible init and shuffling

    config_path = "config/config.yaml"
    config = load_configuration(config_path)
    device = "cuda" if torch.cuda.is_available() else "cpu"

    data = load_data(folder_path=config["data"]["path"])
    tokenizer = tiktoken.get_encoding("gpt2")
    vocab_size = tokenizer.n_vocab

    dataset = TextDataset(
        text=data,
        tokenizer=tokenizer,
        context_length=config["model"]["context_length"],
        stride=config["data"]["stride"],
    )
    train_dataset, test_dataset = split_dataset(dataset, train_size=0.8)

    batch_size = config["training"]["batch_size"]
    train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    # Evaluation must see every held-out example exactly once, in a fixed order:
    # shuffling is pointless for metrics, and drop_last=True would silently
    # discard up to batch_size - 1 test samples and bias the reported numbers.
    test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=False)

    model = LiteGPT(
        vocab_size=vocab_size,
        context_length=config["model"]["context_length"],
        embedding_dim=config["model"]["embedding_dim"],
        num_heads=config["model"]["num_heads"],
        num_layers=config["model"]["num_layers"],
        ff_dim=config["model"]["ff_dim"],
        dropout=config["model"]["dropout"],
    ).to(device)

    optimizer = torch.optim.AdamW(model.parameters(), lr=config["training"]["learning_rate"])
    criterion = nn.CrossEntropyLoss()
    print(f"Total Number of Parameters: {count_parameters(model)}")

    losses = train(model, optimizer, criterion, train_dataloader, config["training"]["num_epochs"], device)
    loss_curve(losses, title="Training Loss")

    prompt = "For years to come "
    generated_text = generate_text(model, tokenizer, prompt)
    print(generated_text)

    os.makedirs("models", exist_ok=True)
    model_save_path = "models/litegpt_model.pth"
    torch.save(model.state_dict(), model_save_path)
    print(f"Model saved to {model_save_path}")

    test_loss, top_k_acc = evaluate(model, criterion, test_dataloader, device)
    print(f"Test Loss: {test_loss}")
    print(f"Top-5 Accuracy: {top_k_acc * 100:.4f}%")
    print(f"Perplexity: {math.exp(test_loss)}")  # Perplexity = e^{cross_entropy_loss}
# Run the full training/evaluation pipeline only when executed as a script,
# not when this module is imported.
if __name__ == "__main__":
    main()