train.py
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.utils.data as torchdata
from random import randint
import numpy as np
import flor
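# flor is used here for lightweight experiment tracking: flor.arg records named
# hyperparameters (with defaults), flor.log records metrics, and flor.loop /
# flor.checkpointing drive the checkpointed training loop further below.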
# Device configuration
device = torch.device(
    flor.arg(
        "device",
        "mps"
        if torch.backends.mps.is_available()
        else "cuda"
        if torch.cuda.is_available()
        else "cpu",
    )
)
seed = flor.arg("seed", default=randint(1, int(1e10)))  # randint needs integer bounds
torch.manual_seed(seed)
# Hyper-parameters
input_size = 784
hidden_size = flor.arg("hidden", default=500)
num_classes = 10
num_epochs = flor.arg("epochs", 5)
batch_size = flor.arg("batch_size", 32)
learning_rate = flor.arg("lr", 1e-3)
# MNIST dataset
train_dataset = torchvision.datasets.MNIST(
    root="../data", train=True, transform=transforms.ToTensor(), download=True
)
test_dataset = torchvision.datasets.MNIST(
    root="../data", train=False, transform=transforms.ToTensor()
)
# Data loader
train_loader = torchdata.DataLoader(
    dataset=train_dataset, batch_size=batch_size, shuffle=True
)
test_loader = torchdata.DataLoader(dataset=test_dataset, batch_size=batch_size)
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        out = self.fc1(x)
        out = self.relu(out)  # activation on the hidden layer, before the output layer
        out = self.fc2(out)
        return out
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
# Loss and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
def get_val_loader(fraction=0.2):
    indices = list(range(len(test_dataset)))
    np.random.shuffle(indices)
    split = int(np.floor(fraction * len(test_dataset)))
    subset_indices = indices[:split]
    sampler = torchdata.SubsetRandomSampler(subset_indices)
    return torchdata.DataLoader(test_dataset, sampler=sampler, batch_size=batch_size)
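# Note: each call to get_val_loader() samples a fresh random 20% subset of the
# test set, so the per-epoch "val_acc" logged below is measured on a different
# sample each time.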
def validate(val_loader: torchdata.DataLoader):
    # Test the model
    # In test phase, we don't need to compute gradients (for memory efficiency)
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in val_loader:
            images = images.reshape(-1, 28 * 28).to(device)
            labels = labels.to(device)
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    return int(correct), int(total)
print_every = flor.arg("print_every", 500)
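# Training loop: flor.checkpointing registers the model and optimizer state, and
# the flor.loop wrappers mark the epoch/step iterations that flor tracks, so
# progress can be checkpointed between iterations.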
with flor.checkpointing(model=model, optimizer=optimizer):
    for epoch in flor.loop("epoch", range(num_epochs)):
        for i, (images, labels) in flor.loop("step", enumerate(train_loader)):
            # Move tensors to the configured device
            images = images.reshape(-1, 28 * 28).to(device)
            labels = labels.to(device)
            # Forward pass
            outputs = model(images)
            loss = criterion(outputs, labels)
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % print_every == 0:
                flor.log("loss", loss.item())
        correct, total = validate(get_val_loader())
        flor.log("val_acc", 100 * correct / total)

correct, total = validate(test_loader)
flor.log("accuracy", 100 * correct / total)
flor.log("correct", correct)