convNeXt.py
"""
ConvNeXt PRETRAINED-22K MODEL
"""
from common import train_model, add_head, load_dataset, graph_saver, DATASETS
from timm.loss import LabelSmoothingCrossEntropy
from torch.optim import AdamW, lr_scheduler
import argparse, timm, torch, os, time
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
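
# For each dataset in DATASETS (defined in common.py), main() fine-tunes a
# pretrained ConvNeXt-Base, appends per-epoch metrics to ConvNeXt.txt, saves
# training curves via graph_saver (behavior assumed from its name and
# arguments), and checkpoints the weights under models/ConvNeXt/<dataset>/.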
def main(EPOCHS, LEARNING_RATE, BATCH_SIZE):
    print("ConvNeXt Pretrained-22k Model")
    print("Epochs: {}, Learning Rate: {}, Batch Size: {}".format(EPOCHS, LEARNING_RATE, BATCH_SIZE))
    print("=========================================")
    for datasetName in DATASETS:
        (train_loader, test_loader, val_loader), (train_len, test_len, val_len), num_classes, classes = load_dataset(datasetName, BATCH_SIZE)
        # Load a pretrained ConvNeXt-Base; passing num_classes makes timm
        # replace the classification head to fit the current dataset
        model = timm.create_model('convnext_base', pretrained=True, num_classes=num_classes)
        data_loaders = {
            "train": train_loader,
            "val": val_loader
        }
        dataset_sizes = {
            "train": train_len,
            "val": val_len
        }
        model = model.to(device)
        criterion = LabelSmoothingCrossEntropy()
        criterion = criterion.to(device)
        optimizer = AdamW(model.parameters(), lr=LEARNING_RATE)
        # Decay the learning rate by a factor of 10 every 7 epochs
        exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
        model, train_loss, train_acc, val_loss, val_acc = train_model(model,
                                                                      data_loaders,
                                                                      dataset_sizes,
                                                                      device,
                                                                      criterion,
                                                                      optimizer,
                                                                      exp_lr_scheduler,
                                                                      EPOCHS)
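        # train_model (from common.py) is expected to return the fine-tuned
        # model together with per-epoch loss and accuracy lists for the train
        # and val phases; that contract is inferred from the unpacking above.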
        # Append this run's per-epoch metrics to the results file: one
        # space-separated line each for train loss, train acc, val loss, val acc
        with open('ConvNeXt.txt', 'a') as f:
            f.write("=========================================\n")
            f.write("Model: ConvNeXt, Dataset: {}\n".format(datasetName))
            f.write("Epochs: {}, Learning Rate: {}, Batch Size: {}\n".format(EPOCHS, LEARNING_RATE, BATCH_SIZE))
            for series in (train_loss, train_acc, val_loss, val_acc):
                for i in range(EPOCHS):
                    f.write("{:.4f} ".format(series[i]))
                f.write("\n")
            f.write("=========================================\n")
        graph_saver('ConvNeXt', datasetName, EPOCHS, LEARNING_RATE, BATCH_SIZE, train_loss, train_acc, val_loss, val_acc)
        # Create the checkpoint directory for this dataset if it doesn't exist
        os.makedirs('models/ConvNeXt/{}'.format(datasetName), exist_ok=True)
        torch.save(model.state_dict(), 'models/ConvNeXt/{}/model_{}_{}_{}.pt'.format(datasetName, EPOCHS, LEARNING_RATE, BATCH_SIZE))
        # Release GPU memory before moving on to the next dataset
        del model
        torch.cuda.empty_cache()
    return 0
if __name__ == '__main__':
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-e', type=int, default=10)
    parser.add_argument('-lr', type=float, default=0.001)
    parser.add_argument('-b', type=int, default=128)
    args = parser.parse_args()
    main(args.e, args.lr, args.b)
    """
    # epochs = [5, 10]
    # learning_rates = [0.0001, 0.001]
    # batch_sizes = [64, 128]
    epochs = [25]
    learning_rates = [0.001]
    batch_sizes = [64]
    start_time = time.time()
    # Run one full training sweep per hyperparameter combination
    for e in epochs:
        for lr in learning_rates:
            for b in batch_sizes:
                main(e, lr, b)
    end_time = time.time()
    print("Total Time: {:.2f}s".format(end_time - start_time))
    # Record the total wall-clock time at the end of the results file
    with open('ConvNeXt.txt', 'a') as f:
        f.write("Total Time: {:.2f}s\n".format(end_time - start_time))