train_unsupervised.py
import sys
import os
import torch
import torch.nn as nn
# Trace/tensor data transforms
from DataTransers.TraceTranser import TripleBatchTransform
from DataTransers.TraceTranser import LoopMove
from IIC_Loss import IIC_Loss
from DataLoaders.LoadPartASCAD import Datasetloader
from config import *
# The network architecture itself is imported inside train(), selected by net_structure
from utils import setup_seed
from tqdm import tqdm
setup_seed(seed)
# show = ToPILImage()  # converts a Tensor to a PIL Image for easy visualization
def train():
    train_loader, _ = Datasetloader(train_data_path)(bs, is_shuffle, dataset_mode, left, right)
    # Select the network architecture here
    print("Training with the %s network architecture" % net_structure)
    if net_structure == 'cm3':
        from Nets.cnn_multi_head import CNNNet
        model = CNNNet().to(device)
    elif net_structure == 'mm5':
        from Nets.mlp_mutil_head_5layer import MLPNet
        model = MLPNet().to(device)
    elif net_structure == 'mm7':
        from Nets.mlp_mutil_head_7layer import MLPNet
        model = MLPNet().to(device)
    elif net_structure == 'cmp3':
        from Nets.cnn_mutil_head_pca import CNNNet
        model = CNNNet().to(device)
    else:
        raise ValueError(f"Unknown net_structure: {net_structure}")
    print(f"Using {opt_structure} as the optimizer")
    if opt_structure == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    elif opt_structure == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    else:
        raise ValueError(f"Unknown opt_structure: {opt_structure}")
    iic_loss_fn = IIC_Loss()
    iic_loss_fn = iic_loss_fn.to(device)
    # transformer2 = TripleBatchTransform()
    # transformer2 = transformer2.to(device)
    transformer2 = LoopMove(fixed_move)
    # Record the average loss of each epoch
    loss_list = []
    for epoch in range(num_epochs):
        model.train()
        running_loss = 0.0
        print(f"epoch: {epoch}")
        for i, data in enumerate(tqdm(train_loader), 0):
            inputs, _, labels = data
            inputs, labels = inputs.to(device), labels.to(device)
            # Apply the trace transform to obtain a second view of the batch
            inputs_trans = transformer2(inputs)
            optimizer.zero_grad()
            outputs1 = model(inputs)
            outputs2 = model(inputs_trans)
            # Compute the IIC loss, summed over all sub-heads
            loss = 0.0
            loss_no_lamb = 0.0
            for head in range(num_sub_heads):
                loss1, _ = iic_loss_fn(outputs1[head], outputs2[head])
                loss += loss1
                # loss_no_lamb += (loss1_no_lamb + loss2_no_lamb)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        loss_list.append(running_loss / len(train_loader))
        print(f"Loss: {running_loss / len(train_loader)}")
        if saveModel:
            # Create saveModelPath if it does not exist
            if not os.path.exists(saveModelPath):
                os.makedirs(saveModelPath)
            torch.save(model, saveModelPath + f"model_{epoch}.pth")
    # Save the final model
    print(f"The final model is saved to {modelsaveName}")
    torch.save(model, modelsaveName)
    # Save the per-epoch average losses
    with open(lossSavePath, "w") as f:
        for i in loss_list:
            f.write(str(i) + "\n")


if __name__ == "__main__":
    train()