RAE.py
import torch
from torch import nn, optim
import torch.nn.functional as F


class RAEenc(nn.Module):
    """Encoder: a bidirectional LSTM that compresses a (batch, 1, 128, T)
    input into a `dim`-dimensional bottleneck vector."""

    def __init__(self, dim=32):
        super().__init__()
        # 128 features per timestep, 64 hidden units per direction
        self.lstm1 = nn.LSTM(128, 64, batch_first=True, bidirectional=True)
        # 256 = 128 (last bidirectional output) + 128 (flattened final hidden states)
        self.linear = nn.Linear(256, dim)

    def forward(self, x):
        x = x[:, 0, :, :]            # drop the channel dim: (B, 1, 128, T) -> (B, 128, T)
        x = x.permute(0, 2, 1)       # time-major for a batch_first LSTM: (B, T, 128)
        x, (hn, cn) = self.lstm1(x)  # x: (B, T, 128), hn: (2, B, 64)
        hn = hn.permute(1, 0, 2)     # (B, 2, 64)
        x = x[:, -1, :]              # output at the last timestep: (B, 128)
        hn = hn.contiguous().view(hn.size(0), -1)  # flatten both directions: (B, 128)
        x = x.view(x.size(0), -1)    # (B, 128)
        x2 = torch.cat((x, hn), 1)   # (B, 256)
        x = F.leaky_relu(self.linear(x2))  # bottleneck: (B, dim)
        return x


class RAEdec(nn.Module):
    """Decoder: repeats the bottleneck vector at every timestep and runs a
    two-layer LSTM to reconstruct a (batch, 1, 128, seq_len) output."""

    def __init__(self, dim=32, seq_len=126):
        super().__init__()
        self.lstm = nn.LSTM(dim, 128, batch_first=True, num_layers=2)
        self.seq_len = seq_len

    def forward(self, x):
        # Tile the bottleneck along the time axis: (B, dim) -> (B, seq_len, dim)
        x = x.unsqueeze(1).repeat(1, self.seq_len, 1)
        x, (h, c) = self.lstm(x)     # (B, seq_len, 128)
        x = x.permute(0, 2, 1)       # (B, 128, seq_len)
        x = x.unsqueeze(1)           # restore the channel dim: (B, 1, 128, seq_len)
        return x


class RAEn(nn.Module):
    """Recurrent autoencoder: encoder + decoder; returns both the
    reconstruction and the bottleneck embedding."""

    def __init__(self, dim):
        super().__init__()
        self.encoder = RAEenc(dim=dim)
        self.decoder = RAEdec(dim=dim)  # seq_len stays at its default of 126

    def forward(self, x):
        bottleneck = self.encoder(x)
        x = self.decoder(bottleneck)
        return x, bottleneck
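

# --- Usage sketch (not part of the original file) ---
# A minimal smoke test and a single training step, assuming inputs of shape
# (batch, 1, 128, 126) and an MSE reconstruction loss; the input shape and
# the loss choice are assumptions made for illustration, not taken from
# this repository.
if __name__ == "__main__":
    model = RAEn(dim=32)
    x = torch.randn(4, 1, 128, 126)   # dummy batch: (B, C, features, frames)
    recon, z = model(x)
    print(recon.shape, z.shape)       # torch.Size([4, 1, 128, 126]) torch.Size([4, 32])

    # One optimization step against the reconstruction objective.
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    loss = F.mse_loss(recon, x)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()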