-
Notifications
You must be signed in to change notification settings - Fork 6
/
convLSTMAE.py
100 lines (79 loc) · 3.79 KB
/
convLSTMAE.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 20 07:14:36 2019
@author: Shrishti D Hore
"""
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
import utils
import scipy
from scipy import signal
import mxnet as mx
from matplotlib import pyplot as plt
from keras.models import model_from_json
from matplotlib import colors
from mxnet import gluon
import glob
import numpy as np
import os
from PIL import Image
import json
train_directory = "HEU_AI/UCSD_Anomaly_Dataset.v1p2/UCSDped1/Train/*/*"
test_directory = "HEU_AI/UCSD_Anomaly_Dataset.v1p2/UCSDped1/Test/*/*"
# convolutional spatio-temporal autoencoder
# convolutional spatio-temporal autoencoder
class ConvSTAE(gluon.nn.HybridBlock):
    """Convolutional spatio-temporal autoencoder.

    Squeezes a stacked-frame input through a strided-conv encoder and
    reconstructs it with a mirrored transposed-conv decoder; downstream
    code scores anomalies by reconstruction error (see ``train``'s L2 loss).
    """

    def __init__(self):
        super(ConvSTAE, self).__init__()
        with self.name_scope():
            # Encoder: Conv(512, k15, s4) -> pool -> Conv(256, k4) -> pool
            # -> Conv(128, k3), with BatchNorm after every conv/pool stage.
            self.encoder = gluon.nn.HybridSequential(prefix="encoder")
            with self.encoder.name_scope():
                encoder_stages = (
                    gluon.nn.Conv2D(512, kernel_size=15, strides=4, padding=0, activation='relu'),
                    gluon.nn.BatchNorm(),
                    gluon.nn.MaxPool2D(2),
                    gluon.nn.BatchNorm(),
                    gluon.nn.Conv2D(256, kernel_size=4, padding=0, activation='relu'),
                    gluon.nn.BatchNorm(),
                    gluon.nn.MaxPool2D(2),
                    gluon.nn.BatchNorm(),
                    gluon.nn.Conv2D(128, kernel_size=3, padding=0, activation='relu'),
                    gluon.nn.BatchNorm(),
                )
                for stage in encoder_stages:
                    self.encoder.add(stage)
            # Decoder mirrors the encoder: transposed convs interleaved with
            # nearest-neighbour 2x upsampling; the final sigmoid maps the
            # output back to a 10-channel stack in [0, 1].
            self.decoder = gluon.nn.HybridSequential(prefix="decoder")
            with self.decoder.name_scope():
                decoder_stages = (
                    gluon.nn.Conv2DTranspose(channels=256, kernel_size=3, padding=0, activation='relu'),
                    gluon.nn.BatchNorm(),
                    gluon.nn.HybridLambda(lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest')),
                    gluon.nn.BatchNorm(),
                    gluon.nn.Conv2DTranspose(channels=512, kernel_size=4, padding=0, activation='relu'),
                    gluon.nn.BatchNorm(),
                    gluon.nn.HybridLambda(lambda F, x: F.UpSampling(x, scale=2, sample_type='nearest')),
                    gluon.nn.BatchNorm(),
                    gluon.nn.Conv2DTranspose(channels=10, kernel_size=15, padding=0, strides=4, activation='sigmoid'),
                )
                for stage in decoder_stages:
                    self.decoder.add(stage)

    def hybrid_forward(self, F, x):
        """Encode then decode ``x``; returns the reconstruction."""
        return self.decoder(self.encoder(x))
# Default destination for the trained model parameters (used as the
# `params_file` default of `train` below).
# NOTE(review): hard-coded absolute Windows user path — breaks on any other
# machine; consider deriving from an env var or CLI argument.
encoded_parameters = "C:\\Users\\Shrishti D Hore\\OneDrive\\Documents\\HEU_AI\\parameters_encoded.json"
# Train the autoencoder
def train(batch_size, ctx, num_epochs, path, lr=1e-4, wd=1e-5, params_file=encoded_parameters):
    """Train the ConvSTAE autoencoder to reconstruct stacked-frame inputs.

    Parameters
    ----------
    batch_size : int
        Samples per gradient step (also the Trainer step normalizer).
    ctx : mx.Context
        Device to train on, e.g. ``mx.cpu()`` or ``mx.gpu(0)``.
    num_epochs : int
        Number of passes over the dataset.
    path : str
        Dataset location forwarded to ``utils.create_dataset_stacked_images``.
    lr : float
        Adam learning rate.
    wd : float
        Weight decay.
    params_file : str
        File the trained parameters are saved to.

    Returns
    -------
    tuple
        ``(model, params_file)`` — the trained network and the save path.
    """
    # Dataloader for training dataset
    dataloader = utils.create_dataset_stacked_images(path, batch_size, shuffle=True, augment=True)
    # Get model
    model = ConvSTAE()
    model.hybridize()
    # BUG FIX: initialize parameters on the caller-supplied `ctx`, not a
    # hard-coded mx.cpu(). Batches below are moved to `ctx`, so a CPU-pinned
    # parameter set crashes (device mismatch) whenever ctx is a GPU.
    model.collect_params().initialize(mx.init.Xavier(), ctx=ctx)
    # Reconstruction objective: pixel-wise L2 between input and output.
    l2loss = gluon.loss.L2Loss()
    optimizer = gluon.Trainer(model.collect_params(), 'adam',
                              {'learning_rate': lr, 'wd': wd, 'epsilon': 1e-6})
    # start the training loop
    for epoch in range(num_epochs):
        for image in dataloader:
            image = image.as_in_context(ctx)
            with mx.autograd.record():
                reconstructed = model(image)
                loss = l2loss(reconstructed, image)
            loss.backward()
            optimizer.step(batch_size)
        # NOTE: reports the loss of the last batch only, not an epoch average.
        print('epoch [{}/{}], loss:{:.4f}'.format(epoch + 1, num_epochs, mx.nd.mean(loss).asscalar()))
    # Persist the trained weights once all epochs are done.
    model.save_parameters(params_file)
    return model, params_file