model.py (forked from ndrplz/self-driving-car)
from keras.models import Model
from keras.layers import Input, Convolution2D, Flatten, Dense, Dropout, ELU, Lambda
from keras.callbacks import ModelCheckpoint, CSVLogger
import keras.backend as K
from config import *
from load_data import generate_data_batch, split_train_val


def get_nvidia_model(summary=True):
    """
    Get the keras Model corresponding to the NVIDIA architecture described in:
    Bojarski, Mariusz, et al. "End to end learning for self-driving cars."

    The paper describes the network architecture but doesn't go into detail on some aspects.
    Input normalization and the ELU activations are my personal implementation choices.

    :param summary: show model summary
    :return: keras Model of NVIDIA architecture
    """
    init = 'glorot_uniform'

    if K.backend() == 'theano':
        input_frame = Input(shape=(CONFIG['input_channels'], NVIDIA_H, NVIDIA_W))
    else:
        input_frame = Input(shape=(NVIDIA_H, NVIDIA_W, CONFIG['input_channels']))

    # standardize input
    x = Lambda(lambda z: z / 127.5 - 1.)(input_frame)

    # convolutional stack (as in the NVIDIA paper: 3 strided 5x5 convs, 2 unstrided 3x3 convs)
    x = Convolution2D(24, 5, 5, border_mode='valid', subsample=(2, 2), init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(36, 5, 5, border_mode='valid', subsample=(2, 2), init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(48, 5, 5, border_mode='valid', subsample=(2, 2), init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(64, 3, 3, border_mode='valid', init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)
    x = Convolution2D(64, 3, 3, border_mode='valid', init=init)(x)
    x = ELU()(x)
    x = Dropout(0.2)(x)

    x = Flatten()(x)

    # fully-connected stack, regressing a single steering value
    x = Dense(100, init=init)(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(50, init=init)(x)
    x = ELU()(x)
    x = Dropout(0.5)(x)
    x = Dense(10, init=init)(x)
    x = ELU()(x)
    out = Dense(1, init=init)(x)

    model = Model(input=input_frame, output=out)

    if summary:
        model.summary()

    return model
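

# The helper below is an illustrative sketch and NOT part of the original training
# script: it shows one way the architecture dumped to 'logs/model.json' and a weights
# checkpoint written by ModelCheckpoint could be restored for inference. The default
# weights filename is a made-up placeholder (actual names follow the
# 'weights.{epoch:02d}-{val_loss:.3f}.hdf5' pattern used above); model_from_json and
# load_weights are standard Keras calls, but deserializing the Lambda layer may
# require the same Keras / Python version used for training.
def load_trained_model(json_path='logs/model.json',
                       weights_path='checkpoints/weights.49-0.010.hdf5'):
    """
    Rebuild the network from its JSON description and load trained weights.

    :param json_path: path to the architecture dumped with model.to_json()
    :param weights_path: path to a checkpoint saved during training (placeholder default)
    :return: keras Model ready for prediction
    """
    from keras.models import model_from_json

    with open(json_path) as f:
        model = model_from_json(f.read())
    model.load_weights(weights_path)
    model.compile(optimizer='adam', loss='mse')  # only needed if training is resumed
    return model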


if __name__ == '__main__':

    # split udacity csv data into training and validation
    train_data, val_data = split_train_val(csv_driving_data='data/driving_log.csv')

    # get network model and compile it (default Adam opt)
    nvidia_net = get_nvidia_model(summary=True)
    nvidia_net.compile(optimizer='adam', loss='mse')

    # json dump of model architecture
    with open('logs/model.json', 'w') as f:
        f.write(nvidia_net.to_json())

    # define callbacks to save history and weights
    checkpointer = ModelCheckpoint('checkpoints/weights.{epoch:02d}-{val_loss:.3f}.hdf5')
    logger = CSVLogger(filename='logs/history.csv')

    # start the training
    nvidia_net.fit_generator(generator=generate_data_batch(train_data, augment_data=True, bias=CONFIG['bias']),
                             samples_per_epoch=300*CONFIG['batchsize'],
                             nb_epoch=50,
                             validation_data=generate_data_batch(val_data, augment_data=False, bias=1.0),
                             nb_val_samples=100*CONFIG['batchsize'],
                             callbacks=[checkpointer, logger])
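
# Hypothetical usage after training (a sketch, not part of the original script):
# rebuild the network with load_trained_model() above, then predict the steering
# angle for a single preprocessed frame of shape
# (NVIDIA_H, NVIDIA_W, CONFIG['input_channels']):
#
#   model = load_trained_model(weights_path='checkpoints/weights.49-0.010.hdf5')
#   steering_angle = float(model.predict(frame[None, ...], batch_size=1))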