
Commit e354300
Implementing Inception-v4 with the Keras Functional API: Create an InceptionV4 network
1 parent 0d8965a

File tree
ml/deep_learning/keras_functional_api/helper_function.py
ml/deep_learning/keras_functional_api/inception-v4-network.py

2 files changed: 105 additions, 15 deletions

ml/deep_learning/keras_functional_api/helper_function.py

Lines changed: 67 additions & 15 deletions
@@ -5,40 +5,92 @@
 # A collection of utilities functions

 from keras import regularizers
-from keras.initializers import initializers_v1
-from keras import backend as be

-from keras.models import Model
+from keras.initializers import initializers_v1
 from keras.layers.convolutional import MaxPooling2D, Conv2D, AveragePooling2D
-from keras.layers import Input, Dropout, Dense, Flatten, Activation
+from keras.layers import Activation
 from keras.layers.merging import concatenate
 from keras.layers.normalization.batch_normalization import BatchNormalization
-from keras.optimizers import Adam

 # Hyperparameters we can adjust
-DROPOUT_PROBABILITY = 0.1
-INITIAL_LEARNING_RATE = 0.001
 L2_REGULARIZATION_AMOUNT = 0.00004

-# Adjust these to match the dimensions of our input image.
-IMAGE_HEIGHT = 299
-IMAGE_WIDTH = 299
-IMAGE_CHANNELS = 3

-# Reduce this if this model does not fit on our GPU.
-BATCH_SIZE = 24
+def build_inception_v4_conv_base(input_tensor):
+    """
+    Create the convolutions base portion of the InceptionV4 network.
+    :param input_tensor:
+    :return:
+    """
+    # The stem
+    conv_base = build_inception_v4_stem(input_tensor)
+    # 4 Inception A blocks
+    conv_base = build_inception_a_block(conv_base)
+    conv_base = build_inception_a_block(conv_base)
+    conv_base = build_inception_a_block(conv_base)
+    conv_base = build_inception_a_block(conv_base)
+    # 1 Reduction A block
+    conv_base = build_reduction_a_block(conv_base)
+    # 7 Inception B blocks
+    conv_base = build_inception_b_block(conv_base)
+    conv_base = build_inception_b_block(conv_base)
+    conv_base = build_inception_b_block(conv_base)
+    conv_base = build_inception_b_block(conv_base)
+    conv_base = build_inception_b_block(conv_base)
+    conv_base = build_inception_b_block(conv_base)
+    conv_base = build_inception_b_block(conv_base)
+    # 1 Reduction B block
+    conv_base = build_reduction_b_block(conv_base)
+    # 3 Inception C blocks
+    conv_base = build_inception_c_block(conv_base)
+    conv_base = build_inception_c_block(conv_base)
+    conv_base = build_inception_c_block(conv_base)
+
+    return conv_base
+
+
+def build_inception_v4_stem(input_tensor):
+    """
+    Create the Inception-v4 stem of the Inception Architecture
+    :param input_tensor: The input image tensor
+    :return: outputs of all input branches
+    """
+    # First stage of the stem:
+    stem = conv2d_batch_norm_relu(input_tensor, 32, 3, 3, strides=(2, 2), padding='valid')
+    stem = conv2d_batch_norm_relu(stem, 32, 3, 3, padding='valid')
+    stem = conv2d_batch_norm_relu(stem, 64, 3, 3)
+    # Second stage of the stem:
+    left_1 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(stem)
+    right_1 = conv2d_batch_norm_relu(stem, 96, 3, 3, strides=(2, 2), padding='valid')
+    # Concatenate all the results from the two branches
+    stem = concatenate([left_1, right_1], axis=-1)
+    # Third stage of the stem:
+    left_2 = conv2d_batch_norm_relu(stem, 64, 1, 1)
+    left_2 = conv2d_batch_norm_relu(left_2, 96, 3, 3, padding='valid')
+    right_2 = conv2d_batch_norm_relu(stem, 64, 1, 1)
+    right_2 = conv2d_batch_norm_relu(right_2, 64, 1, 7)
+    right_2 = conv2d_batch_norm_relu(right_2, 64, 7, 1)
+    right_2 = conv2d_batch_norm_relu(right_2, 96, 3, 3, padding='valid')
+    # Concatenate all the results from the two branches
+    stem = concatenate([left_2, right_2], axis=-1)
+    # Fourth stage of the stem:
+    left_3 = conv2d_batch_norm_relu(stem, 192, 3, 3, strides=(2, 2), padding='valid')
+    right_3 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(stem)
+    # Concatenate all the results from the two branches
+    stem = concatenate([left_3, right_3], axis=-1)
+    return stem


 def build_reduction_b_block(input_tensor):
     """
-    A reduction block: Transform a 35x35 input into a 17x17 input in an efficient manner.
+    A reduction block: Transform a 17x17 input into a 8x8 input in an efficient manner.
     :param input_tensor: The input image tensor
     :return: outputs of the three input branches
     """
     # This is the first branch from the left
     branch_left = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input_tensor)
     # This is the middle branch
-    branch_middle = conv2d_batch_norm_relu(input_tensor, 192 , 1, 1)
+    branch_middle = conv2d_batch_norm_relu(input_tensor, 192, 1, 1)
     branch_middle = conv2d_batch_norm_relu(branch_middle, 192, 3, 3, strides=(2, 2), padding='valid')
     # This is the right branch
     branch_right = conv2d_batch_norm_relu(input_tensor, 256, 1, 1)
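Note: the stem and reduction code above call a conv2d_batch_norm_relu helper that is defined elsewhere in helper_function.py and is not part of this hunk. As a rough sketch only, assuming the usual Conv2D -> BatchNormalization -> ReLU pattern, reusing the module's existing imports (Conv2D, BatchNormalization, Activation, regularizers) and its L2_REGULARIZATION_AMOUNT constant, such a helper might look like the following; the actual implementation in the repository may differ in argument names and defaults:

def conv2d_batch_norm_relu(input_tensor, num_filters, filter_height, filter_width,
                           strides=(1, 1), padding='same'):
    # Hypothetical sketch, not the repository's implementation.
    # Convolution with L2 weight decay; the bias is omitted because batch norm adds its own shift.
    x = Conv2D(num_filters, (filter_height, filter_width), strides=strides, padding=padding,
               use_bias=False,
               kernel_regularizer=regularizers.l2(L2_REGULARIZATION_AMOUNT))(input_tensor)
    # Normalize over the channel axis (channels_last data format), then apply the ReLU non-linearity.
    x = BatchNormalization(axis=-1)(x)
    x = Activation('relu')(x)
    return x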

ml/deep_learning/keras_functional_api/inception-v4-network.py

Lines changed: 38 additions & 0 deletions
@@ -3,9 +3,47 @@
 #
 # Topic: Deep Learning with Keras framework (A deep learning library)
 # Implementing Inception-v4 with the Keras Functional API
+# Create an InceptionV4 network - An Inception Architecture.

 from keras import backend as be

+from keras.models import Model
+from keras.layers.convolutional import AveragePooling2D
+from keras.layers import Input, Dropout, Dense, Flatten
+from keras.optimizers import Adam
+
+from helper_function import build_inception_v4_conv_base
+
+# Hyperparameters we can adjust
+DROPOUT_PROBABILITY = 0.1
+INITIAL_LEARNING_RATE = 0.001
+
+# Adjust these to match the dimensions of our input image.
+IMAGE_HEIGHT = 299
+IMAGE_WIDTH = 299
+IMAGE_CHANNELS = 3
+
 # Check the data ordering format (if we're using Theano as the backend => should be channels_first.).
 be.set_image_data_format('channels_last')
 print(be.image_data_format())
+
+# The previous layer of the network (or the input image tensor).
+INPUT_TENSOR = Input((IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS))
+# Convolutions Base
+conv_base = build_inception_v4_conv_base(INPUT_TENSOR)
+# The classifier on top of the inception-v4 convolutions base.
+pool_output = AveragePooling2D((8, 8), padding='valid')(conv_base)
+dropout_output = Dropout(DROPOUT_PROBABILITY)(pool_output)
+flattened_output = Flatten()(dropout_output)
+network_output = Dense(units=2, activation='softmax')(flattened_output)
+# Use the Adam optimizer and compile the model.
+adam_opt = Adam(lr=INITIAL_LEARNING_RATE)
+model = Model(INPUT_TENSOR, network_output, name='InceptionV4')
+model.compile(optimizer=adam_opt, loss='binary_crossentropy', metrics=["accuracy"])
+# Display a summary of the layers of the model.
+model.summary()
+#===========================================
+# Total params: 41,209,058
+# Trainable params: 41,145,890
+# Non-trainable params: 63,168
+#____________________________________________________
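Note: the new script builds and compiles the two-class model but stops at model.summary() without training. As a hedged usage sketch (not part of the commit), a quick smoke test with random NumPy data can confirm that a full forward and backward pass runs on the compiled model:

import numpy as np

# Hypothetical smoke test: four random 299x299x3 images with one-hot two-class labels.
# Replace this with a real image pipeline before doing any actual training.
x_dummy = np.random.rand(4, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS).astype('float32')
y_dummy = np.eye(2)[np.random.randint(0, 2, size=4)]
model.fit(x_dummy, y_dummy, batch_size=4, epochs=1)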
