Commit 9da19b2

2 parents: cc6f5dd + e354300

File tree: 8 files changed (+320, -3 lines)


.idea/FromCodingToDeepLearning.iml

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default.

.idea/misc.xml

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default.
Lines changed: 224 additions & 0 deletions
@@ -0,0 +1,224 @@
# Learner: Nguyen Truong Thinh
# Contact me: [email protected] || +84393280504
#
# Topic: Deep Learning with Keras framework (A deep learning library)
# A collection of utility functions

from keras import regularizers

from keras.initializers import initializers_v1
from keras.layers.convolutional import MaxPooling2D, Conv2D, AveragePooling2D
from keras.layers import Activation
from keras.layers.merging import concatenate
from keras.layers.normalization.batch_normalization import BatchNormalization

# Hyperparameters we can adjust
L2_REGULARIZATION_AMOUNT = 0.00004


def build_inception_v4_conv_base(input_tensor):
    """
    Create the convolutional base portion of the Inception-v4 network.
    :param input_tensor: The input image tensor
    :return: The output tensor of the convolutional base
    """
    # The stem
    conv_base = build_inception_v4_stem(input_tensor)
    # 4 Inception A blocks
    conv_base = build_inception_a_block(conv_base)
    conv_base = build_inception_a_block(conv_base)
    conv_base = build_inception_a_block(conv_base)
    conv_base = build_inception_a_block(conv_base)
    # 1 Reduction A block
    conv_base = build_reduction_a_block(conv_base)
    # 7 Inception B blocks
    conv_base = build_inception_b_block(conv_base)
    conv_base = build_inception_b_block(conv_base)
    conv_base = build_inception_b_block(conv_base)
    conv_base = build_inception_b_block(conv_base)
    conv_base = build_inception_b_block(conv_base)
    conv_base = build_inception_b_block(conv_base)
    conv_base = build_inception_b_block(conv_base)
    # 1 Reduction B block
    conv_base = build_reduction_b_block(conv_base)
    # 3 Inception C blocks
    conv_base = build_inception_c_block(conv_base)
    conv_base = build_inception_c_block(conv_base)
    conv_base = build_inception_c_block(conv_base)

    return conv_base


def build_inception_v4_stem(input_tensor):
    """
    Create the Inception-v4 stem of the Inception architecture.
    :param input_tensor: The input image tensor
    :return: The concatenated outputs of the stem branches
    """
    # First stage of the stem:
    stem = conv2d_batch_norm_relu(input_tensor, 32, 3, 3, strides=(2, 2), padding='valid')
    stem = conv2d_batch_norm_relu(stem, 32, 3, 3, padding='valid')
    stem = conv2d_batch_norm_relu(stem, 64, 3, 3)
    # Second stage of the stem:
    left_1 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(stem)
    right_1 = conv2d_batch_norm_relu(stem, 96, 3, 3, strides=(2, 2), padding='valid')
    # Concatenate the results from the two branches
    stem = concatenate([left_1, right_1], axis=-1)
    # Third stage of the stem:
    left_2 = conv2d_batch_norm_relu(stem, 64, 1, 1)
    left_2 = conv2d_batch_norm_relu(left_2, 96, 3, 3, padding='valid')
    right_2 = conv2d_batch_norm_relu(stem, 64, 1, 1)
    right_2 = conv2d_batch_norm_relu(right_2, 64, 1, 7)
    right_2 = conv2d_batch_norm_relu(right_2, 64, 7, 1)
    right_2 = conv2d_batch_norm_relu(right_2, 96, 3, 3, padding='valid')
    # Concatenate the results from the two branches
    stem = concatenate([left_2, right_2], axis=-1)
    # Fourth stage of the stem:
    left_3 = conv2d_batch_norm_relu(stem, 192, 3, 3, strides=(2, 2), padding='valid')
    right_3 = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(stem)
    # Concatenate the results from the two branches
    stem = concatenate([left_3, right_3], axis=-1)
    return stem


def build_reduction_b_block(input_tensor):
    """
    A reduction block: transform a 17x17 input into an 8x8 output in an efficient manner.
    :param input_tensor: The input image tensor
    :return: The concatenated outputs of the three branches
    """
    # This is the first branch from the left
    branch_left = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input_tensor)
    # This is the middle branch
    branch_middle = conv2d_batch_norm_relu(input_tensor, 192, 1, 1)
    branch_middle = conv2d_batch_norm_relu(branch_middle, 192, 3, 3, strides=(2, 2), padding='valid')
    # This is the right branch
    branch_right = conv2d_batch_norm_relu(input_tensor, 256, 1, 1)
    branch_right = conv2d_batch_norm_relu(branch_right, 256, 1, 7)
    branch_right = conv2d_batch_norm_relu(branch_right, 320, 7, 1)
    branch_right = conv2d_batch_norm_relu(branch_right, 320, 3, 3, strides=(2, 2), padding='valid')
    # Concatenate the results from the three branches
    outputs = concatenate([branch_left, branch_middle, branch_right], axis=-1)
    return outputs


def build_reduction_a_block(input_tensor):
    """
    A reduction block: transform a 35x35 input into a 17x17 output in an efficient manner.
    :param input_tensor: The input image tensor
    :return: The concatenated outputs of the three branches
    """
    # This is the first branch from the left
    branch_left = MaxPooling2D((3, 3), strides=(2, 2), padding='valid')(input_tensor)
    # This is the middle branch
    branch_middle = conv2d_batch_norm_relu(input_tensor, 384, 3, 3, strides=(2, 2), padding='valid')
    # This is the right branch
    branch_right = conv2d_batch_norm_relu(input_tensor, 192, 1, 1)
    branch_right = conv2d_batch_norm_relu(branch_right, 224, 3, 3)
    branch_right = conv2d_batch_norm_relu(branch_right, 256, 3, 3, strides=(2, 2), padding='valid')
    # Concatenate the results from the three branches
    outputs = concatenate([branch_left, branch_middle, branch_right], axis=-1)
    return outputs


def build_inception_c_block(input_tensor):
    """
    Create the Inception C block - an Inception-v4 block.
    :param input_tensor: The input image tensor
    :return: The concatenated outputs of the four branches
    """
    # (256 1x1 convolutions) - This is the first branch from the left
    branch_a = conv2d_batch_norm_relu(input_tensor, 256, 1, 1)
    # This is the second branch from the left
    branch_b = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input_tensor)
    branch_b = conv2d_batch_norm_relu(branch_b, 256, 1, 1)
    # This is the third branch from the left
    branch_c = conv2d_batch_norm_relu(input_tensor, 384, 1, 1)
    branch_c_left = conv2d_batch_norm_relu(branch_c, 256, 1, 3)
    branch_c_right = conv2d_batch_norm_relu(branch_c, 256, 3, 1)
    # This is the fourth (right-most) branch
    branch_d = conv2d_batch_norm_relu(input_tensor, 384, 1, 1)
    branch_d = conv2d_batch_norm_relu(branch_d, 448, 1, 3)
    branch_d = conv2d_batch_norm_relu(branch_d, 512, 3, 1)
    branch_d_left = conv2d_batch_norm_relu(branch_d, 256, 1, 3)
    branch_d_right = conv2d_batch_norm_relu(branch_d, 256, 3, 1)
    # Concatenate the results from the four branches
    outputs = concatenate([branch_a, branch_b, branch_c_left, branch_c_right, branch_d_left, branch_d_right], axis=-1)
    return outputs


def build_inception_b_block(input_tensor):
    """
    Create the Inception B block - an Inception-v4 block.
    :param input_tensor: The input image tensor
    :return: The concatenated outputs of the four branches
    """
    # (384 1x1 convolutions) - This is the first branch from the left
    branch_a = conv2d_batch_norm_relu(input_tensor, 384, 1, 1)
    # This is the second branch from the left
    branch_b = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input_tensor)
    branch_b = conv2d_batch_norm_relu(branch_b, 128, 1, 1)
    # This is the third branch from the left
    branch_c = conv2d_batch_norm_relu(input_tensor, 192, 1, 1)
    branch_c = conv2d_batch_norm_relu(branch_c, 224, 1, 7)
    branch_c = conv2d_batch_norm_relu(branch_c, 256, 7, 1)
    # This is the fourth (right-most) branch
    branch_d = conv2d_batch_norm_relu(input_tensor, 192, 1, 1)
    branch_d = conv2d_batch_norm_relu(branch_d, 192, 1, 7)
    branch_d = conv2d_batch_norm_relu(branch_d, 224, 7, 1)
    branch_d = conv2d_batch_norm_relu(branch_d, 224, 1, 7)
    branch_d = conv2d_batch_norm_relu(branch_d, 256, 7, 1)
    # Concatenate the results from the four branches
    outputs = concatenate([branch_a, branch_b, branch_c, branch_d], axis=-1)
    return outputs


def build_inception_a_block(input_tensor):
    """
    Create the Inception A block - an Inception-v4 block.
    :param input_tensor: The input image tensor
    :return: The concatenated outputs of the four branches
    """
    # (96 1x1 convolutions) - This is the first branch from the left
    branch_a = conv2d_batch_norm_relu(input_tensor, 96, 1, 1)
    # This is the second branch from the left
    branch_b = conv2d_batch_norm_relu(input_tensor, 64, 1, 1)
    branch_b = conv2d_batch_norm_relu(branch_b, 96, 3, 3)
    # This is the third branch from the left
    branch_c = conv2d_batch_norm_relu(input_tensor, 64, 1, 1)
    branch_c = conv2d_batch_norm_relu(branch_c, 96, 3, 3)
    branch_c = conv2d_batch_norm_relu(branch_c, 96, 3, 3)
    # This is the fourth (right-most) branch
    branch_d = AveragePooling2D((3, 3), strides=(1, 1), padding='same')(input_tensor)
    branch_d = conv2d_batch_norm_relu(branch_d, 96, 1, 1)
    # Concatenate the results from the four branches
    outputs = concatenate([branch_a, branch_b, branch_c, branch_d], axis=-1)
    return outputs


def conv2d_batch_norm_relu(input_tensor, num_kernels, kernel_rows, kernel_cols, padding='same', strides=(1, 1)):
    """
    Create a 2D convolutional layer.
    Apply batch normalization to the output of the convolutional layer, and then apply a rectified linear unit
    (ReLU) activation function to the normalized output.
    :param input_tensor: The input image tensor
    :param num_kernels: The number of convolutional kernels (output filters)
    :param kernel_rows: The kernel height
    :param kernel_cols: The kernel width
    :param padding: one of `"valid"` or `"same"` (case-insensitive). `"valid"` means no padding. `"same"`
        results in padding with zeros evenly to the left/right or up/down of the input. When `padding="same"` and
        `strides=1`, the output has the same size as the input.
    :param strides: An integer or tuple/list of 2 integers, specifying the strides of the convolution along the height
        and width.
    :return: The output tensor of the convolution -> batch normalization -> ReLU sequence
    """
    x = Conv2D(num_kernels, (kernel_rows, kernel_cols), strides=strides, padding=padding, use_bias=False,
               kernel_regularizer=regularizers.l2(L2_REGULARIZATION_AMOUNT),
               kernel_initializer=initializers_v1._v1_glorot_normal_initializer(seed=42))(input_tensor)
    x = BatchNormalization()(x)

    output = Activation('relu')(x)

    return output
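As a quick smoke test, a minimal sketch like the one below could build the convolutional base on a dummy input and inspect its output shape. It assumes the file above is importable as helper_function; the expected 8x8x1536 output follows from the stem and reduction strides applied to a 299x299x3 input.

from keras.layers import Input
from keras.models import Model

from helper_function import build_inception_v4_conv_base

# Hypothetical smoke test: build the Inception-v4 base on a 299x299x3 input.
image_input = Input((299, 299, 3))
base_output = build_inception_v4_conv_base(image_input)
print(base_output.shape)  # expected to be (None, 8, 8, 1536)

# Wrapping the base in a Model lets us inspect the layer stack and parameter count.
base_model = Model(image_input, base_output, name='inception_v4_base')
base_model.summary()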
Lines changed: 49 additions & 0 deletions
@@ -0,0 +1,49 @@
# Learner: Nguyen Truong Thinh
# Contact me: [email protected] || +84393280504
#
# Topic: Deep Learning with Keras framework (A deep learning library)
# Implementing Inception-v4 with the Keras Functional API
# Create an InceptionV4 network - an Inception architecture.

from keras import backend as be

from keras.models import Model
from keras.layers.convolutional import AveragePooling2D
from keras.layers import Input, Dropout, Dense, Flatten
from keras.optimizers import Adam

from helper_function import build_inception_v4_conv_base

# Hyperparameters we can adjust
DROPOUT_PROBABILITY = 0.1
INITIAL_LEARNING_RATE = 0.001

# Adjust these to match the dimensions of our input image.
IMAGE_HEIGHT = 299
IMAGE_WIDTH = 299
IMAGE_CHANNELS = 3

# Check the data ordering format (if we were using Theano as the backend, it should be channels_first).
be.set_image_data_format('channels_last')
print(be.image_data_format())

# The previous layer of the network (or the input image tensor).
INPUT_TENSOR = Input((IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS))
# Convolutional base
conv_base = build_inception_v4_conv_base(INPUT_TENSOR)
# The classifier on top of the Inception-v4 convolutional base.
pool_output = AveragePooling2D((8, 8), padding='valid')(conv_base)
dropout_output = Dropout(DROPOUT_PROBABILITY)(pool_output)
flattened_output = Flatten()(dropout_output)
network_output = Dense(units=2, activation='softmax')(flattened_output)
# Use the Adam optimizer and compile the model.
adam_opt = Adam(lr=INITIAL_LEARNING_RATE)
model = Model(INPUT_TENSOR, network_output, name='InceptionV4')
model.compile(optimizer=adam_opt, loss='binary_crossentropy', metrics=["accuracy"])
# Display a summary of the layers of the model.
model.summary()
# ===========================================
# Total params: 41,209,058
# Trainable params: 41,145,890
# Non-trainable params: 63,168
# ____________________________________________________
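A minimal training sketch for the compiled model might look like the following. The directory layout (data/train, data/val), batch size, and epoch count are illustrative assumptions, not part of this commit.

from keras.preprocessing.image import ImageDataGenerator

# Hypothetical data pipeline: two-class image folders under data/train/<class>/ and data/val/<class>/.
train_gen = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory(
    'data/train', target_size=(IMAGE_HEIGHT, IMAGE_WIDTH), batch_size=16, class_mode='categorical')
val_gen = ImageDataGenerator(rescale=1.0 / 255).flow_from_directory(
    'data/val', target_size=(IMAGE_HEIGHT, IMAGE_WIDTH), batch_size=16, class_mode='categorical')

# Fit the two-unit softmax classifier defined above on the generators.
history = model.fit(train_gen, validation_data=val_gen, epochs=10)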
Lines changed: 32 additions & 0 deletions
@@ -0,0 +1,32 @@
# Author/Learner: Nguyen Truong Thinh
# Contact me: [email protected] || +84393280504
#
# Use case: Create a logistic regression model that can be converted into
# the Core ML format via the Core ML Tools package.
# The model is trained and converted on the popular UCI ML Pima Indians Diabetes dataset.

import coremltools

from usecases.tabular_classifier.logistic_regression.logistic_regression_classifier import trained_model

coreml_model = coremltools.converters.sklearn.convert(trained_model, ['Pregnancies', 'Glucose',
                                                                      'BloodPressure', 'SkinThickness', 'Insulin',
                                                                      'BMI', 'DiabetesPedigreeFunction', 'Age'],
                                                      'Outcome')

coreml_model.author = 'Nguyen Truong Thinh'
coreml_model.short_description = 'A logistic regression model trained on the Kaggle.com version of the Pima Indians ' \
                                 'diabetes dataset.'
# Feature descriptions
coreml_model.input_description['Pregnancies'] = 'Number of pregnancies.'
coreml_model.input_description['Glucose'] = 'Plasma glucose concentration after 2 hours in an oral glucose tolerance ' \
                                            'test.'
coreml_model.input_description['BloodPressure'] = 'Diastolic blood pressure.'
coreml_model.input_description['SkinThickness'] = 'Thickness of the triceps skin folds.'
coreml_model.input_description['BMI'] = 'Body mass index.'
coreml_model.input_description[
    'DiabetesPedigreeFunction'] = 'A function that determines the risk of diabetes based on family history.'
coreml_model.input_description['Age'] = 'The age of the subject.'
# Description of the target variable
coreml_model.output_description['Outcome'] = 'A binary value; 1 indicates the patient has type-2 diabetes.'
coreml_model.save('diabetes_indian.mlpackage')
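To verify the saved package, one could reload it with coremltools and run a single prediction (macOS only, since prediction requires the Core ML runtime). The feature values below are made-up numbers, not data from the commit.

# Hypothetical verification step: load the saved package and run one prediction.
loaded_model = coremltools.models.MLModel('diabetes_indian.mlpackage')
sample = {'Pregnancies': 2.0, 'Glucose': 120.0, 'BloodPressure': 70.0, 'SkinThickness': 25.0,
          'Insulin': 80.0, 'BMI': 28.1, 'DiabetesPedigreeFunction': 0.4, 'Age': 35.0}
# The output dictionary should contain the 'Outcome' target, e.g. something like {'Outcome': 0, ...}.
print(loaded_model.predict(sample))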
742 Bytes
Binary file not shown.
Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
{
  "fileFormatVersion": "1.0.0",
  "itemInfoEntries": {
    "F2D67E27-AF2E-4D54-89B0-9990C4DF7371": {
      "author": "com.apple.CoreML",
      "description": "CoreML Model Specification",
      "name": "model.mlmodel",
      "path": "com.apple.CoreML/model.mlmodel"
    }
  },
  "rootModelIdentifier": "F2D67E27-AF2E-4D54-89B0-9990C4DF7371"
}

usecases/tabular_classifier/logistic_regression/logistic_regression_classifier.py

Lines changed: 1 addition & 1 deletion
@@ -48,7 +48,7 @@

# Train a logistic regression model
model = LogisticRegression(penalty='l2', fit_intercept=True, solver="liblinear", multi_class='ovr')
-trained_model = model.fit(df_diabetes_features_train, df_diabetes_target_train.values.ravel())
+trained_model_kfcv = model.fit(df_diabetes_features_train, df_diabetes_target_train.values.ravel())

# Get predictions
predictions = model.predict(df_diabetes_features_test)
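The rename to trained_model_kfcv suggests the classifier is also being evaluated with k-fold cross-validation; as a hedged sketch (the fold count and the use of cross_val_score are assumptions, not shown in this diff), the same estimator could be scored like this:

from sklearn.model_selection import cross_val_score

# Hypothetical 5-fold cross-validation of the logistic regression estimator on the training split.
cv_scores = cross_val_score(model, df_diabetes_features_train,
                            df_diabetes_target_train.values.ravel(), cv=5)
print(cv_scores.mean(), cv_scores.std())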
