-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmodels.py
143 lines (109 loc) · 4.43 KB
/
models.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
"""Define different model structures for crop type classification
"""
import json
import os
import tensorflow as tf
from tensorflow.keras import layers, models, optimizers
import tensorflow.keras.applications.xception as keras_xception
import tensorflow.keras.applications.vgg16 as keras_vgg16
def add_classifier(x, num_outputs, layer_sizes):
    """Append a dense classification head to tensor ``x``.

    Each entry of ``layer_sizes`` is either ``(units,)`` for a plain
    ReLU dense layer, or ``(dropout_rate, units)`` to prepend a dropout
    layer.  The output layer is a single sigmoid unit for binary
    problems (``num_outputs == 2``), otherwise a softmax over
    ``num_outputs`` classes.
    """
    for spec in layer_sizes:
        if len(spec) != 1:
            # (dropout_rate, units) form: dropout first, then dense.
            x = layers.Dropout(spec[0])(x)
            x = layers.Dense(spec[1], activation='relu')(x)
        else:
            x = layers.Dense(spec[0], activation='relu')(x)
    if num_outputs == 2:
        head = layers.Dense(1, activation='sigmoid')
    else:
        head = layers.Dense(num_outputs, activation='softmax')
    return head(x)
def freeze_layers(model, n_layers):
    """Freeze the first ``n_layers`` layers of ``model`` for fine-tuning.

    Sets ``trainable = False`` on ``model.layers[:n_layers]`` and
    ``trainable = True`` on the rest.  When ``n_layers`` is ``None`` or
    ``0`` the model is left untouched.

    Bug fix: the original guard used ``or`` (``n_layers is not None or
    n_layers != 0``), which is always true — with ``n_layers`` of
    ``None`` or ``0`` the loops still ran and forced every layer
    trainable, silently un-freezing any pre-frozen layers.  The intended
    operator is ``and``.
    """
    if n_layers is not None and n_layers != 0:
        for layer in model.layers[:n_layers]:
            layer.trainable = False
        for layer in model.layers[n_layers:]:
            layer.trainable = True
def xception(input_shape, frozen_layers=0, weights=None, pooling='avg', **kwargs):
    """Build an Xception-backed classifier.

    A headless Xception backbone (optionally pretrained via ``weights``)
    is topped with the dense head from ``add_classifier``; the first
    ``frozen_layers`` layers of the resulting model are then frozen.
    Remaining keyword arguments are forwarded to ``add_classifier``.
    """
    backbone = keras_xception.Xception(
        weights=weights,
        include_top=False,
        input_shape=input_shape,
        pooling=pooling,
    )
    outputs = add_classifier(backbone.output, **kwargs)
    net = models.Model(inputs=backbone.input, outputs=outputs)
    freeze_layers(net, frozen_layers)
    return net
def xception_preprocess_input(*args, **kwargs):
    """Pass-through to the Keras Xception ``preprocess_input``."""
    preprocess = keras_xception.preprocess_input
    return preprocess(*args, **kwargs)
def vgg16(input_shape, frozen_layers=0, weights=None, pooling='avg', **kwargs):
    """Build a VGG16-backed classifier.

    A headless VGG16 backbone (optionally pretrained via ``weights``)
    is topped with the dense head from ``add_classifier``; the first
    ``frozen_layers`` layers of the resulting model are then frozen.
    Remaining keyword arguments are forwarded to ``add_classifier``.
    """
    backbone = keras_vgg16.VGG16(
        weights=weights,
        include_top=False,
        input_shape=input_shape,
        pooling=pooling,
    )
    outputs = add_classifier(backbone.output, **kwargs)
    net = models.Model(inputs=backbone.input, outputs=outputs)
    freeze_layers(net, frozen_layers)
    return net
def vgg16_preprocess_input(*args, **kwargs):
    """Pass-through to the Keras VGG16 ``preprocess_input``."""
    preprocess = keras_vgg16.preprocess_input
    return preprocess(*args, **kwargs)
def baseline(input_shape, frozen_layers=0, weights=None, pooling='avg', **kwargs):
    """Simple from-scratch CNN baseline.

    Four identical conv stages (Conv2D -> LeakyReLU -> BatchNorm ->
    MaxPool) followed by the dense head from ``add_classifier``.

    ``frozen_layers``, ``weights`` and ``pooling`` are accepted only for
    signature compatibility with the transfer-learning builders
    (``xception``/``vgg16``) and are unused here.  Remaining keyword
    arguments are forwarded to ``add_classifier``.
    """
    inputs = layers.Input(input_shape)
    x = inputs
    # The original body repeated this stage four times verbatim; a loop
    # over the filter counts builds the identical layer sequence.
    for filters in (64, 128, 128, 128):
        x = layers.Conv2D(filters, (3, 3))(x)
        x = layers.LeakyReLU()(x)
        x = layers.BatchNormalization()(x)
        x = layers.MaxPooling2D((2, 2))(x)
    x = layers.Flatten()(x)
    predictions = add_classifier(x, **kwargs)
    # this is the model we will train
    return models.Model(inputs=inputs, outputs=predictions)
def get_effnet_block(x_in, ch_in, ch_out):
    """One EffNet-style block.

    Pointwise expansion to ``ch_in`` channels, two depthwise spatial
    convolutions (1x3 then 3x1) with a separable max-pool in between,
    then a strided pointwise projection to ``ch_out`` channels.  Every
    convolution is bias-free and followed by LeakyReLU + BatchNorm.
    """
    def _act_norm(t):
        # Shared post-conv activation + normalization.
        return layers.BatchNormalization()(layers.LeakyReLU()(t))

    x = _act_norm(layers.Conv2D(ch_in,
                                kernel_size=(1, 1),
                                padding='same',
                                use_bias=False)(x_in))
    x = _act_norm(layers.DepthwiseConv2D(kernel_size=(1, 3),
                                         padding='same',
                                         use_bias=False)(x))
    # Separable pooling: height is pooled here, width via the strided
    # projection below.
    x = layers.MaxPool2D(pool_size=(2, 1), strides=(2, 1))(x)
    x = _act_norm(layers.DepthwiseConv2D(kernel_size=(3, 1),
                                         padding='same',
                                         use_bias=False)(x))
    x = _act_norm(layers.Conv2D(ch_out,
                                kernel_size=(2, 1),
                                strides=(1, 2),
                                padding='same',
                                use_bias=False)(x))
    return x
def init_effnet(num_outputs, input_shape):
    """Assemble a small EffNet-style classifier.

    Three EffNet blocks widen the channels 32 -> 64 -> 128 -> 256, then
    a flattened, dropout-regularized dense head produces a softmax over
    ``num_outputs`` classes.
    """
    inputs = layers.Input(shape=input_shape)
    x = inputs
    for ch_in, ch_out in ((32, 64), (64, 128), (128, 256)):
        x = get_effnet_block(x, ch_in, ch_out)
    x = layers.Flatten()(x)
    x = layers.Dropout(0.3)(x)
    x = layers.Dense(512, activation='relu')(x)
    x = layers.Dropout(0.3)(x)
    outputs = layers.Dense(num_outputs, activation='softmax')(x)
    return models.Model(inputs=inputs, outputs=outputs)