-
Notifications
You must be signed in to change notification settings - Fork 76
/
model.py
118 lines (97 loc) · 5.76 KB
/
model.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
import tensorflow as tf
class Model(object):
    """Convolutional network for multi-digit sequence recognition (SVHN-style).

    Eight conv->batchnorm->relu->maxpool->dropout blocks feed two dense
    layers, then six classifier heads: one for the digit count ("length",
    7 classes) and five per-position digit heads (11 classes each —
    presumably digits 0-9 plus a "no digit" class; verify against labels).

    TensorFlow 1.x graph-mode API (tf.variable_scope / tf.layers).
    """

    @staticmethod
    def _conv_block(inputs, scope_name, filters, pool_strides, drop_rate):
        """One conv -> batch-norm -> relu -> max-pool -> dropout unit.

        Args:
            inputs: input feature-map tensor (NHWC).
            scope_name: variable scope for this block's weights; must match
                the original hand-written scopes so checkpoints stay loadable.
            filters: number of 5x5 convolution filters.
            pool_strides: stride of the 2x2 max pool (1 keeps spatial size,
                2 halves it; padding is 'same' throughout).
            drop_rate: dropout rate applied after pooling.

        Returns:
            The block's output tensor.
        """
        with tf.variable_scope(scope_name):
            conv = tf.layers.conv2d(inputs, filters=filters,
                                    kernel_size=[5, 5], padding='same')
            norm = tf.layers.batch_normalization(conv)
            activation = tf.nn.relu(norm)
            pool = tf.layers.max_pooling2d(activation, pool_size=[2, 2],
                                           strides=pool_strides, padding='same')
            return tf.layers.dropout(pool, rate=drop_rate)

    @staticmethod
    def inference(x, drop_rate):
        """Build the forward graph.

        Args:
            x: input image batch; assumed sized so that the eight blocks
               (pool strides alternating 2/1) reduce it to 4x4 spatial —
               TODO confirm expected input resolution against the caller.
            drop_rate: dropout rate used in every conv block.

        Returns:
            (length_logits, digits_logits): length_logits is [batch, 7];
            digits_logits is [batch, 5, 11], one row per digit position.
        """
        # (scope, filters, pool stride) per block; stride alternates 2/1 so
        # spatial resolution halves every second block. Scope names are kept
        # identical to the original unrolled code for checkpoint compatibility.
        block_params = [
            ('hidden1', 48, 2),
            ('hidden2', 64, 1),
            ('hidden3', 128, 2),
            ('hidden4', 160, 1),
            ('hidden5', 192, 2),
            ('hidden6', 192, 1),
            ('hidden7', 192, 2),
            ('hidden8', 192, 1),
        ]
        net = x
        for scope_name, filters, pool_strides in block_params:
            net = Model._conv_block(net, scope_name, filters,
                                    pool_strides, drop_rate)

        # Final feature map is 4x4x192; flatten for the dense layers.
        flatten = tf.reshape(net, [-1, 4 * 4 * 192])
        with tf.variable_scope('hidden9'):
            hidden9 = tf.layers.dense(flatten, units=3072, activation=tf.nn.relu)
        with tf.variable_scope('hidden10'):
            hidden10 = tf.layers.dense(hidden9, units=3072, activation=tf.nn.relu)

        with tf.variable_scope('digit_length'):
            length_logits = tf.layers.dense(hidden10, units=7)

        # Five independent per-position digit classifiers over the shared
        # representation; scopes 'digit1'..'digit5' as in the original.
        per_digit = []
        for i in range(1, 6):
            with tf.variable_scope('digit%d' % i):
                per_digit.append(tf.layers.dense(hidden10, units=11))
        digits_logits = tf.stack(per_digit, axis=1)

        return length_logits, digits_logits

    @staticmethod
    def loss(length_logits, digits_logits, length_labels, digits_labels):
        """Total loss: length cross-entropy plus the five digit cross-entropies.

        Args:
            length_logits: [batch, 7] logits from inference().
            digits_logits: [batch, 5, 11] logits from inference().
            length_labels: [batch] integer digit-count labels.
            digits_labels: [batch, 5] integer per-position digit labels.

        Returns:
            Scalar loss tensor (sum of six cross-entropy terms, added in the
            same left-to-right order as the original unrolled code).
        """
        # NOTE(review): tf.losses.sparse_softmax_cross_entropy already returns
        # a scalar mean by default, so the outer tf.reduce_mean is a no-op;
        # it is kept for exact graph parity with the original.
        total = tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(
            labels=length_labels, logits=length_logits))
        for i in range(5):
            total += tf.reduce_mean(tf.losses.sparse_softmax_cross_entropy(
                labels=digits_labels[:, i], logits=digits_logits[:, i, :]))
        return total