
Commit 47369ee

Add files via upload

1 parent 4f717f5 commit 47369ee

19 files changed: +1681 -0 lines

alexnet.py

Lines changed: 167 additions & 0 deletions
@@ -0,0 +1,167 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 20:24:20 2018

@author: wu
"""

import tensorflow as tf
import numpy as np

tf.reset_default_graph()

def maxPoolLayer(x, kHeight, kWidth, strideX, strideY, name, padding="SAME"):
    """Max-pooling layer."""
    return tf.nn.max_pool(x, ksize=[1, kHeight, kWidth, 1],
                          strides=[1, strideX, strideY, 1], padding=padding, name=name)

def dropout(x, keepPro, name=None):
    """Dropout layer. `name` must be passed as a keyword argument here;
    passing it positionally would be interpreted as noise_shape."""
    return tf.nn.dropout(x, keepPro, name=name)

def LRN(x, R, alpha, beta, name=None, bias=1.0):
    """Local response normalization."""
    return tf.nn.local_response_normalization(x, depth_radius=R, alpha=alpha,
                                              beta=beta, bias=bias, name=name)

def fcLayer(x, inputD, outputD, reluFlag, name):
    """Fully-connected layer."""
    with tf.variable_scope(name) as scope:
        w = tf.get_variable("weights", shape=[inputD, outputD], dtype="float")
        b = tf.get_variable("biases", [outputD], dtype="float")
        out = tf.nn.xw_plus_b(x, w, b, name=scope.name)
        if reluFlag:
            return tf.nn.relu(out)
        else:
            return out

def convLayer(x, kHeight, kWidth, strideX, strideY,
              featureNum, name, padding="SAME", groups=1):
    """Convolution layer with optional grouped convolution (groups > 1),
    matching the layout of the original AlexNet weights."""
    channel = int(x.get_shape()[-1])
    conv = lambda a, b: tf.nn.conv2d(a, b, strides=[1, strideY, strideX, 1], padding=padding)
    with tf.variable_scope(name) as scope:
        # Integer division: each group sees channel // groups input channels.
        w = tf.get_variable("weights", shape=[kHeight, kWidth, channel // groups, featureNum])
        b = tf.get_variable("biases", shape=[featureNum])

        xNew = tf.split(value=x, num_or_size_splits=groups, axis=3)
        wNew = tf.split(value=w, num_or_size_splits=groups, axis=3)

        featureMap = [conv(t1, t2) for t1, t2 in zip(xNew, wNew)]
        mergeFeatureMap = tf.concat(axis=3, values=featureMap)
        # print(mergeFeatureMap.shape)
        out = tf.nn.bias_add(mergeFeatureMap, b)
        return tf.nn.relu(tf.reshape(out, mergeFeatureMap.get_shape().as_list()), name=scope.name)

def losses(logits, labels):
    with tf.variable_scope("loss") as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name="xentropy_per_example")
        loss = tf.reduce_mean(cross_entropy, name="loss")
        tf.summary.scalar(scope.name + "/loss", loss)
        return loss

def training(loss, learning_rate):
    with tf.name_scope("optimizer"):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name="global_step", trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
        return train_op

def evaluation(logits, labels):
    with tf.variable_scope("accuracy") as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + "/accuracy", accuracy)
        return accuracy

def Batch_Normalization(x):
    """Batch normalization over the batch axis; assumes a 2-D
    (batch, 4096) activation as produced by fc6/fc7 below."""
    x_mean, x_var = tf.nn.moments(x, axes=[0])
    scale = tf.Variable(tf.ones([4096]))
    offset = tf.Variable(tf.zeros([4096]))
    variance_epsilon = 0.001
    BN_x = tf.nn.batch_normalization(x, x_mean, x_var, offset, scale, variance_epsilon)
    return BN_x

class alexNet(object):
    """AlexNet model."""
    def __init__(self, x, keepPro=1.0, classNum=6,
                 skip_layer=['conv5', 'fc6', 'fc7', 'fc8'], weights_path='DEFAULT'):
        self.X = x
        self.KEEPPRO = keepPro
        self.CLASSNUM = classNum
        self.SKIP_LAYER = skip_layer
        self.buildCNN()

        # Fine-tune choice: default to the pre-trained BVLC AlexNet weights.
        if weights_path == 'DEFAULT':
            self.WEIGHTS_PATH = '/home/wu/TF_Project/action/bvlc_alexnet.npy'
            #self.WEIGHTS_PATH = '/home/wu/TF_Project/action/model_tfrecord_sample/model.ckpt-1000'
        else:
            self.WEIGHTS_PATH = weights_path

    def load_initial_weights(self, session):
        """Assign pre-trained weights to every layer not listed in SKIP_LAYER."""
        # allow_pickle is required on newer NumPy versions to load the .npy dict.
        weights_dict = np.load(self.WEIGHTS_PATH, encoding='bytes', allow_pickle=True).item()
        for op_name in weights_dict:
            if op_name not in self.SKIP_LAYER:
                with tf.variable_scope(op_name, reuse=True):
                    for data in weights_dict[op_name]:
                        if len(data.shape) == 1:
                            var = tf.get_variable('biases', trainable=False)
                            session.run(var.assign(data))
                        else:
                            var = tf.get_variable('weights', trainable=False)
                            session.run(var.assign(data))

    def buildCNN(self):
        """
        input 100*100*3
        conv1 23*23*96
        pool1 11*11*96
        conv2 11*11*256
        pool2 5*5*256
        conv3 5*5*384
        conv4 5*5*384
        conv5 5*5*256
        pool5 2*2*256
        fc1   1024 -> 4096
        fc2   4096 -> 4096
        fc3   4096 -> 6
        """
        self.conv1 = convLayer(self.X, 11, 11, 4, 4, 96, "conv1", "VALID")
        """
        split = tf.split(self.conv1, num_or_size_splits=96, axis=3)
        tf.summary.image("conv1_features", split[0], 10)
        """
        lrn1 = LRN(self.conv1, 2, 2e-05, 0.75, "norm1")
        pool1 = maxPoolLayer(lrn1, 3, 3, 2, 2, "pool1", "VALID")

        conv2 = convLayer(pool1, 5, 5, 1, 1, 256, "conv2", groups=2)
        lrn2 = LRN(conv2, 2, 2e-05, 0.75, "lrn2")
        pool2 = maxPoolLayer(lrn2, 3, 3, 2, 2, "pool2", "VALID")

        conv3 = convLayer(pool2, 3, 3, 1, 1, 384, "conv3")

        conv4 = convLayer(conv3, 3, 3, 1, 1, 384, "conv4", groups=2)

        conv5 = convLayer(conv4, 3, 3, 1, 1, 256, "conv5", groups=2)
        pool5 = maxPoolLayer(conv5, 3, 3, 2, 2, "pool5", "VALID")

        fcIn = tf.reshape(pool5, [-1, 1024])
        fc1 = fcLayer(fcIn, 1024, 4096, True, "fc6")
        #dropout1 = dropout(fc1, self.KEEPPRO)
        dropout1 = Batch_Normalization(fc1)

        fc2 = fcLayer(dropout1, 4096, 4096, True, "fc7")
        #dropout2 = dropout(fc2, self.KEEPPRO)
        dropout2 = Batch_Normalization(fc2)

        # Note: a ReLU on the final logits layer is usually omitted when the
        # output feeds softmax cross-entropy, since it zeroes negative logits.
        self.fc3 = fcLayer(dropout2, 4096, self.CLASSNUM, True, "fc8")
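
Not part of the commit: a minimal usage sketch of how these pieces fit together at training time, assuming a fixed batch of 100x100x3 images and the 6-class setup above (the reshape inside convLayer requires a static batch size; the feed values are placeholders for a real input pipeline).

# Hypothetical usage sketch, not committed code.
import tensorflow as tf
import alexnet  # importing resets the default graph, so build everything after this

BATCH_SIZE = 100
x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 100, 100, 3])
y = tf.placeholder(tf.int32, shape=[BATCH_SIZE])

model = alexnet.alexNet(x, classNum=6)
loss = alexnet.losses(model.fc3, y)
train_op = alexnet.training(loss, learning_rate=1e-4)
accuracy = alexnet.evaluation(model.fc3, y)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    model.load_initial_weights(sess)  # loads BVLC weights, skipping SKIP_LAYER
    # images, labels = ...  (e.g. from the tfrecord pipeline below)
    # sess.run(train_op, feed_dict={x: images, y: labels})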

alexnet_feature_extractor.py

Lines changed: 87 additions & 0 deletions
@@ -0,0 +1,87 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 15 19:42:31 2018

@author: wu
"""

import tensorflow as tf
import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
import tfrecord
import alexnet

BATCH_SIZE = 100
IMG_W = 100
IMG_H = 100

train_filename = '/home/wu/TF_Project/action/sample_TFrecord/train1.tfrecords'
#dir = '/home/wu/TF_Project/action/feature_tensorboard/'
dir = '/home/wu/TF_Project/action/features/'
train_img, train_label = tfrecord.read_and_decode(train_filename)
train_batch, train_label_batch = tf.train.shuffle_batch([train_img, train_label],
                                                        batch_size=BATCH_SIZE, num_threads=64,
                                                        capacity=2000,
                                                        min_after_dequeue=1000)
x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, IMG_W, IMG_H, 3])

train_model = alexnet.alexNet(x)
conv1_feature = train_model.conv1
"""
state = tf.Variable(0, name='counter')
one = tf.constant(1)
new_value = tf.add(state, one)
update = tf.assign(state, new_value)
"""
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=sess)

    train_model.load_initial_weights(sess)

    #summary_op = tf.summary.merge_all()
    #writer = tf.summary.FileWriter(dir, sess.graph)

    params = tf.trainable_variables()
    print("Trainable variables:------------------------")
    for idx, v in enumerate(params):
        print("  param {:3}: {:15}  {}".format(idx, str(v.get_shape()), v.name))
    i = 0
    try:
        while not coord.should_stop():
            tra_images, tra_labels = sess.run([train_batch, train_label_batch])
            # Run conv1 for this batch and save the feature maps and labels
            # as .mat files for later use.
            feature = sess.run(conv1_feature, feed_dict={x: tra_images})
            dic1 = {'feature': feature}
            sio.savemat(dir + str(i) + '_feature.mat', dic1)
            dic2 = {'label': tra_labels}
            sio.savemat(dir + str(i) + '_label.mat', dic2)
            i += 1
            """
            #show feature map by tensorboard
            split = tf.split(feature, num_or_size_splits=96, axis=3)
            featuremap = tf.summary.image('conv1_image', split[0], 40)
            writer.add_summary(sess.run(featuremap))
            writer.close()
            """
            # Show feature map with plt. Note this adds new graph ops on
            # every iteration and blocks on plt.show().
            conv1_reshape = sess.run(tf.reshape(feature[0, :, :, :], [96, 1, 23, 23]))
            fig1, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(1, 1))
            for j in range(1):  # renamed from i so the file counter is not clobbered
                plt.subplot(3, 4, j + 1)
                plt.imshow(conv1_reshape[j][0])
            plt.title('Conv1 featuremap')
            plt.show()

            #all_feature = np.append(all_feature, feature, axis=0)
            #all_label = np.append(all_label, tra_labels, axis=0)
    except tf.errors.OutOfRangeError:
        print('Epochs Complete!')
    finally:
        coord.request_stop()
    coord.join(threads)
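
For downstream use, the saved batches can be read back with scipy.io.loadmat. A short sketch, assuming batch 0 was written to the features directory above:

# Hypothetical read-back sketch, not committed code.
import scipy.io as sio

feat = sio.loadmat('/home/wu/TF_Project/action/features/0_feature.mat')['feature']
lab = sio.loadmat('/home/wu/TF_Project/action/features/0_label.mat')['label']
print(feat.shape)  # (100, 23, 23, 96): one batch of conv1 feature maps
print(lab.shape)   # labels come back 2-D, e.g. (1, 100), per .mat conventions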

batch_normalization.py

Lines changed: 35 additions & 0 deletions
@@ -0,0 +1,35 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 16:17:07 2018

@author: wu
"""

import tensorflow as tf

# Compute the mean and variance of Wx_plus_b; axis lists the dimensions
# to normalize over (all but the channel axis here).
img_shape = [128, 32, 32, 64]
Wx_plus_b = tf.Variable(tf.random_normal(img_shape))
axis = list(range(len(img_shape) - 1))
wb_mean, wb_var = tf.nn.moments(Wx_plus_b, axis)

scale = tf.Variable(tf.ones([64]))
offset = tf.Variable(tf.zeros([64]))
variance_epsilon = 0.001
# Keep the built-in result under a new name; reassigning Wx_plus_b here
# would make the hand-written version below normalize an already
# normalized tensor.
Wx_plus_b_bn = tf.nn.batch_normalization(Wx_plus_b, wb_mean, wb_var, offset, scale, variance_epsilon)

# The same normalization written out by hand, for comparison.
Wx_plus_b1 = (Wx_plus_b - wb_mean) / tf.sqrt(wb_var + variance_epsilon)
Wx_plus_b1 = Wx_plus_b1 * scale + offset

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())

    print('*** wb_mean ***')
    print(sess.run(wb_mean))
    print('*** wb_var ***')
    print(sess.run(wb_var))
    print('*** Wx_plus_b_bn ***')
    print(sess.run(Wx_plus_b_bn))
    print('**** Wx_plus_b1 ****')
    print(sess.run(Wx_plus_b1))
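
With the rename above, both versions normalize the same input tensor, so the two print-outs should agree. A small check that could be appended inside the session block (a sketch, not committed code):

# Both tensors read the same variable values, so they should match to
# floating-point tolerance.
import numpy as np

bn_builtin, bn_manual = sess.run([Wx_plus_b_bn, Wx_plus_b1])
print(np.allclose(bn_builtin, bn_manual, atol=1e-5))  # expected: True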
