testing_isogr_valid.py
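# Evaluates a trained model from the c3d_biclstm module (presumably a C3D +
# bidirectional ConvLSTM network) on the IsoGD validation split, once per
# modality (RGB, depth, optical flow), and fuses the softmax scores of the
# three runs into the final predictions written to valid_prediction.txt.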
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '1'  # restrict TensorFlow to the second GPU
import io
import sys
import numpy as np
import tensorflow as tf
slim = tf.contrib.slim
import tensorlayer as tl
import inputs as data
import c3d_biclstm as net
import time
from datetime import datetime
import threading
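# Evaluation settings for the IsoGD validation split.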
seq_len = 32
batch_size = 8
num_classes = 249
dataset_name = 'isogr'
model_prefix = './'
curtime = '%s' % datetime.now()
d = curtime.split(' ')[0]
t = curtime.split(' ')[1]
strtime = '%s%s%s-%s%s%s' % (d.split('-')[0], d.split('-')[1], d.split('-')[2],
                             t.split(':')[0], t.split(':')[1], t.split(':')[2])
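# Build the evaluation graph once; the pretrained weights of each modality are
# loaded into it later with tl.files.assign_params().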
x = tf.placeholder(tf.float32, [batch_size, seq_len, 112, 112, 3], name='x')
y = tf.placeholder(tf.int32, shape=[batch_size, ], name='y')
sess = tf.InteractiveSession()
_, networks = net.c3d_biclstm(x, num_classes, False, False)
network_pred = tf.nn.softmax(networks.outputs)
network_y_op = tf.argmax(network_pred, 1)
network_accu = tf.reduce_mean(tf.cast(tf.equal(tf.cast(network_y_op, tf.int32), y), tf.float32))
sess.run(tf.global_variables_initializer())
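# The three modalities (RGB, depth, optical flow) follow the same recipe: load
# the file list, load that modality's pretrained weights into the shared
# network, run the validation set in mini-batches, and keep the per-class
# softmax scores for late fusion.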
# RGB
testing_datalist = './dataset_splits/valid_rgb_list.txt'
X_test,y_test = data.load_video_list(testing_datalist)
X_teidx = np.asarray(np.arange(0, len(y_test)), dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
rgb_prediction = np.zeros((len(y_test),num_classes), dtype=np.float32)
load_params = tl.files.load_npz(name='%s/isogr_rgb_birnn_model_epoch_10.npz'%(model_prefix))
tl.files.assign_params(sess, load_params, networks)
#networks.print_params(True)
average_accuracy = 0.0
test_iterations = 0
print '%s: rgb testing' % datetime.now()
for X_indices, y_label_t in tl.iterate.minibatches(X_teidx,
                                                   y_test,
                                                   batch_size,
                                                   shuffle=False):
    # Read data for each batch
    image_path = []
    image_fcnt = []
    image_olen = []
    is_training = []
    for data_a in range(batch_size):
        X_index_a = X_indices[data_a]
        key_str = '%06d' % X_index_a
        image_path.append(X_test[key_str]['videopath'])
        image_fcnt.append(X_test[key_str]['framecnt'])
        image_olen.append(seq_len)
        is_training.append(False)  # Testing
    image_info = zip(image_path, image_fcnt, image_olen, is_training)
    X_data_t = tl.prepro.threading_data([_ for _ in image_info],
                                        data.prepare_isogr_rgb_data)
    feed_dict = {x: X_data_t, y: y_label_t}
    dp_dict = tl.utils.dict_to_one(networks.all_drop)  # disable dropout for evaluation
    feed_dict.update(dp_dict)
    predict_value, accu_value = sess.run([network_pred, network_accu], feed_dict=feed_dict)
    rgb_prediction[test_iterations*batch_size:(test_iterations+1)*batch_size, :] = predict_value
    average_accuracy = average_accuracy + accu_value
    test_iterations = test_iterations + 1
average_accuracy = average_accuracy / test_iterations
format_str = ('%s: rgb average_accuracy = %.6f')
print (format_str % (datetime.now(), average_accuracy))
# Depth
testing_datalist = './dataset_splits/valid_depth_list.txt'
X_test,y_test = data.load_video_list(testing_datalist)
X_teidx = np.asarray(np.arange(0, len(y_test)), dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
depth_prediction = np.zeros((len(y_test),num_classes), dtype=np.float32)
load_params = tl.files.load_npz(name='%s/isogr_depth_birnn_model_epoch_10.npz'%(model_prefix))
tl.files.assign_params(sess, load_params, networks)
#networks.print_params(True)
average_accuracy = 0.0
test_iterations = 0
print '%s: depth testing' % datetime.now()
for X_indices, y_label_t in tl.iterate.minibatches(X_teidx,
                                                   y_test,
                                                   batch_size,
                                                   shuffle=False):
    # Read data for each batch
    image_path = []
    image_fcnt = []
    image_olen = []
    is_training = []
    for data_a in range(batch_size):
        X_index_a = X_indices[data_a]
        key_str = '%06d' % X_index_a
        image_path.append(X_test[key_str]['videopath'])
        image_fcnt.append(X_test[key_str]['framecnt'])
        image_olen.append(seq_len)
        is_training.append(False)  # Testing
    image_info = zip(image_path, image_fcnt, image_olen, is_training)
    X_data_t = tl.prepro.threading_data([_ for _ in image_info],
                                        data.prepare_isogr_depth_data)
    feed_dict = {x: X_data_t, y: y_label_t}
    dp_dict = tl.utils.dict_to_one(networks.all_drop)  # disable dropout for evaluation
    feed_dict.update(dp_dict)
    predict_value, accu_value = sess.run([network_pred, network_accu], feed_dict=feed_dict)
    depth_prediction[test_iterations*batch_size:(test_iterations+1)*batch_size, :] = predict_value
    average_accuracy = average_accuracy + accu_value
    test_iterations = test_iterations + 1
average_accuracy = average_accuracy / test_iterations
format_str = ('%s: depth average_accuracy = %.6f')
print (format_str % (datetime.now(), average_accuracy))
# Flow
testing_datalist = './dataset_splits/valid_flow_list.txt'
X_test,y_test = data.load_video_list(testing_datalist)
X_teidx = np.asarray(np.arange(0, len(y_test)), dtype=np.int32)
y_test = np.asarray(y_test, dtype=np.int32)
flow_prediction = np.zeros((len(y_test),num_classes), dtype=np.float32)
load_params = tl.files.load_npz(name='%s/isogr_flow_birnn_model_epoch_10.npz'%(model_prefix))
tl.files.assign_params(sess, load_params, networks)
#networks.print_params(True)
average_accuracy = 0.0
test_iterations = 0
print '%s: flow testing' % datetime.now()
for X_indices, y_label_t in tl.iterate.minibatches(X_teidx,
                                                   y_test,
                                                   batch_size,
                                                   shuffle=False):
    # Read data for each batch
    image_path = []
    image_fcnt = []
    image_olen = []
    is_training = []
    for data_a in range(batch_size):
        X_index_a = X_indices[data_a]
        key_str = '%06d' % X_index_a
        image_path.append(X_test[key_str]['videopath'])
        image_fcnt.append(X_test[key_str]['framecnt'])
        image_olen.append(seq_len)
        is_training.append(False)  # Testing
    image_info = zip(image_path, image_fcnt, image_olen, is_training)
    X_data_t = tl.prepro.threading_data([_ for _ in image_info],
                                        data.prepare_isogr_flow_data)
    feed_dict = {x: X_data_t, y: y_label_t}
    dp_dict = tl.utils.dict_to_one(networks.all_drop)  # disable dropout for evaluation
    feed_dict.update(dp_dict)
    predict_value, accu_value = sess.run([network_pred, network_accu], feed_dict=feed_dict)
    flow_prediction[test_iterations*batch_size:(test_iterations+1)*batch_size, :] = predict_value
    average_accuracy = average_accuracy + accu_value
    test_iterations = test_iterations + 1
average_accuracy = average_accuracy / test_iterations
format_str = ('%s: flow average_accuracy = %.6f')
print (format_str % (datetime.now(), average_accuracy))
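# Late score fusion: sum the softmax scores of the three modalities, take the
# arg-max class, and report the fused accuracy on the validation labels.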
fusion_prediction = rgb_prediction + depth_prediction + flow_prediction
prediction_values = tf.argmax(fusion_prediction, 1)
final_accuracy = tf.reduce_mean(tf.cast(tf.equal(tf.cast(prediction_values, tf.int32), y_test), tf.float32))
print final_accuracy.eval()
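# Write the fused predictions to valid_prediction.txt (labels are written 1-based).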
video_list = './dataset_splits/valid_list.txt'
f = open(video_list, 'r')
f_lines = f.readlines()
f.close()
pred_labels = prediction_values.eval()  # evaluate the fused arg-max once, not per line
f = open('valid_prediction.txt', 'w')
for idx, line in enumerate(f_lines):
    linetxt = '%s %s %d\n' % (line.split(' ')[0], line.split(' ')[1], pred_labels[idx] + 1)
    f.write(linetxt)
f.close()
# In the end, close TensorFlow session.
sess.close()