diff --git a/cnn_new_gray/.gitignore b/cnn_new_gray/.gitignore new file mode 100644 index 0000000..2bcb3b1 --- /dev/null +++ b/cnn_new_gray/.gitignore @@ -0,0 +1,6 @@ +.idea +**/*.txt +**/*.png +**/*.jpg +**/model +**/*.local \ No newline at end of file diff --git a/cnn_new_gray/cnn_test1.py b/cnn_new_gray/cnn_test1.py new file mode 100644 index 0000000..aa05d00 --- /dev/null +++ b/cnn_new_gray/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict = {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 5 +h = 50 +c = 1 + +# 测试集地址 +path = 'labeledTimeFreq/test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(5, 50) + b = np.zeros((5, 50, 1)) + for i1 in range(5): + for j1 in range(50): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + +data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if 
data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/cnn_train1.py b/cnn_new_gray/cnn_train1.py new file mode 100644 index 0000000..8a955bb --- /dev/null +++ b/cnn_new_gray/cnn_train1.py @@ -0,0 +1,344 @@ +# -*- coding: utf-8 -*- +from travel_path import * +from skimage import transform +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 5 +h = 50 +c = 1 + +# path = 'train_set/' +# files = travel_txt(path) + +model_path = "model/model.ckpt" + +# data = [] +# labels = [] +# for i in range(files.shape[0]): +# data_tmp = np.genfromtxt(files[i]) +# data_tmp1 = data_tmp[:60,:] +# for j in range(data_tmp1.shape[1]): +# data_tmp2 = data_tmp1[:,j] +# data_tmp2 = data_tmp2.reshape(5, 12) +# +# # b = [] +# # for ia in range(8 - 1): +# # for ja in range(data_tmp2.shape[0]): +# # if b == []: +# # b = data_tmp2 +# # b = np.row_stack((b, data_tmp2[ja, :])) +# # else: +# # b = np.row_stack((b, data_tmp2[ja, :])) +# # c = [] +# # for ia in range(8 - 1): +# # for ja in range(data_tmp2.shape[1]): +# # if c == []: +# # c = b +# # c = np.column_stack((c, b[:, ja])) +# # else: +# # c = np.column_stack((c, b[:, ja])) +# +# b = np.zeros((5, 12, 1)) +# for i1 in range(5): +# for j1 in range(12): +# b[i1, j1, 0] = data_tmp2[i1, j1] +# # b[i1, j1, 1] = data_tmp2[i1, j1] +# # b[i1, j1, 2] = data_tmp2[i1, j1] +# img = transform.resize(b,(10,24,1)) +# +# # d = np.zeros((40, 96, 3)) +# # for i1 in range(40): +# # for j1 in range(96): +# # d[i1, j1, 0] = c[i1, j1] +# # d[i1, j1, 1] = c[i1, j1] +# # d[i1, j1, 2] = c[i1, j1] +# # img = transform.resize(d, (40, 96, 3)) +# data.append(img) +# label_tmp1 = data_tmp[60,:] +# label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) +# # if data == []: +# # data = data_tmp1 +# # else: +# # data = np.column_stack((data, data_tmp1)) +# if labels == []: +# labels = label_tmp1 +# else: +# labels = np.column_stack((labels, label_tmp1)) + +data0 = np.genfromtxt("training_fr_timeFreq/training data.txt") +label = np.genfromtxt("training_fr_timeFreq/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(5,50) + b = np.zeros((5, 50, 1)) + for i1 in range(5): + for j1 
in range(50): + b[i1, j1, 0] = data_tmp[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +""" +# 将所有数据分为训练集和验证集 +ratio = 7.0/9.0 +print ratio +s = np.int(num_example*ratio) +x_train = data[:s] +y_train = label[:s] +x_val = data[s:] +y_val = label[s:] +print x_train.shape +print x_val.shape """ + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [5, 5, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [5, 5, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 12 * 256 + reshaped = tf.reshape(pool2, [-1, nodes]) + + # with tf.variable_scope("layer5-conv3"): + # conv3_weights = tf.get_variable("weight", [3, 3, 128, 256], + # initializer=tf.truncated_normal_initializer(stddev=0.1)) + # # 可视化观看变量 + # tf.summary.histogram("layer5-conv3/weights", conv3_weights) + # conv3_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # # 可视化观看变量 + # tf.summary.histogram("layer5-conv3/biases", conv3_biases) + # conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + # relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + # + # with tf.name_scope("layer6-pool3"): + # pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + # nodes = 1 * 3 * 256 + # reshaped = tf.reshape(pool3, [-1, nodes]) + # + # with tf.variable_scope("layer7-conv4"): + # conv4_weights = tf.get_variable("weight", [3, 3, 128, 128], + # initializer=tf.truncated_normal_initializer(stddev=0.1)) + # # 可视化观看变量 + # tf.summary.histogram("layer7-conv4/weights", conv4_weights) + # conv4_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # # 可视化观看变量 + # tf.summary.histogram("layer7-conv4/biases", conv4_biases) + # conv4 = tf.nn.conv2d(pool3, conv4_weights, strides=[1, 1, 1, 1], padding='SAME') + 
# relu4 = tf.nn.relu(tf.nn.bias_add(conv4, conv4_biases)) + # + # with tf.name_scope("layer8-pool4"): + # pool4 = tf.nn.max_pool(relu4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + # # nodes = 6 * 6 * 128 + # nodes = 2 * 6 * 128 + # reshaped = tf.reshape(pool4, [-1, nodes]) + + # with tf.variable_scope('layer9-fc1'): + # fc1_weights = tf.get_variable("weight", [nodes, 1024], + # initializer=tf.truncated_normal_initializer(stddev=0.1)) + # tf.summary.histogram("layer9-fc1/weights", fc1_weights) + # if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + # fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1)) + # # 可视化观看变量 + # tf.summary.histogram("layer9-fc1/biases", fc1_biases) + # + # fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + # if train: fc1 = tf.nn.dropout(fc1, 0.5) + # + # with tf.variable_scope('layer10-fc2'): + # fc2_weights = tf.get_variable("weight", [1024, 512], + # initializer=tf.truncated_normal_initializer(stddev=0.1)) + # tf.summary.histogram("layer10-fc2/weights", fc2_weights) + # if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + # fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + # tf.summary.histogram("layer10-fc2/biases", fc2_biases) + # + # fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + # if train: fc2 = tf.nn.dropout(fc2, 0.5) + # + # with tf.variable_scope('layer11-fc3'): + # fc3_weights = tf.get_variable("weight", [512, 4], + # initializer=tf.truncated_normal_initializer(stddev=0.1)) + # tf.summary.histogram("layer11-fc3/weights", fc3_weights) + # if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + # fc3_biases = tf.get_variable("bias", [4], initializer=tf.constant_initializer(0.1)) + # tf.summary.histogram("layer11-fc3/biases", fc3_biases) + # logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + with tf.variable_scope('layer5-fc1'): + fc2_weights = tf.get_variable("weight", [nodes, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer5-fc1/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer5-fc1/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(reshaped, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer6-fc2'): + fc3_weights = tf.get_variable("weight", [512, 4], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer6-fc2/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [4], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer6-fc2/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = 
tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + +#训练和测试数据,可将n_epoch设置更大一些 + +n_epoch = 500 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + _,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) + + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +""" #validation + if n_epoch % 10 == 0: + val_loss, val_acc, n_batch = 0, 0, 0 + for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False): + err, ac = sess.run([loss,acc], feed_dict={x: x_val_a, y_: y_val_a}) + val_loss += err; val_acc += ac; n_batch += 1 + print "validation loss: %f" % (np.sum(val_loss)/ n_batch) + print "validation acc: %f" % (np.sum(val_acc)/ n_batch) """ 
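# Illustrative sketch, not part of the original training flow: the per-epoch logging above
# re-reads the whole "loss_acc/*.txt" file, reshapes it and rewrites it with np.savetxt on
# every epoch. Assuming the same one-value-per-line text format, the same log can be kept by
# appending a single line per epoch (the helper below is unused here and only shown for
# comparison):
def append_metric(path, value):
    # Append one float per line; the file is created on first use.
    # '%.18e' matches np.savetxt's default float format.
    with open(path, 'a') as f:
        f.write('%.18e\n' % value)
# e.g. append_metric('loss_acc/loss in every step training model.txt', np.sum(train_loss)/n_batch)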
+saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/getTimeFreq.py b/cnn_new_gray/getTimeFreq.py new file mode 100644 index 0000000..023813c --- /dev/null +++ b/cnn_new_gray/getTimeFreq.py @@ -0,0 +1,65 @@ +from travel_path import * + +path = 'labeledTimeFreq/train_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(250,) + + # b = [] + # for ia in range(8 - 1): + # for ja in range(data_tmp2.shape[0]): + # if b == []: + # b = data_tmp2 + # b = np.row_stack((b, data_tmp2[ja, :])) + # else: + # b = np.row_stack((b, data_tmp2[ja, :])) + # c = [] + # for ia in range(8 - 1): + # for ja in range(data_tmp2.shape[1]): + # if c == []: + # c = b + # c = np.column_stack((c, b[:, ja])) + # else: + # c = np.column_stack((c, b[:, ja])) + + # b = np.zeros((5, 50, 1)) + # for i1 in range(5): + # for j1 in range(50): + # b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + # img = b + + # d = np.zeros((40, 96, 3)) + # for i1 in range(40): + # for j1 in range(96): + # d[i1, j1, 0] = c[i1, j1] + # d[i1, j1, 1] = c[i1, j1] + # d[i1, j1, 2] = c[i1, j1] + # img = transform.resize(d, (40, 96, 3)) + data.append(data_tmp2) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) +data = np.array(data) +labels = np.array(labels) +labels = labels.reshape(labels.shape[0]*labels.shape[1],) +print(data.shape) +print(labels.shape) +np.savetxt(u'training_fr_timeFreq/training data.txt', data) +np.savetxt(u'training_fr_timeFreq/labels.txt', labels) \ No newline at end of file diff --git a/cnn_new_gray/labeledTimeFreq/seg.py b/cnn_new_gray/labeledTimeFreq/seg.py new file mode 100644 index 0000000..b11d251 --- /dev/null +++ b/cnn_new_gray/labeledTimeFreq/seg.py @@ -0,0 +1,15 @@ +import numpy as np + +data = np.genfromtxt('Labeled Freq Feature of Point at 0.1km.txt') +print(data.shape) + +data_former = data[:,:864] +data_latter = data[:,864:] + +print(data_former.shape) +print(data_latter.shape) +print(data_former) +print(data_latter) + +np.savetxt('Labeled Freq Feature of Point at 0.1km Former.txt',data_former) +np.savetxt('Labeled Freq Feature of Point at 0.1km Latter.txt',data_latter) \ No newline at end of file diff --git a/cnn_new_gray/new1-1/cnn_test1.py b/cnn_new_gray/new1-1/cnn_test1.py new file mode 100644 index 0000000..45fec26 --- /dev/null +++ b/cnn_new_gray/new1-1/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict = {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 10 +h = 25 +c = 1 + +# 测试集地址 +path = 'test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(10, 25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # 
b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + +data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) 
+ data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/new1-1/cnn_train1.py b/cnn_new_gray/new1-1/cnn_train1.py new file mode 100644 index 0000000..e7f4dbd --- /dev/null +++ b/cnn_new_gray/new1-1/cnn_train1.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from travel_path import * +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 10 +h = 25 +c = 1 + +model_path = "model/model.ckpt" + +data0 = np.genfromtxt("training_fr_timeFreq/training data.txt") +label = np.genfromtxt("training_fr_timeFreq/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(10,25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp[i1, j1] + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [3, 3, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [3, 3, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + + with tf.variable_scope("layer5-conv3"): + conv3_weights = tf.get_variable("weight", [2, 2, 256, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/weights", conv3_weights) + conv3_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/biases", conv3_biases) + conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + + with tf.name_scope("layer6-pool3"): + pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 3 * 512 + reshaped = 
tf.reshape(pool3, [-1, nodes]) + + with tf.variable_scope('layer7-fc1'): + fc1_weights = tf.get_variable("weight", [nodes, 1024], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer7-fc1/weights", fc1_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer7-fc1/biases", fc1_biases) + + fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + if train: fc1 = tf.nn.dropout(fc1, 0.5) + + with tf.variable_scope('layer8-fc2'): + fc2_weights = tf.get_variable("weight", [1024, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer8-fc2/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer8-fc2/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer9-fc3'): + fc3_weights = tf.get_variable("weight", [512, 2], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer9-fc3/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer9-fc3/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + +#训练和测试数据,可将n_epoch设置更大一些 +n_epoch = 120 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + 
_,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) + + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/new1-1/training_fr_timeFreq/read.py b/cnn_new_gray/new1-1/training_fr_timeFreq/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/new1-1/training_fr_timeFreq/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/new1-2/cnn_test1.py b/cnn_new_gray/new1-2/cnn_test1.py new file mode 100644 index 0000000..45fec26 --- /dev/null +++ b/cnn_new_gray/new1-2/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict = {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 10 +h = 25 +c = 1 + +# 测试集地址 +path = 'test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(10, 25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + 
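# Illustrative sketch, assuming the element-wise b[i1, j1, 0] = ... loops above are only
# meant to rearrange one 250-sample column into a (10, 25, 1) single-channel "image":
# NumPy's reshape builds the same array in one call (np is assumed to come from
# travel_path's wildcard import, as elsewhere in this script).
def column_to_image(col, rows=10, cols=25):
    # col: 1-D array of length rows*cols -> (rows, cols, 1) float32 image
    return np.asarray(col, dtype=np.float32).reshape(rows, cols, 1)
# e.g., inside the loop above: data.append(column_to_image(data_tmp1[:, j]))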
+data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/new1-2/cnn_train1.py b/cnn_new_gray/new1-2/cnn_train1.py new file mode 100644 index 0000000..7ba146c --- /dev/null +++ 
b/cnn_new_gray/new1-2/cnn_train1.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from travel_path import * +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 10 +h = 25 +c = 1 + +model_path = "model/model.ckpt" + +data0 = np.genfromtxt("training_fr_timeFreq_new/training data.txt") +label = np.genfromtxt("training_fr_timeFreq_new/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(10,25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp[i1, j1] + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [3, 3, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [3, 3, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + + with tf.variable_scope("layer5-conv3"): + conv3_weights = tf.get_variable("weight", [2, 2, 256, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/weights", conv3_weights) + conv3_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/biases", conv3_biases) + conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + + with tf.name_scope("layer6-pool3"): + pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 3 * 512 + reshaped = tf.reshape(pool3, [-1, nodes]) + + with tf.variable_scope('layer7-fc1'): + fc1_weights = tf.get_variable("weight", [nodes, 1024], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer7-fc1/weights", fc1_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + fc1_biases = tf.get_variable("bias", 
[1024], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer7-fc1/biases", fc1_biases) + + fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + if train: fc1 = tf.nn.dropout(fc1, 0.5) + + with tf.variable_scope('layer8-fc2'): + fc2_weights = tf.get_variable("weight", [1024, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer8-fc2/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer8-fc2/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer9-fc3'): + fc3_weights = tf.get_variable("weight", [512, 2], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer9-fc3/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer9-fc3/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + +#训练和测试数据,可将n_epoch设置更大一些 +n_epoch = 60 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + _,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) 
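# Side note on the two tf.summary.scalar(...) calls just above: they run inside the epoch
# loop, after `merged = tf.summary.merge_all()` has already been built, and their returned
# ops are never passed to sess.run(), so the averaged loss/accuracy values never reach the
# FileWriter (each call only adds a new constant summary op to the graph). A common TF1
# pattern, shown only as a sketch, is to define the scalars once before merge_all():
#     loss_ph = tf.placeholder(tf.float32, [], name='epoch_loss')
#     acc_ph = tf.placeholder(tf.float32, [], name='epoch_acc')
#     tf.summary.scalar('train_loss', loss_ph)
#     tf.summary.scalar('train_accuracy', acc_ph)
# and then, where `merged` is evaluated:
#     result = sess.run(merged, feed_dict={x: x_train_a, y_: y_train_a,
#                                          loss_ph: np.sum(train_loss)/n_batch,
#                                          acc_ph: np.sum(train_acc)/n_batch})
#     writer.add_summary(result, epoch + 1)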
+ + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/new1-2/training_fr_timeFreq_new/read.py b/cnn_new_gray/new1-2/training_fr_timeFreq_new/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/new1-2/training_fr_timeFreq_new/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/new1-3/cnn_test1.py b/cnn_new_gray/new1-3/cnn_test1.py new file mode 100644 index 0000000..45fec26 --- /dev/null +++ b/cnn_new_gray/new1-3/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict = {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 10 +h = 25 +c = 1 + +# 测试集地址 +path = 'test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(10, 25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + +data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = 
graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/new1-3/cnn_train1.py b/cnn_new_gray/new1-3/cnn_train1.py new file mode 100644 index 0000000..e212166 --- /dev/null +++ b/cnn_new_gray/new1-3/cnn_train1.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from travel_path import * +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 10 +h = 25 +c = 1 + +model_path = "model/model.ckpt" + +data0 = np.genfromtxt("training_fr_timeFreq_new_1/training data.txt") 
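# Illustrative sanity check, assuming "training data.txt" here follows the same layout that
# getTimeFreq.py produces (one flattened time-frequency frame per row, reshaped to 10x25
# below, with one 0/1 label per row in labels.txt); the directory name
# training_fr_timeFreq_new_1 is taken as-is from this script:
assert data0.ndim == 2 and data0.shape[1] == 10 * 25, \
    "expected one flattened 10x25 frame per row in training data.txt"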
+label = np.genfromtxt("training_fr_timeFreq_new_1/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(10,25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp[i1, j1] + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [3, 3, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [3, 3, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + + with tf.variable_scope("layer5-conv3"): + conv3_weights = tf.get_variable("weight", [2, 2, 256, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/weights", conv3_weights) + conv3_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/biases", conv3_biases) + conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + + with tf.name_scope("layer6-pool3"): + pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 3 * 512 + reshaped = tf.reshape(pool3, [-1, nodes]) + + with tf.variable_scope('layer7-fc1'): + fc1_weights = tf.get_variable("weight", [nodes, 1024], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer7-fc1/weights", fc1_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer7-fc1/biases", fc1_biases) + + fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + if train: fc1 = tf.nn.dropout(fc1, 0.5) + + with tf.variable_scope('layer8-fc2'): + fc2_weights = tf.get_variable("weight", [1024, 512], + 
initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer8-fc2/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer8-fc2/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer9-fc3'): + fc3_weights = tf.get_variable("weight", [512, 2], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer9-fc3/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer9-fc3/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + +#训练和测试数据,可将n_epoch设置更大一些 +n_epoch = 90 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + _,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) + + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = 
np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/new1-3/training_fr_timeFreq_new_1/read.py b/cnn_new_gray/new1-3/training_fr_timeFreq_new_1/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/new1-3/training_fr_timeFreq_new_1/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/plot.py b/cnn_new_gray/plot.py new file mode 100644 index 0000000..b130a3f --- /dev/null +++ b/cnn_new_gray/plot.py @@ -0,0 +1,58 @@ +import numpy as np +import pylab as pl + +pl.figure(figsize=(11,9)) + +data = np.genfromtxt("loss_acc/acc in every step training model.txt") + +pl.subplot(221) +pl.plot(data, color='blue') +pl.xlabel('Epoch') +pl.ylabel('Training Accuracy Rate') +pl.title('Training Accuracy Rate Varies with Training Epochs\n' + + '(Model3, Ratio of Normal and Abnormal is 21:1 in Training Set)') + +data = np.genfromtxt("loss_acc/loss in every step training model.txt") + +pl.subplot(222) +pl.plot(data, color='blue') +pl.xlabel('Epoch') +pl.ylabel('Loss') +pl.title('Loss Varies with Training Epochs\n' + + '(Model3, Ratio of Normal and Abnormal is 21:1 in Training Set)') + +data = np.genfromtxt("test_acc/count.txt") +data_abnormal = np.genfromtxt("test_acc/count_abnormal.txt") + +pl.subplot(223) +pl.plot(data/5184.0, color='k', linewidth=0.7, label='Accuracy in All') +pl.plot(data_abnormal/192.0, color='r', linewidth=0.7, label='Accuracy in Abnormal') +pl.plot((data-data_abnormal)/4992.0, color='b', linewidth=0.7, label='Accuracy in Normal') +pl.xlabel('Epoch') +pl.ylabel('Test Accuracy Rate') +pl.title('Test Accuracy Rate Varies with Training Epochs\n' + + '(Model3, Ratio of Normal and Abnormal is 21:1 in Training Set)') +pl.legend() + +data = np.genfromtxt("test_acc/count_abnormal.txt") +data_new = [] +for i in range(data.shape[0]-5+1): + data_tmp = data[i:i+5] + data_mean = np.mean(data_tmp,axis=0) + data_mean = data_mean/192.0 + if data_new == []: + data_new = np.array([[data_mean]]) + else: + data_new = np.row_stack((data_new, data_mean)) +epochs = np.arange(1,data_new.shape[0]+1) + +pl.subplot(224) +pl.plot(epochs,data_new,color='r') +pl.title("Mean of abnormal-testing accuracy rates of" + "\n" + + "5 classifiers 
having neighboring training epochs") +pl.xlabel("Training Epochs of 1st Classifier") +pl.ylabel("Abnormal-Testing Accuracy Rates Mean") + +pl.tight_layout() + +pl.savefig(u'fig.png') diff --git a/cnn_new_gray/resize/new/cnn_test1.py b/cnn_new_gray/resize/new/cnn_test1.py new file mode 100644 index 0000000..45fec26 --- /dev/null +++ b/cnn_new_gray/resize/new/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict = {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 10 +h = 25 +c = 1 + +# 测试集地址 +path = 'test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(10, 25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + +data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = 
data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/resize/new/cnn_train1.py b/cnn_new_gray/resize/new/cnn_train1.py new file mode 100644 index 0000000..b85f67b --- /dev/null +++ b/cnn_new_gray/resize/new/cnn_train1.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from travel_path import * +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 10 +h = 25 +c = 1 + +model_path = "model/model.ckpt" + +data0 = np.genfromtxt("training_fr_timeFreq/training data.txt") +label = np.genfromtxt("training_fr_timeFreq/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(10,25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp[i1, j1] + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [5, 5, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [5, 5, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], 
initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + + with tf.variable_scope("layer5-conv3"): + conv3_weights = tf.get_variable("weight", [3, 3, 256, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/weights", conv3_weights) + conv3_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/biases", conv3_biases) + conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + + with tf.name_scope("layer6-pool3"): + pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 3 * 512 + reshaped = tf.reshape(pool3, [-1, nodes]) + + with tf.variable_scope('layer7-fc1'): + fc1_weights = tf.get_variable("weight", [nodes, 1024], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer7-fc1/weights", fc1_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer7-fc1/biases", fc1_biases) + + fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + if train: fc1 = tf.nn.dropout(fc1, 0.5) + + with tf.variable_scope('layer8-fc2'): + fc2_weights = tf.get_variable("weight", [1024, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer8-fc2/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer8-fc2/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer9-fc3'): + fc3_weights = tf.get_variable("weight", [512, 2], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer9-fc3/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer9-fc3/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + 
np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + +#训练和测试数据,可将n_epoch设置更大一些 +n_epoch = 100 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + _,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) + + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/resize/new/training_fr_timeFreq/read.py b/cnn_new_gray/resize/new/training_fr_timeFreq/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/resize/new/training_fr_timeFreq/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/resize/new1/cnn_test1.py b/cnn_new_gray/resize/new1/cnn_test1.py new file mode 100644 index 0000000..45fec26 
--- /dev/null +++ b/cnn_new_gray/resize/new1/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict = {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 10 +h = 25 +c = 1 + +# 测试集地址 +path = 'test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(10, 25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + +data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = 
np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/resize/new1/cnn_train1.py b/cnn_new_gray/resize/new1/cnn_train1.py new file mode 100644 index 0000000..61842f1 --- /dev/null +++ b/cnn_new_gray/resize/new1/cnn_train1.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from travel_path import * +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 10 +h = 25 +c = 1 + +model_path = "model/model.ckpt" + +data0 = np.genfromtxt("training_fr_timeFreq_new/training data.txt") +label = np.genfromtxt("training_fr_timeFreq_new/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(10,25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp[i1, j1] + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [5, 5, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [5, 5, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with 
tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + + with tf.variable_scope("layer5-conv3"): + conv3_weights = tf.get_variable("weight", [3, 3, 256, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/weights", conv3_weights) + conv3_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/biases", conv3_biases) + conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + + with tf.name_scope("layer6-pool3"): + pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 3 * 512 + reshaped = tf.reshape(pool3, [-1, nodes]) + + with tf.variable_scope('layer7-fc1'): + fc1_weights = tf.get_variable("weight", [nodes, 1024], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer7-fc1/weights", fc1_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer7-fc1/biases", fc1_biases) + + fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + if train: fc1 = tf.nn.dropout(fc1, 0.5) + + with tf.variable_scope('layer8-fc2'): + fc2_weights = tf.get_variable("weight", [1024, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer8-fc2/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer8-fc2/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer9-fc3'): + fc3_weights = tf.get_variable("weight", [512, 2], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer9-fc3/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer9-fc3/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + 
+#训练和测试数据,可将n_epoch设置更大一些 +n_epoch = 500 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + _,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) + + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/resize/new1/training_fr_timeFreq_new/read.py b/cnn_new_gray/resize/new1/training_fr_timeFreq_new/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/resize/new1/training_fr_timeFreq_new/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/resize/new2/cnn_test1.py b/cnn_new_gray/resize/new2/cnn_test1.py new file mode 100644 index 0000000..45fec26 --- /dev/null +++ b/cnn_new_gray/resize/new2/cnn_test1.py @@ -0,0 +1,131 @@ +# -*- coding: utf-8 -*- +from skimage import io,transform +import tensorflow as tf +from travel_path import * +import sys + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +signal_dict 
= {0:'正常无风舞信号',1:'异常有风舞信号'} + +w = 10 +h = 25 +c = 1 + +# 测试集地址 +path = 'test_set/' +files = travel_txt(path) + +data = [] +labels = [] +for i in range(files.shape[0]): + data_tmp = np.genfromtxt(files[i]) + data_tmp1 = data_tmp[:250,:] + for j in range(data_tmp1.shape[1]): + data_tmp2 = data_tmp1[:,j] + data_tmp2 = data_tmp2.reshape(10, 25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp2[i1, j1] + # b[i1, j1, 1] = data_tmp2[i1, j1] + # b[i1, j1, 2] = data_tmp2[i1, j1] + # img = transform.resize(b,(10,24,1)) + img = b + data.append(img) + label_tmp1 = data_tmp[250,:] + label_tmp1 = label_tmp1.reshape(1,label_tmp1.shape[0]) + # if data == []: + # data = data_tmp1 + # else: + # data = np.column_stack((data, data_tmp1)) + if labels == []: + labels = label_tmp1 + else: + labels = np.column_stack((labels, label_tmp1)) + +data = np.asarray(data,np.float32) +label = labels.reshape(labels.shape[0]*labels.shape[1],) +# print(data.shape) +# print(label.shape) + +with tf.Session() as sess: + saver = tf.train.import_meta_graph('model/model.ckpt-' + sys.argv[1] + '.meta') + saver.restore(sess,tf.train.latest_checkpoint('model/')) + + graph = tf.get_default_graph() + x = graph.get_tensor_by_name("x:0") + feed_dict = {x:data} + + logits = graph.get_tensor_by_name("logits_eval:0") + + classification_result = sess.run(logits,feed_dict) + + #打印出预测矩阵 + # print(classification_result) + #打印出预测矩阵每一行最大值的索引 + # print(tf.argmax(classification_result,1).eval()) + #根据索引通过字典对应花的分类 + output = [] + output = tf.argmax(classification_result,1).eval() + count = 0 + count_abnormal = 0 + total_abnormal = 0 + for i in range(len(output)): + # print("第",i+1,"帧信号识别:"+signal_dict[output[i]]) + if label[i] == 1: + total_abnormal = total_abnormal + 1 + if output[i] == label[i]: + count = count + 1 + if label[i] == 1: + count_abnormal = count_abnormal + 1 + print(count_abnormal) + print(total_abnormal) + print(count) + # print("信号识别准确率:",count/label.shape[0]) + # print("异常信号识别准确率:",count_abnormal/total_abnormal) + # 记录下每一步的损失函数和训练之后的准确率 + # if os.path.exists('test_acc/test acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count/label.shape[0]]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # else: + # data = np.array([count/label.shape[0]],dtype=np.float64) + # np.savetxt('test_acc/test acc in every step training model.txt', data) + # if os.path.exists('test_acc/test abnormal acc in every step training model.txt'): + # data_tmp = np.genfromtxt('test_acc/test abnormal acc in every step training model.txt') + # data_tmp = np.array([data_tmp]) + # if data_tmp.shape != (1,): + # data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + # data_i = np.array([[count_abnormal/total_abnormal]],dtype=np.float64) + # data = np.row_stack((data_tmp, data_i)) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + # else: + # data = np.array([count_abnormal/total_abnormal],dtype=np.float64) + # np.savetxt('test_acc/test abnormal acc in every step training model.txt', data) + if os.path.exists('test_acc/count.txt'): + data_tmp = np.genfromtxt('test_acc/count.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + 
data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count.txt', data) + else: + data = np.array([count]) + np.savetxt('test_acc/count.txt', data) + if os.path.exists('test_acc/count_abnormal.txt'): + data_tmp = np.genfromtxt('test_acc/count_abnormal.txt') + data_tmp = np.array([data_tmp]) + if data_tmp.shape != (1,): + data_tmp = data_tmp.reshape(data_tmp.shape[0]*data_tmp.shape[1],1) + data_i = np.array([[count_abnormal]]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('test_acc/count_abnormal.txt', data) + else: + data = np.array([count_abnormal]) + np.savetxt('test_acc/count_abnormal.txt', data) diff --git a/cnn_new_gray/resize/new2/cnn_train1.py b/cnn_new_gray/resize/new2/cnn_train1.py new file mode 100644 index 0000000..e456162 --- /dev/null +++ b/cnn_new_gray/resize/new2/cnn_train1.py @@ -0,0 +1,222 @@ +# -*- coding: utf-8 -*- +from travel_path import * +import tensorflow as tf +import time +from subprocess import call + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +w = 10 +h = 25 +c = 1 + +model_path = "model/model.ckpt" + +data0 = np.genfromtxt("training_fr_timeFreq_new_1/training data.txt") +label = np.genfromtxt("training_fr_timeFreq_new_1/labels.txt") +data = [] +for i in range(data0.shape[0]): + data_tmp = data0[i,:] + data_tmp = data_tmp.reshape(10,25) + b = np.zeros((10, 25, 1)) + for i1 in range(10): + for j1 in range(25): + b[i1, j1, 0] = data_tmp[i1, j1] + img = b + data.append(img) +data = np.asarray(data,np.float32) +print(data.shape) +print(label.shape) + +# 打乱顺序 +num_example = data.shape[0] +arr = np.arange(num_example) +np.random.shuffle(arr) +data = data[arr] +label = label[arr] + +x_train = data +y_train = label + +#-----------------构建网络---------------------- +#占位符 +x = tf.placeholder(tf.float32,shape = [None,w,h,c],name='x') +y_ = tf.placeholder(tf.int32,shape = [None,],name='y_') + +def inference(input_tensor, train, regularizer): + with tf.variable_scope('layer1-conv1'): + conv1_weights = tf.get_variable("weight", [5, 5, 1, 128], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/weights",conv1_weights) + conv1_biases = tf.get_variable("bias", [128], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer1-conv1/biases", conv1_biases) + conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME') + relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases)) + + with tf.name_scope("layer2-pool1"): + pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID") + + with tf.variable_scope("layer3-conv2"): + conv2_weights = tf.get_variable("weight", [5, 5, 128, 256], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/weights", conv2_weights) + conv2_biases = tf.get_variable("bias", [256], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer3-conv2/biases", conv2_biases) + conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME') + relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases)) + + with tf.name_scope("layer4-pool2"): + pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + + with tf.variable_scope("layer5-conv3"): + conv3_weights = tf.get_variable("weight", [3, 3, 256, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + # 可视化观看变量 + 
tf.summary.histogram("layer5-conv3/weights", conv3_weights) + conv3_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.0)) + # 可视化观看变量 + tf.summary.histogram("layer5-conv3/biases", conv3_biases) + conv3 = tf.nn.conv2d(pool2, conv3_weights, strides=[1, 1, 1, 1], padding='SAME') + relu3 = tf.nn.relu(tf.nn.bias_add(conv3, conv3_biases)) + + with tf.name_scope("layer6-pool3"): + pool3 = tf.nn.max_pool(relu3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID') + nodes = 1 * 3 * 512 + reshaped = tf.reshape(pool3, [-1, nodes]) + + with tf.variable_scope('layer7-fc1'): + fc1_weights = tf.get_variable("weight", [nodes, 1024], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer7-fc1/weights", fc1_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc1_weights)) + fc1_biases = tf.get_variable("bias", [1024], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer7-fc1/biases", fc1_biases) + + fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases) + if train: fc1 = tf.nn.dropout(fc1, 0.5) + + with tf.variable_scope('layer8-fc2'): + fc2_weights = tf.get_variable("weight", [1024, 512], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer8-fc2/weights", fc2_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc2_weights)) + fc2_biases = tf.get_variable("bias", [512], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer8-fc2/biases", fc2_biases) + + fc2 = tf.nn.relu(tf.matmul(fc1, fc2_weights) + fc2_biases) + if train: fc2 = tf.nn.dropout(fc2, 0.5) + + with tf.variable_scope('layer9-fc3'): + fc3_weights = tf.get_variable("weight", [512, 2], + initializer=tf.truncated_normal_initializer(stddev=0.1)) + tf.summary.histogram("layer9-fc3/weights", fc3_weights) + if regularizer != None: tf.add_to_collection('losses', regularizer(fc3_weights)) + fc3_biases = tf.get_variable("bias", [2], initializer=tf.constant_initializer(0.1)) + tf.summary.histogram("layer9-fc3/biases", fc3_biases) + logit = tf.matmul(fc2, fc3_weights) + fc3_biases + + return logit + +#---------------------------网络结束--------------------------- +regularizer = tf.contrib.layers.l2_regularizer(0.0001) +logits = inference(x,False,regularizer) + +#(小处理)将logits乘以1赋值给logits_eval,定义name,方便在后续调用模型时通过tensor名字调用输出tensor +b = tf.constant(value=1,dtype=tf.float32) +logits_eval = tf.multiply(logits,b,name='logits_eval') + +loss = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=y_) +train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss) +correct_prediction = tf.equal(tf.cast(tf.argmax(logits,1),tf.int32), y_) +acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) + +#定义一个函数,按批次取数据 +def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False): + assert len(inputs) == len(targets) + if shuffle: + indices = np.arange(len(inputs)) + np.random.shuffle(indices) + for start_idx in range(0, len(inputs) - batch_size + 1, batch_size): + if shuffle: + excerpt = indices[start_idx:start_idx + batch_size] + else: + excerpt = slice(start_idx, start_idx + batch_size) + yield inputs[excerpt], targets[excerpt] + +#训练和测试数据,可将n_epoch设置更大一些 +n_epoch = 100 +batch_size = 64 +saver = tf.train.Saver() +sess = tf.Session() +# 合并到Summary中 +merged = tf.summary.merge_all() +# 选定可视化存储目录 +writer = tf.summary.FileWriter("graph/", sess.graph) +sess.run(tf.global_variables_initializer()) + +initial_step = 0 + +# 验证之前是否已经保存了检查点文件 +ckpt = 
tf.train.get_checkpoint_state('model/') +if ckpt and ckpt.model_checkpoint_path: + saver.restore(sess, ckpt.model_checkpoint_path) + initial_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]) + print(initial_step) + initial_step = initial_step + 1 + +for epoch in range(initial_step, n_epoch): + start_time = time.time() + + print("initial_step: ", initial_step) + + #training + train_loss, train_acc, n_batch = 0, 0, 0 + for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True): + _,err,ac = sess.run([train_op,loss,acc], feed_dict={x: x_train_a, y_: y_train_a}) + train_loss += err; train_acc += ac; n_batch += 1 + print("train loss: %f" % (np.sum(train_loss)/n_batch)) + print("train acc: %f" % (np.sum(train_acc)/n_batch)) + tf.summary.scalar('train loss', np.sum(train_loss)/n_batch) + tf.summary.scalar('train accuracy', np.sum(train_acc)/n_batch) + + # 记录下每一步的损失函数和训练之后的准确率 + if os.path.exists('loss_acc/loss in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/loss in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_loss)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/loss in every step training model.txt', data) + else: + data = np.array([(np.sum(train_loss)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/loss in every step training model.txt', data) + if os.path.exists('loss_acc/acc in every step training model.txt'): + data_tmp = np.genfromtxt('loss_acc/acc in every step training model.txt') + if epoch > 1: + print(data_tmp.shape) + data_tmp = data_tmp.reshape(data_tmp.shape[0],1) + data_i = np.array([(np.sum(train_acc)/n_batch)]) + data = np.row_stack((data_tmp, data_i)) + np.savetxt('loss_acc/acc in every step training model.txt', data) + else: + data = np.array([(np.sum(train_acc)/n_batch)]) + print(data.shape) + np.savetxt('loss_acc/acc in every step training model.txt', data) + + if (epoch+1)%5 == 0: + result = sess.run(merged,feed_dict={x: x_train_a, y_: y_train_a}) + writer.add_summary(result, epoch+1) + + saver.save(sess, model_path, global_step=epoch) + call("python cnn_test1.py " + str(epoch), shell=True) + +saver.save(sess, model_path, global_step=n_epoch-1) +sess.close() diff --git a/cnn_new_gray/resize/new2/plot.py b/cnn_new_gray/resize/new2/plot.py new file mode 100644 index 0000000..b22469c --- /dev/null +++ b/cnn_new_gray/resize/new2/plot.py @@ -0,0 +1,60 @@ +import numpy as np +import pylab as pl + +pl.figure(figsize=(11,9)) + +data = np.genfromtxt("loss_acc/acc in every step training model.txt") +data = data[:490] + +pl.subplot(221) +pl.plot(data, color='blue') +pl.xlabel('Epoch') +pl.ylabel('Training Accuracy Rate') +pl.title('Training Accuracy Rate Varies with Training Epochs') + +data = np.genfromtxt("loss_acc/loss in every step training model.txt") +data = data[:490] + +pl.subplot(222) +pl.plot(data, color='blue') +pl.xlabel('Epoch') +pl.ylabel('Loss') +pl.title('Loss Varies with Training Epochs') + +data = np.genfromtxt("test_acc/count.txt") +data = data[:490] +data_abnormal = np.genfromtxt("test_acc/count_abnormal.txt") +data_abnormal = data_abnormal[:490] + +pl.subplot(223) +pl.plot(data/5184.0, color='k', linewidth=0.7, label='Accuracy in All') +pl.plot(data_abnormal/192.0, color='r', linewidth=0.7, label='Accuracy in Abnormal') +pl.plot((data-data_abnormal)/4992.0, color='b', linewidth=0.7, label='Accuracy in Normal') +pl.xlabel('Epoch') +pl.ylabel('Testing 
Accuracy Rate') +pl.title('Test Accuracy Rate Varies with Training Epochs') +pl.legend() + +data = np.genfromtxt("test_acc/count_abnormal.txt") +data = data[:490] +data_new = [] +for i in range(data.shape[0]-5+1): + data_tmp = data[i:i+5] + data_mean = np.mean(data_tmp,axis=0) + data_mean = data_mean/192.0 + if data_new == []: + data_new = np.array([[data_mean]]) + else: + data_new = np.row_stack((data_new, data_mean)) +epochs = np.arange(1,data_new.shape[0]+1) + +pl.subplot(224) +pl.plot(epochs,data_new,color='r') +pl.title("Mean of abnormal-testing accuracy rates of" + "\n" + + "5 classifiers having neighboring training epochs") +pl.xlabel("Training Epochs of 1st Classifier") +pl.ylabel("Abnormal-Testing Accuracy Rates Mean") + +pl.tight_layout() + +pl.savefig(u'fig.png') diff --git a/cnn_new_gray/resize/new2/training_fr_timeFreq_new_1/read.py b/cnn_new_gray/resize/new2/training_fr_timeFreq_new_1/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/resize/new2/training_fr_timeFreq_new_1/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/training_fr_timeFreq/read.py b/cnn_new_gray/training_fr_timeFreq/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/cnn_new_gray/training_fr_timeFreq/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/cnn_new_gray/travel_path.py b/cnn_new_gray/travel_path.py new file mode 100644 index 0000000..f3f09bf --- /dev/null +++ b/cnn_new_gray/travel_path.py @@ -0,0 +1,21 @@ +import glob +import os +import numpy as np + +def travel_txt(path): + files = [] + for file in glob.glob(path+'/*.txt'): + files.append(file) + return np.asarray(files) + +def travel_txt_in_path(path): + cate = [path+x for x in os.listdir(path) if os.path.isdir(path+x)] + files = [] + for idx,folder in enumerate(cate): + for file in glob.glob(folder+'/*.txt'): + files.append(file) + return np.asarray(files) + +def list_files(files): + for i in range(files.shape[0]): + print(files[i]) diff --git a/cnn_new_gray/try.py b/cnn_new_gray/try.py new file mode 100644 index 0000000..1b8104d --- /dev/null +++ b/cnn_new_gray/try.py @@ -0,0 +1,186 @@ +# setup-only-ignore +import tensorflow as tf + +sess = tf.InteractiveSession() + +import glob + +image_filenames = glob.glob("./imagenet-dogs/n02*/*.jpg") + +from itertools import groupby +from collections import defaultdict + +training_dataset = defaultdict(list) +testing_dataset = defaultdict(list) + +# Split up the filename into its breed and corresponding filename. 
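To make the split below concrete: for a path of the shape matched by the glob above, index 2 is the breed directory (the specific file name here is only illustrative).

# Hypothetical example path matching "./imagenet-dogs/n02*/*.jpg":
_example = "./imagenet-dogs/n02085620-Chihuahua/n02085620_10074.jpg"
print(_example.split("/")[2])  # "n02085620-Chihuahua" (index 0 is ".", index 1 is "imagenet-dogs")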
The breed is found by taking the directory name +image_filename_with_breed = map(lambda filename: (filename.split("/")[2], filename), image_filenames) + +# Group each image by the breed which is the 0th element in the tuple returned above +for dog_breed, breed_images in groupby(image_filename_with_breed, lambda x: x[0]): + # Enumerate each breed's image and send ~20% of the images to a testing set + for i, breed_image in enumerate(breed_images): + if i % 5 == 0: + testing_dataset[dog_breed].append(breed_image[1]) + else: + training_dataset[dog_breed].append(breed_image[1]) + + # Check that each breed includes at least 18% of the images for testing + breed_training_count = len(training_dataset[dog_breed]) + breed_testing_count = len(testing_dataset[dog_breed]) + + assert round(breed_testing_count / (breed_training_count + breed_testing_count), 2) > 0.18, "Not enough testing images." + +def write_records_file(dataset, record_location): + """ + Fill a TFRecords file with the images found in `dataset` and include their category. + + Parameters + ---------- + dataset : dict(list) + Dictionary with each key being a label for the list of image filenames of its value. + record_location : str + Location to store the TFRecord output. + """ + writer = None + + # Enumerating the dataset because the current index is used to breakup the files if they get over 100 + # images to avoid a slowdown in writing. + current_index = 0 + for breed, images_filenames in dataset.items(): + for image_filename in images_filenames: + if current_index % 100 == 0: + if writer: + writer.close() + + record_filename = "{record_location}-{current_index}.tfrecords".format( + record_location=record_location, + current_index=current_index) + + writer = tf.python_io.TFRecordWriter(record_filename) + current_index += 1 + + image_file = tf.read_file(image_filename) + + # In ImageNet dogs, there are a few images which TensorFlow doesn't recognize as JPEGs. This + # try/catch will ignore those images. + try: + image = tf.image.decode_jpeg(image_file) + except: + print(image_filename) + continue + + # Converting to grayscale saves processing and memory but isn't required. + grayscale_image = tf.image.rgb_to_grayscale(image) + resized_image = tf.image.resize_images(grayscale_image, 250, 151) + + # tf.cast is used here because the resized images are floats but haven't been converted into + # image floats where an RGB value is between [0,1). + image_bytes = sess.run(tf.cast(resized_image, tf.uint8)).tobytes() + + # Instead of using the label as a string, it'd be more efficient to turn it into either an + # integer index or a one-hot encoded rank one tensor. 
+ # https://en.wikipedia.org/wiki/One-hot + image_label = breed.encode("utf-8") + + example = tf.train.Example(features=tf.train.Features(feature={ + 'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_label])), + 'image': tf.train.Feature(bytes_list=tf.train.BytesList(value=[image_bytes])) + })) + + writer.write(example.SerializeToString()) + writer.close() + +write_records_file(testing_dataset, "./output/testing-images/testing-image") +write_records_file(training_dataset, "./output/training-images/training-image") + +filename_queue = tf.train.string_input_producer( + tf.train.match_filenames_once("./output/training-images/*.tfrecords")) +reader = tf.TFRecordReader() +_, serialized = reader.read(filename_queue) + +features = tf.parse_single_example( + serialized, + features={ + 'label': tf.FixedLenFeature([], tf.string), + 'image': tf.FixedLenFeature([], tf.string), + }) + +record_image = tf.decode_raw(features['image'], tf.uint8) + +# Changing the image into this shape helps train and visualize the output by converting it to +# be organized like an image. +image = tf.reshape(record_image, [250, 151, 1]) + +label = tf.cast(features['label'], tf.string) + +min_after_dequeue = 10 +batch_size = 3 +capacity = min_after_dequeue + 3 * batch_size +image_batch, label_batch = tf.train.shuffle_batch( + [image, label], batch_size=batch_size, capacity=capacity, min_after_dequeue=min_after_dequeue) + +# Converting the images to a float of [0,1) to match the expected input to convolution2d +float_image_batch = tf.image.convert_image_dtype(image_batch, tf.float32) + +conv2d_layer_one = tf.contrib.layers.convolution2d( + float_image_batch, + num_output_channels=32, # The number of filters to generate + kernel_size=(5,5), # It's only the filter height and width. + activation_fn=tf.nn.relu, + weight_init=tf.random_normal, + stride=(2, 2), + trainable=True) +pool_layer_one = tf.nn.max_pool(conv2d_layer_one, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME') + +# Note, the first and last dimension of the convolution output hasn't changed but the +# middle two dimensions have. +conv2d_layer_one.get_shape(), pool_layer_one.get_shape() + +conv2d_layer_two = tf.contrib.layers.convolution2d( + pool_layer_one, + num_output_channels=64, # More output channels means an increase in the number of filters + kernel_size=(5,5), + activation_fn=tf.nn.relu, + weight_init=tf.random_normal, + stride=(1, 1), + trainable=True) + +pool_layer_two = tf.nn.max_pool(conv2d_layer_two, + ksize=[1, 2, 2, 1], + strides=[1, 2, 2, 1], + padding='SAME') + +conv2d_layer_two.get_shape(), pool_layer_two.get_shape() + +flattened_layer_two = tf.reshape( + pool_layer_two, + [ + batch_size, # Each image in the image_batch + -1 # Every other dimension of the input + ]) + +flattened_layer_two.get_shape() + +# The weight_init parameter can also accept a callable, a lambda is used here returning a truncated normal +# with a stddev specified. +hidden_layer_three = tf.contrib.layers.fully_connected( + flattened_layer_two, + 512, + weight_init=lambda i, dtype: tf.truncated_normal([38912, 512], stddev=0.1), + activation_fn=tf.nn.relu +) + +# Dropout some of the neurons, reducing their importance in the model +hidden_layer_three = tf.nn.dropout(hidden_layer_three, 0.1) + +# The output of this are all the connections between the previous layers and the 120 different dog breeds +# available to train on. 
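Before the final layer, a brief aside on the hard-coded 38912 in the hidden layer's weight_init a few lines up: it is the flattened size of pool_layer_two. Assuming the contrib convolution layers use their default 'SAME' padding, the shape arithmetic works out as sketched below (plain Python, just documenting the numbers):

import math
# 250x151 grayscale input; conv1 has stride 2 and each max pool halves both spatial dims ('SAME' rounds up).
h, w = 250, 151
h, w = math.ceil(h / 2), math.ceil(w / 2)  # conv1, stride 2 -> 125 x 76
h, w = math.ceil(h / 2), math.ceil(w / 2)  # pool1           -> 63 x 38  (conv2 has stride 1, shape unchanged)
h, w = math.ceil(h / 2), math.ceil(w / 2)  # pool2           -> 32 x 19
print(h * w * 64)  # 38912, matching the [38912, 512] weight shape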
+final_fully_connected = tf.contrib.layers.fully_connected( + hidden_layer_three, + 120, # Number of dog breeds in the ImageNet Dogs dataset + weight_init=lambda i, dtype: tf.truncated_normal([512, 120], stddev=0.1) +) \ No newline at end of file diff --git a/rnn_new_gray/.gitignore b/rnn_new_gray/.gitignore new file mode 100644 index 0000000..97ad6be --- /dev/null +++ b/rnn_new_gray/.gitignore @@ -0,0 +1,5 @@ +.idea +**/*.txt +**/*.png +**/*.jpg +**/*.local diff --git a/rnn_new_gray/SequenceLabellingModel.py b/rnn_new_gray/SequenceLabellingModel.py new file mode 100755 index 0000000..8872367 --- /dev/null +++ b/rnn_new_gray/SequenceLabellingModel.py @@ -0,0 +1,80 @@ +import tensorflow as tf + +from helpers import lazy_property + + +class SequenceLabellingModel: + + def __init__(self, data, target, params): + self.data = data + self.target = target + self.params = params + self.prediction + self.cost + self.error + self.optimize + + @lazy_property + def length(self): + used = tf.sign(tf.reduce_max(tf.abs(self.data), reduction_indices=2)) + length = tf.reduce_sum(used, reduction_indices=1) + length = tf.cast(length, tf.int32) + return length + + @lazy_property + def prediction(self): + output, _ = tf.nn.dynamic_rnn( + tf.nn.rnn_cell.GRUCell(self.params.rnn_hidden), + self.data, + dtype=tf.float32, + sequence_length=self.length, + ) + # Softmax layer. + max_length = int(self.target.get_shape()[1]) + num_classes = int(self.target.get_shape()[2]) + weight = tf.Variable(tf.truncated_normal( + [self.params.rnn_hidden, num_classes], stddev=0.01)) + bias = tf.Variable(tf.constant(0.1, shape=[num_classes])) + # Flatten to apply same weights to all time steps. + output = tf.reshape(output, [-1, self.params.rnn_hidden]) + prediction = tf.nn.softmax(tf.matmul(output, weight) + bias) + prediction = tf.reshape(prediction, [-1, max_length, num_classes]) + return prediction + + @lazy_property + def cost(self): + # Compute cross entropy for each frame. + cross_entropy = self.target * tf.log(self.prediction) + cross_entropy = -tf.reduce_sum(cross_entropy, reduction_indices=2) + mask = tf.sign(tf.reduce_max(tf.abs(self.target), reduction_indices=2)) + cross_entropy *= mask + # Average over actual sequence lengths. + cross_entropy = tf.reduce_sum(cross_entropy, reduction_indices=1) + cross_entropy /= tf.cast(self.length, tf.float32) + return tf.reduce_mean(cross_entropy) + + @lazy_property + def error(self): + mistakes = tf.not_equal( + tf.argmax(self.target, 2), tf.argmax(self.prediction, 2)) + mistakes = tf.cast(mistakes, tf.float32) + mask = tf.sign(tf.reduce_max(tf.abs(self.target), reduction_indices=2)) + mistakes *= mask + # Average over actual sequence lengths. 
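# (Padded time steps were already zeroed by the mask above, so summing over time and dividing by the true
# sequence length, rather than by max_length, yields each sequence's per-step error rate.)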
+ mistakes = tf.reduce_sum(mistakes, reduction_indices=1) + mistakes /= tf.cast(self.length, tf.float32) + return tf.reduce_mean(mistakes) + + @lazy_property + def optimize(self): + gradient = self.params.optimizer.compute_gradients(self.cost) + try: + limit = self.params.gradient_clipping + gradient = [ + (tf.clip_by_value(g, -limit, limit), v) + if g is not None else (None, v) + for g, v in gradient] + except AttributeError: + print('No gradient clipping parameter specified.') + optimize = self.params.optimizer.apply_gradients(gradient) + return optimize diff --git a/rnn_new_gray/SignalDataSet.py b/rnn_new_gray/SignalDataSet.py new file mode 100644 index 0000000..d0be409 --- /dev/null +++ b/rnn_new_gray/SignalDataSet.py @@ -0,0 +1,33 @@ +from travel_path import * + +def getDataLabel(path): + files = travel_txt(path) + data = [] + label = [] + for i in range(files.shape[0]): + data.append([]) + label.append([]) + data_tmp = np.genfromtxt(files[i]) + count = 0 + for j in range(data_tmp.shape[1]): + data_tmp1 = data_tmp[:250, j] + data_tmp1 = data_tmp1.reshape(5, 50) + label_tmp1 = data_tmp[250:, j] + if count == 12: + count = 0 + data.append([]) + label.append([]) + data[-1].append(data_tmp1) + label[-1].append(label_tmp1) + count = count + 1 + data = np.array(data) + label = np.array(label) + # print(data.shape) + # print(label.shape) + return data, label + +# validation + # path = 'train_set/' +# data, label = getDataLabel(path) +# print(data.shape) +# print(label.shape) diff --git a/rnn_new_gray/__pycache__/SequenceLabellingModel.cpython-36.pyc b/rnn_new_gray/__pycache__/SequenceLabellingModel.cpython-36.pyc new file mode 100644 index 0000000..6f09e3c Binary files /dev/null and b/rnn_new_gray/__pycache__/SequenceLabellingModel.cpython-36.pyc differ diff --git a/rnn_new_gray/__pycache__/SignalDataSet.cpython-36.pyc b/rnn_new_gray/__pycache__/SignalDataSet.cpython-36.pyc new file mode 100644 index 0000000..9191c59 Binary files /dev/null and b/rnn_new_gray/__pycache__/SignalDataSet.cpython-36.pyc differ diff --git a/rnn_new_gray/__pycache__/batched.cpython-36.pyc b/rnn_new_gray/__pycache__/batched.cpython-36.pyc new file mode 100644 index 0000000..16cb251 Binary files /dev/null and b/rnn_new_gray/__pycache__/batched.cpython-36.pyc differ diff --git a/rnn_new_gray/__pycache__/travel_path.cpython-36.pyc b/rnn_new_gray/__pycache__/travel_path.cpython-36.pyc new file mode 100644 index 0000000..e5745d2 Binary files /dev/null and b/rnn_new_gray/__pycache__/travel_path.cpython-36.pyc differ diff --git a/rnn_new_gray/batched.py b/rnn_new_gray/batched.py new file mode 100755 index 0000000..78ea650 --- /dev/null +++ b/rnn_new_gray/batched.py @@ -0,0 +1,25 @@ +import numpy as np + + +def batched(data, target, batch_size): + """ + Yield (batch_data, batch_target, epoch) tuples indefinitely, reshuffling + the examples whenever the offset wraps around into a new epoch. + """ + epoch = 0 + offset = 0 + while True: + old_offset = offset + offset = (offset + batch_size) % (target.shape[0] - batch_size) + + if offset < old_offset: + # New epoch, need to shuffle data + p = np.random.permutation(len(data)) + data = data[p] + target = target[p] + epoch += 1 + + batch_data = data[offset:(offset + batch_size), :] + batch_target = target[offset:(offset + batch_size), :] + yield batch_data, batch_target, epoch + \ No newline at end of file diff --git a/rnn_new_gray/funny/GeneratesPic.py b/rnn_new_gray/funny/GeneratesPic.py new file mode 100644 index 0000000..75f2456 --- /dev/null +++ b/rnn_new_gray/funny/GeneratesPic.py @@ -0,0 +1,40 @@ +import matplotlib.pyplot as plt +import matplotlib.patches as patches + +# Create a figure object +fig, ax = plt.subplots() + +# 
Add a circle for the bear's head +circle = patches.Circle((0.5, 0.6), 0.2, color='brown') +ax.add_patch(circle) + +# Add a rectangle for the bear's body +rect = patches.Rectangle((0.3, 0.1), 0.4, 0.5, color='brown') +ax.add_patch(rect) + +# Add two circles for the bear's ears +left_ear = patches.Circle((0.4, 0.7), 0.08, color='white') +right_ear = patches.Circle((0.6, 0.7), 0.08, color='white') +ax.add_patch(left_ear) +ax.add_patch(right_ear) + +# Add a rectangle for the car body +car_body = patches.Rectangle((0.2, 0), 0.6, 0.2, color='lightblue') +ax.add_patch(car_body) + +# Add two circles for the car wheels +left_wheel = patches.Circle((0.3, 0.1), 0.1, color='black') +right_wheel = patches.Circle((0.7, 0.1), 0.1, color='black') +ax.add_patch(left_wheel) +ax.add_patch(right_wheel) + +# Set the axis limits and aspect ratio +ax.set_xlim(0, 1) +ax.set_ylim(0, 1) +ax.set_aspect('equal') + +# Hide the axes +plt.axis('off') + +# Show the figure +plt.show() diff --git a/rnn_new_gray/helpers/__init__.py b/rnn_new_gray/helpers/__init__.py new file mode 100755 index 0000000..61a806c --- /dev/null +++ b/rnn_new_gray/helpers/__init__.py @@ -0,0 +1,19 @@ +""" +Helper __init__ file to make using these helpers easier. These imports should be +namespaced properly when used in full projects! +""" +# Use proper namespacing and importing when using this file +# +# 1. Use absolute imports +# from __future__ import absolute_import +# +# 2. Use absolute path for imports +# e.g. +# from path.to.helpers.attribute_dictionary import * + +from .attribute_dictionary import * +from .disk_cache_decorator import * +from .download import * +from .ensure_directory import * +from .lazy_property_decorator import * +from .overwrite_graph_decorator import * diff --git a/rnn_new_gray/helpers/__pycache__/__init__.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/__init__.cpython-36.pyc new file mode 100644 index 0000000..d49dd7e Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/__init__.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/__pycache__/attribute_dictionary.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/attribute_dictionary.cpython-36.pyc new file mode 100644 index 0000000..990beb5 Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/attribute_dictionary.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/__pycache__/disk_cache_decorator.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/disk_cache_decorator.cpython-36.pyc new file mode 100644 index 0000000..96a052c Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/disk_cache_decorator.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/__pycache__/download.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/download.cpython-36.pyc new file mode 100644 index 0000000..0fa3089 Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/download.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/__pycache__/ensure_directory.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/ensure_directory.cpython-36.pyc new file mode 100644 index 0000000..ab4364b Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/ensure_directory.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/__pycache__/lazy_property_decorator.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/lazy_property_decorator.cpython-36.pyc new file mode 100644 index 0000000..8adc618 Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/lazy_property_decorator.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/__pycache__/overwrite_graph_decorator.cpython-36.pyc b/rnn_new_gray/helpers/__pycache__/overwrite_graph_decorator.cpython-36.pyc new file mode 100644 index 
0000000..015b4c9 Binary files /dev/null and b/rnn_new_gray/helpers/__pycache__/overwrite_graph_decorator.cpython-36.pyc differ diff --git a/rnn_new_gray/helpers/attribute_dictionary.py b/rnn_new_gray/helpers/attribute_dictionary.py new file mode 100755 index 0000000..3776b9a --- /dev/null +++ b/rnn_new_gray/helpers/attribute_dictionary.py @@ -0,0 +1,11 @@ +class AttrDict(dict): + + def __getattr__(self, key): + if key not in self: + raise AttributeError + return self[key] + + def __setattr__(self, key, value): + if key not in self: + raise AttributeError + self[key] = value \ No newline at end of file diff --git a/rnn_new_gray/helpers/disk_cache_decorator.py b/rnn_new_gray/helpers/disk_cache_decorator.py new file mode 100755 index 0000000..edc1dec --- /dev/null +++ b/rnn_new_gray/helpers/disk_cache_decorator.py @@ -0,0 +1,34 @@ +import functools +import os +import pickle +from .ensure_directory import ensure_directory  # defined in this same helpers package + +def disk_cache(basename, directory, method=False): + """ + Function decorator for caching pickleable return values on disk. Uses a + hash computed from the function arguments for invalidation. If 'method', + skip the first argument, usually being self or cls. The cache filepath is + 'directory/basename-hash.pickle'. + """ + directory = os.path.expanduser(directory) + ensure_directory(directory) + + def wrapper(func): + @functools.wraps(func) + def wrapped(*args, **kwargs): + key = (tuple(args), tuple(kwargs.items())) + # Don't use self or cls for the invalidation hash. + if method and key: + key = key[1:] + filename = '{}-{}.pickle'.format(basename, hash(key)) + filepath = os.path.join(directory, filename) + if os.path.isfile(filepath): + with open(filepath, 'rb') as handle: + return pickle.load(handle) + result = func(*args, **kwargs) + with open(filepath, 'wb') as handle: + pickle.dump(result, handle) + return result + return wrapped + + return wrapper \ No newline at end of file diff --git a/rnn_new_gray/helpers/disk_cache_decorator_example.py b/rnn_new_gray/helpers/disk_cache_decorator_example.py new file mode 100755 index 0000000..3f0dec2 --- /dev/null +++ b/rnn_new_gray/helpers/disk_cache_decorator_example.py @@ -0,0 +1,7 @@ +@disk_cache('dataset', '/home/user/dataset/') +def get_dataset(one_hot=True): + dataset = Dataset('http://example.com/dataset.bz2') + dataset = Tokenize(dataset) + if one_hot: + dataset = OneHotEncoding(dataset) + return dataset \ No newline at end of file diff --git a/rnn_new_gray/helpers/download.py b/rnn_new_gray/helpers/download.py new file mode 100755 index 0000000..026dc7e --- /dev/null +++ b/rnn_new_gray/helpers/download.py @@ -0,0 +1,36 @@ +import os +import shutil +import errno +from lxml import etree +from urllib.request import urlopen + + +def ensure_directory(directory): + """ + Create the directories along the provided directory path that do not exist. + """ + directory = os.path.expanduser(directory) + try: + os.makedirs(directory) + except OSError as e: + if e.errno != errno.EEXIST: + raise e + + +def download(url, directory, filename=None): + """ + Download a file and return its filename on the local file system. If the + file is already there, it will not be downloaded again. The filename is + derived from the url if not provided. Return the filepath. 
+ """ + if not filename: + _, filename = os.path.split(url) + directory = os.path.expanduser(directory) + ensure_directory(directory) + filepath = os.path.join(directory, filename) + if os.path.isfile(filepath): + return filepath + print('Download', filepath) + with urlopen(url) as response, open(filepath, 'wb') as file_: + shutil.copyfileobj(response, file_) + return filepath diff --git a/rnn_new_gray/helpers/ensure_directory.py b/rnn_new_gray/helpers/ensure_directory.py new file mode 100755 index 0000000..ed7a587 --- /dev/null +++ b/rnn_new_gray/helpers/ensure_directory.py @@ -0,0 +1,13 @@ +import errno +import os + +def ensure_directory(directory): + """ + Create the directories along the provided directory path that do not exist. + """ + directory = os.path.expanduser(directory) + try: + os.makedirs(directory) + except OSError as e: + if e.errno != errno.EEXIST: + raise e \ No newline at end of file diff --git a/rnn_new_gray/helpers/lazy_property_decorator.py b/rnn_new_gray/helpers/lazy_property_decorator.py new file mode 100755 index 0000000..8646ce1 --- /dev/null +++ b/rnn_new_gray/helpers/lazy_property_decorator.py @@ -0,0 +1,12 @@ +import functools + +def lazy_property(function): + attribute = '_lazy_' + function.__name__ + + @property + @functools.wraps(function) + def wrapper(self): + if not hasattr(self, attribute): + setattr(self, attribute, function(self)) + return getattr(self, attribute) + return wrapper \ No newline at end of file diff --git a/rnn_new_gray/helpers/lazy_property_decorator_example.py b/rnn_new_gray/helpers/lazy_property_decorator_example.py new file mode 100755 index 0000000..48e2312 --- /dev/null +++ b/rnn_new_gray/helpers/lazy_property_decorator_example.py @@ -0,0 +1,29 @@ +class Model: + + def __init__(self, data, target): + self.data = data + self.target = target + self.prediction + self.optimize + self.error + + @lazy_property + def prediction(self): + data_size = int(self.data.get_shape()[1]) + target_size = int(self.target.get_shape()[1]) + weight = tf.Variable(tf.truncated_normal([data_size, target_size])) + bias = tf.Variable(tf.constant(0.1, shape=[target_size])) + incoming = tf.matmul(self.data, weight) + bias + return tf.nn.softmax(incoming) + + @lazy_property + def optimize(self): + cross_entropy = -tf.reduce_sum(self.target, tf.log(self.prediction)) + optimizer = tf.train.RMSPropOptimizer(0.03) + return optimizer.minimize(cross_entropy) + + @lazy_property + def error(self): + mistakes = tf.not_equal( + tf.argmax(self.target, 1), tf.argmax(self.prediction, 1)) + return tf.reduce_mean(tf.cast(mistakes, tf.float32)) \ No newline at end of file diff --git a/rnn_new_gray/helpers/overwrite_graph_decorator.py b/rnn_new_gray/helpers/overwrite_graph_decorator.py new file mode 100755 index 0000000..95734da --- /dev/null +++ b/rnn_new_gray/helpers/overwrite_graph_decorator.py @@ -0,0 +1,9 @@ +import functools +import tensorflow as tf + +def overwrite_graph(function): + @functools.wraps(function) + def wrapper(*args, **kwargs): + with tf.Graph().as_default(): + return function(*args, **kwargs) + return wrapper \ No newline at end of file diff --git a/rnn_new_gray/helpers/overwrite_graph_decorator_example.py b/rnn_new_gray/helpers/overwrite_graph_decorator_example.py new file mode 100755 index 0000000..8cbc8b1 --- /dev/null +++ b/rnn_new_gray/helpers/overwrite_graph_decorator_example.py @@ -0,0 +1,8 @@ +@overwrite_graph +def main(): + # Define your placeholders, model, etc. + data = tf.placeholder(...) + target = tf.placeholder(...) 
+ model = Model() + +main() \ No newline at end of file diff --git a/rnn_new_gray/rnn_train.py b/rnn_new_gray/rnn_train.py new file mode 100644 index 0000000..daf9f25 --- /dev/null +++ b/rnn_new_gray/rnn_train.py @@ -0,0 +1,122 @@ +import tensorflow as tf +from helpers import AttrDict +from SequenceLabellingModel import SequenceLabellingModel +from batched import batched +from SignalDataSet import * + +import os + +os.environ['TF_CPP_MIN_LOG_LEVEL']='2' + +params = AttrDict( + rnn_cell=tf.nn.rnn_cell.GRUCell, + rnn_hidden=300, + optimizer=tf.train.RMSPropOptimizer(0.002), + gradient_clipping=5, + batch_size=10, + epochs=5, + epoch_size=50 +) + +def get_dataset(path): + # Flatten images into vectors. + # path = 'train_set/' + data,label = getDataLabel(path) + data = data.reshape(data.shape[:2] + (-1,)) + # One-hot encode labels. + label0 = np.copy(label) + label0 = label0.reshape(label0.shape[0],label0.shape[1]) + label1 = np.zeros(label0.shape + (2,)) + for i in range(label.shape[0]): + for j in range(label.shape[1]): + if label[i,j,0] == 1: + label1[i,j,0] = 1 + else: + label1[i,j,1] = 1 + # Shuffle order of examples. + label = label1 + order = np.random.permutation(len(data)) + data = data[order] + label = label[order] + return data, label + + +# Split into training and test data. +train_data, train_target = get_dataset('train_set/') +print(train_data.shape) +print(train_target.shape) +test_data, test_target = get_dataset('test_set/') +print(test_data.shape) +print(test_target.shape) +abnormal_data = [] +abnormal_target = [] +for i in range(test_target.shape[0]): + for j in range(test_target.shape[1]): + if test_target[i,j,0] == 1: + abnormal_data.append(test_data[i, j]) + abnormal_target.append(test_target[i, j]) +abnormal_data = np.array(abnormal_data) +abnormal_target = np.array(abnormal_target) +abnormal_data = abnormal_data.reshape(16,12,250) +abnormal_target = abnormal_target.reshape(16,12,2) +# split = int(0.66 * len(data)) +# train_data, test_data = data[:split], data[split:] +# train_target, test_target = target[:split], target[split:] + +# Compute graph. 
+_, length, image_size = train_data.shape +num_classes = train_target.shape[2] +print(length, image_size, num_classes) +data = tf.placeholder(tf.float32, [None, length, image_size]) +target = tf.placeholder(tf.float32, [None, length, num_classes]) +model = SequenceLabellingModel(data, target, params) +batches = batched(train_data, train_target, params.batch_size) + +abnormal_count = 0 +for i0 in range(train_target.shape[0]): + for j0 in range(train_target.shape[1]): + if train_target[i0,j0,0] == 1: + abnormal_count = abnormal_count + 1 +print('abnormal_count in train:',abnormal_count) + +saver = tf.train.Saver() + +sess = tf.Session() +sess.run(tf.initialize_all_variables()) +for index, batch in enumerate(batches): + batch_data = batch[0] + batch_target = batch[1] + epoch = batch[2] + if epoch >= params.epochs: + break + feed = {data: batch_data, target: batch_target} + error, _ = sess.run([model.error, model.optimize], feed) + print('{}: {:3.6f}%'.format(index + 1, 100 * error)) + test_feed = {data: test_data, target: test_target} + test_error, _ = sess.run([model.error, model.cost], test_feed) + print('Test error: {:3.6f}%'.format(100 * test_error)) + abnormal_feed = {data: abnormal_data, target: abnormal_target} + abnormal_error, _ = sess.run([model.error, model.cost], abnormal_feed) + print('Abnormal error: {:3.6f}%'.format(100 * abnormal_error)) + +# model_path = 'model/model.ckpt' +# saver.save(sess, model_path) +# sess.close() + +abnormal_count = 0 +for i1 in range(test_target.shape[0]): + for j1 in range(test_target.shape[1]): + if test_target[i1,j1,0] == 1: + abnormal_count = abnormal_count + 1 +print('abnormal_count in test target:',abnormal_count) + +test_feed = {data: test_data, target: test_target} +test_error, _ = sess.run([model.error, model.cost], test_feed) +print('Test error: {:3.6f}%'.format(100 * test_error)) +abnormal_feed = {data: abnormal_data, target: abnormal_target} +abnormal_error, _ = sess.run([model.error, model.cost], abnormal_feed) +print('Abnormal error: {:3.6f}%'.format(100 * abnormal_error)) +# prediction = sess.run([model.prediction], {data: test_data}) +# prediction = np.array(prediction) +# print(prediction.shape) + diff --git a/rnn_new_gray/training_fr_timeFreq/read.py b/rnn_new_gray/training_fr_timeFreq/read.py new file mode 100644 index 0000000..43d4256 --- /dev/null +++ b/rnn_new_gray/training_fr_timeFreq/read.py @@ -0,0 +1,11 @@ +import numpy as np + +data = np.genfromtxt("training data.txt") +print(data.shape) +labels = np.genfromtxt("labels.txt") + +count = 0 +for i in range(labels.shape[0]): + if labels[i] == 1: + count = count + 1 +print(count) \ No newline at end of file diff --git a/rnn_new_gray/travel_path.py b/rnn_new_gray/travel_path.py new file mode 100644 index 0000000..f3f09bf --- /dev/null +++ b/rnn_new_gray/travel_path.py @@ -0,0 +1,21 @@ +import glob +import os +import numpy as np + +def travel_txt(path): + files = [] + for file in glob.glob(path+'/*.txt'): + files.append(file) + return np.asarray(files) + +def travel_txt_in_path(path): + cate = [path+x for x in os.listdir(path) if os.path.isdir(path+x)] + files = [] + for idx,folder in enumerate(cate): + for file in glob.glob(folder+'/*.txt'): + files.append(file) + return np.asarray(files) + +def list_files(files): + for i in range(files.shape[0]): + print(files[i])
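The travel_path.py helpers above are the entry point for every loader in this repository: travel_txt collects the *.txt spectrogram files in a single directory, travel_txt_in_path walks one level of subdirectories, and list_files prints whatever was found. The short sketch below shows how they might be driven by hand; it is not part of the commit above, and the 'train_set/' directory name is an assumption borrowed from the commented-out validation block in SignalDataSet.py.

# Usage sketch (not part of the diff above); assumes a local 'train_set/' directory
# containing the *.txt spectrogram files that SignalDataSet.py expects.
import numpy as np
from travel_path import travel_txt, list_files

files = travel_txt('train_set/')       # numpy array of '*.txt' file paths
list_files(files)                      # print every file that was found
if files.shape[0] > 0:
    sample = np.genfromtxt(files[0])   # one matrix per file; per SignalDataSet.py,
    print(sample.shape)                # rows 0-249 are data and row 250 holds the label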