from tensorflow.examples.tutorials.mnist import input_data
import numpy
import matplotlib.pyplot as plt
import tensorflow as tf

# Load the MNIST handwritten-digit dataset (the .gz files are downloaded on first run)
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train = mnist.train.images
y_train = mnist.train.labels
x_test = mnist.test.images
y_test = mnist.test.labels
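# With one_hot=True each label is a 10-dimensional indicator vector,
# e.g. the digit 3 is encoded as [0, 0, 0, 1, 0, 0, 0, 0, 0, 0].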

print(x_train.shape)  # 55000 images, each 28x28 = 784 pixels, stored flattened

# Show one image: index 54999 is the last training image
last_train_img = numpy.reshape(x_train[54999, :], (28, 28))
plt.matshow(last_train_img, cmap=plt.get_cmap('gray'))
plt.show()

# Hyperparameters
learning_rate = 0.5
training_steps = 1000
batch_size = 100
logs_path = 'TensorBoard/'
n_features = x_train.shape[1]  # 784
n_labels = y_train.shape[1]    # 10

# Placeholders fed with batches of images and labels
with tf.name_scope("Inputs"):
    x = tf.placeholder(tf.float32, [None, n_features], name="Input_Data")
with tf.name_scope("Labels"):
    y = tf.placeholder(tf.float32, [None, n_labels], name="Label_Data")

# Model variables (the trained parameters)
with tf.name_scope("ModelParameters"):
    W = tf.Variable(tf.zeros([n_features, n_labels]), name="Weights")
    b = tf.Variable(tf.zeros([n_labels]), name="Biases")
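# Zero initialization works here because the model is a single linear layer;
# with hidden layers, random initialization would normally be needed to break symmetry.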

# The model: a single softmax layer (multinomial logistic regression)
with tf.name_scope("Model"):
    prediction = tf.nn.softmax(tf.matmul(x, W) + b)
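# prediction has shape [batch_size, 10]: softmax turns the linear scores x*W + b
# into a probability distribution over the 10 digit classes for each example.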
with tf.name_scope("Cross_Entropy"):  # define the loss
    loss = tf.reduce_mean(-tf.reduce_sum(y * tf.log(prediction), axis=1))
    tf.summary.scalar("Loss", loss)
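# Taking tf.log of the softmax output can produce NaN if a probability hits 0.
# A numerically safer variant (not what this script uses) computes the loss from
# the pre-softmax logits, e.g.:
#   logits = tf.matmul(x, W) + b
#   loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))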
with tf.name_scope("Gradient_Descent"):  # optimization: gradient descent
    optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
with tf.name_scope("Accuracy"):  # accuracy
    correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    tf.summary.scalar("Accuracy", acc)
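# tf.argmax(..., 1) picks the most probable class for each example; acc is the
# fraction of examples whose predicted class matches the one-hot label.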

# Initialize the variables and start a session
init = tf.global_variables_initializer()

sess = tf.Session()
sess.run(init)

# Merge all summaries and write the graph to the log directory for TensorBoard
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

for step in range(training_steps):
    # train on one mini-batch of 100 examples per step
    batch_xs, batch_ys = mnist.train.next_batch(batch_size)
    sess.run(optimizer, feed_dict={x: batch_xs, y: batch_ys})
    if step % 50 == 0:
        # every 50 steps, print the batch loss and log the summaries
        print(sess.run(loss, feed_dict={x: batch_xs, y: batch_ys}))
        summary = sess.run(merged, feed_dict={x: batch_xs, y: batch_ys})
        writer.add_summary(summary, step)

# Evaluate on the 10000-image test set
print("Accuracy:", sess.run(acc, feed_dict={x: x_test, y: y_test}))

sess.close()
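# To view the logged loss/accuracy curves and the graph, point TensorBoard at the
# log directory, e.g.:  tensorboard --logdir=TensorBoard/
# and open the URL it prints (by default http://localhost:6006) in a browser.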