first_practice.py
# coding=utf-8
import tensorflow as tf

# Constant nodes: their values are fixed when the graph is built.
node1 = tf.constant(3.0, tf.float32)
node2 = tf.constant(4.0)  # dtype defaults to float32 if not specified
print(node1, node2)

# A Session is required to actually evaluate nodes in the graph.
sess = tf.Session()
print(sess.run([node1, node2]))

node3 = tf.add(node1, node2)
print("node3:", node3)
print("sess.run(node3):", sess.run(node3))

# Placeholders receive concrete values at run time through the feed dict.
a = tf.placeholder(tf.float32)
b = tf.placeholder(tf.float32)
adder_node = a + b  # "+" is shorthand for tf.add
print(sess.run(adder_node, {a: 3, b: 4.5}))
print(sess.run(adder_node, {a: [1, 3], b: [2, 4]}))

add_and_triple = adder_node * 3
print(sess.run(add_and_triple, {a: 3, b: 4.5}))
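# Quick sanity check of the runs above (values computed by hand, not printed by TensorFlow):
# adder_node gives 7.5 for {a: 3, b: 4.5} and [3. 7.] for the list inputs,
# and add_and_triple gives (3 + 4.5) * 3 = 22.5.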
# Model parameters are Variables: they hold state and can be updated during training.
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(tf.float32)
linear_model = W * x + b

# Variables must be explicitly initialized before they are used.
init = tf.global_variables_initializer()
sess.run(init)
print(sess.run(linear_model, {x: [1, 2, 3, 4]}))

# Loss: sum of squared differences between predictions and targets.
y = tf.placeholder(tf.float32)
squared_deltas = tf.square(linear_model - y)
loss = tf.reduce_sum(squared_deltas)
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))

# Manually assign the optimal parameters to confirm the loss drops to zero.
fixW = tf.assign(W, [-1.])
fixb = tf.assign(b, [1.])
sess.run([fixW, fixb])
print(sess.run(loss, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]}))
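# Hand-computed check of the two loss values above: with W=0.3, b=-0.3 the predictions
# are [0, 0.3, 0.6, 0.9], so the loss is 0^2 + 1.3^2 + 2.6^2 + 3.9^2 = 23.66;
# with W=-1, b=1 the predictions match y exactly and the loss is 0.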
# Train with gradient descent: re-initialize the variables, then run the train op repeatedly.
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)
sess.run(init)  # reset W and b to their (poor) initial values
for i in range(1000):
    sess.run(train, {x: [1, 2, 3, 4], y: [0, -1, -2, -3]})
print(sess.run([W, b]))

# sess.graph contains the graph definition; it enables the Graph Visualizer.
file_writer = tf.summary.FileWriter('visualization/log', sess.graph)
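# A minimal usage sketch (assumption: TensorBoard is installed alongside TensorFlow 1.x).
# After this script has run, the written graph can be inspected by launching
#     tensorboard --logdir=visualization/log
# and opening the "Graphs" tab in the browser. Closing the session releases its resources.
sess.close()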