# lab-07-2-linear_regression_without_min_max.py
# (forked from hunkim/DeepLearningZeroToAll)
import tensorflow as tf
import numpy as np
tf.set_random_seed(777) # for reproducibility
xy = np.array([[828.659973, 833.450012, 908100, 828.349976, 831.659973],
               [823.02002, 828.070007, 1828100, 821.655029, 828.070007],
               [819.929993, 824.400024, 1438100, 818.97998, 824.159973],
               [816, 820.958984, 1008100, 815.48999, 819.23999],
               [819.359985, 823, 1188100, 818.469971, 818.97998],
               [819, 823, 1198100, 816, 820.450012],
               [811.700012, 815.25, 1098100, 809.780029, 813.669983],
               [809.51001, 816.659973, 1398100, 804.539978, 809.559998]])
x_data = xy[:, 0:-1]
y_data = xy[:, [-1]]
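# Note the scale mismatch: the third column (traded volume, ~1e6) is several
# orders of magnitude larger than the price columns (~800), and no
# normalization is applied before training.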
# Placeholders for tensors that will always be fed.
X = tf.placeholder(tf.float32, shape=[None, 4])
Y = tf.placeholder(tf.float32, shape=[None, 1])
W = tf.Variable(tf.random_normal([4, 1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# Hypothesis
hypothesis = tf.matmul(X, W) + b
# Simplified cost/loss function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Minimize
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1e-5)
train = optimizer.minimize(cost)
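# With un-normalized features, even this small learning rate lets the weight
# updates blow up; the cost diverges to inf and then nan in the output below.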
# Launch the graph in a session.
sess = tf.Session()
# Initializes global variables in the graph.
sess.run(tf.global_variables_initializer())
for step in range(101):
    cost_val, hy_val, _ = sess.run(
        [cost, hypothesis, train], feed_dict={X: x_data, Y: y_data})
    print(step, "Cost: ", cost_val, "\nPrediction:\n", hy_val)
'''
0 Cost: 2.45533e+12
Prediction:
[[-1104436.375]
[-2224342.75 ]
[-1749606.75 ]
[-1226179.375]
[-1445287.125]
[-1457459.5 ]
[-1335740.5 ]
[-1700924.625]]
1 Cost: 2.69762e+27
Prediction:
[[ 3.66371490e+13]
[ 7.37543360e+13]
[ 5.80198785e+13]
[ 4.06716290e+13]
[ 4.79336847e+13]
[ 4.83371348e+13]
[ 4.43026590e+13]
[ 5.64060907e+13]]
2 Cost: inf
Prediction:
[[ -1.21438790e+21]
[ -2.44468702e+21]
[ -1.92314724e+21]
[ -1.34811610e+21]
[ -1.58882674e+21]
[ -1.60219962e+21]
[ -1.46847142e+21]
[ -1.86965602e+21]]
3 Cost: inf
Prediction:
[[ 4.02525216e+28]
[ 8.10324465e+28]
[ 6.37453079e+28]
[ 4.46851237e+28]
[ 5.26638074e+28]
[ 5.31070676e+28]
[ 4.86744608e+28]
[ 6.19722623e+28]]
4 Cost: inf
Prediction:
[[ -1.33422428e+36]
[ -2.68593010e+36]
[ -2.11292430e+36]
[ -1.48114879e+36]
[ -1.74561303e+36]
[ -1.76030542e+36]
[ -1.61338091e+36]
[ -2.05415459e+36]]
5 Cost: inf
Prediction:
[[ inf]
[ inf]
[ inf]
[ inf]
[ inf]
[ inf]
[ inf]
[ inf]]
6 Cost: nan
Prediction:
[[ nan]
[ nan]
[ nan]
[ nan]
[ nan]
[ nan]
[ nan]
[ nan]]
'''
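
# The divergence above is the point of this example: the traded-volume column
# is roughly six orders of magnitude larger than the price columns, so plain
# gradient descent overshoots and the cost explodes to inf and then nan.
# Below is a minimal sketch of the usual fix, scaling every column into [0, 1]
# before training, reusing the `xy` array defined above. The helper name
# `min_max_scaler` and the epsilon value are illustrative choices, not part of
# this file.
def min_max_scaler(data, eps=1e-7):
    # Column-wise min-max normalization: map each column into [0, 1].
    numerator = data - np.min(data, axis=0)
    denominator = np.max(data, axis=0) - np.min(data, axis=0)
    return numerator / (denominator + eps)  # eps guards against division by zero


xy_scaled = min_max_scaler(xy)   # normalize all columns, including the target
x_scaled = xy_scaled[:, 0:-1]
y_scaled = xy_scaled[:, [-1]]
# Re-running the same training loop with feed_dict={X: x_scaled, Y: y_scaled}
# keeps the cost finite and decreasing instead of diverging.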