q2_neural.py
import numpy as np
import random
from q1_softmax import softmax
from q2_sigmoid import sigmoid, sigmoid_grad
from q2_gradcheck import gradcheck_naive
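# Helper modules from earlier parts of the assignment, as used below:
#   softmax         -- row-wise softmax over the class scores
#   sigmoid         -- element-wise logistic function
#   sigmoid_grad    -- gradient of sigmoid, evaluated from the activation sigmoid(z)
#   gradcheck_naive -- numerical gradient check used in the sanity checks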
def forward_backward_prop(data, labels, params, dimensions):
    """
    Forward and backward propagation for a two-layer sigmoidal network.

    Compute the forward propagation, the cross-entropy cost, and the
    backward propagation for the gradients of all parameters.
    """
    ### Unpack network parameters (do not modify)
    ofs = 0
    Dx, H, Dy = (dimensions[0], dimensions[1], dimensions[2])
    N = labels.shape[0]

    W1 = np.reshape(params[ofs:ofs + Dx * H], (Dx, H))
    ofs += Dx * H
    b1 = np.reshape(params[ofs:ofs + H], (1, H))
    ofs += H
    W2 = np.reshape(params[ofs:ofs + H * Dy], (H, Dy))
    ofs += H * Dy
    b2 = np.reshape(params[ofs:ofs + Dy], (1, Dy))
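    # Parameter shapes after unpacking:
    #   W1: (Dx, H)   b1: (1, H)   W2: (H, Dy)   b2: (1, Dy)
    # so params holds (Dx + 1) * H + (H + 1) * Dy values in total.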
    ### YOUR CODE HERE: forward propagation
    x = data                      # (N, Dx) input, one example per row
    a1 = x                        # input layer activation
    z2 = np.dot(a1, W1) + b1      # (N, H) hidden pre-activation
    a2 = sigmoid(z2)              # (N, H) hidden activation
    z3 = np.dot(a2, W2) + b2      # (N, Dy) output scores
    a3 = softmax(z3)              # (N, Dy) normalized class probabilities

    # Cross-entropy cost (https://en.wikipedia.org/wiki/Cross_entropy, lecture 6):
    # labels is one-hot, so labels * a3 keeps only the predicted probability of
    # the true class in each row; the cost is averaged over the N examples.
    cost = -np.sum(np.log(np.sum(labels * a3, axis=1))) / N
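    # Written out, the quantity computed above is the averaged cross-entropy
    #     J = -(1/N) * sum_n sum_k y_{nk} * log(yhat_{nk})
    # with y the one-hot label matrix and yhat = a3 = softmax(z3).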
    ### END YOUR CODE

    ### YOUR CODE HERE: backward propagation
    error = (a3 - labels) / N                                      # dJ/dz3
    gradW2 = np.dot(np.transpose(a2), error)                       # (H, Dy)
    gradb2 = np.sum(error, axis=0)                                 # (Dy,)
    delta2 = sigmoid_grad(a2) * np.dot(error, np.transpose(W2))    # dJ/dz2, (N, H)
    gradW1 = np.dot(np.transpose(x), delta2)                       # (Dx, H)
    gradb1 = np.sum(delta2, axis=0)                                # (H,)
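    # Chain-rule sketch for the gradients above (batch-averaged):
    #   delta3 = dJ/dz3 = (a3 - labels) / N        (softmax combined with cross-entropy)
    #   dJ/dW2 = a2^T delta3,   dJ/db2 = column sums of delta3
    #   delta2 = (delta3 W2^T) * sigma'(z2), where sigma'(z2) = sigmoid_grad(a2)
    #            since sigmoid_grad is evaluated from the activation a2 = sigmoid(z2)
    #   dJ/dW1 = x^T delta2,    dJ/db1 = column sums of delta2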
    ### END YOUR CODE

    ### Stack gradients (do not modify)
    grad = np.concatenate((gradW1.flatten(), gradb1.flatten(),
                           gradW2.flatten(), gradb2.flatten()))
    return cost, grad

def sanity_check():
    """
    Set up fake data and parameters for the neural network, and test using
    gradcheck.
    """
    print("Running sanity check...")
    N = 20                                        # number of examples (windows) to classify
    dimensions = [10, 5, 10]
    data = np.random.randn(N, dimensions[0])      # each row is a datum
    labels = np.zeros((N, dimensions[2]))
    for i in range(N):
        # one-hot labels: each row gets probability 1 on one random class
        labels[i, random.randint(0, dimensions[2] - 1)] = 1
    params = np.random.randn((dimensions[0] + 1) * dimensions[1] + (
        dimensions[1] + 1) * dimensions[2], )
    gradcheck_naive(lambda params: forward_backward_prop(data, labels, params,
                                                         dimensions), params)

def your_sanity_checks():
    """
    Use this space to add any additional sanity checks by running:
        python q2_neural.py
    This function will not be called by the autograder, nor will
    your additional tests be graded.
    """
    print("Running your sanity checks...")
    ### YOUR CODE HERE
    raise NotImplementedError
    ### END YOUR CODE

if __name__ == "__main__":
    sanity_check()
    your_sanity_checks()