@@ -1,4 +1,5 @@
 # posterior inference by laplace approximation
+from autograd.misc.optimizers import adam, sgd
 from autograd import grad
 import autograd.numpy as np
 import autograd.scipy as scipy
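The new import pulls in autograd's bundled first-order optimizers. Their gradient callback is invoked as g(params, iteration), which is why training_loss grows a second argument further down in this diff. A minimal usage sketch; the keyword defaults are what autograd 1.x ships with, quoted from memory, so treat them as assumptions:

    # sgd here is SGD with momentum; adam is imported but unused in this commit
    w_opt = sgd(g, w0, num_iters=200, step_size=0.1)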
@@ -12,23 +13,25 @@ def predict(w, x):
 
 def log_sigmoid(x):
     a = np.array([np.zeros_like(x), -x])
-    return -scipy.special.logsumexp(a)
+    return -scipy.special.logsumexp(a, axis=0)  # reduce over the stacked pair, elementwise in x
 
 
 def nll_loss(w, x, y, alpha=None):
     score = np.dot(x, w)
-    logp1 = log_sigmoid(score)
-    logp0 = -score + logp1
-    loss = y*logp0 + (1 - y)*logp1
-    reg = alpha*np.dot(w, w) if alpha else 0
-    return sum(loss) + reg
+    logp1 = log_sigmoid(score)  # log p(y=1 | x, w)
+    logp0 = -score + logp1      # log p(y=0 | x, w) = log sigmoid(-score)
+    loss = -np.sum(y*logp1 + (1 - y)*logp0)
+    reg = 0.5*alpha*np.sum(w**2) if alpha else 0  # Gaussian prior N(0, I/alpha)
+    return loss + reg
 
 
 def compute_precision(x, y, w, alpha):
     d = np.size(x, 1)
     y_hat = predict(w, x)
     R = np.diag(y_hat*(1 - y_hat))
-    precision = alpha*np.eye(d) + x.T.dot(R).dot(x)
-    return precision + 1e-9*np.eye(d)
+    precision = 1e-9*np.eye(d) + alpha*np.eye(d) + x.T.dot(R).dot(x)  # jitter keeps it invertible
+    return precision
 
 
 def predict_mc(mu, sigma, x, T=100):
     ps = []
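This hunk fixes the core math. Without an axis argument, logsumexp collapsed the whole stacked array to a single scalar; with axis=0 the identity log sigmoid(x) = -log(1 + exp(-x)) = -logsumexp([0, -x]) is applied elementwise. nll_loss now negates the log-likelihood so that minimizing it maximizes the posterior, and compute_precision builds the Laplace precision Sigma^{-1} = alpha*I + X^T R X with R = diag(p_i(1 - p_i)), which is exactly the Hessian of the regularized NLL at the mode. A quick consistency check, assuming the full file defines predict(w, x) as sigmoid(np.dot(x, w)) (its body lies outside the hunks shown):

    from autograd import hessian
    # the Laplace precision should match the Hessian of nll_loss at the MAP estimate
    H = hessian(lambda v: nll_loss(v, x, y, alpha=0.1))(w)
    print(np.allclose(H, compute_precision(x, y, w, alpha=0.1), atol=1e-6))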
@@ -44,30 +47,34 @@ def predict_var(mu, sigmainv, x):
     kappa = np.sqrt(1 + sigma2_a*np.pi*.125)
     return sigmoid(mu_a/kappa)
 
+
 x = np.array([[0.52, 1.12, 0.77],
               [0.88, -1.08, 0.15],
               [0.52, 0.06, -1.30],
               [0.74, -2.49, 1.39],
               [0.52, 1.12, 0.77]])
+
 y = np.array([True, True, False, True, False])
 
 
 x = np.hstack([np.ones((len(x), 1)), x])
-training_loss = lambda w: nll_loss(w, x, y, alpha=1)
+training_loss = lambda w, i: nll_loss(w, x, y, alpha=0.1)  # optimizer callbacks take (params, iteration)
 g = grad(training_loss)
-w = np.array([1, 0, 0, 0], dtype=np.float)
-print("Initial loss:", training_loss(w))
-for i in range(100):
-    w -= g(w) * 0.01
-print("Trained loss:", training_loss(w))
+w = np.array([1., 1., 1., 1.])
+print("Initial loss:", training_loss(w, 0))
+
+w = sgd(g, w)  # replaces the hand-rolled gradient-descent loop
+print("Trained loss:", training_loss(w, 0))
 
 pred = predict(w, x) > 0.5
 
 print(y.astype(int))
-print(predict(w, x))
+print('ml', predict(w, x))
 
 
-sigmainv = compute_precision(x, y, w, alpha=1)
+sigmainv = compute_precision(x, y, w, alpha=0.1)
 
-print(predict_var(w, sigmainv, x))
-print(predict_mc(w, np.linalg.inv(sigmainv), x))
+print('var', predict_var(w, sigmainv, x))
+print('mc', predict_mc(w, np.linalg.inv(sigmainv), x))
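The driver now labels its three predictive estimates: 'ml' is the plug-in prediction at the MAP weights, 'var' is the deterministic probit approximation sigmoid(mu_a / sqrt(1 + pi*sigma2_a/8)), and 'mc' is a Monte Carlo average over weight samples drawn from the Laplace posterior N(w_MAP, Sigma). The bodies of predict_mc and predict_var sit mostly outside the hunks shown, so the following is a sketch consistent with the visible context lines, not the commit's exact code:

    def predict_mc(mu, sigma, x, T=100):
        # average plug-in predictions over T samples from the Laplace posterior
        ps = [predict(np.random.multivariate_normal(mu, sigma), x) for _ in range(T)]
        return np.mean(ps, axis=0)

    def predict_var(mu, sigmainv, x):
        mu_a = np.dot(x, mu)                      # posterior mean of the score a = x.w
        sigma2_a = np.sum(x * np.linalg.solve(sigmainv, x.T).T, axis=1)  # x_i^T Sigma x_i
        kappa = np.sqrt(1 + sigma2_a*np.pi*.125)  # MacKay's probit correction
        return sigmoid(mu_a/kappa)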