import cv2
import numpy as np
import sys
from random import randrange, randint
from ale_python_interface import ALEInterface
import skflow
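
# Q-learning with function approximation on Atari Breakout via the Arcade
# Learning Environment (ALE). The state is the screen downscaled to 40x40
# grayscale; Q(s, a) is a small DNN regressor over the flattened
# (state, action) feature vector (40*40 pixels + 1 action index = 1601 inputs).
# Run with 'disp' as the first command-line argument to display the game.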
g = 0.9     # discount factor (gamma)
prob = 0.9  # probability of taking the greedy action (epsilon-greedy, epsilon = 1 - prob)

def append(x, y):
    # np.append flattens its arguments into a single 1-D feature vector
    return np.append(x, y)
regressor = skflow.TensorFlowDNNRegressor(hidden_units=[10, 10], learning_rate=0.01)
# Warm-start the network on one random sample of the right width (1601
# features) so that predict() can be called before any real update.
regressor.fit(np.random.randn(1, 1601), np.array([0.]))
def Q(s, a):
    # reshape the single (state, action) sample into a batch of one
    return regressor.predict(append(s, a).reshape(1, -1))
def detectState(ale):
    # downscale the grayscale screen to a 40x40 state representation
    return cv2.resize(ale.getScreenGrayscale(), (40, 40))
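
# Main training loop: one episode per iteration of the outer loop, with a
# checkpoint of the Q network when each episode ends.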
ale = ALEInterface()
ale.loadROM("breakout.bin")
actionSet = ale.getMinimalActionSet()[2:]  # drop NOOP and FIRE, keep RIGHT/LEFT
while True:
    ale.reset_game()
    while not ale.game_over():
        # optional live display when run with 'disp' as the first argument
        if len(sys.argv) > 1 and sys.argv[1] == 'disp':
            cv2.imshow('', cv2.resize(ale.getScreenRGB(), (600, 600)))
            cv2.waitKey(1)
        s = detectState(ale)
        # epsilon-greedy: take the greedy action with probability prob,
        # otherwise explore with a uniformly random action
        if randint(1, 10) <= prob * 10:
            qvals = [Q(s, action)[0] for action in actionSet]
            a = actionSet[qvals.index(max(qvals))]  # greedy (argmax) action
        else:
            a = actionSet[randrange(len(actionSet))]  # random exploratory action
        X = append(s, a)       # features for the (state, action) just chosen
        r = ale.act(a)         # take the action, collect the reward
        s_ = detectState(ale)  # next state
        # Q-learning target: r + gamma * max_a' Q(s', a')
        qvals = [Q(s_, action)[0] for action in actionSet]
        y = r + g * max(qvals)
        # One regression step toward the target. Note: depending on the skflow
        # version, repeated fit() calls may re-initialize the network; if so,
        # continue_training=True or partial_fit would be needed (assumption).
        regressor.fit(X.reshape(1, -1), np.array([y]))
    regressor.save('./regressor')  # checkpoint the Q network after each episode