import gym
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch as T
from util import plot_learning_curve
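
# Naive deep Q-learning on CartPole: a single linear Q-network is trained
# online, one transition at a time, with epsilon-greedy exploration.
# "Naive" here means no replay buffer and no target network: the same
# network produces both the prediction and the bootstrap target.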


class LinearDeepQNetwork(nn.Module):
    def __init__(self, lr, n_actions, input_dims):
        super(LinearDeepQNetwork, self).__init__()

        self.fc1 = nn.Linear(*input_dims, 128)
        self.fc2 = nn.Linear(128, n_actions)

        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, state):
        layer1 = F.relu(self.fc1(state))
        # no activation on the output: these are raw Q-value estimates
        actions = self.fc2(layer1)

        return actions
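
# For CartPole-v1 this maps the 4-dimensional observation through a single
# 128-unit hidden layer to 2 Q-values, one per discrete action.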


class Agent:
    def __init__(self, input_dims, n_actions, lr, gamma=0.99,
                 epsilon=1.0, eps_dec=1e-5, eps_min=0.01):
        self.lr = lr
        self.input_dims = input_dims
        self.n_actions = n_actions
        self.gamma = gamma
        self.epsilon = epsilon
        self.eps_dec = eps_dec
        self.eps_min = eps_min
        self.action_space = [i for i in range(self.n_actions)]

        # composition: the agent has a Q estimate; it is not a Q estimate
        self.Q = LinearDeepQNetwork(self.lr, self.n_actions, self.input_dims)

    def choose_action(self, observation):
        # epsilon-greedy: exploit the current Q estimate with probability
        # 1 - epsilon, otherwise sample a random action
        if np.random.random() > self.epsilon:
            state = T.tensor(observation, dtype=T.float).to(self.Q.device)
            actions = self.Q.forward(state)
            # .item() extracts the action index as a plain Python int
            action = T.argmax(actions).item()
        else:
            action = np.random.choice(self.action_space)

        return action

    def decrement_epsilon(self):
        # linear decay, clamped at the exploration floor
        self.epsilon = max(self.epsilon - self.eps_dec, self.eps_min)
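
    # With the defaults (epsilon=1.0, eps_dec=1e-5, eps_min=0.01) the decay
    # is linear: epsilon reaches its floor after roughly
    # (1.0 - 0.01) / 1e-5 = 99,000 learn() calls, i.e. environment steps.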

    def learn(self, state, action, reward, state_2):
        self.Q.optimizer.zero_grad()
        state = T.tensor(state, dtype=T.float).to(self.Q.device)
        action = T.tensor(action).to(self.Q.device)
        reward = T.tensor(reward).to(self.Q.device)
        state_2 = T.tensor(state_2, dtype=T.float).to(self.Q.device)

        # Q(s, a) for the action actually taken
        q_pred = self.Q.forward(state)[action]

        # bootstrap target from the greedy action in the next state;
        # detach() treats the target as a constant, so gradients flow only
        # through q_pred (the standard semi-gradient Q-learning update)
        q_next = self.Q.forward(state_2).max().detach()
        # note: terminal transitions are not masked here; a fuller
        # implementation would zero q_next when the episode is done
        q_target = reward + self.gamma * q_next

        loss = self.Q.loss(q_pred, q_target)
        loss.backward()
        self.Q.optimizer.step()
        self.decrement_epsilon()
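
# For reference, learn() implements the one-step Q-learning update,
# minimising (r + gamma * max_a' Q(s', a') - Q(s, a))^2 on each transition.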


if __name__ == '__main__':
    # uses the classic (pre-0.26) gym API: reset() returns only the
    # observation and step() returns a 4-tuple
    env = gym.make('CartPole-v1')
    n_games = 100
    scores = []
    eps_history = []

    agent = Agent(lr=0.0001, input_dims=env.observation_space.shape,
                  n_actions=env.action_space.n)

    for i in range(n_games):
        score = 0
        done = False
        obs = env.reset()

        while not done:
            action = agent.choose_action(obs)
            obs_2, reward, done, info = env.step(action)
            score += reward
            agent.learn(obs, action, reward, obs_2)
            obs = obs_2
        scores.append(score)
        eps_history.append(agent.epsilon)

    filename = 'cartpole_naive_dqn.png'
    x = [i + 1 for i in range(n_games)]
    plot_learning_curve(x, scores, eps_history, filename)
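
# Note: plot_learning_curve comes from the local util module (not shown
# here); it is assumed to plot per-episode score alongside epsilon and
# save the figure to `filename`.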