main.py
import os
import numpy as np
import tensorflow as tf
import gym
import argparse
from src import ddpg as models
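
# Train a DDPG agent on the Pendulum-v1 environment, using either a uniform
# or a prioritized replay buffer (models.DDPG / models.DDPGPriority from
# src.ddpg). The exact interface of those classes lives in that module; the
# calls below only reflect how this script uses them.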
if __name__ == "__main__":
    import matplotlib.pyplot as plt

    parser = argparse.ArgumentParser()
    parser.add_argument("--episodes", type=int, default=250)
    parser.add_argument("--transitions", type=int, default=32)
    parser.add_argument("--gpu", action="store_true")
    parser.add_argument("--bn", action="store_true")
    parser.add_argument("--chkpt", action="store_true")
    parser.add_argument("--render", action="store_true")
    parser.add_argument("--train", action="store_true")
    parser.add_argument("--priority", action="store_true")
    parser.add_argument("--save", action="store_true")
    args = parser.parse_args()

    episodes = args.episodes
    transitions = args.transitions
    gpu = args.gpu
    bn = args.bn
    load_checkpoint = args.chkpt
    train = args.train
    render = args.render
    priority = args.priority
    save = args.save

    print(f"Run Config:"
          f"\n episodes {episodes}"
          f"\n transitions {transitions}"
          f"\n gpu {gpu}"
          f"\n train {train}"
          f"\n render {render}"
          f"\n priority {priority}"
          f"\n save {save}")

    if priority:
        chkpt_dir = "./tmp/priority"
    else:
        chkpt_dir = "./tmp/uniform"

    env = gym.make("Pendulum-v1")
    action_shape = env.action_space.shape
    state_shape = env.observation_space.shape
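    # Pendulum-v1 observations are 3-dimensional (cos(theta), sin(theta),
    # angular velocity) and actions are 1-dimensional (torque), so
    # state_shape[-1] == 3 and action_shape[-1] == 1 below.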
    if gpu:
        physical_devices = tf.config.list_physical_devices('GPU')
        tf.config.experimental.set_memory_growth(physical_devices[0], True)
    else:
        physical_devices = tf.config.list_physical_devices('CPU')
        tf.config.set_visible_devices(physical_devices)
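    # Note: set_memory_growth only takes effect if it runs before the GPU has
    # been initialized; the CPU branch restricts the visible devices,
    # presumably to keep execution off the GPU entirely.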
    current_state = env.reset()
    current_state = tf.expand_dims(current_state, 0)
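    # --priority selects DDPGPriority, which (judging by the alpha/beta
    # arguments) uses prioritized experience replay in the style of
    # Schaul et al.: alpha controls how strongly TD-error priorities skew
    # sampling, beta the importance-sampling correction. Without the flag a
    # uniform replay buffer is used.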
    if priority:
        ddpg = models.DDPGPriority(alpha=0.1,
                                   beta=0.4,
                                   states=state_shape[-1],
                                   actions=action_shape[-1],
                                   batch_size=60,
                                   buffer_size=1e6)
    else:
        ddpg = models.DDPG(state_shape[-1],
                           action_shape[-1],
                           batch_size=60,
                           buffer_size=1e6)
    if load_checkpoint:
        ddpg.load_models(chkpt_dir)
        ddpg.load_buffer(chkpt_dir)
    else:
        _ = ddpg(current_state)               # build current models
        _ = ddpg(current_state, target=True)  # build target models
        ddpg.update_targets(1)                # copy weights to target networks
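    # update_targets(1) is a hard copy (tau = 1). During training DDPG
    # typically soft-updates the target weights each step, e.g. (a sketch of
    # the standard rule, not necessarily how src.ddpg implements it):
    #     target_w = tau * online_w + (1 - tau) * target_w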
    best = env.reward_range[0]
    episodes_reward = []
    rewards = []
    i = 0  # total environment steps across all episodes

    for ep in range(episodes):
        current_state = env.reset()
        terminal = False
        episode_reward = []
        while not terminal:
            action, _ = ddpg(tf.convert_to_tensor(current_state[None, :]), exploration=train)
            action = action[0]
            next_state, reward, terminal, info = env.step(action)
            if train:
                ddpg.store_transition(current_state, action, reward, next_state, terminal)
                # start learning only once enough transitions have been collected
                if i > transitions:
                    ddpg.train_step()
            current_state = next_state
            episode_reward.append(reward)
            if render:
                env.render()
            if terminal:
                total_reward = tf.reduce_sum(episode_reward).numpy()
                episodes_reward.append(total_reward)
                print(f"Episode {ep} Reward {total_reward} AVG {np.mean(episodes_reward[-10:])}")
                if best < episodes_reward[-1]:
                    best = episodes_reward[-1]
                    # checkpoint whenever a new best episode return is reached
                    if save:
                        print("Saving model...")
                        ddpg.save_models(chkpt_dir)
                        ddpg.save_buffer(chkpt_dir)
            i += 1
    np.save(os.path.join(chkpt_dir, "episodes_reward.npy"), episodes_reward)

    fig, ax = plt.subplots()
    ax.plot(episodes_reward)
    ax.set_title("Episodes Reward")
    ax.set_xlabel("Episodes")
    ax.set_ylabel("Reward")
    plt.savefig(os.path.join(chkpt_dir, "episodes_reward.png"))
    plt.show()
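
# Example invocations (flags as defined above):
#   python main.py --train --episodes 250 --save       # train with uniform replay
#   python main.py --train --priority --gpu --save     # train with prioritized replay on GPU
#   python main.py --chkpt --render                    # load a checkpoint and watch the policy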