run-soccer.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# By Lilian Besson (Naereen)
# https://github.com/Naereen/gym-nes-mario-bros
# MIT License https://lbesson.mit-license.org/
#
from __future__ import division, print_function  # Python 2 compatibility

import os
from collections import deque

import gym
from gym import wrappers
import nesgym
import numpy as np

from dqn.model import DoubleDQN
from dqn.utils import PiecewiseSchedule

def get_env():
    """Build the NES soccer environment, wrap it for DQN training, and write Monitor output to /tmp/soccer/gym."""
    env = gym.make('nesgym/NekketsuSoccerPK-v0')
    env = nesgym.wrap_nes_env(env)
    expt_dir = '/tmp/soccer/'
    env = wrappers.Monitor(env, os.path.join(expt_dir, "gym"), force=True)
    return env


def soccer_main():
    env = get_env()
    last_obs = env.reset()

    max_timesteps = 40000000

    # Exploration schedule for epsilon-greedy action selection.
    exploration_schedule = PiecewiseSchedule(
        [
            (0, 1.0),
            (1e5, 0.1),
            (max_timesteps / 2, 0.01),
        ], outside_value=0.01
    )

    dqn = DoubleDQN(image_shape=(84, 84, 1),
                    num_actions=env.action_space.n,
                    # training_starts=10000,
                    # target_update_freq=4000,
                    # training_batch_size=64,
                    training_starts=1000,
                    target_update_freq=500,
                    training_batch_size=3,
                    frame_history_len=4,
                    replay_buffer_size=100000,  # XXX reduce if MemoryError
                    exploration=exploration_schedule
                    )
    reward_sum_episode = 0
    num_episodes = 0
    episode_rewards = deque(maxlen=100)

    for step in range(max_timesteps):
        if step > 0 and step % 100 == 0:
            print('step: ', step, 'episodes:', num_episodes, 'epsilon:', exploration_schedule.value(step),
                  'learning rate:', dqn.get_learning_rate(), 'last 100 training loss mean', dqn.get_avg_loss(),
                  'last 100 episode mean rewards: ', np.mean(np.array(episode_rewards, dtype=np.float32)))

        # env.render()
        action = dqn.choose_action(step, last_obs)
        obs, reward, done, info = env.step(action)
        reward_sum_episode += reward
        dqn.learn(step, action, reward, done, info)
        print("Step", step, " using action =", action, "gave reward =", reward)  # DEBUG

        if done:
            last_obs = env.reset()
            episode_rewards.append(reward_sum_episode)
            reward_sum_episode = 0
            num_episodes += 1
        else:
            last_obs = obs


if __name__ == "__main__":
    soccer_main()
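

# --- Usage sketch (added for illustration, not part of the original script) ---
# The exploration schedule above drives epsilon-greedy action selection.
# Assuming dqn.utils.PiecewiseSchedule interpolates linearly between the
# (step, value) endpoints and returns `outside_value` beyond the last one,
# epsilon would evolve roughly like this over training:
#
#   exploration_schedule.value(0)         # 1.0  -> fully random actions at the start
#   exploration_schedule.value(50000)     # about 0.55, halfway through the first segment
#   exploration_schedule.value(100000)    # 0.1
#   exploration_schedule.value(30000000)  # 0.01 (outside_value, past max_timesteps / 2)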