# train.py
# Script to train policies in Isaac Gym
#
# Copyright (c) 2018-2021, NVIDIA Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import isaacgym  # noqa: F401 -- must be imported before any torch-dependent modules (e.g. rl_games)
import os

import hydra
from omegaconf import DictConfig, OmegaConf
from hydra.utils import to_absolute_path
from isaacgymenvs.utils.reformat import omegaconf_to_dict, print_dict
from isaacgymenvs.utils.rlgames_utils import RLGPUEnv, RLGPUAlgoObserver, get_rlgames_env_creator
from isaacgymenvs.utils.utils import set_np_formatting, set_seed
from rl_games.common import env_configurations, vecenv
from rl_games.torch_runner import Runner

## OmegaConf & Hydra Config

# Resolvers used in hydra configs (see https://omegaconf.readthedocs.io/en/2.1_branch/usage.html#resolvers)
OmegaConf.register_new_resolver('eq', lambda x, y: x.lower() == y.lower())
OmegaConf.register_new_resolver('contains', lambda x, y: x.lower() in y.lower())
OmegaConf.register_new_resolver('if', lambda pred, a, b: a if pred else b)

# Allows us to resolve default arguments which are copied in multiple places
# in the config; used primarily for `num_envs`.
OmegaConf.register_new_resolver('resolve_default', lambda default, arg: default if arg == '' else arg)
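
# For illustration, these resolvers let a Hydra YAML config contain entries
# such as the following (hypothetical values, not taken verbatim from this
# repo's cfg files):
#
#   num_envs: ${resolve_default:4096,${...num_envs}}
#   use_gpu: ${eq:${..sim_device},"gpu"}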

@hydra.main(config_name="config", config_path="./cfg")
def launch_rlg_hydra(cfg: DictConfig):
    # ensure checkpoints can be specified as relative paths
    if cfg.checkpoint:
        cfg.checkpoint = to_absolute_path(cfg.checkpoint)

    cfg_dict = omegaconf_to_dict(cfg)
    print_dict(cfg_dict)

    # set numpy formatting (affects printing only)
    set_np_formatting()

    # set the seed; a seed of -1 picks a random one
    cfg.seed = set_seed(cfg.seed, torch_deterministic=cfg.torch_deterministic)

    # `create_rlgpu_env` is the environment construction function that is passed
    # to RL Games and called internally. The helper function lets us bake the
    # environment config into the creator.
    create_rlgpu_env = get_rlgames_env_creator(
        omegaconf_to_dict(cfg.task),
        cfg.task_name,
        cfg.sim_device,
        cfg.rl_device,
        cfg.graphics_device_id,
        cfg.headless,
        multi_gpu=cfg.multi_gpu,
    )
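    # Note: the returned creator is a plain callable, so it could also be
    # invoked directly, e.g. `env = create_rlgpu_env()` (illustrative only; in
    # this script RL Games calls it for us via the 'rlgpu' registration below).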

    # register the rl-games adapter to use inside the runner
    vecenv.register('RLGPU',
                    lambda config_name, num_actors, **kwargs: RLGPUEnv(config_name, num_actors, **kwargs))
    env_configurations.register('rlgpu', {
        'vecenv_type': 'RLGPU',
        'env_creator': lambda **kwargs: create_rlgpu_env(**kwargs),
    })
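    # RL Games looks up the 'rlgpu' entry above through the `env_name` field of
    # the train config (e.g. `params.config.env_name: rlgpu` in the rl-games
    # YAML -- stated here as an assumption about the config layout, not
    # something defined in this file).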

    # convert the training config into a plain dictionary for rl-games
    rlg_config_dict = omegaconf_to_dict(cfg.train)

    # create the runner and load the settings
    runner = Runner(RLGPUAlgoObserver())
    runner.load(rlg_config_dict)
    runner.reset()

    # dump the resolved config to the experiment directory for reproducibility
    experiment_dir = os.path.join('runs', cfg.train.params.config.name)
    os.makedirs(experiment_dir, exist_ok=True)
    with open(os.path.join(experiment_dir, 'config.yaml'), 'w') as f:
        f.write(OmegaConf.to_yaml(cfg))

    runner.run({
        'train': not cfg.test,
        'play': cfg.test,
    })

if __name__ == "__main__":
    launch_rlg_hydra()
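
# Example invocations, using Hydra's key=value override syntax (the task name
# and checkpoint path are illustrative and depend on the configs under ./cfg):
#   python train.py task=Ant headless=True
#   python train.py task=Ant test=True checkpoint=runs/Ant/nn/Ant.pth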