initial commit
207
scripts/rl_games/play.py
Normal file
@@ -0,0 +1,207 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint if an RL agent from RL-Games."""

"""Launch Isaac Sim Simulator first."""

import argparse

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent from RL-Games.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument(
    "--use_pretrained_checkpoint",
    action="store_true",
    help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument(
    "--use_last_checkpoint",
    action="store_true",
    help="When no checkpoint provided, use the last saved model. Otherwise use the best saved model.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
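# Example invocation (a sketch only; the task name registered by FLEXR_v0.tasks is a placeholder assumption,
# and AppLauncher appends further flags such as --headless below):
#   python scripts/rl_games/play.py --task <TASK_NAME> --num_envs 32 --checkpoint /path/to/model.pth
# Without --checkpoint, the script falls back to a checkpoint found under logs/rl_games/<experiment_name>.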
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""


import gymnasium as gym
import math
import os
import time
import torch

from rl_games.common import env_configurations, vecenv
from rl_games.common.player import BasePlayer
from rl_games.torch_runner import Runner

from isaaclab.envs import DirectMARLEnv, multi_agent_to_single_agent
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict
from isaaclab.utils.pretrained_checkpoint import get_published_pretrained_checkpoint

from isaaclab_rl.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import get_checkpoint_path, load_cfg_from_registry, parse_env_cfg

import FLEXR_v0.tasks  # noqa: F401


def main():
    """Play with RL-Games agent."""
    # parse env configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg = load_cfg_from_registry(args_cli.task, "rl_games_cfg_entry_point")

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    # find checkpoint
    if args_cli.use_pretrained_checkpoint:
        resume_path = get_published_pretrained_checkpoint("rl_games", args_cli.task)
        if not resume_path:
            print("[INFO] Unfortunately a pre-trained checkpoint is currently unavailable for this task.")
            return
    elif args_cli.checkpoint is None:
        # specify directory for logging runs
        run_dir = agent_cfg["params"]["config"].get("full_experiment_name", ".*")
        # specify name of checkpoint
        if args_cli.use_last_checkpoint:
            checkpoint_file = ".*"
        else:
            # this loads the best checkpoint
            checkpoint_file = f"{agent_cfg['params']['config']['name']}.pth"
        # get path to previous checkpoint
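        # note: get_checkpoint_path (to our understanding) treats run_dir and checkpoint_file as regex patterns
        #   and resolves them to the most recent matching run/checkpoint inside the run's "nn" sub-directory.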
        resume_path = get_checkpoint_path(log_root_path, run_dir, checkpoint_file, other_dirs=["nn"])
    else:
        resume_path = retrieve_file_path(args_cli.checkpoint)
    log_dir = os.path.dirname(os.path.dirname(resume_path))

    # wrap around environment for rl-games
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_root_path, log_dir, "videos", "play"),
            "step_trigger": lambda step: step == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # load previously trained model
    agent_cfg["params"]["load_checkpoint"] = True
    agent_cfg["params"]["load_path"] = resume_path
    print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}")

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs
    # create runner from rl-games
    runner = Runner()
    runner.load(agent_cfg)
    # obtain the agent from the runner
    agent: BasePlayer = runner.create_player()
    agent.restore(resume_path)
    agent.reset()

    dt = env.unwrapped.step_dt

    # reset environment
    obs = env.reset()
    if isinstance(obs, dict):
        obs = obs["obs"]
    timestep = 0
    # required: enables the flag for batched observations
    _ = agent.get_batch_size(obs, 1)
    # initialize RNN states if used
    if agent.is_rnn:
        agent.init_rnn()
    # simulate environment
    # note: We simplified the logic in rl-games player.py (:func:`BasePlayer.run()`) function in an
    #   attempt to have complete control over environment stepping. However, this removes other
    #   operations such as masking that is used for multi-agent learning by RL-Games.
    while simulation_app.is_running():
        start_time = time.time()
        # run everything in inference mode
        with torch.inference_mode():
            # convert obs to agent format
            obs = agent.obs_to_torch(obs)
            # agent stepping
            actions = agent.get_action(obs, is_deterministic=agent.is_deterministic)
            # env stepping
            obs, _, dones, _ = env.step(actions)

            # perform operations for terminated episodes
            if len(dones) > 0:
                # reset rnn state for terminated episodes
                if agent.is_rnn and agent.states is not None:
                    for s in agent.states:
                        s[:, dones, :] = 0.0
        if args_cli.video:
            timestep += 1
            # Exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

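        # note: step_dt is the duration of one environment step; sleeping for the leftover budget keeps
        #   playback at roughly real-time speed when --real-time is passed (best-effort, never a negative sleep).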
        # time delay for real-time evaluation
        sleep_time = dt - (time.time() - start_time)
        if args_cli.real_time and sleep_time > 0:
            time.sleep(sleep_time)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
184
scripts/rl_games/train.py
Normal file
@@ -0,0 +1,184 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with RL-Games."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RL-Games.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint.")
parser.add_argument("--sigma", type=str, default=None, help="The policy's initial standard deviation.")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")

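# Example invocation (a sketch only; the task name registered by FLEXR_v0.tasks is a placeholder assumption):
#   python scripts/rl_games/train.py --task <TASK_NAME> --num_envs 4096 --headless --seed 42
# Unknown arguments are handed to Hydra (see the sys.argv handling below), so agent/env config values can
# presumably be overridden inline, e.g. agent.params.config.max_epochs=500 (exact override paths are an assumption).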
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli, hydra_args = parser.parse_known_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import math
import os
import random
from datetime import datetime

from rl_games.common import env_configurations, vecenv
from rl_games.common.algo_observer import IsaacAlgoObserver
from rl_games.torch_runner import Runner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict
from isaaclab.utils.io import dump_pickle, dump_yaml

from isaaclab_rl.rl_games import RlGamesGpuEnv, RlGamesVecEnvWrapper

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils.hydra import hydra_task_config

import FLEXR_v0.tasks  # noqa: F401


@hydra_task_config(args_cli.task, "rl_games_cfg_entry_point")
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: dict):
    """Train with RL-Games agent."""
    # override configurations with non-hydra CLI arguments
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # randomly sample a seed if seed = -1
    if args_cli.seed == -1:
        args_cli.seed = random.randint(0, 10000)

    agent_cfg["params"]["seed"] = args_cli.seed if args_cli.seed is not None else agent_cfg["params"]["seed"]
    agent_cfg["params"]["config"]["max_epochs"] = (
        args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg["params"]["config"]["max_epochs"]
    )
    if args_cli.checkpoint is not None:
        resume_path = retrieve_file_path(args_cli.checkpoint)
        agent_cfg["params"]["load_checkpoint"] = True
        agent_cfg["params"]["load_path"] = resume_path
        print(f"[INFO]: Loading model checkpoint from: {agent_cfg['params']['load_path']}")
    train_sigma = float(args_cli.sigma) if args_cli.sigma is not None else None

    # multi-gpu training config
    if args_cli.distributed:
        agent_cfg["params"]["seed"] += app_launcher.global_rank
        agent_cfg["params"]["config"]["device"] = f"cuda:{app_launcher.local_rank}"
        agent_cfg["params"]["config"]["device_name"] = f"cuda:{app_launcher.local_rank}"
        agent_cfg["params"]["config"]["multi_gpu"] = True
        # update env config device
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"

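    # note: --distributed assumes the script is launched through a multi-process launcher (e.g. torchrun or
    #   python -m torch.distributed.run, one process per GPU) so that global_rank/local_rank are populated;
    #   this is our reading of the flags above, not something the script itself enforces.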
    # set the environment seed (after multi-gpu config for updated rank from agent seed)
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg["params"]["seed"]

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rl_games", agent_cfg["params"]["config"]["name"])
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs
    log_dir = agent_cfg["params"]["config"].get("full_experiment_name", datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    # set directory into agent config
    # logging directory path: <train_dir>/<full_experiment_name>
    agent_cfg["params"]["config"]["train_dir"] = log_root_path
    agent_cfg["params"]["config"]["full_experiment_name"] = log_dir

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_root_path, log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_root_path, log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_root_path, log_dir, "params", "agent.pkl"), agent_cfg)

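    # resulting layout (derived from the paths above): logs/rl_games/<name>/<log_dir>/params/{env,agent}.{yaml,pkl};
    #   rl-games is expected to add checkpoints under .../<log_dir>/nn/, which is what play.py searches.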
    # read configurations about the agent-training
    rl_device = agent_cfg["params"]["config"]["device"]
    clip_obs = agent_cfg["params"]["env"].get("clip_observations", math.inf)
    clip_actions = agent_cfg["params"]["env"].get("clip_actions", math.inf)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_root_path, log_dir, "videos", "train"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rl-games
    env = RlGamesVecEnvWrapper(env, rl_device, clip_obs, clip_actions)

    # register the environment to rl-games registry
    # note: in agents configuration: environment name must be "rlgpu"
    vecenv.register(
        "IsaacRlgWrapper", lambda config_name, num_actors, **kwargs: RlGamesGpuEnv(config_name, num_actors, **kwargs)
    )
    env_configurations.register("rlgpu", {"vecenv_type": "IsaacRlgWrapper", "env_creator": lambda **kwargs: env})

    # set number of actors into agent config
    agent_cfg["params"]["config"]["num_actors"] = env.unwrapped.num_envs
    # create runner from rl-games
    runner = Runner(IsaacAlgoObserver())
    runner.load(agent_cfg)

    # reset the agent and env
    runner.reset()
    # train the agent
    if args_cli.checkpoint is not None:
        runner.run({"train": True, "play": False, "sigma": train_sigma, "checkpoint": resume_path})
    else:
        runner.run({"train": True, "play": False, "sigma": train_sigma})

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()