initial commit
scripts/rsl_rl/__pycache__/cli_args.cpython-310.pyc (new binary file, not shown)
scripts/rsl_rl/cli_args.py (new file, 91 lines)
@@ -0,0 +1,91 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

from __future__ import annotations

import argparse
import random
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg


def add_rsl_rl_args(parser: argparse.ArgumentParser):
    """Add RSL-RL arguments to the parser.

    Args:
        parser: The parser to add the arguments to.
    """
    # create a new argument group
    arg_group = parser.add_argument_group("rsl_rl", description="Arguments for RSL-RL agent.")
    # -- experiment arguments
    arg_group.add_argument(
        "--experiment_name", type=str, default=None, help="Name of the experiment folder where logs will be stored."
    )
    arg_group.add_argument("--run_name", type=str, default=None, help="Run name suffix to the log directory.")
    # -- load arguments
    arg_group.add_argument("--resume", action="store_true", default=False, help="Whether to resume from a checkpoint.")
    arg_group.add_argument("--load_run", type=str, default=None, help="Name of the run folder to resume from.")
    arg_group.add_argument("--checkpoint", type=str, default=None, help="Checkpoint file to resume from.")
    # -- logger arguments
    arg_group.add_argument(
        "--logger", type=str, default=None, choices={"wandb", "tensorboard", "neptune"}, help="Logger module to use."
    )
    arg_group.add_argument(
        "--log_project_name", type=str, default=None, help="Name of the logging project when using wandb or neptune."
    )


def parse_rsl_rl_cfg(task_name: str, args_cli: argparse.Namespace) -> RslRlOnPolicyRunnerCfg:
    """Parse configuration for RSL-RL agent based on inputs.

    Args:
        task_name: The name of the environment.
        args_cli: The command line arguments.

    Returns:
        The parsed configuration for RSL-RL agent based on inputs.
    """
    from isaaclab_tasks.utils.parse_cfg import load_cfg_from_registry

    # load the default configuration
    rslrl_cfg: RslRlOnPolicyRunnerCfg = load_cfg_from_registry(task_name, "rsl_rl_cfg_entry_point")
    rslrl_cfg = update_rsl_rl_cfg(rslrl_cfg, args_cli)
    return rslrl_cfg


def update_rsl_rl_cfg(agent_cfg: RslRlOnPolicyRunnerCfg, args_cli: argparse.Namespace):
    """Update configuration for RSL-RL agent based on inputs.

    Args:
        agent_cfg: The configuration for RSL-RL agent.
        args_cli: The command line arguments.

    Returns:
        The updated configuration for RSL-RL agent based on inputs.
    """
    # override the default configuration with CLI arguments
    if hasattr(args_cli, "seed") and args_cli.seed is not None:
        # randomly sample a seed if seed = -1
        if args_cli.seed == -1:
            args_cli.seed = random.randint(0, 10000)
        agent_cfg.seed = args_cli.seed
    if args_cli.resume is not None:
        agent_cfg.resume = args_cli.resume
    if args_cli.load_run is not None:
        agent_cfg.load_run = args_cli.load_run
    if args_cli.checkpoint is not None:
        agent_cfg.load_checkpoint = args_cli.checkpoint
    if args_cli.run_name is not None:
        agent_cfg.run_name = args_cli.run_name
    if args_cli.logger is not None:
        agent_cfg.logger = args_cli.logger
    # set the project name for wandb and neptune
    if agent_cfg.logger in {"wandb", "neptune"} and args_cli.log_project_name:
        agent_cfg.wandb_project = args_cli.log_project_name
        agent_cfg.neptune_project = args_cli.log_project_name

    return agent_cfg
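Taken together, these helpers let a standalone script expose and consume RSL-RL options without duplicating argparse boilerplate. Below is a minimal sketch of that wiring; the task id "FLEXR-Reach-v0" is a hypothetical placeholder, and the registry lookup assumes the task package has already been imported and registered (in practice this happens after launching the app, as play.py and train.py below do).

# Minimal usage sketch for cli_args.py (hypothetical task id; mirrors the two scripts below).
import argparse

import cli_args  # assumes this file sits next to scripts/rsl_rl/cli_args.py

parser = argparse.ArgumentParser(description="Example consumer of the RSL-RL CLI helpers.")
cli_args.add_rsl_rl_args(parser)
args = parser.parse_args(["--logger", "wandb", "--log_project_name", "flexr_experiments"])

# Loads the "rsl_rl_cfg_entry_point" config registered for the task,
# then applies the CLI overrides captured above.
agent_cfg = cli_args.parse_rsl_rl_cfg("FLEXR-Reach-v0", args)
print(agent_cfg.logger, agent_cfg.wandb_project)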
scripts/rsl_rl/play.py (new file, 170 lines)
@@ -0,0 +1,170 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to play a checkpoint of an RL agent from RSL-RL."""

"""Launch Isaac Sim Simulator first."""

import argparse

from isaaclab.app import AppLauncher

# local imports
import cli_args  # isort: skip

# add argparse arguments
parser = argparse.ArgumentParser(description="Play a checkpoint of an RL agent trained with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during playback.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument(
    "--disable_fabric", action="store_true", default=False, help="Disable fabric and use USD I/O operations."
)
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument(
    "--use_pretrained_checkpoint",
    action="store_true",
    help="Use the pre-trained checkpoint from Nucleus.",
)
parser.add_argument("--real-time", action="store_true", default=False, help="Run in real-time, if possible.")
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import gymnasium as gym
import os
import time
import torch

from rsl_rl.runners import OnPolicyRunner

from isaaclab.envs import DirectMARLEnv, multi_agent_to_single_agent
from isaaclab.utils.assets import retrieve_file_path
from isaaclab.utils.dict import print_dict
from isaaclab.utils.pretrained_checkpoint import get_published_pretrained_checkpoint

from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper, export_policy_as_jit, export_policy_as_onnx

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import get_checkpoint_path, parse_env_cfg

import FLEXR_v0.tasks  # noqa: F401


def main():
    """Play with RSL-RL agent."""
    # parse configuration
    env_cfg = parse_env_cfg(
        args_cli.task, device=args_cli.device, num_envs=args_cli.num_envs, use_fabric=not args_cli.disable_fabric
    )
    agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(args_cli.task, args_cli)

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Loading experiment from directory: {log_root_path}")
    if args_cli.use_pretrained_checkpoint:
        resume_path = get_published_pretrained_checkpoint("rsl_rl", args_cli.task)
        if not resume_path:
            print("[INFO] Unfortunately a pre-trained checkpoint is currently unavailable for this task.")
            return
    elif args_cli.checkpoint:
        resume_path = retrieve_file_path(args_cli.checkpoint)
    else:
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)

    log_dir = os.path.dirname(resume_path)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "play"),
            "step_trigger": lambda step: step == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during playback.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions)

    print(f"[INFO]: Loading model checkpoint from: {resume_path}")
    # load previously trained model
    ppo_runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=None, device=agent_cfg.device)
    ppo_runner.load(resume_path)

    # obtain the trained policy for inference
    policy = ppo_runner.get_inference_policy(device=env.unwrapped.device)

    # extract the neural network module
    # we do this in a try-except to maintain backwards compatibility.
    try:
        # version 2.3 onwards
        policy_nn = ppo_runner.alg.policy
    except AttributeError:
        # version 2.2 and below
        policy_nn = ppo_runner.alg.actor_critic

    # export policy to onnx/jit
    export_model_dir = os.path.join(os.path.dirname(resume_path), "exported")
    export_policy_as_jit(policy_nn, ppo_runner.obs_normalizer, path=export_model_dir, filename="policy.pt")
    export_policy_as_onnx(
        policy_nn, normalizer=ppo_runner.obs_normalizer, path=export_model_dir, filename="policy.onnx"
    )

    dt = env.unwrapped.step_dt

    # reset environment
    obs, _ = env.get_observations()
    timestep = 0
    # simulate environment
    while simulation_app.is_running():
        start_time = time.time()
        # run everything in inference mode
        with torch.inference_mode():
            # agent stepping
            actions = policy(obs)
            # env stepping
            obs, _, _, _ = env.step(actions)
        if args_cli.video:
            timestep += 1
            # Exit the play loop after recording one video
            if timestep == args_cli.video_length:
                break

        # time delay for real-time evaluation
        sleep_time = dt - (time.time() - start_time)
        if args_cli.real_time and sleep_time > 0:
            time.sleep(sleep_time)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
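For reference, a typical invocation of this play script through the Isaac Lab launcher might look like the line below. The task id is a hypothetical placeholder; --checkpoint is optional, and when omitted the script resolves a checkpoint from the most recent run under logs/rsl_rl/<experiment_name>.

./isaaclab.sh -p scripts/rsl_rl/play.py --task FLEXR-Reach-v0 --num_envs 32 --checkpoint /path/to/model.pt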
scripts/rsl_rl/train.py (new file, 193 lines)
@@ -0,0 +1,193 @@
# Copyright (c) 2022-2025, The Isaac Lab Project Developers.
# All rights reserved.
#
# SPDX-License-Identifier: BSD-3-Clause

"""Script to train RL agent with RSL-RL."""

"""Launch Isaac Sim Simulator first."""

import argparse
import sys

from isaaclab.app import AppLauncher

# local imports
import cli_args  # isort: skip


# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with RSL-RL.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
# append RSL-RL cli arguments
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli, hydra_args = parser.parse_known_args()

# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True

# clear out sys.argv for Hydra
sys.argv = [sys.argv[0]] + hydra_args

# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Check for minimum supported RSL-RL version."""

import importlib.metadata as metadata
import platform

from packaging import version

# for distributed training, check minimum supported rsl-rl version
RSL_RL_VERSION = "2.3.1"
installed_version = metadata.version("rsl-rl-lib")
if args_cli.distributed and version.parse(installed_version) < version.parse(RSL_RL_VERSION):
    if platform.system() == "Windows":
        cmd = [r".\isaaclab.bat", "-p", "-m", "pip", "install", f"rsl-rl-lib=={RSL_RL_VERSION}"]
    else:
        cmd = ["./isaaclab.sh", "-p", "-m", "pip", "install", f"rsl-rl-lib=={RSL_RL_VERSION}"]
    print(
        f"Please install the correct version of RSL-RL.\nExisting version is: '{installed_version}'"
        f" and required version is: '{RSL_RL_VERSION}'.\nTo install the correct version, run:"
        f"\n\n\t{' '.join(cmd)}\n"
    )
    exit(1)

"""Rest everything follows."""

import gymnasium as gym
import os
import torch
from datetime import datetime

from rsl_rl.runners import OnPolicyRunner

from isaaclab.envs import (
    DirectMARLEnv,
    DirectMARLEnvCfg,
    DirectRLEnvCfg,
    ManagerBasedRLEnvCfg,
    multi_agent_to_single_agent,
)
from isaaclab.utils.dict import print_dict
from isaaclab.utils.io import dump_pickle, dump_yaml

from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper

import isaaclab_tasks  # noqa: F401
from isaaclab_tasks.utils import get_checkpoint_path
from isaaclab_tasks.utils.hydra import hydra_task_config

import FLEXR_v0.tasks  # noqa: F401

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = False


@hydra_task_config(args_cli.task, "rsl_rl_cfg_entry_point")
def main(env_cfg: ManagerBasedRLEnvCfg | DirectRLEnvCfg | DirectMARLEnvCfg, agent_cfg: RslRlOnPolicyRunnerCfg):
    """Train with RSL-RL agent."""
    # override configurations with non-hydra CLI arguments
    agent_cfg = cli_args.update_rsl_rl_cfg(agent_cfg, args_cli)
    env_cfg.scene.num_envs = args_cli.num_envs if args_cli.num_envs is not None else env_cfg.scene.num_envs
    agent_cfg.max_iterations = (
        args_cli.max_iterations if args_cli.max_iterations is not None else agent_cfg.max_iterations
    )

    # set the environment seed
    # note: certain randomizations occur in the environment initialization so we set the seed here
    env_cfg.seed = agent_cfg.seed
    env_cfg.sim.device = args_cli.device if args_cli.device is not None else env_cfg.sim.device

    # multi-gpu training configuration
    if args_cli.distributed:
        env_cfg.sim.device = f"cuda:{app_launcher.local_rank}"
        agent_cfg.device = f"cuda:{app_launcher.local_rank}"

        # set seed to have diversity in different threads
        seed = agent_cfg.seed + app_launcher.local_rank
        env_cfg.seed = seed
        agent_cfg.seed = seed

    # specify directory for logging experiments
    log_root_path = os.path.join("logs", "rsl_rl", agent_cfg.experiment_name)
    log_root_path = os.path.abspath(log_root_path)
    print(f"[INFO] Logging experiment in directory: {log_root_path}")
    # specify directory for logging runs: {time-stamp}_{run_name}
    log_dir = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
    # The Ray Tune workflow extracts experiment name using the logging line below, hence, do not change it (see PR #2346, comment-2819298849)
    print(f"Exact experiment name requested from command line: {log_dir}")
    if agent_cfg.run_name:
        log_dir += f"_{agent_cfg.run_name}"
    log_dir = os.path.join(log_root_path, log_dir)

    # create isaac environment
    env = gym.make(args_cli.task, cfg=env_cfg, render_mode="rgb_array" if args_cli.video else None)

    # convert to single-agent instance if required by the RL algorithm
    if isinstance(env.unwrapped, DirectMARLEnv):
        env = multi_agent_to_single_agent(env)

    # save resume path before creating a new log_dir
    if agent_cfg.resume or agent_cfg.algorithm.class_name == "Distillation":
        resume_path = get_checkpoint_path(log_root_path, agent_cfg.load_run, agent_cfg.load_checkpoint)

    # wrap for video recording
    if args_cli.video:
        video_kwargs = {
            "video_folder": os.path.join(log_dir, "videos", "train"),
            "step_trigger": lambda step: step % args_cli.video_interval == 0,
            "video_length": args_cli.video_length,
            "disable_logger": True,
        }
        print("[INFO] Recording videos during training.")
        print_dict(video_kwargs, nesting=4)
        env = gym.wrappers.RecordVideo(env, **video_kwargs)

    # wrap around environment for rsl-rl
    env = RslRlVecEnvWrapper(env, clip_actions=agent_cfg.clip_actions)

    # create runner from rsl-rl
    runner = OnPolicyRunner(env, agent_cfg.to_dict(), log_dir=log_dir, device=agent_cfg.device)
    # write git state to logs
    runner.add_git_repo_to_log(__file__)
    # load the checkpoint
    if agent_cfg.resume or agent_cfg.algorithm.class_name == "Distillation":
        print(f"[INFO]: Loading model checkpoint from: {resume_path}")
        # load previously trained model
        runner.load(resume_path)

    # dump the configuration into log-directory
    dump_yaml(os.path.join(log_dir, "params", "env.yaml"), env_cfg)
    dump_yaml(os.path.join(log_dir, "params", "agent.yaml"), agent_cfg)
    dump_pickle(os.path.join(log_dir, "params", "env.pkl"), env_cfg)
    dump_pickle(os.path.join(log_dir, "params", "agent.pkl"), agent_cfg)

    # run training
    runner.learn(num_learning_iterations=agent_cfg.max_iterations, init_at_random_ep_len=True)

    # close the simulator
    env.close()


if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
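A typical training invocation through the Isaac Lab launcher might look like the line below. The task id is a hypothetical placeholder; --headless comes from the AppLauncher arguments appended above, and --logger/--log_project_name come from the RSL-RL arguments defined in cli_args.py.

./isaaclab.sh -p scripts/rsl_rl/train.py --task FLEXR-Reach-v0 --num_envs 4096 --max_iterations 1500 --headless --logger wandb --log_project_name flexr_experiments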