I am trying to create an RL training environment, and as I build up the simulation I am testing things out step by step. I have an InteractiveScene with a ground plane and a robot (the lander). As you can see in the video, the lander does not collide with the ground plane as it falls. I think this is due to a wrong setting in my USD files, but I'm not sure.
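To narrow it down, I was going to add a quick check like this right after sim.reset() to see whether the terrain prims actually carry colliders (a rough sketch, not verified; /World/defaultGroundPlane is just where my scene config places the terrain):

import omni.usd
from pxr import Usd, UsdPhysics

# walk the terrain subtree and report any prim that has a collision API applied
stage = omni.usd.get_context().get_stage()
ground_root = stage.GetPrimAtPath("/World/defaultGroundPlane")
found = False
for prim in Usd.PrimRange(ground_root):
    if prim.HasAPI(UsdPhysics.CollisionAPI):
        print("[DEBUG] collider on:", prim.GetPath())
        found = True
if not found:
    print("[DEBUG] no collision API found under the terrain prim")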
Here is the Python script I use to create the simulation.
"""Launch Isaac Sim Simulator first."""
import argparse
import sys
from isaaclab.app import AppLauncher
# add argparse arguments
parser = argparse.ArgumentParser(description="Train an RL agent with skrl.")
parser.add_argument("--video", action="store_true", default=False, help="Record videos during training.")
parser.add_argument("--video_length", type=int, default=200, help="Length of the recorded video (in steps).")
parser.add_argument("--video_interval", type=int, default=2000, help="Interval between video recordings (in steps).")
parser.add_argument("--num_envs", type=int, default=None, help="Number of environments to simulate.")
parser.add_argument("--task", type=str, default=None, help="Name of the task.")
parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
parser.add_argument(
    "--distributed", action="store_true", default=False, help="Run training with multiple GPUs or nodes."
)
parser.add_argument("--checkpoint", type=str, default=None, help="Path to model checkpoint to resume training.")
parser.add_argument("--max_iterations", type=int, default=None, help="RL Policy training iterations.")
parser.add_argument(
    "--ml_framework",
    type=str,
    default="torch",
    choices=["torch", "jax", "jax-numpy"],
    help="The ML framework used for training the skrl agent.",
)
parser.add_argument(
    "--algorithm",
    type=str,
    default="PPO",
    choices=["AMP", "PPO", "IPPO", "MAPPO"],
    help="The RL algorithm used for training the skrl agent.",
)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()
# always enable cameras to record video
if args_cli.video:
    args_cli.enable_cameras = True
# launch omniverse app
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
import gymnasium as gym
import os
import random
import torch
from datetime import datetime
from isaaclab.sim import SimulationCfg, SimulationContext
import isaacsim.core.utils.prims as prim_utils
from isaaclab.actuators import ImplicitActuatorCfg
from isaaclab.assets import ArticulationCfg, AssetBaseCfg, RigidObjectCfg
from isaaclab.scene import InteractiveScene, InteractiveSceneCfg
from isaaclab.utils import configclass
import isaaclab.sim as sim_utils
isaaclab_root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../../.."))
sys.path.insert(0, isaaclab_root)
from source.lander_assets.lander_vehicle import LUNAR_LANDER_CFG
@configclass
class CartpoleSceneCfg(InteractiveSceneCfg):
"""Configuration for a cart-pole scene."""
# ground plane
ground = AssetBaseCfg(prim_path="/World/defaultGroundPlane",
spawn=sim_utils.UsdFileCfg(usd_path=f"source/lander_assets/camera_baked_moon_terrain_without_sun_from_blender.usd")
)
# lights
dome_light = AssetBaseCfg(
prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
)
# articulation
lander: RigidObjectCfg = LUNAR_LANDER_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
"""Runs the simulation loop."""
# Extract scene entities
# note: we only do this here for readability.
robot = scene["lander"]
# Define simulation stepping
sim_dt = sim.get_physics_dt()
count = 0
# Simulation loop
while simulation_app.is_running():
# Reset
if count % 500 == 0:
# reset counter
count = 0
# reset the scene entities
# root state
# we offset the root state by the origin since the states are written in simulation world frame
# if this is not done, then the robots will be spawned at the (0, 0, 0) of the simulation world
root_state = robot.data.default_root_state.clone()
root_state[:, :3] += scene.env_origins
robot.write_root_pose_to_sim(root_state[:, :7])
robot.write_root_velocity_to_sim(root_state[:, 7:])
# set joint positions with some noise
# joint_pos, joint_vel = robot.data.default_joint_pos.clone(), robot.data.default_joint_vel.clone()
# joint_pos += torch.rand_like(joint_pos) * 0.1
# robot.write_joint_state_to_sim(joint_pos, joint_vel)
# clear internal buffers
scene.reset()
print("[INFO]: Resetting robot state...")
# Apply random action
# -- generate random joint efforts
# efforts = torch.randn_like(robot.data.joint_pos) * 5.0
# -- apply action to the robot
        # note: keep tensors on the simulation device and use a float mass to avoid device/type mismatches
        base_gravity = torch.tensor(sim.cfg.gravity, device=sim.device)
        mass = float(robot.root_physx_view.get_masses().sum())
        robot.set_external_force_and_torque(
            forces=(mass * base_gravity).expand(scene.num_envs, 1, 3).clone(),
            torques=torch.zeros(scene.num_envs, 1, 3, device=sim.device),
        )
        # robot.set_joint_effort_target(efforts)
        # -- write data to sim
        scene.write_data_to_sim()
        # Perform step
        sim.step()
        # Increment counter
        count += 1
        # Update buffers
        scene.update(sim_dt)
def main():
"""Main function."""
# Load kit helper
sim_cfg = sim_utils.SimulationCfg(device=args_cli.device)
sim = SimulationContext(sim_cfg)
# Set main camera
sim.set_camera_view([2.5, 0.0, 4.0], [0.0, 0.0, 2.0])
# Design scene
scene_cfg = CartpoleSceneCfg(num_envs=args_cli.num_envs, env_spacing=100.0)
scene = InteractiveScene(scene_cfg)
# Play the simulator
sim.reset()
# Now we are ready!
print("[INFO]: Setup complete...")
# Run the simulator
run_simulator(sim, scene)
if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
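In case the terrain USD is missing colliders, this is the kind of change I was going to try on the ground asset next. I'm assuming here that sim_utils.UsdFileCfg accepts a collision_props field and that it affects the meshes referenced by the file; I haven't verified that this is enough for a Blender-exported terrain:

ground = AssetBaseCfg(
    prim_path="/World/defaultGroundPlane",
    spawn=sim_utils.UsdFileCfg(
        usd_path="source/lander_assets/camera_baked_moon_terrain_without_sun_from_blender.usd",
        # assumption: this may only modify colliders that already exist in the USD;
        # if the Blender export has none, they might need to be added in the USD itself
        collision_props=sim_utils.CollisionPropertiesCfg(collision_enabled=True),
    ),
)

As a sanity check I also plan to temporarily swap the terrain for sim_utils.GroundPlaneCfg() to confirm the lander itself collides with a known-good ground.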