Isaac Orbit OperationSpaceController help — no runnable examples provided

Hello,

After finishing the motion generator tutorial with the IK controller (run_diff_ik.py), I’m trying to make a Cartesian impedance controller for a Panda robot. For that, there is a class called OperationSpaceController in operational_space.py, which, according to the documentation:

It uses task-space pose error and Jacobian to compute joint torques through a mass-spring-damper system with a) fixed stiffness, b) variable stiffness (stiffness control), and c) variable stiffness and damping (impedance control).

I’ve been trying to use the motion generator tutorial script (run_diff_ik.py) as a template for the impedance class, but I’m having lots of issues with the controller. I modified this “template” according to the class, but now I’m wondering whether the reason there is no script simulating a Cartesian controller is that it is still in development, or whether this class is meant for another type of robot/purpose.

This is my script so far:

import argparse

from omni.isaac.orbit.app import AppLauncher

# add argparse arguments
# Fixed: the description previously said "differential IK controller" — a leftover
# from the run_diff_ik.py template this script was copied from.
parser = argparse.ArgumentParser(description="Tutorial on using the operational-space (task-space) controller.")
parser.add_argument("--robot", type=str, default="franka_panda", help="Name of the robot.")
parser.add_argument("--num_envs", type=int, default=128, help="Number of environments to spawn.")
# NOTE(review): --num_dof and --num_robots are parsed but never read below — the
# controller is constructed with hard-coded num_dof=7 and num_robots=1. Kept for
# CLI compatibility; either wire them through or drop them.
parser.add_argument("--num_dof", type=int, default=7, help="Number of DOF.")
parser.add_argument("--num_robots", type=int, default=1, help="Number of robots to spawn.")

# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
# parse the arguments
args_cli = parser.parse_args()

# launch omniverse app (must happen before any omni.isaac.* simulation imports)
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app

"""Rest everything follows."""

import torch

import omni.isaac.orbit.sim as sim_utils
from omni.isaac.orbit.assets import AssetBaseCfg
from omni.isaac.orbit.managers import SceneEntityCfg
from omni.isaac.orbit.scene import InteractiveScene, InteractiveSceneCfg
from omni.isaac.orbit.utils import configclass
from omni.isaac.orbit.utils.assets import ISAAC_NUCLEUS_DIR
from omni.isaac.orbit.utils.math import subtract_frame_transforms
from omni.isaac.orbit.controllers.operational_space import OperationSpaceControllerCfg, OperationSpaceController

# load panda 
from omni.isaac.orbit_assets import FRANKA_PANDA_HIGH_PD_CFG, UR10_CFG  # isort:skip

# define scene

@configclass
class TableTopSceneCfg(InteractiveSceneCfg):
    """Configuration for a table-top scene: ground plane, dome light, a stand, and one arm.

    Note: the robot config is resolved at class-definition time from ``args_cli.robot``.
    """

    # ground plane (placed below the stand so the arm sits on the table surface)
    ground = AssetBaseCfg(
        prim_path="/World/defaultGroundPlane",
        spawn=sim_utils.GroundPlaneCfg(),
        init_state=AssetBaseCfg.InitialStateCfg(pos=(0.0, 0.0, -1.05)),
    )

    # lights
    dome_light = AssetBaseCfg(
        prim_path="/World/Light", spawn=sim_utils.DomeLightCfg(intensity=3000.0, color=(0.75, 0.75, 0.75))
    )

    # mount: instanceable stand asset from the Isaac Nucleus server, scaled 2x
    table = AssetBaseCfg(
        prim_path="{ENV_REGEX_NS}/Table",
        spawn=sim_utils.UsdFileCfg(
            usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Mounts/Stand/stand_instanceable.usd", scale=(2.0, 2.0, 2.0)
        ),
    )

    # articulation: chosen once from the CLI argument when this class body executes
    if args_cli.robot == "franka_panda":
        robot = FRANKA_PANDA_HIGH_PD_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    elif args_cli.robot == "ur10":
        robot = UR10_CFG.replace(prim_path="{ENV_REGEX_NS}/Robot")
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")

# run simulation

def run_simulator(sim: sim_utils.SimulationContext, scene: InteractiveScene):
    """Runs the simulation loop.

    Cycles the end-effector through a fixed list of target poses using an
    operational-space torque controller, resetting the robot every 150 steps.

    Args:
        sim: Simulation context used to step physics.
        scene: Interactive scene holding the robot articulation.
    """
    # Extract scene entities (for data reading and writing)
    # note: we only do this here for readability.
    robot = scene["robot"]

    # Create a controller config
    # NOTE(review): gains are scalar here; presumably the controller broadcasts
    # stiffness/damping over the 6 task-space axes — confirm against the class docs.
    op_controller = OperationSpaceControllerCfg(
        command_types=["pose_abs"],
        impedance_mode="fixed",
        uncouple_motion_wrench=True,
        inertial_compensation=False,
        gravity_compensation=False,
        stiffness=100.0,
        damping_ratio=1,
    )
    # create controller
    # NOTE(review): num_robots is hard-coded to 1 while the scene spawns
    # args_cli.num_envs environments (default 128); the controller's internal
    # buffers will not match the batched simulation tensors read below.
    # Consider num_robots=scene.num_envs.
    op_controller = OperationSpaceController(op_controller, num_robots=1, num_dof=7, device=sim.device)

    # define the target poses: [x, y, z, qw, qx, qy, qz] (position + quaternion)
    ee_goals = [
        [0.5, 0.5, 0.7, 0.707, 0, 0.707, 0],
        [0.5, -0.4, 0.6, 0.707, 0.707, 0.0, 0.0],
        [0.5, 0, 0.5, 0.0, 1.0, 0.0, 0.0],
    ]
    # convert to tensor
    ee_goals = torch.tensor(ee_goals, device=sim.device)

    # counter to track given goals
    current_goal_idx = 0
    # create buffers to store actions
    # NOTE(review): batch dimension is 1 here as well — must match the number of
    # environments if more than one is spawned.
    goal_command = torch.zeros(1, op_controller.target_dim, device=robot.device)
    goal_command[:] = ee_goals[current_goal_idx]

    # robot parameterization (joint names and body names to use later)
    if args_cli.robot == "franka_panda":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=["panda_joint.*"], body_names=["panda_hand"])
    elif args_cli.robot == "ur10":
        robot_entity_cfg = SceneEntityCfg("robot", joint_names=[".*"], body_names=["ee_link"])
    else:
        raise ValueError(f"Robot {args_cli.robot} is not supported. Valid: franka_panda, ur10")

    # resolve the scene entities
    robot_entity_cfg.resolve(scene)

    # obtain the frame index of the end-effector (one less for fixed base robots as root body is not included)
    ee_jacobi_idx = robot_entity_cfg.body_ids[0] - 1

    # define simulation stepping
    sim_dt = sim.get_physics_dt()
    count = 0
    count_error = 0  # NOTE(review): never used below — leftover from debugging?
    # loop
    while simulation_app.is_running():
        # reset every 150 physics steps
        if count % 150 == 0:
            # reset time
            count = 0
            # reset articulation state
            joint_pos_default = robot.data.default_joint_pos.clone()
            joint_vel_default = robot.data.default_joint_vel.clone()
            robot.write_joint_state_to_sim(joint_pos_default, joint_vel_default)
            robot.reset()
            # reset actions
            goal_command[:] = ee_goals[current_goal_idx]
            # reset robot joint positions
            # NOTE(review): these sliced copies are never used after this point.
            joint_pos_default = joint_pos_default[:, robot_entity_cfg.joint_ids].clone()
            joint_vel_default = joint_vel_default[:, robot_entity_cfg.joint_ids].clone()
            # zero torques are applied on the reset step (see set_joint_effort_target below)
            joint_torques_desired = torch.zeros(1, 7, device=robot.device)
            # reset controller
            # NOTE(review): calling initialize() on every reset looks redundant —
            # it is typically a one-time buffer allocation; confirm with the class docs.
            op_controller.initialize()
            op_controller.reset_idx()
            # set command
            # NOTE(review): the command is set only at reset; fine while the goal
            # is constant within an episode, but set_command must be called again
            # if the goal ever changes mid-episode.
            op_controller.set_command(goal_command)
            # change goal
            current_goal_idx = (current_goal_idx + 1) % len(ee_goals)
        else:
            # read information from simulation
            # jacobian of the end-effector body w.r.t. the selected joints
            jacobian = robot.root_physx_view.get_jacobians()[:, ee_jacobi_idx, :, robot_entity_cfg.joint_ids]
            print(f"jacobian: {jacobian.shape}")
            # state of all bodies ([pos, quat, lin_vel, ang_vel])
            ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]
            # print(f"body_state_w: {robot.data.body_state_w}")
            # root state [pos, quat, lin_vel, ang_vel]
            root_pose_w = robot.data.root_state_w[:, 0:7]
            root_vel_w = robot.data.root_state_w[:, 7:13]
            # compute frame in root frame 
            """ 
            asset instances are not “aware” of the local environment frame, they return their states in the simulation world frame. Thus, we need to convert the obtained quantities to the local environment frame. This is done by subtracting the local environment origin from the obtained quantities.
              """
            ee_pos_b, ee_quat_b = subtract_frame_transforms(
                root_pose_w[:, 0:3], root_pose_w[:, 3:7], ee_pose_w[:, 0:3], ee_pose_w[:, 3:7]
            )
            # merge the position and orientation
            ee_pose_b = torch.cat([ee_pos_b, ee_quat_b], dim=-1)
            
            # compute the end-effector velocity
            # NOTE(review): this velocity is still expressed in the WORLD frame
            # while ee_pose_b is in the BASE frame — inconsistent inputs to the
            # controller; the velocity should be rotated into the base frame too.
            ee_vel_b = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 7:13]

            # compute the joint commands
            joint_torques_desired = op_controller.compute(jacobian, ee_pose_b, ee_vel_b)
            print(f"joint_torques_desired: {joint_torques_desired.shape}")
            # NOTE(review): this permute suggests compute() returned an unexpected
            # [N, dof, dof] tensor; a torque command should be [N, dof]. This is
            # likely the controller-class shape issue described in the post, not
            # something to paper over with a transpose.
            joint_torques_desired = joint_torques_desired.permute(0, 2, 1)

        
        # apply actions
        robot.set_joint_effort_target(joint_torques_desired, joint_ids=robot_entity_cfg.joint_ids)
        scene.write_data_to_sim()
        # perform step
        sim.step()
        # update sim-time
        count += 1
        # update buffers
        scene.update(sim_dt)

        # obtain information from simulation
        # NOTE(review): this read is unused — the value is overwritten or discarded.
        ee_pose_w = robot.data.body_state_w[:, robot_entity_cfg.body_ids[0], 0:7]


def main():
    """Entry point: build the simulator and scene, then hand off to the run loop."""
    # simulation context with a 10 ms physics step
    sim_context = sim_utils.SimulationContext(sim_utils.SimulationCfg(dt=0.01))
    # place the main camera looking at the origin
    sim_context.set_camera_view([2.5, 2.5, 2.5], [0.0, 0.0, 0.0])
    # instantiate the table-top scene for the requested number of environments
    scene = InteractiveScene(TableTopSceneCfg(num_envs=args_cli.num_envs, env_spacing=2.0))
    # start the simulator
    sim_context.reset()
    print("[INFO]: Setup complete...")
    # enter the simulation loop
    run_simulator(sim_context, scene)


if __name__ == "__main__":
    # run the main function (blocks until the simulation loop exits)
    main()
    # close sim app: release the Omniverse application handle
    simulation_app.close()

As an example of the strange things I found in the controller class, consider the way that the desired_torque is defined:

self._desired_torques = torch.zeros(self.num_robots, self.num_dof, self.num_dof, device=self._device)

this tensor is [1, 7, 7] in my case, as I’m using just one robot. Am I missing something here? The torque tensor should be a [1, 7, 1] tensor, since the robot has 7 DOFs — why is it a 7x7 matrix?

Please, I would appreciate any input on this, because I have been debugging for a week, and now I’m thinking of starting from scratch to make my own controller specific to a 7-DOF robot.

Thanks in advance!

EDIT: I finally have a half-working simulation after making some changes to the controller class code; now I can tell for certain that I’m missing something about how this controller is meant to be used.