Questions about publishing sim_camera data via Python standalone

I’m new to Isaac Sim and to programming, and I’m currently studying Isaac Sim 2023.1.0. My questions may be basic, so please bear with me.
Following the tutorial, I wrote a user_application.py that is meant to publish the camera's depth/RGB images. I’m confused by some snippets from the tutorial; here’s my code:

# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

import argparse
import sys

import carb
from omni.isaac.kit import SimulationApp

CAMERA_STAGE_PATH = "/World/Camera1"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera1"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"

CONFIG = {"renderer": "RayTracedLighting", "headless": False}

simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
from omni.isaac.sensor import Camera
import omni.isaac.core.utils.numpy.rotations as rot_utils
import numpy as np
import omni.replicator.core as rep
import omni.syntheticdata._syntheticdata as sd

# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")

simulation_app.update()

simulation_context = SimulationContext(stage_units_in_meters=1.0)

# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
    carb.log_error("Could not find Isaac Sim assets folder")
    simulation_app.close()
    sys.exit()

# Load the warehouse_with_forklifts environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)

# Creating a Camera prim
camera = Camera(
    prim_path=CAMERA_STAGE_PATH,
    # path = "/Camera1"
    # parent = None
    position=np.array([-1, 2, 1]),
    frequency=60,
    resolution=[800, 600],
    orientation=rot_utils.euler_angles_to_quats(np.array([0, 0, 0]), degrees=True),
    #############################################################
    # First question: the camera orientation shows as [90, -90, 0] in the app even though I set it to [0, 0, 0]?
    ############################################################
)


simulation_app.update()

# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
    {
        "graph_path": ROS_CAMERA_GRAPH_PATH,
        "evaluator_name": "push",
        "pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
    },
    {
        keys.CREATE_NODES: [
            ("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
            ("RenderProduct", "omni.isaac.core_nodes.IsaacCreateRenderProduct"),
            ("cameraHelperRGB", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
            ("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
        ],
        keys.CONNECT: [
            ("OnPlaybackTick.outputs:tick", "RenderProduct.inputs:execIn"),
            ("RenderProduct.outputs:execOut", "cameraHelperRGB.inputs:execIn"),
            ("RenderProduct.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
            ("RenderProduct.outputs:renderProductPath", "cameraHelperRGB.inputs:renderProductPath"),
            ("RenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
        ],
        keys.SET_VALUES: [
            ("cameraHelperRGB.inputs:frameId", "sim_camera"),
            ("cameraHelperRGB.inputs:topicName", "rgb"),
            ("cameraHelperRGB.inputs:type", "rgb"),
            ("cameraHelperDepth.inputs:frameId", "sim_camera"),
            ("cameraHelperDepth.inputs:topicName", "depth"),
            ("cameraHelperDepth.inputs:type", "depth"),
            ("RenderProduct.inputs:cameraPrim", [camera.prim_path]),    #ROS_CAMERA_GRAPH_PATH
        ],
    },
)

# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)

simulation_app.update()

# Inside the SDGPipeline graph, Isaac Simulation Gate nodes are added to control the execution rate of each of the ROS image and camera info publishers.
# By default the step input of each Isaac Simulation Gate node is set to a value of 1 to execute every frame.
# We can change this value to N for each Isaac Simulation Gate node individually to publish every N number of frames.

'''
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
    rv_rgb + "IsaacSimulationGate", camera._render_product_path
)

# Get name of rendervar for DistanceToImagePlane sensor type
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
    sd.SensorType.DistanceToImagePlane.name
)

# Get path to IsaacSimulationGate node in Depth pipeline  ##### change the path here
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
    rv_depth + "IsaacSimulationGate", camera._render_product_path
)
'''
########################################################
# Question 2: the commented-out code above failed; the rgb_camera_gate_path it returns is "/Render/PostProcess/SDGPipeline/LdrColorSDIsaacSimulationGate", without the "RenderProduct_Isaac_" prefix.
########################################################
rgb_camera_gate_path = "/Render/PostProcess/SDGPipeline/RenderProduct_Isaac_LdrColorSDIsaacSimulationGate"
depth_camera_gate_path = "/Render/PostProcess/SDGPipeline/RenderProduct_Isaac_DistanceToImagePlaneSDIsaacSimulationGate"

# Set Rgb execution step to 5 frames
rgb_step_size = 5

# Set Depth execution step to 1 frame
depth_step_size = 1

# Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(rgb_step_size)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(depth_step_size)


# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()

simulation_context.play()


while simulation_app.is_running():
    # Run with a fixed step size
    simulation_context.step(render=True)

simulation_context.stop()
simulation_app.close()

I have 2 questions, as follows (you can also find them marked as comments in the code above):

  1. First question: the camera orientation shows as [90, -90, 0] in the app, even though I set it to [0, 0, 0]. Why is that?

  2. Second question: the commented-out code failed because the rgb_camera_gate_path it returns is “/Render/PostProcess/SDGPipeline/LdrColorSDIsaacSimulationGate”, without the “RenderProduct_Isaac_” prefix. How can I change this?

Also, when I run the code, Isaac Sim takes a long time to start, and the following messages are shown repeatedly:

2023-11-21 12:19:53 [227,845ms] [Warning] [omni.usd] Warning (secondary thread): in GetInstanceIndices at line 2190 of W:\ac88d7d902b57417\USD\pxr\usdImaging\usdImaging\pointInstancerAdapter.cpp -- ProtoIndex 0 out of bounds (prototypes size = 0) for (/background/PaintTool/rock_small_01_c27df/pointInstancer, /background/PaintTool/rock_small_01_c27df/pointInstancer.proto0_rock_id0)

2023-11-21 12:19:53 [227,845ms] [Warning] [omni.usd] Warning (secondary thread): in GetInstanceIndices at line 3251 of W:\ac88d7d902b57417\USD\pxr\usdImaging\usdImaging\delegate.cpp -- Empty InstanceIndices (/background/PaintTool/rock_small_10_3a652/pointInstancer, /background/PaintTool/rock_small_10_3a652/pointInstancer.proto0_rock_id0)

What can I do to speed up the starting process?

Could you explain what you are trying to achieve? That will make it easier to point you to more specific code snippets or tutorials. Do you want to publish the data to ROS, or just access the data?

Thank you for your reply!
I would like to publish depth image data to ROS 2, but my first attempt, following the tutorial at https://docs.omniverse.nvidia.com/isaacsim/latest/ros2_tutorials/tutorial_ros2_camera_publishing.html, failed.
My initial code is:

import carb
from omni.isaac.kit import SimulationApp
import sys

import argparse

parser = argparse.ArgumentParser(description="Ros2 Bridge Sample")
parser.add_argument(
    "--ros2_bridge",
    default="omni.isaac.ros2_bridge",
    nargs="?",
    choices=["omni.isaac.ros2_bridge", "omni.isaac.ros2_bridge-humble"],
)
args, unknown = parser.parse_known_args()

BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"

CONFIG = {"renderer": "RayTracedLighting", "headless": False}

# Example ROS2 bridge sample demonstrating the manual loading of stages and manual publishing of images
simulation_app = SimulationApp(CONFIG)
import omni
import numpy as np
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import stage, extensions, nucleus
import omni.graph.core as og
import omni.replicator.core as rep
import omni.syntheticdata._syntheticdata as sd

from omni.isaac.core.utils.prims import set_targets
from omni.isaac.sensor import Camera
import omni.isaac.core.utils.numpy.rotations as rot_utils
from omni.isaac.core.utils.prims import is_prim_path_valid
from omni.isaac.core_nodes.scripts.utils import set_target_prims

# enable ROS2 bridge extension
extensions.enable_extension(args.ros2_bridge)

simulation_app.update()

simulation_context = SimulationContext(stage_units_in_meters=1.0)

# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
    carb.log_error("Could not find Isaac Sim assets folder")
    simulation_app.close()
    sys.exit()

# Load the warehouse_with_forklifts environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)


############### Some Camera helper functions for setting up publishers. ###############

def publish_camera_info(camera: Camera, freq):
    # The following code will link the camera's render product and publish the data to the specified topic name.
    render_product = camera._render_product_path
    step_size = int(60/freq)
    topic_name = camera.name+"_camera_info"
    queue_size = 1
    node_namespace = ""
    frame_id = camera.prim_path.split("/")[-1] # This matches what the TF tree is publishing.

    stereo_offset = [0.0, 0.0]

    writer = rep.writers.get("ROS2PublishCameraInfo")
    writer.initialize(
        frameId=frame_id,
        nodeNamespace=node_namespace,
        queueSize=queue_size,
        topicName=topic_name,
        stereoOffset=stereo_offset,
    )
    print("\n\n",render_product)
    writer.attach(render_product)

    gate_path = omni.syntheticdata.SyntheticData._get_node_path(
        "PostProcessDispatch" + "IsaacSimulationGate", render_product
    )

    # Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
    og.Controller.attribute(gate_path + ".inputs:step").set(step_size)
    return

def publish_pointcloud_from_depth(camera: Camera, freq):
    # The following code will link the camera's render product and publish the data to the specified topic name.
    render_product = camera._render_product_path
    step_size = int(60/freq)
    topic_name = camera.name+"_pointcloud" # Set topic name to the camera's name
    queue_size = 1
    node_namespace = ""
    frame_id = camera.prim_path.split("/")[-1] # This matches what the TF tree is publishing.

    # Note, this pointcloud publisher will simply convert the Depth image to a pointcloud using the Camera intrinsics.
    # This pointcloud generation method does not support semantic labelled objects.
    rv = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
        sd.SensorType.DistanceToImagePlane.name
    )

    writer = rep.writers.get(rv + "ROS2PublishPointCloud")
    writer.initialize(
        frameId=frame_id,
        nodeNamespace=node_namespace,
        queueSize=queue_size,
        topicName=topic_name
    )
    writer.attach([render_product])

    # Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
    gate_path = omni.syntheticdata.SyntheticData._get_node_path(
        rv + "IsaacSimulationGate", render_product
    )
    og.Controller.attribute(gate_path + ".inputs:step").set(step_size)

    return

def publish_rgb(camera: Camera, freq):
    # The following code will link the camera's render product and publish the data to the specified topic name.
    render_product = camera._render_product_path
    step_size = int(60/freq)
    topic_name = camera.name+"_rgb"
    queue_size = 1
    node_namespace = ""
    frame_id = camera.prim_path.split("/")[-1] # This matches what the TF tree is publishing.

    rv = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
    writer = rep.writers.get(rv + "ROS2PublishImage")
    writer.initialize(
        frameId=frame_id,
        nodeNamespace=node_namespace,
        queueSize=queue_size,
        topicName=topic_name
    )
    writer.attach([render_product])

    # Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
    gate_path = omni.syntheticdata.SyntheticData._get_node_path(
        rv + "IsaacSimulationGate", render_product
    )
    og.Controller.attribute(gate_path + ".inputs:step").set(step_size)

    return

def publish_depth(camera: Camera, freq):
    # The following code will link the camera's render product and publish the data to the specified topic name.
    render_product = camera._render_product_path
    step_size = int(60/freq)
    topic_name = camera.name+"_depth"
    queue_size = 1
    node_namespace = ""
    frame_id = camera.prim_path.split("/")[-1] # This matches what the TF tree is publishing.

    rv = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
                            sd.SensorType.DistanceToImagePlane.name
                        )
    writer = rep.writers.get(rv + "ROS2PublishImage")
    writer.initialize(
        frameId=frame_id,
        nodeNamespace=node_namespace,
        queueSize=queue_size,
        topicName=topic_name
    )
    writer.attach([render_product])

    # Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
    gate_path = omni.syntheticdata.SyntheticData._get_node_path(
        rv + "IsaacSimulationGate", render_product
    )
    og.Controller.attribute(gate_path + ".inputs:step").set(step_size)

    return

def publish_camera_tf(camera: Camera):
    camera_prim = camera.prim_path

    if not is_prim_path_valid(camera_prim):
        raise ValueError(f"Camera path '{camera_prim}' is invalid.")

    try:
        # Generate the camera_frame_id. OmniActionGraph will use the last part of
        # the full camera prim path as the frame name, so we will extract it here
        # and use it for the pointcloud frame_id.
        camera_frame_id=camera_prim.split("/")[-1]

        # Generate an action graph associated with camera TF publishing.
        ros_camera_graph_path = "/CameraTFActionGraph"

        # If a camera graph is not found, create a new one.
        if not is_prim_path_valid(ros_camera_graph_path):
            (ros_camera_graph, _, _, _) = og.Controller.edit(
                {
                    "graph_path": ros_camera_graph_path,
                    "evaluator_name": "execution",
                    "pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_SIMULATION,
                },
                {
                    og.Controller.Keys.CREATE_NODES: [
                        ("OnTick", "omni.graph.action.OnTick"),
                        ("IsaacClock", "omni.isaac.core_nodes.IsaacReadSimulationTime"),
                        ("RosPublisher", "omni.isaac.ros2_bridge.ROS2PublishClock"),
                    ],
                    og.Controller.Keys.CONNECT: [
                        ("OnTick.outputs:tick", "RosPublisher.inputs:execIn"),
                        ("IsaacClock.outputs:simulationTime", "RosPublisher.inputs:timeStamp"),
                    ]
                }
            )

        # Generate 2 nodes associated with each camera: TF from world to ROS camera convention, and world frame.
        og.Controller.edit(
            ros_camera_graph_path,
            {
                og.Controller.Keys.CREATE_NODES: [
                    ("PublishTF_"+camera_frame_id, "omni.isaac.ros_bridge.ROS2PublishTransformTree"),
                    ("PublishRawTF_"+camera_frame_id+"_world", "omni.isaac.ros2_bridge.ROS2PublishRawTransformTree"),
                ],
                og.Controller.Keys.SET_VALUES: [
                    ("PublishTF_"+camera_frame_id+".inputs:topicName", "/tf"),
                    # Note if topic_name is changed to something else besides "/tf",
                    # it will not be captured by the ROS tf broadcaster.
                    ("PublishRawTF_"+camera_frame_id+"_world.inputs:topicName", "/tf"),
                    ("PublishRawTF_"+camera_frame_id+"_world.inputs:parentFrameId", camera_frame_id),
                    ("PublishRawTF_"+camera_frame_id+"_world.inputs:childFrameId", camera_frame_id+"_world"),
                    # Static transform from ROS camera convention to world (+Z up, +X forward) convention:
                    ("PublishRawTF_"+camera_frame_id+"_world.inputs:rotation", [0.5, -0.5, 0.5, 0.5]),
                ],
                og.Controller.Keys.CONNECT: [
                    (ros_camera_graph_path+"/OnTick.outputs:tick",
                        "PublishTF_"+camera_frame_id+".inputs:execIn"),
                    (ros_camera_graph_path+"/OnTick.outputs:tick",
                        "PublishRawTF_"+camera_frame_id+"_world.inputs:execIn"),
                    (ros_camera_graph_path+"/IsaacClock.outputs:simulationTime",
                        "PublishTF_"+camera_frame_id+".inputs:timeStamp"),
                    (ros_camera_graph_path+"/IsaacClock.outputs:simulationTime",
                        "PublishRawTF_"+camera_frame_id+"_world.inputs:timeStamp"),
                ],
            },
        )
    except Exception as e:
        print(e)

    # Add target prims for the USD pose. All other frames are static.
    set_target_prims(
        primPath=ros_camera_graph_path+"/PublishTF_"+camera_frame_id,
        inputName="inputs:targetPrims",
        targetPrimPaths=[camera_prim],
    )
    return

########################################################################################

# Create a Camera prim.
camera = Camera(
    prim_path="/World/camera",
    position=np.array([0.0, 0.0, 2.0]),
    frequency=20,
    resolution=(256, 256),
    orientation=rot_utils.euler_angles_to_quats(np.array([0, 0, 0]), degrees=True),
)

simulation_app.update()

############### Calling Camera publishing functions ###############

# Setup publishers.
publish_camera_tf(camera)
publish_camera_info(camera, 30)
publish_rgb(camera, 30)
publish_depth(camera, 30)
publish_pointcloud_from_depth(camera, 30)

####################################################################

# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()
simulation_context.play()

while simulation_app.is_running():
    # Run with a fixed step size
    simulation_context.step(render=True)

simulation_context.stop()
simulation_app.close()

The message is as below:

OmniGraphError: Could not create node using unrecognized type 'omni.isaac.ros_bridge.ROS2PublishTransformTree'. Perhaps the extension 'omni.isaac.ros_bridge' is not loaded?
Accessed invalid null prim


 None
Traceback (most recent call last):
  File "D:\Omniverse\Library\isaac_sim-2023.1.0\exts\omni.isaac.examples\omni\isaac\examples\user_examples\publish_camera.py", line 269, in <module>
    publish_camera_info(camera, 30)
  File "D:\Omniverse\Library\isaac_sim-2023.1.0\exts\omni.isaac.examples\omni\isaac\examples\user_examples\publish_camera.py", line 77, in publish_camera_info
    writer.attach(render_product)
  File "d:/omniverse/library/isaac_sim-2023.1.0/extscache/omni.replicator.core-1.10.10+105.1.wx64.r.cp310/omni/replicator/core/scripts/writers.py", line 211, in attach
    WriterRegistry.attach(self, render_products, trigger="omni.replicator.core.OgnOnFrame", **init_params)
  File "d:/omniverse/library/isaac_sim-2023.1.0/extscache/omni.replicator.core-1.10.10+105.1.wx64.r.cp310/omni/replicator/core/scripts/writers.py", line 610, in attach
    render_products = [rp.path if isinstance(rp, HydraTexture) else rp for rp in render_products]
TypeError: 'NoneType' object is not iterable
2023-11-28 10:19:15 [240,395ms] [Warning] [omni.graph.core.plugin] Node type name '' is missing the unique namespace
2023-11-28 10:19:15 [240,397ms] [Warning] [omni.stageupdate.plugin] Deprecated: direct use of IStageUpdate callbacks is deprecated. Use IStageUpdate::getStageUpdate instead.
2023-11-28 10:19:15 [240,397ms] [Warning] [omni.timeline.plugin] Deprecated: direct use of ITimeline callbacks is deprecated. Use ITimeline::getTimeline (Python: omni.timeline.get_timeline_interface) instead.
2023-11-28 10:19:15 [240,399ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,399ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,400ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,400ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,400ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,400ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,400ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,401ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,401ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,401ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,401ms] [Warning] [omni.graph.core.plugin] Could not find category 'Replicator:Annotators' for removal
2023-11-28 10:19:15 [240,405ms] [Warning] [carb.audio.context] 1 contexts were leaked
2023-11-28 10:19:15 [240,556ms] [Warning] [carb] Recursive unloadAllPlugins() detected!
2023-11-28 10:19:15 [240,557ms] [Warning] [omni.core.ITypeFactory] Module d:/omniverse/library/isaac_sim-2023.1.0/kit/omni.structuredlog.plugin.dll remained loaded after unload request.

I had no idea how to change the code, so I imitated camera_periodic.py from another tutorial, https://docs.omniverse.nvidia.com/isaacsim/latest/ros2_tutorials/tutorial_ros2_python.html#isaac-sim-app-tutorial-ros2-python-stereo,
and used the code below:

# Copyright (c) 2020-2023, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
#

import argparse
import sys

import carb
from omni.isaac.kit import SimulationApp

CAMERA_STAGE_PATH = "/World/Camera1"
ROS_CAMERA_GRAPH_PATH = "/ROS_Camera1"
BACKGROUND_STAGE_PATH = "/background"
BACKGROUND_USD_PATH = "/Isaac/Environments/Simple_Warehouse/warehouse_with_forklifts.usd"

CONFIG = {"renderer": "RayTracedLighting", "headless": False}

simulation_app = SimulationApp(CONFIG)
import omni
import omni.graph.core as og
#import usdrt.Sdf
from omni.isaac.core import SimulationContext
from omni.isaac.core.utils import extensions, nucleus, stage
# from pxr import Gf, Usd, UsdGeom
from omni.isaac.sensor import Camera
import omni.isaac.core.utils.numpy.rotations as rot_utils
import numpy as np
import omni.replicator.core as rep
import omni.syntheticdata._syntheticdata as sd

# enable ROS bridge extension
extensions.enable_extension("omni.isaac.ros2_bridge")

simulation_app.update()

simulation_context = SimulationContext(stage_units_in_meters=1.0)

# Locate Isaac Sim assets folder to load environment and robot stages
assets_root_path = nucleus.get_assets_root_path()
if assets_root_path is None:
    carb.log_error("Could not find Isaac Sim assets folder")
    simulation_app.close()
    sys.exit()

# Load the warehouse_with_forklifts environment
stage.add_reference_to_stage(assets_root_path + BACKGROUND_USD_PATH, BACKGROUND_STAGE_PATH)

# Creating a Camera prim
camera = Camera(
    prim_path=CAMERA_STAGE_PATH,
    # path = "/Camera1"
    # parent = None
    position=np.array([-1, 2, 1]),
    frequency=60,
    # resolution=[800, 600],
    orientation=rot_utils.euler_angles_to_quats(np.array([0, 0, 0]), degrees=True),
)

simulation_app.update()

# Creating an on-demand push graph with cameraHelper nodes to generate ROS image publishers
keys = og.Controller.Keys
(ros_camera_graph, _, _, _) = og.Controller.edit(
    {
        "graph_path": ROS_CAMERA_GRAPH_PATH,
        "evaluator_name": "push",
        "pipeline_stage": og.GraphPipelineStage.GRAPH_PIPELINE_STAGE_ONDEMAND,
    },
    {
        keys.CREATE_NODES: [
            ("OnPlaybackTick", "omni.graph.action.OnPlaybackTick"),
            ("RenderProduct", "omni.isaac.core_nodes.IsaacCreateRenderProduct"),
            ("cameraHelperRGB", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
            ("cameraHelperDepth", "omni.isaac.ros2_bridge.ROS2CameraHelper"),
        ],
        keys.CONNECT: [
            ("OnPlaybackTick.outputs:tick", "RenderProduct.inputs:execIn"),
            ("RenderProduct.outputs:execOut", "cameraHelperRGB.inputs:execIn"),
            ("RenderProduct.outputs:execOut", "cameraHelperDepth.inputs:execIn"),
            ("RenderProduct.outputs:renderProductPath", "cameraHelperRGB.inputs:renderProductPath"),
            ("RenderProduct.outputs:renderProductPath", "cameraHelperDepth.inputs:renderProductPath"),
        ],
        keys.SET_VALUES: [
            ("cameraHelperRGB.inputs:frameId", "sim_camera"),
            ("cameraHelperRGB.inputs:topicName", "rgb"),
            ("cameraHelperRGB.inputs:type", "rgb"),
            ("cameraHelperDepth.inputs:frameId", "sim_camera"),
            ("cameraHelperDepth.inputs:topicName", "depth"),
            ("cameraHelperDepth.inputs:type", "depth"),
            ("RenderProduct.inputs:cameraPrim", [camera.prim_path]),    #ROS_CAMERA_GRAPH_PATH
        ],
    },
)

# Run the ROS Camera graph once to generate ROS image publishers in SDGPipeline
og.Controller.evaluate_sync(ros_camera_graph)

simulation_app.update()

# Inside the SDGPipeline graph, Isaac Simulation Gate nodes are added to control the execution rate of each of the ROS image and camera info publishers.
# By default the step input of each Isaac Simulation Gate node is set to a value of 1 to execute every frame.
# We can change this value to N for each Isaac Simulation Gate node individually to publish every N number of frames.

'''
rv_rgb = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(sd.SensorType.Rgb.name)
rgb_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
    rv_rgb + "IsaacSimulationGate", camera._render_product_path
)

# Get name of rendervar for DistanceToImagePlane sensor type
rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
    sd.SensorType.DistanceToImagePlane.name
)

# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
    rv_depth + "IsaacSimulationGate", camera._render_product_path
)
'''

rgb_camera_gate_path = "/Render/PostProcess/SDGPipeline/RenderProduct_Isaac_LdrColorSDIsaacSimulationGate"
depth_camera_gate_path = "/Render/PostProcess/SDGPipeline/RenderProduct_Isaac_DistanceToImagePlaneSDIsaacSimulationGate"

# Set Rgb execution step to 5 frames
rgb_step_size = 5

# Set Depth execution step to 1 frame
depth_step_size = 1

# Set step input of the Isaac Simulation Gate nodes upstream of ROS publishers to control their execution rate
og.Controller.attribute(rgb_camera_gate_path + ".inputs:step").set(rgb_step_size)
og.Controller.attribute(depth_camera_gate_path + ".inputs:step").set(depth_step_size)


# Need to initialize physics before getting any articulation, etc.
simulation_context.initialize_physics()

simulation_context.play()


while simulation_app.is_running():
    # Run with a fixed step size
    simulation_context.step(render=True)

simulation_context.stop()
simulation_app.close()

And I have 2 questions:

  1. I created the camera using omni.isaac.sensor.Camera. I set the camera orientation with orientation=rot_utils.euler_angles_to_quats(np.array([0, 0, 0]), degrees=True), but when the app opens, the orientation shown is [90, -90, 0]. How can I control the orientation through code?

  2. When I tried to change the simulation gate step, I used this code:

# Get path to IsaacSimulationGate node in Depth pipeline
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
    rv_depth + "IsaacSimulationGate", camera._render_product_path
)

The depth_camera_gate_path above is "/Render/PostProcess/SDGPipeline/DistanceToImagePlaneSDIsaacSimulationGate",
not "/Render/PostProcess/SDGPipeline/RenderProduct_Isaac_DistanceToImagePlaneSDIsaacSimulationGate", which is the valid one.
How should I rewrite the code to obtain the correct depth_camera_gate_path?
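
One idea I had (I'm not sure whether it is the intended way) is to read the render product path from the RenderProduct node's output after the graph has been evaluated, instead of taking it from the Camera object, and then pass that path to _get_node_path. A rough sketch of what I mean:

# Sketch only: read the render product path created by the IsaacCreateRenderProduct
# node, since it seems to differ from the Camera object's own render product.
render_product_path = og.Controller.attribute(
    ROS_CAMERA_GRAPH_PATH + "/RenderProduct.outputs:renderProductPath"
).get()

rv_depth = omni.syntheticdata.SyntheticData.convert_sensor_type_to_rendervar(
    sd.SensorType.DistanceToImagePlane.name
)
depth_camera_gate_path = omni.syntheticdata.SyntheticData._get_node_path(
    rv_depth + "IsaacSimulationGate", render_product_path
)

Would that be a reasonable way to do it, or is there a proper API for this?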

Here is some quick info regarding camera orientations; these have to do with the different representations used by USD, the Isaac Sim world frame, and the ROS camera convention.
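
Roughly speaking (please double-check against the docs), the Camera class takes its orientation argument in Isaac Sim world axes (+X forward, +Z up), while the underlying USD camera prim looks down its local -Z axis, so the raw xformOp rotation you see in the property panel includes an extra offset such as [90, -90, 0]. A minimal sketch, assuming the Camera class's get_world_pose/set_world_pose methods and their camera_axes argument, of reading and setting the orientation in world axes instead of relying on the USD values shown in the GUI:

# Sketch: query the pose in world axes instead of reading the raw USD xformOp.
position, orientation = camera.get_world_pose(camera_axes="world")
print("world-axes orientation (quaternion):", orientation)

# Apply a 90-degree yaw in world axes; the USD prim will still display a
# different rotation because of the USD camera convention.
camera.set_world_pose(
    orientation=rot_utils.euler_angles_to_quats(np.array([0, 0, 90]), degrees=True),
    camera_axes="world",
)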

I will forward the question to a ROS dev and will get back to you.

Hi, there is a typo in 7.2.4. Publishing Camera’s Data — Omniverse IsaacSim latest documentation. An update to this page is coming soon. For now, on line 42 of the code on that page, change

("PublishTF_"+camera_frame_id, "omni.isaac.ros_bridge.ROS2PublishTransformTree"),

to

("PublishTF_"+camera_frame_id, "omni.isaac.ros2_bridge.ROS2PublishTransformTree"),

Then it should be fixed.


Thanks for your reply.
I fixed the ‘ros2’ typo, and then a new error occurred.
The message is as below:

Traceback (most recent call last):
  File "D:\Omniverse\Library\isaac_sim-2023.1.0\exts\omni.isaac.examples\omni\isaac\examples\user_examples\publish_camera.py", line 270, in <module>
    publish_camera_info(camera, 30)
  File "D:\Omniverse\Library\isaac_sim-2023.1.0\exts\omni.isaac.examples\omni\isaac\examples\user_examples\publish_camera.py", line 78, in publish_camera_info
    writer.attach([render_product])
  File "d:/omniverse/library/isaac_sim-2023.1.0/extscache/omni.replicator.core-1.10.10+105.1.wx64.r.cp310/omni/replicator/core/scripts/writers.py", line 211, in attach
    WriterRegistry.attach(self, render_products, trigger="omni.replicator.core.OgnOnFrame", **init_params)
  File "d:/omniverse/library/isaac_sim-2023.1.0/extscache/omni.replicator.core-1.10.10+105.1.wx64.r.cp310/omni/replicator/core/scripts/writers.py", line 624, in attach
    writer_node = cls._attach(writer, active_writer_id, render_products, **combined_kwargs)
  File "d:/omniverse/library/isaac_sim-2023.1.0/extscache/omni.replicator.core-1.10.10+105.1.wx64.r.cp310/omni/replicator/core/scripts/writers.py", line 696, in _attach
    writer_node_name = f"{render_products[0].split('/')[-1]}_{writer_name}Writer"
AttributeError: 'NoneType' object has no attribute 'split'

Then I found that the output of print(camera._render_product_path) is None.

Can you check if adding camera.initialize() before using the camera will fix the issue?
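
Something like this, as a sketch based on your script (the initialize() call goes right after the Camera is created and before the publisher helpers are called):

camera = Camera(
    prim_path="/World/camera",
    position=np.array([0.0, 0.0, 2.0]),
    frequency=20,
    resolution=(256, 256),
    orientation=rot_utils.euler_angles_to_quats(np.array([0, 0, 0]), degrees=True),
)

simulation_app.update()
camera.initialize()  # should populate camera._render_product_path, which was None before

# Set up the publishers as before.
publish_camera_tf(camera)
publish_camera_info(camera, 30)
publish_rgb(camera, 30)
publish_depth(camera, 30)
publish_pointcloud_from_depth(camera, 30)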

After adding this, it did work!
Thanks for your help.
