Using RTX Path Tracer as Hydra interface in simple USD application usdrecord

Sorry — I will update this answer later with more detail.

I did make a solution using capture that I should be able to share

It makes use of the Isaac Omniverse Kit Helper, calling .update() a number of times and waiting in an event loop for the file to open.

Most of the problems relate to kit’s async behavior and the event loop

Sam

This is what I put together:

#!/usr/bin/env python

import copy
import os
import omni
import numpy as np
from omni.isaac.python_app import OmniKitHelper
from pathlib import Path
import matplotlib.pyplot as plt
import logging
import asyncio


SEED = 1337

EXAMPLE = Path(__file__).parent.parent / Path("./data/raw/shipsShaded.usda")


def setup_logging(level=logging.INFO):
    logging.basicConfig(level=level)

    urllib3_logger = logging.getLogger("urllib3")
    urllib3_logger.setLevel(logging.INFO)


setup_logging()


async def load_stage(path):
    await omni.usd.get_context().open_stage_async(path)


def _setup_world(kit, scenario_path):
    # Load scenario
    setup_task = asyncio.ensure_future(load_stage(scenario_path))
    while not setup_task.done():
        # logging.info("Update in progress")
        kit.update()
    kit.setup_renderer()
    kit.update()

    logging.info(f"Stage opened with {scenario_path}")


np.random.seed(SEED)


def main(
    usd_file=f"{EXAMPLE.absolute()}",
    camera_name="/Camera/Camera_001",
    width=1920,
    hieght=1080,
    start_frame=0,
    end_frame=120,
    background_match="Plane",
    output_path="./data/output/ground_truth_examples/example1.####.png",
):
    output_folder = Path(output_path).parent
    output_folder.mkdir(parents=True, exist_ok=True)
    logging.info("Start up")
    kit = OmniKitHelper(
        {
            "renderer": "PathTracedLighting",
            "experience": f'{os.environ["EXP_PATH"]}/omni.isaac.sim.python.kit',
            "width": width,
            "height": hieght,
        }
    )
    logging.info("Start End")

    from pxr import Semantics  # Note: Import after kit is instantiated
    from omni.syntheticdata import visualize, helpers  # Note: as above
    from omni.isaac.synthetic_utils import (
        SyntheticDataHelper,
    )  # Note: as above

    sd_helper = SyntheticDataHelper()

    logging.info("Imports End")
    _setup_world(kit=kit, scenario_path=usd_file)
    kit.update()
    stage = kit.get_stage()
    logging.info("Open Stage End")
    for prim in stage.Traverse():
        if prim.GetTypeName() == "Mesh":
            prim.GetAttribute("primvars:displayColor").Set([np.random.rand(3).tolist()])
        sem = Semantics.SemanticsAPI.Apply(prim, "Semantics")
        sem.CreateSemanticTypeAttr()
        sem.CreateSemanticDataAttr()
        sem.GetSemanticTypeAttr().Set("class")
        prim_type = prim.GetTypeName()
        if str(prim_type) == "Mesh":
            if background_match in str(prim.GetPath()):
                sem.GetSemanticDataAttr().Set("background")
                logging.info(f"Background Mesh: {prim.GetPath()}")
            else:
                sem.GetSemanticDataAttr().Set("foreground")
                logging.info(f"Foreground Mesh: {prim.GetPath()}")
    logging.info("Traverse Stage End")

    kit.update()
    logging.info("Update End")
    viewport = omni.kit.viewport.get_default_viewport_window()
    logging.info("Viewport End")
    viewport.set_active_camera(camera_name)
    timeline = omni.timeline.get_timeline_interface()
    for i in range(1000):
        kit.update()
    for cur_frame in range(start_frame + 1, end_frame + 1, 1):
        timeline.set_current_time(cur_frame)
        for i in range(20):
            kit.update()
        gt = sd_helper.get_groundtruth(
            [
                "rgb",
                "depth",
                "boundingBox2DTight",
                "boundingBox2DLoose",
                "instanceSegmentation",
                "semanticSegmentation",
                "boundingBox3D",
                "camera",
            ],
            viewport,
        )

        # GROUNDTRUTH VISUALIZATION

        # Setup a figure
        _, axes = plt.subplots(2, 4, figsize=(20, 7))
        axes = axes.flat
        for ax in axes:
            ax.axis("off")

        # RGB
        axes[0].set_title("RGB")
        for ax in axes[:-1]:
            ax.imshow(gt["rgb"])

        # DEPTH
        axes[1].set_title("Depth")
        depth_data = np.clip(gt["depth"], 0, 255)
        axes[1].imshow(visualize.colorize_depth(depth_data.squeeze()))

        # BBOX2D TIGHT
        axes[2].set_title("BBox 2D Tight")
        rgb_data = copy.deepcopy(gt["rgb"])
        axes[2].imshow(visualize.colorize_bboxes(gt["boundingBox2DTight"], rgb_data))

        # BBOX2D LOOSE
        axes[3].set_title("BBox 2D Loose")
        rgb_data = copy.deepcopy(gt["rgb"])
        axes[3].imshow(visualize.colorize_bboxes(gt["boundingBox2DLoose"], rgb_data))

        # INSTANCE SEGMENTATION
        axes[4].set_title("Instance Segmentation")
        instance_seg = gt["instanceSegmentation"][0]
        instance_rgb = visualize.colorize_segmentation(instance_seg)
        axes[4].imshow(instance_rgb, alpha=0.7)

        # SEMANTIC SEGMENTATION
        axes[5].set_title("Semantic Segmentation")
        semantic_seg = gt["semanticSegmentation"]
        semantic_rgb = visualize.colorize_segmentation(semantic_seg)
        axes[5].imshow(semantic_rgb, alpha=0.7)

        # BBOX 3D
        axes[6].set_title("BBox 3D")
        bbox_3d_data = gt["boundingBox3D"]
        bboxes_3d_corners = bbox_3d_data["corners"]
        projected_corners = helpers.world_to_image(
            bboxes_3d_corners.reshape(-1, 3), viewport
        )
        projected_corners = projected_corners.reshape(-1, 8, 3)
        rgb_data = copy.deepcopy(gt["rgb"])
        bboxes3D_rgb = visualize.colorize_bboxes_3d(projected_corners, rgb_data)
        axes[6].imshow(bboxes3D_rgb)

        # Save figure
        plt.savefig(
            f"{output_folder}"
            + f'/{Path(output_path).name.split(".")[0]}.'
            + f'{cur_frame:04d}.{Path(output_path).name.split(".")[-1]}'
        )
        plt.close()
        logging.info(f"Frame {cur_frame:04d} Done!")
        # cleanup
    kit.shutdown()


if __name__ == "__main__":
    main()

Yes if you want to capture images, you may have to disable the kit’s async rendering like this:

        # disable async rendering for capture, otherwise it won't capture images correctly
        self._settings.set_bool("/app/asyncRendering", False)
        self._settings.set_bool("/app/asyncRenderingLowLatency", False)

You can see how it's used in omni.kit.capture too.

Thanks for this information — it is useful to me and others.