Hi @dkalpay Try the script below; it's a simple demonstration of the step async workflow.
It will not generate an OmniGraph. Instead, it does everything in Python, where arbitrary functions can be written and used. (Note: I'm using Isaac Sim 2023.1.1)
from semantics.schema.editor import add_prim_semantics, LabelWriteType
import omni
from pxr import Gf, UsdGeom
import asyncio
import omni.replicator.core as rep
import numpy as np
import random
def set_transform(mesh_prim, prim_pos, prim_rot, prim_scale):
    stage = omni.usd.get_context().get_stage()
    xform = UsdGeom.Xformable(mesh_prim)
    xform_ops = {op.GetBaseName(): op for op in xform.GetOrderedXformOps()}
    for op in xform.GetOrderedXformOps():
        if "translate" in op.GetName():
            translate_op = xform_ops["translate"]
            translate_op.Set(Gf.Vec3d(prim_pos[0], prim_pos[1], prim_pos[2]))
        elif "rotate" in op.GetName():
            rotate_op = xform_ops["rotateXYZ"]
            rotate_op.Set(Gf.Vec3d(prim_rot[0], prim_rot[1], prim_rot[2]))
        elif "scale" in op.GetName():
            scale_op = xform_ops["scale"]
            scale_op.Set(Gf.Vec3d(prim_scale[0], prim_scale[1], prim_scale[2]))
def spawn_mesh(mesh_type, pos, rot, scale):
    # I decided to make my own function to spawn basic meshes in this example. Using rep, it's much simpler,
    # but I wanted to show an example that doesn't generate ANY omnigraph nodes.
    stage = omni.usd.get_context().get_stage()
    mesh_prim = None
    if mesh_type == "Cube":
        result, path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cube")
        mesh_prim = stage.GetPrimAtPath(path)
        set_transform(mesh_prim, pos, rot, scale)
    elif mesh_type == "Cone":
        result, path = omni.kit.commands.execute("CreateMeshPrimCommand", prim_type="Cone")
        mesh_prim = stage.GetPrimAtPath(path)
        set_transform(mesh_prim, pos, rot, scale)
    return mesh_prim
def scene_setup(stage):
    # Hide the default light
    default_light_prim = stage.GetPrimAtPath("/Environment/defaultLight")
    if default_light_prim.IsValid():
        xform = UsdGeom.Xformable(default_light_prim)
        xform.MakeInvisible()
    # Spawn a camera and some geometry in the scene
    camera_prim = stage.DefinePrim("/World/Camera", "Camera")
    UsdGeom.Xformable(camera_prim).AddTranslateOp().Set((1347, 825, 1440))
    UsdGeom.Xformable(camera_prim).AddRotateXYZOp().Set((-21., 42., 0.))
    camera_prim.GetAttribute("focalLength").Set(24.0)
    distant_light_prim = stage.DefinePrim("/World/DistantLight", "DistantLight")
    UsdGeom.Xformable(distant_light_prim).AddTranslateOp().Set((0, 0, 0))
    UsdGeom.Xformable(distant_light_prim).AddRotateXYZOp().Set((400., -23., -94.0))
    # Meshes too - cone, cube, floor, wall1, and wall2
    annot_cone = spawn_mesh("Cone", (-125, 100, 125), (0, 0, 0), (2, 2, 2))
    annot_cube = spawn_mesh("Cube", (125, 80, -125), (0, 0, 0), (2, 2, 2))
    floor = spawn_mesh("Cube", (0, 0, 0), (0, 0, 0), (10, 0.1, 10))
    wall1 = spawn_mesh("Cube", (-450, 250, 0), (0, 0, 0), (1, 5, 10))
    wall2 = spawn_mesh("Cube", (0, 250, -450), (0, 0, 0), (10, 5, 1))
    # Set up semantics
    add_prim_semantics(prim=annot_cone, type="class", data="cone", write_type=LabelWriteType.OVERWRITE)
    add_prim_semantics(prim=annot_cube, type="class", data="cube", write_type=LabelWriteType.OVERWRITE)
    return camera_prim, annot_cone, annot_cube
def toggle_visibility(stage, leader_prim, follower_prim):
    ### NOTE!!! - toggling the visibility of a prim is expensive ...
    # Much better to move the prim to a distant, non-visible location if you're concerned about perf (see the sketch after this function).
    leader_xform = UsdGeom.Xformable(leader_prim)
    follower_xform = UsdGeom.Xformable(follower_prim)
    # Get the local visibility of the leader object
    local_vis = leader_xform.GetVisibilityAttr().Get()
    if local_vis == "inherited":
        leader_xform.MakeInvisible()  # sets visibility on the object to invisible
        follower_xform.MakeVisible()
    elif local_vis == "invisible":
        leader_xform.MakeVisible()  # sets visibility on the object to visible. This also forces parents to be visible too!
        follower_xform.MakeInvisible()
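# A quick (untested) sketch of that cheaper alternative: instead of hiding a prim, "park" it far
# outside the camera view. The parked position below is just an arbitrary example value.
def park_prim(prim, parked_pos=(0, -100000, 0)):
    # Only touch the translate op, so rotation/scale are left alone and visibility is never changed
    for op in UsdGeom.Xformable(prim).GetOrderedXformOps():
        if "translate" in op.GetName():
            op.Set(Gf.Vec3d(*parked_pos))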
def move_camera(camera_prim, position, rotation):
    set_transform(camera_prim, position, rotation, (1, 1, 1))
async def run():
    ### Notes:
    # This script uses the "step async" workflow. Usually rep scripts generate an omnigraph, which then gets executed ...
    # But in this workflow, the Python does all the heavy lifting and selectively uses the replicator API for things like
    # writers, triggers, render products etc. The benefit of this method is that you have complete freedom to build your own
    # logic and do anything you like using USD, Kit, etc. There are some downsides, like possible performance loss, and if
    # you're not using the rep functions the code can be more verbose and a lot less friendly.
    # Get the stage
    stage = omni.usd.get_context().get_stage()
    # Set the renderer to Path Traced
    rep.settings.set_render_pathtraced(samples_per_pixel=64)
    # Scene settings
    rep.settings.set_stage_up_axis("Y")
    rep.settings.set_stage_meters_per_unit(0.01)
    # The camera will cycle through these in sequence, looping if there are more than 3 frames
    camera_positions = [(1347, 825, 1440), (0, 825, 1440), (1440, 825, 0)]
    camera_rotations = [(-20, 43, 0), (-26, 0, 1440), (-28, 90, 0)]
    # Set up render products and annotators
    render_product = rep.create.render_product("/World/Camera", (1920, 1080))
    # Initialize and attach writer
    writer = rep.WriterRegistry.get("BasicWriter")
    writer.initialize(output_dir="_stepasync_writer_example", rgb=True, bounding_box_2d_tight=True, semantic_segmentation=True)
    # On Windows, outputs default to: C:\Users\yourname\omni.replicator_out\
    writer.attach([render_product], trigger=None)
    # Set up the scene ONCE
    camera_prim, cone_prim, cube_prim = scene_setup(stage)
    index = 0
    for frame_id in range(10):
        # Move the camera
        move_camera(camera_prim, camera_positions[index % len(camera_positions)], camera_rotations[index % len(camera_rotations)])
        index += 1
        # Toggle visibility of the cone, and do the opposite to the cube
        toggle_visibility(stage, cone_prim, cube_prim)
        await rep.orchestrator.step_async(rt_subframes=50)
        # Trigger the writer
        writer.schedule_write()
    rep.orchestrator.stop()
asyncio.ensure_future(run())
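One last note: the writer backends flush data in the background, so if you ever find the final frames missing on disk, you can (in recent Replicator versions) wait for them to finish before stopping by adding this inside run(), just before rep.orchestrator.stop():
    await rep.orchestrator.wait_until_complete_async()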