Hello there,
I am using an implementation similar to the one in Isaac Orbit to create a camera for visualizing my reinforcement learning environment. The camera-creation code looks like this:
import numpy as np

import carb
from omni.isaac.core.utils.viewports import set_camera_view


class IsaacEnv:
    def render(self, mode: str = "human"):
        if mode == "human":
            return None
        elif mode == "rgb_array":
            # check if viewport is enabled -- if not, then complain because we won't get any data
            if not self.enable_viewport:
                raise RuntimeError(
                    f"Cannot render '{mode}' when 'enable_viewport' is False. Please check the provided "
                    "arguments to the environment class at initialization."
                )
            # obtain the rgb data
            rgb_data = self._rgb_annotator.get_data()
            # convert to numpy array
            rgb_data = np.frombuffer(rgb_data, dtype=np.uint8).reshape(*rgb_data.shape)
            # return the rgb data (drop the alpha channel)
            return rgb_data[:, :, :3]
        else:
            raise NotImplementedError(
                f"Render mode '{mode}' is not supported. Please use: {self.metadata['render.modes']}."
            )

    def _create_viewport_render_product(self):
        """Create a render product of the viewport for rendering."""
        # set camera view for "/OmniverseKit_Persp" camera
        set_camera_view(eye=self.cfg.viewer.eye, target=self.cfg.viewer.lookat)
        # check if flatcache is enabled
        # this is needed to flush the flatcache data into Hydra manually when calling `env.render()`
        # ref: https://docs.omniverse.nvidia.com/prod_extensions/prod_extensions/ext_physics.html
        if self.sim.get_physics_context().use_flatcache:
            from omni.physxflatcache import get_physx_flatcache_interface

            # acquire flatcache interface
            self._flatcache_iface = get_physx_flatcache_interface()
        # check if viewport is enabled before creating render product
        if self.enable_viewport:
            import omni.replicator.core as rep

            # create render product
            self._render_product = rep.create.render_product(
                "/OmniverseKit_Persp", tuple(self.cfg.viewer.resolution)
            )
            # create rgb annotator -- used to read data from the render product
            self._rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb", device="cpu")
            self._rgb_annotator.attach([self._render_product])
        else:
            carb.log_info("Viewport is disabled. Skipping creation of render product.")
However, the captured images, and hence the resulting video, show noticeable motion blur.
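Roughly, the frames are collected like this (a simplified sketch of my capture loop; imageio and the step count are only for illustration, not the real code):

import imageio

# `env` is an instance of the IsaacEnv shown above
frames = []
for _ in range(200):  # number of steps is arbitrary here
    env.step(env.action_space.sample())          # advance the simulation
    frames.append(env.render(mode="rgb_array"))  # grab the current viewport frame

# write the collected frames to a video file
imageio.mimsave("rollout.mp4", frames, fps=30)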
I ensured that physics_dt and rendering_dt of the SimulationContext are identical.
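For reference, the simulation context is set up roughly like this (the 60 Hz value is only an example):

from omni.isaac.core import SimulationContext

# physics and rendering both step at the same rate
sim = SimulationContext(physics_dt=1.0 / 60.0, rendering_dt=1.0 / 60.0)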
What is the cause of the problem, and how can I address it?
Thanks.