Time Latency Issue in Acquiring Point Clouds - replicator, annotator

Hello, I am working on obtaining segmented point clouds of objects by randomizing the pose of the object, the robot, and the camera.

However, I have discovered that the ground truth point cloud and the simulated point cloud from the camera do not match.

Here is a video of my setup. (You can skip it if you want.)

I realized that a mismatch happens when I move the object rapidly.

In my actual use case, I teleport the object and robot poses even faster than in the video.

The fundamental problem seems to be that the point cloud acquisition in Replicator behaves like a separate thread: I assumed the call was blocking, but it apparently is not.

Here are my snippets. Please see the comments in the main function.


import os
import numpy as np
import omni.replicator.core as rep
from pxr import UsdGeom

# Note: SimulationApp / world creation and several helper methods are omitted from this snippet.
class PointCloudGenerator():
    def __init__(self, object="001_chips_can"):
        path = os.path.dirname(os.path.abspath(__file__))
        self.object_name = object
        self.object_usd_path = os.path.join(path, f"ycb/{self.object_name}/google_64k/_converted/textured_obj.usd")
        self.robot_usd_path = os.path.join(path, "robot/Panda.usd")
        self.full_pcd_path = os.path.join(path, f"ycb/{self.object_name}/google_64k/nontextured.ply")
        self.full_pcd_array = self.load_point_cloud_from_ply(self.full_pcd_path)
        self.full_pcd_array = self.downsample_point_cloud(self.full_pcd_array, 500)

    def add_camera(self):
        self.camera = self.stage.DefinePrim("/World/Camera", "Camera")
        if not self.camera.GetAttribute("xformOp:translate"):
            UsdGeom.Xformable(self.camera).AddTranslateOp()
        if not self.camera.GetAttribute("xformOp:orient"):
            UsdGeom.Xformable(self.camera).AddOrientOp()

        self.render_product = rep.create.render_product(str(self.camera.GetPath()), (self.cam_intrinsic_width, self.cam_intrinsic_height))
        self.rgb_annotator = rep.AnnotatorRegistry.get_annotator("rgb")
        self.pcd_annotator = rep.AnnotatorRegistry.get_annotator("pointcloud", init_params={"includeUnlabelled": False})
        self.rgb_annotator.attach([self.render_product])
        self.pcd_annotator.attach([self.render_product])
        rep.orchestrator.step()

        horizontal_aperture = self.cam_intrinsic_fx / self.cam_intrinsic_width
        vertical_aperture = self.cam_intrinsic_fy / self.cam_intrinsic_height

        self.camera.GetAttribute("focusDistance").Set(self.cam_intrinsic_focus_distance)
        self.camera.GetAttribute("focalLength").Set(self.cam_intrinsic_fx / self.cam_intrinsic_width)
        self.camera.GetAttribute("horizontalAperture").Set(horizontal_aperture)
        self.camera.GetAttribute("verticalAperture").Set(vertical_aperture)
        self.camera.GetAttribute("clippingRange").Set((0.1, 1000))
        self.camera.GetAttribute("clippingPlanes").Set(np.array([1.0, 0.0, 1.0, 1.0]))
    

    def get_pcd(self):
        # https://docs.omniverse.nvidia.com/extensions/latest/ext_replicator/annotators_details.html#point-cloud
        return self.pcd_annotator.get_data(device="cpu")  # why doesn't device="cuda" work?

    def main(self):
        self.load_world()
        self.add_light()
        self.add_camera()

        if self.debugging:
            self._debug_create_point_1(self.full_pcd_array, points_path="/World/Points", color=(1, 0, 0))
            self._debug_create_point_2(self.full_pcd_array)
            self._debug_plot_point_clouds_init()
    
        self.world.reset()

        while simulation_app.is_running():
            self.world.step(render=True)
            if self.world.is_playing():
                if self.world.current_time_step_index == 0:
                    self.world.reset()
                
                else:
                    cam_spherical_pose = self.randomize_cam_spherical_position()
                    cam_position, cam_orientation = self.sperical_to_cartesian(cam_spherical_pose)
                    self.move_camera(cam_position, cam_orientation)
                    self.move_light(cam_position)

                    object_position = self.randomize_object_cartesian_position()
                    if self.enable_randomize_object_orientation:
                        object_orientation = self.randomize_orientation()
                    else:
                        object_orientation = np.array([1.0, 0.0, 0.0, 0.0])
                    self.move_object(object_position, object_orientation)

                    if self.enable_robot:
                        robot_position = object_position + np.array([0, 0, 0.1])
                        robot_orientation = self.randomize_hemisphere_orientation()
                        self.move_robot(robot_position, robot_orientation)
                   
                    # 2. THEREFORE, I would need to query the data explicitly, e.g. with
                    #    something like self.take_pictures() (see the sketch after this snippet).
                    sim_pcd = self.get_pcd()  # 1. HERE, THE CALL IS NON-BLOCKING, so the data can be stale!

                    if not len(sim_pcd["data"]):
                        continue
                    
                    sim_pcd_object = sim_pcd["data"][sim_pcd["info"]["pointSemantic"]==3, :]
                    if not len(sim_pcd_object):
                        continue
                    
                    sim_pcd_robot = sim_pcd["data"][sim_pcd["info"]["pointSemantic"]==2, :]
                    if not len(sim_pcd_robot):
                        continue
                
                    if len(sim_pcd_object) > 500:
                        sim_pcd_object = self.downsample_point_cloud(sim_pcd_object, 500)

                    if len(sim_pcd_robot) > 500:
                        sim_pcd_robot = self.downsample_point_cloud(sim_pcd_robot, 500)

                    sim_rgb = self.get_rgb()
                    object_position, object_orientation = self.get_object_pose()
                    full_pcd = self.move_full_pcd(self.full_pcd_array, object_position, object_orientation)

                    self.data_saver.save_image(sim_rgb)

                    collect_data = {"sim_pcd_object": sim_pcd_object, 
                                    "sim_pcd_robot": sim_pcd_robot,
                                    "full_pcd": full_pcd,
                                    "cam_sperical_pos": cam_spherical_pose,
                                    "cam_position": cam_position,
                                    "cam_orientation": cam_orientation,
                                    "object_position": object_position,
                                    "object_orientation": object_orientation,
                                    "fx": self.cam_intrinsic_fx,
                                    "fy": self.cam_intrinsic_fy,
                                    "cx": self.cam_intrinsic_cx,
                                    "cy": self.cam_intrinsic_cy,
                                    "focus_distance": self.cam_intrinsic_focus_distance,
                                    "width": self.cam_intrinsic_width,
                                    "height": self.cam_intrinsic_height,
                                    }

                    self.data_saver.save_data(collect_data)

                    if self.debugging:
                        self._debug_update_point_1(full_pcd)
                        self._debug_update_point_2(sim_pcd_object)
                        self._debug_plot_point_clouds_update(sim_pcd_object, sim_pcd_robot, full_pcd)

        self.data_saver.close()
        simulation_app.close()
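
To make comment 2 concrete, this is roughly the interface I have in mind for self.take_pictures(). It is only a hypothetical sketch (the method does not exist in my code); the call marked (A) is exactly the missing piece I am asking about:

    def take_pictures(self):
        # Hypothetical helper I would like to call right after teleporting the
        # camera / object / robot. Whatever goes at (A) should block until the
        # renderer and annotators have processed the poses set just above.
        # (A) <blocking render / annotator update goes here>
        rgb = self.rgb_annotator.get_data()
        pcd = self.pcd_annotator.get_data(device="cpu")
        return rgb, pcd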

I have considered using a rendering time step (rendering_dt) five times smaller than the physics time step (physics_dt), but I still see the potential for issues. Can I update the camera's point cloud and RGB data in a blocking manner, exactly when I want?
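
For reference, this is roughly how those time steps would be set (a minimal sketch in the shape of my load_world(), assuming the omni.isaac.core World API; the values are illustrative, and I am not even sure this dt combination is meaningful):

    def load_world(self):
        from omni.isaac.core import World

        # Illustrative values: rendering_dt five times smaller than physics_dt,
        # as mentioned above. This changes the timing but does not remove the race
        # between teleporting prims and reading the annotator.
        physics_dt = 1.0 / 60.0
        self.world = World(physics_dt=physics_dt, rendering_dt=physics_dt / 5.0)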

Thanks!

I also checked the annotators API, but found no way to do this.

One possibility seems to be using rep.orchestrator.wait_until_complete(). However, it pauses the running simulation.

                    rep.orchestrator.wait_until_complete()
                    sim_pcd = self.get_pcd()
                    if not len(sim_pcd["data"]):
                        continue

So this is my temporary approach:

                    for _ in range(10):
                        self.world.step(render=True)  # step extra frames and hope the annotator catches up to the new poses

                    sim_pcd = self.get_pcd()

However, this still gives no guarantee of a correct PCD, and it is very inefficient.

You probably need to use a sim step.

It completes all physics steps and then returns the current frame/tick, so everything should be in sync.
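
For example, something like this in your loop — an untested sketch that uses the rep.orchestrator.step() call you already have in add_camera() as the explicit sync point before reading the annotators:

                    # ... teleport camera / object / robot as before ...

                    # Step explicitly so the frame for the poses set above is rendered
                    # and the annotator buffers are updated, then read them.
                    rep.orchestrator.step()
                    sim_pcd = self.get_pcd()
                    sim_rgb = self.get_rgb()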