Hi everyone,
My stereo camera is running through DeepStream independently. But when I try to publish my live camera image data on the ROS 2 topics stereo/left/image_raw and stereo/right/image_raw from DeepStream, `ros2 topic info` reports a publisher count of 1, yet when I echo the topic the terminal stays blank and nothing comes through. Where am I going wrong? Any guidance would be highly appreciated. Thank you in advance.
Hi
Could you show us your pipeline and how you are publishing to ROS?
Regards,
Allan Navarro
Embedded SW Engineer at RidgeRun
Contact us: support@ridgerun.com
Developers wiki: https://developer.ridgerun.com/
Website: www.ridgerun.com
Hi @allan.navarro,
Sorry for the delay, I got held up with some personal matters. Thank you for your reply. There was a problem in my pipeline; I was able to fix it, and it is now running and publishing the image data on the respective topics.
But now I'm trying to get a depth map and point cloud in RViz, and I'm unable to. This is my launch .py; the code I attached covers the depth map only, not the point cloud, but I'm hoping to get help with both the depth map and the point cloud.
This is the code:

import sys
import gi
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
import numpy as np
import cv2

gi.require_version('Gst', '1.0')
from gi.repository import GObject, Gst
class DeepStreamROS2(Node):
    def __init__(self):
        super().__init__('deepstream_ros2_node')
        self.bridge = CvBridge()
        # Create publishers for left and right camera images, and depth images
        self.left_image_pub = self.create_publisher(Image, '/left/image_raw', 10)
        self.right_image_pub = self.create_publisher(Image, '/right/image_raw', 10)
        self.depth_image_pub = self.create_publisher(Image, '/depth/image_raw', 10)
        # Initialize GStreamer
        Gst.init(None)
        self.pipeline = self.create_pipeline()
        self.pipeline.set_state(Gst.State.PLAYING)
        self.get_logger().info("DeepStream pipeline started")
        # Stereo block matching parameters
        self.stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
        # Initialize left and right images
        self.left_image = None
        self.right_image = None
    def create_pipeline(self):
        # Create the GStreamer pipeline for CSI cameras with BGR format
        pipeline = Gst.parse_launch(
            'nvarguscamerasrc sensor-id=0 ! video/x-raw(memory:NVMM), format=NV12 ! '
            'nvvidconv ! video/x-raw, format=BGRx ! '
            'videoconvert ! video/x-raw, format=BGR ! '
            'appsink name=left_sink sync=false '
            'nvarguscamerasrc sensor-id=1 ! video/x-raw(memory:NVMM), format=NV12 ! '
            'nvvidconv ! video/x-raw, format=BGRx ! '
            'videoconvert ! video/x-raw, format=BGR ! '
            'appsink name=right_sink sync=false'
        )
        # Get the sinks
        left_sink = pipeline.get_by_name('left_sink')
        right_sink = pipeline.get_by_name('right_sink')
        # Set callbacks for the sinks
        left_sink.set_property('emit-signals', True)
        left_sink.connect('new-sample', self.on_new_sample, 'left')
        right_sink.set_property('emit-signals', True)
        right_sink.connect('new-sample', self.on_new_sample, 'right')
        self.get_logger().info("GStreamer pipeline created successfully with BGR format")
        return pipeline
    def on_new_sample(self, sink, camera_side):
        # Get the sample from the sink
        sample = sink.emit('pull-sample')
        if not sample:
            self.get_logger().error(f'Failed to pull sample from {camera_side} sink')
            return Gst.FlowReturn.ERROR
        # Get buffer and caps
        buf = sample.get_buffer()
        caps = sample.get_caps()
        width = caps.get_structure(0).get_value('width')
        height = caps.get_structure(0).get_value('height')
        # Map the buffer to access pixel data
        buf_size = buf.get_size()
        buf_data = buf.extract_dup(0, buf_size)
        # Log dimensions and buffer size for debugging
        self.get_logger().info(f"{camera_side.capitalize()} - Width: {width}, Height: {height}, Buffer Size: {buf_size}")
        # Convert raw BGR data to numpy array
        bgr_image = np.frombuffer(buf_data, np.uint8).reshape((height, width, 3))
        # Convert to grayscale for disparity calculation
        gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
        # Publish the image based on the camera side
        if camera_side == 'left':
            self.left_image_pub.publish(self.bridge.cv2_to_imgmsg(bgr_image, encoding="bgr8"))
            self.get_logger().info('Published left image')
            self.left_image = gray_image  # Save left image for disparity calculation
        else:
            self.right_image_pub.publish(self.bridge.cv2_to_imgmsg(bgr_image, encoding="bgr8"))
            self.get_logger().info('Published right image')
            self.right_image = gray_image  # Save right image for disparity calculation
        # Calculate disparity map if both images are available
        if self.left_image is not None and self.right_image is not None:
            disparity_map = self.calculate_disparity(self.left_image, self.right_image)
            depth_image = self.convert_disparity_to_depth(disparity_map)
            # Publish depth image
            self.depth_image_pub.publish(self.bridge.cv2_to_imgmsg(depth_image, encoding="32FC1"))
            self.get_logger().info('Published depth image')
        return Gst.FlowReturn.OK
    def calculate_disparity(self, left_img, right_img):
        # Compute disparity map
        disparity_map = self.stereo.compute(left_img, right_img)
        # Normalize the disparity map for visualization
        disparity_map = cv2.normalize(disparity_map, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
        return disparity_map

    def convert_disparity_to_depth(self, disparity_map):
        # Convert disparity map to depth (using a fixed baseline and updated focal length)
        baseline = 0.06  # in meters
        focal_length = 2673.0  # in pixels (updated from 718.0)
        # Depth is computed as (baseline * focal_length) / disparity
        depth_map = (baseline * focal_length) / (disparity_map.astype(np.float32) + 1e-6)  # Add small value to avoid division by zero
        return depth_map
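    # Sanity check for the formula above: with baseline = 0.06 m and
    # focal_length = 2673 px, a true disparity of 80 px gives
    # depth = 0.06 * 2673 / 80 ≈ 2.0 m. Note that StereoBM.compute() returns
    # fixed-point disparity scaled by 16, and calculate_disparity() normalizes it
    # to 0-255, so the values reaching convert_disparity_to_depth() are only
    # relative; for metric depth, divide the raw compute() output by 16.0
    # instead of normalizing it.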
def main(args=None):
    rclpy.init(args=args)
    node = DeepStreamROS2()
    try:
        rclpy.spin(node)
    except KeyboardInterrupt:
        pass
    finally:
        node.pipeline.set_state(Gst.State.NULL)
        rclpy.shutdown()

if __name__ == '__main__':
    main()
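For the point cloud part, this is roughly what I was planning to add on top of the depth image. It is only a sketch: it assumes rectified images, a metric depth image in meters (not the normalized one), the same focal length as above, and the principal point at the image center. depth_to_pointcloud_msg is just a helper name I made up, and create_cloud_xyz32 comes from the sensor_msgs_py package that ships with ROS 2.

import numpy as np
from std_msgs.msg import Header
from sensor_msgs_py import point_cloud2

def depth_to_pointcloud_msg(depth_m, header, focal_length=2673.0,
                            cx=None, cy=None, max_depth=10.0):
    # Back-project a metric depth image (float32, meters) into a PointCloud2
    # using the pinhole model: Z = depth, X = (u - cx) * Z / f, Y = (v - cy) * Z / f
    h, w = depth_m.shape
    cx = w / 2.0 if cx is None else cx  # assume principal point at the image center
    cy = h / 2.0 if cy is None else cy
    u, v = np.meshgrid(np.arange(w), np.arange(h))
    z = depth_m.astype(np.float32)
    x = (u - cx) * z / focal_length
    y = (v - cy) * z / focal_length
    points = np.stack((x, y, z), axis=-1).reshape(-1, 3).astype(np.float32)
    # Drop non-finite and far-away points so RViz is not flooded
    points = points[np.isfinite(points).all(axis=1) & (points[:, 2] < max_depth)]
    return point_cloud2.create_cloud_xyz32(header, points)

In on_new_sample() I would then publish this on an extra PointCloud2 publisher, something like self.pointcloud_pub.publish(depth_to_pointcloud_msg(depth_image, Header(stamp=self.get_clock().now().to_msg(), frame_id='left_camera'))), and set the RViz Fixed Frame to the same frame_id. Does that look like the right approach, or is there a better way to do it from the DeepStream side?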
Hi @amarnath.m
After a quick parse of the code it makes sense to me. Could you tell me a little more about the issue you are seeing?
Regards,
Allan Navarro
Embedded SW Engineer at RidgeRun
Contact us: support@ridgerun.com
Developers wiki: https://developer.ridgerun.com/
Website: www.ridgerun.com