Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU): GPU
• DeepStream Version: 6.2
• Language: Python
I get a segmentation fault when extracting frames from a multi-stream input pipeline using a pad probe. The complete code to reproduce the issue is attached (see test.py).

The probe code is as follows:
```python
import sys

import cv2
import numpy as np
import pyds
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst


def osd_sink_pad_buffer_probe(pad, info, u_data):
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        return Gst.PadProbeReturn.OK

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        print("iterating frames..............")
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        l_obj = frame_meta.obj_meta_list

        # Map the frame surface and take a host copy, converting RGBA -> BGR.
        n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
        frame_copy = np.array(n_frame, copy=True, order='C')
        frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGR)

        # Walk the object metadata list (nothing is done with it yet).
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        try:
            l_frame = l_frame.next
        except StopIteration:
            break

    return Gst.PadProbeReturn.OK
```
The pipeline code is as follows:
```python
Gst.init(None)
pipeline = Gst.Pipeline()

video_list = ['file:///workdir/test.h264',
              'file:///workdir/test.h264']

streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
streammux.set_property("batched-push-timeout", 25000)
streammux.set_property("batch-size", 30)
streammux.set_property("gpu_id", GPU_ID)
streammux.set_property("live-source", 1)  # need to check
pipeline.add(streammux)

for id, uri in enumerate(video_list):
    print("Creating source_bin ", uri, " \n ")
    # Create a source bin for each URI and add it to the pipeline
    source_bin = create_uridecode_bin((id, uri))
    if not source_bin:
        sys.stderr.write("Failed to create source bin. Exiting. \n")
        sys.exit(1)
    pipeline.add(source_bin)

pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
sink = Gst.ElementFactory.make("fakesink", "fakesink")

streammux.set_property('width', 1920)
streammux.set_property('height', 1080)
pgie.set_property('config-file-path', "model_config.txt")
pgie.set_property("gpu_id", GPU_ID)
pgie.set_property("batch-size", MAX_SOURCE)
nvvidconv.set_property("gpu_id", GPU_ID)
nvosd.set_property("gpu_id", GPU_ID)

print("Adding elements to Pipeline \n")
pipeline.add(pgie)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)

print("Linking elements in the Pipeline \n")
streammux.link(pgie)
pgie.link(nvvidconv)
nvvidconv.link(nvosd)
nvosd.link(sink)
```
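The probe is attached to the sink pad of nvdsosd; a minimal sketch of that step is below (the exact code is in the attached test.py):

```python
# Attach the buffer probe to the nvdsosd sink pad (sketch; see test.py).
osdsinkpad = nvosd.get_static_pad("sink")
if not osdsinkpad:
    sys.stderr.write("Unable to get sink pad of nvosd\n")
osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)
```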
I assume the problem is with the frame extraction:
```python
n_frame = pyds.get_nvds_buf_surface(hash(gst_buffer), frame_meta.batch_id)
frame_copy = np.array(n_frame, copy=True, order='C')
frame_copy = cv2.cvtColor(frame_copy, cv2.COLOR_RGBA2BGR)
```
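For comparison, the deepstream_imagedata-multistream sample converts the batched buffer to RGBA and switches to CUDA unified memory before calling `get_nvds_buf_surface()`; my pipeline above does neither. A minimal sketch of that setup, with illustrative element names (not from test.py):

```python
# Sketch (illustrative names): RGBA + unified-memory setup used by the
# deepstream_imagedata-multistream sample before get_nvds_buf_surface().
nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
filter1.set_property(
    "caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA"))

# On dGPU the surface has to live in unified CUDA memory so it can be
# mapped into a NumPy array.
mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
streammux.set_property("nvbuf-memory-type", mem_type)
nvvidconv1.set_property("nvbuf-memory-type", mem_type)

# ... pgie -> nvvidconv1 -> filter1 -> nvosd, with the probe on a pad
# downstream of filter1.
```

Could the missing RGBA / unified-memory conversion explain the segfault, or is something else wrong?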
test.py (5.8 KB)
Also, what is the most efficient way to extract frames from a multi-stream input pipeline: with the nvstreamdemux plugin, or without it?
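To make the "with demux" option concrete, this is roughly the layout I have in mind (sketch only; element names are illustrative and this branch layout would replace the pgie -> nvvidconv link above):

```python
# Sketch of the nvstreamdemux variant (illustrative names, not from test.py):
# one per-stream branch is attached to a "src_%u" request pad after inference.
demux = Gst.ElementFactory.make("nvstreamdemux", "demuxer")
pipeline.add(demux)
pgie.link(demux)

for i in range(len(video_list)):
    queue = Gst.ElementFactory.make("queue", "queue_%u" % i)
    conv = Gst.ElementFactory.make("nvvideoconvert", "conv_%u" % i)
    branch_sink = Gst.ElementFactory.make("fakesink", "sink_%u" % i)
    for elem in (queue, conv, branch_sink):
        pipeline.add(elem)
    # Request one demux src pad per stream and link the branch to it.
    demux_src = demux.get_request_pad("src_%u" % i)
    demux_src.link(queue.get_static_pad("sink"))
    queue.link(conv)
    conv.link(branch_sink)
```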