This is my configuration
• x86 Machine with dGPU
• DeepStream Version : deepstream-6.3
• TensorRT Version: 8.6.1.
• NVIDIA GPU Driver Version (valid for GPU only) : Driver Version: 535.86.10 CUDA Version: 12.2
• Issue Type: questions
• USE_NEW_NVSTREAMMUX=yes
I have a code that have 2 channels as mp4 files as input. Whenver I am trying to new streammux one of the video is frozen and other video is not even showed.
import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib
from common.bus_call import bus_call
import time
import threading
import os
import pyds
def attach_probe_to_pad(pad):
print("Here\n")
pad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)
def queue_status(delay_queue, i=0):
while True:
time.sleep(1)
print(f"queue:{i}==>current-level-buffers={delay_queue.get_property('current-level-buffers')}")
def make_element(element_name, element_alias):
element = Gst.ElementFactory.make(element_name, element_alias)
if not element:
sys.stderr.write(f"Unable to create {element_name}\n")
return element
def decodebin_pad_added(decodebin, pad, nvvideoconvert):
print("Decodebin pad added")
caps = pad.get_current_caps()
structure_name = caps.get_structure(0).get_name()
if "video" in structure_name:
pad.link(nvvideoconvert.get_static_pad("sink"))
def pgie_src_pad_buffer_probe(pad, info, u_data):
print("Buffer probe called")
gst_buffer = info.get_buffer()
if not gst_buffer:
print("Unable to get GstBuffer")
return Gst.PadProbeReturn.OK
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
if not batch_meta:
print("Unable to get batch meta")
return Gst.PadProbeReturn.OK
l_frame = batch_meta.frame_meta_list
if not l_frame:
print("No frame meta list")
return Gst.PadProbeReturn.OK
while l_frame is not None:
print("Processing frame")
try:
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
if not display_meta:
print("Unable to acquire display meta from pool")
break
circle_params = display_meta.circle_params
circle_index = 0
num_circles = min(len(circle_params), 5) # Ensure we do not exceed the allocated circles
x_start = 100
y_start = 100
radius = 50
spacing = 50
for i in range(num_circles):
circle_params[circle_index].xc = x_start + (radius * 2 + spacing) * i
circle_params[circle_index].yc = y_start
circle_params[circle_index].radius = radius
circle_params[circle_index].circle_color.set(1.0, 0.0, 0.0, 0.5) # Red color
circle_index += 1
display_meta.num_circles = circle_index
pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
except StopIteration:
break
except Exception as e:
print(f"Error processing frame: {e}")
break
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
def main(args):
Gst.init(None)
pipeline = Gst.Pipeline.new("video-pipeline")
# Create elements for the first source
filesrc1 = make_element("filesrc", "filesrc1")
filesrc1.set_property("location", "/opt/nvidia/deepstream/deepstream-6.3/samples/streams/fisheye_dist.mp4")
decodebin1 = make_element("decodebin", "decodebin1")
nvvideoconvert1 = make_element("nvvideoconvert", "nvvideoconvert1")
videorate1 = make_element("videorate", "videorate1")
capsfilter1 = make_element("capsfilter", "capsfilter1")
caps1 = Gst.Caps.from_string("video/x-raw,framerate=25/1")
capsfilter1.set_property("caps", caps1)
nvvideoconvert2 = make_element("nvvideoconvert", "nvvideoconvert2")
capsfilter2 = make_element("capsfilter", "capsfilter2")
caps2 = Gst.Caps.from_string("video/x-raw(memory:NVMM), width=640, height=480")
capsfilter2.set_property("caps", caps2)
streammux = make_element("nvstreammux", "streammux")
if os.environ["USE_NEW_NVSTREAMMUX"] == "yes":
streammux.set_property("config-file-path", "/opt/nvidia/deepstream/deepstream-6.3/sources/src/streammux.txt")
else:
streammux.set_property("batched-push-timeout", 25000)
streammux.set_property("width", 640)
streammux.set_property("height", 480)
streammux.set_property("batch-size", 2)
nvstreamdemux = make_element("nvstreamdemux", "nvstreamdemux")
nvvideoconvert3 = make_element("nvvideoconvert", "nvvideoconvert3")
tee = make_element("tee", "tee")
delay_queue = make_element("queue", "delay_queue")
autovideosink_delay = make_element("autovideosink", "autovideosink_delay")
autovideosink_delay.set_property("sync", True)
# Create elements for the second source
filesrc2 = make_element("filesrc", "filesrc2")
filesrc2.set_property("location", "/opt/nvidia/deepstream/deepstream-6.3/samples/streams/sample_1080p_h264.mp4")
decodebin2 = make_element("decodebin", "decodebin2")
nvvideoconvert4 = make_element("nvvideoconvert", "nvvideoconvert4")
videorate2 = make_element("videorate", "videorate2")
capsfilter3 = make_element("capsfilter", "capsfilter3")
caps3 = Gst.Caps.from_string("video/x-raw,framerate=25/1")
capsfilter3.set_property("caps", caps3)
nvvideoconvert5 = make_element("nvvideoconvert", "nvvideoconvert5")
capsfilter4 = make_element("capsfilter", "capsfilter4")
caps4 = Gst.Caps.from_string("video/x-raw(memory:NVMM), width=640, height=480")
capsfilter4.set_property("caps", caps4)
nvvideoconvert6 = make_element("nvvideoconvert", "nvvideoconvert6")
delay_queue2 = make_element("queue", "delay_queue2")
autovideosink_delay2 = make_element("autovideosink", "autovideosink_delay2")
autovideosink_delay2.set_property("sync", True)
nvdsosd1 = make_element("nvdsosd","nvdsosd1")
nvdsosd2 = make_element("nvdsosd","nvdsosd2")
nvvideoconvert7 = make_element("nvvideoconvert", "nvvideoconvert7")
nvvideoconvert8 = make_element("nvvideoconvert", "nvvideoconvert8")
# Add elements to the pipeline
elements = [
filesrc1, decodebin1, nvvideoconvert1, videorate1, capsfilter1, nvvideoconvert2, capsfilter2,
filesrc2, decodebin2, nvvideoconvert4, videorate2, capsfilter3, nvvideoconvert5, capsfilter4,
streammux, nvstreamdemux, nvvideoconvert3, tee, delay_queue, autovideosink_delay,
nvvideoconvert6, delay_queue2, autovideosink_delay2,nvdsosd1,nvdsosd2,nvvideoconvert7,nvvideoconvert8
]
for element in elements:
pipeline.add(element)
# Link the first source elements
filesrc1.link(decodebin1)
def decodebin_pad_added1(decodebin, pad):
print("Decodebin pad added for source 1")
caps = pad.get_current_caps()
structure_name = caps.get_structure(0).get_name()
if "video" in structure_name:
pad.link(nvvideoconvert1.get_static_pad("sink"))
decodebin1.connect("pad-added", decodebin_pad_added1)
nvvideoconvert1.link(videorate1)
videorate1.link(capsfilter1)
capsfilter1.link(nvvideoconvert2)
nvvideoconvert2.link(capsfilter2)
capsfilter2_pad = capsfilter2.get_static_pad("src")
streammux_pad1 = streammux.get_request_pad("sink_0")
capsfilter2_pad.link(streammux_pad1)
# Link the second source elements
filesrc2.link(decodebin2)
def decodebin_pad_added2(decodebin, pad):
print("Decodebin pad added for source 2")
caps = pad.get_current_caps()
structure_name = caps.get_structure(0).get_name()
if "video" in structure_name:
pad.link(nvvideoconvert4.get_static_pad("sink"))
decodebin2.connect("pad-added", decodebin_pad_added2)
nvvideoconvert4.link(videorate2)
videorate2.link(capsfilter3)
capsfilter3.link(nvvideoconvert5)
nvvideoconvert5.link(capsfilter4)
capsfilter4_pad = capsfilter4.get_static_pad("src")
streammux_pad2 = streammux.get_request_pad("sink_1")
capsfilter4_pad.link(streammux_pad2)
# Link streammux to nvstreamdemux
# Link streammux to nvstreamdemux
queue_before_demux = make_element("queue", "queue_before_demux")
pgie = make_element("nvinferserver", "primary-inference")
pgie.set_property("config-file-path", "dstest1_pgie_inferserver_config.txt")
pipeline.add(queue_before_demux)
# pipeline.add(pgie)
streammux.link(queue_before_demux)
# queue_before_demux.link(pgie)
# pgie.link(nvstreamdemux)
queue_before_demux.link(nvstreamdemux)
# Attach the probe to the pad of the first stream
# attach_probe_to_pad(demuxsrcpad1)
pgie_src_pad = pgie.get_static_pad("src")
if not pgie_src_pad:
sys.stderr.write(" Unable to get src pad \n")
else:
pgie_src_pad.add_probe(Gst.PadProbeType.BUFFER, pgie_src_pad_buffer_probe, 0)
# Link nvstreamdemux -> nvvideoconvert3 -> delay_queue -> autovideosink_delay
# Link nvstreamdemux -> nvvideoconvert3 -> nvdsosd -> delay_queue -> nvvideoconvert7 -> autovideosink_delay
demuxsrcpad1 = nvstreamdemux.get_request_pad("src_0")
if not demuxsrcpad1:
sys.stderr.write("Unable to create demux src pad 1\n")
return
queuesinkpad1 = nvvideoconvert3.get_static_pad("sink")
if not queuesinkpad1:
sys.stderr.write("Unable to create queue sink pad 1\n")
return
demuxsrcpad1.link(queuesinkpad1)
nvvideoconvert3.link(nvdsosd1)
nvdsosd1.link(delay_queue)
delay_queue.link(nvvideoconvert7)
nvvideoconvert7.link(autovideosink_delay)
# Link nvstreamdemux -> nvvideoconvert6 -> nvdsosd -> delay_queue2 -> nvvideoconvert8 -> autovideosink_delay2
demuxsrcpad2 = nvstreamdemux.get_request_pad("src_1")
if not demuxsrcpad2:
sys.stderr.write("Unable to create demux src pad 2\n")
return
queuesinkpad2 = nvvideoconvert6.get_static_pad("sink")
if not queuesinkpad2:
sys.stderr.write("Unable to create queue sink pad 2\n")
return
demuxsrcpad2.link(queuesinkpad2)
nvvideoconvert6.link(nvdsosd2)
nvdsosd2.link(delay_queue2)
delay_queue2.link(nvvideoconvert8)
nvvideoconvert8.link(autovideosink_delay2)
# Attach the probe to the pad of the second stream
# attach_probe_to_pad(demuxsrcpad2)
# Create an event loop and feed GStreamer bus messages to it
loop = GLib.MainLoop()
bus = pipeline.get_bus()
bus.add_signal_watch()
bus.connect("message", bus_call, loop)
pipeline.set_state(Gst.State.PAUSED)
# Start the pipeline
print("Now playing...")
print("Starting pipeline\n")
pipeline.set_state(Gst.State.PLAYING)
try:
loop.run()
except Exception as e:
print(f"Error: {e}")
# Cleanup
print("Exiting app\n")
pipeline.set_state(Gst.State.NULL)
if __name__ == '__main__':
sys.exit(main(sys.argv))
in the old streammux it is working. Please help me to fix the issue, also let us know how it can be mitigated.