`nvmultiurisrcbin` : segmentation fault and major plugin limitations

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) dGPU
• DeepStream Version 7.1
• JetPack Version (valid for Jetson only)
• TensorRT Version 10.3.0.26
• NVIDIA GPU Driver Version (valid for GPU only) 560.35.03
• Issue Type( questions, new requirements, bugs) bug
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing) Please run the following script with an RTSP input.


import sys
import gi
import random
import pyds

gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

# Initialize GStreamer
Gst.init(None)

# Global Constants
GPU_ID = 0
MAX_SOURCES = 1

# Global Variables
pipeline = None
multiurisrcbin = None
nvvidconv = None
nvstreammux = None
tiler = None
sink = None
loop = None

def create_sink(sink_type="fakesink"):  # sink_type is currently unused; only a fakesink is created
    print("Got in create sink")
    global pipeline
    fakesink = Gst.ElementFactory.make("fakesink", "sink")
    if not fakesink:
        sys.stderr.write("Unable to create fakesink\n")
        sys.exit(1)
    
    fakesink.set_property("sync", False)  # Prevents buffer blocking
    fakesink.set_property("async", False)  # Ensures smooth handling
    pipeline.add(fakesink)
    return fakesink

def on_multiurisrcbin_child_added(child_proxy, child, name):
    """
    Recursively connects to child-added on any newly created bins,
    until we find the actual rtspsrc. Then we connect on_rtspsrc_pad_added.
    """
    print(f"[child-added] {name} ({child.get_name()}) -> {child}")

    # If this child is itself a Bin, connect recursively
    if isinstance(child, Gst.Bin):
        child.connect("child-added", on_multiurisrcbin_child_added)

    # Check if this child is an actual rtspsrc
    # Either by name or by checking child.get_factory().get_name()
    factory = child.get_factory()
    factory_name = factory.get_name() if factory else ""
    if "rtspsrc" in name.lower() or "rtspsrc" in factory_name.lower():
        print("   -> Detected RTSP child, connecting pad-added callback.")

        # Keep enforcing TCP inside the bin's elements
        # Force 'rtspsrc' itself to use TCP
        child.set_property("protocols", 4)  # 4 means TCP
        # If your stream needs a certain latency
        child.set_property("latency", 200)
        # Possibly disable retransmissions, if that helps
        if child.list_properties():
            if any(p.name == "do-retransmission" for p in child.list_properties()):
                child.set_property("do-retransmission", False)
            if any(p.name == "do-rtcp" for p in child.list_properties()):
                child.set_property("do-rtcp", False)
        # TCP enforcement end
        

        child.connect("pad-added", on_rtspsrc_pad_added)

def on_rtspsrc_pad_added(rtspsrc, new_pad):
    caps = new_pad.get_current_caps()
    if not caps or caps.get_size() < 1:
        return

    structure = caps.get_structure(0)
    pad_type = structure.get_name()
    print(f"[rtspsrc pad-added] {new_pad.get_name()} : {pad_type}")

    # If it’s RTCP or an unwanted audio pad, link to fakesink
    if pad_type.startswith("application/x-rtcp") or pad_type.startswith("audio/"):
        fakesink = Gst.ElementFactory.make("fakesink", None)
        pipeline.add(fakesink)
        fakesink.sync_state_with_parent()

        sinkpad = fakesink.get_static_pad("sink")
        if sinkpad and new_pad.link(sinkpad) == Gst.PadLinkReturn.OK:
            print(f"Linked {pad_type} pad to fakesink.")
        else:
            print(f"Failed to link {pad_type} pad to fakesink.")

    else:
        # The main video pad is likely handled internally by nvmultiurisrcbin;
        # custom linking could be done here if needed.
        print(f"Likely the main video pad: {pad_type} (no action needed).")

def on_pad_added(src, new_pad):
    """Handles dynamic pad linking from nvmultiurisrcbin to nvstreammux."""
    print(f"New pad created: {new_pad.get_name()} from {src.get_name()}")
    caps = new_pad.query_caps(None)
    print(f"New Pad Capabilities: {caps.to_string()}")

    # breakpoint()  # debugging stop; disabled so the repro runs unattended

    # Find an available sink pad dynamically
    for i in range(MAX_SOURCES):
        pad_name = f"sink_{i}"
        sink_pad = nvstreammux.get_request_pad(pad_name)
        if sink_pad and not sink_pad.is_linked():
            if new_pad.link(sink_pad) == Gst.PadLinkReturn.OK:
                print(f"Linked {new_pad.get_name()} to {sink_pad.get_name()}")
                return
            else:
                print(f"Failed to link {new_pad.get_name()} to {sink_pad.get_name()}")
    
    print("No available sink pads in nvstreammux")

def bus_call(bus, message, loop):
    """Handles GStreamer bus messages."""
    if message.type == Gst.MessageType.EOS:
        print("End of stream")
        stop_pipeline()
        loop.quit()
    elif message.type == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        print(f"Error: {err}: {debug}")
        stop_pipeline()
        loop.quit()
    return True

def stop_pipeline():
    """Stops the GStreamer pipeline cleanly."""
    global pipeline
    if pipeline:
        print("Stopping pipeline...")
        pipeline.set_state(Gst.State.NULL)
        bus = pipeline.get_bus()
        if bus:
            bus.remove_signal_watch()
        # PyGObject manages GObject refcounts; an explicit unref() here risks
        # a double-free, so we simply drop our reference.
        pipeline = None
        print("Pipeline set to NULL state and resources released.")

def main(args):
    global pipeline, multiurisrcbin, nvvidconv, nvstreammux, tiler, sink, loop

    if len(args) != 2:
        sys.stderr.write(f"Usage: {args[0]} <RTSP-URI>\n")
        sys.exit(1)

    uri = args[1]

    # Create Pipeline
    pipeline = Gst.Pipeline()
    if not pipeline:
        sys.stderr.write("Unable to create Pipeline\n")
        sys.exit(1)

    # Create Multi-Source Bin
    multiurisrcbin = Gst.ElementFactory.make("nvmultiurisrcbin", "multiurisrcbin")
    if not multiurisrcbin:
        sys.stderr.write("Unable to create nvmultiurisrcbin\n")
        sys.exit(1)

    # Set properties
    multiurisrcbin.set_property("uri-list", uri)
    multiurisrcbin.set_property("sensor-id-list", "0")  # If DeepStream expects a string
    multiurisrcbin.set_property("live-source", 1)
    multiurisrcbin.set_property("drop-pipeline-eos", 1)
    multiurisrcbin.set_property("max-batch-size", MAX_SOURCES)
    multiurisrcbin.set_property("gpu-id", GPU_ID)
    multiurisrcbin.set_property("width", 640)
    multiurisrcbin.set_property("height", 320)
    multiurisrcbin.set_property("rtsp-reconnect-interval", 10)
    multiurisrcbin.set_property("rtsp-reconnect-attempts", 5)
    multiurisrcbin.set_property("disable-audio", True)
    multiurisrcbin.set_property("mode", 0)

    # multiurisrcbin.set_property("select-rtp-protocol", 4) # 0: UDP + UDP Multicast + TCP // 4: TCP 
    multiurisrcbin.set_property("udp-buffer-size", 65536) # or else results in this situation `Could not create a buffer of requested 524288 bytes (Operation not permitted). Need net.admin privilege?`

    multiurisrcbin.connect("child-added", on_multiurisrcbin_child_added)
    multiurisrcbin.connect("pad-added", on_pad_added)

    pipeline.add(multiurisrcbin)

    # Create Video Converter (Ensures NVMM format for nvstreammux)
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "nvvidconv")
    if not nvvidconv:
        sys.stderr.write("Unable to create nvvideoconvert\n")
        sys.exit(1)
    pipeline.add(nvvidconv)

    # Create Stream Muxer
    nvstreammux = Gst.ElementFactory.make("nvstreammux", "nvstreammux")
    if not nvstreammux:
        sys.stderr.write("Unable to create nvstreammux\n")
        sys.exit(1)
    
    # Set streammux properties
    nvstreammux.set_property("batch-size", 1)
    nvstreammux.set_property("width", 640)
    nvstreammux.set_property("height", 320)
    nvstreammux.set_property("live-source", 1)
    nvstreammux.set_property("batched-push-timeout", 40000)  # Avoids buffer timeout issues
    nvstreammux.set_property("attach-sys-ts", 1)  # Ensures timestamp consistency

    pipeline.add(nvstreammux)

    # Create Tiler
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "tiler")
    if not tiler:
        sys.stderr.write("Unable to create tiler\n")
        sys.exit(1)

    tiler.set_property("rows", 1)
    tiler.set_property("columns", 1)
    tiler.set_property("width", 640)
    tiler.set_property("height", 320)
    
    pipeline.add(tiler)

    # Create Sink
    sink = create_sink(sink_type="fakesink")

    # Link Elements
    nvstreammux.link(nvvidconv)
    nvvidconv.link(tiler)
    tiler.link(sink)


    # Event Loop (created before the bus watch, so bus_call receives a valid
    # loop rather than the None the global still held at connect time)
    loop = GLib.MainLoop()

    # Set up Bus
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Start Pipeline
    pipeline.set_state(Gst.State.READY)
    Gst.debug_bin_to_dot_file(pipeline, Gst.DebugGraphDetails.ALL, "before_playing")
    pipeline.set_state(Gst.State.PAUSED)  # Allow RTSP to establish
    pipeline.set_state(Gst.State.PLAYING)

    print("Before running the loop.")

    try:
        loop.run()
    except KeyboardInterrupt:
        print("KeyboardInterrupt received, stopping...")
    finally:
        stop_pipeline()
        print("Exiting gracefully.")

if __name__ == '__main__':
    sys.exit(main(sys.argv))


• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)

The issue is with the nvmultiurisrcbin plugin. In the newest versions of DeepStream this plugin is no longer marked as alpha, which is surprising given all of the following complications:

  1. Running a minimal pipeline like the one above results in a segfault, and higher GST_DEBUG levels provide nothing useful.
  2. Once the above issue was “fixed”, I got an even stranger error from udpsrc0 indicating an unlinked pad. Since nvmultiurisrcbin only errors out this way when the input is an RTSP stream, I suspect an RTCP or audio substream remains unhandled inside the nvmultiurisrcbin abstraction.
  3. The plugin’s default udp-buffer-size triggers a warning. Unless it is set to something less than the 524288-byte default, udpsrc warns that it needs net.admin privilege, followed by the usual “not linked” error (see the snippet after this list).
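For issue 3 specifically, the workaround is the single property set already used in the script above; keeping the requested buffer below the 524288-byte default means udpsrc never asks the kernel for a privileged buffer size. A minimal sketch:

# Keep the requested UDP receive buffer under the kernel cap so that
# udpsrc does not need net.admin privileges; 65536 bytes worked for me.
multiurisrcbin.set_property("udp-buffer-size", 65536)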

These are the debugging steps I have tried so far:

For issue 1, the stack trace showed the crash happening inside GLib’s libproxy usage, specifically in px_proxy_factory_get_proxies(). Disabling system proxy auto-detection (no_proxy="*") or removing (renaming) libgiolibproxy.so entirely resolved the segfault.
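A minimal sketch of the environment-based workaround; I am assuming no_proxy has to be exported before the gi/GStreamer imports so that GIO never consults libproxy:

import os

# Disable system proxy auto-detection before GStreamer/GIO initializes,
# so libgiolibproxy.so never reaches px_proxy_factory_get_proxies().
os.environ["no_proxy"] = "*"

import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst

Gst.init(None)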

For issue 2:

  • multiurisrcbin.set_property("disable-audio", True)
  • I initially tried hooking multiurisrcbin.connect("pad-added", on_pad_added), but the pad for the RTCP/extra flows (audio?) was never exposed at the bin’s top level; the callback only ever saw a single static “src” pad. The bin evidently does not propagate pad-added signals from its constituent elements, which further testing confirmed.
  • Since nvmultiurisrcbin internally spawns a GstRTSPSrc, I recursively listened for child-added signals and connected a "pad-added" callback on the actual rtspsrc (hence the on_multiurisrcbin_child_added function). I tried linking all the other substreams to a fakesink, but this step didn’t work either, so I suspect the unlinked udpsrc0 is internal to rtpbin.
  • Another approach was to force TCP so that RTP over UDP is bypassed entirely, but this caused its own array of issues. I set the relevant properties on the plugin and on its child elements: select-rtp-protocol (of nvmultiurisrcbin) and protocols (of rtspsrc), as sketched after this list. This was futile, since it led to repeated “Resetting source” reconnect loops, so TCP did not solve the not-linked error. I also tried do-rtcp and do-retransmission on the inner rtspsrc, in vain again.
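For reference, the TCP enforcement amounted to the following property sets (a sketch; rtspsrc here stands for the child element found by on_multiurisrcbin_child_added, and the value 4 selects TCP on both properties, matching rtspsrc’s GstRTSPLowerTrans flags):

# Bin-level setting on nvmultiurisrcbin: 0 = UDP + UDP Multicast + TCP, 4 = TCP
multiurisrcbin.set_property("select-rtp-protocol", 4)

# Applied to the inner rtspsrc from the recursive child-added handler
rtspsrc.set_property("protocols", 4)  # GST_RTSP_LOWER_TRANS_TCP
rtspsrc.set_property("do-rtcp", False)
rtspsrc.set_property("do-retransmission", False)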

Note: I see the same behavior in the DeepStream Docker container.

Important update.

The pipeline I attached above has an obvious major flaw, among others: it includes an explicit nvstreammux, even though nvmultiurisrcbin is a bin that already contains a streammux element.

I tried to build a more minimal pipeline to showcase the segfault, then only slightly tweaked it and didn’t get a segfault at all! It’s very confusing that two almost identical pipelines behave so differently. I suspect that adding an encoder or muxer, as in the second pipeline, shifts the thread timing enough that libproxy doesn’t end up in a race condition.

The segfault pipeline:

import sys
import gi
import pyds
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

def main(args):
    if len(args) != 2:
        sys.stderr.write(f"Usage: {args[0]} <RTSP-URI>\n")
        sys.exit(1)

    uri = args[1]

    # Create top-level pipeline
    pipeline = Gst.Pipeline()

    # 1) Create nvmultiurisrcbin
    multiurisrcbin = Gst.ElementFactory.make("nvmultiurisrcbin", "multiurisrcbin")
    if not multiurisrcbin:
        sys.stderr.write("Unable to create nvmultiurisrcbin\n")
        sys.exit(1)

    # Set nvmultiurisrcbin properties to mimic the CLI usage
    multiurisrcbin.set_property("uri-list", uri)
    multiurisrcbin.set_property("width", 1920)
    multiurisrcbin.set_property("height", 1080)
    multiurisrcbin.set_property("live-source", 1)
    # multiurisrcbin.set_property("drop-pipeline-eos", 1)
    # multiurisrcbin.set_property("max-batch-size", 10)
    # Possibly set batch-push-timeout or other properties if you want to match your CLI exactly:
    # multiurisrcbin.set_property("batched-push-timeout", 33333)

    pipeline.add(multiurisrcbin)

    # 2) Create nvmultistreamtiler
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "nvmultistreamtiler")
    if not tiler:
        sys.stderr.write("Unable to create nvmultistreamtiler\n")
        sys.exit(1)

    pipeline.add(tiler)

    # 3) Create a fakesink
    fakesink = Gst.ElementFactory.make("fakesink", "sink")
    if not fakesink:
        sys.stderr.write("Unable to create fakesink\n")
        sys.exit(1)
    fakesink.set_property("sync", False)
    fakesink.set_property("async", False)
    pipeline.add(fakesink)

    # 4) Link: nvmultiurisrcbin -> nvmultistreamtiler -> fakesink
    # Because nvmultiurisrcbin is a bin, we connect its static src pad to the tiler.
    # Then connect tiler->fakesink.
    # We'll locate the bin's 'src' pad and the tiler's 'sink' pad manually.
    bin_src_pad = multiurisrcbin.get_static_pad("src")
    tiler_sink_pad = tiler.get_static_pad("sink")
    if bin_src_pad and tiler_sink_pad:
        if bin_src_pad.link(tiler_sink_pad) != Gst.PadLinkReturn.OK:
            print("Could not link multiurisrcbin to tiler")
    else:
        print("Could not get src pad from multiurisrcbin or sink pad on tiler")

    if not tiler.link(fakesink):
        print("Could not link tiler to fakesink")

    # 5) Bus for error/EOS handling
    bus = pipeline.get_bus()
    bus.add_signal_watch()

    def bus_call(bus, message, userdata):
        if message.type == Gst.MessageType.ERROR:
            err, dbg = message.parse_error()
            print(f"ERROR: {err} {dbg}")
            pipeline.set_state(Gst.State.NULL)
            loop.quit()
        elif message.type == Gst.MessageType.EOS:
            print("End of stream")
            pipeline.set_state(Gst.State.NULL)
            loop.quit()
        return True

    bus.connect("message", bus_call, None)

    # 6) Start pipeline
    pipeline.set_state(Gst.State.PLAYING)

    # 7) Main loop
    loop = GLib.MainLoop()
    try:
        loop.run()
    except KeyboardInterrupt:
        pass
    finally:
        pipeline.set_state(Gst.State.NULL)
        print("Exiting gracefully.")

if __name__ == "__main__":
    sys.exit(main(sys.argv))

The working pipeline that saves the RTSP stream to a file:

import sys
import gi
gi.require_version('Gst', '1.0')
from gi.repository import Gst, GLib

Gst.init(None)

def main(args):
    if len(args) != 2:
        sys.stderr.write(f"Usage: {args[0]} <RTSP-URI>\n")
        sys.exit(1)

    uri = args[1]

    pipeline = Gst.Pipeline.new("pipeline")

    # 1) Create nvmultiurisrcbin
    multiurisrcbin = Gst.ElementFactory.make("nvmultiurisrcbin", "multiurisrcbin")
    multiurisrcbin.set_property("uri-list", uri)
    multiurisrcbin.set_property("width", 1920)
    multiurisrcbin.set_property("height", 1080)
    multiurisrcbin.set_property("live-source", 1)

    # IMPORTANT: allow EOS to propagate, so qtmux can finalize
    # If you had set drop-pipeline-eos=1, qtmux would never see the EOS to finalize the file
    multiurisrcbin.set_property("drop-pipeline-eos", 0)

    pipeline.add(multiurisrcbin)

    # 2) Tiler
    tiler = Gst.ElementFactory.make("nvmultistreamtiler", "tiler")
    pipeline.add(tiler)

    # 3) Encoder, parse, mux, filesink
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", "encoder")
    encoder.set_property("bitrate", 4000000)
    pipeline.add(encoder)

    parser = Gst.ElementFactory.make("h264parse", "parser")
    pipeline.add(parser)

    muxer = Gst.ElementFactory.make("qtmux", "muxer")
    pipeline.add(muxer)

    filesink = Gst.ElementFactory.make("filesink", "filesink")
    filesink.set_property("location", "output.mp4")
    # For real-time / live pipelines, it's common to set sync=false
    filesink.set_property("sync", False)
    pipeline.add(filesink)

    # 4) Link them manually:
    # nvmultiurisrcbin has a static "src" pad
    bin_src_pad = multiurisrcbin.get_static_pad("src")
    tiler_sink_pad = tiler.get_static_pad("sink")
    if bin_src_pad and tiler_sink_pad:
        if bin_src_pad.link(tiler_sink_pad) != Gst.PadLinkReturn.OK:
            print("Could not link multiurisrcbin -> tiler")
    else:
        print("Could not get src pad or sink pad for linking")

    if not tiler.link(encoder):
        print("Could not link tiler -> encoder")
    if not encoder.link(parser):
        print("Could not link encoder -> parser")
    if not parser.link(muxer):
        print("Could not link parser -> muxer")
    if not muxer.link(filesink):
        print("Could not link muxer -> filesink")

    # 5) Bus to catch errors and EOS
    bus = pipeline.get_bus()
    bus.add_signal_watch()

    def on_message(bus, message, user_data):
        if message.type == Gst.MessageType.ERROR:
            err, dbg = message.parse_error()
            print(f"ERROR: {err} {dbg}")
            pipeline.set_state(Gst.State.NULL)
            loop.quit()
        elif message.type == Gst.MessageType.EOS:
            print("Received EOS; finalizing file and shutting down.")
            pipeline.set_state(Gst.State.NULL)
            loop.quit()
        return True

    bus.connect("message", on_message, None)

    # 6) Start the pipeline
    ret = pipeline.set_state(Gst.State.PLAYING)
    if ret == Gst.StateChangeReturn.FAILURE:
        print("Failed to start pipeline")
        return

    # 7) Event loop
    loop = GLib.MainLoop()

    try:
        loop.run()
    except KeyboardInterrupt:
        # First Ctrl+C: send EOS event to let qtmux finalize file
        print("KeyboardInterrupt: sending EOS so qtmux can finalize MP4. Press Ctrl+C again to force exit.")
        pipeline.send_event(Gst.Event.new_eos())

        # We'll keep running the same mainloop to let the pipeline post EOS or error
        try:
            loop.run()
        except KeyboardInterrupt:
            # Second Ctrl+C: force immediate quit
            print("Force-exiting without waiting for final EOS.")
            pass
    finally:
        pipeline.set_state(Gst.State.NULL)
        print("Pipeline set to NULL. Exiting gracefully.")

if __name__ == "__main__":
    sys.exit(main(sys.argv))

Could you try adding a queue element to your segfault pipeline?
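For reference, a minimal sketch of that suggestion applied to the segfault pipeline above (a queue decoupling nvmultiurisrcbin from the tiler; untested here):

# nvmultiurisrcbin -> queue -> nvmultistreamtiler -> fakesink
queue = Gst.ElementFactory.make("queue", "queue")
pipeline.add(queue)

bin_src_pad = multiurisrcbin.get_static_pad("src")
queue_sink_pad = queue.get_static_pad("sink")
if bin_src_pad and queue_sink_pad:
    if bin_src_pad.link(queue_sink_pad) != Gst.PadLinkReturn.OK:
        print("Could not link multiurisrcbin -> queue")
if not queue.link(tiler):
    print("Could not link queue -> tiler")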