Multiple RTSP streams as input working, but multiple HLS streams as output not working

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) GPU
• DeepStream Version 6.4
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs) Question
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details

I have a pipeline where multiple input streams are currently being tiled into a single output HLS stream.
I want to output a separate HLS stream for each input stream.
I am trying to demux, but it isn't working.

# Per-stream sink-branch element lists (the forum paste dropped the "[]"
# literals, which makes these lines a syntax error as posted).
hlssinks = []
h264parsers = []
muxers = []
encoders = []
sink_queues = []

# Build one encode -> parse -> mux -> hlssink branch per input source.
for i in range(number_sources):
    # Create encoder for this stream
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", f"encoder_{i}")
    if not encoder:
        sys.stderr.write(f" Unable to create encoder {i}\n")
    encoder.set_property("bitrate", 1800000)
    if is_aarch64():
        # NOTE(review): on Jetson, "preset-level" is an enum-valued property;
        # confirm the string "FastPreset" is accepted by this plugin version.
        encoder.set_property("preset-level", "FastPreset")
    else:
        encoder.set_property("preset-id", 2)
    encoders.append(encoder)

    # Create H264 parser (needed between encoder and the TS muxer)
    h264parser = Gst.ElementFactory.make("h264parse", f"h264-parser_{i}")
    if not h264parser:
        sys.stderr.write(f" Unable to create h264 parser {i}\n")
    h264parsers.append(h264parser)

    # Create MPEG-TS muxer (HLS segments are .ts containers)
    muxer = Gst.ElementFactory.make("mpegtsmux", f"muxer_{i}")
    if not muxer:
        sys.stderr.write(f" Unable to create muxer {i}\n")
    muxers.append(muxer)

    # Create HLS sink
    hlssink = Gst.ElementFactory.make("hlssink", f"hlssink_{i}")
    if not hlssink:
        sys.stderr.write(f" Unable to create HLS sink {i}\n")

    # Configure HLS sink for this stream; each stream writes to its own
    # directory (the directory must already exist — hlssink will not create it).
    hlssink.set_property('location', f'/home/a2i/Desktop/StreamAnalysis/stream_{i}/segment%05d.ts')
    hlssink.set_property('playlist-location', f'/home/a2i/Desktop/StreamAnalysis/stream_{i}/playlist.m3u8')
    hlssink.set_property('target-duration', 5)
    hlssink.set_property('max-files', 20)
    hlssinks.append(hlssink)

    # Create the queues that decouple the branch stages
    q1 = Gst.ElementFactory.make("queue", f"sink_queue1_{i}")
    q2 = Gst.ElementFactory.make("queue", f"sink_queue2_{i}")
    q3 = Gst.ElementFactory.make("queue", f"sink_queue3_{i}")
    sink_queues.append([q1, q2, q3])

    # Add all branch elements to the pipeline
    pipeline.add(encoder)
    pipeline.add(h264parser)
    pipeline.add(muxer)
    pipeline.add(hlssink)
    pipeline.add(q1)
    pipeline.add(q2)
    pipeline.add(q3)
and here is how i link them:

# Straight quotes restored — the curly quotes in the forum paste are a
# Python syntax error; the string content is unchanged.
print("Adding elements to Pipeline \n")

# Add the shared (pre-demux) processing elements to the pipeline.
pipeline.add(pgie)
pipeline.add(tracker)
pipeline.add(nvanalytics)
pipeline.add(sgie1)
pipeline.add(sgie2)
pipeline.add(sgie3)
pipeline.add(tiler)
pipeline.add(nvvidconv)
pipeline.add(filter1)
pipeline.add(nvvidconv1)
pipeline.add(nvosd)
pipeline.add(capsfilter_osd)
pipeline.add(streamdemux)



print("Linking elements in the Pipeline \n")
if args[3] == '0':            # ROI disable
    streammux.link(pgie)

    # Sequential Processing:
    # pgie -> tracker -> sgie3 -> sgie1 -> sgie2

    # Primary Inference
    pgie.link(queue1)
    queue1.link(tracker)

    # Vehicle Type Detection after tracker
    tracker.link(queue2)
    queue2.link(sgie3)

    # License Plate Detection after Vehicle Type Detection
    sgie3.link(queue3)
    queue3.link(sgie1)

    # License Plate Recognition after License Plate Detection
    sgie1.link(queue4)
    queue4.link(sgie2)

    # Continue with the rest of the pipeline
    sgie2.link(queue5)
    queue5.link(nvvidconv1)
    nvvidconv1.link(queue6)
    queue6.link(filter1)
    filter1.link(queue7)

    # OSD chain (note: despite the earlier comment, the tiler is never
    # linked on this path — batched buffers flow straight to the demuxer)
    queue7.link(nvosd)
    nvosd.link(queue9)
    queue9.link(nvvidconv)
    nvvidconv.link(queue10)
    queue10.link(capsfilter_osd)
    capsfilter_osd.link(queue11)
    queue11.link(streamdemux)

    # Link each demuxed stream to its own HLS sink branch
    for i in range(number_sources):
        # request_pad_simple() replaces the deprecated get_request_pad()
        # (the old name is removed in newer GStreamer Python bindings;
        # the author's corrected version below already uses it).
        srcpad_name = f"src_{i}"
        srcpad = streamdemux.request_pad_simple(srcpad_name)
        if not srcpad:
            sys.stderr.write(f" Unable to get source pad {i} from demuxer\n")
            continue

        # NOTE(review): this branch feeds nvstreamdemux output straight into
        # nvv4l2h264enc with no per-stream nvvideoconvert + capsfilter in
        # between — the likely reason this version did not work; the
        # corrected pipeline later in the post inserts those elements.
        srcpad.link(sink_queues[i][0].get_static_pad("sink"))
        sink_queues[i][0].link(encoders[i])
        encoders[i].link(sink_queues[i][1])
        sink_queues[i][1].link(h264parsers[i])
        h264parsers[i].link(sink_queues[i][2])
        sink_queues[i][2].link(muxers[i])
        muxers[i].link(hlssinks[i])

I corrected my pipeline, and it works now:

# One dict of elements per input stream (the forum paste dropped the "[]"
# literal, which makes this line a syntax error as posted).
stream_elements = []

# Create separate processing chains for each stream:
# queue -> nvvideoconvert -> nvdsosd -> capsfilter -> encoder -> parser
# -> mpegtsmux -> hlssink
for i in range(number_sources):
    elements = {}  # Dictionary to store elements for this stream

    # Per-stream queue (decouples this branch from the demuxer)
    queue = Gst.ElementFactory.make("queue", f"queue_{i}")
    if not queue:
        sys.stderr.write(f" Unable to create queue_{i}\n")
    pipeline.add(queue)
    elements['queue'] = queue

    # Video convert for this stream (required between nvstreamdemux and
    # the downstream OSD/encoder elements)
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", f"nvvidconv_{i}")
    if not nvvidconv:
        sys.stderr.write(f" Unable to create nvvidconv_{i}\n")
    pipeline.add(nvvidconv)
    elements['nvvidconv'] = nvvidconv

    # OSD for this stream (draws bounding boxes/text per stream)
    nvosd = Gst.ElementFactory.make("nvdsosd", f"nvosd_{i}")
    if not nvosd:
        sys.stderr.write(f" Unable to create nvosd_{i}\n")
    nvosd.set_property('process-mode', 1)
    nvosd.set_property('display-text', 1)
    pipeline.add(nvosd)
    elements['nvosd'] = nvosd

    # Capsfilter fixing the encoder input resolution
    capsfilter = Gst.ElementFactory.make("capsfilter", f"capsfilter_{i}")
    if not capsfilter:
        sys.stderr.write(f" Unable to create capsfilter_{i}\n")
    caps = Gst.Caps.from_string("video/x-raw(memory:NVMM), width=1280, height=720")
    capsfilter.set_property("caps", caps)
    pipeline.add(capsfilter)
    elements['capsfilter'] = capsfilter

    # Hardware H.264 encoder for this stream
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", f"encoder_{i}")
    if not encoder:
        sys.stderr.write(f" Unable to create encoder_{i}\n")
    encoder.set_property("bitrate", 1800000)
    if is_aarch64():
        # NOTE(review): on Jetson, "preset-level" is an enum-valued property;
        # confirm the string "FastPreset" is accepted by this plugin version.
        encoder.set_property("preset-level", "FastPreset")
    else:
        encoder.set_property("preset-id", 2)
    pipeline.add(encoder)
    elements['encoder'] = encoder

    # H264 parser for this stream (between encoder and TS muxer)
    h264parser = Gst.ElementFactory.make("h264parse", f"h264parser_{i}")
    if not h264parser:
        sys.stderr.write(f" Unable to create h264parser_{i}\n")
    pipeline.add(h264parser)
    elements['h264parser'] = h264parser

    # MPEG-TS muxer for this stream (HLS segments are .ts containers)
    muxer = Gst.ElementFactory.make("mpegtsmux", f"muxer_{i}")
    if not muxer:
        sys.stderr.write(f" Unable to create muxer_{i}\n")
    pipeline.add(muxer)
    elements['muxer'] = muxer

    # HLS sink for this stream; each stream writes into its own directory
    # (the directory must already exist — hlssink will not create it)
    hlssink = Gst.ElementFactory.make("hlssink", f"hlssink_{i}")
    if not hlssink:
        sys.stderr.write(f" Unable to create hlssink_{i}\n")
    hlssink.set_property('location', f'/home/a2i/Desktop/StreamAnalysis/stream_{i}/segment%05d.ts')
    hlssink.set_property('playlist-location', f'/home/a2i/Desktop/StreamAnalysis/stream_{i}/playlist.m3u8')
    hlssink.set_property('target-duration', 5)
    hlssink.set_property('max-files', 20)
    pipeline.add(hlssink)
    elements['hlssink'] = hlssink

    # Store all elements for this stream
    stream_elements.append(elements)

And while linking, we need to follow this sequence:

# Straight quotes and indentation restored — the forum paste used curly
# quotes (a syntax error) and flattened the body of the "if" block, which
# makes the indented "for" loop below unparseable as posted.
print("Linking elements in the Pipeline \n")
if args[3] == '0':  # ROI disable
    # Shared inference chain, then demux back into per-stream branches:
    # streammux -> pgie -> tracker -> sgie3 -> sgie1 -> sgie2 -> streamdemux
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(sgie3)
    sgie3.link(sgie1)
    sgie1.link(sgie2)
    sgie2.link(streamdemux)

    # Link each demuxed stream to its processing chain
    for i in range(number_sources):
        elements = stream_elements[i]  # Get elements for this stream

        # Request the demuxer source pad for stream i
        padname = f"src_{i}"
        demuxsrcpad = streamdemux.request_pad_simple(padname)
        if not demuxsrcpad:
            sys.stderr.write(f"Unable to create demux src pad {i}\n")
            continue

        # Get queue sink pad
        queuesinkpad = elements['queue'].get_static_pad("sink")
        if not queuesinkpad:
            sys.stderr.write(f"Unable to create queue sink pad {i}\n")
            continue

        # Link the complete chain for this stream:
        # demux -> queue -> convert -> osd -> caps -> encode -> parse
        # -> mux -> hlssink
        demuxsrcpad.link(queuesinkpad)
        elements['queue'].link(elements['nvvidconv'])
        elements['nvvidconv'].link(elements['nvosd'])
        elements['nvosd'].link(elements['capsfilter'])
        elements['capsfilter'].link(elements['encoder'])
        elements['encoder'].link(elements['h264parser'])
        elements['h264parser'].link(elements['muxer'])
        elements['muxer'].link(elements['hlssink'])
1 Like

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.