Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) GPU
• DeepStream Version 6.4
• JetPack Version (valid for Jetson only)
• TensorRT Version
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs) Question
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details
I have a pipeline where multiple input streams are currently tiled into a single output HLS stream.
I want to output a separate HLS stream for each input stream instead.
I am trying to demux the batched output, but it isn't working.
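To make the goal concrete, each demuxed stream should end up in its own branch, roughly like this (simplified sketch for stream 0; pipeline and the streamdemux element (nvstreamdemux) come from my existing setup, and the /tmp paths are just placeholders):

# Intended per-stream branch (sketch):
#   nvstreamdemux src_0 -> queue -> nvv4l2h264enc -> h264parse -> mpegtsmux -> hlssink
queue0 = Gst.ElementFactory.make("queue", "hls_queue_0")
enc0 = Gst.ElementFactory.make("nvv4l2h264enc", "hls_enc_0")
parse0 = Gst.ElementFactory.make("h264parse", "hls_parse_0")
tsmux0 = Gst.ElementFactory.make("mpegtsmux", "hls_tsmux_0")
hls0 = Gst.ElementFactory.make("hlssink", "hls_sink_0")
hls0.set_property("location", "/tmp/stream_0/segment%05d.ts")          # placeholder path
hls0.set_property("playlist-location", "/tmp/stream_0/playlist.m3u8")  # placeholder path
for e in (queue0, enc0, parse0, tsmux0, hls0):
    pipeline.add(e)
demux_src0 = streamdemux.get_request_pad("src_0")
demux_src0.link(queue0.get_static_pad("sink"))
queue0.link(enc0)
enc0.link(parse0)
parse0.link(tsmux0)
tsmux0.link(hls0)

Here is how I currently create the per-stream elements: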
hlssinks = []
h264parsers = []
muxers = []
encoders = []
sink_queues = []
for i in range(number_sources):
    # Create encoder for this stream
    encoder = Gst.ElementFactory.make("nvv4l2h264enc", f"encoder_{i}")
    if not encoder:
        sys.stderr.write(f" Unable to create encoder {i}\n")
    encoder.set_property("bitrate", 1800000)
    if is_aarch64():
        encoder.set_property("preset-level", "FastPreset")
    else:
        encoder.set_property("preset-id", 2)
    encoders.append(encoder)
    # Create H264 parser
    h264parser = Gst.ElementFactory.make("h264parse", f"h264-parser_{i}")
    if not h264parser:
        sys.stderr.write(f" Unable to create h264 parser {i}\n")
    h264parsers.append(h264parser)
    # Create MPEG-TS muxer
    muxer = Gst.ElementFactory.make("mpegtsmux", f"muxer_{i}")
    if not muxer:
        sys.stderr.write(f" Unable to create muxer {i}\n")
    muxers.append(muxer)
    # Create HLS sink
    hlssink = Gst.ElementFactory.make("hlssink", f"hlssink_{i}")
    if not hlssink:
        sys.stderr.write(f" Unable to create HLS sink {i}\n")
    # Configure HLS sink for this stream
    hlssink.set_property('location', f'/home/a2i/Desktop/StreamAnalysis/stream_{i}/segment%05d.ts')
    hlssink.set_property('playlist-location', f'/home/a2i/Desktop/StreamAnalysis/stream_{i}/playlist.m3u8')
    hlssink.set_property('target-duration', 5)
    hlssink.set_property('max-files', 20)
    hlssinks.append(hlssink)
    # Create the queues for this branch
    q1 = Gst.ElementFactory.make("queue", f"sink_queue1_{i}")
    q2 = Gst.ElementFactory.make("queue", f"sink_queue2_{i}")
    q3 = Gst.ElementFactory.make("queue", f"sink_queue3_{i}")
    sink_queues.append([q1, q2, q3])
    # Add elements to pipeline
    pipeline.add(encoder)
    pipeline.add(h264parser)
    pipeline.add(muxer)
    pipeline.add(hlssink)
    pipeline.add(q1)
    pipeline.add(q2)
    pipeline.add(q3)
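One thing I am not sure about: as far as I know, hlssink does not create missing directories itself, so I make sure each per-stream output directory exists before starting the pipeline, roughly like this (the base path is just my local setup):

import os
# Create the per-stream output directories up front; the segment/playlist files
# cannot be written if the target directory is missing.
base_dir = "/home/a2i/Desktop/StreamAnalysis"  # my local path
for i in range(number_sources):
    os.makedirs(os.path.join(base_dir, f"stream_{i}"), exist_ok=True)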
And here is how I link them:
print("Adding elements to Pipeline \n")
pipeline.add(pgie)
pipeline.add(tracker)
pipeline.add(nvanalytics)
pipeline.add(sgie1)
pipeline.add(sgie2)
pipeline.add(sgie3)
pipeline.add(tiler)
pipeline.add(nvvidconv)
pipeline.add(filter1)
pipeline.add(nvvidconv1)
pipeline.add(nvosd)
pipeline.add(capsfilter_osd)
pipeline.add(streamdemux)
print("Linking elements in the Pipeline \n")
if args[3] == '0':  # ROI disable
    streammux.link(pgie)
# Sequential Processing:
# pgie -> tracker -> sgie3 -> sgie1 -> sgie2
# Primary Inference
pgie.link(queue1)
queue1.link(tracker)
# Vehicle Type Detection after tracker
tracker.link(queue2)
queue2.link(sgie3)
# License Plate Detection after Vehicle Type Detection
sgie3.link(queue3)
queue3.link(sgie1)
# License Plate Recognition after License Plate Detection
sgie1.link(queue4)
queue4.link(sgie2)
# Continue with the rest of the pipeline
sgie2.link(queue5)
queue5.link(nvvidconv1)
nvvidconv1.link(queue6)
queue6.link(filter1)
filter1.link(queue7)
# Tiler and Display
queue7.link(nvosd)
nvosd.link(queue9)
queue9.link(nvvidconv)
nvvidconv.link(queue10)
queue10.link(capsfilter_osd)
capsfilter_osd.link(queue11)
queue11.link(streamdemux)
# Link each demuxed stream to its own HLS sink
for i in range(number_sources):
    # Get the source pad from the demuxer for this stream
    srcpad_name = f"src_{i}"
    srcpad = streamdemux.get_request_pad(srcpad_name)
    if not srcpad:
        sys.stderr.write(f" Unable to get source pad {i} from demuxer\n")
        continue
    # Link the processing chain for this stream
    srcpad.link(sink_queues[i][0].get_static_pad("sink"))
    sink_queues[i][0].link(encoders[i])
    encoders[i].link(sink_queues[i][1])
    sink_queues[i][1].link(h264parsers[i])
    h264parsers[i].link(sink_queues[i][2])
    sink_queues[i][2].link(muxers[i])
    muxers[i].link(hlssinks[i])
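To narrow down where it breaks, here is a minimal sketch of how the individual link calls inside the loop above could be checked while debugging (same variables as in the loop; pad.link() returns a Gst.PadLinkReturn and element.link() returns a bool):

# Sketch: replace the unchecked link calls inside the loop with checked ones
ret = srcpad.link(sink_queues[i][0].get_static_pad("sink"))
if ret != Gst.PadLinkReturn.OK:
    sys.stderr.write(f" demux src_{i} -> queue link failed: {ret}\n")
if not sink_queues[i][0].link(encoders[i]):
    sys.stderr.write(f" queue -> encoder link failed for stream {i}\n")
if not encoders[i].link(sink_queues[i][1]):
    sys.stderr.write(f" encoder -> queue link failed for stream {i}\n")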