Please provide complete information as applicable to your setup.
• Hardware Platform: GPU
• DeepStream Version: 6.1.1
• TensorRT Version: 8.4.1.5
• NVIDIA GPU Driver Version (valid for GPU only): 525.60.13
I have the following pipeline, which works well when I give it one stream as input but fails with an error when I give it more than one.
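(For context, the snippet below assumes the standard deepstream_python_apps imports plus a few module-level settings that I define earlier in my script, namely CODEC, BITRATE, display, file_loop and RTSPOUTPUTPATH. The values shown here are placeholders, and is_aarch64 / bus_call come from the samples' common helpers in my setup.)

import sys
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GstRtspServer, GLib
import pyds
from common.is_aarch_64 import is_aarch64
from common.bus_call import bus_call

# Placeholder values; the real ones are set elsewhere in my script
CODEC = "H264"        # "H264" or "H265"
BITRATE = 4000000
display = "rtsp"      # anything other than "fakesink" builds the encode/RTSP branch
file_loop = False
RTSPOUTPUTPATH = "/ds-test"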
def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    if not caps:
        caps = decoder_src_pad.query_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)
    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    print("gstname=", gstname)
    if gstname.find("video") != -1:
        # Link the decodebin pad only if decodebin has picked the nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        print("features=", features)
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write(" Error: Decodebin did not pick nvidia decoder plugin.\n")

def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') is not None:
            Object.set_property("drop-on-latency", True)
def create_source_bin(index, uri):
    print("Creating source bin")
    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        sys.stderr.write(" Unable to create source bin \n")
    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    if file_loop:
        # use nvurisrcbin to enable file-loop
        uri_decode_bin = Gst.ElementFactory.make("nvurisrcbin", "uri-decode-bin")
        uri_decode_bin.set_property("file-loop", 1)
    else:
        uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        sys.stderr.write(" Unable to create uri decode bin \n")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        sys.stderr.write(" Failed to add ghost pad in source bin \n")
        return None
    return nbin
TILED_OUTPUT_WIDTH=1280
TILED_OUTPUT_HEIGHT=720
RTSPOUTPUTPORTNUM="1000"
def main(args, requested_pgie=None, config=None):
    number_sources = len(args)

    # Standard GStreamer initialization
    Gst.init(None)

    # Create gstreamer elements
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()
    is_live = False
    if not pipeline:
        sys.stderr.write(" Unable to create Pipeline \n")

    print("Creating streammux \n ")
    # Create nvstreammux instance to form batches from one or more sources.
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)

    for i in range(number_sources):
        print("Creating source_bin ", i, " \n ")
        uri_name = args[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)

    queue1 = Gst.ElementFactory.make("queue", "queue1")
    queue2 = Gst.ElementFactory.make("queue", "queue2")
    queue3 = Gst.ElementFactory.make("queue", "queue3")
    queue4 = Gst.ElementFactory.make("queue", "queue4")
    pipeline.add(queue1)
    pipeline.add(queue2)
    pipeline.add(queue3)
    pipeline.add(queue4)

    print("Creating Pgie \n ")
    pgie = Gst.ElementFactory.make("nvinferserver", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie : %s\n" % requested_pgie)

    if is_live:
        print("At least one of the sources is live")
        streammux.set_property('live-source', 1)

    streammux.set_property('width', 1920)
    streammux.set_property('height', 1080)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', config)
    pgie.set_property("batch-size", number_sources)

    print("Creating nvvidconv1 \n ")
    nvvidconv1 = Gst.ElementFactory.make("nvvideoconvert", "convertor1")
    if not nvvidconv1:
        sys.stderr.write(" Unable to create nvvidconv1 \n")

    print("Creating filter1 \n ")
    caps1 = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
    filter1 = Gst.ElementFactory.make("capsfilter", "filter1")
    if not filter1:
        sys.stderr.write(" Unable to get the caps filter1 \n")
    filter1.set_property("caps", caps1)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)

    print("Creating nvstreamdemux \n ")
    nvstreamdemux = Gst.ElementFactory.make("nvstreamdemux", "nvstreamdemux")
    if not nvstreamdemux:
        sys.stderr.write(" Unable to create nvstreamdemux \n")

    print("Adding elements to Pipeline \n")
    pipeline.add(pgie)
    pipeline.add(nvvidconv1)
    pipeline.add(filter1)
    pipeline.add(nvstreamdemux)

    print("Linking elements in the Pipeline \n")
    streammux.link(queue1)
    queue1.link(pgie)
    pgie.link(queue2)
    queue2.link(nvvidconv1)
    nvvidconv1.link(queue3)
    queue3.link(filter1)
    filter1.link(queue4)
    queue4.link(nvstreamdemux)
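    # For each source, build an independent output branch (with queues in between):
    #   nvstreamdemux src_%u -> nvmultistreamtiler (1x1) -> nvdsosd -> nvvideoconvert -> capsfilter
    #   -> either a fakesink, or nvv4l2 encoder -> rtppay -> udpsink feeding a per-stream RTSP server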
    for i in range(number_sources):
        padname = "src_%u" % i
        demuxsrcpad = nvstreamdemux.get_request_pad(padname)
        if not demuxsrcpad:
            sys.stderr.write("Unable to create demux src pad \n")

        queue = Gst.ElementFactory.make("queue", f"nvvidconv1_queue_{i}")
        if not queue:
            sys.stderr.write(" Unable to create queue element \n")
        pipeline.add(queue)
        queuesinkpad = queue.get_static_pad("sink")
        if not queuesinkpad:
            sys.stderr.write("Unable to create queue sink pad \n")
        demuxsrcpad.link(queuesinkpad)

        tiler = Gst.ElementFactory.make("nvmultistreamtiler", f"nvtiler_{i}")
        if not tiler:
            sys.stderr.write(" Unable to create tiler \n")
        tiler.set_property("rows", 1)
        tiler.set_property("columns", 1)
        tiler.set_property("width", TILED_OUTPUT_WIDTH)
        tiler.set_property("height", TILED_OUTPUT_HEIGHT)
        if not is_aarch64():
            # Use CUDA unified memory in the pipeline so frames
            # can be easily accessed on CPU in Python.
            mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
            tiler.set_property("nvbuf-memory-type", mem_type)
        pipeline.add(tiler)
        queue.link(tiler)

        queue = Gst.ElementFactory.make("queue", f"tiler_queue_{i}")
        if not queue:
            sys.stderr.write(" Unable to create queue element \n")
        pipeline.add(queue)
        tiler.link(queue)

        # Create OSD to draw on the converted RGBA buffer
        nvosd = Gst.ElementFactory.make("nvdsosd", f"onscreendisplay_{i}")
        if not nvosd:
            sys.stderr.write("ERROR: Unable to create nvosd\n")
            sys.exit(1)
        pipeline.add(nvosd)
        queue.link(nvosd)

        queue = Gst.ElementFactory.make("queue", f"nvidconv_queue_{i}")
        if not queue:
            sys.stderr.write(" Unable to create queue element \n")
        pipeline.add(queue)
        nvosd.link(queue)

        nvvidconv_postosd = Gst.ElementFactory.make("nvvideoconvert", f"convertor_postosd_{i}")
        if not nvvidconv_postosd:
            sys.stderr.write("ERROR: Unable to create nvvidconv_postosd\n")
            sys.exit(1)
        if not is_aarch64():
            # Use CUDA unified memory in the pipeline so frames
            # can be easily accessed on CPU in Python.
            mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
            nvvidconv_postosd.set_property("nvbuf-memory-type", mem_type)
        pipeline.add(nvvidconv_postosd)
        queue.link(nvvidconv_postosd)

        queue = Gst.ElementFactory.make("queue", f"caps_queue_{i}")
        if not queue:
            sys.stderr.write(" Unable to create queue element \n")
        pipeline.add(queue)
        nvvidconv_postosd.link(queue)

        # Create a caps filter
        caps = Gst.ElementFactory.make("capsfilter", f"filter_{i}")
        caps.set_property("caps", Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420, width=1200, height=600"))
        pipeline.add(caps)
        queue.link(caps)

        if display == "fakesink":
            queue = Gst.ElementFactory.make("queue", f"sink_queue_{i}")
            if not queue:
                sys.stderr.write(" Unable to create queue element \n")
            pipeline.add(queue)
            caps.link(queue)

            print("Creating Fakesink ", i, "\n")
            sink = Gst.ElementFactory.make("fakesink", f"fakesink_{i}")
            if not sink:
                sys.stderr.write(" Unable to create sink element \n")
            sink.set_property('enable-last-sample', 0)
            sink.set_property('sync', 0)
            sink.set_property("qos", 0)
            pipeline.add(sink)
            queue.link(sink)
        else:
            queue = Gst.ElementFactory.make("queue", f"encoder_queue_{i}")
            if not queue:
                sys.stderr.write(" Unable to create queue element \n")
            pipeline.add(queue)
            caps.link(queue)

            # Make the encoder
            if CODEC == "H264":
                encoder = Gst.ElementFactory.make("nvv4l2h264enc", f"encoder_{i}")
            elif CODEC == "H265":
                encoder = Gst.ElementFactory.make("nvv4l2h265enc", f"encoder_{i}")
            if not encoder:
                sys.stderr.write("ERROR: Unable to create encoder")
                sys.exit(1)
            encoder.set_property('bitrate', int(BITRATE))
            if is_aarch64():
                encoder.set_property('preset-level', 1)
                encoder.set_property('insert-sps-pps', 1)
                encoder.set_property('bufapi-version', 1)
            # Add the V4L2 H264/H265 encoder element to the pipeline, then link the caps filter to it
            pipeline.add(encoder)
            queue.link(encoder)

            queue = Gst.ElementFactory.make("queue", f"rtp_queue_{i}")
            if not queue:
                sys.stderr.write(" Unable to create queue element \n")
            pipeline.add(queue)
            encoder.link(queue)

            # Make the payloader that packs the encoded video into RTP packets
            if CODEC == "H264":
                rtppay = Gst.ElementFactory.make("rtph264pay", f"rtppay_{i}")
            elif CODEC == "H265":
                rtppay = Gst.ElementFactory.make("rtph265pay", f"rtppay_{i}")
            if not rtppay:
                sys.stderr.write("ERROR: Unable to create rtppay")
                sys.exit(1)
            # Add the RTP payloader element to the pipeline, then link the encoder to it
            pipeline.add(rtppay)
            queue.link(rtppay)

            # The RTSP stream output sink sends to this local multicast UDP port.
            # This is received by the GstRtspServer instance created below once
            # the pipeline is started. See the GstRtspServer setup below for details.
            UDP_MULTICAST_ADDRESS = '224.224.255.255'
            UDP_MULTICAST_PORT = (int(RTSPOUTPUTPORTNUM) + i) * 2

            queue = Gst.ElementFactory.make("queue", f"sink_queue_{i}")
            if not queue:
                sys.stderr.write(" Unable to create queue element \n")
            pipeline.add(queue)
            rtppay.link(queue)

            sink = Gst.ElementFactory.make("udpsink", f"udpsink_{i}")
            if not sink:
                sys.stderr.write("ERROR: Unable to create udpsink")
                sys.exit(1)
            sink.set_property('host', UDP_MULTICAST_ADDRESS)
            sink.set_property('port', UDP_MULTICAST_PORT)
            sink.set_property('async', False)
            # The property below tells the sink to sync to a clock (1) or not (0).
            # I find that using 1 slows things down but is much more regular;
            # with 0 it is much faster but freezes intermittently.
            sink.set_property("sync", 0)
            # Add the RTSP output stream sink element to the pipeline, then link the RTP payloader to it
            pipeline.add(sink)
            queue.link(sink)

            server = GstRtspServer.RTSPServer.new()
            server.props.service = str(int(RTSPOUTPUTPORTNUM) + i)
            server.attach(None)
            factory = GstRtspServer.RTSPMediaFactory.new()
            factory.set_shared(True)
            factory.set_launch("( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (UDP_MULTICAST_PORT, CODEC))
            server.get_mount_points().add_factory(RTSPOUTPUTPATH, factory)
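            # Each stream should then be reachable at rtsp://<host>:<RTSPOUTPUTPORTNUM + i><RTSPOUTPUTPATH>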
    # create an event loop and feed gstreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # List the sources
    print("Now playing...")
    for i, source in enumerate(args):
        print(i, ": ", source)

    print("Starting pipeline \n")
    # start playback and listen for events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # cleanup
    print("Exiting app\n")
    pipeline.set_state(Gst.State.NULL)
This is the error I get when more than one stream is given as input:
[NvTiler::ScaleAndPlaceAllMeta] ERROR: 175; scaleX=0.110937 scaleY=0.000000 frameMeta=0x7ff4ac019580
[NvTiler::Composite] ERROR: 335; ScaleAndPlaceAllMeta failed (0x7ff4b402fca0;0x7ff4b402fcc0)
Error: gst-resource-error-quark: GstNvTiler: FATAL ERROR; NvTiler::Composite failed (1): gstnvtiler.cpp(665): gst_nvmultistreamtiler_transform (): /GstPipeline:pipeline0/GstNvMultiStreamTiler:nvtiler_1
Exiting app
Can someone please help me figure out what mistake I am making here?