Please provide complete information as applicable to your setup.
• NVIDIA GeForce RTX 3050
• DeepStream 6.1.1
• NVIDIA GPU Driver Version 525
I am trying to use DeepStream to receive RTSP streams, run inference on them, and then re-broadcast the tiled output over RTSP using nvmultistreamtiler.
I had this working a while back, but when I tried it again I started getting the following error log:
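For context, the element chain built in the code below is roughly: uridecodebin source bins → nvstreammux → nvinfer → nvtracker → nvdsanalytics → nvvideoconvert → nvmultistreamtiler → nvdsosd → nvvideoconvert → capsfilter (I420) → nvv4l2h264enc → rtph264pay → udpsink, with the UDP stream re-served over RTSP. A stripped-down, hypothetical sketch of just the muxer → tiler portion (videotestsrc standing in for my RTSP cameras, sizes chosen arbitrarily) that I would expect to tile without errors is:

```python
# Minimal muxer -> tiler sanity sketch (assumes the DeepStream GStreamer plugins
# are installed; videotestsrc stands in for the real RTSP sources).
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

Gst.init(None)
pipeline = Gst.parse_launch(
    "videotestsrc is-live=true ! nvvideoconvert ! "
    "video/x-raw(memory:NVMM),width=1280,height=720 ! mux.sink_0 "
    "nvstreammux name=mux batch-size=1 width=1280 height=720 batched-push-timeout=40000 ! "
    "nvmultistreamtiler rows=1 columns=1 width=1280 height=720 ! "
    "nvvideoconvert ! fakesink sync=false"
)
pipeline.set_state(Gst.State.PLAYING)
bus = pipeline.get_bus()
# Block until an error or EOS, then shut down.
bus.timed_pop_filtered(Gst.CLOCK_TIME_NONE, Gst.MessageType.ERROR | Gst.MessageType.EOS)
pipeline.set_state(Gst.State.NULL)
```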
```
Error: gst-resource-error-quark: GstNvTiler: FATAL ERROR; NvTiler::Composite failed (1): gstnvtiler.cpp(665): gst_nvmultistreamtiler_transform (): /GstPipeline:pipeline0/GstNvMultiStreamTiler:nvtiler
[NvTiler::ScaleAndPlaceAllMeta] ERROR: 175; scaleX=0.000000 scaleY=0.000000 frameMeta=0x7f5b4ca34530
[NvTiler::Composite] ERROR: 335; ScaleAndPlaceAllMeta failed (0x7f5b4ca24e80;0x7f5b4c988b40)
Error: gst-resource-error-quark: GstNvTiler: FATAL ERROR; NvTiler::Composite failed (1): gstnvtiler.cpp(665): gst_nvmultistreamtiler_transform (): /GstPipeline:pipeline0/GstNvMultiStreamTiler:nvtiler
[NvTiler::ScaleAndPlaceAllMeta] ERROR: 175; scaleX=0.000000 scaleY=0.000000 frameMeta=0x7f5b4ca34530
[NvTiler::Composite] ERROR: 335; ScaleAndPlaceAllMeta failed (0x7f5b4c840970;0x7f5b4c47ad60)
```
The error repeats over and over. Is it something to do with the alignment of the width and height between the streammux and the tiler? The scaleX=0.000000 / scaleY=0.000000 values in the log make me suspect the tiler is seeing zero-sized frames. My full source code is below, after a small diagnostic sketch.
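For what it's worth, this is the kind of probe I was planning to drop on the tiler sink pad to check what per-frame sizes the frame meta actually carries (just a sketch, separate from the sink_pad_buffer_probe used in the real code):

```python
import pyds
from gi.repository import Gst

def tiler_sink_debug_probe(pad, info, u_data):
    """Debug only: print the source frame size recorded in each NvDsFrameMeta."""
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        print("source {}: {}x{}".format(frame_meta.source_id,
                                        frame_meta.source_frame_width,
                                        frame_meta.source_frame_height))
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK

# Attached like:
# tiler.get_static_pad('sink').add_probe(Gst.PadProbeType.BUFFER, tiler_sink_debug_probe, 0)
```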
```python
def setup_and_run_pipeline(logger, streams, config):
    global g_streams
    global g_config
    global g_source_bins
    global pipeline
    global nvanalytics
    global streammux
    global g_logger
    global CONTAINER_ID
    g_streams = streams
    g_config = config
    g_logger = logger
    CONTAINER_ID = open('/root/containerid').read().strip()

    GObject.threads_init()
    Gst.init(None)
    pipeline = Gst.Pipeline()
    pipeline_elements = []
    if not pipeline:
        print("Error starting gstream pipeline")
        sys.exit(1)

    number_sources = len(streams.get_streams())
    # Force 1 if no online cams
    if number_sources <= 0:
        number_sources = 1

    # Set up streammux
    streammux = generate_element('nvstreammux', "Stream-muxer")
    streammux.set_property('width', PIPELINE_WIDTH)
    streammux.set_property('height', PIPELINE_HEIGHT)
    streammux.set_property('batched-push-timeout', 100000)
    pipeline_elements.append(streammux)

    # Set up PGIE
    pgie = generate_element("nvinfer", "primary-inference")
    #pgie.set_property('config-file-path', 'model/traffic_cam_infer.txt')
    pgie.set_property('config-file-path', 'model/pgie_yolov4_tiny_tao_config.txt')
    pipeline_elements.append(pgie)
    streammux.set_property('batch-size', number_sources)
    pgie.set_property('batch-size', 8)
    #pgie.set_property('model-engine-file', "model/resnet34_peoplenet_pruned.etlt_b1_gpu0_fp16.engine".format(1))
    #pgie.set_property('model-engine-file', "model/resnet18_trafficcamnet_pruned.etlt_b{}_gpu0_fp32.engine".format(number_sources))
    pgie.set_property("model-engine-file", "model_b{}_gpu0_fp32.engine".format(8))

    # Tracker
    tracker = generate_element('nvtracker', 'tracker')
    tracker.set_property('tracker-width', PIPELINE_WIDTH)
    tracker.set_property('tracker-height', PIPELINE_HEIGHT)
    tracker.set_property('gpu_id', 0)
    tracker.set_property('ll-lib-file', "/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so")
    #tracker.set_property('ll-config-file', "pipeline/config_tracker_IOU.yml")
    tracker.set_property('ll-config-file', 'pipeline/config_tracker_NvDCF_perf.yml')
    tracker.set_property('enable_batch_process', 1)
    tracker.set_property('compute-hw', 1)
    pipeline_elements.append(tracker)

    # Analytics
    nvanalytics = generate_element('nvdsanalytics', 'analytics')
    nvanalytics.set_property('config-file', 'pipeline/config_analytics.txt')
    pipeline_elements.append(nvanalytics)

    nvvidconv1 = generate_element('nvvideoconvert', 'convertor1')
    pipeline_elements.append(nvvidconv1)

    if config['output_rtsp'] == "True":
        tiler = generate_element('nvmultistreamtiler', 'nvtiler')
        tiler_rows = int(math.sqrt(number_sources))
        tiler_columns = int(math.ceil((1.0 * number_sources) / tiler_rows))
        tiler.set_property('rows', tiler_rows)
        tiler.set_property('columns', tiler_columns)
        tiler.set_property('width', TILED_OUTPUT_WIDTH)
        tiler.set_property('height', TILED_OUTPUT_HEIGHT)
        tiler.set_property('qos', 0)
        pipeline_elements.append(tiler)

        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv1.set_property("nvbuf-memory-type", mem_type)
        tiler.set_property("nvbuf-memory-type", mem_type)

        nvosd = generate_element('nvdsosd', 'onscreendisplay')
        pipeline_elements.append(nvosd)
        nvvidconv_postosd = generate_element('nvvideoconvert', 'convertor_postosd')
        pipeline_elements.append(nvvidconv_postosd)

        caps = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=I420")
        filter1 = generate_element("capsfilter", "filter1")
        filter1.set_property('caps', caps)
        pipeline_elements.append(filter1)

        encoder = generate_element('nvv4l2h264enc', 'encoder')
        encoder.set_property('bitrate', 4000000)
        pipeline_elements.append(encoder)

        rtppay = generate_element('rtph264pay', 'rtppay')
        pipeline_elements.append(rtppay)

        udpsink_port_num = 5400
        sink = generate_element('udpsink', 'udpsink')
        sink.set_property('host', '224.224.255.255')
        sink.set_property('port', udpsink_port_num)
        sink.set_property('async', False)
        sink.set_property('sync', 1)
        pipeline_elements.append(sink)
    else:
        caps = Gst.Caps.from_string("video/x-raw(memory:NVMM), format=RGBA")
        filter1 = generate_element("capsfilter", "filter1")
        filter1.set_property('caps', caps)
        pipeline_elements.append(filter1)

        sink = generate_element('fakesink', 'fakesink')
        sink.set_property('sync', 0)
        sink.set_property('qos', 0)
        pipeline_elements.append(sink)

    for elem in pipeline_elements:
        pipeline.add(elem)

    if config['stream_video_file'] == "False":
        print("Streaming live")
        for stream in streams.get_streams():
            print("Creating source_bin{} for stream {}".format(stream.camera_id, stream.rtsp_url))
            uri_name = streams.get_uri(stream)
            source_bin = create_source_bin(g_logger, stream.camera_id, uri_name)
            if not source_bin:
                print("Error creating source bin")
                sys.exit(1)
            g_source_bins[stream.camera_id] = source_bin
            pipeline.add(source_bin)
            padname = "sink_%u" % stream.camera_id
            sinkpad = streammux.get_request_pad(padname)
            if not sinkpad:
                print("unable to get sink pad bin")
                sys.exit(1)
            srcpad = source_bin.get_static_pad("src")
            if not srcpad:
                print("Unable to get source pad")
                sys.exit(1)
            srcpad.link(sinkpad)
            streammux.set_property('live-source', 1)
    else:
        print("Adding file source to pipeline")
        stream_id = "0"
        srcbin = generate_element("filesrc", "file-source")
        srcbin.set_property('location', 'testvid5.264')
        pipeline.add(srcbin)
        h264parse = generate_element('h264parse', 'h264-parser')
        pipeline.add(h264parse)
        decoder = generate_element('nvv4l2decoder', 'nvv4l2-decoder')
        pipeline.add(decoder)
        redis_conn.set("stream{}-crossings".format(stream_id), cPickle.dumps([]))
        srcbin.link(h264parse)
        h264parse.link(decoder)
        sinkpad = streammux.get_request_pad('sink_0')
        if not sinkpad:
            print("Error getting sink pad for video")
            sys.exit(1)
        srcpad = decoder.get_static_pad('src')
        if not srcpad:
            print("Error getting source pad for video")
            sys.exit(1)
        srcpad.link(sinkpad)
        print("Done with file source")

    mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
    streammux.set_property("nvbuf-memory-type", mem_type)
    nvvidconv1.set_property("nvbuf-memory-type", mem_type)

    for i in range(1, len(pipeline_elements)):
        pipeline_elements[i - 1].link(pipeline_elements[i])

    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    if config['output_rtsp'] == "True":
        rtsp_port_num = 8554
        generate_rtsp_server(rtsp_port_num, udpsink_port_num)

    if config['output_rtsp'] == "True":
        sink_pad = tiler.get_static_pad('sink')
    else:
        sink_pad = sink.get_static_pad('sink')
    if not sink_pad:
        print("Unable to get sink pad")
        sys.exit(1)
    else:
        sink_pad.add_probe(Gst.PadProbeType.BUFFER, sink_pad_buffer_probe, 0)

    pipeline.set_state(Gst.State.PLAYING)
    # TODO Check sources
    GObject.timeout_add_seconds(10, add_delete_streams)
    print(streams.get_streams())
    for stream in streams.get_streams():
        print(stream.camera_id, "(", stream.name, "): ", streams.get_uri(stream))
    try:
        loop.run()
    except:
        print("LOOP EXCEPTION")
    print("Exiting App\n")
    pipeline.set_state(Gst.State.NULL)
def generate_element(key, name):
    elem = Gst.ElementFactory.make(key, name)
    if not elem:
        print("Error creating", key)
        sys.exit(1)
    return elem
def cb_newpad(decodebin, decoder_src_pad, data):
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)
    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if gstname.find("video") != -1:
        # Link the decodebin pad only if decodebin has picked the nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps contain
        # NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.exit("Failed to link decoder src pad to source bin ghost pad")
        else:
            sys.exit("Error: Decodebin did not pick nvidia decoder plugin")
def decodebin_child_added(child_proxy, Object, name, user_data):
    global g_streams
    source_bin_name = user_data.get_property('name')
    camera_id = int(source_bin_name.partition("source-bin-")[2])
    if name.find("source") != -1:
        Object.set_property("latency", g_streams.get_rtsp_latency(camera_id))
        Object.set_property("retry", 65535)
        #Object.set_property("tcp-timeout", 100000000)
        #Object.set_property("timeout", 100000000)
        Object.set_property("drop-on-latency", True)
        Object.set_property("do-retransmission", eval(g_config["do-retransmission"]))
        print("Camera id {} rtsp latency: {}".format(camera_id, Object.get_property("latency")))
    if "nvv4l2decoder" in name:
        #Object.set_property("drop-frame-interval", g_streams.get_decoder_drop_frame_interval(camera_id))
        Object.set_property("num-extra-surfaces", g_streams.get_decoder_num_extra_surfaces(camera_id))
        print("Camera id {} decode drop-frame-interval: {}".format(camera_id, Object.get_property("drop-frame-interval")))
        print("Camera id {} decoder num-extra-surfaces: {}".format(camera_id, Object.get_property("num-extra-surfaces")))
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
def generate_rtsp_server(rtsp_port_num, udpsink_port_num):
    server = GstRtspServer.RTSPServer.new()
    server.props.service = "%d" % rtsp_port_num
    server.attach(None)
    factory = GstRtspServer.RTSPMediaFactory.new()
    factory.set_launch("( udpsrc name=pay0 port=%d buffer-size=524288 caps=\"application/x-rtp, media=video, clock-rate=90000, encoding-name=(string)%s, payload=96 \" )" % (udpsink_port_num, 'H264'))
    factory.set_shared(True)
    server.get_mount_points().add_factory('/ds-test', factory)
    print("\n *** DeepStream: Launched RTSP Streaming at rtsp://localhost:%d/ds-test ***\n\n" % rtsp_port_num)
def create_source_bin(logger, index, uri):
    # Create a source GstBin to abstract this bin's content from the rest of the
    # pipeline
    bin_name = "source-bin-%02d" % index
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        logger.error("Unable to create source bin")
        sys.exit("Unable to create source bin")
    # Source element for reading from the uri.
    # We will use decodebin and let it figure out the container format of the
    # stream and the codec and plug the appropriate demux and decode plugins.
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        logger.error("Unable to create uri decode bin")
        sys.exit("Unable to create uri decode bin")
    # We set the input uri to the source element
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.set_property("buffer-duration", 1000)
    # Connect to the "pad-added" signal of the decodebin which generates a
    # callback once a new pad for raw data has been created by the decodebin
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    # We need to create a ghost pad for the source bin which will act as a proxy
    # for the video decoder src pad. The ghost pad will not have a target right
    # now. Once the decode bin creates the video decoder and generates the
    # cb_newpad callback, we will set the ghost pad target to the video decoder
    # src pad.
    Gst.Bin.add(nbin, uri_decode_bin)
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        logger.error("Failed to add ghost pad in source bin")
        sys.exit("Failed to add ghost pad in source bin")
        return None
    # Reset crossing data for stream
    print("srckey", "stream{}".format(index))
    redis_conn.set("stream{}".format(index) + '-crossings', cPickle.dumps([]))
    return nbin
```
Any suggestions would be appreciated.