• Hardware Platform (GPU): Tesla T4
• DeepStream Version: 6.2
• TensorRT Version: 8.5.2.2-1+cuda11.8
• NVIDIA GPU Driver Version: 515.65.01
• Issue Type (Questions)
Hello,
I have converted the DS 6.2 sample application deepstream-3d-action-recognition into Python. While executing this pipeline we are getting the issue mentioned below.
Here is the converted code for your reference.
import faulthandler
faulthandler.enable()
import sys, math
sys.path.append("/opt/nvidia/deepstream/deepstream/lib")
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstRtspServer', '1.0')
from gi.repository import Gst, GLib
from common.bus_call import bus_call
import pyds
MAX_TIME_STAMP_LEN = 32
# Callback function for deep-copying an NvDsEventMsgMeta struct
def meta_copy_func(data, user_data):
    user_meta = pyds.NvDsUserMeta.cast(data)
    src_meta_data = user_meta.user_meta_data
    srcmeta = pyds.NvDsEventMsgMeta.cast(src_meta_data)
    dstmeta_ptr = pyds.memdup(pyds.get_ptr(srcmeta),
                              sys.getsizeof(pyds.NvDsEventMsgMeta))
    dstmeta = pyds.NvDsEventMsgMeta.cast(dstmeta_ptr)
    dstmeta.ts = pyds.memdup(srcmeta.ts, MAX_TIME_STAMP_LEN + 1)
    dstmeta.sensorStr = pyds.get_string(srcmeta.sensorStr)
    if srcmeta.objSignature.size > 0:
        dstmeta.objSignature.signature = pyds.memdup(
            srcmeta.objSignature.signature, srcmeta.objSignature.size)
        dstmeta.objSignature.size = srcmeta.objSignature.size
    if srcmeta.extMsgSize > 0:
        if srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_TYCO_COLOR_MESSAGE:
            srcobj = pyds.NvDsTycoMessageObject.cast(srcmeta.extMsg)
            obj = pyds.alloc_nvds_tyco_message_object()
            obj.camera_id = srcobj.camera_id
            obj.message_type = srcobj.message_type
            obj.host_name = srcobj.host_name
            obj.current_UTC_datetime = srcobj.current_UTC_datetime
            obj.time_zone = srcobj.time_zone
            dstmeta.extMsg = obj
            dstmeta.extMsgSize = sys.getsizeof(pyds.NvDsTycoMessageObject)
    return dstmeta
# Callback function for freeing an NvDsEventMsgMeta instance
def meta_free_func(data, user_data):
    user_meta = pyds.NvDsUserMeta.cast(data)
    srcmeta = pyds.NvDsEventMsgMeta.cast(user_meta.user_meta_data)
    # pyds.free_buffer takes the C address of a buffer and frees the memory
    # It's a NOP if the address is NULL
    if srcmeta.ts:
        pyds.free_buffer(srcmeta.ts)
    if srcmeta.sensorStr:
        pyds.free_buffer(srcmeta.sensorStr)
    if srcmeta.objSignature.size > 0:
        pyds.free_buffer(srcmeta.objSignature.signature)
        srcmeta.objSignature.size = 0
    if srcmeta.extMsgSize > 0:
        if srcmeta.objType == pyds.NvDsObjectType.NVDS_OBJECT_TYPE_TYCO_COLOR_MESSAGE:
            obj = pyds.NvDsTycoMessageObject.cast(srcmeta.extMsg)
            pyds.free_buffer(obj.camera_id)
            pyds.free_buffer(obj.message_type)
            pyds.free_buffer(obj.host_name)
            pyds.free_buffer(obj.current_UTC_datetime)
            pyds.free_buffer(obj.time_zone)
        pyds.free_gbuffer(srcmeta.extMsg)
        srcmeta.extMsgSize = 0
def cb_newpad(decodebin, decoder_src_pad, data):
    print("In cb_newpad\n")
    caps = decoder_src_pad.get_current_caps()
    gststruct = caps.get_structure(0)
    gstname = gststruct.get_name()
    source_bin = data
    features = caps.get_features(0)
    # Need to check if the pad created by the decodebin is for video and not
    # audio.
    if gstname.find("video") != -1:
        # Link the decodebin pad only if decodebin has picked the nvidia
        # decoder plugin nvdec_*. We do this by checking if the pad caps
        # contain NVMM memory features.
        if features.contains("memory:NVMM"):
            # Get the source bin ghost pad
            bin_ghost_pad = source_bin.get_static_pad("src")
            if not bin_ghost_pad.set_target(decoder_src_pad):
                sys.stderr.write("Failed to link decoder src pad to source bin ghost pad\n")
        else:
            sys.stderr.write("Error: Decodebin did not pick nvidia decoder plugin.\n")
def decodebin_child_added(child_proxy, Object, name, user_data):
    print("Decodebin child added:", name, "\n")
    if name.find("decodebin") != -1:
        Object.connect("child-added", decodebin_child_added, user_data)
    if "source" in name:
        source_element = child_proxy.get_by_name("source")
        if source_element.find_property('drop-on-latency') is not None:
            Object.set_property("drop-on-latency", True)
def create_source_bin(index, uri):
    print("Creating source bin")
    bin_name = f"source-bin-{index}"
    print(bin_name)
    nbin = Gst.Bin.new(bin_name)
    if not nbin:
        raise RuntimeError("Unable to create source bin")
    uri_decode_bin = Gst.ElementFactory.make("uridecodebin", "uri-decode-bin")
    if not uri_decode_bin:
        raise RuntimeError("Unable to create uri decode bin")
    uri_decode_bin.set_property("uri", uri)
    uri_decode_bin.connect("pad-added", cb_newpad, nbin)
    uri_decode_bin.connect("child-added", decodebin_child_added, nbin)
    Gst.Bin.add(nbin, uri_decode_bin)
    # Add a targetless ghost pad now; its target is set in cb_newpad once the
    # decoder src pad appears.
    bin_pad = nbin.add_pad(Gst.GhostPad.new_no_target("src", Gst.PadDirection.SRC))
    if not bin_pad:
        raise RuntimeError("Failed to add ghost pad in source bin")
    return nbin
# nvstreammux -> nvdspreprocess -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer
def main():
    global label_list
    live_stream = False
    # Standard GStreamer initialization
    Gst.init(None)
    # Registering callbacks
    pyds.register_user_copyfunc(meta_copy_func)
    pyds.register_user_releasefunc(meta_free_func)
    # Create Pipeline element that will form a connection of other elements
    print("Creating Pipeline")
    pipeline = Gst.Pipeline()
    input_streams = 1
    amount_of_deepstreams = math.ceil(input_streams / 30)
    print(f"*** opening {amount_of_deepstreams} deepstreams")
    for deepstream_num in range(amount_of_deepstreams):
        amount_of_streams_left = input_streams - deepstream_num * 30
        current_batch_size = min(30, amount_of_streams_left)
        # TODO: Is this check here on purpose? Why not before the loop?
        if not pipeline:
            raise RuntimeError("Unable to create Pipeline")
        print("Creating streammux")
        streammux = Gst.ElementFactory.make("nvstreammux", f"Stream-muxer_{deepstream_num}")
        if not streammux:
            raise RuntimeError("Unable to create NvStreamMux")
        pipeline.add(streammux)
        for i in range(current_batch_size):
            stream_number = i + 30 * deepstream_num
            print(f"Creating source_bin {stream_number}")
            uri_name = "file:///workdir/video.mp4"  # TODO: add the input file name
            source_bin = create_source_bin(stream_number, uri_name)
            if not source_bin:
                raise RuntimeError("Unable to create source bin")
            pipeline.add(source_bin)
            padname = f"sink_{i}"
            sinkpad = streammux.get_request_pad(padname)
            if not sinkpad:
                raise RuntimeError("Unable to create sink pad bin")
            srcpad = source_bin.get_static_pad("src")
            if not srcpad:
                raise RuntimeError("Unable to create src pad bin")
            srcpad.link(sinkpad)
print("Creating nvdspreprocess")
preprocess = Gst.ElementFactory.make("nvdspreprocess", f"nvdspreprocess_{deepstream_num}")
if not preprocess:
raise " Unable to create nvdspreprocess"
print("Creating Pgie")
pgie = Gst.ElementFactory.make("nvinfer", f"primary-inference_{deepstream_num}")
if not pgie:
raise " Unable to create pgie"
print("Creating nvtiler \n ")
nvtiler = Gst.ElementFactory.make("nvmultistreamtiler", f"nvtiler_{deepstream_num}")
if not nvtiler:
raise " Unable to create nvtiler"
print("Creating nvvidconv \n ")
nvvidconv = Gst.ElementFactory.make("nvvideoconvert", f"nvvidconv_{deepstream_num}")
if not nvvidconv:
raise " Unable to create nvvidconv"
print("Creating nvosd \n ")
nvosd = Gst.ElementFactory.make("nvdsosd", f"nvosd_{deepstream_num}")
if not nvosd:
raise " Unable to create nvosd"
print("Creating EGLSink \n")
sink = Gst.ElementFactory.make("nveglglessink", f"nvvideo-renderer_{deepstream_num}")
if not sink:
raise " Unable to create sink"\
queue1 = Gst.ElementFactory.make("queue", f"queue1_{deepstream_num}")
queue2 = Gst.ElementFactory.make("queue", f"queue2_{deepstream_num}")
queue3 = Gst.ElementFactory.make("queue", f"queue3_{deepstream_num}")
queue4 = Gst.ElementFactory.make("queue", f"queue4_{deepstream_num}")
queue5 = Gst.ElementFactory.make("queue", f"queue5_{deepstream_num}")
queue6 = Gst.ElementFactory.make("queue", f"queue6_{deepstream_num}")
if live_stream:
streammux.set_property('live-source', 1)
streammux.set_property('width', 1280)
streammux.set_property('height', 720)
streammux.set_property('batch-size', current_batch_size)
# streammux.set_property('batched-push-timeout', 4000000)
pgie.set_property('config-file-path', "/workdir/3d-action/config_infer_primary_3d_action.txt") #TODO: add the config file path
preprocess.set_property("config-file", "/workdir/3d-action/config_preprocess_3d_custom.txt")
sink.set_property("sync", 0)
sink.set_property("qos", 0)
print("Adding elements to Pipeline \n") #/* nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
pipeline.add(queue1)
pipeline.add(queue2)
pipeline.add(queue3)
pipeline.add(queue4)
pipeline.add(queue5)
pipeline.add(queue6)
pipeline.add(preprocess)
pipeline.add(pgie)
pipeline.add(nvtiler)
pipeline.add(nvvidconv)
pipeline.add(nvosd)
pipeline.add(sink)
print("Linking elements in the Pipeline \n")
streammux.link(queue1)
queue1.link(preprocess)
preprocess.link(queue2)
queue2.link(pgie)
pgie.link(queue3)
queue3.link(nvtiler)
nvtiler.link(queue4)
queue4.link(nvvidconv)
nvvidconv.link(queue5)
queue5.link(nvosd)
nvosd.link(queue6)
queue6.link(sink)
# streammux.link(preprocess)
# preprocess.link(pgie)
    # Create an event loop and feed GStreamer bus messages to it
    loop = GLib.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)
    print("Starting pipeline")
    # Start playback and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except:
        pass
    # Cleanup
    print("Exiting app\n")
    pyds.unset_callback_funcs()
    pipeline.set_state(Gst.State.NULL)


if __name__ == '__main__':
    main()
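For reference, bus_call is the stock helper imported from the common folder of the deepstream_python_apps repo. In case that module is not on your path, a minimal equivalent (a sketch that behaves the same way, reusing the Gst and sys imports already in the script) would be:

def bus_call(bus, message, loop):
    # Quit the main loop on EOS or ERROR; print warnings as they arrive.
    t = message.type
    if t == Gst.MessageType.EOS:
        sys.stdout.write("End-of-stream\n")
        loop.quit()
    elif t == Gst.MessageType.WARNING:
        err, debug = message.parse_warning()
        sys.stderr.write(f"Warning: {err}: {debug}\n")
    elif t == Gst.MessageType.ERROR:
        err, debug = message.parse_error()
        sys.stderr.write(f"Error: {err}: {debug}\n")
        loop.quit()
    return True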
Kindly assist us in resolving this problem.
Thanks
Dax Jain