• Hardware Platform (Jetson / GPU) NVIDIA Jetson Nano (Developer Kit Version)
• DeepStream Version deepstream-5.0
• JetPack Version (valid for Jetson only) Jetpack 4.4 DP [L4T 32.4.2]
• TensorRT Version 7.1.0.16
Hi,
I was trying to implement the DeepStream Python samples. I need to implement multistreaming + detection + tracking + a message broker.
So, I have combined python examples: test2, test4 and imagedata-multistream into single script.
It's working well, but it gives me the error below after 200 frames.
!![Exception] Exiting... at the designated frame position: 200
python3: ../../src/hb-object-private.hh:154: Type* hb_object_reference(Type*) [with Type = hb_unicode_funcs_t]: Assertion `hb_object_is_valid (obj)' failed.
Aborted (core dumped)
Here is my code (showing only the functions/code that I have edited; functions not shown below are unchanged from the samples):
# nvinfer (primary GIE) configuration — YOLOv3 detector.
PGIE_CONFIG_FILE="config_infer_primary_yoloV3.txt"
# nvmsgconv schema/sensor configuration (taken from deepstream-test4).
MSCONV_CONFIG_FILE="dstest4_msgconv_config.txt"
def meta_copy_func(data, user_data):
    """Copy callback for the NVDS_EVENT_MSG_META user meta.

    Deep-copies an NvDsEventMsgMeta instance (including its owned `ts`
    and `objSignature.signature` buffers) so downstream elements such as
    nvmsgconv get their own copy.

    :param data: NvDsUserMeta pointer wrapping the source NvDsEventMsgMeta.
    :param user_data: unused callback context.
    :return: pointer to the duplicated NvDsEventMsgMeta.
    """
    user_meta = pyds.NvDsUserMeta.cast(data)
    src_meta_data = user_meta.user_meta_data
    # Cast src_meta_data to pyds.NvDsEventMsgMeta
    srcmeta = pyds.NvDsEventMsgMeta.cast(src_meta_data)
    # NOTE(review): sizeof via sys.getsizeof on the Python wrapper type is what
    # the DS 5.0 sample does, but it is not the C struct size — verify against
    # the pyds bindings version in use.
    dstmeta_ptr = pyds.memdup(pyds.get_ptr(srcmeta),
                              sys.getsizeof(pyds.NvDsEventMsgMeta))
    # Cast the duplicated memory to pyds.NvDsEventMsgMeta
    dstmeta = pyds.NvDsEventMsgMeta.cast(dstmeta_ptr)
    # Duplicate the owned timestamp buffer as well (shallow memdup above only
    # copied the pointer).
    dstmeta.ts = pyds.memdup(srcmeta.ts, MAX_TIME_STAMP_LEN + 1)
    if srcmeta.objSignature.size > 0:
        # FIX: was `srcMeta.objSignature.size` — undefined name (case typo),
        # which raised NameError the first time a signature was present.
        dstmeta.objSignature.signature = pyds.memdup(
            srcmeta.objSignature.signature, srcmeta.objSignature.size)
        dstmeta.objSignature.size = srcmeta.objSignature.size
    return dstmeta
# Callback function for freeing an NvDsEventMsgMeta instance
def meta_free_func(data, user_data):
    """Release callback for the NVDS_EVENT_MSG_META user meta.

    Frees the buffers owned by the NvDsEventMsgMeta attached to *data*:
    the timestamp string and, when present, the object signature.

    :param data: NvDsUserMeta pointer wrapping the NvDsEventMsgMeta to free.
    :param user_data: unused callback context.
    """
    event_meta = pyds.NvDsEventMsgMeta.cast(
        pyds.NvDsUserMeta.cast(data).user_meta_data)
    pyds.free_buffer(event_meta.ts)
    if event_meta.objSignature.size > 0:
        pyds.free_buffer(event_meta.objSignature.signature)
        event_meta.objSignature.size = 0
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on the nvdsosd sink pad.

    For every object of a selected class in every frame of the batch,
    and only on every 30th frame, allocates an NvDsEventMsgMeta, fills it
    from the object/tracker meta and attaches it to the frame so that
    nvmsgconv/nvmsgbroker publish it. Also updates the per-stream FPS
    counter for each frame.

    :return: Gst.PadProbeReturn.OK always (probe stays passive).
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # FIX: was a bare `return` (None); a pad probe must return a
        # Gst.PadProbeReturn value.
        return Gst.PadProbeReturn.OK

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    if not batch_meta:
        return Gst.PadProbeReturn.OK

    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            # FIX: was `continue`, which would spin forever because the
            # list pointer is never advanced on this path.
            break
        frame_number = frame_meta.frame_num
        source_id = frame_meta.source_id

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break  # FIX: was `continue` (see above)
            # Only publish events for selected class ids.
            if obj_meta.class_id not in [0, 24, 26, 28]:
                try:
                    l_obj = l_obj.next
                except StopIteration:
                    break
                continue
            # Throttle: attach message meta only every 30th frame.
            if not (frame_number % 30):
                print(frame_number)
                msg_meta = pyds.alloc_nvds_event_msg_meta()
                msg_meta.bbox.top = obj_meta.rect_params.top
                msg_meta.bbox.left = obj_meta.rect_params.left
                msg_meta.bbox.width = obj_meta.rect_params.width
                msg_meta.bbox.height = obj_meta.rect_params.height
                msg_meta.frameId = frame_number
                msg_meta.trackingId = long_to_int(obj_meta.object_id)
                msg_meta.confidence = obj_meta.confidence
                msg_meta.componentId = source_id
                msg_meta.objClassId = obj_meta.class_id
                # NOTE(review): objType is hard-coded to PERSON even though
                # non-person class ids (24/26/28) are accepted — confirm.
                msg_meta.objType = pyds.NvDsObjectType.NVDS_OBJECT_TYPE_PERSON
                msg_meta.ts = pyds.alloc_buffer(MAX_TIME_STAMP_LEN + 1)
                pyds.generate_ts_rfc3339(msg_meta.ts, MAX_TIME_STAMP_LEN)
                user_event_meta = pyds.nvds_acquire_user_meta_from_pool(batch_meta)
                if user_event_meta:
                    user_event_meta.user_meta_data = msg_meta
                    user_event_meta.base_meta.meta_type = \
                        pyds.NvDsMetaType.NVDS_EVENT_MSG_META
                    # Copy/free callbacks so DeepStream can manage the
                    # lifetime of the attached event meta.
                    pyds.set_user_copyfunc(user_event_meta, meta_copy_func)
                    pyds.set_user_releasefunc(user_event_meta, meta_free_func)
                    pyds.nvds_add_user_meta_to_frame(frame_meta, user_event_meta)
                else:
                    print("Error in attaching event meta to buffer\n")
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        # FIX: FPS update moved inside the frame loop — it previously ran
        # once per batch, after the loop, counting only the last frame.
        fps_streams["stream{0}".format(frame_meta.pad_index)].get_fps()
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
def main(args):
    """Build and run the pipeline:

        N x uridecodebin -> nvstreammux -> nvinfer (YOLOv3) -> nvtracker
        -> nvvideoconvert -> nvdsosd -> nvmsgconv -> nvmsgbroker

    Requires `parse_args()` to have populated the module globals
    (uri_files, proto_lib, conn_str, schema_type) first.

    NOTE(review): the pipeline has no tee and no video/fake sink — every
    buffer terminates at nvmsgbroker. Confirm that is intended.
    """
    number_sources = len(uri_files)
    for i in range(number_sources):
        fps_streams["stream{0}".format(i)] = GETFPS(i)

    GObject.threads_init()
    Gst.init(None)

    print("Creating Pipeline \n ")
    pipeline = Gst.Pipeline()

    ########### adding multistream to streammux ###########
    streammux = Gst.ElementFactory.make("nvstreammux", "Stream-muxer")
    if not streammux:
        sys.stderr.write(" Unable to create NvStreamMux \n")
    pipeline.add(streammux)
    # FIX: initialize is_live — it was previously only assigned inside the
    # loop, and only for RTSP URIs.
    is_live = False
    for i in range(number_sources):
        uri_name = uri_files[i]
        if uri_name.find("rtsp://") == 0:
            is_live = True
        source_bin = create_source_bin(i, uri_name)
        if not source_bin:
            sys.stderr.write("Unable to create source bin \n")
        pipeline.add(source_bin)
        padname = "sink_%u" % i
        sinkpad = streammux.get_request_pad(padname)
        if not sinkpad:
            sys.stderr.write("Unable to create sink pad bin \n")
        srcpad = source_bin.get_static_pad("src")
        if not srcpad:
            sys.stderr.write("Unable to create src pad bin \n")
        srcpad.link(sinkpad)
    ####################################################

    pgie = Gst.ElementFactory.make("nvinfer", "primary-inference")
    if not pgie:
        sys.stderr.write(" Unable to create pgie \n")
    tracker = Gst.ElementFactory.make("nvtracker", "tracker")
    if not tracker:
        sys.stderr.write(" Unable to create tracker \n")
    nvvidconv = Gst.ElementFactory.make("nvvideoconvert", "convertor")
    if not nvvidconv:
        sys.stderr.write(" Unable to create nvvidconv \n")
    nvosd = Gst.ElementFactory.make("nvdsosd", "onscreendisplay")
    if not nvosd:
        sys.stderr.write(" Unable to create nvosd \n")
    msgconv = Gst.ElementFactory.make("nvmsgconv", "nvmsg-converter")
    if not msgconv:
        sys.stderr.write(" Unable to create msgconv \n")
    msgbroker = Gst.ElementFactory.make("nvmsgbroker", "nvmsg-broker")
    if not msgbroker:
        sys.stderr.write(" Unable to create msgbroker \n")

    streammux.set_property('width', 1280)
    streammux.set_property('height', 720)
    streammux.set_property('batch-size', number_sources)
    streammux.set_property('batched-push-timeout', 4000000)
    pgie.set_property('config-file-path', PGIE_CONFIG_FILE)
    # NOTE(review): pgie batch-size is left at the config-file value; with
    # multiple sources it should normally match number_sources.
    # pgie.set_property("batch-size",number_sources)
    msgconv.set_property('config', MSCONV_CONFIG_FILE)
    msgconv.set_property('payload-type', schema_type)
    msgbroker.set_property('proto-lib', proto_lib)
    msgbroker.set_property('conn-str', conn_str)
    msgbroker.set_property('sync', False)

    # Set properties of tracker from the test2 config file.
    config1 = configparser.ConfigParser()
    config1.read('dstest2_tracker_config.txt')
    config1.sections()
    for key in config1['tracker']:
        if key == 'tracker-width':
            tracker_width = config1.getint('tracker', key)
            tracker.set_property('tracker-width', tracker_width)
        if key == 'tracker-height':
            tracker_height = config1.getint('tracker', key)
            tracker.set_property('tracker-height', tracker_height)
        if key == 'gpu-id':
            tracker_gpu_id = config1.getint('tracker', key)
            tracker.set_property('gpu_id', tracker_gpu_id)
        if key == 'll-lib-file':
            tracker_ll_lib_file = config1.get('tracker', key)
            tracker.set_property('ll-lib-file', tracker_ll_lib_file)
        if key == 'll-config-file':
            tracker_ll_config_file = config1.get('tracker', key)
            tracker.set_property('ll-config-file', tracker_ll_config_file)
        if key == 'enable-batch-process':
            tracker_enable_batch_process = config1.getint('tracker', key)
            tracker.set_property('enable_batch_process',
                                 tracker_enable_batch_process)

    if not is_aarch64():
        # Use CUDA unified memory in the pipeline so frames
        # can be easily accessed on CPU in Python.
        mem_type = int(pyds.NVBUF_MEM_CUDA_UNIFIED)
        streammux.set_property("nvbuf-memory-type", mem_type)
        nvvidconv.set_property("nvbuf-memory-type", mem_type)
        # FIX: removed `nvvidconv2n.set_property(...)` — nvvidconv2n is never
        # created in this script, so that line raised NameError on any
        # non-Jetson (x86) host.

    ##################### adding rest of the elements to pipeline ##############
    pipeline.add(pgie)
    pipeline.add(tracker)
    pipeline.add(nvvidconv)
    pipeline.add(nvosd)
    pipeline.add(msgconv)
    pipeline.add(msgbroker)

    print("Linking elements in the Pipeline \n")
    streammux.link(pgie)
    pgie.link(tracker)
    tracker.link(nvvidconv)
    nvvidconv.link(nvosd)
    nvosd.link(msgconv)
    msgconv.link(msgbroker)

    # create an event loop and feed gstreamer bus mesages to it
    loop = GObject.MainLoop()
    bus = pipeline.get_bus()
    bus.add_signal_watch()
    bus.connect("message", bus_call, loop)

    # Attach the event-meta probe to the OSD sink pad.
    osdsinkpad = nvosd.get_static_pad("sink")
    if not osdsinkpad:
        sys.stderr.write(" Unable to get sink pad of nvosd \n")
    osdsinkpad.add_probe(Gst.PadProbeType.BUFFER, osd_sink_pad_buffer_probe, 0)

    print("Starting pipeline \n")
    # start play back and listen to events
    pipeline.set_state(Gst.State.PLAYING)
    try:
        loop.run()
    except Exception as e:
        print("loop exception", str(e))

    # cleanup: release the user-meta copy/free callbacks before teardown
    pyds.unset_callback_funcs()
    print("unset callback funcs completed ")
    pipeline.set_state(Gst.State.NULL)
def parse_args():
    """Parse command-line options and populate module-level configuration.

    Sets the globals ``proto_lib``, ``conn_str``, ``schema_type`` and
    ``uri_files`` (input URIs are hard-coded here).

    :return: 0 on success, 1 when a required option is missing.
    """
    global proto_lib
    global conn_str
    global schema_type
    global uri_files

    option_parser = OptionParser()
    option_parser.add_option(
        "-p", "--proto-lib", dest="proto_lib",
        help="Absolute path of adaptor library", metavar="PATH")
    option_parser.add_option(
        "", "--conn-str", dest="conn_str",
        help="Connection string of backend server. Optional if it is part of config file.",
        metavar="STR")
    options, _ = option_parser.parse_args()

    proto_lib = options.proto_lib
    conn_str = options.conn_str
    uri_files = [
        "file:///home/mohit/deepstream_python_apps/apps/deepstream-imagedata-multistream/video1.mp4",
        "file:///home/mohit/deepstream_python_apps/apps/deepstream-imagedata-multistream/video2.mp4",
    ]

    # Both the adaptor library and the connection string are mandatory.
    if proto_lib is None or conn_str is None:
        print("Usage: python3 detect_and_track.py -p <Proto adaptor library> --conn-str=<ip;port;topic>")
        return 1

    schema_type = 0
    return 0