NVIDIA Jetson Xavier NX
DeepStream Version5.0
Jetpack 4.4.1 [L4T 32.4.4]
TensorRT: 7.1.3.0
CUDA: 10.2.89
cuDNN: 8.0.0.180
Visionworks: 1.6.0.501
OpenCV: 4.1.1 compiled CUDA: NO
VPI: 0.4.4
Vulkan: 1.2.70
I think I am about to finish my face recognition DeepStream app; I used YOLOv3 as the primary detector and FaceNet as the classifier. I also used a compressed dataset_embeddings.npz as my input dataset, which includes photos of the faces to be recognized in the video. When I run the application,
i get results as shown below:
From the second photo, you can see that the predicted names are correct. But unfortunately, in the first photo the name is not shown along the bbox — only "face1" is displayed instead. How can I pass that info so that it gets displayed next to the bbox?
Below is the osd_sink_pad_buffer_probe function:
def osd_sink_pad_buffer_probe(pad, info, u_data):
    """Per-frame OSD probe.

    Walks the batch metadata attached to the buffer, counts detected
    objects per class (vehicle/person from the primary detector,
    face/license plate from the secondary detector), and overlays a
    summary text line on each frame via display meta.

    Args:
        pad: the nvdsosd sink pad this probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the Gst.Buffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    global fps_stream, face_counter
    frame_number = 0
    # Initializing object counters with 0.
    vehicle_count = 0
    person_count = 0
    face_count = 0
    lp_count = 0
    num_rects = 0

    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # FIX: a pad probe must return a Gst.PadProbeReturn value; the
        # original bare `return` handed None back to GStreamer.
        return Gst.PadProbeReturn.OK

    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # The cast keeps ownership of the underlying memory in the C
            # code, so the Python garbage collector leaves it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # Class ids are mutually exclusive per component, so elif is
            # both clearer and skips redundant comparisons.
            if obj_meta.unique_component_id == PRIMARY_DETECTOR_UID:
                if obj_meta.class_id == PGIE_CLASS_ID_VEHICLE:
                    vehicle_count += 1
                elif obj_meta.class_id == PGIE_CLASS_ID_PERSON:
                    person_count += 1
            elif obj_meta.unique_component_id == SECONDARY_DETECTOR_UID:
                if obj_meta.class_id == SGIE_CLASS_ID_FACE:
                    face_count += 1
                elif obj_meta.class_id == SGIE_CLASS_ID_LP:
                    lp_count += 1
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        fps_stream.get_fps()

        # Acquire a display meta object. The memory ownership remains in
        # the C code so downstream plugins can still access it; otherwise
        # the garbage collector would claim it when this probe exits.
        display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
        display_meta.num_labels = 1
        py_nvosd_text_params = display_meta.text_params[0]
        # The pyds module allocates a C buffer for the string; reading
        # display_text back returns the C address — use pyds.get_string().
        py_nvosd_text_params.display_text = (
            "Frame Number={} Number of Objects={} Person_count={} "
            "Face Count={}".format(frame_number, num_rects,
                                   person_count, face_count)
        )
        face_counter.append(face_count)
        # Offsets where the string should appear.
        py_nvosd_text_params.x_offset = 10
        py_nvosd_text_params.y_offset = 12
        # Font, font size and font color (white: r, g, b, a).
        py_nvosd_text_params.font_params.font_name = "Serif"
        py_nvosd_text_params.font_params.font_size = 10
        py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
        # Text background color (black).
        py_nvosd_text_params.set_bg_clr = 1
        py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
        # Use pyds.get_string() to get display_text as a Python string.
        print(pyds.get_string(py_nvosd_text_params.display_text))
        pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
And here is the sgie_sink_pad_buffer_probe function:
def sgie_sink_pad_buffer_probe(pad, info, u_data):
    """SGIE probe: turn FaceNet embeddings into name labels on each object.

    For every object, reads the raw tensor output meta attached by the
    SGIE (a 128-float FaceNet embedding), predicts the person's name with
    the pre-loaded classifier, and attaches the prediction both as proper
    classifier metadata (so nvdsosd renders the name instead of the raw
    detector label "face1") and as appended display text on the bbox.

    Args:
        pad: the pad this probe is attached to (unused).
        info: Gst.PadProbeInfo carrying the Gst.Buffer.
        u_data: opaque user data (unused).

    Returns:
        Gst.PadProbeReturn.OK so the buffer continues downstream.
    """
    frame_number = 0
    num_rects = 0
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        # FIX: a pad probe must return a Gst.PadProbeReturn value; the
        # original bare `return` handed None back to GStreamer.
        return Gst.PadProbeReturn.OK

    # pyds.gst_buffer_get_nvds_batch_meta() expects the C address of
    # gst_buffer, which is obtained with hash(gst_buffer).
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            # The cast keeps ownership of the underlying memory in the C
            # code, so the Python garbage collector leaves it alone.
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        except StopIteration:
            break
        frame_number = frame_meta.frame_num
        num_rects = frame_meta.num_obj_meta

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break

            l_user = obj_meta.obj_user_meta_list
            while l_user is not None:
                try:
                    user_meta = pyds.NvDsUserMeta.cast(l_user.data)
                except StopIteration:
                    break
                # BUG FIX: the original used `continue` on a non-matching
                # meta type WITHOUT advancing l_user, which loops forever
                # on the first non-tensor user meta. Process only tensor
                # output meta inside this branch and always advance below.
                if (user_meta.base_meta.meta_type
                        == pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
                    # Cast user_meta.user_meta_data to NvDsInferTensorMeta
                    # and get output layer 0 as NvDsInferLayerInfo.
                    tensor_meta = pyds.NvDsInferTensorMeta.cast(
                        user_meta.user_meta_data)
                    layer = pyds.get_nvds_LayerInfo(tensor_meta, 0)
                    # View the layer buffer as a 128-float numpy array
                    # (FaceNet embedding size; assumes FP32 output —
                    # TODO confirm network-mode in the SGIE config).
                    ptr = ctypes.cast(pyds.get_ptr(layer.buffer),
                                      ctypes.POINTER(ctypes.c_float))
                    v = np.ctypeslib.as_array(ptr, shape=(128,))
                    # Predict the face name from the embedding.
                    embedding = normalize_vectors(v.reshape((1, -1)))
                    result = predict_using_classifier(
                        faces_embeddings, labels, embedding)
                    result = str(result).title()
                    print('Predicted name: %s' % result)
                    # Attach the prediction as classifier metadata.
                    classifier_meta = \
                        pyds.nvds_acquire_classifier_meta_from_pool(batch_meta)
                    classifier_meta.unique_component_id = tensor_meta.unique_id
                    label_info = \
                        pyds.nvds_acquire_label_info_meta_from_pool(batch_meta)
                    label_info.result_prob = 0
                    label_info.result_class_id = 0
                    # FIX: populate the label text. The original left
                    # result_label empty, which is why the OSD kept showing
                    # the detector label ("face1") instead of the name.
                    label_info.result_label = result
                    pyds.nvds_add_label_info_meta_to_classifier(
                        classifier_meta, label_info)
                    pyds.nvds_add_classifier_meta_to_object(
                        obj_meta, classifier_meta)
                    # Also append the name directly to the bbox text.
                    display_text = pyds.get_string(
                        obj_meta.text_params.display_text)
                    obj_meta.text_params.display_text = \
                        f'{display_text} {result}'
                try:
                    l_user = l_user.next
                except StopIteration:
                    break

            try:
                l_obj = l_obj.next
            except StopIteration:
                break
        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
Thank you, I would really appreciate your guidance.