Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU)
Jetson Orin 4012, NVIDIA Jetson Orin NX Bundle, 8x 2GHz, 16GB DDR5
• DeepStream Version
Container: deepstream:7.0-triton-multiarch
• JetPack Version (valid for Jetson only)
see Container: deepstream:7.0-triton-multiarch
• TensorRT Version
see Container: deepstream:7.0-triton-multiarch
• NVIDIA GPU Driver Version (valid for GPU only)
$ nvidia-smi
Returns: Driver Version: N/A
• Issue Type( questions, new requirements, bugs)
Issue
I am running the following gstreamer pipeline in python via the provided bindings:
gst-launch-1.0 v4l2src device=/dev/video0 ! nvvideoconvert src-crop=0:0:1920:1080 ! m.sink_0 nvstreammux name=m batch-size=1 live-source=1 width=1280 height=720 ! nvinfer config-file-path=configs/facedetect.yml ! nvinfer config-file-path=ai_pipeline/configs/landmarks.yml ! fakesink
I added the following landmark probe to the landmarks infer stage:
def _landmarks_inference_probe(self, pad: Gst.Pad, info: Gst.PadProbeInfo) -> None:
buffer = info.get_buffer()
if not buffer:
return Gst.PadProbeReturn.OK
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(buffer))
l_frame = batch_meta.frame_meta_list
while l_frame is not None:
try:
frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
except StopIteration:
break
l_object = frame_meta.obj_meta_list
faces: List[Face] = []
id: int = 0
obj_counter = 0
while l_object is not None: # Loop through found faces
try:
obj_meta = pyds.NvDsObjectMeta.cast(l_object.data)
except StopIteration:
break
log.info(f'obj: {obj_counter}')
obj_counter += 1
left: float = obj_meta.detector_bbox_info.org_bbox_coords.left
top: float = obj_meta.detector_bbox_info.org_bbox_coords.top
width: float = obj_meta.detector_bbox_info.org_bbox_coords.width
height: float = obj_meta.detector_bbox_info.org_bbox_coords.height
landmarks: List[Point] = []
l_user = obj_meta.obj_user_meta_list
user_counter = 0
while l_user is not None:
try:
user_meta = pyds.NvDsUserMeta.cast(l_user.data)
except StopIteration:
break
if (user_meta.base_meta.meta_type != pyds.NvDsMetaType.NVDSINFER_TENSOR_OUTPUT_META):
continue
log.info(f'usr: {user_counter}')
user_counter += 1
tensor_meta = pyds.NvDsInferTensorMeta.cast(user_meta.user_meta_data)
frame_outputs = []
output_shapes = [[80,80,80],[80,2],[80]]
for i in range(tensor_meta.num_output_layers):
layer = pyds.get_nvds_LayerInfo(tensor_meta, i)
# Convert NvDsInferLayerInfo buffer to numpy array
ptr = ctypes.cast(pyds.get_ptr(layer.buffer), ctypes.POINTER(ctypes.c_float))
v = np.ctypeslib.as_array(ptr, shape=(output_shapes[i]))
frame_outputs.append(v)
landmarks: List[Point] = Landmarks.from_pipeline_inference_output(frame_outputs)
try:
l_user = l_user.next
except StopIteration:
break
face = Face(
id,
left,
top,
width,
height,
landmarks,
frame_outputs[2], #landmark_confidences
)
faces.append(face)
id += 1
log.info(f'{face}')
try:
l_object = l_object.next
except StopIteration:
break
log.info(f'{faces}')
try:
l_frame = l_frame.next
except StopIteration:
break
return Gst.PadProbeReturn.OK
I thought everythin was working fine first, but when teesting with multiple found faces within the frame, i noticed that the landmarks of the last face found (face 2 with 2 found faces, face 3 with 3 found faces etc…) are scattered all over the place. Am i doing something wrong while retrieving the landmarks for each found face?