Frame rate estimation of inference

Please provide complete information as applicable to your setup.

**• Hardware Platform**: Jetson Xavier NX (JetPack 4.4)
**• DeepStream Version**: 5.0
**• TensorRT Version**: 7.0
Hello experts,
How can I evaluate the frame rate of OpenPose inference? I tried to evaluate the FPS in the OSD probe function, but the result is bizarre.
Here is my function:

#print("test osd funtion")
#Intiallizing object counter with 0.
#num_rects=0
gst_buffer = info.get_buffer()
if not gst_buffer:
    print("Unable to get GstBuffer ")
    return

# Retrieve batch metadata from the gst_buffer
# Note that pyds.gst_buffer_get_nvds_batch_meta() expects the
# C address of gst_buffer as input, which is obtained with hash(gst_buffer)
batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
l_frame = batch_meta.frame_meta_list
t = 0.0  # tick reference used for the per-frame timing below
while l_frame is not None:
    try:

        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
    except StopIteration:
        break
    l_obj=frame_meta.obj_meta_list
    # elapsed time in seconds since the last tick sample, then convert to fps
    t = (cv2.getTickCount() - t) / cv2.getTickFrequency()
    fps = 1.0 / t
    print("fps: ", fps)
    display_meta = pyds.nvds_acquire_display_meta_from_pool(batch_meta)
    display_meta.num_labels = 1
    py_nvosd_text_params = display_meta.text_params[0]
    py_nvosd_text_params.display_text = "Frame rate: {}".format(fps)
    py_nvosd_text_params.x_offset = 10
    py_nvosd_text_params.y_offset = 12

    # Font , font-color and font-size
    py_nvosd_text_params.font_params.font_name = "Serif"
    py_nvosd_text_params.font_params.font_size = 10
    # set(red, green, blue, alpha); set to White
    py_nvosd_text_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)

    # Text background color
    py_nvosd_text_params.set_bg_clr = 1
    # set(red, green, blue, alpha); set to Black
    py_nvosd_text_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
    line_params = display_meta.line_params
    line_index = 0

    # Connect the keypoints: for every object whose class_id matches the first
    # entry of a cocopair (the list of COCO keypoint index pairs, defined
    # elsewhere in the app), draw a line to an object matching the second entry.
    while l_obj is not None:
        obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
        for i in range(17):
            if cocopair[i][0] == obj_meta.class_id:
                temp = l_obj
                while temp is not None:
                    temp_meta = pyds.NvDsObjectMeta.cast(temp.data)
                    temp = temp.next
                    if temp_meta.class_id == cocopair[i][1]:
                        line_params[line_index].x1 = int(obj_meta.rect_params.left)
                        line_params[line_index].y1 = int(obj_meta.rect_params.top)
                        line_params[line_index].x2 = int(temp_meta.rect_params.left)
                        line_params[line_index].y2 = int(temp_meta.rect_params.top)
                        line_params[line_index].line_color.set(1.0, 0, 0, 0.5)
                        line_params[line_index].line_width = 4
                        line_index += 1
        print("Keypoint x: {} y: {}".format(obj_meta.rect_params.left, obj_meta.rect_params.top))
        l_obj = l_obj.next
    # only draw the line slots that were actually filled in
    display_meta.num_lines = line_index
    print(pyds.get_string(py_nvosd_text_params.display_text))
    pyds.nvds_add_display_meta_to_frame(frame_meta, display_meta)
    try:
        l_frame = l_frame.next
        t = cv2.getTickCount()
    except StopIteration:
        break
return Gst.PadProbeReturn.OK

The result is:
Keypoint x: 858.2608642578125 y: 253.8586883544922
Keypoint x: 665.2174072265625 y: 237.71739196777344
Keypoint x: 957.3912963867188 y: 51.35869598388672
Keypoint x: 813.9130249023438 y: 74.83695983886719
Frame rate: 4128.665739093097

I use the camera /dev/video0 as input, and the camera only delivers 20 fps.

I also tried a different network. One is obviously slower than the other, but the reported frame rate stays the same. Why is that?
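
For reference, here is a minimal sketch of the kind of number I am trying to get, i.e. the rate at which buffers actually reach the probe rather than the time spent inside it (prev_time and fps_probe are placeholder names, not my real code):

import time

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst

# placeholder module-level state: arrival time of the previous buffer
prev_time = None

def fps_probe(pad, info, u_data):
    # report how often buffers arrive at this pad
    global prev_time
    now = time.monotonic()
    if prev_time is not None:
        interval = now - prev_time
        if interval > 0:
            print("pipeline fps: {:.1f}".format(1.0 / interval))
    prev_time = now
    return Gst.PadProbeReturn.OK

It would be attached the same way as the OSD probe above, e.g. with pad.add_probe(Gst.PadProbeType.BUFFER, fps_probe, 0).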

I think you can evaluate the model using trtexec.
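
For example, trtexec ships with JetPack under /usr/src/tensorrt/bin; with an ONNX model (pose_model.onnx is just a placeholder name) a minimal run would look like:

/usr/src/tensorrt/bin/trtexec --onnx=pose_model.onnx --fp16

It builds an engine and reports the inference latency and throughput of the network by itself, independent of the camera and the rest of the DeepStream pipeline, so you can compare that against the 20 fps your source delivers.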