Please provide complete information as applicable to your setup.
**• Hardware Platform (Jetson / GPU) XAVIER NX
**• DeepStream Version 5.1
**• JetPack Version (valid for Jetson only) 4.4
**• TensorRT Version 7.1
• NVIDIA GPU Driver Version (valid for GPU only)
I want to integrate the paddleOCR in the jetson to get realtime recognize text,now i have export the paddleOCR detect model and recognize model as onnx model,and i have integrate the paddleOCR detect model with deepstream and it run well, I can detect all the text in the frame.
I refer the demo of deepstream-ssd-parser(python) and deepstream-infer-tensor-meta-test (C++) to learn how to pass the resutl to the secondary,but i get the following error:
0:00:11.586223860 8343 0x28985630 WARN nvinfer gstnvinfer.cpp:1277:convert_batch_and_push_to_input_thread:<secondary1-nvinference-engine> error: NvBufSurfTransform failed with error -3 while converting buffer
0:00:11.586364027 8343 0x28985630 WARN nvinfer gstnvinfer.cpp:1984:gst_nvinfer_output_loop:<primary-inference> error: Internal data stream error.
0:00:11.586388028 8343 0x28985630 WARN nvinfer gstnvinfer.cpp:1984:gst_nvinfer_output_loop:<primary-inference> error: streaming stopped, reason error (-5)
Error: gst-stream-error-quark: NvBufSurfTransform failed with error -3 while converting buffer (1): /dvs/git/dirty/git-master_linux/deepstream/sdk/src/gst-plugins/gst-nvinfer/gstnvinfer.cpp(1277): convert_batch_and_push_to_input_thread (): /GstPipeline:pipeline0/GstNvInfer:secondary1-nvinference-engine
my pgie_config.txt as following
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-engine-file=./model/det.engine
infer-dims=3;640;640
batch-size=1
process-mode=1
network-mode=1
interval=1
gie-unique-id=1
network-type=100
output-tensor-meta=1
my sgie_config.txt as following
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
#net-scale-factor=1
force-implicit-batch-dim=1
model-file=./rec_model.onnx
model-engine-file=./model/rec.engine
gie-unique-id=2
operate-on-gie-id=1
operate-on-class-ids=0
model-color-format=1
infer-dims=3;32;100
batch-size=1
process-mode=2
network-mode=1
interval=0
network-type=100
output-tensor-meta=1
i use these methods to generate bbox info
def make_nodi(bbox):
""" Creates a NvDsInferObjectDetectionInfo object from one layer of SSD.
Return None if the class Id is invalid, if the detection confidence
is under the threshold or if the width/height of the bounding box is
null/negative.
Return the created NvDsInferObjectDetectionInfo object otherwise.
"""
# score_layer, class_layer, box_layer = layers
#获取detect对象
res = pyds.NvDsInferObjectDetectionInfo()
#统一设置为0.9
res.detectionConfidence = 0.8
#res.detectionConfidence = pyds.get_detections(score_layer.buffer, index)
#classId 本系统中只有一类,统一设置为 0
res.classId = 0
#通过已经得到的bbox 来设置
rect_x1_f = bbox[0][0]
rect_y1_f = bbox[0][1]
rect_x2_f = bbox[2][0]
rect_y2_f = bbox[2][1]
res.left = rect_y1_f
res.top = rect_x1_f
res.width = rect_y2_f - rect_y1_f
res.height = rect_x2_f - rect_x1_f
if res.width < 8 or res.height <3 or res.width >100 or res.height >32:
# if res.width < 8 or res.height <3:
return None
print (res.width,res.height)
return res
def nvds_infer_parse_custom_tf_ssd(dt_boxes_list):
object_list = []
dt_boxes_list=dt_boxes_list[0].tolist()
print ('dt_boxes_list==',len(dt_boxes_list))
for bbox in dt_boxes_list:
print ('bbox==',bbox)
obj = make_nodi(bbox)
if obj:
object_list.append(obj)
return object_list
i use this method to add object meta to the frame :
def add_obj_meta_to_frame(frame_object, batch_meta, frame_meta, label_name):
""" Inserts an object into the metadata """
obj_meta = pyds.nvds_acquire_obj_meta_from_pool(batch_meta)
# Set bbox properties. These are in input resolution.
obj_meta.unique_component_id = 1
rect_params = obj_meta.rect_params
rect_params.left = int(IMAGE_WIDTH * frame_object.left)
rect_params.top = int(IMAGE_HEIGHT * frame_object.top)
rect_params.width = int(IMAGE_WIDTH * frame_object.width)
rect_params.height = int(IMAGE_HEIGHT * frame_object.height)
# Semi-transparent yellow backgroud
rect_params.has_bg_color = 0
rect_params.bg_color.set(1, 1, 0, 0.4)
# Red border of width 3
rect_params.border_width = 3
rect_params.border_color.set(1, 0, 0, 1)
# Set object info including class, detection confidence, etc.
obj_meta.confidence = frame_object.detectionConfidence
obj_meta.class_id = frame_object.classId
# print ('obj_meta.class_id==',obj_meta.class_id)
# There is no tracking ID upon detection. The tracker will
# assign an ID.
obj_meta.object_id = UNTRACKED_OBJECT_ID
lbl_id = frame_object.classId
# Set the object classification label.
# obj_meta.obj_label = label_names[lbl_id]
obj_meta.obj_label = label_name
# Set display text for the object.
txt_params = obj_meta.text_params
if txt_params.display_text:
pyds.free_buffer(txt_params.display_text)
txt_params.x_offset = int(rect_params.left)
txt_params.y_offset = max(0, int(rect_params.top) - 10)
txt_params.display_text = (
label_name + " " + "{:04.3f}".format(frame_object.detectionConfidence)
)
# Font , font-color and font-size
txt_params.font_params.font_name = "Serif"
txt_params.font_params.font_size = 10
# set(red, green, blue, alpha); set to White
txt_params.font_params.font_color.set(1.0, 1.0, 1.0, 1.0)
# Text background color
txt_params.set_bg_clr = 1
# set(red, green, blue, alpha); set to Black
txt_params.text_bg_clr.set(0.0, 0.0, 0.0, 1.0)
# Inser the object into current frame meta
# This object has no parent
pyds.nvds_add_obj_meta_to_frame(frame_meta, obj_meta, None)
i have test all the solutions in this forum to solve the error, but it doesn’t work, so I want someone to help me out , thank u very much.
If need, i can uplaod all the code.