• Hardware Platform Jetson
• DeepStream Version 6.0.1
• JetPack Version 4.6.4
• TensorRT Version 8.2.1
• Issue Type: questions
• How to reproduce the issue ?
I am running a FastSAM model on DeepStream using nvinfer. When I apply it, I found that the number of objects on nvinfer's src pad is smaller than the number printed by the parse-bbox-func-name function before it returns true. I also found that the lost boxes can be drawn normally if I set their coordinate values directly in the parse-bbox-func-name function and pass them to nvosd. Below are my configuration file, the post-processing function, and the probe function's printed output.
Please forgive me for not being able to show the exact code of some functions, but the snippets below should be enough to demonstrate the error.
[property]
gpu-id=0
net-scale-factor=0.00392
model-color-format=0
model-engine-file=model_b1_gpu0_fp32.engine
labelfile-path=labelsBox.txt
batch-size=1
network-mode=0
num-detected-classes=80
interval=0
gie-unique-id=1
process-mode=1
network-type=0
cluster-mode=4
maintain-aspect-ratio=1
symmetric-padding=1
workspace-size=2000
parse-bbox-func-name=NvDsInferParseFastSAMV2y
custom-lib-path=libnvdsinfer_custom_impl_fastsam.so
[class-attrs-all]
nms-iou-threshold=0.1
pre-cluster-threshold=0.1
topk=300
def peig_src_pad_buffer_probe(pad, info, u_data):
    """Buffer probe on the pgie src pad.

    Walks the batch metadata attached to the GstBuffer and, for every
    frame, prints the frame number followed by one line per detected
    object (class id, bbox left/top/height/width, confidence) exactly as
    emitted by nvinfer.
    """
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        print("Unable to get GstBuffer ")
        return

    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        try:
            frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
            frame_num = frame_meta.frame_num
            print("pgie_src_frame_num:", frame_num)
        except StopIteration:
            break

        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            try:
                obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            except StopIteration:
                break
            # NOTE: despite the name, this is the class id, not a tracker id.
            tracking_id = obj_meta.class_id
            rect = obj_meta.rect_params
            print("pgie src classID:" + " " + str(tracking_id)
                  + " " + str(rect.left)
                  + " " + str(rect.top)
                  + " " + str(rect.height)
                  + " " + str(rect.width)
                  + " " + "{:04.3f}".format(obj_meta.confidence))
            try:
                l_obj = l_obj.next
            except StopIteration:
                break

        try:
            l_frame = l_frame.next
        except StopIteration:
            break
    return Gst.PadProbeReturn.OK
# Attach the logging probe to nvinfer's src pad so object metadata can be
# inspected right after inference (before any downstream element sees it).
pgiesrcpad = pgie.get_static_pad("src")
if pgiesrcpad:
    pgiesrcpad.add_probe(Gst.PadProbeType.BUFFER, peig_src_pad_buffer_probe, 0)
else:
    sys.stderr.write(" Unable to get src pad of pgie \n")
NvDsInferParseCustomFastSAMV2y(std::vector<NvDsInferLayerInfo> const& outputLayersInfo, NvDsInferNetworkInfo const& networkInfo,
NvDsInferParseDetectionParams const& detectionParams, std::vector<NvDsInferParseObjectInfo>& objectList)
{
auto options = torch::TensorOptions().dtype(torch::kF32).device(torch::kCUDA, 0);
torch::Tensor pred = torch::from_blob((void *)static_cast<const float*>(outputLayersInfo[5].buffer), {1, 37,20}, options);
auto boxData = pred.slice(1, 0, 4).clone();
auto confData = pred.select(1, 4).clone();
for (int i = 0; i < boxData.size(0); i++){
NvDsInferParseObjectInfo maskInfos;
auto box_index_data = boxData[i];
auto xmin = box_index_data[0].item<float>();
auto ymin = box_index_data[1].item<float>();
auto xmax = box_index_data[2].item<float>();
auto ymax = box_index_data[3].item<float>();
maskInfos.left = xmin;
maskInfos.top = ymin;
maskInfos.width = xmax - xmin;
maskInfos.height = ymax -ymin;
maskInfos.detectionConfidence = 1.0f;
maskInfos.classId=i;
objectList.push_back(maskInfos);
}
for (int i = 0; i < objectList.size(); i++){
auto maskData = objectList[i];
std::cout << "maskInfo: " << maskData.classId << " " << maskData.left << ", " << maskData.top << ", " << maskData.width << ", " << maskData.height << std::endl;
}
return true;
}
Here is the print result:
maskInfo: 0 600.25, 418, 85.5, 124.5
maskInfo: 1 605.5, 543, 100, 121
maskInfo: 2 602.75, 373, 44.5, 39.5
maskInfo: 3 335.5, 417.375, 176.5, 226.25
maskInfo: 4 321, 324, 220, 477
maskInfo: 5 581.5, 377.75, 138, 300.5
maskInfo: 6 402, 400, 57.5, 160.5
maskInfo: 7 64, 223.375, 820, 194.25
maskInfo: 8 588.75, 659, 48.5, 21
maskInfo: 9 391.5, 321, 55.5, 30
maskInfo: 10 348.625, 639, 157.75, 117
maskInfo: 11 123.25, 352.5, 803.5, 165.5
maskInfo: 12 681.5, 652.75, 38, 26.5
maskInfo: 13 969.75, 224, 52.5, 17
maskInfo: 14 394.125, 339.625, 51.75, 71.75
maskInfo: 15 337.75, 741, 65, 37
maskInfo: 16 128, 484.5, 836, 159
maskInfo: 17 345.5, 578.25, 28, 31.5
maskInfo: 18 118.5, 671, 806, 135
maskInfo: 19 503, 526.5, 31, 31
maskInfo: 20 659, 686.5, 52, 114
maskInfo: 21 606.5, 392.875, 35, 37.25
maskInfo: 22 474.125, 752.5, 52.75, 33
maskInfo: 23 590.25, 685, 65.5, 115
maskInfo: 24 0, 514.25, 354, 123.5
pgie_src_frame_num: 0
pgie src classID: 24 0.0 544.21875 231.5625 663.75 1.000
pgie src classID: 23 1106.71875 864.375 215.625 122.8125 1.000
pgie src classID: 22 888.984375 990.9375 61.875 98.90625 1.000
pgie src classID: 21 1137.1875 316.640625 69.84375 65.625 1.000
pgie src classID: 19 943.125 567.1875 58.125 58.125 1.000
pgie src classID: 17 647.8125 664.21875 59.0625 52.5 1.000
pgie src classID: 16 240.0 488.4375 298.125 1567.5 1.000
pgie src classID: 15 633.28125 969.375 69.375 121.875 1.000
pgie src classID: 14 738.984375 216.796875 134.53125 97.03125 1.000
pgie src classID: 13 1818.28125 0.0 31.875 98.4375 1.000
pgie src classID: 12 1277.8125 803.90625 49.6875 71.25 1.000
pgie src classID: 11 231.09375 240.9375 310.3125 1506.5625 1.000
pgie src classID: 10 653.671875 778.125 219.375 295.78125 1.000
pgie src classID: 9 734.0625 181.875 56.25 104.0625 1.000
pgie src classID: 8 1103.90625 815.625 39.375 90.9375 1.000
pgie src classID: 6 753.75 330.0 300.9375 107.8125 1.000
pgie src classID: 5 1090.3125 288.28125 563.4375 258.75 1.000
pgie src classID: 3 629.0625 362.578125 424.21875 330.9375 1.000
pgie src classID: 2 1130.15625 279.375 74.0625 83.4375 1.000
pgie src classID: 1 1135.3125 598.125 226.875 187.5 1.000
pgie src classID: 0 1125.46875 363.75 233.4375 160.3125 1.000
As shown above, each object keeps the same ID between nvinfer's parse-bbox-func-name function and the probe on nvinfer's src pad, but the boxes with IDs 20, 18, 7, and 4 have disappeared by the time the probe runs.
If anyone knows the cause, I would be very grateful for an answer.