Please provide complete information as applicable to your setup.
• Hardware Platform (GPU)
• DeepStream Version 6.2
• Issue Type(BUG)
I have used the nvdspreprocess plugin to preprocess images for my model, but it seems to process each frame twice.
this is the preprocess config file:
[property]
enable=1
target-unique-ids=1
# 网络输入顺序 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
process-on-frame=1
# 输入尺寸,batch小于7会出错,暂时未知
network-input-shape=3;3;720;1280
processing-width=1280
processing-height=720
scaling-buf-pool-size=6
tensor-buf-pool-size=6
# 输入颜色格式 0=RGB, 1=BGR, 2=GRAY
network-color-format=0
# 输入数据类型 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
# **输入层名称**
tensor-name=input.1
# 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED 4=NVBUF_MEM_SURFACE_ARRAY
scaling-pool-memory-type=0
# 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
# Scaling Interpolation method
# 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
# 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
# 6=NvBufSurfTransformInter_Default
scaling-filter=0
# 自定义预处理实现(yolov7无需自定义)
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
#custom-lib-path=/lib/x86_64-linux-gnu/libcustom2d_preprocess.so
custom-tensor-preparation-function=CustomTensorPreparation
[group-0]
src-ids=0;1;2;3;4;5;6
#src-ids=0
custom-input-transformation-function=CustomTransformation
# 是否开启ROI
process-on-roi=0
# 可以多个框 left;top;width;height
roi-params-src-0=0;540;900;500;100;100;100;100
#[group-1]
#src-ids=2
#custom-input-transformation-function=CustomAsyncTransformation
#process-on-roi=1
#roi-params-src-2=0;540;900;500;960;0;900;500
#[group-2]
#src-ids=3
#custom-input-transformation-function=CustomAsyncTransformation
#process-on-roi=0
#roi-params-src-3=0;540;900;500;960;0;900;500
[user-configs]
# 缩放1/255=0.003921568
pixel-normalization-factor=0.003921568
#offset=113.766;104.426;100.746
#channel-scale-factors=73.4925;70.9269;72.2931
#channel-mean-offsets=113.766;104.426;100.746
and infer config file:
[property]
# 运行的GPU ID ===> replace in program
#gpu-id=0
# 缩放 1/255
#net-scale-factor=0.0039215697906911373
# onnx模型路径
onnx-file= ./models/hrnet_b3.onnx
# tensorrt模型路径
model-engine-file=./models/hrnet_ds62_b3_sm75.int8.cache
# 需要提供标签文件,才能顺利渲染
#labelfile-path=./models/labels_city.txt
# 校准表路径
int8-calib-file=./models/hrnet_ds62_b3_sm75.table.int8
# 批量大小 ===> replace in program
#batch-size=3
# 处理模式 1=Primary 2=Secondary ===> replace in program
#process-mode=1
# 模型颜色格式 Integer 0: RGB 1: BGR 2: GRAY
#model-color-format=0
# 网络模式
## 0=FP32, 1=INT8, 2=FP16 mode
#network-mode=1
# 检测类别数
num-detected-classes=1
#input-tensor-meta=1
#interval=0
gie-unique-id=1
# 输出层名称 "output", "528", "546"
#output-blob-names=onnx::Resize_3600;3646
#force-implicit-batch-dim=1
# 解析框函数(Yolov7要实现后处理)
parse-bbox-func-name=NvDsInferParseCustomHRNet
custom-lib-path=/lib/x86_64-linux-gnu/libnvds_infercustomparser.so
## 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering)
# 聚类模式
#cluster-mode=2
#scaling-filter=0
#scaling-compute-hw=0
#Use the config params below for dbscan clustering mode
#[class-attrs-all]
#detected-min-w=4
#detected-min-h=4
#minBoxes=3
# 针对所有类的配置
#Use the config params below for NMS clustering mode
#[class-attrs-all]
#topk=20
#nms-iou-threshold=0.1
#pre-cluster-threshold=0.1
# 针对某个类别的配置
## Per class configurations
#[class-attrs-0]
#topk=20
#nms-iou-threshold=0.45
#pre-cluster-threshold=0.25
#[class-attrs-1]
#pre-cluster-threshold=0.05
#eps=0.7
#dbscan-min-score=0.5
#[class-attrs-2]
#pre-cluster-threshold=0.1
#eps=0.6
#dbscan-min-score=0.95
#[class-attrs-3]
#pre-cluster-threshold=0.05
#eps=0.7
#dbscan-min-score=0.5
this is the custom bounding-box parser implemented against the low-level nvinfer API:
bool NvDsInferParseCustomHRNet (std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
NvDsInferNetworkInfo const &networkInfo,
NvDsInferParseDetectionParams const &detectionParams,
std::vector<NvDsInferObjectDetectionInfo> &objectList)
{
assert(outputLayersInfo.size() == 2);
// pred-map
float* pred_map = (float *)outputLayersInfo[0].buffer;
// threshold
float *threshold = (float*)outputLayersInfo[1].buffer;
cv::Mat mask(networkInfo.height, networkInfo.width, CV_8U, cv::Scalar(0));
for(int y = 0; y < networkInfo.height; y++) {
for(int x = 0; x < networkInfo.width; x++) {
float val1 = threshold[y * networkInfo.width + x];
float val2 = pred_map[y * networkInfo.width + x];
if(val2 > val1) {
mask.at<uchar>(y,x) = 1;
}
}
}
cv::Mat labels, stats, centroids;
cv::connectedComponentsWithStats(mask, labels, stats, centroids, 4);
std::cout <<
for(int i = 1; i < stats.rows; i++) {
if(stats.at<int>(i, 4) > 3) {
NvDsInferObjectDetectionInfo res;
res.classId = 0;
res.detectionConfidence = 1.0;
res.left = stats.at<int>(i, 0);
res.top = stats.at<int>(i, 1);
res.width = stats.at<int>(i, 2);
res.height = stats.at<int>(i, 3);
if (res.width && res.height) {
std::cout << "i: " << i << " x: " << res.left << " y: " << res.top << " w: " << res.width << " h: " << res.height << std::endl;
objectList.emplace_back(res);
}
}
}
return true;
}
this is the log:
********************************************************
i: 1 x: 905 y: 170 w: 5 h: 5
i: 2 x: 769 y: 190 w: 5 h: 6
i: 3 x: 146 y: 279 w: 9 h: 11
i: 4 x: 900 y: 343 w: 12 h: 13
i: 5 x: 951 y: 422 w: 23 h: 23
i: 6 x: 1023 y: 451 w: 14 h: 15
#####$###########################################
********************************************************
i: 1 x: 905 y: 170 w: 5 h: 5
i: 2 x: 769 y: 190 w: 5 h: 6
i: 3 x: 146 y: 279 w: 9 h: 11
i: 4 x: 900 y: 343 w: 12 h: 13
i: 5 x: 951 y: 422 w: 23 h: 23
i: 6 x: 1023 y: 451 w: 14 h: 15
#####$###########################################
I0921 13:43:27.149148 65 imagesavebroker.cpp:301] i: 0 x: 1357 y: 255 w: 7 h: 7
I0921 13:43:27.149204 65 imagesavebroker.cpp:301] i: 1 x: 1153 y: 285 w: 7 h: 9
I0921 13:43:27.149230 65 imagesavebroker.cpp:301] i: 2 x: 219 y: 418 w: 13 h: 16
I0921 13:43:27.149242 65 imagesavebroker.cpp:301] i: 3 x: 1350 y: 514 w: 18 h: 19
I0921 13:43:27.149255 65 imagesavebroker.cpp:301] i: 4 x: 1426 y: 633 w: 34 h: 34
I0921 13:43:27.149266 65 imagesavebroker.cpp:301] i: 5 x: 1534 y: 676 w: 21 h: 22
I0921 13:43:27.149277 65 imagesavebroker.cpp:301] i: 6 x: 1357 y: 255 w: 7 h: 7
I0921 13:43:27.149288 65 imagesavebroker.cpp:301] i: 7 x: 1153 y: 285 w: 7 h: 9
I0921 13:43:27.149299 65 imagesavebroker.cpp:301] i: 8 x: 219 y: 418 w: 13 h: 16
I0921 13:43:27.149312 65 imagesavebroker.cpp:301] i: 9 x: 1350 y: 514 w: 18 h: 19
I0921 13:43:27.149322 65 imagesavebroker.cpp:301] i: 10 x: 1426 y: 633 w: 34 h: 34
I0921 13:43:27.149334 65 imagesavebroker.cpp:301] i: 11 x: 1534 y: 676 w: 21 h: 22
You can see that it processes the same frame twice and produces identical results, and these duplicated results are then passed downstream to the broker plugin.
I have tested the same setup on DeepStream 6.3, and the problem does not occur there.