How to change the detection threshold of my model?

I have a YOLOv4 object detection model for which I want to lower the threshold so that more objects are detected. However, it seems I'm doing something wrong, because no matter what I change, the detections stay the same. My two config files are listed below:

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=1
#gie-kitti-output-dir=/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/labels

[tiled-display]
enable=1
rows=1
columns=1
#width=1280
#height=720
width=3840
height=2160
gpu-id=0
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
nvbuf-memory-type=3


[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
num-sources=1
uri=file://../../streams/IMG_2522.mp4
gpu-id=0
# (0): memtype_device   - Memory type Device
# (1): memtype_pinned   - Memory type Host Pinned
# (2): memtype_unified  - Memory type Unified
cudadec-memtype=0
nvbuf-memory-type=3


[streammux]
gpu-id=0
batch-size=1
batched-push-timeout=80000
## Set muxer output width and height
#width=1280
#height=720
width=3840
height=2160
nvbuf-memory-type=0

[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=1
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=3
qos=0

[osd]
enable=0
gpu-id=0
border-width=3
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
nvbuf-memory-type=3


[primary-gie]
enable=1
gpu-id=0
# Modify as necessary
batch-size=1
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
gie-unique-id=1
# Replace the infer primary config file when you need to
# use other detection models
#config-file=config_infer_primary_frcnn.txt
#config-file=config_infer_primary_ssd.txt
#config-file=config_infer_primary_dssd.txt
#config-file=config_infer_primary_retinanet.txt
#config-file=config_infer_primary_yolov3.txt
config-file=config_infer_primary_yolov4_lp.txt
#config-file=config_infer_primary_detectnet_v2.txt
nvbuf-memory-type=3


[sink1]
enable=1
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=0
bitrate=22244000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=4
output-file=output.mp4
source-id=0

[sink2]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 5=Overlay
type=1
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=0
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=4
# set below properties in case of RTSPStreaming
rtsp-port=8554
udp-port=5400

[tracker]
enable=1
# For NvDCF and DeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=640
tracker-height=384
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so
# ll-config-file required to set different tracker types
# ll-config-file=../deepstream-app/config_tracker_IOU.yml
# ll-config-file=../deepstream-app/config_tracker_NvDCF_perf.yml
ll-config-file=../deepstream-app/config_tracker_NvDCF_accuracy.yml
# ll-config-file=../deepstream-app/config_tracker_DeepSORT.yml
gpu-id=0
enable-batch-process=1
enable-past-frame=1
display-tracking-id=1

[tests]
file-loop=0

[ds-example]
enable=1
processing-width=1248
processing-height=384
full-frame=0
blur-objects=1
unique-id=15
nvbuf-memory-type=3

The second file, config_infer_primary_yolov4_lp.txt:

[property]
gpu-id=0
net-scale-factor=1.0
offsets=103.939;116.779;123.68
model-color-format=1
labelfile-path=lp_labels.txt
model-engine-file=../../models/tao_pretrained_models/yolov4/yolov4_resnet18.etlt_b1_gpu0_int8.engine
int8-calib-file=../../models/tao_pretrained_models/yolov4/cal.bin
tlt-encoded-model=../../models/tao_pretrained_models/yolov4/yolov4_resnet18_epoch_110.etlt
tlt-model-key=Z2hia2Q4MjhuZTk0cWQ0Y2JhOXVxOTAyOWQ6MDJjNmE5YTctNjBkYy00NTk3LWI4YWItNGU2MWQwNTE4MTdl
infer-dims=3;384;1248
maintain-aspect-ratio=1
uff-input-order=0
uff-input-blob-name=Input
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=0
num-detected-classes=2
interval=0
gie-unique-id=1
is-classifier=0
#network-type=0
cluster-mode=3
output-blob-names=BatchedNMS
parse-bbox-func-name=NvDsInferParseCustomBatchedNMSTLT
custom-lib-path=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_infercustomparser.so

[class-attrs-all]
pre-cluster-threshold=0.3
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0

[class-attrs-0]
nms-iou-threshold=0.2

[class-attrs-1]
nms-iou-threshold=0.2

Can someone tell me what I need to change?

You may try changing the detector threshold in the class-attribute groups (class-attrs-*); please refer to Gst-nvinfer — DeepStream 6.1.1 Release documentation.
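For example, a minimal sketch of what that could look like in your config_infer_primary_yolov4_lp.txt (the 0.1 values are only illustrative, not a recommendation):

[class-attrs-all]
pre-cluster-threshold=0.1

[class-attrs-0]
pre-cluster-threshold=0.1
nms-iou-threshold=0.2

[class-attrs-1]
pre-cluster-threshold=0.1
nms-iou-threshold=0.2

A lower pre-cluster-threshold lets more low-confidence boxes through, and the per-class groups override [class-attrs-all] for their class ID.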

There has been no update from you for a while, so we assume this is no longer an issue.
Hence we are closing this topic. If you need further support, please open a new one.
Thanks

It seems the model includes an NMS layer. Could you run the command below to check whether there is an NMS layer in the TensorRT engine?
If there is, you need to change the NMS layer configuration in TAO training instead of in the DeepStream config file.

$ /usr/src/tensorrt/bin/trtexec --loadEngine=yolov4_resnet18.etlt_b1_gpu0_int8.engine --dumpProfile
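If the profile shows a BatchedNMS layer, its confidence threshold was baked in when the model was exported, so it needs to be changed on the TAO side and the model re-exported. A rough sketch of the relevant block in the TAO YOLOv4 training spec (field names and values are from memory, so please verify them against your own spec file):

nms_config {
  confidence_threshold: 0.001
  clustering_iou_threshold: 0.5
  top_k: 200
}

After re-exporting the .etlt, remove or rename the cached .engine file so DeepStream rebuilds it with the new settings.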
