Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU)
Jetson Orin
• DeepStream Version
6.1
• JetPack Version (valid for Jetson only)
5
• TensorRT Version
8.1
• NVIDIA GPU Driver Version (valid for GPU only)
II) NVIDIA dlloader X Driver 34.1.1 Release Build
• Issue Type( questions, new requirements, bugs)
I have two detectors: a primary GIE and a secondary GIE. I can only see the inferences of the secondary model inside the bounding boxes of the primary.
This seems to suggest that the secondary GIE is not running on the full frame as I intend (I set process-mode=1 for this reason), but is instead running only on the objects detected upstream by the primary.
But I'm not sure.
APP
# deepstream-app top-level settings: log pipeline FPS every 5 seconds.
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl
# output display details
# Single 1x1 tile at 1080p — the one enabled source fills the whole canvas.
[tiled-display]
enable=1
rows=1
columns=1
width=1920
height=1080
gpu-id=0
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
nvbuf-memory-type=0
# mp4 video source
# Local file played as the single active input (type=2 = URI source).
[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP
type=2
uri=file:///videos-test/swiss_leopards.mp4
num-sources=1
gpu-id=0
cudadec-memtype=0
source-id=0
# NOTE(review): camera-width/height apply to V4L2 camera sources (type=1);
# they should have no effect on a URI source like this one — harmless leftovers.
camera-width=1920
camera-height=1080
# rtsp video source
# Disabled placeholder. uri=None is not a valid URI and must be replaced with a
# real rtsp:// address before enabling.
[source1]
enable=0
type=4
uri=None
cudadec-memtype=0
# NOTE(review): this duplicates [source0]'s source-id=0 — if source1 is ever
# enabled, give it a distinct id so sinks can address the intended stream.
source-id=0
# rtsp video out
# Streams the annotated output over RTSP (server on port 8556, RTP over UDP 5400).
[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
type=4
#1=h264 2=h265
codec=1
##encoder type 0=Hardware 1=Software
enc-type=0
# sync=0: do not throttle to the pipeline clock; push frames as fast as available
sync=0
bitrate=2700000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=0
# set below properties in case of RTSPStreaming
rtsp-port=8556
udp-port=5400
#source-id=0
# mp4 out
# Simultaneously records the annotated output of source-id 0 to an H.264 mp4 file.
[sink1]
enable=1
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=1
sync=0
bitrate=2700000
output-file=/videos-output/15092023_091023/swiss_leopards.mp4
source-id=0
# on screen display
# Draws bounding boxes and labels; clock overlay disabled.
[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
# stream mux - forms batches of frames from multiple input sources
# batch-size=1 matches the single enabled source; output resolution matches the
# 1080p source and tiled display, so no scaling/padding is introduced here.
[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=1
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
# attach-sys-ts-as-ntp=1
# primary gpu inference engine (model)
# Full-frame detector. Per-class bbox border colors for classes 0..3 below;
# the detailed nvinfer settings live in the referenced config-file.
[primary-gie]
enable=1
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;1;1;1
bbox-border-color3=0;1;0;1
nvbuf-memory-type=0
# This id is what [secondary-gie1]'s operate-on-gie-id refers to.
gie-unique-id=1
config-file=/models_configs/pgie_copy.txt
# Multi-object tracker; the NvDCF "perf" preset is the active ll-config-file.
# The tracker keeps object ids stable across the frames the GIEs skip (interval>0).
[tracker]
enable=1
# For NvDCF and DeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=320
tracker-height=256
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.1/lib/libnvds_nvmultiobjecttracker.so
# ll-config-file required to set different tracker types
# ll-config-file=/opt/nvidia/deepstream/deepstream-6.1/samples/configs/deepstream-app/config_tracker_IOU.yml
ll-config-file=/opt/nvidia/deepstream/deepstream-6.1/samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
# ll-config-file=/opt/nvidia/deepstream/deepstream-6.1/samples/configs/deepstream-app/config_tracker_DeepSORT.yml
gpu-id=0
enable-batch-process=1
enable-past-frame=1
display-tracking-id=1
# secondary gpu inference engine (model)
[secondary-gie1]
enable=1
gpu-id=0
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
nvbuf-memory-type=0
config-file=/models_configs/sgie_copy.txt
gie-unique-id=2
# NOTE(review): operate-on-gie-id=1 attaches this GIE to the objects produced by
# gie-unique-id=1 (the [primary-gie] above), i.e. it infers on the primary's
# bounding boxes rather than on the full frame. This matches the reported
# symptom (secondary detections appear only inside primary boxes). Setting
# process-mode=1 in the sgie config file alone may not override this, since
# deepstream-app wires [secondary-gie*] groups as object-mode inference —
# TODO confirm against the DeepStream 6.1 deepstream-app source/documentation.
operate-on-gie-id=1
[tests]
file-loop=0
PGIE
# PGIE nvinfer configuration — YOLOv4 detector, 2 classes, TLT-style postprocessing.
[property]
gpu-id=0
onnx-file=/models/yolov4_military.onnx
model-engine-file=/models/yolov4_military_orin.engine
batch-size=1
# Must match gie-unique-id=1 in the app config's [primary-gie] group.
gie-unique-id=1
maintain-aspect-ratio=0
symmetric-padding=0
# 0=FP32, 1=INT8, 2=FP16
network-mode=2
# 1 = full-frame (primary-mode) inference, per Gst-nvinfer docs
process-mode=1
# 0 = detector
network-type=0
# Skip 4 batches between inferences (infer every 5th frame); the tracker
# propagates boxes on skipped frames.
interval=4
engine-create-func-name=NvDsInferYoloCudaEngineGet
# from models.json
# No pixel scaling; per-channel mean subtraction is done via offsets below.
net-scale-factor=1
labelfile-path=/labels/military.txt
num-detected-classes=2
cluster-mode=3
parse-bbox-func-name=NvDsInferParseCustomBatchedNMSTLT
custom-lib-path=/DeepStream-Yolo/deepstream_tlt_apps/post_processor/libnvds_infercustomparser_tlt.so
# 1 = BGR input order, per Gst-nvinfer docs (note the SGIE config uses 0)
model-color-format=1
offsets=103.939;116.779;123.68
# Detection filtering/clustering applied uniformly to all classes.
[class-attrs-all]
topk=20
nms-iou-threshold=0.5
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0
# from models.json
pre-cluster-threshold=0.5
SGIE
# SGIE nvinfer configuration — YOLOv8 detector, 80 COCO classes.
[property]
gpu-id=0
onnx-file=/models/yolov8_optical.onnx
model-engine-file=/models/yolov8_optical_orin.engine
batch-size=1
gie-unique-id=2
maintain-aspect-ratio=0
symmetric-padding=0
# 0=FP32, 1=INT8, 2=FP16
network-mode=2
# 1 requests full-frame (primary-mode) inference. NOTE(review): this GIE is
# attached via the app config's [secondary-gie1] group with operate-on-gie-id=1,
# which feeds it the primary's detected objects — so this setting alone may not
# yield full-frame operation. TODO verify deepstream-app behavior for DS 6.1.
process-mode=1
network-type=0
interval=4
engine-create-func-name=NvDsInferYoloCudaEngineGet
# from models.json
# 1/255 — scales 8-bit pixel values into [0,1]
net-scale-factor=0.003921569790691137
labelfile-path=/labels/coco.txt
num-detected-classes=80
cluster-mode=2
parse-bbox-func-name=NvDsInferParseYoloCuda
custom-lib-path=/DeepStream-Yolo/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
model-color-format=0
# NOTE(review): input-object-min-width/height only take effect when nvinfer
# operates on objects (secondary mode); their presence here is consistent with
# this GIE actually running on primary detections rather than the full frame.
input-object-min-width=50
input-object-min-height=50
[class-attrs-all]
topk=20
nms-iou-threshold=0.5
pre-cluster-threshold=0.25