Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) jetson
• DeepStream Version 7.1
• JetPack Version (valid for Jetson only) jetpack 6.2
• TensorRT Version 12.6 (NOTE: 12.6 appears to be the CUDA version; JetPack 6.2 ships TensorRT 10.3 — please confirm)
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)
I am trying to use [pre-process] before [primary-gie] so that I get an ROI and the PGIE infers only on the region within that ROI.
But the PGIE is still operating on the entire frame, detecting objects and drawing the red bounding boxes.
I have attached all the required config files for reference — please help.
test5_config_file_nvmultiurisrcbin_src_list_attr_all_anpr.txt
################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################
# Global deepstream-app settings.
[application]
# 1 = print per-stream FPS statistics to the console
enable-perf-measurement=1
# Interval (seconds) between perf/FPS log prints
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl
# Composites all batched streams into one rows x columns grid for display.
[tiled-display]
enable=1
rows=3
columns=2
# Tiled output resolution in pixels
width=1280
height=720
gpu-id=0
# 0 = NVBUF_MEM_DEFAULT
nvbuf-memory-type=0
#Set to 1 to automatically tile in Square Grid
square-seq-grid=0
#Note: [source-list] now support REST Server with use-nvmultiurisrcbin=1
[source-list]
num-source-bins=1
# Empty: streams are expected to be added at runtime through the REST API
# exposed by nvmultiurisrcbin on http-ip:http-port below.
list=
use-nvmultiurisrcbin=1
#To display stream name in FPS log, set stream-name-display=1
stream-name-display=0
#sensor-id-list vector is one to one mapped with the uri-list
#identifies each sensor by a unique ID
sensor-id-list=2524
# Upper bound on simultaneously muxed streams; also used as the effective
# streammux/PGIE batch size (see notes in [streammux] and [primary-gie]).
max-batch-size=1
http-ip=localhost
http-port=9000
#low-latency-mode=1
#sgie batch size is number of sources * fair fraction of number of objects detected per frame per source
#the fair fraction of number of object detected is assumed to be 4
sgie-batch-size=1
#NOTE(review): the comment below is a leftover from the sample config; the key
#it refers to is not present in this group.
#Set the below key to keep the application running at all times
# Attributes applied to every source added via nvmultiurisrcbin / REST.
[source-attr-all]
enable=1
# Source type; 3 = URI source (see deepstream-app source group documentation)
type=3
num-sources=1
gpu-id=0
# 0 = device memory for decoded buffers
cudadec-memtype=0
# Jitter-buffer latency in ms for RTSP sources
latency=100
rtsp-reconnect-interval-sec=10
#Limit the rtsp reconnection attempts
# -1 = retry forever
rtsp-reconnect-attempts=-1
# nvstreammux: batches frames from all sources into one batched buffer.
[streammux]
gpu-id=0
#Note: when used with [source-list], batch-size is ignored
#instead, max-batch-size config is used
batch-size=5
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=33333
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
attach-sys-ts-as-ntp=1
## drop-pipeline-eos ignores EOS from individual streams muxed in the DS pipeline
## It is useful with source-list/use-nvmultiurisrcbin=1 where the REST server
## will be running post last stream EOS to accept new streams
drop-pipeline-eos=1
##Boolean property to inform muxer that sources are live
##When using nvmultiurisrcbin live-source=1 is preferred default
##to allow batching of available buffers when number of sources is < max-batch-size configuration
live-source=1
#below sink is to be enabled incase OSD is required and Display is available.[should not be an ssh session]
# Currently disabled (enable=0): no local display output.
[sink0]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0
# Message broker sink: converts metadata to a schema payload and publishes it
# over AMQP.  msg-broker-conn-str and topic below are blank (redacted?) and
# must be filled in for this sink to work.
[sink1]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
# Empty = use the default msgconv schema library
msg-conv-msg2p-lib=
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM - Custom schema payload
msg-conv-payload-type=0
# 0 = process metadata from all components
msg-conv-comp-id=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_amqp_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=
topic=
#Optional:
msg-broker-config=
new-api=1
#(0) Use message adapter library api's
#(1) Use new msgbroker library api's
# File sink (disabled): would encode the pipeline output to out.mp4.
[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
# Encoder bitrate in bits/sec
bitrate=2000000
output-file=out.mp4
source-id=0
# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
# Disabled here: sink1 already embeds its own converter.
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>
# Configure this group to enable cloud message consumer.
# Disabled here; the <...> values are placeholders from the sample config.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt
# This section configures sink3 as an RTSPStreaming sink (type=4): the
# processed video is hardware-encoded (H.264 here) and served over RTSP at
# rtsp://<host>:<rtsp-port><mount-point> (i.e. port 9998, mount /ds-test),
# with RTP packets carried internally over udp-port 5401.
# sync=0 disables synchronization to the clock, which is typical for network sinks.
# RTSP streaming output: rtsp://<host>:9998/ds-test
[sink3]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
type=4
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
#sw-preset=1 #for SW enc=(0)None (1)ultrafast (2)superfast (3)veryfast (4)faster
#(5)fast (6)medium (7)slow (8)slower (9)veryslow (10)placebo
sync=0
# Encoder bitrate in bits/sec
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
# set profile only for hw encoder, sw encoder selects profile based on sw-preset
profile=0
# set below properties in case of RTSPStreaming
rtsp-port=9998
# Internal RTP/UDP port feeding the RTSP server
udp-port=5401
# RTSP streaming mount point
mount-point=/ds-test
# Allow all clients to connect
# allow-all=1
# On-screen display: draws bounding boxes, labels and optional clock.
[osd]
enable=1
gpu-id=0
# Bounding-box border width in pixels
border-width=1
text-size=15
# Colors are R;G;B;A in [0,1]
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
# Object counting display settings
#show-obj-count=1
#obj-count-x-offset=10
#obj-count-y-offset=30
#obj-count-text-size=20
#obj-count-color=0;1;0;1
#obj-count-bg-color=0;0;0;0.7
nvbuf-memory-type=0
# nvdspreprocess element inserted before the primary GIE.  It scales/converts
# the configured ROIs and attaches the prepared tensor as user meta.
#
# IMPORTANT: attaching tensor meta alone does NOT restrict the PGIE to the
# ROI.  The nvinfer config file referenced by [primary-gie]/config-file must
# also set, under its [property] group:
#     input-tensor-from-meta=1
# Without it nvinfer prepares its own tensor from the full frame and keeps
# detecting outside the ROI (the red full-frame bboxes reported above).
[pre-process]
enable=1
# FIX(review): was gpu-id=1 — Jetson has a single integrated GPU with id 0;
# there is no device 1 on this platform.
gpu-id=0
unique-id=5
process-on-frame=1
# gie-unique-ids that consume the prepared tensor meta.  This overrides
# target-unique-ids in config_preprocess.txt (which lists only 1).
# FIX(review): was 1;7 — SGIE 7 operates on detected objects, not on this
# frame-level tensor, so only the PGIE (gie-unique-id=1) is targeted.
target-unique-ids=1
config-file=config_preprocess.txt
# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.
[primary-gie]
enable=1
gpu-id=0
gie-unique-id=1
# FIX(review): removed operate-on-gie-id=5.  That key selects which upstream
# GIE's output objects a SECONDARY gie classifies; a primary GIE operates on
# frames, and the [pre-process] element (unique-id=5) is not a GIE at all.
# To make this PGIE infer only on the [pre-process] ROIs, set
#     input-tensor-from-meta=1
# under [property] in the nvinfer config file referenced by config-file below.
#Required to display the PGIE labels, should be added even when using config-file
#property
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/max-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=5
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
#Required by the app for SGIE, when used along with config-file property
nvbuf-memory-type=0
# NOTE(review): the three paths below are empty (redacted?).  config-file is
# mandatory and must point to the PGIE nvinfer configuration file.
model-engine-file=
labelfile-path=
config-file=
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/
# ***** NOTE ******:
# NVIDIA TAO ReIdentificationNet
# NVIDIA pre-trained ReIdentificationNet is a high accuracy ResNet-50 model with feature
# length 256. It can be downloaded and used directly with command:
# [ ] mkdir /opt/nvidia/deepstream/deepstream/samples/models/Tracker/
# [ ] wget 'https://api.ngc.nvidia.com/v2/models/nvidia/tao/reidentificationnet/versions/deployable_v1.0/files/resnet50_market1501.etlt' -P /opt/nvidia/deepstream/deepstream/samples/models/Tracker/
# Multi-object tracker (NvDCF perf profile via the ll-config-file below).
[tracker]
enable=1
# For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
# (960 and 544 both satisfy this)
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
#ll-config-file required to set different tracker types
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
gpu-id=0
display-tracking-id=1
# NvDsAnalytics Configuration
# This section enables real-time analytics like ROI detection, line crossing, and object counting
# Currently disabled (enable=0); config_nvdsanalytics.txt is not read.
[nvds-analytics]
enable=0
# Path to the analytics configuration file
config-file=config_nvdsanalytics.txt
# Secondary GIE (unique-id 7): runs on objects of class 0 produced by the
# primary GIE (operate-on-gie-id=1).
[secondary-gie0]
enable=1
gpu-id=0
gie-unique-id=7
operate-on-gie-id=1
operate-on-class-ids=0
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=6
# NOTE(review): the three paths below are empty (redacted?).  config-file is
# mandatory for this SGIE to load.
model-engine-file=
labelfile-path=
config-file=
# Second secondary GIE kept fully commented out (disabled sample leftover).
#[secondary-gie1]
#enable=0
#gpu-id=0
#gie-unique-id=5
#operate-on-gie-id=1
#operate-on-class-ids=0;
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
#batch-size=16
#config-file=../../../../../samples/configs/deepstream-app/config_infer_secondary_vehiclemake.txt
#labelfile-path=../../../../../samples/models/Secondary_VehicleMake/labels.txt
#model-engine-file=../../../../../samples/models/Secondary_VehicleMake/resnet18_vehiclemakenet_pruned.onnx_b40_gpu0_int8.engine
# Test options: file-loop=1 restarts file sources on EOS (endless playback).
[tests]
file-loop=1
config_preprocess.txt
################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################
# The values in the config file are overridden by values set through GObject
# properties.
# Gst-nvdspreprocess main configuration.
# NOTE(review): the deepstream-app [pre-process] group overrides several keys
# set here (unique-id, gpu-id, target-unique-ids); the app config currently
# sets gpu-id=1 and target-unique-ids=1;7, which disagree with this file —
# verify which values are intended.
[property]
enable=1
# list of component gie-id for which tensor is prepared
target-unique-ids=1
# 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
# 0=process on objects 1=process on frames
process-on-frame=1
#uniquely identify the metadata generated by this element
unique-id=5
# gpu-id to be used
gpu-id=0
# if enabled maintain the aspect ratio while scaling
maintain-aspect-ratio=1
# if enabled pad symmetrically with maintain-aspect-ratio enabled
symmetric-padding=1
# processing width/height at which the image is scaled
processing-width=960
processing-height=544
# max buffer in scaling buffer pool
scaling-buf-pool-size=6
# max buffer in tensor buffer pool
tensor-buf-pool-size=6
# tensor shape based on network-input-order
# First dim (8) is the max batch; presumably sized for up to 8 ROIs/frames —
# confirm it matches the PGIE engine's max batch size.
network-input-shape= 8;3;544;960
# 0=RGB, 1=BGR, 2=GRAY
network-color-format=0
# 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
# tensor name same as input layer name
# NOTE(review): must match the PGIE model's input layer name exactly — verify
# against the model (input_1:0 is the sample TAO detector default).
tensor-name=input_1:0
# 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED
scaling-pool-memory-type=0
# 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
# Scaling Interpolation method
# 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
# 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
# 6=NvBufSurfTransformInter_Default
scaling-filter=0
# custom library .so path having custom functionality
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
# custom tensor preparation function name having predefined input/outputs
# check the default custom library nvdspreprocess_lib for more info
custom-tensor-preparation-function=CustomTensorPreparation
# Free-form parameters passed through to the custom preprocess library.
[user-configs]
# Below parameters get used when using default custom library nvdspreprocess_lib
# network scaling factor
# 0.003921568 = 1/255 (normalizes 8-bit pixels to [0,1])
pixel-normalization-factor=0.003921568
# mean file path in ppm format
#mean-file=
# array of offsets for each channel
#offsets=
# ROI group 0: applies to the sources listed in src-ids.
[group-0]
# Source ids (streammux pad indices) this group covers
src-ids=0
custom-input-transformation-function=CustomAsyncTransformation
# 1 = prepare tensors only from the ROIs below (not the whole frame)
process-on-roi=1
# 1 = draw the ROI rectangle on the output (the green box seen on screen)
draw-roi=1
# ROI for source 0 as left;top;width;height (multiple ROIs may be chained)
roi-params-src-0=0;0;900;500
The green bounding box is drawn by the pre-processor (its ROI, since draw-roi=1), and the red bounding boxes are coming from the PGIE.