Pre-process not working

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) jetson
• DeepStream Version 7.1
• JetPack Version (valid for Jetson only) jetpack 6.2
• TensorRT Version 12.6
• NVIDIA GPU Driver Version (valid for GPU only)
• Issue Type( questions, new requirements, bugs)
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
• Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)

I am trying to use [pre-process] before [primary-gie] so that I get an ROI and the PGIE only infers on the objects within the ROI.

But the PGIE is still operating on the entire frame, detecting objects, and drawing the red bbox.

I have attached all the required config files for reference. Please help.

test5_config_file_nvmultiurisrcbin_src_list_attr_all_anpr.txt

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
enable=1
rows=3
columns=2
width=1280
height=720
gpu-id=0
nvbuf-memory-type=0
#Set to 1 to automatically tile in Square Grid
square-seq-grid=0

#Note: [source-list] now support REST Server with use-nvmultiurisrcbin=1

[source-list]
num-source-bins=1
list=
use-nvmultiurisrcbin=1
#To display stream name in FPS log, set stream-name-display=1
stream-name-display=0
#sensor-id-list vector is one to one mapped with the uri-list
#identifies each sensor by a unique ID
sensor-id-list=2524
max-batch-size=1
http-ip=localhost
http-port=9000
#low-latency-mode=1
#sgie batch size is number of sources * fair fraction of number of objects detected per frame per source
#the fair fraction of number of object detected is assumed to be 4
sgie-batch-size=1
#Set the below key to keep the application running at all times

[source-attr-all]
enable=1
type=3
num-sources=1
gpu-id=0
cudadec-memtype=0
latency=100
rtsp-reconnect-interval-sec=10
#Limit the rtsp reconnection attempts
rtsp-reconnect-attempts=-1

[streammux]
gpu-id=0
#Note: when used with [source-list], batch-size is ignored
#instead, max-batch-size config is used
batch-size=5
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=33333
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
attach-sys-ts-as-ntp=1
## drop-pipeline-eos ignores EOS from individual streams muxed in the DS pipeline
## It is useful with source-list/use-nvmultiurisrcbin=1 where the REST server
## will be running post last stream EOS to accept new streams
drop-pipeline-eos=1
##Boolean property to inform muxer that sources are live
##When using nvmultiurisrcbin live-source=1 is preferred default
##to allow batching of available buffers when number of sources is < max-batch-size configuration
live-source=1

#below sink is to be enabled incase OSD is required and Display is available.[should not be an ssh session]
[sink0]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0

[sink1]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
msg-conv-msg2p-lib=
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Enable analytics metadata processing
msg-conv-comp-id=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_amqp_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=
topic=
#Optional:
msg-broker-config=
new-api=1
#(0) Use message adapter library api's
#(1) Use new msgbroker library api's

[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt



# This section describes the configuration for sink3, an RTSPStreaming sink (type=4).
# It encodes the processed video and serves it over RTSP.
# udp-port (5401) is the internal UDP port the encoder pushes packets to;
# rtsp-port (9998) is the TCP port the RTSP server listens on.
# sync=0 disables synchronization to the clock, which is typical for network sinks.

[sink3]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
type=4
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
#sw-preset=1 #for SW enc=(0)None (1)ultrafast (2)superfast (3)veryfast (4)faster
#(5)fast (6)medium (7)slow (8)slower (9)veryslow (10)placebo
sync=0
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
# set profile only for hw encoder, sw encoder selects profile based on sw-preset
profile=0
# set below properties in case of RTSPStreaming
rtsp-port=9998
udp-port=5401
# NOTE(review): 'mount-point' is not a recognized key for sink groups in this
# deepstream-app version (the app warns "Unknown key 'mount-point'"); the RTSP
# mount point defaults to /ds-test.
#mount-point=/ds-test
# Allow all clients to connect
# allow-all=1

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
# Object counting display settings
#show-obj-count=1
#obj-count-x-offset=10
#obj-count-y-offset=30
#obj-count-text-size=20
#obj-count-color=0;1;0;1
#obj-count-bg-color=0;0;0;0.7
nvbuf-memory-type=0

# The deepstream-app [pre-process] group only takes enable and config-file.
# Plugin properties (gpu-id, unique-id, process-on-frame, target-unique-ids)
# belong in the [property] group of the nvdspreprocess config file below.
# In particular, gpu-id=1 was invalid here: Jetson has a single GPU (id 0),
# and every other element in this pipeline runs on gpu-id=0.
[pre-process]
enable=1
config-file=config_preprocess.txt

# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.
[primary-gie]
enable=1
gpu-id=0
gie-unique-id=1
# Consume the tensor meta prepared by [pre-process] instead of letting nvinfer
# run its own full-frame preprocessing; required for ROI-only inference.
# (operate-on-gie-id removed: it only applies to secondary GIEs that operate
# on another GIE's output objects, not to a primary GIE.)
input-tensor-meta=1
#Required to display the PGIE labels, should be added even when using config-file
#property
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/max-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=5
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
#Required by the app for SGIE, when used along with config-file property
nvbuf-memory-type=0
model-engine-file=
labelfile-path=
config-file=
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

# ***** NOTE ******:
# NVIDIA TAO ReIdentificationNet
# NVIDIA pre-trained ReIdentificationNet is a high accuracy ResNet-50 model with feature 
# length 256. It can be downloaded and used directly with command:

# [ ] mkdir /opt/nvidia/deepstream/deepstream/samples/models/Tracker/

# [ ] wget 'https://api.ngc.nvidia.com/v2/models/nvidia/tao/reidentificationnet/versions/deployable_v1.0/files/resnet50_market1501.etlt' -P /opt/nvidia/deepstream/deepstream/samples/models/Tracker/

[tracker]
enable=1
# For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
#ll-config-file required to set different tracker types
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
gpu-id=0
display-tracking-id=1

# NvDsAnalytics Configuration
# This section enables real-time analytics like ROI detection, line crossing, and object counting
[nvds-analytics]
enable=0
# Path to the analytics configuration file
config-file=config_nvdsanalytics.txt

[secondary-gie0]
enable=1
gpu-id=0
gie-unique-id=7
operate-on-gie-id=1
operate-on-class-ids=0
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=6
model-engine-file=
labelfile-path=
config-file=

#[secondary-gie1]
#enable=0
#gpu-id=0
#gie-unique-id=5
#operate-on-gie-id=1
#operate-on-class-ids=0;
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
#batch-size=16
#config-file=../../../../../samples/configs/deepstream-app/config_infer_secondary_vehiclemake.txt
#labelfile-path=../../../../../samples/models/Secondary_VehicleMake/labels.txt
#model-engine-file=../../../../../samples/models/Secondary_VehicleMake/resnet18_vehiclemakenet_pruned.onnx_b40_gpu0_int8.engine

[tests]
file-loop=1

config_preprocess.txt


################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

# The values in the config file are overridden by values set through GObject
# properties.
# Comments start in column 0: indented comment lines are non-portable INI and
# can be misread (e.g. as value continuations) by stricter parsers.

[property]
enable=1
# list of component gie-id for which tensor is prepared
target-unique-ids=1
# 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
# 0=process on objects 1=process on frames
process-on-frame=1
# uniquely identify the metadata generated by this element
unique-id=5
# gpu-id to be used
gpu-id=0
# if enabled maintain the aspect ratio while scaling
maintain-aspect-ratio=1
# if enabled pad symmetrically with maintain-aspect-ratio enabled
symmetric-padding=1
# processing width/height at which image scaled
processing-width=960
processing-height=544
# max buffer in scaling buffer pool
scaling-buf-pool-size=6
# max buffer in tensor buffer pool
tensor-buf-pool-size=6
# tensor shape based on network-input-order
network-input-shape=8;3;544;960
# 0=RGB, 1=BGR, 2=GRAY
network-color-format=0
# 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
# tensor name same as input layer name
# NOTE(review): this must exactly match the PGIE model's input layer name;
# if the detector's input is not named input_1:0, nvinfer will not consume
# the prepared tensor — verify against the engine's layer info log.
tensor-name=input_1:0
# 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED
scaling-pool-memory-type=0
# 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
# Scaling Interpolation method
# 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
# 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
# 6=NvBufSurfTransformInter_Default
scaling-filter=0
# custom library .so path having custom functionality
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
# custom tensor preparation function name having predefined input/outputs
# check the default custom library nvdspreprocess_lib for more info
custom-tensor-preparation-function=CustomTensorPreparation

[user-configs]
# Below parameters get used when using default custom library nvdspreprocess_lib
# network scaling factor
pixel-normalization-factor=0.003921568
# mean file path in ppm format
#mean-file=
# array of offsets for each channel
#offsets=

[group-0]
src-ids=0
custom-input-transformation-function=CustomAsyncTransformation
process-on-roi=1
draw-roi=1
roi-params-src-0=0;0;900;500

The green bbox is coming from the pre-processor and the red bbox is coming from the PGIE.

Try to add input-tensor-meta=1 in [primary-gie] group. Please refer to this configuration file /opt/nvidia/deepstream/deepstream/samples/configs/deepstream-app/source30_1080p_dec_preprocess_infer-resnet_tiled_display.txt.

done this

[primary-gie]
enable=1
gpu-id=0
#operate-on-gie-id=0
input-tensor-meta=1
plugin-type=0 #(0): nvinfer; (1): nvinferserver
#Required to display the PGIE labels, should be added even when using config-file
#property
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/max-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=5
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
#Required by the app for SGIE, when used along with config-file property
gie-unique-id=1
nvbuf-memory-type=0
model-engine-file=
labelfile-path=
config-file=
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

still not working as expected

Did you specify gie-unique-id in the nvinfer configuration file? Please check if the function gst_nvinfer_process_tensor_input is called in the file gstnvinfer.cpp located in sources/gst-plugins/gst-nvinfer/.

Yes, the function is called, and I set the gie-unique-id in the primary and secondary GIE.

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
enable=1
rows=3
columns=2
width=1280
height=720
gpu-id=0
nvbuf-memory-type=0
#Set to 1 to automatically tile in Square Grid
square-seq-grid=0

#Note: [source-list] now support REST Server with use-nvmultiurisrcbin=1

[source-list]
num-source-bins=1
list=
use-nvmultiurisrcbin=1
#To display stream name in FPS log, set stream-name-display=1
stream-name-display=0
#sensor-id-list vector is one to one mapped with the uri-list
#identifies each sensor by a unique ID
sensor-id-list=
#Optional sensor-name-list vector is one to one mapped with the uri-list
sensor-name-list=
max-batch-size=1
http-ip=localhost
http-port=9000
# needs to be enabled
#low-latency-mode=1
#sgie batch size is number of sources * fair fraction of number of objects detected per frame per source
#the fair fraction of number of object detected is assumed to be 4
sgie-batch-size=1
#Set the below key to keep the application running at all times

[source-attr-all]
enable=1
type=3
num-sources=1
gpu-id=0
cudadec-memtype=0
latency=100
rtsp-reconnect-interval-sec=10
#Limit the rtsp reconnection attempts
rtsp-reconnect-attempts=-1

[streammux]
gpu-id=0
#Note: when used with [source-list], batch-size is ignored
#instead, max-batch-size config is used
batch-size=5
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=33333
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
attach-sys-ts-as-ntp=1
## drop-pipeline-eos ignores EOS from individual streams muxed in the DS pipeline
## It is useful with source-list/use-nvmultiurisrcbin=1 where the REST server
## will be running post last stream EOS to accept new streams
drop-pipeline-eos=1
##Boolean property to inform muxer that sources are live
##When using nvmultiurisrcbin live-source=1 is preferred default
##to allow batching of available buffers when number of sources is < max-batch-size configuration
live-source=1

#below sink is to be enabled incase OSD is required and Display is available.[should not be an ssh session]
[sink0]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0

[sink1]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
msg-conv-msg2p-lib=
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Enable analytics metadata processing
msg-conv-comp-id=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_amqp_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=
topic=hello_world
#Optional:
msg-broker-config=
new-api=1
#(0) Use message adapter library api's
#(1) Use new msgbroker library api's

[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt



# This section describes the configuration for sink3, which is a UDPSink (type=3).
# It enables network streaming of the processed video, for example via RTSP.
# The host parameter determines the network interface to stream on (0.0.0.0 for all interfaces).
# The port parameter sets the UDP port for streaming (here, 9999).
# Additional options such as codec and container can be set if needed, but will default to pipeline settings if omitted.
# sync=0 disables synchronization to the clock, which is typical for network sinks.

[sink3]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
type=4
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
#sw-preset=1 #for SW enc=(0)None (1)ultrafast (2)superfast (3)veryfast (4)faster
#(5)fast (6)medium (7)slow (8)slower (9)veryslow (10)placebo
sync=0
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
# set profile only for hw encoder, sw encoder selects profile based on sw-preset
profile=0
# set below properties in case of RTSPStreaming
rtsp-port=9998
udp-port=5401
# RTSP streaming mount point
mount-point=/ds-test
# Allow all clients to connect
# allow-all=1

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
# Object counting display settings
#show-obj-count=1
#obj-count-x-offset=10
#obj-count-y-offset=30
#obj-count-text-size=20
#obj-count-color=0;1;0;1
#obj-count-bg-color=0;0;0;0.7
nvbuf-memory-type=0

[pre-process]
enable=1
config-file=config_preprocess.txt

# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.
[primary-gie]
enable=1
gpu-id=0
#operate-on-gie-id=0
input-tensor-meta=1
plugin-type=0 #(0): nvinfer; (1): nvinferserver
#Required to display the PGIE labels, should be added even when using config-file
#property
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/max-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=5
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
#Required by the app for SGIE, when used along with config-file property
gie-unique-id=1
nvbuf-memory-type=0
model-engine-file=
labelfile-path=
config-file=
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

# ***** NOTE ******:
# NVIDIA TAO ReIdentificationNet
# NVIDIA pre-trained ReIdentificationNet is a high accuracy ResNet-50 model with feature 
# length 256. It can be downloaded and used directly with command:
# [ ] mkdir /opt/nvidia/deepstream/deepstream/samples/models/Tracker/
# [ ] wget 'https://api.ngc.nvidia.com/v2/models/nvidia/tao/reidentificationnet/versions/deployable_v1.0/files/resnet50_market1501.etlt' -P /opt/nvidia/deepstream/deepstream/samples/models/Tracker/

[tracker]
enable=1
# For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
#ll-config-file required to set different tracker types
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
gpu-id=0
display-tracking-id=1

# NvDsAnalytics Configuration
# This section enables real-time analytics like ROI detection, line crossing, and object counting
[nvds-analytics]
enable=0
# Path to the analytics configuration file
config-file=config_nvdsanalytics.txt

[secondary-pre-process0]
enable=0
operate-on-gie-id=1
config-file=config_preprocess_sgie.txt

[secondary-gie0]
enable=1
gpu-id=0
#input-tensor-meta=1
gie-unique-id=7
operate-on-gie-id=1
operate-on-class-ids=0
plugin-type=0 #(0): nvinfer; (1): nvinferserver
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=6
model-engine-file=
labelfile-path=
config-file=

#[secondary-gie1]
#enable=0
#gpu-id=0
#gie-unique-id=5
#operate-on-gie-id=1
#operate-on-class-ids=0;
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
#batch-size=16
#config-file=../../../../../samples/configs/deepstream-app/config_infer_secondary_vehiclemake.txt
#labelfile-path=../../../../../samples/models/Secondary_VehicleMake/labels.txt
#model-engine-file=../../../../../samples/models/Secondary_VehicleMake/resnet18_vehiclemakenet_pruned.onnx_b40_gpu0_int8.engine

# Application test/debug options.
[tests]
# Loop file sources on EOS so the pipeline keeps running during testing.
file-loop=1

Can you tell me why this is not working?

There are many reasons. I can’t run your configuration and can only guess the reason. Is the input tensor name of your lpd model input_1:0? Please provide detailed log

GST_DEBUG=3 ./your_program > log.log 2>&1
GST_DEBUG=3 ./deepstream-test5-app -c configs/test5_config_file_nvmultiurisrcbin_src_list_attr_all_anpr.txt.ini -p 1               
** WARN: <parse_sink:1918>: Unknown key 'mount-point' for group [sink3]

(deepstream-test5-app:119043): GLib-GObject-WARNING **: 11:17:02.519: g_object_set_is_valid_property: object class 'nvv4l2h264enc' has no property named 'gpu-id'

 *** DeepStream: Launched RTSP Streaming at rtsp://localhost:9998/ds-test ***

Opening in BLOCKING MODE 
0:00:00.132061747 119043 0xaaab049b2230 WARN                    v4l2 gstv4l2object.c:4682:gst_v4l2_object_probe_caps:<sink_sub_bin_encoder2:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
Civetweb version: v1.16
Server running at port: 9000
Setting min object dimensions as 16x16 instead of 1x1 to support VIC compute mode.
0:00:00.333173746 119043 0xaaab049b2230 INFO                 nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<secondary_gie_0> NvDsInferContext[UID 7]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 7]: deserialized trt engine from :/home/parkzap/deepstream7.1/anpr/samples/models/LPRNet/lprnet_fixed.onnx_b6_gpu0_fp16.engine
INFO: [FullDims Engine Info]: layers num: 3
0   INPUT  kFLOAT image_input     3x48x106        min: 1x3x48x106      opt: 6x3x48x106      Max: 6x3x48x106      
1   OUTPUT kINT32 tf_op_layer_ArgMax_int32 27              min: 0               opt: 0               Max: 0               
2   OUTPUT kFLOAT tf_op_layer_Max 27              min: 0               opt: 0               Max: 0               

0:00:00.333283670 119043 0xaaab049b2230 INFO                 nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<secondary_gie_0> NvDsInferContext[UID 7]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 7]: Use deserialized engine model: /home/parkzap/deepstream7.1/anpr/samples/models/LPRNet/lprnet_fixed.onnx_b6_gpu0_fp16.engine
0:00:00.373425267 119043 0xaaab049b2230 INFO                 nvinfer gstnvinfer_impl.cpp:343:notifyLoadModelStatus:<secondary_gie_0> [UID 7]: Load new model:/home/parkzap/deepstream7.1/anpr/samples/configs/deepstream-app/config_infer_lprnet.txt sucessfully
gstnvtracker: Loading low-level lib at /opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
[NvMultiObjectTracker] Initialized
Setting min object dimensions as 16x16 instead of 1x1 to support VIC compute mode.
WARNING: [TRT]: BatchedNMSPlugin is deprecated since TensorRT 9.0. Use INetworkDefinition::addNMS() to add an INMSLayer OR use EfficientNMS plugin.
0:00:00.464862233 119043 0xaaab049b2230 INFO                 nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::deserializeEngineAndBackend() <nvdsinfer_context_impl.cpp:2092> [UID = 1]: deserialized trt engine from :/home/parkzap/deepstream7.1/anpr/samples/models/LPDNet/yolov4_tiny_usa_deployable.etlt_b10_gpu0_int8.engine
INFO: [FullDims Engine Info]: layers num: 5
0   INPUT  kFLOAT Input           3x480x640       min: 1x3x480x640     opt: 10x3x480x640    Max: 10x3x480x640    
1   OUTPUT kINT32 BatchedNMS      1               min: 0               opt: 0               Max: 0               
2   OUTPUT kFLOAT BatchedNMS_1    200x4           min: 0               opt: 0               Max: 0               
3   OUTPUT kFLOAT BatchedNMS_2    200             min: 0               opt: 0               Max: 0               
4   OUTPUT kFLOAT BatchedNMS_3    200             min: 0               opt: 0               Max: 0               

0:00:00.464960093 119043 0xaaab049b2230 INFO                 nvinfer gstnvinfer.cpp:684:gst_nvinfer_logger:<primary_gie> NvDsInferContext[UID 1]: Info from NvDsInferContextImpl::generateBackendContext() <nvdsinfer_context_impl.cpp:2195> [UID = 1]: Use deserialized engine model: /home/parkzap/deepstream7.1/anpr/samples/models/LPDNet/yolov4_tiny_usa_deployable.etlt_b10_gpu0_int8.engine
0:00:00.467125923 119043 0xaaab049b2230 INFO                 nvinfer gstnvinfer_impl.cpp:343:notifyLoadModelStatus:<primary_gie> [UID 1]: Load new model:/home/parkzap/deepstream7.1/anpr/samples/configs/deepstream-app/config_infer_lpdnet.txt sucessfully

Runtime commands:
	h: Print this help
	q: Quit

	p: Pause
	r: Resume

NOTE: To expand a source in the 2D tiled display and view object details, left-click on the source.
      To go back to the tiled display, right-click anywhere on the window.

new stream added [0:2524:4W Entry Left side Lane(A)]



** INFO: <bus_callback:291>: Pipeline ready

** INFO: <bus_callback:277>: Pipeline running

Opening in BLOCKING MODE 
0:00:00.688556602 119043 0xfffe64016520 WARN                    v4l2 gstv4l2object.c:4682:gst_v4l2_object_probe_caps:<nvv4l2decoder0:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
NvMMLiteOpen : Block : BlockType = 261 
NvMMLiteBlockCreate : Block : BlockType = 261 
0:00:00.791524826 119043 0xfffe64016520 WARN                    v4l2 gstv4l2object.c:4682:gst_v4l2_object_probe_caps:<nvv4l2decoder0:src> Failed to probe pixel aspect ratio with VIDIOC_CROPCAP: Unknown error -1
0:00:00.796713251 119043 0xfffe64016520 WARN            v4l2videodec gstv4l2videodec.c:2297:gst_v4l2_video_dec_decide_allocation:<nvv4l2decoder0> Duration invalid, not setting latency
mimetype is video/x-raw
NvMMLiteOpen : Block : BlockType = 4 
===== NvVideo: NVENC =====
NvMMLiteBlockCreate : Block : BlockType = 4 
0:00:00.809205402 119043 0xaaab0423f580 WARN          v4l2bufferpool gstv4l2bufferpool.c:1130:gst_v4l2_buffer_pool_start:<sink_sub_bin_encoder2:pool:src> Uncertain or not enough buffers, enabling copy threshold
0:00:00.814534696 119043 0xfffe64016520 WARN          v4l2bufferpool gstv4l2bufferpool.c:1130:gst_v4l2_buffer_pool_start:<nvv4l2decoder0:pool:src> Uncertain or not enough buffers, enabling copy threshold
0:00:00.826536016 119043 0xfffe64016f00 WARN          v4l2bufferpool gstv4l2bufferpool.c:1607:gst_v4l2_buffer_pool_dqbuf:<nvv4l2decoder0:pool:src> Driver should never set v4l2_buffer.field to ANY
H264: Profile = 66 Level = 0 
NVMEDIA: Need to set EMC bandwidth : 376000 
NvVideo: bBlitMode is set to TRUE 
0:00:01.215093972 119043 0xfffe400184c0 WARN          v4l2bufferpool gstv4l2bufferpool.c:1607:gst_v4l2_buffer_pool_dqbuf:<sink_sub_bin_encoder2:pool:src> Driver should never set v4l2_buffer.field to ANY
Active sources : 1

**PERF:  FPS 0 (Avg)	
Thu Sep 18 11:17:07 2025
**PERF:  
33.72 (33.54)	

(deepstream-test5-app:119043): GLib-GObject-WARNING **: 11:17:09.749: g_object_get_is_valid_property: object class 'GstUDPSrc' has no property named 'pt'
0:00:07.294081429 119043 0xffff3c001980 WARN                  udpsrc gstudpsrc.c:1637:gst_udpsrc_open:<pay0> warning: Could not create a buffer of requested 524288 bytes (Operation not permitted). Need net.admin privilege?
0:00:07.294196856 119043 0xffff3c001980 WARN                  udpsrc gstudpsrc.c:1647:gst_udpsrc_open:<pay0> have udp buffer of 212992 bytes while 524288 were requested
0:00:07.294738442 119043 0xffff3c001980 WARN               rtspmedia rtsp-media.c:3281:default_handle_message: 0xfffe68141990: got warning Could not get/set settings from/on resource. (../gst/udp/gstudpsrc.c(1637): gst_udpsrc_open (): /GstPipeline:media-pipeline/GstBin:bin0/GstUDPSrc:pay0:
Could not create a buffer of requested 524288 bytes (Operation not permitted). Need net.admin privilege?)
0:00:07.295438273 119043 0xaaab04245120 WARN              rtspstream rtsp-stream.c:4442:gst_rtsp_stream_get_rtpinfo: Could not get payloader stats
0:00:07.295509827 119043 0xaaab04245120 FIXME              rtspmedia rtsp-media.c:4584:gst_rtsp_media_suspend: suspend for dynamic pipelines needs fixing
0:00:07.298025589 119043 0xaaab04245120 FIXME              rtspmedia rtsp-media.c:4584:gst_rtsp_media_suspend: suspend for dynamic pipelines needs fixing
0:00:07.298063383 119043 0xaaab04245120 WARN               rtspmedia rtsp-media.c:4623:gst_rtsp_media_suspend: media 0xfffe68141990 was not prepared
0:00:07.301879635 119043 0xaaab04245120 FIXME              rtspmedia rtsp-media.c:2884:gst_rtsp_media_seek_trickmode:<GstRTSPMedia@0xfffe68141990> Handle going back to 0 for none live not seekable streams.
0:00:07.302377092 119043 0xffff3c001980 WARN                basesink gstbasesink.c:1249:gst_base_sink_query_latency:<multiudpsink0> warning: Pipeline construction is invalid, please add queues.
0:00:07.302390404 119043 0xfffe60007aa0 WARN                basesink gstbasesink.c:1249:gst_base_sink_query_latency:<multiudpsink0> warning: Pipeline construction is invalid, please add queues.
0:00:07.302420293 119043 0xffff3c001980 WARN                basesink gstbasesink.c:1249:gst_base_sink_query_latency:<multiudpsink0> warning: Not enough buffering available for  the processing deadline of 0:00:00.020000000, add enough queues to buffer  0:00:00.020000000 additional data. Shortening processing latency to 0:00:00.000000000.
0:00:07.302448966 119043 0xfffe60007aa0 WARN                basesink gstbasesink.c:1249:gst_base_sink_query_latency:<multiudpsink0> warning: Not enough buffering available for  the processing deadline of 0:00:00.020000000, add enough queues to buffer  0:00:00.020000000 additional data. Shortening processing latency to 0:00:00.000000000.
0:00:07.302719375 119043 0xffff3c001980 WARN               rtspmedia rtsp-media.c:3281:default_handle_message: 0xfffe68141990: got warning Pipeline construction is invalid, please add queues. (../libs/gst/base/gstbasesink.c(1249): gst_base_sink_query_latency (): /GstPipeline:media-pipeline/GstMultiUDPSink:multiudpsink0:
Not enough buffering available for  the processing deadline of 0:00:00.020000000, add enough queues to buffer  0:00:00.020000000 additional data. Shortening processing latency to 0:00:00.000000000.)
0:00:07.302761808 119043 0xffff3c001980 WARN               rtspmedia rtsp-media.c:3281:default_handle_message: 0xfffe68141990: got warning Pipeline construction is invalid, please add queues. (../libs/gst/base/gstbasesink.c(1249): gst_base_sink_query_latency (): /GstPipeline:media-pipeline/GstMultiUDPSink:multiudpsink0:
Not enough buffering available for  the processing deadline of 0:00:00.020000000, add enough queues to buffer  0:00:00.020000000 additional data. Shortening processing latency to 0:00:00.000000000.)
0:00:08.939196658 119043 0xffff3c001980 WARN                basesink gstbasesink.c:1249:gst_base_sink_query_latency:<multiudpsink0> warning: Pipeline construction is invalid, please add queues.
0:00:08.939245396 119043 0xffff3c001980 WARN                basesink gstbasesink.c:1249:gst_base_sink_query_latency:<multiudpsink0> warning: Not enough buffering available for  the processing deadline of 0:00:00.020000000, add enough queues to buffer  0:00:00.020000000 additional data. Shortening processing latency to 0:00:00.000000000.
0:00:08.939446811 119043 0xffff3c001980 WARN               rtspmedia rtsp-media.c:3281:default_handle_message: 0xfffe68141990: got warning Pipeline construction is invalid, please add queues. (../libs/gst/base/gstbasesink.c(1249): gst_base_sink_query_latency (): /GstPipeline:media-pipeline/GstMultiUDPSink:multiudpsink0:
Not enough buffering available for  the processing deadline of 0:00:00.020000000, add enough queues to buffer  0:00:00.020000000 additional data. Shortening processing latency to 0:00:00.000000000.)
Active sources : 1
Thu Sep 18 11:17:12 2025
**PERF:  
29.97 (31.56)	
Active sources : 1
Thu Sep 18 11:17:17 2025
**PERF:  
29.94 (31.00)	
^C** ERROR: <_intr_handler:770>: User Interrupted.. 

Quitting
Stopping the server..!! 
Stopped the server..!! 
0:00:18.172211033 119043 0xaaab049b2230 WARN               rtspmedia rtsp-media.c:4935:gst_rtsp_media_set_state: media 0xfffe68141990 was not prepared
[NvMultiObjectTracker] De-initialized
App run successful
➜  deepstream-test5 git:(roi-fix) ✗ 


Please keep the input tensor name the same as your model: Input.

Maintaining the same tensor shape as nvstreammux/nvinfer can yield some performance benefits.

Please follow this documentation

Done. I made the changes you mentioned, but I am still facing the issue.

updated config files:

test5_config_file_nvmultiurisrcbin_src_list_attr_all_anpr.txt:

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2018-2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
enable=1
rows=3
columns=2
width=1280
height=720
gpu-id=0
nvbuf-memory-type=0
#Set to 1 to automatically tile in Square Grid
square-seq-grid=0

#Note: [source-list] now support REST Server with use-nvmultiurisrcbin=1

[source-list]
num-source-bins=1
# NOTE(review): list, sensor-id-list and sensor-name-list appear redacted in
# this post; they must be populated (or streams added via the REST server).
list=
use-nvmultiurisrcbin=1
#To display stream name in FPS log, set stream-name-display=1
stream-name-display=0
#sensor-id-list vector is one to one mapped with the uri-list
#identifies each sensor by a unique ID
sensor-id-list=
#Optional sensor-name-list vector is one to one mapped with the uri-list
sensor-name-list=
# With use-nvmultiurisrcbin=1 this value is used as the PGIE batch size
# in place of [primary-gie]/batch-size (see Note 1 under [primary-gie]).
max-batch-size=1
# REST server endpoint for adding/removing streams at runtime.
http-ip=localhost
http-port=9000
# needs to be enabled
#low-latency-mode=1
#sgie batch size is number of sources * fair fraction of number of objects detected per frame per source
#the fair fraction of number of object detected is assumed to be 4
sgie-batch-size=1
#Set the below key to keep the application running at all times

[source-attr-all]
enable=1
type=3
num-sources=1
gpu-id=0
cudadec-memtype=0
latency=100
rtsp-reconnect-interval-sec=10
#Limit the rtsp reconnection attempts
rtsp-reconnect-attempts=-1

[streammux]
gpu-id=0
#Note: when used with [source-list], batch-size is ignored
#instead, max-batch-size config is used
batch-size=5
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=33333
## Set muxer output width and height
# NOTE(review): the roi-params-src-* coordinates in config_preprocess.txt are
# interpreted in this 1920x1080 output space — confirm the configured ROI
# (0;0;640;480 there) covers the intended area of the frame.
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
attach-sys-ts-as-ntp=1
## drop-pipeline-eos ignores EOS from individual streams muxed in the DS pipeline
## It is useful with source-list/use-nvmultiurisrcbin=1 where the REST server
## will be running post last stream EOS to accept new streams
drop-pipeline-eos=1
##Boolean property to inform muxer that sources are live
##When using nvmultiurisrcbin live-source=1 is preferred default
##to allow batching of available buffers when number of sources is < max-batch-size configuration
live-source=1

#below sink is to be enabled incase OSD is required and Display is available.[should not be an ssh session]
[sink0]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0

[sink1]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
msg-conv-msg2p-lib=
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Enable analytics metadata processing
msg-conv-comp-id=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_amqp_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=
topic=hello_world
#Optional:
msg-broker-config=cfg_msgbroker_amqp_parkzap.txt
new-api=1
#(0) Use message adapter library api's
#(1) Use new msgbroker library api's

[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt



# This section describes the configuration for sink3, which is an RTSPStreaming sink (type=4).
# It enables network streaming of the processed video via RTSP.
# codec selects the video encoder (1=H264) and enc-type selects hardware (0) or software (1) encoding.
# rtsp-port sets the RTSP server port (here 9998); udp-port (here 5401) is the internal
# UDP port used to feed the RTSP server.
# sync=0 disables synchronization to the clock, which is typical for network sinks.

[sink3]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
type=4
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
#sw-preset=1 #for SW enc=(0)None (1)ultrafast (2)superfast (3)veryfast (4)faster
#(5)fast (6)medium (7)slow (8)slower (9)veryslow (10)placebo
sync=0
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
# set profile only for hw encoder, sw encoder selects profile based on sw-preset
profile=0
# set below properties in case of RTSPStreaming
rtsp-port=9998
udp-port=5401
# NOTE(review): 'mount-point' is not a recognized [sink] key for deepstream-app —
# the run log shows: WARN <parse_sink>: Unknown key 'mount-point' for group [sink3].
# The app mounts the stream at the fixed path /ds-test, so the key is commented out.
#mount-point=/ds-test
# Allow all clients to connect
# allow-all=1

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
# Object counting display settings
#show-obj-count=1
#obj-count-x-offset=10
#obj-count-y-offset=30
#obj-count-text-size=20
#obj-count-color=0;1;0;1
#obj-count-bg-color=0;0;0;0.7
nvbuf-memory-type=0

# Enables an nvdspreprocess element ahead of [primary-gie]. Together with
# [primary-gie]/input-tensor-meta=1, the PGIE consumes the tensors prepared
# here — i.e. only the ROIs configured in config_preprocess.txt.
[pre-process]
enable=1
config-file=config_preprocess.txt

# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.
[primary-gie]
enable=1
gpu-id=0
#operate-on-gie-id=0
# Consume the tensor meta attached by the [pre-process] element instead of
# scaling frames internally; this is what restricts inference to the ROIs.
input-tensor-meta=1
#(0): nvinfer; (1): nvinferserver
# NOTE(review): keep this comment on its own line — text after a value is not
# stripped as a comment by the key-file parser and corrupts the value.
plugin-type=0
#Required to display the PGIE labels, should be added even when using config-file
#property
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/max-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
# NOTE(review): with [source-list] the effective batch is max-batch-size (1);
# verify it is consistent with the first dim of network-input-shape (10) in
# config_preprocess.txt — TODO confirm.
batch-size=5
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
#Required by the app for SGIE, when used along with config-file property
# Must match target-unique-ids in config_preprocess.txt (both are 1).
gie-unique-id=1
nvbuf-memory-type=0
# NOTE(review): the three paths below appear redacted in this post;
# config-file is mandatory for any gie group.
model-engine-file=
labelfile-path=
config-file=
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

# ***** NOTE ******:
# NVIDIA TAO ReIdentificationNet
# NVIDIA pre-trained ReIdentificationNet is a high accuracy ResNet-50 model with feature 
# length 256. It can be downloaded and used directly with command:
# [ ] mkdir /opt/nvidia/deepstream/deepstream/samples/models/Tracker/
# [ ] wget 'https://api.ngc.nvidia.com/v2/models/nvidia/tao/reidentificationnet/versions/deployable_v1.0/files/resnet50_market1501.etlt' -P /opt/nvidia/deepstream/deepstream/samples/models/Tracker/

# Multi-object tracker; assigns persistent track IDs to the PGIE detections.
[tracker]
enable=1
# For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
#ll-config-file required to set different tracker types
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
#ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
gpu-id=0
# Show the assigned track ID next to each object on the OSD.
display-tracking-id=1

# NvDsAnalytics Configuration
# This section enables real-time analytics like ROI detection, line crossing, and object counting
[nvds-analytics]
enable=0
# Path to the analytics configuration file
config-file=config_nvdsanalytics.txt

[secondary-pre-process0]
enable=0
operate-on-gie-id=1
config-file=config_preprocess_sgie.txt

# Secondary GIE (the LPR model, per the run log) operating on objects produced
# by the PGIE (operate-on-gie-id=1), class 0 only.
[secondary-gie0]
enable=1
gpu-id=0
#input-tensor-meta=1
gie-unique-id=7
operate-on-gie-id=1
operate-on-class-ids=0
#(0): nvinfer; (1): nvinferserver
# NOTE(review): keep this comment on its own line — text after a value is not
# stripped as a comment by the key-file parser and corrupts the value.
plugin-type=0
#Note 1: when used with [source-list], batch-size is ignored
#instead, [source-list]/sgie-batch-size config is used
#Note 2: Be sure to rename model-engine-file to reflect new batch-size
batch-size=6
# NOTE(review): the three paths below appear redacted in this post;
# config-file is mandatory for any gie group.
model-engine-file=
labelfile-path=
config-file=

[tests]
file-loop=1

config_preprocess.txt:

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2021-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

# The values in the config file are overridden by values set through GObject
# properties.

# nvdspreprocess main property group (values here are overridden by GObject
# properties set on the element).
[property]
enable=1
# list of component gie-id for which tensor is prepared
# (1 = the [primary-gie] gie-unique-id in the app config)
target-unique-ids=1
# 0=NCHW, 1=NHWC, 2=CUSTOM
network-input-order=0
# 0=process on objects 1=process on frames
process-on-frame=1
# uniquely identify the metadata generated by this element
unique-id=5
# gpu-id to be used
gpu-id=0
# if enabled maintain the aspect ratio while scaling
maintain-aspect-ratio=1
# if enabled pad symmetrically with maintain-aspect-ratio enabled
symmetric-padding=1
# processing width/height at which image is scaled
# (matches the model input 3x480x640 reported in the engine log)
processing-width=640
processing-height=480
# max buffer in scaling buffer pool
scaling-buf-pool-size=6
# max buffer in tensor buffer pool
tensor-buf-pool-size=6
# tensor shape based on network-input-order
# NOTE(review): first dim (10) is the max tensor batch; the engine was built
# with max batch 10 (per the log), and it must be >= the effective muxer/PGIE
# batch ([source-list]/max-batch-size) — TODO confirm.
# (leading space before the value removed for portability)
network-input-shape=10;3;480;640
# 0=RGB, 1=BGR, 2=GRAY
network-color-format=0
# 0=FP32, 1=UINT8, 2=INT8, 3=UINT32, 4=INT32, 5=FP16
tensor-data-type=0
# tensor name same as input layer name (the model's input layer is "Input")
tensor-name=Input
# 0=NVBUF_MEM_DEFAULT 1=NVBUF_MEM_CUDA_PINNED 2=NVBUF_MEM_CUDA_DEVICE 3=NVBUF_MEM_CUDA_UNIFIED
scaling-pool-memory-type=0
# 0=NvBufSurfTransformCompute_Default 1=NvBufSurfTransformCompute_GPU 2=NvBufSurfTransformCompute_VIC
scaling-pool-compute-hw=0
# Scaling Interpolation method
# 0=NvBufSurfTransformInter_Nearest 1=NvBufSurfTransformInter_Bilinear 2=NvBufSurfTransformInter_Algo1
# 3=NvBufSurfTransformInter_Algo2 4=NvBufSurfTransformInter_Algo3 5=NvBufSurfTransformInter_Algo4
# 6=NvBufSurfTransformInter_Default
scaling-filter=0
# custom library .so path having custom functionality
custom-lib-path=/opt/nvidia/deepstream/deepstream/lib/gst-plugins/libcustom2d_preprocess.so
# custom tensor preparation function name having predefined input/outputs
# check the default custom library nvdspreprocess_lib for more info
custom-tensor-preparation-function=CustomTensorPreparation

[user-configs]
   # Below parameters get used when using default custom library nvdspreprocess_lib
   # network scaling factor
pixel-normalization-factor=0.003921568
   # mean file path in ppm format
#mean-file=
   # array of offsets for each channel
#offsets=

# ROI group applied to the stream(s) listed in src-ids.
[group-0]
# source-id 0 (the single stream configured in [source-list]).
src-ids=0
#custom-input-transformation-function=CustomAsyncTransformation
# 1 = prepare tensors only from the ROIs below; 0 = process the full frame.
process-on-roi=1
# Draw the ROI rectangle on the output for visual confirmation.
draw-roi=1
# ROI as left;top;width;height — presumably in streammux output coordinates
# (1920x1080 here), so 0;0;640;480 covers only the top-left region of the
# frame — TODO confirm this is the intended area.
roi-params-src-0=0;0;640;480

when i set input-tensor-meta to 0 in [primary-gie] then only i get detection logs from deepstream.