Dsexample causes deepstream-test5-app to stop or segfault

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU): Jetson
• DeepStream Version: DS 7.0
• JetPack Version (valid for Jetson only): 6.0
• TensorRT Version: 8.6.2
• Issue Type (questions, new requirements, bugs): bugs
• How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)

I ran deepstream-test5-app with dsexample enabled and found that the application stalled or segfaulted.
Here is the configuration file where the bug occurs.
test5_config_file_src_infer.txt (5.9 KB)

After trying several configuration changes, we found that the bug occurs under the following conditions:

  • tiled-display is disabled
  • osd is enabled
  • ds-example is enabled, and blur-objects=1

Is there a way to deal with it?

Container

nvcr.io/nvidia/deepstream:7.0-triton-multiarch

Commands

apt update
echo 'deb https://repo.download.nvidia.com/jetson/common r36.3 main' >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list && \
    echo 'deb https://repo.download.nvidia.com/jetson/t234 r36.3 main' >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list && \
    echo 'deb https://repo.download.nvidia.com/jetson/ffmpeg r36.3 main' >> /etc/apt/sources.list.d/nvidia-l4t-apt-source.list
apt-get install -y libgstreamer-plugins-base1.0-dev libgstreamer1.0-dev libopencv-dev
cd /opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-dsexample
vi Makefile # Change WITH_OPENCV?=0 to WITH_OPENCV?=1
export CUDA_VER=12.2
make
make install
cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test5/configs
vi test5_config_file_src_infer.txt # Modify the following configuration file
deepstream-test5-app -c test5_config_file_src_infer.txt

Makefile of gst-dsexample

# SPDX-FileCopyrightText: Copyright (c) 2017-2023 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
#enable this flag to use optimized dsexample plugin
#it can also be exported from command line

# Changed from WITH_OPENCV?=0
WITH_OPENCV?=1

USE_OPTIMIZED_DSEXAMPLE?=0
CUDA_VER?=
ifeq ($(CUDA_VER),)
  $(error "CUDA_VER is not set")
endif
TARGET_DEVICE = $(shell gcc -dumpmachine | cut -f1 -d -)
CXX:= g++

ifeq ($(USE_OPTIMIZED_DSEXAMPLE),1)
  SRCS:= gstdsexample_optimized.cpp
else
  SRCS:= gstdsexample.cpp
endif

INCS:= $(wildcard *.h)
LIB:=libnvdsgst_dsexample.so

NVDS_VERSION:=7.0

DEP:=dsexample_lib/libdsexample.a
DEP_FILES:=$(wildcard dsexample_lib/dsexample_lib.* )
DEP_FILES-=$(DEP)

CFLAGS+= -fPIC -DDS_VERSION=\"7.0.0\" \
         -I /usr/local/cuda-$(CUDA_VER)/include \
         -I ../../includes

GST_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/gst-plugins/
LIB_INSTALL_DIR?=/opt/nvidia/deepstream/deepstream-$(NVDS_VERSION)/lib/

LIBS := -shared -Wl,-no-undefined \
        -L dsexample_lib -ldsexample \
        -L/usr/local/cuda-$(CUDA_VER)/lib64/ -lcudart -ldl \
        -lnppc -lnppig -lnpps -lnppicc -lnppidei \
        -L$(LIB_INSTALL_DIR) -lnvdsgst_helper -lnvdsgst_meta -lnvds_meta -lnvbufsurface -lnvbufsurftransform\
        -Wl,-rpath,$(LIB_INSTALL_DIR)

OBJS:= $(SRCS:.cpp=.o)

PKGS:= gstreamer-1.0 gstreamer-base-1.0 gstreamer-video-1.0

ifeq ($(WITH_OPENCV),1)
CFLAGS+= -DWITH_OPENCV \
         -I /usr/include/opencv4
PKGS+= opencv4
endif

CFLAGS+=$(shell pkg-config --cflags $(PKGS))
LIBS+=$(shell pkg-config --libs $(PKGS))

all: $(LIB)

%.o: %.cpp $(INCS) Makefile
        @echo $(CFLAGS)
        $(CXX) -c -o $@ $(CFLAGS) $<

$(LIB): $(OBJS) $(DEP) Makefile
        @echo $(CFLAGS)
        $(CXX) -o $@ $(OBJS) $(LIBS)

$(DEP): $(DEP_FILES)
        $(MAKE) -C dsexample_lib/

install: $(LIB)
        cp -rv $(LIB) $(GST_INSTALL_DIR)

clean:
        rm -rf $(OBJS) $(LIB)

test5_config_file_src_infer.txt

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
# changed to disable
enable=0
rows=2
columns=2
width=1280
height=720
gpu-id=0
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
nvbuf-memory-type=0


[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
# changed to 2
type=2
uri=file://../../../../../samples/streams/sample_1080p_h264.mp4
num-sources=2
gpu-id=0
nvbuf-memory-type=0

[source1]
# changed to disable
enable=0
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
uri=file://../../../../../samples/streams/sample_1080p_h264.mp4
num-sources=2
gpu-id=0
nvbuf-memory-type=0

[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
# changed to 1
type=1
sync=1
source-id=0
gpu-id=0
nvbuf-memory-type=0

[sink1]
# changed to disable
enable=0
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=<host>;<port>;<topic>
topic=<topic>
#Optional:
#msg-broker-config=../../deepstream-test4/cfg_kafka.txt

[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0

[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
# changed to 1
batch-size=1
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
# attach-sys-ts-as-ntp=1

[primary-gie]
enable=1
gpu-id=0
# changed to 1
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;1;1;1
bbox-border-color3=0;1;0;1
nvbuf-memory-type=0
interval=0
gie-unique-id=1
model-engine-file=../../../../../samples/models/Primary_Detector/resnet18_trafficcamnet.etlt_b4_gpu0_int8.engine
labelfile-path=../../../../../samples/models/Primary_Detector/labels.txt
config-file=../../../../../samples/configs/deepstream-app/config_infer_primary.txt
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

[tracker]
enable=1
# For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
# ll-config-file required to set different tracker types
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
gpu-id=0
display-tracking-id=1

# add ds-example group
[ds-example]
enable=1
processing-width=640
processing-height=480
full-frame=0
blur-objects=1
unique-id=15

[tests]
file-loop=0

1. Recompile and reinstall dsexample

cd /opt/nvidia/deepstream/deepstream/sources/gst-plugins/gst-dsexample
sudo make CUDA_VER=12.2 WITH_OPENCV=1 clean
sudo make CUDA_VER=12.2 WITH_OPENCV=1 install

2. Modify the configuration file as follows

[ds-example]
enable=1
processing-width=640
processing-height=480
full-frame=0
#batch-size for batch supported optimized plugin
batch-size=1
unique-id=15
gpu-id=0
blur-objects=1
# Supported memory types for blur-objects:
# For x86: 1 and 3
# For Jetson: 0
nvbuf-memory-type=0

3. Run test5

cd /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test5
./deepstream-test5-app -c configs/test5_config_file_src_infer.txt -p 1

Using OpenCV causes data to be copied from the GPU to the CPU, which can cause performance problems. It is recommended to use CUDA to optimize dsexample instead.

Thank you for checking.
The bug still occurs on our end.
Are you running it with tiled-display enabled in your configuration file?
Or are you running it with osd disabled?

Yes, test5 works fine whether I enable osd/tiled-display or not

I have confirmed that the bug occurs when all of the following three conditions are met:

  • tiled-display is disabled
  • osd is enabled
  • dsexample is enabled with blur-objects=1

Does the deepstream-test5-app work correctly if you use the configuration file I attached in my initial post as is?

Can you upgrade to JetPack 6.1 and DeepStream 7.1? Those are the software versions I use, and the above scenario works fine there.

In addition, please follow the instructions in the README to configure the memory type

This is the configuration file I use; it is not much different from yours.

################################################################################
# SPDX-FileCopyrightText: Copyright (c) 2018-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
# SPDX-License-Identifier: LicenseRef-NvidiaProprietary
#
# NVIDIA CORPORATION, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from NVIDIA CORPORATION or
# its affiliates is strictly prohibited.
################################################################################

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
enable=0
rows=2
columns=2
width=1280
height=720
gpu-id=0
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
nvbuf-memory-type=0


[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
uri=file://../../../../../samples/streams/sample_1080p_h264.mp4
num-sources=2
gpu-id=0
nvbuf-memory-type=0

[source1]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
uri=file://../../../../../samples/streams/sample_1080p_h264.mp4
num-sources=2
gpu-id=0
nvbuf-memory-type=0

[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=1
source-id=0
gpu-id=0
nvbuf-memory-type=0

[sink1]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvdrmvideosink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=<host>;<port>;<topic>
topic=<topic>
#Optional:
#msg-broker-config=../../deepstream-test4/cfg_kafka.txt

[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM   - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=<val>
# Id of component in case only selected message to parse.
#msg-conv-comp-id=<val>

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_kafka_proto.so
conn-str=<host>;<port>
config-file=<broker config file e.g. cfg_kafka.txt>
subscribe-topic-list=<topic1>;<topic2>;<topicN>
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=dstest5_msgconv_sample_config.txt

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0

[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=4
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
# attach-sys-ts-as-ntp=1

[primary-gie]
enable=1
gpu-id=0
batch-size=4
## 0=FP32, 1=INT8, 2=FP16 mode
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;1;1;1
bbox-border-color3=0;1;0;1
nvbuf-memory-type=0
interval=0
gie-unique-id=1
model-engine-file=../../../../../samples/models/Primary_Detector/resnet18_trafficcamnet_pruned.onnx_b4_gpu0_int8.engine
labelfile-path=../../../../../samples/models/Primary_Detector/labels.txt
config-file=../../../../../samples/configs/deepstream-app/config_infer_primary.txt
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

[tracker]
enable=1
# For NvDCF and NvDeepSORT tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
# ll-config-file required to set different tracker types
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_IOU.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvSORT.yml
ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_perf.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDCF_accuracy.yml
# ll-config-file=../../../../../samples/configs/deepstream-app/config_tracker_NvDeepSORT.yml
gpu-id=0
display-tracking-id=1

[ds-example]
enable=1
processing-width=640
processing-height=480
full-frame=0
#batch-size for batch supported optimized plugin
batch-size=1
unique-id=15
gpu-id=0
blur-objects=1
# Supported memory types for blur-objects:
# For x86: 1 and 3
# For Jetson: 0
# nvbuf-memory-type=0

[tests]
file-loop=0