• Hardware Platform: Jetson AGX
• DeepStream Version: 6.0.1
• JetPack Version: 4.6.1-b110
• TensorRT Version: 8.2.1.8-1+cuda10.2
• Issue Type: Bug
How to reproduce the issue?
Run deepstream-test5-app with the following config:
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
[tiled-display]
enable=1
rows=1
columns=1
width=640
height=360
gpu-id=0
nvbuf-memory-type=0
[source0]
enable=1
type=4
uri=rtsp://100.65.122.110:8554/camera3
cudadec-memtype=0
source-id=0
smart-record=1
#smart-rec-file-prefix=source0
#smart-rec-container=0
#smart-rec-default-duration=10
#smart-rec-duration=10
smart-rec-cache=6
smart-rec-start-time=3
[sink0]
enable=1
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=1
#iframeinterval=10
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=0
output-file=HMT03_out.mp4
#source-id=0
#[sink1]
#enable=1
##Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming
#type=4
##1=h264 2=h265
#codec=1
###encoder type 0=Hardware 1=Software
#enc-type=0
#sync=0
#bitrate=4000000
##H264 Profile - 0=Baseline 2=Main 4=High
##H265 Profile - 0=Main 1=Main10
#profile=0
##set below properties in case of RTSPStreaming
#rtsp-port=8554
#udp-port=5400
##source-id=0
[sink1]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker
type=6
#msg-conv-config=ds_tak_msgconv_config_v3_8cam.txt
msg-conv-config=ds_demo_msg_conv.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM - Custom schema payload
msg-conv-payload-type=0
msg-conv-frame-interval=25
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_kafka_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=localhost;9092;
topic=topic_test11
#Optional:
#msg-broker-config=../../deepstream-test4/cfg_kafka.txt
#new-api=0
#(0) Use message adapter library api’s
#(1) Use new msgbroker library api’s
[message-consumer0]
#reads returned kafka messages for smart video record
enable=1
proto-lib=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_kafka_proto.so
conn-str=localhost;9092
#config-file=cfg_kafka.txt
subscribe-topic-list=topic_test12
#Use this option if message has sensor name as id instead of index (0,1,2 etc.).
#sensor-list-file=ds_demo_msg_conv.txt
[osd]
enable=1
gpu-id=0
border-width=1
text-size=40
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;0.3
font=Serif
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=1
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
##Set muxer output width and height
width=640
height=360
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
attach-sys-ts-as-ntp=1
# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.
[primary-gie]
enable=1
gpu-id=0
#model-engine-file=../../models/Primary_Detector_Nano/resnet10.caffemodel_b1_gpu0_fp16.engine
batch-size=1
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=4
#gie-unique-id=1
nvbuf-memory-type=0
#model-engine-file=/opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/resnet10.caffemodel_b4_gpu0_int8.engine
#labelfile-path=/opt/nvidia/deepstream/deepstream-6.0/samples/models/Primary_Detector/labels.txt
#config-file=/opt/nvidia/deepstream/deepstream-6.0/samples/configs/deepstream-app/config_infer_primary.txt
#config-file=config_infer_primary.txt
config-file=model_yolo.txt
[tracker]
enable=1
# For the case of NvDCF tracker, tracker-width and tracker-height must be a multiple of 32, respectively
tracker-width=640
tracker-height=384
#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_iou.so
#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
ll-lib-file=/opt/nvidia/deepstream/deepstream-6.0/lib/libnvds_nvmultiobjecttracker.so
ll-config-file=config_tracker_NvDCF_perf.yml
#ll-config-file=iou_config.txt
gpu-id=0
enable-batch-process=1
enable-past-frame=1
display-tracking-id=1
[tests]
file-loop=0
I then have a Python script that interprets the Kafka messages and sends back start/stop-recording commands:
Start recording kafka message:
{"command": "start-recording", "start": "2022-12-12T14:36:37.815Z", "end": "2022-12-12T14:37:02.815Z", "sensor": {"id": "0"}}
Stop recording kafka message:
{"command": "stop-recording", "start": "2022-12-12T14:36:37.815Z", "end": "2022-12-12T14:37:02.815Z", "sensor": {"id": "0"}}
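This is only a sketch of the idea, not the exact script. It assumes the kafka-python package, that the detection messages arrive on topic_test11 (the [sink1] topic) and that the commands go back on topic_test12 (the [message-consumer0] subscribe topic); the trigger logic and the 25 s window are simplified for illustration:

```python
# Minimal sketch: consume DeepStream detection messages, publish a
# start-recording command back for sensor "0". (stop-recording omitted
# here for brevity; topic names taken from the config above.)
import json
from datetime import datetime, timedelta, timezone

from kafka import KafkaConsumer, KafkaProducer

BROKER = "localhost:9092"
DETECTION_TOPIC = "topic_test11"   # published by [sink1] (MsgConvBroker)
COMMAND_TOPIC = "topic_test12"     # read by [message-consumer0]

consumer = KafkaConsumer(
    DETECTION_TOPIC,
    bootstrap_servers=BROKER,
    value_deserializer=lambda m: json.loads(m.decode("utf-8")),
)
producer = KafkaProducer(
    bootstrap_servers=BROKER,
    value_serializer=lambda m: json.dumps(m).encode("utf-8"),
)

def iso(ts):
    # Timestamp format used in the messages above, e.g. 2022-12-12T14:36:37.815Z
    return ts.strftime("%Y-%m-%dT%H:%M:%S.%f")[:-3] + "Z"

for msg in consumer:
    # On a detection, ask deepstream-test5 to record a 25 s clip for sensor "0".
    now = datetime.now(timezone.utc)
    producer.send(COMMAND_TOPIC, {
        "command": "start-recording",
        "start": iso(now),
        "end": iso(now + timedelta(seconds=25)),
        "sensor": {"id": "0"},
    })
    producer.flush()
```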
Expected behavior: the recorded smart-record video always includes the 3-second pre-event buffer (smart-rec-cache=6, smart-rec-start-time=3).
Actual behavior: around 25% of the time the recorded video does not include the buffer.
I'm using a video in which a single object appears once a minute for ~15 seconds, so I can be sure the video cache is full when the smart-record message is sent.
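For completeness, this is the kind of duration check that shows the problem (a rough sketch, assuming ffprobe is installed; the expected clip length, roughly the 25 s command window plus the 3 s cache, and the glob pattern are illustrative and may need adjusting to your smart-rec file prefix):

```python
# Rough sketch: flag smart-record clips that look too short to contain the
# 3 s pre-event cache. Threshold and file pattern are assumptions.
import glob
import subprocess

EXPECTED_MIN_SECONDS = 27.0  # ~25 s command window + 3 s cache, minus some slack

for path in sorted(glob.glob("*.mp4")):
    out = subprocess.run(
        ["ffprobe", "-v", "error",
         "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", path],
        capture_output=True, text=True, check=True,
    )
    duration = float(out.stdout.strip())
    status = "OK" if duration >= EXPECTED_MIN_SECONDS else "missing buffer?"
    print(f"{path}: {duration:.1f}s  {status}")
```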
Many thanks in advance