Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson Orin NX 16)
• DeepStream Version 7
• JetPack Version (valid for Jetson only)
I am trying to run the DeepStream SDK in the AINVR app with two models: a YOLOv8s PGIE, which I deployed following the docs and which works correctly, and NVIDIA's VehicleTypeNet as the SGIE. The problem is that I only get output from the first model.
Here are the config files I'm using:
yolov8s_config_file_nx16.txt:
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl
[tiled-display]
enable=1
rows=1
columns=1
width=1280
height=720
gpu-id=0
nvbuf-memory-type=0
#Note: [source-list] now support REST Server with use-nvmultiurisrcbin=1
[source-list]
num-source-bins=0
#list=file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h264.mp4;file:///opt/nvidia/deepstream/deepstream/samples/streams/sample_1080p_h265.mp4
use-nvmultiurisrcbin=1
#sensor-id-list vector is one to one mapped with the uri-list
#identifies each sensor by a unique ID
#sensor-id-list=UniqueSensorId1;UniqueSensorId2
max-batch-size=4
http-ip=localhost
http-port=9010
#sgie batch size is number of sources * fair fraction of number of objects detected per frame per source
#the fair fraction of number of object detected is assumed to be 4
sgie-batch-size=40
#set the below key to keep the application running at all times
stream-name-display=1
[source-attr-all]
enable=1
type=3
num-sources=1
gpu-id=0
cudadec-memtype=0
latency=100000
rtsp-reconnect-interval-sec=5
[streammux]
gpu-id=0
#Note: when used with [source-list], batch-size is ignored
#instead, max-batch-size config is used
batch-size=4
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=30000
## Set muxer output width and height
width=960
height=544
#enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
attach-sys-ts-as-ntp=0
# drop-pipeline-eos ignores EOS from individual streams muxed in the DS pipeline
# It is useful with source-list/use-nvmultiurisrcbin=1 where the REST server
# will be running post last stream EOS to accept new streams
drop-pipeline-eos=1
##Boolean property to inform muxer that sources are live
##When using nvmultiurisrcbin live-source=1 is preferred default
##to allow batching of available buffers when number of sources is < max-batch-size configuration
live-source=1
buffer-pool-size=4
[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=1
sync=1
source-id=0
gpu-id=0
nvbuf-memory-type=0
[sink1]
enable=1
msg-broker-conn-str=redis;6379;test
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream/lib/libnvds_redis_proto.so
msg-conv-msg2p-new-api=0
msg-conv-frame-interval=1
msg-broker-config=/ds-config-files/yolov8s/cfg_redis.txt
msg-conv-payload-type=1
#multiple-payloads=1
source-id=0
sync=0
type=6
topic=test
[sink2]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0
[sink3]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 5=Overlay
type=4
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=0
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=0
#set below properties in case of RTSPStreaming
rtsp-port=8555
udp-port=5511
[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
# config-file property is mandatory for any gie section.
# Other properties are optional and if set will override the properties set in
# the infer config file.
[primary-gie]
enable=1
gpu-id=0
gie-unique-id=1
nvbuf-memory-type=0
config-file=config_infer_primary_yoloV8_nx16.txt
model-engine-file=/yolov8s/model_b4_gpu0_int8.engine
batch-size=4
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=0
[tracker]
enable=1
tracker-width=960
tracker-height=544
ll-lib-file=/opt/nvidia/deepstream/deepstream/lib/libnvds_nvmultiobjecttracker.so
ll-config-file=config_tracker_NvDCF_perf.yml
sub-batches=2:2
gpu-id=0
display-tracking-id=1
[secondary-gie0]
enable=1
model-engine-file=/yolov8s/Secondary_VehicleTypes/resnet18_vehicletypenet.etlt_b16_gpu0_int8.engine
gpu-id=0
batch-size=16
gie-unique-id=4
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=config_infer_secondary_vehicletypes.txt
[tests]
file-loop=1
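(A side note on sgie-batch-size: if I follow the formula in the comment above, my max-batch-size=4 times the assumed 4 objects per frame per source gives 4 × 4 = 16, so the 40 I kept from the default config should just be a generous upper bound, as far as I can tell.)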
config_infer_primary_yoloV8_nx16.txt:
[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
model-color-format=0
onnx-file=/yolov8s/yolov8s-dependencies/yolov8s.onnx
model-engine-file=/yolov8s/model_b4_gpu0_int8.engine
int8-calib-file=/yolov8s/calib.table
labelfile-path=labels.txt
batch-size=4
network-mode=1
num-detected-classes=80
interval=0
gie-unique-id=1
process-mode=1
network-type=0
cluster-mode=2
maintain-aspect-ratio=1
symmetric-padding=1
#workspace-size=2000
parse-bbox-func-name=NvDsInferParseYolo
#parse-bbox-func-name=NvDsInferParseYoloCuda
custom-lib-path=/yolov8s-files/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet
[class-attrs-all]
nms-iou-threshold=0.5
pre-cluster-threshold=0.25
topk=300
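(For reference, net-scale-factor=0.0039215697906911373 is just 1/255, i.e. input pixels are scaled from [0, 255] down to [0, 1], which is what the YOLOv8 export from the docs expects, as far as I understand.)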
config_infer_secondary_vehicletypes.txt:
[property]
gpu-id=0
net-scale-factor=1
tlt-model-key=tlt_encode
tlt-encoded-model=/yolov8s/Secondary_VehicleTypes/resnet18_vehicletypenet.etlt
model-engine-file=/yolov8s/Secondary_VehicleTypes/resnet18_vehicletypenet.etlt_b16_gpu0_int8.engine
int8-calib-file=/yolov8s/Secondary_VehicleTypes/cal_trt.bin
labelfile-path=/yolov8s/Secondary_VehicleTypes/labels.txt
force-implicit-batch-dim=1
batch-size=16
model-color-format=1
0=FP32, 1=INT8, 2=FP16 mode
network-mode=1
is-classifier=1
process-mode=2
uff-input-blob-name=input_1
output-blob-names=predictions/Softmax
classifier-async-mode=1
classifier-threshold=0.51
input-object-min-width=128
input-object-min-height=128
operate-on-gie-id=1
operate-on-class-ids=0
classifier-type=vehicletype
#scaling-filter=0
#scaling-compute-hw=0
infer-dims=3;224;224
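(My understanding of how the two models are linked: operate-on-gie-id=1 points the classifier at the PGIE's gie-unique-id=1, and operate-on-class-ids=0 means only detections whose class index is 0 in the PGIE's labels.txt, and which are at least 128×128 (input-object-min-width/height), get cropped and classified. Assuming my labels.txt is the standard 80-class COCO list, index 0 is person and index 2 is car. Also, as far as I know classifier-async-mode=1 only takes effect when the tracker is enabled, which it is here.)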
What am I missing here?
For reference, I'm using the deepstream-test5 app here.
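By "no output from the second model" I mean the OSD never shows a vehicle-type label and the Redis payloads never contain one. To check the metadata directly I can also attach a pad probe in a small side script with the DeepStream Python bindings; a minimal sketch along the lines of the standard deepstream_python_apps examples (the probe placement is my own choice, nothing here is part of test5 itself):

import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import pyds

def osd_sink_pad_probe(pad, info, u_data):
    # Walk batch -> frame -> object -> classifier metadata and print every
    # label an SGIE attached; if the SGIE runs, its gie-unique-id (4) and a
    # vehicle-type label should show up here.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            l_cls = obj_meta.classifier_meta_list
            while l_cls is not None:
                cls_meta = pyds.NvDsClassifierMeta.cast(l_cls.data)
                l_label = cls_meta.label_info_list
                while l_label is not None:
                    label_info = pyds.NvDsLabelInfo.cast(l_label.data)
                    print("frame %d obj %d sgie %d -> %s" % (
                        frame_meta.frame_num, obj_meta.object_id,
                        cls_meta.unique_component_id, label_info.result_label))
                    l_label = l_label.next
                l_cls = l_cls.next
            l_obj = l_obj.next
        l_frame = l_frame.next
    return Gst.PadProbeReturn.OK

# Attached to the OSD sink pad of the test pipeline, e.g.:
# osd.get_static_pad("sink").add_probe(Gst.PadProbeType.BUFFER,
#                                      osd_sink_pad_probe, None)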