Hi,
We would like to send the inference results to Kafka. We got this working with deepstream-test4, but when we try it with deepstream-app, it does not work as expected.
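For debugging, a small consumer like the one below can be used to check whether any payload actually reaches the topic while deepstream-app is running (only a sketch, assuming the kafka-python package is installed; the broker address and topic name are the ones from the config below).

# Quick check that messages published to the topic arrive at the broker.
# Assumes: pip install kafka-python; broker and topic taken from the config below.
from kafka import KafkaConsumer

consumer = KafkaConsumer(
    "pedector",                              # topic from sink5 / message-consumer
    bootstrap_servers="192.168.20.160:9092", # from msg-broker-conn-str
    auto_offset_reset="latest",
)

for msg in consumer:
    # Each DeepStream payload is a JSON string; print it for inspection.
    print(msg.value.decode("utf-8"))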
The following is the config file:
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
kitti-track-output-dir=out
[tiled-display]
enable=1
rows=2
columns=2
width=1080
height=720
gpu-id=0
nvbuf-memory-type=0
[source0]
enable=1
type=1
camera-width=1280
camera-height=720
camera-fps-n=30
camera-v4l2-dev-node=2
num-sources=1
gpu-id=0
cudadec-memtype=0
[source1]
enable=1
type=1
camera-width=1280
camera-height=720
camera-fps-n=30
camera-v4l2-dev-node=5
num-sources=1
gpu-id=0
cudadec-memtype=0
[source2]
enable=0
type=1
camera-width=1280
camera-height=720
camera-fps-n=30
camera-v4l2-dev-node=8
num-sources=1
gpu-id=0
cudadec-memtype=0
[source3]
enable=0
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=1
camera-width=1280
camera-height=720
camera-fps-n=30
camera-v4l2-dev-node=11
num-sources=1
gpu-id=0
cudadec-memtype=0
[sink0]
enable=0
type=2
sync=0
source-id=0
gpu-id=0
nvbuf-memory-type=0
[sink1]
enable=0
type=2
sync=0
source-id=1
gpu-id=0
nvbuf-memory-type=0
[sink2]
enable=0
type=2
sync=0
source-id=2
gpu-id=0
nvbuf-memory-type=0
[sink3]
enable=0
type=2
sync=0
source-id=3
gpu-id=0
nvbuf-memory-type=0
[sink4]
enable=1
type=4
codec=1
enc-type=0
sync=0
bitrate=4000000
profile=0
rtsp-port=8554
udp-port=5400
[sink5]
enable=1
type=6
msg-conv-config=./msgconv_config.txt
msg-conv-payload-type=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_kafka_proto.so
topic=pedector
#Optional:
msg-broker-config=./cfg_kafka.txt
msg-broker-conn-str=192.168.20.160;9092
disable-msgconv=1
msg-broker-comp-id=1
msg-conv-comp-id=1
msg-conv-msg2p-lib=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_msgconv.so
[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Serif
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
[streammux]
gpu-id=0
live-source=1
batch-size=2
batched-push-timeout=-1
width=1920
height=1080
enable-padding=0
nvbuf-memory-type=0
[primary-gie]
enable=1
gpu-id=0
gie-unique-id=1
nvbuf-memory-type=0
config-file=config_infer_primary_yoloV3_tiny.txt
[tracker]
enable=1
tracker-width=640
tracker-height=384
gpu-id=0
ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so
ll-config-file=tracker_config.yml
enable-batch-process=1
[tests]
file-loop=0
#[message-converter]
#enable=1
#msg-conv-config=./msgconv_config.txt
#msg-conv-payload-type=0
#msg-conv-msg2p-lib=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_msgconv.so
#msg-conv-comp-id=1
[message-consumer]
enable=1
proto-lib=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_kafka_proto.so
conn-str=192.168.20.160;9092
config-file=./cfg_kafka.txt
subscribe-topic-list=pedector
The following is cfg_kafka.txt:
[message-broker]
consumer-group-id = mygrp
proto-cfg = "message.max.bytes=200000;log_level=6"
producer-proto-cfg = "queue.buffering.max.messages=200000;message.send.max.retries=3"
consumer-proto-cfg = "max.poll.interval.ms=20000"
partition-key = sensor.id
#share-connection = 1
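To rule out connectivity problems on the broker side, a test message can also be published to the same topic from the machine running deepstream-app; the [message-consumer] section above should then receive it independently of the inference pipeline (again only a sketch using kafka-python; broker and topic are the ones from the configs).

# Publish one test message so the broker connection and the [message-consumer]
# subscription can be verified without the inference pipeline.
# Assumes: pip install kafka-python.
from kafka import KafkaProducer

producer = KafkaProducer(bootstrap_servers="192.168.20.160:9092")
producer.send("pedector", b'{"test": "hello from kafka-python"}')
producer.flush()  # ensure the message is delivered before the script exits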