################################################################################
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################

[application]
enable-perf-measurement=1
perf-measurement-interval-sec=5
#gie-kitti-output-dir=streamscl

[tiled-display]
enable=1
rows=1
columns=1
width=1280
height=720
gpu-id=0
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory, applicable for Tesla
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory, applicable for Tesla
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory, applicable for Tesla
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
nvbuf-memory-type=0

[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
#uri=file:///opt/nvidia/deepstream/deepstream-5.0/samples/streams/sample_1080p_h264.mp4
num-sources=1
gpu-id=0
nvbuf-memory-type=0

#[source1]
#enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI 4=RTSP
#type=3
uri=file:////opt/nvidia/deepstream/deepstream-5.0/streams/sources/streams/input_video.mp4
#num-sources=1
#gpu-id=0
#nvbuf-memory-type=0
# smart record specific fields, valid only for source type=4
# 0 = disable, 1 = through cloud events, 2 = through cloud + local events
smart-record=2
# 0 = mp4, 1 = mkv
#smart-rec-container=0
smart-rec-start-time=1
smart-rec-file-prefix=smart_record
#smart-rec-dir-path=/home/monika/record
# video cache size in seconds
smart-rec-video-cache=10

[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker
type=2
#1=mp4 2=mkv
# Kafka topic creation command:
#./bin/kafka-topics --create --bootstrap-server localhost:9092 \
#--replication-factor 1 --partitions 1 --topic users
container=1
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=0
#iframeinterval=10
bitrate=100000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=0
output-file=resnet.mp4
source-id=0

[sink1]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File 4=UDPSink 5=nvoverlaysink 6=MsgConvBroker
type=6
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM - Custom schema payload
msg-conv-payload-type=0
msg-broker-proto-lib=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_kafka_proto.so
#Provide your msg-broker-conn-str here
msg-broker-conn-str=localhost;9092;quickstart-events
#topic=
#Optional:
#msg-broker-config=../../deepstream-test4/cfg_kafka.txt
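# The broker sink above publishes DeepStream-schema JSON events to the
# "quickstart-events" Kafka topic on localhost:9092. A minimal sketch for
# verifying that messages arrive, assuming the third-party kafka-python
# package (not part of DeepStream) is installed:
#
#   from kafka import KafkaConsumer            # pip install kafka-python
#
#   consumer = KafkaConsumer(
#       "quickstart-events",                    # topic from msg-broker-conn-str
#       bootstrap_servers="localhost:9092",     # host;port from msg-broker-conn-str
#       auto_offset_reset="latest",
#   )
#   for msg in consumer:
#       print(msg.value.decode("utf-8"))        # one JSON payload per event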

[sink2]
enable=1
type=1
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
## only SW mpeg4 is supported right now.
codec=3
sync=1
bitrate=2000000
output-file=out.mp4
source-id=0

# sink type = 6 by default creates msg converter + broker.
# To use multiple brokers use this group for converter and use
# sink type = 6 with disable-msgconv = 1
[message-converter]
enable=0
msg-conv-config=dstest5_msgconv_sample_config.txt
#(0): PAYLOAD_DEEPSTREAM - Deepstream schema payload
#(1): PAYLOAD_DEEPSTREAM_MINIMAL - Deepstream schema payload minimal
#(256): PAYLOAD_RESERVED - Reserved type
#(257): PAYLOAD_CUSTOM - Custom schema payload
msg-conv-payload-type=0
# Name of library having custom implementation.
#msg-conv-msg2p-lib=
# Id of component in case only selected message to parse.
#msg-conv-comp-id=

# Configure this group to enable cloud message consumer.
[message-consumer0]
enable=0
proto-lib=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_kafka_proto.so
conn-str=localhost;9092
#config-file=
subscribe-topic-list=quickstart-events
# Use this option if message has sensor name as id instead of index (0,1,2 etc.).
sensor-list-file=dstest5_msgconv_sample_config.txt

[osd]
enable=1
gpu-id=0
border-width=1
text-size=10
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0

[streammux]
gpu-id=0
##Boolean property to inform muxer that sources are live
live-source=0
batch-size=1
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
## If set to TRUE, system timestamp will be attached as ntp timestamp
## If set to FALSE, ntp timestamp from rtspsrc, if available, will be attached
# attach-sys-ts-as-ntp=1

[primary-gie]
enable=1
gpu-id=0
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
bbox-border-color0=1;0;0;1
#bbox-border-color1=0;1;1;1
#bbox-border-color2=0;1;1;1
#bbox-border-color3=0;1;0;1
nvbuf-memory-type=0
interval=0
gie-unique-id=1
model-engine-file=peoplenet/resnet34_peoplenet_pruned.etlt_b1_gpu0_int8.engine
labelfile-path=peoplenet/labels.txt
config-file=config_infer_primary_peoplenet.txt
#infer-raw-output-dir=../../../../../samples/primary_detector_raw_output/

[tracker]
enable=1
tracker-width=640
tracker-height=288
ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_nvdcf.so
#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.0/lib/libnvds_mot_klt.so
#ll-config-file required for DCF/IOU only
ll-config-file=tracker_config.yml
#ll-config-file=iou_config.txt
gpu-id=0
#enable-batch-process applicable to DCF only
enable-batch-process=0
#enable-past-frame=1

[nvds-analytics]
enable=1
config-file=config_nvdsanalytics.txt

[tests]
file-loop=0
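# [message-consumer0] (disabled above) subscribes to the same "quickstart-events"
# topic for cloud-to-device messages, e.g. smart-record start/stop commands.
# A minimal sketch for publishing a test message, assuming kafka-python; the
# payload below is only a placeholder, a real command must follow the
# DeepStream cloud-to-device message schema:
#
#   from kafka import KafkaProducer            # pip install kafka-python
#
#   producer = KafkaProducer(bootstrap_servers="localhost:9092")
#   producer.send("quickstart-events", b'{"command": "test"}')  # placeholder payload
#   producer.flush()                           # block until the message is delivered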