I have retrained a TLT ResNet50 model following this guide: https://docs.nvidia.com/metropolis/TLT/tlt-getting-started-guide/ but when I run it, it gives this warning:
WARNING: Num classes mismatch. Configured:5, detected by network: 300 4 1
Also the throughput is very low, around 10 fps. Is that because of the warning, or should I ignore it? And how can I improve the throughput?
--> DeepStream config file
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
# NVIDIA Corporation and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA Corporation is strictly prohibited.
# Application-level settings: FPS measurement and tracker metadata output.
[application]
enable-perf-measurement=1
# Interval (seconds) between perf (FPS) printouts on the console.
perf-measurement-interval-sec=5
# Directory where per-frame tracker output is dumped in KITTI label format.
kitti-track-output-dir=/dfs/AutomationWorkspace/metadata/20191016-170001/camera16/cam16Concat_28fps
#gie-kitti-output-dir=streamscl
# Tiled display is disabled (enable=0); the remaining keys are inert but
# kept so the section can be re-enabled without re-entering values.
[tiled-display]
enable=0
rows=1
columns=1
width=1280
height=720
gpu-id=1
#(0): nvbuf-mem-default - Default memory allocated, specific to particular platform
#(1): nvbuf-mem-cuda-pinned - Allocate Pinned/Host cuda memory
#(2): nvbuf-mem-cuda-device - Allocate Device cuda memory
#(3): nvbuf-mem-cuda-unified - Allocate Unified cuda memory
#(4): nvbuf-mem-surface-array - Allocate Surface Array memory, applicable for Jetson
#(5): nvbuf-mem-handle - Allocate Surface Handle memory, applicable for Jetson
#(6): nvbuf-mem-system - Allocate Surface System memory, allocated using calloc
nvbuf-memory-type=0
# Single MP4 file source decoded on GPU 1.
[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=3
uri=file:/dfs/AutomationWorkspace/EncodedVideos/20191016-170001/camera16/cam16Concat_28fps.mp4
#uri=file:/software/Videos_Concatenated/28fpsvideo_Encoded.mp4
num-sources=1
gpu-id=1
# Fixed: the three memtype lines below were missing their '#' markers,
# which makes the key-file parser reject the config.
#(0): memtype_device - Memory type Device
#(1): memtype_pinned - Memory type Host Pinned
#(2): memtype_unified - Memory type Unified
cudadec-memtype=0
# File sink: encodes the annotated stream to an MP4 file.
[sink1]
enable=1
type=1
output-file=/dfs/AutomationWorkspace/2019-09-17-01200-01500_objdt.mp4
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
# Fixed: the note below was missing its '#' marker, which makes the
# key-file parser reject the config.
#only SW mpeg4 is supported right now.
codec=3
sync=0
gpu-id=1
#iframeinterval=10
# Encoder bitrate in bits/sec.
bitrate=2000000
#output-file=/software/td_cafe/take11/camera16/2019-09-17-01200-01500_objdt.mp4
source-id=0
# On-screen EGL render sink, currently disabled (enable=0).
[sink0]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=1
nvbuf-memory-type=0
# On-screen display: bounding-box and label drawing settings.
[osd]
enable=1
gpu-id=1
# Bounding-box border width in pixels.
border-width=1
text-size=15
# Colors are semicolon-separated R;G;B;A values in the 0-1 range.
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0
nvbuf-memory-type=0
# Stream muxer: batches decoded frames before inference.
[streammux]
gpu-id=1
##Boolean property to inform muxer that sources are live
live-source=0
# NOTE(review): batch-size=4 here but only one source is enabled and the
# nvinfer config uses batch-size=1 — confirm these agree; a mismatch can
# hurt throughput.
batch-size=4
##time out in usec, to wait after the first buffer is available
##to push the batch even if the complete batch is not formed
batched-push-timeout=40000
# Fixed: the line below was missing its '#' marker, which makes the
# key-file parser reject the config.
#Set muxer output width and height
width=1280
height=720
#num-surfaces-per-frame=31
##Enable to maintain aspect ratio wrt source, and allow black borders, works
##along with width, height properties
enable-padding=0
nvbuf-memory-type=0
# Fixed: the three note lines below (they describe the [primary-gie]
# section) were missing their '#' markers.
#config-file property is mandatory for any gie section.
#Other properties are optional and if set will override the properties set in
#the infer config file.
# Primary inference engine (TLT FasterRCNN/ResNet50 detector).
# NOTE(review): gpu-id=1 here, but the nvinfer config file sets gpu-id=0 —
# confirm both point at the same GPU, otherwise buffers may be copied
# across devices on every frame.
[primary-gie]
enable=1
gpu-id=1
#model-engine-file=model_b4_int8.engine
labelfile-path=frcnn_labels.txt
# NOTE(review): batch-size=4 here vs batch-size=1 in the nvinfer config —
# confirm they agree; these values override/interact with the infer config.
batch-size=4
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
# 0 = run inference on every batch (no frame skipping at this level).
interval=0
gie-unique-id=1
nvbuf-memory-type=0
config-file=config_infer_primary_resnet50.txt
# Object tracker using the NvDCF low-level library.
[tracker]
enable=1
# Resolution at which the tracker operates (frames are scaled down to this).
tracker-width=320
tracker-height=180
#ll-lib-file=/usr/local/deepstream/libnvds_mot_iou.so
#ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_mot_klt.so
#ll-lib-file=/usr/local/deepstream/libnvds_mot_klt.so
#ll-lib-file=/usr/local/deepstream/libnvds_tracker.so
ll-lib-file=/opt/nvidia/deepstream/deepstream-4.0/lib/libnvds_nvdcf.so
#ll-config-file required for IOU only
# NOTE(review): the comment above says the config file is for IOU only, but a
# YAML config is passed while the NvDCF library is selected — confirm intended.
ll-config-file=/root/deepstream_sdk_v4.0_x86_64/samples/configs/deepstream-app/tracker_config.yml
#ll-config-file=iou_config.txt
gpu-id=1
enable-batch-process=1
# Test options: file-loop=0 plays the source file once (no looping).
[tests]
file-loop=0
--> ResNet50 (nvinfer) config file
# nvinfer configuration for the TLT FasterRCNN/ResNet50 model.
[property]
# NOTE(review): gpu-id=0 here while the app config uses gpu-id=1 throughout —
# confirm both refer to the same GPU to avoid cross-device copies.
gpu-id=0
net-scale-factor=1.0
# Per-channel mean values subtracted from the network input.
offsets=103.939;116.779;123.68
model-color-format=1
labelfile-path=frcnn_labels.txt
# Fixed: the two note lines below were missing their '#' markers, which
# makes the key-file parser reject the config; also normalized curly quotes.
#Provide the .etlt model exported by TLT or a TensorRT engine created by tlt-converter
#If use .etlt model, please also specify the key ('nvidia_tlt')
model-engine-file=./rcnn.engine
tlt-encoded-model=frcnn_kitti_1.etlt
# WARNING(review): this model key is a credential — avoid committing it to
# shared configs or pasting it publicly.
tlt-model-key=cmswbDk2OHFwcWgwZzAzdWw2ZzVkZjFlbWs6N2ZkMjFhMGItZmVhMS00NzRmLTk2YTQtOTU5NmUwNDAzMDlk
# channels;height;width;input-order
uff-input-dims=3;384;1280;0
uff-input-blob-name=input_1
# NOTE(review): streammux/primary-gie use batch-size=4 but this is 1 —
# confirm; a mismatch likely reduces throughput.
batch-size=1
# Fixed: the mode legend below was missing its '#' marker.
#0=FP32, 1=INT8, 2=FP16 mode
network-mode=0
# Number of object classes the parser expects from the network output.
num-detected-classes=5
# Skip interval between inference runs (1 = infer every other batch).
interval=1
gie-unique-id=1
is-classifier=0
#network-type=0
output-blob-names=dense_regress/BiasAdd;dense_class/Softmax;proposal
parse-bbox-func-name=NvDsInferParseCustomFrcnnUff
custom-lib-path=nvdsinfer_customparser_frcnn_uff/libnvds_infercustomparser_frcnn_uff.so
# Per-class detection filters applied to all classes; all zeros means no
# ROI clipping and no minimum/maximum size filtering.
[class-attrs-all]
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0