Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU)
Jetson TX2
• DeepStream Version
5.1
• JetPack Version (valid for Jetson only)
4.5.1
• TensorRT Version
7.1
I am trying to run a face detector (FACEDETECTIR) as the primary GIE and custom Caffe models for Age, Gender, Ethnicity and Emotion as the secondary classifiers.
All of the models load successfully and their outputs are displayed on the video, but the secondary models show seemingly random entries from their labels.txt files.
My aim is to run the secondary models on every frame of the video/stream, so that a label such as Emotion updates in real time as the expression on a person's face changes. How can I achieve that?
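To make the goal concrete, below is a minimal sketch (using the pyds Python bindings and the standard NvDsBatchMeta / NvDsClassifierMeta metadata API) of the kind of pad probe I would attach downstream of the secondary GIEs to check whether the emotion label really changes frame by frame. This assumes I move from the config-driven deepstream-app to a custom pipeline; the function name and the hard-coded id 4 (the emotion model's gie-unique-id in my config) are only illustrative.
import gi
gi.require_version("Gst", "1.0")
from gi.repository import Gst
import pyds

EMOTION_GIE_ID = 4  # gie-unique-id of the emotion SGIE in my config (illustrative)

def sgie_src_pad_buffer_probe(pad, info, u_data):
    # Walk batch -> frame -> object -> classifier -> label metadata and print
    # the label the emotion classifier attached to every object in every frame,
    # so I can see whether it really updates per frame or is being cached.
    gst_buffer = info.get_buffer()
    if not gst_buffer:
        return Gst.PadProbeReturn.OK
    batch_meta = pyds.gst_buffer_get_nvds_batch_meta(hash(gst_buffer))
    l_frame = batch_meta.frame_meta_list
    while l_frame is not None:
        frame_meta = pyds.NvDsFrameMeta.cast(l_frame.data)
        l_obj = frame_meta.obj_meta_list
        while l_obj is not None:
            obj_meta = pyds.NvDsObjectMeta.cast(l_obj.data)
            l_cls = obj_meta.classifier_meta_list
            while l_cls is not None:
                cls_meta = pyds.NvDsClassifierMeta.cast(l_cls.data)
                if cls_meta.unique_component_id == EMOTION_GIE_ID:
                    l_label = cls_meta.label_info_list
                    while l_label is not None:
                        label_info = pyds.NvDsLabelInfo.cast(l_label.data)
                        print("frame", frame_meta.frame_num,
                              "track", obj_meta.object_id,
                              "emotion", label_info.result_label)
                        l_label = l_label.next
                l_cls = l_cls.next
            l_obj = l_obj.next
        l_frame = l_frame.next
    return Gst.PadProbeReturn.OK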
I have attached my config files to this post. Please help.
deepstream_app_source_custom.txt
################################################################################
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
[application]
enable-perf-measurement=1
perf-measurement-interval-sec=1
[tiled-display]
enable=1
rows=1
columns=1
width=1280
height=720
gpu-id=0
[source0]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=2
num-sources=1
#uri=file://../../streams/sample_1080p_h265.mp4
#uri=file://../../streams/Video/What is a CHINDIAN.mp4
uri=rtsp://root:Glueck321@10.0.1.36/axis-media/media.amp?streamprofile=H264
gpu-id=0
[streammux]
gpu-id=0
batch-size=1
batched-push-timeout=40000
## Set muxer output width and height
width=1920
height=1080
[sink0]
enable=1
#Type - 1=FakeSink 2=EglSink 3=File
type=2
sync=0
source-id=0
gpu-id=0
[osd]
enable=1
gpu-id=0
border-width=3
text-size=15
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
[primary-gie]
enable=1
gpu-id=0
# Modify as necessary
model-engine-file=../../models/tlt_pretrained_models/facedetectir/resnet18_facedetectir_pruned.etlt_b1_gpu0_int8.engine
batch-size=1
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
gie-unique-id=1
config-file=config_infer_primary_facedetectir.txt
[sink1]
enable=0
type=3
#1=mp4 2=mkv
container=1
#1=h264 2=h265 3=mpeg4
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=0
bitrate=2000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=0
output-file=out.mp4
source-id=0
[sink2]
enable=0
#Type - 1=FakeSink 2=EglSink 3=File 4=RTSPStreaming 5=Overlay
type=4
#1=h264 2=h265
codec=1
#encoder type 0=Hardware 1=Software
enc-type=0
sync=0
bitrate=4000000
#H264 Profile - 0=Baseline 2=Main 4=High
#H265 Profile - 0=Main 1=Main10
profile=0
# set below properties in case of RTSPStreaming
rtsp-port=8554
udp-port=5400
[tracker]
enable=1
tracker-width=640
tracker-height=384
#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_mot_iou.so
ll-lib-file=/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_nvdcf.so
#ll-lib-file=/opt/nvidia/deepstream/deepstream-5.1/lib/libnvds_mot_klt.so
#ll-config-file required for DCF/IOU only
ll-config-file=../deepstream-app/tracker_config.yml
#ll-config-file=iou_config.txt
gpu-id=0
#enable-batch-process applicable to DCF only
enable-batch-process=1
[secondary-gie0]
enable=1
model-engine-file=../../models/Secondary_Gender/gender11.caffemodel_b16_gpu0_fp16.engine
gpu-id=0
batch-size=16
gie-unique-id=2
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=config_infer_gender_classifier.txt
[secondary-gie1]
enable=1
model-engine-file=../../models/Secondary_Age/age.model_b16_gpu0_fp16.engine
gpu-id=0
batch-size=16
gie-unique-id=3
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=config_infer_age_classifier.txt
[secondary-gie2]
enable=1
model-engine-file=../../models/Secondary_Emotion/emotion_2convs_iter_3000.caffemodel_b16_gpu0_fp16.engine
gpu-id=0
batch-size=16
gie-unique-id=4
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=config_infer_emotion_classifier.txt
[secondary-gie3]
enable=1
model-engine-file=../../models/Secondary_Ethnicity/ethnicity_generic.caffemodel_b16_gpu0_fp16.engine
gpu-id=0
batch-size=16
gie-unique-id=5
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=config_infer_ethnicity_classifier.txt
[secondary-gie4]
enable=0
model-engine-file=../../models/Secondary_FaceMask/caffe_model_1_iter_100000.caffemodel_b16_gpu0_fp16.engine
gpu-id=0
batch-size=16
gie-unique-id=6
operate-on-gie-id=1
operate-on-class-ids=0;
config-file=config_infer_mask_classifier.txt
[tests]
file-loop=1
config_infer_emotion_classifier.txt
################################################################################
# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
################################################################################
# Following properties are mandatory when engine files are not specified:
# int8-calib-file(Only in INT8)
# Caffemodel mandatory properties: model-file, proto-file, output-blob-names
# UFF: uff-file, input-dims, uff-input-blob-name, output-blob-names
# ONNX: onnx-file
#
# Mandatory properties for detectors:
# num-detected-classes
#
# Optional properties for detectors:
# cluster-mode(Default=Group Rectangles), interval(Primary mode only, Default=0)
# custom-lib-path,
# parse-bbox-func-name
#
# Mandatory properties for classifiers:
# classifier-threshold, is-classifier
#
# Optional properties for classifiers:
# classifier-async-mode(Secondary mode only, Default=false)
#
# Optional properties in secondary mode:
# operate-on-gie-id(Default=0), operate-on-class-ids(Defaults to all classes),
# input-object-min-width, input-object-min-height, input-object-max-width,
# input-object-max-height
#
# Following properties are always recommended:
# batch-size(Default=1)
#
# Other optional properties:
# net-scale-factor(Default=1), network-mode(Default=0 i.e FP32),
# model-color-format(Default=0 i.e. RGB) model-engine-file, labelfile-path,
# mean-file, gie-unique-id(Default=0), offsets, process-mode (Default=1 i.e. primary),
# custom-lib-path, network-mode(Default=0 i.e FP32)
#
# The values in the config file are overridden by values set through GObject
# properties.
[property]
gpu-id=0
net-scale-factor=1
model-file=../../../samples/models/Secondary_Emotion/emotion_2convs_iter_3000.caffemodel
proto-file=../../../samples/models/Secondary_Emotion/deploy_2convs.prototxt
model-engine-file=../../../samples/models/Secondary_Emotion/emotion_2convs_iter_3000.caffemodel_b16_gpu0_fp16.engine
labelfile-path=../../../samples/models/Secondary_Emotion/labels.txt
force-implicit-batch-dim=1
batch-size=16
# 0=FP32, 1=INT8, 2=FP16 mode
network-mode=1
input-object-min-width=64
input-object-min-height=64
process-mode=2
model-color-format=1
gpu-id=0
gie-unique-id=4
operate-on-gie-id=1
operate-on-class-ids=0
is-classifier=1
output-blob-names=prob
classifier-async-mode=1
classifier-threshold=0.51
#scaling-filter=0
#scaling-compute-hw=0
labels.txt
neutral;anger;disgust;fear;happy;sadness;surprise
These are the classes on which the custom emotion model was trained.
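For reference, my understanding of how the displayed label is chosen (assuming the default softmax parsing of the output blob named prob): nvinfer takes the argmax over the seven class probabilities and attaches the corresponding entry from this semicolon-separated list, provided it exceeds classifier-threshold. A tiny Python illustration of that mapping (the probability values are made up):
labels = "neutral;anger;disgust;fear;happy;sadness;surprise".split(";")

def pick_label(prob, threshold=0.51):
    # My understanding of the default classifier parsing: argmax over the
    # softmax output, kept only if it clears classifier-threshold from the
    # config file; otherwise no label is attached for this object.
    idx = max(range(len(prob)), key=lambda i: prob[i])
    return labels[idx] if prob[idx] >= threshold else None

# Made-up 7-way softmax output from the emotion model
print(pick_label([0.05, 0.02, 0.03, 0.04, 0.70, 0.06, 0.10]))  # -> happy
So if the order of this list did not match the order of the classes the network was trained with, the displayed label would look arbitrary even when the model itself works, which is one of the things I would like to rule out.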