JETSON TX2 Deepstream for face-detector (Solved)

[application]
enable-perf-measurement=1
roi-marking=0
perf-measurement-interval-sec=5
gie-kitti-output-dir=/home/nvidia/kitti1

#0 = standalone; 1 = server; 2 = client
app-mode=3

[source0]
enable=1
#Type - 1=CameraCSi 2=CameraV4L2 3=URI
type=3
camera-width=1920
camera-height=1080
camera-fps-n=25
camera-fps-d=1
camera-csi-sensor-id=0
camera-v4l2-dev-node=0
uri=rtsp://admin:hik12345@192.168.0.102:554/

[source1]
enable=1
#Type - 1=CameraCSi 2=CameraV4L2 3=URI
type=3
camera-width=1920
camera-height=1080
camera-fps-n=30
camera-fps-d=1
camera-csi-sensor-id=0
camera-v4l2-dev-node=0
uri=rtsp://admin:hik12345@192.168.0.100:554/

[sink0]
enable=1
#Type - 1=FakeSink 2=OverlaySink 3=EglSink 4=XvImageSink 5=File
type=2
display-id=0
offset-x=0
offset-y=0
width=0
height=0
sync=0
overlay-index=1
source-id=0

[sink1]
enable=0
#Type - 1=FakeSink 2=OverlaySink 3=EglSink 4=XvImageSink 5=File
type=5
#1=mp4 2=mkv
container=2
#1=h264 2=h265
codec=1
bitrate=10000000
#1=cbr 2=vbr
rc-mode=2
iframeinterval=30
#1=baseline 2=main 3=high
profile=3
output-file=out.mp4
source-id=0

[osd]
enable=1
osd-mode=2
border-width=3
text-size=10
text-color=1;1;1;1;
text-bg-color=0.3;0.3;0.3;1
font=Arial
show-clock=0
clock-x-offset=800
clock-y-offset=820
clock-text-size=12
clock-color=1;0;0;0;

[face-detector]
enable=1
mode=0
unique-id=15
interval=2
multi-stream=1

[primary-gie]
enable=1
net-scale-factor=0.0039215697906911373
model-file=file:///home/nvidia/Model/ResNet_18/ResNet_18_threeClass_VGA_pruned.caffemodel
proto-file=file:///home/nvidia/Model/ResNet_18/ResNet_18_threeClass_VGA_deploy_pruned.prototxt
model-cache=file:///home/nvidia/Model/ResNet_18/ResNet_18_threeClass_VGA_pruned.caffemodel_b2_fp16.cache
labelfile-path=file:///home/nvidia/Model/ResNet_18/labels.txt

net-stride=16
batch-size=2
#bbox-bg-color0=0;1;0;0.2
#bbox-bg-color1=0;1;1;0.2
#bbox-bg-color2=0;1;1;0.2
#bbox-bg-color3=1;0;0;0.2
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;1;1;1
bbox-border-color3=0;1;0;1
num-classes=3
class-thresholds=0.2;0.2;0.2;0.2
class-eps=0.1;0.1;0.1;0.1
class-group-thresholds=3;3;3;3
color-format=0
roi-top-offset=0;0;0;0
roi-bottom-offset=0;0;0;0
detected-min-w=0;0;0;0
detected-min-h=0;0;0;0
detected-max-w=1920;100;1920;1920
detected-max-h=1080;1080;1080;1080
interval=1

#-2 for all; -1 for none;
#To set multiple class id's use format as "1;2;0"
detect-color-class-ids=0;
gie-unique-id=1
parse-func=4
is-classifier=0
output-bbox-name=Layer11_bbox
output-blob-names=Layer11_cov

#Uncomment below lines for DBSCAN. EPS and minBoxes can be tuned for DBSCAN
#enable-dbscan=1
#class-minBoxes=4;4;4;4
#class-eps=0.7;0.7;0.7;0.7

#Bit 0: Model decryption required
crypto-flags=0

[secondary-gie4]
enable=1
net-scale-factor=1
model-file=file:///home/nvidia/Model/ivaSecondary_VehicleTypes_V1/snapshot_iter_13740.caffemodel
proto-file=file:///home/nvidia/Model/ivaSecondary_VehicleTypes_V1/deploy.prototxt
model-cache=file:///home/nvidia/Model/ivaSecondary_VehicleTypes_V1/snapshot_iter_13740.caffemodel_b2_fp16.cache
labelfile-path=file:///home/nvidia/Model/ivaSecondary_VehicleTypes_V1/labels.txt
net-stride=16
batch-size=2
num-classes=6
detected-min-w=128
detected-min-h=128
detected-max-w=1920;100;1920;1920
detected-max-h=1080;1080;1080;1080
color-format=1
interval=0
gie-unique-id=4
operate-on-gie-id=1
operate-on-class-ids=2;
is-classifier=1
output-blob-names=softmax
offsets=73.00;77.55;88.9
sgie-async-mode=1
sec-class-threshold=0.51

[secondary-gie5]
enable=1
net-scale-factor=1
model-file=file:///home/nvidia/Model/IVA_secondary_carcolor_V1/CarColorPruned.caffemodel
proto-file=file:///home/nvidia/Model/IVA_secondary_carcolor_V1/deploy.prototxt
mean-file=file:///home/nvidia/Model/IVA_secondary_carcolor_V1/mean.ppm
model-cache=file:///home/nvidia/Model/IVA_secondary_carcolor_V1/CarColorPruned.caffemodel_b2_fp16.cache
labelfile-path=file:///home/nvidia/Model/IVA_secondary_carcolor_V1/labels.txt
net-stride=16
batch-size=2
detected-min-w=128
detected-min-h=128
detected-max-w=1920;100;1920;1920
detected-max-h=1080;1080;1080;1080
color-format=1
num-classes=12
interval=0
gie-unique-id=5
operate-on-gie-id=1
operate-on-class-ids=2;
is-classifier=1
output-blob-names=softmax
sgie-async-mode=1
sec-class-threshold=0.51

[secondary-gie6]
enable=1
net-scale-factor=1
model-file=file:///home/nvidia/Model/IVASecondary_Make_V1/snapshot_iter_6240.caffemodel
proto-file=file:///home/nvidia/Model/IVASecondary_Make_V1/deploy.prototxt
model-cache=file:///home/nvidia/Model/IVASecondary_Make_V1/snapshot_iter_6240.caffemodel_b2_fp16.cache
mean-file=file:///home/nvidia/Model/IVASecondary_Make_V1/mean.ppm
labelfile-path=file:///home/nvidia/Model/IVASecondary_Make_V1/labels.txt
net-stride=16
batch-size=2
num-classes=24
detected-min-w=128
detected-min-h=128
detected-max-w=1920;100;1920;1920
detected-max-h=1080;1080;1080;1080
color-format=1
interval=0
gie-unique-id=6
operate-on-gie-id=1
operate-on-class-ids=2;
is-classifier=1
output-blob-names=softmax
sgie-async-mode=1
sec-class-threshold=0.51
crypto-flags=0

[tracker]
enable=1
tracker-width=960
tracker-height=540

[tests]
file-loop-count=0
#0=send overlaps; 1=do not send overlaps
server-overlap-mode=1
#Fixed to 1 for display color in GUI mode
color-mode=1


	CLASS_IDS TO OEPRATE ON gie_id(0) : 2: 

------------> 0,0:1,0:2,0: -----------------
------------> 0,0:1,0:2,0: -----------------
------------> 0,0,0:1,0,0:2,0,0: -----------------
------------> 0,1920,1080:1,100,1080:2,1920,1080: -----------------
------------> 0,0.200000,0.100000,3,0:1,0.200000,0.100000,3,0:2,0.200000,0.100000,3,0: -----------------
** ERROR: <create_face_detector_bin:88>: Failed to create 'face_detect0'
** ERROR: <create_face_detector_bin:144>: create_face_detector_bin failed
** ERROR: <create_pipeline:1062>: create_pipeline failed
** ERROR: <pipeline_create_thread_func:1632>: Failed to create pipeline


When I use this configuration with DeepStream, I get the errors above in the terminal. What should I do to solve the problem?

If you have an example face-detector configuration for DeepStream, could you send it to me?

Hi Bingxinhu,
As far as I know, the face detection plugin is not public, so please don't enable it in the config file; a minimal sketch of that change is below.
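
For example, keeping everything else in the posted config unchanged, only the [face-detector] group needs to change:

[face-detector]
#face detection plugin is not public, so keep this group disabled
enable=0
mode=0
unique-id=15
interval=2
multi-stream=1

With the group disabled, create_face_detector_bin should no longer be called and the pipeline should build.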

Thanks
wayne zhu

Hi waynezhu,

can you share whether face detection will be available in the next version of the DeepStream SDK?

Also, can you share some general information about the face detection plugin (is it based on dlib?) and pointers on how a developer could write their own, using HW acceleration on the TX1/TX2?

Thanks