# DeepStream nvinfer configuration for the PPE detection model (TAO .etlt, MobileNet v1 backbone)

[property]
gpu-id=0
net-scale-factor=1.0
# Per-channel mean values subtracted during preprocessing
offsets=103.939;116.779;123.68
# Model input dimensions: channels;height;width
infer-dims=3;288;416
#model-color-format=1
labelfile-path=../../../models/ppev1.3/labels.txt
int8-calib-file=../../../models/ppev1.3/cal.bin
#tlt-model-key=NXEyNzNoZWNybGszYmg1cGYyY3NjYzdwaHY6YTk2YTNhOTItM2FkOS00NDc5LWJmM2EtODg1MjBkY2U3YmU2
# Key used to decode the encrypted .etlt model below
tlt-model-key=ZW4wdW10cGc0YXRmNmw1b3B1dWthYTJrcHE6MTQzNzQ1NjktZWI1ZC00Y2NlLTkxMjQtYmU1YzY2ZjY5MGZh
tlt-encoded-model=../../../models/ppev1.3/ppev1_3_mobilenet_v1_epoch_080.etlt
#maintain-aspect-ratio=0
#uff-input-order=0
uff-input-blob-name=Input
batch-size=4
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=1
network-type=0
num-detected-classes=6
gie-unique-id=4
gie-unique-name=ppe
cluster-mode=3
output-blob-names=BatchedNMS
# Custom bounding-box parser for TAO models with a BatchedNMS output layer
parse-bbox-func-name=NvDsInferParseCustomBatchedNMSTLT
custom-lib-path=/opt/DS_TAO/deepstream_tao_apps/post_processor/libnvds_infercustomparser_tao.so
output-tensor-meta=0

# Default per-class detection filters
[class-attrs-all]
pre-cluster-threshold=0.8
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0

# Class 3 override: a threshold above 1.0 effectively suppresses all detections for this class
[class-attrs-3]
pre-cluster-threshold=1.1
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0
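
# --- Usage sketch (not part of this config): how a pipeline would typically
# --- reference this file. File names below are illustrative assumptions.
#
# In a deepstream-app top-level config, an nvinfer config like this one is
# usually wired in through a GIE section, e.g.:
#
#   [secondary-gie0]
#   enable=1
#   gie-unique-id=4
#   config-file=config_infer_ppe.txt
#
# Or set directly on the nvinfer element in a gst-launch-1.0 pipeline:
#
#   ... ! nvinfer config-file-path=config_infer_ppe.txt ! ...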