[property]
gpu-id=0
net-scale-factor=0.007843
# The model input has 3 channels and TAO UNET pre-processing expects BGR input,
# so set the color format to BGR (for grayscale models, set this to 2).
# 0=RGB, 1=BGR, 2=GRAY
#model-color-format=1
offsets=127.5;127.5;127.5
labelfile-path=/opt/nvidia/deepstream/deepstream/semantic_segment/640_640/new/labels.txt
## Replace the following paths with your own model files.
# You can provide the model as an .etlt file, or convert it to a TensorRT engine offline
# using tao-converter and provide the engine in this config file. If you provide the
# .etlt model, do not forget to also provide the model key.
#tlt-encoded-model=/opt/nvidia/deepstream/deepstream/semantic_segment/640_640/new/model.int8.etlt
tlt-model-key=nvidia_tlt
# Use this when providing a pre-built TensorRT engine.
model-engine-file=/opt/nvidia/deepstream/deepstream/semantic_segment/640_640/new/trt_int8.engine
#model-engine-file=/opt/nvidia/deepstream/deepstream-6.0/semantic_segment/640_640/new/model.int8.etlt_b1_gpu0_fp16.engine
infer-dims=3;640;640
batch-size=1
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2
num-detected-classes=22
interval=0
gie-unique-id=1
## 0=Detector, 1=Classifier, 2=Semantic Segmentation (sigmoid activation), 3=Instance Segmentation, 100=skip nvinfer postprocessing
network-type=2
# Set output-tensor-meta to 1 when network-type is 100.
#output-tensor-meta=1
# If the segmentation model used a softmax layer, TAO replaces it with argmax during
# export for optimization, so provide argmax_1 as the output blob name.
output-blob-names=argmax_1
segmentation-threshold=0.0
## Output tensor order: 0 (default) for CHW, 1 for HWC.
segmentation-output-order=0

[class-attrs-all]
roi-top-offset=0
roi-bottom-offset=0
detected-min-w=0
detected-min-h=0
detected-max-w=0
detected-max-h=0
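
# The offline conversion mentioned above, as a minimal commented sketch (not part of the
# active configuration): it assumes tao-converter is installed, and the .etlt, calibration
# cache, and engine file names below are placeholders for your own files.
#
#   tao-converter model.int8.etlt \
#       -k nvidia_tlt \
#       -d 3,640,640 \
#       -o argmax_1 \
#       -t int8 \
#       -c cal.bin \
#       -m 1 \
#       -e trt_int8.engine
#
# The resulting engine path goes in model-engine-file above; for an FP16 engine instead
# of INT8, use -t fp16 and drop the -c calibration cache.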