Gst-nvinfer - Convert buffer object to GRAY / Float32 and normalize by dividing by 255.0

In this function:

static GstFlowReturn
get_converted_buffer_object (GstNvInfer * nvinfer, NvBufSurface * src_surf,
    NvBufSurfaceParams * src_frame, NvOSD_RectParams * crop_rect_params,
    NvBufSurface * dest_surf, NvBufSurfaceParams * dest_frame,
    gdouble & ratio_x, gdouble & ratio_y, void *destCudaPtr)
{
  guint src_left = GST_ROUND_UP_2 ((unsigned int)crop_rect_params->left);
  guint src_top = GST_ROUND_UP_2 ((unsigned int)crop_rect_params->top);
  guint src_width = GST_ROUND_DOWN_2 ((unsigned int)crop_rect_params->width);
  guint src_height = GST_ROUND_DOWN_2 ((unsigned int)crop_rect_params->height);
  guint dest_width, dest_height;

  //printf("src_width:%d, src_height:%d ", src_width, src_height);

  if (nvinfer->maintain_aspect_ratio) {
    printf("_1_");
    /* Calculate the destination width and height required to maintain
     * the aspect ratio. */
    double hdest = dest_frame->width * src_height / (double) src_width;
    double wdest = dest_frame->height * src_width / (double) src_height;
    int pixel_size;
    cudaError_t cudaReturn;

    if (hdest <= dest_frame->height) {
      dest_width = dest_frame->width;
      dest_height = hdest;
    } else {
      dest_width = wdest;
      dest_height = dest_frame->height;
    }
    printf("dest_width:%d,dest_height:%d ",dest_width,dest_height);
    
    switch (dest_frame->colorFormat) {
      case NVBUF_COLOR_FORMAT_RGBA:
        pixel_size = 4;
        break;
      case NVBUF_COLOR_FORMAT_RGB:
        pixel_size = 3;
        break;
      case NVBUF_COLOR_FORMAT_GRAY8:
      case NVBUF_COLOR_FORMAT_NV12:
        pixel_size = 1;
        break;
      default:
        g_assert_not_reached ();
        break;
    }

    /* Pad the scaled image with black color. */
    cudaReturn =
        cudaMemset2DAsync ((uint8_t *) destCudaPtr + pixel_size * dest_width,
        dest_frame->planeParams.pitch[0], 0,
        pixel_size * (dest_frame->width - dest_width), dest_frame->height,
        nvinfer->convertStream);
        
    if (cudaReturn != cudaSuccess) {
      GST_ERROR_OBJECT (nvinfer,
          "cudaMemset2DAsync failed with error %s while converting buffer",
          cudaGetErrorName (cudaReturn));
      return GST_FLOW_ERROR;
    }
    cudaReturn =
        cudaMemset2DAsync ((uint8_t *) destCudaPtr +
        dest_frame->planeParams.pitch[0] * dest_height,
        dest_frame->planeParams.pitch[0], 0, pixel_size * dest_width,
        dest_frame->height - dest_height, nvinfer->convertStream);
        
    if (cudaReturn != cudaSuccess) {
      GST_ERROR_OBJECT (nvinfer,
          "cudaMemset2DAsync failed with error %s while converting buffer",
          cudaGetErrorName (cudaReturn));
      return GST_FLOW_ERROR;
    }
  } else {
    //printf("_2_");
    dest_width = nvinfer->network_width;
    dest_height = nvinfer->network_height;
    printf("dest_width:%d,dest_height:%d ",dest_width,dest_height);
  }
  //printf("w:%d, h:%d", dest_width, dest_height);
  /* Calculate the scaling ratio of the frame / object crop. This will be
   * required later for rescaling the detector output boxes to input resolution.
   */
  ratio_x = (double) dest_width / src_width;
  ratio_y = (double) dest_height / src_height;

  /* Create temporary src and dest surfaces for NvBufSurfTransform API. */
  nvinfer->tmp_surf.surfaceList[nvinfer->tmp_surf.numFilled] = *src_frame;

  /* Set the source ROI. Could be entire frame or an object. */
  nvinfer->transform_params.src_rect[nvinfer->tmp_surf.numFilled] =
      {src_top, src_left, src_width, src_height};
  /* Set the dest ROI. Could be the entire destination frame or part of it to
   * maintain aspect ratio. */
  nvinfer->transform_params.dst_rect[nvinfer->tmp_surf.numFilled] =
      {0, 0, dest_width, dest_height};

  nvinfer->tmp_surf.numFilled++;

  return GST_FLOW_OK;
}
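
For context on the question below: if the GRAY / Float32 conversion and the divide-by-255.0 normalization were added by hand on the GPU, it could look roughly like the sketch below, launched on nvinfer->convertStream after the transform finishes. The kernel name, the packed-RGBA source layout, the separately allocated float destination buffer and the BT.601 luma weights are assumptions for illustration only; none of this is part of gstnvinfer.

#include <cuda_runtime.h>

/* Illustrative sketch: convert a packed RGBA (uint8) image into a planar
 * single-channel float32 image normalized to the 0..1 range. */
__global__ void
rgba_to_gray_norm (const unsigned char *src, int src_pitch, float *dst,
    int width, int height)
{
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height)
    return;

  const unsigned char *px = src + y * src_pitch + 4 * x;
  /* BT.601 luma weights (assumed), then divide by 255.0 to normalize. */
  float gray = 0.299f * px[0] + 0.587f * px[1] + 0.114f * px[2];
  dst[y * width + x] = gray / 255.0f;
}

/* Hypothetical launcher: gray_out must be a separately allocated device
 * buffer of width * height floats; gstnvinfer does not provide one. */
static void
launch_rgba_to_gray_norm (const unsigned char *rgba, int src_pitch,
    float *gray_out, int width, int height, cudaStream_t stream)
{
  dim3 block (16, 16);
  dim3 grid ((width + block.x - 1) / block.x,
      (height + block.y - 1) / block.y);
  rgba_to_gray_norm<<<grid, block, 0, stream>>> (rgba, src_pitch, gray_out,
      width, height);
}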

How can I convert the object to grayscale Float32 and normalize each pixel value, i.e. divide it by 255.0?

Can someone help me?

What exactly is your problem? Do you want to do the color format conversion and normalization with the CUDA interface?


From my old post:

I want to convert the color format and normalize the objects detected by the PGIE before passing them to the SGIE classifier.

OK. Back to the original topic. The gstnvinfer plugin can do pre-processing such as color format conversion, scaling and normalization. Why can’t you just use it directly? Can you show us your config_infer_pgie.txt and crnn_lstm_sgie1_config.txt files?


I am still using a customized gstnvinfer, modifying the get_converted_buffer function.

config_infer_pgie.txt

[property]
gpu-id=0
net-scale-factor=0.0039215697906911373
#0=RGB, 1=BGR
model-color-format=0
custom-network-config=pgie/yolov3-tiny.cfg
model-file=pgie/yolov3-tiny.weights
#model-engine-file=model_b1_gpu0_fp16.engine
labelfile-path=pgie/labels.txt
## 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2
num-detected-classes=1
gie-unique-id=1
process-mode=1
network-type=0
#is-classifier=0
## 0=Group Rectangles, 1=DBSCAN, 2=NMS, 3= DBSCAN+NMS Hybrid, 4 = None(No clustering)
cluster-mode=2
maintain-aspect-ratio=1
parse-bbox-func-name=NvDsInferParseCustomYoloV3Tiny
custom-lib-path=pgie/nvdsinfer_custom_impl_Yolo/libnvdsinfer_custom_impl_Yolo.so
engine-create-func-name=NvDsInferYoloCudaEngineGet
#scaling-filter=0
#scaling-compute-hw=0

[class-attrs-all]
nms-iou-threshold=0.3
threshold=0.7

crnn_lstm_sgie1_config.txt

[property]
gpu-id=0
net-scale-factor=1
onnx-file=model_CHW.onnx
model-engine-file=model_CHW.onnx_b1_gpu0_fp16.engine
force-explicit-batch-dim=1
force-implicit-batch-dim=0
batch-size=1
# 0=FP32, 1=INT8, 2=FP16 mode
network-mode=2
input-object-max-width=110
input-object-max-height=47
input-object-min-width=48
input-object-min-height=24
model-color-format=2
gpu-id=0
gie-unique-id=2
operate-on-gie-id=1
operate-on-class-ids=0
network-type=1
#is-classifier=1
maintain-aspect-ratio=1
output-blob-names=softmax
parse-classifier-func-name=NvDsInferClassiferParseCustomSoftmax
custom-lib-path=/opt/nvidia/deepstream/deepstream-5.0/sources/libs/nvdsinfer_customparser_copy/libnvds_infercustomparser.so
process-mode=2

because the SGIE model input format is 1x47x110: one channel, and the pixel values need to be normalized to the 0 --> 1 range.

Can gstnvinfer normalize the object pixel values to 0 --> 1?

Your SGIE needs GRAY and normalized input, right?


yes

You have already set “model-color-format=2” in crnn_lstm_sgie1_config.txt, so nvinfer will convert the input to GRAY format. You only need to change the “net-scale-factor” value to “0.00392156862745098”, which is “1.0/255.0”, so that nvinfer will also do the normalization.

These parameters can be found in https://docs.nvidia.com/metropolis/deepstream/dev-guide/index.html#page/DeepStream%20Plugins%20Development%20Guide/deepstream_plugin_details.html#wwpID0E04DB0HA
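
For reference, the documented nvinfer pre-processing is y = net-scale-factor * (x - mean); with no mean file and net-scale-factor set to 1.0/255.0, every GRAY pixel value ends up in the 0..1 range. Concretely, the only change needed in the crnn_lstm_sgie1_config.txt shown above would be (all other lines unchanged):

[property]
# 1.0/255.0, so nvinfer normalizes GRAY pixel values into the 0..1 range
net-scale-factor=0.00392156862745098
# 2 = GRAY (already set above)
model-color-format=2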

Thank you, it works well.

Can you also reply to the original topic, How to check input of sgie classifier after resize Object by Gstdsexample, to tell my coworker the result, so that he can close that topic?


Thanks, OK, I will do that.