How to check input of sgie classifier after resize Object by Gstdsexample

Hi all, can someone help me?
I have custom project flow: video ->Pgie ObjectDetectorYolo → Gstdsexample → Sgie classifier → NvDsInferClassiferParseCustomSoftmax

I have config Gstdsexample and Sgie classifier in deepstream_app_config_yoloV3_tiny.txt like:

[primary-gie]
enable=1
gpu-id=0
model-engine-file=pgie/model_b1_gpu0_fp16.engine
#labelfile-path=pgie/labels.txt
batch-size=1
#Required by the app for OSD, not a plugin property
bbox-border-color0=1;0;0;1
bbox-border-color1=0;1;1;1
bbox-border-color2=0;0;1;1
bbox-border-color3=0;1;0;1
interval=2
gie-unique-id=1
nvbuf-memory-type=0
config-file=config_infer_pgie.txt

[secondary-gie0]
enable=1
gpu-id=0
gie-unique-id=2
operate-on-gie-id=1
operate-on-class-ids=0
nvbuf-memory-type=0
config-file=sgie1/crnn_lstm_sgie1_config.txt

[ds-example]
enable=1
processing-width=110
processing-height=47
full-frame=0
unique-id=15
gpu-id=0

  • In Gstdsexample I save the image inside gst_dsexample_transform_ip; the saved output image has been resized.

Everything runs OK, but how can I check that the object has been resized before it is passed to the Sgie? Because when I parse the custom classifier output, I see the data is not the same as when I run the model outside of DeepStream.

My sgie model input format is CHW (1, 47, 110).

Can someone help me?

Hi,
The design of dsexample is to have identical width and height in the sink pad and source pad. It cannot do resizing. For your case, you may need to customize nvinfer. Please check the source code in

/opt/nvidia/deepstream/deepstream-5.0/sources/gst-plugins/gst-nvinfer
1 Like

Thank you
Can you suggest which function needs to be customized to convert and resize the object?

Is get_converted_buffer the function to customize for the output object?

@DaneLLL help me

Hi,
The conversion is done in NvBufSurfTransform() in convert_batch_and_push_to_input_thread(). You may take a look.

1 Like

In this code, how can I convert the object to grayscale and normalize it to Float32, dividing each cell value by 255?

if (nvinfer->maintain_aspect_ratio) {
    printf("_1_");
    /* Calculate the destination width and height required to maintain
     * the aspect ratio. */
    double hdest = dest_frame->width * src_height / (double) src_width;
    double wdest = dest_frame->height * src_width / (double) src_height;
    int pixel_size;
    cudaError_t cudaReturn;

    if (hdest <= dest_frame->height) {
      dest_width = dest_frame->width;
      dest_height = hdest;
    } else {
      dest_width = wdest;
      dest_height = dest_frame->height;
    }
    printf("dest_width:%d,dest_height:%d ",dest_width,dest_height);
    
    switch (dest_frame->colorFormat) {
      case NVBUF_COLOR_FORMAT_RGBA:
        pixel_size = 4;
        break;
      case NVBUF_COLOR_FORMAT_RGB:
        pixel_size = 3;
        break;
      case NVBUF_COLOR_FORMAT_GRAY8:
      case NVBUF_COLOR_FORMAT_NV12:
        pixel_size = 1;
        break;
      default:
        g_assert_not_reached ();
        break;
    }

    /* Pad the scaled image with black color. */
    cudaReturn =
        cudaMemset2DAsync ((uint8_t *) destCudaPtr + pixel_size * dest_width,
        dest_frame->planeParams.pitch[0], 0,
        pixel_size * (dest_frame->width - dest_width), dest_frame->height,
        nvinfer->convertStream);
    if (cudaReturn != cudaSuccess) {
      GST_ERROR_OBJECT (nvinfer,
          "cudaMemset2DAsync failed with error %s while converting buffer",
          cudaGetErrorName (cudaReturn));
      return GST_FLOW_ERROR;
    }
    cudaReturn =
        cudaMemset2DAsync ((uint8_t *) destCudaPtr +
        dest_frame->planeParams.pitch[0] * dest_height,
        dest_frame->planeParams.pitch[0], 0, pixel_size * dest_width,
        dest_frame->height - dest_height, nvinfer->convertStream);
        
    if (cudaReturn != cudaSuccess) {
      GST_ERROR_OBJECT (nvinfer,
          "cudaMemset2DAsync failed with error %s while converting buffer",
          cudaGetErrorName (cudaReturn));
      return GST_FLOW_ERROR;
    }

@DaneLLL

Everything works well, following the post "Gst-nvinfer - Convert buffer object to GRAY - Float32", with normalization by dividing by 255.0.
Thank @DaneLLL @Fiona.Chen