Getting strange cv::Mat from NvBufSurface

• Jetson AGX Xavier
• Deepstream 6.0
• JetPack 4.6
• TensorRT 8.0.1
• NVIDIA GPU Driver 32.6.1

Hi, I’m trying to get a cv::Mat image from an NvBufSurface and save it to a file, but I got an image like this:
image

I’m getting the surface in the analytics_done_buf_prob() function with this code:

GstMapInfo in_map_info;

    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
        GST_ERROR ("input buffer mapinfo failed");
        gst_buffer_unmap (buf, &in_map_info);
    }
    NvBufSurface *surface = (NvBufSurface *) inmap.data;

    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;

        if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL){

            if (NvBufSurfaceMap(surface, frame_meta->batch_id, 0, NVBUF_MAP_READ) != 0)
            {
            std::cout << "Faild to map the surface buffer\n";
            }
        }
        NvBufSurfaceSyncForCpu (surface, 0, 0);

        guint height = surface->surfaceList[frame_meta->batch_id].height;
        guint width = surface->surfaceList[frame_meta->batch_id].width;
        Mat nv12_mat = Mat(height, width, CV_8UC1, surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
            surface->surfaceList[frame_meta->batch_id].pitch);

        Mat rgba_mat;
        cvtColor(nv12_mat, rgba_mat, CV_YUV2BGRA_NV12);
        imwrite("Mat.jpg", rgba_mat);
        NvBufSurfaceUnMap (surface, frame_meta->batch_id, 0);
        guint i = 0;
        for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
            NvDsObjectMeta *obj = (NvDsObjectMeta *) l_obj->data;
            std::string str = appCtx->predNames[i];
            std::cout<<obj->class_id<<std::endl;
            
            /*Init curl for POST-request*/
            CURL *curl;
            CURLcode res;
            std::string readBuffer;

            curl = curl_easy_init();
            std::cout<<curl<<std::endl;
            if (obj->class_id == 2){
                str = "Unmasked |" + str;
                if(curl) {
                    curl_easy_setopt(curl, CURLOPT_URL, "http://192.168.253.62:8000/");
                    curl_easy_setopt(curl, CURLOPT_POST, 1);
                    curl_easy_setopt(curl, CURLOPT_POSTFIELDS, "Id=" + appCtx->predNames[i] + "&cam=1");
                    res = curl_easy_perform(curl);
                    curl_easy_cleanup(curl);
                }
            }
    }

Can you try CV_8UC4 when creating the nv12_mat from surface?
CV_8UC1 keeps only one channel, and that may be the root cause of the wrong image.

It seems that the UV channel is not right. May be you can refer the sample code:

/opt/nvidia/deepstream/deepstream-XX/sources/gst-plugins/gst-dsexample/gstdsexample.cpp

Thanks

I made a new plugin and it worked well. But I still can’t understand why every matrix on the pad is grayscale.

@gff1038m ,About the new grayscale picture problems. Could you open a new topic and show us your simple complete code? Thanks

Well, it’s not new. In the plugin I can get a normal image (for example, in the blur function). But on the pad, if I get an image with one channel it comes out normally, and if I try to get a 3-channel image I get the image I attached.

Have you tried change the CV_8UC1 to CV_8UC4?

        Mat nv12_mat = Mat(height, width, CV_8UC1, surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
            surface->surfaceList[frame_meta->batch_id].pitch);

Yes, I got dimension errors.

Did you refer the code that I mentioned before in

/opt/nvidia/deepstream/deepstream-XX/sources/gst-plugins/gst-dsexample/gstdsexample.cpp
/**
 * Called when the element receives an input buffer from the upstream element.
 *
 * In-place transform: maps the incoming GstBuffer to obtain the batched
 * NvBufSurface, then either processes each full frame or each detected
 * object crop (optionally blurring objects in place with OpenCV), attaches
 * result metadata, and unmaps the buffer before returning.
 *
 * Returns GST_FLOW_OK on success, GST_FLOW_ERROR otherwise.
 */
static GstFlowReturn
gst_dsexample_transform_ip (GstBaseTransform * btrans, GstBuffer * inbuf)
{
  GstDsExample *dsexample = GST_DSEXAMPLE (btrans);
  GstMapInfo in_map_info;
  GstFlowReturn flow_ret = GST_FLOW_ERROR;
  gdouble scale_ratio = 1.0;
  DsExampleOutput *output;

  NvBufSurface *surface = NULL;
  NvDsBatchMeta *batch_meta = NULL;
  NvDsFrameMeta *frame_meta = NULL;
  NvDsMetaList * l_frame = NULL;
  guint i = 0;

  dsexample->frame_num++;
  CHECK_CUDA_STATUS (cudaSetDevice (dsexample->gpu_id),
      "Unable to set cuda device");

  /* Map the GstBuffer; its data pointer is the batched NvBufSurface. */
  memset (&in_map_info, 0, sizeof (in_map_info));
  if (!gst_buffer_map (inbuf, &in_map_info, GST_MAP_READ)) {
    g_print ("Error: Failed to map gst buffer\n");
    goto error;
  }

  nvds_set_input_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  surface = (NvBufSurface *) in_map_info.data;
  GST_DEBUG_OBJECT (dsexample,
      "Processing Frame %" G_GUINT64_FORMAT " Surface %p\n",
      dsexample->frame_num, surface);

  if (CHECK_NVDS_MEMORY_AND_GPUID (dsexample, surface))
    goto error;

  batch_meta = gst_buffer_get_nvds_batch_meta (inbuf);
  if (batch_meta == nullptr) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvDsBatchMeta not found for input buffer."), (NULL));
    /* NOTE(review): this (and the other early `return GST_FLOW_ERROR`
     * statements below) bypasses the `error:` label, so the mapped buffer
     * is never unmapped and the output timestamp is not set — confirm
     * whether these should `goto error` instead. */
    return GST_FLOW_ERROR;
  }

  if (dsexample->process_full_frame) {
    /* Full-frame mode: scale/convert each frame in the batch and run the
     * example library on it. */
    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next)
    {
      frame_meta = (NvDsFrameMeta *) (l_frame->data);
      NvOSD_RectParams rect_params;

      /* Scale the entire frame to processing resolution */
      rect_params.left = 0;
      rect_params.top = 0;
      rect_params.width = dsexample->video_info.width;
      rect_params.height = dsexample->video_info.height;

      /* Scale and convert the frame */
      if (get_converted_mat (dsexample, surface, i, &rect_params,
            scale_ratio, dsexample->video_info.width,
            dsexample->video_info.height) != GST_FLOW_OK) {
        goto error;
      }

      /* Process to get the output */
#ifdef WITH_OPENCV
      output =
          DsExampleProcess (dsexample->dsexamplelib_ctx,
          dsexample->cvmat->data);
#else
      output =
          DsExampleProcess (dsexample->dsexamplelib_ctx,
          (unsigned char *)dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0]);
#endif
      /* Attach the metadata for the full frame */
      attach_metadata_full_frame (dsexample, frame_meta, scale_ratio, output, i);
      i++;
      free (output);
    }

  } else {
    /* Using object crops as input to the algorithm. The objects are detected by
     * the primary detector */
    NvDsMetaList * l_obj = NULL;
    NvDsObjectMeta *obj_meta = NULL;

    if(!dsexample->is_integrated) {
      /* On dGPU, CPU-side OpenCV blurring needs host-accessible memory. */
      if (dsexample->blur_objects) {
        if (!(surface->memType == NVBUF_MEM_CUDA_UNIFIED || surface->memType == NVBUF_MEM_CUDA_PINNED)){
          GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
              ("%s:need NVBUF_MEM_CUDA_UNIFIED or NVBUF_MEM_CUDA_PINNED memory for opencv blurring",__func__), (NULL));
          return GST_FLOW_ERROR;
        }
      }
    }

    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next)
    {
      frame_meta = (NvDsFrameMeta *) (l_frame->data);

#ifdef WITH_OPENCV
      cv::Mat in_mat;

      if (dsexample->blur_objects) {
        /* Map the buffer so that it can be accessed by CPU */
        if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL){
          if (NvBufSurfaceMap (surface, frame_meta->batch_id, 0, NVBUF_MAP_READ_WRITE) != 0){
            GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
                ("%s:buffer map to be accessed by CPU failed", __func__), (NULL));
            return GST_FLOW_ERROR;
          }
        }

        /* Cache the mapped data for CPU access */
        if(dsexample->inter_buf->memType == NVBUF_MEM_SURFACE_ARRAY)
          NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);

        /* Wrap plane 0 of the mapped surface in a cv::Mat (no copy);
         * CV_8UC4 with planeParams width/height/pitch — the surface is
         * treated here as a 4-channel packed format. */
        in_mat =
            cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
            surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
            surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
            surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
      }
#endif

      for (l_obj = frame_meta->obj_meta_list; l_obj != NULL;
          l_obj = l_obj->next)
      {
        obj_meta = (NvDsObjectMeta *) (l_obj->data);

        if (dsexample->blur_objects) {
          /* gaussian blur the detected objects using opencv */
#ifdef WITH_OPENCV
          if (blur_objects (dsexample, frame_meta->batch_id,
            &obj_meta->rect_params, in_mat) != GST_FLOW_OK) {
          /* Error in blurring, skip processing on object. */
            GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
            ("blurring the object failed"), (NULL));
            if (NvBufSurfaceUnMap (surface, frame_meta->batch_id, 0)){
              GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
                ("%s:buffer unmap to be accessed by CPU failed", __func__), (NULL));
            }
            return GST_FLOW_ERROR;
          }
          /* Blur mode only blurs — skip the classification path below. */
          continue;
#else
          GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
          ("OpenCV has been deprecated, hence object blurring will not work."
          "Enable OpenCV compilation in gst-dsexample Makefile by setting 'WITH_OPENCV:=1"), (NULL));
          return GST_FLOW_ERROR;
#endif
        }

        /* Should not process on objects smaller than MIN_INPUT_OBJECT_WIDTH x MIN_INPUT_OBJECT_HEIGHT
         * since it will cause hardware scaling issues. */
        if (obj_meta->rect_params.width < MIN_INPUT_OBJECT_WIDTH ||
            obj_meta->rect_params.height < MIN_INPUT_OBJECT_HEIGHT)
          continue;

        /* Crop and scale the object */
        if (get_converted_mat (dsexample,
              surface, frame_meta->batch_id, &obj_meta->rect_params,
              scale_ratio, dsexample->video_info.width,
              dsexample->video_info.height) != GST_FLOW_OK) {
          /* Error in conversion, skip processing on object. */
          continue;
        }

#ifdef WITH_OPENCV
        /* Process the object crop to obtain label */
        output = DsExampleProcess (dsexample->dsexamplelib_ctx,
            dsexample->cvmat->data);
#else
        /* Process the object crop to obtain label */
        output = DsExampleProcess (dsexample->dsexamplelib_ctx,
            (unsigned char *)dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0]);
#endif

        /* Attach labels for the object */
        attach_metadata_object (dsexample, obj_meta, output);

        free (output);
      }

      if (dsexample->blur_objects) {
      /* Cache the mapped data for device access */
        if(dsexample->inter_buf->memType == NVBUF_MEM_SURFACE_ARRAY) 
          NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);

#ifdef WITH_OPENCV
#ifdef DSEXAMPLE_DEBUG
        /* Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
        * algorithm can handle padded RGBA data. */
#if (CV_MAJOR_VERSION >= 4)
        cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
#else
        cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
#endif
        /* used to dump the converted mat to files for debug */
        static guint cnt = 0;
        cv::imwrite("out_" + std::to_string (cnt) + ".jpeg", *dsexample->cvmat);
        cnt++;
#endif
#endif
      }
    }
  }
  flow_ret = GST_FLOW_OK;

error:

  /* Common exit: stamp the output latency and release the buffer mapping. */
  nvds_set_output_system_timestamp (inbuf, GST_ELEMENT_NAME (dsexample));
  gst_buffer_unmap (inbuf, &in_map_info);
  return flow_ret;
}

It’s the same as what you are doing: saving the raw data to a JPEG file.

There is no update from you for a period, assuming this is not an issue anymore.
Hence we are closing this topic. If need further support, please open a new one.
Thanks

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.