How can I extract images from surface objects if there is more than 1 stream src

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) GPU
• DeepStream Version 6.3 (Docker)
• Issue Type( questions, new requirements, bugs) Questions

Hello everyone, I want to extract images from deepstream process by using the code below to convert images in surface objects to RGB images.

/*
 * Pad probe installed on the src pad of the primary nvinfer element.
 *
 * For every frame in the batch it maps the GstBuffer to reach the input
 * NvBufSurface, allocates an RGB destination surface, runs
 * NvBufSurfTransform to convert the batch into it, maps the destination
 * for CPU access, and then walks the frame's object metadata.
 *
 * NOTE(review): this snippet is truncated in the post — the object loop,
 * the frame loop, and the function body are never closed here, and no
 * NvBufSurfaceUnmap / NvBufSurfaceDestroy / gst_buffer_unmap /
 * cudaStreamDestroy is visible for the resources acquired below.
 */
static GstPadProbeReturn
pgie_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
    gpointer u_data)
{  
    NvDsMetaList *l_obj = NULL;
    NvDsObjectMeta *obj_meta = NULL;

    GstBuffer *buf = (GstBuffer *) info->data;
    NvDsMetaList * l_frame = NULL;
    /* The following locals are declared but never used in the visible part
     * of the snippet. */
    NvDsMetaList * l_user_meta = NULL;
    NvDsUserMeta *user_meta = NULL;
    NvDsInferSegmentationMeta *seg_meta_data = NULL;
    NvBufSurfaceColorFormat *color_format = NULL;
    char file_name[128];
    // get metadata
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
  l_frame = l_frame->next) 
    {
      
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);

        /* NOTE(review): the same GstBuffer is re-mapped, and the whole
         * batch re-transformed, on every iteration of the frame loop;
         * doing this once before the loop would suffice. */
        GstMapInfo in_map_info;
        NvBufSurface *surface = NULL;

        memset (&in_map_info, 0, sizeof (in_map_info));
        if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ)) {
        g_print ("Error: Failed to map gst buffer\n");
        gst_buffer_unmap (buf, &in_map_info);
        return GST_PAD_PROBE_OK;
        }
        cudaError_t cuda_err;

        NvBufSurfTransformRect src_rect, dst_rect;
        /* The mapped data of a DeepStream buffer is the NvBufSurface batch
         * descriptor. */
        surface = (NvBufSurface *) in_map_info.data;  

        int batch_size= surface->batchSize;

        /* Source and destination ROIs: the full extent of batch entry 0.
         * NOTE(review): other entries in the batch may have different
         * dimensions — TODO confirm all sources share one resolution. */
        src_rect.top   = 0;
        src_rect.left  = 0;
        src_rect.width = (guint) surface->surfaceList[0].width;
        src_rect.height= (guint) surface->surfaceList[0].height;
        dst_rect.top   = 0;
        dst_rect.left  = 0;
        dst_rect.width = (guint) surface->surfaceList[0].width;
        dst_rect.height= (guint) surface->surfaceList[0].height;

        NvBufSurfTransformParams nvbufsurface_params;
        nvbufsurface_params.src_rect = &src_rect;
        nvbufsurface_params.dst_rect = &dst_rect;
        nvbufsurface_params.transform_flag =  NVBUFSURF_TRANSFORM_CROP_SRC | NVBUFSURF_TRANSFORM_CROP_DST;
        nvbufsurface_params.transform_filter = NvBufSurfTransformInter_Default;

        NvBufSurface *dst_surface = NULL;
        NvBufSurfaceCreateParams nvbufsurface_create_params;

        // An intermediate buffer for NV12/RGBA to BGR conversion  will be
        // required. Can be skipped if custom algorithm can work directly on NV12/RGBA
        nvbufsurface_create_params.gpuId  = surface->gpuId;
        nvbufsurface_create_params.width  = (gint) surface->surfaceList[0].width;
        nvbufsurface_create_params.height = (gint) surface->surfaceList[0].height;
        nvbufsurface_create_params.size = 0;
        nvbufsurface_create_params.colorFormat = NVBUF_COLOR_FORMAT_RGB;
        nvbufsurface_create_params.layout = NVBUF_LAYOUT_PITCH;
        /* Unified memory so the destination can be mapped for CPU access
         * below (dGPU path). */
        nvbufsurface_create_params.memType = NVBUF_MEM_CUDA_UNIFIED;

        /* Return values of the CUDA calls are stored but never checked. */
        cuda_err = cudaSetDevice (surface->gpuId);

        /* NOTE(review): a new stream is created per frame and never
         * destroyed — this leaks; create one stream per probe (or per
         * pipeline) instead. */
        cudaStream_t cuda_stream;

        cuda_err=cudaStreamCreate (&cuda_stream);

        /* One destination surface per batch entry; no NvBufSurfaceDestroy
         * is visible for it in this snippet. */
        int create_result = NvBufSurfaceCreate(&dst_surface,batch_size,&nvbufsurface_create_params);	

        NvBufSurfTransformConfigParams transform_config_params;
        NvBufSurfTransform_Error err;

        transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
        transform_config_params.gpu_id = surface->gpuId;
        transform_config_params.cuda_stream = cuda_stream;
        err = NvBufSurfTransformSetSessionParams (&transform_config_params);

        /* Zero the destination, then convert the whole input batch into it. */
        NvBufSurfaceMemSet (dst_surface, 0, 0, 0);
        err = NvBufSurfTransform (surface, dst_surface, &nvbufsurface_params);
        if (err != NvBufSurfTransformError_Success) {
        g_print ("NvBufSurfTransform failed with error %d while converting buffer\n", err);
        }
        /* Map only batch entry 0, plane 0, and sync it for CPU reads;
         * other batch entries are NOT mapped here. */
        NvBufSurfaceMap (dst_surface, 0, 0, NVBUF_MAP_READ);
        NvBufSurfaceSyncForCpu (dst_surface, 0, 0);

        std::cout <<  "Index " << frame_meta->source_id << std::endl;


        /* NOTE(review): this is the bug discussed in the thread —
         * surfaceList must be indexed by frame_meta->batch_id, not
         * source_id; the two need not match (especially with
         * nvmultiurisrcbin), and since only entry 0 was mapped above, any
         * other index reads an unmapped entry (addr is NULL). */
        std::cout <<  "Surfacelist.addr " << dst_surface->surfaceList[frame_meta->source_id].mappedAddr.addr[0] << std::endl;

        // Will send to vitpose services if it's detect a person.
        for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next)
        {
          obj_meta = (NvDsObjectMeta *) (l_obj->data);
          if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) 
          {

(I got this code from: Access frame pointer in deepstream-app - #30 by cbstryker)

Then I access the frame data by using this

dst_surface->surfaceList[0].mappedAddr.addr[0]

The above method works for one source, but if I increase the number of sources, then I cannot access all of the source frames. e.g., if I have two sources, I can only access the frames for one of the sources…

I tried to access the frame data by using this code.

dst_surface->surfaceList[frame_meta->source_id].mappedAddr.addr[0]

When the frame_meta->source_id is 0, it points to some memory address, e.g., 0x7f412d000000, but when the frame_meta->source_id is 1, it returns 0, which I suspect points to NULL.

Note: I use Gst-nvmultiurisrcbin to accept input stream, I do not know whether it’s related or not.

Does anyone know how I can access the frames from each source?

Appreciate for any kind of help.

Could you attach your whole pipeline and where you added this probe function?

Hi, Thank you for your replies

Currently, my whole pipeline is

nvmultiurisrcbin → primary-nvinference-engine (yolov7) → nvtracker → nvdslogger → nvmultistreamtiler → nvvideoconvert → nvdosd → fakesink

I added this probe functions at src of primary-nvinference-engine.

Note: I tried to change nvmultiurisrcbin to streammux and I can access frames from each sources.

dst_surface->surfaceList[0].mappedAddr.addr[0]

but somehow the frame got mixed up. e.g., frame_meta->source_id is 0 but somehow I got the frame from src 1.

You can try to use the frame_meta->batch_id instead of frame_meta->source_id. The batch is not strictly grouped according to one frame from source1 and one frame from source2.

Hi, I tried to check the different between source_id and batch_id, I print the results during obj_meta_list loops.

Current source_id, batch_id: 0 0
Curent filename: streamsrc_0_framenum_0.jpg
Current source_id, batch_id: 0 0
Curent filename: streamsrc_0_framenum_1.jpg
Current source_id, batch_id: 0 0
Curent filename: streamsrc_0_framenum_2.jpg
Current source_id, batch_id: 0 0
Curent filename: streamsrc_0_framenum_3.jpg
Current source_id, batch_id: 1 1
Curent filename: streamsrc_1_framenum_4.jpg
Current source_id, batch_id: 1 1
Curent filename: streamsrc_1_framenum_5.jpg
Current source_id, batch_id: 1 1
Curent filename: streamsrc_1_framenum_6.jpg
Current source_id, batch_id: 1 1
Curent filename: streamsrc_1_framenum_7.jpg
Current source_id, batch_id: 1 1
Curent filename: streamsrc_1_framenum_8.jpg
Current source_id, batch_id: 0 0

When I check the saved images, all of them (1-8) come from source 0, but some of the source_id and batch_id values are 1.

In theory, the batch ID does not correspond one-to-one with the source. But the source id should correspond with the source.
You mean the 8 pictures are from the source 0. But the log show it’s from the source 1, is that right?
Could you attach your source code so that we can reproduce the issues?

Hi, yuweiw

That’s correct, the log show that it’s from source 1 when the images inside come from source 0.
I have attached my source code as your requested.

deepstream_yolo_detection.zip (7.0 KB)

Could you attach the Makefile too?

Sure, here is my makefile generated from cmake.
Makefile.zip (2.0 KB)

Could you try to use nvstreamdemux plugin to separate the stream? The metadatalist and surfaceList are not one-to-one correspondence.

Do you mean using the nvstreamdemux plugin after the streammux plugin to separate each stream into multiple branches, then try saving the image from each of the branches?

Do you have any example of how to do that?

Yes. We have similar demo for this usage. You can refer to deepstream_parallel_inference_app.

Hi, I change from using NvBufSurfTransform to nvvidconv with capsfilters to convert from NV12 to RGBA and problem got solved.

1 Like

Glad to hear that. Could you attach your new pipeline and the probe code for others to reference?

Sure! I’m happy to help.

My current pipeline is streammux → pgie → nvtracker → nvvideoconverter → capsfilter → nvdslogger → nvmultistreamtiler-> nvosd → fakesink

The key is that, usually, the surface object provides output images in NV12 format. By using nvvideoconvert with a capsfilter, you can convert the images from NV12 to RGBA format.

Code snippet of how to change NV12 to RGBA

    /* Use convertor to convert from NV12 to RGBA as required by nvosd */
    nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
    /* nvbuf-memory-type = 3: presumably NVBUF_MEM_CUDA_UNIFIED on dGPU,
     * so that a downstream probe can map the surface for CPU access —
     * TODO confirm against the NvBufSurfaceMemType enum for this
     * DeepStream version. */
    g_object_set (G_OBJECT (nvvidconv), "nvbuf-memory-type", 3, NULL);


    // caps to convert from nv12 to rgba
    nvvidconv_cap = gst_element_factory_make("capsfilter", "nvvidconv_cap");

    /* create cap for filter */
    //caps = gst_caps_from_string ("video/x-raw(memory:NVMM),format=RGBA");
    /* Force RGBA output from nvvideoconvert; per the thread, the author
     * keeps RGBA (rather than RGB) because nvmultistreamtiler downstream
     * consumes RGBA. */
    caps = gst_caps_from_string ("video/x-raw(memory:NVMM),format=RGBA");
    g_object_set (G_OBJECT (nvvidconv_cap), "caps", caps, NULL);

Example Probe

/**
 * Pad probe attached to the src pad of the capsfilter (buffers here are
 * RGBA, enforced by the upstream nvvideoconvert + capsfilter).
 *
 * For every detected person in the batch: wraps the mapped RGBA plane in a
 * cv::Mat (zero-copy), converts it to BGR, JPEG-encodes the detection crop
 * into memory, and writes the full converted frame to disk as
 * cam_<source_id>_out_<n>.jpeg.
 *
 * Fixes vs. the original post:
 *  - `cnt ++` was missing its semicolon (compile error);
 *  - the GstBuffer was unmapped immediately after mapping while ip_surf
 *    (which points into the mapped memory) was still used afterwards;
 *  - a cudaStream_t was created on every call, never used, never destroyed;
 *  - NvBufSurfaceMap was never balanced with NvBufSurfaceUnmap;
 *  - the crop rectangle was not clamped to the frame, so an out-of-bounds
 *    detection would make cv::Mat::operator()(cv::Rect) throw;
 *  - unused locals (counters, dead gRPC variables — including a gint8 that
 *    would have truncated source_id) removed.
 *
 * @param pad  probed pad (unused)
 * @param info probe info; info->data is the batched GstBuffer
 * @param ctx  user context (unused)
 * @return GST_PAD_PROBE_OK normally; GST_PAD_PROBE_DROP if the buffer
 *         cannot be mapped.
 */
static GstPadProbeReturn
capsfilter_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  GstMapInfo inmap = GST_MAP_INFO_INIT;

  if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
    GST_ERROR ("input buffer mapinfo failed");
    return GST_PAD_PROBE_DROP;
  }
  /* inmap.data is the NvBufSurface batch descriptor; keep the buffer
   * mapped for as long as ip_surf is in use. */
  NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;

  cudaError_t cuda_err = cudaSetDevice (ip_surf->gpuId);
  if (cuda_err != cudaSuccess) {
    GST_ERROR ("cudaSetDevice(%d) failed: %d", ip_surf->gpuId, (int) cuda_err);
    gst_buffer_unmap (buf, &inmap);
    return GST_PAD_PROBE_OK;
  }

  /* Map every surface/plane of the batch (-1, -1) for CPU reads, and make
   * any pending device writes visible to the CPU. */
  if (NvBufSurfaceMap (ip_surf, -1, -1, NVBUF_MAP_READ) != 0) {
    GST_ERROR ("NvBufSurfaceMap failed");
    gst_buffer_unmap (buf, &inmap);
    return GST_PAD_PROBE_OK;
  }
  NvBufSurfaceSyncForCpu (ip_surf, -1, -1);

  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);
  if (batch_meta != NULL) {
    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL;
        l_frame = l_frame->next) {
      NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;

      /* batch_id indexes surfaceList; source_id only names the stream and
       * need not match the batch slot (see the thread's discussion). */
      NvBufSurfaceParams *sp = &ip_surf->surfaceList[frame_meta->batch_id];

      for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
          l_obj = l_obj->next) {
        NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;

        /* In case of this pipeline, only save images when a person is
         * detected. */
        if (obj_meta->class_id != PGIE_CLASS_ID_PERSON)
          continue;

        /* Running index across all saved frames (function-lifetime). */
        static int cnt = 0;

        /* Wrap the mapped RGBA plane without copying; the row pitch may be
         * larger than width * 4, hence the explicit step argument. */
        cv::Mat in_mat (sp->planeParams.height[0], sp->planeParams.width[0],
            CV_8UC4, sp->mappedAddr.addr[0], sp->planeParams.pitch[0]);

        /* Convert from RGBA to BGR for OpenCV usage. */
        cv::Mat mat_BGR;
        cv::cvtColor (in_mat, mat_BGR, cv::COLOR_RGBA2BGR);

        /* Clamp the detection rectangle to the frame bounds; cropping with
         * an out-of-range rect would throw. */
        NvOSD_RectParams rp = obj_meta->rect_params;
        cv::Rect crop_rect ((int) rp.left, (int) rp.top,
            (int) rp.width, (int) rp.height);
        crop_rect &= cv::Rect (0, 0, mat_BGR.cols, mat_BGR.rows);
        if (crop_rect.area () <= 0)
          continue;

        /* Crop the detection and JPEG-encode it into memory (e.g. for
         * sending over gRPC). */
        cv::Mat crop = mat_BGR (crop_rect).clone ();
        std::vector<uchar> buffer;
        cv::imencode (".jpg", crop, buffer);

        /* Save the full converted frame for inspection. */
        cv::imwrite ("cam_" + std::to_string (frame_meta->source_id) + "_" +
            "out_" + std::to_string (cnt) + ".jpeg", mat_BGR);
        cnt++;
      }
    }
  }

  /* Balance NvBufSurfaceMap and gst_buffer_map from above. */
  NvBufSurfaceUnmap (ip_surf, -1, -1);
  gst_buffer_unmap (buf, &inmap);
  return GST_PAD_PROBE_OK;
}
2 Likes

I think, it is faster to set caps format to RGB instead of RGBA:

GstCaps *caps = gst_caps_from_string ("video/x-raw(memory:NVMM),format=RGB");

Then you do not have to call:

cv::cvtColor (in_mat, mat_BGR, cv::COLOR_RGBA2BGR);

Anyway, Is there any faster solution to crop detection frames than this? For example, is it possible to write cuda code to crop detection on the GPU instead of the CPU?

Thanks.

1 Like

Yes. You can refer to our API code sources\includes\nvbufsurftransform.h to use the NvBufSurfTransform do the transformation. If there are new questions, you can open a new topic. Thanks

Hi Thank you for your kind advice.

The reason why I still use RGBA is because I thought (based on gst-inspect-1.0) that nvmultistreamtiler plugins received RGBA as an input. In the case that I do not need nvmultistreamtiler, your suggestion should be faster.

1 Like

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.