Preprocessing of frames - gst-dsexample

Please provide complete information as applicable to your setup.

• Hardware Platform (Jetson / GPU) Jetson Nano
• DeepStream Version 5.0
• JetPack Version (valid for Jetson only) 4.4-b144
• TensorRT Version 7.1.3
• Issue Type (questions, new requirements, bugs) question

Dear all,

I am trying to apply pre-processing to frames before they reach the PGIE.
I am able to place a dsexample instance after the PGIE, filter the frames with an OpenCV sepia filter (via cv::transform), and save the image to disk:

  // Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
  // algorithm can handle padded RGBA data.
  in_mat =
      cv::Mat (dest_height, dest_width,
      CV_8UC4, nvbuf->surfaceList[0].mappedAddr.addr[0],
      nvbuf->surfaceList[0].pitch);
  out_mat =
      cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
  saved_mat =
      cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);

  cv::cvtColor (in_mat, out_mat, cv::COLOR_RGBA2BGR);
  cv::transform(out_mat, saved_mat, kernel);
  

  time( &rawtime );
  info = localtime( &rawtime );
  
  static gint dump = 0;
  char filename[64];
  snprintf(filename, 64, "/home/jnano/jnanoImages/%04d_%02d_%02d_%02d_%02d_%02d.jpg", info->tm_year+1900, info->tm_mon+1, info->tm_mday, info->tm_hour, info->tm_min, info->tm_sec); // tm_mon is 0-based (+1 needed); tm_mday is already 1-based
  cv::imwrite(filename, saved_mat);
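
The kernel used with cv::transform is not shown in the snippet above. For reference, here is a minimal sketch of a standard 3x3 sepia mixing matrix for BGR data (the coefficients are my assumption, since the actual kernel definition is not posted in this thread):

  // Sketch only: commonly used sepia coefficients, rows ordered for BGR input.
  // cv::transform computes dst = kernel * (B, G, R)^T for every pixel.
  cv::Mat kernel = (cv::Mat_<float>(3, 3) <<
      0.131f, 0.534f, 0.272f,   // B'
      0.168f, 0.686f, 0.349f,   // G'
      0.189f, 0.769f, 0.393f);  // R'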

However, I am not able to apply such filtering to all the frames before the PGIE.

I have tried a number of different approaches.
I have tried to modify gst-dsexample following the same approach that is used for blurring objects, but I obtain a segmentation fault:

  /* Cache the mapped data for CPU access */
  NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);
  in_mat =
      cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
      surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
      surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
      surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);

  cv::transform (in_mat, in_mat, kernel);

  in_mat.convertTo (in_mat, CV_8UC4);
  /* Cache the mapped data for device access */
  NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);

However, the call to cv::transform(in_mat, in_mat, kernel) gives me a segmentation fault :(

Moreover, I get
5168 Segmentation fault (core dumped)
even if I try to copy the buffer instead of doing a transform:
cv::Mat image_copy = in_mat.clone();
However, there is no segmentation fault at all if I use another OpenCV function such as:
cv::filter2D(in_mat, in_mat, -1, kernel);
The problem is that, of course, this function does not do what I need…
But the PGIE is correctly receiving the filtered images (you can see below that filter2D creates a white image because of the convolution):
[image: tiled output frame after cv::filter2D, washed out by the convolution]
This is the result of filter2D, which of course performs a convolution of the pixels, not what I need, which is a mixing of the different channels.

I think this gives an interesting hint: the problem might come from the specific operation that the OpenCV function cv::transform performs on the DeepStream buffer.
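
One detail worth checking (my speculation, not verified): cv::transform produces an output with as many channels as the kernel has rows, so applying a 3x3 kernel in place to a 4-channel CV_8UC4 mat forces OpenCV to reallocate the destination, detaching it from the mapped DeepStream buffer. A sketch of a 4x4 kernel that keeps the operation genuinely in place by passing the fourth (alpha) channel through, using the same assumed sepia coefficients as above (the row order may need adjusting for an RGBA rather than BGR surface):

  // Sketch: a 4x4 kernel keeps the output CV_8UC4, so cv::transform can
  // write into the mapped surface memory instead of reallocating in_mat.
  cv::Mat kernel4 = (cv::Mat_<float>(4, 4) <<
      0.131f, 0.534f, 0.272f, 0.0f,
      0.168f, 0.686f, 0.349f, 0.0f,
      0.189f, 0.769f, 0.393f, 0.0f,
      0.0f,   0.0f,   0.0f,   1.0f);  // alpha passes through unchanged
  cv::transform (in_mat, in_mat, kernel4);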

Finally, I have also tried to edit the function get_converted_mat as follows:

static GstFlowReturn
filter_frame (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
    NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
    gint input_height)
{
  NvBufSurfTransform_Error err;
  NvBufSurfTransformConfigParams transform_config_params;
  NvBufSurfTransformParams transform_params;
  NvBufSurfTransformRect src_rect;
  NvBufSurfTransformRect dst_rect;
  NvBufSurface ip_surf;
  cv::Mat in_mat, out_mat, filtered_mat;
  ip_surf = *input_buf;
  
  time_t rawtime;
  struct tm *info;

  ip_surf.numFilled = ip_surf.batchSize = 1;
  ip_surf.surfaceList = &(input_buf->surfaceList[idx]);

  /*
  gint src_left = GST_ROUND_UP_2(crop_rect_params->left);
  gint src_top = GST_ROUND_UP_2(crop_rect_params->top);
  gint src_width = GST_ROUND_DOWN_2(crop_rect_params->width);
  gint src_height = GST_ROUND_DOWN_2(crop_rect_params->height);
  */
  gint src_left = crop_rect_params->left;
  gint src_top = crop_rect_params->top;
  gint src_width = crop_rect_params->width;
  gint src_height = crop_rect_params->height;
  //g_print("ltwh = %d %d %d %d \n", src_left, src_top, src_width, src_height);

  guint dest_width, dest_height;
  dest_width = src_width;
  dest_height = src_height;

  NvBufSurface *nvbuf;
  NvBufSurfaceCreateParams create_params;
  create_params.gpuId  = dsexample->gpu_id;
  create_params.width  = dest_width;
  create_params.height = dest_height;
  create_params.size = 0;
  create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
  create_params.layout = NVBUF_LAYOUT_PITCH;
#ifdef __aarch64__
  create_params.memType = NVBUF_MEM_DEFAULT;
#else
  create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
#endif
  NvBufSurfaceCreate (&nvbuf, 1, &create_params);

  // Configure transform session parameters for the transformation
  transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
  transform_config_params.gpu_id = dsexample->gpu_id;
  transform_config_params.cuda_stream = dsexample->cuda_stream;  
  
  // Set the transform session parameters for the conversions executed in this
  // thread.
  
  
  err = NvBufSurfTransformSetSessionParams (&transform_config_params);
  if (err != NvBufSurfTransformError_Success) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
    goto error;
  }
  

  // Calculate scaling ratio while maintaining aspect ratio
  ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);

  
  if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
    goto error;
  }
  

#ifdef __aarch64__
  if (ratio <= 1.0 / 16 || ratio >= 16.0) {
    // Currently cannot scale by ratio > 16 or < 1/16 for Jetson
    goto error;
  }
#endif
  // Set the transform ROIs for source and destination
  src_rect = {(guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
  dst_rect = {0, 0, (guint)dest_width, (guint)dest_height};

  // Set the transform parameters
  transform_params.src_rect = &src_rect;
  transform_params.dst_rect = &dst_rect;
  transform_params.transform_flag =
    NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
      NVBUFSURF_TRANSFORM_CROP_DST;
  transform_params.transform_filter = NvBufSurfTransformInter_Default;

  //Memset the memory
  NvBufSurfaceMemSet (nvbuf, 0, 0, 0);

  GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");

  // Transformation scaling+format conversion if any.
  
  
  err = NvBufSurfTransform (&ip_surf, nvbuf, &transform_params);
  if (err != NvBufSurfTransformError_Success) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvBufSurfTransform failed with error %d while converting buffer", err),
        (NULL));
    goto error;
  }
  
  // Map the buffer so that it can be accessed by CPU
  //if (NvBufSurfaceMap (nvbuf, 0, 0, NVBUF_MAP_READ) != 0){
  if (NvBufSurfaceMap (nvbuf, 0, 0, NVBUF_MAP_READ_WRITE) != 0){
    goto error;
  }

  // Cache the mapped data for CPU access
  NvBufSurfaceSyncForCpu (nvbuf, 0, 0);

  // Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
  // algorithm can handle padded RGBA data.
  in_mat =
      cv::Mat (dest_height, dest_width,
      CV_8UC4, nvbuf->surfaceList[0].mappedAddr.addr[0],
      nvbuf->surfaceList[0].pitch);
  out_mat =
      cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
  filtered_mat =
      cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);

  //cv::cvtColor (in_mat, out_mat, cv::COLOR_RGBA2BGR);
  //cv::transform(out_mat, filtered_mat, kernel);
  
  //cv::cvtColor (in_mat, in_mat, cv::COLOR_RGBA2BGR);
  cv::transform(in_mat, in_mat, kernel);
  //cv::cvtColor (in_mat, in_mat, cv::COLOR_BGR2RGBA);
  
  /* Cache the mapped data for device access */
  NvBufSurfaceSyncForDevice (nvbuf, 0, 0);
/*
  time( &rawtime );
  info = localtime( &rawtime );
  
  static gint dump = 0;
  char filename[64];
  snprintf(filename, 64, "/home/jnano/jnanoImages/%04d_%02d_%02d_%02d_%02d_%02d.jpg", info->tm_year+1900, info->tm_mon+1, info->tm_mday+1, info->tm_hour, info->tm_min, info->tm_sec);
  cv::imwrite(filename, saved_mat);
*/
  if (NvBufSurfaceUnMap (nvbuf, 0, 0)){
    goto error;
  }
  NvBufSurfaceDestroy(nvbuf);

#ifdef __aarch64__
  // To use the converted buffer in CUDA, create an EGLImage and then use
  // CUDA-EGL interop APIs
  if (USE_EGLIMAGE) {
    if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
      goto error;
    }

    // dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage
    // Use interop APIs cuGraphicsEGLRegisterImage and
    // cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA

    // Destroy the EGLImage
    NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
  }
#endif

  /* We will first convert only the Region of Interest (the entire frame or the
   * object bounding box) to RGB and then scale the converted RGB frame to
   * processing resolution. */
  return GST_FLOW_OK;

error:
  return GST_FLOW_ERROR;
}

With this approach I do not get any segmentation fault, but the PGIE is NOT receiving the filtered image. Instead, what the PGIE receives is the original frames. It seems that I am not writing into the buffer as was happening in the previous example.
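
(A sketch of what might be missing, assuming the filtered pixels really do land in the scratch surface nvbuf: transforming the scratch buffer back into the input surface before destroying it, so that the in-place stream buffer that flows to the PGIE is actually updated.)

  /* Sketch: copy the filtered scratch buffer back into the input surface. */
  err = NvBufSurfTransform (nvbuf, &ip_surf, &transform_params);
  if (err != NvBufSurfTransformError_Success) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvBufSurfTransform failed with error %d while copying back", err),
        (NULL));
    goto error;
  }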

I hope that you can give me some suggestions.

Thank you very much!!


What is your pipeline? Can you send the code for us to reproduce the problem?

Ciao Fiona,

Of course I can.
First of all, please find attached to this message the source code for gstdsexample.cpp.
You will see in the code that I have used the input parameter "process-full-frame" as a trigger to make this custom plugin usable both for saving the frames when something is detected by the PGIE and for pre-processing all the frames that are fed to the PGIE. This second functionality is what I cannot make work.
The relevant code is from line 1136 to line 1240.
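
In outline (a minimal sketch of the two code paths, assuming the standard structure of gst_dsexample_transform_ip; the actual branching is in the attached file):

  if (dsexample->process_full_frame) {
    /* post-PGIE instance: save frames when objects were detected */
  } else {
    /* pre-PGIE instance: run filter_frame () on every full frame */
  }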
As for the pipeline, I have edited the deepstream_test3_app example.
Please find the main function below:

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL,
      *nvvidconv = NULL, *nvosd = NULL, *tiler = NULL;
  GstElement *msgconv = NULL, *msgbroker = NULL, *tee = NULL;
  GstElement *queue1 = NULL, *queue2 = NULL;
  GstElement *videobalance1 = NULL;
  GstElement *saveframe = NULL;
  GstElement *filterframe = NULL;
#ifdef PLATFORM_TEGRA
  GstElement *transform = NULL;
#endif
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *tee_render_pad = NULL;
  GstPad *tee_msg_pad = NULL;
  GstPad *tiler_src_pad = NULL;
  guint i, num_sources;
  guint tiler_rows, tiler_columns;
  guint pgie_batch_size;

  GstPad *sink_pad = NULL;
  GstPad *src_pad = NULL;
  
  

  /* Check input arguments */
  if (argc < 2) {
    g_printerr ("Usage: %s <uri1> [uri2] ... [uriN] \n", argv[0]);
    return -1;
  }
  num_sources = argc - 1;

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest3-pipeline");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
  
  //videobalance1 = gst_element_factory_make ("videobalance", "videobalance1-pre");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add (GST_BIN (pipeline), streammux);

  for (i = 0; i < num_sources; i++) {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = { };
    GstElement *source_bin = create_source_bin (i, argv[i + 1]);

    if (!source_bin) {
      g_printerr ("Failed to create source bin. Exiting.\n");
      return -1;
    }

    gst_bin_add (GST_BIN (pipeline), source_bin);

    g_snprintf (pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad (streammux, pad_name);
    if (!sinkpad) {
      g_printerr ("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad (source_bin, "src");
    if (!srcpad) {
      g_printerr ("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    gst_object_unref (srcpad);
    gst_object_unref (sinkpad);
  }

  //videobalance1 = gst_element_factory_make ("videobalance", "videobalance1-pre");
  
  /* Use nvinfer to infer on batched frame. */
  pgie = gst_element_factory_make ("nvinfer", "primary-nvinference-engine");

  /* Use nvtiler to composite the batched frames into a 2D tiled array based
   * on the source of the frames. */
  tiler = gst_element_factory_make ("nvmultistreamtiler", "nvtiler");

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
  
  /* Create msg converter to generate payload from buffer metadata */
  msgconv = gst_element_factory_make ("nvmsgconv", "nvmsg-converter");

  /* Create msg broker to send payload to server */
  msgbroker = gst_element_factory_make ("nvmsgbroker", "nvmsg-broker");

  /* Create tee to render buffer and send message simultaneously*/
  tee = gst_element_factory_make ("tee", "nvsink-tee");

  /* Create queues */
  queue1 = gst_element_factory_make ("queue", "nvtee-que1");
  queue2 = gst_element_factory_make ("queue", "nvtee-que2");

  // ############# MODIFICATION
  /* Finally render the osd output */ 
#ifdef PLATFORM_TEGRA
  //transform = gst_element_factory_make ("nvegltransform", "nvegl-transform");
  transform = gst_element_factory_make ("queue", "queue");
#endif
  //sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
//  sink = gst_element_factory_make ("nvoverlaysink", "nvvideo-renderer");
  sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");
  
  saveframe = gst_element_factory_make ("dsexample", "save-frame");
  
  filterframe = gst_element_factory_make ("dsexample", "filter-frame");
  
  
  if (!pgie || !tiler || !nvvidconv || !nvosd || !msgconv || !msgbroker || !tee
      || !queue1 || !queue2 || !sink || !saveframe || !filterframe) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

#ifdef PLATFORM_TEGRA
  if(!transform) {
    g_printerr ("One tegra element could not be created. Exiting.\n");
    return -1;
  }
#endif
  // ############# MODIFICATION
  
  //g_object_set (G_OBJECT(videobalance1), "saturation", 0.0, NULL);
  
  g_object_set (G_OBJECT(sink), "sync", FALSE, NULL);
  
  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT, "batch-size", num_sources,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set (G_OBJECT (pgie),
      "config-file-path", PGIE_CONFIG_FILE, NULL);

  /* Override the batch-size set in the config file with the number of sources. */
  g_object_get (G_OBJECT (pgie), "batch-size", &pgie_batch_size, NULL);
  if (pgie_batch_size != num_sources) {
    g_printerr
        ("WARNING: Overriding infer-config batch-size (%d) with number of sources (%d)\n",
        pgie_batch_size, num_sources);
    g_object_set (G_OBJECT (pgie), "batch-size", num_sources, NULL);
  }

  tiler_rows = (guint) sqrt (num_sources);
  tiler_columns = (guint) ceil (1.0 * num_sources / tiler_rows);
  /* we set the tiler properties here */
  g_object_set (G_OBJECT (tiler), "rows", tiler_rows, "columns", tiler_columns,
      "width", TILED_OUTPUT_WIDTH, "height", TILED_OUTPUT_HEIGHT, NULL);
  
  g_object_set (G_OBJECT(msgconv), "config", MSCONV_CONFIG_FILE, NULL);
  g_object_set (G_OBJECT(msgconv), "payload-type", schema_type, NULL);

  g_object_set (G_OBJECT(msgbroker), "proto-lib", proto_lib,
                "conn-str", conn_str, "config", cfg_file, "topic", topic, "sync", FALSE, NULL);
  
  g_object_set (G_OBJECT (saveframe), "full-frame", TRUE, NULL);
  
  g_object_set (G_OBJECT (filterframe), "full-frame", FALSE, NULL);
  
  
  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
#ifdef PLATFORM_TEGRA
  gst_bin_add_many (GST_BIN (pipeline),filterframe, pgie, tiler, nvvidconv, nvosd, tee, queue1, queue2, msgconv,
      msgbroker, transform, saveframe, sink, NULL);
  /* we link the elements together
   * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
  
  /*
  if (!gst_element_link_many (streammux, pgie, tiler, nvvidconv, nvosd, transform, sink,
          NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
  */
  
  if (!gst_element_link_many (streammux, filterframe, pgie, tiler, nvvidconv, nvosd, tee, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }

  if (!gst_element_link_many (queue1, saveframe, msgconv, msgbroker, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
  
  if (!gst_element_link_many (queue2, transform, sink, NULL)) {
      g_printerr ("Elements could not be linked. Exiting.\n");
      return -1;
  }
  
#else
gst_bin_add_many (GST_BIN (pipeline), pgie, tiler, nvvidconv, nvosd, tee, queue1, queue2, msgconv,
      msgbroker, sink, 
      NULL);
  /* we link the elements together
   * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> tee -> video-renderer
   *                                                                 |
   *                                                                 |-> msgconv -> msgbroker  */
  
  /* we link the elements together */
  /* file-source -> h264-parser -> nvh264-decoder -> nvstreammux ->
   * nvinfer -> nvvidconv -> nvosd -> tee -> video-renderer
   *                                      |
   *                                      |-> msgconv -> msgbroker  */
  
  
  if (!gst_element_link_many (streammux, pgie, tiler, nvvidconv, nvosd, sink,
          NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
  }
#endif
  
  
  
  
  sink_pad = gst_element_get_static_pad (queue1, "sink");
  tee_msg_pad = gst_element_get_request_pad (tee, "src_%u");
  tee_render_pad = gst_element_get_request_pad (tee, "src_%u");
  if (!tee_msg_pad || !tee_render_pad) {
    g_printerr ("Unable to get request pads\n");
    return -1;
  }

  if (gst_pad_link (tee_msg_pad, sink_pad) != GST_PAD_LINK_OK) {
    g_printerr ("Unable to link tee and message converter\n");
    gst_object_unref (sink_pad);
    return -1;
  }

  gst_object_unref (sink_pad);

  sink_pad = gst_element_get_static_pad (queue2, "sink");
  if (gst_pad_link (tee_render_pad, sink_pad) != GST_PAD_LINK_OK) {
    g_printerr ("Unable to link tee and render\n");
    return -1;
  }

  gst_object_unref (sink_pad);
  
  
  

  /* Let's add a probe to get informed of the generated metadata; here we
   * attach it to the src pad of the PGIE, since by that time the buffer
   * already carries the inference metadata. */
  tiler_src_pad = gst_element_get_static_pad (pgie, "src");
  if (!tiler_src_pad)
    g_print ("Unable to get src pad\n");
  else
    gst_pad_add_probe (tiler_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
        tiler_src_pad_buffer_probe, NULL, NULL);

  /* Set the pipeline to "playing" state */
  g_print ("Now playing:");
  for (i = 0; i < num_sources; i++) {
    g_print (" %s,", argv[i + 1]);
  }
  g_print ("\n");
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}
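
For clarity, this is the effective topology built above on Tegra (derived from the gst_element_link_many and tee pad-link calls):

  /* nvstreammux -> dsexample (filter-frame) -> nvinfer (PGIE) -> nvtiler ->
   * nvvidconv -> nvosd -> tee -> queue1 -> dsexample (save-frame) -> nvmsgconv -> nvmsgbroker
   *                            |
   *                            -> queue2 -> queue -> fakesink */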

If you need further information, I am available.
Thank you very much in advance for your support.

gstdsexample.cpp (50.8 KB)

What is the result with the original test3 sample app?

Ciao Fiona,

Good morning :)

Please find here below the results of the tests:

  1. Original test3 app: it works perfectly

  2. Test3 app + gst-dsexample in the pipeline to save frames after PGIE: it works perfectly

  3. Test3 app + gst-dsexample in the pipeline to save frames after PGIE + pre-processing with gst-dsexample on all frames before PGIE (implementation as in lines 1170-1240 of gstdsexample.cpp; code follows below): segmentation fault

     } else if (0) {
       //if (1) {
         NvDsMetaList * l_obj = NULL;
         for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
           l_frame = l_frame->next)
         {
           frame_meta = (NvDsFrameMeta *) (l_frame->data);
           cv::Mat in_mat, conv_mat;
           NvOSD_RectParams rect_params;
    
           /* Scale the entire frame to processing resolution */
           rect_params.left = 0;
           rect_params.top = 0;
           rect_params.width = dsexample->video_info.width;
           rect_params.height = dsexample->video_info.height;
           
           l_obj = frame_meta->obj_meta_list;
           
           if (1) {
             /* Map the buffer so that it can be accessed by CPU */
             if (surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0] == NULL){
               if (NvBufSurfaceMap (surface, frame_meta->batch_id, 0, NVBUF_MAP_READ_WRITE) != 0){
                 GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
                     ("%s:buffer map to be accessed by CPU failed", __func__), (NULL));
                 return GST_FLOW_ERROR;
               }
             }
    
             /* Cache the mapped data for CPU access */
             NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);
    
             in_mat =
                 cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
                 surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
                 surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
                 surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
           }
           
           //cv::cvtColor (in_mat, in_mat, cv::COLOR_RGBA2BGR);
           //cv::filter2D(in_mat, in_mat,-1, kernel);
           
           //cv::Mat image_copy = in_mat.clone();
           //cv::cvtColor (image_copy, image_copy, cv::COLOR_RGBA2BGR);
           //g_print ("Transform start");
           cv::transform(in_mat, in_mat, kernel);
           //g_print ("Transform done");
           //cv::cvtColor (in_mat, in_mat, cv::COLOR_BGR2RGBA);
           //in_mat.convertTo(in_mat,CV_8UC4);
           
           //g_print("\n inmat \n");
           //g_print(in_mat);
           //std::cout << "conv_mat = " << std::endl << " "  << image_copy << std::endl << std::endl;
           
           //int from_to[] = { 0,0, 1,1, 2,2, 3,3 };
           //cv::mixChannels( &in_mat, 1, &in_mat, 1, from_to, 4 );
           
           /* Process the object crop to obtain label */
           //output = DsExampleProcess (dsexample->dsexamplelib_ctx, dsexample->cvmat->data);
    
             /* Attach labels for the object */
            //attach_metadata_object (dsexample, obj_meta, output);
    
           //free (output); 
           //cv::cvtColor (in_mat, in_mat, cv::COLOR_RGBA2BGRA);
           //g_print(std::to_string(frame_meta->batch_id));
           //std::cout << surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0] << std::endl;
           NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);
             
           
         }
    
  4. Test3 app + gst-dsexample in the pipeline to save frames after PGIE + pre-processing with gst-dsexample on all frames before PGIE (implementation as in lines 1136-1169 of gstdsexample.cpp; code follows below): no segmentation fault, but the image that arrives at the PGIE is NOT processed. It seems that I am not doing anything to the buffer.

       } else if (!dsexample->process_full_frame) {
       //if (1) {
         NvDsMetaList * l_obj = NULL;
         for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
           l_frame = l_frame->next)
         {
           frame_meta = (NvDsFrameMeta *) (l_frame->data);
           NvOSD_RectParams rect_params;
    
           /* Scale the entire frame to processing resolution */
           rect_params.left = 0;
           rect_params.top = 0;
           rect_params.width = dsexample->video_info.width;
           rect_params.height = dsexample->video_info.height;
           
           l_obj = frame_meta->obj_meta_list;
             
           if (frame_meta->obj_meta_list != NULL){
             if (filter_frame (dsexample, surface, i, &rect_params,
                   scale_ratio, dsexample->video_info.width,
                   dsexample->video_info.height) != GST_FLOW_OK) {
               goto error;
             }
           }
    
           /* Process to get the output */
           output =
               DsExampleProcess (dsexample->dsexamplelib_ctx,
               dsexample->cvmat->data);
           /* Attach the metadata for the full frame */
           //attach_metadata_full_frame (dsexample, frame_meta, scale_ratio, output, i);
           i++;
           free (output);
         }
    

The function filter_frame is a modified version of get_converted_mat:

(The filter_frame implementation is identical to the one already posted above, so it is not repeated here.)

I really hope that you can help me, @Fiona.Chen.
Thank you very much in advance and have a nice day!

Dear @Fiona.Chen ,

Attempt number 3, which I described in my previous message, still gives me a segmentation fault.

However, I was able to solve the problem by changing the way memory is mapped in attempt 4.
In the function gst_dsexample_transform_ip I changed GST_MAP_READ to GST_MAP_READWRITE, so that modifications to the mapped buffer are written back to the stream:

    if (!gst_buffer_map (inbuf, &in_map_info, GST_MAP_READWRITE)) {
      g_print ("Error: Failed to map gst buffer\n");
      goto error;
    }

Then, instead of calling the get_converted_mat function, I call the following function:

static GstFlowReturn
filter_frame (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
    NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
    gint input_height)
{
  NvBufSurfTransform_Error err;
  NvBufSurfTransformConfigParams transform_config_params;
  NvBufSurfTransformParams transform_params;
  NvBufSurfTransformRect src_rect;
  NvBufSurfTransformRect dst_rect;
  NvBufSurface ip_surf;
  cv::Mat in_mat, out_mat, filtered_mat;
  ip_surf = *input_buf;
  
  time_t rawtime;
  struct tm *info;

  ip_surf.numFilled = ip_surf.batchSize = 1;
  ip_surf.surfaceList = &(input_buf->surfaceList[idx]);

  /*
  gint src_left = GST_ROUND_UP_2(crop_rect_params->left);
  gint src_top = GST_ROUND_UP_2(crop_rect_params->top);
  gint src_width = GST_ROUND_DOWN_2(crop_rect_params->width);
  gint src_height = GST_ROUND_DOWN_2(crop_rect_params->height);
  */
  gint src_left = crop_rect_params->left;
  gint src_top = crop_rect_params->top;
  gint src_width = crop_rect_params->width;
  gint src_height = crop_rect_params->height;
  //g_print("ltwh = %d %d %d %d \n", src_left, src_top, src_width, src_height);

  guint dest_width, dest_height;
  dest_width = src_width;
  dest_height = src_height;

  NvBufSurface *nvbuf;
  NvBufSurfaceCreateParams create_params;
  create_params.gpuId  = dsexample->gpu_id;
  create_params.width  = dest_width;
  create_params.height = dest_height;
  create_params.size = 0;
  create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
  create_params.layout = NVBUF_LAYOUT_PITCH;
#ifdef __aarch64__
  create_params.memType = NVBUF_MEM_DEFAULT;
#else
  create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
#endif
  NvBufSurfaceCreate (&nvbuf, 1, &create_params);

  // Configure transform session parameters for the transformation
  transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
  transform_config_params.gpu_id = dsexample->gpu_id;
  transform_config_params.cuda_stream = dsexample->cuda_stream;  
  
  // Set the transform session parameters for the conversions executed in this
  // thread.
  
  
  err = NvBufSurfTransformSetSessionParams (&transform_config_params);
  if (err != NvBufSurfTransformError_Success) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
    goto error;
  }
  

  // Calculate scaling ratio while maintaining aspect ratio
  ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);

  
  if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("%s:crop_rect_params dimensions are zero",__func__), (NULL));
    goto error;
  }
  

#ifdef __aarch64__
  if (ratio <= 1.0 / 16 || ratio >= 16.0) {
    // Currently cannot scale by ratio > 16 or < 1/16 for Jetson
    goto error;
  }
#endif
  // Set the transform ROIs for source and destination
  src_rect = {(guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
  dst_rect = {0, 0, (guint)dest_width, (guint)dest_height};

  // Set the transform parameters
  transform_params.src_rect = &src_rect;
  transform_params.dst_rect = &dst_rect;
  transform_params.transform_flag =
    NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
      NVBUFSURF_TRANSFORM_CROP_DST;
  transform_params.transform_filter = NvBufSurfTransformInter_Default;

  //Memset the memory
  NvBufSurfaceMemSet (nvbuf, 0, 0, 0);

  GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");

  // Transformation scaling+format conversion if any.
  
  
  err = NvBufSurfTransform (&ip_surf, nvbuf, &transform_params);
  if (err != NvBufSurfTransformError_Success) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvBufSurfTransform failed with error %d while converting buffer", err),
        (NULL));
    goto error;
  }
  
  // OpenCV CUDA Test
  /*
  cv::cuda::GpuMat gpuMat;
  gpuMat = cv::cuda::GpuMat(dest_height, 
                            dest_width, 
                            CV_8UC4,
                            (void *) input_buf->surfaceList[idx].dataPtr,
                            input_buf->surfaceList[idx].pitch);
  const int aDstOrder[] = {2,0,1,3};
  cv::cuda::swapChannels(gpuMat, aDstOrder);
  */
  
  // Map the buffer so that it can be accessed by CPU
  //if (NvBufSurfaceMap (nvbuf, 0, 0, NVBUF_MAP_READ) != 0){
  if (NvBufSurfaceMap (nvbuf, 0, 0, NVBUF_MAP_READ_WRITE) != 0){
    goto error;
  }

  // Cache the mapped data for CPU access
  NvBufSurfaceSyncForCpu (nvbuf, 0, 0);

  // Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
  // algorithm can handle padded RGBA data.
  in_mat =
      cv::Mat (dest_height, dest_width,
      CV_8UC4, nvbuf->surfaceList[0].mappedAddr.addr[0],
      nvbuf->surfaceList[0].pitch);
  
  
  
  /*
  out_mat =
      cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
  filtered_mat =
      cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
*/
  //cv::cvtColor (in_mat, out_mat, cv::COLOR_RGBA2BGR);
  //cv::transform(out_mat, filtered_mat, kernel);
  
  //cv::cvtColor (in_mat, in_mat, cv::COLOR_RGBA2BGR);
  cv::transform(in_mat, in_mat, kernel);
  //cv::cvtColor (in_mat, in_mat, cv::COLOR_BGR2RGBA);
  
  /* Cache the mapped data for device access */
  NvBufSurfaceSyncForDevice (nvbuf, 0, 0);
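  /* Copy the filtered scratch buffer back into the original input surface so
   * that downstream elements (the PGIE) receive the processed frame; this
   * copy-back is the step that was missing in the earlier attempt. */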
  
  NvBufSurfaceMemSet (&ip_surf, 0, 0, 0);
  
  err = NvBufSurfTransform (nvbuf, &ip_surf, &transform_params);
  if (err != NvBufSurfTransformError_Success) {
    GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
        ("NvBufSurfTransform failed with error %d while converting buffer", err),
        (NULL));
    goto error;
  }
  
/*
  time( &rawtime );
  info = localtime( &rawtime );
  
  static gint dump = 0;
  char filename[64];
  snprintf(filename, 64, "/home/jnano/jnanoImages/%04d_%02d_%02d_%02d_%02d_%02d.jpg", info->tm_year+1900, info->tm_mon+1, info->tm_mday+1, info->tm_hour, info->tm_min, info->tm_sec);
  cv::imwrite(filename, saved_mat);
*/
  if (NvBufSurfaceUnMap (nvbuf, 0, 0)){
    goto error;
  }
  NvBufSurfaceDestroy(nvbuf);

#ifdef __aarch64__
  // To use the converted buffer in CUDA, create an EGLImage and then use
  // CUDA-EGL interop APIs
  if (USE_EGLIMAGE) {
    if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
      goto error;
    }

    // dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage
    // Use interop APIs cuGraphicsEGLRegisterImage and
    // cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA

    // Destroy the EGLImage
    NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
  }
#endif

  /* We will first convert only the Region of Interest (the entire frame or the
   * object bounding box) to RGB and then scale the converted RGB frame to
   * processing resolution. */
  return GST_FLOW_OK;

error:
  return GST_FLOW_ERROR;
}