How to draw a rectangle on an NvBufSurface with nvll_osd_draw_rectangles?

• Hardware Platform: GPU
• DeepStream Version: 6.3.0
• TensorRT Version: 8.6.1.6
• NVIDIA GPU Driver Version: 535.86.05
• Issue Type: questions
Hi,
I would like to send a JPEG with rectangles drawn on it to the msgbroker.
Referring to /opt/nvidia/deepstream/deepstream-6.3/sources/gst-plugins/gst-nvdsosd/gstnvdsosd.c, I made some changes based on deepstream_test5_app, like this:

static void
bbox_generated_probe_after_analytics (AppCtx * ctx, GstBuffer * buf,
    NvDsBatchMeta * batch_meta, guint index)
{
  AppCtx5 *ctx5 = (AppCtx5 *)ctx;
  GstMapInfo inmap = GST_MAP_INFO_INIT;
  if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
    GST_ERROR ("input buffer mapinfo failed");
    return;
  }
  NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;
  gst_buffer_unmap (buf, &inmap);

  NvDsObjectMeta *obj_meta = NULL;
  GstClockTime buffer_pts;
  guint32 stream_id;
  NvBufSurface surface;
  NvOSD_FrameRectParams frame_rect_params;
  int rect_cnt = 0, obj_cnt = 0;
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    stream_id = frame_meta->source_id;
    GstClockTime buf_ntp_time = 0;
    if (playback_utc == FALSE) {
      /** Calculate the buffer-NTP-time
       * derived from this stream's RTCP Sender Report here:
       */
      StreamSourceInfo *src_stream = &testAppCtx->streams[stream_id];
      buf_ntp_time = frame_meta->ntp_timestamp;

      if (buf_ntp_time < src_stream->last_ntp_time) {
        NVGSTDS_WARN_MSG_V ("Source %d: NTP timestamps are backward in time."
            " Current: %lu previous: %lu", stream_id, buf_ntp_time,
            src_stream->last_ntp_time);
      }
      src_stream->last_ntp_time = buf_ntp_time;
    }

    surface = *ip_surf;
    surface.surfaceList = &(ip_surf->surfaceList[frame_meta->batch_id]);
    surface.numFilled = surface.batchSize = 1;
    for (GList *l = frame_meta->obj_meta_list; l != NULL; l = l->next) {
      /* Now using above information we need to form a text that should
       * be displayed on top of the bounding box, so lets form it here. */

      obj_meta = (NvDsObjectMeta *) (l->data);

      ctx5->rect_params[rect_cnt++] = obj_meta->rect_params;
      obj_cnt++;
      if (rect_cnt == MAX_OSD_ELEMS) {
        frame_rect_params.num_rects = rect_cnt;
        frame_rect_params.rect_params_list = ctx5->rect_params;
        /** Use of buf_ptr is deprecated, use 'frame_rect_params.surf' instead */
        frame_rect_params.buf_ptr = NULL;
        frame_rect_params.mode = MODE_GPU;
        frame_rect_params.surf = &surface;
        nvll_osd_draw_rectangles (ctx5->nvdsosd_context, &frame_rect_params);
        rect_cnt = 0;
      }

      {
        /**
         * Enable only if this callback is after tiler
         * NOTE: Scaling back code-commented
         * now that bbox_generated_probe_after_analytics() is post analytics
         * (say pgie, tracker or sgie)
         * and before tiler, no plugin shall scale metadata and will be
         * corresponding to the nvstreammux resolution
         */
        float scaleW = 0;
        float scaleH = 0;
        /* Frequency of messages to be send will be based on use case.
         * Here message is being sent for first object every 30 frames.
         */
        buffer_pts = frame_meta->buf_pts;
        if (!ctx5->base.config.streammux_config.pipeline_width
            || !ctx5->base.config.streammux_config.pipeline_height) {
          g_print ("invalid pipeline params\n");
          return;
        }
        LOGD ("stream %d==%d [%d X %d]\n", frame_meta->source_id,
            frame_meta->pad_index, frame_meta->source_frame_width,
            frame_meta->source_frame_height)
        scaleW = (float) frame_meta->source_frame_width / (float) ctx5->base.config.streammux_config.pipeline_width;
        scaleH = (float) frame_meta->source_frame_height / (float) ctx5->base.config.streammux_config.pipeline_height;

        if (playback_utc == FALSE) {
          /** Use the buffer-NTP-time derived from this stream's RTCP Sender
           * Report here:
           */
          buffer_pts = buf_ntp_time;
        }
        /** Generate NvDsEventMsgMeta for every object */
        NvDsEventMsgMeta *msg_meta =
            (NvDsEventMsgMeta *) g_malloc0 (sizeof (NvDsEventMsgMeta));
        generate_event_msg_meta (ctx5, msg_meta, obj_meta->class_id, TRUE,
                  /**< useTs NOTE: Pass FALSE for files without base-timestamp in URI */
            buffer_pts,
            ctx5->base.config.multi_source_config[stream_id].uri, (int)stream_id,
            ctx5->base.config.multi_source_config[stream_id].camera_id,
            obj_meta, scaleW, scaleH, frame_meta);
        testAppCtx->streams[stream_id].meta_number++;
        NvDsUserMeta *user_event_meta =
            nvds_acquire_user_meta_from_pool (batch_meta);
        if (user_event_meta) {
          /*
           * Since generated event metadata has custom objects for
           * Vehicle / Person which are allocated dynamically, we are
           * setting copy and free function to handle those fields when
           * metadata copy happens between two components.
           */
          user_event_meta->user_meta_data = (void *) msg_meta;
          user_event_meta->base_meta.batch_meta = batch_meta;
          user_event_meta->base_meta.meta_type = NVDS_EVENT_MSG_META;
          user_event_meta->base_meta.copy_func =
              (NvDsMetaCopyFunc) meta_copy_func;
          user_event_meta->base_meta.release_func =
              (NvDsMetaReleaseFunc) meta_free_func;
          nvds_add_user_meta_to_frame (frame_meta, user_event_meta);
        } else {
          g_print ("Error in attaching event meta to buffer\n");
        }
      }
    }
    frame_rect_params.num_rects = rect_cnt;
    frame_rect_params.rect_params_list = ctx5->rect_params;
    /** Use of buf_ptr is deprecated, use 'frame_rect_params.surf' instead */
    frame_rect_params.buf_ptr = NULL;
    frame_rect_params.mode = MODE_GPU;
    frame_rect_params.surf = &surface;
    int ret = nvll_osd_draw_rectangles (ctx5->nvdsosd_context, &frame_rect_params);

    if (0 != obj_cnt) {
      NvDsObjEncUsrArgs frameData = { 0 };
      /* Preset */
      frameData.isFrame = 1;
      /* To be set by user */
      frameData.saveImg = TRUE;
      frameData.attachUsrMeta = TRUE;
      /* Set if Image scaling Required */
      frameData.scaleImg = FALSE;
      frameData.scaledWidth = 0;
      frameData.scaledHeight = 0;
      /* Quality */
      frameData.quality = 80;
      /* Main Function Call */
      nvds_obj_enc_process (ctx5->obj_ctx_handle, &frameData, ip_surf, NULL, frame_meta);
    }

    testAppCtx->streams[stream_id].frameCount++;
  }
  nvds_obj_enc_finish (ctx5->obj_ctx_handle);
}

The nvdsosd_context has been initialized with nvll_osd_create_context(). I can get the image from the msgbroker, but there is no rectangle on it. Is there something wrong with my code?

You mean there are no bboxes on the image? You can print the relevant parameters to check whether they are OK.
Also, for the app code there is no need to use the low-level interface to draw rectangles. You can just get the display meta and set the parameters on it.
https://github.com/NVIDIA-AI-IOT/deepstream_tao_apps/blob/master/apps/tao_others/deepstream-emotion-app/deepstream_emotion_app.cpp#L238C28-L238C39
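
For reference, a minimal sketch of the display-meta approach (the function name and probe placement here are only illustrative; the NvDsDisplayMeta APIs come from nvdsmeta.h, and a downstream nvdsosd element does the actual drawing):

#include "gstnvdsmeta.h"

/* Copy object bboxes into display meta so that a downstream nvdsosd
 * element renders them. Assumed to be called from a buffer probe that
 * already has the NvDsBatchMeta. */
static void
attach_bbox_display_meta (NvDsBatchMeta * batch_meta)
{
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    NvDsDisplayMeta *display_meta =
        nvds_acquire_display_meta_from_pool (batch_meta);

    for (NvDsMetaList * l_obj = frame_meta->obj_meta_list; l_obj != NULL;
        l_obj = l_obj->next) {
      NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;
      if (display_meta->num_rects >= MAX_ELEMENTS_IN_DISPLAY_META)
        break;                  /* one display meta holds a limited number of rects */
      display_meta->rect_params[display_meta->num_rects++] =
          obj_meta->rect_params;
    }
    nvds_add_display_meta_to_frame (frame_meta, display_meta);
  }
}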

Yes, no bboxes. As far as I know, drawing bboxes via display meta relies on the nvdsosd plugin, but there is no such plugin before GstNvMsgBroker in the pipeline of the basic deepstream-app, so I prefer to draw the bboxes with nvll_osd_draw_rectangles. I printed frame_rect_params.rect_params_list[0], and it looks like this:

left=563.174316
top=58.0448914
width=283.35614
height=731.833862
border_width=3
border_color.red=1
border_color.green=0
border_color.blue=0
border_color.alpha=1
has_bg_color=0
reserved=0
bg_color.red=0
bg_color.green=0
bg_color.blue=0
bg_color.alpha=0
has_color_info=0
color_id=0

Maybe the way I get the surface is not right; it looks like this:

surface = *ip_surf;
surface.surfaceList = &(ip_surf->surfaceList[frame_meta->batch_id]);
surface.numFilled = surface.batchSize = 1;

Can you give me some advice or an example on using nvll_osd_draw_rectangles?

We have a config group for the osd plugin in deepstream-test5, i.e. the [osd] group in /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-test5/configs/test5_config_file_src_infer.txt.
If there are no bboxes, we can check that issue first.
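
For reference, that group typically looks like this (key names follow the reference app's config parser; the values here are only illustrative):

[osd]
enable=1
gpu-id=0
border-width=1
text-size=15
text-color=1;1;1;1;
text-bg-color=0.45;0.45;0.45;1
font=Arial
show-clock=0
nvbuf-memory-type=0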

About how to get the NvBufSurface, you can refer to the source code of this probe in /opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-image-meta-test/deepstream_image_meta_test.c:

static GstPadProbeReturn
pgie_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info, gpointer ctx)
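
Condensed, the surface retrieval in that probe boils down to this pattern (error handling and the encode call omitted):

  GstBuffer *buf = (GstBuffer *) info->data;
  GstMapInfo inmap = GST_MAP_INFO_INIT;
  if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
    GST_ERROR ("input buffer mapinfo failed");
    return GST_PAD_PROBE_OK;
  }
  /* For NVMM buffers the mapped data is the batched NvBufSurface. */
  NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;
  gst_buffer_unmap (buf, &inmap);
  /* frame_meta->batch_id then indexes the per-frame surface:
   * ip_surf->surfaceList[frame_meta->batch_id] */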

The osd is enabled. I mean, the osd plugin sits in the pipeline like this:

GstDsNvMultiUriBin─GstNvinfer─GstNvTracker─GstNvDsAnalytics┬GstNvMsgConv─GstNvMsgBroker
                                                           └GstNvMultiStreamTiler─GstNvDsOsd─Sink

The GstNvDsOsd is on the other branch, so I can't get an image with bboxes in GstNvMsgBroker.
As for deepstream_image_meta_test.c, I've tried it. It gets the batched NvBufSurface via gst_buffer_map, but I haven't seen any parameter in nvll_osd_draw_rectangles that indicates which surface to draw on.

OK. Did you invoke the nvll_osd_apply API in your code? All the OSD operations require this API to be called before they take effect.
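
To make that concrete, a condensed sketch of the draw-then-apply sequence in GPU mode (nvdsosd_context comes from nvll_osd_create_context(); rect_params, num_rects, and surf are assumed to be prepared as in your code above):

  NvOSD_FrameRectParams frame_rect_params = { 0 };
  frame_rect_params.num_rects = num_rects;            /* rectangles to draw */
  frame_rect_params.rect_params_list = rect_params;   /* NvOSD_RectParams array */
  frame_rect_params.buf_ptr = NULL;                    /* deprecated, use 'surf' */
  frame_rect_params.mode = MODE_GPU;
  frame_rect_params.surf = surf;                       /* target NvBufSurface */

  nvll_osd_draw_rectangles (nvdsosd_context, &frame_rect_params);
  /* Per the note above, the operations only take effect once
   * nvll_osd_apply () is called on the same surface. */
  if (nvll_osd_apply (nvdsosd_context, NULL, surf) == -1)
    g_print ("Unable to draw shapes onto video frame by GPU\n");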

Still not working. Did I miss something else? Here is my code after the change:

static void
bbox_generated_probe_after_analytics (AppCtx * ctx, GstBuffer * buf,
    NvDsBatchMeta * batch_meta, guint index)
{
  // get surface
  GstMapInfo inmap = GST_MAP_INFO_INIT;
  if (!gst_buffer_map (buf, &inmap, GST_MAP_READ)) {
    GST_ERROR ("input buffer mapinfo failed");
    return;
  }
  NvBufSurface *ip_surf = (NvBufSurface *) inmap.data;

  NvDsDisplayMeta *display_meta = NULL;
  NvDsObjectMeta *obj_meta = NULL;
  NvBufSurface surface;
  NvOSD_FrameRectParams frame_rect_params;

  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    // get display_meta from pool
    display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    // get single surface from ip_surf
    surface = *ip_surf;
    surface.surfaceList = &(ip_surf->surfaceList[frame_meta->batch_id]);
    surface.numFilled = surface.batchSize = 1;

    // fill rect_params
    for (GList *l = frame_meta->obj_meta_list; l != NULL; l = l->next) {
      obj_meta = (NvDsObjectMeta *) (l->data);
      display_meta->rect_params[display_meta->num_rects++] = obj_meta->rect_params;
    }
    // draw bbox
    frame_rect_params.num_rects = (gint)display_meta->num_rects;
    frame_rect_params.rect_params_list = display_meta->rect_params;
    /** Use of buf_ptr is deprecated, use 'frame_rect_params.surf' instead */
    frame_rect_params.buf_ptr = NULL;
    frame_rect_params.mode = MODE_GPU;
    frame_rect_params.surf = &surface;
    nvll_osd_draw_rectangles (nvdsosd_context, &frame_rect_params);
    if (frame_rect_params.mode == MODE_GPU) {
      if (nvll_osd_apply (nvdsosd_context, NULL, &surface) == -1) {
        g_print ("Unable to draw shapes onto video frame by GPU\n");
      }
    }

    // encode surface to jpeg and attach it to user meta
    NvDsObjEncUsrArgs frameData = { 0 };
    frameData.isFrame = 1;
    frameData.saveImg = FALSE;
    frameData.attachUsrMeta = TRUE;
    frameData.scaleImg = FALSE;
    frameData.scaledWidth = 0;
    frameData.scaledHeight = 0;
    frameData.quality = 80;
    nvds_obj_enc_process (obj_ctx_handle, &frameData, ip_surf, NULL, frame_meta);
  }
  nvds_obj_enc_finish (obj_ctx_handle);
  gst_buffer_unmap (buf, &inmap);
}

There may be some issue with how the context values are obtained in your code. Can you attach code that we can run? Or you can try CPU mode first.

I've tried CPU mode, but it doesn't work either. Here is my code, based on /opt/nvidia/deepstream/deepstream-6.3/sources/apps/sample_apps/deepstream-test5, with no changes to other files or configs.
deepstream-test5.zip (17.1 KB)