Object detection with a custom model that outputs bboxes and face landmarks

DeepStream 6.2 on dGPU

I'm trying to write a custom parser for a model and have added the required files. I'm getting segfaults and can't work out why. Can you please look into it?

The model is a face detector that outputs bounding boxes as well as landmarks.

This is the probe function I'm using to get the bbox and landmark info:

struct bbox_lmk
{
  float x1;
  float y1;
  float x2;
  float y2;
  float s;
  float lm_x1;
  float lm_y1;
  float lm_x2;
  float lm_y2;
  float lm_x3;
  float lm_y3;
  float lm_x4;
  float lm_y4;
  float lm_x5;
  float lm_y5;
};
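Note that the probe below reads lmkList[j].landmarks[i].xn / .yn, which doesn't match the flat bbox_lmk fields above; the bbox type used in the parser declaration (defined in the attached zip) presumably looks more like the sketch below (field names are my guess, the real definition is in the zip):

struct landmark
{
  float xn; // landmark x, in network input coordinates
  float yn; // landmark y, in network input coordinates
};

struct bbox
{
  float x1, y1, x2, y2;  // box corners
  float s;               // confidence score
  landmark landmarks[5]; // five facial landmark points
};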



bool NvDsInferParseFaceDetector(
    std::vector<NvDsInferLayerInfo> const &outputLayersInfo,
    NvDsInferNetworkInfo const &networkInfo,
    NvDsInferParseDetectionParams const &detectionParams,
    std::vector<NvDsInferObjectDetectionInfo> &objectList,
    std::vector<bbox> &lmkList);

static GstPadProbeReturn
pgie_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
                          gpointer u_data)
{
  static guint use_device_mem = 0;
  GstBuffer *buf = (GstBuffer *)info->data;
  guint num_rects = 0;
  NvDsObjectMeta *obj_meta = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  int bboxLayerIndex = -1, confLayerIndex = -1, lmkLayerIndex = -1;
  NvDsInferParseDetectionParams detectionParams;
  detectionParams.numClassesConfigured = 1;
  static NvDsInferNetworkInfo networkInfo{
      PGIE_NET_WIDTH, PGIE_NET_HEIGHT, 3};

  // NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
  NvDsBatchMeta *batch_meta =
      gst_buffer_get_nvds_batch_meta(GST_BUFFER(info->data));
  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
       l_frame = l_frame->next)
  {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
    for (NvDsMetaList *l_user = frame_meta->frame_user_meta_list;
         l_user != NULL; l_user = l_user->next)
    {
      NvDsUserMeta *user_meta = (NvDsUserMeta *)l_user->data;
      if (user_meta->base_meta.meta_type != NVDSINFER_TENSOR_OUTPUT_META)
        continue;

      /* convert to tensor metadata */
      NvDsInferTensorMeta *meta =
          (NvDsInferTensorMeta *)user_meta->user_meta_data;
      for (unsigned int i = 0; i < meta->num_output_layers; i++)
      {
        NvDsInferLayerInfo *info = &meta->output_layers_info[i];
        info->buffer = meta->out_buf_ptrs_host[i];
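        /* If the tensor output is in device memory, copy it into the host buffer
         * first (size assumes FP32 output, i.e. 4 bytes per element). */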
        if (use_device_mem && meta->out_buf_ptrs_dev[i])
        {
          cudaMemcpy(meta->out_buf_ptrs_host[i], meta->out_buf_ptrs_dev[i],
                     info->inferDims.numElements * 4, cudaMemcpyDeviceToHost);
        }
      }

      std::vector<NvDsInferLayerInfo>
          outputLayersInfo(meta->output_layers_info,
                           meta->output_layers_info + meta->num_output_layers);
      std::vector<NvDsInferObjectDetectionInfo> objectList;
      std::vector<bbox> lmkList;
      NvDsInferParseFaceDetector(outputLayersInfo, networkInfo,
                                 detectionParams, objectList, lmkList);

      std::cout << "after parsing " << std::endl;
      /* Separate detection rectangles per class for grouping. */
      std::vector<std::vector<cv::Rect>> objectListClasses(1);
      std::cout << "before the for loop" << std::endl;
      for (auto &obj : objectList)
      {
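        /* objectListClasses was sized for a single class, so this assumes every
         * obj.classId returned by the parser is 0. */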
        objectListClasses[obj.classId].emplace_back(obj.left, obj.top,
                                                    obj.width, obj.height);
      }
      std::cout << "after the for loop" << std::endl;
      for (uint32_t c = 0; c < objectListClasses.size(); ++c)
      {
        auto &objlist = objectListClasses[c];
        if (objlist.empty())
          continue;

        /* Merge and cluster similar detection results */
        cv::groupRectangles(objlist, 0.5, 0.1);

        for (const auto &rect : objlist)
        {
          NvDsObjectMeta *obj_meta =
              nvds_acquire_obj_meta_from_pool(batch_meta);
          obj_meta->unique_component_id = meta->unique_id;
          obj_meta->confidence = 0.0;

          /* This is an untracked object. Set tracking_id to -1. */
          obj_meta->object_id = UNTRACKED_OBJECT_ID;
          obj_meta->class_id = 1;
          std::cout << "middle 1 " << std::endl;
          NvOSD_RectParams &rect_params = obj_meta->rect_params;
          NvOSD_TextParams &text_params = obj_meta->text_params;

          /* Assign bounding box coordinates. */
          rect_params.left = rect.x * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
          rect_params.top = rect.y * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;
          rect_params.width = rect.width * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
          rect_params.height =
              rect.height * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;

          /* Border of width 3. */
          rect_params.border_width = 3;
          rect_params.has_bg_color = 0;
          rect_params.border_color = (NvOSD_ColorParams){
              1, 0, 0, 1};

          /* display_text requires heap allocated memory. */
          text_params.display_text = g_strdup(pgie_classes_str[c]);
          /* Display text above the left top corner of the object. */
          text_params.x_offset = rect_params.left;
          text_params.y_offset = rect_params.top - 10;
          /* Set black background for the text. */
          text_params.set_bg_clr = 1;
          text_params.text_bg_clr = (NvOSD_ColorParams){
              0, 0, 0, 1};
          /* Font face, size and color. */
          text_params.font_params.font_name = (gchar *)"Serif";
          text_params.font_params.font_size = 11;
          text_params.font_params.font_color = (NvOSD_ColorParams){
              1, 1, 1, 1};
          nvds_add_obj_meta_to_frame(frame_meta, obj_meta, NULL);
          std::cout << "after adding obj meta to frame meta  " << std::endl;
        }
        NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
        NvOSD_CircleParams *circle_params = display_meta->circle_params;
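        /* circle_params points into a fixed array of MAX_ELEMENTS_IN_DISPLAY_META
         * entries inside NvDsDisplayMeta, so num_circles must stay within that bound. */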
        std::cout << "getting circle params   " << std::endl;
        for (size_t j = 0; j < lmkList.size(); j++)
        {
          for (int i = 0; i < 5; i++) // this may only work for one face in the image
          {
            circle_params[i].xc = lmkList[j].landmarks[i].xn * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
            circle_params[i].yc = lmkList[j].landmarks[i].yn * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;
            circle_params[i].radius = 2;
            circle_params[i].circle_color = (NvOSD_ColorParams){0.0, 1.0, 0.0, 1.0};
            display_meta->num_circles++;
          }
        }
        nvds_add_display_meta_to_frame(frame_meta, display_meta);

      }
      std::cout << "after adding metadata" << std::endl;
    }
  }
  return GST_PAD_PROBE_OK;
}
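For reference, the landmark-drawing block above is the part the code comment flags as only working for one face: circle_params is a fixed array of MAX_ELEMENTS_IN_DISPLAY_META (16) entries, the inner loop reuses slots 0-4 for every face, and num_circles keeps growing. A bounds-checked variant might look roughly like this (just a sketch, assuming the same lmkList layout):

NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
for (size_t j = 0; j < lmkList.size(); j++)
{
  for (int i = 0; i < 5; i++)
  {
    /* Start a new display meta once the current one is full. */
    if (display_meta->num_circles == MAX_ELEMENTS_IN_DISPLAY_META)
    {
      nvds_add_display_meta_to_frame(frame_meta, display_meta);
      display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    }
    NvOSD_CircleParams &cp = display_meta->circle_params[display_meta->num_circles];
    cp.xc = lmkList[j].landmarks[i].xn * MUXER_OUTPUT_WIDTH / PGIE_NET_WIDTH;
    cp.yc = lmkList[j].landmarks[i].yn * MUXER_OUTPUT_HEIGHT / PGIE_NET_HEIGHT;
    cp.radius = 2;
    cp.circle_color = (NvOSD_ColorParams){0.0, 1.0, 0.0, 1.0};
    display_meta->num_circles++;
  }
}
if (display_meta->num_circles > 0)
  nvds_add_display_meta_to_frame(frame_meta, display_meta);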

oneMB.zip (3.4 MB)

The custom parser is in the zip file I have uploaded.

There has been no update from you for a while, so we assume this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks

  1. When testing the project, the model can't detect any faces. Please check whether the nvinfer configuration is correct. If it is, please share the test JPG.
  2. Can you use gdb to get the crash stack?
  3. Please refer to the NVIDIA facial landmarks sample, deepstream-faciallandmark-app.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.