How to serialize/stringify a whole NvDsFrameMeta?

• Hardware Platform: Jetson
• DeepStream Version: 5.1
• JetPack Version (valid for Jetson only): 4.5.1
• TensorRT Version: 7.1.3
• Issue Type: questions

Is it possible to serialize/stringify a whole NvDsFrameMeta?
For example, the function osd_sink_pad_buffer_probe in /opt/nvidia/deepstream/deepstream-5.1/sources/apps/sample_apps/deepstream-test1 requires reading each field manually. However, most of the time we aren't familiar with every field. Therefore, I want to know how to serialize/stringify a whole NvDsFrameMeta directly, so that I can get the complete inference results quickly.

https://docs.nvidia.com/metropolis/deepstream/sdk-api/struct__NvDsFrameMeta.html

Based on your hint, I modified the example:

#include <fstream>

#include <gst/gst.h>
#include <nlohmann/json.hpp>

#include "gstnvdsmeta.h"

static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad,
    GstPadProbeInfo *info, gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  NvDsObjectMeta *obj_meta = NULL;
  guint vehicle_count = 0;
  guint person_count = 0;
  guint lp_count = 0;
  guint label_i = 0;
  NvDsMetaList * l_frame = NULL;
  NvDsMetaList * l_obj = NULL;
  NvDsMetaList * l_class = NULL;
  NvDsMetaList * l_label = NULL;
  NvDsDisplayMeta *display_meta = NULL;
  NvDsClassifierMeta * class_meta = NULL;
  NvDsLabelInfo * label_info = NULL;
  GstClockTime now;
  perf_measure *perf = (perf_measure *)(u_data);

  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);

  now = g_get_monotonic_time();

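  /* Track the interval between consecutive buffers to accumulate an
     average frame time across the run. */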
  if(perf->pre_time == GST_CLOCK_TIME_NONE) {
    perf->pre_time = now;
    perf->total_time = GST_CLOCK_TIME_NONE;
  } else {
    if(perf->total_time == GST_CLOCK_TIME_NONE) {
      perf->total_time = (now - perf->pre_time);
    } else {
      perf->total_time += (now - perf->pre_time);
    }
    perf->pre_time = now;
    perf->count++;
  }

  /* Note: keeping a single "frame" object assumes streammux batch-size=1;
     with a larger batch, each loop iteration below overwrites the previous
     frame's entry. */
  nlohmann::json js = nlohmann::json::object();
  js["frame"] = nlohmann::json::object();

  for(l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
    int offset = 0;
    if(!frame_meta){
      continue;
    }
    
    js["frame"]["source_id"] = frame_meta->source_id;
    js["frame"]["frame_num"] = frame_meta->frame_num;
    js["frame"]["object"] = nlohmann::json::array();

    for(l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *)(l_obj->data);
      nlohmann::json obj = nlohmann::json::object();

      if(!obj_meta){
        continue;
      }

      obj["rect_left"] = obj_meta->rect_params.left;
      obj["rect_top"] = obj_meta->rect_params.top;
      obj["rect_width"] = obj_meta->rect_params.width;
      obj["rect_height"] = obj_meta->rect_params.height;
      obj["rect_confidence"] = obj_meta->confidence;

      obj["unique_component_id"] = obj_meta->unique_component_id;
      obj["class_id"] = obj_meta->class_id;
      obj["object_id"] = obj_meta->object_id;

      // Check that the object has been detected by the primary detector and that the class id is that of vehicles/persons. 
      if(obj_meta->unique_component_id == PRIMARY_DETECTOR_UID) {
        if(obj_meta->class_id == PGIE_CLASS_ID_VEHICLE){
          vehicle_count++;
          obj["class_string"] = "vehicle";
        }
          
        if(obj_meta->class_id == PGIE_CLASS_ID_PERSON){
          person_count++;
          obj["class_string"] = "person";
        }
      }

      if(obj_meta->unique_component_id == SECONDARY_DETECTOR_UID) {
        if(obj_meta->class_id == SGIE_CLASS_ID_LPD) {
          lp_count++;
          /* Print this info only when operating in secondary mode. */
          if(obj_meta->parent){
            g_print("License plate found for parent object %p(type=%s)\n",
              obj_meta->parent, pgie_classes_str[obj_meta->parent->class_id]);
            obj["class_str"] = pgie_classes_str[obj_meta->parent->class_id];
          }
            

          obj_meta->text_params.set_bg_clr = 1;
          obj_meta->text_params.text_bg_clr.red = 0.0;
          obj_meta->text_params.text_bg_clr.green = 0.0;
          obj_meta->text_params.text_bg_clr.blue = 0.0;
          obj_meta->text_params.text_bg_clr.alpha = 0.0;

          obj_meta->text_params.font_params.font_color.red = 1.0;
          obj_meta->text_params.font_params.font_color.green = 1.0;
          obj_meta->text_params.font_params.font_color.blue = 0.0;
          obj_meta->text_params.font_params.font_color.alpha = 1.0;
          obj_meta->text_params.font_params.font_size = 12;
        }
      }

      for(l_class = obj_meta->classifier_meta_list; l_class != NULL; l_class = l_class->next) {
        class_meta = (NvDsClassifierMeta *)(l_class->data);
        if(!class_meta) continue;
        if(class_meta->unique_component_id == SECONDARY_CLASSIFIER_UID) {
          for(label_i = 0, l_label = class_meta->label_info_list;
              label_i < class_meta->num_labels && l_label;
              label_i++, l_label = l_label->next) {
            label_info = (NvDsLabelInfo *)(l_label->data);
            if(label_info) {
              if(label_info->label_id == 0 && label_info->result_class_id == 1) {
                g_print("Plate License %s\n",label_info->result_label);
                obj["plate"] = label_info->result_label;
              }
            }
          }
        }
      }

      js["frame"]["object"].push_back(obj);
    }

    display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
    NvOSD_TextParams *txt_params  = &display_meta->text_params[0];
    display_meta->num_labels = 1;
    txt_params->display_text = (char *) g_malloc0(MAX_DISPLAY_LEN);
    offset = snprintf(txt_params->display_text, MAX_DISPLAY_LEN,
                 "Person = %d ", person_count);
    offset += snprintf(txt_params->display_text + offset, MAX_DISPLAY_LEN - offset,
                 "Vehicle = %d ", vehicle_count);

    /* Now set the offsets where the string should appear */
    txt_params->x_offset = 10;
    txt_params->y_offset = 12;

    /* Font, font-color and font-size. Use a string literal: the original
       stack buffer would dangle after this probe returns, before nvdsosd
       renders the text. */
    txt_params->font_params.font_name = (char *) "Serif";
    txt_params->font_params.font_size = 10;
    txt_params->font_params.font_color.red = 1.0;
    txt_params->font_params.font_color.green = 1.0;
    txt_params->font_params.font_color.blue = 1.0;
    txt_params->font_params.font_color.alpha = 1.0;

    /* Text background color */
    txt_params->set_bg_clr = 1;
    txt_params->text_bg_clr.red = 0.0;
    txt_params->text_bg_clr.green = 0.0;
    txt_params->text_bg_clr.blue = 0.0;
    txt_params->text_bg_clr.alpha = 1.0;

    nvds_add_display_meta_to_frame(frame_meta, display_meta);
  }

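  /* Append one JSON document per buffer to the output file (JSON Lines style). */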
  std::ofstream fd;
  fd.open("/tmp/js_out.txt", std::ios::out | std::ios::app );
  fd << js.dump() << "\n";
  fd.close();

  g_print("Frame Number = %d Vehicle Count = %d Person Count = %d"
           " License Plate Count = %d\n",
           frame_number, vehicle_count, person_count,
           lp_count);
  frame_number++;
  total_plate_number += lp_count;
  return GST_PAD_PROBE_OK;
}
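
For reference, the probe also relies on several definitions that are not shown in the snippet. A minimal sketch of what they could look like, in the style of the deepstream-test samples (the UID and class-ID values here are assumptions and must match your nvinfer configs):

#define MAX_DISPLAY_LEN 64

/* Assumed gie-unique-ids and class ids (hypothetical values). */
#define PRIMARY_DETECTOR_UID 1
#define SECONDARY_DETECTOR_UID 2
#define SECONDARY_CLASSIFIER_UID 3
#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2
#define SGIE_CLASS_ID_LPD 0

/* Bookkeeping for the average inter-buffer time measured in the probe. */
typedef struct {
  GstClockTime pre_time;   /* timestamp of the previous buffer */
  GstClockTime total_time; /* accumulated inter-buffer time */
  guint count;             /* number of measured intervals */
} perf_measure;

static gchar pgie_classes_str[4][32] =
    {"Vehicle", "TwoWheeler", "Person", "RoadSign"};

static guint frame_number = 0;
static guint total_plate_number = 0;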

This code fetches each required value manually, which is very inconvenient. What I mean is: what if DeepStream provided a method to output the complete inference results directly? The output could be something like JSON, YAML, XML, etc.

DeepStream is just an SDK. How to output the inference results is the application's job, not the SDK's. You can customize your own output format and delivery; there is no limitation from DeepStream.
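
Agreed that this is application-side work, but it only has to be written once if it is centralized in a helper. A rough sketch of a reusable converter covering the fields used in the probe above (the field selection and the helper name batch_meta_to_json are assumptions; extend it with whatever NvDsFrameMeta members you need):

#include <nlohmann/json.hpp>

#include "gstnvdsmeta.h"

/* Walk batch -> frames -> objects -> classifier labels into one JSON tree. */
static nlohmann::json batch_meta_to_json(NvDsBatchMeta *batch_meta) {
  nlohmann::json frames = nlohmann::json::array();
  for (NvDsMetaList *lf = batch_meta->frame_meta_list; lf; lf = lf->next) {
    NvDsFrameMeta *fm = (NvDsFrameMeta *) lf->data;
    nlohmann::json jf;
    jf["source_id"] = fm->source_id;
    jf["frame_num"] = fm->frame_num;
    jf["objects"] = nlohmann::json::array();
    for (NvDsMetaList *lo = fm->obj_meta_list; lo; lo = lo->next) {
      NvDsObjectMeta *om = (NvDsObjectMeta *) lo->data;
      nlohmann::json jo;
      jo["unique_component_id"] = om->unique_component_id;
      jo["class_id"] = om->class_id;
      jo["object_id"] = om->object_id;
      jo["confidence"] = om->confidence;
      jo["rect"] = {om->rect_params.left, om->rect_params.top,
                    om->rect_params.width, om->rect_params.height};
      jo["labels"] = nlohmann::json::array();
      for (NvDsMetaList *lc = om->classifier_meta_list; lc; lc = lc->next) {
        NvDsClassifierMeta *cm = (NvDsClassifierMeta *) lc->data;
        for (NvDsMetaList *ll = cm->label_info_list; ll; ll = ll->next) {
          NvDsLabelInfo *li = (NvDsLabelInfo *) ll->data;
          jo["labels"].push_back(li->result_label);
        }
      }
      jf["objects"].push_back(jo);
    }
    frames.push_back(jf);
  }
  return nlohmann::json{{"frames", frames}};
}

Calling batch_meta_to_json(gst_buffer_get_nvds_batch_meta(buf)).dump() once per buffer then replaces the scattered per-field assignments, and the same traversal could just as easily feed a YAML or XML writer.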
