Save frames in DeepStream pipeline || deepstream_3d_action_recognition.cpp

I want to save frames from a running DeepStream pipeline of the ActionRecognitionNet model.


static GstPadProbeReturn
pgie_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
                          gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *)info->data;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);

  // First iterate through frames to get bounding boxes
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    
    // Iterate through all objects (bounding boxes) in frame
    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
         l_obj = l_obj->next) {
      NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;
      
      // Print bounding box information
      g_print("Detected object: bbox [left: %f, top: %f, width: %f, height: %f]\n",
              obj_meta->rect_params.left, obj_meta->rect_params.top,
              obj_meta->rect_params.width, obj_meta->rect_params.height);
    }
  }

  NvDsMetaList *l_user_meta = NULL;
  NvDsUserMeta *user_meta = NULL;
  for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
       l_user_meta = l_user_meta->next)
  {
    user_meta = (NvDsUserMeta *)(l_user_meta->data);
    if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META)
    {
      GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
          (GstNvDsPreProcessBatchMeta *)(user_meta->user_meta_data);
      std::string model_dims = "";
      if (preprocess_batchmeta->tensor_meta) {
        if (preprocess_batchmeta->tensor_meta->tensor_shape.size() == MODEL_3D_SHAPES) {
          model_dims = "3D: AR - ";
        } else {
          model_dims = "2D: AR - ";
        }
      }
      for (auto &roi_meta : preprocess_batchmeta->roi_vector)
      {
        // Print ROI information which should correspond to the bounding box
        g_print("ROI: [left: %f, top: %f, width: %f, height: %f]\n",
                roi_meta.roi.left, roi_meta.roi.top,
                roi_meta.roi.width, roi_meta.roi.height);

        NvDsMetaList *l_user = NULL;
        for (l_user = roi_meta.roi_user_meta_list; l_user != NULL;
             l_user = l_user->next)
        {
          NvDsUserMeta *user_meta = (NvDsUserMeta *)(l_user->data);
          if (user_meta->base_meta.meta_type == NVDSINFER_TENSOR_OUTPUT_META)
          {
            NvDsInferTensorMeta *tensor_meta = (NvDsInferTensorMeta *)(user_meta->user_meta_data);
            gfloat max_prob = 0;
            gint class_id = -1;
            gfloat *buffer = (gfloat *)tensor_meta->out_buf_ptrs_host[0];
            for (size_t i = 0; i < tensor_meta->output_layers_info[0].inferDims.d[0]; i++)
            {
              if (buffer[i] > max_prob)
              {
                max_prob = buffer[i];
                class_id = i;
              }
            }
            const gchar *label = "";
            if (class_id >= 0 && class_id < MAX_CLASS_LEN)
              label = kActioClasseLabels[class_id];
            LOG_DEBUG("output tensor result: cls_id: %d, scrore:%.3f, label: %s", class_id, max_prob, label);
            g_print("Action recognition result for ROI: cls_id: %d, score: %.3f, label: %s\n", 
                    class_id, max_prob, label);
            if (should_save_frame(label) && max_prob > 1) { // Only save if score > 1
                save_frame(buf, roi_meta.frame_meta, label);
                g_print("[SAVED successfully] Action recognition result for ROI: cls_id: %d, score: %.3f, label: %s\n")
            }
          }
        }

        NvDsMetaList *l_classifier = NULL;
        for (l_classifier = roi_meta.classifier_meta_list; l_classifier != NULL;
             l_classifier = l_classifier->next)
        {
          NvDsClassifierMeta *classifier_meta = (NvDsClassifierMeta *)(l_classifier->data);
          NvDsLabelInfoList *l_label;
          for (l_label = classifier_meta->label_info_list; l_label != NULL;
               l_label = l_label->next)
          {
            NvDsLabelInfo *label_info = (NvDsLabelInfo *)l_label->data;

            NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
            display_meta->num_labels = 1;

            NvOSD_TextParams *txt_params = &display_meta->text_params[0];
            txt_params->display_text = (char *)g_malloc0(MAX_STR_LEN);

            snprintf(txt_params->display_text, MAX_STR_LEN - 1,
                     "%s: %s", model_dims.c_str(), label_info->result_label);
            LOG_DEBUG("classification result: cls_id: %d, label: %s", label_info->result_class_id, label_info->result_label);
            g_print("classification result: cls_id: %d, label: %s", label_info->result_class_id, label_info->result_label);
            /* Now set the offsets where the string should appear */
            txt_params->x_offset = roi_meta.roi.left;
            txt_params->y_offset = (uint32_t)std::max<int32_t>(roi_meta.roi.top - 10, 0);

            /* Font , font-color and font-size */
            txt_params->font_params.font_name = (char *)"Serif";
            txt_params->font_params.font_size = 12;
            txt_params->font_params.font_color.red = 1.0;
            txt_params->font_params.font_color.green = 1.0;
            txt_params->font_params.font_color.blue = 1.0;
            txt_params->font_params.font_color.alpha = 1.0;

            /* Text background color */
            txt_params->set_bg_clr = 1;
            txt_params->text_bg_clr.red = 0.0;
            txt_params->text_bg_clr.green = 0.0;
            txt_params->text_bg_clr.blue = 0.0;
            txt_params->text_bg_clr.alpha = 1.0;

            nvds_add_display_meta_to_frame(roi_meta.frame_meta, display_meta);
          }
        }
      }
    }
  }

In this, I am calling the save_frame function when a specific class is detected:


static bool should_save_frame(const gchar* label) {
    return true;  // NOTE: temporarily saves every frame; remove this line to enable the target-action filter below
    // Check if the detected action is one we want to save
    for (int i = 0; i < TARGET_ACTIONS; i++) {
        if (strcmp(label, kTargetActions[i]) == 0) {
            return true;
        }
    }
    return false;
}

// Create directory if it doesn't exist
void create_directory(const std::string &path) {
    struct stat info;
    if (stat(path.c_str(), &info) != 0) {
        mkdir(path.c_str(), 0777); // Create directory
    }
}

// Function to save frame as an image
void save_frame(GstBuffer *buf, NvDsFrameMeta *frame_meta, const gchar *label) {
    GstMapInfo map;
    if (!gst_buffer_map(buf, &map, GST_MAP_READ)) {
        g_printerr("Failed to map GstBuffer\n");
        return;
    }

    // Get frame data (assuming NV12 format)
    cv::Mat img(frame_meta->source_frame_height * 3 / 2, frame_meta->source_frame_width, CV_8UC1, map.data);
    
    // Convert NV12 to BGR
    cv::Mat bgr;
    cv::cvtColor(img, bgr, cv::COLOR_YUV2BGR_NV12);

    // Create label-wise directory
    std::string save_path = "saved_frames/" + std::string(label);
    create_directory("saved_frames");
    create_directory(save_path);

    // Save frame with timestamp
    std::string filename = save_path + "/frame_" + std::to_string(frame_meta->frame_num) + ".jpg";
    cv::imwrite(filename, bgr);

    g_print("Saved frame: %s\n", filename.c_str());

    gst_buffer_unmap(buf, &map);
}

But the saved frames come out green. Can anyone tell me what is wrong, or the correct way to save the frame?

Please provide complete information as applicable to your setup. Thanks
Hardware Platform (Jetson / GPU)
DeepStream Version
JetPack Version (valid for Jetson only)
TensorRT Version
NVIDIA GPU Driver Version (valid for GPU only)
Issue Type( questions, new requirements, bugs)
How to reproduce the issue ? (This is for bugs. Including which sample app is using, the configuration files content, the command line used and other details for reproducing)
Requirement details( This is for new requirement. Including the module name-for which plugin or for which sample application, the function description)

Hardware Platform (Jetson / GPU) : Tesla P40
DeepStream Version : nvcr.io/nvidia/deepstream:6.3-samples
NVIDIA GPU Driver Version (valid for GPU only) : Driver Version: 535.183.01 CUDA Version: 12.2
Issue Type( questions, new requirements, bugs) : question
How to reproduce the issue ? :
When I run the sample action recognition pipeline app inside the container nvcr.io/nvidia/deepstream:6.3-samples, at the path “/opt/nvidia/deepstream/deepstream/sources/apps/sample_apps/deepstream-3d-action-recognition”

steps :

  • make
  • “./deepstream-3d-action-recognition -c deepstream_action_recognition_config.txt”

then the pipeline works and detects “walk”, “run”, “push”, and “fall_floor”.

logs :

[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.670, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.660, label: walk
Saved frame: saved_frames/walk/frame_605.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.660, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.682, label: walk
Saved frame: saved_frames/walk/frame_606.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.682, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.675, label: walk
Saved frame: saved_frames/walk/frame_607.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.675, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.643, label: walk
Saved frame: saved_frames/walk/frame_608.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.643, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.614, label: walk
Saved frame: saved_frames/walk/frame_609.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.614, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.669, label: walk
Saved frame: saved_frames/walk/frame_610.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.669, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.650, label: walk
Saved frame: saved_frames/walk/frame_611.jpg
[SAVED successfully] Action recognition result for ROI: cls_id: 2, score: 1.650, label: walk
classification result: cls_id: 2, label: walkROI: [left: 0.000000, top: 0.000000, width: 1280.000000, height: 720.000000]
Action recognition result for ROI: cls_id: 2, score: 1.611, label: walk

But I want to save frames from the running pipeline according to its detections, so I made some changes in deepstream_3d_action_recognition.cpp:

/*
 * Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include "deepstream_action.h"
#include <sys/stat.h>
#include <opencv2/opencv.hpp>

/** Defines the maximum size of a string. */
#define MAX_STR_LEN 2048

/** Defines the maximum size of an array for storing a text result. */
#define MAX_LABEL_SIZE 128

/** 3D model input NCDHW has 5 dims; 2D model input NSHW has 4 dims */
#define MODEL_3D_SHAPES 5

/* By default, OSD process-mode is set to GPU_MODE. To change mode, set as:
 * 0: CPU mode
 * 1: GPU mode
 */
#define OSD_PROCESS_MODE 1

/* By default, OSD will not display text. To display text, change this to 1 */
#define OSD_DISPLAY_TEXT 1

/* Print FPS per every several frames*/
#define FPS_INTERVAL 30

/* Action recognition config */
static NvDsARConfig gActionConfig;

/* Check signal interrupt invoked */
static volatile gboolean gIntr = false;

/* main gstreamer pipeline */
static volatile GstElement *gPipeline = nullptr;

/* NVIDIA Decoder source pad memory feature. This feature signifies that source
 * pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"

/* Debug environment variable name for libnvds_custom_sequence_preprocess.so */
#define ENV_CUSTOM_SEQUENC_DEBUG "DS_CUSTOM_SEQUENC_DEBUG"

#define MAX_CLASS_LEN 5
static const gchar kActioClasseLabels[MAX_CLASS_LEN][MAX_LABEL_SIZE] = {
    "push", "fall_floor" , "walk", "run", "ride_bike"};

static FpsCalculation gFpsCal(50);

/* add fps display metadata into frame */
static void
add_fps_display_meta(NvDsFrameMeta *frame, NvDsBatchMeta *batch_meta) {
  float fps = gFpsCal.updateFps(frame->source_id);
  if (fps < 0) {
    return;
  }

  NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
  display_meta->num_labels = 1;
  NvOSD_TextParams *txt_params = &display_meta->text_params[0];
  txt_params->display_text = (char *)g_malloc0(MAX_STR_LEN);

  snprintf(txt_params->display_text, MAX_STR_LEN - 1, "FPS: %.2f", fps);
  /* Now set the offsets where the string should appear */
  txt_params->x_offset = 0;
  txt_params->y_offset = 40;

  /* Font , font-color and font-size */
  txt_params->font_params.font_name = (char *)"Serif";
  txt_params->font_params.font_size = 10;
  txt_params->font_params.font_color.red = 1.0;
  txt_params->font_params.font_color.green = 1.0;
  txt_params->font_params.font_color.blue = 1.0;
  txt_params->font_params.font_color.alpha = 1.0;

  /* Text background color */
  txt_params->set_bg_clr = 1;
  txt_params->text_bg_clr.red = 0.0;
  txt_params->text_bg_clr.green = 0.0;
  txt_params->text_bg_clr.blue = 0.0;
  txt_params->text_bg_clr.alpha = 1.0;

  nvds_add_display_meta_to_frame(frame, display_meta);
}

#define SAVE_FRAME_PATH "./detected_actions/"
#define TARGET_ACTIONS 3
static const gchar* kTargetActions[TARGET_ACTIONS] = {"run", "fall_floor", "push"};

/* tiler_sink_pad_buffer_probe  will extract metadata received on OSD sink pad
 * and update params for drawing rectangle, object information etc. */

////////////////////

// Add these functions after the existing includes and before main()

static bool should_save_frame(const gchar* label) {
    return true;  // NOTE: temporarily saves every frame; remove this line to enable the target-action filter below
    // Check if the detected action is one we want to save
    for (int i = 0; i < TARGET_ACTIONS; i++) {
        if (strcmp(label, kTargetActions[i]) == 0) {
            return true;
        }
    }
    return false;
}

// Create directory if it doesn't exist
void create_directory(const std::string &path) {
    struct stat info;
    if (stat(path.c_str(), &info) != 0) {
        mkdir(path.c_str(), 0777); // Create directory
    }
}

// Function to save frame as an image
void save_frame(GstBuffer *buf, NvDsFrameMeta *frame_meta, const gchar *label) {
    GstMapInfo map;
    if (!gst_buffer_map(buf, &map, GST_MAP_READ)) {
        g_printerr("Failed to map GstBuffer\n");
        return;
    }

    // Create a cv::Mat for the NV12 frame
    cv::Mat nv12_frame(frame_meta->source_frame_height * 3 / 2, frame_meta->source_frame_width, CV_8UC1, map.data);

    // Create a cv::Mat for the BGR frame
    cv::Mat bgr_frame(frame_meta->source_frame_height, frame_meta->source_frame_width, CV_8UC3);

    // Convert NV12 to BGR using OpenCV
    cv::cvtColor(nv12_frame, bgr_frame, cv::COLOR_YUV2BGR_NV12);

    // Create label-wise directory
    std::string save_path = "saved_frames/" + std::string(label);
    create_directory("saved_frames");
    create_directory(save_path);

    // Save frame with timestamp
    std::string filename = save_path + "/frame_" + std::to_string(frame_meta->frame_num) + ".jpg";
    cv::imwrite(filename, bgr_frame);

    g_print("Saved frame: %s\n", filename.c_str());

    gst_buffer_unmap(buf, &map);
}

////////////////////


static GstPadProbeReturn
pgie_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info,
                          gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *)info->data;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);

  // First iterate through frames to get bounding boxes
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    
    // Iterate through all objects (bounding boxes) in frame
    for (NvDsMetaList *l_obj = frame_meta->obj_meta_list; l_obj != NULL;
         l_obj = l_obj->next) {
      NvDsObjectMeta *obj_meta = (NvDsObjectMeta *) l_obj->data;
      
      // Print bounding box information
      g_print("Detected object: bbox [left: %f, top: %f, width: %f, height: %f]\n",
              obj_meta->rect_params.left, obj_meta->rect_params.top,
              obj_meta->rect_params.width, obj_meta->rect_params.height);
    }
  }

  NvDsMetaList *l_user_meta = NULL;
  NvDsUserMeta *user_meta = NULL;
  for (l_user_meta = batch_meta->batch_user_meta_list; l_user_meta != NULL;
       l_user_meta = l_user_meta->next)
  {
    user_meta = (NvDsUserMeta *)(l_user_meta->data);
    if (user_meta->base_meta.meta_type == NVDS_PREPROCESS_BATCH_META)
    {
      GstNvDsPreProcessBatchMeta *preprocess_batchmeta =
          (GstNvDsPreProcessBatchMeta *)(user_meta->user_meta_data);
      std::string model_dims = "";
      if (preprocess_batchmeta->tensor_meta) {
        if (preprocess_batchmeta->tensor_meta->tensor_shape.size() == MODEL_3D_SHAPES) {
          model_dims = "3D: AR - ";
        } else {
          model_dims = "2D: AR - ";
        }
      }
      for (auto &roi_meta : preprocess_batchmeta->roi_vector)
      {
        // Print ROI information which should correspond to the bounding box
        g_print("ROI: [left: %f, top: %f, width: %f, height: %f]\n",
                roi_meta.roi.left, roi_meta.roi.top,
                roi_meta.roi.width, roi_meta.roi.height);

        NvDsMetaList *l_user = NULL;
        for (l_user = roi_meta.roi_user_meta_list; l_user != NULL;
             l_user = l_user->next)
        {
          NvDsUserMeta *user_meta = (NvDsUserMeta *)(l_user->data);
          if (user_meta->base_meta.meta_type == NVDSINFER_TENSOR_OUTPUT_META)
          {
            NvDsInferTensorMeta *tensor_meta = (NvDsInferTensorMeta *)(user_meta->user_meta_data);
            gfloat max_prob = 0;
            gint class_id = -1;
            gfloat *buffer = (gfloat *)tensor_meta->out_buf_ptrs_host[0];
            for (size_t i = 0; i < tensor_meta->output_layers_info[0].inferDims.d[0]; i++)
            {
              if (buffer[i] > max_prob)
              {
                max_prob = buffer[i];
                class_id = i;
              }
            }
            const gchar *label = "";
            if (class_id >= 0 && class_id < MAX_CLASS_LEN)
              label = kActioClasseLabels[class_id];
            LOG_DEBUG("output tensor result: cls_id: %d, scrore:%.3f, label: %s", class_id, max_prob, label);
            g_print("Action recognition result for ROI: cls_id: %d, score: %.3f, label: %s\n", 
                    class_id, max_prob, label);
            if (should_save_frame(label) && max_prob > 1) { // Only save if score > 1
                save_frame(buf, roi_meta.frame_meta, label);
                g_print("[SAVED successfully] Action recognition result for ROI: cls_id: %d, score: %.3f, label: %s\n",class_id, max_prob, label);
            }
          }
        }

        NvDsMetaList *l_classifier = NULL;
        for (l_classifier = roi_meta.classifier_meta_list; l_classifier != NULL;
             l_classifier = l_classifier->next)
        {
          NvDsClassifierMeta *classifier_meta = (NvDsClassifierMeta *)(l_classifier->data);
          NvDsLabelInfoList *l_label;
          for (l_label = classifier_meta->label_info_list; l_label != NULL;
               l_label = l_label->next)
          {
            NvDsLabelInfo *label_info = (NvDsLabelInfo *)l_label->data;

            NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool(batch_meta);
            display_meta->num_labels = 1;

            NvOSD_TextParams *txt_params = &display_meta->text_params[0];
            txt_params->display_text = (char *)g_malloc0(MAX_STR_LEN);

            snprintf(txt_params->display_text, MAX_STR_LEN - 1,
                     "%s: %s", model_dims.c_str(), label_info->result_label);
            LOG_DEBUG("classification result: cls_id: %d, label: %s", label_info->result_class_id, label_info->result_label);
            g_print("classification result: cls_id: %d, label: %s", label_info->result_class_id, label_info->result_label);
            /* Now set the offsets where the string should appear */
            txt_params->x_offset = roi_meta.roi.left;
            txt_params->y_offset = (uint32_t)std::max<int32_t>(roi_meta.roi.top - 10, 0);

            /* Font , font-color and font-size */
            txt_params->font_params.font_name = (char *)"Serif";
            txt_params->font_params.font_size = 12;
            txt_params->font_params.font_color.red = 1.0;
            txt_params->font_params.font_color.green = 1.0;
            txt_params->font_params.font_color.blue = 1.0;
            txt_params->font_params.font_color.alpha = 1.0;

            /* Text background color */
            txt_params->set_bg_clr = 1;
            txt_params->text_bg_clr.red = 0.0;
            txt_params->text_bg_clr.green = 0.0;
            txt_params->text_bg_clr.blue = 0.0;
            txt_params->text_bg_clr.alpha = 1.0;

            nvds_add_display_meta_to_frame(roi_meta.frame_meta, display_meta);
          }
        }
      }
    }
  }

  /* Iterate each frame metadata in batch */
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
    // print FPS on each stream
    if (gActionConfig.enableFps) {
      add_fps_display_meta(frame_meta, batch_meta);
    }
  }

  static uint64_t sFrameCount = 0;
  sFrameCount++;
  if (gActionConfig.enableFps && sFrameCount >= FPS_INTERVAL) {
    sFrameCount = 0;
    std::vector<std::pair<float, float>> fps;
    gFpsCal.getAllFps(fps);
    char fpsText[MAX_STR_LEN] = {'\0'};
    for (auto& p : fps) {
      snprintf(fpsText + strlen(fpsText), MAX_STR_LEN - 1, "%.2f (%.2f) \t", p.first, p.second);
    }
    if (!fps.empty()) {
      g_print("FPS(cur/avg): %s\n", fpsText);
    }
  }

  return GST_PAD_PROBE_OK;
}

static gboolean
bus_call(GstBus *bus, GstMessage *msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *)data;
  switch (GST_MESSAGE_TYPE(msg))
  {
  case GST_MESSAGE_EOS:
    g_print("End of stream\n");
    g_main_loop_quit(loop);
    break;
  case GST_MESSAGE_WARNING:
  {
    gchar *debug;
    GError *error;
    gst_message_parse_warning(msg, &error, &debug);
    g_printerr("WARNING from element %s: %s\n",
               GST_OBJECT_NAME(msg->src), error->message);
    g_free(debug);
    g_printerr("Warning: %s\n", error->message);
    g_error_free(error);
    break;
  }
  case GST_MESSAGE_ERROR:
  {
    gchar *debug;
    GError *error;
    gst_message_parse_error(msg, &error, &debug);
    g_printerr("ERROR from element %s: %s\n",
               GST_OBJECT_NAME(msg->src), error->message);
    if (debug)
      g_printerr("Error details: %s\n", debug);
    g_free(debug);
    g_error_free(error);
    g_main_loop_quit(loop);
    break;
  }
#ifndef PLATFORM_TEGRA
  case GST_MESSAGE_ELEMENT:
  {
    if (gst_nvmessage_is_stream_eos(msg))
    {
      guint stream_id;
      if (gst_nvmessage_parse_stream_eos(msg, &stream_id))
      {
        g_print("Got EOS from stream %d\n", stream_id);
      }
    }
    break;
  }
#endif
  default:
    break;
  }
  return TRUE;
}

static void
cb_newpad(GstElement *decodebin, GstPad *decoder_src_pad, gpointer data)
{
  g_print("In cb_newpad\n");
  GstCaps *caps = gst_pad_get_current_caps(decoder_src_pad);
  g_print("Caps: %s\n", gst_caps_to_string(caps));
  const GstStructure *str = gst_caps_get_structure(caps, 0);
  const gchar *name = gst_structure_get_name(str);
  GstElement *source_bin = (GstElement *)data;
  GstCapsFeatures *features = gst_caps_get_features(caps, 0);

  /* Need to check if the pad created by the decodebin is for video and not
   * audio. */
  if (!strncmp(name, "video", 5))
  {
    /* Link the decodebin pad only if decodebin has picked nvidia
     * decoder plugin nvdec_*. We do this by checking if the pad caps contain
     * NVMM memory features. */
    if (gst_caps_features_contains(features, GST_CAPS_FEATURES_NVMM))
    {
      /* Get the source bin ghost pad */
      GstPad *bin_ghost_pad = gst_element_get_static_pad(source_bin, "src");
      if (!gst_ghost_pad_set_target(GST_GHOST_PAD(bin_ghost_pad),
                                    decoder_src_pad))
      {
        g_printerr("Failed to link decoder src pad to source bin ghost pad\n");
      }
      gst_object_unref(bin_ghost_pad);
    }
    else
    {
      g_printerr("Error: Decodebin did not pick nvidia decoder plugin.\n");
    }
  }
}

static void
decodebin_child_added(GstChildProxy *child_proxy, GObject *object,
                      gchar *name, gpointer user_data)
{
  g_print("Decodebin child added: %s\n", name);
  if (g_strrstr(name, "decodebin") == name)
  {
    g_signal_connect(G_OBJECT(object), "child-added",
                     G_CALLBACK(decodebin_child_added), user_data);
  }
}

static GstElement *
create_source_bin(guint index, const gchar *uri)
{
  GstElement *bin = NULL, *uri_decode_bin = NULL;
  gchar bin_name[16] = {};

  g_snprintf(bin_name, 15, "source-bin-%02d", index);
  /* Create a source GstBin to abstract this bin's content from the rest of the
   * pipeline */
  bin = gst_bin_new(bin_name);

  /* Source element for reading from the uri.
   * We will use decodebin and let it figure out the container format of the
   * stream and the codec and plug the appropriate demux and decode plugins. */
  uri_decode_bin = gst_element_factory_make("uridecodebin", "uri-decode-bin");

  if (!bin || !uri_decode_bin)
  {
    g_printerr("One element in source bin could not be created.\n");
    return NULL;
  }

  /* We set the input uri to the source element */
  g_object_set(G_OBJECT(uri_decode_bin), "uri", uri, NULL);

  /* Connect to the "pad-added" signal of the decodebin which generates a
   * callback once a new pad for raw data has been created by the decodebin */
  g_signal_connect(G_OBJECT(uri_decode_bin), "pad-added",
                   G_CALLBACK(cb_newpad), bin);
  g_signal_connect(G_OBJECT(uri_decode_bin), "child-added",
                   G_CALLBACK(decodebin_child_added), bin);

  gst_bin_add(GST_BIN(bin), uri_decode_bin);

  /* We need to create a ghost pad for the source bin which will act as a proxy
   * for the video decoder src pad. The ghost pad will not have a target right
   * now. Once the decode bin creates the video decoder and generates the
   * cb_newpad callback, we will set the ghost pad target to the video decoder
   * src pad. */
  if (!gst_element_add_pad(bin, gst_ghost_pad_new_no_target("src",
                                                            GST_PAD_SRC)))
  {
    g_printerr("Failed to add ghost pad in source bin\n");
    return NULL;
  }

  return bin;
}

/**
 * Function to handle program interrupt signal.
 * It installs default handler after handling the interrupt.
 */
static void _intr_handler (int signum)
{
  gIntr = TRUE;
  g_printerr ("User Interrupted.. \n");

  if (gPipeline) {
    /* Send EOS to the pipeline */
    if (!gst_element_send_event (GST_ELEMENT(gPipeline),
          gst_event_new_eos())) {
      g_print("Interrupted, EOS not sent");
    }
  }
}

/*
* Function to install custom handler for program interrupt signal.
*/
static void _intr_setup (void)
{
  struct sigaction action;

  memset (&action, 0, sizeof (action));
  action.sa_handler = _intr_handler;

  sigaction (SIGINT, &action, NULL);
}

int main(int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *pgie = NULL,
             *preprocess = NULL, *queue1 = NULL, *queue2 = NULL,
             *queue3 = NULL, *queue4 = NULL, *queue5 = NULL, *queue6 = NULL,
             *nvvidconv = NULL, *nvosd = NULL, *tiler = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *pgie_src_pad = NULL;
  guint i, num_sources;
  guint tiler_rows, tiler_columns;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  /* Standard GStreamer initialization */
  gst_init(&argc, &argv);

    /* setup signal handler */
  _intr_setup();

    /* Check input arguments */
  if (argc < 3 || strncmp(argv[1], "-c", 3))
  {
    g_printerr("Usage: %s -c <action_recognition_config.txt>\n", argv[0]);
    return -1;
  }

  if (!parse_action_config(argv[2], gActionConfig)) {
    g_printerr("parse config file: %s failed.\n", argv[2]);
    return -1;
  }

  if (gActionConfig.debug >= kDebugVerbose) {
    setenv(ENV_CUSTOM_SEQUENC_DEBUG, "1", 1);
  } else {
    unsetenv(ENV_CUSTOM_SEQUENC_DEBUG);
  }

  num_sources = gActionConfig.uri_list.size();

  loop = g_main_loop_new(NULL, FALSE);

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new("preprocess-test-pipeline");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux)
  {
    g_printerr("One element could not be created. Exiting.\n");
    return -1;
  }
  gst_bin_add(GST_BIN(pipeline), streammux);

  for (i = 0; i < num_sources; i++)
  {
    GstPad *sinkpad, *srcpad;
    gchar pad_name[16] = {};
    GstElement *source_bin = create_source_bin(i, gActionConfig.uri_list[i].c_str());

    if (!source_bin)
    {
      g_printerr("Failed to create source bin. Exiting.\n");
      return -1;
    }

    gst_bin_add(GST_BIN(pipeline), source_bin);

    g_snprintf(pad_name, 15, "sink_%u", i);
    sinkpad = gst_element_get_request_pad(streammux, pad_name);
    if (!sinkpad)
    {
      g_printerr("Streammux request sink pad failed. Exiting.\n");
      return -1;
    }

    srcpad = gst_element_get_static_pad(source_bin, "src");
    if (!srcpad)
    {
      g_printerr("Failed to get src pad of source bin. Exiting.\n");
      return -1;
    }

    if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK)
    {
      g_printerr("Failed to link source bin to stream muxer. Exiting.\n");
      return -1;
    }

    gst_object_unref(srcpad);
    gst_object_unref(sinkpad);
  }

  /* to preprocess the rois and form a raw tensor for inferencing */
  preprocess = gst_element_factory_make("nvdspreprocess", "preprocess-plugin");

  /* Create inference plugin to inference batched frames. */
  if (!gActionConfig.triton_infer_config.empty()) {
    pgie = gst_element_factory_make("nvinferserver", "primary-triton-nvinference");
  } else {
    pgie = gst_element_factory_make("nvinfer", "primary-nvinference-engine");
  }

  /* Add queue elements between every two elements */
  queue1 = gst_element_factory_make("queue", "queue1");
  queue2 = gst_element_factory_make("queue", "queue2");
  queue3 = gst_element_factory_make("queue", "queue3");

  if (!preprocess || !pgie || !queue1 || !queue2 || !queue3)
  {
    g_printerr("One element could not be created. Exiting.\n");
    return -1;
  }

  if (gActionConfig.useFakeSink) {
    sink = gst_element_factory_make("fakesink", "nvvideo-sink");
    if (!sink)
    {
      g_printerr("element fakesink could not be created. Exiting.\n");
      return -1;
    }
  } else {

    queue4 = gst_element_factory_make("queue", "queue4");
    queue5 = gst_element_factory_make("queue", "queue5");
    queue6 = gst_element_factory_make("queue", "queue6");

    /* Use nvtiler to composite the batched frames into a 2D tiled array based
    * on the source of the frames. */
    tiler = gst_element_factory_make("nvmultistreamtiler", "nvtiler");

    /* Use convertor to convert from NV12 to RGBA as required by nvosd */
    nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");

    /* Create OSD to draw on the converted RGBA buffer */
    nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");

    /* Finally render the osd output */
    if (prop.integrated)
    {
      sink = gst_element_factory_make("nv3dsink", "nv3d-sink");
    } else {
      sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");
    }

    if (!tiler || !nvvidconv || !nvosd || !sink)
    {
      g_printerr("One element could not be created. Exiting.\n");
      return -1;
    }

    tiler_rows = (guint)sqrt(num_sources);
    tiler_columns = (guint)ceil(1.0 * num_sources / tiler_rows);
    /* we set the tiler properties here */
    g_object_set(G_OBJECT(tiler), "rows", tiler_rows, "columns", tiler_columns,
                "width", gActionConfig.tiler_width, "height", gActionConfig.tiler_height, NULL);

    g_object_set(G_OBJECT(nvosd), "process-mode", OSD_PROCESS_MODE,
                "display-text", OSD_DISPLAY_TEXT, NULL);
  }

  g_object_set(G_OBJECT(streammux), "batch-size", num_sources, NULL);

  g_object_set(G_OBJECT(streammux), "width", gActionConfig.muxer_width, "height",
               gActionConfig.muxer_height,
               "batched-push-timeout", gActionConfig.muxer_batch_timeout, NULL);

  g_object_set(G_OBJECT(preprocess), "config-file", gActionConfig.preprocess_config.c_str(), NULL);

  /* Configure the nvinfer element using the nvinfer config file. */
  g_object_set(G_OBJECT(pgie), "input-tensor-meta", TRUE,
               "config-file-path",
               (!gActionConfig.triton_infer_config.empty() ?
                 gActionConfig.triton_infer_config.c_str() :
                 gActionConfig.infer_config.c_str()), NULL);

  g_print("num-sources = %d\n", num_sources);

  g_object_set(G_OBJECT(sink), "qos", 0, "sync", gActionConfig.display_sync, NULL);

  /* we add a message handler */
  bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
  bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
  gst_object_unref(bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  if (gActionConfig.useFakeSink) {
    gst_bin_add_many(GST_BIN(pipeline), queue1, preprocess, queue2, pgie, queue3, sink, NULL);
    /* we link the elements together
    * nvstreammux -> nvdspreprocess -> nvinfer -> fakesink */
    if (!gst_element_link_many(streammux, queue1, preprocess, queue2, pgie, queue3, sink, NULL))
    {
      g_printerr("Elements could not be linked. Exiting.\n");
      return -1;
    }
  }
  else
  {
    gst_bin_add_many(GST_BIN(pipeline), queue1, preprocess, queue2, pgie, queue3, tiler,
                     queue4, nvvidconv, queue5, nvosd, queue6, sink, NULL);
    /* we link the elements together
    * nvstreammux -> nvinfer -> nvtiler -> nvvidconv -> nvosd -> video-renderer */
    if (!gst_element_link_many(streammux, queue1, preprocess, queue2, pgie, queue3, tiler,
                               queue4, nvvidconv, queue5, nvosd, queue6, sink, NULL))
    {
      g_printerr("Elements could not be linked. Exiting.\n");
      return -1;
    }
  }

  /* Let's add a probe to get informed of the metadata generated. We add the probe
   * to the src pad of the pgie element, since by that time the buffer carries the
   * inference output metadata. */
  pgie_src_pad = gst_element_get_static_pad(pgie, "src");
  if (!pgie_src_pad)
    g_print("Unable to get pgie src pad\n");
  else
    gst_pad_add_probe(pgie_src_pad, GST_PAD_PROBE_TYPE_BUFFER,
                      pgie_src_pad_buffer_probe, NULL, NULL);
  gst_object_unref(pgie_src_pad);

  /* Set the pipeline to "playing" state */
  g_print("Now playing:");
  for (i = 0; i < num_sources; i++)
  {
    g_print(" %s,", gActionConfig.uri_list[i].c_str());
  }
  g_print("\n");

  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  gPipeline = pipeline;

  /* Wait till pipeline encounters an error or EOS */
  g_print("Running...\n");
  g_main_loop_run(loop);

  gPipeline = nullptr;

  /* Out of the main loop, clean up nicely */
  g_print("Returned, stopping playback\n");
  gst_element_set_state(pipeline, GST_STATE_NULL);
  g_print("Deleting pipeline\n");
  gst_object_unref(GST_OBJECT(pipeline));
  g_source_remove(bus_watch_id);
  g_main_loop_unref(loop);
  return 0;
}

But when I check the saved frames, they are green.

@yuweiw can you please help with this?

We recommend that you follow our deepstream-image-meta-test demo to save pictures. This uses the hardware encoder, which is more efficient.
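For reference, a likely cause of the green frames: at the pgie src pad the buffers live in NVMM memory, so gst_buffer_map() returns a pointer to an NvBufSurface descriptor, not raw NV12 pixel data; interpreting that descriptor as NV12 and converting it to BGR yields a flat green image. Besides the hardware-encoder route used by deepstream-image-meta-test (nvds_obj_enc_create_context() / nvds_obj_enc_process() / nvds_obj_enc_finish()), a CPU-side alternative is to map the NvBufSurface first and only then convert with OpenCV. The sketch below is an illustration, not code from the sample: the name save_frame_nvmm is made up, it assumes NV12 surfaces, it reuses the existing create_directory() helper, and it assumes the surface memory is CPU-mappable (on a dGPU such as the Tesla P40 this typically means setting nvbuf-memory-type=3, CUDA unified memory, on nvstreammux; otherwise copy the planes out with cudaMemcpy2D from dataPtr first).

#include "nvbufsurface.h"
#include <opencv2/opencv.hpp>

// Minimal sketch (illustrative only): map the batched NvBufSurface and save one frame.
static void save_frame_nvmm(GstBuffer *buf, NvDsFrameMeta *frame_meta, const gchar *label)
{
    GstMapInfo map;
    if (!gst_buffer_map(buf, &map, GST_MAP_READ)) {
        g_printerr("Failed to map GstBuffer\n");
        return;
    }
    // In a DeepStream pipeline the mapped data is an NvBufSurface descriptor, not pixels.
    NvBufSurface *surface = (NvBufSurface *)map.data;
    guint idx = frame_meta->batch_id; // index of this frame inside the batched surface

    // Make the frame readable from the CPU (fails for plain CUDA device memory).
    if (NvBufSurfaceMap(surface, idx, -1, NVBUF_MAP_READ) != 0) {
        g_printerr("NvBufSurfaceMap failed (is the memory type CPU-mappable?)\n");
        gst_buffer_unmap(buf, &map);
        return;
    }
    NvBufSurfaceSyncForCpu(surface, idx, -1);

    NvBufSurfaceParams *params = &surface->surfaceList[idx];
    // Wrap both NV12 planes; note the pitch (stride) is usually wider than the width.
    cv::Mat y_plane(params->height, params->width, CV_8UC1,
                    params->mappedAddr.addr[0], params->planeParams.pitch[0]);
    cv::Mat uv_plane(params->height / 2, params->width / 2, CV_8UC2,
                     params->mappedAddr.addr[1], params->planeParams.pitch[1]);
    cv::Mat bgr;
    cv::cvtColorTwoPlane(y_plane, uv_plane, bgr, cv::COLOR_YUV2BGR_NV12);

    // Same label-wise directory layout as the original save_frame().
    std::string save_path = "saved_frames/" + std::string(label);
    create_directory("saved_frames");
    create_directory(save_path);
    std::string filename = save_path + "/frame_" + std::to_string(frame_meta->frame_num) + ".jpg";
    cv::imwrite(filename, bgr);

    NvBufSurfaceUnMap(surface, idx, -1);
    gst_buffer_unmap(buf, &map);
}

If this route is used, only the call site inside pgie_src_pad_buffer_probe changes, i.e. save_frame(buf, roi_meta.frame_meta, label) becomes save_frame_nvmm(buf, roi_meta.frame_meta, label).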

There is no update from you for a period, assuming this is not an issue anymore. Hence we are closing this topic. If you need further support, please open a new one. Thanks
