Request for Assistance in Improving Output Render Quality for Thermal Camera (IR) Video

• Hardware Platform: Jetson Orin NX
• DeepStream Version: 7.0

I am encountering an issue when processing thermal camera video with my DeepStream-based application. RGB video works perfectly and renders correctly, but when I use thermal (IR) camera video as input, the rendered output is not visually correct.

I have attached my code for reference. Could you please help me identify what might be causing this issue and how to correct it?

#include <gst/gst.h>
#include <opencv2/opencv.hpp>
#include <cuda_runtime.h>
#include "gstnvdsmeta.h"
#include "nvdsmeta.h"

static GstPadProbeReturn nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
    if (!buf) {
        g_print("No buffer found.\n");
        return GST_PAD_PROBE_OK;
    }

    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READWRITE)) {
        g_print("Failed to map buffer.\n");
        return GST_PAD_PROBE_OK;
    }

    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    if (!batch_meta) {
        g_print("No batch meta found.\n");
        gst_buffer_unmap(buf, &inmap);
        return GST_PAD_PROBE_OK;
    }

    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;

        guint width = frame_meta->source_frame_width;
        guint height = frame_meta->source_frame_height;
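        // NOTE: these are the dimensions of the original source stream, not
        // necessarily of this buffer: nvstreammux scales every frame to its
        // configured output resolution, so the two must match for the Mat
        // wrapper below to interpret the mapped pixel data correctly.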

        if (width == 0 || height == 0) {
            g_print("Invalid frame dimensions: %d x %d\n", width, height);
            continue;
        }

        // Convert RGBA to BGR
        cv::Mat rgba_frame(height, width, CV_8UC4, inmap.data);
        cv::Mat bgr_frame;
        cv::cvtColor(rgba_frame, bgr_frame, cv::COLOR_RGBA2BGR);

        // Define cropping parameters
        int x = 300, y = 300;
        int crop_width = 60, crop_height = 60;

        int x_start = std::max(0, x - crop_width / 2);
        int y_start = std::max(0, y - crop_height / 2);
        guint x_end = std::min((guint)width, (guint)(x + crop_width / 2));
        guint y_end = std::min((guint)height, (guint)(y + crop_height / 2));

        int crop_width_actual = x_end - x_start;
        int crop_height_actual = y_end - y_start;
        crop_width_actual = std::min(crop_width_actual, crop_width);
        crop_height_actual = std::min(crop_height_actual, crop_height);

        // Perform the cropping operation
        cv::Rect crop_roi(x_start, y_start, crop_width_actual, crop_height_actual);
        cv::Mat cropped_frame = bgr_frame(crop_roi);

        // Resize the cropped frame
        cv::Mat resized_frame;
        cv::resize(cropped_frame, resized_frame, cv::Size(cropped_frame.cols * 2, cropped_frame.rows * 2), 0, 0, cv::INTER_CUBIC);
        cv::fastNlMeansDenoising(resized_frame, resized_frame, 7, 7, 21);

        // Apply sharpening kernel
        cv::Mat sharpening_kernel = (cv::Mat_<float>(3, 3) << 
            0, -1, 0, 
            -1, 5, -1, 
            0, -1, 0);
        cv::filter2D(resized_frame, resized_frame, -1, sharpening_kernel);

        // Draw a red boundary around the resized frame
        cv::rectangle(resized_frame, cv::Point(0, 0), cv::Point(resized_frame.cols, resized_frame.rows), cv::Scalar(0, 0, 255), 4);

        // Overlay the resized frame back onto the original frame
        int overlay_x = std::max(0, x_start);
        int overlay_y = std::max(0, y_start);

        int overlay_width = std::min(resized_frame.cols, bgr_frame.cols - overlay_x);
        int overlay_height = std::min(resized_frame.rows, bgr_frame.rows - overlay_y);

        if (overlay_width > 0 && overlay_height > 0) {
            resized_frame(cv::Rect(0, 0, overlay_width, overlay_height))
                .copyTo(bgr_frame(cv::Rect(overlay_x, overlay_y, overlay_width, overlay_height)));
        }

        // Convert BGR back to RGBA
        cv::cvtColor(bgr_frame, rgba_frame, cv::COLOR_BGR2RGBA);
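        // NOTE: rgba_frame wraps inmap.data, so the cvtColor above already
        // wrote the result into the mapped buffer; the memcpy below is
        // effectively a self-copy.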

        // Copy the modified RGBA frame back to the buffer
        memcpy(inmap.data, rgba_frame.data, rgba_frame.total() * rgba_frame.elemSize());
    }

    gst_buffer_unmap(buf, &inmap);
    return GST_PAD_PROBE_OK;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}

int main(int argc, char *argv[]) {
    GMainLoop *loop = NULL;
    GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder = NULL,
        *streammux = NULL, *sink = NULL, *nvvidconv = NULL, *nvosd = NULL, *capsfilter = NULL;

  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *nvvidconv_src_pad = NULL;
  gboolean yaml_config = FALSE;
  //NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  if (argc != 2) {
      g_printerr("Usage: %s <yml file>\n", argv[0]);
      g_printerr("OR: %s <H264 filename>\n", argv[0]);
      return -1;
  }

  gst_init(&argc, &argv);
  loop = g_main_loop_new(NULL, FALSE);

  yaml_config = (g_str_has_suffix(argv[1], ".yml") ||
                 g_str_has_suffix(argv[1], ".yaml"));

  pipeline = gst_pipeline_new("dstest1-pipeline");

  source = gst_element_factory_make("filesrc", "file-source");
  h264parser = gst_element_factory_make("h264parse", "h264-parser");
  decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
  streammux = gst_element_factory_make("nvstreammux", "stream-muxer");

  nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");


  nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
  capsfilter = gst_element_factory_make("capsfilter", "filter");
  g_object_set(G_OBJECT(capsfilter), 
           "caps", 
           gst_caps_from_string("video/x-raw, format=RGBA"), 
           NULL);
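  /* Note: plain "video/x-raw" caps (no memory:NVMM feature) make
   * nvvideoconvert output system memory, which is what lets the CPU probe
   * map the buffer and access raw RGBA pixels directly. */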


  if (prop.integrated) {
      sink = gst_element_factory_make("nveglglessink", "nv3d-sink");
  } else {
      sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");
  }

  if (!source || !h264parser || !decoder || !streammux || !nvvidconv || !nvosd || !capsfilter || !sink) {
      g_printerr("One element could not be created. Exiting.\n");
      return -1;
  }

  g_object_set(G_OBJECT(source), "location", argv[1], NULL);
  g_object_set(G_OBJECT(streammux), "batch-size", 1, NULL);
  g_object_set(G_OBJECT(streammux), "width", 1280, "height", 720, "batched-push-timeout", 4000000, NULL);

  bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
  bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
  gst_object_unref(bus);

  gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, streammux, nvosd, nvvidconv, capsfilter, sink, NULL);

  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";

  sinkpad = gst_element_request_pad_simple(streammux, pad_name_sink);
  if (!sinkpad) {
      g_printerr("Streammux request sink pad failed. Exiting.\n");
      return -1;
  }

  srcpad = gst_element_get_static_pad(decoder, pad_name_src);
  if (!srcpad) {
      g_printerr("Decoder request src pad failed. Exiting.\n");
      return -1;
  }

  if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr("Failed to link decoder to stream muxer. Exiting.\n");
      return -1;
  }

  gst_object_unref(sinkpad);
  gst_object_unref(srcpad);

  if (!gst_element_link_many(source, h264parser, decoder, NULL)) {
      g_printerr("Elements could not be linked: 1. Exiting.\n");
      return -1;
  }

  // Adjusted pipeline linking order with probe at nvvidconv
  if (!gst_element_link_many(streammux, nvosd, nvvidconv, capsfilter, sink, NULL)) {
      g_printerr("Elements could not be linked: 2. Exiting.\n");
      return -1;
  }

  // Add probe to nvvidconv's source pad
  nvvidconv_src_pad = gst_element_get_static_pad(nvvidconv, "src");
  if (nvvidconv_src_pad) {
      gst_pad_add_probe(nvvidconv_src_pad, GST_PAD_PROBE_TYPE_BUFFER, nvvidconv_src_pad_buffer_probe, NULL, NULL);
      gst_object_unref(nvvidconv_src_pad);
  }

  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  g_main_loop_run(loop);

  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(GST_OBJECT(pipeline));
  g_source_remove(bus_watch_id);
  g_main_loop_unref(loop);

  return 0;
}

This is the output for the RGB video; it renders clearly:

This is the output when I give thermal camera (IR) video as input; it does not render well:

Thank you for your assistance.

Abdul Manaf PV

Since you are using filesrc in your code, could you attach your source file? And can you elaborate on what the problem is in the image?

I am currently working with code that performs operations like cropping, resizing, denoising, and sharpening on a specific part of each video frame, around a custom-selected point. It works very well on daytime video, but not on nighttime (thermal) video.

Here I am attaching my Makefile, code, and source files:

This is the command I used to run the thermal camera (IR) video:
./deepstream-test1-app checking.h264

This is the command I used to run the RGB (daytime) video:

./deepstream-test1-app sample_720p.h264

As you can see in the pictures above, for the daytime (RGB) video the rendering output is correct, but when I use thermal camera (IR) video as input it does not render correctly; you can see artifacts in the rendered output.

files.zip (14.3 MB)

Hi… I have narrowed the issue down: only high-resolution video (1920×1080) gives good rendering output without any issue. Input video with a lower resolution, e.g. 1280×720, does not render correctly (the issue I described above). I am attaching my code here; what should I change in it to correct this issue?

#include <gst/gst.h>
#include <glib.h>
#include <stdio.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <cuda_runtime_api.h>
#include "gstnvdsmeta.h"
#include "nvds_yml_parser.h"
#include "gst-nvmessage.h"
#include <opencv2/opencv.hpp>

#define MAX_DISPLAY_LEN 64

#define PGIE_CLASS_ID_VEHICLE 0
#define PGIE_CLASS_ID_PERSON 2

/* By default, OSD process-mode is set to GPU_MODE. To change mode, set as:
 * 0: CPU mode
 * 1: GPU mode
 */
#define OSD_PROCESS_MODE 1

/* By default, OSD will not display text. To display text, change this to 1 */
#define OSD_DISPLAY_TEXT 0

/* The muxer output resolution must be set if the input streams will be of
 * different resolution. The muxer will scale all the input frames to this
 * resolution. */
#define MUXER_OUTPUT_WIDTH 1920
#define MUXER_OUTPUT_HEIGHT 1080

/* Muxer batch formation timeout, for e.g. 40 millisec. Should ideally be set
 * based on the fastest source's framerate. */
#define MUXER_BATCH_TIMEOUT_USEC 40000

#define TILED_OUTPUT_WIDTH 1280
#define TILED_OUTPUT_HEIGHT 720

/* NVIDIA Decoder source pad memory feature. This feature signifies that source
 * pads having this capability will push GstBuffers containing cuda buffers. */
#define GST_CAPS_FEATURES_NVMM "memory:NVMM"

/* Check for parsing error. */
#define RETURN_ON_PARSER_ERROR(parse_expr) \
  if (NVDS_YAML_PARSER_SUCCESS != parse_expr) { \
    g_printerr("Error in parsing configuration file.\n"); \
    return -1; \
  }

static gboolean PERF_MODE = FALSE;

static GstPadProbeReturn nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
    if (!buf) {
        g_print("No buffer found.\n");
        return GST_PAD_PROBE_OK;
    }

    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READWRITE)) {
        g_print("Failed to map buffer.\n");
        return GST_PAD_PROBE_OK;
    }

    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    if (!batch_meta) {
        g_print("No batch meta found.\n");
        gst_buffer_unmap(buf, &inmap);
        return GST_PAD_PROBE_OK;
    }

    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;

        guint width = frame_meta->source_frame_width;
        guint height = frame_meta->source_frame_height;
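        // NOTE: these are the original source dimensions; the buffer itself
        // holds frames at the nvstreammux output resolution (1920x1080 via
        // MUXER_OUTPUT_WIDTH/HEIGHT here, unless set from YAML), which is
        // why inputs at other resolutions come out corrupted.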

        if (width == 0 || height == 0) {
            g_print("Invalid frame dimensions: %d x %d\n", width, height);
            continue;
        }

        // Convert RGBA to BGR
        cv::Mat rgba_frame(height, width, CV_8UC4, inmap.data);
        cv::Mat bgr_frame;
        cv::cvtColor(rgba_frame, bgr_frame, cv::COLOR_RGBA2BGR);

        // Define cropping parameters
        int x = 50, y = 50;
        int crop_width = 60, crop_height = 60;

        int x_start = std::max(0, x - crop_width / 2);
        int y_start = std::max(0, y - crop_height / 2);
        guint x_end = std::min((guint)width, (guint)(x + crop_width / 2));
        guint y_end = std::min((guint)height, (guint)(y + crop_height / 2));

        int crop_width_actual = x_end - x_start;
        int crop_height_actual = y_end - y_start;
        crop_width_actual = std::min(crop_width_actual, crop_width);
        crop_height_actual = std::min(crop_height_actual, crop_height);

        // Perform the cropping operation
        cv::Rect crop_roi(x_start, y_start, crop_width_actual, crop_height_actual);
        cv::Mat cropped_frame = bgr_frame(crop_roi);

        // Resize the cropped frame
        cv::Mat resized_frame;
        cv::resize(cropped_frame, resized_frame, cv::Size(cropped_frame.cols * 2, cropped_frame.rows * 2), 0, 0, cv::INTER_CUBIC);
        //cv::fastNlMeansDenoising(resized_frame, resized_frame, 7, 7, 21);

        // Apply sharpening kernel
        cv::Mat sharpening_kernel = (cv::Mat_<float>(3, 3) << 
            0, -1, 0, 
            -1, 5, -1, 
            0, -1, 0);
        cv::filter2D(resized_frame, resized_frame, -1, sharpening_kernel);

        // Draw a red boundary around the resized frame
        cv::rectangle(resized_frame, cv::Point(0, 0), cv::Point(resized_frame.cols, resized_frame.rows), cv::Scalar(0, 0, 255), 4);

        // Overlay the resized frame back onto the original frame
        int overlay_x = std::max(0, x_start);
        int overlay_y = std::max(0, y_start);

        int overlay_width = std::min(resized_frame.cols, bgr_frame.cols - overlay_x);
        int overlay_height = std::min(resized_frame.rows, bgr_frame.rows - overlay_y);

        if (overlay_width > 0 && overlay_height > 0) {
            resized_frame(cv::Rect(0, 0, overlay_width, overlay_height))
                .copyTo(bgr_frame(cv::Rect(overlay_x, overlay_y, overlay_width, overlay_height)));
        }

        // Convert BGR back to RGBA
        cv::cvtColor(bgr_frame, rgba_frame, cv::COLOR_BGR2RGBA);

        // Copy the modified RGBA frame back to the buffer
        memcpy(inmap.data, rgba_frame.data, rgba_frame.total() * rgba_frame.elemSize());
    }

    gst_buffer_unmap(buf, &inmap);
    return GST_PAD_PROBE_OK;
}

  static gboolean
  bus_call (GstBus * bus, GstMessage * msg, gpointer data)
  {
    GMainLoop *loop = (GMainLoop *) data;
    switch (GST_MESSAGE_TYPE (msg)) {
      case GST_MESSAGE_EOS:
        g_print ("End of stream\n");
        g_main_loop_quit (loop);
        break;
      case GST_MESSAGE_WARNING:
      {
        gchar *debug;
        GError *error;
        gst_message_parse_warning (msg, &error, &debug);
        g_printerr ("WARNING from element %s: %s\n",
            GST_OBJECT_NAME (msg->src), error->message);
        if (debug)
          g_printerr ("Warning details: %s\n", debug);
        g_free (debug);
        g_error_free (error);
        break;
      }
      case GST_MESSAGE_ERROR:
      {
        gchar *debug;
        GError *error;
        gst_message_parse_error (msg, &error, &debug);
        g_printerr ("ERROR from element %s: %s\n",
            GST_OBJECT_NAME (msg->src), error->message);
        if (debug)
          g_printerr ("Error details: %s\n", debug);
        g_free (debug);
        g_error_free (error);
        g_main_loop_quit (loop);
        break;
      }
      case GST_MESSAGE_ELEMENT:
      {
        if (gst_nvmessage_is_stream_eos (msg)) {
          guint stream_id;
          if (gst_nvmessage_parse_stream_eos (msg, &stream_id)) {
            g_print ("Got EOS from stream %d\n", stream_id);
          }
        }
        break;
      }
      default:
        break;
    }
    return TRUE;
  }
  
  static void
  cb_newpad (GstElement * decodebin, GstPad * decoder_src_pad, gpointer data)
  {
    GstCaps *caps = gst_pad_get_current_caps (decoder_src_pad);
    if (!caps) {
      caps = gst_pad_query_caps (decoder_src_pad, NULL);
    }
    const GstStructure *str = gst_caps_get_structure (caps, 0);
    const gchar *name = gst_structure_get_name (str);
    GstElement *source_bin = (GstElement *) data;
    GstCapsFeatures *features = gst_caps_get_features (caps, 0);
  
    /* Need to check if the pad created by the decodebin is for video and not
     * audio. */
    if (!strncmp (name, "video", 5)) {
      /* Link the decodebin pad only if decodebin has picked nvidia
       * decoder plugin nvdec_*. We do this by checking if the pad caps contain
       * NVMM memory features. */
      if (gst_caps_features_contains (features, GST_CAPS_FEATURES_NVMM)) {
        /* Get the source bin ghost pad */
        GstPad *bin_ghost_pad = gst_element_get_static_pad (source_bin, "src");
        if (!gst_ghost_pad_set_target (GST_GHOST_PAD (bin_ghost_pad),
                decoder_src_pad)) {
          g_printerr ("Failed to link decoder src pad to source bin ghost pad\n");
        }
        gst_object_unref (bin_ghost_pad);
      } else {
        g_printerr ("Error: Decodebin did not pick nvidia decoder plugin.\n");
      }
    }
  }
  
  static void
  decodebin_child_added (GstChildProxy * child_proxy, GObject * object,
      gchar * name, gpointer user_data)
  {
    g_print ("Decodebin child added: %s\n", name);
    if (g_strrstr (name, "decodebin") == name) {
      g_signal_connect (G_OBJECT (object), "child-added",
          G_CALLBACK (decodebin_child_added), user_data);
    }
    if (g_strrstr (name, "source") == name) {
          g_object_set(G_OBJECT(object),"drop-on-latency",true,NULL);
    }
  
  }
  
  static GstElement *
  create_source_bin (guint index, gchar * uri)
  {
    GstElement *bin = NULL, *uri_decode_bin = NULL;
    gchar bin_name[16] = { };
  
    g_snprintf (bin_name, 15, "source-bin-%02d", index);
    /* Create a source GstBin to abstract this bin's content from the rest of the
     * pipeline */
    bin = gst_bin_new (bin_name);
  
    /* Source element for reading from the uri.
     * We will use decodebin and let it figure out the container format of the
     * stream and the codec and plug the appropriate demux and decode plugins. */
    if (PERF_MODE) {
      uri_decode_bin = gst_element_factory_make ("nvurisrcbin", "uri-decode-bin");
      g_object_set (G_OBJECT (uri_decode_bin), "file-loop", TRUE, NULL);
      g_object_set (G_OBJECT (uri_decode_bin), "cudadec-memtype", 0, NULL);
    } else {
      uri_decode_bin = gst_element_factory_make ("uridecodebin", "uri-decode-bin");
    }
  
    if (!bin || !uri_decode_bin) {
      g_printerr ("One element in source bin could not be created.\n");
      return NULL;
    }
  
    /* We set the input uri to the source element */
    g_object_set (G_OBJECT (uri_decode_bin), "uri", uri, NULL);
  
    /* Connect to the "pad-added" signal of the decodebin which generates a
     * callback once a new pad for raw data has beed created by the decodebin */
    g_signal_connect (G_OBJECT (uri_decode_bin), "pad-added",
        G_CALLBACK (cb_newpad), bin);
    g_signal_connect (G_OBJECT (uri_decode_bin), "child-added",
        G_CALLBACK (decodebin_child_added), bin);
  
    gst_bin_add (GST_BIN (bin), uri_decode_bin);
  
    /* We need to create a ghost pad for the source bin which will act as a proxy
     * for the video decoder src pad. The ghost pad will not have a target right
     * now. Once the decode bin creates the video decoder and generates the
     * cb_newpad callback, we will set the ghost pad target to the video decoder
     * src pad. */
    if (!gst_element_add_pad (bin, gst_ghost_pad_new_no_target ("src",
                GST_PAD_SRC))) {
      g_printerr ("Failed to add ghost pad in source bin\n");
      return NULL;
    }
  
    return bin;
  }

int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *streammux = NULL, *sink = NULL, *capsfilter = NULL,
      *nvvidconv = NULL;
  GstBus *bus = NULL;
  guint bus_watch_id;
  guint i = 0, num_sources = 0;
  GstPad *nvvidconv_src_pad = NULL;
  gboolean yaml_config = FALSE;
  NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;
  PERF_MODE = g_getenv("NVDS_TEST3_PERF_MODE") &&
      !g_strcmp0(g_getenv("NVDS_TEST3_PERF_MODE"), "1");

int current_device = -1;
cudaGetDevice(&current_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);

/* Check input arguments */
if (argc < 2) {
  g_printerr ("Usage: %s <yml file>\n", argv[0]);
  g_printerr ("OR: %s <uri1> [uri2] ... [uriN] \n", argv[0]);
  return -1;
}

/* Standard GStreamer initialization */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);

/* Parse inference plugin type */
yaml_config = (g_str_has_suffix (argv[1], ".yml") ||
        g_str_has_suffix (argv[1], ".yaml"));

if (yaml_config) {
  RETURN_ON_PARSER_ERROR(nvds_parse_gie_type(&pgie_type, argv[1],
              "primary-gie"));
}

/* Create gstreamer elements */
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new ("dstest3-pipeline");

/* Create nvstreammux instance to form batches from one or more sources. */
streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

if (!pipeline || !streammux) {
  g_printerr ("One element could not be created. Exiting.\n");
  return -1;
}
gst_bin_add (GST_BIN (pipeline), streammux);

GList *src_list = NULL, *src_list_head = NULL;

if (yaml_config) {

  RETURN_ON_PARSER_ERROR(nvds_parse_source_list(&src_list, argv[1], "source-list"));
  /* Keep the list head so it can be freed later; src_list is advanced below. */
  src_list_head = src_list;

  GList *temp = src_list;
  while (temp) {
    num_sources++;
    temp = temp->next;
  }
} else {
    num_sources = argc - 1;
}

for (i = 0; i < num_sources; i++) {
  GstPad *sinkpad, *srcpad;
  gchar pad_name[16] = { };

  GstElement *source_bin= NULL;
  if (g_str_has_suffix (argv[1], ".yml") || g_str_has_suffix (argv[1], ".yaml")) {
    g_print("Now playing : %s\n",(char*)(src_list)->data);
    source_bin = create_source_bin (i, (char*)(src_list)->data);
  } else {
    source_bin = create_source_bin (i, argv[i + 1]);
  }
  if (!source_bin) {
    g_printerr ("Failed to create source bin. Exiting.\n");
    return -1;
  }

  gst_bin_add (GST_BIN (pipeline), source_bin);

  g_snprintf (pad_name, 15, "sink_%u", i);
  sinkpad = gst_element_request_pad_simple (streammux, pad_name);
  if (!sinkpad) {
    g_printerr ("Streammux request sink pad failed. Exiting.\n");
    return -1;
  }

  srcpad = gst_element_get_static_pad (source_bin, "src");
  if (!srcpad) {
    g_printerr ("Failed to get src pad of source bin. Exiting.\n");
    return -1;
  }

  if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
    g_printerr ("Failed to link source bin to stream muxer. Exiting.\n");
    return -1;
  }

  gst_object_unref (srcpad);
  gst_object_unref (sinkpad);

  if (yaml_config) {
    src_list = src_list->next;
  }
}

if (yaml_config) {
  g_list_free(src_list_head);
}


/* Use nvvideoconvert to convert from NV12 to RGBA for the CPU buffer probe */
nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
capsfilter = gst_element_factory_make("capsfilter", "filter");
g_object_set(G_OBJECT(capsfilter), 
          "caps", 
          gst_caps_from_string("video/x-raw, format=RGBA"), 
          NULL);

if (PERF_MODE) {
  sink = gst_element_factory_make ("fakesink", "nvvideo-renderer");
} else {
  /* Finally render the output */
  if (prop.integrated) {
    sink = gst_element_factory_make ("nv3dsink", "nv3d-sink");
  } else {
#ifdef __aarch64__
    sink = gst_element_factory_make ("nv3dsink", "nvvideo-renderer");
#else
    sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
#endif
  }
}

if (!nvvidconv || !capsfilter || !sink) {
  g_printerr ("One element could not be created. Exiting.\n");
  return -1;
}

if (yaml_config) {

  RETURN_ON_PARSER_ERROR(nvds_parse_streammux(streammux, argv[1],"streammux"));

  if (PERF_MODE) {
    RETURN_ON_PARSER_ERROR(nvds_parse_fake_sink (sink, argv[1], "sink"));
  } else if (prop.integrated) {
    RETURN_ON_PARSER_ERROR(nvds_parse_3d_sink(sink, argv[1], "sink"));
  } else {
#ifdef __aarch64__
    RETURN_ON_PARSER_ERROR(nvds_parse_3d_sink(sink, argv[1], "sink"));
#else
    RETURN_ON_PARSER_ERROR(nvds_parse_egl_sink(sink, argv[1], "sink"));
#endif
  }
}
else {

  g_object_set (G_OBJECT (streammux), "batch-size", num_sources, NULL);

  g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
      MUXER_OUTPUT_HEIGHT,
      "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

  /* Configure the nvinfer element using the nvinfer config file. */

 


  g_object_set (G_OBJECT (sink), "qos", 0, NULL);

}

if (PERF_MODE) {
    if(prop.integrated) {
        g_object_set (G_OBJECT (streammux), "nvbuf-memory-type", 4, NULL);
    } else {
        g_object_set (G_OBJECT (streammux), "nvbuf-memory-type", 2, NULL);
    }
}

/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);

/* Set up the pipeline */
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline), nvvidconv, capsfilter, sink, NULL);
/* we link the elements together:
 * nvstreammux -> nvvidconv -> capsfilter -> video-renderer */
if (!gst_element_link_many (streammux, nvvidconv, capsfilter, sink, NULL)) {
  g_printerr ("Elements could not be linked. Exiting.\n");
  return -1;
}

/* Set the pipeline to "playing" state */
if (yaml_config) {
  g_print ("Using file: %s\n", argv[1]);
}
else {
  g_print ("Now playing:");
  for (i = 0; i < num_sources; i++) {
    g_print (" %s,", argv[i + 1]);
  }
  g_print ("\n");
}

/* Add the buffer probe on nvvidconv's src pad, where frames are already RGBA. */
nvvidconv_src_pad = gst_element_get_static_pad(nvvidconv, "src");
if (nvvidconv_src_pad) {
    gst_pad_add_probe(nvvidconv_src_pad, GST_PAD_PROBE_TYPE_BUFFER, nvvidconv_src_pad_buffer_probe, NULL, NULL);
    gst_object_unref(nvvidconv_src_pad);
}
gst_element_set_state (pipeline, GST_STATE_PLAYING);

/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);

/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}

Could you try setting the streammux width and height to the resolution of your video?
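
For example, a minimal sketch against the non-YAML path of your second program (1280×720 is an assumption here; substitute your video's actual resolution):

g_object_set (G_OBJECT (streammux), "batch-size", num_sources, NULL);
/* Set the muxer output to the input's native resolution so the frames the
 * probe maps have exactly the dimensions the cv::Mat wrapper expects. */
g_object_set (G_OBJECT (streammux), "width", 1280, "height", 720,
    "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);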

Thank you. It is corrected when I set the width and height to the resolution of my input video.
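
For anyone hitting the same artifacts: a more robust alternative is to stop relying on frame_meta->source_frame_width/height and instead read the negotiated resolution of the buffer from the probe pad's caps. A minimal sketch using standard GStreamer calls (not from the code above; the mapping and OpenCV steps stay as before):

static GstPadProbeReturn nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    gint width = 0, height = 0;
    /* The current caps on this pad describe the buffer actually being
     * pushed, whatever the original source resolution was. */
    GstCaps *caps = gst_pad_get_current_caps(pad);
    if (caps) {
        const GstStructure *s = gst_caps_get_structure(caps, 0);
        gst_structure_get_int(s, "width", &width);
        gst_structure_get_int(s, "height", &height);
        gst_caps_unref(caps);
    }
    if (width <= 0 || height <= 0)
        return GST_PAD_PROBE_OK;

    /* ... map the buffer and build the cv::Mat with these width/height
     * values instead of frame_meta->source_frame_width/height ... */
    return GST_PAD_PROBE_OK;
}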
