• Hardware Platform: Jetson Orin NX
• DeepStream Version: 7.0
I am encountering an issue when processing thermal camera video with my DeepStream-based application. RGB video is processed and rendered correctly, but when the input is thermal (IR) camera video, the rendered output is visually corrupted.
I have attached my code for reference. In brief, the pipeline is filesrc → h264parse → nvv4l2decoder → nvstreammux → nvdsosd → nvvideoconvert → capsfilter (RGBA) → sink, with a buffer probe on nvvideoconvert's src pad that crops a 60x60 region, enlarges it 2x, denoises and sharpens it, and overlays it back onto the frame. Could you please help me identify what might be causing this issue and how to correct it?
#include <gst/gst.h>
#include <opencv2/opencv.hpp>
#include <cuda_runtime.h>
#include "gstnvdsmeta.h"
#include "nvdsmeta.h"
static GstPadProbeReturn nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
    if (!buf) {
        g_print("No buffer found.\n");
        return GST_PAD_PROBE_OK;
    }
    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READWRITE)) {
        g_print("Failed to map buffer.\n");
        return GST_PAD_PROBE_OK;
    }
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    if (!batch_meta) {
        g_print("No batch meta found.\n");
        gst_buffer_unmap(buf, &inmap);
        return GST_PAD_PROBE_OK;
    }
    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
        guint width = frame_meta->source_frame_width;
        guint height = frame_meta->source_frame_height;
        if (width == 0 || height == 0) {
            g_print("Invalid frame dimensions: %u x %u\n", width, height);
            continue;
        }
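        // Added sanity check (my assumption: the probe sees tightly packed RGBA,
        // stride == width * 4). Note that source_frame_width/height describe the
        // original source, which may differ from the nvstreammux output resolution;
        // if this check fires, the cv::Mat wrap below cannot be trusted.
        if (inmap.size < (gsize)width * height * 4) {
            g_print("Mapped size %" G_GSIZE_FORMAT " < expected %u bytes, skipping frame.\n",
                    inmap.size, width * height * 4);
            continue;
        }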
        // Convert RGBA to BGR for OpenCV processing
        cv::Mat rgba_frame(height, width, CV_8UC4, inmap.data);
        cv::Mat bgr_frame;
        cv::cvtColor(rgba_frame, bgr_frame, cv::COLOR_RGBA2BGR);
        // Define cropping parameters: a 60x60 window centred on (300, 300),
        // clamped to the frame bounds
        int x = 300, y = 300;
        int crop_width = 60, crop_height = 60;
        int x_start = std::max(0, x - crop_width / 2);
        int y_start = std::max(0, y - crop_height / 2);
        int x_end = std::min((int)width, x + crop_width / 2);
        int y_end = std::min((int)height, y + crop_height / 2);
        int crop_width_actual = std::min(x_end - x_start, crop_width);
        int crop_height_actual = std::min(y_end - y_start, crop_height);
        // Perform the cropping operation
        cv::Rect crop_roi(x_start, y_start, crop_width_actual, crop_height_actual);
        cv::Mat cropped_frame = bgr_frame(crop_roi);
        // Resize the cropped frame to 2x with bicubic interpolation
        cv::Mat resized_frame;
        cv::resize(cropped_frame, resized_frame, cv::Size(cropped_frame.cols * 2, cropped_frame.rows * 2), 0, 0, cv::INTER_CUBIC);
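        // Non-local means denoising: filter strength h = 7, 7x7 template window,
        // 21x21 search window (OpenCV's defaults are 3, 7, 21)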
        cv::fastNlMeansDenoising(resized_frame, resized_frame, 7, 7, 21);
        // Apply a 3x3 sharpening kernel
        cv::Mat sharpening_kernel = (cv::Mat_<float>(3, 3) <<
             0, -1,  0,
            -1,  5, -1,
             0, -1,  0);
        cv::filter2D(resized_frame, resized_frame, -1, sharpening_kernel);
        // Draw a red boundary around the resized frame
        cv::rectangle(resized_frame, cv::Point(0, 0), cv::Point(resized_frame.cols - 1, resized_frame.rows - 1), cv::Scalar(0, 0, 255), 4);
        // Overlay the resized frame back onto the original frame, clipped to the frame bounds
        int overlay_x = x_start;
        int overlay_y = y_start;
        int overlay_width = std::min(resized_frame.cols, bgr_frame.cols - overlay_x);
        int overlay_height = std::min(resized_frame.rows, bgr_frame.rows - overlay_y);
        if (overlay_width > 0 && overlay_height > 0) {
            resized_frame(cv::Rect(0, 0, overlay_width, overlay_height))
                .copyTo(bgr_frame(cv::Rect(overlay_x, overlay_y, overlay_width, overlay_height)));
        }
        // Convert BGR back to RGBA, writing directly into the mapped buffer:
        // rgba_frame still wraps inmap.data with matching size/type, so cvtColor
        // fills it in place and no extra memcpy is needed
        cv::cvtColor(bgr_frame, rgba_frame, cv::COLOR_BGR2RGBA);
    }
    gst_buffer_unmap(buf, &inmap);
    return GST_PAD_PROBE_OK;
}
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
    GMainLoop *loop = (GMainLoop *) data;
    switch (GST_MESSAGE_TYPE (msg)) {
        case GST_MESSAGE_EOS:
            g_print ("End of stream\n");
            g_main_loop_quit (loop);
            break;
        case GST_MESSAGE_ERROR:{
            gchar *debug;
            GError *error;
            gst_message_parse_error (msg, &error, &debug);
            g_printerr ("ERROR from element %s: %s\n",
                GST_OBJECT_NAME (msg->src), error->message);
            if (debug)
                g_printerr ("Error details: %s\n", debug);
            g_free (debug);
            g_error_free (error);
            g_main_loop_quit (loop);
            break;
        }
        default:
            break;
    }
    return TRUE;
}
int main(int argc, char *argv[]) {
    GMainLoop *loop = NULL;
    GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder = NULL,
        *streammux = NULL, *sink = NULL, *nvvidconv = NULL, *nvosd = NULL, *capsfilter = NULL;
    GstBus *bus = NULL;
    guint bus_watch_id;
    GstPad *nvvidconv_src_pad = NULL;
    gboolean yaml_config = FALSE;
    //NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;
    int current_device = -1;
    cudaGetDevice(&current_device);
    struct cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, current_device);
    if (argc != 2) {
        g_printerr("Usage: %s <yml file>\n", argv[0]);
        g_printerr("OR: %s <H264 filename>\n", argv[0]);
        return -1;
    }
    gst_init(&argc, &argv);
    loop = g_main_loop_new(NULL, FALSE);
    yaml_config = (g_str_has_suffix(argv[1], ".yml") ||
                   g_str_has_suffix(argv[1], ".yaml"));
    pipeline = gst_pipeline_new("dstest1-pipeline");
    source = gst_element_factory_make("filesrc", "file-source");
    h264parser = gst_element_factory_make("h264parse", "h264-parser");
    decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
    streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
    nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
    nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
    capsfilter = gst_element_factory_make("capsfilter", "filter");
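    // This caps filter makes nvvideoconvert output RGBA in system memory
    // (no memory:NVMM feature), which is why the probe can gst_buffer_map()
    // the frames on the CPU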
    g_object_set(G_OBJECT(capsfilter),
        "caps",
        gst_caps_from_string("video/x-raw, format=RGBA"),
        NULL);
    if (prop.integrated) {
        sink = gst_element_factory_make("nveglglessink", "nv3d-sink");
    } else {
        sink = gst_element_factory_make("nveglglessink", "nvvideo-renderer");
    }
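    // Note: both branches currently create nveglglessink; the stock
    // deepstream-test1 sample picks nv3dsink when prop.integrated is set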
    if (!source || !h264parser || !decoder || !streammux || !nvosd || !nvvidconv || !sink || !capsfilter) {
        g_printerr("One element could not be created. Exiting.\n");
        return -1;
    }
    g_object_set(G_OBJECT(source), "location", argv[1], NULL);
    g_object_set(G_OBJECT(streammux), "batch-size", 1, NULL);
    g_object_set(G_OBJECT(streammux), "width", 1280, "height", 720, "batched-push-timeout", 4000000, NULL);
    bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
    bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
    gst_object_unref(bus);
    gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, streammux, nvosd, nvvidconv, capsfilter, sink, NULL);
    GstPad *sinkpad, *srcpad;
    gchar pad_name_sink[16] = "sink_0";
    gchar pad_name_src[16] = "src";
    sinkpad = gst_element_request_pad_simple(streammux, pad_name_sink);
    if (!sinkpad) {
        g_printerr("Streammux request sink pad failed. Exiting.\n");
        return -1;
    }
    srcpad = gst_element_get_static_pad(decoder, pad_name_src);
    if (!srcpad) {
        g_printerr("Decoder static src pad could not be retrieved. Exiting.\n");
        return -1;
    }
    if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) {
        g_printerr("Failed to link decoder to stream muxer. Exiting.\n");
        return -1;
    }
    gst_object_unref(sinkpad);
    gst_object_unref(srcpad);
    if (!gst_element_link_many(source, h264parser, decoder, NULL)) {
        g_printerr("Elements could not be linked: 1. Exiting.\n");
        return -1;
    }
    // Adjusted pipeline linking order with probe at nvvidconv
    if (!gst_element_link_many(streammux, nvosd, nvvidconv, capsfilter, sink, NULL)) {
        g_printerr("Elements could not be linked: 2. Exiting.\n");
        return -1;
    }
    // Add probe to nvvidconv's source pad
    nvvidconv_src_pad = gst_element_get_static_pad(nvvidconv, "src");
    if (nvvidconv_src_pad) {
        gst_pad_add_probe(nvvidconv_src_pad, GST_PAD_PROBE_TYPE_BUFFER, nvvidconv_src_pad_buffer_probe, NULL, NULL);
        gst_object_unref(nvvidconv_src_pad);
    }
    gst_element_set_state(pipeline, GST_STATE_PLAYING);
    g_main_loop_run(loop);
    gst_element_set_state(pipeline, GST_STATE_NULL);
    gst_object_unref(GST_OBJECT(pipeline));
    g_source_remove(bus_watch_id);
    g_main_loop_unref(loop);
    return 0;
}
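In case it helps diagnose the difference between the two inputs, below is a minimal diagnostic sketch (not part of the attached code) for dumping the caps actually negotiated on the probe pad, so the RGB run can be compared against the thermal run. It uses only standard GStreamer video APIs (gst_pad_get_current_caps, gst_video_info_from_caps) and needs linking against gstreamer-video-1.0.
#include <gst/video/video.h>
// Call from the buffer probe (e.g. once on the first buffer) to print the
// width/height/format/stride the pad actually carries for a given input video
static void print_negotiated_caps(GstPad *pad) {
    GstCaps *caps = gst_pad_get_current_caps(pad);
    if (!caps) {
        g_print("Pad has no negotiated caps yet.\n");
        return;
    }
    GstVideoInfo vinfo;
    if (gst_video_info_from_caps(&vinfo, caps)) {
        g_print("Negotiated: %s %dx%d, plane 0 stride %d\n",
                GST_VIDEO_INFO_NAME(&vinfo),
                GST_VIDEO_INFO_WIDTH(&vinfo),
                GST_VIDEO_INFO_HEIGHT(&vinfo),
                GST_VIDEO_INFO_PLANE_STRIDE(&vinfo, 0));
    }
    gst_caps_unref(caps);
}
If the printed dimensions or stride differ from the frame meta's source_frame_width/height, that mismatch would explain a garbled cv::Mat wrap for one input but not the other.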
This is the output for RGB video; it renders clearly:
This is the output for thermal camera (IR) video; it does not render correctly:
Thank you for your assistance.
Abdul Manaf PV