Issue with Cropping and Saving a Specific Frame Area in .jpg Format

Dear Support Team,

**• Hardware Platform (NVIDIA ORIN NX)**
**• DeepStream 7.0**

I hope this message finds you well. I am encountering an issue while attempting to crop a frame from a video stream in my DeepStream 7.0 application, running on an NVIDIA Jetson Orin NX with CUDA 12.2.

My objective is to save a cropped part of the frame when I provide the x and y coordinates of a pixel. Specifically, I wish to crop a 60x60 pixel region around this point, considering 30 pixels above, below, left, and right of the given coordinates. I aim to save this 60x60 cropped frame as a .jpg image.
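
In OpenCV terms, the crop I am trying to compute is equivalent to this minimal sketch (clamped to the frame bounds; `frame` stands for the decoded BGR frame and the point is a placeholder):

// Minimal sketch of the intended crop: a 60x60 region centered on (x, y),
// clamped so the rectangle never leaves the frame.
int x = 200, y = 200;  // example point; supplied at runtime in practice
int x1 = std::max(0, x - 30);
int y1 = std::max(0, y - 30);
int x2 = std::min(frame.cols, x + 30);
int y2 = std::min(frame.rows, y + 30);
cv::Rect roi(x1, y1, x2 - x1, y2 - y1);  // may be smaller than 60x60 near the borders
cv::imwrite("crop.jpg", frame(roi).clone());  // clone() makes the ROI contiguous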

However, I am facing the following error message repeatedly:

Image dimensions: 1920x1080, Step size: 2048
Invalid step size: 2048. Skipping frame...

This error occurs even though the image dimensions are reported as 1920x1080, and I believe the issue lies in the step size (pitch) calculation or its handling within the cropping operation.

I have attached the relevant code and configuration details for reference. Could you please assist me in resolving this issue or provide any suggestions on how to correctly crop and save the image in the specified manner?

static int save_img = 0;
static int frame_count = 0;  // Counter to save multiple frames with unique filenames



static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
    cv::Mat in_mat;
    cv::Mat matBGR;

    GstBuffer *buf = (GstBuffer *)info->data;
    NvDsMetaList *l_frame = NULL;
    NvDsMetaList *l_user_meta = NULL;
    NvDsUserMeta *user_meta = NULL;
    NvDsInferSegmentationMeta *seg_meta_data = NULL;

    // Get original raw data
    GstMapInfo in_map_info;
    char *src_data = NULL;
    if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ)) {
        g_print("Error: Failed to map gst buffer\n");
        return GST_PAD_PROBE_OK;  // Do not unmap: the buffer was never mapped
    }

    NvBufSurface *surface = (NvBufSurface *)in_map_info.data;
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);

    for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);

        // Validate user meta and check if the frame contains segmentation meta
        for (l_user_meta = frame_meta->frame_user_meta_list; l_user_meta != NULL; l_user_meta = l_user_meta->next) {
            user_meta = (NvDsUserMeta *)(l_user_meta->data);
            if (user_meta && user_meta->base_meta.meta_type == NVDSINFER_SEGMENTATION_META) {
                seg_meta_data = (NvDsInferSegmentationMeta *)user_meta->user_meta_data;
            }
        }

        // Allocate memory for source data
        src_data = (char *)malloc(surface->surfaceList[frame_meta->batch_id].dataSize);
        if (src_data == NULL) {
            g_print("Error: failed to malloc src_data \n");
            continue;
        }

#ifdef PLATFORM_TEGRA
        // Tegra-specific mapping of NvBufSurface
        NvBufSurfaceMap(surface, -1, -1, NVBUF_MAP_READ);
        NvBufSurfacePlaneParams *pParams = &surface->surfaceList[frame_meta->batch_id].planeParams;
        unsigned int offset = 0;

        // Loop through each plane in the NvBufSurface and copy data to src_data
        for (unsigned int num_planes = 0; num_planes < pParams->num_planes; num_planes++) {
            if (num_planes > 0) 
                offset += pParams->height[num_planes - 1] * (pParams->bytesPerPix[num_planes - 1] * pParams->width[num_planes - 1]);

            for (unsigned int h = 0; h < pParams->height[num_planes]; h++) {
                memcpy((void *)(src_data + offset + h * pParams->bytesPerPix[num_planes] * pParams->width[num_planes]),
                    (void *)((char *)surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[num_planes] + h * pParams->pitch[num_planes]),
                    pParams->bytesPerPix[num_planes] * pParams->width[num_planes]);
            }
        }

        NvBufSurfaceSyncForDevice(surface, -1, -1);
        NvBufSurfaceUnMap(surface, -1, -1);
#else
        // Non-Tegra case: copy memory using CUDA
        cudaMemcpy((void *)src_data,
                (void *)surface->surfaceList[frame_meta->batch_id].dataPtr,
                surface->surfaceList[frame_meta->batch_id].dataSize,
                cudaMemcpyDeviceToHost);
#endif

        // If source data is not NULL, convert it to OpenCV Mat and save the image
        if (src_data != NULL) {
            // Get image dimensions and step size (pitch)
            int width = surface->surfaceList[frame_meta->batch_id].planeParams.width[0];
            int height = surface->surfaceList[frame_meta->batch_id].planeParams.height[0];
            int step = surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0];

            // Print the dimensions and step size for debugging
            g_print("Image dimensions: %dx%d, Step size: %d\n", width, height, step);

            // Validate the step size (pitch). Note that the pitch can legitimately
            // be larger than width * bytes-per-pixel because of row padding.
            if (step < width * 3) {
                g_print("Invalid step size: %d. Skipping frame...\n", step);
                free(src_data);  // Free allocated memory
                continue;
            }

            if (step > width * 3) {
                // Step size greater than width * 3 indicates row padding
                g_print("Step size is greater than expected, adjusting for padding...\n");
            }

            // Wrap the source data in an OpenCV Mat (RGBA, using the pitch as step)
            in_mat = cv::Mat(height, width, CV_8UC4, src_data, step);

            // Drop the alpha channel (RGBA -> RGB) before saving
            cv::cvtColor(in_mat, matBGR, cv::COLOR_RGBA2RGB);

            // Define the cropping region: (200, 200) as the center, with a margin of 30 pixels
            int x = 200, y = 200;
            int crop_x1 = std::max(0, x - 30);  // Ensure we don't go out of bounds
            int crop_y1 = std::max(0, y - 30);
            int crop_x2 = std::min(width, x + 30);  // Ensure we don't go out of bounds
            int crop_y2 = std::min(height, y + 30);

            // Crop the image (60x60 with (200, 200) at the center)
            cv::Rect crop_rect(crop_x1, crop_y1, crop_x2 - crop_x1, crop_y2 - crop_y1);
            cv::Mat cropped = matBGR(crop_rect);

            // Resize the cropped image to 60x60
            cv::Mat resized_cropped;
            cv::resize(cropped, resized_cropped, cv::Size(60, 60));

            // Save the cropped image as 'cropped_frame_XXX.jpg' where XXX is the frame count
            char filename[128];
            snprintf(filename, sizeof(filename), "cropped_frame_%03d.jpg", frame_count);
            cv::imwrite(filename, resized_cropped);
            frame_count++;  // Increment frame counter for the next frame

            // Free allocated memory
            free(src_data);
            src_data = NULL;
        }
    }

    gst_buffer_unmap(buf, &in_map_info);

    return GST_PAD_PROBE_OK;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}




int
main (int argc, char *argv[])
{
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL,
      *decoder = NULL, *streammux = NULL, *sink = NULL, *nvvidconv = NULL,
      *nvosd = NULL;

  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *osd_sink_pad = NULL;
  gboolean yaml_config = FALSE;
  NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  /* Check input arguments */
  if (argc != 2) {
    g_printerr ("Usage: %s <yml file>\n", argv[0]);
    g_printerr ("OR: %s <H264 filename>\n", argv[0]);
    return -1;
  }

  /* Standard GStreamer initialization */
  gst_init (&argc, &argv);
  loop = g_main_loop_new (NULL, FALSE);

  /* Parse inference plugin type */
  yaml_config = (g_str_has_suffix (argv[1], ".yml") ||
      g_str_has_suffix (argv[1], ".yaml"));

  /* Create gstreamer elements */
  /* Create Pipeline element that will form a connection of other elements */
  pipeline = gst_pipeline_new ("dstest1-pipeline");

  /* Source element for reading from the file */
  source = gst_element_factory_make ("filesrc", "file-source");

  /* Since the data format in the input file is elementary h264 stream,
   * we need a h264parser */
  h264parser = gst_element_factory_make ("h264parse", "h264-parser");

  /* Use nvdec_h264 for hardware accelerated decode on GPU */
  decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");

  /* Create nvstreammux instance to form batches from one or more sources. */
  streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* Use nvinfer or nvinferserver to run inferencing on decoder's output,
   * behaviour of inferencing is set through config file */

  /* Use convertor to convert from NV12 to RGBA as required by nvosd */
  nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");

  /* Create OSD to draw on the converted RGBA buffer */
  nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");

  /* Finally render the osd output */
  if (prop.integrated) {
    sink = gst_element_factory_make ("nv3dsink", "nv3d-sink");
  } else {
#ifdef __aarch64__
    sink = gst_element_factory_make ("nv3dsink", "nvvideo-renderer");
#else
    sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
#endif
  }

  if (!source || !h264parser || !decoder
      || !nvvidconv || !nvosd || !sink) {
    g_printerr ("One element could not be created. Exiting.\n");
    return -1;
  }

  /* we set the input filename to the source element */
  g_object_set (G_OBJECT (source), "location", argv[1], NULL);

  if (g_str_has_suffix (argv[1], ".h264")) {
    g_object_set (G_OBJECT (source), "location", argv[1], NULL);

    g_object_set (G_OBJECT (streammux), "batch-size", 1, NULL);

    g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
        MUXER_OUTPUT_HEIGHT,
        "batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);

    /* Set all the necessary properties of the nvinfer element,
     * the necessary ones are : */
  }

  /* we add a message handler */
  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
  bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
  gst_object_unref (bus);

  /* Set up the pipeline */
  /* we add all elements into the pipeline */
  gst_bin_add_many (GST_BIN (pipeline),
      source, h264parser, decoder, streammux,
      nvvidconv, nvosd, sink, NULL);
  g_print ("Added elements to bin\n");

  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";

  sinkpad = gst_element_request_pad_simple (streammux, pad_name_sink);
  if (!sinkpad) {
    g_printerr ("Streammux request sink pad failed. Exiting.\n");
    return -1;
  }

  srcpad = gst_element_get_static_pad (decoder, pad_name_src);
  if (!srcpad) {
    g_printerr ("Decoder request src pad failed. Exiting.\n");
    return -1;
  }

  if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
    g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
    return -1;
  }

  gst_object_unref (sinkpad);
  gst_object_unref (srcpad);

  /* we link the elements together */
  /* file-source -> h264-parser -> nvh264-decoder ->
   * pgie -> nvvidconv -> nvosd -> video-renderer */

  if (!gst_element_link_many (source, h264parser, decoder, NULL)) {
    g_printerr ("Elements could not be linked: 1. Exiting.\n");
    return -1;
  }

  if (!gst_element_link_many (streammux,
          nvvidconv, nvosd, sink, NULL)) {
    g_printerr ("Elements could not be linked: 2. Exiting.\n");
    return -1;
  }

  /* Let's add a probe to get informed of the metadata generated; we add the
   * probe to the sink pad of the osd element, since by that time the buffer
   * would have got all the metadata. */
  osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
  if (!osd_sink_pad) {
    g_print ("Unable to get sink pad\n");
  } else {
    gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
        osd_sink_pad_buffer_probe, NULL, NULL);
    gst_object_unref (osd_sink_pad);  /* Only unref a pad we actually got */
  }

  /* Set the pipeline to "playing" state */
  g_print ("Using file: %s\n", argv[1]);
  gst_element_set_state (pipeline, GST_STATE_PLAYING);

  /* Wait till pipeline encounters an error or EOS */
  g_print ("Running...\n");
  g_main_loop_run (loop);

  /* Out of the main loop, clean up nicely */
  g_print ("Returned, stopping playback\n");
  gst_element_set_state (pipeline, GST_STATE_NULL);
  g_print ("Deleting pipeline\n");
  gst_object_unref (GST_OBJECT (pipeline));
  g_source_remove (bus_watch_id);
  g_main_loop_unref (loop);
  return 0;
}

Thank you for your time and support.

Best regards,
Abdul Manaf PV

Hi @abdul.m

When you crop using the VIC there is a limitation on the minimum output resolution: the output must be at least 1/16 of the input resolution. For 1920x1080 the minimum output resolution would be 120x68.

You can either adjust your output resolution to keep using the accelerated cropping, or convert the image to regular memory and use videocrop for the cropping:

gst-launch-1.0 nvarguscamerasrc  ! 'video/x-raw(memory:NVMM), width=1920, height=1080' ! nvvidconv  ! 'video/x-raw, format=BGRx' ! videocrop top=30 left=30 bottom=990 right=1830 !...

But I want to use OpenCV in my C++ code; I want to try another method. I also want to resize the cropped frame and overlay it on the original frame, with a red box around the resized cropped part…

I think there’s an optimized way to achieve this using nvcompositor for cropping and overlaying, and nvdsosd for adding the red box. However, you will still encounter the resolution limitation I mentioned earlier because nvcompositor relies on the VIC.

We have used OpenCV with CUDA acceleration over NVMM memory. However, keep in mind that there are different types of NVMM: pitch linear and block linear. You must ensure you are using the correct type. Additionally, if DeepStream buffers are batched, you need to un-batch them using nvmultistreamtiler or nvstreamdemux before mapping the buffer for CUDA processing.
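
As a starting point, you can check which layout a given buffer actually uses before processing it. A minimal sketch, reusing the `surface` and `frame_meta` variables from your probe:

/* Sketch: verify the NvBufSurface layout before CPU/CUDA processing */
NvBufSurfaceParams *p = &surface->surfaceList[frame_meta->batch_id];
if (p->layout == NVBUF_LAYOUT_PITCH) {
    /* Pitch linear: rows are pitch-aligned, safe to walk via planeParams.pitch */
} else if (p->layout == NVBUF_LAYOUT_BLOCK_LINEAR) {
    /* Block linear: convert first (e.g. with nvvideoconvert) before direct access */
}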

Unfortunately, I can’t share the code for NVMM mapping as it is part of our proprietary out-of-the-box solution for CUDA processing on GStreamer, called GstCUDA.

My suggestion would be to use a slightly larger box and leverage the compositor, because that will be faster and put less load on the GPU.

If you just want to crop and save a specific area, you can refer to our deepstream\sources\apps\sample_apps\deepstream-image-meta-test sample.

You can add a bbox coordinate to the metadata and use nvds_obj_enc_process to save that region to a .jpg directly.
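
A rough sketch of that flow, adapted from deepstream-image-meta-test (check the field and function names against the nvds_obj_encode.h header shipped with your DeepStream version):

#include "nvds_obj_encode.h"

/* Created once, e.g. in main(); the argument is the GPU id in recent releases. */
NvDsObjEncCtxHandle obj_ctx = nvds_obj_enc_create_context (0);

/* Inside a probe: obj_meta->rect_params defines the region that gets encoded,
 * so set it to your 60x60 box before calling the encoder. */
NvDsObjEncUsrArgs enc_args = { 0 };
enc_args.saveImg = TRUE;        /* write the .jpg to disk */
enc_args.attachUsrMeta = FALSE;
enc_args.quality = 80;          /* JPEG quality */
nvds_obj_enc_process (obj_ctx, &enc_args, surface, obj_meta, frame_meta);
nvds_obj_enc_finish (obj_ctx);  /* flush pending encodes */

/* On teardown: */
nvds_obj_enc_destroy_context (obj_ctx);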

But I want to use OpenCV in my C++ code; I want to try another method. I also want to resize the cropped frame using bicubic interpolation and overlay it on the original frame, with a red box around the resized cropped part…

I am using only one input video, and its format is H.264, but I am using nvstreammux in the pipeline.

OK. So why do you need this conditional statement in your code?

if (step < width * 3) {
    g_print("Invalid step size: %d. Skipping frame...\n", step);
    free(src_data);  // Free allocated memory
    continue;
}

You just added an nvvideoconvert to your code, but you did not specify a capsfilter for the RGBA format. The color format may not be RGBA.
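
For example, something like this between nvvidconv and nvosd (a sketch; the element variable names follow your code):

/* Sketch: force RGBA out of nvvideoconvert so the probe actually sees RGBA */
GstElement *filter_rgba = gst_element_factory_make ("capsfilter", "filter-rgba");
GstCaps *caps_rgba =
    gst_caps_from_string ("video/x-raw(memory:NVMM), format=RGBA");
g_object_set (G_OBJECT (filter_rgba), "caps", caps_rgba, NULL);
gst_caps_unref (caps_rgba);

/* ...after gst_bin_add (GST_BIN (pipeline), filter_rgba)... */
if (!gst_element_link_many (streammux, nvvidconv, filter_rgba, nvosd, sink, NULL)) {
    g_printerr ("Elements could not be linked. Exiting.\n");
    return -1;
}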

Can you help me? Now I am looking to save each frame of the live stream using cv::imwrite, but I am getting a noisy frame like this. If I can solve this issue, I can solve the rest of the things.


This is the code I used:

#define MUXER_OUTPUT_WIDTH 1280
#define MUXER_OUTPUT_HEIGHT 720
#define MUXER_BATCH_TIMEOUT_USEC 40000

static GstPadProbeReturn
nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    // Access the buffer from the probe info
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
    if (!buf) {
        return GST_PAD_PROBE_PASS;
    }

    // Map the buffer to access its data
    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READ)) {
        return GST_PAD_PROBE_PASS;
    }

    // Retrieve the frame's metadata
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    if (!batch_meta) {
        gst_buffer_unmap(buf, &inmap);
        return GST_PAD_PROBE_PASS;
    }

    // Iterate over each frame in the batch
    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
        guint width = frame_meta->source_frame_width;
        guint height = frame_meta->source_frame_height;

        // Ensure width and height are valid
        if (width == 0 || height == 0) {
            continue;
        }

        // Debug print to check dimensions and format
        g_print("Frame width: %d, height: %d\n", width, height);
        g_print("YUV Format: NV12\n");

        // Convert the buffer data to an OpenCV Mat
        cv::Mat in_mat(height, width, CV_8UC1, inmap.data);
        cv::Mat bgr_frame;
        
        // Check if it's NV12 format and convert
        cv::cvtColor(in_mat, bgr_frame, cv::COLOR_YUV2BGR_NV12);

        // Save the frame as a JPEG file
        cv::imwrite("frame.jpg", bgr_frame);
    }

    // Unmap the buffer
    gst_buffer_unmap(buf, &inmap);

    return GST_PAD_PROBE_PASS;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}

int main(int argc, char *argv[]) {
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder = NULL,
      *streammux = NULL, *sink = NULL, *nvvidconv = NULL, *nvosd = NULL;

  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *osd_sink_pad = NULL;
  gboolean yaml_config = FALSE;
  NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  if (argc != 2) {
      g_printerr("Usage: %s <yml file>\n", argv[0]);
      g_printerr("OR: %s <H264 filename>\n", argv[0]);
      return -1;
  }

  gst_init(&argc, &argv);
  loop = g_main_loop_new(NULL, FALSE);

  yaml_config = (g_str_has_suffix(argv[1], ".yml") ||
                 g_str_has_suffix(argv[1], ".yaml"));

  pipeline = gst_pipeline_new("dstest1-pipeline");

  source = gst_element_factory_make("filesrc", "file-source");
  h264parser = gst_element_factory_make("h264parse", "h264-parser");
  decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
  streammux = gst_element_factory_make("nvstreammux", "stream-muxer");

  if (!pipeline || !streammux) {
      g_printerr("One element could not be created. Exiting.\n");
      return -1;
  }

  nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
  nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");

  if (prop.integrated) {
      sink = gst_element_factory_make("nv3dsink", "nv3d-sink");
  } else {
      sink = gst_element_factory_make("nv3dsink", "nvvideo-renderer");
  }

  if (!source || !h264parser || !decoder || !nvvidconv || !nvosd || !sink) {
      g_printerr("One element could not be created. Exiting.\n");
      return -1;
  }

  g_object_set(G_OBJECT(source), "location", argv[1], NULL);

  g_object_set(G_OBJECT(streammux), "batch-size", 1, NULL);
  g_object_set(G_OBJECT(streammux), "width", 1280, "height", 720, "batched-push-timeout", 4000000, NULL);

  bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
  bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
  gst_object_unref(bus);

  gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, streammux, nvvidconv, nvosd, sink, NULL);

  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";

  sinkpad = gst_element_request_pad_simple(streammux, pad_name_sink);
  if (!sinkpad) {
      g_printerr("Streammux request sink pad failed. Exiting.\n");
      return -1;
  }

  srcpad = gst_element_get_static_pad(decoder, pad_name_src);
  if (!srcpad) {
      g_printerr("Decoder request src pad failed. Exiting.\n");
      return -1;
  }

  if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr("Failed to link decoder to stream muxer. Exiting.\n");
      return -1;
  }

  gst_object_unref(sinkpad);
  gst_object_unref(srcpad);

  if (!gst_element_link_many(source, h264parser, decoder, NULL)) {
      g_printerr("Elements could not be linked: 1. Exiting.\n");
      return -1;
  }

  if (!gst_element_link_many(streammux, nvvidconv, nvosd, sink, NULL)) {
      g_printerr("Elements could not be linked: 2. Exiting.\n");
      return -1;
  }

  osd_sink_pad = gst_element_get_static_pad(nvosd, "sink");
  if (osd_sink_pad) {
      gst_pad_add_probe(osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, nvvidconv_src_pad_buffer_probe, NULL, NULL);
  }

  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  g_main_loop_run(loop);

  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(GST_OBJECT(pipeline));
  g_source_remove(bus_watch_id);
  g_main_loop_unref(loop);

  return 0;

}

Can you try adding an nvvidconv and capsfilter after nvdsosd and moving the probe to that nvvidconv?

... ! nvdsosd ! nvvidconv ! capsfilter caps="video/x-raw" ! ...
                          ^------------- probe here

That will copy the NVMM memory to regular CPU memory and remove batching, simplifying the mapping process for use in OpenCV.
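
Once the buffer is in system memory, the mapping should use the negotiated stride rather than assuming stride == width. A minimal sketch, assuming the capsfilter negotiated NV12 with the chroma plane stored contiguously after the luma plane:

#include <gst/video/video.h>

GstCaps *caps = gst_pad_get_current_caps (pad);
GstVideoInfo vinfo;
if (caps && gst_video_info_from_caps (&vinfo, caps)) {
    GstMapInfo map;
    if (gst_buffer_map (buf, &map, GST_MAP_READ)) {
        int width  = GST_VIDEO_INFO_WIDTH (&vinfo);
        int height = GST_VIDEO_INFO_HEIGHT (&vinfo);
        int stride = GST_VIDEO_INFO_PLANE_STRIDE (&vinfo, 0);
        /* NV12: luma plane plus interleaved chroma, height * 3 / 2 rows.
         * Check GST_VIDEO_INFO_PLANE_OFFSET if the planes are not contiguous. */
        cv::Mat nv12 (height * 3 / 2, width, CV_8UC1, map.data, stride);
        cv::Mat bgr;
        cv::cvtColor (nv12, bgr, cv::COLOR_YUV2BGR_NV12);
        gst_buffer_unmap (buf, &map);
    }
}
if (caps)
    gst_caps_unref (caps);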

My code worked, but the output is not clear. Here I am attaching my output and code:

static GstPadProbeReturn
nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    // Access the buffer from the probe info
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);
    if (!buf) {
        return GST_PAD_PROBE_PASS;
    }

    // Map the buffer to access its data
    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READ)) {
        return GST_PAD_PROBE_PASS;
    }

    // Retrieve the frame's metadata
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    if (!batch_meta) {
        gst_buffer_unmap(buf, &inmap);
        return GST_PAD_PROBE_PASS;
    }

    // Iterate over each frame in the batch
    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
        guint width = frame_meta->source_frame_width;
        guint height = frame_meta->source_frame_height;

        // Ensure width and height are valid
        if (width == 0 || height == 0) {
            continue;
        }

        // Debug print to check dimensions and format
        g_print("Frame width: %d, height: %d\n", width, height);
        g_print("YUV Format: NV12\n");

        // Convert the buffer data to an OpenCV Mat
        cv::Mat in_mat(height, width, CV_8UC1, inmap.data);
        cv::Mat bgr_frame;

        // Check if it's NV12 format and convert
        cv::cvtColor(in_mat, bgr_frame, cv::COLOR_YUV2BGR_NV12);

        // Save the frame as a JPEG file
        cv::imwrite("frame.jpg", bgr_frame);
    }

    // Unmap the buffer
    gst_buffer_unmap(buf, &inmap);

    return GST_PAD_PROBE_PASS;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}

int main(int argc, char *argv[]) {
  GMainLoop *loop = NULL;
  GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder = NULL,
      *streammux = NULL, *sink = NULL, *nvvidconv = NULL, *nvosd = NULL, *capsfilter = NULL;

  GstBus *bus = NULL;
  guint bus_watch_id;
  GstPad *nvvidconv_sink_pad = NULL;
  gboolean yaml_config = FALSE;
  NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;

  int current_device = -1;
  cudaGetDevice(&current_device);
  struct cudaDeviceProp prop;
  cudaGetDeviceProperties(&prop, current_device);

  if (argc != 2) {
      g_printerr("Usage: %s <yml file>\n", argv[0]);
      g_printerr("OR: %s <H264 filename>\n", argv[0]);
      return -1;
  }

  gst_init(&argc, &argv);
  loop = g_main_loop_new(NULL, FALSE);

  yaml_config = (g_str_has_suffix(argv[1], ".yml") ||
                 g_str_has_suffix(argv[1], ".yaml"));

  pipeline = gst_pipeline_new("dstest1-pipeline");

  source = gst_element_factory_make("filesrc", "file-source");
  h264parser = gst_element_factory_make("h264parse", "h264-parser");
  decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
  streammux = gst_element_factory_make("nvstreammux", "stream-muxer");

  nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
  nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
  capsfilter = gst_element_factory_make("capsfilter", "filter");
  g_object_set(G_OBJECT(capsfilter), "caps", gst_caps_from_string("video/x-raw"), NULL);

  if (prop.integrated) {
      sink = gst_element_factory_make("nv3dsink", "nv3d-sink");
  } else {
      sink = gst_element_factory_make("nv3dsink", "nvvideo-renderer");
  }

  if (!source || !h264parser || !decoder || !nvvidconv || !nvosd || !sink || !capsfilter) {
      g_printerr("One element could not be created. Exiting.\n");
      return -1;
  }

  g_object_set(G_OBJECT(source), "location", argv[1], NULL);
  g_object_set(G_OBJECT(streammux), "batch-size", 1, NULL);
  g_object_set(G_OBJECT(streammux), "width", 1280, "height", 720, "batched-push-timeout", 4000000, NULL);

  bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
  bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
  gst_object_unref(bus);

  gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, streammux, nvosd, nvvidconv, capsfilter, sink, NULL);

  GstPad *sinkpad, *srcpad;
  gchar pad_name_sink[16] = "sink_0";
  gchar pad_name_src[16] = "src";

  sinkpad = gst_element_request_pad_simple(streammux, pad_name_sink);
  if (!sinkpad) {
      g_printerr("Streammux request sink pad failed. Exiting.\n");
      return -1;
  }

  srcpad = gst_element_get_static_pad(decoder, pad_name_src);
  if (!srcpad) {
      g_printerr("Decoder request src pad failed. Exiting.\n");
      return -1;
  }

  if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) {
      g_printerr("Failed to link decoder to stream muxer. Exiting.\n");
      return -1;
  }

  gst_object_unref(sinkpad);
  gst_object_unref(srcpad);

  if (!gst_element_link_many(source, h264parser, decoder, NULL)) {
      g_printerr("Elements could not be linked: 1. Exiting.\n");
      return -1;
  }

  // Adjusted pipeline linking order with probe at nvvidconv
  if (!gst_element_link_many(streammux, nvosd, nvvidconv, capsfilter, sink, NULL)) {
      g_printerr("Elements could not be linked: 2. Exiting.\n");
      return -1;
  }

  // Add probe to nvvidconv's source pad
  nvvidconv_sink_pad = gst_element_get_static_pad(nvvidconv, "src");
  if (nvvidconv_sink_pad) {
      gst_pad_add_probe(nvvidconv_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, nvvidconv_src_pad_buffer_probe, NULL, NULL);
      gst_object_unref(nvvidconv_sink_pad);
  }

  gst_element_set_state(pipeline, GST_STATE_PLAYING);
  g_main_loop_run(loop);

  gst_element_set_state(pipeline, GST_STATE_NULL);
  gst_object_unref(GST_OBJECT(pipeline));
  g_source_remove(bus_watch_id);
  g_main_loop_unref(loop);

  return 0;
}

Could you attach your whole source code and Makefile?

Hello,

I am able to save the cropped frame after resizing it using bicubic interpolation. After that, I apply denoising and sharpening filters, and I add a red box around the resized frame. I then save this processed frame in a folder named “photos,” with each frame saved individually.

However, I am facing an issue where the resized frame with the red box overlay is not visible on the live streaming display. It seems like the overlay is not being displayed, even though it is correctly saved in the “photos” folder.

I have attached the relevant code along with the output showing the saved frames in the “photos” folder for reference.

Could you please assist me in resolving this issue?

Thank you.


This is the output of live streaming:

Here I am attaching a zip file of my DeepStream code, which includes my cpp code, Makefile, and the photos folder:
deepstream_code.zip (321.0 KB)

Our nvdsosd plugin is used to draw the bbox on the image. So if you want to add a bbox, please add it to the metadata before the nvdsosd plugin.
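
A minimal sketch of what that looks like in a probe upstream of nvdsosd (the box coordinates are placeholders for your computed crop region):

/* Sketch: attach a red rectangle as display meta so nvdsosd draws it on screen */
NvDsDisplayMeta *display_meta = nvds_acquire_display_meta_from_pool (batch_meta);
display_meta->num_rects = 1;
NvOSD_RectParams *rect = &display_meta->rect_params[0];
rect->left = 170;    /* e.g. x - 30 for a 60x60 box around (200, 200) */
rect->top = 170;
rect->width = 60;
rect->height = 60;
rect->border_width = 2;
rect->border_color = (NvOSD_ColorParams) {1.0, 0.0, 0.0, 1.0};  /* opaque red */
nvds_add_display_meta_to_frame (frame_meta, display_meta);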

Can you help me by showing where in my pipeline I need to make changes, and how I can see the resized frame overlaid on the original frame during streaming?

Has anyone checked this method? I didn't get any result showing the resized frame with a bbox in the DeepStream rendering; using this code I am getting a completely black screen… Is this approach good?
This is the output I am getting:


It didn't show any errors in the terminal… but my rendering is a full black screen.

Here I am attaching my code:

#include <gst/gst.h>
#include <opencv2/opencv.hpp>
#include <cuda_runtime.h>
#include "gstnvdsmeta.h"
#include "nvdsmeta.h"

static GstPadProbeReturn nvvidconv_src_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data) {
    GstBuffer *buf = GST_PAD_PROBE_INFO_BUFFER(info);

    if (!buf) {
        g_print("No buffer found.\n");
        return GST_PAD_PROBE_PASS;
    }

    GstMapInfo inmap = GST_MAP_INFO_INIT;
    if (!gst_buffer_map(buf, &inmap, GST_MAP_READ)) {
        g_print("Failed to map buffer for reading.\n");
        return GST_PAD_PROBE_PASS;
    }

    // Get the batch metadata from the buffer
    NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
    if (!batch_meta) {
        g_print("No batch meta found.\n");
        gst_buffer_unmap(buf, &inmap);
        return GST_PAD_PROBE_PASS;
    }

    // Iterate over the frame meta list in the batch
    for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
        NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)l_frame->data;
        guint width = frame_meta->source_frame_width;
        guint height = frame_meta->source_frame_height;

        if (width == 0 || height == 0) {
            g_print("Invalid frame dimensions: width = %d, height = %d\n", width, height);
            continue;
        }

        // Wrap the mapped data as NV12 (luma plus interleaved chroma: height * 3 / 2 rows)
        cv::Mat in_mat(height * 3 / 2, width, CV_8UC1, inmap.data);
        cv::Mat bgr_frame;

        // Ensure the wrap succeeded before converting
        if (in_mat.empty()) {
            g_print("Failed to convert NV12 to BGR.\n");
            continue;
        }

        // Convert NV12 to BGR using OpenCV
        cv::cvtColor(in_mat, bgr_frame, cv::COLOR_YUV2BGR_NV12);

        // Define crop dimensions and location
        int x = 300, y = 300;
        int crop_width = 60, crop_height = 60;

        // Ensure the crop area is within the image bounds
        int x_start = std::max(0, x - crop_width / 2);
        int y_start = std::max(0, y - crop_height / 2);

        guint x_end = std::min((guint)width, (guint)(x + crop_width / 2));
        guint y_end = std::min((guint)height, (guint)(y + crop_height / 2));

        int crop_width_actual = x_end - x_start;
        int crop_height_actual = y_end - y_start;

        crop_width_actual = std::min(crop_width_actual, crop_width);
        crop_height_actual = std::min(crop_height_actual, crop_height);

        // Perform the cropping operation
        cv::Rect crop_roi(x_start, y_start, crop_width_actual, crop_height_actual);
        cv::Mat cropped_frame = bgr_frame(crop_roi);

        // Resize the cropped frame
        cv::Mat resized_frame;
        cv::resize(cropped_frame, resized_frame, cv::Size(cropped_frame.cols * 2, cropped_frame.rows * 2), 0, 0, cv::INTER_CUBIC);

        // Denoise the resized frame
        cv::fastNlMeansDenoising(resized_frame, resized_frame, 7, 7, 21);

        // Apply sharpening kernel
        cv::Mat sharpening_kernel = (cv::Mat_<float>(3, 3) <<
            0, -1, 0,
            -1, 5, -1,
            0, -1, 0);
        cv::filter2D(resized_frame, resized_frame, -1, sharpening_kernel);

        // Draw a red boundary around the resized frame
        cv::rectangle(resized_frame, cv::Point(0, 0), cv::Point(resized_frame.cols, resized_frame.rows), cv::Scalar(0, 0, 255), 2);

        // Convert the BGR frame to RGBA
        cv::Mat frame_rgba;
        cv::cvtColor(resized_frame, frame_rgba, cv::COLOR_BGR2RGBA);

        // Map the output buffer to write the data
        GstMapInfo output_map;
        if (!gst_buffer_map(buf, &output_map, GST_MAP_WRITE)) {
            g_print("Failed to map output buffer for writing.\n");
            gst_buffer_unmap(buf, &inmap);
            return GST_PAD_PROBE_PASS;
        }

        // Ensure the buffer data is copied to the output buffer
        if (output_map.data) {
            memcpy(output_map.data, frame_rgba.data, frame_rgba.total() * frame_rgba.elemSize());
        } else {
            g_print("No output data to write.\n");
            gst_buffer_unmap(buf, &inmap);
            gst_buffer_unmap(buf, &output_map);
            return GST_PAD_PROBE_PASS;
        }

        // Unmap the buffers
        gst_buffer_unmap(buf, &output_map);

        // Push the processed frame back to the pad
        if (gst_pad_push(pad, buf) != GST_PAD_PROBE_OK) {
            g_print("Failed to push frame to pad.\n");
        }

        g_print("Processed frame and pushed to DeepStream.\n");
    }

    // Unmap the input buffer after processing
    gst_buffer_unmap(buf, &inmap);

    return GST_PAD_PROBE_PASS;
}

static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
  GMainLoop *loop = (GMainLoop *) data;
  switch (GST_MESSAGE_TYPE (msg)) {
    case GST_MESSAGE_EOS:
      g_print ("End of stream\n");
      g_main_loop_quit (loop);
      break;
    case GST_MESSAGE_ERROR:{
      gchar *debug;
      GError *error;
      gst_message_parse_error (msg, &error, &debug);
      g_printerr ("ERROR from element %s: %s\n",
          GST_OBJECT_NAME (msg->src), error->message);
      if (debug)
        g_printerr ("Error details: %s\n", debug);
      g_free (debug);
      g_error_free (error);
      g_main_loop_quit (loop);
      break;
    }
    default:
      break;
  }
  return TRUE;
}


  int main(int argc, char *argv[]) {
      GMainLoop *loop = NULL;
      GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL, *decoder = NULL,
          *streammux = NULL, *sink = NULL, *nvvidconv = NULL, *nvosd = NULL, *capsfilter = NULL;
  
      GstBus *bus = NULL;
      guint bus_watch_id;
      GstPad *nvvidconv_sink_pad = NULL;
      gboolean yaml_config = FALSE;
      //NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;
  
      int current_device = -1;
      cudaGetDevice(&current_device);
      struct cudaDeviceProp prop;
      cudaGetDeviceProperties(&prop, current_device);
  
      if (argc != 2) {
          g_printerr("Usage: %s <yml file>\n", argv[0]);
          g_printerr("OR: %s <H264 filename>\n", argv[0]);
          return -1;
      }
  
      gst_init(&argc, &argv);
      loop = g_main_loop_new(NULL, FALSE);
  
      yaml_config = (g_str_has_suffix(argv[1], ".yml") ||
                     g_str_has_suffix(argv[1], ".yaml"));
  
      pipeline = gst_pipeline_new("dstest1-pipeline");
  
      source = gst_element_factory_make("filesrc", "file-source");
      h264parser = gst_element_factory_make("h264parse", "h264-parser");
      decoder = gst_element_factory_make("nvv4l2decoder", "nvv4l2-decoder");
      streammux = gst_element_factory_make("nvstreammux", "stream-muxer");
  
      nvvidconv = gst_element_factory_make("nvvideoconvert", "nvvideo-converter");
      nvosd = gst_element_factory_make("nvdsosd", "nv-onscreendisplay");
      capsfilter = gst_element_factory_make("capsfilter", "filter");
      g_object_set(G_OBJECT(capsfilter), "caps", gst_caps_from_string("video/x-raw"), NULL);
  
      if (prop.integrated) {
          sink = gst_element_factory_make("nv3dsink", "nv3d-sink");
      } else {
          sink = gst_element_factory_make("nv3dsink", "nvvideo-renderer");
      }
  
      if (!source || !h264parser || !decoder || !nvvidconv ||  !sink || !capsfilter) {
          g_printerr("One element could not be created. Exiting.\n");
          return -1;
      }
  
      g_object_set(G_OBJECT(source), "location", argv[1], NULL);
      g_object_set(G_OBJECT(streammux), "batch-size", 1, NULL);
      g_object_set(G_OBJECT(streammux), "width", 1280, "height", 720, "batched-push-timeout", 4000000, NULL);
  
      bus = gst_pipeline_get_bus(GST_PIPELINE(pipeline));
      bus_watch_id = gst_bus_add_watch(bus, bus_call, loop);
      gst_object_unref(bus);
  
      gst_bin_add_many(GST_BIN(pipeline), source, h264parser, decoder, streammux, nvosd,nvvidconv, capsfilter, sink, NULL);
  
      GstPad *sinkpad, *srcpad;
      gchar pad_name_sink[16] = "sink_0";
      gchar pad_name_src[16] = "src";
  
      sinkpad = gst_element_request_pad_simple(streammux, pad_name_sink);
      if (!sinkpad) {
          g_printerr("Streammux request sink pad failed. Exiting.\n");
          return -1;
      }
  
      srcpad = gst_element_get_static_pad(decoder, pad_name_src);
      if (!srcpad) {
          g_printerr("Decoder request src pad failed. Exiting.\n");
          return -1;
      }
  
      if (gst_pad_link(srcpad, sinkpad) != GST_PAD_LINK_OK) {
          g_printerr("Failed to link decoder to stream muxer. Exiting.\n");
          return -1;
      }
  
      gst_object_unref(sinkpad);
      gst_object_unref(srcpad);
  
      if (!gst_element_link_many(source, h264parser, decoder, NULL)) {
          g_printerr("Elements could not be linked: 1. Exiting.\n");
          return -1;
      }
  
      // Adjusted pipeline linking order with probe at nvvidconv
      if (!gst_element_link_many(streammux, nvosd,nvvidconv, capsfilter, sink, NULL)) {
          g_printerr("Elements could not be linked: 2. Exiting.\n");
          return -1;
      }
  
      // Add probe to nvvidconv's source pad
      nvvidconv_sink_pad = gst_element_get_static_pad(nvvidconv, "src");
      if (nvvidconv_sink_pad) {
          gst_pad_add_probe(nvvidconv_sink_pad, GST_PAD_PROBE_TYPE_BUFFER, nvvidconv_src_pad_buffer_probe, NULL, NULL);
          gst_object_unref(nvvidconv_sink_pad);
      }
  
      gst_element_set_state(pipeline, GST_STATE_PLAYING);
      g_main_loop_run(loop);
  
      gst_element_set_state(pipeline, GST_STATE_NULL);
      gst_object_unref(GST_OBJECT(pipeline));
      g_source_remove(bus_watch_id);
      g_main_loop_unref(loop);
  
      return 0;
  }

This pointer does not point to the raw image data. You can refer to our "Dump NV12 NvBufSurface into a YUV file" FAQ topic to learn how to get the raw image data from the NvBufSurface.
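
The general pattern from that topic looks roughly like this (a sketch; `fp` is an already-opened output file, and `surface`/`frame_meta` are the variables from your probe):

/* Sketch: read raw NV12 out of an NvBufSurface plane by plane, honoring the pitch */
NvBufSurface *surface = (NvBufSurface *) in_map_info.data;
guint idx = frame_meta->batch_id;

if (NvBufSurfaceMap (surface, idx, -1, NVBUF_MAP_READ) == 0) {
    NvBufSurfaceSyncForCpu (surface, idx, -1);
    NvBufSurfaceParams *params = &surface->surfaceList[idx];

    for (guint p = 0; p < params->planeParams.num_planes; p++) {
        guint8 *plane = (guint8 *) params->mappedAddr.addr[p];
        guint pitch  = params->planeParams.pitch[p];
        guint row    = params->planeParams.width[p] *
                       params->planeParams.bytesPerPix[p];
        /* Only `row` bytes of each `pitch`-sized line are pixel data;
         * the rest is alignment padding. */
        for (guint h = 0; h < params->planeParams.height[p]; h++)
            fwrite (plane + h * pitch, 1, row, fp);
    }
    NvBufSurfaceUnMap (surface, idx, -1);
}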

There has been no update from you for a while, so we are assuming this is no longer an issue and are closing this topic. If you need further support, please open a new one. Thanks.

This topic was automatically closed 14 days after the last reply. New replies are no longer allowed.