Dear Support Team,
**• | Hardware Platform (NVIDIA ORIN NX)** |
---|---|
**• | DeepStream 7.0** |
Dear Support Team,
I hope this message finds you well. I am encountering an issue while attempting to crop a frame from a video stream in my DeepStream 7.0 application, running on a NVIDIA Jetson Orin NX with CUDA 12.2.
My objective is to save a cropped part of the frame when I provide the x and y coordinates of a pixel. Specifically, I wish to crop a 60x60 pixel region around this point, considering 30 pixels above, below, left, and right of the given coordinates. I aim to save this 60x60 cropped frame as a .jpg image.
However, I am facing the following error message repeatedly:
Image dimensions: 1920x1080, Step size: 2048
Invalid step size: 2048. Skipping frame...
This error occurs despite the image dimensions being 1920x1080, and I believe the issue lies with the step size calculation or its handling within the cropping operation.
I have attached the relevant code and configuration details for reference. Could you please assist me in resolving this issue or provide any suggestions on how to correctly crop and save the image in the specified manner?
static int save_img = 0;
static int frame_count = 0; // Counter to save multiple frames with unique filenames
static GstPadProbeReturn osd_sink_pad_buffer_probe(GstPad *pad, GstPadProbeInfo *info, gpointer u_data)
{
cv::Mat in_mat;
cv::Mat matBGR;
GstBuffer *buf = (GstBuffer *)info->data;
NvDsMetaList *l_frame = NULL;
NvDsMetaList *l_user_meta = NULL;
NvDsUserMeta *user_meta = NULL;
NvDsInferSegmentationMeta *seg_meta_data = NULL;
// Get original raw data
GstMapInfo in_map_info;
char *src_data = NULL;
if (!gst_buffer_map(buf, &in_map_info, GST_MAP_READ)) {
g_print("Error: Failed to map gst buffer\n");
gst_buffer_unmap(buf, &in_map_info);
return GST_PAD_PROBE_OK;
}
NvBufSurface *surface = (NvBufSurface *)in_map_info.data;
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta(buf);
for (l_frame = batch_meta->frame_meta_list; l_frame != NULL; l_frame = l_frame->next) {
NvDsFrameMeta *frame_meta = (NvDsFrameMeta *)(l_frame->data);
// Validate user meta and check if the frame contains segmentation meta
for (l_user_meta = frame_meta->frame_user_meta_list; l_user_meta != NULL; l_user_meta = l_user_meta->next) {
user_meta = (NvDsUserMeta *)(l_user_meta->data);
if (user_meta && user_meta->base_meta.meta_type == NVDSINFER_SEGMENTATION_META) {
seg_meta_data = (NvDsInferSegmentationMeta *)user_meta->user_meta_data;
}
}
// Allocate memory for source data
src_data = (char *)malloc(surface->surfaceList[frame_meta->batch_id].dataSize);
if (src_data == NULL) {
g_print("Error: failed to malloc src_data \n");
continue;
}
#ifdef PLATFORM_TEGRA
// Tegra-specific mapping of NvBufSurface
NvBufSurfaceMap(surface, -1, -1, NVBUF_MAP_READ);
NvBufSurfacePlaneParams *pParams = &surface->surfaceList[frame_meta->batch_id].planeParams;
unsigned int offset = 0;
// Loop through each plane in the NvBufSurface and copy data to src_data
for (unsigned int num_planes = 0; num_planes < pParams->num_planes; num_planes++) {
if (num_planes > 0)
offset += pParams->height[num_planes - 1] * (pParams->bytesPerPix[num_planes - 1] * pParams->width[num_planes - 1]);
for (unsigned int h = 0; h < pParams->height[num_planes]; h++) {
memcpy((void *)(src_data + offset + h * pParams->bytesPerPix[num_planes] * pParams->width[num_planes]),
(void *)((char *)surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[num_planes] + h * pParams->pitch[num_planes]),
pParams->bytesPerPix[num_planes] * pParams->width[num_planes]);
}
}
NvBufSurfaceSyncForDevice(surface, -1, -1);
NvBufSurfaceUnMap(surface, -1, -1);
#else
// Non-Tegra case: copy memory using CUDA
cudaMemcpy((void *)src_data,
(void *)surface->surfaceList[frame_meta->batch_id].dataPtr,
surface->surfaceList[frame_meta->batch_id].dataSize,
cudaMemcpyDeviceToHost);
#endif
// If source data is not NULL, convert it to OpenCV Mat and save the image
if (src_data != NULL) {
// Get image dimensions and step size (pitch)
int width = surface->surfaceList[frame_meta->batch_id].planeParams.width[0];
int height = surface->surfaceList[frame_meta->batch_id].planeParams.height[0];
int step = surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0];
// Print the dimensions and step size for debugging
g_print("Image dimensions: %dx%d, Step size: %d\n", width, height, step);
// Corrected step validation: the step can be larger than width * 4 (for RGBA format)
if (step < width * 3) {
g_print("Invalid step size: %d. Skipping frame...\n", step);
free(src_data); // Free allocated memory
continue;
}
if (step > width * 3) {
// Adjust for step size greater than width * 3 (indicating padding)
g_print("Step size is greater than expected, adjusting for padding...\n");
}
// Convert the source data into OpenCV Mat
in_mat = cv::Mat(height, width, CV_8UC4, src_data, step);
// Convert the frame to BGR format for saving as an image
cv::cvtColor(in_mat, matBGR, cv::COLOR_RGBA2RGB);
// Define the cropping region (200, 200) as the center, with a margin of 30 pixels
int x = 200, y = 200;
int crop_x1 = std::max(0, x - 30); // Ensure we don't go out of bounds
int crop_y1 = std::max(0, y - 30);
int crop_x2 = std::min(width, x + 30); // Ensure we don't go out of bounds
int crop_y2 = std::min(height, y + 30);
// Crop the image (60x60 with (200, 200) at the center)
cv::Rect crop_rect(crop_x1, crop_y1, crop_x2 - crop_x1, crop_y2 - crop_y1);
cv::Mat cropped = matBGR(crop_rect);
// Resize the cropped image to 60x60
cv::Mat resized_cropped;
cv::resize(cropped, resized_cropped, cv::Size(60, 60));
// Save the cropped image as 'cropped_frame_XXX.jpg' where XXX is the frame count
char filename[128];
sprintf(filename, "cropped_frame_%03d.jpg", frame_count);
cv::imwrite(filename, resized_cropped);
frame_count++; // Increment frame counter for the next frame
// Free allocated memory
free(src_data);
src_data = NULL;
}
}
gst_buffer_unmap(buf, &in_map_info);
return GST_PAD_PROBE_OK;
}
static gboolean
bus_call (GstBus * bus, GstMessage * msg, gpointer data)
{
GMainLoop *loop = (GMainLoop *) data;
switch (GST_MESSAGE_TYPE (msg)) {
case GST_MESSAGE_EOS:
g_print ("End of stream\n");
g_main_loop_quit (loop);
break;
case GST_MESSAGE_ERROR:{
gchar *debug;
GError *error;
gst_message_parse_error (msg, &error, &debug);
g_printerr ("ERROR from element %s: %s\n",
GST_OBJECT_NAME (msg->src), error->message);
if (debug)
g_printerr ("Error details: %s\n", debug);
g_free (debug);
g_error_free (error);
g_main_loop_quit (loop);
break;
}
default:
break;
}
return TRUE;
}
int
main (int argc, char *argv[])
{
GMainLoop *loop = NULL;
GstElement *pipeline = NULL, *source = NULL, *h264parser = NULL,
*decoder = NULL, *streammux = NULL, *sink = NULL, *nvvidconv = NULL,
*nvosd = NULL;
GstBus *bus = NULL;
guint bus_watch_id;
GstPad *osd_sink_pad = NULL;
gboolean yaml_config = FALSE;
NvDsGieType pgie_type = NVDS_GIE_PLUGIN_INFER;
int current_device = -1;
cudaGetDevice(¤t_device);
struct cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, current_device);
/* Check input arguments */
if (argc != 2) {
g_printerr ("Usage: %s <yml file>\n", argv[0]);
g_printerr ("OR: %s <H264 filename>\n", argv[0]);
return -1;
}
/* Standard GStreamer initialization */
gst_init (&argc, &argv);
loop = g_main_loop_new (NULL, FALSE);
/* Parse inference plugin type */
yaml_config = (g_str_has_suffix (argv[1], ".yml") ||
g_str_has_suffix (argv[1], ".yaml"));
/* Create gstreamer elements */
/* Create Pipeline element that will form a connection of other elements */
pipeline = gst_pipeline_new ("dstest1-pipeline");
/* Source element for reading from the file */
source = gst_element_factory_make ("filesrc", "file-source");
/* Since the data format in the input file is elementary h264 stream,
* we need a h264parser */
h264parser = gst_element_factory_make ("h264parse", "h264-parser");
/* Use nvdec_h264 for hardware accelerated decode on GPU */
decoder = gst_element_factory_make ("nvv4l2decoder", "nvv4l2-decoder");
/* Create nvstreammux instance to form batches from one or more sources. */
streammux = gst_element_factory_make ("nvstreammux", "stream-muxer");
if (!pipeline || !streammux) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* Use nvinfer or nvinferserver to run inferencing on decoder's output,
* behaviour of inferencing is set through config file */
/* Use convertor to convert from NV12 to RGBA as required by nvosd */
nvvidconv = gst_element_factory_make ("nvvideoconvert", "nvvideo-converter");
/* Create OSD to draw on the converted RGBA buffer */
nvosd = gst_element_factory_make ("nvdsosd", "nv-onscreendisplay");
/* Finally render the osd output */
if(prop.integrated) {
sink = gst_element_factory_make("nv3dsink", "nv3d-sink");
} else {
#ifdef __aarch64__
sink = gst_element_factory_make ("nv3dsink", "nvvideo-renderer");
#else
sink = gst_element_factory_make ("nveglglessink", "nvvideo-renderer");
#endif
}
if (!source || !h264parser || !decoder
|| !nvvidconv || !nvosd || !sink) {
g_printerr ("One element could not be created. Exiting.\n");
return -1;
}
/* we set the input filename to the source element */
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
if (g_str_has_suffix (argv[1], ".h264")) {
g_object_set (G_OBJECT (source), "location", argv[1], NULL);
g_object_set (G_OBJECT (streammux), "batch-size", 1, NULL);
g_object_set (G_OBJECT (streammux), "width", MUXER_OUTPUT_WIDTH, "height",
MUXER_OUTPUT_HEIGHT,
"batched-push-timeout", MUXER_BATCH_TIMEOUT_USEC, NULL);
/* Set all the necessary properties of the nvinfer element,
* the necessary ones are : */
}
/* we add a message handler */
bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
bus_watch_id = gst_bus_add_watch (bus, bus_call, loop);
gst_object_unref (bus);
/* Set up the pipeline */
/* we add all elements into the pipeline */
gst_bin_add_many (GST_BIN (pipeline),
source, h264parser, decoder, streammux,
nvvidconv, nvosd, sink, NULL);
g_print ("Added elements to bin\n");
GstPad *sinkpad, *srcpad;
gchar pad_name_sink[16] = "sink_0";
gchar pad_name_src[16] = "src";
sinkpad = gst_element_request_pad_simple (streammux, pad_name_sink);
if (!sinkpad) {
g_printerr ("Streammux request sink pad failed. Exiting.\n");
return -1;
}
srcpad = gst_element_get_static_pad (decoder, pad_name_src);
if (!srcpad) {
g_printerr ("Decoder request src pad failed. Exiting.\n");
return -1;
}
if (gst_pad_link (srcpad, sinkpad) != GST_PAD_LINK_OK) {
g_printerr ("Failed to link decoder to stream muxer. Exiting.\n");
return -1;
}
gst_object_unref (sinkpad);
gst_object_unref (srcpad);
/* we link the elements together */
/* file-source -> h264-parser -> nvh264-decoder ->
* pgie -> nvvidconv -> nvosd -> video-renderer */
if (!gst_element_link_many (source, h264parser, decoder, NULL)) {
g_printerr ("Elements could not be linked: 1. Exiting.\n");
return -1;
}
if (!gst_element_link_many (streammux,
nvvidconv, nvosd, sink, NULL)) {
g_printerr ("Elements could not be linked: 2. Exiting.\n");
return -1;
}
/* Lets add probe to get informed of the meta data generated, we add probe to
* the sink pad of the osd element, since by that time, the buffer would have
* had got all the metadata. */
osd_sink_pad = gst_element_get_static_pad (nvosd, "sink");
if (!osd_sink_pad)
g_print ("Unable to get sink pad\n");
else
gst_pad_add_probe (osd_sink_pad, GST_PAD_PROBE_TYPE_BUFFER,
osd_sink_pad_buffer_probe, NULL, NULL);
gst_object_unref (osd_sink_pad);
/* Set the pipeline to "playing" state */
g_print ("Using file: %s\n", argv[1]);
gst_element_set_state (pipeline, GST_STATE_PLAYING);
/* Wait till pipeline encounters an error or EOS */
g_print ("Running...\n");
g_main_loop_run (loop);
/* Out of the main loop, clean up nicely */
g_print ("Returned, stopping playback\n");
gst_element_set_state (pipeline, GST_STATE_NULL);
g_print ("Deleting pipeline\n");
gst_object_unref (GST_OBJECT (pipeline));
g_source_remove (bus_watch_id);
g_main_loop_unref (loop);
return 0;
}
Thank you for your time and support.
Best regards,
Abdul Manaf PV