Please provide complete information as applicable to your setup.
• Hardware Platform (Jetson / GPU) Jetson Nano
• DeepStream Version 5.0
• JetPack Version (valid for Jetson only) 4.4-b144
• TensorRT Version 7.1.3
• Issue Type( questions, new requirements, bugs) question
Dear all,
I am trying to apply a pre-processing to frames before they reach the PGIE.
I am able to place a dsexample instance after the PGIE, filter the frames with the opencv sepia filtering and save the image on the disk:
// Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
// algorithm can handle padded RGBA data.
in_mat =
cv::Mat (dest_height, dest_width,
CV_8UC4, nvbuf->surfaceList[0].mappedAddr.addr[0],
nvbuf->surfaceList[0].pitch);
out_mat =
cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
saved_mat =
cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
cv::cvtColor (in_mat, out_mat, cv::COLOR_RGBA2BGR);
cv::transform(out_mat, saved_mat, kernel);
time( &rawtime );
info = localtime( &rawtime );
static gint dump = 0;
char filename[64];
snprintf(filename, 64, "/home/jnano/jnanoImages/%04d_%02d_%02d_%02d_%02d_%02d.jpg", info->tm_year+1900, info->tm_mon+1, info->tm_mday+1, info->tm_hour, info->tm_min, info->tm_sec);
cv::imwrite(filename, saved_mat);
However, I am not able to execute such filtering before the PGIE on all the frames.
I have tried a number of different approaches.
I have tried to modify the gst-dsexample and follow the same approach that it is used for blurring object, but I obtain a segmentation fault:
/* Cache the mapped data for CPU access */
NvBufSurfaceSyncForCpu (surface, frame_meta->batch_id, 0);
in_mat =
cv::Mat (surface->surfaceList[frame_meta->batch_id].planeParams.height[0],
surface->surfaceList[frame_meta->batch_id].planeParams.width[0], CV_8UC4,
surface->surfaceList[frame_meta->batch_id].mappedAddr.addr[0],
surface->surfaceList[frame_meta->batch_id].planeParams.pitch[0]);
cv::transform(in_mat, in_mat, kernel);
in_mat.convertTo(in_mat,CV_8UC4);
/* Cache the mapped data for device access */
NvBufSurfaceSyncForDevice (surface, frame_meta->batch_id, 0);
However, in the execution of cv::transform(in_mat, in_mat, kernel); I get a segmentation fault :(
Moreover, I I get
5168 Segmentation fault (core dumped)
Even if I try to copy the buffer instead of doing a transform:
cv::Mat image_copy = in_mat.clone();
However, no segmentation fault at all if I use another opencv function such as:
cv::filter2D(in_mat, in_mat,-1, kernel);
The problem is that of course the function is not doing what I need to do…
But the PGIE is correctly receiving the filtered images (you see below that the filter2D creates a white image because of the convolution):
This is the result of the filter2D that of course is doing a convolution of the pixels and not what I need that is a meshing of the different channels.
I think that this give an interesting information. The problem might come from the specific operation that the opencv function cv::transform is doing on the deepstream buffer.
Finally, I have also tried to edit the function get_converted_mat as follows:
static GstFlowReturn
filter_frame (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
gint input_height)
{
NvBufSurfTransform_Error err;
NvBufSurfTransformConfigParams transform_config_params;
NvBufSurfTransformParams transform_params;
NvBufSurfTransformRect src_rect;
NvBufSurfTransformRect dst_rect;
NvBufSurface ip_surf;
cv::Mat in_mat, out_mat, filtered_mat;
ip_surf = *input_buf;
time_t rawtime;
struct tm *info;
ip_surf.numFilled = ip_surf.batchSize = 1;
ip_surf.surfaceList = &(input_buf->surfaceList[idx]);
/*
gint src_left = GST_ROUND_UP_2(crop_rect_params->left);
gint src_top = GST_ROUND_UP_2(crop_rect_params->top);
gint src_width = GST_ROUND_DOWN_2(crop_rect_params->width);
gint src_height = GST_ROUND_DOWN_2(crop_rect_params->height);
*/
gint src_left = crop_rect_params->left;
gint src_top = crop_rect_params->top;
gint src_width = crop_rect_params->width;
gint src_height = crop_rect_params->height;
//g_print("ltwh = %d %d %d %d \n", src_left, src_top, src_width, src_height);
guint dest_width, dest_height;
dest_width = src_width;
dest_height = src_height;
NvBufSurface *nvbuf;
NvBufSurfaceCreateParams create_params;
create_params.gpuId = dsexample->gpu_id;
create_params.width = dest_width;
create_params.height = dest_height;
create_params.size = 0;
create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
create_params.layout = NVBUF_LAYOUT_PITCH;
#ifdef __aarch64__
create_params.memType = NVBUF_MEM_DEFAULT;
#else
create_params.memType = NVBUF_MEM_CUDA_UNIFIED;
#endif
NvBufSurfaceCreate (&nvbuf, 1, &create_params);
// Configure transform session parameters for the transformation
transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
transform_config_params.gpu_id = dsexample->gpu_id;
transform_config_params.cuda_stream = dsexample->cuda_stream;
// Set the transform session parameters for the conversions executed in this
// thread.
err = NvBufSurfTransformSetSessionParams (&transform_config_params);
if (err != NvBufSurfTransformError_Success) {
GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
goto error;
}
// Calculate scaling ratio while maintaining aspect ratio
ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);
if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
("%s:crop_rect_params dimensions are zero",__func__), (NULL));
goto error;
}
#ifdef __aarch64__
if (ratio <= 1.0 / 16 || ratio >= 16.0) {
// Currently cannot scale by ratio > 16 or < 1/16 for Jetson
goto error;
}
#endif
// Set the transform ROIs for source and destination
src_rect = {(guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
dst_rect = {0, 0, (guint)dest_width, (guint)dest_height};
// Set the transform parameters
transform_params.src_rect = &src_rect;
transform_params.dst_rect = &dst_rect;
transform_params.transform_flag =
NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
NVBUFSURF_TRANSFORM_CROP_DST;
transform_params.transform_filter = NvBufSurfTransformInter_Default;
//Memset the memory
NvBufSurfaceMemSet (nvbuf, 0, 0, 0);
GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");
// Transformation scaling+format conversion if any.
err = NvBufSurfTransform (&ip_surf, nvbuf, &transform_params);
if (err != NvBufSurfTransformError_Success) {
GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
("NvBufSurfTransform failed with error %d while converting buffer", err),
(NULL));
goto error;
}
// Map the buffer so that it can be accessed by CPU
//if (NvBufSurfaceMap (nvbuf, 0, 0, NVBUF_MAP_READ) != 0){
if (NvBufSurfaceMap (nvbuf, 0, 0, NVBUF_MAP_READ_WRITE) != 0){
goto error;
}
// Cache the mapped data for CPU access
NvBufSurfaceSyncForCpu (nvbuf, 0, 0);
// Use openCV to remove padding and convert RGBA to BGR. Can be skipped if
// algorithm can handle padded RGBA data.
in_mat =
cv::Mat (dest_height, dest_width,
CV_8UC4, nvbuf->surfaceList[0].mappedAddr.addr[0],
nvbuf->surfaceList[0].pitch);
out_mat =
cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
filtered_mat =
cv::Mat (cv::Size(dest_width, dest_height), CV_8UC3);
//cv::cvtColor (in_mat, out_mat, cv::COLOR_RGBA2BGR);
//cv::transform(out_mat, filtered_mat, kernel);
//cv::cvtColor (in_mat, in_mat, cv::COLOR_RGBA2BGR);
cv::transform(in_mat, in_mat, kernel);
//cv::cvtColor (in_mat, in_mat, cv::COLOR_BGR2RGBA);
/* Cache the mapped data for device access */
NvBufSurfaceSyncForDevice (nvbuf, 0, 0);
/*
time( &rawtime );
info = localtime( &rawtime );
static gint dump = 0;
char filename[64];
snprintf(filename, 64, "/home/jnano/jnanoImages/%04d_%02d_%02d_%02d_%02d_%02d.jpg", info->tm_year+1900, info->tm_mon+1, info->tm_mday+1, info->tm_hour, info->tm_min, info->tm_sec);
cv::imwrite(filename, saved_mat);
*/
if (NvBufSurfaceUnMap (nvbuf, 0, 0)){
goto error;
}
NvBufSurfaceDestroy(nvbuf);
#ifdef __aarch64__
// To use the converted buffer in CUDA, create an EGLImage and then use
// CUDA-EGL interop APIs
if (USE_EGLIMAGE) {
if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
goto error;
}
// dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage
// Use interop APIs cuGraphicsEGLRegisterImage and
// cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA
// Destroy the EGLImage
NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
}
#endif
/* We will first convert only the Region of Interest (the entire frame or the
* object bounding box) to RGB and then scale the converted RGB frame to
* processing resolution. */
return GST_FLOW_OK;
error:
return GST_FLOW_ERROR;
}
With this approach I do not get any segmentation fault, but the PGIE is NOT receiving the filtered image. Instead what the PGIE receive is the original frames. It seems that I am not writing in the buffer as it was happening in the previous example.
I hope that you could give me some suggestion.
Thank you very much!!