Please provide complete information as applicable to your setup.
**• Hardware Platform (Jetson / GPU)** Jetson Nano
**• DeepStream Version** 5.1
**• JetPack Version (valid for Jetson only)** 4.5.1
Hi, I have successfully added and tested camera-tampering detection code using OpenCV in gst-dsexample on my dGPU laptop, with no FPS reduction.
But on the Nano I see a large drop in FPS (from 15 FPS down to about 3 FPS)! Can you recommend some changes to eliminate this?
Below is the code, from get_converted_mat() in gstdsexample.cpp:
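(For context, the tampering-detection globals referenced in the function below — bg, fore, contours, cam_tamp — are declared at file scope in my gstdsexample.cpp, roughly like this. This is a simplified sketch, not my exact declarations:)

#include <vector>
#include <opencv2/video.hpp>    // BackgroundSubtractorMOG2 / createBackgroundSubtractorMOG2
#include <opencv2/imgproc.hpp>  // erode, dilate, findContours, boundingRect

using namespace cv;  /* the function body calls erode/dilate/findContours unqualified */

/* Globals used by the tampering check */
static cv::Ptr<cv::BackgroundSubtractorMOG2> bg = cv::createBackgroundSubtractorMOG2 ();
static cv::Mat fore;                                  /* foreground mask from MOG2 */
static std::vector<std::vector<cv::Point>> contours;  /* contours of foreground blobs */
static int cam_tamp = 0;                              /* 1 when camera tampering is suspected */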
static GstFlowReturn
get_converted_mat (GstDsExample * dsexample, NvBufSurface *input_buf, gint idx,
NvOSD_RectParams * crop_rect_params, gdouble & ratio, gint input_width,
gint input_height, NvDsBatchMeta *batch_meta, NvDsFrameMeta *frame_meta)
{
NvBufSurfTransform_Error err;
NvBufSurfTransformConfigParams transform_config_params;
NvBufSurfTransformParams transform_params;
NvBufSurfTransformRect src_rect;
NvBufSurfTransformRect dst_rect;
NvBufSurface ip_surf;
/* bg is the global MOG2 background subtractor used for the tampering check */
bg->setNMixtures(10);
cv::Mat in_mat;
ip_surf = *input_buf;
ip_surf.numFilled = ip_surf.batchSize = 1;
ip_surf.surfaceList = &(input_buf->surfaceList[idx]);
gint src_left = GST_ROUND_UP_2((unsigned int)crop_rect_params->left);
gint src_top = GST_ROUND_UP_2((unsigned int)crop_rect_params->top);
gint src_width = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->width);
gint src_height = GST_ROUND_DOWN_2((unsigned int)crop_rect_params->height);
/* Maintain aspect ratio */
double hdest = dsexample->processing_width * src_height / (double) src_width;
double wdest = dsexample->processing_height * src_width / (double) src_height;
guint dest_width, dest_height;
if (hdest <= dsexample->processing_height) {
dest_width = dsexample->processing_width;
dest_height = hdest;
} else {
dest_width = wdest;
dest_height = dsexample->processing_height;
}
/* Configure transform session parameters for the transformation */
transform_config_params.compute_mode = NvBufSurfTransformCompute_Default;
transform_config_params.gpu_id = dsexample->gpu_id;
transform_config_params.cuda_stream = dsexample->cuda_stream;
/* Set the transform session parameters for the conversions executed in this
 * thread. */
err = NvBufSurfTransformSetSessionParams (&transform_config_params);
if (err != NvBufSurfTransformError_Success) {
GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
("NvBufSurfTransformSetSessionParams failed with error %d", err), (NULL));
return GST_FLOW_ERROR;
}
/* Calculate scaling ratio while maintaining aspect ratio */
ratio = MIN (1.0 * dest_width/ src_width, 1.0 * dest_height / src_height);
if ((crop_rect_params->width == 0) || (crop_rect_params->height == 0)) {
GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
("%s:crop_rect_params dimensions are zero", __func__), (NULL));
return GST_FLOW_ERROR;
}
#ifdef __aarch64__
if (ratio <= 1.0 / 16 || ratio >= 16.0) {
/* Currently cannot scale by ratio > 16 or < 1/16 for Jetson */
return GST_FLOW_ERROR;
}
#endif
/* Set the transform ROIs for source and destination */
src_rect = {(guint)src_top, (guint)src_left, (guint)src_width, (guint)src_height};
dst_rect = {0, 0, (guint)dest_width, (guint)dest_height};
/* Set the transform parameters */
transform_params.src_rect = &src_rect;
transform_params.dst_rect = &dst_rect;
transform_params.transform_flag =
NVBUFSURF_TRANSFORM_FILTER | NVBUFSURF_TRANSFORM_CROP_SRC |
NVBUFSURF_TRANSFORM_CROP_DST;
transform_params.transform_filter = NvBufSurfTransformInter_Default;
/* Memset the memory */
NvBufSurfaceMemSet (dsexample->inter_buf, 0, 0, 0);
GST_DEBUG_OBJECT (dsexample, "Scaling and converting input buffer\n");
/* Transformation scaling+format conversion if any. */
err = NvBufSurfTransform (&ip_surf, dsexample->inter_buf, &transform_params);
if (err != NvBufSurfTransformError_Success) {
GST_ELEMENT_ERROR (dsexample, STREAM, FAILED,
("NvBufSurfTransform failed with error %d while converting buffer", err),
(NULL));
return GST_FLOW_ERROR;
}
/* Map the buffer so that it can be accessed by CPU */
if (NvBufSurfaceMap (dsexample->inter_buf, 0, 0, NVBUF_MAP_READ) != 0){
return GST_FLOW_ERROR;
}
/* Cache the mapped data for CPU access */
NvBufSurfaceSyncForCpu (dsexample->inter_buf, 0, 0);
/* Use OpenCV to remove padding and convert RGBA to BGR. Can be skipped if the
 * algorithm can handle padded RGBA data. */
in_mat =
cv::Mat (dsexample->processing_height, dsexample->processing_width,
CV_8UC4, dsexample->inter_buf->surfaceList[0].mappedAddr.addr[0],
dsexample->inter_buf->surfaceList[0].pitch);
/* Tampering detection: update the MOG2 background model and get the
 * foreground mask, clean the mask up with erosion/dilation, then find the
 * contours of the remaining foreground blobs. */
bg->apply (in_mat, fore);
erode (fore, fore, cv::Mat ());
erode (fore, fore, cv::Mat ());
dilate (fore, fore, cv::Mat ());
dilate (fore, fore, cv::Mat ());
dilate (fore, fore, cv::Mat ());
findContours (fore, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
/* Sum the areas of the bounding boxes of large foreground blobs; if they
 * cover at least half of the frame, flag possible camera tampering. */
int a = 0;
std::vector<cv::Rect> boundRect (contours.size ());
for (size_t it = 0; it < contours.size (); it++)
{
boundRect[it] = boundingRect (contours[it]);
if (boundRect[it].width >= 40 || boundRect[it].height >= 40)
{
a = a + (boundRect[it].height) * (boundRect[it].width);
}
//std::cout << "Net contour area is " << a << "\n";
if (a >= int (in_mat.rows) * int (in_mat.cols) / 2)
{
// std::cout << "camera tampering in camera " << frame_meta->source_id << "\n";
cam_tamp = 1;
}
else
cam_tamp = 0;
}
//std::cout << "camera source id: " << frame_meta->source_id << "\n";
#if (CV_MAJOR_VERSION >= 4)
cv::cvtColor (in_mat, *dsexample->cvmat, cv::COLOR_RGBA2BGR);
#else
cv::cvtColor (in_mat, *dsexample->cvmat, CV_RGBA2BGR);
#endif
if (NvBufSurfaceUnMap (dsexample->inter_buf, 0, 0)){
return GST_FLOW_ERROR;
}
#ifdef __aarch64__
/* To use the converted buffer in CUDA, create an EGLImage and then use
 * CUDA-EGL interop APIs */
if (USE_EGLIMAGE) {
if (NvBufSurfaceMapEglImage (dsexample->inter_buf, 0) !=0 ) {
return GST_FLOW_ERROR;
}
/* dsexample->inter_buf->surfaceList[0].mappedAddr.eglImage
* Use interop APIs cuGraphicsEGLRegisterImage and
* cuGraphicsResourceGetMappedEglFrame to access the buffer in CUDA */
/* Destroy the EGLImage */
NvBufSurfaceUnMapEglImage (dsexample->inter_buf, 0);
}
#endif
/* We will first convert only the Region of Interest (the entire frame or the
 * object bounding box) to RGB and then scale the converted RGB frame to
 * processing resolution. */
return GST_FLOW_OK;
}
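For completeness, get_converted_mat() is called once per frame from gst_dsexample_transform_ip(), along the lines of the stock full-frame path. This is only a sketch (my version passes the extra batch_meta/frame_meta arguments through, and surface comes from mapping the input GstBuffer), not the exact code:

/* Sketch of the call site inside gst_dsexample_transform_ip(); based on the
 * stock gst-dsexample full-frame path, extended with batch_meta/frame_meta. */
NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (inbuf);
gdouble scale_ratio = 1.0;
for (NvDsMetaList *l_frame = batch_meta->frame_meta_list; l_frame != NULL;
    l_frame = l_frame->next) {
  NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) l_frame->data;
  NvOSD_RectParams rect_params;
  /* Scale and convert the entire frame to the processing resolution */
  rect_params.left = 0;
  rect_params.top = 0;
  rect_params.width = dsexample->video_info.width;
  rect_params.height = dsexample->video_info.height;
  if (get_converted_mat (dsexample, surface, frame_meta->batch_id, &rect_params,
          scale_ratio, dsexample->video_info.width, dsexample->video_info.height,
          batch_meta, frame_meta) != GST_FLOW_OK) {
    goto error;
  }
}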