Live calibration from fisheye 4K camera

I would like to do live calibration of a fisheye 4K camera using OpenCV and GStreamer on a Jetson Nano.
I developed an application with CUDA and ran it, but it only reaches 16 fps. How can I speed it up to 30 fps?

My program is

#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>  /* cudaFree() */
#include "customer_functions.h"
#include "cudaEGL.h"
#include "iva_metadata.h"
#include "opencv2/core.hpp"
#include "opencv2/calib3d.hpp"
#include "opencv2/cudawarping.hpp"

const int max_width = 3840;
const int max_height = 2160;

/* Fisheye undistortion maps, built and uploaded to the GPU once before the first frame. */
static cv::cuda::GpuMat gpu_xmap, gpu_ymap;
cv::cuda::Stream stream[1];

/* Pre/post-process hooks required by the nvivafilter interface (unused here). */
static void pre_process (void **sBaseAddr, unsigned int *smemsize, unsigned int *swidth, unsigned int *sheight, unsigned int *spitch, ColorFormat *sformat, unsigned int nsurfcount, void **usrptr) {}
static void post_process (void **sBaseAddr, unsigned int *smemsize, unsigned int *swidth, unsigned int *sheight, unsigned int *spitch, ColorFormat *sformat, unsigned int nsurfcount, void **usrptr) {}

/* Undistort one RGBA frame in place: copy the mapped frame, then remap back into it. */
static void cv_process_RGBA(void *pdata, int32_t width, int32_t height)
{
    cv::cuda::GpuMat d_Mat_RGBA(height, width, CV_8UC4, pdata);
    cv::cuda::GpuMat d_Mat_RGBA_Src;

    /* remap cannot operate in place, so keep a copy of the source frame. */
    d_Mat_RGBA.copyTo(d_Mat_RGBA_Src, stream[0]);

    cv::cuda::remap(d_Mat_RGBA_Src, d_Mat_RGBA, gpu_xmap, gpu_ymap, cv::INTER_NEAREST,
                    cv::BORDER_CONSTANT, cv::Scalar(0.f, 0.f, 0.f, 0.f), stream[0]);

    /* Ensure the remap has finished before the EGL frame is released by gpu_process(). */
    stream[0].waitForCompletion();
}

static void gpu_process (EGLImageKHR image, void ** usrptr)
{
  CUresult status;
  CUeglFrame eglFrame;
  CUgraphicsResource pResource = NULL;

  cudaFree(0);  /* make sure a CUDA context exists in this thread */
  status = cuGraphicsEGLRegisterImage(&pResource, image, CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE);
  if (status != CUDA_SUCCESS) {
    printf("cuGraphicsEGLRegisterImage failed: %d\n", status);
    return;
  }

  status = cuGraphicsResourceGetMappedEglFrame(&eglFrame, pResource, 0, 0);
  if (status != CUDA_SUCCESS) {
    printf("cuGraphicsResourceGetMappedEglFrame failed\n");
  }

  /* Only pitch-linear RGBA (ABGR byte order) frames are processed here. */
  if (eglFrame.frameType == CU_EGL_FRAME_TYPE_PITCH) {
    if (eglFrame.eglColorFormat == CU_EGL_COLOR_FORMAT_ABGR) {
      cv_process_RGBA(eglFrame.frame.pPitch[0], eglFrame.width, eglFrame.height);
    } else {
      printf("Invalid eglcolorformat %d\n", eglFrame.eglColorFormat);
    }
  }

  status = cuGraphicsUnregisterResource(pResource);
  if (status != CUDA_SUCCESS) {
    printf("cuGraphicsUnregisterResource failed: %d\n", status);
  }
}
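
For reference, gpu_xmap and gpu_ymap are generated and uploaded once before the first frame. Below is a minimal sketch of that step, assuming the usual nvivafilter init() entry point (the CustomerFunction hooks from customer_functions.h) and placeholder intrinsics K and D standing in for my real fisheye calibration values:

/* Sketch only: build the fisheye undistortion maps once and upload them to the GPU.
   K and D are placeholders for the actual calibration results. */
extern "C" void init(CustomerFunction *pFuncs)
{
  pFuncs->fPreProcess  = pre_process;
  pFuncs->fGPUProcess  = gpu_process;
  pFuncs->fPostProcess = post_process;

  cv::Mat K = (cv::Mat_<double>(3, 3) << 1800.0,    0.0, 1920.0,
                                            0.0, 1800.0, 1080.0,
                                            0.0,    0.0,    1.0);
  cv::Mat D = (cv::Mat_<double>(4, 1) << -0.05, 0.01, 0.0, 0.0);

  cv::Mat xmap, ymap;
  cv::fisheye::initUndistortRectifyMap(K, D, cv::Mat::eye(3, 3, CV_64F), K,
                                       cv::Size(max_width, max_height), CV_32FC1, xmap, ymap);

  /* One-time upload, so the per-frame cost is the remap only. */
  gpu_xmap.upload(xmap);
  gpu_ymap.upload(ymap);
}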

My pipeline is

gst-launch-1.0 -e v4l2src device=/dev/video0 io-mode=2 ! image/jpeg, width=3840, height=2160, framerate=30/1 ! nvv4l2decoder mjpeg=1 ! 'video/x-raw(memory:NVMM), format=NV12' ! queue ! nvivafilter customer-lib-name=./libnvsample_cudaprocess2.so cuda-process=true ! queue ! 'video/x-raw(memory:NVMM), format=RGBA' ! nvvidconv ! 'video/x-raw(memory:NVMM), format=NV12' ! nvvidconv ! nvv4l2h264enc ! h264parse ! queue2 ! qtmux ! filesink location=4K_h264HW_calib.mp4

A screenshot from the Visual Profiler is attached.

Thank you for your support.

I'm not sure you can get 30 fps at 4K on a Nano with nvivafilter.
Maybe you don't need all these queues, especially for NVMM buffers.

My only suggestion would be to process in NV12 format rather than RGBA. It would save the NV12-to-RGBA conversion in nvivafilter, and the RGBA-to-NV12 conversion later with nvvidconv.
You would keep full-resolution maps for Y and create half-resolution maps for UV.
Check this example for processing NV12 in nvivafilter.
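
In case it helps, here is a rough sketch of what the NV12 path could look like. It assumes the frame arrives as CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR with the Y plane in pPitch[0] and the interleaved UV plane in pPitch[1], and it uses hypothetical gpu_xmap_half/gpu_ymap_half maps (half the size of gpu_xmap/gpu_ymap, with values divided by two). Since cv::cuda::remap does not handle 2-channel images, the UV plane is split, remapped per channel and merged back:

#include "opencv2/cudaarithm.hpp"   /* for cv::cuda::split / cv::cuda::merge */

static cv::cuda::GpuMat gpu_xmap_half, gpu_ymap_half;  /* hypothetical half-resolution maps */

static void cv_process_NV12(void *pYdata, void *pUVdata, int32_t width, int32_t height, int32_t pitch)
{
    /* Wrap the two NV12 planes handed over by nvivafilter (assumes both planes share the same pitch). */
    cv::cuda::GpuMat d_Y (height,     width,     CV_8UC1, pYdata,  pitch);
    cv::cuda::GpuMat d_UV(height / 2, width / 2, CV_8UC2, pUVdata, pitch);

    /* Full-resolution remap of luma (remap cannot work in place, hence the copy). */
    cv::cuda::GpuMat d_Y_src;
    d_Y.copyTo(d_Y_src, stream[0]);
    cv::cuda::remap(d_Y_src, d_Y, gpu_xmap, gpu_ymap, cv::INTER_NEAREST,
                    cv::BORDER_CONSTANT, cv::Scalar(0), stream[0]);

    /* Half-resolution remap of chroma: split interleaved UV, remap each channel, merge back. */
    cv::cuda::GpuMat uv[2], uv_dst[2];
    cv::cuda::split(d_UV, uv, stream[0]);
    for (int i = 0; i < 2; i++)
        cv::cuda::remap(uv[i], uv_dst[i], gpu_xmap_half, gpu_ymap_half, cv::INTER_NEAREST,
                        cv::BORDER_CONSTANT, cv::Scalar(128), stream[0]);
    cv::cuda::merge(uv_dst, 2, d_UV, stream[0]);

    /* Make sure all work on this frame is done before the EGL frame is released. */
    stream[0].waitForCompletion();
}

gpu_process() would then call cv_process_NV12(eglFrame.frame.pPitch[0], eglFrame.frame.pPitch[1], eglFrame.width, eglFrame.height, eglFrame.pitch) when it detects the semi-planar format.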

Thank you for your help. I tried it with NV12 and got 24 fps. I think that is a limitation of the Jetson Nano.