How to perform fish-eye lens distortion correction in gstreamer pipeline? (Hfov ~150)

While trying this cuda filter I get it to work for the 1920x1080 resolution on my camera but not the 4032x3040, any Ideas

/Fredrik

Hi I need to convert fisheye video into normal video using gstreamer or openCV
I have RTSP Stream . please tell me how to do

You would build the custom lib from post #16, then you would use a pipeline reading your rtsp source and decoding to NVMM memory with uridecodebin and then pass to nvivafilter with your custom library, and here displaying:

gst-launch-1.0 -e uridecodebin uri=rtsp://<server>:<port>/path ! nvivafilter customer-lib-name=./lib-gst-custom-opencv_cudaprocess.so cuda-process=true ! 'video/x-raw(memory:NVMM), format=RGBA' ! nvoverlaysink

你的这个问题,我觉得可以回复一下你,我最近也在尝试将广角镜头采集的图像进行畸变矫正,我是直接用的deepstream官方提供的例子,在文件夹/opt/nvidia/deepstream/deepstream-5.1/sources/apps/sample_apps/deepstream-dewarper-test中,有现成的例子,我们只需要改变config_dewarper_perspective.txt文件中的相关参数,默认的参数如下:

[property]
#dewarp-dump-frames=10
output-width=3680
output-height=2428
num-batch-buffers=1
[surface0]
# 1=PushBroom, 2=VertRadCyl, 3=PerspectivePerspective
projection-type=3
width=3680
height=2428
focal-length=1721.592
#src-fov=180
#top-angle=30
#bottom-angle=-30
distortion=-0.09787;0.00574;-0.00013;0;0
src-x0=1861.88
src-y0=1216.04
# 0=cudaAddressModeClamp, 1=cudaAddressModeBorder
#cuda-address-mode=0

我们需要根据自己的实际视频进行修改。主要修改的地方有三处:
1、图像的分辨率宽高,我的视频是1920*1080;
2、修改畸变系数;(畸变系数属于相机内参标定的结果,不是外参)
3、修改焦距;
我修改后的config_dewarper_perspective.txt文件配置如下:

################################################################################
config_dewarper_perspective.txt
################################################################################

[property]
#dewarp-dump-frames=10
output-width=1920
output-height=1080
num-batch-buffers=1

#########################################
# Note - Max 4 surfaces are supported
#########################################

[surface0]
# 1=PushBroom, 2=VertRadCyl, 3=PerspectivePerspective
projection-type=3
width=1920
height=1080
#focal-length=1721.592
# NOTE: 此处的 9958.649… 与下文 OpenCV 例子中内参矩阵的 fx=9558.649… 只差一位数字,
# 疑似其中一处为笔误,请以实际标定结果为准
focal-length=9958.649257742036
#src-fov=180
#top-angle=30
#bottom-angle=-30
#distortion=-0.09787;0.00574;-0.00013;0;0
#distortion= -6.956561513881647;-68.83902522804168; -0.004834538444671919;0.01471273691928269;-0.4916103704308509
distortion= -6.956561513881647;-68.83902522804168; -0.004834538444671919;0;0
src-x0=960
src-y0=540
# 0=cudaAddressModeClamp, 1=cudaAddressModeBorder
#cuda-address-mode=0

然后先进行编译(具体可以看readme)
$ cd apps/deepstream-dewarper-app/
$ make
然后运行:
./deepstream-dewarper-app file:///home/nvidia/my_test.mp4 0

我测试了最终效果与opencv利用initUndistortRectifyMap与remap(src_picture, newimage, mapx, mapy, INTER_LINEAR);函数的效果是一样的。但是似乎opencv的参数比deepstream中deepstream-dewarper-test参数多一些。我opencv的例子如下:

/*****************************************************
2021.4.29:畸变校正
******************************************************/
#include "opencv2/core/core.hpp"    
#include "opencv2/imgproc/imgproc.hpp"    
#include "opencv2/calib3d/calib3d.hpp"    
#include "opencv2/highgui/highgui.hpp"    
#include <iostream>    
#include <fstream>    

using namespace cv;
using namespace std;

#define SRC_WIDTH  1920
#define SRC_HEIGHT 1080

/**
 * @brief Fill a float Mat row-by-row from a flat, row-major C array.
 * @param num Pointer to at least m.rows * m.cols floats (row-major).
 * @param m   Destination matrix; element type must be CV_32F.
 *
 * Bug fix: the original indexed the source array with `i * m.rows + j`,
 * i.e. it used the row COUNT as the row stride. That only happens to work
 * for square matrices (3x3 intrinsics) and single-row matrices (1x5
 * distortion vector); for any other shape it reads the wrong elements.
 * The correct row-major stride is the number of columns.
 */
void InitMat(float* num, Mat& m)
{
	for (int i = 0; i < m.rows; i++)
		for (int j = 0; j < m.cols; j++)
			m.at<float>(i, j) = *(num + i * m.cols + j);
}

// Undistortion lookup tables filled by initUndistortRectifyMap() in
// jiaozheng_init(): for every destination pixel they store the source
// x / y coordinate to sample from.
Mat mapx = Mat(Size(SRC_WIDTH, SRC_HEIGHT), CV_32FC1);
Mat mapy = Mat(Size(SRC_WIDTH, SRC_HEIGHT), CV_32FC1);
Mat newimage ; // reusable buffer for the corrected (undistorted) output frame

/**
 * @brief One-time setup: build the undistortion maps (file-scope mapx/mapy)
 *        from the measured camera intrinsics and distortion coefficients.
 *
 * Must be called once before jibianjiaozheng(); the per-pixel remap tables
 * are precomputed here so that each frame only needs a cheap remap() call.
 *
 * (Also removed an unused local `int OK = 0;` from the original.)
 */
void jiaozheng_init(void)
{
	// Rectification transform: identity (mono camera, no stereo rectification).
	Mat R = Mat::eye(3, 3, CV_32F);

	// Camera intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1] from calibration.
	float neican_data[] = { 9558.649257742036, 0, 959.3165310990756, 0, 9435.752651759443, 532.7507141910969, 0, 0, 1 };
	Mat cameraMatrix(3, 3, CV_32FC1);
	InitMat(neican_data, cameraMatrix);
	cout << "cameraMatrix= " << endl << " " << cameraMatrix << endl << endl;

	// Measured distortion coefficients: k1, k2, p1, p2, k3.
	float jibian_data[] = { -6.956561513881647, -68.83902522804168, -0.004834538444671919, 0.01471273691928269, -0.4916103704308509 };
	Mat distCoeffs(1, 5, CV_32FC1);
	InitMat(jibian_data, distCoeffs);
	cout << "distCoeffs= " << endl << " " << distCoeffs << endl << endl;

	// Precompute the per-pixel remap tables; using cameraMatrix as the new
	// camera matrix keeps the output image scale identical to the input.
	initUndistortRectifyMap(cameraMatrix, distCoeffs, R, cameraMatrix, Size(SRC_WIDTH, SRC_HEIGHT), CV_32FC1, mapx, mapy);
}

/**
 * @brief Undistort one frame using the precomputed mapx/mapy tables.
 * @param src_picture Input (distorted) frame.
 * @param dst_picture Receives a deep copy of the corrected frame.
 * @return 0 on success.
 *
 * jiaozheng_init() must have been called first.
 *
 * Bug fixes:
 *  - The original returned `newimage + 1`, adding 1 to every pixel value —
 *    a leftover debug/test artifact that slightly brightened the output.
 *  - The output is now cloned so the caller's Mat does not alias the
 *    internal buffer that remap() overwrites on the next call.
 */
int jibianjiaozheng(Mat src_picture, Mat & dst_picture)
{
	static bool buffer_allocated = false; // allocate the work buffer once

	if (!buffer_allocated)
	{
		newimage = src_picture.clone();
		buffer_allocated = true;
	}

	remap(src_picture, newimage, mapx, mapy, INTER_LINEAR);
	dst_picture = newimage.clone();
	return 0;
}


/**
 * @brief Demo driver: undistorts either a single still image (#if 0 branch)
 *        or every frame of a video file, printing per-step timings.
 *
 * Bug fix: the per-frame "read time" measurement mixed units — it subtracted
 * a millisecond value (timestart) from a raw clock() tick count and then
 * scaled the difference again. Both endpoints are now converted to
 * milliseconds before subtracting.
 */
int main()
{
#if 0 // test with a single still image
	int hrst = 0;
	Mat frame = imread("yuantu.jpg"); // load the distorted input image
	Mat after;
	static unsigned int jiaozheng_init_flag = 0;
	if (!jiaozheng_init_flag)
	{
		printf("初始化\n");
		jiaozheng_init();
		jiaozheng_init_flag = 1;
	}

	hrst = jibianjiaozheng(frame, after);
	if (hrst != 0)
	{
		printf("畸变矫正失败\n");
	}
	namedWindow("畸变校正后的图片", 0); // flag 0: window is freely resizable
	imshow("畸变校正后的图片", after);
#else // test with a video file
	VideoCapture capture;
	capture.open("test_1.mp4");
	if (!capture.isOpened())
	{
		printf("文件打开失败");
	}
	Mat frame;
	Mat after;
	int hrst = 0;
	unsigned int jiaozheng_init_flag = 0;
	int timestart, timeEnd, duration; // all in milliseconds
	while (true)
	{
		timestart = clock() * 1000 / CLOCKS_PER_SEC;
		capture >> frame;
		if (frame.data == NULL)
		{
			printf("Image is empty\n");
			break;
		}
		// Fixed: convert the end point to ms as well before subtracting
		// (the original subtracted ms from raw clock ticks).
		duration = clock() * 1000 / CLOCKS_PER_SEC - timestart;
		printf("读图用时约: %d毫秒 \n", duration);
		if (!jiaozheng_init_flag)
		{
			// Lazily build the remap tables on the first frame.
			printf("初始化\n");
			jiaozheng_init();
			jiaozheng_init_flag = 1;
		}

		hrst = jibianjiaozheng(frame, after);
		if (hrst != 0)
		{
			printf("畸变矫正失败\n");
		}
		timeEnd = clock() * 1000 / CLOCKS_PER_SEC;
		duration = timeEnd - timestart;
		printf("畸变校正用时约: %d毫秒 \n", duration);
		printf("\n");
		//namedWindow("畸变校正后的图片", 0); // enable to preview the result
		//imshow("畸变校正后的图片", after);
		//waitKey(2);
	}

#endif

	return 0;
}

可以看到deepstream的例子在畸变参数中只用了前三个,最后两个为0。内参数矩阵只用了焦距。

Hi all,

I use a FOV120 camera.
I record to MP4 file by my camera.
The OpenCV example of 2532098972 : How to perform fish-eye lens distortion correction in gstreamer pipeline? (Hfov ~150) - #24 by 2532098972
I modified some parameters(depend my camera) in the OpenCV example of 2532098972.
The MP4 correctly remove distortion when I use the OpenCV example of 2532098972.
I would like to use deepstream-app for my camera.
Is there any method that could merge the OpenCV example to deepstream-app?
Finally, the video stream remove distortion when use deepstream-app for my camera.

Best regards

-Jason

src-x0 src-y0是什么参数呢?

可以的,你可以把畸变矫正加到deepstream中,即使用deepstream-app -c 配置文件的形式。解决办法如下:
1、 配置文件修改——通过配置文件达到是否使能畸变矫正
主要增加:dewarper_enable=1
dewarper_config_file=/home/movex/config_dewarper_perspective.txt,告诉deepstream我使能了畸变矫正。

[source3]
enable=1
#Type - 1=CameraV4L2 2=URI 3=MultiURI
type=2
uri=file:///home/movex/test_1.mp4
num-sources=1
#drop-frame-interval=2
gpu-id=0
# (0): memtype_device   - Memory type Device
# (1): memtype_pinned   - Memory type Host Pinned
# (2): memtype_unified  - Memory type Unified
cudadec-memtype=0

dewarper_enable=1
dewarper_config_file=/home/movex/config_dewarper_perspective.txt
nvbuf-memory-type=3
#source-id=0
num-output-buffers=1
num-batch-buffers=1

2、修改解析配置文件的函数——gboolean parse_source (NvDsSourceConfig *config, GKeyFile *key_file, gchar *group, gchar *cfg_file_path)

在gboolean parse_source (NvDsSourceConfig *config, GKeyFile *key_file, gchar *group, gchar *cfg_file_path)
中,添加以下内容:

/***************************************************************************************/
/* Patch fragment for deepstream-app's parse_source(): recognizes two new
 * per-source config keys so the dewarper can be toggled from the config
 * file instead of being hard-coded. Insert into the existing key-parsing
 * else-if chain. */
#define CONFIG_GROUP_SOURCE_DEWARPER_ENABLE "dewarper_enable"
#define CONFIG_GROUP_SOURCE_DEWARPER_CONFIG_FILE "dewarper_config_file"
#if 1
	/* "dewarper_enable" -> integer flag stored in config->dewarper_config.enable */
	} else if (!g_strcmp0 (*key, CONFIG_GROUP_SOURCE_DEWARPER_ENABLE))
	{
		printf("add by maohuifei 0519 1101,config->dewarper_config.enable=%d\n",config->dewarper_config.enable);
		config->dewarper_config.enable =
		g_key_file_get_integer (key_file, group,
		CONFIG_GROUP_SOURCE_DEWARPER_ENABLE, &error);
		printf("add by maohuifei 0519 1101,config->dewarper_config.enable=%d\n",config->dewarper_config.enable);
		CHECK_ERROR (error);
	} 

	/* "dewarper_config_file" -> path string stored in config->dewarper_config.config_file */
	else if (!g_strcmp0 (*key, CONFIG_GROUP_SOURCE_DEWARPER_CONFIG_FILE)) 
	{
		printf("add by maohuifei 0519 1101,config->dewarper_config.config_file=%s\n",config->dewarper_config.config_file);
		config->dewarper_config.config_file =
		g_key_file_get_string (key_file, group,
		CONFIG_GROUP_SOURCE_DEWARPER_CONFIG_FILE, &error);
		printf("add by maohuifei 0519 1101,config->dewarper_config.config_file=%s\n",config->dewarper_config.config_file);

		CHECK_ERROR (error);
	/* NOTE(review): the closing '}' for this else-if block appears to be
	 * missing before #endif — confirm against the surrounding function
	 * body when applying this patch. */
#endif
/***************************************************************************************/

这样你的工程就能通过解析配置文件来达到是否使能畸变矫正,而不是写死代码。

3、在函数static gboolean
create_camera_source_bin (NvDsSourceConfig * config, NvDsSrcBin * bin)中
增加或者替换如下代码:
3.1、增加create_dewarper_bin

	/* Patch fragment for create_camera_source_bin(): when the source's
	 * config enables dewarping, build the dewarper sub-bin before the
	 * pipeline elements are linked. `done` is the function's existing
	 * error-exit label. */
	if (config->dewarper_config.enable) 
	{
		/* Create the distortion-correction (dewarper) bin. */
		if (!create_dewarper_bin (&config->dewarper_config, &bin->dewarper_bin)) 
		{
			g_print ("Creating Dewarper bin failed \n");
			goto done;
		}
		else
		{
			printf("\n\n\n !!!!!  add by maohuifei, create_dewarper_bin ok \n");
		}
	}

3.2、修改pipline,增加畸变矫正模块。

/* Patch fragment: insert bin->dewarper_bin.bin into the source bin's
 * element chain. The dGPU path (non-Tegra) keeps an extra nvvidconv1
 * before the dewarper; the Tegra path links the dewarper directly after
 * cap_filter1. */
#ifndef IS_TEGRA
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->cap_filter1,
        nvvidconv1, bin->dewarper_bin.bin,nvvidconv2, bin->cap_filter,
        NULL);
#else
    gst_bin_add_many (GST_BIN (bin->bin), bin->src_elem, bin->cap_filter1,
        bin->dewarper_bin.bin, nvvidconv2, bin->cap_filter, NULL);
#endif

4、最后重新编译deepstream
make clean;make all

1 Like

我看deepstream官方是图像宽高的一半,实际意义应该是图像传感器正中心

Please initial a new topic for discussion. Thanks