
Dual-camera video stitching with OpenCV

#include "stdafx.h"
#include "highgui/highgui.hpp"    
#include "opencv2/nonfree/nonfree.hpp"    
#include "opencv2/legacy/legacy.hpp"   
#include <iostream>
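// Note: SURF lives in OpenCV's non-free module, so this code assumes an
// OpenCV 2.4.x build with the nonfree (and legacy) modules available;
// stdafx.h and _tmain are Visual Studio precompiled-header conventions.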


using namespace cv;
using namespace std;

void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);

typedef struct
{
	Point2f left_top;
	Point2f left_bottom;
	Point2f right_top;
	Point2f right_bottom;
}four_corners_t;

four_corners_t corners;

void CalcCorners(const Mat& H, const Mat& src)
{
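	// Project the four corners of src through the homography H and store the
	// dehomogenized results (x/w, y/w) in the global `corners`; each corner is
	// built as a 3x1 homogeneous column vector, multiplied by H, and the result
	// is read back through the local array v1 that V1 wraps.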
	double v2[] = { 0, 0, 1 };//top-left corner (homogeneous coordinates)
	double v1[3];//transformed coordinates
	Mat V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	Mat V1 = Mat(3, 1, CV_64FC1, v1);  //column vector

	V1 = H * V2;
	//top-left corner (0,0,1)
	cout << "V2: " << V2 << endl;
	cout << "V1: " << V1 << endl;
	corners.left_top.x = v1[0] / v1[2];
	corners.left_top.y = v1[1] / v1[2];

	//bottom-left corner (0, src.rows, 1)
	v2[0] = 0;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.left_bottom.x = v1[0] / v1[2];
	corners.left_bottom.y = v1[1] / v1[2];

	//top-right corner (src.cols, 0, 1)
	v2[0] = src.cols;
	v2[1] = 0;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_top.x = v1[0] / v1[2];
	corners.right_top.y = v1[1] / v1[2];

	//bottom-right corner (src.cols, src.rows, 1)
	v2[0] = src.cols;
	v2[1] = src.rows;
	v2[2] = 1;
	V2 = Mat(3, 1, CV_64FC1, v2);  //column vector
	V1 = Mat(3, 1, CV_64FC1, v1);  //column vector
	V1 = H * V2;
	corners.right_bottom.x = v1[0] / v1[2];
	corners.right_bottom.y = v1[1] / v1[2];

}

//Blend the join between the two images so the stitch looks natural
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst)
{
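	// Inside the overlap [start, img1.cols), each output pixel is
	// alpha*img1 + (1-alpha)*trans, with alpha falling linearly from 1 at the
	// left edge of the overlap to 0 at its right edge; all-black pixels in
	// trans are taken entirely from img1.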
	int start = MIN(corners.left_top.x, corners.left_bottom.x);//start position, i.e. the left boundary of the overlap region

	double processWidth = img1.cols - start;//width of the overlap region
	int rows = dst.rows;
	int cols = img1.cols; //number of columns; the *3 channel offset is applied when indexing below
	double alpha = 1;//blending weight of the img1 pixel
	for (int i = 0; i < rows; i++)
	{
		uchar* p = img1.ptr<uchar>(i);  //pointer to the start of row i
		uchar* t = trans.ptr<uchar>(i);
		uchar* d = dst.ptr<uchar>(i);
		for (int j = start; j < cols; j++)
		{
			//if the warped image trans has no pixel here (an all-black point), copy img1's data unchanged
			if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0)
			{
				alpha = 1;
			}
			else
			{
				//img1's weight falls off linearly with the distance from the left edge of the overlap, so img1 fades into trans across the seam; in practice this blends well
				alpha = (processWidth - (j - start)) / processWidth;
			}

			d[j * 3] = p[j * 3] * alpha + t[j * 3] * (1 - alpha);
			d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
			d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);

		}
	}

}


int _tmain(int argc, _TCHAR* argv[])
{
	VideoCapture cap1(0);//left camera
	VideoCapture cap2(1);//right camera

	double rate = 60;
	int delay = 1000 / rate;
	bool stop(false);
	Mat frame1;
	Mat frame2;
	Mat frame3;
	Mat frame4;
	int k = 100;
	int n = 50;
	Mat image01;
	Mat image02;
	Mat image03;
	Mat image04;
	Mat imageTransform1, imageTransform2;
	Mat homo;

	namedWindow("cam1", CV_WINDOW_AUTOSIZE);
	namedWindow("cam2", CV_WINDOW_AUTOSIZE);

	if (cap1.isOpened() && cap2.isOpened())
	{
		cout << "*** ***" << endl;
		cout << "攝像頭已啟動!" << endl;
	}
	else
	{
		cout << "*** ***" << endl;
		cout << "警告:請檢查攝像頭是否安裝好!" << endl;
		cout << "程式結束!" << endl << "*** ***" << endl;
		return -1;
	}

	cap1.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	cap1.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
	cap2.set(CV_CAP_PROP_FRAME_WIDTH, 640);
	cap2.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
	cap1.set(CV_CAP_PROP_FOCUS, 0);
	cap2.set(CV_CAP_PROP_FOCUS, 0);

	//Grab two reference images; they are used to estimate the transform between the two camera views
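	//The preview loop below runs n times and keeps overwriting frame1.bmp /
	//frame2.bmp; the last saved pair becomes the reference images from which
	//the homography is estimated once, before live stitching starts.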

	while (n--)
	{
		if (cap1.read(frame1) && cap2.read(frame2))
		{
			imshow("cam1", frame1);
			imshow("cam2", frame2);
			imwrite("frame1.bmp", frame1);
			imwrite("frame2.bmp", frame2);
		}
		if (waitKey(1) == 27)//ESC pressed
		{
			stop = true;
			cout << "程式結束!" << endl;
			cout << "*** ***" << endl;
		}
	}

	image02 = imread("frame1.bmp", 1);    //left image
	image01 = imread("frame2.bmp", 1);    //right image

	//convert to grayscale
	Mat image1, image2;
	cvtColor(image01, image1, CV_BGR2GRAY);   //frames from imread/VideoCapture are BGR
	cvtColor(image02, image2, CV_BGR2GRAY);


	//detect feature points
	SurfFeatureDetector Detector(2000);
	vector<KeyPoint> keyPoint1, keyPoint2;
	Detector.detect(image1, keyPoint1);
	Detector.detect(image2, keyPoint2);

	//compute descriptors to prepare for the feature matching below
	SurfDescriptorExtractor Descriptor;
	Mat imageDesc1, imageDesc2;
	Descriptor.compute(image1, keyPoint1, imageDesc1);
	Descriptor.compute(image2, keyPoint2, imageDesc2);

	FlannBasedMatcher matcher;
	vector<vector<DMatch> > matchePoints;
	vector<DMatch> GoodMatchePoints;

	vector<Mat> train_desc(1, imageDesc1);
	matcher.add(train_desc);
	matcher.train();

	matcher.knnMatch(imageDesc2, matchePoints, 2);
	cout << "total match points: " << matchePoints.size() << endl;

	// Lowe's ratio test: keep only the distinctive matches
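	// A candidate match is kept only if its best distance is below 0.4x the
	// second-best distance, which discards ambiguous correspondences before RANSAC.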
	for (int i = 0; i < matchePoints.size(); i++)
	{
		if (matchePoints[i][0].distance < 0.4 * matchePoints[i][1].distance)
		{
			GoodMatchePoints.push_back(matchePoints[i][0]);
		}
	}

	Mat first_match;
	drawMatches(image02, keyPoint2, image01, keyPoint1, GoodMatchePoints, first_match);

	vector<Point2f> imagePoints1, imagePoints2;

	for (int i = 0; i<GoodMatchePoints.size(); i++)
	{
		imagePoints2.push_back(keyPoint2[GoodMatchePoints[i].queryIdx].pt);
		imagePoints1.push_back(keyPoint1[GoodMatchePoints[i].trainIdx].pt);
	}

	//estimate the 3x3 projective mapping (homography) from image 1 to image 2
	homo = findHomography(imagePoints1, imagePoints2, CV_RANSAC);
	////getPerspectiveTransform could also be used here, but it takes exactly 4 point pairs and is usually less accurate
	/*Mat   homo=getPerspectiveTransform(imagePoints1,imagePoints2);  */
	cout << "Homography:\n" << homo << endl << endl; //print the mapping matrix
   

	//compute the coordinates of the four corners of the registered (warped) image
	CalcCorners(homo, image01);
	cout << "left_top:" << corners.left_top << endl;
	cout << "left_bottom:" << corners.left_bottom << endl;
	cout << "right_top:" << corners.right_top << endl;
	cout << "right_bottom:" << corners.right_bottom << endl;

	while (!stop)
	{
		double t1 = getTickCount();
		if (cap1.read(frame3) && cap2.read(frame4))
		{
			imshow("cam1", frame3);
			imshow("cam2", frame4);

			image03 = frame3.clone();
			image04 = frame4.clone();
			/*imwrite("frame3.bmp", frame1);
			imwrite("frame4.bmp", frame2);*/

			//image registration: warp the right frame into the left frame's coordinate system
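			//The warp canvas width is the larger x of the two projected right-hand
			//corners, so the warped right frame fits entirely; the height is kept
			//equal to the left frame's height.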
			warpPerspective(image04, imageTransform1, homo, Size(MAX(corners.right_top.x, corners.right_bottom.x), image03.rows));
			//warpPerspective(image01, imageTransform2, adjustMat*homo, Size(image02.cols*1.3, image02.rows*1.8));

			//create the stitched output image; its size must be computed in advance
			int dst_width = imageTransform1.cols;  //the rightmost projected corner gives the panorama width
			/*int dst_height = image03.rows;*/
			int dst_height = imageTransform1.rows;

			int dst1_width = imageTransform1.cols;
			int dst1_height = corners.right_bottom.y;

			Mat dst1(dst1_height, dst1_width, CV_8UC3);
			dst1.setTo(0);

			Mat dst(dst_height, dst_width, CV_8UC3);
			dst.setTo(0);
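			//Compose the canvas: copy the warped right frame first, then overlay the
			//untouched left frame on the top-left region, and finally feather the
			//seam inside the overlap with OptimizeSeam.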

			imageTransform1.copyTo(dst(Rect(0, 0, imageTransform1.cols, imageTransform1.rows)));
			image03.copyTo(dst(Rect(0, 0, image03.cols, image03.rows)));

			OptimizeSeam(image03, imageTransform1, dst);
			/*dst.rowRange(1,dst1_height).copyTo(dst1.rowRange(1, dst1_height));*/

			imshow("dst", dst);

			if (waitKey(1) == 27)//ESC pressed
			{
				stop = true;
				cout << "程式結束!" << endl;
				cout << "*** ***" << endl;
			}
		}
		cout << "拼接時間: " << ((getTickCount() - t1) / getTickFrequency()) << " sec" << endl;
	}

	return 0;
}