程式人生 > opencv學習筆記七十:影象拼接

opencv學習筆記七十:影象拼接

簡要:

影象拼接在實際的應用場景很廣,舉一個例子,用你的手機對某一場景拍照,但是沒有辦法一次將所有你要拍的景物全部拍下來,所以你對該場景從左往右依次拍了好幾張圖,來把你要拍的所有景物記錄下來,影象拼接就是要將這些影象拼接成一個完整的大圖。

核心:

  • 特徵點檢測
  • 特徵點匹配
  • 影象配準(透視變換)
  • 影象拷貝
  • 影象融合
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp>

#include <algorithm>
#include <vector>
using namespace cv;
using namespace xfeatures2d;
using namespace std;

// Forward declaration: blends the seam between the left image img1 and the
// warped right image trans inside the stitched output dst (defined below).
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst);
// Global corner buffers shared between main() and OptimizeSeam():
// src2Corners holds the four corners of the right image; trans2Corners holds
// the same corners after the perspective transform (in the left image frame).
vector<Point2f>src2Corners(4), trans2Corners(4);

// Stitch two overlapping photos (left: 15.jpeg, right: 16.jpeg) into one
// panorama: SURF keypoints -> FLANN matching -> homography (RANSAC) ->
// perspective warp -> copy -> seam blending. Writes fusion.jpg on success.
// Returns 0 on success, -1 on any unrecoverable input/estimation failure.
int main(int argc, char** argv)
{
	Mat src1 = imread("15.jpeg");
	Mat src2 = imread("16.jpeg");
	// imread returns an empty Mat on failure; everything below would crash.
	if (src1.empty() || src2.empty()) {
		printf("could not load input images 15.jpeg / 16.jpeg\n");
		return -1;
	}
	namedWindow("input1", CV_WINDOW_AUTOSIZE);
	imshow("input1", src1);
	imshow("input2", src2);

	// SURF feature detection and descriptor extraction.
	int minHessian = 400;
	Ptr<SURF>surf = SURF::create(minHessian);
	vector<KeyPoint>keypoints1, keypoints2;
	Mat descriptors1, descriptors2;
	surf->detectAndCompute(src1, Mat(), keypoints1, descriptors1);
	surf->detectAndCompute(src2, Mat(), keypoints2, descriptors2);
	// FlannBasedMatcher::match asserts on empty descriptor sets.
	if (descriptors1.empty() || descriptors2.empty()) {
		printf("no features detected in one of the images\n");
		return -1;
	}

	// Descriptor matching (FLANN-based nearest neighbour).
	FlannBasedMatcher matcher;
	vector<DMatch>matches;
	matcher.match(descriptors1, descriptors2, matches);

	// Keep only good matches: find the smallest match distance, then accept
	// matches within 3x of it (alternatively, sort matches by distance with
	// sort(matches.begin(), matches.end()) and keep the best ones).
	double minDist = 1000;
	for (size_t i = 0; i < matches.size(); i++) {
		double dist = matches[i].distance;
		if (dist < minDist) {
			minDist = dist;
		}
	}
	printf("min distance:%lf\n", minDist);
	vector<DMatch>goodMatchers;
	for (size_t i = 0; i < matches.size(); i++) {
		double dist = matches[i].distance;
		// The 0.02 floor keeps the threshold sane when minDist is near zero.
		if (dist < max(3 * minDist, 0.02)) {
			goodMatchers.push_back(matches[i]);
		}
	}

	// Visualize the good matches.
	Mat matches_img;
	drawMatches(src1, keypoints1, src2, keypoints2, goodMatchers, matches_img, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	imshow("output", matches_img);

	// Image registration: estimate the homography mapping src2 onto src1.
	vector<Point2f> imgPoints1, imgPoints2;
	for (size_t j = 0; j < goodMatchers.size(); j++) {
		imgPoints1.push_back(keypoints1[goodMatchers[j].queryIdx].pt);
		imgPoints2.push_back(keypoints2[goodMatchers[j].trainIdx].pt);
	}
	// findHomography needs at least 4 point pairs and may still fail.
	if (imgPoints1.size() < 4) {
		printf("not enough good matches (%d) to estimate a homography\n", (int)imgPoints1.size());
		return -1;
	}
	Mat M = findHomography(imgPoints2, imgPoints1, RANSAC);
	if (M.empty()) {
		printf("homography estimation failed\n");
		return -1;
	}

	// The four corners of src2 (before warping).
	src2Corners[0].x = 0;         // top-left
	src2Corners[0].y = 0;

	src2Corners[1].x = 0;         // bottom-left
	src2Corners[1].y = src2.rows;

	src2Corners[2].x = src2.cols; // top-right
	src2Corners[2].y = 0;

	src2Corners[3].x = src2.cols; // bottom-right
	src2Corners[3].y = src2.rows;

	// Corners of src2 after the perspective transform (src1's coordinates).
	perspectiveTransform(src2Corners, trans2Corners, M);

	// Warp src2 into src1's frame. The output width is cropped to the nearer
	// of the two warped right corners — presumably to cut off the slanted
	// right edge (black wedge) of the warp; height is clamped to src1's.
	Mat src2Transform;
	warpPerspective(src2, src2Transform, M, Size(static_cast<int>(min(trans2Corners[2].x, trans2Corners[3].x)), src1.rows));
	imshow("imgTransform2", src2Transform);

	// Compose the panorama: paste the warped src2 first, then overwrite the
	// left part with src1 (the original, undistorted pixels).
	int dst_width = src2Transform.cols;  // rightmost column of the warped image
	int dst_height = src2Transform.rows;
	Mat dst(dst_height, dst_width, src2.type());
	dst.setTo(0);
	src2Transform.copyTo(dst(Rect(0, 0, src2Transform.cols, src2Transform.rows)));
	src1.copyTo(dst(Rect(0, 0, src1.cols, src1.rows)));
	imshow("dst", dst);

	// Blend the overlap region to hide the seam, then save the result.
	OptimizeSeam(src1, src2Transform, dst);
	imshow("fusion_dst", dst);
	imwrite("fusion.jpg", dst);
	waitKey(0);
	return 0;
}

// Blend the overlap between img1 (the left image) and trans (the warped
// right image) inside dst using a linear ramp: img1's weight falls from 1 at
// the left edge of the overlap to 0 at img1's right edge. Pixels where trans
// is pure black (outside the warped image) keep img1's value entirely.
// Reads the overlap's left boundary from the global trans2Corners.
// NOTE(review): all three Mats are indexed as 3-channel 8-bit (CV_8UC3) —
// confirm callers never pass other formats.
void OptimizeSeam(Mat& img1, Mat& trans, Mat& dst){
	// Left boundary of the overlap = leftmost x of the warped left corners.
	int start = static_cast<int>(min(trans2Corners[0].x, trans2Corners[1].x));
	// Clamp: the transform may place a corner left of column 0, which would
	// make the j loop index out of bounds below.
	if (start < 0) start = 0;
	double processWidth = img1.cols - start; // width of the overlap region
	if (processWidth <= 0) return;           // no overlap: avoid divide-by-zero
	// Only walk rows/columns present in all three images, so a size mismatch
	// among img1/trans/dst cannot read or write out of bounds.
	int rows = min(dst.rows, min(img1.rows, trans.rows));
	int cols = min(img1.cols, min(trans.cols, dst.cols));
	double alpha = 1; // weight of img1's pixel
	for (int i = 0; i < rows; i++){
		uchar* p = img1.ptr<uchar>(i); // row i of the left image
		uchar* t = trans.ptr<uchar>(i); // row i of the warped right image
		uchar* d = dst.ptr<uchar>(i);   // row i of the output
		for (int j = start; j < cols; j++) {
			// Black pixel in trans => outside the warped image: keep img1 as-is.
			if (t[j * 3] == 0 && t[j * 3 + 1] == 0 && t[j * 3 + 2] == 0) {
				alpha = 1;
			}
			else {
				// Linear ramp across the overlap width.
				alpha = (processWidth - (j - start)) / processWidth;
			}
			d[j * 3]     = p[j * 3]     * alpha + t[j * 3]     * (1 - alpha);
			d[j * 3 + 1] = p[j * 3 + 1] * alpha + t[j * 3 + 1] * (1 - alpha);
			d[j * 3 + 2] = p[j * 3 + 2] * alpha + t[j * 3 + 2] * (1 - alpha);
		}
	}
}

案例一: 

   

案例二:

   

 

案例三:

      

 

案例四:

       

 

案例五:

 

 

 參考文獻:https://www.cnblogs.com/skyfsm/p/7411961.html