1. 程式人生 > >Opencv Sift和Surf特徵實現影象無縫拼接生成全景影象

Opencv Sift和Surf特徵實現影象無縫拼接生成全景影象

Sift和Surf演算法實現兩幅影象拼接的過程是一樣的,主要分為4大部分:

  • 1. 特徵點提取和描述
  • 2. 特徵點配對,找到兩幅影象中匹配點的位置
  • 3. 通過配對點,生成變換矩陣,並對影象1應用變換矩陣生成對影象2的對映影象
  • 4. 影象2拼接到對映影象上,完成拼接

過程1、2、3沒啥好說的了,關鍵看看步驟4中的拼接部分。這裡先採用比較簡單一點的拼接方式來實現:

  • 1. 找到影象1和影象2中最強的匹配點所在的位置
  • 2. 通過對映矩陣變換,得到影象1的最強匹配點經過對映後投影到新影象上的位置座標
  • 3. 在新影象上的最強匹配點的對映座標處,銜接兩幅影象,該點左側影象完全是影象1,右側完全是影象2

這裡拼接的正確與否完全取決於特徵點的選取,如果選取的是錯誤匹配的特徵點,拼接一定失敗,所以這裡選了排在第一個的最強的匹配點,作為拼接點。

測試用例一原圖1:


測試用例一原圖2:


Sift拼接效果:


Surf拼接效果:


本例中最強匹配點的位置在圖中紅色小汽車附近,可以看到有一條像摺痕一樣的線條,這個就是兩個圖片的拼接線,並且如果圖1和圖2在拼接處的光線條件有變化的話,拼接後在銜接處左右就會顯得很突兀,如Surf拼接中。拼接效果Sift貌似要比Surf好一點。

測試用例二原圖1:


測試用例二原圖2:


Sift拼接效果:


Surf拼接效果:


以下是Opencv實現:

#include "highgui/highgui.hpp"  
#include "opencv2/nonfree/nonfree.hpp"  
#include "opencv2/legacy/legacy.hpp" 

using namespace cv;

//計算原始影象點位在經過矩陣變換後在目標影象上對應位置
Point2f getTransformPoint(const Point2f originalPoint,const Mat &transformMaxtri);

int main(int argc,char *argv[])  
{  
	Mat image01=imread(argv[1]);  
	Mat image02=imread(argv[2]);
	imshow("拼接影象1",image01);
	imshow("拼接影象2",image02);

	//灰度圖轉換
	Mat image1,image2;  
	cvtColor(image01,image1,CV_RGB2GRAY);
	cvtColor(image02,image2,CV_RGB2GRAY);

	//提取特徵點  
	SiftFeatureDetector siftDetector(800);  // 海塞矩陣閾值
	vector<KeyPoint> keyPoint1,keyPoint2;  
	siftDetector.detect(image1,keyPoint1);  
	siftDetector.detect(image2,keyPoint2);	

	//特徵點描述,為下邊的特徵點匹配做準備  
	SiftDescriptorExtractor siftDescriptor;  
	Mat imageDesc1,imageDesc2;  
	siftDescriptor.compute(image1,keyPoint1,imageDesc1);  
	siftDescriptor.compute(image2,keyPoint2,imageDesc2);	

	//獲得匹配特徵點,並提取最優配對  	
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;  
	matcher.match(imageDesc1,imageDesc2,matchePoints,Mat());
	sort(matchePoints.begin(),matchePoints.end()); //特徵點排序	
	//獲取排在前N個的最優匹配特徵點
	vector<Point2f> imagePoints1,imagePoints2;
	for(int i=0;i<10;i++)
	{		
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);		
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);		
	}

	//獲取影象1到影象2的投影對映矩陣,尺寸為3*3
	Mat homo=findHomography(imagePoints1,imagePoints2,CV_RANSAC);		
	Mat adjustMat=(Mat_<double>(3,3)<<1.0,0,image01.cols,0,1.0,0,0,0,1.0);
	Mat adjustHomo=adjustMat*homo;

	//獲取最強配對點在原始影象和矩陣變換後圖像上的對應位置,用於影象拼接點的定位
	Point2f originalLinkPoint,targetLinkPoint,basedImagePoint;
	originalLinkPoint=keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint=getTransformPoint(originalLinkPoint,adjustHomo);
	basedImagePoint=keyPoint2[matchePoints[0].trainIdx].pt;

	//影象配準
	Mat imageTransform1;
	warpPerspective(image01,imageTransform1,adjustMat*homo,Size(image02.cols+image01.cols+10,image02.rows));

	//在最強匹配點的位置處銜接,最強匹配點左側是圖1,右側是圖2,這樣直接替換影象銜接不好,光線有突變
	Mat ROIMat=image02(Rect(Point(basedImagePoint.x,0),Point(image02.cols,image02.rows)));	
	ROIMat.copyTo(Mat(imageTransform1,Rect(targetLinkPoint.x,0,image02.cols-basedImagePoint.x+1,image02.rows)));

	namedWindow("拼接結果",0);
	imshow("拼接結果",imageTransform1);	
	waitKey();  
	return 0;  
}

//計算原始影象點位在經過矩陣變換後在目標影象上對應位置
// Apply a 3x3 projective transform to a single point: lift the point to
// homogeneous coordinates [x, y, 1]^T, multiply by the matrix, then divide
// by the resulting w component to return to Cartesian coordinates.
Point2f getTransformPoint(const Point2f originalPoint,const Mat &transformMaxtri)
{
	Mat srcVec=(Mat_<double>(3,1)<<originalPoint.x,originalPoint.y,1.0);
	Mat dstVec=transformMaxtri*srcVec;
	// De-homogenize: divide through by w (the third component).
	double w=dstVec.at<double>(2,0);
	float mappedX=(float)(dstVec.at<double>(0,0)/w);
	float mappedY=(float)(dstVec.at<double>(1,0)/w);
	return Point2f(mappedX,mappedY);
}


對於銜接處存在的縫隙問題,有一個解決辦法是按一定權重疊加圖1和圖2的重疊部分:在重疊處圖2的比重是1,向著圖1的方向,越遠離銜接處,圖1的權重越來越大,圖2的權重越來越低,實現平穩過渡。按照這個思路優化過的程式碼如下:

#include "highgui/highgui.hpp"  
#include "opencv2/nonfree/nonfree.hpp"  
#include "opencv2/legacy/legacy.hpp" 

using namespace cv;

//計算原始影象點位在經過矩陣變換後在目標影象上對應位置
Point2f getTransformPoint(const Point2f originalPoint,const Mat &transformMaxtri);

int main(int argc,char *argv[])  
{  
	Mat image01=imread(argv[1]);  
	Mat image02=imread(argv[2]);
	imshow("拼接影象1",image01);
	imshow("拼接影象2",image02);

	//灰度圖轉換
	Mat image1,image2;  
	cvtColor(image01,image1,CV_RGB2GRAY);
	cvtColor(image02,image2,CV_RGB2GRAY);

	//提取特徵點  
	SiftFeatureDetector siftDetector(800);  // 海塞矩陣閾值
	vector<KeyPoint> keyPoint1,keyPoint2;  
	siftDetector.detect(image1,keyPoint1);  
	siftDetector.detect(image2,keyPoint2);	

	//特徵點描述,為下邊的特徵點匹配做準備  
	SiftDescriptorExtractor siftDescriptor;  
	Mat imageDesc1,imageDesc2;  
	siftDescriptor.compute(image1,keyPoint1,imageDesc1);  
	siftDescriptor.compute(image2,keyPoint2,imageDesc2);	

	//獲得匹配特徵點,並提取最優配對  	
	FlannBasedMatcher matcher;
	vector<DMatch> matchePoints;  
	matcher.match(imageDesc1,imageDesc2,matchePoints,Mat());
	sort(matchePoints.begin(),matchePoints.end()); //特徵點排序	
	//獲取排在前N個的最優匹配特徵點
	vector<Point2f> imagePoints1,imagePoints2;
	for(int i=0;i<10;i++)
	{		
		imagePoints1.push_back(keyPoint1[matchePoints[i].queryIdx].pt);		
		imagePoints2.push_back(keyPoint2[matchePoints[i].trainIdx].pt);		
	}

	//獲取影象1到影象2的投影對映矩陣,尺寸為3*3
	Mat homo=findHomography(imagePoints1,imagePoints2,CV_RANSAC);		
	Mat adjustMat=(Mat_<double>(3,3)<<1.0,0,image01.cols,0,1.0,0,0,0,1.0);
	Mat adjustHomo=adjustMat*homo;

	//獲取最強配對點在原始影象和矩陣變換後圖像上的對應位置,用於影象拼接點的定位
	Point2f originalLinkPoint,targetLinkPoint,basedImagePoint;
	originalLinkPoint=keyPoint1[matchePoints[0].queryIdx].pt;
	targetLinkPoint=getTransformPoint(originalLinkPoint,adjustHomo);
	basedImagePoint=keyPoint2[matchePoints[0].trainIdx].pt;

	//影象配準
	Mat imageTransform1;
	warpPerspective(image01,imageTransform1,adjustMat*homo,Size(image02.cols+image01.cols+110,image02.rows));

	//在最強匹配點左側的重疊區域進行累加,是銜接穩定過渡,消除突變
	Mat image1Overlap,image2Overlap; //圖1和圖2的重疊部分	
	image1Overlap=imageTransform1(Rect(Point(targetLinkPoint.x-basedImagePoint.x,0),Point(targetLinkPoint.x,image02.rows)));
	image2Overlap=image02(Rect(0,0,image1Overlap.cols,image1Overlap.rows));
	Mat image1ROICopy=image1Overlap.clone();  //複製一份圖1的重疊部分
	for(int i=0;i<image1Overlap.rows;i++)
	{
		for(int j=0;j<image1Overlap.cols;j++)
		{
			double weight;
			weight=(double)j/image1Overlap.cols;  //隨距離改變而改變的疊加係數
			image1Overlap.at<Vec3b>(i,j)[0]=(1-weight)*image1ROICopy.at<Vec3b>(i,j)[0]+weight*image2Overlap.at<Vec3b>(i,j)[0];
			image1Overlap.at<Vec3b>(i,j)[1]=(1-weight)*image1ROICopy.at<Vec3b>(i,j)[1]+weight*image2Overlap.at<Vec3b>(i,j)[1];
			image1Overlap.at<Vec3b>(i,j)[2]=(1-weight)*image1ROICopy.at<Vec3b>(i,j)[2]+weight*image2Overlap.at<Vec3b>(i,j)[2];
		}
	}
	Mat ROIMat=image02(Rect(Point(image1Overlap.cols,0),Point(image02.cols,image02.rows)));	 //圖2中不重合的部分
	ROIMat.copyTo(Mat(imageTransform1,Rect(targetLinkPoint.x,0, ROIMat.cols,image02.rows))); //不重合的部分直接銜接上去
	namedWindow("拼接結果",0);
	imshow("拼接結果",imageTransform1);	
	imwrite("D:\\拼接結果.jpg",imageTransform1);
	waitKey();  
	return 0;  
}

//計算原始影象點位在經過矩陣變換後在目標影象上對應位置
// Map a point through a 3x3 perspective transform and return where it lands
// in the target image (homogeneous multiply followed by perspective divide).
Point2f getTransformPoint(const Point2f originalPoint,const Mat &transformMaxtri)
{
	// Homogeneous column vector for the source point.
	Mat homogeneous=(Mat_<double>(3,1)<<originalPoint.x,originalPoint.y,1.0);
	Mat transformed=transformMaxtri*homogeneous;
	const double wx=transformed.at<double>(0,0);
	const double wy=transformed.at<double>(1,0);
	const double w=transformed.at<double>(2,0);
	// Perspective divide recovers the Cartesian coordinates.
	return Point2f((float)(wx/w),(float)(wy/w));
}


Sift拼接效果:


Surf拼接效果:


拼接處的線條消失了,也沒有見突兀的光線變化,基本實現了無縫拼接

測試用例三原圖1:


測試用例三原圖2:


拼接效果: