Learning feature2D in OpenCV: feature point extraction and matching with SIFT and SURF

Overview

      A previous post, SURF和SIFT運算元實現特徵點檢測, briefly covered detecting feature points with the SIFT and SURF operators. Building on detection, SIFT and SURF can also compute a descriptor for each keypoint, and a matcher can then pair keypoints between two images. Concretely, the pipeline is: detect keypoints with SurfFeatureDetector, compute each keypoint's descriptor (feature vector) with SurfDescriptorExtractor, and finally match the keypoints with either BruteForceMatcher (exhaustive brute-force matching) or FlannBasedMatcher (approximate nearest-neighbour matching); the two differ in how they search for the closest descriptor.

      The experiments were run with OpenCV 2.4.0 + VS2008 + Windows 7. Note that in OpenCV 2.4.x, SurfFeatureDetector is declared in opencv2/nonfree/features2d.hpp, BruteForceMatcher in opencv2/legacy/legacy.hpp, and FlannBasedMatcher in opencv2/features2d/features2d.hpp.
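For quick reference, the include lines below collect where each of these classes lives in an OpenCV 2.4.x installation; they simply mirror the headers used in the two listings that follow:

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"	// FlannBasedMatcher
#include "opencv2/nonfree/features2d.hpp"	// SurfFeatureDetector, SurfDescriptorExtractor (and the SIFT equivalents)
#include "opencv2/legacy/legacy.hpp"	// BruteForceMatcher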

Brute-force matching

First, match with the brute-force BruteForceMatcher. The code is as follows:

/**
* @brief Detect keypoints with SURF, compute their descriptors, and match them with the brute-force BruteForceMatcher
* @note SurfFeatureDetector + SurfDescriptorExtractor + BruteForceMatcher
* @author holybin
*/

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector實際在該標頭檔案中
#include "opencv2/legacy/legacy.hpp"	//BruteForceMatcher實際在該標頭檔案中
//#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher實際在該標頭檔案中
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
	Mat src_1 = imread( "D:\\opencv_pic\\cat3d120.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	Mat src_2 = imread( "D:\\opencv_pic\\cat0.jpg", CV_LOAD_IMAGE_GRAYSCALE );

	if( !src_1.data || !src_2.data )
	{ 
		cout<< " --(!) Error reading images "<<endl;
		return -1; 
	}

	//-- Step 1: Detect keypoints with the SURF detector
	int minHessian = 400;
	SurfFeatureDetector detector( minHessian );
	vector<KeyPoint> keypoints_1, keypoints_2;
	detector.detect( src_1, keypoints_1 );
	detector.detect( src_2, keypoints_2 );
	cout<<"img1--number of keypoints: "<<keypoints_1.size()<<endl;
	cout<<"img2--number of keypoints: "<<keypoints_2.size()<<endl;

	//-- Step 2: Compute SURF descriptors (feature vectors) for the keypoints
	SurfDescriptorExtractor extractor;
	Mat descriptors_1, descriptors_2;
	extractor.compute( src_1, keypoints_1, descriptors_1 );
	extractor.compute( src_2, keypoints_2, descriptors_2 );

	//-- Step 3: Match the descriptors by brute force
	BruteForceMatcher< L2<float> > matcher;
	vector< DMatch > matches;
	matcher.match( descriptors_1, descriptors_2, matches );
	cout<<"number of matches: "<<matches.size()<<endl;

	//-- Display the matching result
	Mat matchImg;
	drawMatches( src_1, keypoints_1, src_2, keypoints_2, matches, matchImg ); 
	imshow("matching result", matchImg );
	waitKey(0);

	return 0;
}
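If you would rather not pull in the legacy header, OpenCV 2.4 also provides cv::BFMatcher in opencv2/features2d/features2d.hpp. A minimal sketch of the drop-in replacement for Step 3, reusing descriptors_1 and descriptors_2 from the listing above:

//-- Step 3 (alternative): brute-force matching without opencv2/legacy/legacy.hpp
BFMatcher matcher( NORM_L2 );	// L2 distance suits SURF's floating-point descriptors
vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
cout<<"number of matches: "<<matches.size()<<endl;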

Experimental result:


FLANN matching

The brute-force results are not very good, so FlannBasedMatcher is used below instead, keeping only the good matches. The code is as follows:

/**
* @brief Detect keypoints with SURF, compute their descriptors, and match them with FlannBasedMatcher, keeping only the good matches
* @note SurfFeatureDetector + SurfDescriptorExtractor + FlannBasedMatcher
* @author holybin
*/

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector實際在該標頭檔案中
//#include "opencv2/legacy/legacy.hpp"	//BruteForceMatcher實際在該標頭檔案中
#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher實際在該標頭檔案中
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
using namespace std;

int main( int argc, char** argv )
{
	Mat src_1 = imread( "D:\\opencv_pic\\cat3d120.jpg", CV_LOAD_IMAGE_GRAYSCALE );
	Mat src_2 = imread( "D:\\opencv_pic\\cat0.jpg", CV_LOAD_IMAGE_GRAYSCALE );

	if( !src_1.data || !src_2.data )
	{ 
		cout<< " --(!) Error reading images "<<endl;
		return -1; 
	}

	//-- Step 1: Detect keypoints with the SURF detector
	int minHessian = 400;
	SurfFeatureDetector detector( minHessian );
	vector<KeyPoint> keypoints_1, keypoints_2;
	detector.detect( src_1, keypoints_1 );
	detector.detect( src_2, keypoints_2 );
	cout<<"img1--number of keypoints: "<<keypoints_1.size()<<endl;
	cout<<"img2--number of keypoints: "<<keypoints_2.size()<<endl;

	//-- Step 2: Compute SURF descriptors (feature vectors) for the keypoints
	SurfDescriptorExtractor extractor;
	Mat descriptors_1, descriptors_2;
	extractor.compute( src_1, keypoints_1, descriptors_1 );
	extractor.compute( src_2, keypoints_2, descriptors_2 );

	//-- Step 3: Match the descriptors with FLANN
	FlannBasedMatcher matcher;
	vector< DMatch > allMatches;
	matcher.match( descriptors_1, descriptors_2, allMatches );
	cout<<"number of matches before filtering: "<<allMatches.size()<<endl;

	//-- Compute the minimum and maximum descriptor distance over all matches
	double maxDist = 0;
	double minDist = 100;
	for( int i = 0; i < descriptors_1.rows; i++ )
	{
		double dist = allMatches[i].distance;
		if( dist < minDist )
			minDist = dist;
		if( dist > maxDist )
			maxDist = dist;
	}
	printf("	max dist : %f \n", maxDist );
	printf("	min dist : %f \n", minDist );

	//-- Filter the matches, keeping only the good ones (criterion used here: distance < 2*minDist)
	vector< DMatch > goodMatches;
	for( int i = 0; i < descriptors_1.rows; i++ )
	{
		if( allMatches[i].distance < 2*minDist )
			goodMatches.push_back( allMatches[i]); 
	}
	cout<<"number of matches after filtering: "<<goodMatches.size()<<endl;

	//-- Display the matching result
	Mat matchImg;
	drawMatches( src_1, keypoints_1, src_2, keypoints_2, 
		goodMatches, matchImg, Scalar::all(-1), Scalar::all(-1), 
		vector<char>(), 
		DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //do not draw unmatched keypoints
		); 
	imshow("matching result", matchImg );
	//-- Print the keypoint correspondences of the good matches
	for( int i = 0; i < goodMatches.size(); i++ )
		printf( "	good match %d: keypoints_1 [%d]  -- keypoints_2 [%d]\n", i, 
		goodMatches[i].queryIdx, goodMatches[i].trainIdx );

	waitKey(0);
	return 0;
}

Experimental result:


The second experiment shows that filtering reduces the number of matches from 49 to 33 and improves the matching accuracy. The same two experiments can of course also be run with SIFT: simply replace SurfFeatureDetector with SiftFeatureDetector and SurfDescriptorExtractor with SiftDescriptorExtractor, as sketched below.
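A minimal sketch of that swap for Steps 1 and 2 (both SIFT classes live in the same opencv2/nonfree/features2d.hpp header; unlike SURF there is no Hessian threshold, so the default constructor is used here, and Step 3 is unchanged):

//-- Step 1 (SIFT): detect keypoints with the SIFT detector
SiftFeatureDetector detector;
vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( src_1, keypoints_1 );
detector.detect( src_2, keypoints_2 );

//-- Step 2 (SIFT): compute SIFT descriptors for the keypoints
SiftDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( src_1, keypoints_1, descriptors_1 );
extractor.compute( src_2, keypoints_2, descriptors_2 );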

Extension

       Building on FLANN matching, perspective transformation and spatial mapping can be used to go one step further and locate a known object in a scene (object detection): findHomography estimates the transformation from the matched keypoints, and perspectiveTransform then maps a set of points (for example the corners of the object image) through it, as sketched below. For details, see the follow-up post: OpenCV中feature2D學習——SIFT和SURF演算法實現目標檢測.
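A minimal sketch of this extension, continuing from the FLANN listing above (it reuses src_1, keypoints_1, keypoints_2 and goodMatches, and assumes src_1 shows the object and src_2 the scene; findHomography additionally requires opencv2/calib3d/calib3d.hpp):

//-- Collect the coordinates of the matched keypoints
vector<Point2f> obj, scene;
for( size_t i = 0; i < goodMatches.size(); i++ )
{
	obj.push_back( keypoints_1[ goodMatches[i].queryIdx ].pt );
	scene.push_back( keypoints_2[ goodMatches[i].trainIdx ].pt );
}

//-- Estimate the perspective transform (homography) with RANSAC
Mat H = findHomography( obj, scene, CV_RANSAC );

//-- Map the corners of the object image into the scene
vector<Point2f> objCorners(4), sceneCorners(4);
objCorners[0] = Point2f( 0, 0 );
objCorners[1] = Point2f( (float)src_1.cols, 0 );
objCorners[2] = Point2f( (float)src_1.cols, (float)src_1.rows );
objCorners[3] = Point2f( 0, (float)src_1.rows );
perspectiveTransform( objCorners, sceneCorners, H );
//-- sceneCorners can now be drawn on matchImg (shifted right by src_1.cols) to outline the detected object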