
OpenCV ORB feature detection + FLANN matching: program error and fix

I noticed that OpenCV 2.3.1 now ships the ORB feature extraction algorithm. Adapting the bundled SURF feature example to ORB kept failing with errors about mismatched types, and since no ORB sample program was available I had to work out the answer myself. (ORB paper: "ORB: an efficient alternative to SIFT or SURF".)
After some digging I found the following:

Descriptors come in two data types: float descriptors, such as SIFT and SURF, and uchar (binary) descriptors, such as ORB and BRIEF.

For float descriptors the available matching methods are:
FlannBased
BruteForce

BruteForceMatcher< L2<float> > matcher; // the line that has to change

Binary uchar descriptors such as ORB and BRIEF cannot be fed to these L2/FLANN matchers; they need a Hamming-distance matcher instead, which is exactly the change made in the complete program below (BruteForceMatcher<HammingLUT>).
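
As a minimal side-by-side sketch (the variable names are just for illustration, using the OpenCV 2.3.x classes that appear in this post):

// float descriptors (SIFT/SURF) -> L2 distance
BruteForceMatcher< L2<float> > matcher_float;
// uchar binary descriptors (ORB/BRIEF) -> Hamming distance
BruteForceMatcher<Hamming> matcher_binary; // HammingLUT also works, as in the full program below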

The complete code is as follows:

#include <iostream>  
#include <vector>  
#include "opencv2/core/core.hpp"  
#include "opencv2/features2d/features2d.hpp"  
#include "opencv2/highgui/highgui.hpp"  
using namespace cv;  
using namespace std;  

int main()  
{  
    Mat img_1 = imread("D:\\image\\img1.jpg");  
    Mat img_2 = imread("D:\\image\\img2.jpg");  
    if (!img_1.data || !img_2.data)  
    {  
        cout << "error reading images " << endl;  
        return -1;  
    }  

    // detect ORB keypoints and compute the (uchar) descriptors  
    ORB orb;  
    vector<KeyPoint> keyPoints_1, keyPoints_2;  
    Mat descriptors_1, descriptors_2;  
    orb(img_1, Mat(), keyPoints_1, descriptors_1);  
    orb(img_2, Mat(), keyPoints_2, descriptors_2);  

    // ORB descriptors are binary, so use a Hamming-distance matcher  
    BruteForceMatcher<HammingLUT> matcher;  
    vector<DMatch> matches;  
    matcher.match(descriptors_1, descriptors_2, matches);  

    //-- Quick calculation of max and min distances between keypoints  
    double max_dist = 0;  
    double min_dist = 100;  
    for( int i = 0; i < descriptors_1.rows; i++ )  
    {  
        double dist = matches[i].distance;  
        if( dist < min_dist ) min_dist = dist;  
        if( dist > max_dist ) max_dist = dist;  
    }  
    printf("-- Max dist : %f \n", max_dist );  
    printf("-- Min dist : %f \n", min_dist );  

    //-- Draw only "good" matches (i.e. whose distance is less than 0.6*max_dist )  
    //-- PS.- radiusMatch can also be used here.  
    std::vector< DMatch > good_matches;  
    for( int i = 0; i < descriptors_1.rows; i++ )  
    {  
        if( matches[i].distance < 0.6*max_dist )  
        {  
            good_matches.push_back( matches[i]);  
        }  
    }  

    Mat img_matches;  
    drawMatches(img_1, keyPoints_1, img_2, keyPoints_2, good_matches, img_matches,  
                Scalar::all(-1), Scalar::all(-1), vector<char>(),  
                DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);  
    imshow( "Match", img_matches);  
    cvWaitKey();  
    return 0;  
}
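
If you actually want FLANN-based matching with ORB's binary descriptors (the combination the title hints at), newer OpenCV releases (2.4.x rather than the 2.3.1 used here) let FlannBasedMatcher run on an LSH index. A minimal sketch under that assumption, reusing the descriptors from the program above (the LSH parameters are just typical example values):

// assumes OpenCV 2.4+: an LSH index lets FLANN handle binary descriptors
FlannBasedMatcher flann_matcher(new flann::LshIndexParams(12, 20, 2));
vector<DMatch> flann_matches;
flann_matcher.match(descriptors_1, descriptors_2, flann_matches);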

Also, for SURF and SIFT (float descriptors):

// SIFT: detect keypoints and compute descriptors in one call
SIFT sift;
sift(img_1, Mat(), keyPoints_1, descriptors_1);
sift(img_2, Mat(), keyPoints_2, descriptors_2);
BruteForceMatcher<L2<float> >  matcher;

// SURF: detect keypoints first, then compute descriptors separately
SURF surf;
surf(img_1, Mat(), keyPoints_1);
surf(img_2, Mat(), keyPoints_2);
SurfDescriptorExtractor extrator;
extrator.compute(img_1, keyPoints_1, descriptors_1);
extrator.compute(img_2, keyPoints_2, descriptors_2);
BruteForceMatcher<L2<float> >  matcher;
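
For these float descriptors, the FlannBased option listed earlier can also be used directly in place of the brute-force matcher; a minimal sketch, reusing the descriptors computed above:

// FLANN (KD-tree) matching works out of the box for float descriptors such as SIFT/SURF
FlannBasedMatcher flann_matcher;
vector<DMatch> matches;
flann_matcher.match(descriptors_1, descriptors_2, matches);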

Result:
[ORB match result screenshot]
Another example is object localization by matching: finding the Starbucks logo from the left image inside the scene image on the right.

The result looks like this:
[object localization result screenshot]

To turn this into a simple demo, add the following code right before the imshow call above (findHomography also requires #include "opencv2/calib3d/calib3d.hpp"):

// localize the object  
std::vector<Point2f> obj;  
std::vector<Point2f> scene;  

for (size_t i = 0; i < good_matches.size(); ++i)  
{  
    // get the keypoints from the good matches  
    obj.push_back(keyPoints_1[ good_matches[i].queryIdx ].pt);  
    scene.push_back(keyPoints_2[ good_matches[i].trainIdx ].pt);  
}  
Mat H = findHomography( obj, scene, CV_RANSAC );  

// get the corners from the image_1  
std::vector<Point2f> obj_corners(4);  
obj_corners[0] = cvPoint(0,0);  
obj_corners[1] = cvPoint( img_1.cols, 0);  
obj_corners[2] = cvPoint( img_1.cols, img_1.rows);  
obj_corners[3] = cvPoint( 0, img_1.rows);  
std::vector<Point2f> scene_corners(4);  

perspectiveTransform( obj_corners, scene_corners, H);  

// draw lines between the corners (the mapped object in the scene - image_2)  
line( img_matches, scene_corners[0] + Point2f( img_1.cols, 0), scene_corners[1] + Point2f( img_1.cols, 0),Scalar(0,255,0));  
line( img_matches, scene_corners[1] + Point2f( img_1.cols, 0), scene_corners[2] + Point2f( img_1.cols, 0),Scalar(0,255,0));  
line( img_matches, scene_corners[2] + Point2f( img_1.cols, 0), scene_corners[3] + Point2f( img_1.cols, 0),Scalar(0,255,0));  
line( img_matches, scene_corners[3] + Point2f( img_1.cols, 0), scene_corners[0] + Point2f( img_1.cols, 0),Scalar(0,255,0));
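
As an optional refinement (not part of the original post), findHomography can also report which of the good matches are RANSAC inliers through an output mask (OpenCV 2.4-style signature), so that only geometrically consistent matches get drawn. A sketch reusing the variables above:

// ask findHomography for the RANSAC inlier mask (1 = inlier, 0 = outlier)
std::vector<uchar> inlier_mask;
Mat H2 = findHomography(obj, scene, CV_RANSAC, 3, inlier_mask);
std::vector<DMatch> inlier_matches;
for (size_t i = 0; i < good_matches.size(); ++i)
{
    if (inlier_mask[i])
        inlier_matches.push_back(good_matches[i]);
}
// inlier_matches can now be passed to drawMatches instead of good_matches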