
OpenCV Feature Point Extraction and Matching


1. The data structures involved

The KeyPoint class contains the following fields:

class KeyPoint
{
    Point2f pt;      // coordinates of the keypoint
    float size;      // diameter of the keypoint's neighbourhood
    float angle;     // orientation of the keypoint in [0, 360); a negative value means unused
    float response;  // response by which the strongest keypoints are selected
    int octave;      // octave (pyramid layer) the keypoint was extracted from
    int class_id;    // id used for clustering
};

angle: the orientation of the keypoint. SIFT computes it from the gradients in the neighbourhood around the point; -1 is the initial (unset) value.

class_id: when classifying images, class_id can be used to distinguish feature points; it is -1 when unset.

octave: the pyramid octave (layer) from which the keypoint was extracted.

pt: the coordinates of the keypoint (pt.x, pt.y).

response: the response strength, i.e. how "good" the keypoint is; more precisely, how corner-like the point is.

size: the diameter of the meaningful neighbourhood around the keypoint.

Note: KeyPoint only stores the basic information listed above for the feature points detected by OpenCV's SIFT module. The SIFT feature vectors themselves are not kept in it; they are computed by SiftDescriptorExtractor and stored in a Mat. (Newer SIFT interfaces can detect and extract in a single call.)
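To make the detector/extractor split concrete, here is a minimal sketch using the OpenCV 2.4.x nonfree API (the same module the full listing below links against; the file name is a placeholder):

#include <vector>
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"   // SiftFeatureDetector / SiftDescriptorExtractor

int main()
{
    cv::Mat img = cv::imread("test.jpg", 0);   // placeholder path, loaded as grayscale

    // step 1: detect keypoints (fills only the KeyPoint fields listed above)
    cv::SiftFeatureDetector detector;
    std::vector<cv::KeyPoint> keypoints;
    detector.detect(img, keypoints);

    // step 2: compute the descriptors into a Mat,
    // one 128-float row per keypoint (type CV_32F)
    cv::SiftDescriptorExtractor extractor;
    cv::Mat descriptors;
    extractor.compute(img, keypoints, descriptors);
    return 0;
}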

The DMatch data structure:

struct DMatch
{
    // three constructors
    DMatch() :
        queryIdx(-1), trainIdx(-1), imgIdx(-1),
        distance(std::numeric_limits<float>::max()) {}

    DMatch(int _queryIdx, int _trainIdx, float _distance) :
        queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(-1), distance(_distance) {}

    DMatch(int _queryIdx, int _trainIdx, int _imgIdx, float _distance) :
        queryIdx(_queryIdx), trainIdx(_trainIdx), imgIdx(_imgIdx), distance(_distance) {}

    int queryIdx;   // index of the descriptor in the query image (input image 1)
    int trainIdx;   // index of the descriptor in the train (template) image (input image 2)
    int imgIdx;     // index of the train image (when matching against several)
    float distance; // distance between the two descriptors (L2 for float descriptors, Hamming for binary); smaller means a better match
    bool operator<(const DMatch &m) const;   // sorts matches by ascending distance
};
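Since DMatch defines operator< on distance, a match list can be sorted directly with std::sort. A small sketch (the helper name is illustrative):

#include <algorithm>
#include <vector>
#include "opencv2/features2d/features2d.hpp"

// keep only the n best matches, exploiting DMatch::operator< (ascending distance)
std::vector<cv::DMatch> bestMatches(std::vector<cv::DMatch> matches, size_t n)
{
    std::sort(matches.begin(), matches.end());   // sorts by DMatch::distance
    if (matches.size() > n)
        matches.resize(n);
    return matches;
}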

2. The various feature points

SIFT, SURF, FAST, ...
The descriptors produced by feature extraction differ in element type: some are float, e.g. SURF (SurfDescriptorExtractor) and SIFT (SiftDescriptorExtractor); others are uchar, e.g. ORB and BriefDescriptorExtractor.

Matchers for float descriptors: FlannBasedMatcher, BruteForce<L2<float>>, etc. Matchers for uchar descriptors: BruteForce<Hamming>, BruteForce<HammingLUT>. So ORB and BRIEF descriptors can only be matched with the brute-force (Hamming) method, which needs the legacy header:

#include <opencv2/legacy/legacy.hpp>

Add to the linker options: opencv_legacy248d.lib (the release version is opencv_legacy248.lib; replace 248 with your version number).
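A sketch of picking the matcher from the descriptor's element type (OpenCV 2.4.x API; the helper name is illustrative):

#include "opencv2/features2d/features2d.hpp"

// FLANN (L2) for float descriptors such as SIFT/SURF,
// brute-force Hamming for binary (uchar) ones such as ORB/BRIEF
cv::Ptr<cv::DescriptorMatcher> makeMatcher(const cv::Mat& descriptors)
{
    if (descriptors.type() == CV_32F)
        return cv::Ptr<cv::DescriptorMatcher>(new cv::FlannBasedMatcher());
    return cv::Ptr<cv::DescriptorMatcher>(new cv::BFMatcher(cv::NORM_HAMMING));
}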

The full code is as follows:

#include <iostream>
#include <vector>
#include "opencv2/opencv.hpp"
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/legacy/legacy.hpp"   // header for the legacy brute-force matcher
#include "opencv2/nonfree/nonfree.hpp"

#include "cv_import_static_lib.h"

using namespace std;
using namespace cv;

int main(){


    Mat img_1 = imread("E:\\3Dtestdata\\3.jpg");
    Mat img_2 = imread("E:\\3Dtestdata\\4.jpg");
    if (!img_1.data || !img_2.data)
    {
        cout << "error reading images " << endl;
        return -1;
    }

    vector<KeyPoint> keyPoints_1, keyPoints_2;
    Mat descriptors_1, descriptors_2;

    /*-----------------SIFT feature point----------------
    SIFT sift;
    sift(img_1, Mat(), keyPoints_1, descriptors_1);
    sift(img_2, Mat(), keyPoints_2, descriptors_2);
    */

    /*-----------------SURF feature point----------------
    SURF surf;
    surf(img_1, Mat(), keyPoints_1, descriptors_1);
    surf(img_2, Mat(), keyPoints_2, descriptors_2); 
    //SurfDescriptorExtractor extrator;           // alternative: compute descriptors with an explicit extractor
    //extrator.compute(img_1, keyPoints_1, descriptors_1);
    //extrator.compute(img_2, keyPoints_2, descriptors_2);
    */

    //-----------------ORB feature point----------------
    ORB orb;   // binary (uchar) descriptor; it cannot be matched with the default (L2) FlannBasedMatcher
    orb(img_1, Mat(), keyPoints_1, descriptors_1);
    orb(img_2, Mat(), keyPoints_2, descriptors_2);


    /*-----------------MSER feature point----------------
    MSER mser;
     */

    /*-----------------FAST feature point----------------
    FastFeatureDetector fast1(100);   // detection threshold of 100
    FastFeatureDetector fast2(100);

    fast1.detect(img_1, keyPoints_1);
    fast2.detect(img_2, keyPoints_2);
    //SurfDescriptorExtractor extrator;           // alternative: compute SURF descriptors on the FAST corners
    //extrator.compute(img_1, keyPoints_1, descriptors_1);
    //extrator.compute(img_2, keyPoints_2, descriptors_2);

    OrbDescriptorExtractor extrator;
    extrator.compute(img_1, keyPoints_1, descriptors_1);
    extrator.compute(img_2, keyPoints_2, descriptors_2);
    */


    BruteForceMatcher<HammingLUT> matcher;   // for binary (uchar) descriptors such as ORB/BRIEF

    //FlannBasedMatcher matcher;   // only for float descriptors such as SIFT/SURF

    vector< DMatch > matches;

    matcher.match(descriptors_1, descriptors_2, matches);

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints  
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        double dist = matches[i].distance;
        if (dist < min_dist) min_dist = dist;
        if (dist > max_dist) max_dist = dist;
    }
    cout<<"-- Max dist :"<< max_dist<<endl;
    cout<<"-- Min dist :"<< min_dist<<endl;

    //-- Draw only "good" matches (i.e. whose distance is less than 0.6*max_dist )  
    //-- PS.- radiusMatch can also be used here.  
    vector< DMatch > good_matches;
    for (int i = 0; i < descriptors_1.rows; i++)
    {
        if (matches[i].distance < 0.6*max_dist)
        {
            good_matches.push_back(matches[i]);
        }
    }


    // vector<KeyPoint> m_LeftKey;
    // vector<KeyPoint> m_RightKey;
    // vector<DMatch> m_Matches;
    // the three variables above hold the extracted keypoints and their matches; F is computed directly from them below

    // allocate space
    int ptCount = (int)matches.size();
    Mat p1(ptCount, 2, CV_32F);
    Mat p2(ptCount, 2, CV_32F);

    // convert the KeyPoints into Mat form
    Point2f pt;
    for (int i = 0; i<ptCount; i++)
    {
        pt = keyPoints_1[matches[i].queryIdx].pt;
        p1.at<float>(i, 0) = pt.x;
        p1.at<float>(i, 1) = pt.y;

        pt = keyPoints_2[matches[i].trainIdx].pt;
        p2.at<float>(i, 0) = pt.x;
        p2.at<float>(i, 1) = pt.y;
    }


    // estimate the fundamental matrix F with RANSAC
    Mat m_Fundamental;
    vector<uchar> m_RANSACStatus;

    m_Fundamental = findFundamentalMat(p1, p2, m_RANSACStatus, FM_RANSAC);   // status[i] == 0 marks match i as an outlier

    // count the outliers
    int OutlinerCount = 0;
    for (int i = 0; i<ptCount; i++)
    {
        if (m_RANSACStatus[i] == 0) // status 0 marks an outlier
        {
            OutlinerCount++;
        }
    }

    // collect the inliers
    vector<Point2f> m_LeftInlier;
    vector<Point2f> m_RightInlier;
    vector<DMatch> m_InlierMatches;
    // the three vectors above store the inlier points and their match relations
    int InlinerCount = ptCount - OutlinerCount;
    m_InlierMatches.resize(InlinerCount);
    m_LeftInlier.resize(InlinerCount);
    m_RightInlier.resize(InlinerCount);
    InlinerCount = 0;
    for (int i = 0; i<ptCount; i++)
    {
        if (m_RANSACStatus[i] != 0)
        {
            m_LeftInlier[InlinerCount].x = p1.at<float>(i, 0);
            m_LeftInlier[InlinerCount].y = p1.at<float>(i, 1);
            m_RightInlier[InlinerCount].x = p2.at<float>(i, 0);
            m_RightInlier[InlinerCount].y = p2.at<float>(i, 1);
            m_InlierMatches[InlinerCount].queryIdx = InlinerCount;
            m_InlierMatches[InlinerCount].trainIdx = InlinerCount;
            InlinerCount++;
        }
    }

    // convert the inliers into the format drawMatches expects
    vector<KeyPoint> key1(InlinerCount);
    vector<KeyPoint> key2(InlinerCount);
    KeyPoint::convert(m_LeftInlier, key1);
    KeyPoint::convert(m_RightInlier, key2);

    // display the inlier matches kept by the F estimation
     //Mat m_matLeftImage;
     //Mat m_matRightImage;
    // the two variables above would hold the left and right images
    Mat OutImage;
    drawMatches(img_1, key1, img_2, key2, m_InlierMatches, OutImage);

    //stereoRectifyUncalibrated();

    Mat img_matches;
    drawMatches(img_1, keyPoints_1, img_2, keyPoints_2,
    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

    imwrite("FASTResult.jpg", img_matches);
    imshow("Match", img_matches);

    imwrite("FmatrixResult.jpg", OutImage);
    imshow("Match2", OutImage);
    waitKey(0);

    return 0;
}
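The 0.6*max_dist filter above is a crude heuristic. A common alternative, not part of the original listing, is Lowe's ratio test over the two nearest neighbours; a sketch under that assumption (the helper name is illustrative):

#include <vector>
#include "opencv2/features2d/features2d.hpp"

// keep a match only if the best distance is clearly smaller than the
// second best, which discards ambiguous correspondences
std::vector<cv::DMatch> ratioTest(cv::DescriptorMatcher& matcher,
                                  const cv::Mat& desc1, const cv::Mat& desc2,
                                  float ratio = 0.75f)
{
    std::vector<std::vector<cv::DMatch> > knn;
    matcher.knnMatch(desc1, desc2, knn, 2);   // two nearest neighbours per query descriptor

    std::vector<cv::DMatch> good;
    for (size_t i = 0; i < knn.size(); ++i)
        if (knn[i].size() == 2 && knn[i][0].distance < ratio * knn[i][1].distance)
            good.push_back(knn[i][0]);
    return good;
}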

Shown above is the matching result after RANSAC filtering.

With good matches in hand, subsequent 3D reconstruction becomes much easier.
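The commented-out stereoRectifyUncalibrated() call in the listing hints at that next step: with the fundamental matrix and inlier correspondences already computed, the rectification homographies follow in one call. A sketch reusing the variables from the listing above:

#include "opencv2/calib3d/calib3d.hpp"

// compute rectification homographies H1, H2 from F and the inliers
// (m_LeftInlier, m_RightInlier, m_Fundamental, img_1 come from the listing above)
cv::Mat H1, H2;
cv::stereoRectifyUncalibrated(m_LeftInlier, m_RightInlier, m_Fundamental,
                              img_1.size(), H1, H2);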