1. 程式人生 >> OpenCv-C++-KAZE(AKAZE)區域性特徵匹配(二)

OpenCv-C++-KAZE(AKAZE)區域性特徵匹配(二)

上一篇已經做出了KAZE(AKAZE)區域性特徵的檢測,就差匹配沒有做到。
那麼,現在來實現一下:

放上程式碼:

#include<opencv2/opencv.hpp>
#include<iostream>
#include<math.h>

using namespace cv;
using namespace std;

Mat img1, img2;

/*
 * Demonstrates AKAZE local-feature matching between a template image (box.png)
 * and a scene image (box_in_scene.png):
 *   1. detect AKAZE keypoints + binary descriptors in both images,
 *   2. match them with a FLANN LSH matcher,
 *   3. filter matches by descriptor distance,
 *   4. locate the template in the scene via a RANSAC homography and draw its outline.
 * Returns 0 on success, -1 if either input image cannot be loaded.
 */
int main(int argc, char** argv)
{
	// Load the object (template) and the scene as grayscale images.
	img1 = imread("D:/test/box.png", IMREAD_GRAYSCALE);
	img2 = imread("D:/test/box_in_scene.png", IMREAD_GRAYSCALE);
	if (img1.empty() || img2.empty())
	{
		printf("圖片未找到...");
		return -1;
	}
	imshow("input box", img1);
	imshow("input box_in_scene", img2);

	double t1 = getTickCount();
	// AKAZE: keypoint detection in a non-linear scale space (binary descriptors).
	Ptr<AKAZE> detector = AKAZE::create();
	//Ptr<KAZE> detector = KAZE::create(); // KAZE variant (float descriptors)

	// Descriptor matrices for the object and the scene.
	Mat descriptor_obj, descriptor_scene;

	// Detect keypoints and compute descriptors for img1 (the object).
	vector<KeyPoint> keypoints_obj;
	detector->detectAndCompute(img1, Mat(), keypoints_obj, descriptor_obj);

	// Detect keypoints and compute descriptors for img2 (the scene).
	vector<KeyPoint> keypoints_scene;
	detector->detectAndCompute(img2, Mat(), keypoints_scene, descriptor_scene);

	double t2 = getTickCount();
	double t = (t2 - t1) * 1000 / getTickFrequency(); // convert ticks to milliseconds
	printf("特徵點尋找所花費時間(ms):%f", t);

	/* Match descriptors with FLANN using an LSH index. The LSH index is
	 * required here: AKAZE produces binary (CV_8U) descriptors, and the
	 * default FlannBasedMatcher uses a KD-tree index that only supports
	 * CV_32F descriptors and would fail on this input.
	 * A brute-force matcher (BFMatcher with NORM_HAMMING) would also work. */
	FlannBasedMatcher fbMatcher(new flann::LshIndexParams(20, 10, 2));

	vector<DMatch> matches;
	fbMatcher.match(descriptor_obj, descriptor_scene, matches);

	// Draw all raw matches; NOT_DRAW_SINGLE_POINTS hides unmatched keypoints
	// so only matched pairs remain visible.
	Mat resultImg;
	drawMatches(img1, keypoints_obj, img2, keypoints_scene, matches, resultImg,
		Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	imshow("AKAZE Matches", resultImg);

	/* The raw result contains many weak matches. Keep only "good" matches
	 * whose descriptor distance is within 2x the smallest observed distance.
	 * NOTE: loop over matches.size(), not descriptor_obj.rows — the LSH
	 * matcher may return fewer matches than there are query descriptors,
	 * so indexing matches[] by descriptor rows could read out of bounds. */
	double minDist = 1000; // running minimum, initialized above any plausible distance
	double maxDist = 0;    // running maximum (informational)
	for (size_t i = 0; i < matches.size(); i++)
	{
		double dist = matches[i].distance;
		if (dist > maxDist)
		{
			maxDist = dist;
		}
		if (dist < minDist)
		{
			minDist = dist;
		}
	}

	vector<DMatch> goodmatches; // best matches filtered from the full set
	for (size_t i = 0; i < matches.size(); i++)
	{
		// 0.02 floor guards against a degenerate threshold when minDist is ~0.
		if (matches[i].distance < max(2 * minDist, 0.02))
		{
			goodmatches.push_back(matches[i]);
		}
	}

	Mat goodmatchesImg;
	drawMatches(img1, keypoints_obj, img2, keypoints_scene, goodmatches, goodmatchesImg,
		Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
	imshow("goodmatchesImg", goodmatchesImg);

	/* ---------- Planar object localization (outline instead of lines) ----------
	 * Estimate a perspective (homography) transform from object to scene and
	 * draw the projected object outline onto the match image.
	 * findHomography needs at least 4 point pairs; skip this step otherwise. */
	if (goodmatches.size() >= 4)
	{
		vector<Point2f> obj;
		vector<Point2f> objinscene;
		for (size_t i = 0; i < goodmatches.size(); i++)
		{
			obj.push_back(keypoints_obj[goodmatches[i].queryIdx].pt);
			objinscene.push_back(keypoints_scene[goodmatches[i].trainIdx].pt);
		}
		Mat H = findHomography(obj, objinscene, RANSAC); // object -> scene transform

		// RANSAC may fail to find a consistent model; H is empty in that case.
		if (!H.empty())
		{
			vector<Point2f> obj_corner(4); // corners of the source (object) image
			vector<Point2f> objinscene_corner(4);
			obj_corner[0] = Point(0, 0);
			obj_corner[1] = Point(img1.cols, 0);
			obj_corner[2] = Point(img1.cols, img1.rows);
			obj_corner[3] = Point(0, img1.rows);

			// Project the object corners into scene coordinates.
			perspectiveTransform(obj_corner, objinscene_corner, H);

			Mat pptfImg = goodmatchesImg.clone();
			// The scene occupies the right half of the side-by-side match image,
			// so shift every projected point right by the object image's width.
			const Point2f shift((float)img1.cols, 0.0f);
			for (int i = 0; i < 4; i++)
			{
				line(pptfImg, objinscene_corner[i] + shift,
					objinscene_corner[(i + 1) % 4] + shift,
					Scalar(0, 0, 255), 2, 8, 0);
			}
			imshow("pptfImg", pptfImg);
		}
	}

	waitKey(0);
	return 0;
}

具體說明已經在註釋中有說明。
執行結果:
（此處為執行結果截圖：AKAZE 特徵匹配與透視變換定位結果，圖片未隨文字匯出。）