
[OpenCV Study Notes 019] Object Detection with the SIFT and SURF Algorithms

Part 1: Feature Point Detection with the SIFT and SURF Operators

Overview

OpenCV's features2d framework implements the SIFT and SURF algorithms (since 2.4.x the implementations live in the nonfree module), which can detect feature points in an image automatically. Concretely, the detect function of the SiftFeatureDetector/SurfFeatureDetector class detects SIFT/SURF keypoints and stores them in a vector container, and drawKeypoints then draws the detected keypoints.

The experiments were run with OpenCV 2.4.9 + VS2013 + Win7.

SIFT feature point detection
The experiment code is below. Note that SiftFeatureDetector is declared in opencv2/nonfree/features2d.hpp, so you must include that header and add the library opencv_nonfree249d.lib under "Project Properties -> Linker -> Input -> Additional Dependencies".
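
As an MSVC-only alternative to the project settings, the library dependency can also be declared directly in source with a #pragma; a minimal sketch, assuming the OpenCV 2.4.9 library names used in this article:

#ifdef _DEBUG
#pragma comment(lib, "opencv_nonfree249d.lib")	//debug build ('d' suffix)
#else
#pragma comment(lib, "opencv_nonfree249.lib")	//release build
#endif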

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector實際在該標頭檔案中
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat src = imread("image1.jpg", 0);

	if (!src.data)
	{
		cout << " --(!) Error reading images " << endl;
		return -1;
	}

	//1--Initialize the SIFT detector
	//Note: unlike SURF, SIFT's first constructor argument is nfeatures (the
	//number of best features to retain), not a Hessian threshold
	int nFeatures = 400;
	SiftFeatureDetector detector(nFeatures);

	//2--Detect keypoints with the SIFT operator
	vector<KeyPoint> keypoints;
	detector.detect(src, keypoints);

	//3--Draw the keypoints
	Mat keypointImg;
	drawKeypoints(src, keypoints, keypointImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
	imshow("SIFT keypoints", keypointImg);
	cout << "keypoint number: " << keypoints.size() << endl;

	waitKey(0);
	return 0;
}


SURF feature point detection
Similarly, detecting feature points with the SURF descriptor follows the same procedure, just with the SurfFeatureDetector class instead. The experiment code is below:

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector實際在該標頭檔案中
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat src = imread("image1.jpg", 0);

	if (!src.data)
	{
		cout << " --(!) Error reading images " << endl;
		return -1;
	}

	//1--Initialize the SURF detector
	int minHessian = 400;
	SurfFeatureDetector detector(minHessian);

	//2--Detect keypoints with the SURF operator
	vector<KeyPoint> keypoints;
	detector.detect(src, keypoints);

	//3--Draw the keypoints
	Mat keypointImg;
	drawKeypoints(src, keypoints, keypointImg, Scalar::all(-1), DrawMatchesFlags::DEFAULT);
	imshow("SURF keypoints", keypointImg);
	cout << "keypoint number: " << keypoints.size() << endl;

	waitKey(0);
	return 0;
}

From the detection results we can see that the SURF operator finds far more keypoints than the SIFT operator. How accurate each detector is will be probed later by running feature point matching with both operators.

Part 2: Feature Point Extraction and Matching with the SIFT and SURF Operators

Part 1 showed how to detect feature points with the SIFT and SURF operators. On top of detection, the same operators can extract a descriptor for each feature point, and a matching function can then pair up feature points between images. Concretely: first detect keypoints with SurfFeatureDetector, then compute their feature vectors with SurfDescriptorExtractor, and finally match them with either the brute-force BruteForceMatcher or the approximate FlannBasedMatcher (the differences between the two are discussed at the end of this part).
The experiments were run with OpenCV 2.4.9 + VS2013 + Win7. Note that in OpenCV 2.4.x SurfFeatureDetector is declared in opencv2/nonfree/features2d.hpp, BruteForceMatcher in opencv2/legacy/legacy.hpp, and FlannBasedMatcher in opencv2/features2d/features2d.hpp.

BruteForce matching
First we use brute-force matching with BruteForceMatcher; the code is below:

#include <stdio.h>  
#include <iostream>  
#include "opencv2/core/core.hpp"  
#include "opencv2/nonfree/features2d.hpp"   //SurfFeatureDetector實際在該標頭檔案中  
#include "opencv2/legacy/legacy.hpp"    //BruteForceMatcher實際在該標頭檔案中  
//#include "opencv2/features2d/features2d.hpp"  //FlannBasedMatcher實際在該標頭檔案中  
#include "opencv2/highgui/highgui.hpp"  
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat src_1 = imread("image1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	Mat src_2 = imread("image2.jpg", CV_LOAD_IMAGE_GRAYSCALE);

	if (!src_1.data || !src_2.data)
	{
		cout << " --(!) Error reading images " << endl;
		return -1;
	}

	//-- Step 1: detect keypoints with the SURF operator
	int minHessian = 400;
	SurfFeatureDetector detector(minHessian);
	vector<KeyPoint> keypoints_1, keypoints_2;
	detector.detect(src_1, keypoints_1);
	detector.detect(src_2, keypoints_2);
	cout << "img1--number of keypoints: " << keypoints_1.size() << endl;
	cout << "img2--number of keypoints: " << keypoints_2.size() << endl;

	//-- Step 2: extract features (compute the feature vectors) with the SURF operator
	SurfDescriptorExtractor extractor;
	Mat descriptors_1, descriptors_2;
	extractor.compute(src_1, keypoints_1, descriptors_1);
	extractor.compute(src_2, keypoints_2, descriptors_2);

	//-- Step 3: brute-force matching
	BruteForceMatcher< L2<float> > matcher;
	vector< DMatch > matches;
	matcher.match(descriptors_1, descriptors_2, matches);
	cout << "number of matches: " << matches.size() << endl;

	//-- Show the matching result
	Mat matchImg;
	drawMatches(src_1, keypoints_1, src_2, keypoints_2, matches, matchImg);
	imshow("matching result", matchImg);
	waitKey(0);

	return 0;
}

Experimental result: (result image omitted)


FLANN matching
The brute-force matching results are not great, so next we use FlannBasedMatcher for feature matching and keep only the good matches. The code is below:

#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector實際在該標頭檔案中
//#include "opencv2/legacy/legacy.hpp"	//BruteForceMatcher實際在該標頭檔案中
#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher實際在該標頭檔案中
#include "opencv2/highgui/highgui.hpp"
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat src_1 = imread("image1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	Mat src_2 = imread("image2.jpg", CV_LOAD_IMAGE_GRAYSCALE);

	if (!src_1.data || !src_2.data)
	{
		cout << " --(!) Error reading images " << endl;
		return -1;
	}

	//-- Step 1: detect keypoints with the SURF operator
	int minHessian = 400;
	SurfFeatureDetector detector(minHessian);
	vector<KeyPoint> keypoints_1, keypoints_2;
	detector.detect(src_1, keypoints_1);
	detector.detect(src_2, keypoints_2);
	cout << "img1--number of keypoints: " << keypoints_1.size() << endl;
	cout << "img2--number of keypoints: " << keypoints_2.size() << endl;

	//-- Step 2: extract features (compute the feature vectors) with the SURF operator
	SurfDescriptorExtractor extractor;
	Mat descriptors_1, descriptors_2;
	extractor.compute(src_1, keypoints_1, descriptors_1);
	extractor.compute(src_2, keypoints_2, descriptors_2);

	//-- Step 3: match with the FLANN method
	FlannBasedMatcher matcher;
	vector< DMatch > allMatches;
	matcher.match(descriptors_1, descriptors_2, allMatches);
	cout << "number of matches before filtering: " << allMatches.size() << endl;

	//-- Compute the minimum and maximum distances over all matches
	double maxDist = 0;
	double minDist = 100;
	for (int i = 0; i < descriptors_1.rows; i++)
	{
		double dist = allMatches[i].distance;
		if (dist < minDist)
			minDist = dist;
		if (dist > maxDist)
			maxDist = dist;
	}
	printf("	max dist : %f \n", maxDist);
	printf("	min dist : %f \n", minDist);

	//-- Filter the matches, keeping only the good ones (criterion used here: distance < 2*minDist)
	vector< DMatch > goodMatches;
	for (int i = 0; i < descriptors_1.rows; i++)
	{
		if (allMatches[i].distance < 2 * minDist)
			goodMatches.push_back(allMatches[i]);
	}
	cout << "number of matches after filtering: " << goodMatches.size() << endl;

	//-- Show the matching result
	Mat matchImg;
	drawMatches(src_1, keypoints_1, src_2, keypoints_2,
		goodMatches, matchImg, Scalar::all(-1), Scalar::all(-1),
		vector<char>(),
		DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //do not draw unmatched keypoints
		);
	imshow("matching result", matchImg);
	//-- Print the keypoint correspondences of the good matches
	for (int i = 0; i < goodMatches.size(); i++)
		printf("	good match %d: keypoints_1 [%d]  -- keypoints_2 [%d]\n", i,
		goodMatches[i].queryIdx, goodMatches[i].trainIdx);

	waitKey(0);
	return 0;
}
Experimental result: (result image omitted)


From the second experiment we can see that filtering cut the number of matches from 154 down to 25, and the matching accuracy improved. The same two matching experiments can of course also be run with the SIFT operator: simply replace SurfFeatureDetector with SiftFeatureDetector and SurfDescriptorExtractor with SiftDescriptorExtractor. An alternative way of keeping only the good matches is sketched below.
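
As a side note, the distance < 2 * minDist rule used above is only a heuristic. A common alternative is Lowe's ratio test over the two nearest neighbours of each descriptor; a minimal sketch, assuming the matcher and the descriptors_1/descriptors_2 matrices from the FLANN example above:

	vector< vector<DMatch> > knnMatches;
	matcher.knnMatch(descriptors_1, descriptors_2, knnMatches, 2);	//the 2 nearest neighbours of each query descriptor
	vector< DMatch > goodMatches;
	for (size_t i = 0; i < knnMatches.size(); i++)
	{
		//-- keep a match only if it is clearly better than its runner-up
		if (knnMatches[i].size() == 2 &&
			knnMatches[i][0].distance < 0.7f * knnMatches[i][1].distance)
			goodMatches.push_back(knnMatches[i][0]);
	}

Ratio values around 0.7-0.8 are typical; lowering the ratio keeps fewer but more reliable matches.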

Differences

The difference between the two is that BFMatcher always tries every possible match, so it is guaranteed to find the best one; that is the literal meaning of "brute force". The FLANN in FlannBasedMatcher stands for Fast Library for Approximate Nearest Neighbors; as the name suggests, it is an approximate method: the algorithm is faster, but it finds approximate nearest-neighbour matches. FlannBasedMatcher is therefore the usual choice when we need a reasonably good match rather than the best possible one. Its parameters can also be tuned to raise matching accuracy or speed, at a corresponding cost in the other; a tuning sketch follows.
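
For example, FlannBasedMatcher can be constructed with explicit index and search parameters (the defaults are 4 randomized KD-trees and 32 leaf checks); a minimal sketch, where the values 8 and 64 are purely illustrative:

	//-- more KD-trees and more leaf checks per query generally improve
	//-- match quality at the cost of speed
	FlannBasedMatcher matcher(
		new flann::KDTreeIndexParams(8),	//number of randomized KD-trees
		new flann::SearchParams(64));	//number of leaf checks per query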
In addition, the descriptors produced by feature extraction differ in data type: some are float, e.g. those from SurfDescriptorExtractor and SiftDescriptorExtractor, while others are uchar, e.g. those from ORB and BriefDescriptorExtractor. Matchers for float descriptors include FlannBasedMatcher, BruteForce<L2<float>>, BruteForce<SL2<float>> and BruteForce<L1<float>>; matchers for uchar descriptors include BruteForce<Hamming> and BruteForce<HammingLUT>. ORB and BRIEF descriptors can therefore only be matched with the brute-force Hamming matchers, as sketched below.
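
To illustrate the uchar case, below is a minimal sketch of matching ORB descriptors with the Hamming distance. It uses the BFMatcher class from features2d, the non-legacy equivalent of BruteForce<Hamming>; the image names are placeholders:

#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/features2d/features2d.hpp"	//ORB and BFMatcher are declared here
using namespace cv;

int main()
{
	Mat img1 = imread("image1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	Mat img2 = imread("image2.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	if (!img1.data || !img2.data)
		return -1;

	//-- ORB produces binary (uchar) descriptors, so they must be
	//-- matched with the Hamming distance rather than L2
	OrbFeatureDetector detector(500);	//retain at most 500 keypoints
	OrbDescriptorExtractor extractor;
	std::vector<KeyPoint> kp1, kp2;
	Mat desc1, desc2;
	detector.detect(img1, kp1);
	detector.detect(img2, kp2);
	extractor.compute(img1, kp1, desc1);
	extractor.compute(img2, kp2, desc2);

	BFMatcher matcher(NORM_HAMMING);	//brute force with the Hamming norm
	std::vector<DMatch> matches;
	matcher.match(desc1, desc2, matches);
	return 0;
}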

Part 3: Object Detection with the SIFT and SURF Algorithms

Overview

The previous two parts covered detecting feature points with the SIFT and SURF operators and extracting feature descriptors from them. Building on that, we can further use a perspective transform and spatial mapping to find a known object in a scene (object detection). Concretely: first run SURF/SIFT keypoint detection and feature extraction, then keep the good matches with FLANN matching, then find the corresponding perspective transform with findHomography, and finally map the object corners with perspectiveTransform to locate the object in the scene.
The experiments were run with OpenCV 2.4.9 + VS2013 + Win7. Note that in OpenCV 2.4.x SurfFeatureDetector/SiftFeatureDetector are declared in opencv2/nonfree/features2d.hpp and FlannBasedMatcher is declared in opencv2/features2d/features2d.hpp.

The SURF operator
First, object detection with the SURF operator; the code is below:

/**
* @Overview: detect a known object in a scene with the SURF operator
* @Classes and functions: SurfFeatureDetector + SurfDescriptorExtractor + FlannBasedMatcher + findHomography + perspectiveTransform
* @Steps:
*		Step 1: detect keypoints in the images with the SURF algorithm (SurfFeatureDetector)
*		Step 2: compute a feature vector (also called a descriptor) for each detected keypoint with SurfDescriptorExtractor
*		Step 3: match the keypoints by their feature vectors with FlannBasedMatcher, rejecting bad matches with a threshold
*		Step 4: find the corresponding perspective transform from the matched keypoints with findHomography
*		Step 5: map the object corners with perspectiveTransform to locate the object in the scene
*/

#include <ctime>
#include <iostream>
#include "opencv2/core/core.hpp"	
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SurfFeatureDetector實際在該標頭檔案中
#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher實際在該標頭檔案中
#include "opencv2/calib3d/calib3d.hpp"	//findHomography所需標頭檔案
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat imgObject = imread("image1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	Mat imgScene = imread("image2.jpg", CV_LOAD_IMAGE_GRAYSCALE);

	if (!imgObject.data || !imgScene.data)
	{
		cout << " --(!) Error reading images " << endl;
		return -1;
	}

	double begin = clock();

	///-- Step 1: detect keypoints with the SURF operator
	int minHessian = 400;
	SurfFeatureDetector detector(minHessian);
	vector<KeyPoint> keypointsObject, keypointsScene;
	detector.detect(imgObject, keypointsObject);
	detector.detect(imgScene, keypointsScene);
	cout << "object--number of keypoints: " << keypointsObject.size() << endl;
	cout << "scene--number of keypoints: " << keypointsScene.size() << endl;

	///-- Step 2: extract features (compute the feature vectors) with the SURF operator
	SurfDescriptorExtractor extractor;
	Mat descriptorsObject, descriptorsScene;
	extractor.compute(imgObject, keypointsObject, descriptorsObject);
	extractor.compute(imgScene, keypointsScene, descriptorsScene);

	///-- Step 3: match with the FLANN method
	FlannBasedMatcher matcher;
	vector< DMatch > allMatches;
	matcher.match(descriptorsObject, descriptorsScene, allMatches);
	cout << "number of matches before filtering: " << allMatches.size() << endl;

	//-- Compute the minimum and maximum distances over all matches
	double maxDist = 0;
	double minDist = 100;
	for (int i = 0; i < descriptorsObject.rows; i++)
	{
		double dist = allMatches[i].distance;
		if (dist < minDist)
			minDist = dist;
		if (dist > maxDist)
			maxDist = dist;
	}
	printf("	max dist : %f \n", maxDist);
	printf("	min dist : %f \n", minDist);

	//-- Filter the matches, keeping only the good ones (criterion used here: distance < 2*minDist)
	vector< DMatch > goodMatches;
	for (int i = 0; i < descriptorsObject.rows; i++)
	{
		if (allMatches[i].distance < 2 * minDist)
			goodMatches.push_back(allMatches[i]);
	}
	cout << "number of matches after filtering: " << goodMatches.size() << endl;

	//-- Show the matching result
	Mat resultImg;
	drawMatches(imgObject, keypointsObject, imgScene, keypointsScene,
		goodMatches, resultImg, Scalar::all(-1), Scalar::all(-1), vector<char>(),
		DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //do not draw unmatched keypoints
		);
	//-- Print the keypoint correspondences of the good matches
	for (int i = 0; i < goodMatches.size(); i++)
		printf("	good match %d: keypointsObject [%d]  -- keypointsScene [%d]\n", i,
		goodMatches[i].queryIdx, goodMatches[i].trainIdx);

	///-- Step 4: find the corresponding perspective transform with findHomography
	vector<Point2f> object;
	vector<Point2f> scene;
	for (size_t i = 0; i < goodMatches.size(); i++)
	{
		//-- Take the keypoints from the good matches: a match is a one-to-one
		//-- correspondence between two keypoints, whose indices it stores
		//-- (here goodMatches[i].queryIdx and goodMatches[i].trainIdx)
		object.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
		scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
	}
	Mat H = findHomography(object, scene, CV_RANSAC);

	///-- Step 5: map the object corners with perspectiveTransform to locate the object in the scene
	vector<Point2f> objCorners(4);
	objCorners[0] = Point2f(0, 0);
	objCorners[1] = Point2f((float)imgObject.cols, 0);
	objCorners[2] = Point2f((float)imgObject.cols, (float)imgObject.rows);
	objCorners[3] = Point2f(0, (float)imgObject.rows);
	vector<Point2f> sceneCorners(4);
	perspectiveTransform(objCorners, sceneCorners, H);

	//-- Draw lines between the four corners of the detected object
	line(resultImg, sceneCorners[0] + Point2f(imgObject.cols, 0), sceneCorners[1] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
	line(resultImg, sceneCorners[1] + Point2f(imgObject.cols, 0), sceneCorners[2] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
	line(resultImg, sceneCorners[2] + Point2f(imgObject.cols, 0), sceneCorners[3] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
	line(resultImg, sceneCorners[3] + Point2f(imgObject.cols, 0), sceneCorners[0] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);

	//-- Show the detection result
	imshow("detection result", resultImg);

	double end = clock();
	cout << "\nSURF--elapsed time: " << (end - begin) / CLOCKS_PER_SEC * 1000 << " ms\n";

	waitKey(0);
	return 0;
}
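
One practical caveat for both versions: findHomography needs at least four point correspondences, and when estimation fails it returns an empty matrix, in which case the corner-drawing step would produce garbage. A minimal guard, assuming the goodMatches, object and scene variables from the code above:

	//-- findHomography needs at least 4 correspondences; bail out early
	if (goodMatches.size() < 4)
	{
		cout << " --(!) Not enough good matches to estimate a homography " << endl;
		return -1;
	}
	Mat H = findHomography(object, scene, CV_RANSAC);
	if (H.empty())
	{
		cout << " --(!) Homography estimation failed " << endl;
		return -1;
	}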

The SIFT operator
For comparison, we repeat object detection with the SIFT operator: just replace SurfFeatureDetector with SiftFeatureDetector and SurfDescriptorExtractor with SiftDescriptorExtractor. The code is below:
/**
* @Overview: detect a known object in a scene with the SIFT operator
* @Classes and functions: SiftFeatureDetector + SiftDescriptorExtractor + FlannBasedMatcher + findHomography + perspectiveTransform
* @Steps:
*		Step 1: detect keypoints in the images with the SIFT algorithm (SiftFeatureDetector)
*		Step 2: compute a feature vector (also called a descriptor) for each detected keypoint with SiftDescriptorExtractor
*		Step 3: match the keypoints by their feature vectors with FlannBasedMatcher, rejecting bad matches with a threshold
*		Step 4: find the corresponding perspective transform from the matched keypoints with findHomography
*		Step 5: map the object corners with perspectiveTransform to locate the object in the scene
*/

#include <ctime>
#include <iostream>
#include "opencv2/core/core.hpp"	
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"	//SiftFeatureDetector實際在該標頭檔案中
#include "opencv2/features2d/features2d.hpp"	//FlannBasedMatcher實際在該標頭檔案中
#include "opencv2/calib3d/calib3d.hpp"	//findHomography所需標頭檔案
using namespace cv;
using namespace std;

int main(int argc, char** argv)
{
	Mat imgObject = imread("image1.jpg", CV_LOAD_IMAGE_GRAYSCALE);
	Mat imgScene = imread("image2.jpg", CV_LOAD_IMAGE_GRAYSCALE);

	if (!imgObject.data || !imgScene.data)
	{
		cout << " --(!) Error reading images " << endl;
		return -1;
	}

	double begin = clock();

	///-- Step 1: detect keypoints with the SIFT operator
	//SIFT has no Hessian threshold (its first constructor argument would be
	//nfeatures, the number of best features to retain), so use the defaults
	SiftFeatureDetector detector;
	vector<KeyPoint> keypointsObject, keypointsScene;
	detector.detect(imgObject, keypointsObject);
	detector.detect(imgScene, keypointsScene);
	cout << "object--number of keypoints: " << keypointsObject.size() << endl;
	cout << "scene--number of keypoints: " << keypointsScene.size() << endl;

	///-- Step 2: extract features (compute the feature vectors) with the SIFT operator
	SiftDescriptorExtractor extractor;
	Mat descriptorsObject, descriptorsScene;
	extractor.compute(imgObject, keypointsObject, descriptorsObject);
	extractor.compute(imgScene, keypointsScene, descriptorsScene);

	///-- Step 3: match with the FLANN method
	FlannBasedMatcher matcher;
	vector< DMatch > allMatches;
	matcher.match(descriptorsObject, descriptorsScene, allMatches);
	cout << "number of matches before filtering: " << allMatches.size() << endl;

	//-- Compute the minimum and maximum distances over all matches
	double maxDist = 0;
	double minDist = 100;
	for (int i = 0; i < descriptorsObject.rows; i++)
	{
		double dist = allMatches[i].distance;
		if (dist < minDist)
			minDist = dist;
		if (dist > maxDist)
			maxDist = dist;
	}
	printf("	max dist : %f \n", maxDist);
	printf("	min dist : %f \n", minDist);

	//-- Filter the matches, keeping only the good ones (criterion used here: distance < 2*minDist)
	vector< DMatch > goodMatches;
	for (int i = 0; i < descriptorsObject.rows; i++)
	{
		if (allMatches[i].distance < 2 * minDist)
			goodMatches.push_back(allMatches[i]);
	}
	cout << "number of matches after filtering: " << goodMatches.size() << endl;

	//-- Show the matching result
	Mat resultImg;
	drawMatches(imgObject, keypointsObject, imgScene, keypointsScene,
		goodMatches, resultImg, Scalar::all(-1), Scalar::all(-1), vector<char>(),
		DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS //do not draw unmatched keypoints
		);
	//-- Print the keypoint correspondences of the good matches
	for (int i = 0; i < goodMatches.size(); i++)
		printf("	good match %d: keypointsObject [%d]  -- keypointsScene [%d]\n", i,
		goodMatches[i].queryIdx, goodMatches[i].trainIdx);

	///-- Step 4: find the corresponding perspective transform with findHomography
	vector<Point2f> object;
	vector<Point2f> scene;
	for (size_t i = 0; i < goodMatches.size(); i++)
	{
		//-- Take the keypoints from the good matches: a match is a one-to-one
		//-- correspondence between two keypoints, whose indices it stores
		//-- (here goodMatches[i].queryIdx and goodMatches[i].trainIdx)
		object.push_back(keypointsObject[goodMatches[i].queryIdx].pt);
		scene.push_back(keypointsScene[goodMatches[i].trainIdx].pt);
	}
	Mat H = findHomography(object, scene, CV_RANSAC);

	///-- Step 5: map the object corners with perspectiveTransform to locate the object in the scene
	vector<Point2f> objCorners(4);
	objCorners[0] = Point2f(0, 0);
	objCorners[1] = Point2f((float)imgObject.cols, 0);
	objCorners[2] = Point2f((float)imgObject.cols, (float)imgObject.rows);
	objCorners[3] = Point2f(0, (float)imgObject.rows);
	vector<Point2f> sceneCorners(4);
	perspectiveTransform(objCorners, sceneCorners, H);

	//-- Draw lines between the four corners of the detected object
	line(resultImg, sceneCorners[0] + Point2f(imgObject.cols, 0), sceneCorners[1] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
	line(resultImg, sceneCorners[1] + Point2f(imgObject.cols, 0), sceneCorners[2] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
	line(resultImg, sceneCorners[2] + Point2f(imgObject.cols, 0), sceneCorners[3] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);
	line(resultImg, sceneCorners[3] + Point2f(imgObject.cols, 0), sceneCorners[0] + Point2f(imgObject.cols, 0), Scalar(0, 255, 0), 4);

	//-- Show the detection result
	imshow("detection result", resultImg);

	double end = clock();
	cout << "\nSIFT--elapsed time: " << (end - begin) / CLOCKS_PER_SEC * 1000 << " ms\n";

	waitKey(0);
	return 0;
}