Opencv 使用串聯匹配影象拼接
阿新 • 發佈:2018-11-01
opencv自帶的stitching速度很慢, 其中一個最大的原因是每一張圖都要和其它的圖去匹配,如果有10張圖,除去自身不用匹配外,要匹配 10×(10-1) = 90 次。10張532×300圖拼接耗時14s左右,還姑且能忍受。可是100張圖要匹配 100×(100-1) = 9900 次。耗時不是簡單的線性增長。
Stitch讀入影象不用按照從左到右的順序,拼接結果和執行時間都是一樣的。
我們拍攝全景圖的時候都是從左到右,或者從右到左,前後兩張圖一般有部分重合。所以一個節省時間的好辦法就是我們這裡只對前後兩張圖匹配,然後連成一串。即用串聯匹配代替原匹配。
一些修改:
1.把匹配方法換成串聯匹配
2.把費時的光束平差法改成"ray";//射線發散誤差方法
3.再把費時的曝光補償改成ExposureCompensator::GAIN;//增益法
4.接著把也費時的尋找接縫線改成"voronoi"; //逐點法
#include "opencv2/core/core.hpp"
#include "highgui.h"
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/legacy/legacy.hpp"
#include "cvaux.h" // this header is required
#include "opencv2/stitching/detail/autocalib.hpp"
#include "opencv2/stitching/detail/blenders.hpp"
#include "opencv2/stitching/detail/camera.hpp"
#include "opencv2/stitching/detail/exposure_compensate.hpp"
#include "opencv2/stitching/detail/matchers.hpp"
#include "opencv2/stitching/detail/motion_estimators.hpp"
#include "opencv2/stitching/detail/seam_finders.hpp"
#include "opencv2/stitching/detail/util.hpp"
#include "opencv2/stitching/detail/warpers.hpp"
#include "opencv2/stitching/warpers.hpp"
#include <iostream>
#include <fstream>
#include <string>
#include <iomanip>
#include <vector>

using namespace cv;
using namespace std;
using namespace detail;

// Match the features of exactly two images (previous, current) with the
// 2-NN ratio method. On return f2_matches holds the 2x2 pairwise layout
// produced by BestOf2NearestMatcher: index 1 is (0->1), index 2 is (1->0).
void f2_matcher(vector<ImageFeatures> &features, vector<MatchesInfo> &f2_matches)
{
    BestOf2NearestMatcher matcher(false, 0.3f, 6, 6); // 2-NN feature matcher
    matcher(features, f2_matches);                    // run the match
}

// Chained matching: instead of matching every image against every other
// (O(n^2) matches, what Stitcher does by default), match each image only
// against its immediate neighbour, then expand the chain into the full
// n x n pairwise_matches table that the downstream stitching stages expect.
void i_matcher(vector<ImageFeatures> &features, vector<MatchesInfo> &pairwise_matches)
{
    const int num_images = static_cast<int>(features.size());

    // 1. Chain matching: f2_2[i] holds the match result between image i
    //    and image i+1 (size num_images-1).
    vector<vector<MatchesInfo> > f2_2;
    for (int i = 1; i < num_images; ++i)
    {
        vector<ImageFeatures> f2;
        vector<MatchesInfo> m2;
        f2.push_back(features[i - 1]);
        f2.push_back(features[i]);
        f2_matcher(f2, m2);
        f2_2.push_back(m2);
    }

    // 2. Expand the chain into the n x n layout used by OpenCV stitching.
    //    Non-adjacent pairs (and self-pairs) get an empty placeholder match.
    MatchesInfo empty_match;
    for (int i = 0; i < num_images; ++i)
    {
        for (int j = 0; j < num_images; ++j)
        {
            if (i + 1 == j) // forward neighbour: reuse (0->1) of the 2x2 result
            {
                pairwise_matches.push_back(f2_2[i][1]);
                pairwise_matches.back().src_img_idx = i; // fix up the indices
                pairwise_matches.back().dst_img_idx = j;
            }
            else if (j + 1 == i) // reverse neighbour: reuse (1->0) of the 2x2 result
            {
                pairwise_matches.push_back(f2_2[j][2]);
                pairwise_matches.back().src_img_idx = i;
                pairwise_matches.back().dst_img_idx = j;
            }
            else // self-match or non-adjacent pair: empty placeholder
            {
                empty_match.src_img_idx = -1;
                empty_match.dst_img_idx = -1;
                empty_match.num_inliers = 0;
                empty_match.confidence = 0;
                pairwise_matches.push_back(empty_match);
            }
        }
    }
}

int main(int argc, char** argv)
{
    const int num_images = 100; // number of input images

    // ---- Load the input images (named 1.jpg, 2.jpg, ...) ----
    vector<Mat> imgs;
    Mat img;
    char temp[100];
    double ge[100]; // feature-point count of each of the 100 images
    for (int i = 1; i <= num_images; i++)
    {
        sprintf(temp, "D:\\低解析度截圖重新命名\\%d.jpg", i);
        img = imread(temp);
        if (img.empty()) // FIX: previously unchecked; an empty Mat crashes warp() later
        {
            cerr << "Failed to load image: " << temp << endl;
            return -1;
        }
        imgs.push_back(img);
    }

    // ---- Load precomputed features (keypoints + descriptors) from disk ----
    vector<ImageFeatures> features(num_images);
    Point2f point;
    KeyPoint kp;
    float temp1 = 0, temp2 = 0;
    char ptsname[100];
    char descname[100];

    // Per-image feature counts. FIX: assert() disappears under NDEBUG,
    // so the open checks are now explicit runtime errors.
    ifstream g("D:\\特徵\\特徵點個數.txt");
    if (!g.is_open())
    {
        cerr << "Failed to open feature-count file" << endl;
        return -1;
    }
    for (int i = 1; i <= num_images; i++)
        g >> ge[i - 1];
    g.close();

    for (int i = 1; i <= num_images; i++)
    {
        const int cnt = static_cast<int>(ge[i - 1]); // feature count of image i

        sprintf(ptsname, "D:\\特徵\\pts%d.txt", i);
        ifstream infile(ptsname);
        if (!infile.is_open())
        {
            cerr << "Failed to open keypoint file: " << ptsname << endl;
            return -1;
        }
        // FIX: the original loop ran on !infile.eof(), which reads the final
        // coordinate pair twice and appends one spurious keypoint, breaking
        // the keypoints/descriptors row correspondence. Read exactly cnt points.
        for (int a = 0; a < cnt; a++)
        {
            infile >> temp1 >> temp2;
            point.x = temp1;
            point.y = temp2;
            kp = KeyPoint(point, 1.f);
            features[i - 1].keypoints.push_back(kp);
        }
        infile.close();

        sprintf(descname, "D:\\特徵\\desc%d.txt", i);
        ifstream des(descname);
        if (!des.is_open())
        {
            cerr << "Failed to open descriptor file: " << descname << endl;
            return -1;
        }
        // One 256-dimensional float descriptor row per keypoint.
        features[i - 1].descriptors = Mat::zeros(cnt, 256, CV_32FC1);
        for (int k = 0; k < cnt; k++)
        {
            for (int j = 0; j < 256; j++)
            {
                des >> features[i - 1].descriptors.at<float>(k, j);
            }
        }
        des.close();
    }

    // ---- Feature matching: chained matching replaces all-pairs matching ----
    vector<MatchesInfo> pairwise_matches;
    i_matcher(features, pairwise_matches);

    // ---- Camera parameter estimation ----
    HomographyBasedEstimator estimator;
    vector<CameraParams> cameras;
    estimator(features, pairwise_matches, cameras);

    for (size_t i = 0; i < cameras.size(); ++i) // rotation matrices must be CV_32F
    {
        Mat R;
        cameras[i].R.convertTo(R, CV_32F);
        cameras[i].R = R;
    }

    // Bundle adjustment to refine camera parameters.
    Ptr<detail::BundleAdjusterBase> adjuster;
    //adjuster = new detail::BundleAdjusterReproj(); // reprojection-error method (slower)
    adjuster = new detail::BundleAdjusterRay();      // ray-divergence method (faster)
    adjuster->setConfThresh(1); // match confidence threshold
    (*adjuster)(features, pairwise_matches, cameras);

    // Wave correction: straighten the panorama horizontally.
    vector<Mat> rmats;
    for (size_t i = 0; i < cameras.size(); ++i)
        rmats.push_back(cameras[i].R.clone());
    waveCorrect(rmats, WAVE_CORRECT_HORIZ);
    for (size_t i = 0; i < cameras.size(); ++i)
        cameras[i].R = rmats[i];
    rmats.clear();

    // ---- Warp every image into the panorama coordinate frame ----
    vector<Point> corners(num_images);      // top-left corner of each warped image
    vector<Mat> masks_warped(num_images);   // warped image masks
    vector<Mat> images_warped(num_images);  // warped images
    vector<Size> sizes(num_images);         // warped image sizes
    vector<Mat> masks(num_images);          // source image masks

    for (int i = 0; i < num_images; ++i) // source masks: use the whole image
    {
        masks[i].create(imgs[i].size(), CV_8U);
        masks[i].setTo(Scalar::all(255));
    }

    Ptr<WarperCreator> warper_creator;
    warper_creator = new cv::PlaneWarper(); // planar projection
    //warper_creator = new cv::CylindricalWarper();   // cylindrical projection
    //warper_creator = new cv::SphericalWarper();     // spherical projection
    //warper_creator = new cv::FisheyeWarper();       // fisheye projection
    //warper_creator = new cv::StereographicWarper(); // stereographic projection

    // Warp scale = camera focal length; all cameras share the same focal here.
    Ptr<RotationWarper> warper = warper_creator->create(static_cast<float>(cameras[0].focal));
    for (int i = 0; i < num_images; ++i)
    {
        Mat_<float> K;
        cameras[i].K().convertTo(K, CV_32F); // camera intrinsics as CV_32F
        // Warp the image; returns the warped image's top-left corner.
        corners[i] = warper->warp(imgs[i], K, cameras[i].R, INTER_LINEAR, BORDER_REFLECT, images_warped[i]);
        sizes[i] = images_warped[i].size();
        // Warp the mask the same way.
        warper->warp(masks[i], K, cameras[i].R, INTER_NEAREST, BORDER_CONSTANT, masks_warped[i]);
    }
    imgs.clear();
    masks.clear();

    // ---- Exposure compensation: gain method (faster than block gain) ----
    Ptr<ExposureCompensator> compensator = ExposureCompensator::createDefault(ExposureCompensator::GAIN);
    compensator->feed(corners, images_warped, masks_warped);
    for (int i = 0; i < num_images; ++i)
    {
        compensator->apply(i, corners[i], images_warped[i], masks_warped[i]);
    }

    // masks_warped is still needed after seam finding, so seam finding
    // operates on a copy, masks_seam.
    vector<Mat> masks_seam(num_images);
    for (int i = 0; i < num_images; i++)
        masks_warped[i].copyTo(masks_seam[i]);

    Ptr<SeamFinder> seam_finder;
    //seam_finder = new NoSeamFinder();     // no seam search
    seam_finder = new VoronoiSeamFinder();  // point-by-point (Voronoi) method, fast
    //seam_finder = new DpSeamFinder(DpSeamFinder::COLOR);      // dynamic programming
    //seam_finder = new DpSeamFinder(DpSeamFinder::COLOR_GRAD);
    //seam_finder = new GraphCutSeamFinder(GraphCutSeamFinder::COST_COLOR);       // graph cut
    //seam_finder = new GraphCutSeamFinder(GraphCutSeamFinder::COST_COLOR_GRAD);

    vector<Mat> images_warped_f(num_images);
    for (int i = 0; i < num_images; ++i) // seam finders want float images
        images_warped[i].convertTo(images_warped_f[i], CV_32F);
    images_warped.clear();

    seam_finder->find(images_warped_f, corners, masks_seam); // seam masks out

    // ---- Blending ----
    vector<Mat> images_warped_s(num_images);
    Ptr<Blender> blender;
    //blender = Blender::createDefault(Blender::NO, false); // simple overwrite
    // Feather blending:
    //blender = Blender::createDefault(Blender::FEATHER, false);
    //FeatherBlender* fb = dynamic_cast<FeatherBlender*>(static_cast<Blender*>(blender));
    //fb->setSharpness(0.005); // feather sharpness
    blender = Blender::createDefault(Blender::MULTI_BAND, false); // multi-band blending
    MultiBandBlender* mb = dynamic_cast<MultiBandBlender*>(static_cast<Blender*>(blender));
    mb->setNumBands(8); // number of pyramid levels
    blender->prepare(corners, sizes); // allocate the panorama canvas

    // Blending matters most near the seam line; the seam-mask boundary is the
    // seam itself, so dilate the seam masks to EXPAND them slightly (the old
    // comment claimed dilation shrinks the mask — it does the opposite), then
    // AND with the warped mask so the expansion stays inside valid pixels.
    vector<Mat> dilate_img(num_images);
    Mat element = getStructuringElement(MORPH_RECT, Size(20, 20)); // structuring element
    for (int k = 0; k < num_images; k++)
    {
        images_warped_f[k].convertTo(images_warped_s[k], CV_16S); // blender wants CV_16S
        dilate(masks_seam[k], masks_seam[k], element);            // expand seam mask
        masks_seam[k] = masks_seam[k] & masks_warped[k];          // clip to valid region
        blender->feed(images_warped_s[k], masks_seam[k], corners[k]);
    }
    masks_seam.clear();
    images_warped_s.clear();
    masks_warped.clear();
    images_warped_f.clear();

    Mat result, result_mask;
    blender->blend(result, result_mask); // produce panorama + its mask
    imwrite("pano.jpg", result);         // save the panorama
    return 0;
}