利用opencv自帶的example進行雙目標定
// Stereo calibration based on the OpenCV stereo_calib sample:
// detects chessboard corners in left/right image pairs, calibrates both
// cameras jointly, rectifies, and displays the rectified pairs.
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <vector>
#include <string>
#include <algorithm>
#include <iostream>
#include <iterator>
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>

using namespace cv;
using namespace std;

// Print usage information and return 0 (used as the exit status by main()).
static int print_help()
{
    cout <<
            " Given a list of chessboard images, the number of corners (nx, ny)\n"
            " on the chessboards, and a flag: useCalibrated for \n"
            " calibrated (0) or\n"
            " uncalibrated \n"
            " (1: use cvStereoCalibrate(), 2: compute fundamental\n"
            " matrix separately) stereo. \n"
            " Calibrate the cameras and display the\n"
            " rectified results along with the computed disparity images. \n" << endl;
    cout << "Usage:\n ./stereo_calib -w board_width -h board_height [-nr /*dot not view results*/] <image list XML/YML file>\n" << endl;
    return 0;
}

// Run the full stereo-calibration pipeline.
//   imagelist     - alternating left/right file names: "left01.jpg", "right01.jpg", ...
//   boardSize     - number of INNER chessboard corners (width x height)
//   useCalibrated - true: Bouguet rectification from stereoRectify();
//                   false: Hartley's uncalibrated method via the fundamental matrix
//   showRectified - show the rectified pairs in a window when true
// Side effects: writes "intrinsics.yml" and "extrinsics.yml" to the working
// directory and opens HighGUI windows when display is enabled.
static void StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    // Images must come in left/right pairs.
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }

    bool displayCorners = true;         // show each detection result in a window
    const int maxScale = 2;             // retry corner detection on an upscaled image
    // Physical size of one chessboard square.
    //const float squareSize = 1.f; // Set this to your actual square size
    const float squareSize = 36.f;      // unit: mm

    // ARRAY AND VECTOR STORAGE:
    // Image-plane corner coordinates and world (object) coordinates.
    vector<vector<Point2f> > imagePoints[2]; // [0]=left camera, [1]=right camera
    vector<vector<Point3f> > objectPoints;   // 3-D board coordinates per pair
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2; // number of stereo pairs
    // One corner vector per pair for each camera (e.g. 10 files -> 5 per side).
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;       // pairs where detection succeeded
    cout << "nimages: " << nimages <<endl;

    // The file list must alternate: "left01.jpg" "right01.jpg" ...
    for( i = j = 0; i < nimages; i++ ) // i = pair index, j = count of good pairs
    {
        for( k = 0; k < 2; k++ )       // k = 0 (left) / 1 (right)
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);  // load as 8-bit grayscale
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize ) // all images must share one size
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            // Try detection at scale 1, then at an enlarged scale if it failed.
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                // findChessboardCorners expects an 8-bit gray or color image;
                // on success 'corners' holds the inner-corner pixel coordinates.
                found = findChessboardCorners(timg, boardSize, corners,
                    CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_NORMALIZE_IMAGE);
                if(found)
                {
                    // Corners were found on the upscaled image: map them
                    // back to the original resolution.
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            // Optionally visualize the detected corners.
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, COLOR_GRAY2BGR); // gray -> BGR for drawing
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols); // scale factor to fit screen
                resize(cimg, cimg1, Size(), sf, sf);
                // NOTE(review): the full-size 'cimg' is shown although the
                // resized 'cimg1' was just computed; the upstream OpenCV
                // sample shows 'cimg1' — confirm which is intended.
                imshow("corners", cimg);
                char c = (char)waitKey(50); // wait 50 ms between images
                if( c == 27 || c == 'q' || c == 'Q' ) // Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
            {
                cout << "ChessboardCorners not found" <<endl;
                break;
            }
            // Refine the corner locations to sub-pixel accuracy.
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 0.01));
        }
        if( k == 2 ) // inner loop completed for both cameras -> pair is good
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    // Shrink to the number of successfully detected pairs.
    imagePoints[0].resize(nimages); // left-camera corner positions
    imagePoints[1].resize(nimages); // right-camera corner positions
    // Generate the ideal board coordinates from squareSize.
    objectPoints.resize(nimages);
    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )     // each row
            for( k = 0; k < boardSize.width; k++ )  // each column
                // NOTE(review): the upstream OpenCV sample uses
                // Point3f(k*squareSize, j*squareSize, 0); here x and y are
                // swapped relative to the corner scan order — verify this
                // matches the ordering returned by findChessboardCorners.
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }

    cout << "Running stereo calibration ...\n";

    // Intrinsic matrices and distortion coefficients for both cameras.
    Mat cameraMatrix[2], distCoeffs[2];
    //cameraMatrix[0] = Mat::eye(3, 3, CV_64F); // (alternative: identity init)
    //cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[0] = initCameraMatrix2D(objectPoints,imagePoints[0],imageSize,0);
    cameraMatrix[1] = initCameraMatrix2D(objectPoints,imagePoints[1],imageSize,0);
    Mat R, T, E, F; // R: rotation, T: translation, E: essential, F: fundamental
    // Joint calibration of both cameras; returns the RMS reprojection error.
    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    CALIB_FIX_ASPECT_RATIO +
                    CALIB_ZERO_TANGENT_DIST +
                    CALIB_SAME_FOCAL_LENGTH +
                    CALIB_RATIONAL_MODEL +
                    CALIB_FIX_K3 + CALIB_FIX_K4 + CALIB_FIX_K5,
                    TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 100, 1e-5) );
    cout << "done with RMS error=" << rms << endl;
    cout << "cameraMatrix[0]=" << cameraMatrix[0]<<endl;
    cout << "cameraMatrix[1]=" << cameraMatrix[1]<<endl;

    // CALIBRATION QUALITY CHECK
    // because the output fundamental matrix implicitly
    // includes all the output information,
    // we can check the quality of calibration using the
    // epipolar geometry constraint: m2^t*F*m1=0
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2]; // epipolar lines
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            // Undistort, then re-project with the same camera matrix so the
            // points stay in pixel coordinates.
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            // Distance of each point from its corresponding epipolar line.
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " << err/npoints << endl;

    // save intrinsic parameters
    FileStorage fs("intrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
              "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    /* Extrinsic parameters:
       R  -- rotation of the right camera relative to the left camera
       T  -- translation of the right camera relative to the left camera
       R1, R2 -- 3x3 rectification (rotation) transforms for left/right camera
       P1, P2 -- 3x4 projection matrices in the rectified coordinate system
       Q  -- disparity-to-depth mapping matrix (used to recover 3-D point coordinates)
    */
    Mat R1, R2, P1, P2, Q;  // computed by stereoRectify()
    Rect validRoi[2];       // valid (non-black) region of each rectified image
    // Rectify the stereo pair so both image planes are coplanar and row-aligned.
    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
    /* R, T: left-to-right rotation (3x3) and translation (3x1); T[0] (Tx) is the baseline.
       Rectification makes the two images coplanar and row-aligned so stereo
       matching becomes more reliable. Both camera images are projected onto a
       common imaging plane, which requires a rotation per camera: stereoRectify
       computes exactly those rotations Rl, Rr (here R1, R2). After rotating the
       left image by R1 and the right image by R2, the pair is coplanar and
       row-aligned.
       P1, P2 are the projection matrices mapping 3-D points to 2-D image
       coordinates: P*[X Y Z 1]' = [x y w].
       Q is the reprojection matrix mapping an image point plus disparity d back
       to 3-D: Q*[x y d 1] = [X Y Z W]. */
    fs.open("extrinsics.yml", FileStorage::WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the extrinsic parameters\n";
    cout << "R1=" << R1<<endl;
    cout << "R2=" << R2<<endl;
    cout << "P1=" << P1<<endl;
    cout << "P2=" << P2<<endl;

    // OpenCV can handle left-right or up-down camera arrangements;
    // detect which one from the dominant component of P2's translation column.
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

    // COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;

    Mat rmap[2][2]; // remap tables: [camera][x-map / y-map]
    // IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
    // OR ELSE HARTLEY'S METHOD
    else
    // use intrinsic parameters of each camera, but
    // compute the rectification transformation directly
    // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
        cout << "F=" << F.rows<<"*"<<F.cols << " H1:"<<H1.rows<<"*"<<H1.cols<<endl;
        // Convert the pixel-space homographies into rectification rotations.
        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
        cout << "R1 = " << R1<<endl;
        cout << "R2 = " << R2<<endl;
        cout << "P1 = " << P1<<endl;
        cout << "P2 = " << P2<<endl;
    }

    /* Build the remap tables (mapx, mapy) from R and P computed by
       stereoRectify. These tables are later consumed by remap() to warp the
       images so that both are coplanar and row-aligned.
       initUndistortRectifyMap's newCameraMatrix argument is the rectified
       camera matrix; in OpenCV that matrix is returned embedded in the
       projection matrix P, so P is passed here and the function reads the
       rectified camera matrix out of it. */
    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
    //cout <<"cameraMatrix[0]"<<cameraMatrix[0]<<"distCoeffs[0]"<<distCoeffs[0]<<"R1"<<R1<<"P1"<<P1<<"imageSize"<<imageSize<<"rmap[0][0]"<<rmap[0][0]<<"rmap[0][1]"<<rmap[0][1]<<"\n\n"<<endl;
    //cout <<"cameraMatrix[1]"<<cameraMatrix[1]<<"distCoeffs[1]"<<distCoeffs[0]<<"R2"<<R2<<"P2"<<P2<<"imageSize"<<imageSize<<"rmap[1][0]"<<rmap[1][0]<<"rmap[1][1]"<<rmap[1][1]<<"\n\n"<<endl;

    /* Display the rectification result: left and right images side by side on
       one canvas. Only the last processed pair remains visible at the end. */
    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        // Side-by-side layout for a left-right rig.
        sf =600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        // Stacked layout for an up-down rig.
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            // After remap() the left/right images are coplanar and row-aligned.
            remap(img, rimg, rmap[k][0], rmap[k][1], INTER_LINEAR);
            cvtColor(rimg, cimg, COLOR_GRAY2BGR);
            imshow("cimg", cimg);
            cout <<
            "isVerticalStereo=" << isVerticalStereo <<" sf="<<sf<< endl;
            // Select this camera's half of the canvas.
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            imshow("rectified-000", canvas);
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, INTER_AREA);
            if( useCalibrated )
            {
                // Outline the valid (cropped) region returned by stereoRectify.
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }
        // Draw epipolar guide lines across the canvas; matching features
        // should lie on the same line in both halves after rectification.
        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 ) // horizontal lines
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 ) // vertical lines
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' ) // ESC / q aborts the review loop
            break;
    }
}

// Read a flat string sequence (the image file list) from an XML/YML file.
// Returns false when the file cannot be opened or the top node is not a list.
static bool readStringList( const string& filename, vector<string>& l )
{
    l.resize(0);
    FileStorage fs(filename, FileStorage::READ);
    if( !fs.isOpened() )
        return false;
    FileNode n = fs.getFirstTopLevelNode();
    if( n.type() != FileNode::SEQ )
        return false;
    FileNodeIterator it = n.begin(), it_end = n.end();
    for( ; it != it_end; ++it )
        l.push_back((string)*it);
    return true;
}

// Parse command-line options (-w, -h, -nr, --help, image-list file) and run
// StereoCalib. Falls back to "stereo_calib.xml" with a 9x6 board when no
// image list is supplied.
int main(int argc, char** argv)
{
    Size boardSize;
    string imagelistfn;
    bool showRectified = true;

    for( int i = 1; i < argc; i++ )
    {
        if( string(argv[i]) == "-w" )
        {
            if( sscanf(argv[++i], "%d", &boardSize.width) != 1 || boardSize.width <= 0 )
            {
                cout << "invalid board width" << endl;
                return print_help();
            }
        }
        else if( string(argv[i]) == "-h" )
        {
            if( sscanf(argv[++i], "%d", &boardSize.height) != 1 || boardSize.height <= 0 )
            {
                cout << "invalid board height" << endl;
                return print_help();
            }
        }
        else if( string(argv[i]) == "-nr" )
            showRectified = false;
        else if( string(argv[i]) == "--help" )
            return print_help();
        else if( argv[i][0] == '-' )
        {
            cout << "invalid option " << argv[i] << endl;
            return 0;
        }
        else
            imagelistfn = argv[i];
    }

    if( imagelistfn == "" )
    {
        // Default sample configuration.
        imagelistfn = "stereo_calib.xml";
        boardSize = Size(9, 6);
    }
    else if( boardSize.width <= 0 || boardSize.height <= 0 )
    {
        cout << "if you specified XML file with chessboards, you should also specify the board width and height (-w and -h options)" << endl;
        return 0;
    }

    vector<string> imagelist;
    bool ok = readStringList(imagelistfn, imagelist);
    if(!ok || imagelist.empty())
    {
        cout << "can not open " << imagelistfn << " or the string list is empty" << endl;
        return print_help();
    }

    StereoCalib(imagelist, boardSize, true, showRectified);
    return 0;
}
得到內參和外參
%YAML:1.0
M1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.9118398778494270e+02, 0., 3.0200043006172234e+02, 0.,
4.5254392629023482e+02, 2.4429761654279827e+02, 0., 0., 1. ]
D1: !!opencv-matrix
rows: 1
cols: 12
dt: d
data: [ 6.5502968305794426e-02, -4.3702707841165300e-01, 0., 0., 0.,
0., 0., -7.3822045850612006e-01, 0., 0., 0., 0. ]
M2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 4.9118398778494270e+02, 0., 3.0251380762012855e+02, 0.,
4.5254392629023482e+02, 2.4325509154105887e+02, 0., 0., 1. ]
D2: !!opencv-matrix
rows: 1
cols: 12
dt: d
data: [ 1.1948494081208850e-03, -8.6262227111310374e-02, 0., 0., 0.,
0., 0., -1.6192883952372866e-01, 0., 0., 0., 0. ]
M1,M2–內參矩陣
D1,D2–畸變向量
攝像機內參矩陣:
        | fx   s  x0 |
K  =    |  0  fy  y0 |
        |  0   0   1 |
fx fy 為焦距 x0 y0 主點座標(相對成像平面)s為座標傾斜引數(理想情況下為0)
D = ( k1, k2, p1, p2 [, k3 [, k4, k5, k6 [, s1, s2, s3, s4 ]]] )
k1,k2,k3,k4,k5,k6為徑向畸變,p1,p2為切向畸變
%YAML:1.0
R: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9884316762675907e-01, -4.8077480586275118e-02,
-9.3933264525424703e-04, 4.8079103838389509e-02,
9.9884192810661887e-01, 1.7895335563569564e-03,
8.5220856570500326e-04, -1.8326256377959298e-03,
9.9999795760983046e-01 ]
T: !!opencv-matrix
rows: 3
cols: 1
dt: d
data: [ -7.5570887376059744e+01, -1.0940390462549969e+00,
9.5613262991292036e-01 ]
R1: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9934373466630544e-01, -3.3587861035826658e-02,
-1.3563022312117781e-02, 3.3602237038357116e-02,
9.9943493791852078e-01, 8.3338695733287842e-04,
1.3527366677186979e-02, -1.2885879250515359e-03,
9.9990767068361885e-01 ]
R2: !!opencv-matrix
rows: 3
cols: 3
dt: d
data: [ 9.9981522140855861e-01, 1.4474315827705250e-02,
-1.2649791345115189e-02, -1.4487731769855497e-02,
9.9989457722534925e-01, -9.6956872107580310e-04,
1.2634423925127316e-02, 1.1526563494858851e-03,
9.9991951811904345e-01 ]
P1: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ 4.4205452306772054e+02, 0., 3.1627860260009766e+02, 0., 0.,
4.4205452306772054e+02, 2.4280571746826172e+02, 0., 0., 0., 1.,
0. ]
P2: !!opencv-matrix
rows: 3
cols: 4
dt: d
data: [ 4.4205452306772054e+02, 0., 3.1627860260009766e+02,
-3.3412626514892305e+04, 0., 4.4205452306772054e+02,
2.4280571746826172e+02, 0., 0., 0., 1., 0. ]
Q: !!opencv-matrix
rows: 4
cols: 4
dt: d
data: [ 1., 0., 0., -3.1627860260009766e+02, 0., 1., 0.,
-2.4280571746826172e+02, 0., 0., 0., 4.4205452306772054e+02, 0.,
0., 1.3230163838532507e-02, 0. ]
extrinsic params外參
R–右相機相對左相機的旋轉矩陣
T–右相機相對左相機的平移矩陣
R1,R2–左右相機校準變換(旋轉)矩陣
P1,P2–左右相機在校準後坐標系中的投影矩陣
Q–視差-深度對映矩陣,我利用它來計算單個目標點的三維座標
---------------------
作者:Lary_Rock
來源:CSDN
原文:https://blog.csdn.net/ruidongren/article/details/79900259
版權宣告:本文為博主原創文章,轉載請附上博文連結!