SLAM從入門到放棄:SLAM十四講第八章習題(4)
阿新 • 發佈:2019-01-02
以下均為簡單筆記,如有錯誤,請多多指教。
- 使用Ceres實現RGB-D上稀疏直接法和半稠密直接法。
答:由於稀疏直接法和半稠密直接法並沒有本質區別,所以此處只提供了稀疏直接法的計算結果。我的實驗結果發現,在同樣的資料集上g2o和Ceres的結果似乎不太一樣,目前還不太清楚為啥,也許程式碼寫錯了~~。不過我分別嘗試了在SE3和旋轉向量上分別進行了計算,兩者的結果比較接近,但就是和g2o的不一樣。
基於軸角的方法class SE3Parameterization : public ceres::LocalParameterization { public: SE3Parameterization() {} virtual ~SE3Parameterization() {} virtual bool Plus(const double* x, const double* delta, double* x_plus_delta) const { Eigen::Map<const Eigen::Matrix<double, 6, 1>> lie(x); Eigen::Map<const Eigen::Matrix<double, 6, 1>> delta_lie(delta); Sophus::SE3 T = Sophus::SE3::exp(lie); Sophus::SE3 delta_T = Sophus::SE3::exp(delta_lie); Eigen::Matrix<double, 6, 1> x_plus_delta_lie = (delta_T * T).log(); for(int i = 0; i < 6; ++i) x_plus_delta[i] = x_plus_delta_lie(i, 0); return true; } virtual bool ComputeJacobian(const double* x, double* jacobian) const { ceres::MatrixRef(jacobian, 6, 6) = ceres::Matrix::Identity(6, 6); return true; } virtual int GlobalSize() const { return 6; } virtual int LocalSize() const { return 6; } }; class SparseBA : public ceres::SizedCostFunction<1,6> { public: cv::Mat *gray_; double cx_,cy_; double fx_,fy_; double pixelValue_; Eigen::Vector3d point_; SparseBA(cv::Mat *gray, double cx,double cy, double fx,double fy, Eigen::Vector3d point,double pixelValue) { gray_ = gray; cx_ = cx; cy_ = cy; fx_ = fx; fy_ = fy; point_ = point; pixelValue_ = pixelValue; } virtual bool Evaluate(double const* const* pose, double *residual, double **jacobians) const { Eigen::Map<const Eigen::Matrix<double, 6, 1>> lie(pose[0]); Sophus::SE3 T = Sophus::SE3::exp(lie); Eigen::Vector3d newPoint = T*point_; double x = newPoint(0); double y = newPoint(1); double z = newPoint(2); // Project double ux = fx_*x/z+cx_; double uy = fy_*y/z+cy_; residual[0] = getPixelValue(ux,uy) - pixelValue_; if(jacobians) { double invz = 1.0/z; double invz_2 = invz*invz; // jacobian from se3 to u,v // NOTE that in g2o the Lie algebra is (\omega, \epsilon), where \omega is so(3) and \epsilon the translation Eigen::Matrix<double, 2, 6> jacobian_uv_ksai; // 注意順序 jacobian_uv_ksai ( 0,3 ) = - x*y*invz_2 *fx_; jacobian_uv_ksai ( 0,4 ) = ( 1+ ( x*x*invz_2 ) ) *fx_; jacobian_uv_ksai ( 0,5 ) = - 
y*invz *fx_; jacobian_uv_ksai ( 0,0 ) = invz *fx_; jacobian_uv_ksai ( 0,1 ) = 0; jacobian_uv_ksai ( 0,2 ) = -x*invz_2 *fx_; jacobian_uv_ksai ( 1,3 ) = - ( 1+y*y*invz_2 ) *fy_; jacobian_uv_ksai ( 1,4 ) = x*y*invz_2 *fy_; jacobian_uv_ksai ( 1,5 ) = x*invz *fy_; jacobian_uv_ksai ( 1,0 ) = 0; jacobian_uv_ksai ( 1,1 ) = invz *fy_; jacobian_uv_ksai ( 1,2 ) = -y*invz_2 *fy_; Eigen::Matrix<double, 1, 2> jacobian_pixel_uv; jacobian_pixel_uv ( 0,0 ) = ( getPixelValue ( ux+1,uy )-getPixelValue ( ux-1,uy ) ) /2; jacobian_pixel_uv ( 0,1 ) = ( getPixelValue ( ux,uy+1 )-getPixelValue ( ux,uy-1 ) ) /2; Eigen::Matrix<double, 1, 6> jacobian = jacobian_pixel_uv*jacobian_uv_ksai; jacobians[0][0] = jacobian(0); jacobians[0][1] = jacobian(1); jacobians[0][2] = jacobian(2); jacobians[0][3] = jacobian(3); jacobians[0][4] = jacobian(4); jacobians[0][5] = jacobian(5); } return true; } // get a gray scale value from reference image (bilinear interpolated) double getPixelValue ( double x, double y ) const { uchar* data = & gray_->data[ int ( y ) * gray_->step + int ( x ) ]; double xx = x - floor ( x ); double yy = y - floor ( y ); return double ( ( 1-xx ) * ( 1-yy ) * data[0] + xx* ( 1-yy ) * data[1] + ( 1-xx ) *yy*data[ gray_->step ] + xx*yy*data[gray_->step+1] ); } }; bool poseEstimationDirectCeres ( const vector< Measurement >& measurements, cv::Mat* gray, Eigen::Matrix3f& K, Eigen::Isometry3d& Tcw ) { ceres::Problem problem; double pose[6]; Sophus::SE3 poseSE(Tcw.rotation(),Tcw.translation()); Eigen::Matrix<double,6,1> poseVec = poseSE.log(); pose[0] = poseVec(0); pose[1] = poseVec(1); pose[2] = poseVec(2); pose[3] = poseVec(3); pose[4] = poseVec(4); pose[5] = poseVec(5); // 新增 for ( Measurement m: measurements ) { ceres::CostFunction *costFunction = new SparseBA(gray,K(0,2),K(1,2),K(0,0),K(1,1), m.pos_world, double(m.grayscale) ); problem.AddResidualBlock(costFunction,NULL,pose); } problem.SetParameterization(pose, new SE3Parameterization()); ceres::Solver::Options options; 
options.linear_solver_type = ceres::DENSE_QR; ceres::Solver::Summary summary; ceres::Solve(options,&problem,&summary); // Update poseVec(0) = pose[0]; poseVec(1) = pose[1]; poseVec(2) = pose[2]; poseVec(3) = pose[3]; poseVec(4) = pose[4]; poseVec(5) = pose[5]; Tcw = Sophus::SE3::exp(poseVec).matrix(); }
class SparseBA : public ceres::SizedCostFunction<1,6> { public: cv::Mat *gray_; double cx_,cy_; double fx_,fy_; double pixelValue_; double X_,Y_,Z_; SparseBA(cv::Mat *gray, double cx,double cy, double fx,double fy, double X,double Y,double Z,double pixelValue) { gray_ = gray; cx_ = cx; cy_ = cy; fx_ = fx; fy_ = fy; X_ = X; Y_ = Y; Z_ = Z; pixelValue_ = pixelValue; } virtual bool Evaluate(double const* const* pose, double *residual, double **jacobians) const { double P[3]; P[0] = X_; P[1] = Y_; P[2] = Z_; double newP[3]; double R[3]; R[0] = pose[0][0]; R[1]=pose[0][1]; R[2]=pose[0][2]; ceres::AngleAxisRotatePoint(R,P,newP); newP[0] += pose[0][3]; newP[1] += pose[0][4]; newP[2] += pose[0][5]; // Project double ux = fx_*newP[0]/newP[2]+cx_; double uy = fy_*newP[1]/newP[2]+cy_; residual[0] = getPixelValue(ux,uy) - pixelValue_; if(jacobians) { double invz = 1.0/newP[2]; double invz_2 = invz*invz; // jacobian from se3 to u,v // NOTE that in g2o the Lie algebra is (\omega, \epsilon), where \omega is so(3) and \epsilon the translation Eigen::Matrix<double, 2, 6> jacobian_uv_ksai; jacobian_uv_ksai ( 0,0 ) = - newP[0]*newP[1]*invz_2 *fx_; jacobian_uv_ksai ( 0,1 ) = ( 1+ ( newP[0]*newP[0]*invz_2 ) ) *fx_; jacobian_uv_ksai ( 0,2 ) = - newP[1]*invz *fx_; jacobian_uv_ksai ( 0,3 ) = invz *fx_; jacobian_uv_ksai ( 0,4 ) = 0; jacobian_uv_ksai ( 0,5 ) = -newP[0]*invz_2 *fx_; jacobian_uv_ksai ( 1,0 ) = - ( 1+newP[1]*newP[1]*invz_2 ) *fy_; jacobian_uv_ksai ( 1,1 ) = newP[0]*newP[1]*invz_2 *fy_; jacobian_uv_ksai ( 1,2 ) = newP[0]*invz *fy_; jacobian_uv_ksai ( 1,3 ) = 0; jacobian_uv_ksai ( 1,4 ) = invz *fy_; jacobian_uv_ksai ( 1,5 ) = -newP[1]*invz_2 *fy_; Eigen::Matrix<double, 1, 2> jacobian_pixel_uv; jacobian_pixel_uv ( 0,0 ) = ( getPixelValue ( ux+1,uy )-getPixelValue ( ux-1,uy ) ) /2; jacobian_pixel_uv ( 0,1 ) = ( getPixelValue ( ux,uy+1 )-getPixelValue ( ux,uy-1 ) ) /2; Eigen::Matrix<double, 1, 6> jacobian = jacobian_pixel_uv*jacobian_uv_ksai; jacobians[0][0] = jacobian(0); 
jacobians[0][1] = jacobian(1); jacobians[0][2] = jacobian(2); jacobians[0][3] = jacobian(3); jacobians[0][4] = jacobian(4); jacobians[0][5] = jacobian(5); } return true; } // get a gray scale value from reference image (bilinear interpolated) double getPixelValue ( double x, double y ) const { uchar* data = & gray_->data[ int ( y ) * gray_->step + int ( x ) ]; double xx = x - floor ( x ); double yy = y - floor ( y ); return double ( ( 1-xx ) * ( 1-yy ) * data[0] + xx* ( 1-yy ) * data[1] + ( 1-xx ) *yy*data[ gray_->step ] + xx*yy*data[gray_->step+1] ); } }; bool poseEstimationDirectCeres ( const vector< Measurement >& measurements, cv::Mat* gray, Eigen::Matrix3f& K, Eigen::Isometry3d& Tcw ) { ceres::Problem problem; double pose[6]; Eigen::AngleAxisd rotationVector(Tcw.rotation()); pose[0] = rotationVector.angle()*rotationVector.axis()(0); pose[1] = rotationVector.angle()*rotationVector.axis()(1); pose[2] = rotationVector.angle()*rotationVector.axis()(2); pose[3] = Tcw.translation()(0); pose[4] = Tcw.translation()(1); pose[5] = Tcw.translation()(2); // 新增 for ( Measurement m: measurements ) { ceres::CostFunction *costFunction = new SparseBA(gray,K(0,2),K(1,2),K(0,0),K(1,1), m.pos_world(0),m.pos_world(1),m.pos_world(2), double(m.grayscale) ); problem.AddResidualBlock(costFunction,NULL,pose); } ceres::Solver::Options options; options.linear_solver_type = ceres::DENSE_QR; ceres::Solver::Summary summary; ceres::Solve(options,&problem,&summary); // Update cv::Mat rotateVectorCV = cv::Mat::zeros(3,1,CV_64FC1); rotateVectorCV.at<double>(0) = pose[0]; rotateVectorCV.at<double>(1) = pose[1]; rotateVectorCV.at<double>(2) = pose[2]; cv::Mat RCV; cv::Rodrigues(rotateVectorCV,RCV); Tcw(0,0) = RCV.at<double>(0,0); Tcw(0,1) = RCV.at<double>(0,1); Tcw(0,2) = RCV.at<double>(0,2); Tcw(1,0) = RCV.at<double>(1,0); Tcw(1,1) = RCV.at<double>(1,1); Tcw(1,2) = RCV.at<double>(1,2); Tcw(2,0) = RCV.at<double>(2,0); Tcw(2,1) = RCV.at<double>(2,1); Tcw(2,2) = RCV.at<double>(2,2); Tcw(0,3) = 
pose[3]; Tcw(1,3) = pose[4]; Tcw(2,3) = pose[5]; }