
ORB-SLAM2 from Theory to Code (8): Tracking.cc Explained in Detail (Part 2)

My email is [email protected]; questions and discussion are welcome!

Continuing with tracking.cc.

  •  bool Tracking::NeedNewKeyFrame()
Function: decide whether a new keyframe is needed, i.e. the criteria for creating keyframes.
Steps (the frame thresholds mMinFrames/mMaxFrames are explained in the note right after this list):

1. More than mMaxFrames frames have passed since the last relocalization, or the map still contains no more than mMaxFrames keyframes; if neither holds, no keyframe is created.

2. Since the last keyframe was inserted, either mMaxFrames frames have passed, or mMinFrames frames have passed and local mapping is idle, or (stereo/RGB-D) tracking is getting weak; if none of these holds, no keyframe is created.

3. The current frame tracks more than a minimum number of points (more than 15 inliers); otherwise no keyframe is created.

4. The current frame tracks fewer points than a fraction (thRefRatio, 90% for monocular) of the reference keyframe's tracked points, or too few of its close points are already in the map; otherwise no keyframe is created (steps 3 and 4 together form condition c2 below).
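A note on the frame thresholds used in the steps above: in the ORB-SLAM2 Tracking constructor, mMinFrames and mMaxFrames are derived from the camera frame rate read from the settings file (Camera.fps): mMinFrames = 0 and mMaxFrames = fps, so "mMaxFrames frames" corresponds to roughly one second of video. A minimal sketch of that relationship; KeyFrameWindow and MakeWindow are illustrative names, not ORB-SLAM2 API:

#include <cmath>

// Illustrative stand-in for the two Tracking members that gate keyframe insertion.
struct KeyFrameWindow
{
    int mMinFrames;   // minimum frames between keyframes (0 in ORB-SLAM2)
    int mMaxFrames;   // maximum frames between keyframes (= camera fps in ORB-SLAM2)
};

// fps would come from the "Camera.fps" entry of the YAML settings file.
KeyFrameWindow MakeWindow(float fps)
{
    if(fps <= 0.0f)
        fps = 30.0f;   // ORB-SLAM2 falls back to 30 when fps is not set
    return KeyFrameWindow{0, static_cast<int>(std::lround(fps))};
}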

bool Tracking::NeedNewKeyFrame()
{
    // Step 1: if the user selected localization-only mode in the viewer, do not insert keyframes
    // Inserting a keyframe also creates MapPoints, so in localization-only mode neither the map's point cloud nor its keyframes grow any further
    if(mbOnlyTracking)// tracking only: no new keyframes
        return false;
    //If Local Mapping is freezed by a Loop Closure do not insert keyframes
    // If local mapping is stopped or being stopped by loop closing, do not insert keyframes
    if(mpLocalMapper->isStopped() || mpLocalMapper->stopRequested())
        return false;
    const int nKFs = mpMap->KeyFramesInMap();// number of keyframes in the map

    // Do not insert keyframes if not enough frames have passed from last relocalisation
    // Step 2: check whether we are still too close to the last relocalization
    // mCurrentFrame.mnId is the id of the current frame
    // mnLastRelocFrameId is the id of the frame where the last relocalization happened
    // mMaxFrames equals the frame rate of the image input
    // If the map still has few keyframes, keyframe insertion is still considered,
    // as it is once more than about 1 s has passed since the last relocalization
    if(mCurrentFrame.mnId<mnLastRelocFrameId+mMaxFrames && nKFs>mMaxFrames)
        return false;

    // Tracked MapPoints in the reference keyframe
    // Step 3: number of MapPoints tracked by the reference keyframe
    // UpdateLocalKeyFrames sets the keyframe with the highest covisibility with the current frame as its reference keyframe

    int nMinObs = 3;
    if(nKFs<=2)
        nMinObs=2;
    int nRefMatches = mpReferenceKF->TrackedMapPoints(nMinObs);// MapPoints tracked by the reference keyframe (observed by at least nMinObs keyframes)




    // Local Mapping accept keyframes?
    // Step 4: ask whether the local mapper is busy
    bool bLocalMappingIdle = mpLocalMapper->AcceptKeyFrames();



    // Stereo & RGB-D: Ratio of close "matches to map"/"total matches"
    // "total matches = matches to map + visual odometry matches"
    // Visual odometry matches will become MapPoints if we insert a keyframe.
    // This ratio measures how many MapPoints we could create if we insert a keyframe.
    // Step 5: for stereo or RGB-D, count the close points that could be added as MapPoints and those already tracked in the map

    int nMap = 0;// matches to the map
    int nTotal= 0;// total matches
    if(mSensor!=System::MONOCULAR)// stereo or RGB-D
    {
        for(int i =0; i<mCurrentFrame.N; i++)// iterate over all features of the current frame
        {
            if(mCurrentFrame.mvDepth[i]>0 && mCurrentFrame.mvDepth[i]<mThDepth)// the point's depth is within the reliable range
            {
                nTotal++;// total number of points that could become MapPoints
                if(mCurrentFrame.mvpMapPoints[i])
                    if(mCurrentFrame.mvpMapPoints[i]->Observations()>0)// the MapPoint is observed by at least one keyframe
                        nMap++;// number of points already tracked in the map
            }
        }
    }
    else
    {
        // There are no visual odometry matches in the monocular case
        nMap=1;
        nTotal=1;
    }
    const float ratioMap = (float)nMap/(float)(std::max(1,nTotal));
    // Step 6: decide whether to insert a keyframe
    // Thresholds
    // inlier-ratio threshold with respect to the points tracked by the reference keyframe
    float thRefRatio = 0.75f;
    if(nKFs<2)
        thRefRatio = 0.4f;// with only one keyframe in the map, keep the insertion threshold low
    if(mSensor==System::MONOCULAR)
        thRefRatio = 0.9f;

    // threshold on the fraction of close points already associated to the map
    float thMapRatio = 0.35f;
    if(mnMatchesInliers>300)
        thMapRatio = 0.20f;

    // Condition 1a: More than "MaxFrames" have passed from last keyframe insertion
    // no keyframe has been inserted for a long time
    const bool c1a = mCurrentFrame.mnId>=mnLastKeyFrameId+mMaxFrames;
    // Condition 1b: More than "MinFrames" have passed and Local Mapping is idle
    // the local mapper is idle
    const bool c1b = (mCurrentFrame.mnId>=mnLastKeyFrameId+mMinFrames && bLocalMappingIdle);

    // Condition 1c: tracking is weak
    // tracking is about to fail; 0.25 and 0.3 are deliberately low thresholds
    const bool c1c =  mSensor!=System::MONOCULAR && (mnMatchesInliers<nRefMatches*0.25 || ratioMap<0.3f) ;
    // Condition 2: Few tracked points compared to reference keyframe. Lots of visual odometry compared to map matches.
    // higher thresholds than c1c: the current frame must not overlap too much with the reference keyframe (the most recent keyframe)
    const bool c2 = ((mnMatchesInliers<nRefMatches*thRefRatio || ratioMap<thMapRatio) && mnMatchesInliers>15);

    if((c1a||c1b||c1c)&&c2)
    {
        // If the mapping accepts keyframes, insert keyframe.
        // Otherwise send a signal to interrupt BA
        if(bLocalMappingIdle)
        {
           return true;
        }

        else
        {
            mpLocalMapper->InterruptBA();// interrupt bundle adjustment
            if(mSensor!=System::MONOCULAR)
            {
                // do not let too many keyframes pile up in the queue
                // tracking does not insert keyframes directly: they are first pushed into mlNewKeyFrames,
                // and the local mapper pops them one by one and inserts them into mspKeyFrames
                if(mpLocalMapper->KeyframesInQueue()<3)// fewer than 3 keyframes waiting in the queue
                    return true;
                else
                    return false;
            }
            else
                return false;
        }
    }
    else
        return false;
}
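To make the interplay of the conditions concrete, here is a small self-contained sketch that plugs hypothetical numbers (not from any real sequence) into the c1a/c1b/c1c/c2 logic above; only the threshold values are taken from the function, everything else is made up:

#include <iostream>

int main()
{
    // Hypothetical tracking statistics for one stereo/RGB-D frame (made-up values).
    const int   mnMatchesInliers = 100;   // inliers tracked in the current frame
    const int   nRefMatches      = 200;   // MapPoints tracked by the reference keyframe
    const float ratioMap         = 0.40f; // close matches already in the map / total close matches
    const float thRefRatio       = 0.75f; // stereo/RGB-D default from the code above
    const float thMapRatio       = 0.35f;
    const bool  bLocalMappingIdle    = true;
    const bool  maxFramesSinceLastKF = false; // would make c1a true
    const bool  minFramesSinceLastKF = true;  // first half of c1b

    const bool c1a = maxFramesSinceLastKF;
    const bool c1b = minFramesSinceLastKF && bLocalMappingIdle;
    const bool c1c = (mnMatchesInliers < nRefMatches*0.25) || (ratioMap < 0.3f); // stereo/RGB-D only
    const bool c2  = ((mnMatchesInliers < nRefMatches*thRefRatio) || (ratioMap < thMapRatio))
                     && mnMatchesInliers > 15;

    // Prints 1 (true): c1b and c2 hold, so a keyframe would be requested.
    std::cout << ((c1a || c1b || c1c) && c2) << std::endl;
    return 0;
}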
  • void Tracking::CreateNewKeyFrame()
Function: create a new keyframe.
Steps

1: build a KeyFrame from the current frame

2: set this new keyframe as the current frame's reference keyframe

3: for stereo or RGB-D cameras, create new MapPoints for the current frame

void Tracking::CreateNewKeyFrame()
{
    if(!mpLocalMapper->SetNotStop(true))
        return;
    // Step 1: build a KeyFrame from the current frame
    KeyFrame* pKF = new KeyFrame(mCurrentFrame,mpMap,mpKeyFrameDB);
    // Step 2: set this keyframe as the current frame's reference keyframe
    // UpdateLocalKeyFrames sets the keyframe with the highest covisibility with the current frame as its reference keyframe
    mpReferenceKF = pKF;
    mCurrentFrame.mpReferenceKF = pKF;
    // This block does the same job as the corresponding part of UpdateLastFrame
    // Step 3: for stereo or RGB-D cameras, create new MapPoints for the current frame
    if(mSensor!=System::MONOCULAR)
    {
        // compute mRcw, mtcw, mRwc and mOw from Tcw
        mCurrentFrame.UpdatePoseMatrices();
        // We sort points by the measured depth by the stereo/RGBD sensor.
        // We create all those MapPoints whose depth < mThDepth.
        // If there are less than 100 close points we create the 100 closest.
        // Step 3.1: collect the current frame's features whose depth is below the threshold
        // new MapPoints are created for depth < mThDepth
        vector<pair<float,int> > vDepthIdx;
        vDepthIdx.reserve(mCurrentFrame.N);
        for(int i=0; i<mCurrentFrame.N; i++)
        {
           float z = mCurrentFrame.mvDepth[i];
            if(z>0)
            {
                vDepthIdx.push_back(make_pair(z,i));
            }
        }

        if(!vDepthIdx.empty())
        {
            // Step 3.2: sort by depth, closest first
            sort(vDepthIdx.begin(),vDepthIdx.end());
            // Step 3.3: wrap the closer points into MapPoints
            int nPoints = 0;
            for(size_t j=0; j<vDepthIdx.size();j++)
            {
               int i = vDepthIdx[j].second;
                bool bCreateNew = false;
                MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
                if(!pMP)
                   bCreateNew = true;
                else if(pMP->Observations()<1)
                {
                    bCreateNew = true;
                    mCurrentFrame.mvpMapPoints[i] = static_cast<MapPoint*>(NULL);
                }
                if(bCreateNew)
                {
                    cv::Mat x3D = mCurrentFrame.UnprojectStereo(i);
                    MapPoint* pNewMP = new MapPoint(x3D,pKF,mpMap);
                    // these attribute updates have to be done every time a MapPoint is created
                    pNewMP->AddObservation(pKF,i);
                    pKF->AddMapPoint(pNewMP,i);
                    pNewMP->ComputeDistinctiveDescriptors();
                    pNewMP->UpdateNormalAndDepth();
                    mpMap->AddMapPoint(pNewMP);
                    mCurrentFrame.mvpMapPoints[i]=pNewMP;
                    nPoints++;
                }
                else
                {
                    nPoints++;
                }
                // this is what controls the density of the point cloud for stereo/RGB-D cameras;
                // tweaking these values just to get a denser map is not a good idea,
                // because these MapPoints take part in the whole SLAM pipeline afterwards
                if(vDepthIdx[j].first>mThDepth && nPoints>100)
                    break;
            }
        }
    }
    mpLocalMapper->InsertKeyFrame(pKF);
    mpLocalMapper->SetNotStop(false);
    mnLastKeyFrameId = mCurrentFrame.mnId;
    mpLastKeyFrame = pKF;
}
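Steps 3.2/3.3 hinge on sorting (depth, feature index) pairs so that the closest measurements are promoted to MapPoints first; the loop stops only once it is past mThDepth and has already created more than 100 points. A minimal standalone sketch of that pattern (all values are made up, only the structure mirrors the code above):

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    // (depth, feature index) pairs, as in vDepthIdx; values are made up.
    std::vector<std::pair<float,int>> vDepthIdx = {{4.2f,0},{1.1f,1},{9.7f,2},{2.5f,3}};
    const float mThDepth = 3.0f;   // illustrative depth threshold
    const int   maxClose = 100;    // ORB-SLAM2 keeps at least the 100 closest points

    std::sort(vDepthIdx.begin(), vDepthIdx.end());   // sorts by depth (the pair's first member)
    int nPoints = 0;
    for(const auto& d : vDepthIdx)
    {
        // stop once we are past the threshold AND already have enough close points
        if(d.first > mThDepth && nPoints > maxClose)
            break;
        std::printf("create MapPoint for feature %d (depth %.1f)\n", d.second, d.first);
        ++nPoints;
    }
    return 0;
}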
  • void Tracking::SearchLocalPoints()
Function: find the local-map points that fall inside the current frame's field of view and match them against the current frame's features by projection.
Steps

1: iterate over mCurrentFrame.mvpMapPoints and mark those MapPoints so they are excluded from the later search

2: project all local MapPoints into the current frame and test whether they lie inside its frustum

3: for the MapPoints inside the frustum, perform the projection-based feature matching (SearchByProjection)

void Tracking::SearchLocalPoints()
{
    // Do not search map points already matched
    // Step 1: iterate over mCurrentFrame.mvpMapPoints and mark these MapPoints so they are not searched again,
    // because the current mvpMapPoints are by definition already in the current frame's field of view
    for(vector<MapPoint*>::iterator vit=mCurrentFrame.mvpMapPoints.begin(), vend=mCurrentFrame.mvpMapPoints.end(); vit!=vend; vit++)
    {
        MapPoint* pMP = *vit;
        if(pMP)
        {
            if(pMP->isBad())
            {
                *vit = static_cast<MapPoint*>(NULL);
            }
            else
            {
                // increase the count of frames in which this point is visible
                pMP->IncreaseVisible();
                // mark the point as seen by the current frame
                pMP->mnLastFrameSeen = mCurrentFrame.mnId;
                // mark the point so it is not projected later, since it is already matched
                pMP->mbTrackInView = false;
            }
        }
    }
    int nToMatch=0;
    // Project points in frame and check its visibility
    // Step 2: project all local MapPoints into the current frame, check whether they are inside the frustum, then match by projection
    for(vector<MapPoint*>::iterator vit=mvpLocalMapPoints.begin(), vend=mvpLocalMapPoints.end(); vit!=vend; vit++)
    {
        MapPoint* pMP = *vit;
        // a MapPoint already observed by the current frame is not tested again
        if(pMP->mnLastFrameSeen == mCurrentFrame.mnId)
            continue;
        if(pMP->isBad())
            continue;
        // Project (this fills MapPoint variables for matching)
        // Step 2.1: check whether this local MapPoint lies inside the current frame's frustum
        if(mCurrentFrame.isInFrustum(pMP,0.5))
        {
            // increase the count of frames in which this point is visible: the MapPoint lies inside the frustum of some frames
            pMP->IncreaseVisible();
            // only MapPoints inside the frustum take part in the projection matching that follows
            nToMatch++;
        }
    }
    if(nToMatch>0)
    {
        ORBmatcher matcher(0.8);
        int th = 1;
        if(mSensor==System::RGBD)
            th=3;
        // If the camera has been relocalised recently, perform a coarser search
        // if the camera was relocalized recently, perform a coarser (wider) search with a larger threshold
        if(mCurrentFrame.mnId<mnLastRelocFrameId+2)
            th=5;
        // Step 2.2: match the MapPoints inside the frustum to the current frame's features by projection
        matcher.SearchByProjection(mCurrentFrame,mvpLocalMapPoints,th);
    }
}
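The decisive call in step 2.1 is mCurrentFrame.isInFrustum(pMP,0.5). Roughly, it projects the MapPoint with the current pose and accepts it only if the depth is positive, the projection falls inside the image, the distance to the camera lies within the point's scale-invariance range, and the cosine of the angle between the viewing ray and the point's mean viewing direction is above the 0.5 argument (an angle below about 60°). The following is a simplified, self-contained sketch of that test, not the actual Frame::isInFrustum code; all names and parameters are illustrative:

// Simplified visibility test in the spirit of Frame::isInFrustum.
struct Vec3 { float x, y, z; };

bool InFrustumSketch(const Vec3& Pc,            // point in camera coordinates (R*Pw + t)
                     const Vec3& viewRayW,      // unit ray from camera centre to point, world frame
                     const Vec3& meanViewDirW,  // point's mean viewing direction (unit vector)
                     float fx, float fy, float cx, float cy,
                     float minX, float maxX, float minY, float maxY,
                     float dist, float minDist, float maxDist,
                     float viewingCosLimit)     // 0.5 in SearchLocalPoints
{
    if(Pc.z <= 0.0f) return false;                          // 1. positive depth
    const float u = fx*Pc.x/Pc.z + cx;
    const float v = fy*Pc.y/Pc.z + cy;
    if(u < minX || u > maxX || v < minY || v > maxY)        // 2. projects inside the image
        return false;
    if(dist < minDist || dist > maxDist)                    // 3. within scale-invariance range
        return false;
    const float cosView = viewRayW.x*meanViewDirW.x + viewRayW.y*meanViewDirW.y
                        + viewRayW.z*meanViewDirW.z;
    return cosView >= viewingCosLimit;                      // 4. viewing angle small enough
}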
/**
 * @brief Update the local map
 *
 * The local map consists of: \n
 * - the K1 keyframes and K2 neighbouring keyframes, plus the reference keyframe
 * - the MapPoints observed by these keyframes
 */
void Tracking::UpdateLocalMap()
{
    // This is for visualization
    // (arguably this line would sit better after the UpdateLocalPoints call below)
    mpMap->SetReferenceMapPoints(mvpLocalMapPoints);
    // Update the local keyframes and local MapPoints
    UpdateLocalKeyFrames();
    UpdateLocalPoints();
}
/**
 * @brief Update the local MapPoints, called by UpdateLocalMap()
 *
 * Collect the MapPoints of the local keyframes mvpLocalKeyFrames into mvpLocalMapPoints
 */
void Tracking::UpdateLocalPoints()
{
    // Step 1: clear the local MapPoints
    mvpLocalMapPoints.clear();
    // Step 2: iterate over the local keyframes mvpLocalKeyFrames
    for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
    {
        KeyFrame* pKF = *itKF;
        const vector<MapPoint*> vpMPs = pKF->GetMapPointMatches();
        // add this keyframe's MapPoints to mvpLocalMapPoints
        for(vector<MapPoint*>::const_iterator itMP=vpMPs.begin(), itEndMP=vpMPs.end(); itMP!=itEndMP; itMP++)
        {
           MapPoint* pMP = *itMP;
            if(!pMP)
                continue;
            // mnTrackReferenceForFrame prevents adding the same local MapPoint twice
            if(pMP->mnTrackReferenceForFrame==mCurrentFrame.mnId)
                continue;
            if(!pMP->isBad())
            {
                mvpLocalMapPoints.push_back(pMP);
                pMP->mnTrackReferenceForFrame=mCurrentFrame.mnId;
            }
        }
    }
}
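The mnTrackReferenceForFrame stamp used above (and again in UpdateLocalKeyFrames) is a cheap de-duplication trick: instead of maintaining a set of already-collected pointers, each object stores the id of the last frame that collected it, and anything whose stamp already equals mCurrentFrame.mnId is skipped. A minimal illustration of the pattern, with Item as a stand-in rather than an ORB-SLAM2 class:

#include <vector>

// Stand-in for a MapPoint: stamped with the id of the last frame that collected it.
struct Item { long mnTrackReferenceForFrame = -1; };

// Collect each item at most once per frame, without any extra set or hash lookup.
void CollectOnce(std::vector<Item*>& out, const std::vector<Item*>& candidates, long currentFrameId)
{
    for(Item* it : candidates)
    {
        if(!it || it->mnTrackReferenceForFrame == currentFrameId)
            continue;                               // null or already added for this frame
        it->mnTrackReferenceForFrame = currentFrameId;
        out.push_back(it);
    }
}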
/**
 * @brief Update the local keyframes, called by UpdateLocalMap()
 *
 * Iterate over the current frame's MapPoints, collect the keyframes that observe them together with their neighbouring keyframes, and update mvpLocalKeyFrames
 */
void Tracking::UpdateLocalKeyFrames()
{
    // Each map point vote for the keyframes in which it has been observed
    // Step 1: iterate over the current frame's MapPoints and record every keyframe that observes them
    map<KeyFrame*,int> keyframeCounter;
    for(int i=0; i<mCurrentFrame.N; i++)
    {
        if(mCurrentFrame.mvpMapPoints[i])
        {
            MapPoint* pMP = mCurrentFrame.mvpMapPoints[i];
            if(!pMP->isBad())
            {
                // keyframes that observe this MapPoint
                const map<KeyFrame*,size_t> observations = pMP->GetObservations();
                for(map<KeyFrame*,size_t>::const_iterator it=observations.begin(), itend=observations.end(); it!=itend; it++)
                    keyframeCounter[it->first]++;
            }
            else
            {
                mCurrentFrame.mvpMapPoints[i]=NULL;
            }
        }
    }
    if(keyframeCounter.empty())
        return;
    int max=0;
    KeyFrame* pKFmax= static_cast<KeyFrame*>(NULL);
    // Step 2: update the local keyframes (mvpLocalKeyFrames); there are three strategies for adding them
    // first clear the local keyframes
    mvpLocalKeyFrames.clear();
    mvpLocalKeyFrames.reserve(3*keyframeCounter.size());
    // All keyframes that observe a map point are included in the local map. Also check which keyframe shares most points
    // V-D K1: shares the map points with current frame
    // Strategy 1: keyframes that observe MapPoints of the current frame become local keyframes
    for(map<KeyFrame*,int>::const_iterator it=keyframeCounter.begin(), itEnd=keyframeCounter.end(); it!=itEnd; it++)
    {
        KeyFrame* pKF = it->first;
        if(pKF->isBad())
            continue;
        if(it->second>max)
        {
            max=it->second;
            pKFmax=pKF;
        }
        mvpLocalKeyFrames.push_back(it->first);
        // mnTrackReferenceForFrame prevents adding the same local keyframe twice
        pKF->mnTrackReferenceForFrame = mCurrentFrame.mnId;
    }
    // Include also some not-already-included keyframes that are neighbors to already-included keyframes
    // V-D K2: neighbors to K1 in the covisibility graph
    // Strategy 2: keyframes with high covisibility with those obtained by strategy 1 also become local keyframes
    for(vector<KeyFrame*>::const_iterator itKF=mvpLocalKeyFrames.begin(), itEndKF=mvpLocalKeyFrames.end(); itKF!=itEndKF; itKF++)
    {
        // Limit the number of keyframes
        if(mvpLocalKeyFrames.size()>80)
            break;
        KeyFrame* pKF = *itKF;
        // Strategy 2.1: the 10 best covisible keyframes
        const vector<KeyFrame*> vNeighs = pKF->GetBestCovisibilityKeyFrames(10);
        for(vector<KeyFrame*>::const_iterator itNeighKF=vNeighs.begin(), itEndNeighKF=vNeighs.end(); itNeighKF!=itEndNeighKF; itNeighKF++)
        {
            KeyFrame* pNeighKF = *itNeighKF;
            if(!pNeighKF->isBad())
            {
                // mnTrackReferenceForFrame prevents adding the same local keyframe twice
                if(pNeighKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
                {
                    mvpLocalKeyFrames.push_back(pNeighKF);
                    pNeighKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
                    break;
                }
            }
        }
        // Strategy 2.2: the keyframe's children in the spanning tree
        const set<KeyFrame*> spChilds = pKF->GetChilds();
        for(set<KeyFrame*>::const_iterator sit=spChilds.begin(), send=spChilds.end(); sit!=send; sit++)
        {
            KeyFrame* pChildKF = *sit;
            if(!pChildKF->isBad())
            {
                if(pChildKF->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
                {
                    mvpLocalKeyFrames.push_back(pChildKF);
                    pChildKF->mnTrackReferenceForFrame=mCurrentFrame.mnId;
                    break;
                }
            }
        }
        // Strategy 2.3: the keyframe's parent
        KeyFrame* pParent = pKF->GetParent();
        if(pParent)
        {
           // mnTrackReferenceForFrame prevents adding the same local keyframe twice
           if(pParent->mnTrackReferenceForFrame!=mCurrentFrame.mnId)
           {
                mvpLocalKeyFrames.push_back(pParent);
                pParent->mnTrackReferenceForFrame=mCurrentFrame.mnId;
                break;
            }
        }
    }
    // V-D Kref: shares the most map points with current frame
    // Step 3: update the current frame's reference keyframe: the keyframe sharing the most MapPoints with the current frame becomes the reference keyframe
    if(pKFmax)
    {
        mpReferenceKF = pKFmax;
        mCurrentFrame.mpReferenceKF = mpReferenceKF;
    }
}
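Step 1 above is essentially a vote: every MapPoint of the current frame votes for all keyframes that observe it, and the keyframe with the most votes (pKFmax) becomes the reference keyframe. A toy version of that counting, using plain integer ids instead of KeyFrame pointers:

#include <map>
#include <vector>

int main()
{
    // For each MapPoint of the current frame: ids of the keyframes observing it (made-up data).
    std::vector<std::vector<int>> observations = { {1,2}, {2,3}, {2}, {3,4} };

    std::map<int,int> keyframeCounter;          // keyframe id -> number of shared MapPoints
    for(const auto& obs : observations)
        for(int kfId : obs)
            keyframeCounter[kfId]++;

    int max = 0, bestKF = -1;                   // analogue of pKFmax
    for(const auto& kv : keyframeCounter)
        if(kv.second > max) { max = kv.second; bestKF = kv.first; }

    // bestKF == 2 here: it shares 3 MapPoints with the current frame.
    return bestKF == 2 ? 0 : 1;
}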
  •  bool Tracking::Relocalization()
Function: relocalize. From the stored keyframes, retrieve candidates that share enough matches with the current frame, then recover the pose with PnP inside RANSAC iterations.
Steps

1. Compute the BoW vector of the current frame and query the keyframe database for candidate matching keyframes

2. Build a PnP solver for each candidate, discard bad candidates, and prepare the set of matches between each candidate keyframe and the current frame

3. Solve for the pose with PnP: run several P4P RANSAC iterations followed by nonlinear least-squares pose optimization, until a camera pose supported by enough inliers is found

4. Return success or failure

bool Tracking::Relocalization()
{
    // Compute Bag of Words Vector
    // Step 1: compute the BoW representation of the current frame's features
    mCurrentFrame.ComputeBoW();
    // Relocalization is performed when tracking is lost
    // Track Lost: Query KeyFrame Database for keyframe candidates for relocalisation
    // Step 2: retrieve candidate keyframes similar to the current frame
    vector<KeyFrame*> vpCandidateKFs = mpKeyFrameDB->DetectRelocalizationCandidates(&mCurrentFrame);
    if(vpCandidateKFs.empty())// no candidates found, relocalization fails
        return false;
    const int nKFs = vpCandidateKFs.size();// number of candidate keyframes
    // We perform first an ORB matching with each candidate
    // If enough matches are found we setup a PnP solver
    ORBmatcher matcher(0.75,true);
    vector<PnPsolver*> vpPnPsolvers;
    vpPnPsolvers.resize(nKFs);
    vector<vector<MapPoint*> > vvpMapPointMatches;
    vvpMapPointMatches.resize(nKFs);
    vector<bool> vbDiscarded;
    vbDiscarded.resize(nKFs);
    int nCandidates=0;
    for(int i=0; i<nKFs; i++)
    {
        KeyFrame* pKF = vpCandidateKFs[i];
        if(pKF->isBad())
            vbDiscarded[i] = true;// discard bad candidate keyframes
        else
        {
            // Step 3: match the current frame against the candidate via BoW
            int nmatches = matcher.SearchByBoW(pKF,mCurrentFrame,vvpMapPointMatches[i]);
            if(nmatches<15)// discard candidates with fewer than 15 matches
            {
                vbDiscarded[i] = true;
                continue;
            }
            else// enough matches: solve with PnP
            {
                // initialize the PnPsolver
                PnPsolver* pSolver = new PnPsolver(mCurrentFrame,vvpMapPointMatches[i]);
                pSolver->SetRansacParameters(0.99,10,300,4,0.5,5.991);
                vpPnPsolvers[i] = pSolver;
                nCandidates++;
            }
        }
    }
    // Alternatively perform some iterations of P4P RANSAC
    // Until we found a camera pose supported by enough inliers
    bool bMatch = false;
    ORBmatcher matcher2(0.9,true);
    while(nCandidates>0 && !bMatch)
    {
        for(int i=0; i<nKFs; i++)
        {
            if(vbDiscarded[i])
                continue;

            // Perform 5 Ransac Iterations
            vector<bool> vbInliers;
            int nInliers;
            bool bNoMore;

            // Step 4: estimate the pose with the EPnP algorithm (inside the RANSAC loop)
            PnPsolver* pSolver = vpPnPsolvers[i];
            cv::Mat Tcw = pSolver->iterate(5,bNoMore,vbInliers,nInliers);

            // If Ransac reachs max. iterations discard keyframe
            if(bNoMore)
            {
                vbDiscarded[i]=true;
                nCandidates--;
            }



            // If a Camera Pose is computed, optimize
            if(!Tcw.empty())
            {
                Tcw.copyTo(mCurrentFrame.mTcw);
                set<MapPoint*> sFound;
                const int np = vbInliers.size();// size of the inlier mask (one flag per match)

                for(int j=0; j<np; j++)
                {
                    if(vbInliers[j])
                    {
                        mCurrentFrame.mvpMapPoints[j]=vvpMapPointMatches[i][j];
                        sFound.insert(vvpMapPointMatches[i][j]);
                    }
                    else
                        mCurrentFrame.mvpMapPoints[j]=NULL;
                }

                // Step 5: refine the pose with PoseOptimization

                int nGood = Optimizer::PoseOptimization(&mCurrentFrame);
                if(nGood<10)
                    continue;
                for(int io =0; io<mCurrentFrame.N; io++)
                    if(mCurrentFrame.mvbOutlier[io])
                        mCurrentFrame.mvpMapPoints[io]=static_cast<MapPoint*>(NULL);

                // If few inliers, search by projection in a coarse window and optimize again
                // Step 6: if there are too few inliers, match previously unmatched points by projection, then optimize again
                if(nGood<50)
                {
                   int nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,10,100);
                    if(nadditional+nGood>=50)
                    {
                        nGood = Optimizer::PoseOptimization(&mCurrentFrame);// optimize again

                        // If many inliers but still not enough, search by projection again in a narrower window
                        // the camera has been already optimized with many points
                        if(nGood>30 && nGood<50)
                        {
                            sFound.clear();
                            for(int ip =0; ip<mCurrentFrame.N; ip++)
                                if(mCurrentFrame.mvpMapPoints[ip])
                                    sFound.insert(mCurrentFrame.mvpMapPoints[ip]);
                            nadditional =matcher2.SearchByProjection(mCurrentFrame,vpCandidateKFs[i],sFound,3,64);
                            // Final optimization
                            if(nGood+nadditional>=50)
                            {
                                nGood = Optimizer::PoseOptimization(&mCurrentFrame);
                                for(int io =0; io<mCurrentFrame.N; io++)
                                    if(mCurrentFrame.mvbOutlier[io])
                                       mCurrentFrame.mvpMapPoints[io]=NULL;
                            }
                        }
                    }
                }

                // If the pose is supported by enough inliers stop ransacs and continue
                if(nGood>=50)
                {
                    bMatch = true;
                    break;
                }
            }
        }
    }

    if(!bMatch)
    {
        return false;
    }
    else
    {
        mnLastRelocFrameId = mCurrentFrame.mnId;
        return true;
    }
}
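ORB-SLAM2 relocalizes with its own PnPsolver (an EPnP + RANSAC implementation), so the snippet below is not the code used here; it only shows, with OpenCV's cv::solvePnPRansac, how the same problem looks in isolation: 3D MapPoints and their 2D keypoint matches go in, a camera pose and an inlier set come out. The parameter values are illustrative, not the ones passed to SetRansacParameters:

#include <opencv2/calib3d.hpp>
#include <opencv2/core.hpp>
#include <vector>

// Hypothetical wrapper: estimate the camera pose from 3D-2D matches with RANSAC + EPnP.
// K is the camera intrinsics; points3d/points2d would come from the BoW matches.
bool EstimatePoseRansac(const std::vector<cv::Point3f>& points3d,
                        const std::vector<cv::Point2f>& points2d,
                        const cv::Mat& K,
                        cv::Mat& rvec, cv::Mat& tvec,
                        std::vector<int>& inliers)
{
    if(points3d.size() < 4)                   // P4P needs at least 4 correspondences
        return false;
    return cv::solvePnPRansac(points3d, points2d, K, cv::Mat(),   // no distortion
                              rvec, tvec,
                              false,          // no initial pose guess
                              300,            // max RANSAC iterations
                              5.991f,         // reprojection error threshold (illustrative)
                              0.99,           // confidence
                              inliers,
                              cv::SOLVEPNP_EPNP);
}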