
Nonlinear Least Squares: Gauss-Newton and Levenberg-Marquardt

This post goes straight to the implementation, drawing mainly on Dr. Gao Xiang's "14 Lectures on Visual SLAM".

This post uses the data below. The model function is y = a*e^(b*t), the residual is r = a*e^(b*t) - y, and the cost function is fx = 0.5*r^2, summed over all data points.

    double t[8] = {1, 2, 3, 4, 5, 6, 7, 8}; //independent variable
    double y[8] = {8.3, 11.0, 14.7, 19.7, 26.7, 35.2, 44.4, 55.9};  //observations
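
In equation form, this is the standard nonlinear least-squares problem over the N = 8 data points:

$$ \min_{a,b}\; F(a,b) = \frac{1}{2}\sum_{i=1}^{N} r_i^2, \qquad r_i = a\,e^{b t_i} - y_i $$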

Taking partial derivatives of the model function with respect to a and b:

//partial derivative of the model function with respect to a
double Jacobian_a(double ti, double a, double b)
{
    return exp(b*ti);
}

//partial derivative of the model function with respect to b
double Jacobian_b(double ti, double a, double b)
{
    return a*ti*exp(b*ti);
}
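
These two functions implement

$$ \frac{\partial}{\partial a}\left(a e^{b t}\right) = e^{b t}, \qquad \frac{\partial}{\partial b}\left(a e^{b t}\right) = a\,t\,e^{b t} $$

so row i of the Jacobian used below is (e^(b*t_i), a*t_i*e^(b*t_i)).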

Gauss-Newton Method
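
Each Gauss-Newton iteration linearizes the residual around the current (a, b) and solves the normal equations for the update Δx = (Δa, Δb):

$$ J^T J \,\Delta x = -J^T r $$

which is exactly the linear system 'JTJ*x1 = B' assembled and solved in the code below.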

#include <iostream>
#include <vector>
#include <cmath>
#include <Eigen/Dense>

using namespace Eigen;
using namespace std;

int main() {
    std::cout << "Guassian-Newton iteration method" << std::endl;

    //initialize parameters
    int N = 8;  //number of data points
    double delta = 0.05; //step-size threshold: stop iterating once the update step is smaller than this
    double t[8] = {1, 2, 3, 4, 5, 6, 7, 8}; //independent variable
    double y[8] = {8.3, 11.0, 14.7, 19.7, 26.7, 35.2, 44.4, 55.9};  //observations


    int iterMax = 5; //maximum number of iterations
    double a = 6., b = 0.3; //initial guesses for a and b
    double fx=0;
    for (int k = 0; k < iterMax; ++k)
    {
        std::cout <<" ============================" << std::endl;
        std::cout << k<<" iter !" << std::endl;

        //compute fx and the residuals
        fx=0;
        VectorXd r(8);
        for(int i=0; i<N; i++){
            double ri = a*exp(b*t[i]) - y[i];
            fx +=0.5*ri*ri;
            r(i) = ri;
        }
        cout<<"r = \n"<<r<<endl<<endl;
        cout<<"fx = "<<fx<<endl<<endl;

        //build the Jacobian matrix
        MatrixXd JacobMat(8,2);
        for(int i=0; i<N; i++){
            JacobMat(i,0) = Jacobian_a(t[i], a, b);
            JacobMat(i,1) = Jacobian_b(t[i], a, b);
        }
        cout<<"JacobMat\n"<<JacobMat<<endl;

        Matrix2d JTJ = JacobMat.transpose()*JacobMat;
        cout<<"\nJTJ\n"<<JTJ<<endl;

        MatrixXd B = -JacobMat.transpose()*r;
        cout<<"\nB\n"<<B<<endl;

        //build and solve the linear system 'JTJ*x1 = B'
        Vector2d x1;
        x1 = JTJ.colPivHouseholderQr().solve(B);
        //x1 = JTJ.llt().solve(B);
        //x1 = JTJ.ldlt().solve(B);
        cout<<"\nx1\n"<<x1<<endl;

        double step_norm = x1.norm();   //step size (norm of the update)
        cout<<"\nstep_size is "<<step_norm<<endl;

        if(step_norm<delta){
            std::cout <<k+1<<"  step size below threshold; converged, stopping iteration!" << std::endl;
            break;
        } else{
            a += x1(0);
            b += x1(1);
            cout<<"\nupdate 'a' and 'b'\n\ta is "<<a<<"; b is "<<b<<endl;

            //recompute the cost with the updated parameters (for display)
            fx=0;
            for(int i=0; i<N; i++){
                double ri = a*exp(b*t[i]) - y[i];
                fx +=0.5*ri*ri;
                r(i) = ri;
            }
            cout<<"\nupdate fx = "<<fx<<endl<<endl;

            if(k==iterMax-1)
                std::cout <<k<<" reached iterMax without converging!" << std::endl;
        }
    }

    fx=0;
    for(int i=0; i<N; i++){
        double ri = a*exp(b*t[i]) - y[i];
        fx +=0.5*ri*ri;
    }
    cout<<"\nLastly 'a' and 'b'\n\ta is "<<a<<"; b is "<<b<<endl;
    cout<<"fx is "<<fx<<endl;

    return 0;
}
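
For reference, a minimal build sketch; the file name and the Eigen include path (/usr/include/eigen3) are assumptions for illustration, so adjust them to your setup:

    g++ -std=c++11 -I/usr/include/eigen3 gauss_newton.cpp -o gauss_newton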

Running the code, it converges after three iterations. Results before and after optimization:

//initial values
a is 6.;    b is 0.3;   fx is 63.6547
//after optimization
a is 7.0016;    b is 0.262038;  fx is 3.00657

Levenberg-Marquardt Method
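
Levenberg-Marquardt damps the Gauss-Newton system and only accepts a step when the gain ratio ρ, the actual cost decrease divided by the decrease predicted by the damped quadratic model L, is large enough:

$$ \left(J^T J + \lambda I\right)\Delta x = -J^T r, \qquad \rho = \frac{F(x) - F(x+\Delta x)}{L(0) - L(\Delta x)}, \qquad L(0) - L(\Delta x) = \tfrac{1}{2}\,\Delta x^T\!\left(\lambda\,\Delta x - J^T r\right) $$

The last identity (from Madsen, reference 3) is what the code below computes as L_bias, with B = -J^T r.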

#include <iostream>
#include <vector>
#include <cmath>
#include <Eigen/Core>
#include <Eigen/Dense>

using namespace Eigen;
using namespace std;

int main() {
    std::cout << "Levenberg-Marquardt iteration method" << std::endl;
    int N = 8;
    double delta_step = 0.05; //step-size threshold used to decide convergence
    double rho_delta = 0.25;  //accept a step only when rho, the ratio of actual to predicted cost decrease, exceeds this threshold
    double miu = 0.1;   //trust-region radius for the step
    double lamda = 1.;  //damping factor (plays the role of a Lagrange multiplier for the trust-region constraint)
    double pk_norm = 0, pk_norm_last = 0.01; //norms of the current and previous parameter updates
    double t[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    double y[8] = {8.3, 11.0, 14.7, 19.7, 26.7, 35.2, 44.4, 55.9};

    //initialize parameters a and b
    double a = 6., b = 0.3;
    double fx = 0, fx_update = 0;
    int iterMax = 5;
    double rho = 0.;
    for (int k = 0; k < iterMax; ++k)
    {
        std::cout <<" ============================" << std::endl;
        std::cout << k<<" iter !" << std::endl;

        int while_cnt=0;
        Vector2d x2;
        while(1) {
            //compute fx and the residuals
            fx=0;
            VectorXd r(8);
            for(int i=0; i<N; i++){
                double ri = a*exp(b*t[i]) - y[i];
                fx +=0.5*ri*ri;
                r(i) = ri;
            }
            cout<<"r = \n"<<r<<endl<<endl;
            cout<<"fx = "<<fx<<endl<<endl;

            //build the Jacobian matrix
            MatrixXd Jacobian(8,2);
            for(int i=0; i<N; i++){
                double Jai = Jacobian_a(t[i], a, b);
                double Jbi = Jacobian_b(t[i], a, b);
                Jacobian(i,0) = Jai;
                Jacobian(i,1) = Jbi;
            }
            cout<<Jacobian<<endl;

            Matrix2d JTJ = Jacobian.transpose()*Jacobian;
            Matrix2d A = JTJ + lamda*Matrix2d::Identity();
            MatrixXd B = -Jacobian.transpose()*r;

            //build and solve the linear system 'A*x2 = B'
            x2 = A.colPivHouseholderQr().solve(B);
            pk_norm = x2.norm();
            cout<<"\nstep_size is "<<pk_norm<<endl;

            //compute rho: evaluate the cost at the tentative update (a+x2(0), b+x2(1))
            fx_update=0;
            for(int i=0; i<N; i++){
                double r0 = (a+x2(0)) * exp((b+x2(1))*t[i]) - y[i];
                fx_update +=0.5*r0*r0;
            }
            //1. denominator of rho: predicted decrease of the damped quadratic model, L(0)-L(x2) = 0.5*x2^T*(lamda*x2 + B)
            double L_bias = 0.5*x2.transpose()*(lamda*x2+B);
            //2. alternative: first-order approximation of the predicted decrease
            //L_bias = -0.5*r.transpose()*Jacobian*x2;
            rho = (fx-fx_update)/L_bias;

            cout<<"fx delta is "<<(fx-fx_update)<<endl;
            cout<<"L_bias is "<<L_bias<<endl;
            cout<<"rho is "<<rho<<endl;

            //update the trust-region radius miu
            if(rho>0.75){
                miu*=2.;
            }else if(rho<0.25){
                miu*=0.5;
            }

            std::cout<<"\nwhile_cnt is "<<while_cnt<<"  "<<x2<<"  "<miu<<"\n";
            //rho = model function的實際下降 / phi function的近似下降、
            // 當rho大於閾值rho_delta時,認為實際下降和近似下降近似,該擬合可行
            // 並且步長在信賴區域內
            if ((rho>0.25) && (pk_norm<miu))
                break;

            while_cnt++;
            if(while_cnt>10){
                std::cout<<"陷入步長小於設定的閾值,重新選擇步長閾值\n";
                exit(0);
            }
        }

        //update lamda, capped at 1
        if(pk_norm>pk_norm_last)
            lamda *= 0.1;
        else
            lamda = lamda*2 > 1 ? 1. : lamda*2;
        pk_norm_last = pk_norm;

        cout<<"\nfx = "<<fx<<endl;
        cout<<"\nupdate fx = "<<fx_update<<endl;

        a += x2(0);
        b += x2(1);

        //check convergence
        if(pk_norm<delta_step){
            cout<<"step size is small: converged, exiting"<<endl;
            cout<<"lamda is "<<lamda<<endl;
            break;
        }
        if(k==iterMax-1)
            std::cout <<k<<" reached iterMax without converging!" << std::endl;
    }

    fx=0;
    for(int i=0; i<N; i++){
        double ri = a*exp(b*t[i]) - y[i];
        fx +=0.5*ri*ri;
    }
    cout<<"\nLastly 'a' and 'b'\n\ta is "<<a<<"; b is "<<b<<endl;
    cout<<"fx is "<<fx<<endl;

    return 0;
}

It likewise converges after three iterations:

//initial values
a is 6.;    b is 0.3;   fx is 63.6547
//after optimization
a is 7.00008; b is 0.262078; fx is 3.00654

Summary
The code here draws on many references combined with my own understanding; if you see things differently, feel free to write to [email protected].

The parameter miu ought to be coupled with lamda, so that together they adjust the step size and keep it inside the trust region; the 14 Lectures book does not cover this, but reference 3 explains it (a sketch of that coupling follows below).
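
As a minimal sketch of that coupling, assuming the damping update described in reference 3 (Madsen), the trust-region logic can be folded directly into lamda through the gain ratio rho, dropping the separate miu entirely; the helper below is my illustrative adaptation, not code from the reference:

#include <algorithm>
#include <cmath>

//Nielsen's damping update as described in reference 3:
//on an accepted step (rho > 0), shrink lamda based on how good the step was;
//on a rejected step, grow lamda geometrically via the factor nu.
void updateLambda(double rho, double &lamda, double &nu)
{
    if (rho > 0) {   //step accepted: behave more like Gauss-Newton
        lamda *= std::max(1.0/3.0, 1.0 - std::pow(2.0*rho - 1.0, 3));
        nu = 2.0;
    } else {         //step rejected: behave more like gradient descent
        lamda *= nu;
        nu *= 2.0;
    }
}

Called once per solve with the rho already computed above, this replaces both the miu update and the lamda update in my code.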

Gauss-Newton actually approximates the Hessian by Jacobian^T*Jacobian, which saves computation, but Jacobian^T*Jacobian is only positive semi-definite, so the solve may not yield a valid result, producing steps that are too large or a linearization that is locally inaccurate.
In Levenberg-Marquardt, when lamda is small the quadratic approximation is good in the current region and the method is close to Gauss-Newton; when lamda is large it is closer to first-order gradient descent. To some extent this avoids singular or ill-conditioned coefficient matrices in the linear system and gives better solutions; the two limits are shown below.
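
In equation form, the damped step interpolates between the two regimes:

$$ \Delta x = -\left(J^T J + \lambda I\right)^{-1} J^T r \;\approx\; \begin{cases} -(J^T J)^{-1} J^T r, & \lambda \to 0 \ \text{(Gauss-Newton)} \\ -\frac{1}{\lambda} J^T r, & \lambda \to \infty \ \text{(gradient descent)} \end{cases} $$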

References
1. Gao Xiang, "14 Lectures on Visual SLAM" (《視覺SLAM十四講》)
2. Alfonso Croeze, "Solving Nonlinear Least Squares Problems with the Gauss-Newton and Levenberg-Marquardt Methods"
3. K. Madsen, "Methods for Non-Linear Least Squares Problems"
Reposted from: https://blog.csdn.net/qq_31806429/article/details/82667779