
faster_rcnn: wrapping the C++ version of Caffe (1)

Since FasterRCNN needs to be turned into a production component, Caffe has to be wrapped here. Wrapping sounds sophisticated, but it really just means building the interface code you write on top of Caffe into a dynamic library of your own, and making the Caffe library, together with Caffe's own dependencies, a dependency of your project. If you only use Caffe as-is, you can simply link against libcaffe.so or libcaffe.a in Caffe's build directory. If you have modified Caffe's C++ code, the procedure is exactly the same; but if Python modules are involved, for example a Python Layer, then at run time you also have to tell Caffe where its Python modules live.
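To make the idea concrete, here is a minimal sketch of what such a wrapper target might look like in CMake. The names detector.cpp, my_detector and CAFFE_ROOT are placeholders for illustration, not the files used later in this post:

# Minimal sketch: package our own interface on top of Caffe as a shared library.
# CAFFE_ROOT is assumed to point at the caffe-fast-rcnn checkout.
cmake_minimum_required(VERSION 2.8)
project(my_detector)
set(CAFFE_ROOT /path/to/caffe-fast-rcnn)

include_directories(${CAFFE_ROOT}/include)
add_library(my_detector SHARED detector.cpp)
# link against libcaffe.so; Caffe's own dependencies (glog, boost, OpenCV, ...) are added the same way
target_link_libraries(my_detector ${CAFFE_ROOT}/build/lib/libcaffe.so)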

#include <stdio.h>  // for snprintf
#include <string>
#include <vector>
#include <math.h>
#include <fstream>
#include <iostream>   // std::cout
#include <algorithm>  // std::sort
#include <boost/python.hpp>
#include "caffe/caffe.hpp"
#include "gpu_nms.hpp"
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
using namespace caffe;
using namespace std;

#define max(a, b) (((a)>(b)) ? (a) :(b))
#define min(a, b) (((a)<(b)) ? (a) :(b))
const int class_num=2;

/*
 * ===  Class  ======================================================================
 *         Name:  Detector
 *  Description:  FasterRCNN CXX Detector
 * =====================================================================================
 */
class Detector {
public:
    Detector(const string& model_file, const string& weights_file);
    void Detection(const string& im_name);
    void bbox_transform_inv(const int num, const float* box_deltas, const float* pred_cls, float* boxes, float* pred, int img_height, int img_width);
    void vis_detections(cv::Mat image, int* keep, int num_out, float* sorted_pred_cls, float CONF_THRESH);
    void boxes_sort(int num, const float* pred, float* sorted_pred);
private:
    shared_ptr<Net<float> > net_;
    Detector(){}
};

/*
 * ===  FUNCTION  ======================================================================
 *         Name:  Detector
 *  Description:  Load the model file and weights file
 * =====================================================================================
 */
//load the model file and weights
Detector::Detector(const string& model_file, const string& weights_file)
{
    net_ = shared_ptr<Net<float> >(new Net<float>(model_file, caffe::TEST));
    net_->CopyTrainedLayersFrom(weights_file);
}


//Used for sorting boxes by score
struct Info
{
    float score;
    const float* head;
};
bool compare(const Info& Info1, const Info& Info2)
{
    return Info1.score > Info2.score;
}

/*
 * ===  FUNCTION  ======================================================================
 *         Name:  Detect
 *  Description:  Perform detection operation
 *                Warning: inputs larger than 1000*600 are scaled down to fit
 * =====================================================================================
 */
//perform detection operation
//input image max size 1000*600
void Detector::Detection(const string& im_name)
{
    float CONF_THRESH = 0.8;
    float NMS_THRESH = 0.3;
    const int  max_input_side=1000;
    const int  min_input_side=600;

    cv::Mat cv_img = cv::imread(im_name);
    if (cv_img.empty())
    {
        std::cout << "Can not get the image file: " << im_name << endl;
        return;
    }
    cv::Mat cv_new(cv_img.rows, cv_img.cols, CV_32FC3, cv::Scalar(0, 0, 0));
    int max_side = max(cv_img.rows, cv_img.cols);
    int min_side = min(cv_img.rows, cv_img.cols);

    float max_side_scale = float(max_side) / float(max_input_side);
    float min_side_scale = float(min_side) /float( min_input_side);
    float max_scale=max(max_side_scale, min_side_scale);

    float img_scale = 1;

    if(max_scale > 1)
    {
        img_scale = float(1) / max_scale;
    }

    int height = int(cv_img.rows * img_scale);
    int width = int(cv_img.cols * img_scale);
    int num_out;
    cv::Mat cv_resized;

    std::cout<<"imagename "<<im_name<<endl;
    float im_info[3];
    // heap-allocate: a stack VLA here is non-standard C++ and can overflow for large inputs
    float *data_buf = new float[height * width * 3];
    float *boxes = NULL;
    float *pred = NULL;
    float *pred_per_class = NULL;
    float *sorted_pred_cls = NULL;
    int *keep = NULL;
    const float* bbox_delt;
    const float* rois;
    const float* pred_cls;
    int num;

    // subtract the BGR pixel means used by py-faster-rcnn (102.9801, 115.9465, 122.7717)
    for (int h = 0; h < cv_img.rows; ++h )
    {
        for (int w = 0; w < cv_img.cols; ++w)
        {
            cv_new.at<cv::Vec3f>(cv::Point(w, h))[0] = float(cv_img.at<cv::Vec3b>(cv::Point(w, h))[0])-float(102.9801);
            cv_new.at<cv::Vec3f>(cv::Point(w, h))[1] = float(cv_img.at<cv::Vec3b>(cv::Point(w, h))[1])-float(115.9465);
            cv_new.at<cv::Vec3f>(cv::Point(w, h))[2] = float(cv_img.at<cv::Vec3b>(cv::Point(w, h))[2])-float(122.7717);

        }
    }

    cv::resize(cv_new, cv_resized, cv::Size(width, height));
    im_info[0] = cv_resized.rows;
    im_info[1] = cv_resized.cols;
    im_info[2] = img_scale;

    // repack from OpenCV's interleaved HWC layout into the CHW layout Caffe expects
    for (int h = 0; h < height; ++h )
    {
        for (int w = 0; w < width; ++w)
        {
            data_buf[(0*height+h)*width+w] = float(cv_resized.at<cv::Vec3f>(cv::Point(w, h))[0]);
            data_buf[(1*height+h)*width+w] = float(cv_resized.at<cv::Vec3f>(cv::Point(w, h))[1]);
            data_buf[(2*height+h)*width+w] = float(cv_resized.at<cv::Vec3f>(cv::Point(w, h))[2]);
        }
    }

    // feed the data and im_info blobs, then run the whole network forward
    net_->blob_by_name("data")->Reshape(1, 3, height, width);
    net_->blob_by_name("data")->set_cpu_data(data_buf);
    net_->blob_by_name("im_info")->set_cpu_data(im_info);
    net_->ForwardFrom(0);
    bbox_delt = net_->blob_by_name("bbox_pred")->cpu_data();
    num = net_->blob_by_name("rois")->num();


    rois = net_->blob_by_name("rois")->cpu_data();
    pred_cls = net_->blob_by_name("cls_prob")->cpu_data();
    boxes = new float[num*4];
    pred = new float[num*5*class_num];
    pred_per_class = new float[num*5];
    sorted_pred_cls = new float[num*5];
    keep = new int[num];

    // each roi is (batch_idx, x1, y1, x2, y2); drop the index and undo the input scaling
    for (int n = 0; n < num; n++)
    {
        for (int c = 0; c < 4; c++)
        {
            boxes[n*4+c] = rois[n*5+c+1] / img_scale;
        }
    }

    bbox_transform_inv(num, bbox_delt, pred_cls, boxes, pred, cv_img.rows, cv_img.cols);
    // class 0 is the background class, so start from 1; sort, NMS and draw per class
    for (int i = 1; i < class_num; i ++)
    {
        for (int j = 0; j< num; j++)
        {
            for (int k=0; k<5; k++)
                pred_per_class[j*5+k] = pred[(i*num+j)*5+k];
        }
        boxes_sort(num, pred_per_class, sorted_pred_cls);
        _nms(keep, &num_out, sorted_pred_cls, num, 5, NMS_THRESH, 0);
        vis_detections(cv_img, keep, num_out, sorted_pred_cls, CONF_THRESH);
    }

    cv::imwrite("vis.jpg",cv_img);
    delete []boxes;
    delete []pred;
    delete []pred_per_class;
    delete []keep;
    delete []sorted_pred_cls;
    delete []data_buf;

}

/*
 * ===  FUNCTION  ======================================================================
 *         Name:  vis_detections
 *  Description:  Visualize the detection result
 * =====================================================================================
 */
void Detector::vis_detections(cv::Mat image, int* keep, int num_out, float* sorted_pred_cls, float CONF_THRESH)
{
    int i = 0;
    // boxes are sorted by score, so stop at the first one below the confidence threshold;
    // check i against num_out before indexing keep[]
    while (i < num_out && sorted_pred_cls[keep[i]*5+4] > CONF_THRESH)
    {
        cv::rectangle(image,
                      cv::Point(sorted_pred_cls[keep[i]*5+0], sorted_pred_cls[keep[i]*5+1]),
                      cv::Point(sorted_pred_cls[keep[i]*5+2], sorted_pred_cls[keep[i]*5+3]),
                      cv::Scalar(255, 0, 0));
        i++;
    }
}

/*
 * ===  FUNCTION  ======================================================================
 *         Name:  boxes_sort
 *  Description:  Sort the bounding boxes by score
 * =====================================================================================
 */
void Detector::boxes_sort(const int num, const float* pred, float* sorted_pred)
{
    vector<Info> my;
    Info tmp;
    for (int i = 0; i< num; i++)
    {
        tmp.score = pred[i*5 + 4];
        tmp.head = pred + i*5;
        my.push_back(tmp);
    }
    std::sort(my.begin(), my.end(), compare);
    for (int i=0; i<num; i++)
    {
        for (int j=0; j<5; j++)
            sorted_pred[i*5+j] = my[i].head[j];
    }
}

/*
 * ===  FUNCTION  ======================================================================
 *         Name:  bbox_transform_inv
 *  Description:  Compute bounding box regression value
 * =====================================================================================
 */
void Detector::bbox_transform_inv(int num, const float* box_deltas, const float* pred_cls, float* boxes, float* pred, int img_height, int img_width)
{
    float width, height, ctr_x, ctr_y, dx, dy, dw, dh, pred_ctr_x, pred_ctr_y, pred_w, pred_h;
    for(int i=0; i< num; i++)
    {
        width = boxes[i*4+2] - boxes[i*4+0] + 1.0;
        height = boxes[i*4+3] - boxes[i*4+1] + 1.0;
        ctr_x = boxes[i*4+0] + 0.5 * width;
        ctr_y = boxes[i*4+1] + 0.5 * height;
        for (int j=0; j< class_num; j++)
        {

            dx = box_deltas[(i*class_num+j)*4+0];
            dy = box_deltas[(i*class_num+j)*4+1];
            dw = box_deltas[(i*class_num+j)*4+2];
            dh = box_deltas[(i*class_num+j)*4+3];
            pred_ctr_x = ctr_x + width*dx;
            pred_ctr_y = ctr_y + height*dy;
            pred_w = width * exp(dw);
            pred_h = height * exp(dh);
            pred[(j*num+i)*5+0] = max(min(pred_ctr_x - 0.5* pred_w, img_width -1), 0);
            pred[(j*num+i)*5+1] = max(min(pred_ctr_y - 0.5* pred_h, img_height -1), 0);
            pred[(j*num+i)*5+2] = max(min(pred_ctr_x + 0.5* pred_w, img_width -1), 0);
            pred[(j*num+i)*5+3] = max(min(pred_ctr_y + 0.5* pred_h, img_height -1), 0);
            pred[(j*num+i)*5+4] = pred_cls[i*class_num+j];
        }
    }

}

int main()
{
    string model_file = "/home/lyh1/workspace/py-faster-rcnn/models/pascal_voc/VGG_CNN_M_1024/faster_rcnn_alt_opt/faster_rcnn_test.pt";
    string weights_file = "/home/lyh1/workspace/py-faster-rcnn/output/default/yuanzhang_car/vgg_cnn_m_1024_fast_rcnn_stage2_iter_40000.caffemodel";
    int GPUID=0;
    Caffe::SetDevice(GPUID);
    Caffe::set_mode(Caffe::GPU);
    Detector det(model_file, weights_file);
    det.Detection("/home/lyh1/workspace/py-faster-rcnn/data/demo/car.jpg");
    return 0;
}

The CMakeLists.txt corresponding to this file is given below, with comments added to the compile steps. $PYTHONPATH has to be extended here because, even though we call Caffe through its C++ interface, Caffe calls into Python at run time, so we have to tell it where our Python modules live. Looking at the model definition file used here, it uses the proposal module in the rpn folder under lib in the py-faster-rcnn root, so that module path ...\py-faster-rcnn\lib has to be added to $PYTHONPATH, along with Caffe's Python interface path ...\py-faster-rcnn\caffe-fast-rcnn\python. Below is how I added them to $PYTHONPATH in my ~/.bashrc, for reference. After adding them, run source ~/.bashrc so the change takes effect immediately; keep in mind that .bashrc is only executed when a new bash starts, so either source it in the current shell or open a new one. One more thing worth mentioning: once this is on $PYTHONPATH, if you have several Caffe builds on the system you should think about how to avoid conflicts. I dug myself a hole here: other Caffe builds that used the Python interface picked up this Caffe through $PYTHONPATH and produced all kinds of errors, so if you can, consider isolating this kind of thing with Docker.

Add them as shown in the figure (the original post includes a screenshot of the ~/.bashrc additions here).
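The screenshot itself is not reproduced here; with the /home/lyh1/workspace/py-faster-rcnn checkout used throughout this post, the two additions would look roughly like this (adjust the prefix to your own tree):

# ~/.bashrc -- the PYTHONPATH additions described above (illustrative paths)
export PYTHONPATH=/home/lyh1/workspace/py-faster-rcnn/lib:$PYTHONPATH
export PYTHONPATH=/home/lyh1/workspace/py-faster-rcnn/caffe-fast-rcnn/python:$PYTHONPATH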

Now a few more words about this CMakeLists.txt. It was my first time using CMake; I had used Autotools before, which was a pain in every way and left me somewhat scarred by manual builds on Linux. CMake really is fairly foolproof and keeps things organized much more clearly.

#This part is used to compile faster_rcnn_demo.cpp
#Minimum CMake version; set it to suit your project, I kept the default
cmake_minimum_required (VERSION 2.8)
#The name of our project
project (faster_rcnn_demo)

#Declare the executable to build and its source file
add_executable(faster_rcnn_demo faster_rcnn_demo.cpp)
#Add the header search paths that faster_rcnn_demo.cpp depends on:
#first the include directory of Caffe;
#then, because gpu_nms.cu is used, the directory holding gpu_nms.hpp, i.e. lib/nms under the py-faster-rcnn root;
#then several of Caffe's dependencies, including Python --
#note that the path containing boost/python.hpp must be added as well --
#plus the OpenCV, CUDA and linear algebra (MKL) include paths
include_directories ( "${PROJECT_SOURCE_DIR}/../caffe-fast-rcnn/include"
    "${PROJECT_SOURCE_DIR}/../lib/nms" 
    /share/apps/local/include
    /usr/local/include 
    /opt/python/include/python2.7
    /share/apps/opt/intel/mkl/include 
    /usr/local/cuda/include )
#One thing worth pointing out: in target_link_libraries, when the target is an executable,
#what follows must be the full paths of the shared libraries, otherwise linking fails.
#I first tried link_directories and then listed the libraries afterwards, and it kept
#complaining that the libraries could not be found.
#So either give the full paths here directly, or use find_library(), which searches the
#paths you specify and returns the absolute path -- that also avoids hard-coding locations
#(see the sketch after this block).
#gpu_nms.so lives in lib/nms under the py-faster-rcnn root; running make there generates the .so
target_link_libraries(faster_rcnn_demo /home/lyh1/workspace/py-faster-rcnn/caffe-fast-rcnn/build/lib/libcaffe.so
    /home/lyh1/workspace/py-faster-rcnn/lib/nms/gpu_nms.so 
    /share/apps/local/lib/libopencv_highgui.so 
    /share/apps/local/lib/libopencv_core.so 
    /share/apps/local/lib/libopencv_imgproc.so 
    /share/apps/local/lib/libopencv_imgcodecs.so
    /share/apps/local/lib/libglog.so
    /share/apps/local/lib/libboost_system.so
    /share/apps/local/lib/libboost_python.so
    /opt/rh/python27/root/usr/lib64/libpython2.7.so
    )
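For reference, the find_library() alternative mentioned in the comments would look roughly like the sketch below; CAFFE_LIB and OPENCV_CORE_LIB are illustrative variable names, and only two of the libraries are shown:

# Sketch: let CMake resolve absolute library paths instead of hard-coding them
find_library(CAFFE_LIB caffe
    HINTS /home/lyh1/workspace/py-faster-rcnn/caffe-fast-rcnn/build/lib)
find_library(OPENCV_CORE_LIB opencv_core
    HINTS /share/apps/local/lib)
target_link_libraries(faster_rcnn_demo ${CAFFE_LIB} ${OPENCV_CORE_LIB})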

Compilation hit an annoying snag: the build failed with an error about caffe.pb.h (the original post shows a screenshot of the error here).

When I searched the whole Caffe source tree with find, there really was no caffe.pb.h anywhere. After some Googling I learned that it has to be generated manually; the fix is to generate it with protoc from the Caffe source root and move it into the include folder:

protoc src/caffe/proto/caffe.proto --cpp_out=.
mkdir include/caffe/proto
mv src/caffe/proto/caffe.pb.h include/caffe/proto

Finally, cmake . followed by make completed successfully and the demo ran correctly.
The next step is packaging this into a dynamic library; the details will be covered in the next post.