
[Caffe Notes 004]: Adding a New Layer to Caffe: the Maxout Layer

Based on the official Caffe code base as of February 2017.

1. The official Caffe workflow for adding a new layer

  • Add a class declaration for your layer to include/caffe/layers/your_layer.hpp.
    • Include an inline implementation of type(), overriding the method virtual inline const char* type() const { return "YourLayerName"; }, replacing YourLayerName with your layer’s name.
    • Implement the {*}Blobs() methods to specify blob number requirements; see the inline {*}Blobs() methods in include/caffe/layer.hpp, which can enforce strict top and bottom blob counts.
    • Omit the *_gpu declarations if you’ll only be implementing CPU code.
  • Implement your layer in src/caffe/layers/your_layer.cpp.
    • (optional) LayerSetUp for one-time initialization: reading parameters, fixed-size allocations, etc.
    • Reshape for computing the sizes of top blobs, allocating buffers, and any other work that depends on the shapes of bottom blobs
    • Forward_cpu for the function your layer computes
    • Backward_cpu for its gradient (Optional – a layer can be forward-only)
  • (Optional) Implement the GPU versions Forward_gpu and Backward_gpu in layers/your_layer.cu.
  • If needed, declare parameters in proto/caffe.proto, using (and then incrementing) the “next available layer-specific ID” declared in a comment above message LayerParameter
  • Instantiate and register your layer in your cpp file with the macro provided in layer_factory.hpp. Assuming that you have a new layer MyAwesomeLayer, you can achieve it with the following command:
INSTANTIATE_CLASS(MyAwesomeLayer);
REGISTER_LAYER_CLASS(MyAwesome);
  • Note that you should put the registration code in your own cpp file, so your implementation of a layer is self-contained.
  • Optionally, you can also register a Creator if your layer has multiple engines. For an example on how to define a creator function and register it, see GetConvolutionLayer in caffe/layer_factory.cpp.
  • Write tests in test/test_your_layer.cpp. Use test/test_gradient_check_util.hpp to check that your Forward and Backward implementations are in numerical agreement.
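
For the last item, here is a minimal sketch of such a test for the maxout layer built below. It assumes the MyMaxoutParameter field and MyMaxoutLayer class added in the following steps; the input shape and checker tolerances are illustrative.

#include <vector>

#include "gtest/gtest.h"

#include "caffe/blob.hpp"
#include "caffe/filler.hpp"
#include "caffe/layers/my_maxout_layer.hpp"

#include "caffe/test/test_caffe_main.hpp"
#include "caffe/test/test_gradient_check_util.hpp"

namespace caffe {

template <typename TypeParam>
class MyMaxoutLayerTest : public MultiDeviceTest<TypeParam> {
  typedef typename TypeParam::Dtype Dtype;

 protected:
  MyMaxoutLayerTest()
      : blob_bottom_(new Blob<Dtype>(2, 4, 3, 3)),
        blob_top_(new Blob<Dtype>()) {
    // Fill the bottom blob with Gaussian noise (ties are then negligible).
    FillerParameter filler_param;
    GaussianFiller<Dtype> filler(filler_param);
    filler.Fill(blob_bottom_);
    blob_bottom_vec_.push_back(blob_bottom_);
    blob_top_vec_.push_back(blob_top_);
  }
  virtual ~MyMaxoutLayerTest() { delete blob_bottom_; delete blob_top_; }

  Blob<Dtype>* const blob_bottom_;
  Blob<Dtype>* const blob_top_;
  vector<Blob<Dtype>*> blob_bottom_vec_;
  vector<Blob<Dtype>*> blob_top_vec_;
};

TYPED_TEST_CASE(MyMaxoutLayerTest, TestDtypesAndDevices);

TYPED_TEST(MyMaxoutLayerTest, TestGradient) {
  typedef typename TypeParam::Dtype Dtype;
  LayerParameter layer_param;
  // 4 input channels with num_output = 2 gives group_size = 2.
  layer_param.mutable_my_maxout_param()->set_num_output(2);
  MyMaxoutLayer<Dtype> layer(layer_param);
  GradientChecker<Dtype> checker(1e-2, 1e-2);
  checker.CheckGradientExhaustive(&layer, this->blob_bottom_vec_,
      this->blob_top_vec_);
}

}  // namespace caffe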

2. Adding the new layer in practice

Step 1: Choose the base class for the new layer

Compared with the layer taxonomy of older Caffe versions described in earlier blog posts, the classification has changed: the vision category is gone, and such layers now derive directly from Layer. Besides these there are loss layers, neuron layers, and data layers.
- Loss layers and data layers are what their names suggest; no more needs to be said.
- If the top blob has the same size as the bottom blob, derive from NeuronLayer. Examples are the activation layer ReLU and the element-wise exp and power layers. Such a layer implements the virtual functions LayerSetUp, Forward_cpu, and Backward_cpu.
- If the top blob and the bottom blob differ in size, derive directly from Layer. Examples are the conv layer and the maxout layer we are about to add. Such a layer implements the virtual functions LayerSetUp, Reshape, Forward_cpu, and Backward_cpu (see the sketch below).
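
To make the two cases concrete, here is a minimal compilable sketch of the same-size case deriving from NeuronLayer; the class name MyEltwiseOpLayer and its identity behavior are purely illustrative, and the different-size case is exactly the MyMaxoutLayer header built in Step 3.

#include "caffe/layers/neuron_layer.hpp"
#include "caffe/util/math_functions.hpp"

namespace caffe {

// Same-size case: NeuronLayer already provides a Reshape that makes the
// top blob match the bottom blob, so only Forward/Backward are needed.
template <typename Dtype>
class MyEltwiseOpLayer : public NeuronLayer<Dtype> {
 public:
  explicit MyEltwiseOpLayer(const LayerParameter& param)
      : NeuronLayer<Dtype>(param) {}
  virtual inline const char* type() const { return "MyEltwiseOp"; }

 protected:
  // Placeholder op: top = bottom.
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    caffe_copy(bottom[0]->count(), bottom[0]->cpu_data(),
               top[0]->mutable_cpu_data());
  }
  // Gradient of the identity: pass the diff straight through.
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down,
      const vector<Blob<Dtype>*>& bottom) {
    if (propagate_down[0]) {
      caffe_copy(top[0]->count(), top[0]->cpu_diff(),
                 bottom[0]->mutable_cpu_diff());
    }
  }
};

}  // namespace caffe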

Step 2: Define the layer's parameters in caffe.proto
- Add an ID for the Maxout layer in LayerParameter
Add a MyMaxoutParameter field as the last line of message LayerParameter, with its ID set to the next unused number:

optional MyMaxoutParameter my_maxout_param = 147;

The comment above message LayerParameter records the most recently added layer and the next available ID; update it now so it stays accurate for future additions:

// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 148 (last added: my_maxout_param)
  • Add the parameter message for the Maxout layer
    Anywhere in caffe.proto, add the Maxout layer's parameter message:
// message that stores parameters used by maxout layers
message MyMaxoutParameter {
  // the number of output for this layer
  optional uint32 num_output = 1;
}
  • Add the Maxout layer's name
    In the enum LayerType of message V1LayerParameter, add the Maxout layer's name:
MYMAXOUT = 40;

and in the same message also add:

optional MyMaxoutParameter my_maxout_param = 43;

Any numbers will do as long as they are not already taken.
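
After rerunning protoc (a normal make regenerates caffe.pb.h and caffe.pb.cc), the new message gets the standard generated accessors. The following small sketch is useful for sanity-checking the proto change before writing any layer code; printing DebugString() shows the prototxt-style text form in which the layer will later be configured:

#include <iostream>

#include "caffe/proto/caffe.pb.h"

int main() {
  caffe::LayerParameter layer_param;
  layer_param.set_name("maxout1");
  layer_param.set_type("MyMaxout");
  // Accessors generated from the MyMaxoutParameter message added above.
  layer_param.mutable_my_maxout_param()->set_num_output(2);
  std::cout << layer_param.DebugString();
  return 0;
}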

Step 3: Add the maxout layer's header file at ./include/caffe/layers/my_maxout_layer.hpp

The header mainly declares the MyMaxoutLayer class: the constructor, the virtual functions LayerSetUp, Reshape, Forward_cpu, and Backward_cpu, and a few member variables.

#ifndef CAFFE_MY_MAXOUT_LAYER_HPP_
#define CAFFE_MY_MAXOUT_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {  
template <typename Dtype>  
class MyMaxoutLayer : public Layer<Dtype> {  
 public:  
  explicit MyMaxoutLayer(const LayerParameter& param)  
      : Layer<Dtype>(param) {}  
  // Returns the layer type string used in prototxt files.
  virtual inline const char* type() const { return "MyMaxout"; }
  // One-time set-up: read and check the layer parameters.
  virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // Shape the top blob and the argmax buffer from the bottom blob's shape.
  virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);

 protected:  
  virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top);
  // virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
  //     const vector<Blob<Dtype>*>& top);
  virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);
  // virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
  //     const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom);

  int num_output_;       // number of output channels (maxout units)
  int num_;              // batch size
  int channels_;         // number of input channels
  int height_;
  int width_;
  int group_size_;       // channels_ / num_output_: inputs per maxout unit
  Blob<Dtype> max_idx_;  // argmax indices recorded in Forward, used in Backward

};  

}  // namespace caffe
#endif  // CAFFE_MY_MAXOUT_LAYER_HPP_
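
The header above places no constraint on the number of blobs. Since Reshape only shapes top[0], it would be reasonable to also enforce exactly one bottom and one top via the {*}Blobs() overrides mentioned in Section 1; the lines below are a suggested addition of mine, not part of the original code:

  // Possible additions inside the MyMaxoutLayer class declaration,
  // assuming the layer takes exactly one bottom blob and one top blob.
  virtual inline int ExactNumBottomBlobs() const { return 1; }
  virtual inline int ExactNumTopBlobs() const { return 1; }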

Step 4: Add the maxout layer's source file at ./src/caffe/layers/my_maxout_layer.cpp
- LayerSetUp: check the parameters
- Reshape: resize the top blob
- Forward_cpu: implement the forward pass
- Backward_cpu: implement the backward pass
- REGISTER_LAYER_CLASS: finally, register the layer

#include <vector>

#include "caffe/util/math_functions.hpp"
#include "caffe/layers/my_maxout_layer.hpp"

namespace caffe {   
template <typename Dtype>  
void MyMaxoutLayer<Dtype>::LayerSetUp(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
  const MyMaxoutParameter& my_maxout_param = this->layer_param_.my_maxout_param();
  CHECK(my_maxout_param.has_num_output())
      << "num_output should be specified.";   
}  

template <typename Dtype>  
void MyMaxoutLayer<Dtype>::Reshape(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top)
{
  num_output_ = this->layer_param_.my_maxout_param().num_output();   
  CHECK_GT(num_output_, 0) << "num_output must be positive.";
  // bottom 
  num_ = bottom[0]->num();  
  channels_ = bottom[0]->channels();  
  height_ = bottom[0]->height();    
  width_ = bottom[0]->width();     

  // TODO: generalize to handle inputs of different shapes.    
  for (int bottom_id = 1; bottom_id < bottom.size(); ++bottom_id) {    
    CHECK_EQ(num_, bottom[bottom_id]->num()) << "Inputs must have same num.";    
    CHECK_EQ(channels_, bottom[bottom_id]->channels())    
        << "Inputs must have same channels.";    
    CHECK_EQ(height_, bottom[bottom_id]->height())    
        << "Inputs must have same height.";    
    CHECK_EQ(width_, bottom[bottom_id]->width())    
        << "Inputs must have same width.";    
  }  

  // Compute the group size from the parameters.
  CHECK_EQ(channels_ % num_output_, 0)
      << "Number of channels should be a multiple of num_output.";
  group_size_ = channels_ / num_output_;

  top[0]->Reshape(num_, num_output_, height_, width_);
  max_idx_.Reshape(num_, num_output_, height_, width_);
}


template <typename Dtype>   
void MyMaxoutLayer<Dtype>::Forward_cpu(const vector<Blob<Dtype>*>& bottom,
      const vector<Blob<Dtype>*>& top) {
    int featureSize = height_ * width_;
    Dtype* mask = max_idx_.mutable_cpu_data();
    const int top_count = top[0]->count();
    // max_idx_ has the same shape as top[0], so this zeroes the whole mask.
    caffe_set(top_count, Dtype(0), mask);
    for (int i = 0; i < bottom.size(); ++i) {  
        const Dtype* bottom_data = bottom[i]->cpu_data();  
        Dtype* top_data = top[i]->mutable_cpu_data();    
        for (int n = 0; n < num_; n ++) {  
            for (int o = 0; o < num_output_; o ++) {  
                for (int g = 0; g < group_size_; g ++) {   
                    if (g == 0) {
                        // First channel of the group: copy values, record indices.
                        for (int h = 0; h < height_; h ++) {
                            for (int w = 0; w < width_; w ++) {  
                                int index = w + h * width_;  
                                top_data[index] = bottom_data[index];  
                                mask[index] = index;  
                            }  
                        }  
                    }  
                    else {
                        // Remaining channels: keep the running max and its index.
                        for (int h = 0; h < height_; h ++) {  
                            for (int w = 0; w < width_; w ++) {  
                                int index0 = w + h * width_;  
                                int index1 = index0 + g * featureSize;  
                                if (top_data[index0] < bottom_data[index1]) {  
                                    top_data[index0] = bottom_data[index1];  
                                    mask[index0] = index1;  
                                }                                 
                            }  
                        }  
                    }  
                }  
                bottom_data += featureSize * group_size_;  // next input group
                top_data += featureSize;                   // next output map
                mask += featureSize;
            }  
        }  
    }    
}  



template <typename Dtype>  
void MyMaxoutLayer<Dtype>::Backward_cpu(const vector<Blob<Dtype>*>& top,
      const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {  
    if (!propagate_down[0]) {  
        return;  
    }  
    Dtype* bottom_diff = bottom[0]->mutable_cpu_diff();
    caffe_set(bottom[0]->count(), Dtype(0), bottom_diff);
    const Dtype* mask = max_idx_.cpu_data();
    int featureSize = height_ * width_;  
    for (int i = 0; i < top.size(); i ++) {  
        const Dtype* top_diff = top[i]->cpu_diff();  
        Dtype* bottom_diff = bottom[i]->mutable_cpu_diff();    
        for (int n = 0; n < num_; n ++) { 
            for (int o = 0; o < num_output_; o ++) {  
                // Route each gradient to the input that produced the max.
                for (int h = 0; h < height_; h ++) {
                    for (int w = 0; w < width_; w ++) {  
                        int index = w + h * width_;  
                        int bottom_index = mask[index];  
                        bottom_diff[bottom_index] += top_diff[index];  
                    }  
                }  
                bottom_diff += featureSize * group_size_;  
                top_diff += featureSize;  
                mask += featureSize;  
            }  
        }  
    }  
}  


INSTANTIATE_CLASS(MyMaxoutLayer);
REGISTER_LAYER_CLASS(MyMaxout); 
}  // namespace caffe  
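
To see what Forward_cpu computes, the following standalone sketch (plain C++ with no Caffe dependency, purely illustrative) reproduces the grouping on a toy input: output channel o is the element-wise maximum over the contiguous input channels o*group_size through (o+1)*group_size - 1, which is exactly what the pointer arithmetic above implements.

#include <algorithm>
#include <iostream>
#include <vector>

int main() {
  // Toy input: 1 image, channels = 4, height = width = 1,
  // num_output = 2, hence group_size = channels / num_output = 2.
  const int num_output = 2;
  const int group_size = 2;
  std::vector<float> bottom = {0.3f, -1.2f, 2.5f, 0.7f};  // one value per channel
  std::vector<float> top(num_output);
  for (int o = 0; o < num_output; ++o) {
    float best = bottom[o * group_size];
    for (int g = 1; g < group_size; ++g) {
      best = std::max(best, bottom[o * group_size + g]);
    }
    top[o] = best;
  }
  // Prints top[0] = 0.3 (max of 0.3, -1.2) and top[1] = 2.5 (max of 2.5, 0.7).
  for (int o = 0; o < num_output; ++o) {
    std::cout << "top[" << o << "] = " << top[o] << std::endl;
  }
  return 0;
}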

Step 5: Recompile

make clean
make all -j16

3. Notes

  1. When deriving directly from Layer, you must implement the virtual functions Reshape, Forward_cpu, and Backward_cpu (LayerSetUp has an empty default but is usually needed to read parameters); when deriving from NeuronLayer, Reshape is already provided and need not be implemented.

  2. The name, return type, parameters, and parameter types of each virtual function must match the base-class declaration exactly; the safest approach is to copy the declarations from the base class. Otherwise compilation may fail with an error such as:

./include/caffe/layer_factory.hpp:135:67: error: cannot allocate an object of abstract type
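
For example, a declaration in the old two-argument style (the first one below, shown only for contrast) does not override the pure virtual Forward_cpu of Layer, so the class stays abstract and the factory cannot instantiate it:

// Wrong: the signature differs from the base class, so this declares a new
// function instead of overriding; Layer<Dtype>::Forward_cpu stays pure virtual.
virtual Dtype Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    vector<Blob<Dtype>*>* top);

// Right: copied verbatim from include/caffe/layer.hpp.
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top);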