
C++ Notes (continuously updated)

1. Pointer assignment

typedef tree_node<T> node_type;
node_type* root;
node_type** n = &root;   // root is itself a pointer and may still be null; taking its address lets the callee assign to it
*n = new node_type(v, p, 0, 0);
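A minimal sketch of why the double pointer is useful: passing node_type** (the address of root) lets a helper function allocate the node and write it back into the caller's root. The simplified tree_node and the insert_at helper below are illustrative, not from the original code.

template <typename T>
struct tree_node {
    T value;
    tree_node* parent;
    tree_node* left;
    tree_node* right;
    tree_node(const T& v, tree_node* p, tree_node* l, tree_node* r)
        : value(v), parent(p), left(l), right(r) {}
};

template <typename T>
void insert_at(tree_node<T>** n, const T& v, tree_node<T>* parent) {
    // *n may be null (e.g. an empty root); writing through n updates the caller's pointer.
    *n = new tree_node<T>(v, parent, nullptr, nullptr);
}

// Usage:
// tree_node<int>* root = nullptr;
// insert_at(&root, 42, nullptr);   // root now points to the new node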

2. C++ callback functions

using StatusCallback = std::function<void(const Status&)>;
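A hedged sketch of how such a callback type is typically defined and invoked; the Status struct and RunOperation below are placeholder names, not from the original code.

#include <functional>
#include <iostream>
#include <string>

// Minimal stand-in for the Status type referenced above (assumption).
struct Status {
    bool ok;
    std::string msg;
};

using StatusCallback = std::function<void(const Status&)>;

// An async-style API accepts the callback and invokes it when the work is done.
void RunOperation(StatusCallback done) {
    Status s{true, "finished"};
    done(s);   // invoke the callback
}

int main() {
    RunOperation([](const Status& s) {
        std::cout << (s.ok ? "OK: " : "ERROR: ") << s.msg << "\n";
    });
}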
Using a function template (here, a generic GPU for-loop kernel):
// A generic for loop on GPU
template <typename SizeT, typename OpT>
__global__
void loop_kernel(
        SizeT loop_size,
        OpT   op)
{
    const SizeT STRIDE = (SizeT)gridDim.x * blockDim.x;
    SizeT i = (SizeT)blockDim.x * blockIdx.x + threadIdx.x;

    while (i < loop_size) {
        op(i);
        i += STRIDE;
    }
}
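A hedged usage sketch of launching this kernel: because op can be any callable, a device lambda can be passed directly when nvcc's --extended-lambda flag is enabled. The grid/block sizes and the d_data pointer below are illustrative.

// Example launch (assumes --extended-lambda and that d_data is a device pointer):
int loop_size = 1 << 20;
float* d_data = nullptr;
cudaMalloc(&d_data, loop_size * sizeof(float));

loop_kernel<<<256, 256>>>(loop_size,
    [d_data] __device__ (int i) {
        d_data[i] = 2.0f * i;   // op(i) runs once per index via the grid-stride loop
    });
cudaDeviceSynchronize();
cudaFree(d_data);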

3. shared_ptr initialization through an output parameter, similar to the pattern above:

template <MPIDataType DT, DeviceType Dev, class T, class MASKT >
Status TorchOpContext<DT, Dev, T, MASKT>::AllocatePersistent(
    int64_t size, std::shared_ptr<PersistentBuffer>* tensor) {
  // Allocation errors are handled using PyTorch exceptions.
  *tensor = std::make_shared<TorchPersistentBuffer>(device_, size);
  return Status::OK();
}
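A minimal, self-contained sketch of the same pattern (an output parameter of type std::shared_ptr<T>* filled with std::make_shared); the Buffer type and function names are illustrative, not the original API.

#include <memory>

struct Buffer {
    explicit Buffer(long size) : size_(size) {}
    long size_;
};

void AllocateBuffer(long size, std::shared_ptr<Buffer>* out) {
    // Writing through the pointer replaces whatever the caller's shared_ptr held.
    *out = std::make_shared<Buffer>(size);
}

// Usage:
// std::shared_ptr<Buffer> buf;
// AllocateBuffer(1024, &buf);   // buf now owns a Buffer of size 1024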

4. Vector of pairs

std::vector<std::pair<std::string, uint64_t>> layers;
layers.push_back(std::make_pair("layer_1", 1));   // key must be a string here ("layer_1" is illustrative); or: layers.emplace_back("layer_1", 1);

5. Vector of tuples

// Building the vector:
std::vector<std::tuple<SizeT, SizeT, size_t>> chunks;
chunks.push_back(std::make_tuple(
  chunk_start, chunk_size, chunk_offset_bytes));

// Reading the fields back from one element (chunk refers to an element of chunks):
SizeT  chunk_start  = std::get<0>(chunk);
SizeT  chunk_size   = std::get<1>(chunk);
size_t chunk_offset = std::get<2>(chunk);
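With C++17 the three fields can also be unpacked in one statement via structured bindings (std::tie works in earlier standards). A small hedged sketch; the SizeT alias here is an assumption:

#include <cstddef>
#include <tuple>
#include <vector>

using SizeT = int;   // assumption: SizeT is an integer alias in the original code

void process(const std::vector<std::tuple<SizeT, SizeT, std::size_t>>& chunks) {
    for (const auto& [start, size, offset_bytes] : chunks) {
        (void)start; (void)size; (void)offset_bytes;   // use the unpacked fields here
    }
}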

6. The explicit keyword: prevents implicit conversions through single-argument constructors
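A small illustrative example (the type and function names are made up) of what explicit forbids:

struct Plain {
    Plain(int n) : n_(n) {}            // implicit: allows Plain p = 42;
    int n_;
};

struct Strict {
    explicit Strict(int n) : n_(n) {}  // explicit: the conversion must be spelled out
    int n_;
};

void take_plain(Plain) {}
void take_strict(Strict) {}

int main() {
    take_plain(42);            // OK: int implicitly converts to Plain
    // take_strict(42);        // error: no implicit conversion with explicit
    take_strict(Strict(42));   // OK: explicit construction
}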

7. Function-pointer typedef: typedef double (*func)(); declares func as "pointer to a function taking no arguments and returning double".
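A short hedged sketch of using such a typedef (with the equivalent modern alias shown in a comment):

#include <iostream>

typedef double (*func)();            // func is "pointer to function returning double"
// using func = double (*)();        // equivalent C++11 alias syntax

double pi() { return 3.14159; }

int main() {
    func f = &pi;                    // or simply: func f = pi;
    std::cout << f() << "\n";        // call through the pointer
}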

8. realloc: dynamic memory reallocation (resizes a block previously obtained from malloc/calloc/realloc, possibly moving it)
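A minimal usage sketch; realloc may return a new address (or NULL on failure), so the result should be checked before overwriting the original pointer:

#include <cstdlib>

int main() {
    int* a = static_cast<int*>(std::malloc(4 * sizeof(int)));
    if (!a) return 1;

    // Grow the block to 8 ints; the contents of the first 4 are preserved.
    int* bigger = static_cast<int*>(std::realloc(a, 8 * sizeof(int)));
    if (!bigger) {            // on failure the original block is still valid
        std::free(a);
        return 1;
    }
    a = bigger;

    std::free(a);
    return 0;
}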

9. C++ lambda functions:
Syntax: [capture](parameters) mutable -> return-type { statements }
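A brief illustration of the pieces of that syntax:

#include <iostream>

int main() {
    int counter = 0;

    // [capture](parameters) mutable -> return-type { statements }
    auto add = [counter](int x) mutable -> int {
        counter += x;      // mutable lets us modify the captured copy
        return counter;
    };

    std::cout << add(5) << "\n";   // 5
    std::cout << add(3) << "\n";   // 8 (the lambda's own copy of counter)
    std::cout << counter << "\n";  // 0 (captured by value; the original is untouched)
}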

10. reinterpret_cast

unsigned int key_bits = *reinterpret_cast<unsigned*>(const_cast<float*>(&key));   // view the bit pattern of a float as an unsigned int
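Pointer-based type punning like this formally violates strict aliasing; a hedged sketch of the commonly recommended alternative using std::memcpy (C++20 also offers std::bit_cast):

#include <cstdint>
#include <cstring>

// Same effect as the reinterpret_cast above, but without aliasing concerns.
std::uint32_t float_bits(float key) {
    std::uint32_t bits;
    std::memcpy(&bits, &key, sizeof(bits));   // copy the raw bytes of the float
    return bits;
    // C++20 alternative: return std::bit_cast<std::uint32_t>(key);
}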

11. Operator overloading

Common overload examples:
1. Stream output (<<):
std::ostream& operator<<(std::ostream& out, const half_t& x) {
    out << (float)x;
    return out;
}
2. Comparison (>=):
bool operator>=(const half_t& other) const {
    return float(*this) >= float(other);
}
3. Compound assignment (+=):
half_t& operator+=(const half_t& rhs) {
    *this = half_t(float(*this) + float(rhs));
    return *this;
}

4. Conversion to float:
operator float() const {
    uint32_t f = 0;
    // ... (bit-level expansion of the stored half value __x into the float bits f, omitted here)
    return *reinterpret_cast<float const*>(&f);
}
5. Conversion to __half:
operator __half() const {
    return reinterpret_cast<const __half&>(__x);
}
6. Increment (++) for a tree iterator:
self_type& operator++() {
    if (pos) {
        // Make sure the iterator is not empty, then check for a right subtree.
        if (pos->right) {
            // Descend to the leftmost node of the right subtree.
            pos = pos->right;
            while (pos->left) pos = pos->left;
        } else {
            // Climb to the first ancestor that has not been visited yet.
            while ((pos->parent) && (pos->parent->right == pos)) pos = pos->parent;
            pos = pos->parent;
        }
    }
    return *this;
}
7. Dereference (*):
// Note: dynamic exception specifications such as throw(std::runtime_error) are deprecated/removed in modern C++.
reference_type& operator*() const {
    if (pos) return pos->value;
    throw std::runtime_error("dereference null iterator!");
}
8. Inequality (!=):
template <typename N>
bool operator!=(tree_iterator<N> const& left, tree_iterator<N> const& right) {
    return !(left == right);
}
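As a compact, self-contained illustration of the same techniques (stream output, comparison, compound assignment, and a conversion operator), here is a hedged sketch on a made-up wrapper type rather than the half_t/iterator classes above:

#include <iostream>

struct meters {
    double v;
    explicit meters(double x) : v(x) {}

    operator double() const { return v; }                  // conversion operator, like operator float() above

    meters& operator+=(const meters& rhs) {                 // compound assignment
        v += rhs.v;
        return *this;
    }
    bool operator>=(const meters& other) const {            // comparison
        return v >= other.v;
    }
};

std::ostream& operator<<(std::ostream& out, const meters& m) {   // stream output
    out << m.v << " m";
    return out;
}

int main() {
    meters a(1.5), b(2.0);
    a += b;
    std::cout << a << ", a >= b: " << (a >= b) << "\n";      // prints "3.5 m, a >= b: 1"
}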