
【Deep Learning】Implementing a CNN and Applying It to Handwritten Digit Recognition

Review

The previous two posts implemented the individual layers that make up a CNN. All that remains is to combine them into a CNN for handwritten digit recognition.

CNN Implementation

[Figure: network structure of the CNN (conv - relu - pool - affine - relu - affine - softmax)]

We implement the CNN following the network structure shown in the figure above; it contains only a single convolution layer.

  • The implementation of each layer is given below; see the earlier posts for the details:
import numpy as np

# im2col: unroll image data into a 2D matrix of patch rows
def im2col(input_data, filter_h, filter_w, stride=1, pad=0):
    N, C, H, W = input_data.shape
    out_h = (H + 2*pad - filter_h)//stride + 1
    out_w = (W + 2*pad - filter_w)//stride + 1

    img = np.pad(input_data, [(0,0), (0,0), (pad, pad), (pad, pad)], 'constant')
    col = np.zeros((N, C, filter_h, filter_w, out_h, out_w))

    for y in range(filter_h):
        y_max = y + stride*out_h
        for x in range(filter_w):
            x_max = x + stride*out_w
            col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]

    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(N*out_h*out_w, -1)
    return col

# col2im: the inverse transform of im2col
def col2im(col, input_shape, filter_h, filter_w, stride=1, pad=0):
    N, C, H, W = input_shape
    out_h = (H + 2*pad - filter_h)//stride + 1
    out_w = (W + 2*pad - filter_w)//stride + 1
    col = col.reshape(N, out_h, out_w, C, filter_h, filter_w).transpose(0, 3, 4, 5, 1, 2)

    img = np.zeros((N, C, H + 2*pad + stride - 1, W + 2*pad + stride - 1))
    for y in range(filter_h):
        y_max = y + stride*out_h
        for x in range(filter_w):
            x_max = x + stride*out_w
            img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]

    return img[:, :, pad:H + pad, pad:W + pad]

# ReLU layer
class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout[self.mask] = 0
        dx = dout
        return dx

# Affine (fully connected) layer
class Affine:
    def __init__(self, W, b):
        self.W = W
        self.b = b
        self.x = None
        self.original_x_shape = None
        # gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        # handle tensor inputs (e.g. the 4D output of a conv/pool layer)
        self.original_x_shape = x.shape
        x = x.reshape(x.shape[0], -1)
        self.x = x
        out = np.dot(self.x, self.W) + self.b
        return out

    def backward(self, dout):
        dx = np.dot(dout, self.W.T)
        self.dW = np.dot(self.x.T, dout)
        self.db = np.sum(dout, axis=0)
        dx = dx.reshape(*self.original_x_shape)  # restore the input shape (for tensors)
        return dx

# Convolution layer
class Convolution:
    def __init__(self, W, b, stride=1, pad=0):
        self.W = W
        self.b = b
        self.stride = stride
        self.pad = pad
        # intermediate data (used in backward)
        self.x = None
        self.col = None
        self.col_W = None
        # gradients of the weight and bias parameters
        self.dW = None
        self.db = None

    def forward(self, x):
        FN, C, FH, FW = self.W.shape
        N, C, H, W = x.shape
        out_h = 1 + int((H + 2*self.pad - FH) / self.stride)
        out_w = 1 + int((W + 2*self.pad - FW) / self.stride)

        col = im2col(x, FH, FW, self.stride, self.pad)
        col_W = self.W.reshape(FN, -1).T
        out = np.dot(col, col_W) + self.b
        out = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)

        self.x = x
        self.col = col
        self.col_W = col_W
        return out

    def backward(self, dout):
        FN, C, FH, FW = self.W.shape
        dout = dout.transpose(0, 2, 3, 1).reshape(-1, FN)

        self.db = np.sum(dout, axis=0)
        self.dW = np.dot(self.col.T, dout)
        self.dW = self.dW.transpose(1, 0).reshape(FN, C, FH, FW)

        dcol = np.dot(dout, self.col_W.T)
        dx = col2im(dcol, self.x.shape, FH, FW, self.stride, self.pad)
        return dx

# Pooling layer (max pooling)
class Pooling:
    def __init__(self, pool_h, pool_w, stride=1, pad=0):
        self.pool_h = pool_h
        self.pool_w = pool_w
        self.stride = stride
        self.pad = pad
        self.x = None
        self.arg_max = None

    def forward(self, x):
        N, C, H, W = x.shape
        out_h = int(1 + (H - self.pool_h) / self.stride)
        out_w = int(1 + (W - self.pool_w) / self.stride)

        col = im2col(x, self.pool_h, self.pool_w, self.stride, self.pad)
        col = col.reshape(-1, self.pool_h*self.pool_w)

        arg_max = np.argmax(col, axis=1)
        out = np.max(col, axis=1)
        out = out.reshape(N, out_h, out_w, C).transpose(0, 3, 1, 2)

        self.x = x
        self.arg_max = arg_max
        return out

    def backward(self, dout):
        dout = dout.transpose(0, 2, 3, 1)

        pool_size = self.pool_h * self.pool_w
        dmax = np.zeros((dout.size, pool_size))
        dmax[np.arange(self.arg_max.size), self.arg_max.flatten()] = dout.flatten()
        dmax = dmax.reshape(dout.shape + (pool_size,))

        dcol = dmax.reshape(dmax.shape[0] * dmax.shape[1] * dmax.shape[2], -1)
        dx = col2im(dcol, self.x.shape, self.pool_h, self.pool_w, self.stride, self.pad)
        return dx
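With the layers in place, a quick smoke test helps confirm that the shapes line up before assembling the full network. This is a minimal sketch; the random input and the 5x5 filter are chosen purely for illustration:

# Hypothetical smoke test: push one 28x28 grayscale image through a 5x5
# convolution followed by 2x2 max pooling and check the output shape.
x = np.random.randn(1, 1, 28, 28)
W = 0.01 * np.random.randn(30, 1, 5, 5)
b = np.zeros(30)

conv = Convolution(W, b, stride=1, pad=0)
pool = Pooling(pool_h=2, pool_w=2, stride=2)

out = pool.forward(conv.forward(x))
print(out.shape)  # (1, 30, 12, 12): (28 - 5)/1 + 1 = 24, halved by pooling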
  • The CNN itself, assembled from the layers above:
import numpy as np
from collections import OrderedDict
import pickle
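
# SoftmaxWithLoss below relies on softmax and cross_entropy_error from the
# earlier posts. Minimal sketches (assumed equivalents) are reproduced here
# so that this listing is self-contained.
def softmax(x):
    x = x - np.max(x, axis=-1, keepdims=True)  # shift for numerical stability
    return np.exp(x) / np.sum(np.exp(x), axis=-1, keepdims=True)

def cross_entropy_error(y, t):
    if y.ndim == 1:
        t = t.reshape(1, t.size)
        y = y.reshape(1, y.size)
    if t.size == y.size:  # one-hot labels -> class indices
        t = t.argmax(axis=1)
    batch_size = y.shape[0]
    return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size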

class SoftmaxWithLoss:
    def __init__(self):
        self.loss = None
        self.y = None # output of softmax
        self.t = None # teacher labels

    def forward(self, x, t):
        self.t = t
        self.y = softmax(x)
        self.loss = cross_entropy_error(self.y, self.t)
        
        return self.loss

    def backward(self, dout=1):
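        # For softmax followed by cross-entropy loss, the gradient w.r.t. the
        # pre-softmax input simplifies to (y - t); dividing by batch_size
        # averages it over the batch.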
        batch_size = self.t.shape[0]
        if self.t.size == self.y.size: # the labels are one-hot vectors
            dx = (self.y - self.t) / batch_size
        else:
            dx = self.y.copy()
            dx[np.arange(batch_size), self.t] -= 1
            dx = dx / batch_size
        
        return dx

class SimpleConvNet:
    """簡單的ConvNet

    conv - relu - pool - affine - relu - affine - softmax
    
    Parameters
    ----------
    input_size : 輸入大小(MNIST的情況下為784)
    conv_param : 卷積層的超引數(字典)
        filter_num : 濾波器(卷積核)的數量
        filter_size : 濾波器的大小
        pad : 填充
        stride : 步幅
    hidden_size : 隱藏層的神經元數量
    output_size : 輸出大小(MNIST的情況下為10)
    activation : 'relu' or 'sigmoid'
    weight_init_std : 指定權重的標準差(e.g. 0.01)
        指定'relu'或'he'的情況下設定“He的初始值”
        指定'sigmoid'或'xavier'的情況下設定“Xavier的初始值”
    """
    # initialization
    def __init__(self, input_dim=(1, 28, 28), 
                 conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        # pull the hyperparameters out of the conv_param dict
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        # output size of the convolution layer
        conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
        # flattened output size after 2x2/stride-2 pooling
        pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
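        # e.g. with the defaults: conv_output_size = (28 - 5 + 0)/1 + 1 = 24,
        # so pool_output_size = 30 * 12 * 12 = 4320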

        # initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        # build the layers
        self.layers = OrderedDict()  # ordered dict preserves insertion order
        # add the layers in forward order
        self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'], conv_param['stride'], conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        # the final layer is kept separately in last_layer
        self.last_layer = SoftmaxWithLoss()
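        # NOTE: the layers store references to the arrays in self.params, so
        # in-place parameter updates (e.g. params[key] -= lr * grads[key])
        # are seen by the layers without rebuilding them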
    
    # inference
    def predict(self, x):
        # call each layer's forward in order, passing each output to the next
        # layer; the result is the pre-softmax score, since SoftmaxWithLoss is
        # kept separately in last_layer (argmax is unaffected)
        for layer in self.layers.values():
            x = layer.forward(x)
        return x  
    
    # loss
    def loss(self, x, t):
        """求損失函式
        引數x是輸入資料、t是教師標籤
        """
        y = self.predict(x)
        return self.last_layer.forward(y, t)
    
    # accuracy
    def accuracy(self, x, t, batch_size=100):
        if t.ndim != 1: t = np.argmax(t, axis=1)  # one-hot labels -> class indices
        
        acc = 0.0
        
        for i in range(int(x.shape[0] / batch_size)):
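            # evaluate in mini-batches so the im2col buffers stay small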
            tx = x[i*batch_size:(i+1)*batch_size]
            tt = t[i*batch_size:(i+1)*batch_size]
            y = self.predict(tx)
            y = np.argmax(y, axis=1)
            acc += np.sum(y == tt) 
        
        return acc / x.shape[0]

    # gradients
    def gradient(self, x, t):
        """求梯度(誤差反向傳播法)

        Parameters
        ----------
        x : 輸入資料
        t : 教師標籤

        Returns
        -------
        具有各層的梯度的字典變數
            grads['W1']、grads['W2']、...是各層的權重
            grads['b1']、grads['b2']、...是各層的偏置
        """
        # forward
        self.loss(x, t)

        # backward
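        # seed the chain with dL/dL = 1, then propagate through the layers in reverse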
        dout = 1
        dout = self.last_layer.backward(dout)

        layers = list(self.layers.values())
        layers.reverse()
        for layer in layers:
            dout = layer.backward(dout)

        # collect the gradients computed by each layer
        grads = {}
        grads['W1'], grads['b1'] = self.layers['Conv1'].dW, self.layers['Conv1'].db
        grads['W2'], grads['b2'] = self.layers['Affine1'].dW, self.layers['Affine1'].db
        grads['W3'], grads['b3'] = self.layers['Affine2'].dW, self.layers['Affine2'].db

        return grads
    
    # save the model parameters
    def save_params(self, file_name="params.pkl"):
        # (the source is truncated here; this completion assumes the standard
        # pickle-based implementation, consistent with the pickle import above)
        params = {}
        for key, val in self.params.items():
            params[key] = val
        with open(file_name, 'wb') as f:
            pickle.dump(params, f)
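
Finally, to make good on the title, here is a sketch of how the network can be trained on MNIST with plain mini-batch SGD. It assumes a load_mnist helper like the one in the dataset package used throughout this series (any loader that yields normalized (N, 1, 28, 28) arrays will do); the hyperparameters are illustrative only:

from dataset.mnist import load_mnist  # assumed helper from this series

# flatten=False keeps images as (N, 1, 28, 28); labels are class indices
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=False)

network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={'filter_num': 30, 'filter_size': 5,
                                    'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)

learning_rate = 0.1
batch_size = 100
for i in range(1001):
    # sample a random mini-batch
    batch_mask = np.random.choice(x_train.shape[0], batch_size)
    x_batch, t_batch = x_train[batch_mask], t_train[batch_mask]

    # backprop, then a vanilla SGD update (in place, so the layers see it)
    grads = network.gradient(x_batch, t_batch)
    for key in ('W1', 'b1', 'W2', 'b2', 'W3', 'b3'):
        network.params[key] -= learning_rate * grads[key]

    if i % 100 == 0:
        print('iter %d, loss %.4f' % (i, network.loss(x_batch, t_batch)))

print('test accuracy:', network.accuracy(x_test, t_test))
network.save_params('params.pkl')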