A LeNet-5 MNIST Handwritten-Digit Recognition Model with L2 Regularization and a Moving-Average Model

If you find this useful, you are welcome to discuss it and learn together. Follow Me

Reference: the book Tensorflow實戰Google深度學習框架
Experiment platform:
Tensorflow 1.4.0
python 3.5.0
MNIST dataset: download the four files and put them in the MNIST_data folder under the current directory
Techniques used (a minimal sketch of the first and third follows this list):
L2 regularization
Dropout
Moving average method
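
Before the model code, here is a minimal, framework-free sketch of what the first and third techniques compute. The helper names are illustrative only; in the scripts below the same roles are played by tf.contrib.layers.l2_regularizer and tf.train.ExponentialMovingAverage.

# Illustrative helpers only; not part of the model code below.

def l2_penalty(weights, regularization_rate):
    # L2 regularization adds rate * sum(w ** 2) / 2 to the loss
    # (this is what tf.contrib.layers.l2_regularizer(rate) computes per tensor),
    # penalizing large weights to discourage overfitting.
    return regularization_rate * sum(w * w for w in weights) / 2


def moving_average_step(shadow, variable, decay=0.99):
    # tf.train.ExponentialMovingAverage keeps a shadow copy of each variable and
    # updates it as shadow = decay * shadow + (1 - decay) * variable, so the
    # shadow value is a smoothed version of the variable's recent history.
    # (When a num_updates argument such as global_step is passed, TensorFlow
    # uses min(decay, (1 + num_updates) / (10 + num_updates)) as the effective
    # decay, so the average moves faster early in training.)
    return decay * shadow + (1 - decay) * variable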

Defining the model architecture and the forward pass

For a 28x28x1 input image, the layers below produce: conv1 (5x5, depth 32, SAME padding) -> 28x28x32; pool1 (2x2, stride 2) -> 14x14x32; conv2 (5x5, depth 64, SAME) -> 14x14x64; pool2 (2x2, stride 2) -> 7x7x64; flattened to a 3136-dimensional vector; fc1 -> 512; fc2 -> 10 logits, one per digit class.

import tensorflow as tf

# Configure the network's parameters
INPUT_NODE = 784   # 28 * 28 pixels
OUTPUT_NODE = 10

IMAGE_SIZE = 28
NUM_CHANNELS = 1
NUM_LABELS = 10
# Size and depth of the first convolutional layer
CONV1_DEEP = 32
CONV1_SIZE = 5
# Size and depth of the second convolutional layer
CONV2_DEEP = 64
CONV2_SIZE = 5
# Number of nodes in the fully connected layer
FC_SIZE = 512


# Forward pass of the convolutional network. The extra argument `train`
# distinguishes the training pass from the test pass: dropout, which further
# improves generalization and reduces overfitting, is applied only during training.
def inference(input_tensor, train, regularizer):
    # Separate variable scopes isolate each layer's variables, so names only
    # have to be unique within the current layer rather than across the model.
    with tf.variable_scope('layer1-conv1'):
        conv1_weights = tf.get_variable(
            "weight", [CONV1_SIZE, CONV1_SIZE, NUM_CHANNELS, CONV1_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv1_biases = tf.get_variable("bias", [CONV1_DEEP], initializer=tf.constant_initializer(0.0))
        conv1 = tf.nn.conv2d(input_tensor, conv1_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu1 = tf.nn.relu(tf.nn.bias_add(conv1, conv1_biases))

    with tf.name_scope("layer2-pool1"):
        pool1 = tf.nn.max_pool(relu1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="SAME")

    with tf.variable_scope("layer3-conv2"):
        conv2_weights = tf.get_variable(
            "weight", [CONV2_SIZE, CONV2_SIZE, CONV1_DEEP, CONV2_DEEP],
            initializer=tf.truncated_normal_initializer(stddev=0.1))
        conv2_biases = tf.get_variable("bias", [CONV2_DEEP], initializer=tf.constant_initializer(0.0))
        conv2 = tf.nn.conv2d(pool1, conv2_weights, strides=[1, 1, 1, 1], padding='SAME')
        relu2 = tf.nn.relu(tf.nn.bias_add(conv2, conv2_biases))

    with tf.name_scope("layer4-pool2"):
        pool2 = tf.nn.max_pool(relu2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
        # pool2.get_shape() returns the dimensions of the fourth layer's output,
        # so they need not be computed by hand. Note that every layer's input
        # and output is a batch of matrices, so the shape also contains the
        # number of examples in a batch.
        pool_shape = pool2.get_shape().as_list()
        # Length of the vector obtained by flattening the matrix: the product of
        # its height, width, and depth. For 28x28 inputs this is 7 * 7 * 64 = 3136.
        # Note that pool_shape[0] is the number of examples in a batch.
        nodes = pool_shape[1] * pool_shape[2] * pool_shape[3]
        # tf.reshape flattens the fourth layer's output into a batch of vectors
        reshaped = tf.reshape(pool2, [pool_shape[0], nodes])

    # Dropout is generally applied only to fully connected layers,
    # not to convolutional or pooling layers
    with tf.variable_scope('layer5-fc1'):
        fc1_weights = tf.get_variable("weight", [nodes, FC_SIZE],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        # Only the fully connected layers' weights need regularization
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc1_weights))
        fc1_biases = tf.get_variable("bias", [FC_SIZE], initializer=tf.constant_initializer(0.1))

        fc1 = tf.nn.relu(tf.matmul(reshaped, fc1_weights) + fc1_biases)
        # When train is True, apply dropout: each neuron of this layer is kept
        # with probability 0.5, i.e. roughly half are deactivated per step
        if train:
            fc1 = tf.nn.dropout(fc1, 0.5)

    with tf.variable_scope('layer6-fc2'):
        fc2_weights = tf.get_variable("weight", [FC_SIZE, NUM_LABELS],
                                      initializer=tf.truncated_normal_initializer(stddev=0.1))
        if regularizer is not None:
            tf.add_to_collection('losses', regularizer(fc2_weights))
        fc2_biases = tf.get_variable("bias", [NUM_LABELS], initializer=tf.constant_initializer(0.1))
        logit = tf.matmul(fc1, fc2_weights) + fc2_biases

    return logit
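
As a quick sanity check, the forward pass can be built on its own. A minimal sketch, assuming the code above is saved as LeNet5_infernece.py (the module name the training script below imports):

import tensorflow as tf
import LeNet5_infernece

# Build the graph for a batch of 100 images and check the logit shape.
x = tf.placeholder(tf.float32, [100, LeNet5_infernece.IMAGE_SIZE,
                                LeNet5_infernece.IMAGE_SIZE,
                                LeNet5_infernece.NUM_CHANNELS])
logits = LeNet5_infernece.inference(x, train=False, regularizer=None)
print(logits.get_shape())  # expected: (100, 10)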

Training the LeNet-based MNIST model

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import LeNet5_infernece
import os
import numpy as np

# #### 1. Define the network's hyperparameters

BATCH_SIZE = 100  # batch size
LEARNING_RATE_BASE = 0.01  # base learning rate
LEARNING_RATE_DECAY = 0.99  # learning rate decay rate
REGULARIZATION_RATE = 0.0001  # L2 regularization rate
TRAINING_STEPS = 6000  # number of training steps
MOVING_AVERAGE_DECAY = 0.99  # moving average decay rate


# #### 2. Define the training procedure

def train(mnist):
    # Placeholder for the input: a 4-D tensor of shape [batch, height, width, channels]
    x = tf.placeholder(tf.float32, [
        BATCH_SIZE,
        LeNet5_infernece.IMAGE_SIZE,
        LeNet5_infernece.IMAGE_SIZE,
        LeNet5_infernece.NUM_CHANNELS],
                       name='x-input')
    # y_ holds the ground-truth labels
    y_ = tf.placeholder(tf.float32, [None, LeNet5_infernece.OUTPUT_NODE], name='y-input')

    # Define L2 regularization
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = LeNet5_infernece.inference(x, False, regularizer)  # dropout disabled, regularization enabled
    global_step = tf.Variable(0, trainable=False)

    # Define the loss function, learning rate, moving-average op, and training op.
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    # Apply the moving-average model to all trainable variables
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Define the cross-entropy loss; tf.argmax converts the one-hot labels to
    # class indices, which is what the sparse variant expects
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Add the weights' L2 regularization terms to the loss
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Define the exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        mnist.train.num_examples / BATCH_SIZE, LEARNING_RATE_DECAY,
        staircase=True)
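    # With staircase=True the learning rate decays once per epoch:
    #   learning_rate = LEARNING_RATE_BASE
    #                   * LEARNING_RATE_DECAY ** floor(global_step / decay_steps)
    # where decay_steps = num_examples / BATCH_SIZE; for MNIST's 55000 training
    # images that means the rate drops by 1% every 550 steps.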

    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Equivalent alternative:
    # with tf.control_dependencies([train_step, variables_averages_op]):
    #     train_op = tf.no_op(name='train')
    # Update the variables' moving averages alongside each gradient-descent step
    train_op = tf.group(train_step, variables_averages_op)
    # Initialize the TensorFlow persistence class.
    saver = tf.train.Saver()
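    # Note: the Saver is created here but this script never writes a checkpoint.
    # To reuse the trained (moving-averaged) weights, e.g. in the evaluation
    # sketch after this script, add a call such as
    # saver.save(sess, "./model/lenet5.ckpt") after the training loop
    # (the path is illustrative).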
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)

            reshaped_xs = np.reshape(xs, (
                BATCH_SIZE,
                LeNet5_infernece.IMAGE_SIZE,
                LeNet5_infernece.IMAGE_SIZE,
                LeNet5_infernece.NUM_CHANNELS))
            _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: reshaped_xs, y_: ys})

            if i % 1000 == 0:
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))


# #### 3. Main entry point

def main(argv=None):
    mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)
    train(mnist)


if __name__ == '__main__':
    main()
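
At test time, the point of the moving-average model is to evaluate with the shadow (averaged) weights rather than the raw ones. Assuming a checkpoint has been written (see the note after tf.train.Saver() above; the path ./model/lenet5.ckpt is hypothetical), a minimal evaluation sketch looks like this: variables_to_restore() maps each variable name to its shadow variable, so restoring the checkpoint loads the averaged values.

import numpy as np
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import LeNet5_infernece

def evaluate(mnist, checkpoint_path="./model/lenet5.ckpt"):  # hypothetical checkpoint path
    n = mnist.validation.num_examples
    x = tf.placeholder(tf.float32, [n, LeNet5_infernece.IMAGE_SIZE,
                                    LeNet5_infernece.IMAGE_SIZE,
                                    LeNet5_infernece.NUM_CHANNELS], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, LeNet5_infernece.OUTPUT_NODE], name='y-input')
    # No dropout and no regularization at evaluation time
    y = LeNet5_infernece.inference(x, False, None)
    correct = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    # Map every variable to its moving-average shadow so that restoring the
    # checkpoint loads the averaged weights (the decay value is irrelevant here).
    variable_averages = tf.train.ExponentialMovingAverage(0.99)
    saver = tf.train.Saver(variable_averages.variables_to_restore())

    with tf.Session() as sess:
        saver.restore(sess, checkpoint_path)
        xs = np.reshape(mnist.validation.images,
                        (n, LeNet5_infernece.IMAGE_SIZE,
                         LeNet5_infernece.IMAGE_SIZE, LeNet5_infernece.NUM_CHANNELS))
        print("validation accuracy = %g" %
              sess.run(accuracy, feed_dict={x: xs, y_: mnist.validation.labels}))

if __name__ == '__main__':
    evaluate(input_data.read_data_sets("./MNIST_data", one_hot=True))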
