
3 Convolutional Neural Networks for MNIST - 3.4 The Relationship Between the Number of Convolution Filter Kernels and Network Performance
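The script below builds a minimal convolutional network for MNIST: one 5*5 convolutional layer with conv1_kernels_num kernels, a ReLU activation, 2*2 max pooling, and a single fully connected layer mapping the flattened features to the 10 class logits. The number of kernels, conv1_kernels_num, is the knob this section varies to study its effect on performance. One step worth spelling out is the shape arithmetic behind the FeatsReshape step: with VALID padding, the 5*5 convolution shrinks each 28*28 image to 24*24, and 2*2 max pooling with stride 2 halves that to 12*12, which is why the flattened feature vector has 12 * 12 * conv1_kernels_num elements. A quick plain-Python sanity check (not part of the script itself):

# Output size under VALID padding: out = (in - window) // stride + 1
conv_out = (28 - 5) // 1 + 1        # 24
pool_out = (conv_out - 2) // 2 + 1  # 12
for k in (1, 5, 16, 32):            # candidate kernel counts
    print('kernels=%d -> flattened features=%d' % (k, pool_out * pool_out * k))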


Code:

#-*- coding:utf-8 -*-
# A simple convolutional neural network for classifying the MNIST dataset:
# conv2d + activation + pool + fc
import csv
import os

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

# Algorithm hyperparameters
learning_rate_init = 0.001
training_epochs = 1
batch_size = 100
display_step = 10

# Network parameters
n_input = 784    # MNIST data input (img shape: 28*28)
n_classes = 10   # MNIST total classes (0-9 digits)

# Return a weight Variable of the given shape under the given name
def WeightsVariable(shape, name_str, stddev=0.1):
    initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    # initial = tf.truncated_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Return a bias Variable of the given shape under the given name
def BiasesVariable(shape, name_str, stddev=0.00001):
    initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    # initial = tf.constant(stddev, shape=shape)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Wrapper for a 2D convolution layer (conv2d + bias)
def Conv2d(x, W, b, stride=1, padding='SAME'):
    with tf.name_scope('Wx_b'):
        y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, b)
    return y

# Wrapper for a non-linear activation layer
def Activation(x, activation=tf.nn.relu, name='relu'):
    with tf.name_scope(name):
        y = activation(x)
    return y

# Wrapper for a 2D pooling layer
def Pool2d(x, pool=tf.nn.max_pool, k=2, stride=2):
    return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding='VALID')

# Wrapper for a fully connected layer: activate(Wx + b)
def FullyConnected(x, W, b, activate=tf.nn.relu, act_name='relu'):
    with tf.name_scope('Wx_b'):
        y = tf.matmul(x, W)
        y = tf.add(y, b)
    with tf.name_scope(act_name):
        y = activate(y)
    return y

# Generic evaluation helper: compute the model's loss and accuracy on a dataset
def EvaluateModelOnDataset(sess, images, labels):
    n_samples = images.shape[0]
    per_batch_size = 100
    loss = 0
    acc = 0
    # Evaluate small sets in one pass; split larger ones into batches,
    # mainly to avoid running out of memory
    if n_samples <= per_batch_size:
        batch_count = 1
        loss, acc = sess.run([cross_entropy_loss, accuracy],
                             feed_dict={X_origin: images,
                                        Y_true: labels,
                                        learning_rate: learning_rate_init})
    else:
        batch_count = int(n_samples / per_batch_size)
        batch_start = 0
        for idx in range(batch_count):
            batch_loss, batch_acc = sess.run(
                [cross_entropy_loss, accuracy],
                feed_dict={X_origin: images[batch_start:batch_start + per_batch_size, :],
                           Y_true: labels[batch_start:batch_start + per_batch_size, :],
                           learning_rate: learning_rate_init})
            batch_start += per_batch_size
            # Accumulate loss and accuracy over all batches
            loss += batch_loss
            acc += batch_acc
    # Return the averages
    return loss / batch_count, acc / batch_count

# Build the computation graph from the helpers above
with tf.Graph().as_default():
    # Graph inputs
    with tf.name_scope('Inputs'):
        X_origin = tf.placeholder(tf.float32, [None, n_input], name='X_origin')
        Y_true = tf.placeholder(tf.float32, [None, n_classes], name='Y_true')
        # Reshape the image data from an N*784 tensor into an N*28*28*1 tensor
        X_image = tf.reshape(X_origin, [-1, 28, 28, 1])

    # Forward inference
    with tf.name_scope('Inference'):
        # First convolutional layer (conv2d + bias)
        with tf.name_scope('Conv2d'):
            conv1_kernels_num = 5
            weights = WeightsVariable(shape=[5, 5, 1, conv1_kernels_num], name_str='weights')
            biases = BiasesVariable(shape=[conv1_kernels_num], name_str='biases')
            conv_out = Conv2d(X_image, weights, biases, stride=1, padding='VALID')
        # Non-linear activation layer
        with tf.name_scope('Activate'):
            activate_out = Activation(conv_out, activation=tf.nn.relu, name='relu')
        # First pooling layer (max pool 2d)
        with tf.name_scope('Pool2d'):
            pool_out = Pool2d(activate_out, pool=tf.nn.max_pool, k=2, stride=2)
        # Flatten the 2D feature maps into 1D feature vectors
        with tf.name_scope('FeatsReshape'):
            features = tf.reshape(pool_out, [-1, 12 * 12 * conv1_kernels_num])
        # First fully connected layer
        with tf.name_scope('FC_Linear'):
            weights = WeightsVariable(shape=[12 * 12 * conv1_kernels_num, n_classes], name_str='weights')
            biases = BiasesVariable(shape=[n_classes], name_str='biases')
            Ypred_logits = FullyConnected(features, weights, biases,
                                          activate=tf.identity, act_name='identity')

    # Loss layer
    with tf.name_scope('Loss'):
        cross_entropy_loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=Y_true, logits=Ypred_logits))

    # Training layer
    with tf.name_scope('Train'):
        learning_rate = tf.placeholder(tf.float32)
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        trainer = optimizer.minimize(cross_entropy_loss)

    # Evaluation layer
    with tf.name_scope('Evaluate'):
        correct_pred = tf.equal(tf.argmax(Ypred_logits, 1), tf.argmax(Y_true, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    # Initialization node for all variables
    init = tf.global_variables_initializer()

    print('Writing the graph to an event file; view it in TensorBoard')
    summary_writer = tf.summary.FileWriter(logdir='logs/excise313/', graph=tf.get_default_graph())
    summary_writer.close()

    # Load the MNIST data
    mnist = input_data.read_data_sets("../MNIST_data/", one_hot=True)

    # Collect evaluation results so they can be written to a file later
    results_list = list()
    # Record the parameter configuration
    results_list.append(['learning_rate', learning_rate_init,
                         'training_epochs', training_epochs,
                         'batch_size', batch_size,
                         'display_step', display_step,
                         'conv1_kernels_num', conv1_kernels_num])
    results_list.append(['train_step', 'train_loss', 'validation_loss',
                         'train_step', 'train_accuracy', 'validation_accuracy'])

    # Launch the computation graph
    with tf.Session() as sess:
        sess.run(init)
        total_batches = int(mnist.train.num_examples / batch_size)
        print("Per batch Size: ", batch_size)
        print("Train sample Count: ", mnist.train.num_examples)
        print("Total batch Count: ", total_batches)

        training_step = 0  # number of training steps taken so far
        # Train for the given number of epochs; each epoch passes over every
        # training sample once
        for epoch in range(training_epochs):
            # Run every batch in each epoch
            for batch_idx in range(total_batches):
                # Fetch a batch of data
                batch_x, batch_y = mnist.train.next_batch(batch_size)
                # Run the optimizer's training node (backprop)
                sess.run(trainer, feed_dict={X_origin: batch_x,
                                             Y_true: batch_y,
                                             learning_rate: learning_rate_init})
                # training_step grows by 1 per training call and ends up equal
                # to training_epochs * total_batches
                training_step += 1

                # Every display_step steps, compute the current model's loss
                # and classification accuracy
                if training_step % display_step == 0:
                    # Loss and accuracy on the display_step batches the model
                    # has seen most recently
                    start_idx = max(0, (batch_idx - display_step) * batch_size)
                    end_idx = batch_idx * batch_size
                    train_loss, train_acc = EvaluateModelOnDataset(
                        sess,
                        mnist.train.images[start_idx:end_idx, :],
                        mnist.train.labels[start_idx:end_idx, :])
                    print("Training Step: " + str(training_step) +
                          ", Training Loss= " + "{:.6f}".format(train_loss) +
                          ", Training Accuracy= " + "{:.5f}".format(train_acc))

                    # Loss and accuracy on the validation set
                    validation_loss, validation_acc = EvaluateModelOnDataset(
                        sess, mnist.validation.images, mnist.validation.labels)
                    print("Training Step: " + str(training_step) +
                          ", Validation Loss= " + "{:.6f}".format(validation_loss) +
                          ", Validation Accuracy= " + "{:.5f}".format(validation_acc))

                    # Record the evaluation results
                    results_list.append([training_step, train_loss, validation_loss,
                                         training_step, train_acc, validation_acc])

        print("Training finished!")

        # Accuracy on the test set
        test_samples_count = mnist.test.num_examples
        test_loss, test_accuracy = EvaluateModelOnDataset(
            sess, mnist.test.images, mnist.test.labels)
        print("Testing Samples Count:", test_samples_count)
        print("Testing Loss:", test_loss)
        print("Testing Accuracy:", test_accuracy)
        results_list.append(['test step', 'loss', test_loss, 'accuracy', test_accuracy])

        # Write the evaluation results to a CSV file
        results_file = open('evaluate_results/evaluate_results.csv', 'w', newline='')
        csv_writer = csv.writer(results_file, dialect='excel')
        for row in results_list:
            csv_writer.writerow(row)
        results_file.close()  # flush and close the results file
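The listing trains a single configuration (conv1_kernels_num = 5). To study the relationship in this section's title, the same experiment has to be re-run with different kernel counts and the resulting test accuracies compared. Below is a minimal sketch of how such a sweep could be automated, assuming the graph construction, training, and test evaluation above were refactored into a hypothetical train_and_test(conv1_kernels_num) helper that returns (test_loss, test_accuracy); both the helper name and the choice of kernel counts are illustrative, not part of the original script.

import csv

# Hypothetical sweep driver; train_and_test is assumed to rebuild the graph
# with the given number of conv1 kernels, train for training_epochs, and
# return the test-set loss and accuracy.
def sweep_kernel_counts(train_and_test, kernel_counts=(1, 2, 4, 8, 16, 32)):
    results = [['conv1_kernels_num', 'test_loss', 'test_accuracy']]
    for k in kernel_counts:
        test_loss, test_acc = train_and_test(conv1_kernels_num=k)
        print('kernels=%d  test_loss=%.6f  test_acc=%.5f' % (k, test_loss, test_acc))
        results.append([k, test_loss, test_acc])
    # One row per kernel count, ready for plotting accuracy vs. kernel count;
    # assumes the evaluate_results/ directory exists, as in the script above
    with open('evaluate_results/kernels_sweep.csv', 'w', newline='') as f:
        csv.writer(f, dialect='excel').writerows(results)
    return results

For this architecture the cost of extra kernels is easy to account for: each additional kernel adds 5*5*1 + 1 = 26 convolution parameters and 12*12*10 = 1440 fully connected weights, so model size grows linearly with the kernel count, while accuracy typically saturates well before the largest settings.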