
TensorFlow Deep Learning Notes (2) -- Visualizing BPNN Handwritten Digit Recognition

Dataset: MNIST. Activation function: ReLU. Loss function: cross-entropy. Optimizer: AdamOptimizer. Visualization tool: TensorBoard.
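For reference (notation mine; the formula is standard and not spelled out in the original post), the cross-entropy loss minimized below, for a one-hot label vector $y$ and softmax output $\hat{y}$ over the 10 digit classes, is:

$$L(y, \hat{y}) = -\sum_{i=1}^{10} y_i \log \hat{y}_i$$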

After training for 21 epochs, the accuracy results (last five epochs shown) are as follows:

Iter 16, Testing Accuracy: 0.9824, Training Accuracy: 0.9949273
Iter 17, Testing Accuracy: 0.9822, Training Accuracy: 0.99496365
Iter 18, Testing Accuracy: 0.9832, Training Accuracy: 0.9950182
Iter 19, Testing Accuracy: 0.983, Training Accuracy: 0.99503636
Iter 20, Testing Accuracy: 0.983, Training Accuracy: 0.9950727

Visualization in TensorBoard:

Graph: [image: computation graph]

Accuracy curve: [image]
Loss curve: [image]

Parameter curves for part of the first layer:
Bias mean: [image]
Bias max: [image]
Weight mean: [image]
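These plots are produced by TensorBoard reading the summary files that the script below writes to log/. Assuming a standard TensorFlow installation, the dashboard is launched with

tensorboard --logdir=log/

and then viewed at http://localhost:6006 in a browser.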

The code is as follows:

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


# load the data
mnist = input_data.read_data_sets('MNIST_data',one_hot=True)
batch_size = 100
n_batch = mnist.train.num_examples//batch_size


def variable_summaries(var):
    # attach mean/stddev/max/min/histogram summaries to a tensor
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)
        tf.summary.scalar('max', tf.reduce_max(var))
        tf.summary.scalar('min', tf.reduce_min(var))
        tf.summary.histogram('histogram', var)


with tf.name_scope('input'):
    x = tf.placeholder(tf.float32, [None, 784])
    y = tf.placeholder(tf.float32, [None, 10])
    keep_prob = tf.placeholder(tf.float32)
    lr = tf.Variable(0.001, dtype=tf.float32)

with tf.name_scope('layer'):
    with tf.name_scope('layer1'):
        with tf.name_scope('weights_L1'):
            weights_L1 = tf.Variable(tf.truncated_normal([784, 500], stddev=0.1))
            variable_summaries(weights_L1)
        with tf.name_scope('biases_L1'):
            biases_L1 = tf.Variable(tf.zeros([500]) + 0.1)
            variable_summaries(biases_L1)
        wx_plus_b_L1 = tf.matmul(x, weights_L1) + biases_L1
        L1 = tf.nn.relu(wx_plus_b_L1)
        L1_drop = tf.nn.dropout(L1, keep_prob)
    with tf.name_scope('layer2'):
        with tf.name_scope('weights_L2'):
            weights_L2 = tf.Variable(tf.truncated_normal([500, 100], stddev=0.1))
            variable_summaries(weights_L2)
        with tf.name_scope('biases_L2'):
            biases_L2 = tf.Variable(tf.zeros([100]) + 0.1)
            variable_summaries(biases_L2)
        wx_plus_b_L2 = tf.matmul(L1_drop, weights_L2) + biases_L2
        L2 = tf.nn.relu(wx_plus_b_L2)
        L2_drop = tf.nn.dropout(L2, keep_prob)
    with tf.name_scope('layer3'):
        with tf.name_scope('weights_L3'):
            weights_L3 = tf.Variable(tf.truncated_normal([100, 10], stddev=0.1))
            variable_summaries(weights_L3)
        with tf.name_scope('biases_L3'):
            biases_L3 = tf.Variable(tf.zeros([10]) + 0.1)
            variable_summaries(biases_L3)
        logits = tf.matmul(L2_drop, weights_L3) + biases_L3
        predictions = tf.nn.softmax(logits)

with tf.name_scope('loss'):
    # softmax_cross_entropy_with_logits applies softmax internally,
    # so it must be fed the raw logits, not the softmax output
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=logits))
    tf.summary.scalar('loss', loss)

with tf.name_scope('train'):
    train_step = tf.train.AdamOptimizer(lr).minimize(loss)

with tf.name_scope('accuracy'):
    correct_predictions = tf.equal(tf.argmax(y, 1), tf.argmax(predictions, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
    tf.summary.scalar('accuracy', accuracy)

# merge all summaries
merged = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    writer = tf.summary.FileWriter('log/', graph=sess.graph)
    for epoch in range(21):
        # exponential learning-rate decay: lr = 0.001 * 0.98**epoch
        sess.run(tf.assign(lr, 0.001 * (0.98 ** epoch)))
        for batch in range(n_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # keep_prob=1.0, so dropout is effectively disabled during training
            summary, _ = sess.run([merged, train_step],
                                  feed_dict={x: batch_x, y: batch_y, keep_prob: 1.0})
        # one summary point per epoch
        writer.add_summary(summary, epoch)
        test_acc = sess.run(accuracy, feed_dict={x: mnist.test.images,
                                                 y: mnist.test.labels, keep_prob: 1.0})
        train_acc = sess.run(accuracy, feed_dict={x: mnist.train.images,
                                                  y: mnist.train.labels, keep_prob: 1.0})
        print('Iter ' + str(epoch) + ', Testing Accuracy: ' + str(test_acc) +
              ', Training Accuracy: ' + str(train_acc))
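A possible refinement (my sketch, not part of the original code): writing training and test summaries through two separate FileWriter instances pointed at sibling subdirectories makes TensorBoard overlay the two accuracy curves, so the train/test gap is visible directly.

# Sketch, assuming the graph and session above are already built.
train_writer = tf.summary.FileWriter('log/train', sess.graph)
test_writer = tf.summary.FileWriter('log/test')

# Inside the epoch loop, log the same merged summaries for both sets:
train_writer.add_summary(summary, epoch)
test_summary = sess.run(merged, feed_dict={x: mnist.test.images,
                                           y: mnist.test.labels,
                                           keep_prob: 1.0})
test_writer.add_summary(test_summary, epoch)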