tensorflow 學習筆記12 迴圈神經網路RNN LSTM結構實現MNIST手寫識別
阿新 • • 發佈:2019-02-14
長短時記憶網路(LSTM)就是為了解決在複雜的場景中,有用資訊的間隔有大有小、長短不一問題。LSTM是一種擁有三個門結構的特殊網路結構。
LSTM靠一些門的結構讓資訊有選擇的影響迴圈神經網路中每個時刻的狀態。所謂門的結構就是一個使用sigmoid神經網路和按位做乘法的操作,這兩個操作合在一起就是一個門的結構。當門開啟時(sigmoid神經網路層輸出為1時),全部資訊都可以通過;當門關上時(sigmoid神經網路層輸出為0時),任何資訊都無法通過。
輸入門(決定哪些資訊更新):$i_t = \sigma(W_i \cdot [h_{t-1}, x_t] + b_i)$
遺忘門(決定丟棄哪些資訊):$f_t = \sigma(W_f \cdot [h_{t-1}, x_t] + b_f)$
輸出門(決定輸出哪些資訊):$o_t = \sigma(W_o \cdot [h_{t-1}, x_t] + b_o)$
資訊流:$h_{t-1}$、$x_t$、$c_{t-1}$
候選新資訊:$\tilde{c}_t = \tanh(W_c \cdot [h_{t-1}, x_t] + b_c)$
歷史資訊累積:$c_t = f_t * c_{t-1} + i_t * \tilde{c}_t$
輸出資訊:$h_t = o_t * \tanh(c_t)$
程式碼實現:
"""MNIST digit classification with a single-layer LSTM (TensorFlow 1.x).

Each 28x28 image is fed to the RNN as a sequence of n_steps=28 rows with
n_inputs=28 pixels per row.  The LSTM's last hidden state is projected onto
the 10 digit classes by a fully connected output layer.
"""
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

# Hyperparameters.
learning_rate = 0.001
training_iters = 100000   # stop after roughly this many training examples
batch_size = 128
n_inputs = 28             # input vector per step: one image row (28 pixels)
n_steps = 28              # sequence length: 28 rows per image
n_hidden_units = 128      # LSTM hidden-state size
n_classes = 10            # MNIST classes (digits 0-9)

# Placeholders: x is (batch, n_steps, n_inputs); y is one-hot labels.
x = tf.placeholder(tf.float32, [None, n_steps, n_inputs])
y = tf.placeholder(tf.float32, [None, n_classes])

# Fully connected layers applied before ('in') and after ('out') the LSTM.
weights = {
    'in': tf.Variable(tf.random_normal([n_inputs, n_hidden_units])),    # (28, 128)
    'out': tf.Variable(tf.random_normal([n_hidden_units, n_classes])),  # (128, 10)
}
biases = {
    'in': tf.Variable(tf.constant(0.1, shape=[n_hidden_units, ])),      # (128,)
    'out': tf.Variable(tf.constant(0.1, shape=[n_classes, ])),          # (10,)
}


def RNN(X, weights, biases):
    """Return class logits of shape (batch, n_classes) for a batch of images.

    X has shape (batch, n_steps, n_inputs).  The graph works for any batch
    size: the LSTM zero state is derived from the actual batch dimension
    instead of being hard-coded to `batch_size`.
    """
    # Per-step input projection: fold time into the batch dimension,
    # apply the shared 'in' layer, then restore (batch, steps, hidden).
    # (batch, 28, 28) -> (batch*28, 28)
    X = tf.reshape(X, [-1, n_inputs])
    # (batch*28, 28) @ (28, 128) -> (batch*28, 128)
    X_in = tf.matmul(X, weights['in']) + biases['in']
    # -> (batch, 28 steps, 128 hidden)
    X_in = tf.reshape(X_in, [-1, n_steps, n_hidden_units])

    # forget_bias=1.0: start out forgetting nothing.
    cell = tf.contrib.rnn.BasicLSTMCell(
        n_hidden_units, forget_bias=1.0, state_is_tuple=True)

    # BUG FIX: the original passed initial_state=cell.zero_state(batch_size),
    # which pinned the graph to exactly `batch_size` examples and made it
    # unusable for evaluation on differently sized batches.  Supplying dtype
    # lets dynamic_rnn build a zero initial state matching the real batch.
    # time_major=False: n_steps is on axis 1 of X_in.
    outputs, final_state = tf.nn.dynamic_rnn(
        cell, X_in, dtype=tf.float32, time_major=False)

    # final_state is an LSTMStateTuple (c, h); h equals the last-step output
    # outputs[:, -1, :], without the original transpose/unstack detour.
    results = tf.matmul(final_state.h, weights['out']) + biases['out']
    return results  # shape (batch, 10)


pred = RNN(x, weights, biases)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)

correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    step = 0
    while step * batch_size < training_iters:
        # Draw a random training mini-batch.
        batch_xs, batch_ys = mnist.train.next_batch(batch_size)
        # Reshape flat 784-pixel images into (steps, inputs) sequences.
        batch_xs = batch_xs.reshape([batch_size, n_steps, n_inputs])
        sess.run(train_op, feed_dict={x: batch_xs, y: batch_ys})
        # Report training-batch accuracy every 20 steps.
        if step % 20 == 0:
            print(sess.run(accuracy, feed_dict={x: batch_xs, y: batch_ys}))
        step += 1
結果: