
Implementing an LSTM neural network model in Python

'''
Implementing a recurrent neural network (LSTM) with TensorFlow
'''
from __future__ import print_function

import tensorflow as tf
from tensorflow.contrib import rnn
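# Note: tensorflow.contrib exists only in TensorFlow 1.x, so this script targets
# the TF 1.x API; a rough tf.keras sketch for TF 2.x is given after the script.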

# Import the MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/",one_hot=True)
'''
To classify images with a recurrent neural network, we treat every image
row as a sequence of pixels. Since MNIST images are 28*28 px, each sample
is processed as 28 time steps of 28-element sequences.
'''
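# Illustration (not part of the original script): the flat 784-pixel vectors
# returned by the reader map onto this (timesteps, num_input) layout with a
# simple reshape; the names below are only for exposition, e.g.
#   first_image = mnist.train.images[0]        # shape (784,)
#   as_sequence = first_image.reshape(28, 28)  # 28 time steps of 28 pixels each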
# Training parameters
learning_rate = 0.001
training_steps = 10000
batch_size = 128
display_step = 200

# Network parameters
num_input = 28      # each row of an MNIST image (28 pixels)
timesteps = 28      # number of rows, i.e. time steps
num_hidden = 128    # number of hidden units in the LSTM cell
num_classes = 10    # MNIST classes (digits 0-9)

# tf Graph input
X = tf.placeholder("float", [None, timesteps, num_input])
Y = tf.placeholder("float", [None, num_classes])

# Define the weights
weights = {
    'out': tf.Variable(tf.random_normal([num_hidden, num_classes]))
}
biases = {
    'out': tf.Variable(tf.random_normal([num_classes]))
}


def RNN(x, weights, biases):
    # Prepare the data shape to match what `rnn` expects.
    # Current input shape: (batch_size, timesteps, num_input)
    # Required shape: a list of 'timesteps' tensors of shape (batch_size, num_input)

    # Unstack to get a list of 'timesteps' tensors of shape (batch_size, num_input)
    x = tf.unstack(x, timesteps, 1)

    # Define an LSTM cell with TensorFlow
    lstm_cell = rnn.BasicLSTMCell(num_hidden, forget_bias=1.0)

    # Get the LSTM cell outputs
    outputs, states = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)

    # Linear activation, using the last output of the RNN's inner loop
    return tf.matmul(outputs[-1], weights['out']) + biases['out']

logits = RNN(X, weights, biases)
prediction = tf.nn.softmax(logits)

# Define the loss and the optimizer
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(loss_op)

# Evaluate the model (with test logits, dropout disabled)
correct_pred = tf.equal(tf.argmax(prediction, 1), tf.argmax(Y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initialize the variables
init = tf.global_variables_initializer()

# Start training
with tf.Session() as sess:
    sess.run(init)
    for step in range(1, training_steps + 1):
        batch_x, batch_y = mnist.train.next_batch(batch_size)
        # Reshape the data into 28 sequences of 28 elements
        batch_x = batch_x.reshape((batch_size, timesteps, num_input))
        # Run the optimization op (backprop)
        sess.run(train_op, feed_dict={X: batch_x, Y: batch_y})
        if step % display_step == 0 or step == 1:
            # Calculate the batch loss and accuracy
            loss, acc = sess.run([loss_op, accuracy], feed_dict={X: batch_x, Y: batch_y})
            print("Step " + str(step) + ", Minibatch Loss= " +
                  "{:.4f}".format(loss) + ", Training Accuracy= " +
                  "{:.3f}".format(acc))
    print("Optimization finished")

    # Compute the accuracy on 128 MNIST test images
    test_len = 128
    test_data = mnist.test.images[:test_len].reshape((-1, timesteps, num_input))
    test_label = mnist.test.labels[:test_len]
    print("Testing Accuracy:",
          sess.run(accuracy, feed_dict={X: test_data, Y: test_label}))
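The script above depends on TF 1.x-only APIs (placeholders, sessions, and tensorflow.contrib.rnn). As a rough sketch only, and not part of the original post, the same single-layer 128-unit LSTM classifier could be written against TensorFlow 2.x with tf.keras roughly as follows (layer size, batch size, and learning rate carried over from the script above):

import tensorflow as tf

# MNIST images are already 28x28, i.e. 28 time steps of 28 features each
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype("float32") / 255.0
x_test = x_test.astype("float32") / 255.0

# A single LSTM layer with 128 hidden units followed by a 10-way softmax
model = tf.keras.Sequential([
    tf.keras.layers.LSTM(128, input_shape=(28, 28)),
    tf.keras.layers.Dense(10, activation="softmax"),
])

model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])

model.fit(x_train, y_train, batch_size=128, epochs=1)
print("Testing Accuracy:", model.evaluate(x_test, y_test)[1])

The plain SGD optimizer at learning rate 0.001 mirrors the original script's choice rather than recommending it; with only one epoch it will likely train slowly.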