
Training Your Own Dataset with TensorFlow (Part 3): Defining the Backpropagation Process

The previous post, Training Your Own Dataset with TensorFlow (Part 2), covered defining the neural network. The next step is to define the backpropagation process that trains it. The training code below uses an exponential moving average over the trainable variables and an exponentially decaying learning rate to help the network converge.
P.S. Without GPU acceleration the training process is painfully slow (5th-gen i7, AMD graphics card, 8 GB DDR3 RAM).
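For intuition before the full script: with staircase=True, the exponentially decaying learning rate multiplies the base rate by the decay factor once per decay period (here, one pass over the training set). A minimal pure-Python sketch of that schedule, using the same hyperparameters as the script below (the helper function name is my own, not part of the series' code):

# Sketch of the staircase exponential-decay schedule used below:
#   decayed_lr = LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step // decay_steps)
def decayed_learning_rate(global_step,
                          base_lr=0.01,      # LEARNING_RATE_BASE
                          decay_rate=0.99,   # LEARNING_RATE_DECAY
                          decay_steps=175):  # train_num_examples / BACTH_SIZE
    return base_lr * decay_rate ** (global_step // decay_steps)

print(decayed_learning_rate(0))     # 0.01     during the first decay period
print(decayed_learning_rate(175))   # 0.0099   after one period
print(decayed_learning_rate(1750))  # ~0.00904 after ten periods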

import tensorflow as tf
import forward
import os
import genertateds

# Neural-network hyperparameters
BACTH_SIZE = 100
LEARNING_RATE_BASE = 0.01
LEARNING_RATE_DECAY = 0.99
REGULARAZTION_RATE = 0.0001
TRAINING_STEPS = 10000
MOVING_AVERAGE_DECAY = 0.99
train_num_examples = 17500

# Path and file name for saving the model
MODEL_SAVE_PATH = "LeNet5_model_of_catvsdog/"
MODEL_NAME = "LeNet5_model_of_catvsdog"

# Define the training process
def train():
    # Placeholders for the input images and the labels
    x = tf.placeholder(tf.float32, [
        BACTH_SIZE,
        forward.IMAGE_SIZE,
        forward.IMAGE_SIZE,
        forward.NUM_CHANNELS])
    y_ = tf.placeholder(tf.int32, [None], name='y-input')  # labels are ints
    y = forward.inference(x, True, REGULARAZTION_RATE)     # training needs regularization
    global_step = tf.Variable(0, trainable=False)          # step counter; not trainable

    # Define the moving-average class; apply() keeps a shadow copy of every trainable variable
    variable_average = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variable_average_op = variable_average.apply(tf.trainable_variables())

    # Define the loss function
    # cross_entropy_mean = tf.reduce_mean(tf.square(y - y_))  # (mean-squared-error alternative)
    # Cross-entropy loss for a softmax output layer
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=y_)
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + the regularization terms collected in 'losses'
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Define the exponentially decaying learning rate
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        train_num_examples / BACTH_SIZE,
        LEARNING_RATE_DECAY,
        staircase=True)

    # Use the AdamOptimizer; minimize() also increments global_step
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss, global_step=global_step)

    # Group the weight update and the moving-average update into a single training op
    with tf.control_dependencies([train_step, variable_average_op]):
        train_op = tf.no_op(name='train')

    # Initialize the TensorFlow persistence class (Saver)
    saver = tf.train.Saver()

    # Read the training set; the batch size must match the placeholder's first dimension
    image_batch, label_batch = genertateds.get_batch_record(
        genertateds.train_record_path, BACTH_SIZE)

    with tf.Session() as sess:
        # Initialize all variables
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
        # Checkpoint check: if a checkpoint exists, resume the previous training run
        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        # Create the input-queue threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess, coord)
        # Start training
        for i in range(TRAINING_STEPS):
            xs, ys = sess.run([image_batch, label_batch])
            _, loss_value, step = sess.run([train_op, loss, global_step],
                                           feed_dict={x: xs, y_: ys})
            # Save the model every 100 steps
            if i % 100 == 0:
                # Report the current training status
                print("After %d training step(s), loss on training batch is %g."
                      % (step, loss_value))
                # Save the current model
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)
        # Shut down the threads
        coord.request_stop()
        coord.join(threads)

def main():
    train()

if __name__ == '__main__':
    main()
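A note on the moving-average op above: each run of variable_average_op nudges every shadow copy toward its variable's current value. Because global_step is passed as num_updates, TensorFlow uses an effective decay of min(MOVING_AVERAGE_DECAY, (1 + step) / (10 + step)), so the shadows track quickly early in training. A runnable pure-Python sketch of that update rule (illustration only; TensorFlow actually initializes each shadow to the variable's current value rather than 0.0):

MOVING_AVERAGE_DECAY = 0.99

def update_shadow(shadow, value, step):
    # Effective decay grows from ~0.1 toward MOVING_AVERAGE_DECAY as training proceeds
    decay = min(MOVING_AVERAGE_DECAY, (1 + step) / (10 + step))
    # Update rule used by tf.train.ExponentialMovingAverage:
    #   shadow = decay * shadow + (1 - decay) * value
    return decay * shadow + (1 - decay) * value

shadow = 0.0
for step in range(4):
    shadow = update_shadow(shadow, 1.0, step)  # pretend the variable is stuck at 1.0
    print(step, round(shadow, 4))              # 0.9, 0.9818, 0.9955, 0.9986

These shadow values are what the evaluation graph should load: at test time, ExponentialMovingAverage.variables_to_restore() maps each variable name to its shadow copy, so saver.restore() reads the smoothed weights from the checkpoint instead of the raw ones.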

The next post will cover computing the accuracy.
If you spot any mistakes, corrections are welcome~~