88. Using TensorBoard for visualization: viewing per-node compute time, training step count, and memory usage
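The script below is a modified mnist_train: every 1000th training step is executed with tf.RunOptions(trace_level=FULL_TRACE) and a tf.RunMetadata proto, and the collected metadata is written into the TensorBoard log alongside the graph. TensorBoard can then display, for each traced step, how long each node took to execute and how much memory it used.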

'''
Created on 2017-05-23

@author: weizhen
'''
import os
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# The constants and the forward-propagation function defined in mnist_inference
# need no changes, because forward propagation already partitions the computation
# nodes along the network structure via tf.variable_scope
import mnist_inference
from mnist_train import MOVING_AVERAGE_DECAY, REGULARAZTION_RATE, \
    LEARNING_RATE_BASE, BATCH_SIZE, LEARNING_RATE_DECAY, TRAINING_STEPS, MODEL_SAVE_PATH, MODEL_NAME
INPUT_NODE = 784
OUTPUT_NODE = 10
LAYER1_NODE = 500

def train(mnist):
    # Group all input-processing computation under the "input" name scope
    with tf.name_scope("input"):
        x = tf.placeholder(tf.float32, [None, mnist_inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, mnist_inference.OUTPUT_NODE], name='y-input')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARAZTION_RATE)
    y = mnist_inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)

    # Group all moving-average computation under the "moving_average" name scope
    with tf.name_scope("moving_average"):
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Group all loss computation under the "loss_function" name scope
    with tf.name_scope("loss_function"):
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
        cross_entropy_mean = tf.reduce_mean(cross_entropy)
        loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))

    # Group the learning rate, the optimizer, and the ops every training step
    # must run under the "train_step" name scope
    with tf.name_scope("train_step"):
        learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE, global_step,
            mnist.train.num_examples / BATCH_SIZE,
            LEARNING_RATE_DECAY, staircase=True)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
        with tf.control_dependencies([train_step, variable_averages_op]):
            train_op = tf.no_op(name='train')

    # Train the model. Create the FileWriter once, so that the graph and every
    # batch of run metadata end up in the same log file.
    writer = tf.summary.FileWriter("/log/modified_mnist_train.log", tf.get_default_graph())
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.train.next_batch(BATCH_SIZE)
            if i % 1000 == 0:
                # Configure which runtime information to record
                run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                # Proto that receives the recorded runtime information
                run_metadata = tf.RunMetadata()
                _, loss_value, step = sess.run(
                    [train_op, loss, global_step],
                    feed_dict={x: xs, y_: ys},
                    options=run_options,
                    run_metadata=run_metadata)
                # Attach the compute-time and memory metadata under a per-step tag
                writer.add_run_metadata(run_metadata, "step%03d" % i)
                print("After %d training step(s), loss on training batch is %g." % (step, loss_value))
            else:
                _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
    writer.close()

    # Initialize the TensorFlow persistence class
    # saver = tf.train.Saver()
    # with tf.Session() as sess:
    #     tf.global_variables_initializer().run()
    #     # The model is no longer evaluated on the validation data during training;
    #     # validation and testing are handled by a separate program
    #     for i in range(TRAINING_STEPS):
    #         xs, ys = mnist.train.next_batch(BATCH_SIZE)
    #         _, loss_value, step = sess.run([train_op, loss, global_step], feed_dict={x: xs, y_: ys})
    #         # Save the model every 1000 steps
    #         if i % 1000 == 0:
    #             # Print the current training status. Only the loss on the current
    #             # training batch is reported; it gives a rough picture of how training
    #             # is going. Accuracy on the validation set is produced by a separate program.
    #             print("After %d training step(s), loss on training batch is %g" % (step, loss_value))
    #             # Save the current model. Passing global_step appends the training step
    #             # count to the checkpoint filename, e.g. "model.ckpt-1000" is the model
    #             # obtained after 1000 training steps
    #             saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
    #             # Export the current computation graph to the TensorBoard log file
    #             # writer = tf.summary.FileWriter("/path/to/log", tf.get_default_graph())
    #             # writer.close()

def main(argv=None):
    mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
    train(mnist)

if __name__ == '__main__':
    tf.app.run()
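After training, point TensorBoard at the log directory and open the GRAPHS tab. Because the metadata was attached with add_run_metadata, each traced step (step000, step1000, ...) appears in the "Session runs" selector, and choosing one lets you color the graph nodes by compute time or by memory, which is where the per-node execution time and memory usage from the title become visible:

tensorboard --logdir=/log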
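The same RunMetadata can also be inspected without TensorBoard. Below is a minimal sketch, assuming TensorFlow 1.x where tensorflow.python.client.timeline is available; the helper name dump_chrome_trace is ours, not part of the original code:

# Sketch only: export the per-node timings recorded in run_metadata to a JSON
# trace that chrome://tracing can open (dump_chrome_trace is a hypothetical helper)
from tensorflow.python.client import timeline

def dump_chrome_trace(run_metadata, path="timeline.json"):
    # Timeline wraps the StepStats proto filled in by trace_level=FULL_TRACE
    tl = timeline.Timeline(run_metadata.step_stats)
    with open(path, "w") as f:
        f.write(tl.generate_chrome_trace_format())

Calling dump_chrome_trace(run_metadata, "timeline_%d.json" % i) right after the instrumented sess.run would write one trace file per traced step.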