
TensorFlow (8): TensorBoard Visualization


import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from tensorflow.contrib.tensorboard.plugins import projector


# Load the MNIST dataset
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# Number of training steps
max_steps = 1001
# Number of images used for the embedding
image_num = 3000  # at most 10000, since the test set has 10000 images
# File path
DIR = "C:/Users/FELIX/Desktop/tensor學習/"

# Define the session
sess = tf.Session()

# Load the test images as the embedding variable
embedding = tf.Variable(tf.stack(mnist.test.images[:image_num]), trainable=False, name='embedding')

# Parameter summaries
def variable_summaries(var):
    with tf.name_scope('summaries'):
        mean = tf.reduce_mean(var)
        tf.summary.scalar('mean', mean)  # mean
        with tf.name_scope('stddev'):
            stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
        tf.summary.scalar('stddev', stddev)  # standard deviation
        tf.summary.scalar('max', tf.reduce_max(var))  # maximum
        tf.summary.scalar('min', tf.reduce_min(var))  # minimum
        tf.summary.histogram('histogram', var)  # histogram

# Name scope
with tf.name_scope('input'):
    # None means the first dimension can have any length
    x = tf.placeholder(tf.float32, [None, 784], name='x-input')
    # correct labels
    y = tf.placeholder(tf.float32, [None, 10], name='y-input')

# Display the input images
with tf.name_scope('input_reshape'):
    image_shaped_input = tf.reshape(x, [-1, 28, 28, 1])  # -1 means the size is inferred
    tf.summary.image('input', image_shaped_input, 10)  # show 10 images in total

with tf.name_scope('layer'):
    # Build a simple neural network
    with tf.name_scope('weights'):
        W = tf.Variable(tf.zeros([784, 10]), name='W')
        variable_summaries(W)
    with tf.name_scope('biases'):
        b = tf.Variable(tf.zeros([10]), name='b')
        variable_summaries(b)
    with tf.name_scope('wx_plus_b'):
        wx_plus_b = tf.matmul(x, W) + b
    with tf.name_scope('softmax'):
        prediction = tf.nn.softmax(wx_plus_b)

with tf.name_scope('loss'):
    # Cross-entropy cost function
    # (strictly, softmax_cross_entropy_with_logits_v2 expects the unnormalized
    # logits wx_plus_b; passing the softmax output still trains, as here)
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=y, logits=prediction))
    tf.summary.scalar('loss', loss)
with tf.name_scope('train'):
    # Use gradient descent
    train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)

# Initialize variables
sess.run(tf.global_variables_initializer())

with tf.name_scope('accuracy'):
    with tf.name_scope('correct_prediction'):
        # Results are stored in a boolean list
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(prediction, 1))  # argmax returns the index of the largest value in a 1-D tensor
    with tf.name_scope('accuracy'):
        # Compute the accuracy
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast correct_prediction to float32
        tf.summary.scalar('accuracy', accuracy)

# Generate the metadata file
if tf.gfile.Exists(DIR + 'projector/projector/metadata.tsv'):  # check whether it already exists
    tf.gfile.DeleteRecursively(DIR + 'projector/projector/metadata.tsv')
with open(DIR + 'projector/projector/metadata.tsv', 'w') as f:
    labels = sess.run(tf.argmax(mnist.test.labels[:], 1))
    for i in range(image_num):
        f.write(str(labels[i]) + '\n')

# Merge all summaries
merged = tf.summary.merge_all()

projector_writer = tf.summary.FileWriter(DIR + 'projector/projector', sess.graph)
saver = tf.train.Saver()  # used to save the model
config = projector.ProjectorConfig()  # projector configuration
embed = config.embeddings.add()
embed.tensor_name = embedding.name
embed.metadata_path = DIR + 'projector/projector/metadata.tsv'
embed.sprite.image_path = DIR + 'projector/data/mnist_10k_sprite.png'
embed.sprite.single_image_dim.extend([28, 28])
projector.visualize_embeddings(projector_writer, config)  # set up the embedding visualization

for i in range(max_steps):
    # 100 samples per batch
    batch_xs, batch_ys = mnist.train.next_batch(100)
    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
    run_metadata = tf.RunMetadata()
    summary, _ = sess.run([merged, train_step], feed_dict={x: batch_xs, y: batch_ys},
                          options=run_options, run_metadata=run_metadata)
    projector_writer.add_run_metadata(run_metadata, 'step%03d' % i)
    projector_writer.add_summary(summary, i)
    # print the test accuracy every 100 steps
    if i % 100 == 0:
        acc = sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Iter " + str(i) + ", Testing Accuracy= " + str(acc))

# Save the model after training
saver.save(sess, DIR + 'projector/projector/a_model.ckpt', global_step=max_steps)
projector_writer.close()
sess.close()

Before running the script, create a projector folder in the current directory, then create data and projector subfolders inside that projector folder.

Put the data image (the mnist_10k_sprite.png sprite referenced in the code) into the data folder --> data image download link, extraction code: vhkl
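If you would rather set up these folders from Python than by hand, a minimal sketch is shown below (it only assumes the same DIR as in the script above; the folder names mirror the paths the script reads from and writes to):

import os

DIR = "C:/Users/FELIX/Desktop/tensor學習/"  # same path as in the script above

# The script writes logs, metadata.tsv and the checkpoint into projector/projector,
# and reads the sprite image from projector/data.
os.makedirs(DIR + "projector/projector", exist_ok=True)
os.makedirs(DIR + "projector/data", exist_ok=True)

sprite = DIR + "projector/data/mnist_10k_sprite.png"
if not os.path.exists(sprite):
    print("Sprite image missing, download mnist_10k_sprite.png into projector/data first:", sprite)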

After running the script, open cmd, change into the current directory, and run: tensorboard --logdir=C:\Users\FELIX\Desktop\tensor學習\projector\projector
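If the projector tab comes up empty, first check that the script actually wrote its outputs. A quick sanity check, as a minimal sketch (assuming the same DIR as above):

import os

log_dir = "C:/Users/FELIX/Desktop/tensor學習/projector/projector"

# The directory should contain the event file, metadata.tsv, projector_config.pbtxt
# and the a_model.ckpt-1001 checkpoint files.
for name in sorted(os.listdir(log_dir)):
    print(name)

# metadata.tsv holds one label per line, in the same order as the embedded test images
with open(os.path.join(log_dir, "metadata.tsv")) as f:
    print([next(f).strip() for _ in range(5)])  # first 5 labels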

You can then see the full set of visualizations.


After 500+ iterations, the initially mixed-up points gradually separate into classes; because the model's accuracy is only around 90%, some samples are still placed in the wrong cluster.

