
Training Your Own Dataset with TensorFlow (4): Computing Model Accuracy


The previous post, Training Your Own Dataset with TensorFlow (3), covered defining the backward-propagation process used to train the neural network. Once the network has been trained, the next step is to measure its accuracy on the test set.
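Before the full evaluation script, here is a minimal, self-contained sketch of the accuracy metric it uses: the predicted class is the arg-max of the network's output, and accuracy is the mean of the per-sample correctness flags. The logits and labels below are made-up toy values, purely for illustration.

import tensorflow as tf

# Toy logits for four samples over three classes, with made-up true labels
logits = tf.constant([[2.0, 0.1, 0.3],
                      [0.2, 3.0, 0.1],
                      [0.5, 0.4, 0.1],
                      [0.1, 0.2, 4.0]])
labels = tf.constant([0, 1, 2, 2], dtype=tf.int64)

# Same recipe as the evaluation script below:
# predicted class = arg-max of the logits, accuracy = mean of the correctness flags
correct_prediction = tf.equal(tf.argmax(logits, 1), labels)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

with tf.Session() as sess:
    print(sess.run(accuracy))   # 0.75: three of the four predictions are correct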

import time
import forward
import backward
import genertateds
import tensorflow as tf

# Interval to wait between evaluation attempts, in seconds
TEST_INTERVAL_SECS = 5
# Total number of samples in the test set
test_num_examples = 128
def test():
    with tf.Graph().as_default() as g:
        x = tf.placeholder(tf.float32, [test_num_examples,
                                        forward.IMAGE_SIZE,
                                        forward.IMAGE_SIZE,
                                        forward.NUM_CHANNELS])
        y_ = tf.placeholder(tf.int64, [None])
        # No regularization and no dropout during testing
        y = forward.inference(x, False, None)

        # Restore the moving-average (shadow) values of the weights saved during training
        variable_average = tf.train.ExponentialMovingAverage(backward.MOVING_AVERAGE_DECAY)
        variable_average_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(variable_average_restore)

        # Accuracy: compare the predicted class (arg-max of the logits) with the true label
        correct_prediction = tf.equal(tf.argmax(y, 1), y_)
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # Read one batch of test records; the batch size must match the first
        # dimension of the placeholder x, so test_num_examples is used here
        image_batch, label_batch = genertateds.get_batch_record(
            genertateds.test_record_path, test_num_examples)

        while True:
            with tf.Session() as sess:
                ckpt = tf.train.get_checkpoint_state(backward.MODEL_SAVE_PATH)
                if ckpt and ckpt.model_checkpoint_path:
                    coord = tf.train.Coordinator()
                    threads = tf.train.start_queue_runners(sess, coord)
                    image, label = sess.run([image_batch, label_batch])
                    saver.restore(sess, ckpt.model_checkpoint_path)
                    # Recover the global step from the checkpoint file name
                    global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                    accuracy_score = sess.run(accuracy, feed_dict={x: image, y_: label})
                    coord.request_stop()
                    coord.join(threads)
                    print("After %s training step(s), test accuracy = %g" % (global_step, accuracy_score))
                else:
                    time.sleep(TEST_INTERVAL_SECS)

def main():
    test()

if __name__ == '__main__':
    main()
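A note on tf.train.ExponentialMovingAverage: during training the weights have shadow copies that track their moving averages, and variables_to_restore() maps those shadow names back onto the live variables, so the Saver loads the averaged weights instead of the raw ones. The tiny sketch below, with an assumed variable name w, just prints that name mapping.

import tensorflow as tf

# Minimal sketch with an assumed variable name 'w', only to show the name mapping
w = tf.Variable(tf.zeros([3]), name='w')
ema = tf.train.ExponentialMovingAverage(0.99)
ema.apply([w])   # creates the shadow variable 'w/ExponentialMovingAverage'

# A Saver built from this dict restores the saved shadow value into 'w' itself
print(ema.variables_to_restore())
# -> {'w/ExponentialMovingAverage': <tf.Variable 'w:0' ...>}

Also note that the evaluation script loops forever: it checks MODEL_SAVE_PATH for the latest checkpoint, evaluates it, and sleeps for TEST_INTERVAL_SECS before retrying when no checkpoint has been saved yet, so it can run in a second terminal while backward.py is still training.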

That completes a simple neural network for image classification with TensorFlow.
If you spot any mistakes, corrections and feedback are very welcome~~