摘要:這篇文章將講解TensorFlow如何儲存變數和神經網路引數,通過Saver儲存神經網路,再通過Restore呼叫訓練好的神經網路。
本文分享自華為雲社群《[Python人工智慧] 十一.Tensorflow如何儲存神經網路引數 丨【百變AI秀】》,作者: eastmount。
一.儲存變數
通過tf.Variable()定義權重和偏置變數,然後呼叫tf.train.Saver()儲存變數,將資料儲存至本地“my_net/save_net.ckpt”檔案中。
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 20:04:57 2020
@author: xiuzhang Eastmount CSDN
"""
import tensorflow as tf
import numpy as np

# --------------------------------- Save variables ---------------------------------
# Define a 2x3 weight matrix and a 1x3 bias vector. The dtype and name
# given here must match exactly when the variables are restored later.
W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')

# tf.initialize_all_variables() is deprecated since TF 0.12;
# tf.global_variables_initializer() is the drop-in replacement
# (the restore script below already uses it).
init = tf.global_variables_initializer()

# Saver serializes all graph variables into a checkpoint file.
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(init)
    # The official checkpoint format uses the .ckpt extension.
    save_path = saver.save(sess, "my_net/save_net.ckpt")
    print("Save to path:", save_path)
"Save to path: my_net/save_net.ckpt" 儲存成功如下圖所示:
開啟內容如下圖所示:
接著定義標記變數train,通過Restore操作使用我們儲存好的變數。注意,在Restore時需要定義相同的dtype和shape,不需要再定義init。最後直接通過 saver.restore(sess, "my_net/save_net.ckpt") 提取儲存的變數並輸出即可。
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 2 20:04:57 2020
@author: xiuzhang Eastmount CSDN
"""
import tensorflow as tf
import numpy as np

# Mode flag: True -> define and save the variables, False -> restore them.
train = False

# --------------------------------- Save ---------------------------------
if train:
    # Define the variables to be saved: 2x3 weights, 1x3 biases.
    W = tf.Variable([[1, 2, 3], [3, 4, 5]], dtype=tf.float32, name='weights')
    b = tf.Variable([[1, 2, 3]], dtype=tf.float32, name='biases')
    init = tf.global_variables_initializer()
    # Saver serializes all graph variables into a checkpoint file.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        sess.run(init)
        # The official checkpoint format uses the .ckpt extension.
        save_path = saver.save(sess, "my_net/save_net.ckpt")
        print("Save to path:", save_path)

# --------------------------------- Restore ---------------------------------
else:
    # When restoring, the variables must be redefined with the same shape,
    # dtype and name as in the checkpoint; the np.arange values here are
    # placeholders that saver.restore() overwrites.
    W = tf.Variable(np.arange(6).reshape((2, 3)), dtype=tf.float32, name='weights')
    b = tf.Variable(np.arange(3).reshape((1, 3)), dtype=tf.float32, name='biases')
    # No init op is needed: restore() assigns the stored values directly.
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # Pull the saved variables back into W and b by matching names.
        saver.restore(sess, "my_net/save_net.ckpt")
        print("weights", sess.run(W))
        print("biases", sess.run(b))
執行程式碼,如果報錯“NotFoundError: Restoring from checkpoint failed. This is most likely due to a Variable name or other graph key that is missing from the checkpoint. Please ensure that you have not altered the graph expected based on the checkpoint. ”,則需要重置Spyder即可。
最後輸出之前所儲存的變數,weights為 [[1,2,3], [3,4,5]],偏置為 [[1,2,3]]。
二.儲存神經網路
那麼,TensorFlow如何儲存我們的神經網路框架呢?我們需要把整個網路訓練好再進行儲存,其方法和上面類似,完整程式碼如下:
"""
Created on Sun Dec 29 19:21:08 2019
@author: xiuzhang Eastmount CSDN
"""
import os
import glob
import cv2
import numpy as np
import tensorflow as tf # 定義圖片路徑
# Root directory containing one sub-directory per image class.
path = 'photo/'

# --------------------------------- Step 1: read images ---------------------------------
def read_img(path):
    """Read every .jpg under each class sub-directory of *path*.

    Returns a tuple (file paths, images, labels) as numpy arrays, where
    every image is resized to 32x32x3 and the label of an image is the
    enumeration index of its class folder.
    """
    cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
    imgs = []
    labels = []
    fpath = []
    for idx, folder in enumerate(cate):
        # Every .jpg inside this class folder belongs to class `idx`.
        for im in glob.glob(folder + '/*.jpg'):
            img = cv2.imread(im)             # read pixels with OpenCV
            img = cv2.resize(img, (32, 32))  # normalize image size
            imgs.append(img)                 # image data
            labels.append(idx)               # class label
            fpath.append(path + im)          # image path name
    # NOTE(review): np.string_ was removed in NumPy 2.0 (np.bytes_ is the
    # successor) — confirm the target NumPy version before upgrading.
    return np.asarray(fpath, np.string_), np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
# Load the data set from disk.
fpaths, data, label = read_img(path)
print(data.shape)  # e.g. (1000, 32, 32, 3) — images are resized to 32x32 in read_img

# Number of distinct image classes.
num_classes = len(set(label))
print(num_classes)

# Shuffle images, labels and paths with one shared permutation so the
# three arrays stay aligned.
num_example = data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
data = data[arr]
label = label[arr]
fpaths = fpaths[arr]

# Split into 80% training set and 20% test set.
ratio = 0.8
s = int(num_example * ratio)  # np.int was removed in NumPy 1.24; builtin int is equivalent
x_train = data[:s]
y_train = label[:s]
fpaths_train = fpaths[:s]
x_val = data[s:]
y_val = label[s:]
fpaths_test = fpaths[s:]
print(len(x_train), len(y_train), len(x_val), len(y_val))  # 800 800 200 200
print(y_val)
# --------------------------------- Step 2: build the CNN ---------------------------------
# Placeholders for a batch of images and their integer labels.
xs = tf.placeholder(tf.float32, [None, 32, 32, 3])  # one 32x32x3 image per sample
ys = tf.placeholder(tf.int32, [None])               # one integer label per sample
# Dropout-rate container: fed 0.25 while training, 0 while testing.
drop = tf.placeholder(tf.float32)

# conv0: 20 kernels of size 5, ReLU activation.
conv0 = tf.layers.conv2d(xs, 20, 5, activation=tf.nn.relu)
# pool0: 2x2 max pooling with stride 2.
pool0 = tf.layers.max_pooling2d(conv0, [2, 2], [2, 2])
print("Layer0:\n", conv0, pool0)

# conv1: 40 kernels of size 4, ReLU activation.
conv1 = tf.layers.conv2d(pool0, 40, 4, activation=tf.nn.relu)
# pool1: 2x2 max pooling with stride 2.
pool1 = tf.layers.max_pooling2d(conv1, [2, 2], [2, 2])
print("Layer1:\n", conv1, pool1)

# Flatten the 3-D feature maps, then a 400-unit fully-connected layer.
flatten = tf.layers.flatten(pool1)
fc = tf.layers.dense(flatten, 400, activation=tf.nn.relu)
print("Layer2:\n", fc)

# Dropout against over-fitting, then the un-activated output layer.
dropout_fc = tf.layers.dropout(fc, drop)
logits = tf.layers.dense(dropout_fc, num_classes)
print("Output:\n", logits)

# Predicted class = arg max over the logits. tf.arg_max is deprecated;
# tf.argmax is the supported spelling with identical semantics.
predicted_labels = tf.argmax(logits, 1)

# --------------------------------- Step 3: loss and optimizer ---------------------------------
# Per-sample cross-entropy against one-hot encoded labels.
losses = tf.nn.softmax_cross_entropy_with_logits(
    labels=tf.one_hot(ys, num_classes),  # convert integer labels to one-hot
    logits=logits)
mean_loss = tf.reduce_mean(losses)
# Adam with learning rate 1e-4. Minimize the scalar mean loss rather than
# the per-sample loss vector (the original minimized `losses`, which TF
# treats as its sum, so the effective step size scaled with batch size).
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4).minimize(mean_loss)
# --------------------------------- Step 4: train or predict ---------------------------------
# Saver used both to persist and to load the trained model.
saver = tf.train.Saver()
# True -> train and save; False -> load the checkpoint and evaluate.
train = False
# Checkpoint path prefix.
model_path = "model/image_model"

with tf.Session() as sess:
    if train:
        print("訓練模式")
        # Initialize all variables before training.
        sess.run(tf.global_variables_initializer())
        # Feed the whole training set; dropout rate is 0.25 during training.
        train_feed_dict = {
            xs: x_train,
            ys: y_train,
            drop: 0.25
        }
        # 1000 optimization steps, reporting the mean loss every 50 steps.
        for step in range(1000):
            _, mean_loss_val = sess.run([optimizer, mean_loss], feed_dict=train_feed_dict)
            if step % 50 == 0:
                print("step = {}\t mean loss = {}".format(step, mean_loss_val))
        # Persist the trained model.
        saver.save(sess, model_path)
        print("訓練結束,儲存模型到{}".format(model_path))
    else:
        print("測試模式")
        # Load the trained parameters from the checkpoint.
        saver.restore(sess, model_path)
        print("從{}載入模型".format(model_path))
        # Mapping from label id to human-readable class name.
        label_name_dict = {
            0: "人類",
            1: "沙灘",
            2: "建築",
            3: "公交",
            4: "恐龍",
            5: "大象",
            6: "花朵",
            7: "野馬",
            8: "雪山",
            9: "美食"
        }
        # Dropout rate 0 (keep every unit) during evaluation.
        test_feed_dict = {
            xs: x_val,
            ys: y_val,
            drop: 0
        }
        # Run the model and compare predictions against the real labels.
        predicted_labels_val = sess.run(predicted_labels, feed_dict=test_feed_dict)
        for fpath, real_label, predicted_label in zip(fpaths_test, y_val, predicted_labels_val):
            # Translate label ids into class names for display.
            real_label_name = label_name_dict[real_label]
            predicted_label_name = label_name_dict[predicted_label]
            print("{}\t{} => {}".format(fpath, real_label_name, predicted_label_name))
        # Accuracy over the test split.
        print("正確預測個數:", sum(y_val == predicted_labels_val))
        print("準確度為:", 1.0 * sum(y_val == predicted_labels_val) / len(y_val))
核心步驟為:
# Core steps in isolation: one Saver, one checkpoint path, and a Session
# that either trains-and-saves or restores-and-predicts.
saver = tf.train.Saver()
model_path = "model/image_model"
with tf.Session() as sess:
    if train:
        # Train the network, then save it.
        sess.run(tf.global_variables_initializer())
        for step in range(1000):
            _, mean_loss_val = sess.run([optimizer, mean_loss], feed_dict=train_feed_dict)
            if step % 50 == 0:
                print("step = {}\t mean loss = {}".format(step, mean_loss_val))
        saver.save(sess, model_path)
    else:
        # Load the saved network and run prediction on the test set.
        saver.restore(sess, model_path)
        predicted_labels_val = sess.run(predicted_labels, feed_dict=test_feed_dict)
        for fpath, real_label, predicted_label in zip(fpaths_test, y_val, predicted_labels_val):
            real_label_name = label_name_dict[real_label]
            predicted_label_name = label_name_dict[predicted_label]
            print("{}\t{} => {}".format(fpath, real_label_name, predicted_label_name))
預測輸出結果如下圖所示,最終預測正確181張圖片,準確度為0.905。相比之前機器學習KNN的0.500有非常高的提升。
測試模式
INFO:tensorflow:Restoring parameters from model/image_model
從model/image_model載入模型
b'photo/photo/3\\335.jpg' 公交 => 公交
b'photo/photo/1\\129.jpg' 沙灘 => 沙灘
b'photo/photo/7\\740.jpg' 野馬 => 野馬
b'photo/photo/5\\564.jpg' 大象 => 大象
...
b'photo/photo/9\\974.jpg' 美食 => 美食
b'photo/photo/2\\220.jpg' 建築 => 公交
b'photo/photo/9\\912.jpg' 美食 => 美食
b'photo/photo/4\\459.jpg' 恐龍 => 恐龍
b'photo/photo/5\\525.jpg' 大象 => 大象
b'photo/photo/0\\44.jpg' 人類 => 人類 正確預測個數: 181
準確度為: 0.905