『TensorFlow』 Reading Notes: Denoising Autoencoder
阿新 • Published: 2017-12-07
Related post: 『TensorFlow』 Denoising Autoencoder Design
This is code I had studied before and have now typed out again, and there were still new takeaways. Because the comments are fairly detailed this time, I am recording it once more; for the related background, see my earlier article (linked above).
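To summarize the model the code below builds: the input is corrupted with additive Gaussian noise, encoded by a softplus hidden layer, and reconstructed by a purely linear output layer:

    h  = softplus((x + scale · ε) W1 + b1),    ε ~ N(0, I)
    x̂  = h W2 + b2
    cost = 0.5 · Σ (x̂ − x)²

Note that the loss is the squared error between the reconstruction and the clean (un-noised) input.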
# Author : Hellcat
# Time   : 2017/12/6

import numpy as np
import sklearn.preprocessing as prep
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data


def xavier_init(fan_in, fan_out, constant=1):
    '''
    Xavier weight initialization
    :param fan_in: number of rows
    :param fan_out: number of columns
    :param constant: constant factor scaling the initialization range
    :return: initialized weight tensor
    '''
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    return tf.random_uniform((fan_in, fan_out), minval=low, maxval=high)


class AdditiveGaussianNoiseAutoencoder():
    def __init__(self, n_input, n_hidden,
                 transfer_function=tf.nn.softplus,
                 optimizer=tf.train.AdamOptimizer(), scale=0.1):
        '''
        Initialize the autoencoder
        :param n_input: number of input-layer nodes
        :param n_hidden: number of hidden-layer nodes
        :param transfer_function: hidden-layer activation function
        :param optimizer: optimizer, an already-instantiated object
        :param scale: Gaussian noise coefficient
        '''
        self.n_input = n_input
        self.n_hidden = n_hidden
        self.transfer = transfer_function
        self.scale = tf.placeholder(tf.float32)  # noise coefficient actually fed to the network
        self.training_scale = scale              # noise coefficient used during training

        network_weights = self._initialize_weights()
        self.weights = network_weights

        self.x = tf.placeholder(tf.float32, [None, self.n_input])
        self.hidden = self.transfer(
            tf.add(
                tf.matmul(
                    self.x + self.scale * tf.random_normal((n_input,)),
                    self.weights['w1']),
                self.weights['b1']))
        # the reconstruction layer uses no activation function
        self.reconstruction = tf.add(
            tf.matmul(
                self.hidden,
                self.weights['w2']),
            self.weights['b2'])

        self.cost = 0.5 * tf.reduce_sum(
            tf.pow(tf.subtract(self.reconstruction, self.x), 2.0))
        # an instantiated class can be passed into a function as an argument
        self.optimizer = optimizer.minimize(self.cost)

        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)

    def _initialize_weights(self):
        '''
        Initialize all variables
        :return: dict holding the variables
        '''
        all_weights = dict()
        all_weights['w1'] = tf.Variable(xavier_init(self.n_input, self.n_hidden))
        all_weights['b1'] = tf.Variable(tf.zeros([self.n_hidden], dtype=tf.float32))
        all_weights['w2'] = tf.Variable(tf.zeros([self.n_hidden, self.n_input], dtype=tf.float32))
        all_weights['b2'] = tf.Variable(tf.zeros([self.n_input], dtype=tf.float32))
        return all_weights

    def partial_fit(self, X):
        '''
        Run a single training step and return the loss
        :param X: training data
        :return: loss value for this step
        '''
        cost, opt = self.sess.run((self.cost, self.optimizer),
                                  feed_dict={self.x: X, self.scale: self.training_scale})
        return cost

    def calc_total_cost(self, X):
        '''
        Compute the loss without triggering training
        :param X: training data
        :return: loss value
        '''
        return self.sess.run(self.cost,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def transform(self, X):
        '''
        Return the hidden-layer output, i.e. the learned abstract features
        :param X: training data
        :return: hidden-layer output
        '''
        return self.sess.run(self.hidden,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def generate(self, hidden=None):
        '''
        Reconstruct from hidden-layer features
        :param hidden: hidden-layer features
        :return: reconstructed data
        '''
        if hidden is None:
            # sample random features; shape must match the hidden layer
            hidden = np.random.normal(size=[1, self.n_hidden])
        return self.sess.run(self.reconstruction,
                             feed_dict={self.hidden: hidden})

    def reconstruct(self, X):
        '''
        Reconstruct from the original data
        :param X: training data
        :return: reconstructed data
        '''
        return self.sess.run(self.reconstruction,
                             feed_dict={self.x: X, self.scale: self.training_scale})

    def getWeights(self):
        '''
        Fetch parameter values
        :return: hidden-layer weights
        '''
        return self.sess.run(self.weights['w1'])

    def getBiases(self):
        '''
        Fetch parameter values
        :return: hidden-layer biases
        '''
        return self.sess.run(self.weights['b1'])


def standard_scale(X_train, X_test):
    '''
    Standardize the data
    :param X_train: training data
    :param X_test: test data
    :return: standardized training and test data
    '''
    preprocessor = prep.StandardScaler().fit(X_train)
    X_train = preprocessor.transform(X_train)
    X_test = preprocessor.transform(X_test)
    return X_train, X_test


def get_random_block_from_data(data, batch_size):
    # draw a random contiguous block of batch_size samples
    start_index = np.random.randint(0, len(data) - batch_size)
    return data[start_index:(start_index + batch_size)]


if __name__ == '__main__':
    mnist = input_data.read_data_sets('../../../Mnist_data/', one_hot=True)
    X_train, X_test = standard_scale(mnist.train.images, mnist.test.images)
    n_samples = int(mnist.train.num_examples)
    train_epochs = 20
    batch_size = 20
    display_step = 1

    autoencoder = AdditiveGaussianNoiseAutoencoder(
        n_input=784,
        n_hidden=200,
        transfer_function=tf.nn.softplus,
        optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
        scale=0.01)

    for epoch in range(train_epochs):
        avg_cost = 0.
        total_batch = int(n_samples / batch_size)
        for i in range(total_batch):
            batch_xs = get_random_block_from_data(X_train, batch_size)
            # train on a single batch and accumulate the per-epoch average loss
            cost = autoencoder.partial_fit(batch_xs)
            avg_cost += cost / n_samples * batch_size
        if epoch % display_step == 0:
            print('epoch : %04d, cost = %.9f' % (epoch + 1, avg_cost))

    # compute the cost on the test set
    print('Total cost:', str(autoencoder.calc_total_cost(X_test)))
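As a minimal usage sketch (not part of the original post): once training finishes, the class's helper methods can be used to pull out the learned features, reconstructions, and parameters. All method names below are from the class above; only the slicing of X_test is illustrative.

    # hidden-layer codes for ten test images, shape (10, 200)
    features = autoencoder.transform(X_test[:10])
    # denoised reconstructions of the same images, shape (10, 784)
    rebuilt = autoencoder.reconstruct(X_test[:10])
    # learned encoder weights, shape (784, 200)
    w1 = autoencoder.getWeights()
    print(features.shape, rebuilt.shape, w1.shape)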
Partial output:
……
epoch : 0020, cost = 1509.876800515
epoch : 0020, cost = 1510.107261985
epoch : 0020, cost = 1510.332509055
epoch : 0020, cost = 1510.551538707
Total cost: 768927.0
1. The Xavier weight-initialization method (see the sketch after this list).
2. A function's actual argument can be class(), i.e. an instantiated class (example below).
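On point 1: Xavier initialization draws weights uniformly from [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))], which keeps the variance of activations roughly balanced between layers. A standalone numpy sketch of the same range used by xavier_init above (the 784/200 values match the network's layer sizes):

    import numpy as np

    fan_in, fan_out = 784, 200
    limit = np.sqrt(6.0 / (fan_in + fan_out))  # ~0.078 for the 784->200 layer
    W = np.random.uniform(-limit, limit, size=(fan_in, fan_out))
    print(W.min(), W.max())  # both bounded by +/-limit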
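On point 2: because the constructor receives an optimizer object rather than an optimizer class, swapping optimizers requires no change to the class itself. A hedged sketch, assuming the class defined above is in scope; any tf.train optimizer instance would work:

    # pass a different already-instantiated optimizer into the constructor
    ae_sgd = AdditiveGaussianNoiseAutoencoder(
        n_input=784, n_hidden=200,
        optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.01),
        scale=0.01)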