
tensorflow06 Notes on 《TensorFlow實戰Google深度學習框架》 04-04 Regularization

# 《TensorFlow實戰Google深度學習框架》 Chapter 04: Deep Neural Networks
# win10 Tensorflow1.0.1 python3.5.3
# CUDA v8.0 cudnn-8.0-windows10-x64-v5.1
# filename: ts04.04.py regularization
# Regularization is an effective way to avoid overfitting. The L2 penalty is
# differentiable and cheap to compute (a small sketch follows the imports below).

# 1. Generate the simulated data set
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
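
# A minimal sketch of what the L2 penalty adds to the loss. This is my own
# illustration (not the book's code), assuming the TF 1.x behavior that
# tf.contrib.layers.l2_regularizer(scale)(w) == scale * tf.nn.l2_loss(w)
# == scale * sum(w**2) / 2:
def l2_penalty_np(w, scale):
    # NumPy stand-in for the TF regularizer, for illustration only
    return scale * np.sum(np.square(w)) / 2.0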

data = []
label = []
np.random.seed(0)

# The circle of radius 1 centered at the origin splits the points into a red
# class and a blue class, with Gaussian noise added to every point.
for i in range(150):
    x1 = np.random.uniform(-1, 1)
    x2 = np.random.uniform(0, 2)
    if x1**2 + x2**2 <= 1:
        data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])
        label.append(0)
    else:
        data.append([np.random.normal(x1, 0.1), np.random.normal(x2, 0.1)])
        label.append(1)

data = np.hstack(data).reshape(-1, 2)
label = np.hstack(label).reshape(-1, 1)
plt.scatter(data[:, 0], data[:, 1], c=label, cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
plt.show()
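
# Optional sanity check (my addition, not in the book's code): count how many
# samples landed in each class.
print("class 0: %d, class 1: %d" % (int(np.sum(label == 0)), int(np.sum(label == 1))))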
# 2. Define a function that creates a weight variable and automatically adds
#    its L2 regularization term to the 'losses' collection.
def get_weight(shape, lambda1):
    var = tf.Variable(tf.random_normal(shape), dtype=tf.float32)
    tf.add_to_collection('losses', tf.contrib.layers.l2_regularizer(lambda1)(var))
    return var

# 3. Define the neural network
x = tf.placeholder(tf.float32, shape=(None, 2))
y_ = tf.placeholder(tf.float32, shape=(None, 1))
sample_size = len(data)

# Number of nodes in each layer
layer_dimension = [2, 10, 5, 3, 1]
n_layers = len(layer_dimension)

cur_layer = x
in_dimension = layer_dimension[0]

# Build the network structure layer by layer
for i in range(1, n_layers):
    out_dimension = layer_dimension[i]
    weight = get_weight([in_dimension, out_dimension], 0.003)
    bias = tf.Variable(tf.constant(0.1, shape=[out_dimension]))
    cur_layer = tf.nn.elu(tf.matmul(cur_layer, weight) + bias)
    in_dimension = layer_dimension[i]

y = cur_layer

# Loss: mean squared error, plus the L2 terms collected in 'losses'
mse_loss = tf.reduce_sum(tf.pow(y_ - y, 2)) / sample_size
tf.add_to_collection('losses', mse_loss)
loss = tf.add_n(tf.get_collection('losses'))

# 4. Train on the unregularized loss mse_loss
# Define the training objective (mse_loss), the number of steps, and run training.
train_op = tf.train.AdamOptimizer(0.001).minimize(mse_loss)
TRAINING_STEPS = 40000
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict={x: data, y_: label})
        if i % 2000 == 0:
            print("After %d steps, mse_loss: %f" % (i, sess.run(mse_loss, feed_dict={x: data, y_: label})))

    # Plot the decision boundary learned without regularization
    xx, yy = np.mgrid[-1.2:1.2:.01, -0.2:2.2:.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)
    plt.scatter(data[:, 0], data[:, 1], c=label, cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
    plt.contour(xx, yy, probs, levels=[.5], cmap="Greys", vmin=0, vmax=.1)
    plt.show()

'''
After 0 steps, mse_loss: 3.866614
After 2000 steps, mse_loss: 0.037082
After 4000 steps, mse_loss: 0.029324
After 6000 steps, mse_loss: 0.023534
After 8000 steps, mse_loss: 0.021745
After 10000 steps, mse_loss: 0.020566
After 12000 steps, mse_loss: 0.019632
After 14000 steps, mse_loss: 0.018284
After 16000 steps, mse_loss: 0.016959
After 18000 steps, mse_loss: 0.015033
After 20000 steps, mse_loss: 0.008671
After 22000 steps, mse_loss: 0.006562
After 24000 steps, mse_loss: 0.005542
After 26000 steps, mse_loss: 0.004682
After 28000 steps, mse_loss: 0.003911
After 30000 steps, mse_loss: 0.003389
After 32000 steps, mse_loss: 0.002933
After 34000 steps, mse_loss: 0.002545
After 36000 steps, mse_loss: 0.002171
After 38000 steps, mse_loss: 0.001805
'''
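
# Note (my observation, not the book's text): without the penalty, mse_loss
# keeps falling toward ~0.0018 because the network is free to fit the noise in
# the 150 training samples; the regularized run below plateaus instead, once
# the L2 term balances the data-fitting term.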
# 5. Train on the regularized loss
# Define the training objective (loss), the number of steps, and run training.
train_op = tf.train.AdamOptimizer(0.001).minimize(loss)
TRAINING_STEPS = 40000
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    for i in range(TRAINING_STEPS):
        sess.run(train_op, feed_dict={x: data, y_: label})
        if i % 2000 == 0:
            print("After %d steps, loss: %f" % (i, sess.run(loss, feed_dict={x: data, y_: label})))

    # Plot the decision boundary learned with regularization
    xx, yy = np.mgrid[-1:1:.01, 0:2:.01]
    grid = np.c_[xx.ravel(), yy.ravel()]
    probs = sess.run(y, feed_dict={x: grid})
    probs = probs.reshape(xx.shape)
    plt.scatter(data[:, 0], data[:, 1], c=label, cmap="RdBu", vmin=-.2, vmax=1.2, edgecolor="white")
    plt.contour(xx, yy, probs, levels=[.5], cmap="Greys", vmin=0, vmax=.1)
    plt.show()

'''
After 0 steps, loss: 3.600181
After 2000 steps, loss: 0.139405
After 4000 steps, loss: 0.097296
After 6000 steps, loss: 0.069810
After 8000 steps, loss: 0.059866
After 10000 steps, loss: 0.055504
After 12000 steps, loss: 0.054992
After 14000 steps, loss: 0.054985
After 16000 steps, loss: 0.054981
After 18000 steps, loss: 0.054980
After 20000 steps, loss: 0.054979
After 22000 steps, loss: 0.054979
After 24000 steps, loss: 0.054979
After 26000 steps, loss: 0.054979
After 28000 steps, loss: 0.054979
After 30000 steps, loss: 0.054979
After 32000 steps, loss: 0.054979
After 34000 steps, loss: 0.054979
After 36000 steps, loss: 0.054979
After 38000 steps, loss: 0.054979
'''
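
# Quick check of the collection mechanism (my addition, not the book's code):
# get_weight is called once per layer transition, so 'losses' should hold four
# L2 terms plus mse_loss, and tf.add_n sums exactly these five tensors.
print("entries in 'losses':", len(tf.get_collection('losses')))  # expected: 5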

[Figure 1: scatter plot of the simulated data set, two classes split by the unit circle]


[Figure 2: decision boundary learned without regularization]

[Figure 3: decision boundary learned with L2 regularization]