
Learning Theano: regularization

Let's start with the code:

from __future__ import print_function
import theano
from sklearn.datasets import load_boston  # Boston housing price data
import theano.tensor as T
import numpy as np
import matplotlib.pyplot as plt

# Define a Layer class, the building block of the neural network
class Layer(object):
    def __init__(self, inputs, in_size, out_size, activation_function=None):
        self.W = theano.shared(np.random.normal(0, 1, (in_size, out_size)))
        self.b = theano.shared(np.zeros((out_size, )) + 0.1)
        self.Wx_plus_b = T.dot(inputs, self.W) + self.b
        self.activation_function = activation_function
        if activation_function is None:
            self.outputs = self.Wx_plus_b
        else:
            self.outputs = self.activation_function(self.Wx_plus_b)

# Min-max normalize each feature of x to the [0, 1] range
def minmax_normalization(data):
    xs_max = np.max(data, axis=0)
    xs_min = np.min(data, axis=0)
    xs = (1 - 0) * (data - xs_min) / (xs_max - xs_min) + 0  # general form (max' - min') * (x - min) / (max - min) + min', here rescaled to [0, 1]
    return xs

# Load the data
np.random.seed(100)
x_data = load_boston().data
# minmax normalization, rescale the inputs
x_data = minmax_normalization(x_data)
y_data = load_boston().target[:, np.newaxis]  # [:, np.newaxis] turns the 1-D target array into a column vector (matrix form)
print(x_data)
print(y_data)

# Split the dataset into a training set and a test set; the held-out data checks whether the model has learned well and can also be used to pick suitable hyperparameters
x_train, y_train = x_data[:400], y_data[:400]
x_test, y_test = x_data[400:], y_data[400:]

x = T.dmatrix("x")  # x is only a symbolic placeholder; code involving x produces no values until data is fed in
y = T.dmatrix("y")  # likewise, y holds no data until it is passed through a compiled function

# Build the model
l1 = Layer(x, 13, 50, T.tanh)
l2 = Layer(l1.outputs, 50, 1, None)

# Cost computation
cost = T.mean(T.square(l2.outputs - y))      # cost function without regularization
# cost = T.mean(T.square(l2.outputs - y)) + 0.1 * ((l1.W ** 2).sum() + (l2.W ** 2).sum())  # with l2 regularization
# cost = T.mean(T.square(l2.outputs - y)) + 0.1 * (abs(l1.W).sum() + abs(l2.W).sum())  # with l1 regularization
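# Note: the L2 penalty (sum of squared weights) shrinks all weights smoothly toward zero
# ("weight decay"), while the L1 penalty (sum of absolute weights) tends to drive many
# weights exactly to zero, producing sparse solutions. The factor 0.1 is the regularization
# strength: larger values constrain the model more, reducing overfitting at the cost of bias.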
gW1, gb1, gW2, gb2 = T.grad(cost, [l1.W, l1.b, l2.W, l2.b])

learning_rate = 0.01
# Compiled training function: each call performs one gradient-descent update on all four parameters
train = theano.function(
    inputs=[x, y],
    updates=[(l1.W, l1.W - learning_rate * gW1),
             (l1.b, l1.b - learning_rate * gb1),
             (l2.W, l2.W - learning_rate * gW2),
             (l2.b, l2.b - learning_rate * gb2)])

compute_cost = theano.function(inputs=[x, y], outputs=cost)  # evaluates the cost without updating the parameters

# Containers for the recorded errors and iteration numbers
train_err_list = []
test_err_list = []
learning_time = []
for i in range(100):
    train(x_train, y_train)  # feed the training data into the network for one update
    if i % 10 == 0:
        # record cost
        train_err_list.append(compute_cost(x_train, y_train))  # cost on the training data; x and y receive values once this runs
        test_err_list.append(compute_cost(x_test, y_test))  # cost on the test data
        learning_time.append(i)

# Plot the cost curves (red solid: training error, blue dashed: test error)
plt.plot(learning_time, train_err_list, 'r-')
plt.plot(learning_time, test_err_list, 'b--')
plt.show()

Results: a plot of the training error (red solid) and test error (blue dashed) over the recorded iterations.

The code above uses the Boston housing price data to demonstrate regularization in Theano.
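To see the effect of regularization, uncomment one of the alternative cost lines above (or, equivalently, define the cost as below) and rerun the script; with a penalty term the gap between the training curve and the test curve typically narrows. A minimal sketch, assuming the same l1/l2 layer objects and the penalty weight 0.1 used in the commented-out lines:

# L2-regularized cost: mean squared error plus a weight-decay penalty on both layers' weights
penalty = 0.1
cost = T.mean(T.square(l2.outputs - y)) + penalty * ((l1.W ** 2).sum() + (l2.W ** 2).sum())

The penalty weight is a hyperparameter; it can be tuned by comparing the test-set cost for a few candidate values.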
