Deep learning with python 學習筆記(一)
阿新 • 發佈:2019-02-19
一、前饋神經網路
第三章: 編寫前饋神經網路的程式碼:
# One-hidden-layer feed-forward neural network for regression, trained with
# per-example SGD using autograd for automatic differentiation.
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad
import sklearn.metrics
import pylab

# Generate dataset: 1000 examples of 100 random features each, plus one
# random scalar target per example. D = (inputs, targets).
examples = 1000
features = 100
D = (npr.randn(examples, features), npr.randn(examples))

# Specify the network: one hidden layer of 10 tanh units, one output unit.
layer1_units = 10
layer2_units = 1
w1 = npr.rand(features, layer1_units)       # hidden-layer weights
b1 = npr.rand(layer1_units)                 # hidden-layer biases
w2 = npr.rand(layer1_units, layer2_units)   # output-layer weights
b2 = 0.0                                    # output-layer bias (scalar)
theta = (w1, b1, w2, b2)                    # parameter tuple θ


def squared_loss(y, y_hat):
    """Sum of squared errors between targets y and predictions y_hat."""
    # np.dot of the residual with itself == sum of squared residuals.
    return np.dot((y - y_hat), (y - y_hat))


def binary_cross_entropy(y, y_hat):
    """Binary cross-entropy loss (defined but unused by the training below).

    Fixed: the original negated only the first term,
    -(y*log(y_hat)) + (1-y)*log(1-y_hat); both terms must be negated.
    """
    return np.sum(-(y * np.log(y_hat) + (1 - y) * np.log(1 - y_hat)))


def neural_network(x, theta):
    """Forward pass: tanh hidden layer followed by a tanh output layer."""
    w1, b1, w2, b2 = theta
    return np.tanh(np.dot(np.tanh(np.dot(x, w1) + b1), w2) + b2)


def objective(theta, idx):
    """Squared loss of the network on the single training example `idx`."""
    return squared_loss(D[1][idx], neural_network(D[0][idx], theta))


def update_theta(theta, delta, alpha):
    """One SGD step: theta <- theta - alpha * delta.

    Fixed: the original returned (w1_new, w2_new, b1_new, b2_new), silently
    swapping b1 and w2 relative to the (w1, b1, w2, b2) order that every
    unpacking site expects; the order must match.
    (Also renamed from the original's misspelled `updata_theta`.)
    """
    w1, b1, w2, b2 = theta
    w1_delta, b1_delta, w2_delta, b2_delta = delta
    return (w1 - alpha * w1_delta,
            b1 - alpha * b1_delta,
            w2 - alpha * w2_delta,
            b2 - alpha * b2_delta)


# Gradient of the per-example objective w.r.t. theta (its first argument).
grad_objective = grad(objective)

# Train: 10 epochs of per-example SGD with learning rate 0.01.
# NOTE: mean_squared_error is MSE, not RMSE, despite the printed label.
epochs = 10
print("RMSE before training:",
      sklearn.metrics.mean_squared_error(D[1], neural_network(D[0], theta)))
rmse = []
for epoch in range(epochs):
    for j in range(examples):
        delta = grad_objective(theta, j)
        theta = update_theta(theta, delta, 0.01)
    # NOTE(review): the source's indentation was lost; recording the error
    # once per epoch here — confirm against the original listing.
    rmse.append(sklearn.metrics.mean_squared_error(D[1],
                                                   neural_network(D[0], theta)))
print("RMSE after training",
      sklearn.metrics.mean_squared_error(D[1], neural_network(D[0], theta)))
pylab.plot(rmse)
pylab.show()
結果:
RMSE before training: 1.89717170214
RMSE after training 1.06282114173