
Regression with Keras using the TensorFlow backend

import numpy as np
np.random.seed(1337)
from keras.models import Sequential
from keras.layers import Dense
import matplotlib.pyplot as plt

# create some data
X = np.linspace(-1,1,50)
np.random.shuffle(X)
Y = 25 * X + 100 + np.random.normal(0,0.05,(50,))

# plot the raw data
plt.scatter(X, Y)
plt.show()

X_train, Y_train = X[:36], Y[:36]
X_test, Y_test = X[36:], Y[36:]

# build the neural network
model = Sequential()
model.add(Dense(output_dim=1, input_dim=1))
#model.add(Dense(output_dim=1,))

# choose the loss function and optimizer
model.compile(loss='mse', optimizer='sgd')

# training
print("Training.........................")
for step in range(501):
    cost = model.train_on_batch(X_train, Y_train)
    if step % 10 == 0:
        print('train cost', cost)

# testing
print("Testing..........................")
cost = model.evaluate(X_test, Y_test, batch_size=14)
print('test cost', cost)
W, b = model.layers[0].get_weights()
print('Weight=', W, '\nbias=', b)

# plot the predictions on the test set
Y_pred = model.predict(X_test)
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred)
plt.show()
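
For comparison, here is a minimal sketch of the same regression written against the newer tf.keras API (this assumes TensorFlow 2.x; the 500 epochs of full-batch fitting are chosen here only to roughly mirror the 501 train_on_batch steps above). Dense takes units instead of output_dim, and model.fit replaces the manual training loop.

import numpy as np
import tensorflow as tf

np.random.seed(1337)

# same synthetic data as above: y = 25x + 100 plus a little noise
X = np.linspace(-1, 1, 50)
np.random.shuffle(X)
Y = 25 * X + 100 + np.random.normal(0, 0.05, (50,))

# reshape to (samples, 1), the layout tf.keras Dense expects
X_train, Y_train = X[:36].reshape(-1, 1), Y[:36].reshape(-1, 1)
X_test, Y_test = X[36:].reshape(-1, 1), Y[36:].reshape(-1, 1)

# a single Dense unit is just linear regression y = Wx + b
model = tf.keras.Sequential([tf.keras.layers.Dense(units=1)])
model.compile(loss='mse', optimizer='sgd')

# full-batch training, roughly matching the 501 steps above
model.fit(X_train, Y_train, epochs=500, batch_size=36, verbose=0)

print('test cost', model.evaluate(X_test, Y_test, verbose=0))
W, b = model.layers[0].get_weights()
print('Weight=', W, '\nbias=', b)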