機器學習——單層神經網路線性迴歸解析解實現
阿新 • • 發佈:2018-12-22
線性迴歸
機器學習——單層神經網路線性迴歸從零實現。上篇部落格使用小批量隨機梯度下降法對 loss 函式進行優化,這篇部落格將從解析解角度(即直接求閉式解)對演算法進行優化。
演算法實現
import numpy as np


class linear_regression():
    """Ridge regression solved analytically via the normal equations.

    Minimizes ||Xw - y||^2 + k * ||w||^2; the closed-form solution is
    w = (X^T X + k I)^(-1) X^T y.
    """

    def __init__(self):
        # Learned weight vector (bias term first); populated by fit().
        self.w = None

    def fit(self, X, y, k):
        """Fit the weights in closed form.

        X : (n_samples, n_features) design matrix; a bias column of ones
            is prepended here, so callers pass raw features.
        y : (n_samples,) target values.
        k : float, L2 regularization strength (k = 0 gives ordinary
            least squares).
        """
        X = np.insert(X, 0, 1, axis=1)  # prepend bias column of ones
        # Solve (X^T X + k I) w = X^T y directly. np.linalg.solve is
        # cheaper and numerically more stable than forming the explicit
        # inverse as the original code did.
        # NOTE: the bias weight is regularized too, matching the
        # original implementation.
        A = X.T.dot(X) + k * np.eye(X.shape[1])
        self.w = np.linalg.solve(A, X.T.dot(y))

    def predict(self, X):
        """Return predictions for X using the fitted weights."""
        X = np.insert(X, 0, 1, axis=1)
        return X.dot(self.w)


def mean_squared_error(y, y_predict):
    """Mean squared error between targets y and predictions y_predict."""
    return np.mean((y - y_predict) ** 2)


def main():
    """Fit ridge regression on the diabetes dataset and plot the test set."""
    # Plotting / dataset imports are local so the module can be imported
    # (e.g. to reuse linear_regression) without matplotlib or sklearn.
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection
    import sklearn.datasets

    k = float(input())  # ridge regularization strength, read from stdin
    diabetes = sklearn.datasets.load_diabetes()
    print(format(diabetes.keys()))
    print(diabetes['feature_names'])
    X = diabetes.data
    # Hold out the last 20 samples as a test set.
    X_train, X_test = X[:-20], X[-20:]
    y_train, y_test = diabetes.target[:-20], diabetes.target[-20:]
    clf = linear_regression()
    clf.fit(X_train, y_train, k)
    y_predict = clf.predict(X_test)
    print(format(mean_squared_error(y_test, y_predict)))
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(X_test[:, 0], X_test[:, 1], y_test, c='black')
    plt.show()


if __name__ == "__main__":
    main()