# Python implementation of planar-data classification
# (one-hidden-layer neural network)

import numpy as np
import matplotlib.pyplot as plt
from testCases import *
import sklearn
import sklearn.datasets
import sklearn.linear_model
from planar_utils import plot_decision_boundary, sigmoid, load_planar_dataset, load_extra_datasets

# Load the toy planar ("flower") dataset from the course helper module.
# X holds the 2-D input points, Y the binary class labels.
X, Y = load_planar_dataset()
'''
x.shape (2, 400)
y.shape (1, 400)
'''
# Quick sanity check: print the first sample's two coordinates.
print(X[:, 0])
def layer_size(x, y, n_h):
    '''
    Return the layer sizes of the network as a dict.

    :param x: input data, shape (n_x, m) — e.g. (2, 400)
    :param y: labels, shape (n_y, m) — e.g. (1, 400)
    :param n_h: number of hidden-layer units
    :return: dict with keys 'n_x', 'n_y', 'n_h'
    '''
    # Rows (axis 0) are features/outputs; axis 1 is the sample count.
    # Using shape[0] here matches the docstring and trainModel below.
    n_x = x.shape[0]
    n_y = y.shape[0]
    # Respect the caller-supplied hidden size instead of hard-coding 4.
    Dict = {
        'n_x': n_x,
        'n_y': n_y,
        'n_h': n_h
    }
    return Dict

def initial(n_x, n_y, n_h):
    '''
    Randomly initialize the network parameters.

    :param n_x: number of inputs
    :param n_y: number of outputs
    :param n_h: number of hidden-layer units
    :return: dict with keys 'w1', 'b1', 'w2', 'b2'
    '''
    # Small random weights break symmetry; biases start at zero.
    scale = 0.01
    return {
        'w1': scale * np.random.randn(n_h, n_x),
        'b1': np.zeros((n_h, 1)),
        'w2': scale * np.random.randn(n_y, n_h),
        'b2': np.zeros((n_y, 1)),
    }

def forward_propagata(x , dict_wb):
    '''
    Forward pass: tanh hidden layer, sigmoid output layer.

    :param x: input, shape (n_x, m)
    :param dict_wb: weights and biases ('w1', 'b1', 'w2', 'b2')
    :return: dict of intermediate values
    z1.shape (4, 400)
    a1.shape (4, 400)
    z2.shape (1, 400)
    b2.shape (1, 1)
    a2.shape (1, 400)
    '''
    w1 = dict_wb['w1']
    b1 = dict_wb['b1']
    w2 = dict_wb['w2']
    b2 = dict_wb['b2']

    z1 = np.dot(w1, x) + b1
    a1 = np.tanh(z1)
    z2 = np.dot(w2, a1) + b2
    # Output activation must be sigmoid, not tanh: backward_propagata's
    # dz2 = a2 - y is the sigmoid + cross-entropy gradient, and predict()
    # rounds a2 expecting probabilities in [0, 1].
    a2 = 1.0 / (1.0 + np.exp(-z2))

    # Assert the output has one row per sample column.
    assert (a2.shape == (1, x.shape[1]))

    Dict = {
        'z1': z1,
        'a1': a1,
        'z2': z2,
        'a2': a2
    }
    return Dict

def backward_propagata(dict_forward, dict_wb, x, y):
    '''
    Backward pass: gradients of the cross-entropy cost, averaged over
    the m samples.

    w1.shape (4, 2)
    w2.shape (1, 4)
    a1.shape (4, 400)
    a2.shape (1, 400)
    dz2.shape (1, 400)
    dw2.shape (1, 4)
    db2.shape (1, 1)
    dz1.shape = z1.shape
    dw1.shape = w1.shape
    db1.shape = (1, 1)
    '''
    m = x.shape[1]
    w1 = dict_wb['w1']
    w2 = dict_wb['w2']
    a1 = dict_forward['a1']
    a2 = dict_forward['a2']

    # dz2 = a2 - y is the sigmoid + cross-entropy output gradient.
    dz2 = a2 - y
    # All gradients carry the 1/m averaging factor; the original code
    # applied it only to the biases, making dw1/dw2 m-times too large.
    dw2 = (1 / m) * np.dot(dz2, a1.T)
    db2 = (1 / m) * np.sum(dz2, axis=1, keepdims=True)
    # 1 - a1**2 is the derivative of tanh at the hidden layer.
    dz1 = np.multiply(np.dot(w2.T, dz2), 1 - np.power(a1, 2))
    dw1 = (1 / m) * np.dot(dz1, x.T)
    db1 = (1 / m) * np.sum(dz1, axis=1, keepdims=True)

    Dict = {
        'dw2': dw2,
        'db2': db2,
        'dw1': dw1,
        'db1': db1
    }
    return Dict

def updata(dict_wb, dict_dwdb, learningRate):
    '''
    One gradient-descent step: param = param - learningRate * grad.

    :param dict_wb: current parameters ('w1', 'b1', 'w2', 'b2')
    :param dict_dwdb: gradients ('dw1', 'db1', 'dw2', 'db2')
    :param learningRate: step size
    :return: dict of updated parameters (new arrays; inputs untouched)
    '''
    updated = {}
    for name in ('w1', 'b1', 'w2', 'b2'):
        grad = dict_dwdb['d' + name]
        updated[name] = dict_wb[name] - learningRate * grad
    return updated

def trainModel(x, y, n_h, n_iter):
    '''
    Train the network by repeated forward/backward passes.

    :param x: input, shape (n_x, m)
    :param y: labels, shape (n_y, m)
    :param n_h: hidden-layer size
    :param n_iter: number of gradient-descent iterations
    :return: dict of trained parameters ('w1', 'b1', 'w2', 'b2')
    '''
    learningRate = 0.005
    params = initial(x.shape[0], y.shape[0], n_h)

    for _ in range(n_iter):
        cache = forward_propagata(x, params)
        grads = backward_propagata(cache, params, x, y)
        params = updata(params, grads, learningRate)
    return params

def predict(dict_wb, x):
    '''
    Predict class labels for x by thresholding the output activation.

    :param dict_wb: trained parameters
    :param x: input, shape (n_x, m)
    :return: rounded output activations, shape (n_y, m)
    '''
    a2 = forward_propagata(x, dict_wb)['a2']
    return np.round(a2)

# Progress markers around the (slow) training call.
print(1)
dict_wb = trainModel(X, Y, 4, 20000)
print(2)

print(dict_wb['w1'], dict_wb['w2'])

'''
[[-0.10197359 -6.57062786]
 [-5.13298076 -5.41754432]
 [ 0.00673609  0.03952171]
 [-5.51919058  6.41997001]] 
 [[-0.99171186  0.7163268  -1.38306798 -0.72930063]]
'''
#
# tx = [1.20444229, 3.57611415]
# ty = predict(dict_wb, tx)
# print(ty)

# Plot the decision boundary of the trained model.
plot_decision_boundary(lambda x: predict(dict_wb, x.T), X, Y)
print(3)
plt.title("Decision Boundary for hidden layer size " + str(4))

print(4)
predictions = predict(dict_wb, X)
# NOTE(review): this accuracy formula is only valid when predictions
# are strictly 0/1 — confirm predict() returns values in {0, 1}.
print ('準確率: %d' % float((np.dot(Y, predictions.T) + np.dot(1 - Y, 1 - predictions.T)) / float(Y.size) * 100) + '%')
plt.show()