BP neural network implemented in Python (backpropagation demo)
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 14 10:41:33 2018

@author: fang
"""

# -*- coding:utf-8 -*-
#! python2
import numpy as np


# 定義tanh函式
# Hyperbolic-tangent activation: a thin wrapper around NumPy's ufunc,
# kept as a named function so the network can select it by name.
def tanh(x):
    """Return tanh(x); works element-wise on scalars and arrays."""
    return np.tanh(x)
 
 
# tanh函式的導數
# Derivative of tanh: d/dx tanh(x) = 1 - tanh(x)**2.
def tan_deriv(x):
    """Return the derivative of tanh evaluated at x (element-wise).

    Bug fix: the original multiplied np.tanh(x) by np.tan(x) — the
    trigonometric tangent — instead of squaring np.tanh(x), which
    corrupted every gradient when the 'tanh' activation was selected.
    """
    return 1.0 - np.tanh(x) ** 2
 
 
# sigmoid函式
# Logistic sigmoid: squashes any real input into the open interval (0, 1).
def logistic(x):
    """Return 1 / (1 + e^(-x)), element-wise."""
    return 1.0 / (1.0 + np.exp(-x))
 
 
# sigmoid函式的導數
# Derivative of the logistic sigmoid, expressed through the sigmoid itself:
# s'(x) = s(x) * (1 - s(x)).
def logistic_derivative(x):
    """Return the derivative of the logistic function at x."""
    s = logistic(x)
    return s * (1 - s)


class neuralnetwork:
    """A minimal fully-connected feed-forward network trained with
    single-sample (stochastic) backpropagation.

    NOTE(review): the layers have no bias terms, and ``active_derive``
    is evaluated on the *activation* values rather than the
    pre-activation inputs — kept as-is to preserve the original
    training behaviour, but both limit how well the network converges.
    """

    def __init__(self, layer, active_function):
        """Build the network.

        layer           -- sequence of layer sizes, e.g. [2, 3, 1]
        active_function -- 'tanh' or 'logistic'

        Raises ValueError for an unrecognised activation name (the
        original silently left the activation unset, which failed later
        with an AttributeError during training).
        """
        self.layernum = len(layer)

        if active_function == 'tanh':
            self.active = tanh
            self.active_derive = tan_deriv
        elif active_function == 'logistic':
            self.active = logistic
            self.active_derive = logistic_derivative
        else:
            raise ValueError('unknown active_function: %r' % (active_function,))

        # Weight matrix k connects layer k to layer k+1; entries are
        # drawn uniformly from [-0.5, 0.5).
        self.weight = []
        for l in range(1, self.layernum):
            self.weight.append(np.random.rand(layer[l - 1], layer[l]) - 0.5)

        # Debug output: show the initial weights.
        for w in self.weight:
            print(w)

    def train_nn(self, x, y, learn_rate=0.1, epoch=1000):
        """Run `epoch` single-sample gradient steps on training set (x, y).

        x -- 2-D array, one training sample per row
        y -- 1-D array of targets aligned with the rows of x
        """
        for _ in range(epoch):
            # Pick one training sample at random.
            i = np.random.randint(x.shape[0])
            print('隨機索引 i:%d' % i)

            # Forward pass: a[k] holds the activation of layer k.
            a = [x[i]]
            for k in range(len(self.weight)):
                a.append(self.active(np.dot(a[-1], self.weight[k])))
            print('前向')
            for layer_out in a:
                print(layer_out)

            # Output-layer error and delta (squared-error loss).
            error = a[-1] - y[i]
            detal = [error * self.active_derive(a[-1])]

            # Backpropagate deltas through the hidden layers.  For a
            # 1-D delta vector, weight[k] @ delta equals delta @ weight[k].T.
            for k in range(len(a) - 2, 0, -1):
                detal.append(np.dot(self.weight[k], detal[-1])
                             * self.active_derive(a[k]))

            print('detal ')
            for d in detal:
                print(d)

            # Deltas were collected back-to-front; align detal[k] with weight[k].
            detal.reverse()

            print('before update')
            for w in self.weight:
                print(w)

            # Gradient step: weight[k] -= lr * a[k]^T . delta[k]
            for k in range(len(detal)):
                delta_row = np.atleast_2d(detal[k])
                act_row = np.atleast_2d(a[k])
                self.weight[k] -= learn_rate * np.dot(act_row.T, delta_row)

            print('after update')
            for w in self.weight:
                print(w)

    def test_nn(self, x):
        """Forward-propagate one input vector and return the output activation."""
        a = np.atleast_2d(x)
        for k in range(len(self.weight)):
            a = self.active(np.dot(a, self.weight[k]))
        return a
        

# Demo: train a 2-3-4-5-1 tanh network on the XOR truth table, then
# print its prediction for each of the four input patterns.
nn = neuralnetwork([2, 3, 4, 5, 1], 'tanh')

x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y = np.array([0, 1, 1, 0])

nn.train_nn(x, y, 0.2, 3000)
print('start test')
for sample in ([0, 0], [0, 1], [1, 0], [1, 1]):
    print(nn.test_nn(sample))