
Linear Support Vector Machine in TensorFlow

# Load the required libraries
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from sklearn import datasets
from tensorflow.python.framework import ops

# Load the dataset and reshape it for easier computation: rows are features, columns are individual samples
iris = datasets.load_iris()
x_vals = iris.data
y_vals = np.array([1 if y==0 else -1 for y in iris.target])  # setosa (class 0) -> +1, other species -> -1
x = np.transpose(x_vals)
y = y_vals.reshape((1,-1))
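# Sanity check: after the transpose, x has shape (4, 150) and y has shape (1, 150),
# i.e. one column per sample, matching the [4, None] placeholder defined below.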


# Split into training and test sets (80/20)
train_indices = np.random.choice(len(x_vals),round(len(x_vals)*0.8),replace=False)
test_indices = np.array(list(set(range(len(x_vals))) - set(train_indices)))
x_train = x[:,train_indices]
x_test = x[:,test_indices]
y_train = y[:,train_indices]
y_test = y[:,test_indices]
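# With the 150 iris samples this gives 120 training columns and 30 test columns.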

# The complete computation-graph model
def model(x_train,y_train,x_test,y_test,learning_rate=0.001):
    ops.reset_default_graph()  # reset the default graph on every call so reruns start clean
    
    X_place = tf.placeholder(tf.float32,[4,None])  # placeholder for the features (4 features x N samples)
    Y_place = tf.placeholder(tf.float32,[1,None])  # placeholder for the targets (1 x N)
    
    w = tf.Variable(tf.random_normal(shape=[1,4]))  # weight vector
    b = tf.Variable(tf.random_normal(shape=[1,1]))  # bias
    
    a = tf.add(tf.matmul(w,X_place),b)  # raw decision values: w.x + b
    
    loss = tf.reduce_mean(tf.maximum(0.,tf.subtract(1.,tf.multiply(a,Y_place))))
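    # The line above is the hinge loss: the mean over samples of max(0, 1 - y*(w.x + b)).
    # A full soft-margin SVM objective would also add an L2 penalty on w, e.g.
    # loss = loss + alpha * tf.reduce_sum(tf.square(w)); that term is omitted here,
    # so this is a plain hinge-loss linear classifier.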
    optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate).minimize(loss)
    
    init = tf.global_variables_initializer()
    
    with tf.Session() as sess:
        sess.run(init)
        train_loss = []
        test_loss = []
        
        for epoch in range(500):
            _,train_cost = sess.run([optimizer,loss],feed_dict={X_place:x_train,Y_place:y_train})
            train_loss.append(train_cost)
            test_cost = sess.run(loss,feed_dict={X_place:x_test,Y_place:y_test})
            test_loss.append(test_cost)
            
            if (epoch+1)%50==0:
                print('epoch '+str(epoch+1)+': cost '+str(train_cost))
                
        w_new = sess.run(w)
        b_new = sess.run(b)
        
        print('w:'+str(w_new) + '   b:'+str(b_new))
        
        # Score the training and test sets with the learned parameters
        predict_train = tf.add(tf.matmul(w_new,tf.cast(x_train,tf.float32)),b_new)
        predict_test = tf.add(tf.matmul(w_new,tf.cast(x_test,tf.float32)),b_new)
        
        prediction = tf.sign(predict_train)  # sign of the margin gives the predicted label (+1/-1)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_train), tf.float32))
        train_accuracy = sess.run(accuracy)
        print('train_accuracy:' + str(train_accuracy))
        
        prediction = tf.sign(predict_test)
        accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, y_test), tf.float32))
        test_accuracy = sess.run(accuracy)
        print('test_accuracy:' + str(test_accuracy))
        
    return train_loss,test_loss 

train_loss,test_loss = model(x_train,y_train,x_test,y_test)
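
Note: the code above uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session,
tf.train.GradientDescentOptimizer). If your environment runs TensorFlow 2.x (an
assumption about your setup, not something the original covers), the standard way to
keep this code running is the v1 compatibility shim:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()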

%matplotlib inline
# Plot the hinge loss over training epochs
plt.plot(train_loss, 'k-', label='Train Loss')
plt.plot(test_loss, 'r--', label='Test Loss')
plt.title('Loss per Epoch')
plt.legend(loc='upper right')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show()
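
As a quick sanity check (a sketch, not part of the original code), the same
setosa-vs-rest task can be fit with scikit-learn's LinearSVC. Setosa is linearly
separable from the other two iris species, so both models should reach accuracy
close to 1.0:

from sklearn.svm import LinearSVC

clf = LinearSVC(C=1.0, max_iter=10000)
clf.fit(x_vals, y_vals)  # scikit-learn expects samples as rows, so use the untransposed arrays
print('sklearn accuracy: ' + str(clf.score(x_vals, y_vals)))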