
Deep Learning Framework TensorFlow: Learning and Application, Lesson 2

2-1: Nonlinear Regression

import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt


# Use numpy to generate 200 sample points; [:, np.newaxis] adds a dimension, giving shape (200, 1)
x_data = np.linspace(-0.5,0.5,200)[:,np.newaxis]
noise = np.random.normal(0,0.02,x_data.shape)
y_data = np.square(x_data) + noise

# Define two placeholders
x = tf.placeholder(tf.float32,[None,1])
y = tf.placeholder(tf.float32,[None,1])

# Define the hidden layer of the network
Weights_L1 = tf.Variable(tf.random_normal([1,10]))
biases_L1 = tf.Variable(tf.zeros([1,10]))
Wx_plus_b_L1 = tf.matmul(x,Weights_L1) + biases_L1
L1 = tf.nn.tanh(Wx_plus_b_L1)

# Define the output layer of the network
Weights_L2 = tf.Variable(tf.random_normal([10,1]))
biases_L2 = tf.Variable(tf.zeros([1,1]))
Wx_plus_b_L2 = tf.matmul(L1,Weights_L2) + biases_L2
prediction = tf.nn.tanh(Wx_plus_b_L2)
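# The network is therefore 1 -> 10 -> 1: a single input feature, a 10-unit tanh
# hidden layer, and one tanh output unit that approximates y = x^2 on [-0.5, 0.5]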

# Quadratic cost function (mean squared error)
loss = tf.reduce_mean(tf.square(y-prediction))
# Train with gradient descent
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    for _ in range(2000):
        sess.run(train_step,feed_dict={x:x_data,y:y_data})
        
    # Get the predictions
    prediction_value = sess.run(prediction,feed_dict={x:x_data})
    # Plot the results
    plt.figure()
    plt.scatter(x_data,y_data)
    plt.plot(x_data,prediction_value,'r-',lw=5)
    plt.show()
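
A note on running this example: it uses the TensorFlow 1.x graph API (tf.placeholder, tf.Session). Under TensorFlow 2.x the same script can usually be run through the compatibility module; this is a minimal sketch, not part of the original lesson:

import tensorflow.compat.v1 as tf   # TF 2.x compatibility layer
tf.disable_v2_behavior()            # switch back to graph/Session-style execution
# ...the rest of the script above can then remain unchanged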
    


2-2: MNIST Dataset Classification (Simple Version)

Introduction to the MNIST dataset:

60,000 images in the training set (mnist.train)

10,000 images in the test set (mnist.test)

Each image contains 28*28 pixels

The MNIST labels are the digits 0 to 9
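
Before the classification code, a quick sanity check of the loaded data can help. This is a minimal sketch, not part of the original lesson; it assumes the MNIST_data directory can be created next to the script so the files can be downloaded:

from tensorflow.examples.tutorials.mnist import input_data

# Downloads the data on first run and loads it with one-hot labels
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)
print(mnist.train.images.shape)   # each 28*28 image is flattened into a vector of 784 pixel values
print(mnist.train.labels.shape)   # each label is a one-hot vector of length 10
print(mnist.test.images.shape)    # 10000 test images

Note that read_data_sets by default also splits 5,000 images off into mnist.validation, so mnist.train holds the remaining 55,000.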

from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf


# Load the dataset
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)

# Size of each batch
batch_size = 100
# Compute how many batches there are in total
n_batch = mnist.train.num_examples//batch_size

# Define two placeholders
x = tf.placeholder(tf.float32,[None, 784])
y = tf.placeholder(tf.float32,[None,10])

# Build a simple neural network
W = tf.Variable(tf.zeros([784,10]))
b = tf.Variable(tf.zeros([10]))
prediction = tf.nn.softmax(tf.matmul(x,W)+b)

# Quadratic cost function
loss = tf.reduce_mean(tf.square(y-prediction))
# Use gradient descent
train_step = tf.train.GradientDescentOptimizer(0.2).minimize(loss)
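# Note: a quadratic cost on a softmax output tends to learn slowly; a common
# alternative (not used in this lesson) is the cross-entropy cost, e.g.
# tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=tf.matmul(x,W)+b))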

# Initialize variables
init = tf.global_variables_initializer()

# The results are stored in a list of booleans
# tf.argmax(input, axis=None, name=None, dimension=None) returns the index of the largest value along the given axis
correct_prediction = tf.equal(tf.argmax(y,1),tf.argmax(prediction,1))
# Compute the accuracy
# tf.cast(x, dtype, name=None) casts x to the given dtype
accuracy = tf.reduce_mean(tf.cast(correct_prediction,tf.float32))
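# Worked example with hypothetical values: if tf.argmax(y,1) yields [3, 0, 7] and
# tf.argmax(prediction,1) yields [3, 1, 7], then correct_prediction is
# [True, False, True], tf.cast gives [1.0, 0.0, 1.0], and the accuracy is their mean, 2/3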


with tf.Session() as sess:
    sess.run(init)
    for epoch in range(21):  # train for 21 epochs
        for batch in range(n_batch):
            batch_xs,batch_ys = mnist.train.next_batch(batch_size)
            sess.run(train_step,feed_dict={x:batch_xs,y:batch_ys})
        acc = sess.run(accuracy,feed_dict={x:mnist.test.images,y:mnist.test.labels})
        print("Iter "+str(epoch)+"Test Accuracy " + str(acc)) 

Output: