Various Ways to Implement Gradient Descent in TensorFlow

1. Manual gradient descent, without any TensorFlow gradient or optimizer API
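This first version uses no TensorFlow gradient or optimizer API at all: the gradients are written out by hand and applied with explicit assign ops. For a softmax model pred = softmax(xW + b) trained with cross-entropy, the gradient of the loss with respect to the logits is (pred − y), so the weight gradient has the closed form dL/dW = xᵀ(pred − y) and the bias gradient is the batch sum of (pred − y); the 1/batch_size averaging factor is effectively folded into the learning rate. (Strictly speaking, the b_grad expression in the code below averages the rows of the same xᵀ(y − pred) product instead of using (pred − y) directly, which is not the exact analytic bias gradient, but as the log at the end shows the model still reaches about 91% test accuracy.)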

# -*- coding: utf8 -*-

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1
# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W)+b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

W_grad = - tf.matmul(tf.transpose(x), y - pred)
b_grad = - tf.reduce_mean(tf.matmul(tf.transpose(x), y - pred), reduction_indices=0)

new_W = W.assign(W - learning_rate * W_grad)
new_b = b.assign(b - learning_rate * b_grad)

init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            _, _, c = sess.run([new_W, new_b, cost], feed_dict={x: batch_xs, y: batch_ys})

            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    # test
    acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)), tf.float32))
    print('test acc', acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Optimization Finished!")

'''
Epoch: 0001 cost= 0.435249257
Epoch: 0002 cost= 0.326791120
Epoch: 0003 cost= 0.312421723
Epoch: 0004 cost= 0.306861030
Epoch: 0005 cost= 0.299174942
Epoch: 0006 cost= 0.299132919
Epoch: 0007 cost= 0.296173550
Epoch: 0008 cost= 0.291235593
Epoch: 0009 cost= 0.289357805
Epoch: 0010 cost= 0.287001750
test acc 0.9145
Optimization Finished!
'''

2. Gradient descent with tf.gradients

# Manual SGD step built from tf.gradients (loss_op, X, Y, keep_prob belong to whatever model this is dropped into)
vars = tf.trainable_variables()
vars_grad = tf.gradients(loss_op, vars)
vars_new = []
for i in range(len(vars)):
    vars_new.append(vars[i].assign(vars[i] - learning_rate * vars_grad[i])) # weight update

……
……
sess.run(vars_new, feed_dict={X: batch_x, Y: batch_y, keep_prob: 0.8})

The complete, runnable MNIST version of this pattern:
# -*- coding: utf8 -*-

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W)+b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

# W_grad =  - tf.matmul ( tf.transpose(x) , y - pred)
# b_grad = - tf.reduce_mean( tf.matmul(tf.transpose(x), y - pred), reduction_indices=0)
W_grad, b_grad=tf.gradients(cost,[W,b])

new_W = W.assign(W - learning_rate * W_grad)
new_b = b.assign(b - learning_rate * b_grad)

init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            _, _, c = sess.run([new_W, new_b, cost], feed_dict={x: batch_xs, y: batch_ys})

            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    # test
    acc=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred,1),tf.argmax(y,1)),tf.float32))
    print('test acc',acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Optimization Finished!")

'''
Epoch: 0001 cost= 1.183830844
Epoch: 0002 cost= 0.665357503
Epoch: 0003 cost= 0.552856994
Epoch: 0004 cost= 0.498671508
Epoch: 0005 cost= 0.465461748
Epoch: 0006 cost= 0.442519576
Epoch: 0007 cost= 0.425528774
Epoch: 0008 cost= 0.412182832
Epoch: 0009 cost= 0.401395917
Epoch: 0010 cost= 0.392391636
test acc 0.904
Optimization Finished!
'''
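As a side note (a minimal sketch, not from the original post, assuming the W_grad and b_grad tensors defined above): the two assignments can also be written with assign_sub and bundled into one training op with tf.group, so the training loop only needs to run a single update op alongside the cost:

# bundle the two manual updates into a single op (sketch; assumes W_grad, b_grad from above)
update_W = W.assign_sub(learning_rate * W_grad)
update_b = b.assign_sub(learning_rate * b_grad)
train_op = tf.group(update_W, update_b)
# in the session: _, c = sess.run([train_op, cost], feed_dict={x: batch_xs, y: batch_ys})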

3. Using TensorFlow's built-in optimizers

3.1 minimize

# -*- coding: utf8 -*-

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W)+b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

# W_grad =  - tf.matmul ( tf.transpose(x) , y - pred)
# b_grad = - tf.reduce_mean( tf.matmul(tf.transpose(x), y - pred), reduction_indices=0)
# W_grad, b_grad=tf.gradients(cost,[W,b])
#
# new_W = W.assign(W - learning_rate * W_grad)
# new_b = b.assign(b - learning_rate * b_grad)
train_op = tf.train.AdamOptimizer().minimize(cost)  # note: AdamOptimizer() uses its default learning rate (0.001); the learning_rate defined above is not passed in here

init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            # _, _, c = sess.run([new_W, new_b, cost], feed_dict={x: batch_xs, y: batch_ys})

            _,c=sess.run([train_op,cost],feed_dict={x: batch_xs, y: batch_ys})

            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    # test
    acc=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred,1),tf.argmax(y,1)),tf.float32))
    print('test acc',acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Optimization Finished!")

'''
Epoch: 0001 cost= 0.636883429
Epoch: 0002 cost= 0.353397415
Epoch: 0003 cost= 0.314724692
Epoch: 0004 cost= 0.296492913
Epoch: 0005 cost= 0.285558672
Epoch: 0006 cost= 0.278158906
Epoch: 0007 cost= 0.272399174
Epoch: 0008 cost= 0.268084726
Epoch: 0009 cost= 0.264532625
Epoch: 0010 cost= 0.261652230
test acc 0.9261
Optimization Finished!
'''
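For reference, Optimizer.minimize(cost) in TF 1.x is essentially shorthand for calling compute_gradients followed by apply_gradients on the same optimizer; splitting the two calls, as the next section does, exposes the gradients so they can be inspected or clipped before being applied. Roughly (a sketch of the equivalence, not part of the original code):

optimizer = tf.train.AdamOptimizer()
grads_and_vars = optimizer.compute_gradients(cost)  # list of (gradient, variable) pairs
train_op = optimizer.apply_gradients(grads_and_vars)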

3.2 compute_gradients and apply_gradients

# -*- coding: utf8 -*-

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

# Set model weights
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))

# Construct model
pred = tf.nn.softmax(tf.matmul(x, W)+b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

# W_grad =  - tf.matmul ( tf.transpose(x) , y - pred)
# b_grad = - tf.reduce_mean( tf.matmul(tf.transpose(x), y - pred), reduction_indices=0)
# W_grad, b_grad=tf.gradients(cost,[W,b])
#
# new_W = W.assign(W - learning_rate * W_grad)
# new_b = b.assign(b - learning_rate * b_grad)
# train_op=tf.train.AdamOptimizer().minimize(cost)
optimizer=tf.train.AdamOptimizer()
gradients=optimizer.compute_gradients(cost)
clipped_gradients = [(tf.clip_by_value(_[0], -1, 1), _[1]) for _ in gradients] # each pair is (gradient, variable): _[0] is the gradient, _[1] is the variable it belongs to
train_op = optimizer.apply_gradients(clipped_gradients)
# or, to apply the unclipped gradients directly:
# train_op = optimizer.apply_gradients(gradients)

init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            # _, _, c = sess.run([new_W, new_b, cost], feed_dict={x: batch_xs, y: batch_ys})

            _,c=sess.run([train_op,cost],feed_dict={x: batch_xs, y: batch_ys})

            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    # test
    acc=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred,1),tf.argmax(y,1)),tf.float32))
    print('test acc',acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Optimization Finished!")

'''
Epoch: 0001 cost= 0.635951615
Epoch: 0002 cost= 0.352858345
Epoch: 0003 cost= 0.314492606
Epoch: 0004 cost= 0.296240487
Epoch: 0005 cost= 0.285252309
Epoch: 0006 cost= 0.277979300
Epoch: 0007 cost= 0.272447400
Epoch: 0008 cost= 0.268263275
Epoch: 0009 cost= 0.264513332
Epoch: 0010 cost= 0.261729331
test acc 0.926
Optimization Finished!
'''
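tf.clip_by_value above clips every gradient element independently into [-1, 1]. Another common variant rescales all gradients jointly by their global norm; a minimal sketch (not from the original post, reusing the optimizer and cost defined above, with an arbitrary clip norm of 5.0):

grads, variables = zip(*optimizer.compute_gradients(cost))
clipped_grads, _ = tf.clip_by_global_norm(grads, clip_norm=5.0)
train_op = optimizer.apply_gradients(list(zip(clipped_grads, variables)))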

3.3 Using compute_gradients and apply_gradients to update only the variables in a given variable scope

# -*- coding: utf8 -*-

import tensorflow as tf

# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("./MNIST_data", one_hot=True)

# Parameters
learning_rate = 0.01
training_epochs = 10
batch_size = 100
display_step = 1

# tf Graph Input
x = tf.placeholder(tf.float32, [None, 784]) # mnist data image of shape 28*28=784
y = tf.placeholder(tf.float32, [None, 10]) # 0-9 digits recognition => 10 classes

with tf.variable_scope('D'):
    # Set model weights
    W = tf.Variable(tf.zeros([784, 10]))
    b = tf.Variable(tf.zeros([10]))

    # Construct model
    pred = tf.nn.softmax(tf.matmul(x, W)+b) # Softmax

# Minimize error using cross entropy
cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))

# W_grad =  - tf.matmul ( tf.transpose(x) , y - pred)
# b_grad = - tf.reduce_mean( tf.matmul(tf.transpose(x), y - pred), reduction_indices=0)
# W_grad, b_grad=tf.gradients(cost,[W,b])
#
# new_W = W.assign(W - learning_rate * W_grad)
# new_b = b.assign(b - learning_rate * b_grad)
# train_op=tf.train.AdamOptimizer().minimize(cost)
# optimizer=tf.train.AdamOptimizer()
# gradients=optimizer.compute_gradients(cost)
# clipped_gradients = [(tf.clip_by_value(_[0], -1, 1), _[1]) for _ in gradients] # _[0] 對應dw ,_[1]對應db
# train_op = optimizer.apply_gradients(clipped_gradients)
# or
# train_op = optimizer.apply_gradients(gradients)

tvars = tf.trainable_variables()
d_params = [v for v in tvars if v.name.startswith('D/')]
trainerD = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5)
d_grads = trainerD.compute_gradients(cost, d_params)  # only compute gradients for the variables created under scope 'D'
train_op = trainerD.apply_gradients(d_grads)

init = tf.global_variables_initializer()


with tf.Session() as sess:
    sess.run(init)

    # Training cycle
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Loop over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Fit training using batch data
            # _, _, c = sess.run([new_W, new_b, cost], feed_dict={x: batch_xs, y: batch_ys})

            _,c=sess.run([train_op,cost],feed_dict={x: batch_xs, y: batch_ys})

            # Compute average loss
            avg_cost += c / total_batch
        # Display logs per epoch step
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))

    # test
    acc=tf.reduce_mean(tf.cast(tf.equal(tf.argmax(pred,1),tf.argmax(y,1)),tf.float32))
    print('test acc',acc.eval({x: mnist.test.images, y: mnist.test.labels}))

    print("Optimization Finished!")

'''
Epoch: 0001 cost= 1.246136050
Epoch: 0002 cost= 0.633864975
Epoch: 0003 cost= 0.493198991
Epoch: 0004 cost= 0.429072889
Epoch: 0005 cost= 0.391775173
Epoch: 0006 cost= 0.367063242
Epoch: 0007 cost= 0.349466111
Epoch: 0008 cost= 0.336418079
Epoch: 0009 cost= 0.326132726
Epoch: 0010 cost= 0.318074080
test acc 0.9158
Optimization Finished!
'''
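The same scoped update can also be expressed more compactly (a sketch, not from the original post): tf.get_collection can fetch just the trainable variables created under scope 'D', and minimize() accepts a var_list argument, so the explicit compute/apply split is not strictly required here:

# fetch only the trainable variables created under the 'D' variable scope
d_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='D')
# restrict the update to those variables directly through minimize()
train_op = tf.train.AdamOptimizer(learning_rate=0.0002, beta1=0.5).minimize(cost, var_list=d_params)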