tensorflow100天-第3天:線性迴歸

tensorflow100天-第3天:線性迴歸

tensorflow版

# tensorflow 實現線性迴歸

# Linear regression with the TensorFlow 1.x graph API.
# Fits y = w*x + b to a small toy dataset via per-sample SGD.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# plt.ion()
rng = np.random

# Hyper-parameters
learning_rate = 0.01
training_epochs = 1000  # total passes over the dataset
display_step = 50       # log cost every `display_step` epochs

# Training Data
train_x = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                      7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1])
train_y = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                      2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3])
num_samples = train_x.shape[0]

# Graph inputs: fed one scalar at a time during training,
# and the whole arrays when evaluating the cost.
x = tf.placeholder(tf.float32)
y = tf.placeholder(tf.float32)

# Trainable parameters, randomly initialized.
w = tf.Variable(rng.randn(), name='weight')
b = tf.Variable(rng.randn(), name='bias')

predict = w * x + b
# Half mean squared error: sum((predict - y)^2) / (2 * num_samples).
cost = tf.reduce_sum(tf.pow(predict - y, 2)) / (2 * num_samples)
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

# TF1 variables must be explicitly initialized before any run() uses them.
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        # Stochastic updates: one (x, y) sample per optimizer step.
        for (tx, ty) in zip(train_x, train_y):
            sess.run(optimizer, feed_dict={x: tx, y: ty})
        if (epoch + 1) % display_step == 0:
            c = sess.run(cost, feed_dict={x: train_x, y: train_y})
            print('epoch:', '%04d' % (epoch + 1), 'cost=', '{:.9f}'.format(c),
                  'w=', sess.run(w), 'b=', sess.run(b))
    # Plot the data against the learned line.
    plt.plot(train_x, train_y, 'ro', label='original data')
    plt.plot(train_x, sess.run(w) * train_x + sess.run(b), label='fitted data')
    plt.legend()
    plt.show()

pytorch版

# Linear regression with PyTorch: fit y = w*x + b to a toy dataset
# by full-batch SGD on an nn.Linear(1, 1) model.
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt


# Hyper-parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Toy dataset: 17 scalar (feature, target) pairs.
x_train = np.asarray([3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                      7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1], dtype=np.float32)
y_train = np.asarray([1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                      2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3], dtype=np.float32)
# Reshape to (num_samples, 1) column vectors as nn.Linear expects.
x_train = np.expand_dims(x_train, axis=1)
y_train = np.expand_dims(y_train, axis=1)

# Single-feature linear model.
model = nn.Linear(input_size, output_size)

# Mean-squared-error loss, plain SGD over the model's weight and bias.
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# The arrays never change, so convert them once; from_numpy shares memory
# with the underlying numpy buffers.
features = torch.from_numpy(x_train)
labels = torch.from_numpy(y_train)

# Full-batch training loop.
for epoch in range(num_epochs):
    # Forward pass.
    predictions = model(features)
    loss = criterion(predictions, labels)

    # Backward pass and parameter update.
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    # Periodic progress report.
    if (epoch + 1) % 5 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Plot the data against the fitted line.
predicted = model(torch.from_numpy(x_train)).detach().numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')

(此處原文附有擬合結果的圖片)

對比

  • tensorflow需要將可學習的引數定義為tf.Variable,同時附加了一個‘name’屬性,用於在計算圖視覺化(如TensorBoard)中顯示;
  • tensorflow的可學習引數必須初始化,就是tf.Session().run(tf.global_variables_initializer()),不然會報錯。pytorch模型的實現多使用nn模組,可學習引數可不用初始化,也可採用指定的方式初始化;
  • tensorflow支援numpy資料直接送入網路,而pytorch需要轉換成tensor;torch.from_numpy()與torch.tensor()都能完成轉換,但前者與原numpy陣列共享記憶體,後者會複製資料。
  • tensorflow搭建好模型之後,只需要batchsize的喂入模型資料就好了,feed_dict={},pytorch需要
loss = criterion(outputs, targets)
 # Backward and optimize
 optimizer.zero_grad()
 loss.backward()
optimizer.step()
  • pytorch支援ide的單步除錯,對理解演算法有好處,tensorflow封裝嚴重,不支援單步除錯,但是社群廣,功能多,拓展多。