
PyTorch Study Notes (3): Linear Regression and Logistic Regression

After getting a feel for some of PyTorch's mechanics, it is time to work through a few examples; after all, practice is the best teacher.
For anyone starting out in machine learning, the first model to learn is, without question, linear regression.
Linear regression fits the label y with a linear combination of the dimensions of the input; the simplest case is fitting a straight line in a two-dimensional plane.
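
Concretely, for a d-dimensional input x = (x_1, ..., x_d), the model predicts

y_hat = w_1*x_1 + w_2*x_2 + ... + w_d*x_d + b

where the weights w_i and the bias b are chosen to minimize the mean squared error between y_hat and the true label y. In two dimensions this reduces to fitting the straight line y = w*x + b.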

To see this in action, we can make up some toy data:

import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
from torch.autograd import Variable

# Toy dataset: y = 3x plus uniform noise
x_train = np.linspace(1, 0.1, 100, dtype=np.float32).reshape(-1, 1)  # shape (100, 1): nn.Linear expects (batch, features)
y = 3 * x_train
y_train = (y + np.random.uniform(0, 1, size=y.shape)).astype(np.float32)

Next comes the body of the model. In PyTorch, every model inherits from nn.Module (or from a subclass of nn.Module).

# Hyper Parameters
input_size = 1
output_size = 1
num_epochs = 60
learning_rate = 0.001

# Linear Regression Model
class LinearRegression(nn.Module):
    def __init__(self, input_size, output_size):
        super(LinearRegression, self).__init__()
        self.linear = nn.Linear(input_size, output_size)

    def forward(self, x):
        out = self.linear(x)
        return out

model = LinearRegression(input_size, output_size)

# Loss and Optimizer
criterion = nn.MSELoss()  # mean squared error: the standard loss for linear regression
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# Train the Model
for epoch in range(num_epochs):
    # Convert numpy arrays to torch Variables
    inputs = Variable(torch.from_numpy(x_train))
    targets = Variable(torch.from_numpy(y_train))

    # Forward + Backward + Optimize
    optimizer.zero_grad()
    outputs = model(inputs)             # forward pass
    loss = criterion(outputs, targets)  # compute loss
    loss.backward()                     # backward pass
    optimizer.step()

    if (epoch+1) % 5 == 0:
        print('Epoch [%d/%d], Loss: %.4f' % (epoch+1, num_epochs, loss.item()))

# Plot the graph
predicted = model(Variable(torch.from_numpy(x_train))).data.numpy()
plt.plot(x_train, y_train, 'ro', label='Original data')
plt.plot(x_train, predicted, label='Fitted line')
plt.legend()
plt.show()

# Save the Model
torch.save(model.state_dict(), 'model.pkl')
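
Since the noise added to the toy data is uniform on (0, 1) with mean 0.5, a well-trained model should learn a slope close to 3 and absorb the noise mean into the bias. A quick sanity check (my addition, assuming the model defined above):

# Inspect the learned parameters: the slope should approach 3, the bias roughly 0.5
w = model.linear.weight.data.item()
b = model.linear.bias.data.item()
print('learned: y = %.2f * x + %.2f' % (w, b))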

Logistic regression, despite the name, is a classification model. Like linear regression it applies a linear transformation to the input, but because the output is passed through a sigmoid function, it is generally not considered a purely linear model. Thanks to the sigmoid, logistic regression handles binary classification with ease. For multi-class problems it generalizes to softmax regression: the softmax function can map any vector to a probability distribution, which has made it the workhorse behind many multi-class tasks, image classification on ImageNet being a well-known example.
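
As a concrete illustration (my addition, not from the original post), the snippet below shows softmax turning an arbitrary score vector into a probability distribution, and sigmoid doing the same for a single score in the binary case:

import torch
import torch.nn.functional as F

logits = torch.tensor([2.0, -1.0, 0.5])  # an arbitrary score vector
probs = F.softmax(logits, dim=0)         # non-negative entries that sum to 1
print(probs, probs.sum())

# Binary case: sigmoid squashes a single score into (0, 1), read as P(y=1|x)
print(torch.sigmoid(torch.tensor(0.8)))
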
Below is a PyTorch example that classifies the MNIST handwritten-digit dataset. No CNN is involved; each image is simply flattened into a one-dimensional vector and passed through a linear layer.

import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable


# Hyper Parameters 
input_size = 784
num_classes = 10
num_epochs = 5
batch_size = 100
learning_rate = 0.001

# MNIST Dataset (Images and Labels)
train_dataset = dsets.MNIST(root='./data', 
                            train=True, 
                            transform=transforms.ToTensor(),
                            download=True)

test_dataset = dsets.MNIST(root='./data', 
                           train=False, 
                           transform=transforms.ToTensor())

# Dataset Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, 
                                           batch_size=batch_size, 
                                           shuffle=True)

test_loader = torch.utils.data.DataLoader(dataset=test_dataset, 
                                          batch_size=batch_size, 
                                          shuffle=False)
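
# Added sketch (not in the original post): peek at one batch to confirm what
# the loader yields, i.e. batch_size images of shape 1x28x28 and one label each.
sample_images, sample_labels = next(iter(train_loader))
print(sample_images.size())   # torch.Size([100, 1, 28, 28])
print(sample_labels.size())   # torch.Size([100])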

# Model
class LogisticRegression(nn.Module):
    def __init__(self, input_size, num_classes):
        super(LogisticRegression, self).__init__()
        self.linear = nn.Linear(input_size, num_classes)

    def forward(self, x):
        out = self.linear(x)
        return out

model = LogisticRegression(input_size, num_classes)

# Loss and Optimizer
# Softmax is internally computed.
# Set parameters to be updated.
criterion = nn.CrossEntropyLoss()  
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)  
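
# Added sketch (not in the original post): CrossEntropyLoss applies LogSoftmax
# internally, so the model outputs raw logits. It is equivalent to an explicit
# log_softmax followed by nll_loss:
import torch.nn.functional as F
fake_logits = torch.randn(4, num_classes)   # a fake batch of raw scores
fake_targets = torch.tensor([3, 0, 9, 1])   # class indices, not one-hot
loss_a = criterion(fake_logits, fake_targets)
loss_b = F.nll_loss(F.log_softmax(fake_logits, dim=1), fake_targets)
print(loss_a.item(), loss_b.item())         # the two values are identical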

# Training the Model
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images.view(-1, 28*28))
        labels = Variable(labels)  # labels are class indices (not one-hot); CrossEntropyLoss expects indices

        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        if (i+1) % 100 == 0:
            print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.4f'
                  % (epoch+1, num_epochs, i+1, len(train_dataset)//batch_size, loss.item()))

# Test the Model
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images.view(-1, 28*28))
    outputs = model(images)
    _, predicted = torch.max(outputs.data, 1)      # index of the largest logit is the predicted class
    total += labels.size(0)
    correct += (predicted == labels).sum().item()  # count correct predictions in this batch

print('Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))

# Save the Model
torch.save(model.state_dict(), 'model.pkl')
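
To reuse the trained weights later, the saved state_dict can be loaded back into a freshly constructed model of the same architecture. A minimal sketch (my addition):

# Rebuild the model and restore the saved weights
model = LogisticRegression(input_size, num_classes)
model.load_state_dict(torch.load('model.pkl'))
model.eval()  # switch to evaluation mode before inference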