程式人生 > pytorch 自我學習基礎(一) 線性迴歸

pytorch自我學習基礎(一) 線性迴歸

pytorch 版本 0.2.0

#coding=utf-8
from matplotlib import pylab as plt
import numpy as np
import random
import torch.nn as nn
import torch
from torch.autograd import Variable

# ----- Synthetic data for linear regression -----
# Ground-truth model: y = 2*x1 - 3.4*x2 + 4.2 + Gaussian noise.
num_inputs = 2        # feature dimension
num_examples = 1000   # dataset size
true_w = [2, -3.4]    # ground-truth weights
true_b = 4.2          # ground-truth bias

# Features drawn from a standard normal distribution.
features = np.random.randn(num_examples, num_inputs)
print(features.shape)

# Labels from the true linear model, then additive Gaussian noise.
labels = features[:, 0] * true_w[0] + features[:, 1] * true_w[1] + true_b
labels += np.random.normal(scale=1.0, size=labels.shape)
print(labels.shape)

print(features[0], labels[0])

# plt.scatter(features[:, 0], labels, 1)
# plt.show()
batch_size = 10
def data_iter(batch_size, features, labels):
    """Yield (features, labels) minibatches in a random order.

    Args:
        batch_size: examples per batch; the last batch may be smaller.
        features: array of shape (num_examples, num_inputs).
        labels: array of shape (num_examples,).

    Yields:
        (feature_batch, label_batch) pairs covering every example once.
    """
    num_examples = features.shape[0]
    indices = list(range(num_examples))
    # Shuffle so examples are visited in a random order each pass.
    random.shuffle(indices)
    for start in range(0, num_examples, batch_size):
        # A list slice past the end is silently truncated, so no
        # explicit min(start + batch_size, num_examples) is needed.
        batch = indices[start: start + batch_size]
        yield features[batch], labels[batch]
# Model: a stack of three Linear layers with no nonlinearities in
# between, so the composition is still a single affine map — enough
# for linear regression.
net = nn.Sequential(nn.Linear(2, 10),
                    nn.Linear(10, 10),
                    nn.Linear(10, 1))
print(net)

loss_func = nn.MSELoss()
optimser = torch.optim.Adam(net.parameters(), lr=0.001)
for epoch in range(100):
    for X, y in data_iter(batch_size, features, labels):
        X = Variable(torch.from_numpy(X).float())
        # Reshape targets to (batch, 1) to match the network output.
        # Without this, MSELoss broadcasts (batch,) against (batch, 1)
        # into a (batch, batch) difference and computes the wrong loss.
        y = Variable(torch.from_numpy(y).float()).view(-1, 1)
        output = net(X)
        loss = loss_func(output, y)
        optimser.zero_grad()   # clear gradients accumulated by backward()
        loss.backward()
        optimser.step()

    # Report once per epoch (the original printed every minibatch).
    print('epoch %d, loss: %f' % (epoch, loss.data.numpy()))