
[Kaggle-MNIST Series] A Two-Layer Neural Network in PyTorch (Improved) (Part 2)

Overview

This builds on my previous post. The only change is the loss function.

Switched from CrossEntropyLoss to MultiMarginLoss (a quick comparison sketch follows the results below).

  • Score: 0.81
  • Rank: 2609
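For reference, MultiMarginLoss is a multi-class hinge loss: for one sample with score vector x and target class y it averages max(0, 1 - x[y] + x[i]) over all i != y (default margin 1, p = 1), whereas CrossEntropyLoss applies softmax followed by negative log-likelihood. A minimal sketch comparing the two on made-up logits (the tensor values are purely illustrative, not from this model):

import torch
import torch.nn as nn

scores = torch.tensor([[2.0, 0.5, -1.0]])  # made-up logits for one sample, 3 classes
target = torch.tensor([0])                 # true class index

ce = nn.CrossEntropyLoss()(scores, target)  # softmax + negative log-likelihood
mm = nn.MultiMarginLoss()(scores, target)   # mean of max(0, 1 - x[y] + x[i]) over i != y
print(ce.item(), mm.item())

Here the margin loss is already 0 because the correct class beats every other score by more than the margin, while the cross-entropy term stays positive: the hinge only penalizes classes that violate the margin.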

Code

import pandas as pd
import torch.utils.data as data
import torch
import torch.nn as nn

file = './all/train.csv'
LR = 0.01


class MNISTCSVDataset(data.Dataset):

    def __init__(self, csv_file, Train=True):
        # Keep an iterator over the CSV so each __getitem__ reads the next chunk.
        self.dataframe = pd.read_csv(csv_file, iterator=True)
        self.Train = Train

    def __len__(self):
        # Each item is a 100-row chunk, so the length is the number of chunks
        # (returning the raw row counts would over-run the iterator).
        if self.Train:
            return 420  # 42000 training rows / 100 rows per chunk
        else:
            return 280  # 28000 test rows / 100 rows per chunk

    def __getitem__(self, idx):
        # idx is ignored: chunks are read sequentially from the CSV iterator.
        chunk = self.dataframe.get_chunk(100)
        # .values / .iloc replace the deprecated .as_matrix() / .ix accessors.
        ylabel = chunk['label'].values.astype('float')
        xdata = chunk.iloc[:, 1:].values.astype('float')
        return ylabel, xdata


mydataset = MNISTCSVDataset(file)
train_loader = torch.utils.data.DataLoader(mydataset, batch_size=1, shuffle=True)

# Two-layer fully connected network: 784 -> 100 -> 10.
net = nn.Sequential(
    nn.Linear(28 * 28, 100),
    nn.ReLU(),
    nn.Linear(100, 10)
)

loss_function = nn.MultiMarginLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=LR)

for step, (yl, xd) in enumerate(train_loader):
    output = net(xd.squeeze().float())
    yl = yl.long()
    loss = loss_function(output, yl.squeeze())

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    if step % 20 == 0:
        print('step %d' % step, loss)

torch.save(net, 'divided-net.pkl')
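To produce a Kaggle submission, the saved model can be reloaded and run over the test set. A minimal sketch, assuming the test file lives at './all/test.csv' (that path and the ImageId/Label column names follow the Kaggle MNIST format and are not part of the code above):

import pandas as pd
import torch

net = torch.load('divided-net.pkl')    # reload the full model saved above
test = pd.read_csv('./all/test.csv')   # assumed path; test.csv has no label column
x = torch.from_numpy(test.values).float()
with torch.no_grad():
    pred = net(x).argmax(dim=1)        # predicted digit per row
submission = pd.DataFrame({'ImageId': range(1, len(pred) + 1),
                           'Label': pred.numpy()})
submission.to_csv('submission.csv', index=False)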