
『MXNet』 Part 7: Classifier Demo Walkthrough


Command to extract the archives:

with zipfile.ZipFile('../data/kaggle_cifar10/' + fin, 'r') as zin:
    zin.extractall('../data/kaggle_cifar10/')
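Here fin stands for the name of one downloaded archive; looping over several archives might look like the following sketch (the archive names are assumptions, not necessarily what the competition actually provides):

import zipfile

# Hypothetical archive names; adjust to whatever was actually downloaded.
for fin in ['train.zip', 'test.zip', 'trainLabels.csv.zip']:
    with zipfile.ZipFile('../data/kaggle_cifar10/' + fin, 'r') as zin:
        zin.extractall('../data/kaggle_cifar10/')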

Command to copy a file:

shutil.copy(source_file, destination_file)
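When the destination of shutil.copy is an existing directory, the copied file keeps its original name inside that directory, which is what the reorganization code below relies on. A tiny illustration with made-up paths:

import shutil

# Copies the image into the dog/ class folder as .../train/dog/1.png
# (both paths are hypothetical; the destination folder must already exist).
shutil.copy('../data/kaggle_cifar10/train/1.png',
            '../data/kaggle_cifar10/train_valid_test/train/dog')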

Organizing the data

We start with two folders, '../data/kaggle_cifar10/train' and '../data/kaggle_cifar10/test', plus an index file that maps each file name to its class.

Our goal is to build a copy under a new folder containing three subfolders, train_valid, train, and valid. Each of them holds one folder per class, and each class folder holds the images of that class, as sketched below.
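A rough sketch of the target layout (the class name airplane is just an example):

../data/kaggle_cifar10/train_valid_test/
    train_valid/airplane/*.png   # one folder per class, all labeled images
    train/airplane/*.png         # (1 - valid_ratio) of each class
    valid/airplane/*.png         # the remaining valid_ratio of each class
    test/unknown/*.png           # all test images, label unknown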

import os
import shutil

def reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir, valid_ratio):
    """
    After processing, three new folders hold the data: train_valid, train, valid.
    data_dir: '../data/kaggle_cifar10'
    label_file: 'trainLabels.csv'
    train_dir = 'train'
    test_dir = 'test'
    input_dir = 'train_valid_test'
    valid_ratio = 0.1
    """
    # Read the training labels.
    # Open the csv index: '../data/kaggle_cifar10/trainLabels.csv'
    with open(os.path.join(data_dir, label_file), 'r') as f:
        # Skip the header line (column names).
        lines = f.readlines()[1:]
        tokens = [l.rstrip().split(',') for l in lines]
        # {index: label}
        idx_label = dict(((int(idx), label) for idx, label in tokens))
    # Set of labels
    labels = set(idx_label.values())
    # Number of training images: '../data/kaggle_cifar10/train'
    num_train = len(os.listdir(os.path.join(data_dir, train_dir)))
    # Number of images kept for train (the rest go to valid)
    num_train_tuning = int(num_train * (1 - valid_ratio))
    # <--- sanity check
    assert 0 < num_train_tuning < num_train
    # Number of train images per label
    num_train_tuning_per_label = num_train_tuning // len(labels)
    label_count = dict()

    def mkdir_if_not_exist(path):
        if not os.path.exists(os.path.join(*path)):
            os.makedirs(os.path.join(*path))

    # Organize the training and validation sets.
    # Loop over the training images in '../data/kaggle_cifar10/train'
    for train_file in os.listdir(os.path.join(data_dir, train_dir)):
        # Strip the extension to get the index
        idx = int(train_file.split('.')[0])
        # Map the index to its label
        label = idx_label[idx]

        # '../data/kaggle_cifar10/train_valid_test/train_valid' + label name
        mkdir_if_not_exist([data_dir, input_dir, 'train_valid', label])
        # Copy the image
        shutil.copy(os.path.join(data_dir, train_dir, train_file),
                    os.path.join(data_dir, input_dir, 'train_valid', label))

        # Once each label has enough images in the train folder, the rest go to valid
        if label not in label_count or label_count[label] < num_train_tuning_per_label:
            # '../data/kaggle_cifar10/train_valid_test/train' + label name
            mkdir_if_not_exist([data_dir, input_dir, 'train', label])
            shutil.copy(os.path.join(data_dir, train_dir, train_file),
                        os.path.join(data_dir, input_dir, 'train', label))
            label_count[label] = label_count.get(label, 0) + 1
        else:
            mkdir_if_not_exist([data_dir, input_dir, 'valid', label])
            shutil.copy(os.path.join(data_dir, train_dir, train_file),
                        os.path.join(data_dir, input_dir, 'valid', label))

    # Organize the test set
    # '../data/kaggle_cifar10/train_valid_test/test/unknown' holds the test images
    mkdir_if_not_exist([data_dir, input_dir, 'test', 'unknown'])
    for test_file in os.listdir(os.path.join(data_dir, test_dir)):
        shutil.copy(os.path.join(data_dir, test_dir, test_file),
                    os.path.join(data_dir, input_dir, 'test', 'unknown'))

train_dir = 'train'
test_dir = 'test'
batch_size = 128

data_dir = '../data/kaggle_cifar10'
label_file = 'trainLabels.csv'
input_dir = 'train_valid_test'
valid_ratio = 0.1
reorg_cifar10_data(data_dir, label_file, train_dir, test_dir, input_dir, valid_ratio)
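After the reorganization it is worth double-checking the split. A minimal sketch that counts images per split, reusing the variables defined above:

import os

for split in ['train', 'valid', 'train_valid']:
    split_dir = os.path.join(data_dir, input_dir, split)
    counts = {c: len(os.listdir(os.path.join(split_dir, c)))
              for c in os.listdir(split_dir)}
    print(split, sum(counts.values()), 'images in', len(counts), 'classes')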

Preprocessing

# Preprocessing
from mxnet import autograd
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet.gluon.data import vision
from mxnet.gluon.data.vision import transforms
import numpy as np

transform_train = transforms.Compose([
    # transforms.CenterCrop(32)
    # transforms.RandomFlipTopBottom(),
    # transforms.RandomColorJitter(brightness=0.0, contrast=0.0, saturation=0.0, hue=0.0),
    # transforms.RandomLighting(0.0),
    # transforms.Cast('float32'),
    # transforms.Resize(32),

    # Randomly crop according to scale and ratio, then resize to a 32x32 square
    transforms.RandomResizedCrop(32, scale=(0.08, 1.0), ratio=(3.0/4.0, 4.0/3.0)),
    # Randomly flip the image horizontally
    transforms.RandomFlipLeftRight(),
    # Scale pixel values into (0, 1) and change the layout from "height*width*channel" to "channel*height*width"
    transforms.ToTensor(),
    # Standardize each channel of the image
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

# At test time, no augmentation beyond standardization is needed.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])


# '../data/kaggle_cifar10/train_valid_test/'
input_str = data_dir + '/' + input_dir + '/'

# Read the raw image files. flag=1 means the images have three channels (color).
train_ds = vision.ImageFolderDataset(input_str + 'train', flag=1)
valid_ds = vision.ImageFolderDataset(input_str + 'valid', flag=1)
train_valid_ds = vision.ImageFolderDataset(input_str + 'train_valid', flag=1)
test_ds = vision.ImageFolderDataset(input_str + 'test', flag=1)

loader = gluon.data.DataLoader
train_data = loader(train_ds.transform_first(transform_train),
                    batch_size, shuffle=True, last_batch='keep')
valid_data = loader(valid_ds.transform_first(transform_test),
                    batch_size, shuffle=True, last_batch='keep')
train_valid_data = loader(train_valid_ds.transform_first(transform_train),
                          batch_size, shuffle=True, last_batch='keep')
test_data = loader(test_ds.transform_first(transform_test),
                   batch_size, shuffle=False, last_batch='keep')

# Cross-entropy loss function.
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

The relevant APIs here are mxnet.gluon.data.vision.ImageFolderDataset and mxnet.gluon.data.DataLoader.

The preprocessing is attached at the DataLoader stage via transform_first, so ImageFolderDataset can still be used later to access the original, untransformed images.
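A quick way to see that separation, assuming the datasets and loaders above have been created:

# train_ds returns the raw image and its label...
raw_img, raw_label = train_ds[0]
print(raw_img.shape, raw_img.dtype)   # e.g. (32, 32, 3) uint8, height*width*channel

# ...while batches from train_data have already gone through transform_train.
for data, label in train_data:
    print(data.shape, data.dtype)     # (batch_size, 3, 32, 32) float32
    break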

At this point, data preparation is complete.

Model definition

from mxnet.gluon import nn
from mxnet import nd

class Residual(nn.HybridBlock):
    def __init__(self, channels, same_shape=True, **kwargs):
        super(Residual, self).__init__(**kwargs)
        self.same_shape = same_shape
        with self.name_scope():
            strides = 1 if same_shape else 2
            self.conv1 = nn.Conv2D(channels, kernel_size=3, padding=1,
                                   strides=strides)
            self.bn1 = nn.BatchNorm()
            self.conv2 = nn.Conv2D(channels, kernel_size=3, padding=1)
            self.bn2 = nn.BatchNorm()
            if not same_shape:
                self.conv3 = nn.Conv2D(channels, kernel_size=1,
                                       strides=strides)

    def hybrid_forward(self, F, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        if not self.same_shape:
            x = self.conv3(x)
        return F.relu(out + x)


class ResNet(nn.HybridBlock):
    def __init__(self, num_classes, verbose=False, **kwargs):
        super(ResNet, self).__init__(**kwargs)
        self.verbose = verbose
        with self.name_scope():
            net = self.net = nn.HybridSequential()
            # Block 1
            net.add(nn.Conv2D(channels=32, kernel_size=3, strides=1, padding=1))
            net.add(nn.BatchNorm())
            net.add(nn.Activation(activation='relu'))
            # Block 2
            for _ in range(3):
                net.add(Residual(channels=32))
            # Block 3
            net.add(Residual(channels=64, same_shape=False))
            for _ in range(2):
                net.add(Residual(channels=64))
            # Block 4
            net.add(Residual(channels=128, same_shape=False))
            for _ in range(2):
                net.add(Residual(channels=128))
            # Block 5
            net.add(nn.AvgPool2D(pool_size=8))
            net.add(nn.Flatten())
            net.add(nn.Dense(num_classes))

    def hybrid_forward(self, F, x):
        out = x
        for i, b in enumerate(self.net):
            out = b(out)
            if self.verbose:
                print('Block %d output: %s' % (i + 1, out.shape))
        return out


def get_net(ctx):
    num_outputs = 10
    net = ResNet(num_outputs)
    net.initialize(ctx=ctx, init=init.Xavier())
    return net
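As a quick sanity check of the architecture, a sketch that runs a fake batch through an uninitialized-on-GPU copy of the network on CPU:

from mxnet import nd

net = ResNet(10, verbose=True)
net.initialize(init=init.Xavier())
# A fake batch of four 32x32 RGB images; the final output shape should be (4, 10).
x = nd.random.uniform(shape=(4, 3, 32, 32))
print(net(x).shape)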

Training

The training loop below relies on gb.accuracy(output, label) and gb.evaluate_accuracy(valid_data, net, ctx) from gluonbook, and on trainer.set_learning_rate(trainer.learning_rate * lr_decay) to decay the learning rate every lr_period epochs.

import datetime
import sys
sys.path.append('..')
import gluonbook as gb
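gluonbook is the companion package of the Gluon tutorials; if it is not installed, rough stand-ins for the two helpers might look like this (a sketch under that assumption, not the library's actual code):

def accuracy(output, label):
    # Fraction of samples in the batch whose argmax prediction matches the label.
    return (output.argmax(axis=1) == label.astype('float32')).mean().asscalar()

def evaluate_accuracy(data_iter, net, ctx):
    # Average batch accuracy over a whole DataLoader.
    acc, n = 0.0, 0
    for data, label in data_iter:
        label = label.astype('float32').as_in_context(ctx)
        output = net(data.as_in_context(ctx))
        acc += accuracy(output, label)
        n += 1
    return acc / n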

def train(net, train_data, valid_data, num_epochs, lr, wd, ctx, lr_period, lr_decay):
    trainer = gluon.Trainer(
        net.collect_params(), 'sgd', {'learning_rate': lr, 'momentum': 0.9, 'wd': wd})

    prev_time = datetime.datetime.now()
    for epoch in range(num_epochs):
        train_loss = 0.0
        train_acc = 0.0
        if epoch > 0 and epoch % lr_period == 0:
            trainer.set_learning_rate(trainer.learning_rate * lr_decay)
        for data, label in train_data:
            label = label.astype('float32').as_in_context(ctx)
            with autograd.record():
                output = net(data.as_in_context(ctx))
                loss = softmax_cross_entropy(output, label)
            loss.backward()
            trainer.step(batch_size)
            train_loss += nd.mean(loss).asscalar()
            train_acc += gb.accuracy(output, label)
        cur_time = datetime.datetime.now()
        h, remainder = divmod((cur_time - prev_time).seconds, 3600)
        m, s = divmod(remainder, 60)
        time_str = "Time %02d:%02d:%02d" % (h, m, s)
        if valid_data is not None:
            valid_acc = gb.evaluate_accuracy(valid_data, net, ctx)
            epoch_str = ("Epoch %d. Loss: %f, Train acc %f, Valid acc %f, "
                         % (epoch, train_loss / len(train_data),
                            train_acc / len(train_data), valid_acc))
        else:
            epoch_str = ("Epoch %d. Loss: %f, Train acc %f, "
                         % (epoch, train_loss / len(train_data),
                            train_acc / len(train_data)))
        prev_time = cur_time
        print(epoch_str + time_str + ', lr ' + str(trainer.learning_rate))

Running the training:

ctx = gb.try_gpu()
num_epochs = 1
learning_rate = 0.1
weight_decay = 5e-4
lr_period = 80
lr_decay = 0.1

net = get_net(ctx)
net.hybridize()
train(net, train_data, valid_data, num_epochs, learning_rate,
      weight_decay, ctx, lr_period, lr_decay)

Prediction

import numpy as np
import pandas as pd

# Training on the full train_valid set (no held-out validation)
net = get_net(ctx)
net.hybridize()
train(net, train_valid_data, None, num_epochs, learning_rate,
      weight_decay, ctx, lr_period, lr_decay)

# Prediction
preds = []
for data, label in test_data:
    output = net(data.as_in_context(ctx))
    preds.extend(output.argmax(axis=1).astype(int).asnumpy())


sorted_ids = list(range(1, len(test_ds) + 1))
sorted_ids.sort(key=lambda x: str(x))

df = pd.DataFrame({'id': sorted_ids, 'label': preds})
df['label'] = df['label'].apply(lambda x: train_valid_ds.synsets[x])
df.to_csv('submission.csv', index=False)
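A note on the id ordering: ImageFolderDataset reads the test files in lexicographic file-name order (1.png, 10.png, 100.png, ...), so the entries in preds follow that string order; sorting the numeric ids with key=str lines each id up with the prediction for the same file, and synsets maps the class index back to the class name expected in the submission.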

