【Kaggle-MNIST之路】CNN再新增一個層卷積(八)
阿新 • • 發佈:2018-11-22
簡述
- 基於之前的框架
- 【Kaggle-MNIST之路】自定義程式結構(七)
- 得分:0.9914
- 排名:900+
程式碼
- CNN.py 檔案內容
import torch.nn as nn
import torch
class CNN(nn.Module):
    """Three-stage convolutional net for 28x28 single-channel MNIST digits.

    ``layer1``/``layer2`` each stack two 3x3 convs followed by a strided
    5x5 conv (a learned downsample) with BatchNorm and Dropout; ``layer3``
    is a final 4x4 conv that reduces the 4x4 map to 1x1 with 128 channels;
    ``layer4`` maps the flattened ``128 * N`` features to 10 digit logits.

    Spatial trace for a 28x28 input:
    28 -> 26 -> 24 -> 12 (layer1) -> 10 -> 8 -> 4 (layer2) -> 1 (layer3).
    """

    def __init__(self):
        super(CNN, self).__init__()
        # Number of parallel passes whose feature maps are concatenated
        # on the channel axis before the classifier; the Linear input
        # width (128 * N) must stay in sync with it.
        self.N = 1
        self.layer1 = nn.Sequential(
            # input: (1, 28, 28)
            nn.Conv2d(
                in_channels=1,
                out_channels=32,
                kernel_size=3,  # conv filter size
                stride=1,       # filter step
            ),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Conv2d(
                in_channels=32,
                out_channels=32,
                kernel_size=3,  # conv filter size
                stride=1,       # filter step
            ),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            # Strided conv halves the spatial size (24 -> 12).
            nn.Conv2d(
                in_channels=32,
                out_channels=32,
                kernel_size=5,  # conv filter size
                stride=2,       # filter step
                padding=2,
            ),
            nn.ReLU(),
            nn.BatchNorm2d(32),
            nn.Dropout(0.4),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(
                in_channels=32,
                out_channels=64,
                kernel_size=3,  # conv filter size
                stride=1,       # filter step
            ),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Conv2d(
                in_channels=64,
                out_channels=64,
                kernel_size=3,  # conv filter size
                stride=1,       # filter step
            ),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            # Strided conv halves the spatial size (8 -> 4).
            nn.Conv2d(
                in_channels=64,
                out_channels=64,
                kernel_size=5,  # conv filter size
                stride=2,       # filter step
                padding=2,
            ),
            nn.ReLU(),
            nn.BatchNorm2d(64),
            nn.Dropout(0.4),
        )
        self.layer3 = nn.Sequential(
            # 4x4 kernel on a 4x4 map -> 1x1 output, 128 channels.
            nn.Conv2d(
                in_channels=64,
                out_channels=128,
                kernel_size=4,  # conv filter size
                stride=1,       # filter step
            ),
            nn.ReLU(),
            nn.BatchNorm2d(128),
        )
        self.layer4 = nn.Linear(128 * self.N, 10)

    def forward(self, x):
        """Return (batch, 10) class logits for a (batch, 1, 28, 28) input.

        The input is pushed through the conv stack ``N`` times and the
        resulting feature maps are concatenated on the channel axis.
        """
        # Collect the N feature maps in a list and concatenate once.
        # The original seeded the loop with an empty `torch.Tensor()`
        # and cat'ed into it each iteration; that empty 1-D CPU tensor
        # is rejected by recent PyTorch versions for dim=1 concatenation
        # and would mismatch device/dtype on GPU.  The per-iteration
        # `x.clone()` is also dropped: the layers do not modify their
        # input in place, so the copy was pure overhead.
        feats = []
        for _ in range(self.N):
            temp = self.layer1(x)
            temp = self.layer2(temp)
            temp = self.layer3(temp)
            feats.append(temp)
        con = torch.cat(feats, dim=1)  # concat on the channel dim
        con = con.view(con.size(0), -1)  # flatten to (batch, 128 * N)
        return self.layer4(con)