
Inception (PyTorch Implementation)

GitHub blog link
cnblogs (博客园) blog link

Paper: Going deeper with convolutions

Paper PDF: https://arxiv.org/pdf/1409.4842.pdf

Network architecture diagrams (images in the original post): the Inception module, the detailed network structure, and the layer parameter table.

PyTorch implementation (the code below is the Inception v3 variant of the architecture):

import torch
import torch.nn as nn
import torch.nn.functional as F


class Inception3(nn.Module):

    def __init__(self, num_classes=1000, aux_logits=True, transform_input=False):
        super(Inception3, self).__init__()
        self.aux_logits = aux_logits
        self.transform_input = transform_input
        self.Conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
        self.Conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
        self.Conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
        self.Conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
        self.Conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
        self.Mixed_5b = InceptionA(192, pool_features=32)
        self.Mixed_5c = InceptionA(256, pool_features=64)
        self.Mixed_5d = InceptionA(288, pool_features=64)
        self.Mixed_6a = InceptionB(288)
        self.Mixed_6b = InceptionC(768, channels_7x7=128)
        self.Mixed_6c = InceptionC(768, channels_7x7=160)
        self.Mixed_6d = InceptionC(768, channels_7x7=160)
        self.Mixed_6e = InceptionC(768, channels_7x7=192)
        if aux_logits:
            self.AuxLogits = InceptionAux(768, num_classes)
        self.Mixed_7a = InceptionD(768)
        self.Mixed_7b = InceptionE(1280)
        self.Mixed_7c = InceptionE(2048)
        self.fc = nn.Linear(2048, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                import scipy.stats as stats
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.Tensor(X.rvs(m.weight.data.numel()))
                values = values.view(m.weight.data.size())
                m.weight.data.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def forward(self, x):
        if self.transform_input:
            x = x.clone()
            x[:, 0] = x[:, 0] * (0.229 / 0.5) + (0.485 - 0.5) / 0.5
            x[:, 1] = x[:, 1] * (0.224 / 0.5) + (0.456 - 0.5) / 0.5
            x[:, 2] = x[:, 2] * (0.225 / 0.5) + (0.406 - 0.5) / 0.5
        # 299 x 299 x 3
        x = self.Conv2d_1a_3x3(x)
        # 149 x 149 x 32
        x = self.Conv2d_2a_3x3(x)
        # 147 x 147 x 32
        x = self.Conv2d_2b_3x3(x)
        # 147 x 147 x 64
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 73 x 73 x 64
        x = self.Conv2d_3b_1x1(x)
        # 73 x 73 x 80
        x = self.Conv2d_4a_3x3(x)
        # 71 x 71 x 192
        x = F.max_pool2d(x, kernel_size=3, stride=2)
        # 35 x 35 x 192
        x = self.Mixed_5b(x)
        # 35 x 35 x 256
        x = self.Mixed_5c(x)
        # 35 x 35 x 288
        x = self.Mixed_5d(x)
        # 35 x 35 x 288
        x = self.Mixed_6a(x)
        # 17 x 17 x 768
        x = self.Mixed_6b(x)
        # 17 x 17 x 768
        x = self.Mixed_6c(x)
        # 17 x 17 x 768
        x = self.Mixed_6d(x)
        # 17 x 17 x 768
        x = self.Mixed_6e(x)
        # 17 x 17 x 768
        if self.training and self.aux_logits:
            aux = self.AuxLogits(x)
        # 17 x 17 x 768
        x = self.Mixed_7a(x)
        # 8 x 8 x 1280
        x = self.Mixed_7b(x)
        # 8 x 8 x 2048
        x = self.Mixed_7c(x)
        # 8 x 8 x 2048
        x = F.avg_pool2d(x, kernel_size=8)
        # 1 x 1 x 2048
        x = F.dropout(x, training=self.training)
        # 1 x 1 x 2048
        x = x.view(x.size(0), -1)
        # 2048
        x = self.fc(x)
        # 1000 (num_classes)
        if self.training and self.aux_logits:
            return x, aux
        return x


class InceptionA(nn.Module):

    def __init__(self, in_channels, pool_features):
        super(InceptionA, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 64, kernel_size=1)

        self.branch5x5_1 = BasicConv2d(in_channels, 48, kernel_size=1)
        self.branch5x5_2 = BasicConv2d(48, 64, kernel_size=5, padding=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, padding=1)

        self.branch_pool = BasicConv2d(in_channels, pool_features, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch5x5 = self.branch5x5_1(x)
        branch5x5 = self.branch5x5_2(branch5x5)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionB(nn.Module):

    def __init__(self, in_channels):
        super(InceptionB, self).__init__()
        self.branch3x3 = BasicConv2d(in_channels, 384, kernel_size=3, stride=2)

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 64, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(64, 96, kernel_size=3, padding=1)
        self.branch3x3dbl_3 = BasicConv2d(96, 96, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3(x)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionC(nn.Module):

    def __init__(self, in_channels, channels_7x7):
        super(InceptionC, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 192, kernel_size=1)

        c7 = channels_7x7
        self.branch7x7_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7_2 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7_3 = BasicConv2d(c7, 192, kernel_size=(7, 1), padding=(3, 0))

        self.branch7x7dbl_1 = BasicConv2d(in_channels, c7, kernel_size=1)
        self.branch7x7dbl_2 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_3 = BasicConv2d(c7, c7, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7dbl_4 = BasicConv2d(c7, c7, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7dbl_5 = BasicConv2d(c7, 192, kernel_size=(1, 7), padding=(0, 3))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch7x7 = self.branch7x7_1(x)
        branch7x7 = self.branch7x7_2(branch7x7)
        branch7x7 = self.branch7x7_3(branch7x7)

        branch7x7dbl = self.branch7x7dbl_1(x)
        branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
        branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
        return torch.cat(outputs, 1)


class InceptionD(nn.Module):

    def __init__(self, in_channels):
        super(InceptionD, self).__init__()
        self.branch3x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch3x3_2 = BasicConv2d(192, 320, kernel_size=3, stride=2)

        self.branch7x7x3_1 = BasicConv2d(in_channels, 192, kernel_size=1)
        self.branch7x7x3_2 = BasicConv2d(192, 192, kernel_size=(1, 7), padding=(0, 3))
        self.branch7x7x3_3 = BasicConv2d(192, 192, kernel_size=(7, 1), padding=(3, 0))
        self.branch7x7x3_4 = BasicConv2d(192, 192, kernel_size=3, stride=2)

    def forward(self, x):
        branch3x3 = self.branch3x3_1(x)
        branch3x3 = self.branch3x3_2(branch3x3)

        branch7x7x3 = self.branch7x7x3_1(x)
        branch7x7x3 = self.branch7x7x3_2(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_3(branch7x7x3)
        branch7x7x3 = self.branch7x7x3_4(branch7x7x3)

        branch_pool = F.max_pool2d(x, kernel_size=3, stride=2)

        outputs = [branch3x3, branch7x7x3, branch_pool]
        return torch.cat(outputs, 1)


class InceptionE(nn.Module):

    def __init__(self, in_channels):
        super(InceptionE, self).__init__()
        self.branch1x1 = BasicConv2d(in_channels, 320, kernel_size=1)

        self.branch3x3_1 = BasicConv2d(in_channels, 384, kernel_size=1)
        self.branch3x3_2a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3_2b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch3x3dbl_1 = BasicConv2d(in_channels, 448, kernel_size=1)
        self.branch3x3dbl_2 = BasicConv2d(448, 384, kernel_size=3, padding=1)
        self.branch3x3dbl_3a = BasicConv2d(384, 384, kernel_size=(1, 3), padding=(0, 1))
        self.branch3x3dbl_3b = BasicConv2d(384, 384, kernel_size=(3, 1), padding=(1, 0))

        self.branch_pool = BasicConv2d(in_channels, 192, kernel_size=1)

    def forward(self, x):
        branch1x1 = self.branch1x1(x)

        branch3x3 = self.branch3x3_1(x)
        branch3x3 = [
            self.branch3x3_2a(branch3x3),
            self.branch3x3_2b(branch3x3),
        ]
        branch3x3 = torch.cat(branch3x3, 1)

        branch3x3dbl = self.branch3x3dbl_1(x)
        branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
        branch3x3dbl = [
            self.branch3x3dbl_3a(branch3x3dbl),
            self.branch3x3dbl_3b(branch3x3dbl),
        ]
        branch3x3dbl = torch.cat(branch3x3dbl, 1)

        branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch_pool = self.branch_pool(branch_pool)

        outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
        return torch.cat(outputs, 1)
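The original listing breaks off just after the first line of InceptionAux (at "self.conv0 = BasicConv2d(in_channels, 128"). The code below is a reconstruction, not the author's original text: it completes InceptionAux and the BasicConv2d helper the way the standard torchvision Inception v3 implementation defines them, so that the listing actually runs.

class InceptionAux(nn.Module):

    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.conv0 = BasicConv2d(in_channels, 128, kernel_size=1)
        self.conv1 = BasicConv2d(128, 768, kernel_size=5)
        self.conv1.stddev = 0.01  # as in the torchvision reference code
        self.fc = nn.Linear(768, num_classes)
        self.fc.stddev = 0.001    # picked up by the truncated-normal init in Inception3.__init__

    def forward(self, x):
        # 17 x 17 x 768
        x = F.avg_pool2d(x, kernel_size=5, stride=3)
        # 5 x 5 x 768
        x = self.conv0(x)
        # 5 x 5 x 128
        x = self.conv1(x)
        # 1 x 1 x 768
        x = x.view(x.size(0), -1)
        # 768
        x = self.fc(x)
        # num_classes
        return x


class BasicConv2d(nn.Module):

    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels, eps=0.001)

    def forward(self, x):
        # Conv -> BatchNorm -> ReLU, the basic unit used by every Inception block above
        x = self.conv(x)
        x = F.relu(self.bn(x), inplace=True)
        return x

A quick sanity check (a minimal sketch, assuming all of the classes above are defined in one file): build the model and push a dummy 299 x 299 batch through it.

if __name__ == '__main__':
    model = Inception3(num_classes=1000, aux_logits=True)
    dummy = torch.randn(2, 3, 299, 299)

    # Training mode: forward returns (main logits, auxiliary logits).
    model.train()
    logits, aux = model(dummy)
    print(logits.shape, aux.shape)   # torch.Size([2, 1000]) torch.Size([2, 1000])

    # Eval mode: only the main logits are returned.
    model.eval()
    print(model(dummy).shape)        # torch.Size([2, 1000])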
