Reference blog: zhiguo98
Personal takeaway: every convolution and pooling layer in an Inception block must use "same" padding, so that all branches produce feature maps with identical spatial dimensions and can be concatenated. The branches differ only in their output channel counts; backpropagation then effectively learns which branch to favor as the intermediate layer.
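A minimal sketch of that "same"-size requirement (the two branches and their channel counts here are illustrative, not the exact module below): with suitable padding, both branch outputs keep the 28x28 spatial size, so they can be concatenated along the channel dimension.

import torch
import torch.nn as nn

x = torch.randn(1, 1, 28, 28)                           # (N, C, H, W), e.g. one MNIST image
branch_a = nn.Conv2d(1, 16, kernel_size=1)              # 1x1 conv keeps spatial size by definition
branch_b = nn.Conv2d(1, 24, kernel_size=5, padding=2)   # 5x5 conv with padding=2 also keeps 28x28
out = torch.cat([branch_a(x), branch_b(x)], dim=1)      # concatenate along the channel dimension
print(out.shape)                                        # torch.Size([1, 40, 28, 28]); channels add, H and W unchanged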
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# Convert images to tensors and normalize with MNIST's mean and std
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.MNIST(root='./data', train=True, transform=transform, download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transform, download=True)
train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)  # no need to shuffle the test set
# Build the Inception module
class MyInception(nn.Module):
    def __init__(self, in_channels):
        super(MyInception, self).__init__()
        # All convolutions and pooling layers below use "same" padding
        # First branch; input is (1, 28, 28)
        self.branch1 = nn.Conv2d(
            in_channels=in_channels,
            out_channels=16,
            kernel_size=1
        )
        # Second branch
        self.branch2 = nn.Sequential(