Give the PyTorch code for Inception v2
Below is a PyTorch implementation of Inception V2, including the model definition and the training loop:
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
class InceptionV2(nn.Module):
    def __init__(self):
        super(InceptionV2, self).__init__()
        # Stem: 7x7 conv, max pool, LRN, 1x1/3x3 convs, LRN, max pool
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.norm1 = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1)
        self.conv2_1 = nn.Conv2d(64, 64, kernel_size=1)
        self.conv2_2 = nn.Conv2d(64, 192, kernel_size=3, padding=1)
        self.norm2 = nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1)
        self.pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        # Inception blocks: each module's total output channels equal the
        # in_channels of the module that follows it
        self.inception3a = InceptionModule(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = InceptionModule(256, 128, 128, 192, 32, 96, 64)
        self.pool3 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.inception4a = InceptionModule(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = InceptionModule(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = InceptionModule(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = InceptionModule(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = InceptionModule(528, 256, 160, 320, 32, 128, 128)
        self.pool4 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.inception5a = InceptionModule(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = InceptionModule(832, 384, 192, 384, 48, 128, 128)
        # Global average pooling; adaptive pooling works for any spatial size
        # (a fixed 7x7 average pool would fail on the 32x32 CIFAR-10 images used below)
        self.pool5 = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(p=0.4)
        self.linear = nn.Linear(1024, 10)  # 10 classes for CIFAR-10
    def forward(self, x):
        # ReLU follows each stem convolution, as in GoogLeNet
        x = F.relu(self.conv1(x))
        x = self.pool1(x)
        x = self.norm1(x)
        x = F.relu(self.conv2_1(x))
        x = F.relu(self.conv2_2(x))
        x = self.norm2(x)
        x = self.pool2(x)
        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.pool3(x)
        x = self.inception4a(x)
        x = self.inception4b(x)
        x = self.inception4c(x)
        x = self.inception4d(x)
        x = self.inception4e(x)
        x = self.pool4(x)
        x = self.inception5a(x)
        x = self.inception5b(x)
        x = self.pool5(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 1024)
        x = self.dropout(x)
        x = self.linear(x)
        return x
class InceptionModule(nn.Module):
    def __init__(self, in_channels, out1x1, reduce3x3, out3x3, reduce5x5, out5x5, pool_proj):
        super(InceptionModule, self).__init__()
        # Branch 1: 1x1 convolution
        self.branch1 = nn.Conv2d(in_channels, out1x1, kernel_size=1)
        # Branch 2: 1x1 reduction followed by 3x3 convolution
        self.branch2_1 = nn.Conv2d(in_channels, reduce3x3, kernel_size=1)
        self.branch2_2 = nn.Conv2d(reduce3x3, out3x3, kernel_size=3, padding=1)
        # Branch 3: 1x1 reduction followed by 5x5 convolution
        self.branch3_1 = nn.Conv2d(in_channels, reduce5x5, kernel_size=1)
        self.branch3_2 = nn.Conv2d(reduce5x5, out5x5, kernel_size=5, padding=2)
        # Branch 4: 3x3 max pooling followed by 1x1 projection
        self.branch4_1 = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
        self.branch4_2 = nn.Conv2d(in_channels, pool_proj, kernel_size=1)

    def forward(self, x):
        branch1 = F.relu(self.branch1(x))
        branch2 = F.relu(self.branch2_1(x))
        branch2 = F.relu(self.branch2_2(branch2))
        branch3 = F.relu(self.branch3_1(x))
        branch3 = F.relu(self.branch3_2(branch3))
        branch4 = self.branch4_1(x)
        branch4 = F.relu(self.branch4_2(branch4))
        # Concatenate along the channel dimension:
        # output channels = out1x1 + out3x3 + out5x5 + pool_proj
        outputs = [branch1, branch2, branch3, branch4]
        return torch.cat(outputs, 1)
# Load CIFAR-10 data
train_transform = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
train_dataset = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=4, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=4, pin_memory=True)
# Define the model, loss function, and optimizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = InceptionV2().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=0.0001)
# Train the model
num_epochs = 100
for epoch in range(num_epochs):
    model.train()
    for i, (inputs, targets) in enumerate(train_loader):
        inputs, targets = inputs.to(device), targets.to(device)
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if i % 100 == 0:
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(train_loader), loss.item()))
    # Evaluate on the test set after each epoch
    model.eval()
    with torch.no_grad():
        correct = 0
        total = 0
        for inputs, targets in test_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            _, predicted = torch.max(outputs.data, 1)
            total += targets.size(0)
            correct += (predicted == targets).sum().item()
        print('Epoch [%d/%d] Test Accuracy: %.2f %%'
              % (epoch + 1, num_epochs, 100 * correct / total))
```
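After the classes above are defined, a quick forward pass on a dummy batch is a handy sanity check. This is a minimal sketch assuming 32x32 CIFAR-10-sized inputs and the `InceptionV2` class from the snippet above:
```python
import torch

# Sanity check: run one dummy batch through the network defined above
model = InceptionV2()
dummy = torch.randn(2, 3, 32, 32)   # two 32x32 RGB images, CIFAR-10 sized
out = model(dummy)
print(out.shape)                     # expected: torch.Size([2, 10])
```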
Note that this snippet is a simplified implementation and differs from the paper: the layer configuration above actually follows the original GoogLeNet (Inception v1) design, with local response normalization and 5x5 convolutions, and it omits the batch normalization that defines Inception v2 (BN-Inception) as well as GoogLeNet's auxiliary classifiers. A faithful Inception v2 would follow every convolution with BatchNorm and ReLU and replace each 5x5 convolution with two stacked 3x3 convolutions.
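As an illustration of that difference, the sketch below shows a Conv-BN-ReLU building block and how the 5x5 branch of an Inception module could be rewritten with two stacked 3x3 convolutions. `BasicConv2d` and the channel sizes are illustrative assumptions, not part of the code above:
```python
import torch.nn as nn
import torch.nn.functional as F

class BasicConv2d(nn.Module):
    """Convolution + batch norm + ReLU (illustrative sketch, not the code above)."""
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        return F.relu(self.bn(self.conv(x)), inplace=True)

# The former 5x5 branch (1x1 reduce then 5x5 conv) expressed as a 1x1 reduce
# followed by two 3x3 convolutions, in the spirit of BN-Inception / Inception v2.
# Channel sizes (192 -> 16 -> 32 -> 32) mirror the inception3a module above.
branch3 = nn.Sequential(
    BasicConv2d(192, 16, kernel_size=1),
    BasicConv2d(16, 32, kernel_size=3, padding=1),
    BasicConv2d(32, 32, kernel_size=3, padding=1),
)
```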