Implementing a Convolutional Neural Network (LeNet) in PyTorch with Average Pooling and Regularization, Plus Model Prediction
The following is a PyTorch implementation of the LeNet network, including average pooling and regularization:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as dsets
import torchvision.transforms as transforms

# Define the LeNet architecture
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, padding=2)
        self.avgpool1 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5)
        self.avgpool2 = nn.AvgPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(p=0.5)

    def forward(self, x):
        out = self.relu(self.conv1(x))
        out = self.avgpool1(out)
        out = self.relu(self.conv2(out))
        out = self.avgpool2(out)
        out = out.view(out.size(0), -1)   # flatten to (batch, 16*5*5)
        out = self.relu(self.fc1(out))
        out = self.relu(self.fc2(out))
        out = self.dropout(out)           # dropout regularization before the output layer
        out = self.fc3(out)
        return out

# Load the MNIST dataset
train_dataset = dsets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = dsets.MNIST(root='./data', train=False, transform=transforms.ToTensor())

# Hyperparameters
batch_size = 100
learning_rate = 0.001
num_epochs = 10

# Data loaders
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=batch_size, shuffle=False)

# Instantiate the model
model = LeNet()

# Loss function and optimizer (weight_decay adds L2 regularization)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=0.01)

# Train the model
model.train()
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Forward pass
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Step [%d/%d], Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.item()))

# Evaluate the model
model.eval()  # switch to evaluation mode (disables dropout)
correct = 0
total = 0
with torch.no_grad():
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the model on the test images: %.2f %%' % (100 * correct / total))
```
In the code above, `nn.AvgPool2d` implements average pooling, `nn.Dropout` adds dropout regularization in LeNet's fully connected layers, and the `weight_decay=0.01` argument passed to the Adam optimizer applies L2 regularization to the weights. Running the code trains a LeNet network on MNIST and then reports its accuracy on the MNIST test set.
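As a quick sanity check of what average pooling computes, the short standalone sketch below (independent of the model above) applies `nn.AvgPool2d(kernel_size=2, stride=2)` to a small hand-written 4x4 tensor; each output value is simply the mean of one non-overlapping 2x2 window of the input:
```python
import torch
import torch.nn as nn

# Input of shape (batch=1, channels=1, height=4, width=4)
x = torch.tensor([[[[ 1.,  2.,  3.,  4.],
                    [ 5.,  6.,  7.,  8.],
                    [ 9., 10., 11., 12.],
                    [13., 14., 15., 16.]]]])

pool = nn.AvgPool2d(kernel_size=2, stride=2)
print(pool(x))
# tensor([[[[ 3.5000,  5.5000],
#           [11.5000, 13.5000]]]])
# e.g. the top-left output is (1 + 2 + 5 + 6) / 4 = 3.5
```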
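For single-image prediction with the trained model, a minimal sketch is shown below: it takes one sample from the `test_dataset` defined above (index 0 is just an arbitrary choice), adds a batch dimension, runs a forward pass in evaluation mode, and takes the argmax over the 10 class scores as the predicted digit:
```python
# Predict the class of a single test image with the trained model
model.eval()
image, label = test_dataset[0]          # image: tensor of shape (1, 28, 28), label: int
with torch.no_grad():
    output = model(image.unsqueeze(0))  # add batch dimension -> (1, 1, 28, 28)
    predicted = output.argmax(dim=1).item()
print('True label: %d, Predicted: %d' % (label, predicted))
```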