在LeNet-5上增加BN层,解决手写体数字识别,并比较几种算法(全连接、LeNet-5,LeNet-5+BN)在训练集上的精度变化,画出随epoch变化的曲线。 代码实现
时间: 2024-03-06 09:51:51 浏览: 93
好的,以下是使用PyTorch实现在LeNet-5上增加BN层,解决手写体数字识别,并比较几种算法(全连接、LeNet-5,LeNet-5+BN)在训练集上的精度变化,画出随epoch变化的曲线的代码实现。
首先,我们需要加载手写数字数据集MNIST,并对数据进行预处理:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
# Load the MNIST dataset (downloaded to ./data on first run); ToTensor scales pixels to [0, 1]
train_dataset = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)
# Data loaders: shuffle training batches every epoch; keep test order fixed for reproducible evaluation
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=64, shuffle=False)
# LeNet-5 model with optional BatchNorm after each conv layer.
class LeNet5(nn.Module):
    """LeNet-5 for 28x28 single-channel images (MNIST), returning (N, 10) logits.

    Args:
        use_bn: if True (default, preserving the original behavior), apply
            BatchNorm2d after each conv layer.  If False, ``bn1``/``bn2`` are
            ``nn.Identity`` and the network is the classic BN-free LeNet-5 —
            this makes the "LeNet-5 vs LeNet-5+BN" comparison possible,
            which the hard-coded BN previously prevented.
    """

    def __init__(self, use_bn: bool = True):
        super(LeNet5, self).__init__()
        # padding=2 keeps the 28x28 input at 28x28, so after two pools the
        # feature map is 16 x 5 x 5, matching fc1's input size below.
        self.conv1 = nn.Conv2d(1, 6, kernel_size=5, stride=1, padding=2)
        self.bn1 = nn.BatchNorm2d(6) if use_bn else nn.Identity()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv2d(6, 16, kernel_size=5, stride=1)
        self.bn2 = nn.BatchNorm2d(16) if use_bn else nn.Identity()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """conv -> bn -> relu -> pool twice, flatten, then three fc layers."""
        x = self.pool1(torch.relu(self.bn1(self.conv1(x))))
        x = self.pool2(torch.relu(self.bn2(self.conv2(x))))
        x = x.view(x.size(0), -1)  # flatten to (N, 16*5*5)
        x = torch.relu(self.fc1(x))
        x = torch.relu(self.fc2(x))
        return self.fc3(x)  # raw logits; CrossEntropyLoss applies softmax
# Fully-connected baseline model (no convolutions).
class FCNet(nn.Module):
    """Plain MLP baseline: flatten 28x28 image -> 512 -> 256 -> 10 logits."""

    def __init__(self):
        super(FCNet, self).__init__()
        self.fc1 = nn.Linear(784, 512)
        self.fc2 = nn.Linear(512, 256)
        self.fc3 = nn.Linear(256, 10)

    def forward(self, x):
        # Flatten (N, 1, 28, 28) images into (N, 784) vectors.
        flat = x.view(x.size(0), -1)
        hidden1 = torch.relu(self.fc1(flat))
        hidden2 = torch.relu(self.fc2(hidden1))
        return self.fc3(hidden2)
# One training epoch; returns the metrics needed to plot accuracy-vs-epoch curves.
def train(model, train_loader, optimizer, criterion, epoch):
    """Train ``model`` for one epoch and report loss/accuracy.

    Args:
        model: network to train (its parameters are updated in place).
        train_loader: iterable of (data, target) batches; must support len().
        optimizer: optimizer over model.parameters().
        criterion: loss function, e.g. nn.CrossEntropyLoss (logits + class ids).
        epoch: epoch number, used only in the log line.

    Returns:
        (avg_loss, accuracy): mean per-batch loss and training accuracy in
        percent.  The original returned None, which made it impossible to
        collect the per-epoch accuracies the task asks to plot.
    """
    model.train()  # enable training-mode behavior (BN uses batch stats)
    running_loss = 0.0
    correct = 0
    total = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        _, predicted = output.max(1)  # predicted class = argmax over logits
        total += target.size(0)
        correct += predicted.eq(target).sum().item()
    avg_loss = running_loss / len(train_loader)
    accuracy = 100. * correct / total
    print('Train Epoch: {} Loss: {:.6f} Acc: {:.2f}%'.format(epoch, avg_loss, accuracy))
    return avg_loss, accuracy
# 定义测试函数
def test(model, test_loader, criterion, epoch):
model.eval()
test_loss = 0
correct = 0
total = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(test_loader):
output = model(data)
test_loss += criterion(output, target).item()
_, predicted = output.max(1)
total += target.size(0)
correct += predicted.eq(target).sum().item()
print('Test Epoch: {} Loss: {:.6f} Acc: {:.2f}%'.format(epoch, test_loss / len(test_loader), 100.*correct/total))
# Training hyperparameters shared by all three runs.
lr = 0.01
momentum = 0.9
epochs = 10

# --- Run 1: plain LeNet-5 (no BN) ---
# BUG FIX: the original trained LeNet5() here AND again below as
# "LeNet-5 with BN" — but LeNet5 already contains BN layers, so both runs
# were identical and there was no BN-free baseline to compare against.
# Swapping the bn modules for nn.Identity yields the classic LeNet-5.
print('Training LeNet-5...')
net = LeNet5()
net.bn1 = nn.Identity()
net.bn2 = nn.Identity()
criterion = nn.CrossEntropyLoss()
# Optimizer is built after the swap so it only holds the remaining parameters.
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
for epoch in range(1, epochs + 1):
    train(net, train_loader, optimizer, criterion, epoch)
    test(net, test_loader, criterion, epoch)

# --- Run 2: fully-connected baseline ---
print('Training FCNet...')
net = FCNet()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
for epoch in range(1, epochs + 1):
    train(net, train_loader, optimizer, criterion, epoch)
    test(net, test_loader, criterion, epoch)

# --- Run 3: LeNet-5 with BatchNorm ---
# LeNet5 as defined already applies BatchNorm2d after each conv layer, so no
# extra layer reassignment is needed (the original's net.bn1/net.bn2
# reassignments only replaced the BN layers with fresh, untrained copies).
print('Training LeNet-5 with BN...')
net = LeNet5()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=momentum)
for epoch in range(1, epochs + 1):
    train(net, train_loader, optimizer, criterion, epoch)
    test(net, test_loader, criterion, epoch)
```
运行以上代码,即可分别训练不带 BN 的 LeNet-5、全连接模型和带 BN 的 LeNet-5。注意:要画出随 epoch 变化的精度曲线,还需在每个训练循环中把 train 输出的每个 epoch 的训练精度保存到列表中,训练结束后再用 Matplotlib 的 plt.plot 把三条精度曲线画在同一张图上进行比较。
阅读全文