使用PyTorch实现LeNet网络。 使用本地文件夹的 Fashion MNIST 对LeNet进行训练和测试。优化算法采用SGD或Adam。 torch.optim.SGD 或 torch.optim.Adam。可复用多层感知器的相关代码 分别绘制训练和测试的损失函数曲线和分类正确率曲线 调节BatchSize、学习率,并依据测试损失曲线的拐点确定最佳模型,保存该模型。 使用测试集测试所保存模型的性能,以混淆矩阵展示。 扩展任务:以旋转的方式扩充测试集,在前述最佳模型上测试扩充后的测试集性能。
时间: 2023-06-15 17:05:28 浏览: 175
以下是使用PyTorch实现LeNet网络的代码,其中优化算法采用了SGD:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
# LeNet architecture for 28x28 single-channel inputs (e.g. Fashion-MNIST).
class LeNet(nn.Module):
    """Classic LeNet-5 variant: two conv+max-pool stages, then three FC layers.

    Input:  (batch, 1, 28, 28) image tensor.
    Output: (batch, 10) raw class logits (no softmax applied).
    """

    def __init__(self):
        super(LeNet, self).__init__()
        # Spatial sizes: 28 -> conv(5) -> 24 -> pool(2) -> 12
        #                   -> conv(5) ->  8 -> pool(2) ->  4
        self.conv1 = nn.Conv2d(1, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 4 * 4, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return class logits of shape (batch, 10) for input images ``x``."""
        features = torch.max_pool2d(torch.relu(self.conv1(x)), 2)
        features = torch.max_pool2d(torch.relu(self.conv2(features)), 2)
        flat = features.view(features.size(0), -1)  # (batch, 16*4*4)
        hidden = torch.relu(self.fc1(flat))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)
# Training loop for one epoch.
def train(model, device, train_loader, optimizer, epoch):
    """Train ``model`` for one epoch and report per-sample loss and accuracy.

    Args:
        model: network to train (updated in place).
        device: torch device batches are moved to.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepping the model's parameters.
        epoch: current epoch index, used only for logging.

    Returns:
        (train_loss, accuracy): mean cross-entropy per sample and accuracy
        in percent over the full training set.
    """
    model.train()
    train_loss = 0.0
    correct = 0
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = nn.functional.cross_entropy(output, target)
        # BUG FIX: cross_entropy defaults to the *mean* over the batch, so the
        # original summed per-batch means and divided by the dataset size,
        # understating the loss by roughly a factor of batch_size. Accumulate
        # the per-sample sum instead so the curve is directly comparable with
        # test(), which uses reduction='sum'.
        train_loss += loss.item() * data.size(0)
        pred = output.argmax(dim=1, keepdim=True)
        correct += pred.eq(target.view_as(pred)).sum().item()
        loss.backward()
        optimizer.step()
    train_loss /= len(train_loader.dataset)
    accuracy = 100. * correct / len(train_loader.dataset)
    print('Train Epoch: {} Average loss: {:.4f} Accuracy: {}/{} ({:.2f}%)'.format(
        epoch, train_loss, correct, len(train_loader.dataset), accuracy))
    return train_loss, accuracy
# 定义测试函数
def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += nn.functional.cross_entropy(output, target, reduction='sum').item()
pred = output.argmax(dim=1, keepdim=True)
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('Test set: Average loss: {:.4f} Accuracy: {}/{} ({:.2f}%)'.format(
test_loss, correct, len(test_loader.dataset), accuracy))
return test_loss, accuracy
# ---------------------------------------------------------------------------
# Script: load Fashion-MNIST, train LeNet, plot loss/accuracy curves, save the
# model with the lowest test loss, and show its confusion matrix.
# ---------------------------------------------------------------------------
# Load the Fashion-MNIST dataset (downloaded into ./data if missing).
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = datasets.FashionMNIST('data', train=True, download=True, transform=transform)
test_dataset = datasets.FashionMNIST('data', train=False, download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=64, shuffle=True)
# Evaluation order does not affect metrics; keep it deterministic.
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=64, shuffle=False)

# Training setup.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LeNet().to(device)
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

# Train and evaluate, checkpointing whenever the test loss improves.
# BUG FIX: the original computed best_epoch from the loss history but saved the
# *last* epoch's weights after the loop, so the "best model" file never matched
# the epoch with the minimum test loss. Save inside the loop instead.
train_losses, train_accuracies = [], []
test_losses, test_accuracies = [], []
n_epochs = 20
best_loss = float('inf')
best_epoch = 0
for epoch in range(1, n_epochs + 1):
    train_loss, train_accuracy = train(model, device, train_loader, optimizer, epoch)
    test_loss, test_accuracy = test(model, device, test_loader)
    train_losses.append(train_loss)
    train_accuracies.append(train_accuracy)
    test_losses.append(test_loss)
    test_accuracies.append(test_accuracy)
    if test_loss < best_loss:
        best_loss = test_loss
        best_epoch = epoch
        torch.save(model.state_dict(), 'lenet_fashion_mnist_best.pt')

# Plot training/test loss and accuracy curves.
epochs = range(1, n_epochs + 1)
plt.figure(figsize=(10, 4))
plt.subplot(1, 2, 1)
plt.plot(epochs, train_losses, 'bo-', label='Training loss')
plt.plot(epochs, test_losses, 'ro-', label='Test loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(epochs, train_accuracies, 'bo-', label='Training accuracy')
plt.plot(epochs, test_accuracies, 'ro-', label='Test accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

print('Best epoch (minimum test loss): {}'.format(best_epoch))

# Reload the best checkpoint and compute its 10x10 confusion matrix on the
# test set (rows = true class, columns = predicted class).
model.load_state_dict(torch.load('lenet_fashion_mnist_best.pt'))
model.eval()
confusion_matrix = torch.zeros(10, 10)
with torch.no_grad():
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        output = model(data)
        pred = output.argmax(dim=1, keepdim=True)
        for t, p in zip(target.view(-1), pred.view(-1)):
            confusion_matrix[t.long(), p.long()] += 1
print('Confusion matrix:')
print(confusion_matrix)
```
运行上述代码即可使用本地文件夹的Fashion MNIST对LeNet进行训练和测试,并绘制训练和测试的损失函数曲线和分类正确率曲线。你可以尝试调节BatchSize、学习率,并依据测试损失曲线的拐点确定最佳模型,保存该模型。最后使用测试集测试所保存模型的性能,并以混淆矩阵展示。如果想扩展任务,可以以旋转的方式扩充测试集,在前述最佳模型上测试扩充后的测试集性能。
阅读全文