Code for training AlexNet on the CIFAR-10 dataset and generating epoch plots
Sure. Below is an example that uses PyTorch to build an AlexNet model, trains it on the CIFAR-10 dataset, and plots the per-epoch curves at the end:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import matplotlib.pyplot as plt

# Build the AlexNet model
class AlexNet(nn.Module):
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        # Convolutional feature extractor (standard AlexNet layout)
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        # Fully connected classifier head
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x

# Prepare the dataset.
# CIFAR-10 images are 32x32, which is too small for the standard AlexNet stem
# (the stride-4 conv plus the stride-2 pools would shrink the feature map to zero),
# so images are resized to the usual 224x224 AlexNet input.
transform_train = transforms.Compose([
    transforms.RandomCrop(32, padding=4),
    transforms.RandomHorizontalFlip(),
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
transform_test = transforms.Compose([
    transforms.Resize(224),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=128, shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

# Train the model
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = AlexNet().to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

num_epochs = 10
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []

for epoch in range(num_epochs):
    # Training phase
    net.train()
    running_loss = 0.0
    train_acc = 0
    test_acc = 0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        _, predicted = torch.max(outputs.data, 1)
        train_acc += (predicted == labels).sum().item()
    train_loss = running_loss / len(trainloader)
    train_accuracy = 100 * train_acc / len(trainset)
    train_loss_list.append(train_loss)
    train_acc_list.append(train_accuracy)
    print('Epoch %d: Train Loss: %.3f, Train Accuracy: %.3f%%' % (epoch + 1, train_loss, train_accuracy))

    # Evaluation phase (disable dropout and gradient tracking)
    net.eval()
    with torch.no_grad():
        running_loss = 0.0
        for i, data in enumerate(testloader, 0):
            inputs, labels = data[0].to(device), data[1].to(device)
            outputs = net(inputs)
            loss = criterion(outputs, labels)
            running_loss += loss.item()
            _, predicted = torch.max(outputs.data, 1)
            test_acc += (predicted == labels).sum().item()
        test_loss = running_loss / len(testloader)
        test_accuracy = 100 * test_acc / len(testset)
        test_loss_list.append(test_loss)
        test_acc_list.append(test_accuracy)
        print('Epoch %d: Test Loss: %.3f, Test Accuracy: %.3f%%' % (epoch + 1, test_loss, test_accuracy))

# Generate the epoch plots
epochs = range(1, num_epochs + 1)

plt.figure()
plt.plot(epochs, train_loss_list, label='Train Loss')
plt.plot(epochs, test_loss_list, label='Test Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()

plt.figure()
plt.plot(epochs, train_acc_list, label='Train Accuracy')
plt.plot(epochs, test_acc_list, label='Test Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy (%)')
plt.legend()
plt.show()
```
In the example above, we build an AlexNet model with PyTorch and train it on the CIFAR-10 dataset. During training we record the training and test loss and accuracy for each epoch, and after training we use matplotlib to generate two plots showing how the loss and accuracy change over the epochs.
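If you want to keep the epoch plots and the trained weights rather than only displaying them, a minimal follow-up sketch could look like the one below. It reuses the variables from the script above (`train_loss_list`, `test_loss_list`, `num_epochs`, `net`); the file names are arbitrary examples, not fixed conventions.

```python
# Save the loss curve to an image file instead of (or in addition to) plt.show();
# 'loss_per_epoch.png' is just an example file name.
plt.figure()
plt.plot(range(1, num_epochs + 1), train_loss_list, label='Train Loss')
plt.plot(range(1, num_epochs + 1), test_loss_list, label='Test Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.savefig('loss_per_epoch.png', dpi=150, bbox_inches='tight')

# Persist the trained weights so the model can be reloaded later.
torch.save(net.state_dict(), 'alexnet_cifar10.pth')

# Reload the weights for later evaluation or inference.
model = AlexNet()
model.load_state_dict(torch.load('alexnet_cifar10.pth', map_location='cpu'))
model.eval()
```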