Please generate some simple code, runnable in PyCharm, that builds a model with InceptionV3. It should not require any manual steps on my part, must not use Keras, and should use only a few libraries. I want accuracy curves, loss curves, and cross-entropy curves for the training and validation sets.
Sure. Below is simple PyTorch code for a small Inception-style network (a simplified stand-in for InceptionV3). It does not use Keras, relies on only a few libraries, and plots the training- and validation-set accuracy, loss, and cross-entropy curves:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# Data preprocessing: augmentation for training, plain normalization for testing
train_transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomCrop(32, padding=4),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
# Load the CIFAR-10 dataset (downloaded automatically on first run)
train_data = datasets.CIFAR10(root='./data', train=True, download=True, transform=train_transform)
test_data = datasets.CIFAR10(root='./data', train=False, download=True, transform=test_transform)
# Hyperparameters
batch_size = 128
learning_rate = 0.1
num_epochs = 100
# Data loaders
train_loader = DataLoader(dataset=train_data, batch_size=batch_size, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=batch_size, shuffle=False)
# Simplified Inception-style model (InceptionV3-like) for 32x32 CIFAR-10 inputs
class InceptionV3(nn.Module):
    def __init__(self):
        super(InceptionV3, self).__init__()
        self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(64, 80, kernel_size=1, stride=1)
        self.conv4 = nn.Conv2d(80, 192, kernel_size=3, stride=1, padding=1)
        self.inception1 = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception2 = Inception(256, 128, 128, 192, 32, 96, 64)
        self.inception3 = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4 = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception5 = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception6 = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception7 = Inception(528, 256, 160, 320, 32, 128, 128)
        self.inception8 = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception9 = Inception(832, 384, 192, 384, 48, 128, 128)
        # Global average pooling handles the remaining 2x2 feature map
        # (a fixed 8x8 average pool would fail on 32x32 CIFAR-10 inputs)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.linear = nn.Linear(1024, 10)

    def forward(self, x):
        x = nn.functional.relu(self.conv1(x))
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv2(x)), kernel_size=2, stride=2)
        x = nn.functional.relu(self.conv3(x))
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv4(x)), kernel_size=2, stride=2)
        x = self.inception1(x)
        x = self.inception2(x)
        x = nn.functional.max_pool2d(self.inception3(x), kernel_size=2, stride=2)
        x = self.inception4(x)
        x = self.inception5(x)
        x = self.inception6(x)
        x = nn.functional.max_pool2d(self.inception7(x), kernel_size=2, stride=2)
        x = self.inception8(x)
        x = self.avgpool(self.inception9(x))
        x = x.view(x.size(0), -1)
        x = self.linear(x)
        return x
# Inception block: four parallel branches concatenated along the channel dimension
class Inception(nn.Module):
    def __init__(self, in_channels, out1x1, reduce3x3_1, reduce3x3_2, reduce5x5_1, reduce5x5_2, out5x5):
        super(Inception, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out1x1, kernel_size=1, stride=1)
        self.conv2_1 = nn.Conv2d(in_channels, reduce3x3_1, kernel_size=1, stride=1)
        self.conv2_2 = nn.Conv2d(reduce3x3_1, reduce3x3_2, kernel_size=3, stride=1, padding=1)
        self.conv3_1 = nn.Conv2d(in_channels, reduce5x5_1, kernel_size=1, stride=1)
        self.conv3_2 = nn.Conv2d(reduce5x5_1, reduce5x5_2, kernel_size=5, stride=1, padding=2)
        self.conv4_1 = nn.Conv2d(in_channels, out5x5, kernel_size=1, stride=1)
        self.conv4_2 = nn.Conv2d(out5x5, out5x5, kernel_size=3, stride=1, padding=1)

    def forward(self, x):
        out1 = nn.functional.relu(self.conv1(x))
        out2 = nn.functional.relu(self.conv2_2(nn.functional.relu(self.conv2_1(x))))
        out3 = nn.functional.relu(self.conv3_2(nn.functional.relu(self.conv3_1(x))))
        out4 = nn.functional.relu(self.conv4_2(nn.functional.relu(self.conv4_1(x))))
        out = torch.cat([out1, out2, out3, out4], dim=1)
        return out
# Initialize the model, optimizer, and cross-entropy loss
model = InceptionV3()
optimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
criterion = nn.CrossEntropyLoss()
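# Optional (not part of the original script): decay the learning rate during training, e.g.
#     scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)
# and call scheduler.step() once per epoch at the end of the training loop.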
# Train the model, recording per-epoch loss and accuracy for both splits
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
for epoch in range(num_epochs):
    train_loss = 0.0
    train_correct = 0
    model.train()
    for images, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        train_loss += loss.item() * images.size(0)
        _, preds = torch.max(outputs, 1)
        train_correct += (preds == labels).sum().item()
    train_loss /= len(train_loader.dataset)
    train_acc = train_correct / len(train_loader.dataset)
    train_loss_list.append(train_loss)
    train_acc_list.append(train_acc)
    test_loss = 0.0
    test_correct = 0
    model.eval()
    with torch.no_grad():
        for images, labels in test_loader:
            outputs = model(images)
            loss = criterion(outputs, labels)
            test_loss += loss.item() * images.size(0)
            _, preds = torch.max(outputs, 1)
            test_correct += (preds == labels).sum().item()
    test_loss /= len(test_loader.dataset)
    test_acc = test_correct / len(test_loader.dataset)
    test_loss_list.append(test_loss)
    test_acc_list.append(test_acc)
    print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}'
          .format(epoch + 1, num_epochs, train_loss, train_acc, test_loss, test_acc))
# Accuracy curves
plt.plot(train_acc_list, label='Train Acc')
plt.plot(test_acc_list, label='Test Acc')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# Loss curves
plt.plot(train_loss_list, label='Train Loss')
plt.plot(test_loss_list, label='Test Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Cross-entropy curves (identical to the loss curves, since the loss here is cross-entropy)
plt.plot(train_loss_list, label='Train Cross Entropy')
plt.plot(test_loss_list, label='Test Cross Entropy')
plt.xlabel('Epoch')
plt.ylabel('Cross Entropy')
plt.legend()
plt.show()
```
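Training this network for 100 epochs on a CPU will be very slow. If a CUDA-capable GPU is available, a minimal sketch of the extra lines (using only the standard PyTorch device API, applied to the script above) looks like this:

```python
import torch

# Use the GPU when one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Move the model to the chosen device
model = InceptionV3().to(device)

# Inside both the training and evaluation loops, move each batch as well:
#     images, labels = images.to(device), labels.to(device)
```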
Note: PyTorch, torchvision, and matplotlib must be installed before running this code.
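The model defined above is a simplified Inception-style network rather than the full InceptionV3 architecture. If the real InceptionV3 is wanted without Keras, torchvision ships an implementation; the sketch below is an alternative under a few assumptions (the keyword for untrained weights is `weights=None` in recent torchvision releases and `pretrained=False` in older ones, and InceptionV3 expects roughly 299x299 inputs, so the CIFAR-10 images must be resized):

```python
from torchvision import models, transforms

# InceptionV3 was designed for 299x299 inputs, so resize the 32x32 CIFAR-10 images
train_transform = transforms.Compose([
    transforms.Resize(299),
    transforms.RandomHorizontalFlip(),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
test_transform = transforms.Compose([
    transforms.Resize(299),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Train from scratch; aux_logits=False keeps model(x) a single tensor,
# so the training loop above can stay unchanged.
model = models.inception_v3(weights=None, aux_logits=False, num_classes=10)
```

Resizing to 299x299 makes each batch far heavier, so a smaller batch size and a GPU are advisable with this variant.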