用pytorch框架写vgg16猫狗识别训练并打印出其精度和损失函数图像,并使用GPU编写
时间: 2024-05-14 20:16:00 浏览: 115
好的,以下是使用Pytorch框架编写的VGG16猫狗识别模型的代码。
首先,需要导入必要的库和模块。
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as data
import torchvision
from torchvision import transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
```
接下来,定义网络结构。这里我们使用VGG16作为基础结构,并将分类器(三层全连接)最后一层的输出维度设为2,用于猫狗二分类。
```python
class VGG16(nn.Module):
    """VGG16 convolutional network for image classification.

    Five convolutional stages (2-2-3-3-3 conv layers, each stage followed by
    a 2x2 max-pool) and a three-layer fully connected classifier head.

    Expects 3-channel 224x224 inputs: after five poolings the feature map is
    512 x 7 x 7, which matches the hard-coded classifier input size.

    Args:
        num_classes: number of output classes (default 2, i.e. cat vs. dog,
            which preserves the original behaviour).
    """

    def __init__(self, num_classes=2):
        super(VGG16, self).__init__()
        self.features = nn.Sequential(
            # Stage 1: 3 -> 64
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage 2: 64 -> 128
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage 3: 128 -> 256
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage 4: 256 -> 512
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            # Stage 5: 512 -> 512
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        """Map images (N, 3, 224, 224) to class logits (N, num_classes)."""
        x = self.features(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 512*7*7)
        x = self.classifier(x)
        return x
```
然后,定义训练函数。
```python
def train(model, train_loader, criterion, optimizer, device="cuda"):
    """Run one training epoch over ``train_loader``.

    Args:
        model: network to train; must already live on ``device``.
        train_loader: DataLoader yielding ``(inputs, labels)`` batches.
        criterion: loss function, e.g. ``nn.CrossEntropyLoss``.
        optimizer: optimizer bound to ``model``'s parameters.
        device: device batches are moved to. Defaults to ``"cuda"`` to keep
            the original hard-coded ``.cuda()`` behaviour; pass ``"cpu"`` to
            train without a GPU.

    Returns:
        Tuple ``(epoch_loss, epoch_acc)`` averaged over the whole dataset,
        both plain Python floats (safe to print, plot, or serialize).
    """
    model.train()
    running_loss = 0.0
    running_correct = 0
    for inputs, labels in train_loader:
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        # Weight the batch loss by batch size so the epoch mean is exact
        # even when the last batch is smaller.
        running_loss += loss.item() * inputs.size(0)
        _, preds = torch.max(outputs, 1)
        running_correct += (preds == labels).sum().item()
    num_samples = len(train_loader.dataset)
    return running_loss / num_samples, running_correct / num_samples
```
定义测试函数。
```python
def test(model, test_loader, criterion):
model.eval()
test_loss = 0.0
test_acc = 0.0
with torch.no_grad():
for inputs, labels in test_loader:
inputs = inputs.cuda()
labels = labels.cuda()
outputs = model(inputs)
loss = criterion(outputs, labels)
test_loss += loss.item() * inputs.size(0)
_, preds = torch.max(outputs, 1)
test_acc += torch.sum(preds == labels.data)
test_loss = test_loss / len(test_loader.dataset)
test_acc = test_acc.double() / len(test_loader.dataset)
return test_loss, test_acc
```
接下来,加载数据集。
```python
# Training-time preprocessing: light augmentation (flip + small rotation)
# plus the normalization the classifier head's input size depends on.
train_transforms = transforms.Compose([
transforms.Resize((224, 224)),  # VGG16 head hard-codes 512*7*7, so inputs must be 224x224
transforms.RandomHorizontalFlip(),
transforms.RandomRotation(10),
transforms.ToTensor(),
# ImageNet channel means / stds, the convention for VGG-style models
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# Test-time preprocessing: no augmentation, only resize + normalize.
test_transforms = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
# NOTE(review): ImageFolder derives labels from subdirectory names, so
# 'train/' and 'test/' are each expected to contain one folder per class
# (e.g. cat/, dog/) — verify the dataset is laid out this way.
train_set = torchvision.datasets.ImageFolder(root='train/', transform=train_transforms)
test_set = torchvision.datasets.ImageFolder(root='test/', transform=test_transforms)
train_loader = DataLoader(train_set, batch_size=32, shuffle=True)  # shuffle each epoch
test_loader = DataLoader(test_set, batch_size=32, shuffle=False)
```
然后,定义损失函数和优化器。注意:优化器必须在模型实例化之后创建,才能绑定到模型的参数上。
```python
# Cross-entropy loss over the two output logits (cat vs. dog).
criterion = nn.CrossEntropyLoss()
# NOTE(review): `model` is not defined until the training snippet below
# (`model = VGG16().cuda()`), so running the snippets in order raises a
# NameError here — the optimizer must be created *after* the model exists,
# because it binds to the parameter objects passed at construction time.
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
```
最后,训练模型并打印出其精度和损失函数图像。
```python
num_epochs = 20
# Create the model on the GPU *before* (re)creating the optimizer: an
# optimizer only updates the parameter objects it was constructed with,
# so it must be built from this model instance's parameters.
model = VGG16().cuda()
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

# Per-epoch history, stored as plain Python floats so matplotlib can plot
# them directly (CUDA tensors cannot be converted to numpy for plotting).
train_loss_list = []
train_acc_list = []
test_loss_list = []
test_acc_list = []
for epoch in range(num_epochs):
    train_loss, train_acc = train(model, train_loader, criterion, optimizer)
    test_loss, test_acc = test(model, test_loader, criterion)
    # float() is a no-op on floats and moves 0-dim tensors to host values.
    train_loss_list.append(float(train_loss))
    train_acc_list.append(float(train_acc))
    test_loss_list.append(float(test_loss))
    test_acc_list.append(float(test_acc))
    print("Epoch [{}/{}] Train Loss: {:.4f}, Train Acc: {:.4f}, Test Loss: {:.4f}, Test Acc: {:.4f}".format(
        epoch + 1, num_epochs, train_loss, train_acc, test_loss, test_acc))

# Loss curves for train vs. test.
plt.title("Train/Test Loss")
plt.plot(train_loss_list, label="Train")
plt.plot(test_loss_list, label="Test")
plt.legend(loc="upper right")
plt.show()
# Accuracy curves for train vs. test.
plt.title("Train/Test Accuracy")
plt.plot(train_acc_list, label="Train")
plt.plot(test_acc_list, label="Test")
plt.legend(loc="lower right")
plt.show()
```
运行以上代码即可在GPU上训练VGG16猫狗识别模型并打印出其精度和损失函数图像。注意:运行前需准备好 train/ 和 test/ 两个数据目录,且每个目录下按类别(如 cat/、dog/)分子文件夹存放图片,以符合 ImageFolder 对数据组织形式的要求。
阅读全文