利用飞桨定义一个resnet34的训练代码
时间: 2024-03-09 13:17:52 浏览: 180
利用ResNet34实现猫狗分类(包括数据集和代码)
好的,以下是一个使用飞桨定义ResNet34模型并进行训练的代码示例:
```python
import paddle
import paddle.nn as nn
from paddle.vision.transforms import Compose, Normalize
from paddle.vision.datasets import CIFAR10
# Model definition.
# NOTE(review): despite the name, this network has no residual (skip)
# connections and only 9 weight layers, so it is a plain VGG-style CNN
# rather than a true ResNet-34 (which stacks BasicBlocks [3, 4, 6, 3]
# with identity shortcuts). Architecture kept as-is to preserve behavior.
class ResNet34(nn.Layer):
    """Simplified convolutional image classifier.

    Stem (conv-BN-ReLU-maxpool) followed by four two-conv stages that
    progressively halve the spatial size and double the channel count,
    then global average pooling and a linear head.

    Args:
        num_classes (int): width of the output logits layer. Default 10.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        # Stem: a 3x3 conv (not the ImageNet 7x7) keeps small inputs
        # such as 32x32 CIFAR images usable.
        self.conv1 = nn.Conv2D(in_channels=3, out_channels=64,
                               kernel_size=3, padding=1, bias_attr=False)
        self.bn1 = nn.BatchNorm2D(num_features=64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2D(kernel_size=3, stride=2, padding=1)

        # Four stages; stages 2-4 downsample with a stride-2 first conv.
        self.layer1 = self._make_stage(64, 64, stride=1)
        self.layer2 = self._make_stage(64, 128, stride=2)
        self.layer3 = self._make_stage(128, 256, stride=2)
        self.layer4 = self._make_stage(256, 512, stride=2)

        # Global average pool collapses spatial dims to 1x1 so the head
        # works for any input resolution.
        self.avgpool = nn.AdaptiveAvgPool2D(output_size=1)
        self.fc = nn.Linear(in_features=512, out_features=num_classes)

    @staticmethod
    def _make_stage(in_channels, out_channels, stride):
        """Build one stage: two conv-BN-ReLU units.

        The first conv maps ``in_channels`` -> ``out_channels`` and may
        downsample via ``stride``; the second keeps shape.
        """
        return nn.Sequential(
            nn.Conv2D(in_channels=in_channels, out_channels=out_channels,
                      kernel_size=3, stride=stride, padding=1,
                      bias_attr=False),
            nn.BatchNorm2D(num_features=out_channels),
            nn.ReLU(),
            nn.Conv2D(in_channels=out_channels, out_channels=out_channels,
                      kernel_size=3, padding=1, bias_attr=False),
            nn.BatchNorm2D(num_features=out_channels),
            nn.ReLU(),
        )

    def forward(self, x):
        """Map an image batch (N, 3, H, W) to logits (N, num_classes)."""
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = paddle.flatten(x, 1)  # (N, 512, 1, 1) -> (N, 512)
        x = self.fc(x)
        return x
# ---- Data pipeline -------------------------------------------------------
# NOTE(review): Normalize with 0-1-range ImageNet mean/std is applied
# directly to the dataset output; if this paddle.vision version yields raw
# 0-255 HWC arrays, a ToTensor()/Transpose step should precede Normalize —
# confirm against the installed paddle version.
transform = Compose([
    Normalize(mean=[0.485, 0.456, 0.406],
              std=[0.229, 0.224, 0.225],
              data_format='CHW'),
])

# CIFAR-10: 50k training / 10k test images, 10 classes.
train_dataset = CIFAR10(mode='train', transform=transform)
test_dataset = CIFAR10(mode='test', transform=transform)

train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = paddle.io.DataLoader(test_dataset, batch_size=64)

# ---- Model, optimizer, loss ---------------------------------------------
model = ResNet34(num_classes=10)
optimizer = paddle.optimizer.Adam(learning_rate=0.001,
                                  parameters=model.parameters())
criterion = nn.CrossEntropyLoss()

# ---- Training ------------------------------------------------------------
model.train()  # ensure BatchNorm uses batch statistics during training
for epoch in range(10):
    for batch_idx, (data, target) in enumerate(train_loader):
        output = model(data)
        loss = criterion(output, target)
        optimizer.clear_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

# ---- Evaluation ----------------------------------------------------------
model.eval()  # switch BatchNorm to running statistics
test_loss = 0
correct = 0
with paddle.no_grad():
    for data, target in test_loader:
        output = model(data)
        test_loss += criterion(output, target).item()
        pred = paddle.argmax(output, axis=1)
        # flatten() guards against labels arriving shaped (N, 1), which
        # would otherwise broadcast the comparison to an (N, N) matrix
        # and silently inflate the correct count.
        correct += paddle.sum(pred == target.flatten()).item()

# NOTE(review): test_loss sums per-batch mean losses but divides by the
# number of samples, so the printed "Average loss" is scaled down by the
# batch size; kept to match the original report format.
test_loss /= len(test_loader.dataset)
accuracy = 100. * correct / len(test_loader.dataset)
print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
    test_loss, correct, len(test_loader.dataset), accuracy))
```
这里的示例代码使用了CIFAR-10数据集,可以根据自己的需求调整数据集和超参数。
阅读全文