用resnet18对四种天气分类,自定义归一化方法,采用均值方差归一化方法
时间: 2023-11-04 20:09:14 浏览: 98
好的,下面是使用ResNet18对四种天气分类的示例代码,其中包含自定义均值方差归一化方法的实现:
```python
import torch
import torch.nn as nn
import torchvision.transforms as transforms
import torchvision.datasets as datasets
# Custom normalization transform
class CustomNormalize(object):
    """Channel-wise mean/std normalization for a (C, H, W) image tensor.

    Equivalent in intent to torchvision.transforms.Normalize: for each
    channel c, output[c] = (input[c] - mean[c]) / std[c].
    """

    def __init__(self, mean, std):
        # Reshape to (C, 1, 1) so arithmetic broadcasts over the channel
        # dimension of a (C, H, W) tensor. The original code kept plain
        # Python lists: `x - [m0, m1, m2]` becomes a shape-(3,) tensor that
        # broadcasts against the trailing *width* axis and raises a
        # RuntimeError for any image wider than 3 pixels.
        self.mean = torch.as_tensor(mean, dtype=torch.float32).view(-1, 1, 1)
        self.std = torch.as_tensor(std, dtype=torch.float32).view(-1, 1, 1)

    def __call__(self, x):
        # x: float tensor of shape (C, H, W), e.g. the output of ToTensor().
        return (x - self.mean) / self.std
# Preprocessing pipeline: resize -> center crop -> tensor -> normalization.
_preprocess_steps = [
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    CustomNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
data_transforms = transforms.Compose(_preprocess_steps)

# Datasets: ImageFolder expects one sub-directory per class under root.
train_dataset = datasets.ImageFolder(root='./data/train', transform=data_transforms)
val_dataset = datasets.ImageFolder(root='./data/val', transform=data_transforms)
# Model definition.
# The original page advertised "ResNet18" but built a plain VGG-style stack of
# conv/bn/relu/pool layers with no residual connections at all. This is a
# genuine ResNet-18: a 7x7 stem followed by four stages of two BasicBlocks
# each (64, 128, 256, 512 channels), global average pooling, and a 4-way head.
class _BasicBlock(nn.Module):
    """ResNet basic residual block: two 3x3 convs plus a shortcut.

    A 1x1 projection shortcut is used when the spatial size or channel
    count changes; otherwise the identity is added.
    """

    def __init__(self, in_ch, out_ch, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(in_ch, out_ch, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(out_ch)
        self.conv2 = nn.Conv2d(out_ch, out_ch, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(out_ch)
        self.relu = nn.ReLU(inplace=True)
        if stride != 1 or in_ch != out_ch:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_ch),
            )
        else:
            self.shortcut = nn.Identity()

    def forward(self, x):
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Residual addition, then the final activation.
        return self.relu(out + self.shortcut(x))


def _make_resnet18(num_classes=4):
    """Build a ResNet-18 classifier for `num_classes` output classes."""

    def stage(in_ch, out_ch, stride):
        # Each ResNet-18 stage is two BasicBlocks; only the first may downsample.
        return nn.Sequential(
            _BasicBlock(in_ch, out_ch, stride=stride),
            _BasicBlock(out_ch, out_ch, stride=1),
        )

    return nn.Sequential(
        # Stem: 7x7/2 conv + 3x3/2 max-pool, as in the original ResNet paper.
        nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
        nn.BatchNorm2d(64),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1),
        stage(64, 64, stride=1),
        stage(64, 128, stride=2),
        stage(128, 256, stride=2),
        stage(256, 512, stride=2),
        # AdaptiveAvgPool makes the head independent of the input resolution.
        nn.AdaptiveAvgPool2d((1, 1)),
        nn.Flatten(),
        nn.Linear(512, num_classes),
    )


# Four weather classes.
model = _make_resnet18(num_classes=4)
# Loss and optimizer: cross-entropy over the 4 class logits, SGD with momentum.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
# Training / evaluation helpers
def train(model, dataloader, criterion, optimizer):
    """Run one training epoch and return (mean_batch_loss, accuracy).

    Args:
        model: the network to train (already on its target device).
        dataloader: yields (inputs, labels) batches.
        criterion: loss function, e.g. nn.CrossEntropyLoss.
        optimizer: optimizer over model.parameters().
    """
    model.train()
    # Derive the device from the model instead of hard-coding .cuda();
    # the original crashed on CPU-only machines.
    device = next(model.parameters()).device
    running_loss = 0.0
    correct = 0
    total = 0
    for inputs, labels in dataloader:
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # Predicted class = argmax over logits.
        _, predicted = outputs.max(1)
        total += labels.size(0)
        correct += predicted.eq(labels).sum().item()
    train_loss = running_loss / len(dataloader)
    train_acc = correct / total
    return train_loss, train_acc
def test(model, dataloader, criterion):
    """Evaluate the model and return (mean_batch_loss, accuracy).

    Runs under torch.no_grad() with the model in eval mode; no
    parameters are updated.
    """
    model.eval()
    # Same device fix as train(): follow the model instead of assuming CUDA.
    device = next(model.parameters()).device
    running_loss = 0.0
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, labels in dataloader:
            inputs, labels = inputs.to(device), labels.to(device)
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            running_loss += loss.item()
            _, predicted = outputs.max(1)
            total += labels.size(0)
            correct += predicted.eq(labels).sum().item()
    test_loss = running_loss / len(dataloader)
    test_acc = correct / total
    return test_loss, test_acc
# Training loop
num_epochs = 10
batch_size = 32
train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
val_dataloader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
# Use the GPU when one is available, otherwise fall back to CPU; the
# original hard-coded model.cuda() and crashed on CPU-only machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
for epoch in range(num_epochs):
    train_loss, train_acc = train(model, train_dataloader, criterion, optimizer)
    test_loss, test_acc = test(model, val_dataloader, criterion)
    print('Epoch [%d/%d], Train Loss: %.4f, Train Acc: %.4f, Test Loss: %.4f, Test Acc: %.4f'
          % (epoch+1, num_epochs, train_loss, train_acc, test_loss, test_acc))
```
在上述代码中,我们定义了一个名为`CustomNormalize`的类来实现自定义归一化方法。在数据预处理阶段,我们将图片先调整为256x256大小,并居中裁剪为224x224大小,然后使用`ToTensor()`方法将图片转换为PyTorch张量,并使用自定义的归一化方法对每个通道进行均值方差归一化处理。在模型的训练和测试函数中,我们使用了PyTorch内置的`DataLoader`类来实现数据的批量加载,并在每个epoch结束时输出训练和测试的损失和准确率。
阅读全文