Can you give a complete example of fundus image segmentation based on PyTorch?
Sure. Below is a simple code example of fundus image segmentation with PyTorch.
First, import the necessary libraries:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms.functional as TF
from torch.utils.data import DataLoader, Dataset
from PIL import Image
import numpy as np
import os
```
Then, define the dataset class:
```python
class EyeDataset(Dataset):
    def __init__(self, img_folder, mask_folder, transform=None):
        self.img_folder = img_folder
        self.mask_folder = mask_folder
        self.transform = transform
        self.images = os.listdir(img_folder)

    def __len__(self):
        return len(self.images)

    def __getitem__(self, index):
        # The mask is assumed to have the same file name as its image
        img_path = os.path.join(self.img_folder, self.images[index])
        mask_path = os.path.join(self.mask_folder, self.images[index])
        image = Image.open(img_path).convert('RGB')
        mask = Image.open(mask_path).convert('L')
        # Resize to a fixed size divisible by 16 (512x512 is an arbitrary choice)
        # so that the U-Net skip connections line up
        image = TF.resize(image, (512, 512))
        mask = TF.resize(mask, (512, 512), interpolation=TF.InterpolationMode.NEAREST)
        if self.transform is not None:
            # Note: geometric augmentations would also have to be applied to the mask
            image = self.transform(image)
        # Convert both to float tensors with values in [0, 1]
        image = TF.to_tensor(image)
        mask = TF.to_tensor(mask)
        return image, mask
```
This class loads a fundus image and its corresponding segmentation mask, resizes them to a fixed size, converts them to PyTorch tensors, and returns them. It assumes each mask shares its image's file name.
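As a quick check, the dataset can be instantiated and one sample inspected; a minimal sketch, assuming the `train_images` and `train_masks` folders used later in this example contain matching image/mask files:
```python
# Minimal sketch: load one (image, mask) pair and check shapes and value range
dataset = EyeDataset('train_images', 'train_masks')
image, mask = dataset[0]
print(image.shape)  # torch.Size([3, 512, 512])
print(mask.shape)   # torch.Size([1, 512, 512])
print(mask.min().item(), mask.max().item())  # values lie in [0, 1]
```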
Next, define the U-Net model:
```python
class DoubleConv(nn.Module):
    """Two 3x3 convolutions, each followed by batch norm and ReLU."""
    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.conv(x)


class UNet(nn.Module):
    def __init__(self):
        super().__init__()
        # Encoder: double conv blocks followed by 2x2 max pooling
        self.down1 = DoubleConv(3, 64)
        self.pool1 = nn.MaxPool2d(2)
        self.down2 = DoubleConv(64, 128)
        self.pool2 = nn.MaxPool2d(2)
        self.down3 = DoubleConv(128, 256)
        self.pool3 = nn.MaxPool2d(2)
        self.down4 = DoubleConv(256, 512)
        self.pool4 = nn.MaxPool2d(2)
        self.center = DoubleConv(512, 1024)
        # Decoder: transposed convolutions for upsampling, each followed by a
        # double conv on the concatenation with the matching encoder feature map
        self.up4 = nn.ConvTranspose2d(1024, 512, 2, stride=2)
        self.upconv4 = DoubleConv(1024, 512)
        self.up3 = nn.ConvTranspose2d(512, 256, 2, stride=2)
        self.upconv3 = DoubleConv(512, 256)
        self.up2 = nn.ConvTranspose2d(256, 128, 2, stride=2)
        self.upconv2 = DoubleConv(256, 128)
        self.up1 = nn.ConvTranspose2d(128, 64, 2, stride=2)
        self.upconv1 = DoubleConv(128, 64)
        # 1x1 convolution maps 64 channels to a single-channel mask
        self.final = nn.Conv2d(64, 1, 1)

    def forward(self, x):
        # Encoder path (keep the pre-pooling outputs for the skip connections)
        down1 = self.down1(x)
        pool1 = self.pool1(down1)
        down2 = self.down2(pool1)
        pool2 = self.pool2(down2)
        down3 = self.down3(pool2)
        pool3 = self.pool3(down3)
        down4 = self.down4(pool3)
        pool4 = self.pool4(down4)
        center = self.center(pool4)
        # Decoder path with skip connections
        up4 = self.up4(center)
        concat4 = torch.cat([down4, up4], dim=1)
        upconv4 = self.upconv4(concat4)
        up3 = self.up3(upconv4)
        concat3 = torch.cat([down3, up3], dim=1)
        upconv3 = self.upconv3(concat3)
        up2 = self.up2(upconv3)
        concat2 = torch.cat([down2, up2], dim=1)
        upconv2 = self.upconv2(concat2)
        up1 = self.up1(upconv2)
        concat1 = torch.cat([down1, up1], dim=1)
        upconv1 = self.upconv1(concat1)
        final = self.final(upconv1)
        # Sigmoid gives per-pixel foreground probabilities for BCELoss
        return torch.sigmoid(final)
```
This model uses the U-Net architecture, a widely used encoder-decoder model for image segmentation.
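Before training, a dummy forward pass is a cheap way to confirm the output shape; a minimal sketch (the 512x512 size matches the resize assumed in the dataset class above):
```python
# Minimal sketch: run a random tensor through the network and check the output
model = UNet()
dummy = torch.randn(1, 3, 512, 512)  # one RGB image
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # torch.Size([1, 1, 512, 512]); values are in (0, 1) after the sigmoid
```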
Next, define the training function:
```python
def train(model, train_loader, criterion, optimizer, device):
    model.train()
    train_loss = 0.0
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
        # Accumulate the per-sample loss so it can be averaged over the dataset
        train_loss += loss.item() * data.size(0)
    train_loss /= len(train_loader.dataset)
    return model, train_loss
```
This function takes the model, the training DataLoader, the loss function, the optimizer, and the device, runs one epoch of training, and returns the model together with the average training loss.
Finally, define the test function:
```python
def test(model, test_loader, criterion, device):
    model.eval()
    test_loss = 0.0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            loss = criterion(output, target)
            # Accumulate the per-sample loss so it can be averaged over the dataset
            test_loss += loss.item() * data.size(0)
    test_loss /= len(test_loader.dataset)
    return test_loss
```
This function takes the model, the test DataLoader, the loss function, and the device, evaluates the model without updating its weights, and returns the average test loss.
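The test loss alone says little about segmentation quality; a Dice coefficient computed from the thresholded predictions is a common companion metric. A minimal sketch (the 0.5 threshold is an arbitrary choice) that could be accumulated inside the same evaluation loop:
```python
def dice_score(output, target, eps=1e-7):
    # Threshold the sigmoid probabilities into a binary mask
    pred = (output > 0.5).float()
    intersection = (pred * target).sum()
    # Dice = 2*|A ∩ B| / (|A| + |B|); eps avoids division by zero on empty masks
    return (2.0 * intersection + eps) / (pred.sum() + target.sum() + eps)
```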
Now everything can be put together and training can start:
```python
# Hyperparameters
BATCH_SIZE = 16
LR = 0.001
EPOCHS = 10

# Use the GPU if one is available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Load the datasets
train_dataset = EyeDataset('train_images', 'train_masks')
test_dataset = EyeDataset('test_images', 'test_masks')
train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE)

# Model, loss function and optimizer
model = UNet().to(device)
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=LR)

# Training loop
for epoch in range(EPOCHS):
    model, train_loss = train(model, train_loader, criterion, optimizer, device)
    test_loss = test(model, test_loader, criterion, device)
    print('Epoch: {}, Train Loss: {:.4f}, Test Loss: {:.4f}'.format(epoch+1, train_loss, test_loss))
```
This code trains the U-Net model with BCELoss as the loss function and Adam as the optimizer. Training runs for several epochs, and the training and test losses are printed after each epoch.
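After training, the weights can be saved and the model used to segment a new image. A minimal sketch, where 'unet_fundus.pth' and 'example.png' are placeholder file names and 0.5 is an arbitrary threshold:
```python
# Save the trained weights (the file name is only an example)
torch.save(model.state_dict(), 'unet_fundus.pth')

# Predict a binary mask for a single fundus image
model.eval()
image = Image.open('example.png').convert('RGB')
image = TF.resize(image, (512, 512))             # same size as used for training
x = TF.to_tensor(image).unsqueeze(0).to(device)  # add a batch dimension
with torch.no_grad():
    prob = model(x)                              # (1, 1, 512, 512) probabilities
pred_mask = (prob > 0.5).float()                 # threshold into a binary mask
TF.to_pil_image(pred_mask.squeeze(0).cpu()).save('example_mask.png')
```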
Hope this example helps!