A complete Conv3d example in PyTorch
Below is a complete 3D convolution (Conv3d) example for classifying 3D volumetric images.
1. Build the dataset
We downloaded a 3D volumetric image dataset called "Brain tumor dataset", which has two classes: normal (class 0) and tumor (class 1). Each sample is a 155x240x240 3D volume.
Note that torchvision.transforms operates on 2D PIL images and does not apply directly to 3D volumes, so the data augmentation below is implemented with plain tensor operations (a random flip plus normalization) instead.
```python
import os
import random

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset


class CustomDataset(Dataset):
    def __init__(self, data_dir, transform=None):
        self.data_dir = data_dir
        self.transform = transform
        self.file_list = sorted(os.listdir(data_dir))

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, idx):
        img_path = os.path.join(self.data_dir, self.file_list[idx])
        img = np.load(img_path)  # numpy volume of shape (D, H, W), e.g. (155, 240, 240)
        if self.transform:
            img = self.transform(img)
        # the label is encoded in the file name: <name>_<label>.npy
        label = int(self.file_list[idx].split("_")[1].split(".npy")[0])
        return img, label


def volume_transform(volume):
    """Turn a (D, H, W) numpy volume into a normalized (1, D, H, W) float tensor.

    torchvision's 2D transforms (ToPILImage, RandomRotation, ...) expect PIL
    images and cannot handle 3D volumes, so augmentation is done with plain
    tensor operations here.
    """
    tensor = torch.from_numpy(volume.astype(np.float32)).unsqueeze(0)
    if random.random() < 0.5:
        tensor = torch.flip(tensor, dims=[-1])      # random horizontal flip
    tensor = tensor / tensor.max().clamp(min=1e-8)  # scale to [0, 1]
    tensor = (tensor - 0.5) / 0.5                   # normalize to [-1, 1]
    return tensor


def create_datasets(data_dir, batch_size):
    dataset = CustomDataset(data_dir, transform=volume_transform)
    train_size = int(len(dataset) * 0.8)
    test_size = len(dataset) - train_size
    train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])
    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader
```
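If the dataset is not available locally, a few randomly generated volumes are enough to sanity-check the loading pipeline. This is a minimal sketch with assumed names: the temporary directory and the `sample<i>_<label>.npy` file names exist only to match the filename pattern that `__getitem__` above parses, and the volumes are deliberately small so the check runs quickly (the classifier itself expects full 155x240x240 inputs).
```python
import tempfile

# create a handful of synthetic volumes following the <name>_<label>.npy pattern
tmp_dir = tempfile.mkdtemp()
for i in range(8):
    label = i % 2  # alternate between class 0 (normal) and class 1 (tumor)
    volume = np.random.rand(16, 32, 32).astype(np.float32)  # small stand-in volume
    np.save(os.path.join(tmp_dir, 'sample{}_{}.npy'.format(i, label)), volume)

train_loader, test_loader = create_datasets(tmp_dir, batch_size=2)
volumes, labels = next(iter(train_loader))
print(volumes.shape, labels)  # torch.Size([2, 1, 16, 32, 32]) plus a batch of labels
```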
2. Build the 3D CNN model
We build a 3D CNN that stacks four convolution/pooling stages followed by two fully connected layers.
```python
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        # input: (batch, 1, 155, 240, 240)
        self.conv1 = nn.Conv3d(1, 32, kernel_size=3, stride=1, padding=1)
        self.activation1 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool3d(kernel_size=2)
        self.conv2 = nn.Conv3d(32, 64, kernel_size=3, stride=1, padding=1)
        self.activation2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool3d(kernel_size=2)
        self.conv3 = nn.Conv3d(64, 128, kernel_size=3, stride=1, padding=1)
        self.activation3 = nn.ReLU(inplace=True)
        self.pool3 = nn.MaxPool3d(kernel_size=2)
        self.conv4 = nn.Conv3d(128, 256, kernel_size=3, stride=1, padding=1)
        self.activation4 = nn.ReLU(inplace=True)
        self.pool4 = nn.MaxPool3d(kernel_size=2)
        # four stride-2 poolings shrink a 155x240x240 input to 9x15x15 (floor division)
        self.fc1 = nn.Linear(256 * 9 * 15 * 15, 512)
        self.activation5 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(512, 2)

    def forward(self, x):
        x = self.pool1(self.activation1(self.conv1(x)))
        x = self.pool2(self.activation2(self.conv2(x)))
        x = self.pool3(self.activation3(self.conv3(x)))
        x = self.pool4(self.activation4(self.conv4(x)))
        x = x.view(x.size(0), -1)  # flatten all feature maps per sample
        x = self.fc1(x)
        x = self.activation5(x)
        x = self.fc2(x)
        return x
```
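The `256 * 9 * 15 * 15` in `fc1` comes directly from the input size: each `MaxPool3d(kernel_size=2)` halves every spatial dimension with floor division, so four poolings take 155x240x240 down to 9x15x15. The small helper below (hypothetical, just for checking the arithmetic) makes this explicit:
```python
def flattened_size(depth, height, width, channels=256, num_pools=4):
    # each MaxPool3d(kernel_size=2) halves every spatial dim (floor division)
    for _ in range(num_pools):
        depth, height, width = depth // 2, height // 2, width // 2
    return channels * depth * height * width

print(flattened_size(155, 240, 240))  # 518400 == 256 * 9 * 15 * 15
```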
3. Train the model
Next, we train the model with the Adam optimizer and cross-entropy loss. To mitigate overfitting, we also decay the learning rate when the test loss plateaus (ReduceLROnPlateau) and save a checkpoint whenever the test accuracy improves.
```python
def train(model, train_loader, test_loader, num_epochs, learning_rate=0.001, weight_decay=0.0):
    device = next(model.parameters()).device
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.5, patience=5)
    best_acc = 0.0
    for epoch in range(num_epochs):
        model.train()
        train_loss = 0.0
        train_correct = 0
        for inputs, labels in train_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            train_loss += loss.item() * inputs.size(0)
            _, preds = torch.max(outputs, 1)
            train_correct += torch.sum(preds == labels).item()
        train_acc = train_correct / len(train_loader.dataset)
        train_loss = train_loss / len(train_loader.dataset)
        print('Epoch [{}/{}], Train Loss: {:.4f}, Train Acc: {:.4f}'.format(epoch + 1, num_epochs, train_loss, train_acc))

        model.eval()
        test_loss = 0.0
        test_correct = 0
        with torch.no_grad():
            for inputs, labels in test_loader:
                inputs, labels = inputs.to(device), labels.to(device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)
                test_loss += loss.item() * inputs.size(0)
                _, preds = torch.max(outputs, 1)
                test_correct += torch.sum(preds == labels).item()
        test_acc = test_correct / len(test_loader.dataset)
        test_loss = test_loss / len(test_loader.dataset)
        # decay the learning rate when the test loss stops improving
        scheduler.step(test_loss)
        # keep a checkpoint of the best model seen so far
        if test_acc > best_acc:
            best_acc = test_acc
            torch.save(model.state_dict(), 'best_model.pth')
        print('Epoch [{}/{}], Test Loss: {:.4f}, Test Acc: {:.4f}'.format(epoch + 1, num_epochs, test_loss, test_acc))
```
4. Run the model
Finally, we wire the dataset, model, and training loop together and run everything:
```python
def main():
    data_dir = 'Brain_tumor_dataset'
    batch_size = 8
    num_epochs = 100
    train_loader, test_loader = create_datasets(data_dir, batch_size)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = ConvNet().to(device)
    train(model, train_loader, test_loader, num_epochs)


if __name__ == '__main__':
    main()
```
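Once training finishes, the saved checkpoint can be used for inference on a single volume. The sketch below is one possible way to do this, assuming the `best_model.pth` file produced by `train()` above and a full-size 155x240x240 input (the fully connected layer is tied to that shape); the normalization mirrors `volume_transform` but skips the random flip so predictions stay deterministic.
```python
def predict(volume_path, checkpoint='best_model.pth'):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = ConvNet().to(device)
    model.load_state_dict(torch.load(checkpoint, map_location=device))
    model.eval()
    volume = np.load(volume_path)  # expected shape: (155, 240, 240)
    # same normalization as volume_transform, but without the random flip
    tensor = torch.from_numpy(volume.astype(np.float32)).unsqueeze(0)
    tensor = tensor / tensor.max().clamp(min=1e-8)
    tensor = ((tensor - 0.5) / 0.5).unsqueeze(0).to(device)  # (1, 1, D, H, W)
    with torch.no_grad():
        logits = model(tensor)
        pred = logits.argmax(dim=1).item()
    return 'tumor' if pred == 1 else 'normal'
```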