Image Denoising with a Convolutional Network: Example Code
Below is example code for image denoising with a convolutional neural network, implemented with the PyTorch framework:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor, Normalize, Compose
from torchvision.utils import save_image

# Hyperparameters
batch_size = 128
learning_rate = 0.001
num_epochs = 100

# Data preprocessing: convert to tensors in [0, 1], then normalize to [-1, 1]
transform = Compose([
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

# Load the CIFAR10 training set
train_dataset = CIFAR10(root='./data', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# Convolutional denoising network: two encoder (downsampling) blocks
# followed by a decoder (upsampling) block
class DenoiseCNN(nn.Module):
    def __init__(self):
        super(DenoiseCNN, self).__init__()
        # Encoder block 1: 3 -> 64 channels, 32x32 -> 16x16
        self.layer1 = nn.Sequential(
            nn.Conv2d(3, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Encoder block 2: 64 -> 256 channels, 16x16 -> 8x8
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Decoder: two stride-2 transposed convolutions undo the two MaxPool
        # layers (8x8 -> 32x32); a third upsampling step would produce 64x64
        # and break the reconstruction loss against the 32x32 targets
        self.layer3 = nn.Sequential(
            nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2),
            nn.BatchNorm2d(128),
            nn.ReLU(),
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            nn.BatchNorm2d(64),
            nn.ReLU(),
            nn.Conv2d(64, 32, kernel_size=3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 3, kernel_size=3, padding=1),
            nn.Tanh()  # output in [-1, 1], matching the normalized inputs
        )

    def forward(self, x):
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        return x
# Instantiate the model, define the loss function and the optimizer
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DenoiseCNN().to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# Training loop
for epoch in range(num_epochs):
    for i, (images, _) in enumerate(train_loader):
        # Corrupt the clean images with Gaussian noise (std 0.1)
        noisy_images = images + torch.randn(images.shape) * 0.1
        noisy_images = torch.clamp(noisy_images, -1.0, 1.0)
        # Move the data to the GPU (if available)
        noisy_images = noisy_images.to(device)
        images = images.to(device)
        # Forward pass: reconstruct the clean images from the noisy inputs
        outputs = model(noisy_images)
        loss = criterion(outputs, images)
        # Backward pass and optimization step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Print the loss every 100 steps
        if (i+1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                  .format(epoch+1, num_epochs, i+1, len(train_loader), loss.item()))
    # Save a checkpoint every 10 epochs
    if (epoch+1) % 10 == 0:
        torch.save(model.state_dict(), 'denoise_cnn_{}.ckpt'.format(epoch+1))
# Test the model: denoise one batch from the test set
test_dataset = CIFAR10(root='./data', train=False, download=True, transform=transform)
test_loader = DataLoader(test_dataset, batch_size=10, shuffle=False)
model.eval()
with torch.no_grad():
    for images, _ in test_loader:
        # Add the same kind of Gaussian noise used during training
        noisy_images = images + torch.randn(images.shape) * 0.1
        noisy_images = torch.clamp(noisy_images, -1.0, 1.0)
        # Move the data to the GPU (if available)
        noisy_images = noisy_images.to(device)
        images = images.to(device)
        # Forward pass
        outputs = model(noisy_images)
        # Map the [-1, 1] output back to [0, 1] before saving the denoised images
        save_image(outputs * 0.5 + 0.5, 'denoise_cnn_output.png')
        break
```
This example uses the CIFAR10 dataset: Gaussian noise is added to the input images, a convolutional neural network reconstructs the clean images, and the denoised results are written to disk. The `DenoiseCNN` class defines the network, which consists of an encoder and a decoder; the decoder uses transposed convolutions (deconvolutions) for upsampling. Training minimizes the mean squared error between the reconstruction and the clean image using the Adam optimizer. At test time, the saved model parameters can be loaded to denoise new images, and the denoised output is saved to a file.
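The checkpoint-loading step mentioned above is not shown in the script itself, which reuses the in-memory model. A minimal inference sketch is given below; it assumes the `DenoiseCNN` class from the script, a saved checkpoint named `denoise_cnn_100.ckpt`, and a hypothetical 32x32 RGB input file `noisy.png`:

```python
# Minimal inference sketch (assumes DenoiseCNN and a checkpoint
# 'denoise_cnn_100.ckpt' produced by the training script above;
# 'noisy.png' is a hypothetical 32x32 RGB input image).
import torch
from PIL import Image
from torchvision.transforms import ToTensor, Normalize, Compose
from torchvision.utils import save_image

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = DenoiseCNN().to(device)
model.load_state_dict(torch.load('denoise_cnn_100.ckpt', map_location=device))
model.eval()

# Same normalization as during training: [0, 1] -> [-1, 1]
transform = Compose([ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
img = transform(Image.open('noisy.png').convert('RGB')).unsqueeze(0).to(device)

with torch.no_grad():
    denoised = model(img)

# Map the [-1, 1] output back to [0, 1] before saving
save_image(denoised * 0.5 + 0.5, 'denoised.png')
```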