去噪自编码器pytorch
时间: 2023-09-29 21:02:56 浏览: 145
以下是一个基于PyTorch实现的去噪自编码器:
```python
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
class Autoencoder(nn.Module):
    """Convolutional denoising autoencoder for 1-channel 28x28 images (e.g. MNIST).

    The encoder downsamples 28 -> 14 -> 7 spatially, then a 7x7 conv collapses
    to a 64-channel 1x1 code; the decoder mirrors it back to 28x28 and a final
    Sigmoid keeps pixel values in [0, 1].
    """

    def __init__(self):
        super().__init__()

        # Encoder: two strided convs halve the resolution, a 7x7 conv
        # squeezes the 7x7 map into a 1x1 latent code.
        enc_layers = [
            nn.Conv2d(1, 16, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=7),
        ]
        self.encoder = nn.Sequential(*enc_layers)

        # Decoder: exact mirror of the encoder; output_padding=1 recovers the
        # odd/even sizes lost by stride-2 downsampling.
        dec_layers = [
            nn.ConvTranspose2d(64, 32, kernel_size=7),
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.ReLU(),
            nn.ConvTranspose2d(16, 1, kernel_size=3, stride=2, padding=1, output_padding=1),
            nn.Sigmoid(),
        ]
        self.decoder = nn.Sequential(*dec_layers)

    def forward(self, x):
        """Encode then decode; returns a reconstruction with the input's shape."""
        return self.decoder(self.encoder(x))
def train_model(model, train_loader, optimizer, criterion, num_epochs):
    """Train `model` on (noisy, clean) pairs from `train_loader`.

    Args:
        model: nn.Module mapping a noisy batch to a reconstruction.
        train_loader: DataLoader yielding (noisy_data, clean_data) batches.
        optimizer: optimizer over model.parameters().
        criterion: reconstruction loss, e.g. nn.MSELoss().
        num_epochs: number of passes over the dataset.

    Returns:
        List of per-epoch average losses (new return value; callers that
        ignored the previous None return are unaffected).
    """
    # Fix: the original never put the model into training mode, which would
    # silently misbehave with dropout/batch-norm layers.
    model.train()
    history = []
    for epoch in range(num_epochs):
        running_loss = 0.0
        for noisy_data, clean_data in train_loader:
            optimizer.zero_grad()
            outputs = model(noisy_data)
            loss = criterion(outputs, clean_data)
            loss.backward()
            optimizer.step()
            # Weight by batch size so the epoch average is exact even when
            # the final batch is smaller than the rest.
            running_loss += loss.item() * noisy_data.size(0)
        epoch_loss = running_loss / len(train_loader.dataset)
        history.append(epoch_loss)
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, epoch_loss))
    return history
def test_model(model, test_loader):
with torch.no_grad():
for data in test_loader:
noisy_data, clean_data = data
outputs = model(noisy_data)
mse_loss = nn.MSELoss()(outputs, clean_data)
psnr = 10 * torch.log10(1 / mse_loss)
print('PSNR: {:.2f} dB'.format(psnr.item()))
class NoisyDataset(torch.utils.data.Dataset):
    """Wrap an (image, label) dataset into (noisy_image, clean_image) pairs.

    The original script called an undefined `add_noise`; this wrapper supplies
    it. Labels are discarded; Gaussian noise scaled by `noise_factor` is added
    and the result is clamped back to [0, 1] so it stays a valid image.
    """

    def __init__(self, base, noise_factor):
        self.base = base
        self.noise_factor = noise_factor

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        clean, _ = self.base[idx]  # drop the class label
        noisy = clean + self.noise_factor * torch.randn_like(clean)
        return torch.clamp(noisy, 0.0, 1.0), clean


def add_noise(dataset, noise_factor):
    """Return a lazy (noisy, clean) view of `dataset` (see NoisyDataset)."""
    return NoisyDataset(dataset, noise_factor)


if __name__ == '__main__':
    # Fix: the original used torchvision/transforms without importing them.
    # Imported here so the demo dependency stays local to the entry point.
    import torchvision
    import torchvision.transforms as transforms

    # Load MNIST (downloads on first run).
    train_dataset = torchvision.datasets.MNIST(
        root='./data', train=True, transform=transforms.ToTensor(), download=True)
    test_dataset = torchvision.datasets.MNIST(
        root='./data', train=False, transform=transforms.ToTensor(), download=True)

    # Corrupt inputs with additive Gaussian noise; targets stay clean.
    noise_factor = 0.5
    train_noisy_dataset = add_noise(train_dataset, noise_factor)
    test_noisy_dataset = add_noise(test_dataset, noise_factor)

    # Prepare dataloaders.
    train_loader = DataLoader(train_noisy_dataset, batch_size=256, shuffle=True)
    test_loader = DataLoader(test_noisy_dataset, batch_size=256, shuffle=False)

    # Model, loss, optimizer.
    model = Autoencoder()
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train, then evaluate PSNR on the held-out set.
    train_model(model, train_loader, optimizer, criterion, num_epochs=10)
    test_model(model, test_loader)
```
这个自编码器使用了一个简单的卷积神经网络来进行编码和解码。在训练时,我们向输入图像添加噪声,并将带噪图像作为模型的输入,以干净图像作为目标。模型的输出是还原的图像,我们使用均方误差作为损失函数进行优化。在测试时,我们计算去噪输出与原始干净图像之间的均方误差,并将其转换为PSNR值,以评估模型的去噪性能。
阅读全文