去噪自编码器pytorch
时间: 2023-09-29 08:03:11 浏览: 48
一个简单的去噪自编码器(Denoising Autoencoder)示例
这是一个简单的去噪自编码器的PyTorch实现,使用MNIST数据集进行训练和测试。
```
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
# 定义去噪自编码器模型
class DenoisingAutoencoder(nn.Module):
    """Fully-connected denoising autoencoder for flattened 28x28 MNIST images.

    The encoder compresses a 784-dim input down to a 32-dim code through a
    stack of Linear+ReLU layers (784 -> 256 -> 128 -> 64 -> 32); the decoder
    mirrors that stack back up to 784 dims and squashes the reconstruction
    into [0, 1] with a final Sigmoid.
    """

    def __init__(self):
        super(DenoisingAutoencoder, self).__init__()
        # Layer widths from the input down to the bottleneck.
        widths = [784, 256, 128, 64, 32]

        encoder_layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            encoder_layers.append(nn.Linear(n_in, n_out))
            encoder_layers.append(nn.ReLU())
        self.encoder = nn.Sequential(*encoder_layers)

        decoder_layers = []
        mirrored = widths[::-1]
        for n_in, n_out in zip(mirrored[:-1], mirrored[1:]):
            decoder_layers.append(nn.Linear(n_in, n_out))
            decoder_layers.append(nn.ReLU())
        # The last activation is a Sigmoid (not ReLU) so outputs land in [0, 1].
        decoder_layers[-1] = nn.Sigmoid()
        self.decoder = nn.Sequential(*decoder_layers)

    def forward(self, x):
        """Encode then decode a batch of flattened images of shape (N, 784)."""
        return self.decoder(self.encoder(x))
# 定义训练函数
def train(model, train_loader, criterion, optimizer, device):
    """Run one training epoch of the denoising objective.

    Each batch is flattened to (N, 784), corrupted with additive Gaussian
    noise (std 0.2), and passed through the model; the loss compares the
    reconstruction against the clean batch.

    Returns the average per-sample training loss for the epoch.
    """
    model.train()
    running = 0.0
    for clean, _ in train_loader:
        clean = clean.view(clean.size(0), -1)
        # Corrupt the input; the clean image stays the regression target.
        noisy = clean + 0.2 * torch.randn_like(clean)
        noisy = noisy.to(device)
        clean = clean.to(device)

        optimizer.zero_grad()
        reconstruction = model(noisy)
        loss = criterion(reconstruction, clean)
        loss.backward()
        optimizer.step()

        # Weight by batch size so the final value is a per-sample mean.
        running += loss.item() * clean.size(0)
    return running / len(train_loader.dataset)
# 定义测试函数
def test(model, test_loader, criterion, device):
model.eval()
test_loss = 0.0
with torch.no_grad():
for data, _ in test_loader:
data = data.view(data.size(0), -1)
data_noisy = data + torch.randn(data.size()) * 0.2 # 加入噪声
data_noisy, data = data_noisy.to(device), data.to(device)
output = model(data_noisy)
loss = criterion(output, data)
test_loss += loss.item() * data.size(0)
test_loss /= len(test_loader.dataset)
return test_loss
# 数据预处理
# Data preprocessing.
# ToTensor already scales pixels to [0, 1], which matches the decoder's
# Sigmoid output range. BUGFIX: the original pipeline also applied
# Normalize((0.1307,), (0.3081,)), which maps targets to roughly
# [-0.42, 2.82] — values a Sigmoid-capped reconstruction can never reach
# under MSE. Normalization is therefore deliberately omitted.
transform = transforms.Compose([
    transforms.ToTensor(),
])

# Load the MNIST train/test splits (downloaded to ./data on first run).
train_dataset = datasets.MNIST(root='data', train=True, transform=transform, download=True)
test_dataset = datasets.MNIST(root='data', train=False, transform=transform, download=True)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False)

# Model, reconstruction loss, and optimizer.
model = DenoisingAutoencoder()
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)

# Run on GPU when available.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model.to(device)

# Training loop: report average per-sample train/test loss each epoch.
num_epochs = 10
for epoch in range(num_epochs):
    train_loss = train(model, train_loader, criterion, optimizer, device)
    test_loss = test(model, test_loader, criterion, device)
    print('Epoch [{}/{}], Train Loss: {:.4f}, Test Loss: {:.4f}'.format(epoch+1, num_epochs, train_loss, test_loss))
```
在训练和测试过程中,我们都将原始图像加入高斯噪声来生成噪声图像,然后将噪声图像作为输入、原始图像作为目标输出,以此衡量模型的去噪重建能力。最终的训练和测试损失如下所示:
```
Epoch [1/10], Train Loss: 0.0584, Test Loss: 0.0384
Epoch [2/10], Train Loss: 0.0362, Test Loss: 0.0335
Epoch [3/10], Train Loss: 0.0329, Test Loss: 0.0317
Epoch [4/10], Train Loss: 0.0313, Test Loss: 0.0306
Epoch [5/10], Train Loss: 0.0302, Test Loss: 0.0297
Epoch [6/10], Train Loss: 0.0294, Test Loss: 0.0291
Epoch [7/10], Train Loss: 0.0287, Test Loss: 0.0285
Epoch [8/10], Train Loss: 0.0282, Test Loss: 0.0281
Epoch [9/10], Train Loss: 0.0277, Test Loss: 0.0277
Epoch [10/10], Train Loss: 0.0273, Test Loss: 0.0274
```
可以看到,训练和测试损失都在不断降低,说明模型可以成功地去除噪声并恢复原始图像。
阅读全文