Autoencoder代码
时间: 2023-09-26 17:11:28 浏览: 48
以下是一个基本的Autoencoder代码示例,使用PyTorch:
```
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms  # was missing: used below by datasets.MNIST / transforms.ToTensor
# 定义Autoencoder类
class Autoencoder(nn.Module):
    """Symmetric MLP autoencoder: 784 -> 256 -> 64 -> 10 -> 64 -> 256 -> 784.

    Intended for flattened 28x28 images (e.g. MNIST); the decoder ends in a
    Sigmoid so reconstructions lie in [0, 1], matching ToTensor-scaled pixels.
    """

    def __init__(self):
        super(Autoencoder, self).__init__()
        # Encoder: compress the 784-dim input down to a 10-dim code.
        self.encoder = nn.Sequential(
            nn.Linear(784, 256),
            nn.ReLU(),
            nn.Linear(256, 64),
            nn.ReLU(),
            nn.Linear(64, 10),
        )
        # Decoder: mirror of the encoder, expanding the code back to 784 dims.
        self.decoder = nn.Sequential(
            nn.Linear(10, 64),
            nn.ReLU(),
            nn.Linear(64, 256),
            nn.ReLU(),
            nn.Linear(256, 784),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Encode then decode *x*; returns the reconstruction."""
        code = self.encoder(x)
        return self.decoder(code)
# 定义训练函数
def train(model, dataloader, n_epochs=10, lr=0.01):
    """Train *model* to reconstruct its own inputs (autoencoder objective).

    Uses Adam and MSE reconstruction loss; prints the mean batch loss
    after each epoch.

    Args:
        model: module mapping (batch, features) -> (batch, features).
        dataloader: yields (inputs, labels) batches; labels are ignored.
        n_epochs: number of full passes over the data.
        lr: Adam learning rate.
    """
    optimizer = optim.Adam(model.parameters(), lr=lr)
    criterion = nn.MSELoss()
    model.train()  # ensure training mode (matters if dropout/batch-norm are added)
    # Guard against ZeroDivisionError when the dataloader is empty.
    n_batches = max(len(dataloader), 1)
    for epoch in range(n_epochs):
        running_loss = 0.0
        for data in dataloader:
            inputs, _ = data
            # Flatten each sample to a vector; the target is the input itself.
            inputs = inputs.view(inputs.size(0), -1)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = criterion(outputs, inputs)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print('Epoch [%d], Loss: %.4f' % (epoch+1, running_loss/n_batches))
# Load the MNIST training set (ToTensor scales pixels to [0, 1],
# matching the Sigmoid range of the decoder output).
trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset, batch_size=32, shuffle=True)
# Build and train the autoencoder.
autoencoder = Autoencoder()
train(autoencoder, trainloader, n_epochs=10)
# Produce the compressed (10-dim) representation for one batch.
# BUG FIX: `dataiter.next()` no longer exists in Python 3 / modern PyTorch;
# use the builtin next() on the iterator instead.
dataiter = iter(trainloader)
inputs, _ = next(dataiter)
inputs = inputs.view(inputs.size(0), -1)
with torch.no_grad():  # inference only — no gradients needed
    outputs = autoencoder.encoder(inputs)
print(outputs)
```
在这个示例中,我们定义了一个基本的Autoencoder模型,它有一个encoder和一个decoder,都由全连接层组成。我们使用MNIST数据集来训练模型,并在训练期间计算重构误差。最后,我们使用模型的encoder生成压缩表示。