A simple example GAN in PyTorch
时间: 2023-09-01 08:11:06 浏览: 48
Here is a simple implementation of a GAN in PyTorch:
```
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.autograd import Variable
# Define the discriminator network
class Discriminator(nn.Module):
    """MLP discriminator for 28x28 MNIST images.

    Flattens the input to a 784-dim vector and maps it through two
    hidden layers to a single sigmoid probability that the sample
    is real.
    """

    def __init__(self):
        super(Discriminator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(784, 512),
            nn.ReLU(),
            nn.Linear(512, 256),
            nn.ReLU(),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Accept either (N, 784) or (N, 1, 28, 28) input; flatten per sample.
        flattened = x.view(x.size(0), 784)
        return self.model(flattened)
# Define the generator network
class Generator(nn.Module):
    """MLP generator: 100-dim noise vector -> 1x28x28 image.

    The final Tanh squashes outputs into [-1, 1], matching MNIST
    images normalized with mean 0.5 / std 0.5.
    """

    def __init__(self):
        super(Generator, self).__init__()
        self.model = nn.Sequential(
            nn.Linear(100, 256),
            nn.ReLU(),
            nn.Linear(256, 512),
            nn.ReLU(),
            nn.Linear(512, 784),
            nn.Tanh(),
        )

    def forward(self, x):
        pixels = self.model(x)
        # Reshape the flat 784-vector into image layout (N, C, H, W).
        return pixels.view(pixels.size(0), 1, 28, 28)
# Instantiate the networks (the original snippet used them without creating them).
discriminator = Discriminator()
generator = Generator()

# Binary cross-entropy loss; Adam optimizer for both networks.
criterion = nn.BCELoss()
optimizerD = optim.Adam(discriminator.parameters(), lr=0.0002)
optimizerG = optim.Adam(generator.parameters(), lr=0.0002)

# Load the MNIST dataset, normalized to [-1, 1] to match the generator's Tanh output.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5,), (0.5,)),
])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True)

num_epochs = 50  # was undefined in the original snippet

# Train the GAN
for epoch in range(num_epochs):
    for i, (images, _) in enumerate(train_loader):
        batch_size = images.size(0)
        # Labels shaped (N, 1) to match the discriminator's output shape;
        # BCELoss rejects a (N,) target against a (N, 1) input.
        real_labels = torch.ones(batch_size, 1)
        fake_labels = torch.zeros(batch_size, 1)

        # --- Train the discriminator ---
        discriminator.zero_grad()
        real_images = images.view(batch_size, 784)
        # detach() blocks gradients from flowing into the generator on the D step.
        fake_images = generator(torch.randn(batch_size, 100)).detach()
        d_loss_real = criterion(discriminator(real_images), real_labels)
        d_loss_fake = criterion(discriminator(fake_images), fake_labels)
        d_loss = d_loss_real + d_loss_fake
        d_loss.backward()
        optimizerD.step()

        # --- Train the generator ---
        generator.zero_grad()
        fake_images = generator(torch.randn(batch_size, 100))
        # The generator is rewarded when D classifies its fakes as real.
        g_loss = criterion(discriminator(fake_images), real_labels)
        g_loss.backward()
        optimizerG.step()
```
In this implementation, we train a GAN to generate images of hand-written digits using the MNIST dataset. The discriminator network is a simple feedforward neural network with three hidden layers, and the generator network is also a simple feedforward neural network with three hidden layers. We use the binary cross-entropy loss function and the Adam optimizer for both the discriminator and the generator. The generator tries to generate fake images that look like the real images from the MNIST dataset, while the discriminator tries to distinguish between the real images and the fake images generated by the generator. The generator is updated to generate images that can fool the discriminator, while the discriminator is updated to better distinguish between the real and fake images.
相关推荐
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)