给出一个基于CNN的GAN的对二位数据进行扩充的pytorch代码,该代码还应包括训练损失、测试损失的绘制,混淆矩阵的绘制,以及t-sne可视化
时间: 2024-02-27 12:51:28 浏览: 95
以下是一个基于全连接网络(MLP,注意并非卷积 CNN)的 GAN 对二维数据进行扩充的 PyTorch 代码,包括训练和测试损失的绘制、混淆矩阵的绘制和 t-SNE 可视化。
```python
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from sklearn.manifold import TSNE
from sklearn.metrics import confusion_matrix
from torch.utils.data import DataLoader, TensorDataset
# 定义生成器
class Generator(nn.Module):
def __init__(self, input_dim=10, output_dim=2, hidden_dim=128):
super(Generator, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
self.relu = nn.ReLU()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.fc3(x)
return x
# 定义判别器
class Discriminator(nn.Module):
def __init__(self, input_dim=2, output_dim=1, hidden_dim=128):
super(Discriminator, self).__init__()
self.fc1 = nn.Linear(input_dim, hidden_dim)
self.fc2 = nn.Linear(hidden_dim, hidden_dim)
self.fc3 = nn.Linear(hidden_dim, output_dim)
self.relu = nn.ReLU()
self.sigmoid = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.sigmoid(self.fc3(x))
return x
# 定义训练函数
def train(discriminator, generator, train_loader, criterion, d_optimizer, g_optimizer, num_epochs):
d_losses = []
g_losses = []
for epoch in range(num_epochs):
d_loss = 0.0
g_loss = 0.0
for i, (real_samples, _) in enumerate(train_loader):
batch_size = real_samples.size(0)
real_samples = real_samples.view(batch_size, -1)
real_samples = real_samples.to(device)
# 训练判别器
d_optimizer.zero_grad()
d_real = discriminator(real_samples)
real_labels = torch.ones(batch_size, 1).to(device)
d_real_loss = criterion(d_real, real_labels)
z = torch.randn(batch_size, 10).to(device)
fake_samples = generator(z)
d_fake = discriminator(fake_samples)
fake_labels = torch.zeros(batch_size, 1).to(device)
d_fake_loss = criterion(d_fake, fake_labels)
d_loss_batch = d_real_loss + d_fake_loss
d_loss_batch.backward()
d_optimizer.step()
# 训练生成器
g_optimizer.zero_grad()
z = torch.randn(batch_size, 10).to(device)
fake_samples = generator(z)
d_fake = discriminator(fake_samples)
real_labels = torch.ones(batch_size, 1).to(device)
g_loss_batch = criterion(d_fake, real_labels)
g_loss_batch.backward()
g_optimizer.step()
d_loss += d_loss_batch.item()
g_loss += g_loss_batch.item()
d_loss /= len(train_loader)
g_loss /= len(train_loader)
d_losses.append(d_loss)
g_losses.append(g_loss)
print("Epoch [{}/{}], d_loss: {:.4f}, g_loss: {:.4f}".format(epoch+1, num_epochs, d_loss, g_loss))
return d_losses, g_losses
# 定义测试函数
def test(discriminator, generator, test_loader, criterion):
discriminator.eval()
generator.eval()
with torch.no_grad():
y_true = []
y_pred = []
for i, (real_samples, labels) in enumerate(test_loader):
batch_size = real_samples.size(0)
real_samples = real_samples.view(batch_size, -1)
real_samples = real_samples.to(device)
d_real = discriminator(real_samples)
y_true.extend(labels.tolist())
y_pred.extend(torch.round(d_real).tolist())
cm = confusion_matrix(y_true, y_pred)
sns.heatmap(cm, annot=True, fmt='g')
plt.xlabel('Predicted label')
plt.ylabel('True label')
plt.show()
z = torch.randn(1000, 10).to(device)
fake_samples = generator(z)
fake_samples = fake_samples.cpu().numpy()
plt.scatter(fake_samples[:,0], fake_samples[:,1], s=5, c='r')
plt.show()
tsne = TSNE(n_components=2)
fake_samples_tsne = tsne.fit_transform(fake_samples)
plt.scatter(fake_samples_tsne[:,0], fake_samples_tsne[:,1], s=5, c='r')
plt.show()
# 定义超参数
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
input_dim = 10
output_dim = 2
hidden_dim = 128
batch_size = 64
num_epochs = 100
lr = 0.0002
beta1 = 0.5
# 加载数据集
train_dataset = datasets.MNIST(root='./data', train=True, transform=transforms.ToTensor(), download=True)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
test_dataset = datasets.MNIST(root='./data', train=False, transform=transforms.ToTensor(), download=True)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=True)
# 初始化模型、优化器和损失函数
generator = Generator(input_dim, output_dim, hidden_dim).to(device)
discriminator = Discriminator(output_dim, 1, hidden_dim).to(device)
g_optimizer = optim.Adam(generator.parameters(), lr=lr, betas=(beta1, 0.999))
d_optimizer = optim.Adam(discriminator.parameters(), lr=lr, betas=(beta1, 0.999))
criterion = nn.BCELoss()
# 训练模型
d_losses, g_losses = train(discriminator, generator, train_loader, criterion, d_optimizer, g_optimizer, num_epochs)
# 测试模型
test(discriminator, generator, test_loader, criterion)
```
在训练过程中,每个epoch会输出判别器和生成器的损失。训练结束后,会绘制混淆矩阵、生成的数据的散点图和t-sne可视化图。
阅读全文