Using PyTorch, help me write a GAN model that generates time-series data
Posted: 2024-03-23 18:42:03
Sure. Below is a GAN model for generating time-series data, implemented in PyTorch. Both the generator and the discriminator use an LSTM as their core component:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
# LSTM-based generator: maps a noise sequence to a synthetic time series
class Generator(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(Generator, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_dim, output_dim)

    def forward(self, noise):
        # noise shape: (batch_size, seq_len, input_dim)
        batch_size = noise.size(0)
        hidden = self.init_hidden(batch_size)
        output, hidden = self.lstm(noise, hidden)
        # project every timestep's hidden state to the output dimension
        output = self.linear(output)  # (batch_size, seq_len, output_dim)
        return output

    def init_hidden(self, batch_size):
        # zero-initialized (h_0, c_0), created on the same device as the model's weights
        weight = next(self.parameters())
        return (weight.new_zeros(self.num_layers, batch_size, self.hidden_dim),
                weight.new_zeros(self.num_layers, batch_size, self.hidden_dim))

# LSTM-based discriminator: scores each sequence as real (1) or fake (0)
class Discriminator(nn.Module):
    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(Discriminator, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.linear = nn.Linear(hidden_dim, output_dim)

    def forward(self, input):
        # input shape: (batch_size, seq_len, input_dim)
        batch_size = input.size(0)
        hidden = self.init_hidden(batch_size)
        output, hidden = self.lstm(input, hidden)
        # score only the last timestep so the logit shape (batch_size, output_dim)
        # matches the (batch_size, 1) labels used in the training loop
        output = self.linear(output[:, -1, :])
        return output

    def init_hidden(self, batch_size):
        # zero-initialized (h_0, c_0), created on the same device as the model's weights
        weight = next(self.parameters())
        return (weight.new_zeros(self.num_layers, batch_size, self.hidden_dim),
                weight.new_zeros(self.num_layers, batch_size, self.hidden_dim))

# hyperparameters
input_dim = 1    # feature dimension of each timestep (also the noise dimension)
hidden_dim = 32
output_dim = 1   # generated feature dimension; also the discriminator's single logit
num_layers = 2
seq_len = 100    # length of each time series (example value; match your data)
batch_size = 64
epochs = 100
lr = 0.0002
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# define generator and discriminator
generator = Generator(input_dim, hidden_dim, output_dim, num_layers).to(device)
discriminator = Discriminator(input_dim, hidden_dim, output_dim, num_layers).to(device)

# define loss function and optimizers
criterion = nn.BCEWithLogitsLoss()
optimizer_G = optim.Adam(generator.parameters(), lr=lr)
optimizer_D = optim.Adam(discriminator.parameters(), lr=lr)

# train GAN model
# train_data: array of shape (num_samples, seq_len, input_dim); see the note below the code
for epoch in range(epochs):
    for i in range(len(train_data) // batch_size):
        # ---- train discriminator ----
        real_data = train_data[i * batch_size:(i + 1) * batch_size]
        real_data = torch.as_tensor(real_data, dtype=torch.float32).to(device)
        labels_real = torch.ones(batch_size, 1).to(device)
        fake_data = generator(torch.randn(batch_size, seq_len, input_dim).to(device))
        labels_fake = torch.zeros(batch_size, 1).to(device)
        discriminator.zero_grad()
        outputs_real = discriminator(real_data)
        loss_real = criterion(outputs_real, labels_real)
        loss_real.backward()
        # detach so no gradients flow back into the generator here
        outputs_fake = discriminator(fake_data.detach())
        loss_fake = criterion(outputs_fake, labels_fake)
        loss_fake.backward()
        optimizer_D.step()
        # ---- train generator ----
        generator.zero_grad()
        fake_data = generator(torch.randn(batch_size, seq_len, input_dim).to(device))
        outputs_fake = discriminator(fake_data)
        # the generator tries to make the discriminator output "real"
        loss_G = criterion(outputs_fake, labels_real)
        loss_G.backward()
        optimizer_G.step()
        # print losses and save a generated sample
        if (i + 1) % 100 == 0:
            print("Epoch [{}/{}], Step [{}/{}], Loss D: {:.4f}, Loss G: {:.4f}"
                  .format(epoch + 1, epochs, i + 1, len(train_data) // batch_size,
                          (loss_real + loss_fake).item(), loss_G.item()))
            with torch.no_grad():
                generated_data = generator(torch.randn(1, seq_len, input_dim).to(device)).cpu().numpy()
                np.savetxt("generated_data_epoch_{}.txt".format(epoch + 1), generated_data[0], fmt="%.4f")
```
Here train_data is the training dataset and seq_len is the length of each time series (a sketch for preparing a compatible train_data follows below). In each step we first train the discriminator on a real batch and a detached fake batch, then train the generator through the discriminator without stepping the discriminator's optimizer, and finally print the losses and save a generated sample.
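The snippet above assumes train_data already exists. As a minimal, purely illustrative sketch (the sine-wave data, num_samples, and the exact values are assumptions, not part of the original answer), a compatible train_data could be built like this:
```python
import numpy as np

# toy dataset: noisy sine waves of shape (num_samples, seq_len, input_dim)
seq_len = 100         # must match the seq_len used in the training loop
num_samples = 6400    # 100 batches of 64, so the every-100-steps logging fires
t = np.linspace(0, 4 * np.pi, seq_len)
phases = np.random.uniform(0, 2 * np.pi, size=(num_samples, 1))
train_data = np.sin(t[None, :] + phases) + 0.05 * np.random.randn(num_samples, seq_len)
train_data = train_data[..., None].astype(np.float32)  # -> (6400, 100, 1)
```
Any float array of shape (num_samples, seq_len, input_dim) will work; shuffling it between epochs keeps the batches varied.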
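For a quick sanity check, a saved sample can be reloaded and plotted (matplotlib is assumed to be available; the filename matches the np.savetxt call in the training loop):
```python
import numpy as np
import matplotlib.pyplot as plt

# each saved file holds one generated sequence, one value per line (input_dim == 1)
sample = np.loadtxt("generated_data_epoch_100.txt")
plt.plot(sample)
plt.xlabel("timestep")
plt.ylabel("value")
plt.title("Generated sequence after 100 epochs")
plt.show()
```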