以西储大学CWRU轴承故障的振动信号数据库作为模型的训练集,PyTorch实现基于GAN网络生成一维数据的代码
时间: 2023-06-15 17:06:49 浏览: 307
轴承振动信号库
5星 · 资源好评率100%
以下是基于GAN网络生成一维数据的PyTorch代码,使用CWRU轴承故障的振动信号数据库作为模型的训练集:
```python
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt
# 定义生成器
class Generator(nn.Module):
    """Three-layer MLP generator: maps a latent noise vector to a fake
    1-D vibration sample.

    Layers: input_size -> hidden_size -> hidden_size -> output_size,
    with ReLU activations on the two hidden layers and a linear output.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Generator, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Two ReLU-activated hidden layers, then a raw linear output
        # (no final activation: samples are unbounded real values).
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)
# 定义判别器
class Discriminator(nn.Module):
    """Three-layer MLP discriminator: scores a sample with the
    probability (0..1) that it came from the real data distribution.

    Layers: input_size -> hidden_size -> hidden_size -> output_size,
    ReLU on the hidden layers and a sigmoid on the output.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(Discriminator, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, hidden_size)
        self.fc3 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Sigmoid output keeps the score in (0, 1) as required by BCELoss.
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return torch.sigmoid(self.fc3(hidden))
# 定义训练函数
def train_generator(gen, disc, optimizer, criterion, noise, real_data):
    """Run one generator update step.

    The generator tries to make the discriminator label its fakes as
    real, so the target for the fake scores is all ones.
    Note: ``real_data`` is accepted for a signature symmetric with
    ``train_discriminator`` but is not used here.
    Returns the generator loss tensor.
    """
    optimizer.zero_grad()
    generated = gen(noise)
    fake_scores = disc(generated)
    # Target = 1: push the discriminator's output on fakes toward "real".
    g_loss = criterion(fake_scores, torch.ones_like(fake_scores))
    g_loss.backward()
    optimizer.step()
    return g_loss
def train_discriminator(gen, disc, optimizer, criterion, noise, real_data):
    """Run one discriminator update step.

    Scores a batch of generator fakes (target 0) and a batch of real
    samples (target 1); the total loss is the sum of both BCE terms.
    Returns the combined discriminator loss tensor.
    """
    optimizer.zero_grad()
    generated = gen(noise)
    # detach(): gradients from the discriminator loss must not flow
    # back into the generator's parameters.
    fake_scores = disc(generated.detach())
    real_scores = disc(real_data)
    d_loss = (criterion(fake_scores, torch.zeros_like(fake_scores))
              + criterion(real_scores, torch.ones_like(real_scores)))
    d_loss.backward()
    optimizer.step()
    return d_loss
# ---- Hyperparameters ----
input_size = 1      # latent-noise dimension fed to the generator
hidden_size = 10
output_size = 1     # dimension of each generated sample
num_epochs = 500
batch_size = 64
learning_rate = 0.001

# ---- Load the training set (CWRU bearing vibration signals) ----
# Assumes a pre-exported .npy array; presumably shape (N, 1) — confirm
# against the export step.
data = np.load('bearing_dataset.npy')
train_data = torch.from_numpy(data).float()

# ---- Build the networks and their optimizers ----
gen = Generator(input_size, hidden_size, output_size)
disc = Discriminator(input_size, hidden_size, output_size)
gen_optimizer = torch.optim.Adam(gen.parameters(), lr=learning_rate)
disc_optimizer = torch.optim.Adam(disc.parameters(), lr=learning_rate)

# Binary cross-entropy: the standard adversarial loss for a vanilla GAN.
criterion = nn.BCELoss()

# ---- Adversarial training loop ----
for epoch in range(num_epochs):
    for i in range(0, train_data.size(0), batch_size):
        real_data = train_data[i:i + batch_size]
        # FIX: the final slice can be shorter than batch_size; size the
        # noise to the actual mini-batch so the discriminator sees
        # matched fake/real batch sizes instead of an oversized fake batch.
        noise = torch.randn(real_data.size(0), input_size)
        gen_loss = train_generator(gen, disc, gen_optimizer, criterion, noise, real_data)
        disc_loss = train_discriminator(gen, disc, disc_optimizer, criterion, noise, real_data)
    if (epoch + 1) % 50 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Gen loss: {gen_loss.item():.4f}, Disc loss: {disc_loss.item():.4f}')

# ---- Generate new samples with the trained generator ----
with torch.no_grad():
    noise = torch.randn(1000, input_size)
    fake_data = gen(noise)
    fake_data = fake_data.numpy()

# ---- Plot the distribution of the generated samples ----
plt.hist(fake_data, bins=50)
plt.show()
```
在此代码中,我们定义了一个生成器和一个判别器。生成器将噪声作为输入,生成与训练集相似的数据。判别器将数据作为输入,输出一个0到1之间的数,表示输入数据是真实数据的概率。在训练过程中,我们交替训练生成器和判别器,使生成器能够生成更真实的数据,使判别器能够更好地区分真实数据和生成数据。
最后,我们使用生成器生成1000个新数据,并绘制它们的分布。可以看到,生成的数据分布与训练集的分布相似。
阅读全文