1D-CNN + LSTM regression output in PyTorch with visualization
Hello! Thanks for your question. Below is a simple PyTorch implementation of a 1D-CNN + LSTM regression model, together with code to visualize the training process:
```python
import torch
import torch.nn as nn
import numpy as np
import matplotlib.pyplot as plt


class CNN_LSTM(nn.Module):
    def __init__(self, input_size, hidden_size):
        super(CNN_LSTM, self).__init__()
        self.hidden_size = hidden_size
        # Conv1d expects input of shape (batch, channels, length)
        self.conv1 = nn.Conv1d(input_size, 32, kernel_size=3, padding=1)
        self.relu1 = nn.ReLU(inplace=True)
        self.pool1 = nn.MaxPool1d(kernel_size=2, stride=2)
        self.conv2 = nn.Conv1d(32, 64, kernel_size=3, padding=1)
        self.relu2 = nn.ReLU(inplace=True)
        self.pool2 = nn.MaxPool1d(kernel_size=2, stride=2)
        # The LSTM runs over the downsampled sequence of 64-channel features
        self.lstm = nn.LSTM(64, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # x: (batch, input_size, seq_len)
        x = self.conv1(x)
        x = self.relu1(x)
        x = self.pool1(x)
        x = self.conv2(x)
        x = self.relu2(x)
        x = self.pool2(x)
        # (batch, 64, seq_len') -> (batch, seq_len', 64) for the LSTM
        x = x.transpose(1, 2)
        _, (h_n, _) = self.lstm(x)
        h_n = h_n[-1]       # hidden state of the last LSTM layer: (batch, hidden_size)
        out = self.fc(h_n)  # regression output: (batch, 1)
        return out


# Generate a toy dataset: each sample is a 10-dimensional vector and the
# target is the sum of its components plus Gaussian noise
def generate_data(n_samples):
    x = np.random.uniform(-10, 10, size=(n_samples, 10))
    y = np.sum(x, axis=1) + np.random.normal(0, 1, size=n_samples)
    return x, y


# Train the model and plot the loss curves
def train(model, optimizer, criterion, x_train, y_train, x_val, y_val, n_epochs=100):
    train_losses = []
    val_losses = []
    for epoch in range(n_epochs):
        # Training step (full-batch for simplicity)
        model.train()
        optimizer.zero_grad()
        # (batch, 10) -> (batch, 1, 10): a single-channel sequence of length 10
        y_pred_train = model(torch.from_numpy(x_train).float().unsqueeze(1))
        loss_train = criterion(y_pred_train.squeeze(), torch.from_numpy(y_train).float())
        loss_train.backward()
        optimizer.step()
        train_losses.append(loss_train.item())
        # Validation step
        model.eval()
        with torch.no_grad():
            y_pred_val = model(torch.from_numpy(x_val).float().unsqueeze(1))
            loss_val = criterion(y_pred_val.squeeze(), torch.from_numpy(y_val).float())
            val_losses.append(loss_val.item())
        # Print the training and validation loss for the current epoch
        print(f'Epoch: {epoch+1}/{n_epochs}, Train Loss: {loss_train.item():.4f}, Val Loss: {loss_val.item():.4f}')
    # Plot the training and validation loss curves
    plt.plot(train_losses, label='train loss')
    plt.plot(val_losses, label='val loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    plt.show()


# Generate training and validation sets
x_train, y_train = generate_data(1000)
x_val, y_val = generate_data(100)

# Define the model, optimizer and loss function.
# input_size=1 because each sample is fed as a single-channel, length-10 sequence.
model = CNN_LSTM(input_size=1, hidden_size=64)
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
criterion = nn.MSELoss()

# Train the model
train(model, optimizer, criterion, x_train, y_train, x_val, y_val, n_epochs=100)

# Predict on a fresh test set and visualize the results
model.eval()
with torch.no_grad():
    x_test, y_test = generate_data(100)
    y_pred = model(torch.from_numpy(x_test).float().unsqueeze(1))
plt.plot(y_test, label='true')
plt.plot(y_pred.squeeze().numpy(), label='pred')
plt.legend()
plt.show()
```
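Before training, it can be useful to check that tensor shapes flow through the network as expected. The following is a minimal sanity-check sketch; it reuses the `CNN_LSTM` class defined above and assumes the single-channel, length-10 input used in this example:

```python
import torch

# Dummy batch: 4 samples, 1 input channel, sequence length 10
dummy = torch.randn(4, 1, 10)

model = CNN_LSTM(input_size=1, hidden_size=64)
with torch.no_grad():
    out = model(dummy)

print(out.shape)  # expected: torch.Size([4, 1]) -- one regression value per sample
```

Note that the two pooling layers each halve the sequence length, so the input sequence must be at least 4 steps long for the LSTM to receive a non-empty sequence.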
In this example, we generate a simple dataset in which each sample is a 10-dimensional vector and the target is the sum of its components plus some Gaussian noise; each vector is fed into the network as a single-channel sequence of length 10. We then define a 1D-CNN + LSTM model and train it with the MSE loss. Finally, we use the trained model to make predictions and plot the true values against the predicted values. You can adapt this model to your own data and requirements.
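If your real data is a multivariate time series rather than a flat feature vector, one common way to adapt the example is to treat the feature dimension as the Conv1d channel dimension. The sketch below is only an illustration under that assumption: the array shape (n_samples, seq_len, n_features) and the names `seq_len` and `n_features` are placeholders, and `CNN_LSTM` is the class from the snippet above.

```python
import numpy as np
import torch

# Hypothetical multivariate time series: 500 samples, 40 time steps, 6 features
n_samples, seq_len, n_features = 500, 40, 6
x = np.random.randn(n_samples, seq_len, n_features).astype(np.float32)

# Conv1d expects (batch, channels, length), so move the feature axis to the channel axis
inputs = torch.from_numpy(x).permute(0, 2, 1)

# The number of features becomes the model's input_size (Conv1d in_channels)
model = CNN_LSTM(input_size=n_features, hidden_size=64)
with torch.no_grad():
    print(model(inputs).shape)  # torch.Size([500, 1])
```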