基于PyTorch的LSTM实现回归预测的代码
时间: 2024-02-12 16:08:02 浏览: 24
下面是基于PyTorch实现LSTM进行回归预测的代码示例:
```python
import torch
import torch.nn as nn
class LSTMRegressor(nn.Module):
    """LSTM-based regressor: encodes a sequence and maps the last hidden
    state to a real-valued output via a linear layer.

    Args:
        input_dim:  number of features per timestep.
        hidden_dim: size of the LSTM hidden state.
        output_dim: dimension of the regression target.
        num_layers: number of stacked LSTM layers.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super(LSTMRegressor, self).__init__()
        self.hidden_dim = hidden_dim
        self.num_layers = num_layers
        # batch_first=True -> input/output shaped (batch, seq_len, features)
        self.lstm = nn.LSTM(input_dim, hidden_dim, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_dim, output_dim)

    def forward(self, x):
        """Run the LSTM over x (batch, seq_len, input_dim) and regress from
        the final timestep's output.

        Returns:
            Tensor of shape (batch, output_dim).
        """
        # Fix: the original built h0/c0 with requires_grad_() and then
        # immediately detach()-ed them (contradictory), and allocated them on
        # CPU with the default dtype, which breaks for inputs on another
        # device/dtype. nn.LSTM already defaults the initial states to zeros
        # on x's device and dtype, so we simply omit them — identical math.
        out, _ = self.lstm(x)
        # Use only the last timestep's features for the regression head.
        return self.fc(out[:, -1, :])
# --- Hyperparameters ---
input_dim = 1        # features per timestep
hidden_dim = 32      # LSTM hidden state size
output_dim = 1       # regression target dimension
num_layers = 2       # stacked LSTM layers
learning_rate = 0.01
num_epochs = 100

# --- Load dataset (placeholders: replace with real data loading) ---
data = ...
x_train, y_train, x_test, y_test = ...

# Reshape to (batch, seq_len=1, input_dim) for the LSTM and
# (batch, output_dim) for the targets.
x_train = torch.Tensor(x_train).view(-1, 1, input_dim)
y_train = torch.Tensor(y_train).view(-1, output_dim)
x_test = torch.Tensor(x_test).view(-1, 1, input_dim)
y_test = torch.Tensor(y_test).view(-1, output_dim)

# --- Model, loss, optimizer ---
model = LSTMRegressor(input_dim, hidden_dim, output_dim, num_layers)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# --- Training loop (full-batch gradient descent) ---
model.train()
for epoch in range(num_epochs):
    outputs = model(x_train)
    loss = criterion(outputs, y_train)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    if epoch % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, loss.item()))

# --- Evaluation ---
model.eval()  # fix: disable training-mode behaviour before inference
with torch.no_grad():
    y_predicted = model(x_test)
    test_loss = criterion(y_predicted, y_test)
    print('Test loss: {:.4f}'.format(test_loss.item()))

# --- Visualize predictions vs. ground truth ---
import matplotlib.pyplot as plt  # fix: plt was used but never imported

plt.plot(y_test.numpy(), label='True')
plt.plot(y_predicted.numpy(), label='Predicted')
plt.legend()
plt.show()
```
这段代码实现了一个包含两个LSTM层的回归预测模型。在训练过程中,使用均方误差作为损失函数,并使用Adam优化器来更新模型参数。最后,使用训练好的模型对测试集进行预测,并将预测结果可视化。