Write a PyTorch-based LSTM program for time series prediction
Below is a PyTorch-based LSTM program for time series prediction, using the closing price of a stock as an example:
```python
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error

# Data preprocessing: turn the series into supervised (input window, next value) pairs
def create_dataset(data, look_back=1):
    dataX, dataY = [], []
    for i in range(len(data) - look_back):
        dataX.append(data[i:(i + look_back)])
        dataY.append(data[i + look_back])
    return np.array(dataX), np.array(dataY)

# Load the data
df = pd.read_csv('data.csv')
df = df.dropna()
data = df['close'].values.astype('float32')

# Normalize the data to [0, 1]
scaler = MinMaxScaler(feature_range=(0, 1))
data = scaler.fit_transform(data.reshape(-1, 1))

# Split into training and test sets
train_size = int(len(data) * 0.7)
test_size = len(data) - train_size
train, test = data[:train_size, :], data[train_size:, :]

# Build input/output windows
look_back = 5
trainX, trainY = create_dataset(train, look_back)
testX, testY = create_dataset(test, look_back)

# Convert to PyTorch tensors; shapes: X -> (samples, look_back, 1), Y -> (samples, 1)
trainX = torch.from_numpy(trainX).type(torch.Tensor)
trainY = torch.from_numpy(trainY).type(torch.Tensor)
testX = torch.from_numpy(testX).type(torch.Tensor)
testY = torch.from_numpy(testY).type(torch.Tensor)

# Define the LSTM model
class LSTM(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Zero-initialize the hidden and cell states for each forward pass
        h0 = torch.zeros(1, x.size(0), self.hidden_size)
        c0 = torch.zeros(1, x.size(0), self.hidden_size)
        out, (hn, cn) = self.lstm(x, (h0, c0))
        # Use only the output of the last time step for the prediction
        out = self.fc(out[:, -1, :])
        return out

# Train the model
input_size = 1
hidden_size = 32
output_size = 1
num_epochs = 1000
learning_rate = 0.01
lstm = LSTM(input_size, hidden_size, output_size)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)

for epoch in range(num_epochs):
    outputs = lstm(trainX)
    optimizer.zero_grad()
    loss = criterion(outputs, trainY)
    loss.backward()
    optimizer.step()
    if (epoch + 1) % 100 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))

# Evaluate the model
lstm.eval()
with torch.no_grad():
    train_predict = lstm(trainX)
    test_predict = lstm(testX)

# Undo the normalization so the errors are in the original price scale
train_predict = scaler.inverse_transform(train_predict.numpy())
trainY = scaler.inverse_transform(trainY.numpy())
test_predict = scaler.inverse_transform(test_predict.numpy())
testY = scaler.inverse_transform(testY.numpy())

# Compute the mean squared error
trainScore = mean_squared_error(trainY, train_predict)
testScore = mean_squared_error(testY, test_predict)
print('Train Score: {:.2f} MSE'.format(trainScore))
print('Test Score: {:.2f} MSE'.format(testScore))
```
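If you want to inspect the fit visually, a minimal plotting sketch (assuming matplotlib is installed; the index arithmetic simply aligns each prediction with its position in the original series) could be appended to the script:
```python
import matplotlib.pyplot as plt

# Plot the actual prices against the train/test predictions, aligned by index.
actual = scaler.inverse_transform(data)
plt.figure(figsize=(10, 4))
plt.plot(actual, label='actual close')
plt.plot(range(look_back, look_back + len(train_predict)), train_predict, label='train prediction')
plt.plot(range(train_size + look_back, train_size + look_back + len(test_predict)), test_predict, label='test prediction')
plt.legend()
plt.show()
```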
In this program, we first normalize the raw data and split it into a training set and a test set. We then use the create_dataset function to convert the time series into a supervised-learning format and store the data as PyTorch tensors. Next, we define an LSTM model and train it with mean squared error as the loss function. Finally, we use the trained model to make predictions on the training and test sets and compute the mean squared error of those predictions.
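As a possible extension (not part of the program above), the trained network can also produce a one-step-ahead forecast by feeding it the last look_back normalized points; a minimal sketch reusing the scaler, data, and lstm defined earlier:
```python
# Sketch: one-step-ahead forecast using the last `look_back` normalized values.
last_window = torch.from_numpy(data[-look_back:]).type(torch.Tensor).unsqueeze(0)  # (1, look_back, 1)
with torch.no_grad():
    next_scaled = lstm(last_window)  # (1, 1), still in the [0, 1] scale
next_price = scaler.inverse_transform(next_scaled.numpy())
print('Predicted next close: {:.2f}'.format(next_price[0, 0]))
```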