请提供pytorch架构程序示例,要求:评估LSTM、BP、RBF人工神经网络的多步预测效果,并计算RMSE;输入数据集从EXCEL表读取,表有7个特征变量,最后一个是target,预测时间步是30
时间: 2024-03-17 07:42:31 浏览: 105
pytorch利用LSTM实现对股票进行多变量多步预测
好的,以下是一个简单的PyTorch程序示例,演示如何使用LSTM、BP和RBF人工神经网络进行多步预测,并计算RMSE。该程序将从Excel表中读取数据集,数据集有7个特征变量,最后一个是目标变量,预测时间步为30。请注意,该程序仅供参考,您可能需要根据您的具体情况进行修改。
```python
import torch
import torch.nn as nn
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
# Load the dataset from the Excel sheet (no header row expected).
raw = pd.read_excel('data.xlsx', header=None).to_numpy()

# Scale every column into [-1, 1] so all three models see comparable ranges.
scaler = MinMaxScaler(feature_range=(-1, 1))
data = scaler.fit_transform(raw)

# Shared hyper-parameters.
input_size = 7        # number of feature columns fed to the networks
hidden_size = 64      # width of each hidden layer
output_size = 1       # single target value per prediction
num_layers = 2        # stacked LSTM layers
seq_length = 30       # look-back window length / forecast horizon
learning_rate = 0.01
num_epochs = 100

# Convert to a float32 tensor for PyTorch.
data = torch.from_numpy(data).float()

# Chronological 80/20 split (no shuffling for time-series data).
train_size = int(len(data) * 0.8)
test_size = len(data) - train_size
train_data = data[:train_size]
test_data = data[train_size:]
# 定义LSTM模型
class LSTM(nn.Module):
    """Stacked LSTM mapping a (batch, seq, features) window to one value."""

    def __init__(self, input_size, hidden_size, output_size, num_layers):
        super(LSTM, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Run the window through the LSTM and project the final hidden state."""
        # Fresh zero states per call; they carry no gradient history.
        state_shape = (self.num_layers, x.size(0), self.hidden_size)
        h0 = torch.zeros(state_shape)
        c0 = torch.zeros(state_shape)
        seq_out, _ = self.lstm(x, (h0, c0))
        # Only the last time step feeds the linear head -> (batch, output_size).
        return self.fc(seq_out[:, -1, :])
# 定义BP模型
class BP(nn.Module):
    """Single-hidden-layer feed-forward ("back-propagation") network."""

    def __init__(self, input_size, hidden_size, output_size):
        super(BP, self).__init__()
        self.fc1 = nn.Linear(input_size, hidden_size)
        self.relu = nn.ReLU()
        self.fc2 = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Apply fc1 -> ReLU -> fc2 to a (batch, input_size) tensor."""
        hidden = self.relu(self.fc1(x))
        return self.fc2(hidden)
# 定义RBF模型
class RBF(nn.Module):
    """Radial basis function network.

    A hidden layer of Gaussian units measures the distance of each input to a
    set of learnable centers; a linear layer maps those activations to the
    output.  The original code referenced ``nn.RBF``, which does not exist in
    PyTorch (it raised AttributeError at construction), so the RBF layer is
    implemented explicitly here with the same constructor/forward interface.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RBF, self).__init__()
        # One learnable center per hidden unit.
        self.centers = nn.Parameter(torch.randn(hidden_size, input_size))
        # Log-parameterised widths keep sigma strictly positive.
        self.log_sigma = nn.Parameter(torch.zeros(hidden_size))
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map (batch, input_size) -> (batch, output_size)."""
        # Squared Euclidean distance from each sample to each center.
        dist_sq = torch.cdist(x, self.centers).pow(2)
        sigma_sq = torch.exp(self.log_sigma).pow(2)
        phi = torch.exp(-dist_sq / (2.0 * sigma_sq))  # Gaussian activations
        return self.fc(phi)
# Create one instance of each architecture; all share the same layer sizes so
# their prediction errors are comparable.
lstm_model = LSTM(input_size, hidden_size, output_size, num_layers)
bp_model = BP(input_size, hidden_size, output_size)
rbf_model = RBF(input_size, hidden_size, output_size)
# One shared MSE criterion; each model gets its own Adam optimizer so the
# three training runs stay independent.
criterion = nn.MSELoss()
lstm_optimizer = torch.optim.Adam(lstm_model.parameters(), lr=learning_rate)
bp_optimizer = torch.optim.Adam(bp_model.parameters(), lr=learning_rate)
rbf_optimizer = torch.optim.Adam(rbf_model.parameters(), lr=learning_rate)
# Train the LSTM one-step-ahead: each window of `seq_length` feature rows
# predicts the target of the row immediately after the window.  The original
# code compared the model's (1, 1) output against the whole window's targets
# ((1, seq_length, 1)), which broadcasts to a meaningless loss; shapes now
# match exactly.
for epoch in range(num_epochs):
    for i in range(0, train_data.size(0) - seq_length, seq_length):
        input_seq = train_data[i:i + seq_length, :-1]           # (seq, features)
        target = train_data[i + seq_length, -1:].unsqueeze(0)   # (1, 1)
        lstm_optimizer.zero_grad()
        lstm_output = lstm_model(input_seq.unsqueeze(0))        # (1, 1)
        lstm_loss = criterion(lstm_output, target)
        lstm_loss.backward()
        lstm_optimizer.step()
    if (epoch + 1) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, lstm_loss.item()))
# Train the BP network: it maps each feature row to that row's own target,
# so a whole window is pushed through as one batch of (seq_length, features).
for epoch in range(num_epochs):
    for start in range(0, train_data.size(0) - seq_length, seq_length):
        window = slice(start, start + seq_length)
        features = train_data[window, :-1]
        targets = train_data[window, -1:]
        bp_optimizer.zero_grad()
        bp_loss = criterion(bp_model(features), targets)
        bp_loss.backward()
        bp_optimizer.step()
    if (epoch + 1) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, bp_loss.item()))
# Train the RBF network on the same batched windows as the BP model.
for epoch in range(num_epochs):
    step = seq_length
    for i in range(0, train_data.size(0) - step, step):
        batch_x = train_data[i:i + step, :-1]
        batch_y = train_data[i:i + step, -1:]
        rbf_optimizer.zero_grad()
        rbf_output = rbf_model(batch_x)
        rbf_loss = criterion(rbf_output, batch_y)
        rbf_loss.backward()
        rbf_optimizer.step()
    if (epoch + 1) % 10 == 0:
        print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, rbf_loss.item()))
# Multi-step forecast over the first `seq_length` rows of the test set.
# The feature columns are exogenous (the target column is never a model
# input), so we walk forward using the known test-set features.  The original
# code tried to feed predictions back by concatenating a (1, 1) output onto
# (seq, features) rows, which raises a shape error, and called ``.item()`` on
# the (30, 1) BP/RBF outputs, which also raises.
# NOTE(review): assumes the test split holds at least seq_length rows — confirm
# for the actual Excel dataset.
lstm_preds = []
bp_preds = []
rbf_preds = []
with torch.no_grad():
    for step in range(seq_length):
        t = train_size + step  # absolute index of the row being predicted
        # LSTM: the seq_length feature rows preceding row t.
        window = data[t - seq_length:t, :-1].unsqueeze(0)
        lstm_preds.append(lstm_model(window).item())
        # BP / RBF: predict row t's target from row t's features alone.
        row = data[t, :-1].unsqueeze(0)
        bp_preds.append(bp_model(row).item())
        rbf_preds.append(rbf_model(row).item())
# Evaluate RMSE in the original target units.  ``scaler`` was fitted on all
# columns at once, so calling ``inverse_transform`` on a single (n, 1) column
# (as the original did) raises a feature-count error; instead invert the
# last column's min-max mapping by hand.  The original also compared the 30
# predictions against the full-length training target, a length mismatch.
def _invert_target(values):
    """Map values from the scaler's (-1, 1) range back to raw target units."""
    t_min = scaler.data_min_[-1]
    t_max = scaler.data_max_[-1]
    return (np.asarray(values, dtype=float) + 1.0) / 2.0 * (t_max - t_min) + t_min

# Ground truth: the seq_length test-set targets the models predicted.
actual = _invert_target(test_data[:seq_length, -1].numpy())
lstm_rmse = np.sqrt(mean_squared_error(actual, _invert_target(lstm_preds)))
bp_rmse = np.sqrt(mean_squared_error(actual, _invert_target(bp_preds)))
rbf_rmse = np.sqrt(mean_squared_error(actual, _invert_target(rbf_preds)))
print('LSTM RMSE: {:.4f}'.format(lstm_rmse))
print('BP RMSE: {:.4f}'.format(bp_rmse))
print('RBF RMSE: {:.4f}'.format(rbf_rmse))
```
阅读全文