基于pytorch搭建cnn-lstm-attention用于时序预测的完整代码,包括数据处理和数据格式变换
时间: 2023-10-30 09:06:15 浏览: 355
以下是基于PyTorch搭建CNN-LSTM-Attention用于时序预测的完整代码,包括数据处理和数据格式变换的示例代码:
```python
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
# --- Data loading and preprocessing ---
data = pd.read_csv('data.csv')
data = data.drop(['date', 'time'], axis=1)

# BUG FIX: split BEFORE scaling. Fitting MinMaxScaler on the full series
# leaks test-set min/max statistics into training (data leakage); the
# scaler must learn its range from the training portion only.
train_raw = data.iloc[:8000, :].to_numpy()
test_raw = data.iloc[8000:, :].to_numpy()

scaler = MinMaxScaler()
train_data = scaler.fit_transform(train_raw)   # fit on train only
test_data = scaler.transform(test_raw)         # reuse train statistics
# --- Sliding-window dataset construction ---
def create_dataset(dataset, look_back=1):
    """Slice a 2-D series into sliding input windows and next-step targets.

    Args:
        dataset: array of shape (num_timesteps, num_features).
        look_back: number of past timesteps per input window.

    Returns:
        (dataX, dataY) where dataX has shape
        (num_timesteps - look_back, look_back, num_features) and dataY
        holds the row immediately following each window,
        shape (num_timesteps - look_back, num_features).
    """
    data_x, data_y = [], []
    # BUG FIX: range(T - look_back), not T - look_back - 1; the extra -1
    # silently dropped the last usable (window, target) pair.
    for i in range(len(dataset) - look_back):
        data_x.append(dataset[i:i + look_back, :])
        data_y.append(dataset[i + look_back, :])
    return np.array(data_x), np.array(data_y)
# --- Window the series and convert everything to float32 tensors ---
look_back = 5
trainX, trainY = create_dataset(train_data, look_back)
testX, testY = create_dataset(test_data, look_back)

# float32 is what the model parameters and MSE loss expect.
trainX, trainY, testX, testY = (
    torch.from_numpy(arr).float() for arr in (trainX, trainY, testX, testY)
)
# --- Model definition ---
class CNN_LSTM_Attention(nn.Module):
    """Conv1d feature extractor -> stacked LSTM -> additive attention -> linear head.

    Args:
        input_size: number of input features per timestep.
        hidden_size: conv output channels and LSTM hidden width.
        output_size: dimension of the prediction vector.
        num_layers: number of stacked LSTM layers.
        kernel_size: temporal width of the conv filter. The conv uses no
            padding, so the sequence shrinks by kernel_size - 1.
    """

    def __init__(self, input_size, hidden_size, output_size, num_layers, kernel_size):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.conv = nn.Conv1d(in_channels=input_size, out_channels=hidden_size,
                              kernel_size=kernel_size)
        self.lstm = nn.LSTM(hidden_size, hidden_size, num_layers=num_layers,
                            batch_first=True)
        # Scores each timestep's LSTM output with a small MLP (additive attention).
        self.attention = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1),
        )
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        """Map (batch, seq_len, input_size) to (batch, output_size)."""
        batch_size = x.size(0)
        x = x.permute(0, 2, 1)      # (batch, input_size, seq_len) for Conv1d
        x = self.conv(x)
        x = x.permute(0, 2, 1)      # (batch, conv_len, hidden_size)
        # BUG FIX: the unpadded conv shortens the time axis to
        # seq_len - kernel_size + 1, so the attention softmax must use the
        # post-conv length; the original view(batch_size, seq_len) raised a
        # shape error for any kernel_size > 1.
        conv_len = x.size(1)
        # BUG FIX: allocate the initial states on the input's device rather
        # than reading a module-level global `device` defined after the class.
        h0 = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                         device=x.device, dtype=x.dtype)
        c0 = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                         device=x.device, dtype=x.dtype)
        out, _ = self.lstm(x, (h0, c0))
        scores = self.attention(out)                    # (batch, conv_len, 1)
        weights = torch.softmax(scores.view(batch_size, conv_len), dim=1).unsqueeze(2)
        context = (out * weights).sum(dim=1)            # attention-weighted sum
        return self.fc(context)
# --- Training ---
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = CNN_LSTM_Attention(input_size=6, hidden_size=64, output_size=6,
                           num_layers=2, kernel_size=3).to(device)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
num_epochs = 100
batch_size = 64

for epoch in range(num_epochs):
    # One sequential pass over the training windows in mini-batches.
    for start in range(0, len(trainX), batch_size):
        batch_x = trainX[start:start + batch_size].to(device)
        batch_y = trainY[start:start + batch_size].to(device)
        optimizer.zero_grad()
        preds = model(batch_x)
        loss = criterion(preds, batch_y)
        loss.backward()
        optimizer.step()
    # Report the last mini-batch loss every 10 epochs.
    if (epoch + 1) % 10 == 0:
        print(f'Epoch [{epoch+1}/{num_epochs}], Loss: {loss.item():.4f}')
# --- Evaluation ---
model.eval()
test_loss = 0.0
num_batches = 0
with torch.no_grad():
    for start in range(0, len(testX), batch_size):
        batch_x = testX[start:start + batch_size].to(device)
        batch_y = testY[start:start + batch_size].to(device)
        preds = model(batch_x)
        # criterion returns the per-batch MEAN squared error.
        test_loss += criterion(preds, batch_y).item()
        num_batches += 1
# BUG FIX: each accumulated term is already a batch mean, so average over
# the number of batches. The original divided by len(testX) (sample count),
# underestimating the loss by roughly a factor of batch_size.
test_loss /= max(num_batches, 1)
print(f'Test Loss: {test_loss:.4f}')
```
在这段代码中,我们首先读入数据,并使用MinMaxScaler进行归一化处理(将各特征缩放到[0, 1]区间)。然后我们使用create_dataset函数将数据转化为具有look_back个时间步长的输入和输出序列,用于训练和测试模型。接下来我们定义了CNN_LSTM_Attention模型,该模型包括一个卷积层、一个LSTM层和一个Attention层;注意卷积层不带padding,会使序列长度缩短kernel_size-1。在训练模型时,我们使用均方误差损失函数和Adam优化器。最后,在测试模型时,我们计算了模型的测试损失。
阅读全文