Transformer code for univariate single-step time series forecasting
Below is an example of using a Transformer for univariate, single-step time series forecasting:
```python
import math

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
# Define the Transformer model
class TransformerModel(nn.Module):
    def __init__(self, input_dim, output_dim, d_model, nhead, num_layers, dropout):
        super(TransformerModel, self).__init__()
        self.d_model = d_model
        # Project the input_dim-dimensional input onto the d_model-dimensional space the encoder expects
        self.input_proj = nn.Linear(input_dim, d_model)
        self.pos_encoder = PositionalEncoding(d_model, dropout)
        encoder_layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=nhead, dropout=dropout)
        self.transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=num_layers)
        self.fc = nn.Linear(d_model, output_dim)

    def forward(self, src):
        # src: (batch_size, seq_len, input_dim) -> (seq_len, batch_size, input_dim)
        src = src.permute(1, 0, 2)
        src = self.input_proj(src) * math.sqrt(self.d_model)
        src = self.pos_encoder(src)
        output = self.transformer_encoder(src)
        output = self.fc(output[-1, :, :])  # use the output of the last time step
        return output
# Define the positional encoding layer
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)  # (max_len, 1, d_model)
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (seq_len, batch_size, d_model)
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)
# Define the time series dataset class
class TimeSeriesDataset(Dataset):
    def __init__(self, data, seq_length):
        self.data = data
        self.seq_length = seq_length

    def __len__(self):
        return len(self.data) - self.seq_length

    def __getitem__(self, idx):
        # Input: the seq_length values before idx; target: the value at idx
        idx = idx + self.seq_length
        x = self.data[idx - self.seq_length:idx]
        y = self.data[idx]
        return x, y
# Load the data
data = pd.read_csv('data.csv')['value'].values.astype(np.float32)
train_data = data[:1000]
test_data = data[1000:]

# Define the hyperparameters
input_dim = 1
output_dim = 1
d_model = 32
nhead = 4
num_layers = 2
dropout = 0.2
lr = 0.001
batch_size = 32
num_epochs = 100
seq_length = 10
# Initialize the model and the optimizer
model = TransformerModel(input_dim, output_dim, d_model, nhead, num_layers, dropout)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
criterion = nn.MSELoss()

# Train the model
train_dataset = TimeSeriesDataset(train_data, seq_length)
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
for epoch in range(num_epochs):
    for i, (x, y) in enumerate(train_loader):
        optimizer.zero_grad()
        y_pred = model(x.unsqueeze(-1))  # (batch_size, seq_length) -> (batch_size, seq_length, 1)
        loss = criterion(y_pred, y.unsqueeze(-1))
        loss.backward()
        optimizer.step()
    print('Epoch [{}/{}], Loss: {:.4f}'.format(epoch + 1, num_epochs, loss.item()))
# Evaluate the model
test_dataset = TimeSeriesDataset(test_data, seq_length)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
model.eval()
with torch.no_grad():
    y_pred_list = []
    for x, y in test_loader:
        y_pred_list.append(model(x.unsqueeze(-1)))
    y_pred = torch.cat(y_pred_list, dim=0)
    y_true = torch.tensor(test_data[seq_length:], dtype=torch.float32).unsqueeze(-1)
    test_loss = criterion(y_pred, y_true)
    print('Test Loss: {:.4f}'.format(test_loss.item()))
```
In this code, the `TransformerModel` class implements the Transformer model and the `PositionalEncoding` class implements the positional encoding layer. The `TimeSeriesDataset` class turns the raw series into sliding-window samples: each input is a window of `seq_length` past values and the target is the value that immediately follows it.
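To make the windowing concrete, here is a small check (not part of the original snippet) that applies the `TimeSeriesDataset` class defined above to a made-up toy sequence and prints the (x, y) pairs it produces:

```python
import numpy as np

# Toy sequence 0..9, used only to illustrate the sliding-window split
toy = np.arange(10, dtype=np.float32)
ds = TimeSeriesDataset(toy, seq_length=3)

for i in range(len(ds)):
    x, y = ds[i]
    print(x, '->', y)
# First pair: [0. 1. 2.] -> 3.0, last pair: [6. 7. 8.] -> 9.0
```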
During training, the data is loaded through `TimeSeriesDataset`, split into mini-batches with `DataLoader`, and the model is trained with the mean squared error loss and the Adam optimizer.
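The snippet above trains on the raw values; in practice the series is usually standardized first so that the MSE loss is well scaled. A minimal sketch of such a preprocessing step, assuming the `train_data` and `test_data` arrays from the code above (this step is not part of the original snippet):

```python
# Standardize with statistics of the training split only, to avoid leaking test information
train_mean = train_data.mean()
train_std = train_data.std() + 1e-8  # guard against a constant series

train_scaled = (train_data - train_mean) / train_std
test_scaled = (test_data - train_mean) / train_std

# After prediction, map the model output back to the original scale:
# y_pred_original = y_pred * train_std + train_mean
```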
During testing, the test data is loaded with `TimeSeriesDataset` as well, the trained model predicts the next value for each window, and the predictions are compared against the ground truth to compute the loss on the test set.
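Once training is done, the same model can also produce a genuine single-step forecast beyond the observed series. A minimal sketch, assuming the most recent `seq_length` values of `data` form the input window:

```python
# Forecast the single next (unobserved) value from the latest window
model.eval()
with torch.no_grad():
    last_window = torch.tensor(data[-seq_length:], dtype=torch.float32)  # shape (seq_length,)
    last_window = last_window.unsqueeze(0).unsqueeze(-1)                 # shape (1, seq_length, 1)
    next_value = model(last_window).item()
print('Next-step forecast: {:.4f}'.format(next_value))
```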