Complete code for predicting dissolved oxygen with a TCN in PyTorch
Below is complete example code for predicting dissolved oxygen (DO) with a TCN in PyTorch:
```
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader, TensorDataset
class Chomp1d(nn.Module):
    """Trim the trailing padding so each convolution stays causal."""
    def __init__(self, chomp_size):
        super(Chomp1d, self).__init__()
        self.chomp_size = chomp_size

    def forward(self, x):
        return x[:, :, :-self.chomp_size].contiguous()


class TCN(nn.Module):
    def __init__(self, input_size, output_size, num_channels, kernel_size, dropout):
        super(TCN, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.num_channels = num_channels
        self.kernel_size = kernel_size
        self.dropout = dropout
        padding = self.kernel_size - 1  # pad on both sides, then chomp the right side
        self.tcn = nn.Sequential(
            nn.Conv1d(self.input_size, self.num_channels, self.kernel_size, stride=1, padding=padding),
            Chomp1d(padding),
            nn.BatchNorm1d(self.num_channels),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Conv1d(self.num_channels, self.num_channels, self.kernel_size, stride=1, padding=padding),
            Chomp1d(padding),
            nn.BatchNorm1d(self.num_channels),
            nn.ReLU(),
            nn.Dropout(self.dropout),
            nn.Conv1d(self.num_channels, self.output_size, 1)
        )

    def forward(self, x):
        # x: (batch, input_size, seq_len); use the output at the last time step
        y = self.tcn(x)
        return y[:, :, -1]
def train_model(model, train_loader, optimizer, criterion, epoch):
    model.train()
    train_loss = 0.0
    for i, (inputs, targets) in enumerate(train_loader):
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        optimizer.step()
        train_loss += loss.item()
    train_loss /= len(train_loader)
    print('Epoch: {}, Training Loss: {:.6f}'.format(epoch + 1, train_loss))
def evaluate_model(model, val_loader, criterion):
    model.eval()
    val_loss = 0.0
    with torch.no_grad():
        for inputs, targets in val_loader:
            outputs = model(inputs)
            loss = criterion(outputs, targets)
            val_loss += loss.item()
    val_loss /= len(val_loader)
    print('Validation Loss: {:.6f}'.format(val_loss))
    return val_loss
def predict(model, test_loader):
    model.eval()
    predictions = []
    with torch.no_grad():
        for inputs, _ in test_loader:
            outputs = model(inputs)
            predictions.append(outputs.numpy())
    return np.vstack(predictions)
# Load and preprocess data
data = pd.read_csv('data.csv')
scaler = MinMaxScaler()
data['DO'] = scaler.fit_transform(data['DO'].values.reshape(-1, 1)).ravel()

# Split data into train, validation, and test sets
train = data[:int(0.7 * len(data))]
val = data[int(0.7 * len(data)):int(0.85 * len(data))]
test = data[int(0.85 * len(data)):]

# Build sliding-window samples: each input is the previous seq_len values,
# the target is the value that follows the window
seq_len = 24  # window length; adjust to the sampling interval of your data

def make_dataset(series, seq_len):
    values = series.values.astype(np.float32)
    inputs, targets = [], []
    for i in range(len(values) - seq_len):
        inputs.append(values[i:i + seq_len])
        targets.append(values[i + seq_len])
    inputs = torch.tensor(np.array(inputs)).unsqueeze(1)    # (N, 1, seq_len)
    targets = torch.tensor(np.array(targets)).unsqueeze(1)  # (N, 1)
    return TensorDataset(inputs, targets)

train_set = make_dataset(train['DO'], seq_len)
val_set = make_dataset(val['DO'], seq_len)
test_set = make_dataset(test['DO'], seq_len)

# Create PyTorch DataLoader objects
batch_size = 64
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
val_loader = DataLoader(val_set, batch_size=batch_size, shuffle=False)
test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
# Initialize model, optimizer, and loss function
input_size = 1
output_size = 1
num_channels = 64
kernel_size = 3
dropout = 0.2
model = TCN(input_size, output_size, num_channels, kernel_size, dropout)
optimizer = optim.Adam(model.parameters(), lr=1e-3)
criterion = nn.MSELoss()
# Train model
num_epochs = 50
best_val_loss = float('inf')
for epoch in range(num_epochs):
    train_model(model, train_loader, optimizer, criterion, epoch)
    val_loss = evaluate_model(model, val_loader, criterion)
    # Keep the checkpoint with the lowest validation loss
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        torch.save(model.state_dict(), 'tcn_model.pt')
# Load best model and make predictions on test set
model.load_state_dict(torch.load('tcn_model.pt'))
predictions = scaler.inverse_transform(predict(model, test_loader))
# The targets are the values that follow each window; undo the scaling for comparison
actual = scaler.inverse_transform(test['DO'].values[seq_len:].reshape(-1, 1))
test_rmse = np.sqrt(np.mean((predictions - actual) ** 2))
print('Test RMSE: {:.6f}'.format(test_rmse))
```
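For reference, the script only assumes that `data.csv` contains a `DO` column holding the dissolved-oxygen readings in chronological order; any other columns (such as a timestamp) are ignored. A minimal example file with hypothetical values might look like this:
```
timestamp,DO
2023-01-01 00:00:00,7.8
2023-01-01 01:00:00,7.6
2023-01-01 02:00:00,7.5
```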
The `TCN` class defines a simple TCN model. The training loop feeds sliding-window samples through PyTorch `DataLoader` objects, updates the weights with the Adam optimizer, and uses mean squared error (MSE) as the loss function. The checkpoint with the lowest validation loss is saved during training and reloaded for prediction on the test set. Finally, the predictions are inverse-scaled back to the original units and the root mean squared error (RMSE) on the test set is reported.
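To use the trained network for an actual forecast rather than offline evaluation, a minimal sketch could look like the following. It assumes the `model`, the fitted `scaler`, and `seq_len` from the script above; the helper name `forecast_next` is purely illustrative:
```
import numpy as np
import torch

def forecast_next(model, scaler, recent_values, seq_len):
    # recent_values: the last seq_len raw DO readings, oldest first
    scaled = scaler.transform(np.array(recent_values).reshape(-1, 1)).astype(np.float32)
    window = torch.tensor(scaled.reshape(1, 1, seq_len))  # (batch, channels, seq_len)
    model.eval()
    with torch.no_grad():
        pred = model(window).numpy()
    # Map the scaled prediction back to the original DO units
    return scaler.inverse_transform(pred)[0, 0]

# Example call (hypothetical readings):
# next_do = forecast_next(model, scaler, last_24_readings, seq_len)
```
Note that the model only sees the last `seq_len` scaled values, so a longer forecast horizon would have to feed each prediction back in step by step.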