pytorch实现LSTM训练模型,使用NSL-KDD数据集
时间: 2023-12-09 10:06:32 浏览: 30
以下是一个使用PyTorch实现LSTM模型进行NSL-KDD数据集训练的示例代码:
```
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
# 读取数据集
# Load the raw training records: each CSV row becomes a
# (feature-field list, label field) pair.
with open('KDDTrain+.txt', 'r') as f:
    train_data = [
        (fields[:-1], fields[-1])
        for fields in (line.strip().split(',') for line in f)
    ]
# 定义数据集类
# 定义数据集类
class KDDDataset(Dataset):
    """Map-style dataset over parsed NSL-KDD records.

    Each record is a ``(feature_fields, label_field)`` tuple of strings,
    as produced by splitting one CSV line; fields are converted to
    float32 tensors lazily in ``__getitem__``.
    """

    def __init__(self, data):
        self.data = data

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        features, label = self.data[index]
        # Parse with float() rather than int(): torch stores the values
        # as float32 either way, and NSL-KDD feature columns include
        # real-valued fields (rates, fractions) that int() would reject.
        x = torch.tensor([float(v) for v in features], dtype=torch.float32)
        # Shape (1,) so batches collate to (batch, 1), matching the
        # model's single-logit output for BCEWithLogitsLoss.
        # NOTE(review): the label must already be numeric here; the raw
        # NSL-KDD label column is a string (e.g. 'normal'), so an
        # encoding step is needed upstream — confirm against the loader.
        y = torch.tensor([float(label)], dtype=torch.float32)
        return x, y
# 定义LSTM模型
# 定义LSTM模型
class LSTMModel(nn.Module):
    """Single-layer LSTM followed by a linear classification head.

    Args:
        input_size: number of features per time step.
        hidden_size: LSTM hidden state width.
        output_size: number of output logits (1 for binary detection).
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(LSTMModel, self).__init__()
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # The dataset yields flat (batch, input_size) samples, but the
        # LSTM with batch_first expects (batch, seq_len, input_size) and
        # the out[:, -1, :] indexing below requires 3-D output. Treat a
        # 2-D batch as length-1 sequences; 3-D input passes unchanged.
        if x.dim() == 2:
            x = x.unsqueeze(1)
        out, _ = self.lstm(x)
        # Classify from the hidden state of the last time step.
        return self.fc(out[:, -1, :])
# 定义训练函数
# 定义训练函数
def train(model, device, train_loader, optimizer, criterion):
    """Run one training epoch over ``train_loader``.

    Args:
        model: the network to optimize (set to train mode here).
        device: torch.device the batches are moved to.
        train_loader: DataLoader yielding (data, target) batches.
        optimizer: optimizer stepping ``model``'s parameters.
        criterion: loss taking (logits, target) of identical shape.
    """
    model.train()
    for data, target in train_loader:
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        # BCEWithLogitsLoss requires target.shape == logits.shape; both
        # are (batch, 1) here, so the target is passed as-is. (The
        # original squeezed only the target to (batch,), which raises a
        # shape-mismatch error inside the loss.)
        loss = criterion(output, target)
        loss.backward()
        optimizer.step()
# 训练模型
# --- Training configuration -------------------------------------------
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch_size = 128
epochs = 10
input_size = 41      # one NSL-KDD record carries 41 feature fields
hidden_size = 128
output_size = 1      # single logit for binary (normal/attack) output
lr = 0.01

# Model, loss, and optimizer.
model = LSTMModel(input_size, hidden_size, output_size).to(device)
criterion = nn.BCEWithLogitsLoss()
optimizer = optim.Adam(model.parameters(), lr=lr)

# Wrap the parsed records and shuffle every epoch.
train_loader = DataLoader(KDDDataset(train_data),
                          batch_size=batch_size, shuffle=True)

# Run the training loop, reporting progress once per epoch.
for epoch in range(epochs):
    train(model, device, train_loader, optimizer, criterion)
    print('Epoch: {}'.format(epoch + 1))

# Persist the learned weights for later reuse.
torch.save(model.state_dict(), 'lstm_model.pt')
```
在训练过程中,我们使用了PyTorch内置的LSTM模块进行建模,并使用Adam优化器进行参数更新。训练完成后,我们将模型保存到本地文件中以备后续使用。