Using PyTorch, write federated learning code that trains a 5-layer convolutional neural network on the NSL-KDD dataset. Add a comment to every line of code, and start from data preprocessing.
# Import the required libraries
import numpy as np                # numerical arrays for the feature matrices
import pandas as pd               # CSV parsing and one-hot encoding of the NSL-KDD files
import torch                      # core PyTorch
import torch.nn as nn             # neural-network building blocks
import torch.optim as optim       # optimizers (Adam)
from torch.utils.data import DataLoader, TensorDataset   # batching utilities
# Select the device: use the GPU if one is available, otherwise fall back to the CPU
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Define the 5-layer convolutional network
class ConvNet(nn.Module):
    def __init__(self, num_classes=2):   # binary normal-vs-attack labels are assumed here
        super(ConvNet, self).__init__()
        # Five 3x3 convolutions; each is followed by 2x2 max pooling in forward()
        self.conv1 = nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
        # After five 2x2 poolings a 1x32x32 input is reduced to 256x1x1 (see the preprocessing below)
        self.fc1 = nn.Linear(256 * 1 * 1, 64)   # first fully connected layer
        self.fc2 = nn.Linear(64, num_classes)   # output layer producing the class logits
        self.dropout = nn.Dropout(p=0.5)        # dropout for regularization
    def forward(self, x):
        # Each block: convolution -> ReLU -> 2x2 max pooling (the spatial size halves each time)
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv1(x)), kernel_size=2)  # 32x32 -> 16x16
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv2(x)), kernel_size=2)  # 16x16 -> 8x8
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv3(x)), kernel_size=2)  # 8x8  -> 4x4
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv4(x)), kernel_size=2)  # 4x4  -> 2x2
        x = nn.functional.max_pool2d(nn.functional.relu(self.conv5(x)), kernel_size=2)  # 2x2  -> 1x1
        x = x.view(x.size(0), -1)                # flatten to (batch, 256)
        x = nn.functional.relu(self.fc1(x))      # fully connected layer with ReLU
        x = self.dropout(x)                      # apply dropout
        x = self.fc2(x)                          # class logits
        return x
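# Optional sanity check (not part of the original answer): feed a dummy 1x32x32 tensor through the
# network to confirm that the flattened size matches fc1; the 1x32x32 input shape is an assumption
# that the preprocessing below is written to satisfy (features zero-padded to 32*32 = 1024).
_dummy_logits = ConvNet()(torch.zeros(1, 1, 32, 32))   # forward pass on an all-zero batch of one
print(_dummy_logits.shape)                              # expected: torch.Size([1, 2])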
# Define the hyper-parameters
batch_size = 64      # mini-batch size
lr = 0.001           # learning rate for Adam
num_epochs = 10      # number of passes over the training data
# Data preprocessing: NSL-KDD is a tabular dataset and is not shipped with torchvision, so the
# KDDTrain+.txt / KDDTest+.txt files are assumed to have been downloaded into ./NSL-KDD beforehand
def load_nslkdd(train_path='NSL-KDD/KDDTrain+.txt', test_path='NSL-KDD/KDDTest+.txt'):
    cols = list(range(43))                                        # 41 features + label + difficulty score
    train_df = pd.read_csv(train_path, header=None, names=cols)   # read the raw training CSV
    test_df = pd.read_csv(test_path, header=None, names=cols)     # read the raw test CSV
    n_train = len(train_df)                                       # remember where the split point is
    df = pd.concat([train_df, test_df], ignore_index=True)        # encode train/test together so columns match
    y = (df[41] != 'normal').astype(np.int64).values              # binary label: 0 = normal, 1 = attack
    X = pd.get_dummies(df.drop(columns=[41, 42]), columns=[1, 2, 3]).astype(np.float32).values  # one-hot the 3 categorical columns
    X = (X - X.mean(axis=0)) / (X.std(axis=0) + 1e-6)             # standardize each feature column
    X = np.pad(X, ((0, 0), (0, 1024 - X.shape[1])))               # zero-pad the ~122 features up to 1024 = 32*32
    X = torch.tensor(X, dtype=torch.float32).view(-1, 1, 32, 32)  # reshape each record into a 1x32x32 "image"
    y = torch.tensor(y)                                           # labels as an int64 tensor
    return TensorDataset(X[:n_train], y[:n_train]), TensorDataset(X[n_train:], y[n_train:])
# Build the train/test datasets and the corresponding data loaders
train_dataset, test_dataset = load_nslkdd()
train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, num_workers=4)
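# Federated setup (an illustrative assumption, since NSL-KDD defines no client split):
# randomly partition the training set into a few simulated clients, one DataLoader per client.
from torch.utils.data import random_split                          # utility for random dataset splits
num_clients = 5                                                     # assumed number of simulated clients
client_sizes = [len(train_dataset) // num_clients] * num_clients    # equal share for each client
client_sizes[-1] += len(train_dataset) - sum(client_sizes)          # last client absorbs any remainder
client_datasets = random_split(train_dataset, client_sizes)         # IID random partition of the training data
client_loaders = [DataLoader(ds, batch_size=batch_size, shuffle=True) for ds in client_datasets]  # one loader per client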
# Instantiate the model and the optimizer
model = ConvNet().to(device)                         # move the network to the selected device
optimizer = optim.Adam(model.parameters(), lr=lr)    # Adam optimizer over all model parameters
# Train the model (plain centralized training; a federated FedAvg version is sketched further below)
for epoch in range(num_epochs):
    for i, (inputs, labels) in enumerate(train_loader):
        inputs = inputs.to(device)                            # move the feature batch to the device
        labels = labels.to(device)                            # move the label batch to the device
        optimizer.zero_grad()                                 # clear gradients from the previous step
        outputs = model(inputs)                               # forward pass
        loss = nn.functional.cross_entropy(outputs, labels)   # classification loss
        loss.backward()                                       # back-propagate
        optimizer.step()                                      # update the weights
        if (i + 1) % 10 == 0:                                 # print progress every 10 steps
            print('Epoch [{}/{}], Step [{}/{}], Loss {:.4f}'
                  .format(epoch + 1, num_epochs, i + 1, len(train_loader), loss.item()))
# Evaluate the model's accuracy on the test set
model.eval()                                              # switch off dropout for evaluation
with torch.no_grad():                                     # no gradients are needed during evaluation
    correct = 0                                           # number of correctly classified samples
    total = 0                                             # total number of samples seen
    for inputs, labels in test_loader:
        inputs = inputs.to(device)                        # move the feature batch to the device
        labels = labels.to(device)                        # move the label batch to the device
        outputs = model(inputs)                           # forward pass only
        _, predicted = torch.max(outputs.data, 1)         # predicted class = arg-max of the logits
        total += labels.size(0)                           # count samples
        correct += (predicted == labels).sum().item()     # count correct predictions
    print('Test accuracy of the model on {} test samples: {:.2f} %'.format(total, 100 * correct / total))
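The question asked for federated learning, while the loop above is plain centralized training. Below is a minimal FedAvg-style sketch under the same assumptions: it reuses the simulated client_loaders built earlier, the ConvNet defined above, and the binary labels. The helper name local_update, the number of rounds, the single local epoch, and the equal-weight averaging are illustrative choices, not part of the original answer.

# Minimal FedAvg sketch: each round, every simulated client trains a copy of the global model
# locally, and the server averages the resulting parameters to form the next global model.
import copy                                                        # for copying model state dicts

def local_update(global_state, loader, local_epochs=1):
    # Train a fresh local model initialized from the current global weights
    local_model = ConvNet().to(device)                             # same architecture as the global model
    local_model.load_state_dict(global_state)                      # start from the global weights
    local_opt = optim.Adam(local_model.parameters(), lr=lr)        # each client has its own optimizer
    local_model.train()                                            # enable dropout during local training
    for _ in range(local_epochs):                                  # a small number of local epochs
        for inputs, labels in loader:
            inputs, labels = inputs.to(device), labels.to(device)  # move the client batch to the device
            local_opt.zero_grad()                                  # clear gradients
            loss = nn.functional.cross_entropy(local_model(inputs), labels)  # local loss
            loss.backward()                                        # back-propagate
            local_opt.step()                                       # local weight update
    return local_model.state_dict()                                # return the trained local weights

global_model = ConvNet().to(device)                                # server-side global model
num_rounds = 10                                                    # number of communication rounds (assumed)
for rnd in range(num_rounds):
    # Each client computes an update starting from the current global weights
    client_states = [local_update(copy.deepcopy(global_model.state_dict()), loader)
                     for loader in client_loaders]
    # FedAvg aggregation: parameter-wise average of the client models (equal client weights assumed)
    avg_state = {key: torch.stack([state[key].float() for state in client_states]).mean(dim=0)
                 for key in client_states[0]}
    global_model.load_state_dict(avg_state)                        # install the averaged weights on the server
    print('Round [{}/{}] finished'.format(rnd + 1, num_rounds))    # progress per communication round

After the final round, global_model can be evaluated with the same test loop shown above.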