Write a Python code example for a convolutional neural network
Below is a framework for a simple convolutional neural network in PyTorch, sized for single-channel 28x28 inputs (such as MNIST) with 10 output classes:
# Import the required libraries
import torch
import torch.nn as nn
import torch.nn.functional as F

# Define the convolutional neural network
class ConvNet(nn.Module):
    def __init__(self):
        super(ConvNet, self).__init__()
        # Convolutional layers
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=5, stride=1)
        self.conv2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1)
        # Max-pooling layer (shared by both convolutional stages)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        # Fully connected layers (32 * 4 * 4 matches a 28x28 input after two conv + pool stages)
        self.fc1 = nn.Linear(in_features=32 * 4 * 4, out_features=120)
        self.fc2 = nn.Linear(in_features=120, out_features=84)
        self.fc3 = nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        # Convolution, ReLU activation, and pooling
        x = F.relu(self.conv1(x))
        x = self.pool(x)
        x = F.relu(self.conv2(x))
        x = self.pool(x)
        # Flatten the feature maps so they can be fed to the fully connected layers
        x = x.view(-1, 32 * 4 * 4)
        # Fully connected layers with ReLU activations
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        # Return the class scores (logits)
        return x
# Instantiate the model
model = ConvNet()
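As a quick sanity check, you can push a dummy batch through the model to confirm the flattened size of 32 * 4 * 4 and the 10-class output. This is a minimal sketch assuming single-channel 28x28 inputs such as MNIST:

# Shape check with a dummy batch (assumption: 1x28x28 inputs, e.g. MNIST)
dummy = torch.randn(8, 1, 28, 28)
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: torch.Size([8, 10])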
# Define the loss function and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
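The training and test loops below reference train_loader and test_loader, which the original snippet does not define. Here is a minimal sketch that provides them, assuming the MNIST dataset from torchvision; the batch size and normalization statistics are illustrative choices:

# Minimal data loading sketch (assumption: MNIST via torchvision)
from torchvision import datasets, transforms
from torch.utils.data import DataLoader

transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,)),  # commonly used MNIST statistics
])
train_dataset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
test_dataset = datasets.MNIST(root='./data', train=False, download=True, transform=transform)
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False)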
# Train the model
num_epochs = 10
total_step = len(train_loader)  # number of batches per epoch
model.train()  # switch to training mode
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Forward pass
        outputs = model(images)
        # Compute the loss
        loss = criterion(outputs, labels)
        # Backward pass and optimization
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # Print training progress
        if (i + 1) % 100 == 0:
            print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                epoch + 1, num_epochs, i + 1, total_step, loss.item()))
# Evaluate the model on the test set
model.eval()  # switch to evaluation mode
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        outputs = model(images)
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the model on the test images: {} %'.format(100 * correct / total))
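If you want to keep the trained weights for later use, a minimal sketch follows; the file name conv_net.pth is just an illustrative choice:

# Save the trained weights (file name is an arbitrary example)
torch.save(model.state_dict(), 'conv_net.pth')

# Later, restore them into a fresh ConvNet instance
restored_model = ConvNet()
restored_model.load_state_dict(torch.load('conv_net.pth'))
restored_model.eval()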