Code for various CIFAR-10 models

Posted: 2023-10-16 18:03:21
Sure, here are PyTorch implementations of several classic models for the CIFAR-10 dataset:

1. LeNet-5

```python
import torch.nn as nn


class LeNet5(nn.Module):
    def __init__(self, num_classes=10):
        super(LeNet5, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5),         # 32x32 -> 28x28
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2, stride=2),  # -> 14x14
            nn.Conv2d(6, 16, kernel_size=5),        # -> 10x10
            nn.Tanh(),
            nn.AvgPool2d(kernel_size=2, stride=2),  # -> 5x5
        )
        self.classifier = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.Tanh(),
            nn.Linear(120, 84),
            nn.Tanh(),
            nn.Linear(84, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
```

2. AlexNet

```python
import torch.nn as nn


# Note: this is the ImageNet-style AlexNet. CIFAR-10's 32x32 images must be
# upsampled first (e.g. transforms.Resize(224)); fed 32x32 inputs directly,
# the last MaxPool2d receives a 1x1 feature map and raises an error.
class AlexNet(nn.Module):
    def __init__(self, num_classes=10):
        super(AlexNet, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        self.classifier = nn.Sequential(
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
```

3. VGG-11

```python
import torch.nn as nn


class VGG11(nn.Module):
    # Runs on 32x32 inputs: the five pooling stages reduce the feature map to
    # 1x1, and the adaptive pool expands it to the 7x7 the classifier expects.
    def __init__(self, num_classes=10):
        super(VGG11, self).__init__()
        self.features = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(64, 128, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(128, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(256, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, x):
        x = self.features(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
```
4. ResNet-18

```python
import torch.nn as nn


# Note: a simplified, plain-convolution sketch. A faithful ResNet-18 wraps
# each pair of 3x3 convolutions in a residual block with an identity (or
# 1x1-projection) shortcut and stacks two such blocks per stage.
class ResNet18(nn.Module):
    def __init__(self, num_classes=10):
        super(ResNet18, self).__init__()
        # 3x3 stride-1 stem, suitable for 32x32 CIFAR images
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.layer1 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
        )
        self.layer2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
        )
        self.layer3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
        )
        self.layer4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
        )
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
```

5. DenseNet-121

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class Bottleneck(nn.Module):
    # BN-ReLU-1x1 conv, then BN-ReLU-3x3 conv; the growth_rate new feature
    # maps are concatenated onto the input.
    def __init__(self, in_channels, growth_rate):
        super(Bottleneck, self).__init__()
        self.bn1 = nn.BatchNorm2d(in_channels)
        self.conv1 = nn.Conv2d(in_channels, 4 * growth_rate, kernel_size=1, bias=False)
        self.bn2 = nn.BatchNorm2d(4 * growth_rate)
        self.conv2 = nn.Conv2d(4 * growth_rate, growth_rate, kernel_size=3, padding=1, bias=False)

    def forward(self, x):
        out = self.conv1(F.relu(self.bn1(x)))
        out = self.conv2(F.relu(self.bn2(out)))
        out = torch.cat([out, x], 1)
        return out


class DenseBlock(nn.Module):
    def __init__(self, in_channels, num_layers, growth_rate):
        super(DenseBlock, self).__init__()
        self.layer = nn.ModuleList()
        for i in range(num_layers):
            # layer i sees the block input plus the i * growth_rate feature
            # maps produced by the layers before it
            self.layer.append(Bottleneck(in_channels + i * growth_rate, growth_rate))

    def forward(self, x):
        for layer in self.layer:
            x = layer(x)
        return x


class Transition(nn.Module):
    # 1x1 conv to compress channels, then 2x2 average pooling to downsample
    def __init__(self, in_channels, out_channels):
        super(Transition, self).__init__()
        self.bn = nn.BatchNorm2d(in_channels)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False)
        self.avgpool = nn.AvgPool2d(kernel_size=2, stride=2)

    def forward(self, x):
        x = self.conv(F.relu(self.bn(x)))
        x = self.avgpool(x)
        return x


class DenseNet121(nn.Module):
    def __init__(self, num_classes=10):
        super(DenseNet121, self).__init__()
        # ImageNet-style stem; on 32x32 CIFAR images it downsamples to 8x8
        # right away (CIFAR variants often use a 3x3 stride-1 stem instead)
        self.conv = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.block1 = DenseBlock(64, num_layers=6, growth_rate=32)    # -> 64 + 6*32 = 256
        self.trans1 = Transition(256, 128)
        self.block2 = DenseBlock(128, num_layers=12, growth_rate=32)  # -> 128 + 12*32 = 512
        self.trans2 = Transition(512, 256)
        self.block3 = DenseBlock(256, num_layers=24, growth_rate=32)  # -> 256 + 24*32 = 1024
        self.trans3 = Transition(1024, 512)
        self.block4 = DenseBlock(512, num_layers=16, growth_rate=32)  # -> 512 + 16*32 = 1024
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(1024, num_classes)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.block1(x)
        x = self.trans1(x)
        x = self.block2(x)
        x = self.trans2(x)
        x = self.block3(x)
        x = self.trans3(x)
        x = self.block4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
```
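As a minimal usage sketch (not part of the original answer), the loop below trains any of the 32x32-friendly models above on CIFAR-10 via torchvision. The batch size, optimizer settings, epoch count, and normalization statistics are conventional placeholder choices, not tuned values:

```python
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

transform = transforms.Compose([
    transforms.ToTensor(),
    # per-channel mean/std commonly quoted for CIFAR-10
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616)),
])
train_set = torchvision.datasets.CIFAR10(root="./data", train=True,
                                         download=True, transform=transform)
train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)

model = ResNet18().to(device)  # swap in any of the classes defined above
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1,
                            momentum=0.9, weight_decay=5e-4)

for epoch in range(10):
    model.train()
    for images, labels in train_loader:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()
        optimizer.step()
    print(f"epoch {epoch + 1}: last batch loss {loss.item():.4f}")
```

For AlexNet, also add transforms.Resize(224) to the transform pipeline, as noted in its comment.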

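A quick shape sanity check (again an illustrative addition, assuming the classes above are in scope) confirms that each 32x32-capable model maps a CIFAR-sized batch to (batch, 10) logits:

```python
import torch

# smoke test: push one random CIFAR-sized batch through each model
# (AlexNet is excluded because it requires upsampled inputs)
for Model in (LeNet5, VGG11, ResNet18, DenseNet121):
    net = Model(num_classes=10)
    out = net(torch.randn(2, 3, 32, 32))
    print(Model.__name__, tuple(out.shape))  # expect (2, 10)
```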