ShuffleNet的代码
时间: 2024-05-04 08:20:32 浏览: 17
这里给出ShuffleNet v1和ShuffleNet v2的代码实现,供参考:
**ShuffleNet v1**
```python
import torch.nn as nn
import torch.nn.functional as F
import math
class ShuffleBlock(nn.Module):
    """Channel shuffle operation from ShuffleNet v1.

    After a grouped convolution, channels only mix within their group.
    This module rearranges channels across groups:
    (N, C, H, W) -> (N, g, C/g, H, W) -> swap the two channel axes ->
    flatten back to (N, C, H, W).
    """

    def __init__(self, groups):
        super(ShuffleBlock, self).__init__()
        # Number of groups g; C must be divisible by g.
        self.groups = groups

    def forward(self, x):
        batch, channels, height, width = x.size()
        groups = self.groups
        assert channels % groups == 0, "Error: group number doesn't divide input channels"
        # (N, g, C/g, H, W) -> transpose(1, 2) is equivalent to the
        # permute(0, 2, 1, 3, 4) formulation of the shuffle.
        shuffled = x.view(batch, groups, channels // groups, height, width)
        shuffled = shuffled.transpose(1, 2).contiguous()
        return shuffled.view(batch, channels, height, width)
class ShuffleNet(nn.Module):
    """Simplified ShuffleNet v1 classifier.

    Stem conv + maxpool, three stages of (shuffle -> 1x1 reduce ->
    3x3 depthwise -> 1x1 expand) units, a 1x1 expansion conv, global
    average pooling and a linear classifier.

    NOTE(review): unlike the paper, stages here use stride 1 throughout
    (all spatial downsampling happens in the stem) and there are no
    residual connections — confirm this simplification is intended.

    Args:
        groups: group count used by the channel shuffles; must divide 24
            and every stage width (3 works for the default widths).
        num_classes: size of the final classification layer.
    """

    def __init__(self, groups=3, num_classes=1000):
        super(ShuffleNet, self).__init__()
        self.groups = groups
        self.conv1 = nn.Conv2d(3, 24, kernel_size=3, stride=2, padding=1, bias=False)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stage2 = self._make_stage(24, 144, 3)
        self.stage3 = self._make_stage(144, 288, 7)
        self.stage4 = self._make_stage(288, 576, 3)
        self.conv5 = nn.Conv2d(576, 1024, kernel_size=1, stride=1, padding=0, bias=False)
        self.globalpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(1024, num_classes)

    def _shuffle_unit(self, inplanes, outplanes):
        """One unit: shuffle -> 1x1 reduce -> 3x3 depthwise -> 1x1 expand.

        Returns the unit as a list of modules so a stage can be flattened
        into a single nn.Sequential (keeps the original module ordering).
        """
        mid = outplanes // 4  # bottleneck width
        return [
            ShuffleBlock(self.groups),
            nn.Conv2d(inplanes, mid, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            # Depthwise conv: groups == channels.
            nn.Conv2d(mid, mid, kernel_size=3, stride=1, padding=1, groups=mid, bias=False),
            nn.BatchNorm2d(mid),
            nn.Conv2d(mid, outplanes, kernel_size=1, stride=1, padding=0, bias=False),
            nn.BatchNorm2d(outplanes),
            nn.ReLU(inplace=True),
        ]

    def _make_stage(self, inplanes, outplanes, blocks):
        """Stack `blocks` units; only the first changes the channel count.

        (The original inlined the 10-line unit twice — once for the first
        block and once inside the loop; factored out to avoid the
        duplication while keeping identical module order.)
        """
        layers = []
        for i in range(blocks):
            layers.extend(self._shuffle_unit(inplanes if i == 0 else outplanes, outplanes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = self.conv5(x)
        x = self.globalpool(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 1024)
        x = self.fc(x)
        return x
```
**ShuffleNet v2**
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
class ShuffleBlock(nn.Module):
    """ShuffleNet v2 style unit as transcribed here: a concatenating block.

    Main branch: 1x1 conv -> depthwise ksize conv (carries the stride) ->
    1x1 conv. Shortcut: identity for stride 1, depthwise+pointwise convs
    for stride 2. The two branches are concatenated and channel-shuffled,
    so the output has `inp + oup` channels for stride 1 and `2 * oup`
    channels for stride 2.

    NOTE(review): the canonical ShuffleNet v2 unit splits the input in
    half for stride-1 blocks so the channel count stays constant; this
    version concatenates the full input instead, which grows channels
    per block — confirm against the intended architecture.

    Fix: this class calls `torch.cat`, but the snippet only imported
    `torch.nn` — `import torch` is required at file top.
    """

    def __init__(self, inp, oup, mid_channels, ksize, stride):
        super(ShuffleBlock, self).__init__()
        self.stride = stride
        self.ksize = ksize
        self.mid_channels = mid_channels
        # Main branch: pointwise -> depthwise (groups == channels) -> pointwise.
        self.conv1 = nn.Conv2d(inp, mid_channels, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        self.conv2 = nn.Conv2d(mid_channels, mid_channels, kernel_size=ksize, stride=stride, padding=(ksize-1)//2, groups=mid_channels, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        self.conv3 = nn.Conv2d(mid_channels, oup, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(oup)
        # Shortcut is identity unless the block downsamples.
        self.shortcut = nn.Sequential()
        if stride == 2:
            self.shortcut = nn.Sequential(
                nn.Conv2d(inp, inp, kernel_size=ksize, stride=stride, padding=(ksize-1)//2, groups=inp, bias=False),
                nn.BatchNorm2d(inp),
                nn.Conv2d(inp, oup, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(oup)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = F.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.stride == 2:
            shortcut = self.shortcut(x)
        else:
            shortcut = x
        # Concatenate branches, then shuffle across the two halves.
        out = F.relu(torch.cat([out, shortcut], 1))
        out = self.channel_shuffle(out, 2)
        return out

    def channel_shuffle(self, x, groups):
        """Interleave channels: (N, C) -> (N, g, C/g) -> transpose -> flatten."""
        n, c, h, w = x.size()
        x = x.view(n, groups, c//groups, h, w).permute(0,2,1,3,4).contiguous()
        x = x.view(n, c, h, w)
        return x
class ShuffleNetV2(nn.Module):
    """ShuffleNet v2 image classifier (simplified transcription).

    Stem conv+BN+ReLU and maxpool, three stages built from ShuffleBlock
    units, a 1x1 expansion conv, 7x7 average pooling and a linear head.

    NOTE(review): as written this model cannot run end to end — see the
    channel-count note in `_make_stage` below. Flagging rather than
    rewriting, because a correct fix requires changing ShuffleBlock to
    the paper's channel-split design.

    Args:
        num_classes: size of the classification layer.
        input_size: accepted but never used; `avgpool` hardcodes
            kernel_size=7, which assumes a 224x224 input (7x7 final map).
        width_mult: only honored for the exact value 0.5; any other
            non-1.0 multiplier is silently ignored.
    """

    def __init__(self, num_classes=1000, input_size=224, width_mult=1.):
        super(ShuffleNetV2, self).__init__()
        # Blocks per stage and channel widths: [stem, stage2..4, final 1x1].
        self.stage_repeats = [4, 8, 4]
        self.stage_out_channels = [24, 116, 232, 464, 1024]
        # NOTE(review): scaling applies only when width_mult == 0.5.
        if width_mult == 0.5:
            self.stage_out_channels = [int(x * width_mult) for x in self.stage_out_channels]
        self.conv1 = nn.Conv2d(3, self.stage_out_channels[0], kernel_size=3, stride=2, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.stage_out_channels[0])
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stage2 = self._make_stage(1)
        self.stage3 = self._make_stage(2)
        self.stage4 = self._make_stage(3)
        self.conv5 = nn.Conv2d(self.stage_out_channels[3], self.stage_out_channels[4], kernel_size=1, stride=1, padding=0, bias=False)
        self.bn5 = nn.BatchNorm2d(self.stage_out_channels[4])
        # Fixed 7x7 window: only correct for 224x224 inputs.
        self.avgpool = nn.AvgPool2d(kernel_size=7, stride=1)
        self.fc = nn.Linear(self.stage_out_channels[4], num_classes)

    def _make_stage(self, stage):
        """Build stage `stage` (1-based): one stride-2 block, then stride-1 blocks.

        NOTE(review): ShuffleBlock concatenates its shortcut, so the first
        (stride-2) block emits 2 * stage_out_channels while the next block
        expects stage_out_channels inputs — the forward pass will fail with
        a channel mismatch. The paper keeps channels constant by splitting
        the input in half for stride-1 units; this transcription does not.
        """
        modules = []
        stage_repeats = self.stage_repeats[stage-1]
        stage_out_channels = self.stage_out_channels[stage]
        for i in range(stage_repeats):
            if i == 0:
                # Downsampling block: takes the previous stage's width.
                modules.append(ShuffleBlock(self.stage_out_channels[stage-1], stage_out_channels, stage_out_channels//2, ksize=3, stride=2))
            else:
                modules.append(ShuffleBlock(stage_out_channels, stage_out_channels, stage_out_channels//2, ksize=3, stride=1))
        return nn.Sequential(*modules)

    def forward(self, x):
        x = F.relu(self.bn1(self.conv1(x)))
        x = self.maxpool(x)
        x = self.stage2(x)
        x = self.stage3(x)
        x = self.stage4(x)
        x = F.relu(self.bn5(self.conv5(x)))
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)  # flatten to (N, 1024)
        x = self.fc(x)
        return x
```
以上给出的是 PyTorch 的实现代码,包括 ShuffleBlock 以及 ShuffleNet/ShuffleNetV2 两个模型。注意两点:(1) v2 代码中用到了 `torch.cat`,需在文件顶部补充 `import torch`;(2) v2 中 stride=1 的 ShuffleBlock 会把完整输入与主分支输出在通道维拼接,通道数逐块增长,与论文中按通道对半拆分(channel split)保持通道数不变的设计不同,按上述 `_make_stage` 直接堆叠会在第二个 block 处出现通道数不匹配。具体实现细节请以论文为准。
相关推荐
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![rar](https://img-home.csdnimg.cn/images/20210720083606.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)