How many convolutional layers are there in the MobileNetV2 backbone?
The MobileNetV2 backbone consists of 17 inverted residual (bottleneck) blocks. The first block uses an expansion ratio of 1 and therefore contains only a depthwise 3x3 convolution and a 1x1 projection convolution (2 layers); each of the remaining 16 blocks contains a 1x1 expansion convolution, a depthwise 3x3 convolution, and a 1x1 projection convolution (3 layers). Counting the initial 3x3 stem convolution and the final 1x1 convolution of the feature extractor as well, the backbone has 1 + 2 + 16 × 3 + 1 = 52 convolutional layers in total (the classifier head is not included).
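This count can be sanity-checked against the torchvision implementation (assuming torchvision is installed) by counting the InvertedResidual blocks and Conv2d modules in the feature extractor:
```python
import torch.nn as nn
from torchvision.models import mobilenet_v2

features = mobilenet_v2().features  # the feature extractor, i.e. the backbone
blocks = [m for m in features if m.__class__.__name__ == 'InvertedResidual']
convs = [m for m in features.modules() if isinstance(m, nn.Conv2d)]
print(len(blocks))  # 17 bottleneck blocks
print(len(convs))   # 52 convolutional layers in total
```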
Related questions
Tutorial: adding SE modules to a DeepLabV3+ model with a MobileNetV2 backbone in a PyTorch environment
1. Install the required libraries and modules
Before using the SE module, install the required libraries and modules, including pytorch, torchvision and numpy. They can be installed with pip or conda.
```bash
pip install torch torchvision numpy
```
2. Define the SE module
The SE module has two parts: squeeze and excitation. The squeeze part is a global average pooling layer that compresses each feature map into a single value; the excitation part is a small fully connected network that learns a per-channel importance weight.
```python
import torch.nn as nn


class SEModule(nn.Module):
    def __init__(self, channels, reduction=16):
        super(SEModule, self).__init__()
        # Squeeze: global average pooling to a single value per channel.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # Excitation: bottleneck MLP producing per-channel weights in (0, 1).
        self.fc = nn.Sequential(
            nn.Linear(channels, channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)
        y = self.fc(y).view(b, c, 1, 1)
        # Rescale the input feature map channel-wise.
        return x * y
```
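A quick shape check (a minimal usage example, not part of the original answer) confirms that the module only rescales the input channel-wise and leaves its shape unchanged:
```python
import torch

se = SEModule(channels=64, reduction=16)
x = torch.randn(2, 64, 32, 32)   # (batch, channels, height, width)
print(se(x).shape)               # torch.Size([2, 64, 32, 32]): same shape, rescaled channels
```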
3. Modify the DeepLabV3+ model
In the DeepLabV3+ model, the MobileNetV2 backbone is split into stages, and an SE module is inserted after each stage (at the points where the number of channels becomes 24, 32, 96 and 320).
```python
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models


class DeepLabV3Plus(nn.Module):
    def __init__(self, n_classes=21):
        super(DeepLabV3Plus, self).__init__()
        self.mobilenetv2 = models.mobilenet_v2(pretrained=True).features
        # One SE module per backbone stage; the channel counts match the
        # outputs of the backbone slices used in forward().
        self.se_module1 = SEModule(24)
        self.se_module2 = SEModule(32)
        self.se_module3 = SEModule(96)
        self.se_module4 = SEModule(320)
        # ASPP and Decoder are assumed to be defined elsewhere
        # (standard DeepLabV3+ components).
        self.aspp = ASPP(320, [12, 24, 36])
        self.decoder = Decoder(n_classes)

    def forward(self, x):
        h, w = x.size()[2:]
        # Run the backbone stage by stage and apply the SE modules in between.
        x = self.mobilenetv2[:4](x)     # -> 24 channels
        x = self.se_module1(x)
        x = self.mobilenetv2[4:7](x)    # -> 32 channels
        x = self.se_module2(x)
        x = self.mobilenetv2[7:14](x)   # -> 96 channels
        x = self.se_module3(x)
        x = self.mobilenetv2[14:18](x)  # -> 320 channels
        x = self.se_module4(x)
        x = self.aspp(x)
        x = self.decoder(x, h, w)
        # Upsample to the input resolution (a no-op if the decoder already restored it).
        x = F.interpolate(x, size=(h, w), mode='bilinear', align_corners=False)
        return x
```
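The slice indices used in forward() depend on the layout of torchvision's mobilenet_v2 feature extractor; the following check (assuming the same torchvision model) confirms that the channel counts at the split points match the SE modules defined above:
```python
import torch
import torchvision.models as models

features = models.mobilenet_v2().features
x = torch.randn(1, 3, 224, 224)
for name, stage in [('[:4]', features[:4]), ('[4:7]', features[4:7]),
                    ('[7:14]', features[7:14]), ('[14:18]', features[14:18])]:
    x = stage(x)
    print(name, x.shape[1])  # 24, 32, 96 and 320 channels respectively
```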
4. Train and test the model
The modified model can be trained and tested with essentially the same code as the original model; the sketch below assumes that train_loader, test_loader and num_epochs are already defined.
```python
import torch
import torch.nn as nn
import torch.optim as optim

model = DeepLabV3Plus(n_classes=21)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=1e-4)
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

for epoch in range(num_epochs):
    model.train()
    for inputs, labels in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
    scheduler.step()

    model.eval()
    with torch.no_grad():
        for inputs, labels in test_loader:
            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            # do something with preds
```
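As a concrete example of "do something with preds" (a minimal sketch; segmentation benchmarks would normally report mIoU instead), the test loop can accumulate a simple pixel accuracy:
```python
correct, total = 0, 0
model.eval()
with torch.no_grad():
    for inputs, labels in test_loader:
        preds = model(inputs).argmax(dim=1)        # predicted class per pixel, shape (B, H, W)
        correct += (preds == labels).sum().item()  # correctly classified pixels
        total += labels.numel()
print(f'pixel accuracy: {correct / total:.4f}')
```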
Adding SKNet to the MobileNetV2 backbone in the mmsegmentation framework
In the `mmsegmentation` framework, SKNet can be added to the `MobileNetV2` backbone by modifying the file `mmseg/models/backbones/mobilenet_v2.py`. The steps are as follows:
1. First, import the required modules by adding the following code at the top of the file:
```python
from mmcv.cnn import build_conv_layer, build_norm_layer, build_plugin_layer
from ..builder import BACKBONES
```
2. Next, define the SKNet structure in the `MobileNetV2` class by adding the following code to the `_make_stem_layer` function:
```python
def _make_stem_layer(self, in_channels, stem_channels):
    layers = []
    layers.append(ConvModule(
        in_channels,
        stem_channels,
        3,
        stride=2,
        padding=1,
        bias=False,
        norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
        act_cfg=dict(type='ReLU'),
        inplace=True))
    in_channels = stem_channels
    layers.append(ConvModule(
        in_channels,
        in_channels,
        3,
        stride=1,
        padding=1,
        bias=False,
        norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
        act_cfg=dict(type='ReLU'),
        inplace=True))
    # Add the SKNet module. 'SKConv' is not shipped with mmcv; it is assumed
    # to be a custom module registered in mmcv's PLUGIN_LAYERS registry (see
    # the sketch below), and the config keys must match that implementation.
    channels = in_channels
    mid_channels = channels // 2
    squeeze_channels = max(1, mid_channels // 8)
    layers.append(build_plugin_layer(dict(
        type='SKConv',
        channels=channels,
        squeeze_channels=squeeze_channels,
        kernel_size=3,
        stride=1,
        padding=1,
        dilation=1,
        groups=32,
        sk_mode='two',
        norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
        act_cfg=dict(type='ReLU', inplace=True)))[1])
    # A plain 3x3 conv + BN after the SK block.
    layers.append(build_conv_layer(
        dict(type='Conv2d'),
        channels,
        channels,
        kernel_size=3,
        stride=1,
        padding=1,
        bias=False))
    layers.append(build_norm_layer(
        dict(type='BN', momentum=0.1, eps=1e-5), channels)[1])
    return nn.Sequential(*layers)
```
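mmcv does not provide an 'SKConv' plugin out of the box, so the config above assumes a custom module registered in mmcv's PLUGIN_LAYERS registry (importable from mmcv.cnn in mmcv 1.x). The following is a minimal sketch of such a plugin, in the spirit of SKNet (two 3x3 branches with different dilations, fused by a softmax attention over branches); the class and its constructor arguments are hypothetical and simply mirror the config keys used above:
```python
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, PLUGIN_LAYERS


@PLUGIN_LAYERS.register_module()
class SKConv(nn.Module):
    """Minimal selective-kernel convolution (hypothetical plugin sketch)."""

    def __init__(self,
                 channels,
                 squeeze_channels,
                 kernel_size=3,
                 stride=1,
                 padding=1,
                 dilation=1,
                 groups=32,
                 sk_mode='two',
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU', inplace=True)):
        super().__init__()
        num_branches = 2 if sk_mode == 'two' else 3
        self.branches = nn.ModuleList()
        for i in range(num_branches):
            d = dilation * (i + 1)  # each branch uses a larger dilation
            self.branches.append(ConvModule(
                channels,
                channels,
                kernel_size,
                stride=stride,
                padding=d * (kernel_size // 2),  # keep the spatial size
                dilation=d,
                groups=groups,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))
        # Fuse: global pooling -> squeeze -> per-branch channel attention.
        self.pool = nn.AdaptiveAvgPool2d(1)
        self.squeeze = ConvModule(channels, squeeze_channels, 1, act_cfg=act_cfg)
        self.attn = nn.Conv2d(squeeze_channels, channels * num_branches, 1)
        self.num_branches = num_branches
        self.channels = channels

    def forward(self, x):
        feats = torch.stack([b(x) for b in self.branches], dim=1)  # (B, K, C, H, W)
        fused = feats.sum(dim=1)                                   # element-wise fusion
        attn = self.attn(self.squeeze(self.pool(fused)))
        attn = attn.view(x.size(0), self.num_branches, self.channels, 1, 1)
        attn = attn.softmax(dim=1)       # softmax over branches, per channel
        return (feats * attn).sum(dim=1)
```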
3. Finally, register the `MobileNetV2` backbone in `BACKBONES`. The complete code is as follows:
```python
import torch.nn as nn
from mmcv.cnn import (ConvModule, build_conv_layer, build_norm_layer,
                      build_plugin_layer)

from ..builder import BACKBONES
from ..utils import InvertedResidual  # mmseg's inverted residual block


@BACKBONES.register_module()
class MobileNetV2(nn.Module):

    def __init__(self,
                 widen_factor=1.0,
                 output_stride=32,
                 norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
                 with_cp=False):
        super(MobileNetV2, self).__init__()
        assert output_stride in [8, 16, 32]
        self.output_stride = output_stride
        self.with_cp = with_cp
        self.norm_cfg = norm_cfg

        # Per-stage strides follow the original MobileNetV2 design; for
        # output_stride 8/16 the later downsampling stages keep stride 1 and
        # use dilated convolutions instead (standard DeepLab practice).
        if output_stride == 32:
            strides = (1, 2, 2, 2, 1, 2, 1)
            dilations = (1, 1, 1, 1, 1, 1, 1)
        elif output_stride == 16:
            strides = (1, 2, 2, 2, 1, 1, 1)
            dilations = (1, 1, 1, 1, 1, 2, 2)
        else:  # output_stride == 8
            strides = (1, 2, 2, 1, 1, 1, 1)
            dilations = (1, 1, 1, 2, 2, 4, 4)

        input_channel = int(32 * widen_factor)
        self.stem = self._make_stem_layer(3, input_channel)
        self.layer1 = self._make_layer(
            input_channel, int(16 * widen_factor), 1, strides[0], 1, dilations[0])
        self.layer2 = self._make_layer(
            int(16 * widen_factor), int(24 * widen_factor), 2, strides[1], 6, dilations[1])
        self.layer3 = self._make_layer(
            int(24 * widen_factor), int(32 * widen_factor), 3, strides[2], 6, dilations[2])
        self.layer4 = self._make_layer(
            int(32 * widen_factor), int(64 * widen_factor), 4, strides[3], 6, dilations[3])
        self.layer5 = self._make_layer(
            int(64 * widen_factor), int(96 * widen_factor), 3, strides[4], 6, dilations[4])
        self.layer6 = self._make_layer(
            int(96 * widen_factor), int(160 * widen_factor), 3, strides[5], 6, dilations[5])
        self.layer7 = self._make_layer(
            int(160 * widen_factor), int(320 * widen_factor), 1, strides[6], 6, dilations[6])
        self._freeze_stages()

    def _make_stem_layer(self, in_channels, stem_channels):
        layers = []
        layers.append(ConvModule(
            in_channels,
            stem_channels,
            3,
            stride=2,
            padding=1,
            bias=False,
            norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
            act_cfg=dict(type='ReLU'),
            inplace=True))
        in_channels = stem_channels
        layers.append(ConvModule(
            in_channels,
            in_channels,
            3,
            stride=1,
            padding=1,
            bias=False,
            norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
            act_cfg=dict(type='ReLU'),
            inplace=True))
        # Add the SKNet module; 'SKConv' must be registered in PLUGIN_LAYERS.
        channels = in_channels
        mid_channels = channels // 2
        squeeze_channels = max(1, mid_channels // 8)
        layers.append(build_plugin_layer(dict(
            type='SKConv',
            channels=channels,
            squeeze_channels=squeeze_channels,
            kernel_size=3,
            stride=1,
            padding=1,
            dilation=1,
            groups=32,
            sk_mode='two',
            norm_cfg=dict(type='BN', momentum=0.1, eps=1e-5),
            act_cfg=dict(type='ReLU', inplace=True)))[1])
        # A plain 3x3 conv + BN after the SK block.
        layers.append(build_conv_layer(
            dict(type='Conv2d'),
            channels,
            channels,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=False))
        layers.append(build_norm_layer(
            dict(type='BN', momentum=0.1, eps=1e-5), channels)[1])
        return nn.Sequential(*layers)

    def _make_layer(self,
                    in_channels,
                    out_channels,
                    num_blocks,
                    stride,
                    t,
                    dilation=1):
        layers = []
        # The first block handles the stride and channel change; the remaining
        # blocks keep stride 1.
        layers.append(InvertedResidual(
            in_channels,
            out_channels,
            stride,
            expand_ratio=t,
            dilation=dilation,
            norm_cfg=self.norm_cfg,
            with_cp=self.with_cp))
        for _ in range(1, num_blocks):
            layers.append(InvertedResidual(
                out_channels,
                out_channels,
                1,
                expand_ratio=t,
                dilation=dilation,
                norm_cfg=self.norm_cfg,
                with_cp=self.with_cp))
        return nn.Sequential(*layers)

    def _freeze_stages(self):
        # Freeze the stem (or its first two layers) so it is not updated.
        if self.output_stride == 8:
            self.stem.eval()
            for param in self.stem.parameters():
                param.requires_grad = False
        elif self.output_stride == 16:
            for m in [self.stem[0], self.stem[1]]:
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def forward(self, x):
        # Return the outputs of all seven stages so that heads can pick the
        # feature maps they need via in_index.
        x = self.stem(x)
        outs = []
        for i in range(1, 8):
            x = getattr(self, f'layer{i}')(x)
            outs.append(x)
        return tuple(outs)
```
This adds the `SKNet` structure to the `MobileNetV2` backbone.
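To actually use the modified backbone, a config can reference it by its registered name. The snippet below is a hypothetical, minimal mmsegmentation config fragment with illustrative head settings; since `mmseg/models/backbones/mobilenet_v2.py` was edited in place, the backbone keeps its original name, and `in_index=6` picks the last of the seven stage outputs returned by the backbone above:
```python
# Hypothetical config fragment; parameter values are illustrative only.
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        type='MobileNetV2',       # the modified backbone registered above
        widen_factor=1.0,
        output_stride=16),
    decode_head=dict(
        type='ASPPHead',
        in_channels=320,          # channels of the last stage (layer7)
        in_index=6,               # index into the backbone's stage outputs
        channels=256,
        dilations=(1, 12, 24, 36),
        num_classes=19,
        norm_cfg=dict(type='BN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
```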