Add comments to the following code:

class conv_block(nn.Module):
    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.conv(x)
        return x

class SqueezeAttentionBlock(nn.Module):
    def __init__(self, ch_in, ch_out):
        super(SqueezeAttentionBlock, self).__init__()
        self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)
        self.conv = conv_block(ch_in, ch_out)
        self.conv_atten = conv_block(ch_in, ch_out)
        self.upsample = nn.Upsample(scale_factor=2)

    def forward(self, x):
        # print(x.shape)
        x_res = self.conv(x)
        # print(x_res.shape)
        y = self.avg_pool(x)
        # print(y.shape)
        y = self.conv_atten(y)
        # print(y.shape)
        y = self.upsample(y)
        # print(y.shape, x_res.shape)
        return (y * x_res) + y
import torch
import torch.nn as nn

# Define the convolution block: two 3x3 Conv -> BatchNorm -> ReLU stages
class conv_block(nn.Module):
    def __init__(self, ch_in, ch_out):
        super(conv_block, self).__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),  # 3x3 convolution: ch_in input channels, ch_out output channels
            nn.BatchNorm2d(ch_out),  # batch normalization of the output feature maps
            nn.ReLU(inplace=True),   # ReLU activation: clamps negative values to 0
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),  # second 3x3 convolution
            nn.BatchNorm2d(ch_out),  # batch normalization
            nn.ReLU(inplace=True)    # ReLU activation
        )

    def forward(self, x):
        x = self.conv(x)  # forward pass: apply both convolution stages
        return x
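As a quick sanity check, here is a minimal sketch continuing from the definitions above (my own addition; the channel sizes and input shape are made up for illustration). Because every convolution uses kernel_size=3 with stride=1 and padding=1, conv_block changes only the channel count and preserves the spatial size:

block = conv_block(ch_in=8, ch_out=16)  # hypothetical channel sizes
x = torch.randn(1, 8, 32, 32)           # dummy batch: 1 sample, 8 channels, 32x32
print(block(x).shape)                   # torch.Size([1, 16, 32, 32]) -- H and W unchanged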
# Define the SqueezeAttentionBlock, which re-weights the feature map with attention
class SqueezeAttentionBlock(nn.Module):
    def __init__(self, ch_in, ch_out):
        super(SqueezeAttentionBlock, self).__init__()
        self.avg_pool = nn.AvgPool2d(kernel_size=2, stride=2)  # average pooling: downsamples the feature map by a factor of 2
        self.conv = conv_block(ch_in, ch_out)        # convolution block applied to the full-resolution input (residual branch)
        self.conv_atten = conv_block(ch_in, ch_out)  # convolution block that learns the attention weights
        self.upsample = nn.Upsample(scale_factor=2)  # upsampling layer: restores the pooled map to the input resolution

    def forward(self, x):
        x_res = self.conv(x)    # convolve the original feature map
        y = self.avg_pool(x)    # downsample the feature map
        y = self.conv_atten(y)  # convolve the downsampled map to obtain the attention weights
        y = self.upsample(y)    # restore the pooled map to the original resolution
        return (y * x_res) + y  # apply the attention weights to the convolved features, then add the weights back
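And a matching usage sketch for the full block (again my own example with made-up sizes; note that the pool/upsample pair only round-trips cleanly when H and W are even):

sab = SqueezeAttentionBlock(ch_in=8, ch_out=16)  # hypothetical channel sizes
x = torch.randn(1, 8, 32, 32)                    # even H and W, so pooling then upsampling restores 32x32
out = sab(x)
print(out.shape)                                 # torch.Size([1, 16, 32, 32])

One detail worth noting: the code applies no sigmoid to y, so these "attention weights" are unbounded, and the final (y * x_res) + y is algebraically the same as y * (x_res + 1).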