How can I add the following layers, used together with torchvision.ops.deform_conv2d,

```
self.conv1 = nn.Conv2d(4 * num_filters, num_filters, kernel_size=3, padding=1)
self.conv_offset1 = nn.Conv2d(512, 18, kernel_size=3, stride=1, padding=1)
init_offset1 = torch.Tensor(np.zeros([18, 512, 3, 3]))
self.conv_offset1.weight = torch.nn.Parameter(init_offset1)  # initialized to 0
self.conv_mask1 = nn.Conv2d(512, 9, kernel_size=3, stride=1, padding=1)
init_mask1 = torch.Tensor(np.zeros([9, 512, 3, 3]) + np.array([0.5]))
self.conv_mask1.weight = torch.nn.Parameter(init_mask1)  # initialized to 0.5
```

into this class? Please give the modified code.

```
class NLayerDiscriminator(nn.Module):
    def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
        super(NLayerDiscriminator, self).__init__()
        self.use_parallel = use_parallel
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]
        sequence += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]
        self.model = nn.Sequential(*sequence)

    def forward(self, input):
        return self.model(input)
```
You can register self.conv1, self.conv_offset1, and self.conv_mask1 as submodules of NLayerDiscriminator and insert a DeformConv2d layer into the nn.Sequential pipeline, as shown below:
```
import functools

import numpy as np
import torch
import torch.nn as nn
from torchvision.ops import DeformConv2d


class NLayerDiscriminator(nn.Module):
    def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
        super(NLayerDiscriminator, self).__init__()
        self.use_parallel = use_parallel
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        nf_mult = 1
        sequence = [
            nn.Conv2d(input_nc, ndf, kernel_size=kw, stride=2, padding=padw),
            nn.LeakyReLU(0.2, True)
        ]
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)
            sequence += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence += [
            # torchvision's DeformConv2d has no deformable_groups argument;
            # a single offset group is its default behaviour
            DeformConv2d(
                ndf * nf_mult_prev, ndf * nf_mult, kernel_size=3, stride=1,
                padding=1, dilation=1, groups=1, bias=False
            ),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True),
            nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)
        ]
        if use_sigmoid:
            sequence += [nn.Sigmoid()]
        self.model = nn.Sequential(*sequence)

        # The offset/mask branches must match the channel count of the feature
        # map that reaches the DeformConv2d layer: ndf * nf_mult_prev
        # (256 with the defaults ndf=64, n_layers=3), not ndf.
        deform_in = ndf * nf_mult_prev
        self.conv1 = nn.Conv2d(4 * ndf, ndf, kernel_size=3, padding=1)  # kept from the question; unused in forward
        self.conv_offset1 = nn.Conv2d(deform_in, 18, kernel_size=3, stride=1, padding=1)  # 18 = 2 * 3 * 3 offsets
        init_offset1 = torch.zeros([18, deform_in, 3, 3])
        self.conv_offset1.weight = torch.nn.Parameter(init_offset1)  # offset weights initialized to 0
        self.conv_mask1 = nn.Conv2d(deform_in, 9, kernel_size=3, stride=1, padding=1)  # 9 = 3 * 3 mask values
        init_mask1 = torch.full([9, deform_in, 3, 3], 0.5)
        self.conv_mask1.weight = torch.nn.Parameter(init_mask1)  # mask weights initialized to 0.5

    def forward(self, input):
        x = input
        for module in self.model:
            if isinstance(module, DeformConv2d):
                # DeformConv2d takes offset and mask as two separate arguments
                # (mask support requires torchvision >= 0.9); the mask is
                # squashed to (0, 1) with a sigmoid, as in DCNv2
                offset = self.conv_offset1(x)
                mask = torch.sigmoid(self.conv_mask1(x))
                x = module(x, offset, mask)
            else:
                x = module(x)
        return x
```
Here DeformConv2d from torchvision.ops replaces the stride-1 convolution before the final output layer. Note two differences from the code in the question: torchvision's DeformConv2d has no deformable_groups argument, and its forward takes offset and mask as two separate tensors rather than a single concatenated offset_mask, so the outputs of self.conv_offset1 and self.conv_mask1 are passed individually (with a sigmoid applied to the mask). Their input channel count must match the feature map entering the DeformConv2d layer, which is ndf * nf_mult_prev rather than ndf.
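To verify that the channel counts line up, you can run a quick forward pass on a dummy tensor. This is a minimal smoke test assuming the default arguments; the 256×256 input size is just an example:

```
import torch

# Smoke test for the class above with its defaults (input_nc=3, ndf=64,
# n_layers=3). At the DeformConv2d layer the feature map has
# ndf * 4 = 256 channels, matching conv_offset1 / conv_mask1.
netD = NLayerDiscriminator()
dummy = torch.randn(1, 3, 256, 256)
with torch.no_grad():
    out = netD(dummy)
print(out.shape)  # patch-level logits; should be torch.Size([1, 1, 34, 34])
```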