The following code raises an error:

```python
def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
    super(NLayerDiscriminator, self).__init__()
    self.use_parallel = use_parallel
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d
    self.conv1 = nn.Conv2d(input_nc, ndf, kernel_size=3, padding=1)
    self.conv_offset1 = nn.Conv2d(512, 18, kernel_size=3, stride=1, padding=1)
    init_offset1 = torch.Tensor(np.zeros([18, 512, 3, 3]))
    self.conv_offset1.weight = torch.nn.Parameter(init_offset1)  # initialized to 0
    self.conv_mask1 = nn.Conv2d(512, 9, kernel_size=3, stride=1, padding=1)
    init_mask1 = torch.Tensor(np.zeros([9, 512, 3, 3]) + np.array([0.5]))
    self.conv_mask1.weight = torch.nn.Parameter(init_mask1)  # initialized to 0.5

def forward(self, input):
    input = self.conv1(input)
    offset1 = self.conv_offset1(input)
    mask1 = torch.sigmoid(self.conv_mask1(input))
    sequence1 = [torchvision.ops.deform_conv2d(input=input, offset=offset1, weight=self.conv1.weight, mask=mask1, padding=(1, 1))]
    sequence2 = sequence1 + self.sequence2
    self.model = nn.Sequential(*sequence2)
    nn.LeakyReLU(0.2, True)
    return self.model(input)
```

Running it fails with: `RuntimeError: Given groups=1, weight of size [18, 512, 3, 3], expected input[1, 64, 512, 512] to have 512 channels, but got 64 channels instead`. How should this be fixed?
This error occurs because the input channel count of conv_offset1 and conv_mask1 does not match the output channel count of conv1. You can either change the input channels of conv_offset1 and conv_mask1 to match conv1's output channels, or change conv1's output channels to match conv_offset1 and conv_mask1. A quick print of the relevant channel attributes confirms whether they match.
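For example, a minimal standalone check built from the layer definitions in the question makes the mismatch visible:

```python
import torch.nn as nn

# Layers with the same shapes as in the question
conv1 = nn.Conv2d(3, 64, kernel_size=3, padding=1)           # out_channels = ndf = 64
conv_offset1 = nn.Conv2d(512, 18, kernel_size=3, padding=1)  # expects 512 input channels
conv_mask1 = nn.Conv2d(512, 9, kernel_size=3, padding=1)     # expects 512 input channels

print(conv1.out_channels, conv_offset1.in_channels, conv_mask1.in_channels)
# 64 512 512  -> conv_offset1/conv_mask1 expect 512 channels but receive 64 from conv1
```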
From the error message, the tensor fed into conv_offset1 has shape [1, 64, 512, 512]: conv1's output has ndf = 64 channels, while conv_offset1 and conv_mask1 were built for 512 input channels. Change the input channel count of conv_offset1 and conv_mask1 (and the corresponding weight initializers) to ndf, i.e. 64. The corrected code:
```python
def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
    super(NLayerDiscriminator, self).__init__()
    self.use_parallel = use_parallel
    if type(norm_layer) == functools.partial:
        use_bias = norm_layer.func == nn.InstanceNorm2d
    else:
        use_bias = norm_layer == nn.InstanceNorm2d
    self.conv1 = nn.Conv2d(input_nc, ndf, kernel_size=3, padding=1)
    # Offset/mask convolutions now take ndf (64) input channels to match conv1's output
    self.conv_offset1 = nn.Conv2d(ndf, 18, kernel_size=3, stride=1, padding=1)
    init_offset1 = torch.Tensor(np.zeros([18, ndf, 3, 3]))
    self.conv_offset1.weight = torch.nn.Parameter(init_offset1)  # initialized to 0
    self.conv_mask1 = nn.Conv2d(ndf, 9, kernel_size=3, stride=1, padding=1)
    init_mask1 = torch.Tensor(np.zeros([9, ndf, 3, 3]) + np.array([0.5]))
    self.conv_mask1.weight = torch.nn.Parameter(init_mask1)  # initialized to 0.5

def forward(self, input):
    input = self.conv1(input)
    offset1 = self.conv_offset1(input)
    mask1 = torch.sigmoid(self.conv_mask1(input))
    sequence1 = [torchvision.ops.deform_conv2d(input=input, offset=offset1, weight=self.conv1.weight, mask=mask1, padding=(1, 1))]
    sequence2 = sequence1 + self.sequence2  # self.sequence2 is assumed to be defined elsewhere in the class
    self.model = nn.Sequential(*sequence2)
    nn.LeakyReLU(0.2, True)
    return self.model(input)
```
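For reference, a minimal standalone call to torchvision.ops.deform_conv2d with consistent shapes (hypothetical values, separate from the discriminator above) illustrates the channel convention: the offset needs 2·kH·kW channels and the mask kH·kW channels for a single offset group, and the weight's in_channels must match the input's channels.

```python
import torch
import torchvision

N, C_in, C_out, H, W, kH, kW = 1, 64, 64, 32, 32, 3, 3

x = torch.randn(N, C_in, H, W)
offset = torch.zeros(N, 2 * kH * kW, H, W)   # 18 channels for a 3x3 kernel
mask = torch.full((N, kH * kW, H, W), 0.5)   # 9 channels for a 3x3 kernel
weight = torch.randn(C_out, C_in, kH, kW)    # in_channels must equal x's channel count

out = torchvision.ops.deform_conv2d(input=x, offset=offset, weight=weight, mask=mask, padding=(1, 1))
print(out.shape)  # torch.Size([1, 64, 32, 32])
```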