定义带有指定参数的 PatchGAN 判别器:

```python
class NLayerDiscriminator(nn.Module):
    def __init__(self, input_nc=3, ndf=64, n_layers=3, norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
        super(NLayerDiscriminator, self).__init__()
        self.use_parallel = use_parallel
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d
        self.conv1 = nn.Conv2d(input_nc, ndf, kernel_size=3, padding=1)
        self.conv_offset1 = nn.Conv2d(ndf, 18, kernel_size=3, stride=1, padding=1)
        init_offset1 = torch.Tensor(np.zeros([18, ndf, 3, 3]))
        self.conv_offset1.weight = torch.nn.Parameter(init_offset1)  # 初始化为0
        self.conv_mask1 = nn.Conv2d(ndf, 9, kernel_size=3, stride=1, padding=1)
        init_mask1 = torch.Tensor(np.zeros([9, ndf, 3, 3]) + np.array([0.5]))
        self.conv_mask1.weight = torch.nn.Parameter(init_mask1)  # 初始化为0.5
        kw = 4
        padw = int(np.ceil((kw-1)/2))
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2**n, 8)
            self.sequence2 = [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2**n_layers, 8)
        self.sequence2 += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult, kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]
        self.sequence2 += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            self.sequence2 += [nn.Sigmoid()]

    def forward(self, input):
        input = self.conv1(input)
        offset1 = self.conv_offset1(input)
        mask1 = torch.sigmoid(self.conv_mask1(input))
        sequence1 = [
            torchvision.ops.deform_conv2d(input=input, offset=offset1, weight=self.conv1.weight, mask=mask1, padding=(1, 1))
        ]
```

上述代码运行时出现错误:`RuntimeError: Expected weight_c.size(1) * n_weight_grps == input_c.size(1) to be true, but got false. (Could this error message be improved? If so, please report an enhancement request to PyTorch.)`,请问如何解决?请给出修改后的代码。
时间: 2023-08-22 20:04:22 浏览: 192
USB_Video_Payload_DV_1.1.rar_The Class_usb video class
这个错误通常是由于卷积层的通道数和输入数据的通道数不匹配导致的。根据错误信息可以看出,这个错误是在第 49 行的 deform_conv2d 函数中出现的。根据代码,这个函数是使用可变形卷积进行特征提取的,因此需要注意卷积层的通道数和输入数据的通道数是否匹配。具体来说,需要检查卷积层的输入通道数和权重矩阵的通道数是否匹配。
下面是修改后的代码,主要是修改了可变形卷积的输入参数,确保通道数匹配:
```python
class NLayerDiscriminator(nn.Module):
    """PatchGAN discriminator whose first stage is a modulated deformable
    convolution (DCNv2-style) over the stem features.

    Pipeline: ``conv1`` lifts the image to ``ndf`` channels, then
    ``deform_conv2d`` (driven by learned offsets and a sigmoid mask)
    refines them, and ``sequence2`` downsamples to a 1-channel patch map.

    Args:
        input_nc: number of input image channels.
        ndf: number of filters in the first conv layer.
        n_layers: number of strided downsampling conv layers.
        norm_layer: normalization layer class (or ``functools.partial``).
        use_sigmoid: append a ``Sigmoid`` to squash the patch map to [0, 1].
        use_parallel: stored flag; presumably consumed by training code
            elsewhere (data parallelism) — not used inside this class.
    """

    def __init__(self, input_nc=3, ndf=64, n_layers=3,
                 norm_layer=nn.BatchNorm2d, use_sigmoid=False, use_parallel=True):
        super(NLayerDiscriminator, self).__init__()
        self.use_parallel = use_parallel
        # InstanceNorm has no learnable shift by default, so convs that feed
        # it keep their own bias; BatchNorm makes the conv bias redundant.
        if type(norm_layer) == functools.partial:
            use_bias = norm_layer.func == nn.InstanceNorm2d
        else:
            use_bias = norm_layer == nn.InstanceNorm2d

        # Stem: input_nc -> ndf channels.
        self.conv1 = nn.Conv2d(input_nc, ndf, kernel_size=3, padding=1)

        # Offset branch: 2 * 3 * 3 = 18 channels (x/y shift per kernel tap),
        # initialized to zero so training starts from a plain convolution.
        self.conv_offset1 = nn.Conv2d(ndf, 18, kernel_size=3, stride=1, padding=1)
        init_offset1 = torch.Tensor(np.zeros([18, ndf, 3, 3]))
        self.conv_offset1.weight = torch.nn.Parameter(init_offset1)  # init offsets to 0

        # Modulation branch: 3 * 3 = 9 channels (one scalar per kernel tap);
        # weights start at 0.5 (mask values pass through sigmoid in forward).
        self.conv_mask1 = nn.Conv2d(ndf, 9, kernel_size=3, stride=1, padding=1)
        init_mask1 = torch.Tensor(np.zeros([9, ndf, 3, 3]) + np.array([0.5]))
        self.conv_mask1.weight = torch.nn.Parameter(init_mask1)  # init weights to 0.5

        # FIX: deform_conv2d previously reused self.conv1.weight, whose input
        # channel dim is input_nc (3), while the deformable input already has
        # ndf channels — the cause of "Expected weight_c.size(1) *
        # n_weight_grps == input_c.size(1)". Give the deformable stage its own
        # ndf -> ndf weight (and bias) instead.
        self.conv2 = nn.Conv2d(ndf, ndf, kernel_size=3, padding=1)

        kw = 4
        padw = int(np.ceil((kw - 1) / 2))
        sequence2 = []
        nf_mult = 1
        for n in range(1, n_layers):
            nf_mult_prev = nf_mult
            nf_mult = min(2 ** n, 8)  # cap channel growth at ndf * 8
            sequence2 += [
                nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                          kernel_size=kw, stride=2, padding=padw, bias=use_bias),
                norm_layer(ndf * nf_mult),
                nn.LeakyReLU(0.2, True)
            ]
        nf_mult_prev = nf_mult
        nf_mult = min(2 ** n_layers, 8)
        sequence2 += [
            nn.Conv2d(ndf * nf_mult_prev, ndf * nf_mult,
                      kernel_size=kw, stride=1, padding=padw, bias=use_bias),
            norm_layer(ndf * nf_mult),
            nn.LeakyReLU(0.2, True)
        ]
        # Final 1-channel "real/fake per patch" map.
        sequence2 += [nn.Conv2d(ndf * nf_mult, 1, kernel_size=kw, stride=1, padding=padw)]
        if use_sigmoid:
            sequence2 += [nn.Sigmoid()]
        # FIX: a plain Python list is invisible to .parameters()/.to()/.cuda();
        # register the layers properly via nn.Sequential.
        self.sequence2 = nn.Sequential(*sequence2)

    def forward(self, input):
        """Return the patch-level prediction map for a batch of images."""
        feat = self.conv1(input)
        offset1 = self.conv_offset1(feat)
        mask1 = torch.sigmoid(self.conv_mask1(feat))  # modulation scalars in (0, 1)
        out = torchvision.ops.deform_conv2d(
            input=feat,
            offset=offset1,
            weight=self.conv2.weight,  # ndf -> ndf: matches feat's channel count
            bias=self.conv2.bias,
            stride=1,
            padding=(1, 1),
            dilation=(1, 1),
            mask=mask1,
        )
        return self.sequence2(out)
```
需要注意的是,由于 deform_conv2d 函数的输入参数有很多,需要根据实际情况进行调整。
阅读全文