```
def _break_up_pc(self, pc):
    xyz = pc[..., 0:3].contiguous()
    features = (
        pc[..., 3:].transpose(1, 2).contiguous()
        if pc.size(-1) > 3 else None
    )
    return xyz, features

def forward(self, pointcloud: torch.cuda.FloatTensor):
    xyz, features = self._break_up_pc(pointcloud)

    l_xyz, l_features = [xyz], [features]
    for i in range(len(self.SA_modules)):
        li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
        l_xyz.append(li_xyz)
        l_features.append(li_features)
```
When I call SA_modules I clearly pass only two arguments, l_xyz[i] and l_features[i], so why do I get this error?
```
File "/root/autodl-tmp/project/tools/../lib/net/pointnet2_msg.py", line 154, in forward
    li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
  File "/root/miniconda3/lib/python3.8/site-packages/torch/nn/modules/module.py", line 727, in _call_impl
    result = self.forward(*input, **kwargs)
TypeError: forward() takes 2 positional arguments but 3 were given
```
Judging from the traceback, forward() actually received three positional arguments, not two. Python counts self as the first, l_xyz[i] as the second, and l_features[i] as the third. The message therefore means that the forward() defined in the module stored in SA_modules accepts only one argument besides self, while the call site passes two. Check the forward() definition used by SA_modules and make sure it accepts the features tensor (for example as an optional second argument). Alternatively, if l_features[i] is None, you can call the module with l_xyz[i] alone.
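To see where the count of three comes from, here is a minimal, hypothetical sketch (BrokenSA and FixedSA are made-up names, not the modules defined in pointnet2_msg.py):
```
import torch
import torch.nn as nn

class BrokenSA(nn.Module):
    # forward only accepts self + xyz, so calling it with (xyz, features)
    # raises "forward() takes 2 positional arguments but 3 were given".
    def forward(self, xyz):
        return xyz, None

class FixedSA(nn.Module):
    # forward also accepts an optional per-point feature tensor.
    def forward(self, xyz, features=None):
        return xyz, features

xyz = torch.randn(2, 1024, 3)        # (B, N, 3) coordinates
features = torch.randn(2, 16, 1024)  # (B, C, N) per-point features

# BrokenSA()(xyz, features)  # TypeError: 3 positional args for a 2-arg forward()
out_xyz, out_feats = FixedSA()(xyz, features)  # works
```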
Related questions
```
def _break_up_pc(self, pc):
    xyz = pc[..., 0:3].contiguous()
    features = (
        pc[..., 3:].transpose(1, 2).contiguous()
        if pc.size(-1) > 3 else None
    )
    return xyz, features
```
This is a private helper method of a PointNet++ model that splits the input point cloud tensor (pc) into xyz coordinates and a feature tensor (features), and returns the two as a tuple.

Concretely, it first slices out the first three values of the last dimension, i.e. each point's x, y, and z coordinates; contiguous() is called so the resulting tensor is laid out contiguously in memory.

It then checks whether the last dimension of the input is larger than 3. If so, the remaining values are per-point features. The input point cloud is stored as (B, N, C), so the sliced features have shape (B, N, C-3); transpose(1, 2) converts them to the channels-first layout (B, C-3, N) that the PointNet++ set-abstraction modules expect.

Finally, the method returns the xyz coordinates and the feature tensor as a tuple (features is None when the input contains only xyz).
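For illustration, a quick shape check with made-up sizes, assuming a (B, N, C) point cloud that carries one extra intensity channel besides xyz:
```
import torch

pc = torch.randn(2, 1024, 4)  # B=2 clouds, N=1024 points, C=4 channels (x, y, z, intensity)

xyz = pc[..., 0:3].contiguous()                      # (2, 1024, 3)
features = pc[..., 3:].transpose(1, 2).contiguous()  # (2, 1, 1024), channels-first

print(xyz.shape, features.shape)  # torch.Size([2, 1024, 3]) torch.Size([2, 1, 1024])
```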
```
for k in range(cfg.RPN.SA_CONFIG.NPOINTS.__len__()):
    mlps = cfg.RPN.SA_CONFIG.MLPS[k].copy()
    channel_out = 0
    for idx in range(mlps.__len__()):
        mlps[idx] = [channel_in] + mlps[idx]
        channel_out += mlps[idx][-1]

    self.SA_modules.append(
        PointnetSAModuleMSG(
            npoint=cfg.RPN.SA_CONFIG.NPOINTS[k],
            radii=cfg.RPN.SA_CONFIG.RADIUS[k],
            nsamples=cfg.RPN.SA_CONFIG.NSAMPLE[k],
            mlps=mlps,
            use_xyz=use_xyz,
            bn=cfg.RPN.USE_BN
        )
    )
    skip_channel_list.append(channel_out)
    channel_in = channel_out

self.FP_modules = nn.ModuleList()
for k in range(cfg.RPN.FP_MLPS.__len__()):
    pre_channel = cfg.RPN.FP_MLPS[k + 1][-1] if k + 1 < len(cfg.RPN.FP_MLPS) else channel_out
    self.FP_modules.append(
        PointnetFPModule(mlp=[pre_channel + skip_channel_list[k]] + cfg.RPN.FP_MLPS[k])
    )

def _break_up_pc(self, pc):
    xyz = pc[..., 0:3].contiguous()
    features = (
        pc[..., 3:].transpose(1, 2).contiguous()
        if pc.size(-1) > 3 else None
    )
    return xyz, features

def forward(self, pointcloud: torch.cuda.FloatTensor):
    xyz, features = self._break_up_pc(pointcloud)

    l_xyz, l_features = [xyz], [features]
    for i in range(len(self.SA_modules)):
        li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
        l_xyz.append(li_xyz)
        l_features.append(li_features)

    for i in range(-1, -(len(self.FP_modules) + 1), -1):
        l_features[i - 1] = self.FP_modules[i](
            l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
        )

    return l_xyz[0], l_features[0]
```
In the forward function, if I want to use the channel_out variable together with SA_modules, how can I compute it inside forward and then pass it along? Could you give me detailed code?
You can introduce a channel_out variable inside the forward function, initialized from the number of channels of the input features, update it in each iteration of the SA loop, and then make it available alongside the SA_modules (in the code below it is also returned from forward).
Here is the modified code:
```
def forward(self, pointcloud: torch.cuda.FloatTensor):
    xyz, features = self._break_up_pc(pointcloud)

    l_xyz, l_features = [xyz], [features]

    # initialize the channel bookkeeping from the input features
    channel_in = features.shape[1] if features is not None else 0
    channel_out = channel_in

    for i in range(len(self.SA_modules)):
        # recompute channel_out for SA level i, mirroring the logic in __init__
        mlps = cfg.RPN.SA_CONFIG.MLPS[i].copy()
        channel_out = 0
        for idx in range(len(mlps)):
            mlps[idx] = [channel_in] + mlps[idx]
            channel_out += mlps[idx][-1]
        channel_in = channel_out

        li_xyz, li_features = self.SA_modules[i](l_xyz[i], l_features[i])
        l_xyz.append(li_xyz)
        l_features.append(li_features)

    skip_channel_list = []
    for i in range(-1, -(len(self.FP_modules) + 1), -1):
        # record the channel count fed into this FP level
        pre_channel = cfg.RPN.FP_MLPS[i + 1][-1] if i + 1 < len(cfg.RPN.FP_MLPS) else channel_out
        skip_channel_list.append(pre_channel)
        l_features[i - 1] = self.FP_modules[i](
            l_xyz[i - 1], l_xyz[i], l_features[i - 1], l_features[i]
        )

    return l_xyz[0], l_features[0], channel_out  # also return channel_out
```
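Keep in mind that the SA_modules were already constructed in __init__ with fixed MLP channel sizes, so the channel_out computed here only mirrors that bookkeeping; returning it makes the value available to callers, but it does not reconfigure the modules themselves. If features can be None (a pure xyz input), guard the initialization as done above.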