torch.full((num_anchors,), -1, dtype=torch.long, device=device)
This line of code uses PyTorch to create a tensor of shape (num_anchors,) with dtype long, every element set to -1, placed on the specified device (e.g. a GPU). In object detection, such a tensor is typically used to hold the class index of each anchor in the classification output, with -1 standing for the background category: since background anchors correspond to no object, marking them with -1 lets later processing ignore them.
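For example, with hypothetical values for `num_anchors` and `device`:

```python
import torch

num_anchors = 5
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

class_labels = torch.full((num_anchors,), -1, dtype=torch.long, device=device)
print(class_labels)        # tensor([-1, -1, -1, -1, -1]) (plus device info on GPU)
print(class_labels.dtype)  # torch.int64, i.e. torch.long
```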
Related questions
anchors_bbox_map = torch.full((num_anchors,), -1, dtype=torch.long, device=device)
This line creates a tensor `anchors_bbox_map` of shape `(num_anchors,)` and fills every element with the value -1. Concretely, each element records which ground-truth box the corresponding anchor is matched to; if an anchor is not matched to any ground-truth box, its entry remains -1.
Here, `dtype=torch.long` makes the tensor's elements 64-bit integers, and `device=device` places the tensor on the specified device (GPU or CPU).
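As an illustration only (a minimal greedy sketch, not the exact matching algorithm of any particular library), such a map can be filled from a pairwise IoU matrix like this:

```python
import torch

def assign_anchors(iou, iou_threshold=0.5):
    # iou: (num_anchors, num_gt) matrix of IoU values between anchors and ground-truth boxes
    num_anchors = iou.shape[0]
    anchors_bbox_map = torch.full((num_anchors,), -1, dtype=torch.long, device=iou.device)
    max_iou, gt_idx = iou.max(dim=1)        # best ground-truth box for each anchor
    matched = max_iou >= iou_threshold      # anchors whose best match is good enough
    anchors_bbox_map[matched] = gt_idx[matched]
    return anchors_bbox_map                 # unmatched anchors keep the value -1

iou = torch.tensor([[0.1, 0.7],
                    [0.3, 0.2],
                    [0.6, 0.4]])
print(assign_anchors(iou))  # tensor([ 1, -1,  0])
```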
YOLOv7 common.py source code
Below is a simplified, annotated sketch in the style of YOLOv7's common.py (the actual file lives at https://github.com/WongKinYiu/yolov7):
```python
import math

import torch
import torch.nn as nn


def make_divisible(x, divisor):
    # Smallest multiple of divisor that is >= x, e.g. make_divisible(100, 8) -> 104
    return math.ceil(x / divisor) * divisor


def autopad(k, p=None):  # kernel, padding
    # Pad to 'same', e.g. autopad(3) -> 1, autopad((3, 5)) -> [1, 2]
    if p is None:
        p = k // 2 if isinstance(k, int) else [x // 2 for x in k]
    return p


class Conv(nn.Module):
    # Standard convolution: conv + batch norm + activation
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super().__init__()
        self.conv = nn.Conv2d(c1, c2, k, stride=s, padding=autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        self.act = nn.Hardswish() if act else nn.Identity()

    def forward(self, x):
        return self.act(self.bn(self.conv(x)))


class Bottleneck(nn.Module):
    # Standard bottleneck with optional residual connection
    def __init__(self, c1, c2, shortcut=True, g=1, e=0.5):
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_, c2, 3, 1, g=g)
        self.add = shortcut and c1 == c2  # residual only when channel counts match

    def forward(self, x):
        return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))


class SPP(nn.Module):
    # Spatial pyramid pooling layer used in YOLOv3-SPP
    def __init__(self, c1, c2, k=(5, 9, 13)):
        super().__init__()
        c_ = c1 // 2  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)
        self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])

    def forward(self, x):
        x = self.cv1(x)
        return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))


class DWConv(nn.Module):
    # Depthwise convolution followed by a 1x1 pointwise projection
    def __init__(self, c1, c2, k=1, s=1, p=None, dilation=1):
        super().__init__()
        if p is None:
            p = dilation * (k - 1) // 2  # 'same' padding, accounting for dilation
        self.conv = nn.Conv2d(c1, c1, k, stride=s, padding=p, groups=c1, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(c1)
        self.act = nn.Hardswish()
        self.project = nn.Conv2d(c1, c2, 1, bias=False)
        self.bn2 = nn.BatchNorm2d(c2)
        self.act2 = nn.Hardswish()

    def forward(self, x):
        return self.act2(self.bn2(self.project(self.act(self.bn(self.conv(x))))))


class Focus(nn.Module):
    # Focus wh information into c-space
    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        super().__init__()
        self.conv = Conv(c1 * 4, c2, k, s, p, g, act)

    def forward(self, x):
        # x(b,c,h,w) -> (b,4c,h/2,w/2) via four pixel-offset slices, then conv to c2
        return self.conv(torch.cat([x[..., ::2, ::2], x[..., 1::2, ::2],
                                    x[..., ::2, 1::2], x[..., 1::2, 1::2]], 1))


class Concat(nn.Module):
    # Concatenate a list of tensors along a dimension
    def __init__(self, dimension=1):
        super().__init__()
        self.d = dimension

    def forward(self, x):
        return torch.cat(x, self.d)


class Detect(nn.Module):
    # Detection layer: 1x1 conv producing (x, y, w, h, obj, cls...) for each anchor
    def __init__(self, nc, anchors):
        super().__init__()
        self.nc = nc  # number of classes
        self.no = nc + 5  # number of outputs per anchor
        self.na = len(anchors)  # number of anchors
        a = torch.tensor(anchors).float().view(self.na, -1)
        a /= a.sum(1, keepdim=True)  # normalized anchors
        self.register_buffer("anchors", a)
        self.register_buffer("anchor_grid", a.clone().view(1, -1, 1, 1))
        self.m = nn.Conv2d(self.no * self.na, self.no * self.na, 1)  # prediction conv

    def forward(self, x):
        # x(bs, na*no, ny, nx) -> p(bs, na, ny, nx, no)
        bs, _, ny, nx = x.shape
        x = self.m(x)
        x = x.view(bs, self.na, self.no, ny, nx).permute(0, 1, 3, 4, 2).contiguous()
        if not self.training:
            x[..., 4:] = x[..., 4:].sigmoid()  # objectness and class scores at inference
        return x


class Model(nn.Module):
    # YOLOv7 model https://github.com/WongKinYiu/yolov7
    def __init__(self, nc=80,
                 anchors=((10, 13), (16, 30), (33, 23), (30, 61), (62, 45), (59, 119),
                          (116, 90), (156, 198), (373, 326)),
                 ch=(64, 128, 256, 512, 512, 1024, 1024, 1024), depth=0.33):
        super().__init__()
        assert depth in (0.33, 0.67, 1.0)
        self.depth = depth  # model depth multiplier
        self.grid = [torch.zeros(1)] * 5  # init grid
        self.stride = torch.tensor([8., 16., 32., 64., 128.])
        self.create_backbone(ch)
        self.create_neck()
        self.create_head(nc, anchors)

    def forward(self, x):
        z = []
        for i in range(5):
            x = self.backbone[i](x)
            z.append(x)
        x = self.neck(z)
        return self.head(x)

    def create_backbone(self, ch):
        # darknet-style backbone; ch lists the per-stage channel widths
        self.backbone = nn.ModuleList([
            Focus(3, ch[0], 3),
            Conv(ch[0], ch[1], 3, 2),
            Bottleneck(ch[1], ch[2]),
            Conv(ch[2], ch[3], 3, 2),
            Bottleneck(ch[3], ch[4]),
            Conv(ch[4], ch[5], 3, 2),
            SPP(ch[5], ch[5]),
            Bottleneck(ch[5], ch[6]),
            Conv(ch[6], ch[7], 1)])
        c2 = make_divisible(ch[7] * self.depth, 8)  # ch_last
        self.backbone.append(Bottleneck(ch[7], c2, False))
        self.out_channels = [c2, ch[4], ch[2], ch[0]]

    def create_neck(self):
        # FPN-like neck
        self.neck = nn.Sequential(
            Concat(),
            Conv(self.out_channels[0], self.out_channels[0], 1),
            DWConv(self.out_channels[0], self.out_channels[1], 3, s=2),
            DWConv(self.out_channels[1], self.out_channels[2], 3, s=2),
            DWConv(self.out_channels[2], self.out_channels[3], 3, s=2),
            SPP(self.out_channels[3], self.out_channels[3]),
            DWConv(self.out_channels[3], self.out_channels[3], 3, dilation=3),
            DWConv(self.out_channels[3], self.out_channels[3], 3, dilation=3),
            DWConv(self.out_channels[3], self.out_channels[3], 3, dilation=3),
        )

    def create_head(self, nc, anchors):
        # detection head
        self.head = nn.Sequential(
            DWConv(self.out_channels[3], self.out_channels[3], 3, dilation=3),
            DWConv(self.out_channels[3], self.out_channels[3], 3, dilation=3),
            DWConv(self.out_channels[3], self.out_channels[3], 3, dilation=3),
            Concat(),
            Conv(self.out_channels[3] * 4, self.out_channels[3], 1),
            nn.Conv2d(self.out_channels[3], len(anchors) * (nc + 5), 1, bias=True),
            Detect(nc, anchors))


def attempt_load(weights, map_location=None):
    # Loads an ensemble of weights files weights=[a, b, c] or a single file weights=a
    def load_one(w):
        model = Model()
        try:
            ckpt = torch.load(w, map_location=map_location)  # load checkpoint
            state_dict = ckpt['model'].float().state_dict()  # to FP32
            # keep only parameters whose shapes match the freshly built model
            state_dict = {k: v for k, v in state_dict.items()
                          if k in model.state_dict() and model.state_dict()[k].shape == v.shape}
            model.load_state_dict(state_dict, strict=False)
            print(f"Transferred {len(state_dict)} items from {w}")
        except Exception as e:
            print(f"Error loading {w}: {e}")
        return model.eval()

    if isinstance(weights, (list, tuple)):
        return nn.ModuleList([load_one(w) for w in weights])  # ensemble
    return load_one(weights)  # single model
```
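As a quick sanity check, here is a hypothetical smoke test of the building blocks above (input sizes chosen arbitrarily):

```python
import torch

x = torch.randn(1, 3, 64, 64)  # dummy image batch
focus = Focus(3, 64, 3)        # slices to 12 channels, convs to 64, halves resolution
y = focus(x)
print(y.shape)                 # torch.Size([1, 64, 32, 32])

spp = SPP(64, 128)             # multi-scale max pooling, preserves H and W
print(spp(y).shape)            # torch.Size([1, 128, 32, 32])
```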