map(torch.stack, zip(*batch))
This question is about a function-call idiom in Python. The line stacks the tensors in a batch group by group: `zip(*batch)` transposes the batch, collecting the i-th component of every sample into one tuple, and `map(torch.stack, ...)` then stacks the tensors in each tuple along a new batch dimension, yielding one stacked tensor per component (not a single tensor overall).
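A minimal, self-contained illustration (the tensor shapes are invented for the example):

```python
import torch

# a toy batch of (image, label) pairs, as a Dataset might yield them
batch = [(torch.randn(3, 4, 4), torch.tensor(0)),
         (torch.randn(3, 4, 4), torch.tensor(1))]

# zip(*batch) groups all images together and all labels together;
# map then stacks each group along a new batch dimension
images, labels = map(torch.stack, zip(*batch))
print(images.shape)  # torch.Size([2, 3, 4, 4])
print(labels.shape)  # torch.Size([2])
```

Note that `map` returns a lazy iterator, so the result is usually unpacked immediately, as above; this is why the idiom often appears directly in a DataLoader collate function.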
Related questions
```python
import os
import json

import torch
from PIL import Image
from torchvision import transforms

from model import resnet34


def main():
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data_transform = transforms.Compose(
        [transforms.Resize(256),
         transforms.CenterCrop(224),
         transforms.ToTensor(),
         transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])

    # load images: the folder of images to run prediction on
    imgs_root = "../dataset/val"
    assert os.path.exists(imgs_root), f"file: '{imgs_root}' does not exist."
    # collect the paths of all jpg images under the folder
    img_path_list = [os.path.join(imgs_root, i) for i in os.listdir(imgs_root)
                     if i.endswith(".jpg")]

    # read class_indict
    json_path = './class_indices.json'
    assert os.path.exists(json_path), f"file: '{json_path}' does not exist."
    with open(json_path, "r") as json_file:
        class_indict = json.load(json_file)

    # create model
    model = resnet34(num_classes=16).to(device)

    # load model weights
    weights_path = "./newresNet34.pth"
    assert os.path.exists(weights_path), f"file: '{weights_path}' does not exist."
    model.load_state_dict(torch.load(weights_path, map_location=device))

    # prediction
    model.eval()
    batch_size = 8  # number of images packed into each batch
    with torch.no_grad():
        for ids in range(0, len(img_path_list) // batch_size):
            img_list = []
            for img_path in img_path_list[ids * batch_size: (ids + 1) * batch_size]:
                assert os.path.exists(img_path), f"file: '{img_path}' does not exist."
                img = Image.open(img_path)
                img = data_transform(img)
                img_list.append(img)

            # pack all images in img_list into a single batch tensor
            batch_img = torch.stack(img_list, dim=0)

            # predict classes
            output = model(batch_img.to(device)).cpu()
            predict = torch.softmax(output, dim=1)
            probs, classes = torch.max(predict, dim=1)

            for idx, (pro, cla) in enumerate(zip(probs, classes)):
                print("image: {}  class: {}  prob: {:.3}".format(
                    img_path_list[ids * batch_size + idx],
                    class_indict[str(cla.numpy())],
                    pro.numpy()))


if __name__ == '__main__':
    main()
```
This code first imports the required packages and modules: os, json, PyTorch, PIL with its torchvision transforms, and the user-defined resnet34 model. In main(), it selects cuda or cpu as the device depending on GPU availability, then defines the preprocessing pipeline: resizing, center-cropping, conversion to a tensor, and normalization. It then batches the images and prints the predicted class and probability for each one.
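One caveat in the prediction loop above: `range(0, len(img_path_list) // batch_size)` silently drops a final batch containing fewer than batch_size images. A small sketch of a ceil-division loop that also covers the remainder (the file names here are made up):

```python
import math

img_path_list = [f"img_{i:03d}.jpg" for i in range(19)]  # hypothetical 19 images
batch_size = 8

# ceil division yields 3 batches (8, 8, 3) instead of 2 (8, 8)
for ids in range(math.ceil(len(img_path_list) / batch_size)):
    batch_paths = img_path_list[ids * batch_size: (ids + 1) * batch_size]
    print(len(batch_paths))
```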
AssertionError: train: No labels found in D:\YOLO\datasets\Armor\labels\1.cache, can not start training.
This error means that no label files could be found for the training set when training a YOLO model. Ways to resolve it:
1. Confirm that the label files exist. Each label file must share its image's file name with a .txt extension and live where the loader looks for it (in YOLO repos, a labels/ directory mirroring images/); each file lists the labels for every object in the corresponding image.
2. Confirm that the label files are formatted correctly. YOLO expects one object per line: a class index followed by the box position given as center-x, center-y, width, and height, normalized by the image width and height so that every value lies between 0 and 1. A quick format check is sketched after this list.
3. Confirm that the dataset path and label path in the training script are correct; a wrong path directly produces this "no labels found" error.
4. Modify datasets.py so the label paths are derived correctly. Concretely, replace the JPEGImages part of each image path with the directory that actually holds the label files.
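Before touching datasets.py, it can be worth verifying point 2 mechanically. A sketch of such a check (the function name and the file path are hypothetical):

```python
def check_yolo_label(label_path):
    """Validate one YOLO .txt label file: 5 whitespace-separated fields
    per line, an integer class id, and coordinates normalized to [0, 1]."""
    with open(label_path) as f:
        for line_no, line in enumerate(f, start=1):
            fields = line.split()
            assert len(fields) == 5, f"{label_path}:{line_no}: expected 5 fields, got {len(fields)}"
            cls, *coords = fields
            assert cls.isdigit(), f"{label_path}:{line_no}: class id must be an integer"
            for v in coords:
                assert 0.0 <= float(v) <= 1.0, f"{label_path}:{line_no}: coordinate {v} outside [0, 1]"

check_yolo_label(r"D:\YOLO\datasets\Armor\labels\1.txt")  # hypothetical file
```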
An example of a modified datasets.py follows:
```python
import glob
import os
from pathlib import Path

import cv2
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

# NOTE: assumed constants so this sketch is self-contained;
# in a real YOLO repo they come from its utils module.
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff']
vid_formats = ['.avi', '.mkv', '.mov', '.mp4']
# random_affine is assumed to be provided by the repo's augmentation utilities
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False):
path = str(Path(path)) # os-agnostic
assert os.path.isfile(path), f'File not found {path}'
with open(path, 'r') as f:
self.img_files = [x.replace('\n', '') for x in f.readlines() if os.path.isfile(x.replace('\n', ''))]
assert self.img_files, f'No images found in {path}'
self.label_files = [x.replace('images', 'labels').replace('.png', '.txt').replace('.jpg', '.txt')
.replace('.jpeg', '.txt') for x in self.img_files]
self.img_size = img_size
self.batch_size = batch_size
self.augment = augment
self.hyp = hyp
self.rect = rect
self.image_weights = image_weights
        self.cache_images = cache_images
        self.single_cls = single_cls
        self.imgs = [None] * len(self.img_files)  # in-memory image cache used when cache_images=True
def __len__(self):
return len(self.img_files)
def __getitem__(self, index):
img_path = self.img_files[index % len(self.img_files)].rstrip()
label_path = self.label_files[index % len(self.img_files)].rstrip()
# Load image
img = None
if self.cache_images: # option 1 - caches small/medium images
img = self.imgs[index % len(self.imgs)]
if img is None: # option 2 - loads large images on-the-fly
img = Image.open(img_path).convert('RGB')
if self.cache_images:
if img.size[0] < 640 or img.size[1] < 640: # if one side is < 640
img = img.resize((640, 640)) # resize
self.imgs[index % len(self.imgs)] = img # save
assert img.size[0] > 9, f'Width must be >9 pixels {img_path}'
assert img.size[1] > 9, f'Height must be >9 pixels {img_path}'
# Load labels
        targets = torch.zeros((0, 6))  # default: empty targets so downstream ops work for unlabeled images
if os.path.isfile(label_path):
with open(label_path, 'r') as f:
x = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32)
# Normalized xywh to pixel xyxy format
labels = x.copy()
if x.size > 0:
                    labels[:, 1] = x[:, 1] * img.width   # x center (pixels)
                    labels[:, 2] = x[:, 2] * img.height  # y center (pixels)
                    labels[:, 3] = x[:, 3] * img.width   # box width (pixels)
                    labels[:, 4] = x[:, 4] * img.height  # box height (pixels)
labels[:, 1:5] = xywh2xyxy(labels[:, 1:5]) # xywh to xyxy
targets = torch.zeros((len(labels), 6))
targets[:, 1:] = torch.from_numpy(labels)
        # the cv2-based helpers below (random_affine, letterbox) expect a numpy array, not a PIL image
        img = np.ascontiguousarray(img)
        # Apply augmentations
        if self.augment:
            img, targets = random_affine(img, targets,
                                         degrees=self.hyp['degrees'],
                                         translate=self.hyp['translate'],
                                         scale=self.hyp['scale'],
                                         shear=self.hyp['shear'],
                                         border=self.img_size // 2)  # border to remove
        # Letterbox
        img, ratio, pad = letterbox(img, new_shape=self.img_size, auto=self.rect, scaleup=self.augment,
                                    stride=self.hyp['stride'] if self.hyp else 32)
targets[:, 2:6] = xyxy2xywh(targets[:, 2:6]) / self.img_size / ratio # normalized xywh (to grid cell)
# Load into tensor
        img = np.ascontiguousarray(img.transpose(2, 0, 1))  # HWC to CHW
        img = torch.from_numpy(img).to(torch.float32)  # uint8 to float32
        # targets[:, 0] stays 0 here; collate_fn overwrites it with the batch-image index
return img, targets, index, img_path
def coco_index(self, index):
"""Map dataset index to COCO index (minus 1)"""
return int(Path(self.img_files[index]).stem) - 1
@staticmethod
def collate_fn(batch):
img, label, _, path = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path
class LoadImages(Dataset): # for inference
def __init__(self, path, img_size=640, stride=32, auto=True):
path = str(Path(path)) # os-agnostic
if os.path.isdir(path):
files = sorted(glob.glob('%s/*.*' % path))
elif os.path.isfile(path):
files = [path]
else:
raise Exception(f'Error: {path} does not exist')
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.stride = stride
self.auto = auto
self.video_flag = [False] * ni + [True] * nv
self.img_files = images + videos
self.cap = [cv2.VideoCapture(x) for x in videos]
self.frame = [None] * nv
self.ret = [False] * nv
self.path = path
def __len__(self):
return len(self.img_files)
def __getitem__(self, index):
if self.video_flag[index]:
return self.load_video(index)
else:
return self.load_image(index)
def load_image(self, index):
img_path = self.img_files[index]
img = cv2.imread(img_path) # BGR
assert img is not None, 'Image Not Found ' + img_path
h0, w0 = img.shape[:2] # orig hw
img = letterbox(img, new_shape=self.img_size, auto=self.auto)[0]
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), index, img_path, (h0, w0)
    def load_video(self, index):
        cap = self.cap[index]
        self.ret[index], frame = cap.read()
        if not self.ret[index]:
            return None  # end of stream
        # letterbox the current frame, then BGR HWC -> RGB CHW
        img = letterbox(frame, new_shape=self.img_size, auto=self.auto)[0]
        img = np.ascontiguousarray(img[:, :, ::-1].transpose(2, 0, 1))
        self.frame[index] = torch.from_numpy(img)
        return self.frame[index], index, self.img_files[index], frame.shape[:2]
def __del__(self):
if hasattr(self, 'cap'):
for c in self.cap:
c.release()
def letterbox(img, new_shape=640, color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
# Resize and pad image while meeting stride-multiple constraints
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
ratio = float(new_shape) / max(shape)
else:
ratio = min(float(new_shape[0]) / shape[0], float(new_shape[1]) / shape[1])
    if ratio != 1:  # resize when needed: INTER_LINEAR to enlarge, INTER_AREA to shrink
        if scaleup or (ratio < 1 and max(shape) * ratio > stride * 1.5):
            interp = cv2.INTER_LINEAR
        else:
            interp = cv2.INTER_AREA
        img = cv2.resize(img, (int(round(shape[1] * ratio)), int(round(shape[0] * ratio))), interpolation=interp)
new_shape = [round(shape[1] * ratio), round(shape[0] * ratio)]
# Compute stride-aligned boxes
if auto:
stride = int(np.ceil(new_shape[0] / stride) * stride)
top_pad = (stride - new_shape[0]) % stride # add top-padding (integer pixels only)
left_pad = (stride - new_shape[1]) % stride # add left-padding (integer pixels only)
if top_pad or left_pad:
img = cv2.copyMakeBorder(img, top_pad // 2, top_pad - top_pad // 2, left_pad // 2, left_pad - left_pad // 2,
cv2.BORDER_CONSTANT, value=color) # add border
else:
stride = 32
top_pad, left_pad = 0, 0
# Pad to rectangular shape divisible by stride
h, w = img.shape[:2]
if scaleFill or new_shape == (w, h): # scale-up width and height
new_img = np.zeros((new_shape[1], new_shape[0], 3), dtype=np.uint8) + color # whole image
nh, nw = h, w
else: # scale width OR height
nh = new_shape[1] - top_pad
nw = new_shape[0] - left_pad
assert nh > 0 and nw > 0, 'image size < new_size'
new_img = np.zeros((new_shape[1], new_shape[0], 3), dtype=np.uint8) + color # whole image
if nw / w <= nh / h: # resize by width, then pad height
new_w = new_shape[0]
new_h = int(nh * new_w / nw)
assert new_h > 0, 'image size < new_size'
img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
            top, left = top_pad // 2, left_pad // 2  # center the image in the padded canvas
            new_img[top:top + new_h, left:left + new_w] = img
else: # resize by height, then pad width
new_h = new_shape[1]
new_w = int(nw * new_h / nh)
assert new_w > 0, 'image size < new_size'
img = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
            top, left = top_pad // 2, left_pad // 2  # center the image in the padded canvas
            new_img[top:top + new_h, left:left + new_w] = img
return new_img, ratio, (top_pad, left_pad)
def xywh2xyxy(x):
# Convert bounding box format from [x, y, w, h] to [x1, y1, x2, y2]
y = x.copy() if isinstance(x, np.ndarray) else np.array(x)
y[..., 0] = x[..., 0] - x[..., 2] / 2
y[..., 1] = x[..., 1] - x[..., 3] / 2
y[..., 2] = x[..., 0] + x[..., 2] / 2
y[..., 3] = x[..., 1] + x[..., 3] / 2
return y
def xyxy2xywh(x):
    # Convert bounding box format from [x1, y1, x2, y2] to [x, y, w, h]
    y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    y[..., 0] = (x[..., 0] + x[..., 2]) / 2  # x center
    y[..., 1] = (x[..., 1] + x[..., 3]) / 2  # y center
    y[..., 2] = x[..., 2] - x[..., 0]  # width
    y[..., 3] = x[..., 3] - x[..., 1]  # height
    return y
```
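For completeness, this is how the custom collate_fn above is typically wired into a DataLoader (a sketch; the train-list path is hypothetical):

```python
from torch.utils.data import DataLoader

dataset = LoadImagesAndLabels('data/train.txt', img_size=640, batch_size=16)  # hypothetical path
loader = DataLoader(dataset, batch_size=16, shuffle=True,
                    collate_fn=LoadImagesAndLabels.collate_fn)

for imgs, targets, paths in loader:
    # imgs: (B, 3, H, W) float32; targets: (N, 6) with the batch-image index in column 0
    pass
```

Note how collate_fn mirrors the `map(torch.stack, zip(*batch))` idiom from the original question: `zip(*batch)` transposes the batch, and each group is then stacked or concatenated.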