def read_lfw_pairs(self, pairs_filename): pairs = [] with open(pairs_filename, 'r') as f: for line in f.readlines()[1:]: pair = line.strip().split() pairs.append(pair) return np.array(pairs) ValueError: setting an array element with a sequence. The requested array has an inhomogeneous shape after 1 dimensions. The detected shape was (6000,) + inhomogeneous part.
时间: 2024-02-14 19:27:36 浏览: 178
这个错误通常表示您正在尝试将具有不同形状的序列分配给NumPy数组的元素。在这种情况下,数组的第一个维度是(6000,),但是后面的维度具有不同的形状。
可能的解决方法是检查您的数据并确保每个元素具有相同的形状。您可以尝试使用NumPy的`np.array`函数创建一个具有相同形状的数组,并将所有元素添加到该数组中。
另外,还请确保您的数据格式正确,特别是在将字符串拆分为pair时。您可以使用`print(pair)`来调试并查看每个pair的形状和内容。
相关问题
facenet eval_LFW
回答: 在使用facenet进行LFW数据集的评估时,首先需要修改facenet.py文件中的model_path参数和backbone model_path,将其设置为我们训练好的模型的路径。同时,我们需要保持backbone为mobilenet的提取神经网络。\[1\]接下来,我们需要使用转换后的eval graph,将参数和结构固化。可以使用facenet自带的freeze_graph.py脚本来完成这一步骤。由于之前导出的是eval graph,所以导致输出的facenet.pb只有一个输入节点input shape=(1, 64, 64, 3)和一个输出节点output shape=(1, 512)。\[2\]最后,我们可以使用src/validate_on_lfw.py文件作为入口,同时传入两个参数。第一个参数是我们已经预处理(人脸检测和人脸对齐)后的数据集的路径,第二个参数是下载的模型的路径。通过pairs.txt文件导入待对比的图片路径,即可进行facenet的LFW评估。\[3\]
#### 引用
- *1* [使用pyTorch搭建自己的facenet](https://blog.csdn.net/jgmgtdp/article/details/129207611)
- *2* [GitHub - jiangxiluning/facenet_mtcnn_to_mobile: convert facenet and mtcnn models from tensorflow to ...](https://blog.csdn.net/weixin_32512261/article/details/114706487)
- *3* [tensorflow入门教程(四十)FaceNet源码分析之validate_on_lfw(上)](https://blog.csdn.net/rookie_wei/article/details/90048979)
基于pytorch的arcface的lfw_dataset = LFW(lfw_path, transform)中LFW的具体函数代码
LFW 类的代码如下:
```python
class LFW(Dataset):
    """LFW face-verification dataset yielding ``(img1, img2, label)`` pairs.

    Labels follow the convention used throughout this class:
    ``1`` for two images of the same person, ``-1`` for different people.

    Args:
        root: Root directory of the LFW images.
        transform: Optional callable applied to each loaded image.
        pairs_path: Path to an LFW-format ``pairs.txt``. When ``None``,
            pairs are generated from the images found on disk.
        download: If ``True``, download and extract the archive into ``root``.

    Raises:
        RuntimeError: If the dataset is missing or corrupted.
    """

    def __init__(self, root, transform=None, pairs_path=None, download=False):
        self.root = root
        self.transform = transform
        self.pairs_path = pairs_path
        self.download = download
        if download:
            self.download_lfw()
        if not self._check_integrity():
            raise RuntimeError('Dataset not found or corrupted.' +
                               ' You can use download=True to download it')
        self.imgs, self.pairs = self._load_metadata()

    def __getitem__(self, index):
        """Return ``(img1, img2, label)`` for the pair at *index*."""
        path1, path2, label = self.pairs[index]
        img1 = self._load_image(os.path.join(self.root, path1))
        img2 = self._load_image(os.path.join(self.root, path2))
        if self.transform is not None:
            img1 = self.transform(img1)
            img2 = self.transform(img2)
        return img1, img2, label

    def __len__(self):
        return len(self.pairs)

    def _load_metadata(self):
        """Parse the pair list and pre-seed the image cache.

        Returns:
            ``(imgs, pairs)`` where ``imgs`` maps absolute path -> lazily
            loaded image (initially ``None``) and ``pairs`` is a list of
            ``(path1, path2, label)`` tuples with paths relative to root.
        """
        pairs = []
        if self.pairs_path is None:
            pairs = self._generate_pairs()
        else:
            with open(self.pairs_path, 'r') as f:
                # The first line of pairs.txt is a header (fold/pair counts).
                for line in f.readlines()[1:]:
                    pair = line.strip().split()
                    if len(pair) == 3:
                        # "<name> <idx1> <idx2>": two images of the same person.
                        path1 = os.path.join(pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.jpg')
                        path2 = os.path.join(pair[0], pair[0] + '_' + '%04d' % int(pair[2]) + '.jpg')
                        label = 1
                    elif len(pair) == 4:
                        # "<name1> <idx1> <name2> <idx2>": two different people.
                        path1 = os.path.join(pair[0], pair[0] + '_' + '%04d' % int(pair[1]) + '.jpg')
                        path2 = os.path.join(pair[2], pair[2] + '_' + '%04d' % int(pair[3]) + '.jpg')
                        label = -1
                    else:
                        raise ValueError('Pair {} do not have length of 3 or 4'.format(pair))
                    pairs.append((path1, path2, label))
        root = os.path.expanduser(self.root)
        # NOTE: listdir only sees top-level entries of root; the nested
        # per-person image paths are cached lazily by _load_image.
        imgs = {os.path.join(root, img): None for img in os.listdir(root)}
        return imgs, pairs

    def _generate_pairs(self):
        """Build ``(path1, path2, label)`` pairs from the images on disk.

        Groups images by person (file names are ``<name>_<idx>.jpg``),
        emits every same-person image pair with label 1 and one random
        different-person pair per person with label -1, so the produced
        paths are loadable by ``__getitem__`` just like pairs.txt pairs.
        """
        root = os.path.expanduser(self.root)
        if not os.path.exists(os.path.join(root, 'lfw_funneled')):
            # exit(0) here would report *success* on a fatal error;
            # raise so callers can handle the missing dataset.
            raise RuntimeError(
                'Please download the Funneled version of the LFW dataset '
                'from the official website and place it in: ' + root)
        # recursive=True is required for '**' to match nested directories.
        found = glob.glob(os.path.join(root, 'lfw_funneled', '**', '*.jpg'),
                          recursive=True)
        by_person = {}
        for img in found:
            rel = os.path.relpath(img, root)
            name = '_'.join(os.path.basename(rel).split('_')[:-1])
            by_person.setdefault(name, []).append(rel)
        pairs = []
        people = sorted(by_person)
        # Positive pairs: every unordered pair of images of one person.
        for name in people:
            person_imgs = by_person[name]
            for i in range(len(person_imgs)):
                for j in range(i + 1, len(person_imgs)):
                    pairs.append((person_imgs[i], person_imgs[j], 1))
        # Negative pairs: one randomly chosen different-person image per person.
        for name in people:
            others = [p for p in people if p != name]
            if not others:
                continue
            other = random.choice(others)
            pairs.append((random.choice(by_person[name]),
                          random.choice(by_person[other]), -1))
        return pairs

    def _load_image(self, path):
        """Load and memoize the image at *path*.

        Uses ``.get()`` rather than direct indexing: the cache built in
        _load_metadata only pre-lists top-level entries of root, while
        pair paths are nested (person/person_XXXX.jpg), so ``self.imgs[path]``
        would raise KeyError for every real image path.
        """
        if self.imgs.get(path) is None:
            self.imgs[path] = pil_loader(path)
        return self.imgs[path]

    def _check_integrity(self):
        """Return True if the dataset root directory exists."""
        root = os.path.expanduser(self.root)
        if not os.path.isdir(root):
            return False
        return True

    def download_lfw(self):
        """Download and extract the LFW archive unless already present."""
        if self._check_integrity():
            print('Files already downloaded and verified')
            return
        download_and_extract_archive(LFW_URL, self.root, filename=LFW_FILENAME, md5=LFW_MD5)
```
其中,该类的构造函数接受四个参数:`root`表示LFW数据集的根目录,`transform`表示数据预处理函数,`pairs_path`表示pairs.txt文件的路径,`download`表示是否自动下载LFW数据集。
该函数主要实现了以下功能:
- 加载LFW数据集的元数据,包括人脸图像路径、同/异类标签等信息;
- 加载LFW数据集的人脸图像;
- 对人脸图像进行预处理,如裁剪、归一化等;
- 按照pairs.txt文件中的信息,计算出每对人脸图像的同/异类标签;
- 返回每对人脸图像的数据和标签。
阅读全文