以上代码报错 lines = cv2.drawMatches(img1, corners1, img2, corners2, None, None) cv2.error: OpenCV(4.7.0) :-1: error: (-5:Bad argument) in function 'drawMatches' > Overload resolution failed: > - Can't parse 'keypoints1'. Sequence item with index 0 has a wrong type > - Can't parse 'keypoints1'. Sequence item with index 0 has a wrong type > - drawMatches() missing required argument 'matchesThickness' (pos 7) > - drawMatches() missing required argument 'matchesThickness' (pos 7)
时间: 2023-07-15 18:12:05 浏览: 212
这个错误提示中提到了一些可能的问题:
- "keypoints1" 序列中的第一个元素类型错误
- "keypoints2" 序列中的第一个元素类型错误
- drawMatches() 缺少必需的参数 "matchesThickness"
具体原因是:cv2.goodFeaturesToTrack 返回的是形状为 (N, 1, 2) 的 float32 坐标数组,而 cv2.drawMatches 要求 keypoints1/keypoints2 是 cv2.KeyPoint 对象的序列,并且第 5 个参数必须是 cv2.DMatch 列表(不能传 None)。可以先把角点转换成关键点,例如 kp1 = [cv2.KeyPoint(float(x), float(y), 1) for [[x, y]] in corners1],再通过描述符匹配得到 DMatch 列表之后调用 drawMatches。
相关问题
检查改进代码:import cv2 # 读取图像 img1 = cv2.imread('qiao1.jpg') img2 = cv2.imread('qiao2.jpg') # 转为灰度图像 gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY) gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) # 使用Shi-Tomasi算法检测特征点 corners1 = cv2.goodFeaturesToTrack(gray1, 100, 0.01, 10) corners2 = cv2.goodFeaturesToTrack(gray2, 100, 0.01, 10) # 将特征点转为整数 corners1 = corners1.astype(int) corners2 = corners2.astype(int) # 进行暴力匹配 matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=True) matches = matcher.match(gray1, gray2) # 根据距离排序 matches = sorted(matches, key=lambda x: x.distance) # 取前10个最佳匹配 matches = matches[:10] # 绘制匹配点连线图 matched_img = cv2.drawMatches(img1, corners1, img2, corners2, matches, None) cv2.imshow('Matched', matched_img) cv2.waitKey(0) # 进行图像拼接 src_pts = np.float32([corners1[m.queryIdx].flatten() for m in matches]).reshape(-1, 1, 2) dst_pts = np.float32([corners2[m.trainIdx].flatten() for m in matches]).reshape(-1, 1, 2) M, _ = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0) result = cv2.warpPerspective(img1, M, (img1.shape[1] + img2.shape[1], img1.shape[0])) result[0:img2.shape[0], 0:img2.shape[1]] = img2 cv2.imshow('Result', result) cv2.waitKey(0)
代码看起来不错,但是有一些可以改进的地方:
1. 在进行暴力匹配之前,需要将特征点转换为描述符。可以使用ORB、SIFT或SURF等算法来提取描述符。
2. 在进行图像拼接之前,可以先使用RANSAC算法进行特征点匹配,以排除错误的匹配点。
3. 在绘制匹配点连线图之前,可以使用 cv2.drawKeypoints 函数绘制特征点的位置(默认只画位置;加上 cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS 标志才会同时显示关键点的大小和方向),以更好地观察匹配结果。
4. 在进行图像拼接之后,可以使用cv2.resize函数对结果进行缩放,以便更好地观察。
下面是改进后的代码:
import cv2
import numpy as np

# --- Load input images; fail fast with a clear error if a path is wrong ---
img1 = cv2.imread('qiao1.jpg')
img2 = cv2.imread('qiao2.jpg')
if img1 is None or img2 is None:
    raise FileNotFoundError("could not read 'qiao1.jpg' or 'qiao2.jpg'")

# --- Convert to grayscale for feature detection ---
gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

# --- Detect keypoints and compute binary ORB descriptors ---
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(gray1, None)
kp2, des2 = orb.detectAndCompute(gray2, None)

# --- Brute-force matching; NORM_HAMMING is the right metric for ORB's
# binary descriptors, crossCheck keeps only mutual best matches ---
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = matcher.match(des1, des2)
# Sort by descriptor distance so the strongest matches come first.
matches = sorted(matches, key=lambda m: m.distance)

# --- Estimate the homography with RANSAC; mask flags the inlier matches ---
src_pts = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
if M is None:
    raise RuntimeError('findHomography failed: not enough good matches')

# --- Visualize the detected keypoints on each image ---
img1_kp = cv2.drawKeypoints(img1, kp1, None)
img2_kp = cv2.drawKeypoints(img2, kp2, None)
cv2.imshow('Keypoints1', img1_kp)
cv2.imshow('Keypoints2', img2_kp)

# --- Draw only the RANSAC inliers; matchesMask suppresses the outliers
# that would otherwise clutter the line drawing ---
matches_img = cv2.drawMatches(
    img1, kp1, img2, kp2, matches, None,
    matchesMask=mask.ravel().tolist(),
    flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv2.imshow('Matches', matches_img)

# --- Warp img1 into img2's coordinate frame, then paste img2 at the origin ---
result = cv2.warpPerspective(img1, M, (img1.shape[1] + img2.shape[1], img1.shape[0]))
result[0:img2.shape[0], 0:img2.shape[1]] = img2

# Half-size preview so the wide panorama fits on screen.
result = cv2.resize(result, None, fx=0.5, fy=0.5)
cv2.imshow('Result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
import cv2 # 读取两幅待处理的图像 img1 = cv2.imread('image1.jpg', cv2.IMREAD_GRAYSCALE) img2 = cv2.imread('image2.jpg', cv2.IMREAD_GRAYSCALE) # 对图像进行高斯模糊 img1 = cv2.GaussianBlur(img1, (5, 5), 0) img2 = cv2.GaussianBlur(img2, (5, 5), 0) # 使用Shi-Tomasi算法检测特征点 corners1 = cv2.goodFeaturesToTrack(img1, 100, 0.01, 10) corners2 = cv2.goodFeaturesToTrack(img2, 100, 0.01, 10) # 对特征点进行亚像素定位 corners1 = cv2.cornerSubPix(img1, corners1, (5, 5), (-1, -1), criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)) corners2 = cv2.cornerSubPix(img2, corners2, (5, 5), (-1, -1), criteria=(cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)) # 对特征点进行匹配 matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE_HAMMING) kps1, descs1 = sift.detectAndCompute(img1, None) kps2, descs2 = sift.detectAndCompute(img2, None) matches = matcher.match(descs1, descs2) # 使用RANSAC算法进行匹配点筛选 src_pts = np.float32([kps1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2) dst_pts = np.float32([kps2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2) M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0) # 对图像进行配准和拼接 result = cv2.warpPerspective(img1, M, (img1.shape[1] + img2.shape[1], img1.shape[0])) result[0:img2.shape[0], 0:img2.shape[1]] = img2 # 显示结果 cv2.imshow('Result', result) cv2.waitKey() cv2.destroyAllWindows()改进这段代码使其输出特征点连线图和拼接图
import cv2
import numpy as np

# --- Load the two images directly as grayscale; fail fast on a bad path ---
img1 = cv2.imread('image1.jpg', cv2.IMREAD_GRAYSCALE)
img2 = cv2.imread('image2.jpg', cv2.IMREAD_GRAYSCALE)
if img1 is None or img2 is None:
    raise FileNotFoundError("could not read 'image1.jpg' or 'image2.jpg'")

# --- Light Gaussian blur to suppress noise before feature detection ---
img1 = cv2.GaussianBlur(img1, (5, 5), 0)
img2 = cv2.GaussianBlur(img2, (5, 5), 0)

# --- SIFT keypoints + descriptors.
# BUGFIX: since OpenCV 4.4 SIFT lives in the main cv2 module;
# cv2.xfeatures2d.SIFT_create only exists in older contrib builds.
try:
    sift = cv2.SIFT_create()
except AttributeError:
    sift = cv2.xfeatures2d.SIFT_create()
kps1, descs1 = sift.detectAndCompute(img1, None)
kps2, descs2 = sift.detectAndCompute(img2, None)

# BUGFIX: SIFT descriptors are float32, so they must be matched with an
# L2 (Euclidean) brute-force matcher. BRUTEFORCE_HAMMING is only valid
# for binary descriptors (ORB/BRIEF) and raises an error on SIFT input.
matcher = cv2.DescriptorMatcher_create(cv2.DESCRIPTOR_MATCHER_BRUTEFORCE)
matches = matcher.match(descs1, descs2)
# Sort so the strongest (smallest-distance) matches come first.
matches = sorted(matches, key=lambda m: m.distance)

# --- RANSAC homography; mask separates inliers from mismatches ---
src_pts = np.float32([kps1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_pts = np.float32([kps2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
if M is None:
    raise RuntimeError('findHomography failed: not enough good matches')

# --- Draw only the inlier match lines (green) ---
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=mask.ravel().tolist(),
                   flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
img3 = cv2.drawMatches(img1, kps1, img2, kps2, matches, None, **draw_params)
cv2.imshow('Feature Matching', img3)

# --- Register img1 into img2's frame and composite the panorama ---
result = cv2.warpPerspective(img1, M, (img1.shape[1] + img2.shape[1], img1.shape[0]))
result[0:img2.shape[0], 0:img2.shape[1]] = img2
cv2.imshow('Result', result)
cv2.waitKey()
cv2.destroyAllWindows()
相关推荐
![zip](https://img-home.csdnimg.cn/images/20210720083736.png)
![cpp](https://img-home.csdnimg.cn/images/20210720083646.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)
![](https://csdnimg.cn/download_wenku/file_type_ask_c1.png)