使用python实现基于sift特征提取的图像拼接的代码示例
时间: 2024-01-01 15:05:55 浏览: 89
python 实现的 sift 两张图片的图像拼接
5星 · 资源好评率100%
以下是一个基于SIFT特征提取的图像拼接代码示例,使用了OpenCV库(需先安装 OpenCV 的 Python 绑定,例如 `pip install opencv-python`):
```python
import cv2
import numpy as np
def stitch_images(img1, img2):
    """Stitch two overlapping BGR images using SIFT features and a homography.

    img1 is warped into img2's coordinate frame (img1 is assumed to be the
    right-hand image), then img2 is pasted over the left part of the canvas.

    Parameters
    ----------
    img1, img2 : np.ndarray
        BGR images as returned by ``cv2.imread``.

    Returns
    -------
    np.ndarray
        The stitched panorama (height = max of the two, width = w1 + w2).

    Raises
    ------
    ValueError
        If no descriptors are found, fewer than 4 good matches survive the
        ratio test, or homography estimation fails.
    """
    # Feature detection works on single-channel images.
    gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)

    # SIFT moved from the contrib module (cv2.xfeatures2d) into the main
    # module in OpenCV >= 4.4 after the patent expired.
    sift = cv2.SIFT_create()
    kp1, des1 = sift.detectAndCompute(gray1, None)
    kp2, des2 = sift.detectAndCompute(gray2, None)
    if des1 is None or des2 is None:
        raise ValueError("No SIFT descriptors found in one of the images")

    # FLANN needs an explicit KD-tree index for float descriptors like SIFT.
    index_params = dict(algorithm=1, trees=5)  # 1 == FLANN_INDEX_KDTREE
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)

    # Lowe's ratio test: keep a match only if it is clearly better than
    # the second-best candidate.
    good_matches = [m for m, n in matches if m.distance < 0.7 * n.distance]
    if len(good_matches) < 4:
        # findHomography needs at least 4 point correspondences.
        raise ValueError("Not enough good matches to compute a homography")

    src_pts = np.float32([kp1[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
    # RANSAC rejects outlier correspondences; 5.0 px reprojection threshold.
    H, _inlier_mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if H is None:
        raise ValueError("Homography estimation failed")

    # Warp img1 into img2's frame on a canvas wide enough to hold both
    # images, then overlay img2 at the origin.  (The original code warped
    # onto a canvas the size of img1 only — cropping the panorama — and
    # misused seamlessClone with a mask built from raw keypoint
    # coordinates, which is not a valid blend region.)
    h1, w1 = img1.shape[:2]
    h2, w2 = img2.shape[:2]
    panorama = cv2.warpPerspective(img1, H, (w1 + w2, max(h1, h2)))
    panorama[:h2, :w2] = img2
    return panorama
```
使用方法:
```python
img1 = cv2.imread('image1.jpg')
img2 = cv2.imread('image2.jpg')
# cv2.imread returns None (no exception) for a missing/unreadable file;
# fail fast with a clear message instead of a cryptic cvtColor error.
if img1 is None or img2 is None:
    raise FileNotFoundError("Could not read 'image1.jpg' or 'image2.jpg'")
result = stitch_images(img1, img2)
cv2.imshow('Result', result)
cv2.waitKey(0)
cv2.destroyAllWindows()
```
请注意,这只是一个简单的示例,可能需要进行调整和改进以适应不同的图像。
阅读全文