视频稳像代码,用orb实现特征点提取,暴力匹配实现特征点匹配,并剔除误匹配点,然后用仿射变换模型进行全局运动估计,得出运动参数,并用滑动平均算法进行平滑得到平均运动参数,最后用这些参数再利用仿射变换模型进行运动补偿。将视频输出并保存到本地文件
时间: 2024-05-11 08:20:01 浏览: 140
基于特征点匹配的视频稳像
以下是基于 OpenCV 实现视频稳像的代码:
```python
import cv2
import numpy as np


def stabilize_video(input_path='input.mp4', output_path='output.mp4',
                    window_size=10, match_threshold=50, show=True):
    """Stabilize a video with ORB features, brute-force matching and affine motion smoothing.

    Pipeline: detect ORB keypoints per frame, match against the previous frame
    (Hamming distance, cross-check), drop weak matches, estimate a global 2x3
    affine motion, invert it, smooth the inverse motions with a sliding-window
    mean, and warp each frame by the smoothed transform.

    Args:
        input_path: source video file.
        output_path: destination file for the stabilized video.
        window_size: number of recent motions averaged for smoothing.
        match_threshold: maximum Hamming distance for a match to be kept.
        show: if True, display original and stabilized frames while processing.

    Raises:
        IOError: if the input video cannot be opened or has no frames.
    """
    cap = cv2.VideoCapture(input_path)
    if not cap.isOpened():
        raise IOError(f'Cannot open video: {input_path}')

    # FPS can come back 0 on some containers; fall back to a sane default.
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # BUG FIX: the original wrote to `out` without ever constructing a
    # VideoWriter, crashing with NameError on the first processed frame.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

    orb = cv2.ORB_create()
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)

    window = []
    # BUG FIX: start from the identity transform so warpAffine never receives
    # None when motion estimation fails on the very first frames.
    avg_motion = np.float32([[1, 0, 0], [0, 1, 0]])

    ret, prev_frame = cap.read()
    if not ret:
        cap.release()
        out.release()
        raise IOError('Video contains no frames')
    prev_gray = cv2.cvtColor(prev_frame, cv2.COLOR_BGR2GRAY)
    prev_kp, prev_desc = orb.detectAndCompute(prev_gray, None)

    while True:
        ret, curr_frame = cap.read()
        if not ret:
            break
        curr_gray = cv2.cvtColor(curr_frame, cv2.COLOR_BGR2GRAY)
        curr_kp, curr_desc = orb.detectAndCompute(curr_gray, None)

        M = None
        # detectAndCompute returns None descriptors on featureless frames;
        # BFMatcher.match would raise on them.
        if prev_desc is not None and curr_desc is not None:
            matches = bf.match(prev_desc, curr_desc)
            good_matches = [m for m in matches if m.distance < match_threshold]
            # estimateAffine2D needs at least 3 point pairs.
            if len(good_matches) >= 3:
                prev_pts = np.float32(
                    [prev_kp[m.queryIdx].pt for m in good_matches]).reshape(-1, 1, 2)
                curr_pts = np.float32(
                    [curr_kp[m.trainIdx].pt for m in good_matches]).reshape(-1, 1, 2)
                # RANSAC inside estimateAffine2D rejects remaining outliers.
                M, _ = cv2.estimateAffine2D(prev_pts, curr_pts)

        if M is not None:
            # BUG FIX: np.linalg.inv rejects the non-square 2x3 affine matrix
            # (LinAlgError); invertAffineTransform inverts it correctly.
            motion = cv2.invertAffineTransform(M)
            window.append(motion)
            if len(window) > window_size:
                window.pop(0)
            # Sliding-window mean of the inverse motions = smoothed compensation.
            avg_motion = np.mean(window, axis=0)
        # When estimation fails we keep the previous smoothed motion, matching
        # the original code's fallback behavior.

        # Motion compensation: warp the frame by the smoothed inverse motion.
        curr_stable = cv2.warpAffine(curr_frame, avg_motion, (width, height))

        # Save the stabilized frame before any optional display.
        out.write(curr_stable)

        if show:
            cv2.imshow('Original Video', curr_frame)
            cv2.imshow('Stabilized Video', curr_stable)
            # waitKey(0) would block forever; ensure a positive delay. ESC quits.
            if cv2.waitKey(max(1, int(1000 / fps))) & 0xFF == 27:
                break

        # Advance frame state for the next iteration.
        prev_gray = curr_gray
        prev_kp, prev_desc = curr_kp, curr_desc

    cap.release()
    out.release()
    cv2.destroyAllWindows()


if __name__ == '__main__':
    stabilize_video()
```
阅读全文