# Create a MOG2 (Mixture of Gaussians) background subtractor:
# 200-frame model history, variance threshold 35, shadow detection enabled.
fgbg = cv2.createBackgroundSubtractorMOG2(history=200, varThreshold=35, detectShadows=True)
时间: 2023-08-11 19:04:39 浏览: 273
这是一个使用 OpenCV 库创建基于 MOG2(Mixture of Gaussians,混合高斯)算法的背景减法器。该算法可以用于视频中的运动目标检测。其中,history 参数表示用于建立背景模型的帧数,varThreshold 参数表示判断像素是否属于背景模型的方差阈值(即像素与模型之间马氏距离平方的阈值,取值越大越不敏感),detectShadows 参数表示是否检测并标记阴影。
相关问题
import cv2 # 创建混合高斯模型 fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=50, detectShadows=False) # 打开视频文件 cap = cv2.VideoCapture('t1.mp4') # 获取视频帧率、宽度和高度 fps = int(cap.get(cv2.CAP_PROP_FPS)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 创建前景视频对象 fg_out = cv2.VideoWriter('foreground_video.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height)) # 循环遍历视频帧 while True: ret, frame = cap.read() if not ret: break # 高斯模型背景减除法 fgmask = fgbg.apply(frame) # 缩放比例 scale_percent = 50 # 计算缩放后的新尺寸 width = int(frame.shape[1] * scale_percent / 100) height = int(frame.shape[0] * scale_percent / 100) dim = (width, height) # 缩放图像 frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA) fgmask = cv2.resize(fgmask, dim, interpolation=cv2.INTER_AREA) # 形态学开运算去除噪点 kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel) # 寻找轮廓并计算周长 contours, hierarchy = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for cnt in contours: perimeter = cv2.arcLength(cnt, True) if perimeter > 500: # 画出矩形框 x, y, w, h = cv2.boundingRect(cnt) cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) cv2.imshow('frame', frame) cv2.imshow('fgmask', fgmask) if cv2.waitKey(1) & 0xFF == ord('q'): break # 释放对象 cap.release() fg_out.release() cv2.destroyAllWindows()改这个程序,消除视频抖动的影响,不要用光流补偿
要消除视频抖动的影响,可以考虑使用视频稳定技术。一种简单的方法是估计每一帧相对于前一帧的整体平移运动,然后将当前帧按相反方向平移进行补偿,从而达到视频稳定的效果。
以下是修改后的程序:
``` python
import cv2
import numpy as np  # BUGFIX: np.float32 was used below without importing numpy

# MOG2 background subtractor: 500-frame model history, shadow labels disabled.
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=50, detectShadows=False)

# Open the input video.
cap = cv2.VideoCapture('t1.mp4')

# Source frame rate and full-resolution size (needed by the output writer).
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Writer for the foreground-mask video (XVID codec, full resolution).
fg_out = cv2.VideoWriter('foreground_video.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height))

# Previous (resized, grayscale) frame used for global-motion estimation.
prev_gray = None

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Foreground mask from the Gaussian-mixture background model.
    fgmask = fgbg.apply(frame)

    # BUGFIX: the writer was created and released but never written to.
    # Write the mask at full resolution, converted to BGR to match the
    # writer's expected frame format.
    fg_out.write(cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR))

    # Downscale both images to 50% for faster processing and display.
    scale_percent = 50
    new_w = int(frame.shape[1] * scale_percent / 100)
    new_h = int(frame.shape[0] * scale_percent / 100)
    dim = (new_w, new_h)
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    fgmask = cv2.resize(fgmask, dim, interpolation=cv2.INTER_AREA)

    # --- Video stabilization ---
    # Estimate the mean global motion against the previous frame and shift the
    # current frame in the OPPOSITE direction to cancel the shake. Done BEFORE
    # drawing detection boxes so the overlay does not pollute the estimate.
    # BUGFIX: Farneback optical flow requires single-channel (grayscale) input.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if prev_gray is not None:
        # BUGFIX: cv2.optflow.calcOpticalFlowFarneback does not exist; the
        # function lives on cv2 directly and returns the flow field itself
        # (there is no tuple to unpack).
        flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        dx = float(flow[..., 0].mean())
        dy = float(flow[..., 1].mean())
        # BUGFIX: translate by the NEGATED mean flow to compensate the motion;
        # the original shifted along the motion, which amplifies the shake.
        M = np.float32([[1, 0, -dx], [0, 1, -dy]])
        frame = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))
        # Frame-to-frame difference (grayscale), warped and shown for debugging.
        diff = cv2.absdiff(gray, prev_gray)
        diff = cv2.warpAffine(diff, M, (diff.shape[1], diff.shape[0]))
        cv2.imshow('diff', diff)
    prev_gray = gray

    # Morphological opening to remove small noise blobs from the mask.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

    # Find external contours; keep only those with a large enough perimeter.
    contours, hierarchy = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        perimeter = cv2.arcLength(cnt, True)
        if perimeter > 500:
            # Bounding box around the detected moving object.
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('frame', frame)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources.
cap.release()
fg_out.release()
cv2.destroyAllWindows()
```
在原有程序的基础上,我们加入了视频稳定的处理步骤。具体来说,我们计算每一帧与前一帧之间的帧间差分和运动向量,然后将每一帧按照平均运动向量进行平移。这样可以消除视频抖动的影响,从而得到更加稳定的结果。
import cv2 import numpy as np # 创建混合高斯模型 fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=50, detectShadows=False) # 打开视频文件 cap = cv2.VideoCapture('t1.mp4') # 获取视频帧率、宽度和高度 fps = int(cap.get(cv2.CAP_PROP_FPS)) width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 创建前景视频对象 fg_out = cv2.VideoWriter('foreground_video.avi', cv2.VideoWriter_fourcc(*'XVID'), fps, (width, height)) # 初始化上一帧 prev_frame = None # 循环遍历视频帧 while True: ret, frame = cap.read() if not ret: break # 高斯模型背景减除法 fgmask = fgbg.apply(frame) # 缩放比例 scale_percent = 50 # 计算缩放后的新尺寸 width = int(frame.shape[1] * scale_percent / 100) height = int(frame.shape[0] * scale_percent / 100) dim = (width, height) # 缩放图像 frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA) fgmask = cv2.resize(fgmask, dim, interpolation=cv2.INTER_AREA) # 形态学开运算去除噪点 kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3)) opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel) # 寻找轮廓并计算周长 contours, hierarchy = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) for cnt in contours: perimeter = cv2.arcLength(cnt, True) if perimeter > 500: # 画出矩形框 x, y, w, h = cv2.boundingRect(cnt) cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2) # 视频稳定 if prev_frame is not None: # 计算帧间差分 diff = cv2.absdiff(frame, prev_frame) # 计算运动向量 _, motion = cv2.optflow.calcOpticalFlowFarneback(prev_frame, frame, None, 0.5, 3, 15, 3, 5, 1.2, 0) # 平移每一帧 M = np.float32([[1, 0, motion[:,:,0].mean()], [0, 1, motion[:,:,1].mean()]]) frame = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0])) diff = cv2.warpAffine(diff, M, (diff.shape[1], diff.shape[0])) # 显示帧间差分 cv2.imshow('diff', diff) # 更新上一帧 prev_frame = frame.copy() cv2.imshow('frame', frame) cv2.imshow('fgmask', fgmask) if cv2.waitKey(1) & 0xFF == ord('q'): break # 释放对象 cap.release() fg_out.release() cv2.destroyAllWindows()改为4.5.3版本的opencv能用的程序
import cv2
import numpy as np

# MOG2 background subtractor: 500-frame model history, shadow labels disabled.
fgbg = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=50, detectShadows=False)

# Open the input video.
cap = cv2.VideoCapture('t1.mp4')

# Source frame rate and full-resolution size (needed by the output writer).
fps = int(cap.get(cv2.CAP_PROP_FPS))
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Writer for the foreground-mask video (XVID codec, full resolution).
fourcc = cv2.VideoWriter_fourcc(*'XVID')
fg_out = cv2.VideoWriter('foreground_video.avi', fourcc, fps, (width, height))

# Previous (resized, grayscale) frame used for global-motion estimation.
prev_gray = None

while True:
    ret, frame = cap.read()
    if not ret:
        break

    # Background subtraction on the full-resolution frame.
    fgmask = fgbg.apply(frame)

    # BUGFIX: the writer was created and released but never written to.
    fg_out.write(cv2.cvtColor(fgmask, cv2.COLOR_GRAY2BGR))

    # Downscale both images to 50% for faster processing and display.
    scale_percent = 50
    dim = (int(frame.shape[1] * scale_percent / 100),
           int(frame.shape[0] * scale_percent / 100))
    frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
    fgmask = cv2.resize(fgmask, dim, interpolation=cv2.INTER_AREA)

    # Stabilize BEFORE drawing detections so the green boxes do not
    # contaminate the motion estimate.
    # BUGFIX: in OpenCV 4.x calcOpticalFlowFarneback requires single-channel
    # (grayscale) images; passing BGR frames raises an assertion error.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    if prev_gray is not None:
        flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None,
                                            0.5, 3, 15, 3, 5, 1.2, 0)
        mean_flow = np.mean(flow, axis=(0, 1))
        # BUGFIX: compensate by the NEGATED mean flow; translating along the
        # motion (as the original did) amplifies the shake instead of
        # cancelling it.
        M = np.float32([[1, 0, -mean_flow[0]], [0, 1, -mean_flow[1]]])
        frame = cv2.warpAffine(frame, M, (frame.shape[1], frame.shape[0]))
        # Frame-to-frame difference (grayscale), warped and shown for debugging.
        diff = cv2.absdiff(gray, prev_gray)
        diff = cv2.warpAffine(diff, M, (diff.shape[1], diff.shape[0]))
        cv2.imshow('diff', diff)
    prev_gray = gray

    # Morphological opening to suppress speckle noise in the mask.
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    opening = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

    # Contours with a sufficiently large perimeter get a bounding box.
    contours, hierarchy = cv2.findContours(opening, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cnt in contours:
        if cv2.arcLength(cnt, True) > 500:
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    cv2.imshow('frame', frame)
    cv2.imshow('fgmask', fgmask)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources.
cap.release()
fg_out.release()
cv2.destroyAllWindows()
阅读全文