How to get a person's 3D coordinates from a stereo camera (with code)
Below is example code, built on the OpenCV library, for obtaining a person's 3D coordinates from a stereo camera:
```python
import cv2
import numpy as np

# Calibrate the stereo camera pair from chessboard image pairs
def stereo_calibration(left_imgs, right_imgs, chessboard_size, square_size):
    # 3D coordinates of the chessboard corners in the board's own frame
    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2) * square_size
    objpoints = []   # 3D object points
    imgpoints1 = []  # 2D points in the left camera
    imgpoints2 = []  # 2D points in the right camera
    for i in range(len(left_imgs)):
        gray1 = cv2.cvtColor(left_imgs[i], cv2.COLOR_BGR2GRAY)
        gray2 = cv2.cvtColor(right_imgs[i], cv2.COLOR_BGR2GRAY)
        # Detect chessboard corners in both views
        ret1, corners1 = cv2.findChessboardCorners(gray1, chessboard_size, None)
        ret2, corners2 = cv2.findChessboardCorners(gray2, chessboard_size, None)
        if ret1 and ret2:
            objpoints.append(objp)
            imgpoints1.append(corners1)
            imgpoints2.append(corners2)
    img_size = left_imgs[0].shape[1::-1]  # (width, height)
    # Calibrate each camera individually, then calibrate the stereo pair
    _, K1, D1, _, _ = cv2.calibrateCamera(objpoints, imgpoints1, img_size, None, None)
    _, K2, D2, _, _ = cv2.calibrateCamera(objpoints, imgpoints2, img_size, None, None)
    ret, K1, D1, K2, D2, R, T, E, F = cv2.stereoCalibrate(
        objpoints, imgpoints1, imgpoints2, K1, D1, K2, D2, img_size,
        flags=cv2.CALIB_FIX_INTRINSIC)
    # Compute the rectification transforms and the reprojection matrix Q
    R1, R2, P1, P2, Q, validPixROI1, validPixROI2 = cv2.stereoRectify(
        K1, D1, K2, D2, img_size, R, T, flags=cv2.CALIB_ZERO_DISPARITY)
    # Build the undistort/rectify remap tables for both cameras
    map1_1, map1_2 = cv2.initUndistortRectifyMap(K1, D1, R1, P1, img_size, cv2.CV_32FC1)
    map2_1, map2_2 = cv2.initUndistortRectifyMap(K2, D2, R2, P2, img_size, cv2.CV_32FC1)
    return K1, D1, K2, D2, R1, R2, P1, P2, Q, map1_1, map1_2, map2_1, map2_2

# Compute a dense 3D point map (H x W x 3) and a validity mask from an image pair
def get_depth_map(left_img, right_img, map1_1, map1_2, map2_1, map2_2, Q):
    # Rectify both images
    left_rect = cv2.remap(left_img, map1_1, map1_2, cv2.INTER_LINEAR)
    right_rect = cv2.remap(right_img, map2_1, map2_2, cv2.INTER_LINEAR)
    # StereoBM requires 8-bit grayscale images
    gray_left = cv2.cvtColor(left_rect, cv2.COLOR_BGR2GRAY)
    gray_right = cv2.cvtColor(right_rect, cv2.COLOR_BGR2GRAY)
    # Compute the disparity map (StereoBM returns fixed-point values scaled by 16)
    stereo = cv2.StereoBM_create(numDisparities=16, blockSize=15)
    disparity = stereo.compute(gray_left, gray_right).astype(np.float32) / 16.0
    # Reproject the disparity map to 3D points, keeping the H x W x 3 layout
    points_3d = cv2.reprojectImageTo3D(disparity, Q)
    # Mark pixels that have no valid disparity so they can be filtered out later
    valid = disparity > disparity.min()
    return points_3d, valid

# Get the 3D coordinates of the person in the scene
def get_person_3d_coordinates(left_img, right_img, map1_1, map1_2, map2_1, map2_2, Q):
    # Dense 3D point map and validity mask from the stereo pair
    points_3d, valid = get_depth_map(left_img, right_img, map1_1, map1_2, map2_1, map2_2, Q)
    # Segment the person by color in HSV space on the rectified left image,
    # so the mask is pixel-aligned with the 3D point map.
    # The HSV range below is a placeholder and must be tuned to the subject.
    left_rect = cv2.remap(left_img, map1_1, map1_2, cv2.INTER_LINEAR)
    hsv = cv2.cvtColor(left_rect, cv2.COLOR_BGR2HSV)
    lower_color = np.array([0, 0, 0])
    upper_color = np.array([255, 255, 255])
    mask = cv2.inRange(hsv, lower_color, upper_color)
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    mask = cv2.medianBlur(mask, 5)
    # Keep only pixels that belong to the person and have a valid disparity
    person_mask = (mask > 0) & valid
    person_points = points_3d[person_mask]
    person_points = person_points[~np.isnan(person_points).any(axis=1)]
    person_points = person_points[~np.isinf(person_points).any(axis=1)]
    person_points = person_points[person_points[:, 2] > 0.5]  # drop points closer than 0.5 m
    return person_points

# Read the image pair
left_img = cv2.imread('left_img.png')
right_img = cv2.imread('right_img.png')
# Calibrate the stereo camera (in practice, use many chessboard image pairs)
left_imgs = [left_img]
right_imgs = [right_img]
chessboard_size = (9, 6)
square_size = 0.02
K1, D1, K2, D2, R1, R2, P1, P2, Q, map1_1, map1_2, map2_1, map2_2 = stereo_calibration(
    left_imgs, right_imgs, chessboard_size, square_size)
# Get the person's 3D coordinates
person_points = get_person_3d_coordinates(left_img, right_img, map1_1, map1_2, map2_1, map2_2, Q)
print(person_points)
```
In this code, we first calibrate the stereo camera with the `stereo_calibration` function to obtain the intrinsic and extrinsic parameters, the reprojection matrix `Q`, and the rectification maps. We then use `get_depth_map` to rectify the image pair, compute a disparity map, and reproject it into a dense 3D point map, and finally use `get_person_3d_coordinates` to segment the person by color and extract the person's 3D point cloud.
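Note that `person_points` is a point cloud, not a single coordinate. If you need one 3D position for the person, a common choice is to collapse the cloud to a robust center such as the per-axis median. A minimal sketch, assuming `person_points` is the N x 3 array returned by `get_person_3d_coordinates` above (the helper `person_center` is illustrative, not part of the original code):

```python
import numpy as np

def person_center(person_points):
    # Collapse the person's point cloud to a single 3D coordinate.
    # The median is used instead of the mean to reduce the influence
    # of stray stereo-matching outliers.
    if person_points.size == 0:
        return None  # no valid person points were found
    return np.median(person_points, axis=0)

center = person_center(person_points)
print("Person 3D position (X, Y, Z):", center)
```

The resulting coordinates are expressed in the rectified left-camera frame, in the same units as the `square_size` used during calibration (meters in this example).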