Getting 3D skeleton coordinates with MediaPipe in C++
To obtain 3D skeleton coordinates you can use MediaPipe's pose landmark solution (MediaPipe Pose, based on BlazePose). The snippet below shows how to run pose estimation on a single image with the MediaPipe C++ API and read the 3D coordinates of individual joints:
```c++
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/landmark_list.pb.h"
#include "mediapipe/framework/port/opencv_core_inc.h"
#include "mediapipe/framework/port/opencv_video_inc.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/statusor.h"
#include "mediapipe/framework/tool/status_util.h"
#include "mediapipe/framework/tool/tag_map.h"
#include "mediapipe/framework/tool/validate_type.h"
#include "mediapipe/modules/pose_detection/pose_detection_calculator.pb.h"
#include "mediapipe/modules/pose_detection/pose_detection_landmark.pb.h"
namespace mediapipe {
namespace {

// Names of the graph's input stream and the polled output stream.
constexpr char kInputStream[] = "input_video";
constexpr char kOutputStream[] = "pose_world_landmarks";

// Joint indices in the 33-landmark BlazePose topology.
constexpr int kLeftShoulder = 11;
constexpr int kRightShoulder = 12;
constexpr int kLeftElbow = 13;
constexpr int kRightElbow = 14;

}  // namespace
// Callback that handles the 3D world landmarks of one detected pose.
mediapipe::Status ProcessOutput(const mediapipe::LandmarkList& landmarks) {
  if (landmarks.landmark_size() <= kRightElbow) {
    return absl::InvalidArgumentError("Not enough pose landmarks in output.");
  }

  // Access a few joints of the detected pose.
  const mediapipe::Landmark& left_shoulder = landmarks.landmark(kLeftShoulder);
  const mediapipe::Landmark& right_shoulder =
      landmarks.landmark(kRightShoulder);
  const mediapipe::Landmark& left_elbow = landmarks.landmark(kLeftElbow);
  const mediapipe::Landmark& right_elbow = landmarks.landmark(kRightElbow);

  // Read the 3D coordinates of each joint. World landmarks are expressed in
  // meters, relative to the center of the hips.
  const float left_shoulder_x = left_shoulder.x();
  const float left_shoulder_y = left_shoulder.y();
  const float left_shoulder_z = left_shoulder.z();
  const float right_shoulder_x = right_shoulder.x();
  const float right_shoulder_y = right_shoulder.y();
  const float right_shoulder_z = right_shoulder.z();
  const float left_elbow_x = left_elbow.x();
  const float left_elbow_y = left_elbow.y();
  const float left_elbow_z = left_elbow.z();
  const float right_elbow_x = right_elbow.x();
  const float right_elbow_y = right_elbow.y();
  const float right_elbow_z = right_elbow.z();

  // TODO: use the 3D joint coordinates (e.g. compute joint angles).
  return mediapipe::OkStatus();
}
mediapipe::Status RunPoseDetection(const cv::Mat& input_frame) {
  // Build a graph around the PoseLandmarkCpu subgraph
  // (mediapipe/modules/pose_landmark), which outputs normalized image
  // landmarks (LANDMARKS) and metric 3D world landmarks (WORLD_LANDMARKS).
  // The subgraph target must be listed in the binary's BUILD dependencies.
  mediapipe::CalculatorGraphConfig config =
      mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(R"pb(
        input_stream: "input_video"
        output_stream: "pose_world_landmarks"
        node {
          calculator: "PoseLandmarkCpu"
          input_stream: "IMAGE:input_video"
          output_stream: "LANDMARKS:pose_landmarks"
          output_stream: "WORLD_LANDMARKS:pose_world_landmarks"
        }
      )pb");

  mediapipe::CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(config));

  // Attach a poller to the world-landmark stream before starting the run.
  auto poller_or = graph.AddOutputStreamPoller(kOutputStream);
  if (!poller_or.ok()) return poller_or.status();
  mediapipe::OutputStreamPoller poller = std::move(poller_or).value();

  MP_RETURN_IF_ERROR(graph.StartRun({}));

  // Wrap the OpenCV Mat (expected in RGB order) in a MediaPipe ImageFrame.
  auto image_frame = std::make_unique<mediapipe::ImageFrame>(
      mediapipe::ImageFormat::SRGB, input_frame.cols, input_frame.rows,
      mediapipe::ImageFrame::kDefaultAlignmentBoundary);
  cv::Mat image_frame_mat = mediapipe::formats::MatView(image_frame.get());
  input_frame.copyTo(image_frame_mat);

  // Send the frame into the graph, then close the input stream so the graph
  // can finish once this frame has been processed.
  MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(
      kInputStream,
      mediapipe::Adopt(image_frame.release()).At(mediapipe::Timestamp(0))));
  MP_RETURN_IF_ERROR(graph.CloseInputStream(kInputStream));

  // Pull the world landmarks produced for this frame and process them.
  mediapipe::Packet packet;
  while (poller.Next(&packet)) {
    const auto& landmarks = packet.Get<mediapipe::LandmarkList>();
    MP_RETURN_IF_ERROR(ProcessOutput(landmarks));
  }

  // Wait for the graph to shut down cleanly.
  return graph.WaitUntilDone();
}
} // namespace mediapipe
```
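The graph above polls the WORLD_LANDMARKS output, so each `mediapipe::Landmark` already carries metric 3D coordinates (in meters, relative to the center of the hips). If you poll the LANDMARKS stream instead, you get a `mediapipe::NormalizedLandmarkList` whose x and y are fractions of the image width and height and whose z is only a relative depth. A small helper like the sketch below (the `ToPixel` name is purely illustrative) maps such a landmark back to pixel coordinates:
```c++
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/port/opencv_core_inc.h"

// Illustrative helper: convert a NormalizedLandmark (x, y in [0, 1]) into
// pixel coordinates of the original image.
cv::Point2f ToPixel(const mediapipe::NormalizedLandmark& landmark,
                    int image_width, int image_height) {
  return cv::Point2f(landmark.x() * image_width,
                     landmark.y() * image_height);
}
```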
In this example, Initialize() builds a MediaPipe calculator graph around the PoseLandmarkCpu subgraph, and AddOutputStreamPoller() attaches a poller to the pose_world_landmarks stream before StartRun() launches the graph. The OpenCV Mat is copied into a MediaPipe ImageFrame and sent into the graph with AddPacketToInputStream(); CloseInputStream() signals that no more frames will follow. Next() then pulls the landmark list produced for the frame, ProcessOutput() reads the 3D joint coordinates, and WaitUntilDone() shuts the graph down.
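As a rough usage sketch (assuming the code above is compiled into a MediaPipe binary with the pose_landmark module among its BUILD dependencies, and that a camera is available), you could grab a frame with OpenCV, convert it from BGR to RGB, and hand it to RunPoseDetection():
```c++
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/opencv_video_inc.h"

// RunPoseDetection() is the function defined above.
int main() {
  cv::VideoCapture capture(0);  // open the default camera
  if (!capture.isOpened()) return 1;

  cv::Mat frame_bgr, frame_rgb;
  capture >> frame_bgr;  // grab a single frame
  if (frame_bgr.empty()) return 1;

  // MediaPipe expects RGB input, while OpenCV captures BGR.
  cv::cvtColor(frame_bgr, frame_rgb, cv::COLOR_BGR2RGB);

  mediapipe::Status status = mediapipe::RunPoseDetection(frame_rgb);
  if (!status.ok()) {
    LOG(ERROR) << "Pose detection failed: " << status.message();
    return 1;
  }
  return 0;
}
```
For continuous tracking you would keep one graph alive and feed it frames with monotonically increasing timestamps, rather than rebuilding the graph for every frame as this single-image sketch does.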