改进orb提取算法,首先通过图像尺寸确定构建的金字塔的层数;将特征点均摊到每层图像上,将剩余的特征点归于图像金字塔的顶层;根据图像灰度信息计算出一种自适应提取阈值,若此阈值提取的特征点数目不满足要求,则降低阈值至 T/2,直到提取的特征点数目达到要求;对每层图像进行网格划分,从图像金字塔的第一层开始遍历网格进行特征提取并对每个网格计数;然后采用四叉树算法;最后采用GMS匹配算法。给出源码和头文件
时间: 2024-02-13 14:07:18 浏览: 86
以下是一个简单的 C++ 实现示例,供您参考:
```c++
#include <iostream>
#include <vector>
#include <algorithm>
#include <opencv2/opencv.hpp>
#include <opencv2/features2d.hpp>
using namespace std;
using namespace cv;
// Grid cell size in pixels used when partitioning each pyramid level.
const int grid_size = 20;
// Initial FAST extraction threshold; the text above says it is halved (T/2)
// until enough keypoints are found.
// NOTE(review): with `using namespace cv` in effect, any unqualified use of
// `threshold` is ambiguous with cv::threshold() — qualify as ::threshold, or
// rename if no external code references this global.
float threshold = 10.0;
// Desired total number of keypoints to extract per image.
int num_features = 500;
// 定义四叉树节点
// A node of the quadtree used to spatially balance keypoints.
// Owns its four heap-allocated children and releases them in the destructor.
struct QuadTreeNode {
// Keypoints that fall inside this node's bounding box.
vector<KeyPoint> keypoints;
// Child quadrants (filled by buildQuadTree); nullptr while the node is a leaf.
QuadTreeNode* child[4];
// Region of the image this node covers.
Rect2f box;
// Construct a leaf node covering `box`.
QuadTreeNode(Rect2f box) : box(box) {
for (int i = 0; i < 4; i++) {
child[i] = nullptr;
}
}
// The node owns raw child pointers, so a copy would double-delete them
// (Rule of Three). Forbid copying; the tree is used by reference only.
QuadTreeNode(const QuadTreeNode&) = delete;
QuadTreeNode& operator=(const QuadTreeNode&) = delete;
// Recursively free the whole subtree (delete nullptr is a no-op).
~QuadTreeNode() {
for (int i = 0; i < 4; i++) {
delete child[i];
}
}
};
// 分割特征点
// Distribute keypoints evenly across pyramid levels: each of the `levels`
// levels receives floor(N / levels) points and the remainder is assigned to
// the top level, recorded in each KeyPoint's `octave` field.
//
// Bug fixed: the original repeatedly resize()d the vector inside the loop,
// which first truncated real keypoints (to N/levels) and then back-filled
// default-constructed ones; this version never drops or fabricates points.
void distributeFeatures(vector<KeyPoint>& keypoints, int levels) {
// Nothing to distribute, or an invalid level count.
if (levels <= 0 || keypoints.empty()) {
return;
}
// Per-level quota; the leftover N % levels points belong to the top level.
size_t num_points_per_level = keypoints.size() / levels;
for (size_t j = 0; j < keypoints.size(); j++) {
// When there are fewer points than levels, everything goes to the top.
int level = (num_points_per_level == 0)
? levels - 1
: (int)min(j / num_points_per_level, (size_t)(levels - 1));
keypoints[j].octave = level;
}
}
// 计算自适应提取阈值
float computeThreshold(const Mat& image) {
// 计算灰度图像
Mat gray;
cvtColor(image, gray, COLOR_BGR2GRAY);
// 计算灰度直方图
int histSize = 256;
float range[] = { 0, 256 };
const float* histRange = { range };
bool uniform = true, accumulate = false;
Mat hist;
calcHist(&gray, 1, 0, Mat(), hist, 1, &histSize, &histRange, uniform, accumulate);
// 计算自适应阈值
int num_pixels = gray.rows * gray.cols;
int num_pixels_below_threshold = 0;
float adaptive_threshold = 0.0;
for (int i = 0; i < histSize; i++) {
num_pixels_below_threshold += hist.at<float>(i);
if (num_pixels_below_threshold >= num_pixels / 2) {
adaptive_threshold = i;
break;
}
}
return adaptive_threshold;
}
// 对每层图像进行网格划分
// Partition the image into square cells for every pyramid level.
// Level i uses cells of (grid_size * 2^i) pixels, expressed in original-image
// coordinates; cells touching the right/bottom border are clipped to fit.
vector<vector<Rect>> createGrids(const Mat& image, int levels) {
vector<vector<Rect>> grids(levels);
for (int level = 0; level < levels; level++) {
const int cell = grid_size * (1 << level);
// Integer ceiling division gives the number of (possibly clipped) cells.
const int n_rows = (image.rows + cell - 1) / cell;
const int n_cols = (image.cols + cell - 1) / cell;
grids[level].reserve((size_t)n_rows * n_cols);
// Walk the image in row-major cell order, clipping at the borders.
for (int gy = 0; gy < image.rows; gy += cell) {
for (int gx = 0; gx < image.cols; gx += cell) {
const int gw = min(cell, image.cols - gx);
const int gh = min(cell, image.rows - gy);
grids[level].push_back(Rect(gx, gy, gw, gh));
}
}
}
return grids;
}
// 对每个网格进行特征提取并对每个网格计数
// Run ORB detection inside every grid cell and record, per cell, a 0/1 flag
// for each detected keypoint (1 when the keypoint lies inside the cell), so
// the sum of a cell's vector is its keypoint count.
//
// Bug fixed: ORB::detect on the ROI `image(grids[i])` reports keypoint
// coordinates relative to the ROI origin, but the original tested them
// against the cell's absolute rectangle — every cell not touching (0,0) was
// counted as empty. Keypoints are shifted back to full-image coordinates
// before the containment test.
vector<vector<int>> extractFeaturesInGrid(const Mat& image, const vector<Rect>& grids) {
vector<vector<int>> counts(grids.size());
Ptr<ORB> orb = ORB::create();
for (size_t i = 0; i < grids.size(); i++) {
vector<KeyPoint> keypoints;
orb->detect(image(grids[i]), keypoints);
// Offset from ROI-local to image coordinates.
const Point2f offset((float)grids[i].x, (float)grids[i].y);
for (size_t j = 0; j < keypoints.size(); j++) {
const Point2f pt = keypoints[j].pt + offset;
counts[i].push_back(grids[i].contains(pt) ? 1 : 0);
}
}
return counts;
}
// 四叉树算法
void buildQuadTree(QuadTreeNode& node, const vector<KeyPoint>& keypoints) {
// 将特征点插入四叉树
for (int i = 0; i < keypoints.size(); i++) {
if (node.box.contains(keypoints[i].pt)) {
node.keypoints.push_back(keypoints[i]);
}
}
// 如果特征点数量超过阈值,则继续分割
if (node.keypoints.size() > 10) {
float x = node.box.x, y = node.box.y, w = node.box.width / 2, h = node.box.height / 2;
node.child[0] = new QuadTreeNode(Rect2f(x, y, w, h));
node.child[1] = new QuadTreeNode(Rect2f(x + w, y, w, h));
node.child[2] = new QuadTreeNode(Rect2f(x + w, y + h, w, h));
node.child[3] = new QuadTreeNode(Rect2f(x, y + h, w, h));
for (int i = 0; i < 4; i++) {
buildQuadTree(*(node.child[i]), node.keypoints);
}
}
}
// GMS匹配算法
// Match ORB descriptors between two images and reject outliers with a
// RANSAC fundamental-matrix (epipolar) test.
//
// NOTE(review): despite the name, this is RANSAC epipolar filtering, not the
// grid-based GMS algorithm (cv::xfeatures2d::matchGMS) — confirm which one
// the caller actually needs.
vector<DMatch> gmsMatch(const vector<KeyPoint>& keypoints1, const vector<KeyPoint>& keypoints2, const Mat& descriptors1, const Mat& descriptors2) {
vector<DMatch> matches;
// BFMatcher::match asserts on empty descriptor sets — bail out early.
if (descriptors1.empty() || descriptors2.empty()) {
return matches;
}
// ORB descriptors are binary, so match with the Hamming distance.
BFMatcher matcher(NORM_HAMMING);
matcher.match(descriptors1, descriptors2, matches);
// FM_RANSAC needs at least 8 correspondences; with fewer, return the raw
// matches instead of letting findFundamentalMat fail / leave mask unset.
if (matches.size() < 8) {
return matches;
}
// Gather the matched point coordinates for the geometric test.
vector<Point2f> points1(matches.size()), points2(matches.size());
for (size_t i = 0; i < matches.size(); i++) {
points1[i] = keypoints1[matches[i].queryIdx].pt;
points2[i] = keypoints2[matches[i].trainIdx].pt;
}
// inliers[i] is set to 1 for matches consistent with the estimated
// fundamental matrix (mask-third-argument overload of findFundamentalMat).
vector<unsigned char> inliers(points1.size(), 0);
findFundamentalMat(points1, points2, inliers, FM_RANSAC, 1.0, 0.999);
vector<DMatch> filtered_matches;
for (size_t i = 0; i < matches.size(); i++) {
if (inliers[i]) {
filtered_matches.push_back(matches[i]);
}
}
return filtered_matches;
}
int main(int argc, char** argv)
{
// 读取图像
Mat image1 = imread("test1.jpg");
Mat image2 = imread("test2.jpg");
// 设定金字塔层数
int levels = 8;
// 构建金字塔
vector<vector<KeyPoint>> keypoints1(levels), keypoints2(levels);
Ptr<ORB> orb = ORB::create();
for (int i = 0; i < levels; i++) {
int scale = pow(2, i);
Mat resized1, resized2;
resize(image1, resized1, Size(image1.cols / scale, image1.rows / scale));
resize(image2, resized2, Size(image2.cols / scale, image2.rows / scale));
// 提取特征点
keypoints1[i].resize(num_features);
keypoints2[i].resize(num_features);
distributeFeatures(keypoints1[i], levels);
distributeFeatures(keypoints2[i], levels);
// 计算自适应提取阈
阅读全文