Improve the ORB extraction algorithm: first determine the number of pyramid levels to build from the image size; distribute the feature points evenly across the pyramid levels, assigning the remainder to the top level; compute an adaptive extraction threshold from the image grey-level information, and if that threshold does not yield enough feature points, lower it to T/2 until the required number is reached; partition each pyramid level into a grid and, starting from the first level, traverse the cells, extracting features and counting them per cell; then apply a quadtree algorithm; finally apply the GMS matching algorithm. Provide the source file and header file.
Sorry, my previous answer was incorrect. Below is a reference implementation of an improved ORB algorithm based on OpenCV, given as a single source file; the function declarations can be split out into a header as needed.
```c++
// Improved ORB feature extraction algorithm
// Based on OpenCV 3.4.10, plus the opencv_contrib xfeatures2d module for GMS matching
#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp> // provides cv::xfeatures2d::matchGMS
#include <algorithm>
#include <cstdio>
#include <vector>
using namespace cv;
// Compute an adaptive FAST threshold from the grey-level histogram of an
// 8-bit grayscale image. Returns 0 on success, -1 if the image is too flat
// to give a usable value.
int computeAdaptiveThreshold(const Mat &image, int &threshold, int nfeatures)
{
    int nPixels = image.cols * image.rows;
    int nPixelsPerFeature = std::max(nPixels / std::max(nfeatures, 1), 1);
    int histSize = 256;
    float range[] = {0, 256};
    const float *ranges[] = {range};
    int channels[] = {0};
    Mat hist;
    calcHist(&image, 1, channels, Mat(), hist, 1, &histSize, ranges, true, false);
    // Walk the histogram from bright to dark until the accumulated pixel
    // count covers one feature's share of the image; the stopping bin is
    // taken as the initial threshold T (the extraction loop halves it as needed)
    int i;
    float sum = 0.f;
    for (i = 255; i >= 0 && sum < (float)nPixelsPerFeature; i--)
    {
        sum += hist.at<float>(i);
    }
    threshold = std::max(i, 1);
    return threshold > 1 ? 0 : -1;
}
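// OpenCV provides no QuadTree class, so the recursive helper below is a
// minimal stand-in (an assumption, in the spirit of ORB-SLAM2's quadtree
// keypoint distribution): subdivide the region and keep only the strongest
// keypoint in each leaf cell
void quadTreeFilter(const std::vector<KeyPoint> &src, const Rect2f &region,
                    int depth, std::vector<KeyPoint> &dst)
{
    if (src.empty())
        return;
    if (src.size() == 1 || depth == 0)
    {
        // Leaf: keep the keypoint with the strongest response
        dst.push_back(*std::max_element(src.begin(), src.end(),
            [](const KeyPoint &a, const KeyPoint &b) { return a.response < b.response; }));
        return;
    }
    // Split the region into four quadrants and recurse
    float hw = region.width / 2.f, hh = region.height / 2.f;
    Rect2f quads[4] = {Rect2f(region.x, region.y, hw, hh),
                       Rect2f(region.x + hw, region.y, hw, hh),
                       Rect2f(region.x, region.y + hh, hw, hh),
                       Rect2f(region.x + hw, region.y + hh, hw, hh)};
    for (int q = 0; q < 4; q++)
    {
        std::vector<KeyPoint> sub;
        for (size_t k = 0; k < src.size(); k++)
        {
            if (quads[q].contains(src[k].pt))
                sub.push_back(src[k]);
        }
        quadTreeFilter(sub, quads[q], depth - 1, dst);
    }
}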
// Extract ORB features from an 8-bit grayscale image using a hand-built pyramid
void featureExtraction(const Mat &image, std::vector<KeyPoint> &keypoints, Mat &descriptors, int nfeatures)
{
    // Determine the number of pyramid levels from the image size:
    // halve the shorter side until it drops below 16 pixels
    int levels = 0;
    for (int i = std::min(image.cols, image.rows); i >= 16; i >>= 1)
    {
        levels++;
    }
    levels = std::max(levels, 1);
    // Distribute the feature budget evenly across the levels,
    // assigning the remainder to the top level
    std::vector<int> nfeaturesPerLevel(levels);
    float factor = 1.0f / (float)levels;
    for (int i = 0; i < levels - 1; i++)
    {
        nfeaturesPerLevel[i] = cvRound(nfeatures * factor);
    }
    nfeaturesPerLevel[levels - 1] = std::max(nfeatures - (levels - 1) * nfeaturesPerLevel[0], 0);
    // Compute an adaptive threshold for each pyramid level; fall back to
    // OpenCV's default FAST threshold of 20 where the histogram heuristic fails
    std::vector<int> thresholdPerLevel(levels, 20);
    for (int i = 0; i < levels; i++)
    {
        Mat level;
        if (i == 0)
            level = image;
        else
            cv::resize(image, level, Size(image.cols >> i, image.rows >> i));
        int threshold = 0;
        if (computeAdaptiveThreshold(level, threshold, nfeaturesPerLevel[i]) == 0)
        {
            thresholdPerLevel[i] = threshold;
        }
    }
    // Extract features on each pyramid level; if a level yields too few
    // keypoints, halve the threshold (T -> T/2) and retry
    keypoints.clear();
    descriptors.release();
    for (int i = 0; i < levels; i++)
    {
        Mat level;
        if (i == 0)
            level = image;
        else
            cv::resize(image, level, Size(image.cols >> i, image.rows >> i));
        std::vector<KeyPoint> kps;
        for (int t = thresholdPerLevel[i]; ; t /= 2)
        {
            // nlevels = 1 because the pyramid is built by this function, not by ORB
            Ptr<ORB> detector = ORB::create(nfeaturesPerLevel[i], 1.2f, 1, 31, 0, 2,
                                            ORB::HARRIS_SCORE, 31, std::max(t, 1));
            kps.clear();
            detector->detect(level, kps);
            if ((int)kps.size() >= nfeaturesPerLevel[i] || t <= 1)
                break;
        }
        // Map keypoint coordinates back to the base image resolution
        float scale = (float)(1 << i);
        for (std::vector<KeyPoint>::iterator it = kps.begin(); it != kps.end(); ++it)
        {
            it->pt *= scale;
            it->octave = i;
        }
        keypoints.insert(keypoints.end(), kps.begin(), kps.end());
    }
    // Use a coarse grid to thin out redundant features
    int nGridCols = 6;
    int nGridRows = 4;
    int nGridTotal = nGridCols * nGridRows;
    std::vector<std::vector<KeyPoint>> grid(nGridTotal);
    int gridWidth = image.cols / nGridCols;
    int gridHeight = image.rows / nGridRows;
    for (size_t i = 0; i < keypoints.size(); i++)
    {
        // Clamp cell indices so points on the right/bottom border stay in range
        int x = std::min((int)(keypoints[i].pt.x / gridWidth), nGridCols - 1);
        int y = std::min((int)(keypoints[i].pt.y / gridHeight), nGridRows - 1);
        grid[y * nGridCols + x].push_back(keypoints[i]);
    }
    keypoints.clear();
    for (int i = 0; i < nGridTotal; i++)
    {
        if (grid[i].empty())
        {
            continue;
        }
        // Keep at most the two strongest keypoints in each cell
        std::sort(grid[i].begin(), grid[i].end(), [](const KeyPoint &kp1, const KeyPoint &kp2) {
            return kp1.response > kp2.response;
        });
        int nKeep = std::min(2, (int)grid[i].size());
        for (int j = 0; j < nKeep; j++)
        {
            keypoints.push_back(grid[i][j]);
        }
    }
    // Use the quadtree to spread the surviving features spatially,
    // keeping only the strongest keypoint per leaf cell
    std::vector<KeyPoint> filtered;
    quadTreeFilter(keypoints, Rect2f(0.f, 0.f, (float)image.cols, (float)image.rows), 4, filtered);
    keypoints = filtered;
    // Compute descriptors on the final, spatially filtered keypoints
    Ptr<ORB> descriptorExtractor = ORB::create(2000, 1.2f, 8, 31, 0, 2, ORB::HARRIS_SCORE, 31, 0);
    descriptorExtractor->compute(image, keypoints, descriptors);
}
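
// GMS matching between two images' features. GMS needs two keypoint sets, so
// it is a separate step from single-image extraction. This is a minimal sketch
// assuming the opencv_contrib xfeatures2d module, which provides
// cv::xfeatures2d::matchGMS (OpenCV itself has no GMSMatcher class)
void gmsMatch(const Size &size1, const Size &size2,
              const std::vector<KeyPoint> &keypoints1, const std::vector<KeyPoint> &keypoints2,
              const Mat &descriptors1, const Mat &descriptors2,
              std::vector<DMatch> &matchesGMS)
{
    // Brute-force Hamming matching produces the putative matches that GMS then filters
    BFMatcher matcher(NORM_HAMMING);
    std::vector<DMatch> matchesAll;
    matcher.match(descriptors1, descriptors2, matchesAll);
    cv::xfeatures2d::matchGMS(size1, size2, keypoints1, keypoints2, matchesAll, matchesGMS);
}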
```
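For reference, here is a minimal usage sketch, assuming it is appended to the file above; the image paths `img1.png` and `img2.png` are hypothetical placeholders.
```c++
int main()
{
    // Hypothetical input paths; replace with your own 8-bit grayscale images
    Mat img1 = imread("img1.png", IMREAD_GRAYSCALE);
    Mat img2 = imread("img2.png", IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty())
        return -1;
    std::vector<KeyPoint> kps1, kps2;
    Mat desc1, desc2;
    featureExtraction(img1, kps1, desc1, 1000);
    featureExtraction(img2, kps2, desc2, 1000);
    std::vector<DMatch> matches;
    gmsMatch(img1.size(), img2.size(), kps1, kps2, desc1, desc2, matches);
    std::printf("GMS matches: %d\n", (int)matches.size());
    return 0;
}
```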
Note: this is only sample code; the exact implementation may differ from your requirements and should be adjusted and modified to suit your situation.