[英]Keep only the good keypoints panoramic images stitching opencv
我正在研究一個關於匹配連續圖像以找到全景圖像的項目。
我發現了 SIFT 的關鍵點和使用 BFMatcher 的幾個圖像之間的匹配,但是在刪除了錯誤的匹配之后,我無法保留相應的關鍵點,甚至無法使用 cv::drawMatches 顯示匹配,因為程序崩潰了。
代碼的初始部分如下,它可以工作。
(“圖像”是包含所有圖像的向量)
cv::Mat descriptors;
std::vector<cv::KeyPoint> keypoints;
std::vector<cv::Mat> descriptors_array;
std::vector<std::vector<cv::KeyPoint>> keypoints_array, reduced_keypoints_array;
cv::Ptr<cv::Feature2D> sift = cv::xfeatures2d::SIFT::create();
for(int i=0; i<N_images; i++){
sift->detectAndCompute(image.at(i), cv::Mat(), keypoints, descriptors);
keypoints_array.push_back(keypoints);
descriptors_array.push_back(descriptors);
}
// Match descriptors of consecutive image pairs with a brute-force
// matcher (cross-check enabled) and keep only matches closer than
// 3 * min_distance for that pair.
//
// Fixes vs. the original snippet:
//  - the matches_array declaration ended with ',' instead of ';';
//  - min_distance was never declared or reset per image pair;
//  - matches/good_matches were never cleared, so good matches from
//    earlier pairs accumulated into later ones;
//  - the filter loop ran over descriptor rows, but with cross-checking
//    there are usually FEWER matches than rows, so matches[k] could
//    read past the end of the vector.
std::vector<std::vector<cv::DMatch>> matches_array;
std::vector<cv::DMatch> matches, good_matches;
cv::Ptr<cv::BFMatcher> matcher = cv::BFMatcher::create(cv::NORM_L2, true);
for (int i = 0; i < N_images - 1; i++) {
    // Reset state for this image pair.
    matches.clear();
    good_matches.clear();
    matcher->match(descriptors_array.at(i), descriptors_array.at(i + 1), matches, cv::Mat());

    // Smallest match distance for THIS pair.
    float min_distance = 1e30f;
    for (size_t j = 0; j < matches.size(); j++) {
        if (min_distance > matches.at(j).distance) {
            min_distance = matches.at(j).distance;
        }
    }

    // Keep only the "good" matches (iterate over matches.size(), not
    // over descriptor rows).
    for (size_t k = 0; k < matches.size(); k++) {
        if (matches[k].distance < 3 * min_distance) {
            good_matches.push_back(matches[k]);
        }
    }
    matches_array.push_back(good_matches);
}
當我只想保留與matches_array相對應的好的關鍵點時,我遇到了這段代碼的問題。
// For every image pair, copy the query-side keypoint of each good match
// into reduced_keypoints_array[i] (one keypoint per match, in match order).
// NOTE(review): the cv::DMatch entries in matches_array still carry
// queryIdx/trainIdx values that index the ORIGINAL keypoints_array
// vectors. Handing reduced_keypoints_array together with these
// unmodified matches to cv::drawMatches reads out of range and crashes —
// the indices must be rewritten (e.g. queryIdx = j) before the reduced
// vectors are usable.
for(int i=0; i<keypoints_array.size()-1; i++){
// One (initially empty) reduced list per image pair.
reduced_keypoints_array.push_back(std::vector<cv::KeyPoint>());
for(int j=0; j<matches_array.at(i).size(); j++){
reduced_keypoints_array.at(i).push_back(cv::KeyPoint());
// queryIdx refers to image i (the "query" side of the match).
reduced_keypoints_array.at(i).at(j) = keypoints_array.at(i).at(matches_array.at(i).at(j).queryIdx);
}
}
在這里我想顯示匹配,但它也崩潰了,因為我的匹配超過了關鍵點,因為我無法減少關鍵點的數量。
cv::Mat out;
// Draw the retained matches of each consecutive image pair.
// The DMatch indices in matches_array refer to the FULL keypoint
// vectors, so keypoints_array (not the reduced vectors) must be passed
// here; otherwise drawMatches indexes out of range and crashes.
for (int i = 0; i < (int)keypoints_array.size() - 1; i++) {
    // Fixed: the original wrote the output into an undeclared 'out2'.
    cv::drawMatches(image.at(i), keypoints_array.at(i),
                    image.at(i + 1), keypoints_array.at(i + 1),
                    matches_array.at(i), out,
                    cv::Scalar::all(-1), cv::Scalar::all(-1),
                    std::vector<char>(),
                    cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
    cv::imshow("matches", out);
    cv::waitKey(0);
}
我怎樣才能只保留相應的關鍵點? 或者在不刪除關鍵點的情況下直接繪製匹配?
我編輯了一些我將在代碼中標記的部分。 我不推薦您使用的數據結構,因為它們很難閱讀。 當您有多個向量向量時,請考慮創建typedefs
或structs
。 我使用 ORB,因為我目前沒有安裝 SIFT。 這是三個圖像的示例:
int main(int argc, char** argv)
{
    // Read the input images and collect them in a vector.
    std::vector<cv::Mat> image;
    cv::Mat img1 = cv::imread("1.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("2.png", cv::IMREAD_GRAYSCALE);
    cv::Mat img3 = cv::imread("3.png", cv::IMREAD_GRAYSCALE);
    image.push_back(img1);
    image.push_back(img2);
    image.push_back(img3);
    int N_images = (int)image.size();

    // Per-image features: descriptors_array[i] and keypoints_array[i]
    // always describe image[i].
    cv::Mat descriptors;
    std::vector<cv::KeyPoint> keypoints;
    std::vector<cv::Mat> descriptors_array;
    std::vector<std::vector<cv::KeyPoint>> keypoints_array;
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    for (int i = 0; i < N_images; i++) {
        orb->detectAndCompute(image.at(i), cv::Mat(), keypoints, descriptors);
        keypoints_array.push_back(keypoints);
        descriptors_array.push_back(descriptors);
    }

    std::vector<std::vector<cv::DMatch>> matches_array;
    std::vector<cv::DMatch> matches, good_matches;
    // ORB produces BINARY descriptors: they must be compared with the
    // Hamming norm, not L2. Using NORM_L2 gives meaningless distances —
    // that is why the commented-out min-distance filter found 0 matches.
    cv::Ptr<cv::BFMatcher> matcher = cv::BFMatcher::create(cv::NORM_HAMMING, true);

    // Keypoint pairs kept in lock-step with the good matches, so the
    // j-th pair always belongs to the j-th good match.
    std::vector<std::pair<cv::KeyPoint, cv::KeyPoint>> good_keypoint_pairs_array;
    std::vector<std::vector<std::pair<cv::KeyPoint, cv::KeyPoint>>> keypoint_pairs_array;

    for (int i = 0; i < N_images - 1; i++) {
        // Reset per image pair; the original accumulated results from
        // previous pairs, yielding more matches than keypoints.
        matches.clear();
        good_matches.clear();
        good_keypoint_pairs_array.clear();

        matcher->match(descriptors_array[i], descriptors_array.at(i + 1), matches, cv::Mat());

        // Minimum distance for THIS pair (works now that the norm is
        // appropriate for the descriptor type).
        float min_distance = 1e30f;
        for (size_t j = 0; j < matches.size(); j++) {
            if (min_distance > matches.at(j).distance) {
                min_distance = matches.at(j).distance;
            }
        }

        // Iterate over matches.size(): with cross-checking there are
        // usually fewer matches than descriptor rows, so indexing
        // matches[k] by descriptor row can run past the end.
        for (size_t k = 0; k < matches.size(); k++) {
            if (matches[k].distance < 3 * min_distance) {
                // Use queryIdx/trainIdx: the k-th match does NOT pair
                // the k-th keypoint of both images.
                good_keypoint_pairs_array.push_back(std::make_pair(
                    keypoints_array.at(i).at(matches[k].queryIdx),
                    keypoints_array.at(i + 1).at(matches[k].trainIdx)));
                // Re-base the match indices onto the reduced keypoint
                // vectors (pair j <-> match j), so drawMatches below can
                // never index out of range.
                int j = (int)good_matches.size();
                good_matches.push_back(cv::DMatch(j, j, matches[k].distance));
            }
        }
        keypoint_pairs_array.push_back(good_keypoint_pairs_array);
        matches_array.push_back(good_matches);
    }

    cv::Mat out;
    // Draw each pair using the reduced keypoint vectors; the match
    // indices were rewritten above so they always stay in range.
    std::vector<cv::KeyPoint> kp_1, kp_2;
    for (size_t i = 0; i < keypoint_pairs_array.size(); ++i) {
        for (size_t j = 0; j < keypoint_pairs_array[i].size(); ++j) {
            kp_1.push_back(keypoint_pairs_array[i][j].first);
            kp_2.push_back(keypoint_pairs_array[i][j].second);
        }
        cv::drawMatches(image.at(i), kp_1, image.at(i + 1), kp_2,
                        matches_array.at(i), out,
                        cv::Scalar::all(-1), cv::Scalar::all(-1),
                        std::vector<char>(),
                        cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);
        cv::imshow("matches", out);
        cv::waitKey(0);
        kp_1.clear();
        kp_2.clear();
    }
}
當我只想保留與matches_array相對應的好的關鍵點時,我遇到了這段代碼的問題。
正如您已經提到的,保持 std::vector&lt;cv::DMatch&gt;
中的索引與 std::vector&lt;cv::KeyPoint&gt;
的大小一致非常重要,因此您必須在統計匹配的同一個循環中保存對應的關鍵點,如上面的代碼所示。
聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.