
Use warpAffine of OpenCV to do image registration

I am trying to do image registration using ORB features. I ran into a problem with warpAffine: the compiler reports that argument '1' cannot be converted from cv::Mat * to cv::InputArray. Here is my code:

#pragma once

// Standard C++ I/O library.
#include <iostream>
#include <string>
#include <iomanip>
#include <vector>


// OpenCV library.
#include <cv.h>
#include <highgui.h>

// OpenCV feature library.
#include <opencv2/opencv.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <nonfree/features2d.hpp>




// main().
int main(int argv, char ** argc)
{
    cv::Mat im_ref, im_cmp;

    std::string  str_ref, str_cmp;

    // Read reference image.
    //std::cout<<"Input reference image filename: ";
    //std::cin>>str_ref;
    std::cout<<"-> Reading images."<<std::endl;
    str_ref = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\206.png";

    im_ref = cv::imread(str_ref);
    cv::imshow("Reference image", im_ref);

    // Read testing image.
    //std::cout<<"Input testing image filename: ";
    //std::cin>>str_cmp;
    str_cmp = "F:\\CPPs\\ImageRegistration\\OpenCVTest\\227.png";

    im_cmp = cv::imread(str_cmp);
    cv::imshow("Testing image", im_cmp);

    std::cout<<"Press any key to continue."<<std::endl;
    cvWaitKey(0);



    // Feature detection.
    std::cout<<"-> Feature detection."<<std::endl;
    std::vector <cv::KeyPoint> key_ref, key_cmp;           // Vectors for features extracted from reference and testing images.
    cv::Mat  des_ref, des_cmp;                             // Descriptors for features of 2 images.

    cv::ORB orb1;                                          // An ORB object.

    orb1(im_ref, cv::Mat(), key_ref, des_ref);             // Feature extraction.
    orb1(im_cmp, cv::Mat(), key_cmp, des_cmp);  


    // Show keypoints.
    std::cout<<"-> Show keypoints."<<std::endl;
    cv::Mat drawkey_ref, drawkey_cmp;                              // Output image for keypoint drawing.
    cv::drawKeypoints(im_ref, key_ref, drawkey_ref);               // Generate image for keypoint drawing.
    cv::imshow("Keypoints of reference", drawkey_ref);
    cv::drawKeypoints(im_cmp, key_cmp, drawkey_cmp);
    cv::imshow("Keypoints of test", drawkey_cmp);

    cvWaitKey(0);


    // Matching.
    std::cout<<"-> Matching."<<std::endl;
    cv::FlannBasedMatcher matcher1(new cv::flann::LshIndexParams(20,10,2));
    std::vector<cv::DMatch> matches1;
    matcher1.match(des_ref, des_cmp, matches1);            // Match two sets of features.

    double max_dist = 0; 
    double min_dist = 100;

    // Find out the minimum and maximum of all distance.
    for( int i = 0; i < des_ref.rows; i++ )
    { 
        double dist = matches1[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }

    cvWaitKey(0);


    // Eliminate relatively bad points.
    std::cout<<"-> Bad points elimination"<<std::endl;
    std::vector<cv::KeyPoint> kgood_ref, kgood_cmp;
    std::vector<cv::DMatch> goodMatch;
    for (int i=0; i<matches1.size(); i++)
    {
        if(matches1[i].distance < 2*min_dist)      // Keep points that are less than 2 times of the minimum distance.
        {
            goodMatch.push_back(matches1[i]);
            kgood_ref.push_back(key_ref[i]);
            kgood_cmp.push_back(key_cmp[i]);
        }  // end if
    } // end for
    cvWaitKey(0);


    // Calculate affine transform matrix.
    std::cout<<"-> Calculating affine transformation."<<std::endl;
    std::vector<cv::Point2f>   frm1_feature, frm2_feature;
    const int p_size = goodMatch.size();
    // * tmpP = new tmpPoint[p_size];
    cv::Point2f tmpP;


    for(int i=0; i<goodMatch.size(); i++)
    {
        tmpP.x = kgood_ref[i].pt.x;
        tmpP.y = kgood_ref[i].pt.y;
        frm1_feature.push_back(tmpP);

        tmpP.x = kgood_cmp[i].pt.x;
        tmpP.y = kgood_cmp[i].pt.y;
        frm2_feature.push_back(tmpP);
    }
    cv::Mat  affine_mat = cv::estimateRigidTransform(frm1_feature, frm2_feature, true);
    cv::Mat im_transformed;

    // Output results.
    cv::warpAffine(&im_cmp, &im_transformed, affine_mat, CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS); // error comes from here.
    cv::imshow("Transformed image", im_transformed);

    cvWaitKey(0);

    return 0;
}

Before applying the answer given by Evgeniy, I had already got a result. The transformation call I used was

//cv::warpAffine( im_cmp, im_transformed, affine_mat, cv::Size(im_cmp.cols, im_cmp.rows) );

The result of the transformation looks strange: [screenshot of the warped result]

What I want to do is eventually obtain a merged image of the reference image and this transformed image. That is actually only my first step. Is this a problem with the transformation parameters passed to warpAffine()?

In the end, I want to get a result like this example (two images taken from different positions that end up aligned): [example image of two aligned photos]
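To make the goal concrete, the kind of "merge" I have in mind is simply overlaying the reference image with the transformed test image. A rough sketch of that step (using cv::addWeighted; it assumes the two images end up with the same size and type, which is an assumption on my part):

// Rough sketch of the intended merge: a 50/50 overlay of the two images.
// Assumes im_ref and im_transformed have the same size and type.
cv::Mat im_merged;
cv::addWeighted(im_ref, 0.5, im_transformed, 0.5, 0.0, im_merged);
cv::imshow("Merged image", im_merged);
cv::waitKey(0);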

You are passing a pointer, but warpAffine takes a reference to a cv::Mat. You can change your code like this:

cv::warpAffine(im_cmp, im_transformed, affine_mat, cv::Size(), CV_INTER_LINEAR|CV_WARP_FILL_OUTLIERS); 

Just remove the '&'.
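Putting it together, a minimal sketch of the corrected call (OpenCV 2.4-style C++ API; the output size and flags below are assumptions, not taken from your code):

// Pass the cv::Mat objects directly instead of their addresses.
// dsize is set to the reference image size so the result can be overlaid on im_ref.
cv::warpAffine(im_cmp, im_transformed, affine_mat,
               cv::Size(im_ref.cols, im_ref.rows), cv::INTER_LINEAR);

// Note: estimateRigidTransform(frm1_feature, frm2_feature, true) estimates the mapping
// from the reference points to the test points. If the goal is to bring the test image
// into the reference frame, the opposite direction may be needed, e.g. by adding
// cv::WARP_INVERSE_MAP to the flags or by swapping the two point sets.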
