簡體   English   中英

如何在Android中使用Open CV在圖片中找到已知對象

[英]how to find a known object in a picture using open cv in android

我想制作一個類似 boolean findobject(Mat ref_img, Mat object_img) 的函數：如果 ref_img 包含 object_img，則返回 true，否則返回 false。 我正在使用 SurfFeatureDetector 在 ref_img 中尋找對象，但是我不知道應該根據哪個參數來得出結論。 提前致謝。 我目前使用以下代碼，雖然它的返回值是字符串，但我想將其改為 bool。

// Detects the object image inside the reference/scene image using SURF
// features, FLANN descriptor matching, and a RANSAC homography.
//
// Parameters:
//   ref_img - the scene image to search in.
//   output  - in: the object (template) image to look for;
//             out: replaced with a visualization of the good matches and,
//             when a homography could be computed, the projected object
//             outline drawn in green.
//
// Returns a String reporting the number of "good" matches, or an error note
// if an OpenCV exception was thrown. A caller that wants a boolean
// "object present?" answer can threshold the good-match count.
String detect(Mat ref_img, Mat& output)
{
    String result = "";
    try
    {
        //-- Step 1: Detect keypoints with SURF.
        int minHessian = 400;
        SurfFeatureDetector detector(minHessian);

        std::vector<KeyPoint> keypoints_object, keypoints_scene;
        detector.detect(output, keypoints_object);
        detector.detect(ref_img, keypoints_scene);

        //-- Step 2: Calculate descriptors (feature vectors).
        SurfDescriptorExtractor extractor;
        Mat descriptors_object, descriptors_scene;
        extractor.compute(output, keypoints_object, descriptors_object);
        extractor.compute(ref_img, keypoints_scene, descriptors_scene);

        // No features in one of the images: FlannBasedMatcher::match would
        // throw on empty descriptors, so report zero matches instead.
        if (descriptors_object.empty() || descriptors_scene.empty())
        {
            result.append("no of good match:");
            result.append("0");
            return result;
        }

        //-- Step 3: Match descriptor vectors using the FLANN matcher.
        FlannBasedMatcher matcher;
        std::vector<DMatch> matches;
        matcher.match(descriptors_object, descriptors_scene, matches);

        //-- Quick calculation of min/max distances between matched keypoints.
        double max_dist = 0;
        double min_dist = 100;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            double dist = matches[i].distance;
            if (dist < min_dist) min_dist = dist;
            if (dist > max_dist) max_dist = dist;
        }

        //-- Keep only "good" matches (i.e. whose distance is less than 2*min_dist).
        std::vector<DMatch> good_matches;
        for (int i = 0; i < descriptors_object.rows; i++)
        {
            if (matches[i].distance < 2 * min_dist)
            {
                good_matches.push_back(matches[i]);
            }
        }

        Mat img_matches;
        drawMatches(output, keypoints_object, ref_img, keypoints_scene,
                    good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                    vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

        ostringstream os;
        os << good_matches.size();
        result.append("no of good match:");
        result.append(os.str());

        // findHomography needs at least 4 point correspondences. The original
        // code called it unconditionally, which threw on weak matches and
        // masked the match count behind "exception occured".
        if (good_matches.size() < 4)
        {
            output = img_matches;
            return result;
        }

        //-- Localize the object: gather the matched point coordinates.
        std::vector<Point2f> obj;
        std::vector<Point2f> scene;
        for (size_t i = 0; i < good_matches.size(); i++)
        {
            obj.push_back(keypoints_object[good_matches[i].queryIdx].pt);
            scene.push_back(keypoints_scene[good_matches[i].trainIdx].pt);
        }

        Mat H = findHomography(obj, scene, CV_RANSAC);

        //-- Get the corners of the object image and project them into the scene.
        std::vector<Point2f> obj_corners(4);
        obj_corners[0] = cvPoint(0, 0);
        obj_corners[1] = cvPoint(output.cols, 0);
        obj_corners[2] = cvPoint(output.cols, output.rows);
        obj_corners[3] = cvPoint(0, output.rows);
        std::vector<Point2f> scene_corners(4);
        perspectiveTransform(obj_corners, scene_corners, H);

        //-- Draw the projected outline, shifted right by the object width
        //-- because drawMatches placed the object image on the left half.
        Point2f shift((float)output.cols, 0.f);
        line(img_matches, scene_corners[0] + shift, scene_corners[1] + shift, Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[1] + shift, scene_corners[2] + shift, Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[2] + shift, scene_corners[3] + shift, Scalar(0, 255, 0), 4);
        line(img_matches, scene_corners[3] + shift, scene_corners[0] + shift, Scalar(0, 255, 0), 4);

        output = img_matches;
    }
    catch (Exception& e)
    {
        result.append("exception occured");
    }
    // Fixed: the original had stray backticks after this statement,
    // which was a compile error.
    return result;
}

我認為您的函數將執行以下操作:它匹配參考圖像/對象ref_img與另一個圖像( output )之間的特征點,然后計算這兩個圖像之間的2D單應性。 然后,它將參考圖像的矩形邊界映射到另一幅圖像並繪制它們。

您想要確定圖像中是否包含查詢的對象。 我不確定，但我想您可以根據發現的匹配數量或它們的距離來決定。

暫無
暫無

聲明:本站的技術帖子網頁,遵循CC BY-SA 4.0協議,如果您需要轉載,請注明本站網址或者原文地址。任何問題請咨詢:yoyou2525@163.com.

 
粵ICP備18138465號  © 2020-2024 STACKOOM.COM