fanfuhan OpenCV Tutorial 096 ~ opencv-096 - Descriptor Matching
Source: https://fanfuhan.github.io/
https://fanfuhan.github.io/2019/05/10/opencv-096/
GitHub: https://github.com/jash-git/fanfuhan_ML_OpenCV
Image feature detection first finds keypoints, then builds a descriptor from the ROI around each keypoint; the resulting descriptor vectors represent the image and constitute its feature data. This is often called image feature engineering: deriving feature data from an image through a prior model and suitable computation (a minimal sketch of this keypoint-and-descriptor step follows the list below). Once we have feature data we can use it for object detection and object recognition, and the simplest way to do that is feature matching. OpenCV provides two image feature matching algorithms:
~ Brute-force matching
~ FLANN matching
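As a minimal sketch of the keypoint-and-descriptor step described above (the path images/box.png is simply the test image used in the demos below), ORB keypoints and their descriptors can be extracted and inspected like this; each ORB descriptor is a 32-byte binary vector:

# Minimal sketch: extract ORB keypoints and descriptors and inspect them.
# Assumes "images/box.png" exists, as in the demo code further down.
import cv2 as cv

img = cv.imread("images/box.png", cv.IMREAD_GRAYSCALE)
orb = cv.ORB_create()                        # up to 500 keypoints by default
kp, des = orb.detectAndCompute(img, None)    # keypoints + descriptor matrix

print("keypoints:", len(kp))
print("descriptor shape:", des.shape)        # (num_keypoints, 32), dtype uint8
print("first keypoint at:", kp[0].pt)        # sub-pixel location of one keypoint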
FLANN is an efficient matcher for numerical or binary descriptors. SIFT/SURF descriptors are floating-point and are matched as such, while ORB descriptors are binary and much faster to match; for binary descriptors the default FLANN parameters do not work well, so locality-sensitive hashing (LSH) is used instead. The FLANN/LSH approach is demonstrated in the C++ code, and brute-force matching is demonstrated in the Python code. Sorting the matcher's output by distance keeps the matches with the smallest distances, and those are the good feature matches.
C++
#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>

#define RATIO 0.4

using namespace cv;
using namespace std;

int main(int argc, char** argv) {
	Mat box = imread("D:/images/box.png");
	Mat scene = imread("D:/images/box_in_scene.png");
	if (box.empty() || scene.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	imshow("input image", scene);

	vector<KeyPoint> keypoints_obj, keypoints_sence;
	Mat descriptors_box, descriptors_sence;
	Ptr<ORB> detector = ORB::create();
	detector->detectAndCompute(scene, Mat(), keypoints_sence, descriptors_sence);
	detector->detectAndCompute(box, Mat(), keypoints_obj, descriptors_box);

	vector<DMatch> matches;
	// initialize FLANN matching
	// Ptr<FlannBasedMatcher> matcher = FlannBasedMatcher::create(); // default is bad, using local sensitive hash(LSH)
	Ptr<DescriptorMatcher> matcher = makePtr<FlannBasedMatcher>(makePtr<flann::LshIndexParams>(12, 20, 2));
	matcher->match(descriptors_box, descriptors_sence, matches);

	// keep the good matches
	vector<DMatch> goodMatches;
	printf("total match points : %d\n", (int)matches.size());
	float maxdist = 0;
	for (unsigned int i = 0; i < matches.size(); ++i) {
		printf("dist : %.2f \n", matches[i].distance);
		maxdist = max(maxdist, matches[i].distance);
	}
	for (unsigned int i = 0; i < matches.size(); ++i) {
		if (matches[i].distance < maxdist * RATIO)
			goodMatches.push_back(matches[i]);
	}

	Mat dst;
	drawMatches(box, keypoints_obj, scene, keypoints_sence, goodMatches, dst);
	imshow("output", dst);
	waitKey(0);
	return 0;
}
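Note that the FLANN matcher above is built over an LSH index, flann::LshIndexParams(12, 20, 2), i.e. 12 hash tables, a 20-bit key and multi-probe level 2; the default FLANN index assumes floating-point descriptors and works poorly with ORB's binary descriptors. The maxdist * RATIO threshold then keeps only matches whose distance is well below the largest observed distance.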
Python
""" 描述子匹配 """ import cv2 as cv box = cv.imread("images/box.png") box_in_scene = cv.imread("images/box_in_scene.png") cv.imshow("box", box) cv.imshow("box_in_scene", box_in_scene) # 创建ORB特征检测器 orb = cv.ORB_create() # 得到特征关键点和描述子 kp1, des1 = orb.detectAndCompute(box, None) kp2, des2 = orb.detectAndCompute(box_in_scene, None) # 暴力匹配 bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True) matchers = bf.match(des1, des2) # 绘制匹配 matchers = sorted(matchers, key=lambda x: x.distance) result = cv.drawMatches(box, kp1, box_in_scene, kp2, matchers[:15], None) cv.imshow("orb-match", result) cv.waitKey(0) cv.destroyAllWindows()