fanfuhan OpenCV Tutorial 097 ~ opencv-097 - Locating/Marking a Known Object via Descriptor Matching

Source: https://fanfuhan.github.io/

https://fanfuhan.github.io/2019/05/10/opencv-097/

GitHub: https://github.com/jash-git/fanfuhan_ML_OpenCV


Once feature keypoints have been detected and their descriptors computed, OpenCV's descriptor matchers can measure the distance between descriptors; the smaller the distance, the better the match. Set a distance threshold, typically around 1/5 to 1/4 of the maximum match distance, and keep every match below it. Feeding those matched points into findHomography yields the homography H relating the two planes; applying a perspective transform with H then maps the corners of the object image into the scene image, so the object's location can be drawn.
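
As a minimal sketch of the thresholding rule just described (assuming `matches` is a list of cv.DMatch returned by a matcher; note the listings below actually use a ratio of 0.4 rather than 1/4):

def filter_by_distance(matches, ratio=0.25):
    # keep matches whose distance is below ratio * (maximum match distance)
    max_dist = max(m.distance for m in matches)
    return [m for m in matches if m.distance < ratio * max_dist]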


C++

#include <opencv2/opencv.hpp>
#include <iostream>
#include <math.h>
#define RATIO    0.4
using namespace cv;
using namespace std;

int main(int argc, char** argv) {
	Mat box = imread("D:/images/box.png");
	Mat scene = imread("D:/images/box_in_scene.png");
	if (box.empty() || scene.empty()) {
		printf("could not load image...\n");
		return -1;
	}
	imshow("input image", scene);
	vector<KeyPoint> keypoints_obj, keypoints_sence;
	Mat descriptors_box, descriptors_sence;
	Ptr<ORB> detector = ORB::create();
	detector->detectAndCompute(scene, Mat(), keypoints_sence, descriptors_sence);
	detector->detectAndCompute(box, Mat(), keypoints_obj, descriptors_box);

	vector<DMatch> matches;
	// initialize the FLANN matcher
	// Ptr<FlannBasedMatcher> matcher = FlannBasedMatcher::create(); // the default index works poorly with binary ORB descriptors
	// use locality-sensitive hashing (LSH) instead: LshIndexParams(table_number = 12, key_size = 20, multi_probe_level = 2)
	Ptr<DescriptorMatcher> matcher = makePtr<FlannBasedMatcher>(makePtr<flann::LshIndexParams>(12, 20, 2));
	matcher->match(descriptors_box, descriptors_sence, matches);

	// filter good matches
	vector<DMatch> goodMatches;
	printf("total match points : %zu\n", matches.size());
	float maxdist = 0;
	for (unsigned int i = 0; i < matches.size(); ++i) {
		printf("dist : %.2f \n", matches[i].distance);
		maxdist = max(maxdist, matches[i].distance);
	}
	for (unsigned int i = 0; i < matches.size(); ++i) {
		if (matches[i].distance < maxdist*RATIO)
			goodMatches.push_back(matches[i]);
	}

	Mat dst;
	drawMatches(box, keypoints_obj, scene, keypoints_sence, goodMatches, dst);
	imshow("output", dst);

	//-- Localize the object
	std::vector<Point2f> obj_pts;
	std::vector<Point2f> scene_pts;
	for (size_t i = 0; i < goodMatches.size(); i++)
	{
		//-- Get the keypoints from the good matches
		obj_pts.push_back(keypoints_obj[goodMatches[i].queryIdx].pt);
		scene_pts.push_back(keypoints_sence[goodMatches[i].trainIdx].pt);
	}
	Mat H = findHomography(obj_pts, scene_pts, RHO);
	// RANSAC fails to register here:
	// Mat H = findHomography(obj_pts, scene_pts, RANSAC);

	//-- Get the corners from the image_1 ( the object to be "detected" )
	std::vector<Point2f> obj_corners(4);
	obj_corners[0] = Point(0, 0); obj_corners[1] = Point(box.cols, 0);
	obj_corners[2] = Point(box.cols, box.rows); obj_corners[3] = Point(0, box.rows);
	std::vector<Point2f> scene_corners(4);
	perspectiveTransform(obj_corners, scene_corners, H);

	//-- Draw lines between the corners (the mapped object in the scene - image_2 )
	line(dst, scene_corners[0] + Point2f(box.cols, 0), scene_corners[1] + Point2f(box.cols, 0), Scalar(0, 255, 0), 4);
	line(dst, scene_corners[1] + Point2f(box.cols, 0), scene_corners[2] + Point2f(box.cols, 0), Scalar(0, 255, 0), 4);
	line(dst, scene_corners[2] + Point2f(box.cols, 0), scene_corners[3] + Point2f(box.cols, 0), Scalar(0, 255, 0), 4);
	line(dst, scene_corners[3] + Point2f(box.cols, 0), scene_corners[0] + Point2f(box.cols, 0), Scalar(0, 255, 0), 4);

	//-- Show detected matches
	imshow("Good Matches & Object detection", dst);
	imwrite("D:/result.png", dst);
	waitKey(0);

	return 0;
}
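
Not part of the original post, but a common alternative to the max-distance threshold above is Lowe's ratio test with knnMatch, which keeps a match only when its nearest neighbour is clearly better than the second nearest. A Python sketch using the same images:

import cv2 as cv

box = cv.imread("images/box.png")
scene = cv.imread("images/box_in_scene.png")
orb = cv.ORB_create()
kp1, des1 = orb.detectAndCompute(box, None)
kp2, des2 = orb.detectAndCompute(scene, None)

# no crossCheck here, so knnMatch can return two neighbours per descriptor
bf = cv.BFMatcher(cv.NORM_HAMMING)
knn = bf.knnMatch(des1, des2, k=2)
good = [p[0] for p in knn if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]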


Python

"""
基于描述子匹配的已知对象定位
"""

import cv2 as cv
import numpy as np

box = cv.imread("images/box.png")
box_in_scene = cv.imread("images/box_in_scene.png")
cv.imshow("box", box)
cv.imshow("box_in_scene", box_in_scene)

# create the ORB feature detector
orb = cv.ORB_create()

# detect keypoints and compute descriptors
kp1, des1 = orb.detectAndCompute(box, None)
kp2, des2 = orb.detectAndCompute(box_in_scene, None)

# brute-force matching with Hamming distance (ORB descriptors are binary)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matches = bf.match(des1, des2)

# filter good matches: keep those below 0.4 * (maximum match distance)
maxdist = 0
goodMatches = []
for m in matches:
    maxdist = max(maxdist, m.distance)
for m in matches:
    if m.distance < 0.4 * maxdist:
        goodMatches.append(m)


# localize the object: collect the matched point coordinates in both images
obj_pts = np.float32([kp1[m.queryIdx].pt for m in goodMatches]).reshape(-1, 1, 2)
scene_pts = np.float32([kp2[m.trainIdx].pt for m in goodMatches]).reshape(-1, 1, 2)

# findHomography estimates the transformation matrix between the two point sets
# cv.RANSAC / cv.RHO select the robust method used to find the best homography H (the return value M)
# returns: M, the transformation matrix, and mask, the inlier mask
M, mask = cv.findHomography(obj_pts, scene_pts, cv.RANSAC)

# get the size of the box image
h, w, _ = box.shape
# obj_corners holds the four corners of the box image
obj_corners = np.float32([[0, 0], [w, 0], [w, h], [0, h]]).reshape(-1, 1, 2)
# map the four corners into scene coordinates via a perspective transform
scene_corners = cv.perspectiveTransform(obj_corners, M)

# draw the transformed bounding box in the scene image from the four corners
box_in_scene = cv.polylines(box_in_scene, [np.int32(scene_corners)], True, (0, 0, 255), 3, cv.LINE_AA)

# draw the matches
result = cv.drawMatches(box, kp1, box_in_scene, kp2, goodMatches, None)
cv.imshow("orb-match", result)

cv.waitKey(0)
cv.destroyAllWindows()
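
The mask returned by cv.findHomography goes unused in the listing above. As a small follow-up sketch reusing those variables, it can be passed to cv.drawMatches (via its matchesMask parameter) to draw only the RANSAC inliers:

# draw only the matches RANSAC kept as inliers
matches_mask = mask.ravel().tolist()
inlier_result = cv.drawMatches(box, kp1, box_in_scene, kp2, goodMatches, None,
                               matchesMask=matches_mask)
cv.imshow("inlier-match", inlier_result)
cv.waitKey(0)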
