1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
import os

import cv2
import numpy as np
import matplotlib.pyplot as plt

# Minimum number of ratio-test survivors required before estimating a homography.
MIN_MATCH_COUNT = 10

# Shrink factor applied to both images before feature detection (inputs are large).
SCALE = 0.6

# Raw strings so Windows backslashes (e.g. '\I', '\d') are not treated as
# (invalid) escape sequences.
IMG1_PATH = r'D:\python\CV\dataset\images\IMG_20250428_085110.jpg'
IMG2_PATH = r'D:\python\CV\dataset\images\IMG_20250428_085120.jpg'

# --- Read and downscale input images --------------------------------------
img1 = cv2.imread(IMG1_PATH, 1)
img2 = cv2.imread(IMG2_PATH, 1)
if img1 is None or img2 is None:
    # imread returns None (no exception) on a missing/unreadable file;
    # fail fast with a clear message instead of a cryptic resize error later.
    raise FileNotFoundError(f"Could not read input images: {IMG1_PATH!r}, {IMG2_PATH!r}")
img1 = cv2.resize(img1, dsize=None, fx=SCALE, fy=SCALE, interpolation=cv2.INTER_LINEAR)
img2 = cv2.resize(img2, dsize=None, fx=SCALE, fy=SCALE, interpolation=cv2.INTER_LINEAR)

# --- SIFT keypoints and descriptors ---------------------------------------
sift = cv2.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
print(f"Image 1 detected keypoints: {len(kp1)}")
print(f"Image 2 detected keypoints: {len(kp2)}")

# --- FLANN matcher ---------------------------------------------------------
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)  # or pass an empty dictionary
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# knnMatch returns one list (of k=2 candidates) PER query descriptor,
# so len(matches) should equal the number of descriptors in image 1.
print(f"Total FLANN matches: {len(matches)} (Expected ~{des1.shape[0]})")

# Ratio test as per Lowe's SIFT paper: keep a match only if the best
# candidate is clearly better than the second-best.
matchesMask = [[0, 0] for _ in range(len(matches))]
valid_matches_count = 0
for i, (m, n) in enumerate(matches):
    if m.distance < 0.7 * n.distance:
        matchesMask[i] = [1, 0]
        valid_matches_count += 1
print(f"Valid FLANN matches after ratio test: {valid_matches_count}")

# --- Draw FLANN matches -----------------------------------------------------
draw_params = dict(
    matchColor=(0, 255, 0),        # green lines for accepted match pairs
    singlePointColor=(255, 0, 0),  # red for unmatched keypoints
    matchesMask=matchesMask,
    flags=cv2.DrawMatchesFlags_DEFAULT
)
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, matches, None, **draw_params)
# OpenCV images are BGR; convert to RGB so matplotlib shows correct colors.
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()

# --- Brute-force matcher (for comparison) ----------------------------------
bf = cv2.BFMatcher()
matches_bf = bf.knnMatch(des1, des2, k=2)
print(f"Total BF matches: {len(matches_bf)}")
good = []
for m, n in matches_bf:
    if m.distance < 0.75 * n.distance:
        good.append([m])
print(f"Good BF matches after ratio test: {len(good)}")
img3 = cv2.drawMatchesKnn(img1, kp1, img2, kp2, good, None,
                          flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# Ensure the output directory exists; imwrite fails silently otherwise.
os.makedirs('./result', exist_ok=True)
cv2.imwrite(r'./result/bfmatcher.jpg', img3)
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()

# --- Homography from FLANN good matches (0.7 ratio) -------------------------
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    # RANSAC rejects outliers; report how many inliers survived.
    inliers = mask.ravel().tolist()
    print(f"Number of inliers after RANSAC: {np.sum(mask)}")
    matchesMask = inliers
    h, w = img1.shape[:2]  # first two dims are (height, width)
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    # Outline where image 1's frame lands inside image 2.
    img2 = cv2.polylines(img2, [np.int32(dst)], True, 255, 3, cv2.LINE_AA)
else:
    print(f"Not enough matches are found - {len(good)}/{MIN_MATCH_COUNT}")
    matchesMask = None
draw_params = dict(
    matchColor=(0, 255, 0),
    singlePointColor=None,
    matchesMask=matchesMask,
    flags=2
)
img3 = cv2.drawMatches(img1, kp1, img2, kp2, good, None, **draw_params)
plt.imshow(cv2.cvtColor(img3, cv2.COLOR_BGR2RGB))
plt.show()

# --- Warp image 1 onto image 2 using a looser (0.75) ratio test -------------
goods = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        goods.append(m)
if len(goods) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in goods]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in goods]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    im_src = cv2.imread(IMG1_PATH)
    im_dst = cv2.imread(IMG2_PATH)
    # The keypoints (and hence M) live in 0.6x-scaled coordinates; the warp
    # must be applied to images at the SAME scale, so resize before warping.
    im_src = cv2.resize(im_src, dsize=None, fx=SCALE, fy=SCALE, interpolation=cv2.INTER_LINEAR)
    im_dst = cv2.resize(im_dst, dsize=None, fx=SCALE, fy=SCALE, interpolation=cv2.INTER_LINEAR)
    im_out = cv2.warpPerspective(im_src, M, (im_dst.shape[1], im_dst.shape[0]))
    plot_image = np.concatenate((im_out, im_dst), axis=1)
    plt.imshow(cv2.cvtColor(plot_image, cv2.COLOR_BGR2RGB))
    plt.show()
else:
    print(f"Not enough matches for warp - {len(goods)}/{MIN_MATCH_COUNT}")