-
Notifications
You must be signed in to change notification settings - Fork 5
/
Copy pathfeatures.py
62 lines (51 loc) · 2.4 KB
/
features.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
import traceback
import cv2
import numpy as np
def createDetector():
    """Build the ORB feature detector used by this module.

    2000 features is a deliberate trade-off between matching robustness
    and per-frame speed.
    """
    return cv2.ORB_create(nfeatures=2000)
def getFeatures(img):
    """Extract ORB keypoints and descriptors from a BGR image.

    Returns a tuple ``(keypoints, descriptors, (width, height))`` where
    the last element is the image size with axes reversed from
    ``img.shape`` (i.e. width first).
    """
    grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    orb = createDetector()
    keypoints, descriptors = orb.detectAndCompute(grayscale, None)
    height, width = img.shape[:2]
    return keypoints, descriptors, (width, height)
def detectFeatures(img, train_features):
    """Locate the trained object inside ``img`` via ORB feature matching.

    Parameters
    ----------
    img : BGR image (numpy array) to search in.
    train_features : tuple ``(keypoints, descriptors, size)`` as returned
        by ``getFeatures`` for the train image.

    Returns
    -------
    The rotated bounding rectangle from ``cv2.minAreaRect`` (a tuple of
    center, (w, h), angle) when the object is found and passes the
    near-square aspect-ratio sanity check, otherwise ``None``.
    """
    train_kps, train_descs, shape = train_features
    # get features from input image
    kps, descs, _ = getFeatures(img)
    # bail out if either image yielded no keypoints/descriptors —
    # knnMatch would raise on a None descriptor matrix
    if not kps or descs is None or train_descs is None:
        return None
    # find matching keypoints between the two descriptor sets; knnMatch
    # returns up to k nearest neighbours per train descriptor
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)
    matches = bf.knnMatch(train_descs, descs, k=2)
    # Lowe's ratio test: a true match is much closer than the second-best
    # candidate; otherwise all candidates are roughly equally far.
    good = []
    for pair in matches:
        # knnMatch may return fewer than 2 neighbours for a descriptor;
        # such pairs cannot be ratio-tested, so skip them explicitly
        # (the old bare `except: pass` silently hid this case and any
        # other error in the pipeline)
        if len(pair) < 2:
            continue
        m, n = pair
        if m.distance < 0.8 * n.distance:
            good.append([m])
    # stop if we didn't find enough matching keypoints
    if len(good) < 0.1 * len(train_kps):
        return None
    # estimate a homography mapping train-image coordinates to scene
    # coordinates, rejecting outliers with RANSAC
    src_pts = np.float32([train_kps[g[0].queryIdx].pt for g in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kps[g[0].trainIdx].pt for g in good]).reshape(-1, 1, 2)
    homography, _mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    if homography is None:
        return None
    # project the train image corners into the scene to get a bounding quad
    # NOTE(review): shape is (width, height) here, yet shape[0] is used as a
    # y-coordinate — harmless for the near-square train images this code
    # assumes, but worth confirming if non-square templates are ever used
    corners = np.float32([(0, 0), (0, shape[0] - 1),
                          (shape[1] - 1, shape[0] - 1),
                          (shape[1] - 1, 0)]).reshape(-1, 1, 2)
    scene_points = cv2.perspectiveTransform(corners, homography)
    rect = cv2.minAreaRect(scene_points)
    # accept only near-square detections, since the train image is
    # known to be almost square
    if rect[1][1] > 0 and 0.8 < (rect[1][0] / rect[1][1]) < 1.2:
        return rect
    return None