"""
When we have many many points pairs, the findEssentialMat and recoverPose will give us only one answer anyway,
which will definitely loose the benefit of numbers and we can do some `RANSAC` or `sliding-window` approaches.
Let's say, we have 1000 pairs of matched points, there can be ways to do it:
===========
First:
1. Calculate every 100 points and get 10 answers
2. Remove the answers with less confidence, aka, the less percentage of points passing the cheirality check (in recoverPose)
3. Get the main centroid of the remaining answers
===========
===========
Second:
1. Calculate every 100 points and get 10 answers
2. For each answer, calc the reprojection error (or the epipolar equation) on all points and filter out some answers
3. Get the main centroid of the remaining answers
===========
"""
import cv2
import numpy as np

from pose_estimation_2d2d import *

DEBUG = True


def plot_the_Rt_pairs():
    # Placeholder for visualizing the (R, t) answers; not implemented yet.
    pass

def split_the_matches(matches, step):
    """Yield consecutive chunks of `step` matches; the last chunk may be shorter."""
    for i in range(len(matches) // step + 1):
        yield matches[i * step:(i + 1) * step]
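
# For example (a hypothetical call, using plain integers in place of matches):
#   list(split_the_matches(list(range(10)), 4)) -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
# When len(matches) is an exact multiple of step, a trailing empty chunk is
# yielded; the `len(m) < 5` guard in remove_less_confidence skips it.
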
def remove_less_confidence(kps1, kps2, matches, K, splitnum, threshold):
    """First approach from the module docstring: estimate (R, t) on each chunk
    of `splitnum` matches and keep only the answers whose cheirality-check
    pass rate (from recoverPose) reaches `threshold`."""
    Rs = []
    ts = []
    confidences = []
    split_matches = split_the_matches(matches, splitnum)
    for m in split_matches:
        if len(m) < 5:
            # findEssentialMat needs at least 5 point pairs
            print("Fewer than 5 points, skipping this chunk")
            continue
        E_cv3, matches_E, _, _ = find_E_cv3(kps1, kps2, m, K)
        R, t, matches_rp, matches_rp_bad, _, _ = recoverPoseFromE_cv3_with_kps(E_cv3, kps1, kps2, m, K)
        # confidence = fraction of the chunk that passes the cheirality check
        conf = len(matches_rp) / len(m)
        if conf >= threshold:
            Rs.append(R)
            ts.append(t)
            confidences.append(conf)
    return Rs, ts, confidences
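
# A minimal sketch of the "Second" approach from the module docstring. The
# helper below is hypothetical (it does not exist in pose_estimation_2d2d):
# it scores one answer's essential matrix by the mean absolute epipolar
# residual |x2^T E x1| over ALL matches, in normalized camera coordinates
# (x = K^-1 p), assuming the usual OpenCV cv2.KeyPoint / cv2.DMatch types.
# Answers could then be filtered by this score instead of the cheirality rate.
def epipolar_residual(E, kps1, kps2, matches, K):
    K_inv = np.linalg.inv(K)
    errs = []
    for m in matches:
        # homogeneous pixel coordinates of the matched pair
        p1 = np.array([kps1[m.queryIdx].pt[0], kps1[m.queryIdx].pt[1], 1.0])
        p2 = np.array([kps2[m.trainIdx].pt[0], kps2[m.trainIdx].pt[1], 1.0])
        x1 = K_inv.dot(p1)  # normalized coordinates
        x2 = K_inv.dot(p2)
        # the epipolar constraint x2^T E x1 = 0 holds exactly for a perfect E
        errs.append(abs(x2.dot(E).dot(x1)))
    return float(np.mean(errs))
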
def mean_Rt(Rs, ts):
    """Centroid of the surviving answers: the arithmetic mean of the
    rotation-angle vectors and of the translation vectors."""
    rs = []
    for R in Rs:
        rs.append(rotate_angle_no_output(R))
    rs = np.array(rs)
    ts = np.array(ts)
    assert rs[0].shape == (3, 1) and ts[0].shape == (3, 1)
    return rs.mean(axis=0), ts.mean(axis=0)
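
# The arithmetic mean above is only a rough centroid for rotations. A common
# alternative (a sketch, not what mean_Rt uses) is the chordal L2 mean:
# average the rotation matrices element-wise, then project the result back
# onto SO(3) with an SVD.
def mean_rotation_chordal(Rs):
    M = np.mean(np.asarray(Rs), axis=0)  # element-wise mean of the 3x3 matrices
    U, _, Vt = np.linalg.svd(M)          # nearest rotation in Frobenius norm
    R = U.dot(Vt)
    if np.linalg.det(R) < 0:             # enforce det(R) = +1 (a proper rotation)
        R = U.dot(np.diag([1.0, 1.0, -1.0])).dot(Vt)
    return R
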
def test_remove_less_confidence(im1_file, im2_file):
    # Intrinsics for a Canon 5D Mark III with an EF 50mm lens
    K = np.array([[8607.8639, 0, 2880.72115],
                  [0, 8605.4303, 1913.87935],
                  [0, 0, 1]])
    im1 = cv2.imread(im1_file, 0)  # 0 -> load as grayscale
    im2 = cv2.imread(im2_file, 0)
    kps1, des1 = find_keypoints_and_description(im1)
    kps2, des2 = find_keypoints_and_description(im2)
    matches = find_matches_from_descriptors(des1, des2)
    splitnum = 42
    Rs, ts, confidences = remove_less_confidence(kps1, kps2, matches, K, splitnum, 0.9)
    if DEBUG:
        print("The answers num:{} of total:{}".format(len(Rs), len(matches) // splitnum))
        for R, t, con in zip(Rs, ts, confidences):
            print("=============")
            print("Confidence:{}".format(con))
            DEBUG_Rt_simple(R, t, str(con))
            print("=============")
    if not Rs:
        # guard against an empty answer set, which would crash mean_Rt
        print("No answer passed the confidence threshold")
        return
    rs_mean, ts_mean = mean_Rt(Rs, ts)
    print("rs_mean:{}\nts_mean:{}".format(rs_mean.T, ts_mean.T))
def test_remove_less_confidence_only_time(kps1, kps2, matches, K, splitnum, thres):
    """Time remove_less_confidence alone, averaged over several runs."""
    start_lsd = cv2.getTickCount()
    freq = cv2.getTickFrequency()
    test_num = 10
    for i in range(test_num):
        Rs, ts, confidences = remove_less_confidence(kps1, kps2, matches, K, splitnum, thres)
    duration_ms_lsd = (cv2.getTickCount() - start_lsd) * 1000 / freq / test_num
    print("Elapsed time for remove_less_confidence: {} ms".format(duration_ms_lsd))
def test_remove_less_confidence_time(im1_file, im2_file):
    """Time the whole pipeline (detection, matching, estimation), averaged over several runs."""
    start_lsd = cv2.getTickCount()
    freq = cv2.getTickFrequency()
    test_num = 10
    for i in range(test_num):
        test_remove_less_confidence(im1_file, im2_file)
    duration_ms_lsd = (cv2.getTickCount() - start_lsd) * 1000 / freq / test_num
    print("Elapsed time for the full test: {} ms".format(duration_ms_lsd))
if __name__ == "__main__":
    import argparse

    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-l", "--image1", required=True,
                    help="path to input image1")
    ap.add_argument("-r", "--image2", required=True,
                    help="path to input image2")
    args = vars(ap.parse_args())
    base_dir = "H:/projects/SLAM/python_code/dataset/our/trajs2/"
    im1_file = base_dir + args["image1"]
    im2_file = base_dir + args["image2"]
    print("{}\n{}".format(args["image1"], args["image2"]))
    test_remove_less_confidence(im1_file, im2_file)
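
# Example invocation (the image names are joined onto base_dir above):
#   python ransac_test.py -l 1.jpg -r 3.jpg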