-
Notifications
You must be signed in to change notification settings - Fork 21
/
Copy pathstitch.py
200 lines (172 loc) · 7.46 KB
/
stitch.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
import cv2
import numpy as np
import features
def blendingMask(height, width, barrier, smoothing_window, left_biased=True):
    """Create a 3-channel float mask with a linear blending ramp.

    @param height: mask height in pixels
    @param width: mask width in pixels
    @param barrier: column index around which the ramp is centred (< width)
    @param smoothing_window: approximate width of the transition zone in pixels
    @param left_biased: if True the mask is 1 left of the barrier and fades
        to 0 across the window; otherwise it fades in from 0 to 1
    @return: (height, width, 3) float mask with three identical channels
    """
    assert barrier < width
    mask = np.zeros((height, width))
    offset = int(smoothing_window / 2)
    # Clamp the ramp to the image bounds.  The original implementation
    # relied on a broad try/except retry with one fewer linspace sample
    # when the slice was clipped at an edge; sizing the ramp to the actual
    # slice width handles every clipping case directly and avoids setting
    # mask[:, :negative] in the fallback path.
    start = max(barrier - offset, 0)
    stop = min(barrier + offset + 1, width)
    ramp = np.linspace(1, 0, stop - start)  # fades 1 -> 0 across the window
    if left_biased:
        mask[:, start:stop] = np.tile(ramp, (height, 1))
        mask[:, :start] = 1
    else:
        # reversed ramp fades 0 -> 1; its last column is already 1, so
        # filling from `stop` onward matches the original output exactly
        mask[:, start:stop] = np.tile(ramp[::-1], (height, 1))
        mask[:, stop:] = 1
    # np.dstack replicates the single channel to 3 channels — exactly what
    # cv2.merge([mask, mask, mask]) produced, without needing OpenCV here.
    return np.dstack([mask, mask, mask])
def panoramaBlending(dst_img_rz, src_img_warped, width_dst, side, showstep=False):
    """Blend two aligned images with a smooth linear transition.

    @param dst_img_rz: destination image, already resized onto the panorama canvas
    @param src_img_warped: source image, already warped onto the same canvas
    @param width_dst: width of the destination image before resizing; marks
        where the discontinuity between the two images lies
    @param side: "left" when the source image is merged on the left of the
        destination, anything else for the right
    @param showstep: when True, also return the intermediate images
    @return: (pano, nonblend, leftside, rightside); the last three are None
        unless showstep is True
    """
    height, width = dst_img_rz.shape[:2]
    smoothing_window = int(width_dst / 8)
    barrier = width_dst - int(smoothing_window / 2)
    # complementary ramps: destination keeps its side, source keeps the other
    dst_mask = blendingMask(
        height, width, barrier, smoothing_window=smoothing_window, left_biased=True
    )
    src_mask = blendingMask(
        height, width, barrier, smoothing_window=smoothing_window, left_biased=False
    )
    # raw overlay (before masking) — only needed for step-by-step display
    nonblend = src_img_warped + dst_img_rz if showstep else None
    leftside = None
    rightside = None
    if side == "left":
        # mirror horizontally so the same left-biased masks apply, blend,
        # then mirror the result back
        dst_flipped = cv2.flip(dst_img_rz, 1) * dst_mask
        src_flipped = cv2.flip(src_img_warped, 1) * src_mask
        pano = cv2.flip(src_flipped + dst_flipped, 1)
        if showstep:
            leftside = cv2.flip(src_flipped, 1)
            rightside = cv2.flip(dst_flipped, 1)
    else:
        dst_masked = dst_img_rz * dst_mask
        src_masked = src_img_warped * src_mask
        pano = src_masked + dst_masked
        if showstep:
            leftside = dst_masked
            rightside = src_masked
    return pano, nonblend, leftside, rightside
def warpTwoImages(src_img, dst_img, showstep=False):
    """Warp src_img onto dst_img via a homography and blend the pair.

    @param src_img: image to be merged onto the left or right of dst_img
    @param dst_img: reference (destination) image
    @param showstep: forwarded to panoramaBlending to get intermediates
    @return: (pano, nonblend, leftside, rightside) as produced by
        panoramaBlending, with pano cropped to remove black borders
    @raise Exception: when the images cannot be stitched (e.g. too few
        feature matches); the original error is chained as the cause
    """
    # generate Homography matrix mapping src_img onto dst_img
    H, _ = features.generateHomography(src_img, dst_img)
    # get height and width of two images
    height_src, width_src = src_img.shape[:2]
    height_dst, width_dst = dst_img.shape[:2]
    # extract corners of two images: top-left, bottom-left, bottom-right, top-right
    pts1 = np.float32(
        [[0, 0], [0, height_src], [width_src, height_src], [width_src, 0]]
    ).reshape(-1, 1, 2)
    pts2 = np.float32(
        [[0, 0], [0, height_dst], [width_dst, height_dst], [width_dst, 0]]
    ).reshape(-1, 1, 2)
    try:
        # apply homography to corners of src_img
        pts1_ = cv2.perspectiveTransform(pts1, H)
        pts = np.concatenate((pts1_, pts2), axis=0)
        # find max/min of x,y coordinates over all 8 corners
        [xmin, ymin] = np.int64(pts.min(axis=0).ravel() - 0.5)
        [_, ymax] = np.int64(pts.max(axis=0).ravel() + 0.5)
        t = [-xmin, -ymin]
        # The warped top-left corner having x < 0 means the source image is
        # merged to the LEFT of the destination; otherwise to the RIGHT.
        if pts[0][0][0] < 0:
            side = "left"
            width_pano = width_dst + t[0]
        else:
            width_pano = int(pts1_[3][0][0])
            side = "right"
        height_pano = ymax - ymin
        # Translation so all coordinates are non-negative
        # https://stackoverflow.com/a/20355545
        Ht = np.array([[1, 0, t[0]], [0, 1, t[1]], [0, 0, 1]])
        src_img_warped = cv2.warpPerspective(
            src_img, Ht.dot(H), (width_pano, height_pano)
        )
        # canvas for dst_img with the same size as src_img_warped
        dst_img_rz = np.zeros((height_pano, width_pano, 3))
        # BUGFIX: the destination image spans height_dst rows (the original
        # used height_src, which breaks when the images differ in height —
        # the shape-mismatch error was silently masked by the broad except).
        if side == "left":
            dst_img_rz[t[1] : height_dst + t[1], t[0] : width_dst + t[0]] = dst_img
        else:
            dst_img_rz[t[1] : height_dst + t[1], :width_dst] = dst_img
        # blending panorama
        pano, nonblend, leftside, rightside = panoramaBlending(
            dst_img_rz, src_img_warped, width_dst, side, showstep=showstep
        )
        # cropping black region
        pano = crop(pano, height_dst, pts)
        return pano, nonblend, leftside, rightside
    except Exception as e:
        # chain the cause so the real failure is visible in the traceback
        raise Exception("Please try again with another image set!") from e
def multiStitching(list_images):
    """Stitch a left-to-right ordered list of images into one panorama.

    Chooses the middle image, splits the list into a left and a right
    sub-array (sharing the middle image), stitches each sub-array inward,
    then stitches the two half-panoramas together.

    @param list_images: images in left-to-right order (at least 2)
    @return: the full stitched panorama
    """
    n = int(len(list_images) / 2 + 0.5)
    left = list_images[:n]
    right = list_images[n - 1 :]
    right.reverse()
    # collapse the left half pairwise, always merging into the rightmost image
    while len(left) > 1:
        dst_img = left.pop()
        src_img = left.pop()
        left_pano, _, _, _ = warpTwoImages(src_img, dst_img)
        left_pano = left_pano.astype("uint8")
        left.append(left_pano)
    # collapse the (reversed) right half the same way
    while len(right) > 1:
        dst_img = right.pop()
        src_img = right.pop()
        right_pano, _, _, _ = warpTwoImages(src_img, dst_img)
        right_pano = right_pano.astype("uint8")
        right.append(right_pano)
    # BUGFIX: read the results from the lists instead of the loop variables.
    # With only 2 input images one of the loops never runs, leaving
    # left_pano/right_pano unbound (NameError); left[0]/right[0] are the
    # stitched half (or the single untouched image) in every case.
    left_pano = left[0]
    right_pano = right[0]
    # if width_right_pano >= width_left_pano, select right_pano as destination;
    # otherwise left_pano
    if right_pano.shape[1] >= left_pano.shape[1]:
        fullpano, _, _, _ = warpTwoImages(left_pano, right_pano)
    else:
        fullpano, _, _, _ = warpTwoImages(right_pano, left_pano)
    return fullpano
def crop(panorama, h_dst, conners):
    """Crop the stitched panorama to remove surrounding black regions.

    @param panorama: the stitched panorama image
    @param h_dst: height of the destination image
    @param conners: (8, 1, 2) array holding the 4 corners of the warped
        image followed by the 4 corners of the destination image
    @return: the cropped panorama
    """
    # recover the translation applied during warping from the minimum corner
    xmin, ymin = np.int32(conners.min(axis=0).ravel() - 0.5)
    ty = -ymin
    pts = conners.astype(int)
    rows = slice(ty, h_dst + ty)
    # pts[0][0][0] is the x coordinate of the warped image's top-left corner:
    # negative means the warped image was merged on the LEFT of the destination
    if pts[0][0][0] < 0:
        # trim the slanted left margin between the two warped left corners
        margin = abs(pts[0][0][0] - pts[1][0][0])
        return panorama[rows, margin:, :]
    # merged on the RIGHT: keep columns up to the nearer of the two warped
    # right corners (bottom-right vs top-right)
    right_edge = min(pts[2][0][0], pts[3][0][0])
    return panorama[rows, :right_edge, :]