version1.py
'''
SOURCES:
https://www.geeksforgeeks.org/image-stitching-with-opencv/
https://youtube.com/watch?v=v9JARVu74CI&t=14s
https://github.com/OpenStitching/stitching
https://dronemapper.com/sample_data/
https://docs.opencv.org/4.x/d3/da1/classcv_1_1BFMatcher.html#a02ef4d594b33d091767cbfe442aefb8a
'''
import cv2
import numpy as np
import os
'''
@params
folder: path to a folder containing the input images
debug: 0 = no debug; 1 = light debug; 2 = detailed debug
range: optional (start, end) tuple; only files whose index in the folder listing falls in [start, end) are loaded. None loads everything
panorama_name: output file name (without extension)
type: output file extension, e.g. '.jpg' or '.png'
Note: `range` and `type` shadow Python built-ins; the names are kept for compatibility with the call at the bottom of the file.
'''
def stitch(folder, debug=0, range=None, panorama_name='panorama', type='.jpg'):
print("\n----------------------\n")
print("Run Start")
images = []
if debug > 0: print("Scanning Images in folder:",folder)
factor_of_descale = 1
counter = 0
for filename in os.listdir(folder):
if range == None or (counter >= range[0] and counter < range[1]):
#Create full file path
f = os.path.join(folder, filename)
# Skipping if it is a file
if not os.path.isfile(f):
continue
#Save file in images
currimg = cv2.imread(f)
#currimg = cv2.resize(currimg, (int(currimg.shape[1] / factor_of_descale), int(currimg.shape[0] / factor_of_descale)), interpolation=cv2.INTER_LINEAR)
#current_image = current_image.astype(np.byte)
images.append(currimg)
if debug == 2: print(f, "Loaded")
counter += 1
if debug > 0: print("Done Scanning")
    '''
    # Finds features and their descriptors
    # Uses the SIFT algorithm (Scale-Invariant Feature Transform)
    # Also could try cv2.ORB_create()
    sift = cv2.SIFT_create()
    if debug == 2: print("Sift class created")
    # Storing keypoints and descriptors here
    keypoints = []
    descriptors = []
    if debug > 0: print("Loading keypoints and descriptors. Please wait...")
    imagecounter = 0
    for image in images:
        # The second param of detectAndCompute is an optional mask;
        # passing None means "detect features over the whole image"
        key, desc = sift.detectAndCompute(image, None)
        keypoints.append(key)
        descriptors.append(desc)
        if debug == 2: print("Stored Keypoints and Descriptors for Image:", imagecounter)
        imagecounter += 1
    if debug > 0: print("Keypoints and Descriptors loaded")
    '''
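    # A hedged sketch (not part of the original pipeline): the ORB alternative
    # mentioned in the block above. ORB produces binary descriptors, so it pairs
    # with cv2.NORM_HAMMING rather than NORM_L2. The variable names are placeholders.
    # Kept disabled, like the SIFT block.
    '''
    orb = cv2.ORB_create(nfeatures=2000)
    orb_keypoints = []
    orb_descriptors = []
    for image in images:
        # None = no mask, detect over the whole image
        key, desc = orb.detectAndCompute(image, None)
        orb_keypoints.append(key)
        orb_descriptors.append(desc)
    '''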
    # Whether to use BFMatcher.create() or the constructor directly is debated between sites
    # NORM_L1 or NORM_L2 is good with SIFT
    # NORM_HAMMING is good with ORB or BRISK
    '''
    If crossCheck is false, this will be the default BFMatcher behaviour:
    it finds the k nearest neighbors for each query descriptor.
    If crossCheck==true, then the knnMatch() method with k=1 will
    only return pairs (i,j) such that for the i-th query descriptor the
    j-th descriptor in the matcher's collection is the nearest and
    vice versa, i.e. the BFMatcher will only return consistent pairs.
    Such technique usually produces best results with minimal number
    of outliers when there are enough matches. This is an alternative to
    the ratio test used by D. Lowe in the SIFT paper.
    No, I (Jasper) didn't write that. I don't have that many brain cells
    left. Copied from the 5th link.
    '''
    #if debug > 0: print("Finding matches...")
    #bf = cv2.BFMatcher.create(cv2.NORM_L2, crossCheck=True)
    #matches = bf.match(descriptors[0], descriptors[1])
    #if debug > 0: print("Matches Found")
print("Stitching...")
#Stitcher settings
cv2.ocl.setUseOpenCL(False)
stitcher = cv2.Stitcher.create(0)
cv2.Stitcher.setWaveCorrection(stitcher, False)
status, output = stitcher.stitch(images)
    possibleStatus = [
        "OK",
        "ERR_NEED_MORE_IMGS",
        "ERR_HOMOGRAPHY_EST_FAIL",
        "ERR_CAMERA_PARAMS_ADJUST_FAIL"
    ]
    if status == 0:  # cv2.Stitcher_OK
        #cv2.imshow('1', output)
        #cv2.waitKey(0)
        cv2.imwrite(panorama_name + type, output)
    else:
        print("Error: ", possibleStatus[status])
    print("Run End")
stitch(r'C:\Users\jaspe\Documents\Github_Local\mapping-tjuav\ImgSampleF', debug=2, range=(0, 20), type='.png', panorama_name='FimgsALL')
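
# A hedged usage sketch: the same call with a relative folder and mostly default
# settings. 'sample_images' is a placeholder folder name, not a path from this
# repo; uncomment and point it at a real image folder to use it.
#stitch('sample_images', debug=1)
#stitch('sample_images', debug=1, range=(0, 10), panorama_name='sample_pano', type='.jpg')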