diff --git a/ImageFusion.py b/ImageFusion.py index 81cbf0a..7f50f48 100644 --- a/ImageFusion.py +++ b/ImageFusion.py @@ -36,7 +36,7 @@ def fuseByMinimum(self, images): (imageA, imageB) = images fuseRegion = np.minimum(imageA, imageB) return fuseRegion - + def getWeightsMatrix(self, images): ''' 获取权值矩阵 @@ -55,22 +55,21 @@ def getWeightsMatrix(self, images): compareList.append(np.count_nonzero(imageA[row // 2: row, 0: col // 2] > 0)) compareList.append(np.count_nonzero(imageA[row // 2: row, col // 2: col] > 0)) compareList.append(np.count_nonzero(imageA[0: row // 2, col // 2: col] > 0)) - self.printAndWrite(compareList) + # self.printAndWrite(" compareList:" + str(compareList)) index = compareList.index(min(compareList)) if index == 2: # 重合区域在imageA的上左部分 - self.printAndWrite("上左") - rowIndex = 0; - colIndex = 0; + # self.printAndWrite("上左") + rowIndex = 0; colIndex = 0; for j in range(1, col): for i in range(row - 1, -1, -1): - if imageA[i, col - j] != 0: + if imageA[i, col - j] != -1: rowIndex = i + 1 break if rowIndex != 0: break for i in range(col - 1, -1, -1): - if imageA[rowIndex, i] != 0: + if imageA[rowIndex, i] != -1: colIndex = i + 1 break # 赋值 @@ -84,21 +83,20 @@ def getWeightsMatrix(self, images): weightMatB_2[:, colIndex - i] = (colIndex - i) * 1 / colIndex weightMatB = weightMatB_1 * weightMatB_2 weightMatA = 1 - weightMatB - # elif leftCenter != 0 and bottomCenter != 0 and upCenter == 0 and rightCenter == 0: + #elif leftCenter != 0 and bottomCenter != 0 and upCenter == 0 and rightCenter == 0: elif index == 3: # 重合区域在imageA的下左部分 - self.printAndWrite("下左") - rowIndex = 0; - colIndex = 0; + # self.printAndWrite("下左") + rowIndex = 0; colIndex = 0; for j in range(1, col): for i in range(row): - if imageA[i, col - j] != 0: + if imageA[i, col - j] != -1: rowIndex = i - 1 break if rowIndex != 0: break for i in range(col - 1, -1, -1): - if imageA[rowIndex, i] != 0: + if imageA[rowIndex, i] != -1: colIndex = i + 1 break # 赋值 @@ -115,18 +113,18 @@ def getWeightsMatrix(self, images): # elif rightCenter != 0 and bottomCenter != 0 and upCenter == 0 and leftCenter == 0: elif index == 0: # 重合区域在imageA的下右部分 - self.printAndWrite("下右") + # self.printAndWrite("下右") rowIndex = 0; colIndex = 0; for j in range(0, col): for i in range(row): - if imageA[i, j] != 0: + if imageA[i, j] != -1: rowIndex = i - 1 break if rowIndex != 0: break for i in range(col): - if imageA[rowIndex, i] != 0: + if imageA[rowIndex, i] != -1: colIndex = i - 1 break # 赋值 @@ -143,18 +141,17 @@ def getWeightsMatrix(self, images): # elif upCenter != 0 and rightCenter != 0 and leftCenter == 0 and bottomCenter == 0: elif index == 1: # 重合区域在imageA的上右部分 - self.printAndWrite("上右") - rowIndex = 0; - colIndex = 0; + # self.printAndWrite("上右") + rowIndex = 0; colIndex = 0; for j in range(0, col): for i in range(row - 1, -1, -1): - if imageA[i, j] != 0: + if imageA[i, j] != -1: rowIndex = i + 1 break if rowIndex != 0: break for i in range(col): - if imageA[rowIndex, i] != 0: + if imageA[rowIndex, i] != -1: colIndex = i - 1 break for i in range(rowIndex + 1): @@ -182,12 +179,12 @@ def fuseByFadeInAndFadeOut(self, images, dx, dy): row, col = imageA.shape[:2] weightMatA = np.ones(imageA.shape, dtype=np.float32) weightMatB = np.ones(imageA.shape, dtype=np.float32) - self.printAndWrite("ratio: " + str(np.count_nonzero(imageA > 0) / imageA.size)) - if np.count_nonzero(imageA > 0) / imageA.size > 0.65: + # self.printAndWrite(" ratio: " + str(np.count_nonzero(imageA > -1) / imageA.size)) + if np.count_nonzero(imageA > -1) / imageA.size > 0.65: # 
如果对于imageA中,非0值占比例比较大,则认为是普通融合 # 根据区域的行列大小来判断,如果行数大于列数,是水平方向 if col <= row: - self.printAndWrite("普通融合-水平方向") + # self.printAndWrite("普通融合-水平方向") for i in range(0, col): if dy <= 0: weightMatA[:, i] = weightMatA[:, i] * i * 1.0 / col @@ -197,7 +194,7 @@ def fuseByFadeInAndFadeOut(self, images, dx, dy): weightMatB[:, col - i - 1] = weightMatB[:, col - i - 1] * (col - i) * 1.0 / col # 根据区域的行列大小来判断,如果列数大于行数,是竖直方向 elif row < col: - self.printAndWrite("普通融合-竖直方向") + # self.printAndWrite("普通融合-竖直方向") for i in range(0, row): if dx <= 0: weightMatA[i, :] = weightMatA[i, :] * i * 1.0 / row @@ -207,13 +204,11 @@ def fuseByFadeInAndFadeOut(self, images, dx, dy): weightMatB[row - i - 1, :] = weightMatB[row - i - 1, :] * (row - i) * 1.0 / row else: # 如果对于imageA中,非0值占比例比较小,则认为是拐角融合 - self.printAndWrite("拐角融合") + # self.printAndWrite("拐角融合") weightMatA, weightMatB = self.getWeightsMatrix(images) - imageA[imageA == -1] = 0; - imageB[imageB == -1] = 0; + imageA[imageA == -1] = 0; imageB[imageB == -1] =0; result = weightMatA * imageA.astype(np.int) + weightMatB * imageB.astype(np.int) - result[result < 0] = 0; - result[result > 255] = 255 + result[result < 0] = 0; result[result > 255] = 255 fuseRegion = np.uint8(result) return fuseRegion @@ -229,12 +224,12 @@ def fuseByTrigonometric(self, images, dx, dy): row, col = imageA.shape[:2] weightMatA = np.ones(imageA.shape, dtype=np.float64) weightMatB = np.ones(imageA.shape, dtype=np.float64) - self.printAndWrite("ratio: " + str(np.count_nonzero(imageA > 0) / imageA.size)) - if np.count_nonzero(imageA > 0) / imageA.size > 0.65: + # self.printAndWrite(" ratio: " + str(np.count_nonzero(imageA > -1) / imageA.size)) + if np.count_nonzero(imageA > -1) / imageA.size > 0.65: # 如果对于imageA中,非0值占比例比较大,则认为是普通融合 # 根据区域的行列大小来判断,如果行数大于列数,是水平方向 if col <= row: - self.printAndWrite("普通融合-水平方向") + # self.printAndWrite("普通融合-水平方向") for i in range(0, col): if dy <= 0: weightMatA[:, i] = weightMatA[:, i] * i * 1.0 / col @@ -244,7 +239,7 @@ def fuseByTrigonometric(self, images, dx, dy): weightMatB[:, col - i - 1] = weightMatB[:, col - i - 1] * (col - i) * 1.0 / col # 根据区域的行列大小来判断,如果列数大于行数,是竖直方向 elif row < col: - self.printAndWrite("普通融合-竖直方向") + # self.printAndWrite("普通融合-竖直方向") for i in range(0, row): if dx <= 0: weightMatA[i, :] = weightMatA[i, :] * i * 1.0 / row @@ -254,17 +249,15 @@ def fuseByTrigonometric(self, images, dx, dy): weightMatB[row - i - 1, :] = weightMatB[row - i - 1, :] * (row - i) * 1.0 / row else: # 如果对于imageA中,非0值占比例比较小,则认为是拐角融合 - self.printAndWrite("拐角融合") + # self.printAndWrite("拐角融合") weightMatA, weightMatB = self.getWeightsMatrix(images) weightMatA = np.power(np.sin(weightMatA * math.pi / 2), 2) weightMatB = 1 - weightMatA - imageA[imageA == -1] = 0; - imageB[imageB == -1] = 0; + imageA[imageA == -1] = 0; imageB[imageB == -1] =0; result = weightMatA * imageA.astype(np.int) + weightMatB * imageB.astype(np.int) - result[result < 0] = 0; - result[result > 255] = 255 + result[result < 0] = 0; result[result > 255] = 255 fuseRegion = np.uint8(result) return fuseRegion @@ -273,7 +266,7 @@ def fuseByMultiBandBlending(self, images): imagesReturn = np.uint8(self.BlendArbitrary2(imageA, imageB, 4)) return imagesReturn - # 带权拉普拉斯金字塔融合 + #带权拉普拉斯金字塔融合 def BlendArbitrary(self, img1, img2, R, level): # img1 and img2 have the same size # R represents the region to be combined @@ -287,11 +280,11 @@ def BlendArbitrary(self, img1, img2, R, level): GRN.append(np.ones((GR[i].shape[0], GR[i].shape[1])) - GR[i]) LC = [] for i in range(level): - LC.append(LA[i] * GR[level - i - 1] + 
LB[i] * GRN[level - i - 1]) + LC.append(LA[i] * GR[level - i -1] + LB[i] * GRN[level - i - 1]) result = self.reconstruct(LC) - return result + return result - # 均值融合 + #均值融合 def BlendArbitrary2(self, img1, img2, level): # img1 and img2 have the same size # R represents the region to be combined @@ -306,7 +299,7 @@ def BlendArbitrary2(self, img1, img2, level): def LaplacianPyramid(self, img, level): gp = self.GaussianPyramid(img, level) - lp = [gp[level - 1]] + lp = [gp[level-1]] for i in range(level - 1, -1, -1): GE = cv2.pyrUp(gp[i]) GE = cv2.resize(GE, (gp[i - 1].shape[1], gp[i - 1].shape[0]), interpolation=cv2.INTER_CUBIC) @@ -318,7 +311,7 @@ def reconstruct(self, input_pyramid): out = input_pyramid[0] for i in range(1, len(input_pyramid)): out = cv2.pyrUp(out) - out = cv2.resize(out, (input_pyramid[i].shape[1], input_pyramid[i].shape[0]), interpolation=cv2.INTER_CUBIC) + out = cv2.resize(out, (input_pyramid[i].shape[1],input_pyramid[i].shape[0]), interpolation = cv2.INTER_CUBIC) out = cv2.add(out, input_pyramid[i]) return out @@ -330,7 +323,7 @@ def GaussianPyramid(self, R, level): gp.append(G) return gp - # 权值矩阵归一化 + #权值矩阵归一化 def stretchImage(self, Region): minI = Region.min() maxI = Region.max() @@ -358,11 +351,11 @@ def fuseByOptimalSeamLine(self, images, direction="horizontal"): fuseRegion = imageA.copy() fuseRegion[(1 - mask) == 0] = imageA[(1 - mask) == 0] fuseRegion[(1 - mask) == 1] = imageB[(1 - mask) == 1] - drawFuseRegion = self.drawOptimalLine(1 - mask, fuseRegion) + drawFuseRegion = self.drawOptimalLine(1- mask, fuseRegion) cv2.imwrite("optimalLine.jpg", drawFuseRegion) - cv2.imwrite("fuseRegion.jpg", np.uint8(self.BlendArbitrary(imageA, imageB, mask, 4))) + cv2.imwrite("fuseRegion.jpg", np.uint8(self.BlendArbitrary(imageA,imageB, mask, 4))) cv2.waitKey(0) - return np.uint8(self.BlendArbitrary(imageA, imageB, mask, 4)) + return np.uint8(self.BlendArbitrary(imageA,imageB, mask, 4)) def caculateVaule(self, images): (imageA, imageB) = images @@ -373,8 +366,8 @@ def caculateVaule(self, images): [-1, 0, 1], [-2, 0, 2]]) Sy = np.array([[-2, -1, -2], - [0, 0, 0], - [2, 1, 2]]) + [ 0, 0, 0], + [ 2, 1, 2]]) Egeometry = np.power(cv2.filter2D(Ecolor, -1, Sx), 2) + np.power(cv2.filter2D(Ecolor, -1, Sy), 2) diff = np.abs(imageA - imageB) / np.maximum(imageA, imageB).max() @@ -412,27 +405,22 @@ def findOptimalSeamLine(self, value, direction="horizontal"): # print(indexMatrix[i, j]) elif j == col - 1: dpMatrix[i, j] = (np.array([dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j]]) + value[i, j]).min() - indexMatrix[i, j] = (np.array([dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j]]) + value[ - i, j]).argmin() - 1 + indexMatrix[i, j] = (np.array([dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j]]) + value[i, j]).argmin() - 1 else: - dpMatrix[i, j] = ( - np.array([dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j], dpMatrix[i - 1, j + 1]]) + value[ - i, j]).min() - indexMatrix[i, j] = (np.array( - [dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j], dpMatrix[i - 1, j + 1]]) + value[ - i, j]).argmin() - 1 + dpMatrix[i, j] = (np.array([dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j], dpMatrix[i - 1, j + 1]]) + value[i, j]).min() + indexMatrix[i, j] = (np.array([dpMatrix[i - 1, j - 1], dpMatrix[i - 1, j], dpMatrix[i - 1, j + 1]]) + value[i, j]).argmin() - 1 # print(indexMatrix) # generate the mask index = dpMatrix[row - 1, :].argmin() # print("here" + str(dpMatrix[row - 1, :])) # print(index) for j in range(index, col): - mask[row - 1, j] = 1 + mask[row-1, j] = 1 for i in range(row - 1, 1, -1): index = indexMatrix[i, index] + index # 
print(index)
                for j in range(index, col):
-                    mask[i - 1, j] = 1
+                    mask[i-1, j] = 1
         if direction == "vertical":
             mask = np.transpose(mask)
         return mask
@@ -448,8 +436,7 @@ def drawOptimalLine(self, mask, fuseRegion):
                     break
         return drawing
 
-
-if __name__ == "__main__":
+if __name__=="__main__":
     # test
     num = 6
     A_1 = np.zeros((num, num), dtype=np.uint8)
@@ -467,4 +454,4 @@ def drawOptimalLine(self, mask, fuseRegion):
     A_2 = np.ones((num, num), dtype=np.uint8)
 
     imageFusion = ImageFusion()
-    imageFusion.fuseByFadeInAndFadeOut([A_1, A_2])
+    imageFusion.fuseByFadeInAndFadeOut([A_1, A_2])
\ No newline at end of file
diff --git a/ImageUtility.py b/ImageUtility.py
index d1d3de3..4810b2f 100644
--- a/ImageUtility.py
+++ b/ImageUtility.py
@@ -1,16 +1,55 @@
 import numpy as np
 import cv2
 import math
+import myGpuFeatures
 from scipy.stats import mode
-
 class Method():
+    # logging settings
     outputAddress = "result/"
-    isEvaluate = True
+    isEvaluate = False
     evaluateFile = "evaluate.txt"
     isPrintLog = True
-    parallelMode = "None"  # "CPU","GPU"
 
+    # feature search settings
+    featureMethod = "surf"  # "sift","surf" or "orb"
+    roiRatio = 0.1  # roi length for stitching in first direction
+    searchRatio = 0.75  # 0.75 is common value for matches
+
+    # GPU acceleration settings
+    isGPUAvailable = True
+
+    # GPU-SURF settings
+    surfHessianThreshold = 100.0
+    surfNOctaves = 4
+    surfNOctaveLayers = 3
+    surfIsExtended = False
+    surfKeypointsRatio = 0.01
+    surfIsUpright = False
+
+    # GPU-ORB settings
+    orbNfeatures = 500
+    orbScaleFactor = 1.2
+    orbNlevels = 8
+    orbEdgeThreshold = 31
+    orbFirstLevel = 0
+    orbWTA_K = 2
+    orbPatchSize = 31
+    orbFastThreshold = 20
+    orbBlurForDescriptor = True
+    orbMaxDistance = 30
+
+    # feature registration settings
+    offsetCaculate = "mode"  # "mode" or "ransac"
+    offsetEvaluate = 3  # 40 means the number of matches for mode, 3.0 means the threshold for ransac
+
+    # image enhancement settings
+    isEnhance = False
+    isClahe = False
+    clipLimit = 20
+    tileSize = 5
+
+    # print the output to the screen and write it to a file
     def printAndWrite(self, content):
         if self.isPrintLog:
             print(content)
@@ -20,6 +59,35 @@ def printAndWrite(self, content):
             f.write("\n")
             f.close()
 
+    def getROIRegionForIncreMethod(self, image, direction=1, order="first", searchRatio=0.1):
+        row, col = image.shape[:2]
+        roiRegion = np.zeros(image.shape, np.uint8)
+        if direction == 1:
+            searchLength = np.floor(row * searchRatio).astype(int)
+            if order == "first":
+                roiRegion = image[row - searchLength:row, :]
+            elif order == "second":
+                roiRegion = image[0: searchLength, :]
+        elif direction == 2:
+            searchLength = np.floor(col * searchRatio).astype(int)
+            if order == "first":
+                roiRegion = image[:, col - searchLength:col]
+            elif order == "second":
+                roiRegion = image[:, 0: searchLength]
+        elif direction == 3:
+            searchLength = np.floor(row * searchRatio).astype(int)
+            if order == "first":
+                roiRegion = image[0: searchLength, :]
+            elif order == "second":
+                roiRegion = image[row - searchLength:row, :]
+        elif direction == 4:
+            searchLength = np.floor(col * searchRatio).astype(int)
+            if order == "first":
+                roiRegion = image[:, 0: searchLength]
+            elif order == "second":
+                roiRegion = image[:, col - searchLength:col]
+        return roiRegion
+
     def getROIRegion(self, image, direction="horizontal", order="first", searchLength=150, searchLengthForLarge=-1):
         '''crop the region of interest from the original image
         :param originalImage: the original image to be cropped
@@ -54,6 +122,39 @@ def getROIRegion(self, image, direction="horizontal", order="first", searchLengt
         elif searchLengthForLarge > 0:
             roiRegion = image[0: searchLength, 0:searchLengthForLarge]
 
+    def getOffsetByMode(self, kpsA, kpsB, matches, offsetEvaluate = 10):
+        totalStatus = True
+        if len(matches) == 0:
+            totalStatus = False
+            return (totalStatus, [0, 0])
+        dxList = []; dyList = [];
+        for trainIdx, queryIdx in matches:
+            ptA = (kpsA[queryIdx][1], kpsA[queryIdx][0])
+            ptB = (kpsB[trainIdx][1], kpsB[trainIdx][0])
+            # dxList.append(int(round(ptA[0] - ptB[0])))
+            # dyList.append(int(round(ptA[1] - ptB[1])))
+            if int(ptA[0] - ptB[0]) == 0 and int(ptA[1] - ptB[1]) == 0:
+                continue
+            dxList.append(int(ptA[0] - ptB[0]))
+            dyList.append(int(ptA[1] - ptB[1]))
+        if len(dxList) == 0:
+            dxList.append(0); dyList.append(0)
+        # Get Mode offset in [dxList, dyList], thanks to clovermini
+        zipped = zip(dxList, dyList)
+        zip_list = list(zipped)
+        zip_dict = dict((a, zip_list.count(a)) for a in zip_list)
+        zip_dict_sorted = dict(sorted(zip_dict.items(), key=lambda x: x[1], reverse=True))
+
+        dx = list(zip_dict_sorted)[0][0]
+        dy = list(zip_dict_sorted)[0][1]
+        num = zip_dict_sorted[list(zip_dict_sorted)[0]]
+        # print("dx = " + str(dx) + ", dy = " + str(dy) + ", num = " + str(num))
+
+        if num < offsetEvaluate:
+            totalStatus = False
+        # self.printAndWrite("  In Mode, The number of num is " + str(num) + " and the number of offsetEvaluate is " + str(offsetEvaluate))
+        return (totalStatus, [dx, dy])
+
     def getOffsetByRansac(self, kpsA, kpsB, matches, offsetEvaluate=100):
         totalStatus = False
         ptsA = np.float32([kpsA[i] for (_, i) in matches])
@@ -61,7 +162,7 @@ def getOffsetByRansac(self, kpsA, kpsB, matches, offsetEvaluate=100):
         if len(matches) == 0:
             return (totalStatus, [0, 0], 0)
         # compute the perspective transform matrix
         # H1 = cv2.getAffineTransform(ptsA, ptsB)
         # print("H1")
         # print(H1)
         (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC, 3, 0.9)
@@ -72,39 +173,74 @@ def getOffsetByRansac(self, kpsA, kpsB, matches, offsetEvaluate=100):
         if trueCount >= offsetEvaluate:
             totalStatus = True
             adjustH = H.copy()
-            adjustH[0, 2] = 0;
-            adjustH[1, 2] = 0
-            adjustH[2, 0] = 0;
-            adjustH[2, 1] = 0
-            return (totalStatus, [np.round(np.array(H).astype(np.int)[1, 2]) * (-1),
-                                  np.round(np.array(H).astype(np.int)[0, 2]) * (-1)], adjustH)
+            adjustH[0, 2] = 0; adjustH[1, 2] = 0
+            adjustH[2, 0] = 0; adjustH[2, 1] = 0
+            return (totalStatus, [np.round(np.array(H).astype(np.int)[1, 2]) * (-1), np.round(np.array(H).astype(np.int)[0, 2]) * (-1)], adjustH)
         else:
             return (totalStatus, [0, 0], 0)
 
+    def npToListForKeypoints(self, array):
+        '''
+        Convert array to List, used for keypoints from GPUDLL to python List
+        :param array: array from GPUDLL
+        :return:
+        '''
+        kps = []
+        row, col = array.shape
+        for i in range(row):
+            kps.append([array[i, 0], array[i, 1]])
+        return kps
+
+    def npToListForMatches(self, array):
+        '''
+        Convert array to List, used for DMatches from GPUDLL to python List
+        :param array: array from GPUDLL
+        :return:
+        '''
+        descriptors = []
+        row, col = array.shape
+        for i in range(row):
+            descriptors.append((array[i, 0], array[i, 1]))
+        return descriptors
+
+    def npToKpsAndDescriptors(self, array):
+        kps = []
+        descriptors = array[:, :, 1]
+        for i in range(array.shape[0]):
+            kps.append([array[i, 0, 0], array[i, 1, 0]])
+        return (kps, descriptors)
+
     def detectAndDescribe(self, image, featureMethod):
         '''
         compute the feature points of an image and return the keypoints and their descriptors
         :param image: the image to be analysed
         :return: the keypoints and the corresponding descriptors
         '''
-        # create the SIFT detector
-        if featureMethod == "sift":
-            descriptor = cv2.xfeatures2d.SIFT_create()
-        elif featureMethod == "surf":
-            descriptor = cv2.xfeatures2d.SURF_create()
-        elif featureMethod == "orb":
-            descriptor = cv2.ORB_create(5000000)
-
-        # detect the feature points and compute the descriptors
-        (kps, features) = descriptor.detectAndCompute(image, None)
-
-        # convert the result into a NumPy array
-        kps = np.float32([kp.pt for kp in kps])
-
+        if self.isGPUAvailable == False:  # CPU mode
+            if featureMethod == "sift":
+                descriptor = cv2.xfeatures2d.SIFT_create()
+            elif featureMethod == "surf":
+                descriptor = cv2.xfeatures2d.SURF_create()
+            elif featureMethod == "orb":
+                descriptor = cv2.ORB_create(self.orbNfeatures, self.orbScaleFactor, self.orbNlevels, self.orbEdgeThreshold, self.orbFirstLevel, self.orbWTA_K, 0, self.orbPatchSize, self.orbFastThreshold)
+            # detect the feature points and compute the descriptors
+            kps, features = descriptor.detectAndCompute(image, None)
+            # convert the result into a NumPy array
+            kps = np.float32([kp.pt for kp in kps])
+        else:  # GPU mode
+            if featureMethod == "sift":
+                # GPU-SIFT is not implemented yet; fall back to the CPU version for now
+                descriptor = cv2.xfeatures2d.SIFT_create()
+                kps, features = descriptor.detectAndCompute(image, None)
+                kps = np.float32([kp.pt for kp in kps])
+            elif featureMethod == "surf":
+                kps, features = self.npToKpsAndDescriptors(myGpuFeatures.detectAndDescribeBySurf(image, self.surfHessianThreshold, self.surfNOctaves, self.surfNOctaveLayers, self.surfIsExtended, self.surfKeypointsRatio, self.surfIsUpright))
+            elif featureMethod == "orb":
+                kps, features = self.npToKpsAndDescriptors(myGpuFeatures.detectAndDescribeByOrb(image, self.orbNfeatures, self.orbScaleFactor, self.orbNlevels, self.orbEdgeThreshold, self.orbFirstLevel, self.orbWTA_K, 0, self.orbPatchSize, self.orbFastThreshold, self.orbBlurForDescriptor))
         # return the keypoints and the corresponding descriptors
         return (kps, features)
 
-    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio):
+    def matchDescriptors(self, featuresA, featuresB):
         '''
         match the feature points
         :param self:
         :param ...
         :param ratio: the ratio between the nearest-neighbour and the second-nearest-neighbour distance
         :return: the matched pairs
         '''
@@ -113,127 +249,84 @@ def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB, ratio):
-        # create the brute-force matcher
-        matcher = cv2.DescriptorMatcher_create("BruteForce")
-
-        # use KNN (k=2) to match the features of image A and image B, returning a list
-        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
-        matches = []
-        for m in rawMatches:
-            # keep a pair when the nearest distance is smaller than ratio times the second-nearest distance
-            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
-                # store the indices of the two points in featuresA and featuresB
-                matches.append((m[0].trainIdx, m[0].queryIdx))
-        self.printAndWrite("  The number of matches is " + str(len(matches)))
+        if self.isGPUAvailable == False:  # CPU Mode
+            # create the brute-force matcher
+            if self.featureMethod == "surf" or self.featureMethod == "sift":
+                matcher = cv2.DescriptorMatcher_create("BruteForce")
+                # use KNN (k=2) to match the features of image A and image B, returning a list
+                rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
+                matches = []
+                for m in rawMatches:
+                    # keep a pair when the nearest distance is smaller than ratio times the second-nearest distance
+                    if len(m) == 2 and m[0].distance < m[1].distance * self.searchRatio:
+                        # store the indices of the two points in featuresA and featuresB
+                        matches.append((m[0].trainIdx, m[0].queryIdx))
+            elif self.featureMethod == "orb":
+                matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")
+                rawMatches = matcher.match(featuresA, featuresB)
+                matches = []
+                for m in rawMatches:
+                    matches.append((m.trainIdx, m.queryIdx))
+            # self.printAndWrite("  The number of matches is " + str(len(matches)))
+        else:  # GPU Mode
+            if self.featureMethod == "surf":
+                matches = self.npToListForMatches(myGpuFeatures.matchDescriptors(np.array(featuresA), np.array(featuresB), 2, self.searchRatio))
+            elif self.featureMethod == "orb":
+                matches = self.npToListForMatches(myGpuFeatures.matchDescriptors(np.array(featuresA), np.array(featuresB), 3, self.orbMaxDistance))
         return matches
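As an aside, the packed (N, dim, 2) array that the GPU module returns keeps keypoint coordinates in channel 0 and descriptor values in channel 1, which is what npToKpsAndDescriptors above unpacks. A small self-contained sketch of that layout, with fabricated data and illustrative names of our own:

import numpy as np

def unpack_kps_and_descriptors(packed):
    # [i, 0, 0] = x, [i, 1, 0] = y, [i, j, 1] = j-th descriptor value
    kps = [[packed[i, 0, 0], packed[i, 1, 0]] for i in range(packed.shape[0])]
    descriptors = packed[:, :, 1]
    return kps, descriptors

packed = np.zeros((2, 64, 2), np.float32)      # two keypoints, 64-dim SURF-like descriptors
packed[0, 0, 0], packed[0, 1, 0] = 12.0, 34.0  # (x, y) of the first keypoint
kps, descriptors = unpack_kps_and_descriptors(packed)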
-    def getOffsetByMode(self, kpsA, kpsB, matches, offsetEvaluate=10):
-        totalStatus = True
-        if len(matches) == 0:
-            totalStatus = False
-            return (totalStatus, [0, 0])
-        dxList = [];
-        dyList = [];
-        for trainIdx, queryIdx in matches:
-            ptA = (kpsA[queryIdx][1], kpsA[queryIdx][0])
-            ptB = (kpsB[trainIdx][1], kpsB[trainIdx][0])
-            # dxList.append(int(round(ptA[0] - ptB[0])))
-            # dyList.append(int(round(ptA[1] - ptB[1])))
-            if int(ptA[0] - ptB[0]) == 0 and int(ptA[1] - ptB[1]) == 0:
-                continue
-            dxList.append(int(ptA[0] - ptB[0]))
-            dyList.append(int(ptA[1] - ptB[1]))
-
-        # Get Mode offset in [dxList, dyList], thanks to clovermini
-        zipped = zip(dxList, dyList)
-        zip_list = list(zipped)
-        zip_dict = dict((a, zip_list.count(a)) for a in zip_list)
-        zip_dict_sorted = dict(sorted(zip_dict.items(), key=lambda x: x[1], reverse=True))
-
-        dx = list(zip_dict_sorted)[0][0]
-        dy = list(zip_dict_sorted)[0][1]
-        num = zip_dict_sorted[list(zip_dict_sorted)[0]]
-        # print("dx = " + str(dx) + ", dy = " + str(dy) + ", num = " + str(num))
-
-        if num < offsetEvaluate:
-            totalStatus = False
-            self.printAndWrite(
-                "  In Mode, The number of num is " + str(num) + " and the number of offsetEvaluate is " + str(
-                    offsetEvaluate))
-        return (totalStatus, [dx, dy])
-
-    def getROIRegionForIncreMethod(self, image, direction=1, order="first", searchRatio=0.1):
-        row, col = image.shape[:2]
-        roiRegion = np.zeros(image.shape, np.uint8)
-        if direction == 1:
-            searchLength = np.floor(row * searchRatio).astype(int)
-            if order == "first":
-                roiRegion = image[row - searchLength:row, :]
-            elif order == "second":
-                roiRegion = image[0: searchLength, :]
-        elif direction == 2:
-            searchLength = np.floor(col * searchRatio).astype(int)
-            if order == "first":
-                roiRegion = image[:, col - searchLength:col]
-            elif order == "second":
-                roiRegion = image[:, 0: searchLength]
-        elif direction == 3:
-            searchLength = np.floor(row * searchRatio).astype(int)
-            if order == "first":
-                roiRegion = image[0: searchLength, :]
-            elif order == "second":
-                roiRegion = image[row - searchLength:row, :]
-        elif direction == 4:
-            searchLength = np.floor(col * searchRatio).astype(int)
-            if order == "first":
-                roiRegion = image[:, 0: searchLength]
-            elif order == "second":
-                roiRegion = image[:, col - searchLength:col]
-        return roiRegion
-
-    def resizeImg(self, image, resizeTimes, interMethod=cv2.INTER_AREA):
+    def resizeImg(self, image, resizeTimes, interMethod = cv2.INTER_AREA):
         (h, w) = image.shape
         resizeH = int(h * resizeTimes)
         resizeW = int(w * resizeTimes)
         # cv2.INTER_AREA proved best in testing
         return cv2.resize(image, (resizeW, resizeH), interpolation=interMethod)
 
-    def rectifyFinalImg(self, image, regionLength=10):
+    def rectifyFinalImg(self, image, regionLength = 10):
         (h, w) = image.shape
+        print("h:" + str(h))
+        print("w:" + str(w))
-        upperLeft = np.sum(image[0: regionLength, 0: regionLength])
-        upperRight = np.sum(image[0: regionLength, w - regionLength: w])
-        bottomLeft = np.sum(image[h - regionLength: h, 0: regionLength])
+        upperLeft   = np.sum(image[0: regionLength, 0: regionLength])
+        upperRight  = np.sum(image[0: regionLength, w - regionLength: w])
+        bottomLeft  = np.sum(image[h - regionLength: h, 0: regionLength])
         bottomRight = np.sum(image[h - regionLength: h, w - regionLength: w])
 
+        # preprocessing
         zeroCol = image[:, 0]
         noneZeroNum = np.count_nonzero(zeroCol)
         zeroNum = h - noneZeroNum
-        print("h:" + str(h))
-        print("w:" + str(w))
         print("noneZeroNum:" + str(noneZeroNum))
         print("zeroNum:" + str(zeroNum))
         print("ratio:" + str(noneZeroNum / h))
-        if (noneZeroNum / h) < 0.5:
+        if (noneZeroNum / h) < 0.3:
             resultImage = image
         elif upperLeft == 0 and bottomRight == 0 and upperRight != 0 and bottomLeft != 0:  # left side low, right side high
+            print(1)
             center = (w // 2, h // 2)
             print(w)
             print(h)
             angle = math.atan(center[1] / center[0]) * 180 / math.pi
             print(str(angle))
             M = cv2.getRotationMatrix2D(center, -1 * angle, 1.0)
             print(M)
             resultImage = cv2.warpAffine(image, M, (w, h))
         elif upperLeft != 0 and bottomRight != 0 and upperRight == 0 and bottomLeft == 0:  # left side high, right side low
             print(2)
             center = (w // 2, h // 2)
-            angle = math.atan(center[1] / center[0]) * 180 / math.pi / 2
+            angle = math.atan(center[1] / center[0]) * 180 / math.pi
             print(str(angle))
             M = cv2.getRotationMatrix2D(center, angle, 1.0)
             resultImage = cv2.warpAffine(image, M, (w, h))
         else:
             resultImage = image
         return resultImage
+
+if __name__=="__main__":
+    image = cv2.imread("D:\\Coding_Test\\Python\\ImageStitch\\images\\zirconSmall\\1\\WJE068-F (1).jpg", 0)
+    method = Method()
+    kps, descriptors = method.detectAndDescribe(image, 'orb')
+    print(len(kps))
+    print(descriptors.shape)
+    print(kps[0])
+    print(descriptors[0, :])
\ No newline at end of file
diff --git a/Stitcher.py b/Stitcher.py
index f9ceea6..8d6d853 100644
--- a/Stitcher.py
+++ b/Stitcher.py
@@ -8,12 +8,12 @@
 from numba import jit
 import ImageUtility as Utility
 import ImageFusion
-import myGpuSurf
-
+from phasecorrelation import *
+import time
 
 class ImageFeature():
     # stores the feature points and descriptors of the second image during serial global stitching, to speed up later stitching
     isBreak = True  # whether the previous stitching attempt was interrupted
     kps = None
     feature = None
 
@@ -22,48 +22,13 @@ class Stitcher(Utility.Method):
     '''
     Image stitching class, containing all functions related to the registration of material microstructure images
     '''
     direction = 1  # 1: first image on top, second below;  2: first image on the left, second on the right;
                    # 3: first image below, second on top;  4: first image on the right, second on the left;
     directIncre = 1
-    featureMethod = "surf"  # "sift","surf" or "orb"
-    searchRatio = 0.75  # 0.75 is common value for matches
-    offsetCaculate = "mode"  # "mode" or "ransac"
-    offsetEvaluate = 10  # 40 means the number of matches for mode, 4.0 means the threshold for ransac
-    roiRatio = 0.1  # roi length for stitching in first direction
     fuseMethod = "notFuse"
-    isEnhance = False
-    isClahe = False
-    clipLimit = 20
-    tileSize = 5
     phaseResponseThreshold = 0.15
-    overlapRatio = []
+    phase = phaseCorrelation()
     tempImageFeature = ImageFeature()
-    isGPUAvailable = True
-    keypointsRatio = 0.005
-
-    def npToListForKeypoints(self, array):
-        '''
-        Convert array to List, used for keypoints from GPUDLL to python List
-        :param array: array from GPUDLL
-        :return:
-        '''
-        kps = []
-        row, col = array.shape
-        for i in range(row):
-            kps.append([array[i, 0], array[i, 1]])
-        return kps
-
-    def npToListForMatches(self, array):
-        '''
-        Convert array to List, used for DMatches from GPUDLL to python List
-        :param array: array from GPUDLL
-        :return:
-        '''
-        descriptors = []
-        row, col = array.shape
-        for i in range(row):
-            descriptors.append((array[i, 0], array[i, 1]))
-        return descriptors
 
     def directionIncrease(self, direction):
         direction += self.directIncre
@@ -91,8 +56,7 @@ def flowStitch(self, fileList, caculateOffsetMethod):
         else:
             (status, offset) = caculateOffsetMethod([imageA, imageB])
             if status == False:
-                describtion = " " + str(fileList[fileIndex]) + " and " + str(
-                    fileList[fileIndex + 1]) + " can not be stitched"
+                describtion = " " + str(fileList[fileIndex]) + " and " + str(fileList[fileIndex+1]) + " can not be stitched"
                 break
             else:
                 offsetList.append(offset)
@@ -100,32 +64,12 
@@ def flowStitch(self, fileList, caculateOffsetMethod): endTime = time.time() self.printAndWrite("The time of registering is " + str(endTime - startTime) + "s") - self.printAndWrite(" The offsetList is " + str(offsetList)) # stitching and fusing self.printAndWrite("start stitching") startTime = time.time() - dxSum = 0; - dySum = 0 - stitchImage = cv2.imread(fileList[0], 0) - offsetListNum = len(offsetList) - - for fileIndex in range(0, offsetListNum): - self.printAndWrite(" stitching " + str(fileList[fileIndex + 1])) - imageB = cv2.imread(fileList[fileIndex + 1], 0) - dxSum = offsetList[fileIndex][0] + dxSum - dySum = offsetList[fileIndex][1] + dySum - offset = [dxSum, dySum] - self.printAndWrite(" The offsetX is " + str(offsetList[fileIndex][0]) + " and the offsetY is " + str( - offsetList[fileIndex][1])) - self.printAndWrite(" The dxSum is " + str(dxSum) + " and the dySum is " + str(dySum)) - (stitchImage, fuseRegion, roiImageRegionA, roiImageRegionB) = self.getStitchByOffset( - [stitchImage, imageB], offset) - if dxSum < 0: - dxSum = 0 - if dySum < 0: - dySum = 0 - + # offsetList = [[1784, 2], [1805, 2], [1809, 2], [1775, 2], [1760, 2], [1846, 2], [1809, 1], [1812, 2], [1786, 1], [1818, 3], [1786, 2], [1802, 2], [1722, 1], [1211, 1], [-10, 2411], [-1734, -1], [-1808, -1], [-1788, -3], [-1754, -1], [-1727, -2], [-1790, -3], [-1785, -2], [-1778, -1], [-1807, -2], [-1767, -2], [-1822, -3], [-1677, -2], [-1778, -2], [-1440, -1], [-2, 2410], [1758, 2], [1792, 2], [1794, 2], [1840, 3], [1782, 2], [1802, 3], [1782, 2], [1763, 3], [1738, 2], [1837, 3], [1781, 2], [1788, 18], [1712, 0], [1271, -11], [-3, 2478], [-1787, -1], [-1812, -2], [-1822, -2], [-1762, -1], [-1725, -2], [-1884, -2], [-1754, -2], [-1747, -1], [-1666, -1], [-1874, -3], [-1695, -2], [-1672, -1], [-1816, -2], [-1411, -1], [-4, 2431], [1874, 3], [1706, -3], [1782, 2], [1794, 2], [1732, 3], [1838, 3], [1721, 1], [1783, 3], [1805, 2], [1725, 3], [1828, 1], [1774, 3], [1776, 1], [1201, 1], [-16, 2405], [-1821, 0], [-1843, -2], [-1758, -2], [-1742, -3], [-1814, -2], [-1817, -2], [-1848, -2], [-1768, -2], [-1749, -2], [-1765, -2], [-1659, -2], [-1832, -2], [-1791, -2], [-1197, -1]] + stitchImage = self.getStitchByOffset(fileList, offsetList) endTime = time.time() self.printAndWrite("The time of fusing is " + str(endTime - startTime) + "s") @@ -146,8 +90,8 @@ def flowStitchWithMutiple(self, fileList, caculateOffsetMethod): else: startNum = startNum + status[1] + 1 - self.printAndWrite("status[1] = " + str(status[1])) - self.printAndWrite("startNum = " + str(startNum)) + # self.printAndWrite("status[1] = " + str(status[1])) + # self.printAndWrite("startNum = "+str(startNum)) if startNum == totalNum: break if startNum == (totalNum - 1): @@ -156,9 +100,8 @@ def flowStitchWithMutiple(self, fileList, caculateOffsetMethod): self.printAndWrite("stitching Break, start from " + str(fileList[startNum]) + " again") return result - def imageSetStitch(self, projectAddress, outputAddress, fileNum, caculateOffsetMethod, startNum=1, - fileExtension="jpg", outputfileExtension="jpg"): - for i in range(startNum, fileNum + 1): + def imageSetStitch(self, projectAddress, outputAddress, fileNum, caculateOffsetMethod, startNum = 1, fileExtension = "jpg", outputfileExtension = "jpg"): + for i in range(startNum, fileNum+1): fileAddress = projectAddress + "\\" + str(i) + "\\" fileList = glob.glob(fileAddress + "*." 
+ fileExtension) if not os.path.exists(outputAddress): @@ -170,9 +113,9 @@ def imageSetStitch(self, projectAddress, outputAddress, fileNum, caculateOffsetM if status == False: self.printAndWrite("stitching Failed") - def imageSetStitchWithMutiple(self, projectAddress, outputAddress, fileNum, caculateOffsetMethod, startNum=1, - fileExtension="jpg", outputfileExtension="jpg"): - for i in range(startNum, fileNum + 1): + def imageSetStitchWithMutiple(self, projectAddress, outputAddress, fileNum, caculateOffsetMethod, startNum = 1, fileExtension = "jpg", outputfileExtension = "jpg"): + for i in range(startNum, fileNum+1): + startTime = time.time() fileAddress = projectAddress + "\\" + str(i) + "\\" fileList = glob.glob(fileAddress + "*." + fileExtension) if not os.path.exists(outputAddress): @@ -185,10 +128,10 @@ def imageSetStitchWithMutiple(self, projectAddress, outputAddress, fileNum, cacu # cv2.imwrite(outputAddress + "\\" + outputName + "." + outputfileExtension, result[0]) else: for j in range(0, len(result)): - cv2.imwrite( - outputAddress + "\\stitching_result_" + str(i) + "_" + str(j + 1) + "." + outputfileExtension, - result[j]) + cv2.imwrite(outputAddress + "\\stitching_result_" + str(i) + "_" + str(j+1) + "." + outputfileExtension, result[j]) # cv2.imwrite(outputAddress + "\\" + outputName + "_" + str(j + 1) + "." + outputfileExtension,result[j]) + endTime = time.time() + print("Time Consuming for " + fileAddress + " is " + str(endTime - startTime)) def calculateOffsetForPhaseCorrleate(self, dirAddress): (dir1, dir2) = dirAddress @@ -218,26 +161,24 @@ def calculateOffsetForPhaseCorrleateIncre(self, images): (imageA, imageB) = images offset = [0, 0] status = False - maxI = (np.floor(0.5 / self.roiRatio) + 1).astype(int) + 1 + maxI = (np.floor(0.5 / self.roiRatio) + 1).astype(int)+ 1 iniDirection = self.direction localDirection = iniDirection for i in range(1, maxI): - self.printAndWrite(" i=" + str(i) + " and maxI=" + str(maxI)) - while (True): + # self.printAndWrite(" i=" + str(i) + " and maxI="+str(maxI)) + while(True): # get the roi region of images - self.printAndWrite(" localDirection=" + str(localDirection)) - roiImageA = self.getROIRegionForIncreMethod(imageA, direction=localDirection, order="first", - searchRatio=i * self.roiRatio) - roiImageB = self.getROIRegionForIncreMethod(imageB, direction=localDirection, order="second", - searchRatio=i * self.roiRatio) + # self.printAndWrite(" localDirection=" + str(localDirection)) + roiImageA = self.getROIRegionForIncreMethod(imageA, direction=localDirection, order="first", searchRatio = i * self.roiRatio) + roiImageB = self.getROIRegionForIncreMethod(imageB, direction=localDirection, order="second", searchRatio = i * self.roiRatio) # hann = cv2.createHanningWindow(winSize=(roiImageA.shape[1], roiImageA.shape[0]), type=5) # (offsetTemp, response) = cv2.phaseCorrelate(np.float32(roiImageA), np.float32(roiImageB), window=hann) (offsetTemp, response) = cv2.phaseCorrelate(np.float64(roiImageA), np.float64(roiImageB)) offset[0] = np.int(offsetTemp[1]) offset[1] = np.int(offsetTemp[0]) - self.printAndWrite("offset: " + str(offset)) - self.printAndWrite("respnse: " + str(response)) + # self.printAndWrite("offset: " + str(offset)) + # self.printAndWrite("respnse: " + str(response)) if response > self.phaseResponseThreshold: status = True if status == True: @@ -285,41 +226,25 @@ def calculateOffsetForFeatureSearch(self, images): imageB = cv2.equalizeHist(imageB) # get the feature points if self.tempImageFeature.isBreak == True: - if 
self.isGPUAvailable == True: - myGpuSurf.matchFeaturesBySurf(imageA, imageB, self.keypointsRatio, self.searchRatio) - kpsA = self.npToListForKeypoints(myGpuSurf.getImageAKeyPoints()) - featuresA = myGpuSurf.getImageADescriptors() - kpsB = self.npToListForKeypoints(myGpuSurf.getImageBKeyPoints()) - featuresB = myGpuSurf.getImageBDescriptors() - else: - (kpsA, featuresA) = self.detectAndDescribe(imageA, featureMethod=self.featureMethod) - (kpsB, featuresB) = self.detectAndDescribe(imageB, featureMethod=self.featureMethod) + (kpsA, featuresA) = self.detectAndDescribe(imageA, featureMethod=self.featureMethod) + (kpsB, featuresB) = self.detectAndDescribe(imageB, featureMethod=self.featureMethod) self.tempImageFeature.isBreak = False self.tempImageFeature.kps = kpsB self.tempImageFeature.feature = featuresB else: kpsA = self.tempImageFeature.kps featuresA = self.tempImageFeature.feature - if self.isGPUAvailable == True: - myGpuSurf.matchFeaturesBySurf(imageA, imageB, self.searchRatio) - kpsB = self.npToListForKeypoints(myGpuSurf.getImageBKeyPoints()) - featuresB = myGpuSurf.getImageBDescriptors() - else: - (kpsB, featuresB) = self.detectAndDescribe(imageB, featureMethod=self.featureMethod) + (kpsB, featuresB) = self.detectAndDescribe(imageB, featureMethod=self.featureMethod) self.tempImageFeature.isBreak = False self.tempImageFeature.kps = kpsB self.tempImageFeature.feature = featuresB if featuresA is not None and featuresB is not None: - if self.isGPUAvailable == True: - matches = self.npToListForMatches(myGpuSurf.getGoodMatches()) - else: - matches = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, self.searchRatio) + matches = self.matchDescriptors(featuresA, featuresB) # match all the feature points if self.offsetCaculate == "mode": - (status, offset) = self.getOffsetByMode(kpsA, kpsB, matches, offsetEvaluate=self.offsetEvaluate) + (status, offset) = self.getOffsetByMode(kpsA, kpsB, matches, offsetEvaluate = self.offsetEvaluate) elif self.offsetCaculate == "ransac": - (status, offset, adjustH) = self.getOffsetByRansac(kpsA, kpsB, matches, - offsetEvaluate=self.offsetEvaluate) + (status, offset, adjustH) = self.getOffsetByRansac(kpsA, kpsB, matches, offsetEvaluate = self.offsetEvaluate) if status == False: self.tempImageFeature.isBreak = True return (status, " The two images can not match") @@ -341,48 +266,35 @@ def calculateOffsetForFeatureSearchIncre(self, images): (imageA, imageB) = images offset = [0, 0] status = False - maxI = (np.floor(0.5 / self.roiRatio) + 1).astype(int) + 1 + maxI = (np.floor(0.5 / self.roiRatio) + 1).astype(int)+ 1 iniDirection = self.direction localDirection = iniDirection for i in range(1, maxI): - self.printAndWrite(" i=" + str(i) + " and maxI=" + str(maxI)) - while (True): + # self.printAndWrite(" i=" + str(i) + " and maxI="+str(maxI)) + while(True): # get the roi region of images - self.printAndWrite(" localDirection=" + str(localDirection)) - roiImageA = self.getROIRegionForIncreMethod(imageA, direction=localDirection, order="first", - searchRatio=i * self.roiRatio) - roiImageB = self.getROIRegionForIncreMethod(imageB, direction=localDirection, order="second", - searchRatio=i * self.roiRatio) + # self.printAndWrite(" localDirection=" + str(localDirection)) + roiImageA = self.getROIRegionForIncreMethod(imageA, direction=localDirection, order="first", searchRatio = i * self.roiRatio) + roiImageB = self.getROIRegionForIncreMethod(imageB, direction=localDirection, order="second", searchRatio = i * self.roiRatio) if self.isEnhance == True: if self.isClahe == 
True: - clahe = cv2.createCLAHE(clipLimit=self.clipLimit, tileGridSize=(self.tileSize, self.tileSize)) + clahe = cv2.createCLAHE(clipLimit=self.clipLimit,tileGridSize=(self.tileSize, self.tileSize)) roiImageA = clahe.apply(roiImageA) roiImageB = clahe.apply(roiImageB) elif self.isClahe == False: roiImageA = cv2.equalizeHist(roiImageA) roiImageB = cv2.equalizeHist(roiImageB) # get the feature points - if self.isGPUAvailable == True: - myGpuSurf.matchFeaturesBySurf(roiImageA, roiImageB, self.keypointsRatio, self.searchRatio) - kpsA = self.npToListForKeypoints(myGpuSurf.getImageAKeyPoints()) - featuresA = myGpuSurf.getImageADescriptors() - kpsB = self.npToListForKeypoints(myGpuSurf.getImageBKeyPoints()) - featuresB = myGpuSurf.getImageBDescriptors() - else: - (kpsA, featuresA) = self.detectAndDescribe(roiImageA, featureMethod=self.featureMethod) - (kpsB, featuresB) = self.detectAndDescribe(roiImageB, featureMethod=self.featureMethod) + kpsA, featuresA = self.detectAndDescribe(roiImageA, featureMethod=self.featureMethod) + kpsB, featuresB = self.detectAndDescribe(roiImageB, featureMethod=self.featureMethod) if featuresA is not None and featuresB is not None: - if self.isGPUAvailable == True: - matches = self.npToListForMatches(myGpuSurf.getGoodMatches()) - else: - matches = self.matchKeypoints(kpsA, kpsB, featuresA, featuresB, self.searchRatio) + matches = self.matchDescriptors(featuresA, featuresB) # match all the feature points if self.offsetCaculate == "mode": - (status, offset) = self.getOffsetByMode(kpsA, kpsB, matches, offsetEvaluate=self.offsetEvaluate) + (status, offset) = self.getOffsetByMode(kpsA, kpsB, matches, offsetEvaluate = self.offsetEvaluate) elif self.offsetCaculate == "ransac": - (status, offset, adjustH) = self.getOffsetByRansac(kpsA, kpsB, matches, - offsetEvaluate=self.offsetEvaluate) + (status, offset, adjustH) = self.getOffsetByRansac(kpsA, kpsB, matches, offsetEvaluate = self.offsetEvaluate) if status == True: break else: @@ -406,72 +318,103 @@ def calculateOffsetForFeatureSearchIncre(self, images): self.printAndWrite(" The offset of stitching: dx is " + str(offset[0]) + " dy is " + str(offset[1])) return (status, offset) - def getStitchByOffset(self, images, offset): - (imageA, imageB) = images - (hA, wA) = imageA.shape[:2] - (hB, wB) = imageB.shape[:2] - dx = offset[0]; - dy = offset[1] - mask = np.zeros(imageB.shape, dtype=np.uint8) + def getStitchByOffset(self, fileList, offsetListOrigin): + ''' + 通过偏移量列表和文件列表得到最终的拼接结果 + :param fileList: 图像列表 + :param offsetListOrigin: 偏移量列表 + :return: ndaarry,图像 + ''' + # 如果你不细心,不要碰这段代码 + # 已优化到根据指针来控制拼接,CPU下最快了 + dxSum = dySum = 0 + imageList = [] + imageList.append(cv2.imread(fileList[0], 0)) + resultRow = imageList[0].shape[0] # 拼接最终结果的横轴长度,先赋值第一个图像的横轴 + resultCol = imageList[0].shape[1] # 拼接最终结果的纵轴长度,先赋值第一个图像的纵轴 + offsetListOrigin.insert(0, [0, 0]) # 增加第一张图像相对于最终结果的原点的偏移量 + + rangeX = [[0,0] for x in range(len(offsetListOrigin))] # 主要用于记录X方向最大最小边界 + rangeY = [[0, 0] for x in range(len(offsetListOrigin))] # 主要用于记录Y方向最大最小边界 + offsetList = offsetListOrigin.copy() + rangeX[0][1] = imageList[0].shape[0] + rangeY[0][1] = imageList[0].shape[1] - if dx >= 0 and dy >= 0: - # The first image is located at the left top, the second image located at the right bottom - stitchImage = np.zeros((max(hA, dx + hB), max(dy + wB, wA)), dtype=np.int) - 1 - roi_ltx = dx; - roi_lty = dy - roi_rbx = min(dx + hB, hA); - roi_rby = min(dy + wB, wA) - stitchImage[0: hA, 0:wA] = imageA - roiImageRegionA = stitchImage[roi_ltx: roi_rbx, roi_lty: 
roi_rby].copy() - stitchImage[dx: dx + hB, dy: dy + wB] = imageB - roiImageRegionB = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - elif dx >= 0 and dy < 0: - # The first image is located at the right top, the second image located at the left bottom - stitchImage = np.zeros((max(hA, dx + hB), -dy + wA), dtype=np.int) - 1 - roi_ltx = dx; - roi_lty = -dy - roi_rbx = hA; - roi_rby = min(-dy + wA, wB) - stitchImage[0: hA, -dy:-dy + wA] = imageA - roiImageRegionA = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - stitchImage[dx: dx + hB, 0: wB] = imageB - roiImageRegionB = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - elif dx < 0 and dy >= 0: - # The first image is located at the left bottom, the second image located at the right top - stitchImage = np.zeros((max(-dx + hA, hB), max(dy + wB, wA)), dtype=np.int) - 1 - roi_ltx = -dx; - roi_lty = dy - roi_rbx = min(-dx + hA, hB); - roi_rby = min(dy + wB, wA) - stitchImage[-dx: -dx + hA, 0: wA] = imageA - roiImageRegionA = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - stitchImage[0: hB, dy: dy + wB] = imageB - roiImageRegionB = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - elif dx < 0 and dy < 0: - # The first image is located at the right bottom, the second image located at the left top - stitchImage = np.zeros((-dx + hA, -dy + wA), dtype=np.int) - 1 - roi_ltx = -dx; - roi_lty = - dy - roi_rbx = hB; - roi_rby = wB - stitchImage[-dx: -dx + hA, -dy: -dy + wA] = imageA - roiImageRegionA = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - stitchImage[0: hB, 0: wB] = imageB - roiImageRegionB = stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby].copy() - # cv2.imshow("roiImageRegionA", roiImageRegionA) - # cv2.imshow("roiImageRegionB", roiImageRegionB) - # cv2.waitKey(0) - fuseRegion = self.fuseImage([roiImageRegionA, roiImageRegionB], dx, dy) - stitchImage[roi_ltx: roi_rbx, roi_lty: roi_rby] = fuseRegion.copy() - stitchImage[stitchImage == -1] = 0 - stitchImage = stitchImage.astype(np.uint8) - return (stitchImage, fuseRegion, roiImageRegionA, roiImageRegionB) + for i in range(1, len(offsetList)): + # self.printAndWrite(" stitching " + str(fileList[i])) + # 适用于流形拼接的校正,并更新最终图像大小 + tempImage = cv2.imread(fileList[i], 0) + dxSum = dxSum + offsetList[i][0] + dySum = dySum + offsetList[i][1] + # self.printAndWrite(" The dxSum is " + str(dxSum) + " and the dySum is " + str(dySum)) + if dxSum <= 0: + for j in range(0, i): + offsetList[j][0] = offsetList[j][0] + abs(dxSum) + rangeX[j][0] = rangeX[j][0] + abs(dxSum) + rangeX[j][1] = rangeX[j][1] + abs(dxSum) + resultRow = resultRow + abs(dxSum) + rangeX[i][1] = resultRow + dxSum = rangeX[i][0] = offsetList[i][0] = 0 + else: + offsetList[i][0] = dxSum + resultRow = max(resultRow, dxSum + tempImage.shape[0]) + rangeX[i][1] = resultRow + if dySum <= 0: + for j in range(0, i): + offsetList[j][1] = offsetList[j][1] + abs(dySum) + rangeY[j][0] = rangeY[j][0] + abs(dySum) + rangeY[j][1] = rangeY[j][1] + abs(dySum) + resultCol = resultCol + abs(dySum) + rangeY[i][1] = resultCol + dySum = rangeY[i][0] = offsetList[i][1] = 0 + else: + offsetList[i][1] = dySum + resultCol = max(resultCol, dySum + tempImage.shape[1]) + rangeY[i][1] = resultCol + imageList.append(tempImage) + stitchResult = np.zeros((resultRow, resultCol), np.int) - 1 + self.printAndWrite(" The rectified offsetList is " + str(offsetList)) + # 如上算出各个图像相对于原点偏移量,并最终计算出输出图像大小,并构造矩阵,如下开始赋值 + for i in range(0, len(offsetList)): + self.printAndWrite(" stitching " + str(fileList[i])) + if i == 0: + 
stitchResult[offsetList[0][0]: offsetList[0][0] + imageList[0].shape[0], offsetList[0][1]: offsetList[0][1] + imageList[0].shape[1]] = imageList[0]
+            else:
+                if self.fuseMethod == "notFuse":
+                    # no fusion: simply overwrite
+                    # self.printAndWrite("Stitch " + str(i+1) + "th, the roi_ltx is " + str(offsetList[i][0]) + " and the roi_lty is " + str(offsetList[i][1]))
+                    stitchResult[offsetList[i][0]: offsetList[i][0] + imageList[i].shape[0], offsetList[i][1]: offsetList[i][1] + imageList[i].shape[1]] = imageList[i]
+                else:
+                    # for fusion methods: cut out roiA and roiB for the image fusion step
+                    minOccupyX = rangeX[i-1][0]
+                    maxOccupyX = rangeX[i-1][1]
+                    minOccupyY = rangeY[i-1][0]
+                    maxOccupyY = rangeY[i-1][1]
+                    # self.printAndWrite("Stitch " + str(i + 1) + "th, the offsetList[i][0] is " + str(
+                    #     offsetList[i][0]) + " and the offsetList[i][1] is " + str(offsetList[i][1]))
+                    # self.printAndWrite("Stitch " + str(i + 1) + "th, the minOccupyX is " + str(
+                    #     minOccupyX) + " and the maxOccupyX is " + str(maxOccupyX) + " and the minOccupyY is " + str(
+                    #     minOccupyY) + " and the maxOccupyY is " + str(maxOccupyY))
+                    roi_ltx = max(offsetList[i][0], minOccupyX)
+                    roi_lty = max(offsetList[i][1], minOccupyY)
+                    roi_rbx = min(offsetList[i][0] + imageList[i].shape[0], maxOccupyX)
+                    roi_rby = min(offsetList[i][1] + imageList[i].shape[1], maxOccupyY)
+                    # self.printAndWrite("Stitch " + str(i + 1) + "th, the roi_ltx is " + str(
+                    #     roi_ltx) + " and the roi_lty is " + str(roi_lty) + " and the roi_rbx is " + str(
+                    #     roi_rbx) + " and the roi_rby is " + str(roi_rby))
+                    roiImageRegionA = stitchResult[roi_ltx:roi_rbx, roi_lty:roi_rby].copy()
+                    stitchResult[offsetList[i][0]: offsetList[i][0] + imageList[i].shape[0], offsetList[i][1]: offsetList[i][1] + imageList[i].shape[1]] = imageList[i]
+                    roiImageRegionB = stitchResult[roi_ltx:roi_rbx, roi_lty:roi_rby].copy()
+                    stitchResult[roi_ltx:roi_rbx, roi_lty:roi_rby] = self.fuseImage([roiImageRegionA, roiImageRegionB], offsetListOrigin[i][0], offsetListOrigin[i][1])
+        stitchResult[stitchResult == -1] = 0
+        return stitchResult.astype(np.uint8)
 
     def fuseImage(self, images, dx, dy):
         (imageA, imageB) = images
-        # cv2.imshow("A", imageA)
-        # cv2.imshow("B", imageB)
-        # cv2.waitKey(0)
+        # cv2.namedWindow("A", 0)
+        # cv2.namedWindow("B", 0)
+        # cv2.imshow("A", imageA.astype(np.uint8))
+        # cv2.imshow("B", imageB.astype(np.uint8))
         fuseRegion = np.zeros(imageA.shape, np.uint8)
         # imageA[imageA == 0] = imageB[imageA == 0]
         # imageB[imageB == 0] = imageA[imageB == 0]
@@ -514,8 +457,8 @@ def fuseImage(self, images, dx, dy):
         return fuseRegion
 
-if __name__ == "__main__":
+if __name__=="__main__":
     stitcher = Stitcher()
     imageA = cv2.imread(".\\images\\dendriticCrystal\\1\\1-044.jpg", 0)
     imageB = cv2.imread(".\\images\\dendriticCrystal\\1\\1-045.jpg", 0)
-    offset = stitcher.calculateOffsetForFeatureSearchIncre([imageA, imageB])
+    offset = stitcher.calculateOffsetForFeatureSearchIncre([imageA, imageB])
\ No newline at end of file
diff --git a/appendix/myGpuFeatures.cpp b/appendix/myGpuFeatures.cpp
new file mode 100644
index 0000000..a32a1c6
--- /dev/null
+++ b/appendix/myGpuFeatures.cpp
@@ -0,0 +1,209 @@
+#include "cuda_runtime.h"
+#include "device_launch_parameters.h"
+#include "conversion.h"
+#include "opencv2/core/cuda.hpp"
+#include "opencv2/cudafeatures2d.hpp"
+#include "opencv2/xfeatures2d/cuda.hpp"
+#include <boost/python.hpp>
+#include <vector>
+
+using namespace std;
+using namespace cv::cuda;
+using namespace cv;
+namespace py = boost::python;
+
+
+Mat tranVectorKeyPointsDescriptorsToMat(vector<KeyPoint> Vkeypoints, Mat Mdescriptors)
+{
+	int keyPointsNum = Vkeypoints.size();
+	// int descriptorsDim = Vdescriptors.size() / keyPointsNum;
+	int descriptorsDim = Mdescriptors.cols;
+
+	int sz[3] = { keyPointsNum, descriptorsDim, 2 };
+	Mat keyPointsDescriptors(3, sz, CV_32F, Scalar(0.00));
+
+	for (int i = 0; i < keyPointsNum; i++)
+	{
+		float* data = keyPointsDescriptors.ptr<float>(i);
+		data[0] = Vkeypoints[i].pt.x;
+		data[2] = Vkeypoints[i].pt.y;
+	}
+
+	//for (int i = 0; i < keyPointsNum; i++)
+	//{
+	//	float* data = keyPointsDescriptors.ptr<float>(i);
+	//	for (int j = 0; j < descriptorsDim; j++)
+	//	{
+	//		data[j*2 + 1] = Vdescriptors[j];
+	//	}
+	//}
+
+	for (int i = 0; i < keyPointsNum; i++)
+	{
+		float* dataAccept = keyPointsDescriptors.ptr<float>(i);
+		float* dataSend = Mdescriptors.ptr<float>(i);
+		for (int j = 0; j < descriptorsDim; j++)
+		{
+			dataAccept[j*2 + 1] = dataSend[j];
+		}
+	}
+	return keyPointsDescriptors;
+}
+
+Mat tranVectorDMatchToMat(vector<DMatch> VDMatch)
+{
+	int num = VDMatch.size();
+	Mat dMatchMat = Mat(num, 2, CV_32S);
+	for (int i = 0; i < num; i++)
+	{
+		int* data = dMatchMat.ptr<int>(i);
+		data[0] = VDMatch[i].trainIdx;
+		data[1] = VDMatch[i].queryIdx;
+	}
+	return dMatchMat;
+
+}
+
+PyObject* detectAndDescribeBySurf(PyObject *h_imageAPtr, float hessianThreshold, int nOctaves, int nOctaveLayers, bool isExtended, float keypointsRatio, bool isUpright)
+{
+	// allocate storage on the GPU
+	GpuMat d_image, d_keypoints, d_descriptors;
+
+	// convert the image passed through the DLL to a Mat and upload it to the GPU
+	NDArrayConverter cvt;
+	d_image.upload(cvt.toMat(h_imageAPtr));
+	CV_Assert(!d_image.empty());
+
+	SURF_CUDA surf = SURF_CUDA(hessianThreshold, nOctaves, nOctaveLayers, isExtended, keypointsRatio, isUpright);
+	surf(d_image, GpuMat(), d_keypoints, d_descriptors);
+	// downloading results from GPU to CPU
+	vector<KeyPoint> h_keypoints;
+	Mat h_descriptors;
+
+	surf.downloadKeypoints(d_keypoints, h_keypoints);
+	d_descriptors.download(h_descriptors);
+
+	// print only for testing
+	//cout << " FOUND " << d_keypoints.cols << " keypoints on image" << endl;
+	//for (int i = 0; i < 64; i++)
+	//{
+	//	cout << h_descriptors.at<float>(0, i) << ",";
+	//}
+	//cout << endl;
+	//cout << h_keypoints[0].pt.x << "," << h_keypoints[0].pt.y << endl;
+	//cout << endl;
+
+	// releasing
+	d_image.release();
+	d_keypoints.release();
+	d_descriptors.release();
+	surf.releaseMemory();
+
+	// return the result through the DLL as an ndarray
+	return cvt.toNDArray(tranVectorKeyPointsDescriptorsToMat(h_keypoints, h_descriptors));
+}
+
+PyObject* detectAndDescribeByOrb(PyObject *h_imageAPtr, int nFeatures, float scaleFactor, int nlevels, int edgeThreshold, int firstLevel, int WTA_K, int scoreType, int patchSize, int fastThreshold, bool blurForDescriptor)
+{
+	// allocate storage on the GPU
+	GpuMat d_image, d_keypoints, d_descriptors, d_descriptors_32F;
+
+	// convert the image passed through the DLL to a Mat and upload it to the GPU
+	NDArrayConverter cvt;
+	d_image.upload(cvt.toMat(h_imageAPtr));
+	CV_Assert(!d_image.empty());
+	Ptr<cuda::ORB> d_orb = cuda::ORB::create(nFeatures, scaleFactor, nlevels, edgeThreshold, firstLevel, WTA_K, 0, patchSize, fastThreshold, blurForDescriptor);
+
+	d_orb->detectAndComputeAsync(d_image, GpuMat(), d_keypoints, d_descriptors);
+	d_descriptors.convertTo(d_descriptors_32F, CV_32F);
+
+	vector<KeyPoint> h_keypoints;
+	Mat h_descriptors;
+
+	d_orb->convert(d_keypoints, h_keypoints);
+	d_descriptors_32F.download(h_descriptors);
+
+	// print only for testing
+	//cout << " FOUND " << d_keypoints.cols << " keypoints on image" << endl;
+	//cout << " Des shape" << h_descriptors.rows << " " << h_descriptors.cols << endl;
+	//for (int i = 0; i < h_descriptors.cols; i++)
+	//{
+	//	cout << h_descriptors.at<float>(0, i) << ",";
+	//}
+	//cout << endl;
+	//cout << h_keypoints[0].pt.x << "," << h_keypoints[0].pt.y << endl;
+	//cout << endl;
+
+
+	// releasing
+	d_image.release();
+	d_keypoints.release();
+	d_descriptors.release();
+	d_descriptors_32F.release();
+
+	// return the result through the DLL as an ndarray
+	return cvt.toNDArray(tranVectorKeyPointsDescriptorsToMat(h_keypoints, h_descriptors));
+}
+
+PyObject* matchDescriptors(PyObject *h_descriptorsAPtr, PyObject *h_descriptorsBPtr, int featureType, float param)
+{
+	GpuMat d_descriptorsA, d_descriptorsB;
+
+	NDArrayConverter cvt;
+
+	d_descriptorsA.upload(cvt.toMat(h_descriptorsAPtr));
+	d_descriptorsB.upload(cvt.toMat(h_descriptorsBPtr));
+
+	Ptr<cv::cuda::DescriptorMatcher> matcher;
+	vector<DMatch> h_good_matches;
+
+	if(featureType == 1 || featureType == 2)
+	{
+		// sift/surf mode: L2 distance, keep a match when the nearest distance beats the second-nearest by the given ratio
+		matcher = cv::cuda::DescriptorMatcher::createBFMatcher(cv::NORM_L2);
+		vector<vector<DMatch>> d_matches;
+		matcher->knnMatch(d_descriptorsA, d_descriptorsB, d_matches, 2);
+		for (int i = 0; i < d_matches.size(); i++)
+		{
+			if (d_matches[i][0].distance < param * d_matches[i][1].distance)
+			{
+				h_good_matches.push_back(d_matches[i][0]);
+			}
+		}
+	}
+	else if (featureType == 3)
+	{
+		// orb mode: Hamming distance, keep a match when its distance is below the given threshold
+		vector<DMatch> d_matches;
+		matcher = cv::cuda::DescriptorMatcher::createBFMatcher(cv::NORM_HAMMING);
+		matcher->match(d_descriptorsA, d_descriptorsB, d_matches, GpuMat());
+		for (int i = 0; i < d_matches.size(); i++)
+		{
+			if (d_matches[i].distance < param)
+			{
+				h_good_matches.push_back(d_matches[i]);
+			}
+		}
+	}
+
+	// releasing
+	d_descriptorsA.release(); d_descriptorsB.release();
+	matcher.release();
+
+	// return the result through the DLL as an ndarray
+	return cvt.toNDArray(tranVectorDMatchToMat(h_good_matches));
+}
+
+static void init()
+{
+	Py_Initialize();
+	import_array();
+}
+
+BOOST_PYTHON_MODULE(myGpuFeatures)
+{
+	init();
+	py::def("detectAndDescribeBySurf", detectAndDescribeBySurf);
+	py::def("detectAndDescribeByOrb", detectAndDescribeByOrb);
+	py::def("matchDescriptors", matchDescriptors);
+}
\ No newline at end of file
diff --git a/myGpuFeatures.pyd b/myGpuFeatures.pyd
new file mode 100644
index 0000000..1d6c6b6
Binary files /dev/null and b/myGpuFeatures.pyd differ
"featureSearchIncre"; - Stitcher.direction = 1; - Stitcher.directIncre = 1; - projectAddress = "images\\dendriticCrystal" - outputAddress = "result\\" + method + "\\dendriticCrystal" + str.capitalize(Stitcher.fuseMethod) + "\\" - stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 12, stitcher.calculateOffsetForFeatureSearchIncre, - startNum=12, fileExtension="jpg", outputfileExtension="jpg") + # method = "featureSearchIncre"; Stitcher.direction = 1; Stitcher.directIncre = 1; + # projectAddress = "images\\dendriticCrystal" + # outputAddress = "result\\" + method + "\\dendriticCrystal" + str.capitalize(Stitcher.fuseMethod) + "\\" + # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 11, stitcher.calculateOffsetForFeatureSearchIncre, + # startNum=1, fileExtension="jpg", outputfileExtension="jpg") # Stitcher.featureMethod = "surf"; Stitcher.searchRatio = 0.95; Stitcher.offsetEvaluate = 3; # method = "featureSearchIncre"; Stitcher.direction = 1; Stitcher.directIncre = 1; @@ -38,24 +37,25 @@ def stitchWithFeatureIncre(): # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 1, stitcher.calculateOffsetForFeatureSearchIncre, # startNum=1, fileExtension="jpg", outputfileExtension="jpg") - # method = "featureSearchIncre"; Stitcher.direction = 4; Stitcher.directIncre = 0; - # projectAddress = "images\\zirconLarge" - # outputAddress = "result\\" + method + "\\zirconLarge" + str.capitalize(Stitcher.fuseMethod) + "\\" - # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 97, stitcher.calculateOffsetForFeatureSearchIncre, - # startNum=1, fileExtension="jpg", outputfileExtension="png") + method = "featureSearchIncre"; Stitcher.direction = 4; Stitcher.directIncre = 0; + projectAddress = "images\\zirconLarge" + outputAddress = "result\\" + method + "\\zirconLarge" + str.capitalize(Stitcher.fuseMethod) + "\\" + stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 97, stitcher.calculateOffsetForFeatureSearchIncre, + startNum=2, fileExtension="jpg", outputfileExtension="png") # method = "featureSearch"; Stitcher.direction = 4; Stitcher.directIncre = 0; - # projectAddress = "images\\zirconLargeResized_8_INTER_AREA" - # outputAddress = "result\\" + method + "\\zirconLargeResized_8_INTER_AREA" + str.capitalize(Stitcher.fuseMethod) + "\\" + # projectAddress = "images\\zirconLargeResized_4_INTER_AREA" + # outputAddress = "result\\" + method + "\\zirconLargeResized_4_INTER_AREA" + str.capitalize(Stitcher.fuseMethod) + "\\" # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 97, stitcher.calculateOffsetForFeatureSearch, # startNum=1, fileExtension="jpg", outputfileExtension="jpg") # method = "featureSearch"; Stitcher.direction = 4; Stitcher.directIncre = 0; # projectAddress = "images\\zirconSmall" # outputAddress = "result\\" + method + "\\zirconSmall" + str.capitalize(Stitcher.fuseMethod) + "\\" - # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 59, stitcher.calculateOffsetForFeatureSearch, + # stitcher.imageSetStitchWithMutiple(projectAddress, outputAddress, 194, stitcher.calculateOffsetForFeatureSearch, # startNum=1, fileExtension="jpg", outputfileExtension="jpg") - + endTime = time.time() + print("Time Consuming = " + str(endTime - startTime)) def stitchWithPhase(): method = "phaseCorrelate" @@ -67,6 +67,5 @@ def stitchWithPhase(): startNum=43, fileExtension="jpg", outputfileExtension="jpg") Stitcher.phase.shutdown() - -if __name__ == "__main__": - stitchWithFeatureIncre() +if __name__=="__main__": + 
stitchWithFeature() \ No newline at end of file
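For reference, the mode-based offset voting that getOffsetByMode implements can be sketched compactly with collections.Counter. This is a simplified equivalent for illustration only, not the repository's code:

from collections import Counter

def offset_by_mode(kpsA, kpsB, matches, offset_evaluate=3):
    # vote for the most common integer (dx, dy) among all matched keypoint pairs
    votes = Counter()
    for train_idx, query_idx in matches:
        dx = int(kpsA[query_idx][1] - kpsB[train_idx][1])
        dy = int(kpsA[query_idx][0] - kpsB[train_idx][0])
        if (dx, dy) != (0, 0):  # (0, 0) pairs are discarded, as in getOffsetByMode
            votes[(dx, dy)] += 1
    if not votes:
        return False, [0, 0]
    (dx, dy), num = votes.most_common(1)[0]
    # the offset is trusted only if enough matches agree on it
    return num >= offset_evaluate, [dx, dy]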