├── README.md
├── blend.py
├── image_stitching.py
├── images
│   ├── 1.jpg
│   ├── 1Hill.JPG
│   ├── 2.jpg
│   ├── 2Hill.JPG
│   ├── 3.jpg
│   ├── 3Hill.JPG
│   ├── S1.jpg
│   ├── S2.jpg
│   ├── S3.jpg
│   ├── S5.jpg
│   ├── S6.jpg
│   ├── trees_000.jpg
│   ├── trees_001.jpg
│   ├── trees_002.jpg
│   └── trees_003.jpg
├── matchers.py
├── results
│   ├── test1.jpg
│   ├── test2.jpg
│   ├── test3.jpg
│   └── test4.jpg
└── txtlists
    ├── files1.txt
    ├── files2.txt
    ├── files3.txt
    └── files4.txt

/README.md:
--------------------------------------------------------------------------------
# python-Multiple-Image-Stitching

This is a simple implementation of multiple image stitching in Python. Based on [Multiple Image Stitching in Python](https://github.com/kushalvyas/Python-Multiple-Image-Stitching), this repository makes two small improvements:

1) Adjust the final canvas size to remove the excess black background;
2) Use linear blending instead of average blending.

## Results

![test1](results/test1.jpg)
![test2](results/test2.jpg)
![test3](results/test3.jpg)
![test4](results/test4.jpg)

--------------------------------------------------------------------------------
/blend.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import matplotlib.pyplot as plt


def blend_linear(warp_img1, warp_img2):
    img1 = warp_img1
    img2 = warp_img2

    # masks of the pixels actually covered by each warped image
    img1mask = ((img1[:, :, 0] | img1[:, :, 1] | img1[:, :, 2]) > 0)
    img2mask = ((img2[:, :, 0] | img2[:, :, 1] | img2[:, :, 2]) > 0)

    # centroids of the two footprints
    r, c = np.nonzero(img1mask)
    out_1_center = [np.mean(r), np.mean(c)]

    r, c = np.nonzero(img2mask)
    out_2_center = [np.mean(r), np.mean(c)]

    vec = np.array(out_2_center) - np.array(out_1_center)
    intsct_mask = img1mask & img2mask
    ## plt.gray()
    ## plt.subplot(311), plt.imshow(img1mask)
    ## plt.plot(out_1_center[1], out_1_center[0], 'ob')
    ## plt.subplot(312), plt.imshow(img2mask)
    ## plt.plot(out_2_center[1], out_2_center[0], 'ob')
    ## plt.subplot(313), plt.imshow(intsct_mask)
    ## plt.show()

    # in the overlap, weight each pixel by its projection onto the line joining
    # the two centroids, normalised to [0, 1]
    r, c = np.nonzero(intsct_mask)
    out_wmask = np.zeros(img2mask.shape[:2])
    proj_val = (r - out_1_center[0]) * vec[0] + (c - out_1_center[1]) * vec[1]
    out_wmask[r, c] = (proj_val - (min(proj_val) + 1e-3)) / \
                      ((max(proj_val) - 1e-3) - (min(proj_val) + 1e-3))

    # blending masks: img1 only, overlap weights, img2 only
    mask1 = img1mask & (out_wmask == 0)
    mask2 = out_wmask
    mask3 = img2mask & (out_wmask == 0)
    ## plt.gray()
    ## plt.subplot(311), plt.imshow(mask1), plt.axis('off')
    ## plt.subplot(312), plt.imshow(mask2), plt.axis('off')
    ## plt.subplot(313), plt.imshow(mask3), plt.axis('off')
    ## plt.savefig('blend.jpg')
    ## plt.show()

    out = np.zeros(img1.shape)
    for ch in range(3):
        out[:, :, ch] = img1[:, :, ch] * (mask1 + (1 - mask2) * (mask2 != 0)) + \
                        img2[:, :, ch] * (mask2 + mask3)
    return np.uint8(out)


if __name__ == "__main__":
    img1 = cv2.imread("warped_img1.jpg")
    img2 = cv2.imread("warped_img2.jpg")
    out = blend_linear(img1, img2)
    # cv2.imwrite("result.jpg", out)

--------------------------------------------------------------------------------
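Note: the overlap weighting in blend_linear can be looked at in isolation. Each overlap pixel is projected onto the vector joining the two footprint centroids, and the projection is rescaled to [0, 1], so the second image's contribution ramps up smoothly towards its own centre. A small self-contained sketch of that ramp, using made-up rectangular footprints rather than repository data:

# Toy illustration of the overlap weighting used by blend_linear.
# The two 100x200 footprints below are hypothetical and overlap in columns 80-119.
import numpy as np

mask1 = np.zeros((100, 200), bool)
mask1[:, :120] = True                                   # footprint of image 1
mask2 = np.zeros((100, 200), bool)
mask2[:, 80:] = True                                    # footprint of image 2

c1 = np.array([np.mean(a) for a in np.nonzero(mask1)])  # centroid of footprint 1
c2 = np.array([np.mean(a) for a in np.nonzero(mask2)])  # centroid of footprint 2
vec = c2 - c1

r, c = np.nonzero(mask1 & mask2)
proj = (r - c1[0]) * vec[0] + (c - c1[1]) * vec[1]      # projection onto the c1->c2 line
w = (proj - proj.min()) / (proj.max() - proj.min())     # 0 near image 1, 1 near image 2
print(w.min(), w.max())                                 # 0.0 1.0 -- the cross-fade ramp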
/image_stitching.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import sys
from matchers import SIFTMatcher
import time
import blend


class Stitch:
    def __init__(self, args):
        self.path = args
        with open(self.path, 'r') as fp:
            filenames = [each.rstrip('\r\n') for each in fp.readlines()]
        # filenames = args
        print(filenames)
        # self.images = [cv2.resize(cv2.imread(each), (480, 320)) for each in filenames]
        self.images = [cv2.imread(each) for each in filenames]
        self.count = len(self.images)
        self.left_list, self.right_list, self.center_im = [], [], None
        self.matcher_obj = SIFTMatcher()
        self.prepare_lists()

    def prepare_lists(self):
        print("Number of images : %d" % self.count)
        self.centerIdx = self.count // 2
        print("Center index image : %d" % self.centerIdx)
        self.center_im = self.images[self.centerIdx]
        for i in range(self.count):
            if i <= self.centerIdx:
                self.left_list.append(self.images[i])
            else:
                self.right_list.append(self.images[i])
        print("Image lists prepared")

    def leftshift(self):
        # stitch the centre image and everything to its left, working left to right
        # self.left_list = reversed(self.left_list)
        a = self.left_list[0]
        for b in self.left_list[1:]:
            H = self.matcher_obj.match(a, b, 'left')
            # print("Homography is : ", H)
            xh = np.linalg.inv(H)
            # print("Inverse Homography :", xh)
            # map the four corners of `a` into `b`'s frame to size the output canvas
            br = np.dot(xh, np.array([a.shape[1], a.shape[0], 1]))
            br = br / br[-1]
            tl = np.dot(xh, np.array([0, 0, 1]))
            tl = tl / tl[-1]
            bl = np.dot(xh, np.array([0, a.shape[0], 1]))
            bl = bl / bl[-1]
            tr = np.dot(xh, np.array([a.shape[1], 0, 1]))
            tr = tr / tr[-1]
            cx = int(max([0, a.shape[1], tl[0], bl[0], tr[0], br[0]]))
            cy = int(max([0, a.shape[0], tl[1], bl[1], tr[1], br[1]]))
            offset = [abs(int(min([0, a.shape[1], tl[0], bl[0], tr[0], br[0]]))),
                      abs(int(min([0, a.shape[0], tl[1], bl[1], tr[1], br[1]])))]
            dsize = (cx + offset[0], cy + offset[1])
            print("image dsize =>", dsize, "offset", offset)

            tl[0:2] += offset
            bl[0:2] += offset
            tr[0:2] += offset
            br[0:2] += offset
            dstpoints = np.array([tl, bl, tr, br])
            srcpoints = np.array([[0, 0], [0, a.shape[0]], [a.shape[1], 0], [a.shape[1], a.shape[0]]])
            # print('sp', srcpoints, 'dp', dstpoints)
            M_off = cv2.findHomography(srcpoints, dstpoints)[0]
            # print('M_off', M_off)
            warped_img2 = cv2.warpPerspective(a, M_off, dsize)
            # cv2.imshow("warped", warped_img2)
            # cv2.waitKey()
            warped_img1 = np.zeros([dsize[1], dsize[0], 3], np.uint8)
            warped_img1[offset[1]:b.shape[0] + offset[1], offset[0]:b.shape[1] + offset[0]] = b
            a = blend.blend_linear(warped_img1, warped_img2)

        self.leftImage = a

    def rightshift(self):
        # fold the remaining images on the right into the current panorama
        for each in self.right_list:
            H = self.matcher_obj.match(self.leftImage, each, 'right')
            # print("Homography :", H)
            # map the four corners of `each` into the panorama's frame to size the output canvas
            br = np.dot(H, np.array([each.shape[1], each.shape[0], 1]))
            br = br / br[-1]
            tl = np.dot(H, np.array([0, 0, 1]))
            tl = tl / tl[-1]
            bl = np.dot(H, np.array([0, each.shape[0], 1]))
            bl = bl / bl[-1]
            tr = np.dot(H, np.array([each.shape[1], 0, 1]))
            tr = tr / tr[-1]
            cx = int(max([0, self.leftImage.shape[1], tl[0], bl[0], tr[0], br[0]]))
            cy = int(max([0, self.leftImage.shape[0], tl[1], bl[1], tr[1], br[1]]))
            offset = [abs(int(min([0, self.leftImage.shape[1], tl[0], bl[0], tr[0], br[0]]))),
                      abs(int(min([0, self.leftImage.shape[0], tl[1], bl[1], tr[1], br[1]])))]
            dsize = (cx + offset[0], cy + offset[1])
            print("image dsize =>", dsize, "offset", offset)

            tl[0:2] += offset
            bl[0:2] += offset
            tr[0:2] += offset
            br[0:2] += offset
            dstpoints = np.array([tl, bl, tr, br])
            srcpoints = np.array([[0, 0], [0, each.shape[0]], [each.shape[1], 0], [each.shape[1], each.shape[0]]])
            # M_off maps canvas points back to image points, so warp with WARP_INVERSE_MAP
            M_off = cv2.findHomography(dstpoints, srcpoints)[0]
            warped_img2 = cv2.warpPerspective(each, M_off, dsize, flags=cv2.WARP_INVERSE_MAP)
            # cv2.imshow("warped", warped_img2)
            # cv2.waitKey()
            warped_img1 = np.zeros([dsize[1], dsize[0], 3], np.uint8)
            warped_img1[offset[1]:self.leftImage.shape[0] + offset[1], offset[0]:self.leftImage.shape[1] + offset[0]] = self.leftImage
            self.leftImage = blend.blend_linear(warped_img1, warped_img2)

        self.rightImage = self.leftImage

    def showImage(self, string=None):
        if string == 'left':
            cv2.imshow("left image", self.leftImage)
        elif string == "right":
            cv2.imshow("right Image", self.rightImage)
        cv2.waitKey()


if __name__ == '__main__':
    try:
        args = sys.argv[1]
    except IndexError:
        args = "txtlists/files4.txt"
    finally:
        print("Parameters : ", args)

    s = Stitch(args)
    # images = ['images/S1.jpg', 'images/S2.jpg', 'images/S3.jpg', 'images/S5.jpg', 'images/S6.jpg']
    # images = ['images/trees_00{}.jpg'.format(i) for i in range(0, 4)]
    # s = Stitch(images)

    s.leftshift()
    # s.showImage('left')
    s.rightshift()
    print("done")
    cv2.imwrite("results/test4.jpg", s.leftImage)
    print("image written")
    cv2.destroyAllWindows()

--------------------------------------------------------------------------------
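Note: the script is driven by a plain text file listing the input images from left to right (see txtlists/ further below). A minimal sketch of the same flow as the __main__ block, assuming it is run from the repository root; the output path is only an illustrative choice:

# Sketch of driving the stitcher from Python, equivalent to running
# `python image_stitching.py txtlists/files2.txt`.
# "results/my_pano.jpg" is a hypothetical output name, not one the repository uses.
import cv2
from image_stitching import Stitch

s = Stitch("txtlists/files2.txt")  # text file listing the images left to right
s.leftshift()                      # stitch the centre image and its left neighbours
s.rightshift()                     # then fold in the images to the right
cv2.imwrite("results/my_pano.jpg", s.leftImage)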
/images/1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/1.jpg
--------------------------------------------------------------------------------
/images/1Hill.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/1Hill.JPG
--------------------------------------------------------------------------------
/images/2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/2.jpg
--------------------------------------------------------------------------------
/images/2Hill.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/2Hill.JPG
--------------------------------------------------------------------------------
/images/3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/3.jpg
--------------------------------------------------------------------------------
/images/3Hill.JPG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/3Hill.JPG
--------------------------------------------------------------------------------
/images/S1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/S1.jpg
--------------------------------------------------------------------------------
/images/S2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/S2.jpg
--------------------------------------------------------------------------------
/images/S3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/S3.jpg
--------------------------------------------------------------------------------
/images/S5.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/S5.jpg
--------------------------------------------------------------------------------
/images/S6.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/S6.jpg
--------------------------------------------------------------------------------
/images/trees_000.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/trees_000.jpg
--------------------------------------------------------------------------------
/images/trees_001.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/trees_001.jpg
--------------------------------------------------------------------------------
/images/trees_002.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/trees_002.jpg
--------------------------------------------------------------------------------
/images/trees_003.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/images/trees_003.jpg
--------------------------------------------------------------------------------
/matchers.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import matplotlib.pyplot as plt


class SIFTMatcher:
    def __init__(self):
        # requires opencv-contrib; on OpenCV >= 4.4, cv2.SIFT_create() also works
        self.sift = cv2.xfeatures2d_SIFT().create()
        # FLANN parameters
        FLANN_INDEX_KDTREE = 1
        index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)

    def match(self, srcImg, testImg, direction):
        print("Direction : ", direction)

        img1gray = cv2.cvtColor(srcImg, cv2.COLOR_BGR2GRAY)
        img2gray = cv2.cvtColor(testImg, cv2.COLOR_BGR2GRAY)
        # find the keypoints and descriptors with SIFT
        kp1, des1 = self.sift.detectAndCompute(img1gray, None)
        kp2, des2 = self.sift.detectAndCompute(img2gray, None)

        matches = self.flann.knnMatch(des1, des2, k=2)

        # Need to draw only good matches, so create a mask
        matchesMask = [[0, 0] for i in range(len(matches))]

        good = []
        pts1 = []
        pts2 = []
        # ratio test as per Lowe's paper
        for i, (m, n) in enumerate(matches):
            if m.distance < 0.7 * n.distance:
                good.append(m)
                pts2.append(kp2[m.trainIdx].pt)
                pts1.append(kp1[m.queryIdx].pt)
                matchesMask[i] = [1, 0]

        draw_params = dict(matchColor=(0, 255, 0),
                           singlePointColor=(255, 0, 0),
                           matchesMask=matchesMask,
                           flags=0)
        img3 = cv2.drawMatchesKnn(srcImg, kp1, testImg, kp2, matches, None, **draw_params)
        # cv2.imwrite("matches.jpg", img3)
        # plt.imshow(img3), plt.show()

        MIN_MATCH_COUNT = 10
        if len(good) > MIN_MATCH_COUNT:
            src_pts = np.float32([kp1[m.queryIdx].pt for m in good])
            dst_pts = np.float32([kp2[m.trainIdx].pt for m in good])
            # print(src_pts, dst_pts)

            # RANSAC-filtered homography mapping testImg points into srcImg's frame
            M, mask = cv2.findHomography(dst_pts, src_pts, cv2.RANSAC, 5.0)
            matchesMask = mask.ravel().tolist()
            draw_params = dict(matchColor=(0, 255, 0),
                               singlePointColor=(255, 0, 0),
                               matchesMask=matchesMask,  # draw only inliers
                               flags=2)
            img4 = cv2.drawMatches(srcImg, kp1, testImg, kp2, good, None, **draw_params)
            # cv2.imwrite("matches_ransac.jpg", img4)
            # plt.imshow(img4), plt.show()
            # print('M', M)
            return M
        else:
            print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
            return None

--------------------------------------------------------------------------------
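Note: SIFTMatcher.match returns the RANSAC-filtered homography that Stitch consumes. A small sketch of calling it directly; the image pair is an arbitrary neighbouring pair from images/, and the direction string only affects the log output:

# Sketch of using SIFTMatcher on its own (image pair chosen for illustration).
import cv2
from matchers import SIFTMatcher

matcher = SIFTMatcher()
img_a = cv2.imread("images/trees_000.jpg")
img_b = cv2.imread("images/trees_001.jpg")
H = matcher.match(img_a, img_b, 'left')
print(H)  # 3x3 matrix mapping img_b points into img_a's frame, or None if too few matches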
/results/test1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/results/test1.jpg
--------------------------------------------------------------------------------
/results/test2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/results/test2.jpg
--------------------------------------------------------------------------------
/results/test3.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/results/test3.jpg
--------------------------------------------------------------------------------
/results/test4.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/breadcake/python-Multiple-Image-Stitching/d6a821e42da3d6cd5294ab83583fb64fea3a2bc6/results/test4.jpg
--------------------------------------------------------------------------------
/txtlists/files1.txt:
--------------------------------------------------------------------------------
images/S1.jpg
images/S2.jpg
images/S3.jpg
images/S5.jpg
images/S6.jpg
--------------------------------------------------------------------------------
/txtlists/files2.txt:
--------------------------------------------------------------------------------
images/1.jpg
images/2.jpg
images/3.jpg
--------------------------------------------------------------------------------
/txtlists/files3.txt:
--------------------------------------------------------------------------------
images/1Hill.JPG
images/2Hill.JPG
images/3Hill.JPG
--------------------------------------------------------------------------------
/txtlists/files4.txt:
--------------------------------------------------------------------------------
images/trees_000.jpg
images/trees_001.jpg
images/trees_002.jpg
images/trees_003.jpg
--------------------------------------------------------------------------------