├── .DS_Store
├── README.md
├── images
│   ├── 1.png
│   ├── 2.png
│   ├── 3.png
│   ├── 4.png
│   ├── 5.png
│   ├── 6.png
│   ├── drawing1.png
│   ├── drawing2.png
│   ├── mountain1.jpg
│   ├── mountain2.jpg
│   ├── shanghai1.png
│   ├── shanghai2.png
│   ├── sudoku.png
│   ├── uttower1.jpg
│   └── uttower2.jpg
├── scripts
│   ├── Final_Project.mlx
│   ├── describe_hog_keypoints.m
│   ├── describe_keypoints.m
│   ├── fit_affine_matrix.m
│   ├── harris_corners.m
│   ├── hog_descriptor.m
│   ├── img_trans.m
│   ├── linear_blend.m
│   ├── match_descriptors.m
│   ├── my_surf.m
│   ├── ransac.m
│   ├── simple_descriptor.m
│   └── stitch.m
└── 计算机视觉期末作业说明文档.pdf

/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/.DS_Store
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# HDUComputerVision2020

#### Introduction

Final project for the Fall/Winter 2020 Computer Vision course at Hangzhou Dianzi University (HDU).

#### Overview

Panoramic image stitching implemented in MATLAB: multiple images are stitched into a single panorama using Harris corner detection, least-squares fitting, random sample consensus (RANSAC), HOG descriptors, linear blending, and related computer vision algorithms.

#### Contributors

孟渝桓 18221785
--------------------------------------------------------------------------------
/images/1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/1.png
--------------------------------------------------------------------------------
/images/2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/2.png
--------------------------------------------------------------------------------
/images/3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/3.png
--------------------------------------------------------------------------------
/images/4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/4.png
--------------------------------------------------------------------------------
/images/5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/5.png
--------------------------------------------------------------------------------
/images/6.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/6.png
--------------------------------------------------------------------------------
/images/drawing1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/drawing1.png
--------------------------------------------------------------------------------
/images/drawing2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/drawing2.png
--------------------------------------------------------------------------------
/images/mountain1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/mountain1.jpg
--------------------------------------------------------------------------------
/images/mountain2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/mountain2.jpg
--------------------------------------------------------------------------------
/images/shanghai1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/shanghai1.png
--------------------------------------------------------------------------------
/images/shanghai2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/shanghai2.png
--------------------------------------------------------------------------------
/images/sudoku.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/sudoku.png
--------------------------------------------------------------------------------
/images/uttower1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/uttower1.jpg
--------------------------------------------------------------------------------
/images/uttower2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/images/uttower2.jpg
--------------------------------------------------------------------------------
/scripts/Final_Project.mlx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/scripts/Final_Project.mlx
--------------------------------------------------------------------------------
/scripts/describe_hog_keypoints.m:
--------------------------------------------------------------------------------
% HOG-based descriptors: for each corner, a patch around it is passed through
% hog_descriptor() and the result is flattened and standardized.
function [M,out]=describe_hog_keypoints(image,keypoints,patch_size)

% Convert the RGB image to grayscale
if length(size(image))==3
    image = rgb2gray(double(image)/255);
end

% Coordinates of the corners marked in the corner map
[x,y]=find(keypoints==1);
M=zeros(length(x),2);
M(:,1)=x;
M(:,2)=y;

ps=floor(patch_size^0.5);
out=zeros(length(x),128);

% Build one HOG-based descriptor per corner
for i=1:length(M)
    tempx=M(i,1);
    tempy=M(i,2);
    patch1=image(tempx-ps:tempx-1,tempy-ps:tempy-1);
    patch2=image(tempx+1:tempx+ps,tempy-ps:tempy-1);
    patch3=image(tempx-ps:tempx-1,tempy+1:tempy+ps);
    patch4=image(tempx+1:tempx+ps,tempy+1:tempy+ps);
    patch=[patch1,patch2;patch3,patch4];
    hogpatch=hog_descriptor(patch);
    v=simple_descriptor(hogpatch);
    out(i,:)=v;
end
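
% Usage sketch (added for illustration), mirroring the calls made in stitch.m;
% it assumes MATLAB is started from the repository root so that images/ resolves,
% and uses the bundled uttower pair as sample input:
%
%   img1 = imread('images/uttower1.jpg');
%   img2 = imread('images/uttower2.jpg');
%   corners1 = harris_corners(img1,3,0.04,20);
%   corners2 = harris_corners(img2,3,0.04,20);
%   [keypoints1,descriptors1] = describe_hog_keypoints(img1,corners1,16);
%   [keypoints2,descriptors2] = describe_hog_keypoints(img2,corners2,16);
%   [matched_points,count] = match_descriptors(descriptors1,descriptors2,0.7);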
--------------------------------------------------------------------------------
/scripts/describe_keypoints.m:
--------------------------------------------------------------------------------
% Inputs:
%   image: (H*W*3) image; the pixel values in the patch around each corner are used to build its descriptor vector.
%   corners: (H*W) corner map produced by the Harris corner detector harris_corners().
%   patch_size: size of the patch (patch_size*patch_size).
%
% Outputs:
%   keypoints: (m*2) matrix indexed by corner: row i stores (x,y), the coordinates of the i-th corner in the original image.
%   descriptors: m*(patch_size*patch_size) matrix indexed by corner: row i stores the (1*(patch_size*patch_size)) descriptor of the i-th corner.

function [keypoints,descriptors]=describe_keypoints(image,corners,patch_size)

% Convert the RGB image to grayscale
if length(size(image))==3
    image = rgb2gray(double(image)/255);
end

% Coordinates in the original image of the corners marked in the corner map
[x,y]=find(corners==1);
keypoints=zeros(length(x),2);
keypoints(:,1)=x;
keypoints(:,2)=y;

% Initialize the descriptor matrix
ps=floor(patch_size*0.5);
descriptors=zeros(length(x),(ps*2)^2);

% Compute the feature vector of each corner
for i=1:length(keypoints)
    tempx=keypoints(i,1); % x coordinate of the corner
    tempy=keypoints(i,2); % y coordinate of the corner
    patch1=image(tempx-ps:tempx-1,tempy-ps:tempy-1);
    patch2=image(tempx+1:tempx+ps,tempy-ps:tempy-1);
    patch3=image(tempx-ps:tempx-1,tempy+1:tempy+ps);
    patch4=image(tempx+1:tempx+ps,tempy+1:tempy+ps);
    patch=[patch1,patch2;patch3,patch4]; % take the 16*16 pixels around the corner as the patch
    v=simple_descriptor(patch);          % flatten the patch and standardize it for robustness to illumination changes
    descriptors(i,:)=v;
end
--------------------------------------------------------------------------------
/scripts/fit_affine_matrix.m:
--------------------------------------------------------------------------------
% Inputs:
%   x, y, u, v: corner coordinate set (x,y) and corner coordinate set (u,v).
%
% Outputs:
%   H: fitted affine transformation matrix H.
%
% Note:
%   (x,y) should be the coordinates of the matched points in img2 and (u,v) those in img1;
%   otherwise the fitted matrix transforms img1 into img2 instead.

% Design matrix constructed as in the paper
function H=fit_affine_matrix(x,y,u,v)

L=length(x);
A=[];b=[];

% Build the matrices of the least-squares system
for i=1:L
    tempA=[x(i) y(i) 0 0 1 0;0 0 x(i) y(i) 0 1];
    A=[A;tempA];       % build matrix A
    tempb=[u(i);v(i)];
    b=[b;tempb];       % build matrix b
end

H=((A'*A)^-1)*A'*b;    % solve for matrix H

% Noise can make the last row differ slightly from the exact [0, 0, 1], so we set it to [0, 0, 1] manually
Tr=[H(1) H(2) H(5);H(3) H(4) H(6);0 0 1];
H=Tr;
end

% Design matrix constructed as in the lecture slides; the result is identical
% function H=fit_affine_matrix(x,y,u,v)
% L=length(x);
%
% A=[];
% B=[];
% for i=1:L
%     tempA=[x(i) y(i) 1 0 0 0;0 0 0 x(i) y(i) 1];
%     A=[A;tempA];
%     tempb=[u(i);v(i)];
%     B=[B;tempb];
% end
% H=((A'*A)^-1)*A'*B;
%
% Tr=[0 0 0;0 0 0;0 0 1];
% Tr(1,1:3)=H(1:3);
% Tr(2,1:3)=H(4:6);
%
% H=Tr;
% end
--------------------------------------------------------------------------------
/scripts/harris_corners.m:
--------------------------------------------------------------------------------
% Inputs:
%   image: (H*W*3) image in which to detect corners.
%   window_size: size of the detection window.
%   k: parameter of the corner response function.
%   border: corners on the image border are not of interest; corners within this margin are ignored.
%
% Outputs:
%   corners: detected corners as an (H*W) 0/1 image: 1 where a pixel is considered a corner, 0 elsewhere.

function corners=harris_corners(image,window_size,k,border)

% Convert the RGB image to grayscale
if length(size(image))==3
    image = rgb2gray(double(image)/255);
end

% Initialize the corner response matrix
[H,W]=size(image);
E=zeros(H,W);

% Sobel operators in the x and y directions
sobelx=[-1 0 1;-2 0 2;-1 0 1];
sobely=[-1 -2 -1;0 0 0;1 2 1];

% Image gradients in the x and y directions
Gx=conv2(image,sobelx,'same');
Gy=conv2(image,sobely,'same');

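% For reference: at each pixel the smoothed gradient products computed below form the
% 2x2 structure tensor M = [A B; B C], with A = sum(w.*Gx.^2), B = sum(w.*Gx.*Gy),
% C = sum(w.*Gy.^2) over the window weights w, and the Harris response used further
% down is R = det(M) - k*trace(M)^2 = (A*C - B^2) - k*(A + C)^2.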
% Compute A, B and C of the corner response function (products of the gradients),
% then apply a Gaussian smoothing to the results to improve robustness to noise
% window=ones(window_size,window_size);                    % Option 1: uniform weights
window=fspecial('gaussian',[window_size,window_size],1);   % Option 2: Gaussian weighting
A=conv2(Gx.*Gx,window,'same');
B=conv2(Gx.*Gy,window,'same');
C=conv2(Gy.*Gy,window,'same');

% Compute the corner response value of every pixel from the corner response function
for i=1:H
    for j=1:W
        M=[A(i,j),B(i,j);B(i,j),C(i,j)];
        E(i,j)=det(M)-k*(trace(M)^2);
    end
end

% Find the maximum response value Emax of the image and set the threshold to 0.01*Emax
Emax=max(E(:));
t=Emax*0.01;
E=padarray(E,[1 1],'both');

% Filter corners using the threshold
for i=2:H+1
    for j=2:W+1
        if E(i,j)
--------------------------------------------------------------------------------
/scripts/img_trans.m:
--------------------------------------------------------------------------------
if Tx>0
    methodx='post';
else
    methodx='pre';
end

if Ty>0
    methody='post';
else
    methody='pre';
end

t_img2=padarray(t_img2,abs(floor(Tx)),0,methodx);
t_img2=padarray(rot90(t_img2),abs(floor(Ty)),0,methody);
t_img2=rot90(t_img2);
t_img2=rot90(t_img2);
t_img2=rot90(t_img2);

t_img1=padarray(t_img1,abs(size(t_img2,1)-size(t_img1,1)),0,'post');
t_img1=padarray(rot90(t_img1),abs(size(t_img2,2)-size(t_img1,2)),0,'pre');
t_img1=rot90(t_img1);
t_img1=rot90(t_img1);
t_img1=rot90(t_img1);

end
--------------------------------------------------------------------------------
/scripts/linear_blend.m:
--------------------------------------------------------------------------------
% Inputs:
%   img1: (H*W*3) image 1 after the affine transformation.
%   img2: (H*W*3) image 2 after the affine transformation.
%
% Outputs:
%   temp_mask: overlap region of img1 and img2.
%   linear_blended_img: (H*W*3) blend of the overlap region with linearly varying weights.
%   equally_weighted_blended_img: (H*W*3) blend of the overlap region with equal (0.5/0.5) weights.

function [temp_mask,linear_blended_img,equally_weighted_blended_img]=linear_blend(img1,img2)

% Initialization
linear_blended_img1=double(img1);
linear_blended_img2=double(img2);
equally_weighted_blended_img1=double(img1);
equally_weighted_blended_img2=double(img2);

% Determine the blending region
temp_mask1 = (linear_blended_img2(:,:,1)>0 | linear_blended_img2(:,:,2)>0 | linear_blended_img2(:,:,3)>0); % mask of the transformed image
temp_mask2 = (linear_blended_img1(:,:,1)>0 | linear_blended_img1(:,:,2)>0 | linear_blended_img1(:,:,3)>0); % mask of the non-transformed image
temp_mask = and(temp_mask1,temp_mask2); % mask of the overlap region

% Determine the left and right boundaries of the blending region
[row,col] = find(temp_mask==1);
left = min(col);right = max(col); % column range of the overlap
% up=min(row);down=max(row);

% Build the equal-weight blending mask
equally_weighted_mask = ones(size(temp_mask));
equally_weighted_mask(:,left:right) = repmat(linspace(0.5,0.5,right-left+1),size(equally_weighted_mask,1),1); % tile the weight row across all rows
% Weight each image inside the overlap region
equally_weighted_blended_img1(:,:,:) = equally_weighted_blended_img1(:,:,:).*equally_weighted_mask;
equally_weighted_blended_img2(:,:,:) = equally_weighted_blended_img2(:,:,:).*equally_weighted_mask;

linear_blend_mask = ones(size(temp_mask));
% Build linear blending mask 1
linear_blend_mask(:,left:right) = repmat(linspace(1,0,right-left+1),size(linear_blend_mask,1),1);
% mask(up:down,:) = repmat(linspace(0,1,down-up+1)',1,size(mask,2)); % tile the weight column across all columns
% Weight image 1 inside the overlap region
linear_blended_img1(:,:,:) = linear_blended_img1(:,:,:).*linear_blend_mask;
% Build linear blending mask 2
linear_blend_mask(:,left:right) = repmat(linspace(0,1,right-left+1),size(linear_blend_mask,1),1); % tile the weight row across all rows
% mask(up:down,:) = repmat(linspace(1,0,down-up+1)',1,size(mask,2)); % tile the weight column across all columns
% Weight image 2 inside the overlap region
linear_blended_img2(:,:,:) = linear_blended_img2(:,:,:).*linear_blend_mask;
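
% Note: over the overlap columns the two ramps linspace(1,0,...) and linspace(0,1,...)
% are complementary (they sum to 1 column by column), so adding the two weighted
% images below preserves the overall brightness across the seam.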
% Output the results
linear_blended_img(:,:,:) = linear_blended_img2(:,:,:) + linear_blended_img1(:,:,:);
linear_blended_img=uint8(linear_blended_img);

equally_weighted_blended_img(:,:,:)=equally_weighted_blended_img1(:,:,:) + equally_weighted_blended_img2(:,:,:);
equally_weighted_blended_img=uint8(equally_weighted_blended_img);
end
--------------------------------------------------------------------------------
/scripts/match_descriptors.m:
--------------------------------------------------------------------------------
% Inputs:
%   descriptors1: m*d descriptor matrix.
%   descriptors2: n*d descriptor matrix.
%   k: ratio threshold.
%
% Outputs:
%   count: number of matched corner pairs.
%   matched_points: (count*2) matrix storing the index pairs of the matched corners.

function [matched_points,count]=match_descriptors(descriptors1,descriptors2,k)

% Initialize the Euclidean distance matrix
[count1,~]=size(descriptors1);
[count2,~]=size(descriptors2);
matched_points=zeros(count1,2);
dist=zeros(count1,count2);

% Compute the Euclidean distance matrix
for i=1:count1
    for j=1:count2
        temp1=descriptors1(i,:); % descriptor of corner i
        temp2=descriptors2(j,:); % descriptor of corner j
        dist(i,j)=sqrt(sum((temp1-temp2).^2)); % Euclidean distance between the two descriptors
    end
end

% Decide whether the corners match (ratio test)
for i=1:count1
    [dist_min,ind]=sort(dist(i,:)); % for corner i of img1, find the two corners j1 and j2 of img2 with the smallest Euclidean distance
    dist_min1=dist_min(1);          % Euclidean distance of (i,j1)
    dist_min2=dist_min(2);          % Euclidean distance of (i,j2)

    q=dist_min1/dist_min2;          % compute the ratio dist_min1/dist_min2

    if q<=k                         % accept (i,j1) as a matched corner pair if the ratio is below the threshold k
        matched_points(i,1)=i;
        matched_points(i,2)=ind(1);
    end
end

matched_points(all(matched_points==0,2),:)=[];
count=size(matched_points,1);

end
--------------------------------------------------------------------------------
/scripts/my_surf.m:
--------------------------------------------------------------------------------
% Inputs:
%   img1: (H*W*3) image 1.
%   img2: (H*W*3) image 2.
%
% Outputs:
%   out: stitching result of img1 and img2 with linear blending.

function out=my_surf(img1,img2)

% Corner detection
gray_img1=rgb2gray(img1);
gray_img2=rgb2gray(img2);
imageSize=size(gray_img1);

p1=detectSURFFeatures(gray_img1);
p2=detectSURFFeatures(gray_img2);

% Build descriptors
[img1Features, p1] = extractFeatures(gray_img1, p1);
[img2Features, p2] = extractFeatures(gray_img2, p2);

% Match corners
boxPairs = matchFeatures(img1Features, img2Features);
matchedimg1Points = p1(boxPairs(:, 1));
matchedimg2Points = p2(boxPairs(:, 2));

% RANSAC
[tform, inlierimg2Points, inlierimg1Points] = estimateGeometricTransform(matchedimg2Points, matchedimg1Points, 'projective');
% Projective transform; the returned tform maps the inliers of the first argument (img2 points) to the inliers of the second argument (img1 points).
% estimateGeometricTransform uses MSAC, a variant of random sample consensus (RANSAC), to reject mismatched points.

showMatchedFeatures(img1, img2, inlierimg1Points,inlierimg2Points, 'montage');
title('SURF descriptors + RANSAC matching result')

% Stitching
[xlim, ylim] = outputLimits(tform, [1 imageSize(2)], [1 imageSize(1)]);
% Find the minimum and maximum output spatial limits
xMin = min([1; xlim(:)]);
xMax = max([imageSize(2); xlim(:)]);

yMin = min([1; ylim(:)]);
yMax = max([imageSize(1); ylim(:)]);

% Width and height of the panorama
width = round(xMax - xMin);
height = round(yMax - yMin);

% Create a 2-D spatial reference object that defines the panorama size
xLimits = [xMin xMax];
yLimits = [yMin yMax];
panoramaView = imref2d([height width], xLimits, yLimits);

% Warp the images into the panorama
gray_img1 = imwarp(img1,projective2d(eye(3)), 'OutputView', panoramaView);
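% img1 is resampled into the panorama frame with an identity projective transform
% (it stays where it is), while img2 below is warped by tform into the same
% panoramaView reference; note that gray_img1/gray_img2 are reused here to hold
% the warped RGB images that linear_blend expects.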
gray_img2 = imwarp(img2, tform, 'OutputView', panoramaView);

[~,out,~]=linear_blend(gray_img1,gray_img2);
--------------------------------------------------------------------------------
/scripts/ransac.m:
--------------------------------------------------------------------------------
% Inputs:
%   keypoints1: (m*2) matrix of corner coordinates of img1.
%   keypoints2: (n*2) matrix of corner coordinates of img2.
%   matched_points: (p*2) matrix storing the index pairs of the matched corners.
%   iterations: number of RANSAC iterations.
%   thres: RANSAC error threshold.
%   num_inliers: number of point pairs sampled in each iteration to fit the affine matrix.
%
% Outputs:
%   ransac_matched_points: (q*2) matrix of the matched index pairs kept after RANSAC removes mismatches.
%   count_inliers: number of matches kept after RANSAC removes mismatches.

function [ransac_matched_points,count_inliers]=ransac(keypoints1,keypoints2,matched_points,iterations,thres,num_inliers)

N=length(matched_points); % initialize the output with the maximum possible number of matches
ransac_matched_points=zeros(N,2);

matched1=matched_points(:,1); % indices of the matched points in image 1
matched2=matched_points(:,2); % indices of the matched points in image 2

sub_matched1=zeros(N,2);
sub_matched2=zeros(N,2);
sub_transed=zeros(N,2);

for i=1:N
    sub_matched2(i,:)=keypoints1(matched1(i),:); % coordinates of the matched points in image 1
    sub_matched1(i,:)=keypoints2(matched2(i),:); % coordinates of the matched points in image 2
end

count_inliers = 0;     % initialize the inlier count
H=[0 0 0;0 0 0;0 0 1]; % initialize the transformation matrix
x1=[];y1=[];x2=[];y2=[];A=[];B=[];

for i=1:iterations     % RANSAC iterations
    x1=[];y1=[];x2=[];y2=[];A=[];B=[];
    temp_n=0;temp_newmatches=zeros(N,2);

    randIndex=randperm(N);
    sub_p1=sub_matched1(randIndex,:);
    sub_p2=sub_matched2(randIndex,:); % shuffle the point order

    % Take the first num_inliers shuffled point pairs to fit the affine transformation matrix H
    for t1=1:num_inliers
        tempx1=sub_p1(t1,1);x1=[x1,tempx1];
        tempy1=sub_p1(t1,2);y1=[y1,tempy1];
        tempx2=sub_p2(t1,1);x2=[x2,tempx2];
        tempy2=sub_p2(t1,2);y2=[y2,tempy2];
    end

    for t2=1:num_inliers
        tempA=[x1(t2) y1(t2) 1 0 0 0;0 0 0 x1(t2) y1(t2) 1];
        A=[A;tempA];
        tempb=[x2(t2);y2(t2)];
        B=[B;tempb];
    end

    T=((A'*A)^-1)*A'*B;
    H(1,1:3)=T(1:3);
    H(2,1:3)=T(4:6);

    % Apply the fitted transformation matrix to all matched points
    for t3=1:N
        x=sub_matched1(t3,1);
        y=sub_matched1(t3,2);
        transed_point=H*[x;y;1];
        sub_transed(t3,1)=transed_point(1,1);
        sub_transed(t3,2)=transed_point(2,1);
    end

    % Compute the error
    inaccuracy=abs(sub_transed./sub_matched2-1);
    % inaccuracy=abs(norm(sub_transed)-norm(sub_matched2));

    for t4=1:N
        if sum(inaccuracy(t4,:))<thres
            temp_n=temp_n+1;
            temp_newmatches(temp_n,:)=matched_points(t4,:);
        end
    end

    % Keep the largest consistent set of matches found so far
    if temp_n > count_inliers
        count_inliers = temp_n;
        ransac_matched_points=temp_newmatches;
    end
end

ransac_matched_points(all(ransac_matched_points==0,2),:)=[];

end
--------------------------------------------------------------------------------
/scripts/simple_descriptor.m:
--------------------------------------------------------------------------------
% Inputs:
%   patch: (patch_size*patch_size) matrix.
%
% Outputs:
%   out: (1*(patch_size*patch_size)) vector, the flattened and standardized patch.

function out=simple_descriptor(patch)

patch_std=std2(patch);     % standard deviation
patch_mean=mean(patch(:)); % mean

out=(patch-patch_mean)./patch_std; % standardize (zero mean, unit variance)
out=out(:);

end
--------------------------------------------------------------------------------
/scripts/stitch.m:
--------------------------------------------------------------------------------
% Inputs:
%   img1: (H*W*3) image 1.
%   img2: (H*W*3) image 2.
%
% Outputs:
%   out: stitching result of img1 and img2 with linear blending.

function out=stitch(img1,img2)

% Harris corner detection on both images
k=0.04;
border=20;
corners_img1 = harris_corners(img1,3,k,border);
corners_img2 = harris_corners(img2,3,k,border);

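% corners_img1/corners_img2 are H*W 0/1 corner maps; find() below recovers their
% (row,col) coordinates in the same column-major order used inside
% describe_hog_keypoints, so the index pairs returned by match_descriptors and
% ransac can be mapped straight back to pixel positions via x1/y1 and x2/y2.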
[x1,y1]=find(corners_img1==1);
[x2,y2]=find(corners_img2==1);

% HOG-based descriptors for every detected corner
patch_size=16;
[keypoints1,descriptors1]=describe_hog_keypoints(img1,corners_img1,patch_size);
[keypoints2,descriptors2]=describe_hog_keypoints(img2,corners_img2,patch_size);

% Match descriptors with the ratio test (threshold k)
k=0.7;
[matched_points,~] = match_descriptors(descriptors1,descriptors2,k);

% RANSAC to reject mismatches
iterations=500;
thres=0.01;
num_inliers=10;

[matches,~]=ransac(keypoints1,keypoints2,matched_points,iterations,thres,num_inliers);

% Collect the coordinates of the surviving matches
count=length(matches);
x_img1=zeros(1,count);y_img1=zeros(1,count);
x_img2=zeros(1,count);y_img2=zeros(1,count);

for i=1:count
    p1=matches(i,1);p2=matches(i,2);
    x_img1(i)=x1(p1);y_img1(i)=y1(p1);
    x_img2(i)=x2(p2);y_img2(i)=y2(p2);
end

% Fit the affine transform that maps img2 coordinates onto img1 coordinates
T=fit_affine_matrix(x_img2,y_img2,x_img1,y_img1);

% Transform the images into a common frame, pad them to the same height, and blend
[t_img1,t_img2]=img_trans(img1,img2,T);

[h1,~]=size(t_img1);
[h2,~]=size(t_img2);

h=max(h1,h2);
t_img1=padarray(t_img1,[abs(h-h1) 0],0,'post');
t_img2=padarray(t_img2,[abs(h-h2) 0],0,'post');

[~,linear_blendedimg,~]=linear_blend(t_img1,t_img2);

out=linear_blendedimg;
--------------------------------------------------------------------------------
/计算机视觉期末作业说明文档.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/CXris/computerVision-stitching/8c194e37fdf8c714c9213bf848d98880192ee90d/计算机视觉期末作业说明文档.pdf
--------------------------------------------------------------------------------
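
A minimal end-to-end usage sketch, added for illustration and not one of the original scripts: it assumes MATLAB is started from the repository root, that scripts/ is added to the path, and that the Image Processing and Computer Vision Toolboxes required by my_surf.m are installed; uttower1.jpg and uttower2.jpg are one of the bundled sample pairs.

addpath('scripts');
img1 = imread('images/uttower1.jpg');
img2 = imread('images/uttower2.jpg');

% Hand-written pipeline: Harris corners + HOG descriptors + RANSAC + linear blending
pano = stitch(img1, img2);
figure; imshow(pano); title('Harris + RANSAC stitching result');

% Toolbox baseline: SURF features + MSAC via estimateGeometricTransform
pano_surf = my_surf(img1, img2);
figure; imshow(pano_surf); title('SURF baseline stitching result');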