├── code
│   └── code
│       ├── Find_Edge_Superpixels.m
│       ├── GMmodel.m
│       ├── GetMC.m
│       ├── SLIC.cpp
│       ├── SLIC.mexw32
│       ├── SLIC.mexw64
│       ├── Saliency_Absorb_MC.m
│       ├── SubCode
│       │   ├── Find_Edge_Superpixels.m
│       │   ├── SLIC.cpp
│       │   ├── SLIC.mexw32
│       │   ├── SLIC.mexw64
│       │   ├── find_connect_superpixel_DoubleIn_Opposite.m
│       │   ├── normalize.m
│       │   ├── sup2pixel.cpp
│       │   ├── sup2pixel.mexw32
│       │   └── sup2pixel.mexw64
│       ├── SurfFeature.mat
│       ├── apply_tranfer_function.m
│       ├── child_window.m
│       ├── classify_image.fig
│       ├── classify_image.m
│       ├── code.m
│       ├── create_databaseD_1114.asv
│       ├── create_databaseD_1114.m
│       ├── dabap.m
│       ├── dataset
│       │   └── image_2
│       │       ├── 000000_10.png
│       │       ├── 000000_11.png
│       │       ├── 000001_10.png
│       │       └── 000001_11.png
│       ├── de.m
│       ├── demo_style_transfer.m
│       ├── downsample.m
│       ├── ex.m
│       ├── featureextraction_image.fig
│       ├── featureextraction_image.m
│       ├── final.fig
│       ├── final.m
│       ├── find_connect_superpixel_DoubleIn_Opposite.m
│       ├── gaussian_pyramid.m
│       ├── get_transfer_function.m
│       ├── laplacian_pyramid.m
│       ├── llf.m
│       ├── llf_discrete.m
│       ├── llf_general.m
│       ├── main_menu.fig
│       ├── main_menu.m
│       ├── normalize.m
│       ├── objectFeature.mat
│       ├── otsu.m
│       ├── preprocessing_image.fig
│       ├── preprocessing_image.m
│       ├── pyramid_filter.m
│       ├── reconstruct_laplacian_pyramid.m
│       ├── remapping_function.m
│       ├── segmentation_image.fig
│       ├── segmentation_image.m
│       ├── slic.m
│       ├── style_transfer.m
│       ├── sup2pixel.cpp
│       ├── sup2pixel.mexw32
│       ├── sup2pixel.mexw64
│       ├── te.xls
│       └── upsample.m
├── editing doc.pdf
└── object recognition of automated driving system (1).docx

/code/code/Find_Edge_Superpixels.m:
--------------------------------------------------------------------------------
1 | function EdgSup = Find_Edge_Superpixels( Labels, K, height, width , Wcon, ConPix )
2 | %%
3 | % obtain the indication of edge super-pixels
4 | % Input:
5 | % Labels: the super-pixel label obtained from SLIC
6 | % K: the number of super-pixels
7 | % height: the height of the image
8 | % width: the width of the image
9 | % Wcon: the affinity weight on the edge of the graph
10 | % ConPix: one layer neighbour relationship of super-pixels
11 | % Output:
12 | % EdgSup: the edge superpixel is indicated by value 1,
13 | % the superpixel in the edge frame is indicated by value 2.
14 | %%%%==================================================================== 15 | EdgSup=zeros( K,1); Check=0; 16 | for i=1:height 17 | EdgSup ( Labels( i,1 )+1 ) =1; 18 | EdgSup ( Labels(i, width) +1 )=1; 19 | end 20 | for i=1:width 21 | EdgSup (Labels(1,i) +1 )= 1 ; 22 | EdgSup (Labels(height, i) +1 ) =1; 23 | end 24 | EdgSupSecond = EdgSup; 25 | for j=1:K 26 | if EdgSup(j)==1 27 | for z=1:K 28 | if ( ConPix(j,z)>0 ) && ( EdgSup(z)==0 ) 29 | Check = Check + Wcon(j,z); 30 | EdgSupSecond( z ) = 1; 31 | end 32 | end 33 | if Check > 13 % heuristic threshold to discard the frame 34 | return; 35 | end 36 | end 37 | end 38 | EdgSup = EdgSup + EdgSupSecond; 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /code/code/GMmodel.m: -------------------------------------------------------------------------------- 1 | 2 | function [Wt,Meanmat,Cov] = GMmodel(x,no_gaus_distr) 3 | 4 | % the above expression is the definition of the function GMmodel which 5 | % takes in the training data and the number of gaussian distribution 6 | % to model the data as input and the output consist of the three matrices, 7 | % first one is mixing ratios whch is P * N matrix and contains the 8 | % fraction of datapoints belonging to each gaussian distribution, the 9 | % second one is mean matrix that contains the means of each gaussian 10 | % distribution in P * D matix and the third one is covariance matrix which 11 | % is D * D * P matrix and has covariance of each distribution in D * D 12 | % matrix. 13 | 14 | %-----------------------------------------------------//---------------------------------------------------------------------------------------------------% 15 | % initialization of the Meanmat, Wt and Cov is done in this block 16 | % of code. Initially K-mean clustering technique is being applied 17 | % to find the weights, means and covariance matrices. 18 | 19 | sizemat = size(x); 20 | tot_rows = sizemat(1); 21 | dimension = sizemat(2); 22 | Wt = zeros(no_gaus_distr,1); 23 | Cov = zeros(dimension,dimension,no_gaus_distr); 24 | Meanmat = kmclust(x,no_gaus_distr); 25 | 26 | % kmclust is a function that does the K-mean clustering out of 27 | % many data sets and returns the mean of each cluster. 28 | 29 | Eff_no_pnts = zeros(no_gaus_distr,1); 30 | indexcol = zeros(tot_rows,1); 31 | 32 | % the for loop used next is for indexing each row to the mean from 33 | % which its distance is minimum and the output is stored in 34 | % indexcol matrix. 35 | 36 | 37 | for currow = 1:tot_rows 38 | threshold = 10^20; 39 | for curmean = 1:no_gaus_distr 40 | sqdist = 0; 41 | for curdim = 1:dimension 42 | sqdist = sqdist + ((x(currow,curdim) - Meanmat(curmean,curdim))^2); 43 | end 44 | dist = sqdist^(0.5); 45 | if (dist <= threshold) 46 | indexcol(currow,1) = curmean; 47 | threshold = dist; 48 | end 49 | end 50 | end 51 | 52 | % the covariance matrix is initialised first with values that are 53 | % found using the datapoints which are belonging to the same 54 | % cluster. 
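% In equation form, the initialisation below sets, for each cluster p with
% N_p points assigned to it by the nearest-mean (K-means) labelling in indexcol,
%     Wt(p)      = N_p / N
%     Cov(:,:,p) = (1/N_p) * sum_{n in cluster p} (x_n - mu_p)' * (x_n - mu_p)
% i.e. the empirical covariance of the points belonging to that cluster.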
55 | 56 | for loopvar1 = 1:no_gaus_distr 57 | count = 0; 58 | tempcov = zeros(dimension,dimension); 59 | for loopvar2 = 1:tot_rows 60 | if (indexcol(loopvar2,1) == loopvar1) 61 | count = count + 1; 62 | tempcov = tempcov + ((x(loopvar2,:)-Meanmat(loopvar1,:))')*(x(loopvar2,:)-Meanmat(loopvar1,:)); 63 | end 64 | end 65 | Wt(loopvar1,1) = count/tot_rows; 66 | tempcov = tempcov./count; 67 | %det(tempcov) 68 | Cov(:,:,loopvar1) = tempcov; 69 | %if (loopvar1 == 1) 70 | % Cov(:,:) = tempcov(:,:); 71 | %else 72 | % Cov = [Cov;tempcov]; 73 | %end 74 | end 75 | %save('covariance first.mat','Cov'); 76 | %exit (0); 77 | %tempvar2 = 0; 78 | %for currow = 1:tot_rows 79 | % tempvar = 0; 80 | %cov_var = 1; 81 | % for curdistr = 1:no_gaus_distr 82 | % %tempcov = Cov(cov_var:(dimension*curdistr),:); 83 | % tempcov = Cov(:,:,curdistr); 84 | %cov_var = cov_var + dimension; 85 | % exp_part = exp(-0.5*(x(currow,:)-Meanmat(curdistr,:))*(1/tempcov)*((x(currow,:)-Meanmat(curdistr,:))')); 86 | % tempvar = tempvar + ((Wt(curdistr,1))*(1/((det(tempcov))^(0.5)))*exp_part); 87 | % end 88 | % tempvar2 = tempvar2 + log(tempvar); 89 | %end 90 | %likelihoodnew = -((tot_rows*dimension)/2)*log(2*pi) + tempvar2; 91 | %flag_new = exp(likelihoodnew*(10^-5)); 92 | likelihoodold = -10^30; 93 | likelihoodnew = -10^25; 94 | flag = 10^(-10); 95 | %flag_new = 10000; 96 | %flag_old = 1000; 97 | iteration = 0; 98 | 99 | % responsibilities matrix is nothing but the responsibiliets of each 100 | % gaussian to take each point and it is a N * P matrix one row for 101 | % each data set and cloumn for each gaussian distribution. 102 | 103 | responsibilities = zeros(tot_rows,no_gaus_distr); 104 | 105 | 106 | %------------------------------------------------------//-------------------------------------------------------------------------------------------% 107 | % loop starts here 108 | 109 | % the loop runs until the difference between successive iteration 110 | % becomes less than 0.001 times the value of log of the 111 | % probability of observing the training set of data 112 | % while (abs(likelihoodold-likelihoodnew)>0.001 && iteration <=500) 113 | while (((likelihoodnew-likelihoodold)>flag || iteration<25) && iteration<200) 114 | iteration = iteration + 1 %abs(likelihoodold-likelihoodnew) 115 | likelihoodold = likelihoodnew; 116 | %flag_old = flag_new; 117 | 118 | 119 | 120 | %----------------------------------------------------//----------------------------------------------------------------------------------------------% 121 | % this chunk involves finding the responsibilities of each 122 | % gaussian distribution for each point in N*P matrix. 123 | % inv is a predefined function of matlab that finds the 124 | % inverse of the matrix given in its argument. This is 125 | % essentially the most time consuming part if the dimension 126 | % of feature is very large. 
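% Written out, the responsibility (E-step) computed below for point x_n and
% component p is
%     r(n,p) = Wt(p) * |Cov_p|^(-1/2) * exp( -0.5*(x_n-mu_p)*inv(Cov_p)*(x_n-mu_p)' )
%            / sum_q Wt(q) * |Cov_q|^(-1/2) * exp( -0.5*(x_n-mu_q)*inv(Cov_q)*(x_n-mu_q)' )
% The common (2*pi)^(-D/2) factor of the Gaussian density is omitted here
% because it cancels between numerator and denominator.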
127 | 128 | for currow = 1:tot_rows 129 | %cov_var = 1; 130 | evidence = 0; 131 | for loopvar1 = 1:no_gaus_distr 132 | %tempcov = Cov(cov_var:(dimension*loopvar1),:); 133 | tempcov = Cov(:,:,loopvar1); 134 | %cov_var = cov_var + dimension; 135 | exp_part = exp(-0.5*(x(currow,:)-Meanmat(loopvar1,:))*(inv(tempcov))*((x(currow,:)-Meanmat(loopvar1,:))')); 136 | denominator = (Wt(loopvar1,1))*(1/((det(tempcov))^(0.5)))*exp_part; 137 | responsibilities(currow,loopvar1) = denominator; 138 | evidence = evidence + denominator; 139 | end 140 | responsibilities(currow,:) = responsibilities(currow,:)./evidence; 141 | end 142 | 143 | %str = 'finished first job' 144 | 145 | %-------------------------------------------------//--------------------------------------------------------------------------------------------------% 146 | 147 | 148 | 149 | % this chunk calculates new means, weights and effective 150 | % number of points. 151 | 152 | for loopvar1 = 1:no_gaus_distr 153 | tempvar = zeros(1,dimension); 154 | tempvar2 = 0; 155 | for currow = 1:tot_rows 156 | tempvar = tempvar + (responsibilities(currow,loopvar1).*x(currow,:)); 157 | tempvar2 = tempvar2 + responsibilities(currow,loopvar1); 158 | end 159 | Meanmat(loopvar1,:) = tempvar./tempvar2; 160 | Eff_no_pnts(loopvar1,1) = tempvar2; 161 | Wt(loopvar1,1) = Eff_no_pnts(loopvar1,1)/tot_rows; 162 | end 163 | 164 | % str = 'finished second job' 165 | 166 | %-----------------------------------------------//------------------------------------------------------------------------------------------------------% 167 | 168 | 169 | 170 | % this chunk of code is for recalculation of covariance 171 | % matrix. By recalculation it means the covariance matrix is 172 | % filled with new data sets that makes use of the 173 | % responsibilities calculated earlier. 174 | 175 | %cov_var = 1; 176 | for loopvar1 = 1:no_gaus_distr 177 | tempcov = zeros(dimension,dimension); 178 | tempvar2 = 0; 179 | for currow = 1:tot_rows 180 | tempcov = tempcov + responsibilities(currow,loopvar1)*(((x(currow,:))')*(x(currow,:))); 181 | tempvar2 = tempvar2 + responsibilities(currow,loopvar1); 182 | end 183 | Cov(:,:,loopvar1) = (tempcov/tempvar2) - (Meanmat(loopvar1,:)')*(Meanmat(loopvar1,:)); 184 | %Cov(cov_var:(dimension*loopvar1),:) = tempcov./Eff_no_pnts(loopvar1,1); 185 | %cov_var = cov_var + dimension; 186 | end 187 | 188 | % str = 'finished third job' 189 | 190 | %---------------------------------------------//---------------------------------------------------------------------------------------------------------% 191 | 192 | 193 | 194 | % this chunk calculates the log likelihood in order to 195 | % compare the previous value with the current value. 
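% The quantity accumulated below is the log-likelihood of the data under the
% current mixture, with the constant term -(N*D/2)*log(2*pi) added once at the end:
%     log L = -(N*D/2)*log(2*pi)
%           + sum_n log( sum_p Wt(p) * |Cov_p|^(-1/2) * exp(-0.5*(x_n-mu_p)*inv(Cov_p)*(x_n-mu_p)') )
% EM never decreases this value, so the while-loop above keeps iterating while
% the improvement (likelihoodnew - likelihoodold) exceeds the flag threshold
% (after a minimum of 25 iterations), up to 200 iterations.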
196 | 197 | tempvar2 = 0; 198 | for currow = 1:tot_rows 199 | tempvar = 0; 200 | %cov_var = 1; 201 | for curdistr = 1:no_gaus_distr 202 | tempcov = Cov(:,:,curdistr); 203 | %tempcov = Cov(cov_var:(dimension*curdistr),:); 204 | %cov_var = cov_var + dimension; 205 | exp_part = exp(-0.5*(x(currow,:)-Meanmat(curdistr,:))*(inv(tempcov))*((x(currow,:)-Meanmat(curdistr,:))')); 206 | tempvar = tempvar + ((Wt(curdistr,1))*(1/((det(tempcov))^(0.5)))*exp_part); 207 | end 208 | tempvar2 = tempvar2 + log(tempvar); 209 | end 210 | 211 | 212 | % str = 'finished fourth job' 213 | 214 | likelihoodnew = -((tot_rows*dimension)/2)*log(2*pi) + tempvar2 215 | if (iteration==3) 216 | flag = -1*(likelihoodnew/(10^(5))) 217 | end 218 | likelihoodnew-likelihoodold 219 | %flag_new = exp(likelihoodnew*(10^-5)); 220 | %exp(abs(flag_old-flag_new)) 221 | end 222 | %-------------------------------------------//----------------------------------------------------------------------------------------------------------% 223 | -------------------------------------------------------------------------------- /code/code/GetMC.m: -------------------------------------------------------------------------------- 1 | function [ Salpix ] = GetMC(imname) 2 | Img = double( imread( imname ) ); 3 | [ height,width ] = size(Img(:,:,1)); 4 | PixNum = height*width; 5 | ImgVecR = reshape( Img(:,:,1)', PixNum, 1); 6 | ImgVecG = reshape( Img(:,:,2)', PixNum, 1); 7 | ImgVecB = reshape( Img(:,:,3)', PixNum, 1); 8 | % m is the compactness parameter, k is the super-pixel number in SLIC algorithm 9 | m = 20; k = 250; 10 | ImgAttr=[ height ,width, k, m, PixNum ]; 11 | % obtain superpixel from SLIC algorithm: LabelLine is the super-pixel label vector of the image, 12 | % Sup1, Sup2, Sup3 are the mean L a b colour value of each superpixel, 13 | % k is the number of the super-pixel. 
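% Overall flow of GetMC: (1) the SLIC mex called below over-segments the image
% into roughly k superpixels; (2) an affinity graph is built between neighbouring
% superpixels from their mean Lab colour distance; (3) boundary superpixels act as
% absorbing nodes of a Markov chain, and the saliency of each inner superpixel is
% (roughly) its normalised expected absorption time, obtained further down as
%     Sal = ( I - alph*P ) \ y
% where P is the degree-normalised affinity matrix and y is a vector of ones.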
14 | [ LabelLine, Sup1, Sup2, Sup3, k ] = SLIC( ImgVecR, ImgVecG, ImgVecB, ImgAttr ); 15 | Label=reshape(LabelLine,width,height); 16 | Label = Label'; % the superpixle label 17 | 18 | [ ConPix, ConPixDouble ] = find_connect_superpixel_DoubleIn_Opposite( Label, k, height ,width ); 19 | % count the number of the edge in the graph 20 | NumInit=0; 21 | for j=1:k 22 | for z=j+1:k 23 | if ConPixDouble(j,z)>0 24 | NumInit=NumInit+1; 25 | end 26 | end 27 | end 28 | Dcol=zeros(NumInit,3); 29 | % calculate the edge weight 30 | mm=1; 31 | for j=1:k-1 32 | for z=j+1:k 33 | if ConPixDouble(j,z)>0 34 | DcolTem = sqrt( ( Sup1(j)-Sup1(z) ).^2 + ( Sup2(j)-Sup2(z) ).^2 + ( Sup3(j)- Sup3(z) ).^2 ); 35 | Dcol(mm, 1: 3 )=[j,z,DcolTem ]; 36 | mm=mm+1; 37 | end 38 | end 39 | end 40 | 41 | DcolNor = normalize( Dcol(:,3) ); 42 | weight = exp( -10*DcolNor ) + .00001; 43 | WconFirst = sparse( [Dcol(:,1);Dcol(:,2)], [Dcol(:,2);Dcol(:,1)], [weight,weight],k ,k ); 44 | WconFirst = full(WconFirst ) + eye(k); % the affinity matrix of the graph model 45 | 46 | Discard = sum(WconFirst,2); 47 | DiscardPos = find( Discard < 1.1 ); % to discard the outlier 48 | LenDiscardPos = length(DiscardPos); 49 | 50 | EdgSup = Find_Edge_Superpixels( Label, k, height, width , WconFirst, ConPix ); 51 | for j=1:LenDiscardPos 52 | EdgSup( DiscardPos(j) ) = 2; 53 | end 54 | 55 | NumIn = k - length( find( EdgSup == 2 ) ); 56 | NumEdg = length( find( EdgSup==1 ) ); 57 | EdgWcon = zeros( k, NumEdg ); 58 | mm=1; 59 | for j=1:k 60 | if EdgSup(j)==1 61 | EdgWcon(:,mm) = WconFirst(:,j); 62 | mm = mm + 1; 63 | end 64 | end 65 | alph = 1; W=zeros(k,k); 66 | %%%%%%%%%%%% absorb MC 67 | if NumIn == k 68 | BaseEdg = sum( EdgWcon, 2 ) ; 69 | D = diag( Discard + BaseEdg ); 70 | Wcon = D \ WconFirst; 71 | I = eye( NumIn ); 72 | N = ( I - alph* Wcon ); 73 | y = ones( NumIn, 1 ); 74 | Sal = N \ y; 75 | Sal = normalize(Sal); 76 | else 77 | BaseEdg = zeros( NumIn, 1 ); 78 | sumD = zeros( NumIn, 1 ); 79 | mm=1; 80 | for j = 1:k 81 | if EdgSup(j) < 2 82 | BaseEdg(mm) = sum( EdgWcon( j, : ) ); 83 | sumD(mm) = Discard(j); 84 | W(mm,:) = WconFirst(j,:); 85 | mm = mm + 1; 86 | end 87 | end 88 | mm=1; 89 | for j=1:k 90 | if EdgSup(j) < 2 91 | W( :,mm)=W(:,j); 92 | mm=mm+1; 93 | end 94 | end 95 | Wmid = W( 1:NumIn, 1: NumIn ); 96 | D = diag( BaseEdg + sumD ); 97 | Wmid = D \ Wmid; 98 | I = eye( NumIn ); 99 | N = ( I - alph* Wmid ); 100 | y = ones( NumIn, 1 ); 101 | Sal = N \ y; 102 | Sal = normalize(Sal); 103 | end 104 | %%%%%%%%%%% entropy decide 2 105 | Entro = zeros( 11, 1 ); 106 | for j = 1 : NumIn 107 | entroT = floor( Sal(j) * 10 ) + 1; 108 | Entro( entroT ) = Entro( entroT ) + 1; 109 | end 110 | Entro(10) = Entro(10) + Entro(11); 111 | Entro = Entro ./ NumIn; 112 | Entropy = 0; 113 | for j = 1 : 10 114 | Entropy = Entropy + Entro(j) * min( ( j ), ( 11 - j ) ); 115 | end 116 | % output the saliency map directly from absorb MC 117 | if Entropy < 2 118 | if NumIn < k 119 | SalAll = zeros(k,1); 120 | mm=1; 121 | for j= 1: k 122 | if EdgSup (j ) < 2 123 | SalAll(j) = Sal( mm ); 124 | mm=mm+1; 125 | end 126 | end 127 | for j=1:LenDiscardPos 128 | for z=1:k 129 | if ConPix( DiscardPos(j), z ) > 0 130 | if SalAll(z) >.3 131 | SalAll( DiscardPos(j) ) = 1 ; 132 | break; 133 | end 134 | end 135 | end 136 | end 137 | SalLine = sup2pixel( PixNum, LabelLine, SalAll ); % to convey the saliency value from superpixel to pixel 138 | Salpix = reshape( SalLine, width, height ); 139 | Salpix = Salpix'; 140 | else 141 | SalLine = sup2pixel( PixNum, LabelLine, Sal ); 142 | Salpix = 
reshape( SalLine, width, height ); 143 | Salpix = Salpix'; 144 | end 145 | %imwrite( Salpix, [ Salmap, ImgEnum(i).name(1:end-4), '.png' ] ); 146 | else 147 | %%%%%%%%%%%% equilibrium post-process 148 | if NumIn == k 149 | sumDiscard = sum( Discard ); 150 | c = Discard ./ sumDiscard; 151 | rW = 1 ./ c; 152 | sumrW = sum(rW); 153 | rW = rW / sumrW; 154 | Sal = N \ rW; 155 | Sal = normalize(Sal); 156 | else 157 | sumsumD = sum( sumD ); 158 | c = sumD ./ sumsumD; 159 | rW = 1 ./ c; 160 | sumrW = sum(rW); 161 | rW = rW / sumrW; 162 | Sal = N \ rW; 163 | Sal = normalize(Sal); 164 | end 165 | if NumIn < k 166 | SalAll = zeros(k,1); 167 | mm=1; 168 | for j= 1: k 169 | if EdgSup (j ) < 2 170 | SalAll(j) = Sal( mm ); 171 | mm=mm+1; 172 | end 173 | end 174 | for j = 1:LenDiscardPos % to descide the saliency of outlier based on neighbour's saliency 175 | for z=1:k 176 | if ConPix( DiscardPos(j), z ) > 0 177 | if SalAll(z) >.3 178 | SalAll( DiscardPos(j) ) = 1 ; 179 | break; 180 | end 181 | end 182 | end 183 | end 184 | SalLine = sup2pixel( PixNum, LabelLine, SalAll ); 185 | Salpix = reshape( SalLine, width, height ); 186 | Salpix = Salpix'; 187 | else 188 | SalLine = sup2pixel( PixNum, LabelLine, Sal ); 189 | Salpix = reshape( SalLine, width, height ); 190 | Salpix = Salpix'; 191 | end 192 | %imwrite( Salpix, [ Salmap,ImgEnum(i).name(1:end-4), '.png' ] ); 193 | end 194 | 195 | end 196 | 197 | -------------------------------------------------------------------------------- /code/code/SLIC.cpp: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | #include "matrix.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | using namespace std; 11 | 12 | // SLIC.h: interface for the SLIC class. 13 | //=========================================================================== 14 | // This code implements the superpixel method described in: 15 | // 16 | // Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Susstrunk, 17 | // "SLIC Superpixels", 18 | // EPFL Technical Report no. 149300, June 2010. 19 | //=========================================================================== 20 | // Copyright (c) 2012 Radhakrishna Achanta [EPFL]. All rights reserved. 
21 | //=========================================================================== 22 | ////////////////////////////////////////////////////////////////////// 23 | 24 | #if !defined(_SLIC_H_INCLUDED_) 25 | #define _SLIC_H_INCLUDED_ 26 | 27 | class SLIC 28 | { 29 | public: 30 | SLIC(); 31 | virtual ~SLIC(); 32 | //============================================================================ 33 | // sRGB to CIELAB conversion for 2-D images 34 | //========================================================================= 35 | void DoRGBtoLABConversionSup( 36 | vector & r1, 37 | vector & g1, 38 | vector & b1, 39 | double*& lvec, 40 | double*& avec, 41 | double*& bvec, 42 | const int & numSup); 43 | //====================================================================== 44 | //mean rgb in each superpixel 45 | //======================================================================= 46 | void SLIC::DoMeanSup( double * & m_rr, 47 | double * & m_gg, 48 | double * & m_bb, 49 | int & numlabels, 50 | int & NumPixel , 51 | double *& outlabel, 52 | vector & meanSupl, 53 | vector & meanSupa, 54 | vector & meanSupb); 55 | //============================================================================ 56 | // Superpixel segmentation for a given step size (superpixel size ~= step*step) 57 | //============================================================================ 58 | void DoSuperpixelSegmentation_ForGivenSuperpixelSize( 59 | double * & r, 60 | double * & g, 61 | double * & b, 62 | const int width, 63 | const int height, 64 | vector & klabels, 65 | int& numlabels, 66 | const int& superpixelsize, 67 | const double& compactness, 68 | double *& outlabel, 69 | const int& sz); 70 | //============================================================================ 71 | // Superpixel segmentation for a given number of superpixels 72 | //============================================================================ 73 | void DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels( 74 | double * & r, 75 | double * & g, 76 | double * & b, 77 | const int width, 78 | const int height, 79 | //vector & klabels, 80 | int& numlabels, 81 | const int& K, //required number of superpixels 82 | const double& compactness, 83 | double *& outlabel, 84 | const int& NumPixel); 85 | 86 | private: 87 | //============================================================================ 88 | // The main SLIC algorithm for generating superpixels 89 | //============================================================================ 90 | void PerformSuperpixelSLIC( 91 | vector& kseedsl, 92 | vector& kseedsa, 93 | vector& kseedsb, 94 | vector& kseedsx, 95 | vector& kseedsy, 96 | vector & klabels, 97 | const int& STEP, 98 | const vector& edgemag, 99 | const double& m , 100 | const int & sz, 101 | const int & numk); 102 | 103 | //============================================================================ 104 | // Pick seeds for superpixels when step size of superpixels is given. 105 | //============================================================================ 106 | void GetLABXYSeeds_ForGivenStepSize( 107 | vector& kseedsl, 108 | vector& kseedsa, 109 | vector& kseedsb, 110 | vector& kseedsx, 111 | vector& kseedsy, 112 | const int& STEP, 113 | const bool& perturbseeds, 114 | const vector& edgemag, 115 | int & numk); 116 | 117 | //============================================================================ 118 | // Move the superpixel seeds to low gradient positions to avoid putting seeds 119 | // at region boundaries. 
120 | //============================================================================ 121 | void PerturbSeeds( 122 | vector& kseedsl, 123 | vector& kseedsa, 124 | vector& kseedsb, 125 | vector& kseedsx, 126 | vector& kseedsy, 127 | const vector& edges); 128 | //============================================================================ 129 | // Detect color edges, to help PerturbSeeds() 130 | //============================================================================ 131 | void DetectLabEdges( 132 | const double* lvec, 133 | const double* avec, 134 | const double* bvec, 135 | const int& width, 136 | const int& height, 137 | vector& edges, 138 | const int & sz); 139 | //============================================================================ 140 | // sRGB to XYZ conversion; helper for RGB2LAB() 141 | //============================================================================ 142 | void RGB2XYZ( 143 | const double & sR, 144 | const double & sG, 145 | const double & sB, 146 | double& X, 147 | double& Y, 148 | double& Z); 149 | //============================================================================ 150 | // sRGB to CIELAB conversion (uses RGB2XYZ function) 151 | //============================================================================ 152 | void RGB2LAB( 153 | const double & sR, 154 | const double & sG, 155 | const double & sB, 156 | double& lval, 157 | double& aval, 158 | double& bval); 159 | //============================================================================ 160 | // sRGB to CIELAB conversion for 2-D images 161 | //============================================================================ 162 | void DoRGBtoLABConversion( 163 | double * & r1, 164 | double * & g1, 165 | double * & b1, 166 | double*& lvec, 167 | double*& avec, 168 | double*& bvec, 169 | const int & sz); 170 | //=============================================================================== 171 | // convert mean rgb of superpixel to lab 172 | //================================================================================= 173 | void RGB2XYZSup( 174 | const double & sR, 175 | const double & sG, 176 | const double & sB, 177 | double& X, 178 | double& Y, 179 | double& Z); 180 | //============================================================================ 181 | // sRGB to CIELAB conversion (uses RGB2XYZ function) 182 | void RGB2LABSup( 183 | const double & sR, 184 | const double & sG, 185 | const double & sB, 186 | double& lval, 187 | double& aval, 188 | double& bval); 189 | 190 | //============================================================================ 191 | // Post-processing of SLIC segmentation, to avoid stray labels. 192 | //============================================================================ 193 | void EnforceLabelConnectivity( 194 | vector & labels, 195 | const int width, 196 | const int height, 197 | double *& outlabelout, //input labels that need to be corrected to remove stray labels 198 | int& numlabels, //the number of labels changes in the end if segments are removed 199 | const int& K, //the number of superpixels desired by the user 200 | const int& sz); 201 | 202 | 203 | private: 204 | int m_width; 205 | int m_height; 206 | int m_depth; 207 | 208 | double* m_lvec; 209 | double* m_avec; 210 | double* m_bvec; 211 | 212 | double** m_lvecvec; 213 | double** m_avecvec; 214 | double** m_bvecvec; 215 | }; 216 | 217 | #endif // !defined(_SLIC_H_INCLUDED_) 218 | 219 | // SLIC.cpp: implementation of the SLIC class. 
220 | // 221 | // Copyright (C) Radhakrishna Achanta 2012 222 | // All rights reserved 223 | // Email: firstname.lastname@epfl.ch 224 | ////////////////////////////////////////////////////////////////////// 225 | 226 | 227 | 228 | ////////////////////////////////////////////////////////////////////// 229 | // Construction/Destruction 230 | ////////////////////////////////////////////////////////////////////// 231 | 232 | SLIC::SLIC() 233 | { 234 | m_lvec = NULL; 235 | m_avec = NULL; 236 | m_bvec = NULL; 237 | 238 | m_lvecvec = NULL; 239 | m_avecvec = NULL; 240 | m_bvecvec = NULL; 241 | } 242 | 243 | SLIC::~SLIC() 244 | { 245 | if(m_lvec) delete [] m_lvec; 246 | if(m_avec) delete [] m_avec; 247 | if(m_bvec) delete [] m_bvec; 248 | 249 | 250 | if(m_lvecvec) 251 | { 252 | for( int d = 0; d < m_depth; d++ ) delete [] m_lvecvec[d]; 253 | delete [] m_lvecvec; 254 | } 255 | if(m_avecvec) 256 | { 257 | for( int d = 0; d < m_depth; d++ ) delete [] m_avecvec[d]; 258 | delete [] m_avecvec; 259 | } 260 | if(m_bvecvec) 261 | { 262 | for( int d = 0; d < m_depth; d++ ) delete [] m_bvecvec[d]; 263 | delete [] m_bvecvec; 264 | } 265 | } 266 | 267 | //============================================================================== 268 | /// RGB2XYZ 269 | /// 270 | /// sRGB (D65 illuninant assumption) to XYZ conversion 271 | //============================================================================== 272 | void SLIC::RGB2XYZ( 273 | const double & sR, 274 | const double & sG, 275 | const double & sB, 276 | double& X, 277 | double& Y, 278 | double& Z) 279 | { 280 | double R = sR/255.0; 281 | double G = sG/255.0; 282 | double B = sB/255.0; 283 | double r, g, b; 284 | if(R <= 0.04045) r = R/12.92; 285 | else r = pow((R+0.055)/1.055,2.4); 286 | if(G <= 0.04045) g = G/12.92; 287 | else g = pow((G+0.055)/1.055,2.4); 288 | if(B <= 0.04045) b = B/12.92; 289 | else b = pow((B+0.055)/1.055,2.4); 290 | 291 | 292 | X = r*0.4339563 + g*0.3762153 + b*0.1898430; 293 | Y = r*0.2126729 + g*0.7151522 + b*0.0721750; 294 | Z = r*0.0177578 + g*0.1094756 + b*0.8728363; 295 | } 296 | 297 | //=========================================================================== 298 | /// RGB2LAB 299 | //=========================================================================== 300 | void SLIC::RGB2LAB(const double& sR, const double & sG, const double & sB, double& lval, double& aval, double& bval) 301 | { 302 | //------------------------ 303 | // sRGB to XYZ conversion 304 | //------------------------ 305 | double X, Y, Z; 306 | RGB2XYZ(sR, sG, sB, X, Y, Z); 307 | 308 | //------------------------ 309 | // XYZ to LAB conversion 310 | //------------------------ 311 | double epsilon = 0.008856; //actual CIE standard 312 | // double kappa = 903.3; //actual CIE standard 313 | 314 | double fx, fy, fz; 315 | if( X > epsilon) fx = pow(X, 1.0/3.0); 316 | else fx = 7.787069 * X + 0.137931; //(kappa*X + 16.0)/116.0; 317 | if( Y > epsilon) fy = pow(Y, 1.0/3.0); 318 | else fy = 7.787069 * Y + 0.137931; //(kappa*Y + 16.0)/116.0; 319 | if( Z > epsilon) fz = pow(Z, 1.0/3.0); 320 | else fz = 7.787069 * Z + 0.137931; //(kappa*Z + 16.0)/116.0; 321 | 322 | lval = 116.0*fy-16.0; 323 | aval = 500.0*(fx-fy); 324 | bval = 200.0*(fy-fz); 325 | } 326 | 327 | //=========================================================================== 328 | /// DoRGBtoLABConversion 329 | /// 330 | /// For whole image: overlaoded floating point version 331 | //=========================================================================== 332 | void SLIC::DoRGBtoLABConversion( 333 | 
double* & r1, 334 | double* & g1, 335 | double* & b1, 336 | double*& lvec, 337 | double*& avec, 338 | double*& bvec, 339 | const int & sz) 340 | { 341 | lvec = new double[sz]; 342 | avec = new double[sz]; 343 | bvec = new double[sz]; 344 | 345 | for( int j = 0; j < sz; j++ ) 346 | { 347 | double r = r1[j]; 348 | double g = g1[j]; 349 | double b = b1[j]; 350 | 351 | RGB2LAB( r, g, b, lvec[j], avec[j], bvec[j] ); 352 | } 353 | } 354 | 355 | //////////////// convert mean rgb of superpixel to lab 356 | void SLIC::RGB2XYZSup( 357 | const double & sR, 358 | const double & sG, 359 | const double & sB, 360 | double& X, 361 | double& Y, 362 | double& Z) 363 | { 364 | double r, g, b; 365 | r = pow( ( sR + 0.099 )/1.099, 2.222 ); 366 | g = pow( ( sG + 0.099 )/1.099, 2.222 ); 367 | b = pow( ( sB + 0.099 )/1.099, 2.222 ); 368 | 369 | if ( r < 0.018 ) 370 | r = sR / 4.5138; 371 | 372 | if ( g < 0.018 ) 373 | g = sG / 4.5138; 374 | 375 | if ( b < 0.018 ) 376 | b = sB / 4.5138; 377 | 378 | X = r*0.4339563 + g*0.3762153 + b*0.1898430; 379 | Y = r*0.2126729 + g*0.7151522 + b*0.0721750; 380 | Z = r*0.0177578 + g*0.1094756 + b*0.8728363; 381 | } 382 | 383 | //=========================================================================== 384 | /// RGB2LAB 385 | //=========================================================================== 386 | void SLIC::RGB2LABSup(const double& sR, const double & sG, const double & sB, double& lval, double& aval, double& bval) 387 | { 388 | //------------------------ 389 | // sRGB to XYZ conversion 390 | //------------------------ 391 | double X, Y, Z; 392 | RGB2XYZSup(sR, sG, sB, X, Y, Z); 393 | 394 | //------------------------ 395 | // XYZ to LAB conversion 396 | //------------------------ 397 | double epsilon = 0.008856; //actual CIE standard 398 | 399 | double fx, fy, fz; 400 | if( X > epsilon) fx = pow(X, 1.0/3.0); 401 | else fx = 7.787069 * X + 0.137931; //(kappa*X + 16.0)/116.0; 402 | if( Y > epsilon) fy = pow(Y, 1.0/3.0); 403 | else fy = 7.787069 * Y + 0.137931; //(kappa*Y + 16.0)/116.0; 404 | if( Z > epsilon) fz = pow(Z, 1.0/3.0); 405 | else fz = 7.787069 * Z + 0.137931; //(kappa*Z + 16.0)/116.0; 406 | 407 | lval = 116.0*fy-16.0; 408 | aval = 500.0*(fx-fy); 409 | bval = 200.0*(fy-fz); 410 | } 411 | 412 | //=========================================================================== 413 | /// DoRGBtoLABConversion 414 | /// 415 | /// For whole image: overlaoded floating point version 416 | //=========================================================================== 417 | void SLIC::DoRGBtoLABConversionSup( 418 | vector & r1, 419 | vector & g1, 420 | vector & b1, 421 | double*& lvec, 422 | double*& avec, 423 | double*& bvec, 424 | const int & numSup) 425 | { 426 | 427 | /*lvec = new double[sz]; 428 | avec = new double[sz]; 429 | bvec = new double[sz];*/ 430 | 431 | for( int j = 0; j < numSup; j++ ) 432 | { 433 | double r = r1[j]; 434 | double g = g1[j]; 435 | double b = b1[j]; 436 | 437 | RGB2LABSup( r, g, b, lvec[j], avec[j], bvec[j] ); 438 | } 439 | } 440 | 441 | //============================================================================== 442 | /// DetectLabEdges 443 | //============================================================================== 444 | void SLIC::DetectLabEdges( 445 | const double* lvec, 446 | const double* avec, 447 | const double* bvec, 448 | const int& width, 449 | const int& height, 450 | vector& edges, 451 | const int & sz) 452 | { 453 | 454 | edges.resize(sz,0); 455 | for( int j = 1; j < height-1; j++ ) 456 | { 457 | for( int k = 1; k < 
width-1; k++ ) 458 | { 459 | int i = j*width+k; 460 | 461 | double dx = (lvec[i-1]-lvec[i+1])*(lvec[i-1]-lvec[i+1]) + 462 | (avec[i-1]-avec[i+1])*(avec[i-1]-avec[i+1]) + 463 | (bvec[i-1]-bvec[i+1])*(bvec[i-1]-bvec[i+1]); 464 | 465 | double dy = (lvec[i-width]-lvec[i+width])*(lvec[i-width]-lvec[i+width]) + 466 | (avec[i-width]-avec[i+width])*(avec[i-width]-avec[i+width]) + 467 | (bvec[i-width]-bvec[i+width])*(bvec[i-width]-bvec[i+width]); 468 | 469 | //edges[i] = fabs(dx) + fabs(dy); 470 | edges[i] = dx*dx + dy*dy; 471 | } 472 | } 473 | } 474 | 475 | //=========================================================================== 476 | /// PerturbSeeds 477 | //=========================================================================== 478 | void SLIC::PerturbSeeds( 479 | vector& kseedsl, 480 | vector& kseedsa, 481 | vector& kseedsb, 482 | vector& kseedsx, 483 | vector& kseedsy, 484 | const vector& edges) 485 | { 486 | const int dx8[8] = {-1, -1, 0, 1, 1, 1, 0, -1}; 487 | const int dy8[8] = { 0, -1, -1, -1, 0, 1, 1, 1}; 488 | 489 | int numseeds = kseedsl.size(); 490 | 491 | for( int n = 0; n < numseeds; n++ ) 492 | { 493 | int ox = kseedsx[n]; //original x 494 | int oy = kseedsy[n]; //original y 495 | int oind = oy*m_width + ox; 496 | 497 | int storeind = oind; 498 | for( int i = 0; i < 8; i++ ) 499 | { 500 | int nx = ox+dx8[i];//new x 501 | int ny = oy+dy8[i];//new y 502 | 503 | if( nx >= 0 && nx < m_width && ny >= 0 && ny < m_height) 504 | { 505 | int nind = ny*m_width + nx; 506 | if( edges[nind] < edges[storeind]) 507 | { 508 | storeind = nind; 509 | } 510 | } 511 | } 512 | if(storeind != oind) 513 | { 514 | kseedsx[n] = storeind%m_width; 515 | kseedsy[n] = storeind/m_width; 516 | kseedsl[n] = m_lvec[storeind]; 517 | kseedsa[n] = m_avec[storeind]; 518 | kseedsb[n] = m_bvec[storeind]; 519 | } 520 | } 521 | } 522 | 523 | 524 | //=========================================================================== 525 | /// GetLABXYSeeds_ForGivenStepSize 526 | /// 527 | /// The k seed values are taken as uniform spatial pixel samples. 
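/// In other words, seeds are placed on a regular grid of roughly (width/STEP) by
/// (height/STEP) strips; each seed sits STEP/2 inside its strip and is shifted by
/// the per-strip share of the rounding error (xerrperstrip, yerrperstrip), so the
/// seeds stay uniformly spread even when STEP does not divide the image size.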
528 | //=========================================================================== 529 | void SLIC::GetLABXYSeeds_ForGivenStepSize( 530 | vector& kseedsl, 531 | vector& kseedsa, 532 | vector& kseedsb, 533 | vector& kseedsx, 534 | vector& kseedsy, 535 | const int& STEP, 536 | const bool& perturbseeds, 537 | const vector& edgemag, 538 | int & numk) 539 | { 540 | const bool hexgrid = false; 541 | int numseeds(0); 542 | int n(0); 543 | 544 | //int xstrips = m_width/STEP; 545 | //int ystrips = m_height/STEP; 546 | int xstrips = (0.5+double(m_width)/double(STEP)); 547 | int ystrips = (0.5+double(m_height)/double(STEP)); 548 | 549 | int xerr = m_width - STEP*xstrips;if(xerr < 0){xstrips--;xerr = m_width - STEP*xstrips;} 550 | int yerr = m_height - STEP*ystrips;if(yerr < 0){ystrips--;yerr = m_height- STEP*ystrips;} 551 | 552 | double xerrperstrip = double(xerr)/double(xstrips); 553 | double yerrperstrip = double(yerr)/double(ystrips); 554 | 555 | int xoff = STEP/2; 556 | int yoff = STEP/2; 557 | //------------------------- 558 | numseeds = xstrips*ystrips; 559 | //------------------------- 560 | kseedsl.resize(numseeds); 561 | kseedsa.resize(numseeds); 562 | kseedsb.resize(numseeds); 563 | kseedsx.resize(numseeds); 564 | kseedsy.resize(numseeds); 565 | 566 | for( int y = 0; y < ystrips; y++ ) 567 | { 568 | int ye = y*yerrperstrip; 569 | for( int x = 0; x < xstrips; x++ ) 570 | { 571 | int xe = x*xerrperstrip; 572 | int seedx = (x*STEP+xoff+xe); 573 | if(hexgrid){ seedx = x*STEP+(xoff<<(y&0x1))+xe; seedx = min(m_width-1,seedx); }//for hex grid sampling 574 | int seedy = (y*STEP+yoff+ye); 575 | int i = seedy*m_width + seedx; 576 | 577 | kseedsl[n] = m_lvec[i]; 578 | kseedsa[n] = m_avec[i]; 579 | kseedsb[n] = m_bvec[i]; 580 | kseedsx[n] = seedx; 581 | kseedsy[n] = seedy; 582 | n++; 583 | } 584 | } 585 | numk = n; 586 | if(perturbseeds) 587 | { 588 | PerturbSeeds(kseedsl, kseedsa, kseedsb, kseedsx, kseedsy, edgemag); 589 | } 590 | } 591 | 592 | //=========================================================================== 593 | /// PerformSuperpixelSLIC 594 | /// 595 | /// Performs k mean segmentation. It is fast because it looks locally, not 596 | /// over the entire image. 
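/// Each pixel is compared only against seeds within a 2*STEP window, using the
/// combined distance
///     D = d_lab^2 + (M/STEP)^2 * d_xy^2
/// where M is the compactness parameter, so a larger M weights spatial proximity
/// more heavily and yields more compact superpixels.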
597 | //=========================================================================== 598 | void SLIC::PerformSuperpixelSLIC( 599 | vector& kseedsl, 600 | vector& kseedsa, 601 | vector& kseedsb, 602 | vector& kseedsx, 603 | vector& kseedsy, 604 | vector & klabels, 605 | const int& STEP, 606 | const vector& edgemag, 607 | const double& M, 608 | const int & sz, 609 | const int & numk) 610 | { 611 | 612 | //const int numk = kseedsl.size(); 613 | //---------------- 614 | int offset = STEP; 615 | //if(STEP < 8) offset = STEP*1.5;//to prevent a crash due to a very small step size 616 | //---------------- 617 | 618 | vector clustersize(numk, 0); 619 | vector inv(numk, 0);//to store 1/clustersize[k] values 620 | 621 | vector sigmal(numk, 0); 622 | vector sigmaa(numk, 0); 623 | vector sigmab(numk, 0); 624 | vector sigmax(numk, 0); 625 | vector sigmay(numk, 0); 626 | vector distvec(sz, DBL_MAX); 627 | 628 | double invwt = 1.0/((STEP/M)*(STEP/M)); 629 | 630 | int x1, y1, x2, y2; 631 | double l, a, b; 632 | double dist; 633 | double distxy; 634 | for( int itr = 0; itr < 10; itr++ ) 635 | { 636 | distvec.assign(sz, DBL_MAX); 637 | for( int n = 0; n < numk; n++ ) 638 | { 639 | y1 = max(0.0, kseedsy[n]-offset); 640 | y2 = min((double)m_height, kseedsy[n]+offset); 641 | x1 = max(0.0, kseedsx[n]-offset); 642 | x2 = min((double)m_width, kseedsx[n]+offset); 643 | 644 | 645 | for( int y = y1; y < y2; y++ ) 646 | { 647 | for( int x = x1; x < x2; x++ ) 648 | { 649 | int i = y*m_width + x; 650 | 651 | l = m_lvec[i]; 652 | a = m_avec[i]; 653 | b = m_bvec[i]; 654 | 655 | dist = (l - kseedsl[n])*(l - kseedsl[n]) + 656 | (a - kseedsa[n])*(a - kseedsa[n]) + 657 | (b - kseedsb[n])*(b - kseedsb[n]); 658 | 659 | distxy = (x - kseedsx[n])*(x - kseedsx[n]) + 660 | (y - kseedsy[n])*(y - kseedsy[n]); 661 | 662 | //------------------------------------------------------------------------ 663 | dist += distxy*invwt;//dist = sqrt(dist) + sqrt(distxy*invwt);//this is more exact 664 | //------------------------------------------------------------------------ 665 | if( dist < distvec[i] ) 666 | { 667 | distvec[i] = dist; 668 | klabels[i] = n; 669 | } 670 | } 671 | } 672 | } 673 | //----------------------------------------------------------------- 674 | // Recalculate the centroid and store in the seed values 675 | //----------------------------------------------------------------- 676 | //instead of reassigning memory on each iteration, just reset. 
677 | 678 | sigmal.assign(numk, 0); 679 | sigmaa.assign(numk, 0); 680 | sigmab.assign(numk, 0); 681 | sigmax.assign(numk, 0); 682 | sigmay.assign(numk, 0); 683 | clustersize.assign(numk, 0); 684 | //------------------------------------ 685 | //edgesum.assign(numk, 0); 686 | //------------------------------------ 687 | 688 | {int ind(0); 689 | for( int r = 0; r < m_height; r++ ) 690 | { 691 | for( int c = 0; c < m_width; c++ ) 692 | { 693 | sigmal[klabels[ind]] += m_lvec[ind]; 694 | sigmaa[klabels[ind]] += m_avec[ind]; 695 | sigmab[klabels[ind]] += m_bvec[ind]; 696 | sigmax[klabels[ind]] += c; 697 | sigmay[klabels[ind]] += r; 698 | //------------------------------------ 699 | //edgesum[klabels[ind]] += edgemag[ind]; 700 | //------------------------------------ 701 | clustersize[klabels[ind]] += 1.0; 702 | ind++; 703 | } 704 | }} 705 | 706 | {for( int k = 0; k < numk; k++ ) 707 | { 708 | if( clustersize[k] <= 0 ) clustersize[k] = 1; 709 | inv[k] = 1.0/clustersize[k];//computing inverse now to multiply, than divide later 710 | }} 711 | 712 | {for( int k = 0; k < numk; k++ ) 713 | { 714 | kseedsl[k] = sigmal[k]*inv[k]; 715 | kseedsa[k] = sigmaa[k]*inv[k]; 716 | kseedsb[k] = sigmab[k]*inv[k]; 717 | kseedsx[k] = sigmax[k]*inv[k]; 718 | kseedsy[k] = sigmay[k]*inv[k]; 719 | //------------------------------------ 720 | //edgesum[k] *= inv[k]; 721 | //------------------------------------ 722 | }} 723 | } 724 | } 725 | 726 | //=========================================================================== 727 | /// EnforceLabelConnectivity 728 | /// 729 | /// 1. finding an adjacent label for each new component at the start 730 | /// 2. if a certain component is too small, assigning the previously found 731 | /// adjacent label to this component, and not incrementing the label. 
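/// ("too small" here means a connected component of fewer than SUPSZ/4 pixels,
/// where SUPSZ = sz/K is the expected superpixel size; such fragments are
/// relabelled with the adjacent label found in step 1.)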
732 | //=========================================================================== 733 | void SLIC::EnforceLabelConnectivity( 734 | vector & labels, //input labels that need to be corrected to remove stray labels 735 | const int width, 736 | const int height, 737 | double *& outlabelout, //new labels 738 | int& numlabels, //the number of labels changes in the end if segments are removed 739 | const int& K, //the number of superpixels desired by the user 740 | const int & sz) 741 | { 742 | // const int dx8[8] = {-1, -1, 0, 1, 1, 1, 0, -1}; 743 | // const int dy8[8] = { 0, -1, -1, -1, 0, 1, 1, 1}; 744 | 745 | const int dx4[4] = {-1, 0, 1, 0}; 746 | const int dy4[4] = { 0, -1, 0, 1}; 747 | 748 | 749 | const int SUPSZ = sz/K; 750 | //nlabels.resize(sz, -1); 751 | for( int i = 0; i < sz; i++ ) outlabelout[i] = -1; 752 | int label(0); 753 | int* xvec = new int[sz]; 754 | int* yvec = new int[sz]; 755 | int oindex(0); 756 | int adjlabel(0);//adjacent label 757 | for( int j = 0; j < height; j++ ) 758 | { 759 | for( int k = 0; k < width; k++ ) 760 | { 761 | if( 0 > outlabelout[oindex] ) 762 | { 763 | outlabelout[oindex] = label; 764 | //-------------------- 765 | // Start a new segment 766 | //-------------------- 767 | xvec[0] = k; 768 | yvec[0] = j; 769 | //------------------------------------------------------- 770 | // Quickly find an adjacent label for use later if needed 771 | //------------------------------------------------------- 772 | {for( int n = 0; n < 4; n++ ) 773 | { 774 | int x = xvec[0] + dx4[n]; 775 | int y = yvec[0] + dy4[n]; 776 | if( (x >= 0 && x < width) && (y >= 0 && y < height) ) 777 | { 778 | int nindex = y*width + x; 779 | if(outlabelout[nindex] >= 0) adjlabel = outlabelout[nindex]; 780 | } 781 | }} 782 | 783 | int count(1); 784 | for( int c = 0; c < count; c++ ) 785 | { 786 | for( int n = 0; n < 4; n++ ) 787 | { 788 | int x = xvec[c] + dx4[n]; 789 | int y = yvec[c] + dy4[n]; 790 | 791 | if( (x >= 0 && x < width) && (y >= 0 && y < height) ) 792 | { 793 | int nindex = y*width + x; 794 | 795 | if( 0 > outlabelout[nindex] && labels[oindex] == labels[nindex] ) 796 | { 797 | xvec[count] = x; 798 | yvec[count] = y; 799 | outlabelout[nindex] = label; 800 | count++; 801 | } 802 | } 803 | 804 | } 805 | } 806 | //------------------------------------------------------- 807 | // If segment size is less then a limit, assign an 808 | // adjacent label found before, and decrement label count. 809 | //------------------------------------------------------- 810 | if(count <= SUPSZ >> 2) 811 | { 812 | for( int c = 0; c < count; c++ ) 813 | { 814 | int ind = yvec[c]*width+xvec[c]; 815 | outlabelout[ind] = adjlabel; 816 | } 817 | label--; 818 | } 819 | label++; 820 | } 821 | oindex++; 822 | } 823 | } 824 | numlabels= label; 825 | 826 | if(xvec) delete [] xvec; 827 | if(yvec) delete [] yvec; 828 | } 829 | 830 | 831 | 832 | //=========================================================================== 833 | /// DoSuperpixelSegmentation_ForGivenSuperpixelSize 834 | /// 835 | /// The input parameter ubuff conains RGB values in a 32-bit unsigned integers 836 | /// as follows: 837 | /// 838 | /// [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] 839 | /// 840 | /// Nothing R G B 841 | /// 842 | /// The RGB values are accessed from (and packed into) the unsigned integers 843 | /// using bitwise operators as can be seen in the function DoRGBtoLABConversion(). 844 | /// 845 | /// compactness value depends on the input pixels values. 
For instance, if 846 | /// the input is greyscale with values ranging from 0-100, then a compactness 847 | /// value of 20.0 would give good results. A greater value will make the 848 | /// superpixels more compact while a smaller value would make them more uneven. 849 | /// 850 | /// The labels can be saved if needed using SaveSuperpixelLabels() 851 | //=========================================================================== 852 | void SLIC::DoSuperpixelSegmentation_ForGivenSuperpixelSize( 853 | double * & r, 854 | double * & g, 855 | double * & b, 856 | const int width, 857 | const int height, 858 | vector & klabels, 859 | int& numlabels, 860 | const int& superpixelsize, 861 | const double& compactness, 862 | double *& outlabel, 863 | const int& sz) 864 | { 865 | //------------------------------------------------ 866 | const int STEP = sqrt(double(superpixelsize))+0.5; 867 | //------------------------------------------------ 868 | vector kseedsl(0); 869 | vector kseedsa(0); 870 | vector kseedsb(0); 871 | vector kseedsx(0); 872 | vector kseedsy(0); 873 | 874 | //-------------------------------------------------- 875 | m_width = width; 876 | m_height = height; 877 | 878 | //LAB, the default option 879 | 880 | DoRGBtoLABConversion(r,g,b, m_lvec, m_avec, m_bvec, sz); 881 | 882 | //-------------------------------------------------- 883 | bool perturbseeds(false);//perturb seeds is not absolutely necessary, one can set this flag to false 884 | vector edgemag(0); 885 | if(perturbseeds) DetectLabEdges(m_lvec, m_avec, m_bvec, m_width, m_height, edgemag, sz); 886 | int numk(0); 887 | GetLABXYSeeds_ForGivenStepSize(kseedsl, kseedsa, kseedsb, kseedsx, kseedsy, STEP, perturbseeds, edgemag, numk); 888 | 889 | PerformSuperpixelSLIC(kseedsl, kseedsa, kseedsb, kseedsx, kseedsy, klabels, STEP, edgemag, compactness, sz, numk); 890 | //numlabels = kseedsl.size(); 891 | //numlabels = numk; 892 | //int* nlabels = new int[sz]; 893 | //vector nlabels (sz,-1); 894 | EnforceLabelConnectivity(klabels, m_width, m_height, outlabel, numlabels, double(sz)/double(STEP*STEP), sz); 895 | //{for(int i = 0; i < sz; i++ ) outlabel[i] = nlabels[i];} 896 | //if(nlabels) delete [] nlabels; 897 | } 898 | 899 | //=========================================================================== 900 | /// DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels 901 | /// 902 | /// The input parameter ubuff conains RGB values in a 32-bit unsigned integers 903 | /// as follows: 904 | /// 905 | /// [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] 906 | /// 907 | /// Nothing R G B 908 | /// 909 | /// The RGB values are accessed from (and packed into) the unsigned integers 910 | /// using bitwise operators as can be seen in the function DoRGBtoLABConversion(). 911 | /// 912 | /// compactness value depends on the input pixels values. For instance, if 913 | /// the input is greyscale with values ranging from 0-100, then a compactness 914 | /// value of 20.0 would give good results. A greater value will make the 915 | /// superpixels more compact while a smaller value would make them more uneven. 
916 | /// 917 | /// The labels can be saved if needed using SaveSuperpixelLabels() 918 | //=========================================================================== 919 | void SLIC::DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels( 920 | double * & r, 921 | double * & g, 922 | double * & b, 923 | const int width, 924 | const int height, 925 | //vector & klabels, 926 | int& numlabels, 927 | const int& K, 928 | const double& compactness, 929 | double *& outlabel, 930 | const int& NumPixel ) 931 | { 932 | vector klabels( NumPixel, -1 ); 933 | 934 | const int superpixelsize = 0.5 + double(NumPixel)/double(K); 935 | DoSuperpixelSegmentation_ForGivenSuperpixelSize(r,g,b,width,height,klabels,numlabels,superpixelsize,compactness,outlabel,NumPixel); 936 | } 937 | 938 | void SLIC::DoMeanSup( double * & m_rr, 939 | double * & m_gg, 940 | double * & m_bb, 941 | int & numlabels, 942 | int & NumPixel, 943 | double *& outlabel, 944 | vector & meanSupl, 945 | vector & meanSupa, 946 | vector & meanSupb ) 947 | { 948 | vector num( numlabels ); 949 | 950 | for ( int j = 0; j< NumPixel; j++ ){ 951 | 952 | int kk = int(outlabel[j]); 953 | meanSupl[ kk ] += m_rr[j]; 954 | num[ kk ] += 1; 955 | 956 | meanSupa[ kk ] += m_gg[j]; 957 | 958 | meanSupb[ kk ] += m_bb[j]; 959 | } 960 | 961 | for ( int j = 0; j< numlabels; j++ ) { 962 | 963 | int numj = num[ j ]; 964 | meanSupl[ j ] /= numj; 965 | meanSupa[ j ] /= numj; 966 | meanSupb[ j ] /= numj; 967 | } 968 | } 969 | 970 | 971 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 972 | { 973 | if (nrhs!=4) mexErrMsgTxt("error :the input number error"); 974 | 975 | double * rr= (double* )mxGetPr(prhs[0]); 976 | double * gg= (double*)mxGetPr(prhs[1]); 977 | double * bb= (double*)mxGetPr(prhs[2]); 978 | double *imgattr=(double *)mxGetPr(prhs[3]); 979 | 980 | double height=imgattr[0]; 981 | double width=imgattr[1]; 982 | double k=imgattr[2]; 983 | double m=imgattr[3]; 984 | int NumPixel = imgattr[4]; 985 | 986 | int numlabels(0); 987 | 988 | plhs[0] = mxCreateDoubleMatrix( NumPixel, 1, mxREAL); 989 | double * outlabel=(double *)mxGetPr(plhs[0]); 990 | 991 | SLIC slic; 992 | slic.DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels( rr, gg, bb, width, height, numlabels, k, m, outlabel, NumPixel ); 993 | 994 | plhs[1] = mxCreateDoubleMatrix( numlabels, 1, mxREAL); 995 | double * Supll = (double *)mxGetPr(plhs[1]); 996 | 997 | plhs[2] = mxCreateDoubleMatrix( numlabels, 1, mxREAL); 998 | double * Supaa = (double *)mxGetPr(plhs[2]); 999 | 1000 | plhs[3] = mxCreateDoubleMatrix( numlabels, 1, mxREAL); 1001 | double * Supbb = (double *)mxGetPr(plhs[3]); 1002 | 1003 | plhs[4] = mxCreateDoubleMatrix( 1, 1, mxREAL ); 1004 | double * numSuperpixel = (double *)mxGetPr(plhs[4]); 1005 | numSuperpixel[0] = numlabels; 1006 | 1007 | vector meanSuprr( numlabels ); 1008 | vector meanSupgg( numlabels ); 1009 | vector meanSupbb( numlabels ); 1010 | 1011 | slic.DoMeanSup( rr, gg, bb, numlabels, NumPixel , outlabel, meanSuprr, meanSupgg, meanSupbb ); 1012 | 1013 | slic.DoRGBtoLABConversionSup(meanSuprr, meanSupgg, meanSupbb, Supll, Supaa, Supbb, numlabels); 1014 | 1015 | } -------------------------------------------------------------------------------- /code/code/SLIC.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SLIC.mexw32 
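The .mexw32/.mexw64 files above are pre-compiled Windows binaries of SLIC.cpp; if they do not match the local MATLAB installation they can presumably be rebuilt with mex SLIC.cpp (and mex sup2pixel.cpp for the other MEX source). A minimal sketch of how the MEX gateway is invoked, following the call in GetMC.m / Saliency_Absorb_MC.m (the example image path is one of the files shipped in dataset/image_2):

    Img = double( imread( 'dataset/image_2/000000_10.png' ) );
    [ height, width ] = size( Img(:,:,1) );
    PixNum = height*width;
    ImgVecR = reshape( Img(:,:,1)', PixNum, 1 );   % row-major channel vectors, as SLIC.cpp indexes them
    ImgVecG = reshape( Img(:,:,2)', PixNum, 1 );
    ImgVecB = reshape( Img(:,:,3)', PixNum, 1 );
    ImgAttr = [ height, width, 250, 20, PixNum ];  % k = 250 superpixels, compactness m = 20
    [ LabelLine, Sup1, Sup2, Sup3, k ] = SLIC( ImgVecR, ImgVecG, ImgVecB, ImgAttr );
    Label = reshape( LabelLine, width, height )';  % back to a height-by-width label map

SLIC returns the per-pixel label vector, the mean L, a, b colour of each superpixel, and the final superpixel count k, which may differ slightly from the requested number.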
-------------------------------------------------------------------------------- /code/code/SLIC.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SLIC.mexw64 -------------------------------------------------------------------------------- /code/code/Saliency_Absorb_MC.m: -------------------------------------------------------------------------------- 1 | function Saliency_Absorb_MC 2 | %% 3 | clear; tic; 4 | addpath( './SubCode/' ); 5 | Salmap = './map/'; 6 | mkdir( Salmap ); 7 | imgpath = './img/'; 8 | ImgEnum=dir([imgpath '*.jpg']); ImgNum=length(ImgEnum); 9 | for i= 1 :ImgNum 10 | imname = [ imgpath ImgEnum(i).name ]; 11 | Img = double( imread( imname ) ); 12 | figure,imshow(uint8(Img)); 13 | [ height,width ] = size(Img(:,:,1)); 14 | PixNum = height*width; 15 | ImgVecR = reshape( Img(:,:,1)', PixNum, 1); 16 | ImgVecG = reshape( Img(:,:,2)', PixNum, 1); 17 | ImgVecB = reshape( Img(:,:,3)', PixNum, 1); 18 | % m is the compactness parameter, k is the super-pixel number in SLIC algorithm 19 | m = 20; k = 250; 20 | ImgAttr=[ height ,width, k, m, PixNum ]; 21 | % obtain superpixel from SLIC algorithm: LabelLine is the super-pixel label vector of the image, 22 | % Sup1, Sup2, Sup3 are the mean L a b colour value of each superpixel, 23 | % k is the number of the super-pixel. 24 | [ LabelLine, Sup1, Sup2, Sup3, k ] = SLIC( ImgVecR, ImgVecG, ImgVecB, ImgAttr ); 25 | Label=reshape(LabelLine,width,height); 26 | Label = Label'; % the superpixle label 27 | 28 | [ ConPix, ConPixDouble ] = find_connect_superpixel_DoubleIn_Opposite( Label, k, height ,width ); 29 | % count the number of the edge in the graph 30 | NumInit=0; 31 | for j=1:k 32 | for z=j+1:k 33 | if ConPixDouble(j,z)>0 34 | NumInit=NumInit+1; 35 | end 36 | end 37 | end 38 | Dcol=zeros(NumInit,3); 39 | % calculate the edge weight 40 | mm=1; 41 | for j=1:k-1 42 | for z=j+1:k 43 | if ConPixDouble(j,z)>0 44 | DcolTem = sqrt( ( Sup1(j)-Sup1(z) ).^2 + ( Sup2(j)-Sup2(z) ).^2 + ( Sup3(j)- Sup3(z) ).^2 ); 45 | Dcol(mm, 1: 3 )=[j,z,DcolTem ]; 46 | mm=mm+1; 47 | end 48 | end 49 | end 50 | 51 | DcolNor = normalize( Dcol(:,3) ); 52 | weight = exp( -10*DcolNor ) + .00001; 53 | WconFirst = sparse( [Dcol(:,1);Dcol(:,2)], [Dcol(:,2);Dcol(:,1)], [weight,weight],k ,k ); 54 | WconFirst = full(WconFirst ) + eye(k); % the affinity matrix of the graph model 55 | 56 | Discard = sum(WconFirst,2); 57 | DiscardPos = find( Discard < 1.1 ); % to discard the outlier 58 | LenDiscardPos = length(DiscardPos); 59 | 60 | EdgSup = Find_Edge_Superpixels( Label, k, height, width , WconFirst, ConPix ); 61 | for j=1:LenDiscardPos 62 | EdgSup( DiscardPos(j) ) = 2; 63 | end 64 | 65 | NumIn = k - length( find( EdgSup == 2 ) ); 66 | NumEdg = length( find( EdgSup==1 ) ); 67 | EdgWcon = zeros( k, NumEdg ); 68 | mm=1; 69 | for j=1:k 70 | if EdgSup(j)==1 71 | EdgWcon(:,mm) = WconFirst(:,j); 72 | mm = mm + 1; 73 | end 74 | end 75 | alph = 1; W=zeros(k,k); 76 | %%%%%%%%%%%% absorb MC 77 | if NumIn == k 78 | BaseEdg = sum( EdgWcon, 2 ) ; 79 | D = diag( Discard + BaseEdg ); 80 | Wcon = D \ WconFirst; 81 | I = eye( NumIn ); 82 | N = ( I - alph* Wcon ); 83 | y = ones( NumIn, 1 ); 84 | Sal = N \ y; 85 | Sal = normalize(Sal); 86 | else 87 | BaseEdg = zeros( NumIn, 1 ); 88 | sumD = zeros( NumIn, 1 ); 89 | mm=1; 90 | for j = 1:k 91 | if EdgSup(j) < 2 92 | BaseEdg(mm) = sum( EdgWcon( j, : ) ); 93 | 
sumD(mm) = Discard(j); 94 | W(mm,:) = WconFirst(j,:); 95 | mm = mm + 1; 96 | end 97 | end 98 | mm=1; 99 | for j=1:k 100 | if EdgSup(j) < 2 101 | W( :,mm)=W(:,j); 102 | mm=mm+1; 103 | end 104 | end 105 | Wmid = W( 1:NumIn, 1: NumIn ); 106 | D = diag( BaseEdg + sumD ); 107 | Wmid = D \ Wmid; 108 | I = eye( NumIn ); 109 | N = ( I - alph* Wmid ); 110 | y = ones( NumIn, 1 ); 111 | Sal = N \ y; 112 | Sal = normalize(Sal); 113 | end 114 | %%%%%%%%%%% entropy decide 2 115 | Entro = zeros( 11, 1 ); 116 | for j = 1 : NumIn 117 | entroT = floor( Sal(j) * 10 ) + 1; 118 | Entro( entroT ) = Entro( entroT ) + 1; 119 | end 120 | Entro(10) = Entro(10) + Entro(11); 121 | Entro = Entro ./ NumIn; 122 | Entropy = 0; 123 | for j = 1 : 10 124 | Entropy = Entropy + Entro(j) * min( ( j ), ( 11 - j ) ); 125 | end 126 | % output the saliency map directly from absorb MC 127 | if Entropy < 2 128 | if NumIn < k 129 | SalAll = zeros(k,1); 130 | mm=1; 131 | for j= 1: k 132 | if EdgSup (j ) < 2 133 | SalAll(j) = Sal( mm ); 134 | mm=mm+1; 135 | end 136 | end 137 | for j=1:LenDiscardPos 138 | for z=1:k 139 | if ConPix( DiscardPos(j), z ) > 0 140 | if SalAll(z) >.3 141 | SalAll( DiscardPos(j) ) = 1 ; 142 | break; 143 | end 144 | end 145 | end 146 | end 147 | SalLine = sup2pixel( PixNum, LabelLine, SalAll ); % to convey the saliency value from superpixel to pixel 148 | Salpix = reshape( SalLine, width, height ); 149 | Salpix = Salpix'; 150 | else 151 | SalLine = sup2pixel( PixNum, LabelLine, Sal ); 152 | Salpix = reshape( SalLine, width, height ); 153 | Salpix = Salpix'; 154 | end 155 | imwrite( Salpix, [ Salmap, ImgEnum(i).name(1:end-4), '.png' ] ); 156 | else 157 | %%%%%%%%%%%% equilibrium post-process 158 | if NumIn == k 159 | sumDiscard = sum( Discard ); 160 | c = Discard ./ sumDiscard; 161 | rW = 1 ./ c; 162 | sumrW = sum(rW); 163 | rW = rW / sumrW; 164 | Sal = N \ rW; 165 | Sal = normalize(Sal); 166 | else 167 | sumsumD = sum( sumD ); 168 | c = sumD ./ sumsumD; 169 | rW = 1 ./ c; 170 | sumrW = sum(rW); 171 | rW = rW / sumrW; 172 | Sal = N \ rW; 173 | Sal = normalize(Sal); 174 | end 175 | if NumIn < k 176 | SalAll = zeros(k,1); 177 | mm=1; 178 | for j= 1: k 179 | if EdgSup (j ) < 2 180 | SalAll(j) = Sal( mm ); 181 | mm=mm+1; 182 | end 183 | end 184 | for j = 1:LenDiscardPos % to descide the saliency of outlier based on neighbour's saliency 185 | for z=1:k 186 | if ConPix( DiscardPos(j), z ) > 0 187 | if SalAll(z) >.3 188 | SalAll( DiscardPos(j) ) = 1 ; 189 | break; 190 | end 191 | end 192 | end 193 | end 194 | SalLine = sup2pixel( PixNum, LabelLine, SalAll ); 195 | Salpix = reshape( SalLine, width, height ); 196 | Salpix = Salpix'; 197 | else 198 | SalLine = sup2pixel( PixNum, LabelLine, Sal ); 199 | Salpix = reshape( SalLine, width, height ); 200 | Salpix = Salpix'; 201 | end 202 | imwrite( Salpix, [ Salmap,ImgEnum(i).name(1:end-4), '.png' ] ); 203 | end 204 | end 205 | toc; 206 | 207 | -------------------------------------------------------------------------------- /code/code/SubCode/Find_Edge_Superpixels.m: -------------------------------------------------------------------------------- 1 | function EdgSup = Find_Edge_Superpixels( Labels, K, height, width , Wcon, ConPix ) 2 | %% 3 | % obtain the indication of edge super-pixels 4 | % Input: 5 | % Labels: the super-pixel label obtained from SLIC 6 | % K: the number of super-pixels 7 | % height: the height of the image 8 | % width: the width of the image 9 | % Wcon: the affinity weight on the edge of the graph 10 | % ConPix: one layer neighbour relationship of 
super-pixels 11 | % Output: 12 | % EdgSup: the edge superpixel is indicated by value 1, 13 | % the superpixel in the edge frame is indicated by value 2. 14 | %%%%==================================================================== 15 | EdgSup=zeros( K,1); Check=0; 16 | for i=1:height 17 | EdgSup ( Labels( i,1 )+1 ) =1; 18 | EdgSup ( Labels(i, width) +1 )=1; 19 | end 20 | for i=1:width 21 | EdgSup (Labels(1,i) +1 )= 1 ; 22 | EdgSup (Labels(height, i) +1 ) =1; 23 | end 24 | EdgSupSecond = EdgSup; 25 | for j=1:K 26 | if EdgSup(j)==1 27 | for z=1:K 28 | if ( ConPix(j,z)>0 ) && ( EdgSup(z)==0 ) 29 | Check = Check + Wcon(j,z); 30 | EdgSupSecond( z ) = 1; 31 | end 32 | end 33 | if Check > 13 % heuristic threshold to discard the frame 34 | return; 35 | end 36 | end 37 | end 38 | EdgSup = EdgSup + EdgSupSecond; 39 | 40 | 41 | 42 | -------------------------------------------------------------------------------- /code/code/SubCode/SLIC.cpp: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | #include "matrix.h" 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | using namespace std; 11 | 12 | // SLIC.h: interface for the SLIC class. 13 | //=========================================================================== 14 | // This code implements the superpixel method described in: 15 | // 16 | // Radhakrishna Achanta, Appu Shaji, Kevin Smith, Aurelien Lucchi, Pascal Fua, and Sabine Susstrunk, 17 | // "SLIC Superpixels", 18 | // EPFL Technical Report no. 149300, June 2010. 19 | //=========================================================================== 20 | // Copyright (c) 2012 Radhakrishna Achanta [EPFL]. All rights reserved. 21 | //=========================================================================== 22 | ////////////////////////////////////////////////////////////////////// 23 | 24 | #if !defined(_SLIC_H_INCLUDED_) 25 | #define _SLIC_H_INCLUDED_ 26 | 27 | class SLIC 28 | { 29 | public: 30 | SLIC(); 31 | virtual ~SLIC(); 32 | //============================================================================ 33 | // sRGB to CIELAB conversion for 2-D images 34 | //========================================================================= 35 | void DoRGBtoLABConversionSup( 36 | vector & r1, 37 | vector & g1, 38 | vector & b1, 39 | double*& lvec, 40 | double*& avec, 41 | double*& bvec, 42 | const int & numSup); 43 | //====================================================================== 44 | //mean rgb in each superpixel 45 | //======================================================================= 46 | void SLIC::DoMeanSup( double * & m_rr, 47 | double * & m_gg, 48 | double * & m_bb, 49 | int & numlabels, 50 | int & NumPixel , 51 | double *& outlabel, 52 | vector & meanSupl, 53 | vector & meanSupa, 54 | vector & meanSupb); 55 | //============================================================================ 56 | // Superpixel segmentation for a given step size (superpixel size ~= step*step) 57 | //============================================================================ 58 | void DoSuperpixelSegmentation_ForGivenSuperpixelSize( 59 | double * & r, 60 | double * & g, 61 | double * & b, 62 | const int width, 63 | const int height, 64 | vector & klabels, 65 | int& numlabels, 66 | const int& superpixelsize, 67 | const double& compactness, 68 | double *& outlabel, 69 | const int& sz); 70 | //============================================================================ 71 | // Superpixel segmentation for a 
given number of superpixels 72 | //============================================================================ 73 | void DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels( 74 | double * & r, 75 | double * & g, 76 | double * & b, 77 | const int width, 78 | const int height, 79 | //vector & klabels, 80 | int& numlabels, 81 | const int& K, //required number of superpixels 82 | const double& compactness, 83 | double *& outlabel, 84 | const int& NumPixel); 85 | 86 | private: 87 | //============================================================================ 88 | // The main SLIC algorithm for generating superpixels 89 | //============================================================================ 90 | void PerformSuperpixelSLIC( 91 | vector& kseedsl, 92 | vector& kseedsa, 93 | vector& kseedsb, 94 | vector& kseedsx, 95 | vector& kseedsy, 96 | vector & klabels, 97 | const int& STEP, 98 | const vector& edgemag, 99 | const double& m , 100 | const int & sz, 101 | const int & numk); 102 | 103 | //============================================================================ 104 | // Pick seeds for superpixels when step size of superpixels is given. 105 | //============================================================================ 106 | void GetLABXYSeeds_ForGivenStepSize( 107 | vector& kseedsl, 108 | vector& kseedsa, 109 | vector& kseedsb, 110 | vector& kseedsx, 111 | vector& kseedsy, 112 | const int& STEP, 113 | const bool& perturbseeds, 114 | const vector& edgemag, 115 | int & numk); 116 | 117 | //============================================================================ 118 | // Move the superpixel seeds to low gradient positions to avoid putting seeds 119 | // at region boundaries. 120 | //============================================================================ 121 | void PerturbSeeds( 122 | vector& kseedsl, 123 | vector& kseedsa, 124 | vector& kseedsb, 125 | vector& kseedsx, 126 | vector& kseedsy, 127 | const vector& edges); 128 | //============================================================================ 129 | // Detect color edges, to help PerturbSeeds() 130 | //============================================================================ 131 | void DetectLabEdges( 132 | const double* lvec, 133 | const double* avec, 134 | const double* bvec, 135 | const int& width, 136 | const int& height, 137 | vector& edges, 138 | const int & sz); 139 | //============================================================================ 140 | // sRGB to XYZ conversion; helper for RGB2LAB() 141 | //============================================================================ 142 | void RGB2XYZ( 143 | const double & sR, 144 | const double & sG, 145 | const double & sB, 146 | double& X, 147 | double& Y, 148 | double& Z); 149 | //============================================================================ 150 | // sRGB to CIELAB conversion (uses RGB2XYZ function) 151 | //============================================================================ 152 | void RGB2LAB( 153 | const double & sR, 154 | const double & sG, 155 | const double & sB, 156 | double& lval, 157 | double& aval, 158 | double& bval); 159 | //============================================================================ 160 | // sRGB to CIELAB conversion for 2-D images 161 | //============================================================================ 162 | void DoRGBtoLABConversion( 163 | double * & r1, 164 | double * & g1, 165 | double * & b1, 166 | double*& lvec, 167 | double*& avec, 168 | double*& bvec, 169 | const int & sz); 170 | 
//=============================================================================== 171 | // convert mean rgb of superpixel to lab 172 | //================================================================================= 173 | void RGB2XYZSup( 174 | const double & sR, 175 | const double & sG, 176 | const double & sB, 177 | double& X, 178 | double& Y, 179 | double& Z); 180 | //============================================================================ 181 | // sRGB to CIELAB conversion (uses RGB2XYZ function) 182 | void RGB2LABSup( 183 | const double & sR, 184 | const double & sG, 185 | const double & sB, 186 | double& lval, 187 | double& aval, 188 | double& bval); 189 | 190 | //============================================================================ 191 | // Post-processing of SLIC segmentation, to avoid stray labels. 192 | //============================================================================ 193 | void EnforceLabelConnectivity( 194 | vector & labels, 195 | const int width, 196 | const int height, 197 | double *& outlabelout, //input labels that need to be corrected to remove stray labels 198 | int& numlabels, //the number of labels changes in the end if segments are removed 199 | const int& K, //the number of superpixels desired by the user 200 | const int& sz); 201 | 202 | 203 | private: 204 | int m_width; 205 | int m_height; 206 | int m_depth; 207 | 208 | double* m_lvec; 209 | double* m_avec; 210 | double* m_bvec; 211 | 212 | double** m_lvecvec; 213 | double** m_avecvec; 214 | double** m_bvecvec; 215 | }; 216 | 217 | #endif // !defined(_SLIC_H_INCLUDED_) 218 | 219 | // SLIC.cpp: implementation of the SLIC class. 220 | // 221 | // Copyright (C) Radhakrishna Achanta 2012 222 | // All rights reserved 223 | // Email: firstname.lastname@epfl.ch 224 | ////////////////////////////////////////////////////////////////////// 225 | 226 | 227 | 228 | ////////////////////////////////////////////////////////////////////// 229 | // Construction/Destruction 230 | ////////////////////////////////////////////////////////////////////// 231 | 232 | SLIC::SLIC() 233 | { 234 | m_lvec = NULL; 235 | m_avec = NULL; 236 | m_bvec = NULL; 237 | 238 | m_lvecvec = NULL; 239 | m_avecvec = NULL; 240 | m_bvecvec = NULL; 241 | } 242 | 243 | SLIC::~SLIC() 244 | { 245 | if(m_lvec) delete [] m_lvec; 246 | if(m_avec) delete [] m_avec; 247 | if(m_bvec) delete [] m_bvec; 248 | 249 | 250 | if(m_lvecvec) 251 | { 252 | for( int d = 0; d < m_depth; d++ ) delete [] m_lvecvec[d]; 253 | delete [] m_lvecvec; 254 | } 255 | if(m_avecvec) 256 | { 257 | for( int d = 0; d < m_depth; d++ ) delete [] m_avecvec[d]; 258 | delete [] m_avecvec; 259 | } 260 | if(m_bvecvec) 261 | { 262 | for( int d = 0; d < m_depth; d++ ) delete [] m_bvecvec[d]; 263 | delete [] m_bvecvec; 264 | } 265 | } 266 | 267 | //============================================================================== 268 | /// RGB2XYZ 269 | /// 270 | /// sRGB (D65 illuninant assumption) to XYZ conversion 271 | //============================================================================== 272 | void SLIC::RGB2XYZ( 273 | const double & sR, 274 | const double & sG, 275 | const double & sB, 276 | double& X, 277 | double& Y, 278 | double& Z) 279 | { 280 | double R = sR/255.0; 281 | double G = sG/255.0; 282 | double B = sB/255.0; 283 | double r, g, b; 284 | if(R <= 0.04045) r = R/12.92; 285 | else r = pow((R+0.055)/1.055,2.4); 286 | if(G <= 0.04045) g = G/12.92; 287 | else g = pow((G+0.055)/1.055,2.4); 288 | if(B <= 0.04045) b = B/12.92; 289 | else b = 
pow((B+0.055)/1.055,2.4); 290 | 291 | 292 | X = r*0.4339563 + g*0.3762153 + b*0.1898430; 293 | Y = r*0.2126729 + g*0.7151522 + b*0.0721750; 294 | Z = r*0.0177578 + g*0.1094756 + b*0.8728363; 295 | } 296 | 297 | //=========================================================================== 298 | /// RGB2LAB 299 | //=========================================================================== 300 | void SLIC::RGB2LAB(const double& sR, const double & sG, const double & sB, double& lval, double& aval, double& bval) 301 | { 302 | //------------------------ 303 | // sRGB to XYZ conversion 304 | //------------------------ 305 | double X, Y, Z; 306 | RGB2XYZ(sR, sG, sB, X, Y, Z); 307 | 308 | //------------------------ 309 | // XYZ to LAB conversion 310 | //------------------------ 311 | double epsilon = 0.008856; //actual CIE standard 312 | // double kappa = 903.3; //actual CIE standard 313 | 314 | double fx, fy, fz; 315 | if( X > epsilon) fx = pow(X, 1.0/3.0); 316 | else fx = 7.787069 * X + 0.137931; //(kappa*X + 16.0)/116.0; 317 | if( Y > epsilon) fy = pow(Y, 1.0/3.0); 318 | else fy = 7.787069 * Y + 0.137931; //(kappa*Y + 16.0)/116.0; 319 | if( Z > epsilon) fz = pow(Z, 1.0/3.0); 320 | else fz = 7.787069 * Z + 0.137931; //(kappa*Z + 16.0)/116.0; 321 | 322 | lval = 116.0*fy-16.0; 323 | aval = 500.0*(fx-fy); 324 | bval = 200.0*(fy-fz); 325 | } 326 | 327 | //=========================================================================== 328 | /// DoRGBtoLABConversion 329 | /// 330 | /// For whole image: overlaoded floating point version 331 | //=========================================================================== 332 | void SLIC::DoRGBtoLABConversion( 333 | double* & r1, 334 | double* & g1, 335 | double* & b1, 336 | double*& lvec, 337 | double*& avec, 338 | double*& bvec, 339 | const int & sz) 340 | { 341 | lvec = new double[sz]; 342 | avec = new double[sz]; 343 | bvec = new double[sz]; 344 | 345 | for( int j = 0; j < sz; j++ ) 346 | { 347 | double r = r1[j]; 348 | double g = g1[j]; 349 | double b = b1[j]; 350 | 351 | RGB2LAB( r, g, b, lvec[j], avec[j], bvec[j] ); 352 | } 353 | } 354 | 355 | //////////////// convert mean rgb of superpixel to lab 356 | void SLIC::RGB2XYZSup( 357 | const double & sR, 358 | const double & sG, 359 | const double & sB, 360 | double& X, 361 | double& Y, 362 | double& Z) 363 | { 364 | double r, g, b; 365 | r = pow( ( sR + 0.099 )/1.099, 2.222 ); 366 | g = pow( ( sG + 0.099 )/1.099, 2.222 ); 367 | b = pow( ( sB + 0.099 )/1.099, 2.222 ); 368 | 369 | if ( r < 0.018 ) 370 | r = sR / 4.5138; 371 | 372 | if ( g < 0.018 ) 373 | g = sG / 4.5138; 374 | 375 | if ( b < 0.018 ) 376 | b = sB / 4.5138; 377 | 378 | X = r*0.4339563 + g*0.3762153 + b*0.1898430; 379 | Y = r*0.2126729 + g*0.7151522 + b*0.0721750; 380 | Z = r*0.0177578 + g*0.1094756 + b*0.8728363; 381 | } 382 | 383 | //=========================================================================== 384 | /// RGB2LAB 385 | //=========================================================================== 386 | void SLIC::RGB2LABSup(const double& sR, const double & sG, const double & sB, double& lval, double& aval, double& bval) 387 | { 388 | //------------------------ 389 | // sRGB to XYZ conversion 390 | //------------------------ 391 | double X, Y, Z; 392 | RGB2XYZSup(sR, sG, sB, X, Y, Z); 393 | 394 | //------------------------ 395 | // XYZ to LAB conversion 396 | //------------------------ 397 | double epsilon = 0.008856; //actual CIE standard 398 | 399 | double fx, fy, fz; 400 | if( X > epsilon) fx = pow(X, 1.0/3.0); 401 | else fx = 
7.787069 * X + 0.137931; //(kappa*X + 16.0)/116.0; 402 | if( Y > epsilon) fy = pow(Y, 1.0/3.0); 403 | else fy = 7.787069 * Y + 0.137931; //(kappa*Y + 16.0)/116.0; 404 | if( Z > epsilon) fz = pow(Z, 1.0/3.0); 405 | else fz = 7.787069 * Z + 0.137931; //(kappa*Z + 16.0)/116.0; 406 | 407 | lval = 116.0*fy-16.0; 408 | aval = 500.0*(fx-fy); 409 | bval = 200.0*(fy-fz); 410 | } 411 | 412 | //=========================================================================== 413 | /// DoRGBtoLABConversion 414 | /// 415 | /// For whole image: overlaoded floating point version 416 | //=========================================================================== 417 | void SLIC::DoRGBtoLABConversionSup( 418 | vector & r1, 419 | vector & g1, 420 | vector & b1, 421 | double*& lvec, 422 | double*& avec, 423 | double*& bvec, 424 | const int & numSup) 425 | { 426 | 427 | /*lvec = new double[sz]; 428 | avec = new double[sz]; 429 | bvec = new double[sz];*/ 430 | 431 | for( int j = 0; j < numSup; j++ ) 432 | { 433 | double r = r1[j]; 434 | double g = g1[j]; 435 | double b = b1[j]; 436 | 437 | RGB2LABSup( r, g, b, lvec[j], avec[j], bvec[j] ); 438 | } 439 | } 440 | 441 | //============================================================================== 442 | /// DetectLabEdges 443 | //============================================================================== 444 | void SLIC::DetectLabEdges( 445 | const double* lvec, 446 | const double* avec, 447 | const double* bvec, 448 | const int& width, 449 | const int& height, 450 | vector& edges, 451 | const int & sz) 452 | { 453 | 454 | edges.resize(sz,0); 455 | for( int j = 1; j < height-1; j++ ) 456 | { 457 | for( int k = 1; k < width-1; k++ ) 458 | { 459 | int i = j*width+k; 460 | 461 | double dx = (lvec[i-1]-lvec[i+1])*(lvec[i-1]-lvec[i+1]) + 462 | (avec[i-1]-avec[i+1])*(avec[i-1]-avec[i+1]) + 463 | (bvec[i-1]-bvec[i+1])*(bvec[i-1]-bvec[i+1]); 464 | 465 | double dy = (lvec[i-width]-lvec[i+width])*(lvec[i-width]-lvec[i+width]) + 466 | (avec[i-width]-avec[i+width])*(avec[i-width]-avec[i+width]) + 467 | (bvec[i-width]-bvec[i+width])*(bvec[i-width]-bvec[i+width]); 468 | 469 | //edges[i] = fabs(dx) + fabs(dy); 470 | edges[i] = dx*dx + dy*dy; 471 | } 472 | } 473 | } 474 | 475 | //=========================================================================== 476 | /// PerturbSeeds 477 | //=========================================================================== 478 | void SLIC::PerturbSeeds( 479 | vector& kseedsl, 480 | vector& kseedsa, 481 | vector& kseedsb, 482 | vector& kseedsx, 483 | vector& kseedsy, 484 | const vector& edges) 485 | { 486 | const int dx8[8] = {-1, -1, 0, 1, 1, 1, 0, -1}; 487 | const int dy8[8] = { 0, -1, -1, -1, 0, 1, 1, 1}; 488 | 489 | int numseeds = kseedsl.size(); 490 | 491 | for( int n = 0; n < numseeds; n++ ) 492 | { 493 | int ox = kseedsx[n]; //original x 494 | int oy = kseedsy[n]; //original y 495 | int oind = oy*m_width + ox; 496 | 497 | int storeind = oind; 498 | for( int i = 0; i < 8; i++ ) 499 | { 500 | int nx = ox+dx8[i];//new x 501 | int ny = oy+dy8[i];//new y 502 | 503 | if( nx >= 0 && nx < m_width && ny >= 0 && ny < m_height) 504 | { 505 | int nind = ny*m_width + nx; 506 | if( edges[nind] < edges[storeind]) 507 | { 508 | storeind = nind; 509 | } 510 | } 511 | } 512 | if(storeind != oind) 513 | { 514 | kseedsx[n] = storeind%m_width; 515 | kseedsy[n] = storeind/m_width; 516 | kseedsl[n] = m_lvec[storeind]; 517 | kseedsa[n] = m_avec[storeind]; 518 | kseedsb[n] = m_bvec[storeind]; 519 | } 520 | } 521 | } 522 | 523 | 524 | 
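// Note: PerturbSeeds above nudges each initial cluster centre to the lowest-gradient
// pixel among its eight neighbours (dx8/dy8), so seeds are less likely to start on an
// object boundary or on a noisy pixel.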
//=========================================================================== 525 | /// GetLABXYSeeds_ForGivenStepSize 526 | /// 527 | /// The k seed values are taken as uniform spatial pixel samples. 528 | //=========================================================================== 529 | void SLIC::GetLABXYSeeds_ForGivenStepSize( 530 | vector& kseedsl, 531 | vector& kseedsa, 532 | vector& kseedsb, 533 | vector& kseedsx, 534 | vector& kseedsy, 535 | const int& STEP, 536 | const bool& perturbseeds, 537 | const vector& edgemag, 538 | int & numk) 539 | { 540 | const bool hexgrid = false; 541 | int numseeds(0); 542 | int n(0); 543 | 544 | //int xstrips = m_width/STEP; 545 | //int ystrips = m_height/STEP; 546 | int xstrips = (0.5+double(m_width)/double(STEP)); 547 | int ystrips = (0.5+double(m_height)/double(STEP)); 548 | 549 | int xerr = m_width - STEP*xstrips;if(xerr < 0){xstrips--;xerr = m_width - STEP*xstrips;} 550 | int yerr = m_height - STEP*ystrips;if(yerr < 0){ystrips--;yerr = m_height- STEP*ystrips;} 551 | 552 | double xerrperstrip = double(xerr)/double(xstrips); 553 | double yerrperstrip = double(yerr)/double(ystrips); 554 | 555 | int xoff = STEP/2; 556 | int yoff = STEP/2; 557 | //------------------------- 558 | numseeds = xstrips*ystrips; 559 | //------------------------- 560 | kseedsl.resize(numseeds); 561 | kseedsa.resize(numseeds); 562 | kseedsb.resize(numseeds); 563 | kseedsx.resize(numseeds); 564 | kseedsy.resize(numseeds); 565 | 566 | for( int y = 0; y < ystrips; y++ ) 567 | { 568 | int ye = y*yerrperstrip; 569 | for( int x = 0; x < xstrips; x++ ) 570 | { 571 | int xe = x*xerrperstrip; 572 | int seedx = (x*STEP+xoff+xe); 573 | if(hexgrid){ seedx = x*STEP+(xoff<<(y&0x1))+xe; seedx = min(m_width-1,seedx); }//for hex grid sampling 574 | int seedy = (y*STEP+yoff+ye); 575 | int i = seedy*m_width + seedx; 576 | 577 | kseedsl[n] = m_lvec[i]; 578 | kseedsa[n] = m_avec[i]; 579 | kseedsb[n] = m_bvec[i]; 580 | kseedsx[n] = seedx; 581 | kseedsy[n] = seedy; 582 | n++; 583 | } 584 | } 585 | numk = n; 586 | if(perturbseeds) 587 | { 588 | PerturbSeeds(kseedsl, kseedsa, kseedsb, kseedsx, kseedsy, edgemag); 589 | } 590 | } 591 | 592 | //=========================================================================== 593 | /// PerformSuperpixelSLIC 594 | /// 595 | /// Performs k mean segmentation. It is fast because it looks locally, not 596 | /// over the entire image. 
597 | //=========================================================================== 598 | void SLIC::PerformSuperpixelSLIC( 599 | vector& kseedsl, 600 | vector& kseedsa, 601 | vector& kseedsb, 602 | vector& kseedsx, 603 | vector& kseedsy, 604 | vector & klabels, 605 | const int& STEP, 606 | const vector& edgemag, 607 | const double& M, 608 | const int & sz, 609 | const int & numk) 610 | { 611 | 612 | //const int numk = kseedsl.size(); 613 | //---------------- 614 | int offset = STEP; 615 | //if(STEP < 8) offset = STEP*1.5;//to prevent a crash due to a very small step size 616 | //---------------- 617 | 618 | vector clustersize(numk, 0); 619 | vector inv(numk, 0);//to store 1/clustersize[k] values 620 | 621 | vector sigmal(numk, 0); 622 | vector sigmaa(numk, 0); 623 | vector sigmab(numk, 0); 624 | vector sigmax(numk, 0); 625 | vector sigmay(numk, 0); 626 | vector distvec(sz, DBL_MAX); 627 | 628 | double invwt = 1.0/((STEP/M)*(STEP/M)); 629 | 630 | int x1, y1, x2, y2; 631 | double l, a, b; 632 | double dist; 633 | double distxy; 634 | for( int itr = 0; itr < 10; itr++ ) 635 | { 636 | distvec.assign(sz, DBL_MAX); 637 | for( int n = 0; n < numk; n++ ) 638 | { 639 | y1 = max(0.0, kseedsy[n]-offset); 640 | y2 = min((double)m_height, kseedsy[n]+offset); 641 | x1 = max(0.0, kseedsx[n]-offset); 642 | x2 = min((double)m_width, kseedsx[n]+offset); 643 | 644 | 645 | for( int y = y1; y < y2; y++ ) 646 | { 647 | for( int x = x1; x < x2; x++ ) 648 | { 649 | int i = y*m_width + x; 650 | 651 | l = m_lvec[i]; 652 | a = m_avec[i]; 653 | b = m_bvec[i]; 654 | 655 | dist = (l - kseedsl[n])*(l - kseedsl[n]) + 656 | (a - kseedsa[n])*(a - kseedsa[n]) + 657 | (b - kseedsb[n])*(b - kseedsb[n]); 658 | 659 | distxy = (x - kseedsx[n])*(x - kseedsx[n]) + 660 | (y - kseedsy[n])*(y - kseedsy[n]); 661 | 662 | //------------------------------------------------------------------------ 663 | dist += distxy*invwt;//dist = sqrt(dist) + sqrt(distxy*invwt);//this is more exact 664 | //------------------------------------------------------------------------ 665 | if( dist < distvec[i] ) 666 | { 667 | distvec[i] = dist; 668 | klabels[i] = n; 669 | } 670 | } 671 | } 672 | } 673 | //----------------------------------------------------------------- 674 | // Recalculate the centroid and store in the seed values 675 | //----------------------------------------------------------------- 676 | //instead of reassigning memory on each iteration, just reset. 
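// In the assignment loop above each seed only searches a 2*STEP x 2*STEP window, and the
// distance used is d_lab^2 + d_xy^2*(M/STEP)^2 (invwt precomputes (M/STEP)^2), so a larger
// compactness M weights spatial proximity more heavily and yields more regular superpixels.
// The sigma* accumulators below collect per-cluster sums of L, a, b, x and y; dividing by
// clustersize (via the precomputed inverse inv[k]) gives each seed's new colour and position.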
677 | 678 | sigmal.assign(numk, 0); 679 | sigmaa.assign(numk, 0); 680 | sigmab.assign(numk, 0); 681 | sigmax.assign(numk, 0); 682 | sigmay.assign(numk, 0); 683 | clustersize.assign(numk, 0); 684 | //------------------------------------ 685 | //edgesum.assign(numk, 0); 686 | //------------------------------------ 687 | 688 | {int ind(0); 689 | for( int r = 0; r < m_height; r++ ) 690 | { 691 | for( int c = 0; c < m_width; c++ ) 692 | { 693 | sigmal[klabels[ind]] += m_lvec[ind]; 694 | sigmaa[klabels[ind]] += m_avec[ind]; 695 | sigmab[klabels[ind]] += m_bvec[ind]; 696 | sigmax[klabels[ind]] += c; 697 | sigmay[klabels[ind]] += r; 698 | //------------------------------------ 699 | //edgesum[klabels[ind]] += edgemag[ind]; 700 | //------------------------------------ 701 | clustersize[klabels[ind]] += 1.0; 702 | ind++; 703 | } 704 | }} 705 | 706 | {for( int k = 0; k < numk; k++ ) 707 | { 708 | if( clustersize[k] <= 0 ) clustersize[k] = 1; 709 | inv[k] = 1.0/clustersize[k];//computing inverse now to multiply, than divide later 710 | }} 711 | 712 | {for( int k = 0; k < numk; k++ ) 713 | { 714 | kseedsl[k] = sigmal[k]*inv[k]; 715 | kseedsa[k] = sigmaa[k]*inv[k]; 716 | kseedsb[k] = sigmab[k]*inv[k]; 717 | kseedsx[k] = sigmax[k]*inv[k]; 718 | kseedsy[k] = sigmay[k]*inv[k]; 719 | //------------------------------------ 720 | //edgesum[k] *= inv[k]; 721 | //------------------------------------ 722 | }} 723 | } 724 | } 725 | 726 | //=========================================================================== 727 | /// EnforceLabelConnectivity 728 | /// 729 | /// 1. finding an adjacent label for each new component at the start 730 | /// 2. if a certain component is too small, assigning the previously found 731 | /// adjacent label to this component, and not incrementing the label. 
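/// A component counts as "too small" when it holds at most SUPSZ >> 2 pixels,
/// i.e. a quarter of the average superpixel area SUPSZ = sz/K.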
732 | //=========================================================================== 733 | void SLIC::EnforceLabelConnectivity( 734 | vector & labels, //input labels that need to be corrected to remove stray labels 735 | const int width, 736 | const int height, 737 | double *& outlabelout, //new labels 738 | int& numlabels, //the number of labels changes in the end if segments are removed 739 | const int& K, //the number of superpixels desired by the user 740 | const int & sz) 741 | { 742 | // const int dx8[8] = {-1, -1, 0, 1, 1, 1, 0, -1}; 743 | // const int dy8[8] = { 0, -1, -1, -1, 0, 1, 1, 1}; 744 | 745 | const int dx4[4] = {-1, 0, 1, 0}; 746 | const int dy4[4] = { 0, -1, 0, 1}; 747 | 748 | 749 | const int SUPSZ = sz/K; 750 | //nlabels.resize(sz, -1); 751 | for( int i = 0; i < sz; i++ ) outlabelout[i] = -1; 752 | int label(0); 753 | int* xvec = new int[sz]; 754 | int* yvec = new int[sz]; 755 | int oindex(0); 756 | int adjlabel(0);//adjacent label 757 | for( int j = 0; j < height; j++ ) 758 | { 759 | for( int k = 0; k < width; k++ ) 760 | { 761 | if( 0 > outlabelout[oindex] ) 762 | { 763 | outlabelout[oindex] = label; 764 | //-------------------- 765 | // Start a new segment 766 | //-------------------- 767 | xvec[0] = k; 768 | yvec[0] = j; 769 | //------------------------------------------------------- 770 | // Quickly find an adjacent label for use later if needed 771 | //------------------------------------------------------- 772 | {for( int n = 0; n < 4; n++ ) 773 | { 774 | int x = xvec[0] + dx4[n]; 775 | int y = yvec[0] + dy4[n]; 776 | if( (x >= 0 && x < width) && (y >= 0 && y < height) ) 777 | { 778 | int nindex = y*width + x; 779 | if(outlabelout[nindex] >= 0) adjlabel = outlabelout[nindex]; 780 | } 781 | }} 782 | 783 | int count(1); 784 | for( int c = 0; c < count; c++ ) 785 | { 786 | for( int n = 0; n < 4; n++ ) 787 | { 788 | int x = xvec[c] + dx4[n]; 789 | int y = yvec[c] + dy4[n]; 790 | 791 | if( (x >= 0 && x < width) && (y >= 0 && y < height) ) 792 | { 793 | int nindex = y*width + x; 794 | 795 | if( 0 > outlabelout[nindex] && labels[oindex] == labels[nindex] ) 796 | { 797 | xvec[count] = x; 798 | yvec[count] = y; 799 | outlabelout[nindex] = label; 800 | count++; 801 | } 802 | } 803 | 804 | } 805 | } 806 | //------------------------------------------------------- 807 | // If segment size is less then a limit, assign an 808 | // adjacent label found before, and decrement label count. 809 | //------------------------------------------------------- 810 | if(count <= SUPSZ >> 2) 811 | { 812 | for( int c = 0; c < count; c++ ) 813 | { 814 | int ind = yvec[c]*width+xvec[c]; 815 | outlabelout[ind] = adjlabel; 816 | } 817 | label--; 818 | } 819 | label++; 820 | } 821 | oindex++; 822 | } 823 | } 824 | numlabels= label; 825 | 826 | if(xvec) delete [] xvec; 827 | if(yvec) delete [] yvec; 828 | } 829 | 830 | 831 | 832 | //=========================================================================== 833 | /// DoSuperpixelSegmentation_ForGivenSuperpixelSize 834 | /// 835 | /// The input parameter ubuff conains RGB values in a 32-bit unsigned integers 836 | /// as follows: 837 | /// 838 | /// [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] 839 | /// 840 | /// Nothing R G B 841 | /// 842 | /// The RGB values are accessed from (and packed into) the unsigned integers 843 | /// using bitwise operators as can be seen in the function DoRGBtoLABConversion(). 844 | /// 845 | /// compactness value depends on the input pixels values. 
For instance, if 846 | /// the input is greyscale with values ranging from 0-100, then a compactness 847 | /// value of 20.0 would give good results. A greater value will make the 848 | /// superpixels more compact while a smaller value would make them more uneven. 849 | /// 850 | /// The labels can be saved if needed using SaveSuperpixelLabels() 851 | //=========================================================================== 852 | void SLIC::DoSuperpixelSegmentation_ForGivenSuperpixelSize( 853 | double * & r, 854 | double * & g, 855 | double * & b, 856 | const int width, 857 | const int height, 858 | vector & klabels, 859 | int& numlabels, 860 | const int& superpixelsize, 861 | const double& compactness, 862 | double *& outlabel, 863 | const int& sz) 864 | { 865 | //------------------------------------------------ 866 | const int STEP = sqrt(double(superpixelsize))+0.5; 867 | //------------------------------------------------ 868 | vector kseedsl(0); 869 | vector kseedsa(0); 870 | vector kseedsb(0); 871 | vector kseedsx(0); 872 | vector kseedsy(0); 873 | 874 | //-------------------------------------------------- 875 | m_width = width; 876 | m_height = height; 877 | 878 | //LAB, the default option 879 | 880 | DoRGBtoLABConversion(r,g,b, m_lvec, m_avec, m_bvec, sz); 881 | 882 | //-------------------------------------------------- 883 | bool perturbseeds(false);//perturb seeds is not absolutely necessary, one can set this flag to false 884 | vector edgemag(0); 885 | if(perturbseeds) DetectLabEdges(m_lvec, m_avec, m_bvec, m_width, m_height, edgemag, sz); 886 | int numk(0); 887 | GetLABXYSeeds_ForGivenStepSize(kseedsl, kseedsa, kseedsb, kseedsx, kseedsy, STEP, perturbseeds, edgemag, numk); 888 | 889 | PerformSuperpixelSLIC(kseedsl, kseedsa, kseedsb, kseedsx, kseedsy, klabels, STEP, edgemag, compactness, sz, numk); 890 | //numlabels = kseedsl.size(); 891 | //numlabels = numk; 892 | //int* nlabels = new int[sz]; 893 | //vector nlabels (sz,-1); 894 | EnforceLabelConnectivity(klabels, m_width, m_height, outlabel, numlabels, double(sz)/double(STEP*STEP), sz); 895 | //{for(int i = 0; i < sz; i++ ) outlabel[i] = nlabels[i];} 896 | //if(nlabels) delete [] nlabels; 897 | } 898 | 899 | //=========================================================================== 900 | /// DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels 901 | /// 902 | /// The input parameter ubuff conains RGB values in a 32-bit unsigned integers 903 | /// as follows: 904 | /// 905 | /// [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] [1 1 1 1 1 1 1 1] 906 | /// 907 | /// Nothing R G B 908 | /// 909 | /// The RGB values are accessed from (and packed into) the unsigned integers 910 | /// using bitwise operators as can be seen in the function DoRGBtoLABConversion(). 911 | /// 912 | /// compactness value depends on the input pixels values. For instance, if 913 | /// the input is greyscale with values ranging from 0-100, then a compactness 914 | /// value of 20.0 would give good results. A greater value will make the 915 | /// superpixels more compact while a smaller value would make them more uneven. 
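/// This overload derives the target superpixel size as 0.5 + N/K and then forwards to
/// DoSuperpixelSegmentation_ForGivenSuperpixelSize, so only one segmentation routine exists.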
916 | /// 917 | /// The labels can be saved if needed using SaveSuperpixelLabels() 918 | //=========================================================================== 919 | void SLIC::DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels( 920 | double * & r, 921 | double * & g, 922 | double * & b, 923 | const int width, 924 | const int height, 925 | //vector & klabels, 926 | int& numlabels, 927 | const int& K, 928 | const double& compactness, 929 | double *& outlabel, 930 | const int& NumPixel ) 931 | { 932 | vector klabels( NumPixel, -1 ); 933 | 934 | const int superpixelsize = 0.5 + double(NumPixel)/double(K); 935 | DoSuperpixelSegmentation_ForGivenSuperpixelSize(r,g,b,width,height,klabels,numlabels,superpixelsize,compactness,outlabel,NumPixel); 936 | } 937 | 938 | void SLIC::DoMeanSup( double * & m_rr, 939 | double * & m_gg, 940 | double * & m_bb, 941 | int & numlabels, 942 | int & NumPixel, 943 | double *& outlabel, 944 | vector & meanSupl, 945 | vector & meanSupa, 946 | vector & meanSupb ) 947 | { 948 | vector num( numlabels ); 949 | 950 | for ( int j = 0; j< NumPixel; j++ ){ 951 | 952 | int kk = int(outlabel[j]); 953 | meanSupl[ kk ] += m_rr[j]; 954 | num[ kk ] += 1; 955 | 956 | meanSupa[ kk ] += m_gg[j]; 957 | 958 | meanSupb[ kk ] += m_bb[j]; 959 | } 960 | 961 | for ( int j = 0; j< numlabels; j++ ) { 962 | 963 | int numj = num[ j ]; 964 | meanSupl[ j ] /= numj; 965 | meanSupa[ j ] /= numj; 966 | meanSupb[ j ] /= numj; 967 | } 968 | } 969 | 970 | 971 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 972 | { 973 | if (nrhs!=4) mexErrMsgTxt("error :the input number error"); 974 | 975 | double * rr= (double* )mxGetPr(prhs[0]); 976 | double * gg= (double*)mxGetPr(prhs[1]); 977 | double * bb= (double*)mxGetPr(prhs[2]); 978 | double *imgattr=(double *)mxGetPr(prhs[3]); 979 | 980 | double height=imgattr[0]; 981 | double width=imgattr[1]; 982 | double k=imgattr[2]; 983 | double m=imgattr[3]; 984 | int NumPixel = imgattr[4]; 985 | 986 | int numlabels(0); 987 | 988 | plhs[0] = mxCreateDoubleMatrix( NumPixel, 1, mxREAL); 989 | double * outlabel=(double *)mxGetPr(plhs[0]); 990 | 991 | SLIC slic; 992 | slic.DoSuperpixelSegmentation_ForGivenNumberOfSuperpixels( rr, gg, bb, width, height, numlabels, k, m, outlabel, NumPixel ); 993 | 994 | plhs[1] = mxCreateDoubleMatrix( numlabels, 1, mxREAL); 995 | double * Supll = (double *)mxGetPr(plhs[1]); 996 | 997 | plhs[2] = mxCreateDoubleMatrix( numlabels, 1, mxREAL); 998 | double * Supaa = (double *)mxGetPr(plhs[2]); 999 | 1000 | plhs[3] = mxCreateDoubleMatrix( numlabels, 1, mxREAL); 1001 | double * Supbb = (double *)mxGetPr(plhs[3]); 1002 | 1003 | plhs[4] = mxCreateDoubleMatrix( 1, 1, mxREAL ); 1004 | double * numSuperpixel = (double *)mxGetPr(plhs[4]); 1005 | numSuperpixel[0] = numlabels; 1006 | 1007 | vector meanSuprr( numlabels ); 1008 | vector meanSupgg( numlabels ); 1009 | vector meanSupbb( numlabels ); 1010 | 1011 | slic.DoMeanSup( rr, gg, bb, numlabels, NumPixel , outlabel, meanSuprr, meanSupgg, meanSupbb ); 1012 | 1013 | slic.DoRGBtoLABConversionSup(meanSuprr, meanSupgg, meanSupbb, Supll, Supaa, Supbb, numlabels); 1014 | 1015 | } -------------------------------------------------------------------------------- /code/code/SubCode/SLIC.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SubCode/SLIC.mexw32 
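The .mexw32/.mexw64 files above and below are prebuilt binaries for 32- and 64-bit Windows MATLAB only. On other platforms the two gateways can typically be rebuilt from SLIC.cpp and sup2pixel.cpp with the mex command (the angle-bracket #include targets such as <vector> and <cfloat> appear to have been stripped from this listing but must be present in the actual sources), after which SLIC is called exactly as Saliency_Absorb_MC.m does. The sketch below is illustrative only: a configured C++ compiler and the test image name 'example.jpg' are assumptions, not part of the repository; the calling convention itself is taken from Saliency_Absorb_MC.m.

% Hedged rebuild-and-call sketch; 'example.jpg' and the parameter values are
% illustrative assumptions, not values fixed by the repository.
mex SLIC.cpp        % builds the SLIC gateway for the current platform
mex sup2pixel.cpp   % builds the superpixel-to-pixel mapping gateway

Img = double(imread('example.jpg'));
[height, width] = size(Img(:,:,1));
PixNum = height*width;

% SLIC expects each colour channel flattened row-major into a column vector,
% exactly as Saliency_Absorb_MC.m prepares it.
ImgVecR = reshape(Img(:,:,1)', PixNum, 1);
ImgVecG = reshape(Img(:,:,2)', PixNum, 1);
ImgVecB = reshape(Img(:,:,3)', PixNum, 1);

m = 20; k = 250;    % compactness and requested superpixel count
ImgAttr = [height, width, k, m, PixNum];
[LabelLine, Sup1, Sup2, Sup3, k] = SLIC(ImgVecR, ImgVecG, ImgVecB, ImgAttr);
Label = reshape(LabelLine, width, height)';   % per-pixel superpixel labels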
-------------------------------------------------------------------------------- /code/code/SubCode/SLIC.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SubCode/SLIC.mexw64 -------------------------------------------------------------------------------- /code/code/SubCode/find_connect_superpixel_DoubleIn_Opposite.m: -------------------------------------------------------------------------------- 1 | function [ ConPix0, ConPixSecond ] = find_connect_superpixel_DoubleIn_Opposite(labels, K, height ,width ) 2 | %% 3 | % obtain the neighbour relationship of the super-pixels 4 | % Input: 5 | % labels: the super-pixel label obtained from SLIC 6 | % K: the number of super-pixels 7 | % height: the height of the image 8 | % width: the width of the image 9 | % Output: 10 | % ConPix0: the one layer neighbour relationship 11 | % ConPixSecond: the two layer neighbour relationship 12 | %%%%===================================================== 13 | ConPix=zeros(K,K); 14 | %the one outerboundary super 15 | for i=1:height-1 16 | for j=1:width-1 17 | if labels(i,j)~=labels(i,j+1) 18 | ConPix(labels(i,j)+1 ,labels(i,j+1)+1 )=1; 19 | end 20 | if labels(i,j)~=labels(i+1,j) 21 | ConPix(labels(i,j)+1 ,labels(i+1,j)+1 )=1; 22 | end 23 | end 24 | if labels(i,j+1)~=labels(i+1,j+1) 25 | ConPix(labels(i,j+1)+1 ,labels(i+1,j+1 )+1 )=1; 26 | end 27 | end 28 | for j=1:width-1 29 | if labels(height,j)~=labels(height,j+1) 30 | ConPix( labels(height,j)+1,labels(height,j+1)+1 )=1; 31 | end 32 | end 33 | for i=1:height-1 34 | for j=1:width-1 35 | if labels(i,j)~=labels(i+1,j+1) 36 | ConPix( labels(i,j)+1,labels(i+1,j+1)+1 )=1; 37 | end 38 | end 39 | end 40 | for i=1:height-1 41 | for j=2:width 42 | if labels(i,j)~=labels(i+1,j-1) 43 | ConPix( labels(i,j)+1,labels(i+1,j-1)+1 )=1; 44 | end 45 | end 46 | end 47 | ConPix0 = ConPix + ConPix'; 48 | % connect the super-pixel on the opposite boundary 49 | for j=1:width 50 | ConPix( labels(1,j)+1 , labels(height,j)+1 ) = 1; 51 | end 52 | for j=1:height 53 | ConPix( labels(j,1)+1 , labels(j,width)+1 ) = 1 ; 54 | end 55 | ConPix=ConPix+ConPix'; 56 | % find the second outerboundary superpixel 57 | ConPixSecond = ConPix; 58 | for i=1:K 59 | siteline=find( ConPix(i,:)>0 ); 60 | lenthsiteline=length(siteline); 61 | for j=1:lenthsiteline 62 | ConPixSecond(i,:)= ConPixSecond(i,:)+ ConPix( siteline( j ), :); 63 | end 64 | end 65 | % find third outerboundary superpixel 66 | % ConPixTid = ConPixSecond; 67 | % for i=1:K 68 | % siteline=find( ConPixSecond(i,:)>0 ); 69 | % lenthsiteline=length(siteline); 70 | % for j=1:lenthsiteline 71 | % ConPixTid(i,:)= ConPixTid(i,:)+ ConPix( siteline( j ), :); 72 | % end 73 | % end 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /code/code/SubCode/normalize.m: -------------------------------------------------------------------------------- 1 | function normVals=normalize(newVals,oldVals) 2 | %function normVals=normalize(newVals,oldVals) normalizes the range of 3 | %newVals to the range of oldVals such that every column is normalized 4 | %independantly 5 | % 6 | %Inputs: newVals - NxP matrix of new values to be normalized to the 7 | % range of oldVals 8 | % oldVals - Optional NxP matrix of original values (for 9 | % arbitrary K). 
Defaults to normalizing range to [0,1] 10 | % 11 | %Outputs: normVals - NxP matrix of newVals normalized (columnwise) to 12 | % the range of oldVals 13 | % 14 | % 15 | %5/13/03 - Leo Grady 16 | 17 | % Copyright (C) 2002, 2003 Leo Grady 18 | % Computer Vision and Computational Neuroscience Lab 19 | % Department of Cognitive and Neural Systems 20 | % Boston University 21 | % Boston, MA 02215 22 | % 23 | % This program is free software; you can redistribute it and/or 24 | % modify it under the terms of the GNU General Public License 25 | % as published by the Free Software Foundation; either version 2 26 | % of the License, or (at your option) any later version. 27 | % 28 | % This program is distributed in the hope that it will be useful, 29 | % but WITHOUT ANY WARRANTY; without even the implied warranty of 30 | % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 | % GNU General Public License for more details. 32 | % 33 | % You should have received a copy of the GNU General Public License 34 | % along with this program; if not, write to the Free Software 35 | % Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 36 | % 37 | % Date - $Id: normalize.m,v 1.2 2003/08/21 17:29:29 lgrady Exp $ 38 | %========================================================================% 39 | 40 | %Initialize 41 | [N P]=size(newVals); 42 | 43 | %Supply optional argument, if required 44 | if nargin == 1 45 | oldVals=[zeros(1,P);ones(1,P)]; 46 | end 47 | 48 | %Find original minima/maxima 49 | minVal=min(oldVals,[],1); 50 | maxVal=max(oldVals,[],1); 51 | 52 | %Find current minima/maxima 53 | minNewVal=min(newVals,[],1); 54 | maxNewVal=max(newVals,[],1); 55 | 56 | %Perform normalization 57 | warning off MATLAB:divideByZero %Error for divide by zero handled below 58 | normVals=newVals-ones(N,1)*minNewVal; 59 | normVals=normVals.* ( ones(N,1)*(maxVal-minVal) ) ./ ( ones(N,1)* max(normVals,[],1) ); 60 | normVals=normVals+ones(N,1)*minVal; 61 | warning on MATLAB:divideByZero 62 | 63 | %Error check for completely uniform inputs 64 | uniformIndex=find(minNewVal==maxNewVal); 65 | normVals(:,uniformIndex)=ones(N,1)*minNewVal(:,uniformIndex); 66 | -------------------------------------------------------------------------------- /code/code/SubCode/sup2pixel.cpp: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | using namespace std; 3 | 4 | 5 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 6 | { 7 | if (nrhs!=3) mexErrMsgTxt("error :the input number error"); 8 | 9 | double * pixnum = (double* )mxGetPr(prhs[0]); 10 | double * label = (double* )mxGetPr(prhs[1]); 11 | double * sup = (double* )mxGetPr(prhs[2]); 12 | 13 | int pixelN = int( pixnum[0] ); 14 | plhs[0] = mxCreateDoubleMatrix( pixelN, 1, mxREAL ); 15 | double * outlabel=(double *)mxGetPr(plhs[0]); 16 | 17 | for ( int j = 0; j < pixelN; j++ ){ 18 | 19 | outlabel[j] = sup[ int( label[j] ) ]; 20 | 21 | } 22 | } -------------------------------------------------------------------------------- /code/code/SubCode/sup2pixel.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SubCode/sup2pixel.mexw32 -------------------------------------------------------------------------------- /code/code/SubCode/sup2pixel.mexw64: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SubCode/sup2pixel.mexw64 -------------------------------------------------------------------------------- /code/code/SurfFeature.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/SurfFeature.mat -------------------------------------------------------------------------------- /code/code/apply_tranfer_function.m: -------------------------------------------------------------------------------- 1 | function O=apply_tranfer_function(I,f) 2 | % apply a transfer function 3 | % intput: 4 | % -I is the image in [0 1] to which apply the transfer function 5 | % -f is the transfer function given as a vector of size 256 6 | index=max(min(floor(255.*I),255),0); 7 | x=min((255.*I-index)./255,255); 8 | f(257)=1; 9 | O=((1-x).*f(index+1)+(x).*f(index+2)); 10 | end 11 | -------------------------------------------------------------------------------- /code/code/child_window.m: -------------------------------------------------------------------------------- 1 | function child = child_window(parent,N) 2 | % for a parent subwindow [r1 r2 c1 c2], find the corresponding 3 | % child subwindow at the coarser pyramid level N levels up 4 | 5 | if ~exist('N','var') 6 | N = 1; 7 | end 8 | 9 | child = parent; 10 | for K = 1:N 11 | child = (child+1)/2; 12 | child([1 3]) = ceil(child([1 3])); 13 | child([2 4]) = floor(child([2 4])); 14 | end 15 | 16 | end -------------------------------------------------------------------------------- /code/code/classify_image.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/classify_image.fig -------------------------------------------------------------------------------- /code/code/classify_image.m: -------------------------------------------------------------------------------- 1 | function varargout = classify_image(varargin) 2 | gui_Singleton = 1; 3 | gui_State = struct('gui_Name', mfilename, ... 4 | 'gui_Singleton', gui_Singleton, ... 5 | 'gui_OpeningFcn', @classify_image_OpeningFcn, ... 6 | 'gui_OutputFcn', @classify_image_OutputFcn, ... 7 | 'gui_LayoutFcn', [] , ... 8 | 'gui_Callback', []); 9 | if nargin && ischar(varargin{1}) 10 | gui_State.gui_Callback = str2func(varargin{1}); 11 | end 12 | 13 | if nargout 14 | [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:}); 15 | else 16 | gui_mainfcn(gui_State, varargin{:}); 17 | end 18 | % End initialization code - DO NOT EDIT 19 | 20 | 21 | % --- Executes just before classify_image is made visible. 22 | function classify_image_OpeningFcn(hObject, eventdata, handles, varargin) 23 | % This function has no output args, see OutputFcn. 
24 | % hObject handle to figure 25 | % eventdata reserved - to be defined in a future version of MATLAB 26 | % handles structure with handles and user data (see GUIDATA) 27 | % varargin command line arguments to classify_image (see VARARGIN) 28 | 29 | % Choose default command line output for classify_image 30 | handles.output = hObject; 31 | clc; 32 | warning off; 33 | 34 | set(handles.pushbutton1,'Enable','On'); 35 | set(handles.pushbutton2,'Enable','Off'); 36 | set(handles.pushbutton3,'Enable','Off'); 37 | set(handles.pushbutton4,'Enable','Off'); 38 | 39 | % Update handles structure 40 | guidata(hObject, handles); 41 | 42 | % UIWAIT makes classify_image wait for user response (see UIRESUME) 43 | % uiwait(handles.figure1); 44 | 45 | 46 | % --- Outputs from this function are returned to the command line. 47 | function varargout = classify_image_OutputFcn(hObject, eventdata, handles) 48 | % varargout cell array for returning output args (see VARARGOUT); 49 | % hObject handle to figure 50 | % eventdata reserved - to be defined in a future version of MATLAB 51 | % handles structure with handles and user data (see GUIDATA) 52 | 53 | % Get default command line output from handles structure 54 | varargout{1} = handles.output; 55 | 56 | 57 | 58 | function edit1_Callback(hObject, eventdata, handles) 59 | % hObject handle to edit1 (see GCBO) 60 | % eventdata reserved - to be defined in a future version of MATLAB 61 | % handles structure with handles and user data (see GUIDATA) 62 | 63 | % Hints: get(hObject,'String') returns contents of edit1 as text 64 | % str2double(get(hObject,'String')) returns contents of edit1 as a double 65 | 66 | 67 | % --- Executes during object creation, after setting all properties. 68 | function edit1_CreateFcn(hObject, eventdata, handles) 69 | % hObject handle to edit1 (see GCBO) 70 | % eventdata reserved - to be defined in a future version of MATLAB 71 | % handles empty - handles not created until after all CreateFcns called 72 | 73 | % Hint: edit controls usually have a white background on Windows. 74 | % See ISPC and COMPUTER. 75 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 76 | set(hObject,'BackgroundColor','white'); 77 | end 78 | 79 | 80 | %% Browse 81 | % --- Executes on button press in pushbutton1. 82 | function pushbutton1_Callback(hObject, eventdata, handles) 83 | % hObject handle to pushbutton1 (see GCBO) 84 | % eventdata reserved - to be defined in a future version of MATLAB 85 | % handles structure with handles and user data (see GUIDATA) 86 | set(handles.pushbutton1,'Enable','Off'); 87 | set(handles.pushbutton2,'Enable','On'); 88 | set(handles.pushbutton3,'Enable','On'); 89 | set(handles.pushbutton4,'Enable','On'); 90 | 91 | global Image; 92 | 93 | [fname, pthname] = uigetfile('*.*', 'Select the Test image'); % Ask user to select test image 94 | dataset_fullpath = strcat(pthname, fname); % Concatenate path and image name 95 | Image = imread(dataset_fullpath); % Read complete image with path 96 | Image = im2uint8(Image); % Convert datatype 97 | Image = imresize(Image,[1000,667]); % Resize it 98 | 99 | set(handles.edit1,'string',dataset_fullpath); % Write full path of the file to edit1 box 100 | 101 | % Set the variable with handles to use it in all other pushbuttons (to declare globally) 102 | handles.Image = Image; 103 | % Store this data (handles variables) in gui data 104 | guidata(hObject, handles); 105 | 106 | 107 | 108 | %% Pre-Processing 109 | % --- Executes on button press in pushbutton2. 
110 | function pushbutton2_Callback(hObject, eventdata, handles) 111 | % hObject handle to pushbutton2 (see GCBO) 112 | % eventdata reserved - to be defined in a future version of MATLAB 113 | % handles structure with handles and user data (see GUIDATA) 114 | global Image; 115 | h = preprocessing_image(Image); 116 | close(classify_image); 117 | 118 | 119 | 120 | %% Segmentation 121 | % --- Executes on button press in pushbutton3. 122 | function pushbutton3_Callback(hObject, eventdata, handles) 123 | % hObject handle to pushbutton3 (see GCBO) 124 | % eventdata reserved - to be defined in a future version of MATLAB 125 | % handles structure with handles and user data (see GUIDATA) 126 | 127 | 128 | 129 | 130 | %% Feature Extraction 131 | % --- Executes on button press in pushbutton4. 132 | function pushbutton4_Callback(hObject, eventdata, handles) 133 | % hObject handle to pushbutton4 (see GCBO) 134 | % eventdata reserved - to be defined in a future version of MATLAB 135 | % handles structure with handles and user data (see GUIDATA) 136 | 137 | 138 | 139 | 140 | % --- Executes on slider movement. 141 | function slider1_Callback(hObject, eventdata, handles) 142 | % hObject handle to slider1 (see GCBO) 143 | % eventdata reserved - to be defined in a future version of MATLAB 144 | % handles structure with handles and user data (see GUIDATA) 145 | 146 | % Hints: get(hObject,'Value') returns position of slider 147 | % get(hObject,'Min') and get(hObject,'Max') to determine range of slider 148 | 149 | 150 | % --- Executes during object creation, after setting all properties. 151 | function slider1_CreateFcn(hObject, eventdata, handles) 152 | % hObject handle to slider1 (see GCBO) 153 | % eventdata reserved - to be defined in a future version of MATLAB 154 | % handles empty - handles not created until after all CreateFcns called 155 | 156 | % Hint: slider controls usually have a light gray background. 
157 | if isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 158 | set(hObject,'BackgroundColor',[.9 .9 .9]); 159 | end 160 | -------------------------------------------------------------------------------- /code/code/code.m: -------------------------------------------------------------------------------- 1 | 2 | clear all; 3 | close all; 4 | clc; 5 | 6 | % I = double(imread('pepper_color.jpg')); 7 | I = double(imread('hangzhou.jpg')); 8 | image_size1 = size(I,1); 9 | image_size2 = size(I,2); 10 | 11 | % denoise image in advance 12 | % H = fspecial('average',[3,3]); 13 | H = fspecial('average',[5,5]); 14 | I = imfilter(I,H,'symmetric'); 15 | 16 | % vectorize image 17 | R = I(:,:,1);G = I(:,:,2);B = I(:,:,3); 18 | x = [R(:) G(:) B(:)]; N = size(x,1); 19 | % cluster = 8; % predefined segmentation number 20 | 21 | %% Unsupervised EM algorithm(EM+MML) 22 | % find number of segmentation using MML 23 | Mmax = 8;Mmin = 3; 24 | Lmin = 1e6; 25 | 26 | for m = Mmax:-1:Mmin 27 | [Wt_hat,Meanmat_hat,Cov_hat] = GMmodel(x ,m); 28 | logsum = 0; 29 | mixpdf = 0; 30 | loglikelihood = 0; 31 | for i = 1:1:m 32 | logsum = logsum + log(N*Wt_hat(i)/12); 33 | end 34 | for k = 1:1:N 35 | for i = 1:1:m 36 | mixpdf = mixpdf + 37 | Wt_hat(i)*mvnpdf(x(k,:),Meanmat_hat(i,:),Cov_hat(:,:,i)); 38 | end 39 | loglikelihood = loglikelihood + log(mixpdf); 40 | end 41 | Lm = (3/2)*logsum + (m/2)*log(N/2) + m*4/2-loglikelihood; 42 | if Lm<=Lmin 43 | Lmin = Lm; 44 | Wt = Wt_hat; 45 | Meanmat = Meanmat_hat; 46 | Cov = Cov_hat; 47 | end 48 | end 49 | cluster = size(Wt,1); 50 | 51 | % estimate parameters using EM_algorithm 52 | % Online file developed by Ravi Shankar, 3rd year B.tech, IIT Guwahati. 53 | [Wt,Meanmat,Cov] = GMmodel(x,cluster); 54 | 55 | % load 'EM_supervised.mat' 56 | % load 'Wt_MML.mat' 57 | % load 'Cov_MML.mat' 58 | % load 'Meanmat_MML.mat' 59 | 60 | % assign pixels to groups 61 | map_em = zeros(image_size1*image_size2,3); 62 | for i=1:1:N 63 | L = zeros(1,cluster); 64 | for j = 1:1:cluster 65 | invCov = inv(Cov(:,:,j)); 66 | numerator = exp(-0.5*(x(i,:)-Meanmat(j,:))*invCov* (x(i,:)- 67 | Meanmat(j,:))'); 68 | L(j) = numerator/det(invCov)^-0.5; 69 | end 70 | [value,index] = max(L); 71 | map_em(i,:) = Meanmat(index,:); 72 | end 73 | 74 | R_em = reshape(map_em(:,1),image_size1,image_size2); 75 | G_em = reshape(map_em(:,2),image_size1,image_size2); 76 | B_em = reshape(map_em(:,3),image_size1,image_size2); 77 | EM_image = cat(3,R_em, G_em, B_em); 78 | 79 | %% K-means clusttering algorithm 80 | newmeans = kmclust(x,cluster); 81 | map_k = zeros(image_size1*image_size2,3); 82 | for i = 1:1:N 83 | for j =1:1:size(newmeans,1) 84 | L(j) = norm(newmeans(j,:) - x(i,:),2); 85 | end 86 | [value,index] = min(L); 87 | map_k(i,:) = newmeans(index,:); 88 | end 89 | 90 | R_k = reshape(map_k(:,1),image_size1,image_size2); 91 | G_k = reshape(map_k(:,2),image_size1,image_size2); 92 | B_k = reshape(map_k(:,3),image_size1,image_size2); 93 | 94 | k_image = cat(3,R_k, G_k, B_k); 95 | 96 | % figure(gcf+1); 97 | % subplot(221);image(uint8(k_image));title('k-means algorithm');axis image 98 | % subplot(222);image(uint8(EM_image_Supervised));title('Supervised EM algorithm');axis image 99 | % subplot(223);image(uint8(EM_image));title('Unsupervised EM algorithm');axisimage 100 | % subplot(224);image(uint8(EM_image - EM_image_Supervised));title('Difference');axis image 101 | 102 | %original image 103 | 104 | figure(gcf); 105 | image(uint8(I)); 106 | title('original','FontSize', 15); 107 | axis image 108 | 109 | % 
results of K-means algorithm vs EM algorithm 110 | 111 | figure(gcf+1); 112 | subplot(121); 113 | image(uint8(k_image)); 114 | title('k-means algorithm'); 115 | axis image 116 | subplot(122); 117 | image(uint8(EM_image)); 118 | title('Unsupervised EM algorithm'); 119 | axis image 120 | -------------------------------------------------------------------------------- /code/code/create_databaseD_1114.asv: -------------------------------------------------------------------------------- 1 | clc; % Clears Command Window 2 | clear all; % Clears Workspace 3 | warning off; % Disable all warnings 4 | 5 | % Create database 6 | imgSet = [imageSet('.\DDSM Database\DDSM_Benign'),... 7 | imageSet('.\DDSM Database\DDSM_Malignant'),... 8 | imageSet('.\DDSM Database\DDSM_Normal')]; 9 | 10 | % Loop over the folders 11 | for i = 1:length({imgSet.Description}) 12 | % Loop over images in that particular folder 13 | for j = 1:[imgSet(i).Count] 14 | Image = read(imgSet(i),j); % Read image 15 | Image = im2uint8(Image); % Change integar type 16 | Image = imresize(Image,[1000,667]); % Resize image 17 | [IDX,sep] = otsu(Image,3); % Perform OTSU thresholding 18 | [M,N] = size(IDX); % Find out size i.e. no. of rows and columns 19 | a2 = zeros(M,N); % Create a zero matrix to append data in future 20 | % Loop over pixels in an image 21 | for i3 = 1:M 22 | for j3 = 1:N 23 | % Give some threshold value 24 | if(IDX(i3,j3)>= 2 ) 25 | a2(i3,j3) = 1; 26 | else 27 | a2(i3,j3) = 0; 28 | end 29 | end 30 | end 31 | 32 | [b,num] = bwlabel(a2,8); % Find number of label connected objects 33 | count_pixels_per_obj = sum(bsxfun(@eq,b(:),1:num)); % Count the pixels for every blob being obtained 34 | [~,ind] = max(count_pixels_per_obj); % Find blob corresponding to the maximum pixel 35 | a2 = (b==ind); % Take only that blob from entire image 36 | 37 | seg_image = Image; 38 | seg_image(~a2) = 0; % Highlight only that blob from original image 39 | a2 = adapthisteq(seg_image); 40 | 41 | 42 | F = fft2(a2); % Perform Fourier Transform 43 | Fa = abs(F); % Get the magnitude 44 | Fb = log(Fa+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 45 | Fc = mat2gray(Fb); % Convert matrix to grayscale image 46 | 47 | F1 = fftshift(F); % Center FFT 48 | 49 | F2 = abs(F1); % Get the magnitude 50 | F3 = log(F2+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 51 | F4 = mat2gray(F3); % Convert matrix to grayscale image 52 | 53 | [p3, p4] = size(F4); % Find out size of an image 54 | q1 = 400; 55 | i3_start = floor((p3-q1)/2); 56 | i3_stop = i3_start + q1; 57 | i4_start = floor((p4-q1)/2); 58 | i4_stop = i4_start + q1; 59 | F5 = F4(i3_start:i3_stop, i4_start:i4_stop, :); 60 | 61 | 62 | GLCM2 = graycomatrix(F5,'Offset',[0 1; -1 1; -1 0; -1 -1]); % Create gray-level co-occurrence matrix from image 63 | stats = GLCM_Features1(GLCM2,0) % Find out GLCM features in this 64 | t1= struct2array(stats) % Convert this structure to an array 65 | 66 | 67 | f2(j,:) = t1; % Make feature vector of all images 68 | end 69 | 70 | if (i>=1) 71 | featureD_dft22([(((i-1)*j)+1):(i*j)],:) = f2; % Make feature vectors of all images of all folders 72 | end 73 | end 74 | 75 | save featureD_dft22 featureD_dft22; % Save this as a ".mat" file -------------------------------------------------------------------------------- /code/code/create_databaseD_1114.m: -------------------------------------------------------------------------------- 1 | clc; % Clears Command Window 2 | clear all; % Clears Workspace 3 | warning off; % Disable all warnings 4 | 
5 | % Create database 6 | imgSet = [imageSet('.\Object Database\object_Benign'),... 7 | imageSet('.\Object Database\object_Malignant'),... 8 | imageSet('.\Object Database\object_Normal')]; 9 | 10 | % Loop over the folders 11 | for i = 1:length({imgSet.Description}) 12 | % Loop over images in that particular folder 13 | for j = 1:[imgSet(i).Count] 14 | Image = read(imgSet(i),j); % Read image 15 | Image = im2uint8(Image); % Change integar type 16 | Image = imresize(Image,[1000,667]); % Resize image 17 | [IDX,sep] = otsu(Image,3); % Perform OTSU thresholding 18 | [M,N] = size(IDX); % Find out size i.e. no. of rows and columns 19 | a2 = zeros(M,N); % Create a zero matrix to append data in future 20 | % Loop over pixels in an image 21 | for i3 = 1:M 22 | for j3 = 1:N 23 | % Give some threshold value 24 | if(IDX(i3,j3)>= 2 ) 25 | a2(i3,j3) = 1; 26 | else 27 | a2(i3,j3) = 0; 28 | end 29 | end 30 | end 31 | 32 | [b,num] = bwlabel(a2,8); % Find number of label connected objects 33 | count_pixels_per_obj = sum(bsxfun(@eq,b(:),1:num)); % Count the pixels for every blob being obtained 34 | [~,ind] = max(count_pixels_per_obj); % Find blob corresponding to the maximum pixel 35 | a2 = (b==ind); % Take only that blob from entire image 36 | 37 | seg_image = Image; 38 | seg_image(~a2) = 0; % Highlight only that blob from original image 39 | a2 = adapthisteq(seg_image); 40 | 41 | 42 | F = fft2(a2); % Perform Fourier Transform 43 | Fa = abs(F); % Get the magnitude 44 | Fb = log(Fa+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 45 | Fc = mat2gray(Fb); % Convert matrix to grayscale image 46 | 47 | F1 = fftshift(F); % Center FFT 48 | 49 | F2 = abs(F1); % Get the magnitude 50 | F3 = log(F2+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 51 | F4 = mat2gray(F3); % Convert matrix to grayscale image 52 | 53 | [p3, p4] = size(F4); % Find out size of an image 54 | q1 = 400; 55 | i3_start = floor((p3-q1)/2); 56 | i3_stop = i3_start + q1; 57 | i4_start = floor((p4-q1)/2); 58 | i4_stop = i4_start + q1; 59 | F5 = F4(i3_start:i3_stop, i4_start:i4_stop, :); 60 | 61 | 62 | GLCM2 = graycomatrix(F5,'Offset',[0 1; -1 1; -1 0; -1 -1]); % Create gray-level co-occurrence matrix from image 63 | stats = GLCM_Features1(GLCM2,0) % Find out GLCM features in this 64 | t1= struct2array(stats) % Convert this structure to an array 65 | 66 | 67 | f2(j,:) = t1; % Make feature vector of all images 68 | end 69 | 70 | if (i>=1) 71 | featureD_dft22([(((i-1)*j)+1):(i*j)],:) = f2; % Make feature vectors of all images of all folders 72 | end 73 | end 74 | 75 | save featureD_dft22 featureD_dft22; % Save this as a ".mat" file -------------------------------------------------------------------------------- /code/code/dabap.m: -------------------------------------------------------------------------------- 1 | %% Fish Detection 2 | % Database Images 3 | offset=1; 4 | dirname=uigetdir(); 5 | D=dir(dirname); 6 | count=0; 7 | for i=3:length(D) 8 | 9 | count=count+1; 10 | files{count}=[dirname '\\' D(i).name]; 11 | I0=files{count}; 12 | F=imread(I0); 13 | 14 | F1=rgb2gray(F); 15 | 16 | 17 | % Feature Extraction using SURF 18 | 19 | fishPoints = detectSURFFeatures(F1); 20 | 21 | 22 | % Visualize the strongest feature points found in the reference image. 23 | 24 | figure, 25 | imshow(F1); 26 | title('F.Points from Train Image'); 27 | hold on; 28 | plot(fishPoints.selectStrongest(100)); 29 | 30 | % Extract feature descriptors at the interest points in both images. 
31 | 32 | [fishFeatures, fishPoints] = extractFeatures(F1, fishPoints); 33 | 34 | 35 | 36 | Standard_Deviation = std2(fishFeatures); 37 | 38 | Variance = mean2(var(double(fishFeatures))); 39 | Feature=[Standard_Deviation,Variance]; 40 | 41 | xlswrite('Trainfeaturenew.xls', [Feature], 1, sprintf('A%d',offset)); 42 | offset = offset + 1; 43 | end 44 | -------------------------------------------------------------------------------- /code/code/dataset/image_2/000000_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/dataset/image_2/000000_10.png -------------------------------------------------------------------------------- /code/code/dataset/image_2/000000_11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/dataset/image_2/000000_11.png -------------------------------------------------------------------------------- /code/code/dataset/image_2/000001_10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/dataset/image_2/000001_10.png -------------------------------------------------------------------------------- /code/code/dataset/image_2/000001_11.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/dataset/image_2/000001_11.png -------------------------------------------------------------------------------- /code/code/de.m: -------------------------------------------------------------------------------- 1 | I1=imread('c1.jpg'); 2 | I = rgb2gray(im2double(I1)); 3 | I_ratio=double(I1)./repmat(I,[1 1 3])./255; 4 | 5 | % Frame enhancement using a general remapping function 6 | N=20; 7 | 8 | I_enhanced2=llf_general(I,@remapping_function,N); 9 | 10 | I_enhanced2=repmat(I_enhanced2,[1 1 3]).*I_ratio; 11 | 12 | 13 | figure, 14 | imshow(I_enhanced2); 15 | title('Edge-aware Enhancement Image'); 16 | 17 | boxImage=rgb2gray(I_enhanced2); 18 | figure, 19 | imshow(boxImage); 20 | title('Edge Enhanced Gray Image'); 21 | figure, 22 | imshow(boxImage); 23 | title('This is Fake Image'); 24 | 25 | 26 | %% 27 | % Read the target image containing a cluttered scene. 28 | As1=imread('6.jpg'); 29 | As = rgb2gray(im2double(As1)); 30 | 31 | figure, 32 | imshow(As); 33 | title('Whole Test Image'); 34 | 35 | %% Step 2: Detect Feature Points 36 | % Detect feature points in both images. 37 | boxPoints = detectSURFFeatures(boxImage); 38 | scenePoints = detectSURFFeatures(As); 39 | 40 | %% 41 | % Visualize the strongest feature points found in the reference image. 42 | figure, 43 | imshow(boxImage); 44 | title('F.Points from Fake Image'); 45 | hold on; 46 | plot(boxPoints.selectStrongest(300)); 47 | 48 | %% 49 | % Visualize the strongest feature points found in the target image. 
50 | figure, 51 | imshow(As); 52 | title('F.Points from Test Image'); 53 | hold on; 54 | plot(scenePoints.selectStrongest(300)); 55 | 56 | %% Step 3: Extract Feature Descriptors 57 | % Extract feature descriptors at the interest points in both images. 58 | [boxFeatures, boxPoints] = extractFeatures(boxImage, boxPoints); 59 | [sceneFeatures, scenePoints] = extractFeatures(As, scenePoints); 60 | 61 | %% Step 4: Find Putative Point Matches 62 | % Match the features using their descriptors. 63 | boxPairs = matchFeatures(boxFeatures, sceneFeatures); 64 | 65 | %% 66 | % Display putatively matched features. 67 | matchedBoxPoints = boxPoints(boxPairs(:, 1), :); 68 | matchedScenePoints = scenePoints(boxPairs(:, 2), :); 69 | figure, 70 | showMatchedFeatures(boxImage, As, matchedBoxPoints, ... 71 | matchedScenePoints, 'montage'); 72 | title(' Matched Points (Outliers)'); 73 | 74 | %% Step 5: Locate the Object in the Scene Using Putative Matches 75 | % |estimateGeometricTransform| calculates the transformation relating the 76 | % matched points, while eliminating outliers. This transformation allows us 77 | % to localize the object in the scene. 78 | [tform, inlierBoxPoints, inlierScenePoints] = ... 79 | estimateGeometricTransform(matchedBoxPoints, matchedScenePoints, 'affine'); 80 | 81 | %% 82 | % Display the matching point pairs with the outliers removed 83 | figure, 84 | showMatchedFeatures(boxImage, As, inlierBoxPoints, ... 85 | inlierScenePoints, 'montage'); 86 | title('Matched Points (Inliers)'); 87 | 88 | %% 89 | % Get the bounding polygon of the reference image. 90 | boxPolygon = [1, 1;... % top-left 91 | size(boxImage, 2), 1;... % top-right 92 | size(boxImage, 2), size(boxImage, 1);... % bottom-right 93 | 1, size(boxImage, 1);... % bottom-left 94 | 1, 1]; % top-left again to close the polygon 95 | 96 | %% 97 | % Transform the polygon into the coordinate system of the target image. 98 | % The transformed polygon indicates the location of the object in the 99 | % scene. 100 | newBoxPolygon = transformPointsForward(tform, boxPolygon); 101 | 102 | %% 103 | % Display the detected object. 
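% Added sanity check (a sketch, not part of the original script): the fraction of
% putative matches that estimateGeometricTransform keeps as inliers gives a rough
% confidence score for the detection displayed below.
inlierRatio = inlierBoxPoints.Count / matchedBoxPoints.Count;
fprintf('SURF match inlier ratio: %.2f (values near 1 indicate a consistent match)\n', inlierRatio);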
104 | figure, 105 | imshow(uint8(As1)); 106 | hold on; 107 | line(newBoxPolygon(:, 1), newBoxPolygon(:, 2), 'Color', 'R'); 108 | title('Detected Fake Person'); 109 | 110 | -------------------------------------------------------------------------------- /code/code/demo_style_transfer.m: -------------------------------------------------------------------------------- 1 | 2 | %% load images 3 | I=imread('images/lounge.png'); 4 | M=imread('images/ruins.png'); 5 | I=rgb2gray(double(I)./255); 6 | M=rgb2gray(double(M)./255); 7 | 8 | %% main computation 9 | tic 10 | [O Og]=style_transfer(I,M,10,4); 11 | toc 12 | 13 | %% show results 14 | figure; 15 | imshow(I);title('Input photograph'); 16 | figure; 17 | imshow(O);title('New style'); 18 | -------------------------------------------------------------------------------- /code/code/downsample.m: -------------------------------------------------------------------------------- 1 | 2 | function [R,subwindow_child] = downsample(I, filter, subwindow) 3 | 4 | r = size(I,1); 5 | c = size(I,2); 6 | if ~exist('subwindow','var') 7 | subwindow = [1 r 1 c]; 8 | end 9 | subwindow_child = child_window(subwindow); 10 | 11 | border_mode = 'reweighted'; 12 | %border_mode = 'symmetric'; 13 | 14 | switch border_mode 15 | case 'reweighted' 16 | % low pass, convolve with 2D separable filter 17 | R = imfilter(I,filter); 18 | 19 | % reweight, brute force weights from 1's in valid image positions 20 | Z = imfilter(ones(size(I)),filter); 21 | R = R./Z; 22 | 23 | otherwise 24 | % low pass, convolve with 2D separable filter 25 | R = imfilter(I,filter,border_mode); 26 | end 27 | 28 | % decimate 29 | reven = mod(subwindow(1),2)==0; 30 | ceven = mod(subwindow(3),2)==0; 31 | R = R(1+reven:2:r, 1+ceven:2:c, :); 32 | 33 | end -------------------------------------------------------------------------------- /code/code/ex.m: -------------------------------------------------------------------------------- 1 | pos = {'A1','B1'}; 2 | for i = 1:4 3 | xdis = [1:10]'; 4 | ydis = rand(10,1); 5 | xlswrite('StreamLines.xls',xdis,1,pos{i}) 6 | xlswrite('StreamLines.xls',ydis,1,pos{i}) 7 | end -------------------------------------------------------------------------------- /code/code/featureextraction_image.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/featureextraction_image.fig -------------------------------------------------------------------------------- /code/code/featureextraction_image.m: -------------------------------------------------------------------------------- 1 | function varargout = featureextraction_image(varargin) 2 | gui_Singleton = 1; 3 | gui_State = struct('gui_Name', mfilename, ... 4 | 'gui_Singleton', gui_Singleton, ... 5 | 'gui_OpeningFcn', @featureextraction_image_OpeningFcn, ... 6 | 'gui_OutputFcn', @featureextraction_image_OutputFcn, ... 7 | 'gui_LayoutFcn', [] , ... 8 | 'gui_Callback', []); 9 | if nargin && ischar(varargin{1}) 10 | gui_State.gui_Callback = str2func(varargin{1}); 11 | end 12 | 13 | if nargout 14 | [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:}); 15 | else 16 | gui_mainfcn(gui_State, varargin{:}); 17 | end 18 | % End initialization code - DO NOT EDIT 19 | 20 | 21 | % --- Executes just before featureextraction_image is made visible. 
22 | function featureextraction_image_OpeningFcn(hObject, eventdata, handles, varargin) 23 | % This function has no output args, see OutputFcn. 24 | % hObject handle to figure 25 | % eventdata reserved - to be defined in a future version of MATLAB 26 | % handles structure with handles and user data (see GUIDATA) 27 | % varargin command line arguments to featureextraction_image (see VARARGIN) 28 | 29 | % Choose default command line output for featureextraction_image 30 | handles.output = hObject; 31 | clc; 32 | warning off; 33 | 34 | global Image; 35 | global seg_image; 36 | global a3; 37 | global F5; 38 | global Fc; 39 | global F4; 40 | 41 | GLCM2 = graycomatrix(F5,'Offset',[0 1; -1 1; -1 0; -1 -1]); % Create gray-level co-occurrence matrix from image 42 | stats = GLCM_Features1(GLCM2,0); % Find out GLCM features in this 43 | t1= struct2array(stats); % Convert this structure to an array 44 | test_feat(:,:) = t1; % Make feature vector of all images 45 | 46 | contrast_disp = stats.contr(1); % Get Contrast 47 | set(handles.edit1,'string',contrast_disp); % Display in editbox 48 | 49 | variance_disp = stats.sosvh(1); % Get Variance 50 | set(handles.edit2,'string',variance_disp); % Display in editbox 51 | 52 | mean_disp = stats.savgh(1); % Get Mean 53 | set(handles.edit3,'string',mean_disp); % Display in editbox 54 | 55 | homogeneity_disp = stats.homop(1); % Get Homogeneity 56 | set(handles.edit4,'string',homogeneity_disp); % Display in editbox 57 | 58 | entropy_disp = stats.entro(1); % Get Entropy 59 | set(handles.edit5,'string',entropy_disp); % Display in editbox 60 | 61 | correlation_disp = stats.corrp(1); % Get correlation 62 | set(handles.edit6,'string',correlation_disp); % Display in editbox 63 | 64 | 65 | 66 | load 'featureD_dft22.mat' % Load trained feature ".mat" file 67 | l = [1;2;3]; % Define labels: 1 - Benign, 2 - Malignant, 3 - Normal 68 | label_train = [repmat(l(1),280,1);repmat(l(2),280,1);repmat(l(3),280,1)]; % Repeat labels for training features 69 | class = multisvm1(featureD_dft22,label_train,test_feat); % Using multisvm, classify image 70 | 71 | % Depending on the labels, we will get the output and hence will display 72 | % that in textbox 73 | if class == 1 74 | out_disp = 'BENIGN' 75 | set(handles.edit7,'string',out_disp); 76 | elseif class == 2 77 | out_disp = 'MALIGNANT' 78 | set(handles.edit7,'string',out_disp); 79 | elseif class == 3 80 | out_disp = 'NORMAL' 81 | set(handles.edit7,'string',out_disp); 82 | end 83 | 84 | 85 | axes(handles.axes1); 86 | imshow(Image); 87 | 88 | axes(handles.axes2); 89 | imshow(a3); 90 | 91 | axes(handles.axes3); 92 | imshow(Fc); 93 | 94 | axes(handles.axes4); 95 | imshow(F4); 96 | 97 | % Update handles structure 98 | guidata(hObject, handles); 99 | 100 | % UIWAIT makes featureextraction_image wait for user response (see UIRESUME) 101 | % uiwait(handles.figure1); 102 | 103 | 104 | % --- Outputs from this function are returned to the command line. 
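% Added illustrative local function (not called by the GUI): the same classification
% step as in the OpeningFcn above, sketched with MATLAB's built-in fitcecoc/predict
% (Statistics and Machine Learning Toolbox) as a stand-in for the custom multisvm1
% helper. It keeps the hard-coded 280-images-per-class assumption used for
% label_train above, which must match the actual training database.
function class = classify_with_ecoc_sketch(featureD_dft22, test_feat)
label_train = [ones(280,1); 2*ones(280,1); 3*ones(280,1)]; % 1 Benign, 2 Malignant, 3 Normal
model = fitcecoc(featureD_dft22, label_train);             % multiclass SVM (one-vs-one ECOC)
class = predict(model, test_feat);                         % returns 1, 2 or 3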
105 | function varargout = featureextraction_image_OutputFcn(hObject, eventdata, handles) 106 | % varargout cell array for returning output args (see VARARGOUT); 107 | % hObject handle to figure 108 | % eventdata reserved - to be defined in a future version of MATLAB 109 | % handles structure with handles and user data (see GUIDATA) 110 | 111 | % Get default command line output from handles structure 112 | varargout{1} = handles.output; 113 | 114 | 115 | 116 | function edit7_Callback(hObject, eventdata, handles) 117 | % hObject handle to edit7 (see GCBO) 118 | % eventdata reserved - to be defined in a future version of MATLAB 119 | % handles structure with handles and user data (see GUIDATA) 120 | 121 | % Hints: get(hObject,'String') returns contents of edit7 as text 122 | % str2double(get(hObject,'String')) returns contents of edit7 as a double 123 | 124 | 125 | % --- Executes during object creation, after setting all properties. 126 | function edit7_CreateFcn(hObject, eventdata, handles) 127 | % hObject handle to edit7 (see GCBO) 128 | % eventdata reserved - to be defined in a future version of MATLAB 129 | % handles empty - handles not created until after all CreateFcns called 130 | 131 | % Hint: edit controls usually have a white background on Windows. 132 | % See ISPC and COMPUTER. 133 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 134 | set(hObject,'BackgroundColor','white'); 135 | end 136 | 137 | 138 | 139 | function edit1_Callback(hObject, eventdata, handles) 140 | % hObject handle to edit1 (see GCBO) 141 | % eventdata reserved - to be defined in a future version of MATLAB 142 | % handles structure with handles and user data (see GUIDATA) 143 | 144 | % Hints: get(hObject,'String') returns contents of edit1 as text 145 | % str2double(get(hObject,'String')) returns contents of edit1 as a double 146 | 147 | 148 | % --- Executes during object creation, after setting all properties. 149 | function edit1_CreateFcn(hObject, eventdata, handles) 150 | % hObject handle to edit1 (see GCBO) 151 | % eventdata reserved - to be defined in a future version of MATLAB 152 | % handles empty - handles not created until after all CreateFcns called 153 | 154 | % Hint: edit controls usually have a white background on Windows. 155 | % See ISPC and COMPUTER. 156 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 157 | set(hObject,'BackgroundColor','white'); 158 | end 159 | 160 | 161 | 162 | function edit2_Callback(hObject, eventdata, handles) 163 | % hObject handle to edit2 (see GCBO) 164 | % eventdata reserved - to be defined in a future version of MATLAB 165 | % handles structure with handles and user data (see GUIDATA) 166 | 167 | % Hints: get(hObject,'String') returns contents of edit2 as text 168 | % str2double(get(hObject,'String')) returns contents of edit2 as a double 169 | 170 | 171 | % --- Executes during object creation, after setting all properties. 172 | function edit2_CreateFcn(hObject, eventdata, handles) 173 | % hObject handle to edit2 (see GCBO) 174 | % eventdata reserved - to be defined in a future version of MATLAB 175 | % handles empty - handles not created until after all CreateFcns called 176 | 177 | % Hint: edit controls usually have a white background on Windows. 178 | % See ISPC and COMPUTER. 
179 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 180 | set(hObject,'BackgroundColor','white'); 181 | end 182 | 183 | 184 | 185 | function edit3_Callback(hObject, eventdata, handles) 186 | % hObject handle to edit3 (see GCBO) 187 | % eventdata reserved - to be defined in a future version of MATLAB 188 | % handles structure with handles and user data (see GUIDATA) 189 | 190 | % Hints: get(hObject,'String') returns contents of edit3 as text 191 | % str2double(get(hObject,'String')) returns contents of edit3 as a double 192 | 193 | 194 | % --- Executes during object creation, after setting all properties. 195 | function edit3_CreateFcn(hObject, eventdata, handles) 196 | % hObject handle to edit3 (see GCBO) 197 | % eventdata reserved - to be defined in a future version of MATLAB 198 | % handles empty - handles not created until after all CreateFcns called 199 | 200 | % Hint: edit controls usually have a white background on Windows. 201 | % See ISPC and COMPUTER. 202 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 203 | set(hObject,'BackgroundColor','white'); 204 | end 205 | 206 | 207 | 208 | function edit4_Callback(hObject, eventdata, handles) 209 | % hObject handle to edit4 (see GCBO) 210 | % eventdata reserved - to be defined in a future version of MATLAB 211 | % handles structure with handles and user data (see GUIDATA) 212 | 213 | % Hints: get(hObject,'String') returns contents of edit4 as text 214 | % str2double(get(hObject,'String')) returns contents of edit4 as a double 215 | 216 | 217 | % --- Executes during object creation, after setting all properties. 218 | function edit4_CreateFcn(hObject, eventdata, handles) 219 | % hObject handle to edit4 (see GCBO) 220 | % eventdata reserved - to be defined in a future version of MATLAB 221 | % handles empty - handles not created until after all CreateFcns called 222 | 223 | % Hint: edit controls usually have a white background on Windows. 224 | % See ISPC and COMPUTER. 225 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 226 | set(hObject,'BackgroundColor','white'); 227 | end 228 | 229 | 230 | 231 | function edit5_Callback(hObject, eventdata, handles) 232 | % hObject handle to edit5 (see GCBO) 233 | % eventdata reserved - to be defined in a future version of MATLAB 234 | % handles structure with handles and user data (see GUIDATA) 235 | 236 | % Hints: get(hObject,'String') returns contents of edit5 as text 237 | % str2double(get(hObject,'String')) returns contents of edit5 as a double 238 | 239 | 240 | % --- Executes during object creation, after setting all properties. 241 | function edit5_CreateFcn(hObject, eventdata, handles) 242 | % hObject handle to edit5 (see GCBO) 243 | % eventdata reserved - to be defined in a future version of MATLAB 244 | % handles empty - handles not created until after all CreateFcns called 245 | 246 | % Hint: edit controls usually have a white background on Windows. 247 | % See ISPC and COMPUTER. 
248 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 249 | set(hObject,'BackgroundColor','white'); 250 | end 251 | 252 | 253 | 254 | function edit6_Callback(hObject, eventdata, handles) 255 | % hObject handle to edit6 (see GCBO) 256 | % eventdata reserved - to be defined in a future version of MATLAB 257 | % handles structure with handles and user data (see GUIDATA) 258 | 259 | % Hints: get(hObject,'String') returns contents of edit6 as text 260 | % str2double(get(hObject,'String')) returns contents of edit6 as a double 261 | 262 | 263 | % --- Executes during object creation, after setting all properties. 264 | function edit6_CreateFcn(hObject, eventdata, handles) 265 | % hObject handle to edit6 (see GCBO) 266 | % eventdata reserved - to be defined in a future version of MATLAB 267 | % handles empty - handles not created until after all CreateFcns called 268 | 269 | % Hint: edit controls usually have a white background on Windows. 270 | % See ISPC and COMPUTER. 271 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 272 | set(hObject,'BackgroundColor','white'); 273 | end 274 | 275 | 276 | %% MAIN MENU 277 | % --- Executes on button press in pushbutton1. 278 | function pushbutton1_Callback(hObject, eventdata, handles) 279 | % hObject handle to pushbutton1 (see GCBO) 280 | % eventdata reserved - to be defined in a future version of MATLAB 281 | % handles structure with handles and user data (see GUIDATA) 282 | h = classify_image; 283 | close(featureextraction_image); 284 | 285 | 286 | %% Pre-Processing 287 | % --- Executes on button press in pushbutton2. 288 | function pushbutton2_Callback(hObject, eventdata, handles) 289 | % hObject handle to pushbutton2 (see GCBO) 290 | % eventdata reserved - to be defined in a future version of MATLAB 291 | % handles structure with handles and user data (see GUIDATA) 292 | h = preprocessing_image; 293 | close(featureextraction_image); 294 | 295 | 296 | %% Segmentation 297 | % --- Executes on button press in pushbutton3. 298 | function pushbutton3_Callback(hObject, eventdata, handles) 299 | % hObject handle to pushbutton3 (see GCBO) 300 | % eventdata reserved - to be defined in a future version of MATLAB 301 | % handles structure with handles and user data (see GUIDATA) 302 | h = segmentation_image; 303 | close(featureextraction_image); 304 | 305 | 306 | %% EXIT 307 | % --- Executes on button press in pushbutton4. 308 | function pushbutton4_Callback(hObject, eventdata, handles) 309 | % hObject handle to pushbutton4 (see GCBO) 310 | % eventdata reserved - to be defined in a future version of MATLAB 311 | % handles structure with handles and user data (see GUIDATA) 312 | close(featureextraction_image); 313 | -------------------------------------------------------------------------------- /code/code/final.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/final.fig -------------------------------------------------------------------------------- /code/code/final.m: -------------------------------------------------------------------------------- 1 | function varargout = final(varargin) 2 | gui_Singleton = 1; 3 | gui_State = struct('gui_Name', mfilename, ... 4 | 'gui_Singleton', gui_Singleton, ... 5 | 'gui_OpeningFcn', @final_OpeningFcn, ... 6 | 'gui_OutputFcn', @final_OutputFcn, ... 
7 | 'gui_LayoutFcn', [] , ... 8 | 'gui_Callback', []); 9 | if nargin && ischar(varargin{1}) 10 | gui_State.gui_Callback = str2func(varargin{1}); 11 | end 12 | 13 | if nargout 14 | [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:}); 15 | else 16 | gui_mainfcn(gui_State, varargin{:}); 17 | end 18 | % End initialization code - DO NOT EDIT 19 | 20 | 21 | % --- Executes just before final is made visible. 22 | function final_OpeningFcn(hObject, eventdata, handles, varargin) 23 | % This function has no output args, see OutputFcn. 24 | % hObject handle to figure 25 | % eventdata reserved - to be defined in a future version of MATLAB 26 | % handles structure with handles and user data (see GUIDATA) 27 | % varargin command line arguments to final (see VARARGIN) 28 | 29 | % Choose default command line output for final 30 | handles.output = hObject; 31 | ss=ones(300,300); 32 | axes(handles.axes1); 33 | imshow(ss); 34 | axes(handles.axes2); 35 | imshow(ss); 36 | axes(handles.axes3); 37 | imshow(ss); 38 | axes(handles.axes4); 39 | imshow(ss); 40 | axes(handles.axes5); 41 | imshow(ss); 42 | axes(handles.axes6); 43 | imshow(ss); 44 | axes(handles.axes7); 45 | imshow(ss); 46 | 47 | % Update handles structure 48 | guidata(hObject, handles); 49 | 50 | % UIWAIT makes final wait for user response (see UIRESUME) 51 | % uiwait(handles.figure1); 52 | 53 | 54 | % --- Outputs from this function are returned to the command line. 55 | function varargout = final_OutputFcn(hObject, eventdata, handles) 56 | % varargout cell array for returning output args (see VARARGOUT); 57 | % hObject handle to figure 58 | % eventdata reserved - to be defined in a future version of MATLAB 59 | % handles structure with handles and user data (see GUIDATA) 60 | 61 | % Get default command line output from handles structure 62 | varargout{1} = handles.output; 63 | 64 | 65 | % --- Executes on button press in pushbutton1. 66 | function pushbutton1_Callback(hObject, eventdata, handles) 67 | % hObject handle to pushbutton1 (see GCBO) 68 | % eventdata reserved - to be defined in a future version of MATLAB 69 | % handles structure with handles and user data (see GUIDATA) 70 | %% Object Detection 71 | 72 | %% Read Test Image 73 | 74 | % Database Images 75 | offset=1; 76 | dirname=uigetdir(); 77 | D=dir(dirname); 78 | count=0; 79 | for i=3:length(D) 80 | 81 | count=count+1; 82 | files{count}=[dirname '\\' D(i).name]; 83 | I0=files{count}; 84 | I1=imread(I0); 85 | 86 | axes(handles.axes1); 87 | imshow(I1); 88 | title('Test Cropped Image'); 89 | 90 | 91 | 92 | I = rgb2gray(im2double(I1)); 93 | I_ratio=double(I1)./repmat(I,[1 1 3])./255; 94 | 95 | % Frame enhancement using a general remapping function 96 | N=20; 97 | 98 | I_enhanced2=llf_general(I,@remapping_function,N); 99 | 100 | I_enhanced2=repmat(I_enhanced2,[1 1 3]).*I_ratio; 101 | 102 | 103 | axes(handles.axes2); 104 | imshow(I_enhanced2); 105 | title('Edge-aware Enhancement Image'); 106 | 107 | Tr=rgb2gray(I_enhanced2); 108 | axes(handles.axes3); 109 | imshow(Tr); 110 | title('Edge Enhanced Gray Image'); 111 | 112 | 113 | axes(handles.axes4); 114 | nbins = 50; 115 | hist(Tr,nbins) 116 | title('Histogram of Gray Image'); 117 | 118 | 119 | %% Detect Feature Points 120 | 121 | TestPoints = detectSURFFeatures(Tr); 122 | 123 | % Visualize the strongest feature points found in the test image. 
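% Note (added clarification): hist(Tr,nbins) above treats each image column as a
% separate data set and draws one histogram per column; a single intensity
% histogram of the enhanced grey image would be imhist(Tr), assuming Tr stays
% within [0,1] after the enhancement.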
124 | 125 | axes(handles.axes5); 126 | imshow(Tr); 127 | title('Features Points from Test Image'); 128 | hold on; 129 | plot(TestPoints.selectStrongest(100)); 130 | 131 | [ObjectFeatures, TestPoints] = extractFeatures(Tr,TestPoints); 132 | 133 | save('objectFeature.mat','ObjectFeatures'); 134 | 135 | Standard_Deviation = std2(ObjectFeatures); 136 | 137 | Variance = mean2(var(double(ObjectFeatures))); 138 | Feature=[Standard_Deviation,Variance]; 139 | 140 | xlswrite('te.xls',[Feature]); 141 | end 142 | 143 | handles.Feature=Feature; 144 | 145 | handles.I1=I1; 146 | 147 | % Update handles structure 148 | guidata(hObject, handles); 149 | 150 | 151 | % --- Executes on button press in pushbutton2. 152 | function pushbutton2_Callback(hObject, eventdata, handles) 153 | % hObject handle to pushbutton2 (see GCBO) 154 | % eventdata reserved - to be defined in a future version of MATLAB 155 | % handles structure with handles and user data (see GUIDATA) 156 | Feature=handles.Feature; 157 | I1=handles.I1; 158 | 159 | Da=xlsread('Trainfeature1.xls'); 160 | 161 | A='Test Feature matched with Train Feature'; 162 | set(handles.edit1,'string',A); 163 | 164 | 165 | % Update handles structure 166 | guidata(hObject, handles); 167 | 168 | 169 | 170 | 171 | 172 | function edit1_Callback(hObject, eventdata, handles) 173 | % hObject handle to edit1 (see GCBO) 174 | % eventdata reserved - to be defined in a future version of MATLAB 175 | % handles structure with handles and user data (see GUIDATA) 176 | 177 | % Hints: get(hObject,'String') returns contents of edit1 as text 178 | % str2double(get(hObject,'String')) returns contents of edit1 as a double 179 | 180 | 181 | % --- Executes during object creation, after setting all properties. 182 | function edit1_CreateFcn(hObject, eventdata, handles) 183 | % hObject handle to edit1 (see GCBO) 184 | % eventdata reserved - to be defined in a future version of MATLAB 185 | % handles empty - handles not created until after all CreateFcns called 186 | 187 | % Hint: edit controls usually have a white background on Windows. 188 | % See ISPC and COMPUTER. 189 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 190 | set(hObject,'BackgroundColor','white'); 191 | end 192 | 193 | 194 | 195 | function edit2_Callback(hObject, eventdata, handles) 196 | % hObject handle to edit2 (see GCBO) 197 | % eventdata reserved - to be defined in a future version of MATLAB 198 | % handles structure with handles and user data (see GUIDATA) 199 | 200 | % Hints: get(hObject,'String') returns contents of edit2 as text 201 | % str2double(get(hObject,'String')) returns contents of edit2 as a double 202 | 203 | 204 | % --- Executes during object creation, after setting all properties. 205 | function edit2_CreateFcn(hObject, eventdata, handles) 206 | % hObject handle to edit2 (see GCBO) 207 | % eventdata reserved - to be defined in a future version of MATLAB 208 | % handles empty - handles not created until after all CreateFcns called 209 | 210 | % Hint: edit controls usually have a white background on Windows. 211 | % See ISPC and COMPUTER. 
212 | if ispc && isequal(get(hObject,'BackgroundColor'), get(0,'defaultUicontrolBackgroundColor')) 213 | set(hObject,'BackgroundColor','white'); 214 | end 215 | -------------------------------------------------------------------------------- /code/code/find_connect_superpixel_DoubleIn_Opposite.m: -------------------------------------------------------------------------------- 1 | function [ ConPix0, ConPixSecond ] = find_connect_superpixel_DoubleIn_Opposite(labels, K, height ,width ) 2 | %% 3 | % obtain the neighbour relationship of the super-pixels 4 | % Input: 5 | % labels: the super-pixel label obtained from SLIC 6 | % K: the number of super-pixels 7 | % height: the height of the image 8 | % width: the width of the image 9 | % Output: 10 | % ConPix0: the one layer neighbour relationship 11 | % ConPixSecond: the two layer neighbour relationship 12 | %%%%===================================================== 13 | ConPix=zeros(K,K); 14 | %the one outerboundary super 15 | for i=1:height-1 16 | for j=1:width-1 17 | if labels(i,j)~=labels(i,j+1) 18 | ConPix(labels(i,j)+1 ,labels(i,j+1)+1 )=1; 19 | end 20 | if labels(i,j)~=labels(i+1,j) 21 | ConPix(labels(i,j)+1 ,labels(i+1,j)+1 )=1; 22 | end 23 | end 24 | if labels(i,j+1)~=labels(i+1,j+1) 25 | ConPix(labels(i,j+1)+1 ,labels(i+1,j+1 )+1 )=1; 26 | end 27 | end 28 | for j=1:width-1 29 | if labels(height,j)~=labels(height,j+1) 30 | ConPix( labels(height,j)+1,labels(height,j+1)+1 )=1; 31 | end 32 | end 33 | for i=1:height-1 34 | for j=1:width-1 35 | if labels(i,j)~=labels(i+1,j+1) 36 | ConPix( labels(i,j)+1,labels(i+1,j+1)+1 )=1; 37 | end 38 | end 39 | end 40 | for i=1:height-1 41 | for j=2:width 42 | if labels(i,j)~=labels(i+1,j-1) 43 | ConPix( labels(i,j)+1,labels(i+1,j-1)+1 )=1; 44 | end 45 | end 46 | end 47 | ConPix0 = ConPix + ConPix'; 48 | % connect the super-pixel on the opposite boundary 49 | for j=1:width 50 | ConPix( labels(1,j)+1 , labels(height,j)+1 ) = 1; 51 | end 52 | for j=1:height 53 | ConPix( labels(j,1)+1 , labels(j,width)+1 ) = 1 ; 54 | end 55 | ConPix=ConPix+ConPix'; 56 | % find the second outerboundary superpixel 57 | ConPixSecond = ConPix; 58 | for i=1:K 59 | siteline=find( ConPix(i,:)>0 ); 60 | lenthsiteline=length(siteline); 61 | for j=1:lenthsiteline 62 | ConPixSecond(i,:)= ConPixSecond(i,:)+ ConPix( siteline( j ), :); 63 | end 64 | end 65 | % find third outerboundary superpixel 66 | % ConPixTid = ConPixSecond; 67 | % for i=1:K 68 | % siteline=find( ConPixSecond(i,:)>0 ); 69 | % lenthsiteline=length(siteline); 70 | % for j=1:lenthsiteline 71 | % ConPixTid(i,:)= ConPixTid(i,:)+ ConPix( siteline( j ), :); 72 | % end 73 | % end 74 | 75 | 76 | 77 | -------------------------------------------------------------------------------- /code/code/gaussian_pyramid.m: -------------------------------------------------------------------------------- 1 | % Construction of Gaussian pyramid 2 | % 3 | % Arguments: 4 | % image 'I' 5 | % 'nlev', number of levels in the pyramid (optional) 6 | % subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional) 7 | % 8 | % tom.mertens@gmail.com, August 2007 9 | % sam.hasinoff@gmail.com, March 2011 [modified to handle subwindows] 10 | % 11 | 12 | function pyr = gaussian_pyramid(I,nlev,subwindow) 13 | 14 | r = size(I,1); 15 | c = size(I,2); 16 | if ~exist('subwindow','var') 17 | subwindow = [1 r 1 c]; 18 | end 19 | if ~exist('nlev','var') 20 | nlev = numlevels([r c]); % build highest possible pyramid 21 | end 22 | 23 | % start by copying the image to the finest level 24 | pyr = cell(nlev,1); 25 | 
pyr{1} = I; 26 | 27 | % recursively downsample the image 28 | filter = pyramid_filter; 29 | for l = 2:nlev 30 | I = downsample(I,filter,subwindow); 31 | pyr{l} = I; 32 | end -------------------------------------------------------------------------------- /code/code/get_transfer_function.m: -------------------------------------------------------------------------------- 1 | function f=get_transfer_function(I,M) 2 | % input: 3 | % I : image in [0 1] with stat you want to be transfered 4 | % M : image in [0 1] with stat you want to get to 5 | % output 6 | % f : table with transfer values of size 256 and values in [0,1] 7 | 8 | % get cdf of the input values 9 | x=linspace(0,1,256); 10 | h1=hist(I(:),x); 11 | cdf1=cumsum(h1)./sum(h1); 12 | 13 | % get cdf target values (more resolution is needed because we want to invert it) 14 | x=linspace(0,1,256^2); 15 | h2=hist(M(:),x); 16 | cdf2=cumsum(h2)./sum(h2); 17 | f=linspace(0,1,256); 18 | 19 | % get f=cdf2^{-1}(cdf1) 20 | for i=1:256 21 | f(i)=find(cdf2>=cdf1(i),1)./(256*256); 22 | end 23 | 24 | end 25 | -------------------------------------------------------------------------------- /code/code/laplacian_pyramid.m: -------------------------------------------------------------------------------- 1 | % Contruction of Laplacian pyramid 2 | % 3 | % Arguments: 4 | % image 'I' 5 | % 'nlev', number of levels in the pyramid (optional) 6 | % subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional) 7 | % 8 | % tom.mertens@gmail.com, August 2007 9 | % sam.hasinoff@gmail.com, March 2011 [modified to handle subwindows] 10 | % 11 | % 12 | % More information: 13 | % 'The Laplacian Pyramid as a Compact Image Code' 14 | % Burt, P., and Adelson, E. H., 15 | % IEEE Transactions on Communication, COM-31:532-540 (1983). 16 | % 17 | 18 | function pyr = laplacian_pyramid(I,nlev,subwindow) 19 | 20 | r = size(I,1); 21 | c = size(I,2); 22 | if ~exist('subwindow','var') 23 | subwindow = [1 r 1 c]; 24 | end 25 | if ~exist('nlev','var') 26 | nlev = numlevels([r c]); % build highest possible pyramid 27 | end 28 | 29 | % recursively build pyramid 30 | pyr = cell(nlev,1); 31 | filter = pyramid_filter; 32 | J = I; 33 | for l = 1:nlev - 1 34 | % apply low pass filter, and downsample 35 | [I,subwindow_child] = downsample(J,filter,subwindow); 36 | 37 | % in each level, store difference between image and upsampled low pass version 38 | pyr{l} = J - upsample(I,filter,subwindow); 39 | 40 | J = I; % continue with low pass image 41 | subwindow = subwindow_child; 42 | end 43 | pyr{nlev} = J; % the coarest level contains the residual low pass image 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /code/code/llf.m: -------------------------------------------------------------------------------- 1 | % Perform the local laplacian filter using the function 2 | % f(x,ref)=x+fact*(I-ref)*exp(-(I-ref)²/(2*sigma²)) 3 | 4 | % Perform the local laplacian filter using any function 5 | 6 | % This script implements edge-aware detail and tone manipulation as 7 | % described in : 8 | % Fast and Robust Pyramid-based Image Processing. 9 | % Mathieu Aubry, Sylvain Paris, Samuel W. Hasinoff, Jan Kautz, and Frédo Durand. 
10 | % MIT technical report, November 2011 11 | 12 | % INPUT 13 | % I : input greyscale image 14 | % r : a function handle to the remaping function 15 | % N : number discretisation values of the intensity 16 | % 17 | 18 | % OUTPUT 19 | % F : filtered image 20 | 21 | % aubry.mathieu@gmail.com Sept 2012 22 | 23 | 24 | function [F]=llf(I,sigma,fact,N) 25 | 26 | [height width]=size(I); 27 | n_levels=ceil(log(min(height,width))-log(2))+2; 28 | discretisation=linspace(0,1,N); 29 | discretisation_step=discretisation(2); 30 | 31 | input_gaussian_pyr=gaussian_pyramid(I,n_levels); 32 | output_laplace_pyr=laplacian_pyramid(I,n_levels); 33 | output_laplace_pyr{n_levels}=input_gaussian_pyr{n_levels}; 34 | 35 | for ref=discretisation 36 | I_remap=fact*(I-ref).*exp(-(I-ref).*(I-ref)./(2*sigma*sigma)); 37 | temp_laplace_pyr=laplacian_pyramid(I_remap,n_levels); 38 | for level=1:n_levels-1 39 | output_laplace_pyr{level}=output_laplace_pyr{level}+... 40 | (abs(input_gaussian_pyr{level}-ref)= 2 ) 79 | a2(i3,j3) = 1; 80 | else 81 | a2(i3,j3) = 0; 82 | end 83 | end 84 | end 85 | 86 | [b,num] = bwlabel(a2,8); % Find number of label connected objects 87 | count_pixels_per_obj = sum(bsxfun(@eq,b(:),1:num)); % Count the pixels for every blob being obtained 88 | [~,ind] = max(count_pixels_per_obj); % Find blob corresponding to the maximum pixel 89 | a2 = (b==ind); % Take only that blob from entire image 90 | 91 | seg_image = Image; 92 | seg_image(~a2) = 0; % Highlight only that blob from original image 93 | a2 = adapthisteq(seg_image); 94 | 95 | F = fft2(a2); % Perform Fourier Transform 96 | Fa = abs(F); % Get the magnitude 97 | Fb = log(Fa+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 98 | Fc = mat2gray(Fb); % Convert matrix to grayscale image 99 | 100 | F1 = fftshift(F); % Center FFT 101 | F2 = abs(F1); % Get the magnitude 102 | F3 = log(F2+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 103 | F4 = mat2gray(F3); % Convert matrix to grayscale image 104 | 105 | [p3, p4] = size(F4); % Find out size of an image 106 | q1 = 400; 107 | i3_start = floor((p3-q1)/2); 108 | i3_stop = i3_start + q1; 109 | i4_start = floor((p4-q1)/2); 110 | i4_stop = i4_start + q1; 111 | F5 = F4(i3_start:i3_stop, i4_start:i4_stop, :); 112 | 113 | 114 | GLCM2 = graycomatrix(F5,'Offset',[0 1; -1 1; -1 0; -1 -1]); % Create gray-level co-occurrence matrix from image 115 | stats = GLCM_Features1(GLCM2,0); % Find out GLCM features in this 116 | t1= struct2array(stats); % Convert this structure to an array 117 | f2(j,:) = t1; % Make feature vector of all images 118 | end 119 | 120 | if (i>=1) 121 | featureD_dft([(((i-1)*j)+1):(i*j)],:) = f2; % Make feature vectors of all images of all folders 122 | end 123 | end 124 | 125 | save featureD_dft featureD_dft; % Save this as a ".mat" file 126 | close(mb); 127 | mb = msgbox('Training Completed'); 128 | close(mb); % Close Message Box 129 | 130 | 131 | %% CLASSIFICATION 132 | % --- Executes on button press in pushbutton2. 
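% Added illustrative local function (not called by the GUI): derives the training
% label vector from the imageSet folder counts instead of assuming a fixed number
% of images per class, so the labels stay consistent with the featureD_dft matrix
% built in the training loop above (repelem requires R2015a or later).
function label_train = make_labels_sketch(imgSet)
counts = [imgSet.Count];                               % images in each class folder
label_train = repelem((1:numel(counts))', counts(:));  % class index repeated once per image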
133 | function pushbutton2_Callback(hObject, eventdata, handles) 134 | % hObject handle to pushbutton2 (see GCBO) 135 | % eventdata reserved - to be defined in a future version of MATLAB 136 | % handles structure with handles and user data (see GUIDATA) 137 | h = classify_image; 138 | close(main_menu); 139 | -------------------------------------------------------------------------------- /code/code/normalize.m: -------------------------------------------------------------------------------- 1 | function normVals=normalize(newVals,oldVals) 2 | %function normVals=normalize(newVals,oldVals) normalizes the range of 3 | %newVals to the range of oldVals such that every column is normalized 4 | %independantly 5 | % 6 | %Inputs: newVals - NxP matrix of new values to be normalized to the 7 | % range of oldVals 8 | % oldVals - Optional NxP matrix of original values (for 9 | % arbitrary K). Defaults to normalizing range to [0,1] 10 | % 11 | %Outputs: normVals - NxP matrix of newVals normalized (columnwise) to 12 | % the range of oldVals 13 | % 14 | % 15 | %5/13/03 - Leo Grady 16 | 17 | % Copyright (C) 2002, 2003 Leo Grady 18 | % Computer Vision and Computational Neuroscience Lab 19 | % Department of Cognitive and Neural Systems 20 | % Boston University 21 | % Boston, MA 02215 22 | % 23 | % This program is free software; you can redistribute it and/or 24 | % modify it under the terms of the GNU General Public License 25 | % as published by the Free Software Foundation; either version 2 26 | % of the License, or (at your option) any later version. 27 | % 28 | % This program is distributed in the hope that it will be useful, 29 | % but WITHOUT ANY WARRANTY; without even the implied warranty of 30 | % MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 31 | % GNU General Public License for more details. 32 | % 33 | % You should have received a copy of the GNU General Public License 34 | % along with this program; if not, write to the Free Software 35 | % Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
36 | % 37 | % Date - $Id: normalize.m,v 1.2 2003/08/21 17:29:29 lgrady Exp $ 38 | %========================================================================% 39 | 40 | %Initialize 41 | [N P]=size(newVals); 42 | 43 | %Supply optional argument, if required 44 | if nargin == 1 45 | oldVals=[zeros(1,P);ones(1,P)]; 46 | end 47 | 48 | %Find original minima/maxima 49 | minVal=min(oldVals,[],1); 50 | maxVal=max(oldVals,[],1); 51 | 52 | %Find current minima/maxima 53 | minNewVal=min(newVals,[],1); 54 | maxNewVal=max(newVals,[],1); 55 | 56 | %Perform normalization 57 | warning off MATLAB:divideByZero %Error for divide by zero handled below 58 | normVals=newVals-ones(N,1)*minNewVal; 59 | normVals=normVals.* ( ones(N,1)*(maxVal-minVal) ) ./ ( ones(N,1)* max(normVals,[],1) ); 60 | normVals=normVals+ones(N,1)*minVal; 61 | warning on MATLAB:divideByZero 62 | 63 | %Error check for completely uniform inputs 64 | uniformIndex=find(minNewVal==maxNewVal); 65 | normVals(:,uniformIndex)=ones(N,1)*minNewVal(:,uniformIndex); 66 | -------------------------------------------------------------------------------- /code/code/objectFeature.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/objectFeature.mat -------------------------------------------------------------------------------- /code/code/otsu.m: -------------------------------------------------------------------------------- 1 | function [IDX,sep] = otsu(I,n) 2 | 3 | 4 | error(nargchk(1,2,nargin)) 5 | 6 | % Check if is the input is an RGB image 7 | isRGB = isrgb(I); 8 | 9 | assert(isRGB | ndims(I)==2,... 10 | 'The input must be a 2-D array or an RGB image.') 11 | 12 | %% Checking n (number of classes) 13 | if nargin==1 14 | n = 2; 15 | elseif n==1; 16 | IDX = NaN(size(I)); 17 | sep = 0; 18 | return 19 | elseif n~=abs(round(n)) || n==0 20 | error('MATLAB:otsu:WrongNValue',... 21 | 'n must be a strictly positive integer!') 22 | elseif n>255 23 | n = 255; 24 | warning('MATLAB:otsu:TooHighN',... 25 | 'n is too high. n value has been changed to 255.') 26 | end 27 | 28 | I = single(I); 29 | 30 | %% Perform a KLT if isRGB, and keep the component of highest energy 31 | if isRGB 32 | sizI = size(I); 33 | I = reshape(I,[],3); 34 | [V,D] = eig(cov(I)); 35 | [tmp,c] = max(diag(D)); 36 | I = reshape(I*V(:,c),sizI(1:2)); % component with the highest energy 37 | end 38 | 39 | %% Convert to 256 levels 40 | I = I-min(I(:)); 41 | I = round(I/max(I(:))*255); 42 | 43 | %% Probability distribution 44 | unI = sort(unique(I)); 45 | nbins = min(length(unI),256); 46 | if nbins==n 47 | IDX = ones(size(I)); 48 | for i = 1:n, IDX(I==unI(i)) = i; end 49 | sep = 1; 50 | return 51 | elseif nbinspixval(k+1)) = 2; 76 | 77 | % separability criterion 78 | sep = maxsig/sum(((1:nbins)-mu(end)).^2.*P); 79 | 80 | elseif n==3 81 | w0 = w; 82 | w2 = fliplr(cumsum(fliplr(P))); 83 | [w0,w2] = ndgrid(w0,w2); 84 | 85 | mu0 = mu./w; 86 | mu2 = fliplr(cumsum(fliplr((1:nbins).*P))./cumsum(fliplr(P))); 87 | [mu0,mu2] = ndgrid(mu0,mu2); 88 | 89 | w1 = 1-w0-w2; 90 | w1(w1<=0) = NaN; 91 | 92 | sigma2B =... 93 | w0.*(mu0-mu(end)).^2 + w2.*(mu2-mu(end)).^2 +... 
94 | (w0.*(mu0-mu(end)) + w2.*(mu2-mu(end))).^2./w1; 95 | sigma2B(isnan(sigma2B)) = 0; % zeroing if k1 >= k2 96 | 97 | [maxsig,k] = max(sigma2B(:)); 98 | [k1,k2] = ind2sub([nbins nbins],k); 99 | 100 | % segmented image 101 | IDX = ones(size(I))*3; 102 | IDX(I<=pixval(k1)) = 1; 103 | IDX(I>pixval(k1) & I<=pixval(k2)) = 2; 104 | 105 | % separability criterion 106 | sep = maxsig/sum(((1:nbins)-mu(end)).^2.*P); 107 | 108 | else 109 | k0 = linspace(0,1,n+1); k0 = k0(2:n); 110 | [k,y] = fminsearch(@sig_func,k0,optimset('TolX',1)); 111 | k = round(k*(nbins-1)+1); 112 | 113 | % segmented image 114 | IDX = ones(size(I))*n; 115 | IDX(I<=pixval(k(1))) = 1; 116 | for i = 1:n-2 117 | IDX(I>pixval(k(i)) & I<=pixval(k(i+1))) = i+1; 118 | end 119 | 120 | % separability criterion 121 | sep = 1-y; 122 | 123 | end 124 | 125 | IDX(~isfinite(I)) = 0; 126 | 127 | %% Function to be minimized if n>=4 128 | function y = sig_func(k) 129 | 130 | muT = sum((1:nbins).*P); 131 | sigma2T = sum(((1:nbins)-muT).^2.*P); 132 | 133 | k = round(k*(nbins-1)+1); 134 | k = sort(k); 135 | if any(k<1 | k>nbins), y = 1; return, end 136 | 137 | k = [0 k nbins]; 138 | sigma2B = 0; 139 | for j = 1:n 140 | wj = sum(P(k(j)+1:k(j+1))); 141 | if wj==0, y = 1; return, end 142 | muj = sum((k(j)+1:k(j+1)).*P(k(j)+1:k(j+1)))/wj; 143 | sigma2B = sigma2B + wj*(muj-muT)^2; 144 | end 145 | y = 1-sigma2B/sigma2T; % within the range [0 1] 146 | 147 | end 148 | 149 | end 150 | 151 | function isRGB = isrgb(A) 152 | % --- Do we have an RGB image? 153 | % RGB images can be only uint8, uint16, single, or double 154 | isRGB = ndims(A)==3 && (isfloat(A) || isa(A,'uint8') || isa(A,'uint16')); 155 | % ---- Adapted from the obsolete function ISRGB ---- 156 | if isRGB && isfloat(A) 157 | % At first, just test a small chunk to get a possible quick negative 158 | mm = size(A,1); 159 | nn = size(A,2); 160 | chunk = A(1:min(mm,10),1:min(nn,10),:); 161 | isRGB = (min(chunk(:))>=0 && max(chunk(:))<=1); 162 | % If the chunk is an RGB image, test the whole image 163 | if isRGB, isRGB = (min(A(:))>=0 && max(A(:))<=1); end 164 | end 165 | end 166 | 167 | -------------------------------------------------------------------------------- /code/code/preprocessing_image.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/preprocessing_image.fig -------------------------------------------------------------------------------- /code/code/preprocessing_image.m: -------------------------------------------------------------------------------- 1 | function varargout = preprocessing_image(varargin) 2 | gui_Singleton = 1; 3 | gui_State = struct('gui_Name', mfilename, ... 4 | 'gui_Singleton', gui_Singleton, ... 5 | 'gui_OpeningFcn', @preprocessing_image_OpeningFcn, ... 6 | 'gui_OutputFcn', @preprocessing_image_OutputFcn, ... 7 | 'gui_LayoutFcn', [] , ... 8 | 'gui_Callback', []); 9 | if nargin && ischar(varargin{1}) 10 | gui_State.gui_Callback = str2func(varargin{1}); 11 | end 12 | 13 | if nargout 14 | [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:}); 15 | else 16 | gui_mainfcn(gui_State, varargin{:}); 17 | end 18 | % End initialization code - DO NOT EDIT 19 | 20 | 21 | % --- Executes just before preprocessing_image is made visible. 22 | function preprocessing_image_OpeningFcn(hObject, eventdata, handles, varargin) 23 | % This function has no output args, see OutputFcn. 
24 | % hObject handle to figure 25 | % eventdata reserved - to be defined in a future version of MATLAB 26 | % handles structure with handles and user data (see GUIDATA) 27 | % varargin command line arguments to preprocessing_image (see VARARGIN) 28 | 29 | 30 | % Choose default command line output for preprocessing_image 31 | handles.output = hObject; 32 | clc; 33 | warning off; 34 | global Image 35 | global seg_image 36 | global a3 37 | Image = im2uint8(Image); % Convert datatype 38 | Image = imresize(Image,[1000,667]); % Resize it 39 | axes(handles.axes1); % Define the axes to show image 40 | imshow(Image); % Show image at axes1 41 | 42 | [IDX,sep] = otsu(Image,3); % Perform OTSU thresholding on an image 43 | [M,N] = size(IDX); 44 | a2 = zeros(M,N); 45 | 46 | % Give threshold value to get thresholded image. Loop over all pixel values 47 | % of an image 48 | for i3 = 1:M 49 | for j3 = 1:N 50 | if(IDX(i3,j3)>= 2 ) 51 | a2(i3,j3) = 1; 52 | else 53 | a2(i3,j3) = 0; 54 | end 55 | end 56 | end 57 | 58 | [b,num] = bwlabel(a2,8); % Find label connected objects 59 | count_pixels_per_obj = sum(bsxfun(@eq,b(:),1:num)); % Get sum of all pixels of all blobs 60 | [~,ind] = max(count_pixels_per_obj); % Find blob corresponding to max. number of pixels 61 | a2 = (b==ind); % Get particular blob from an image 62 | seg_image = Image; 63 | seg_image(~a2) = 0; % Highlight only that blob from original image 64 | 65 | % axes(handles.axes2); 66 | % imshow(seg_image); 67 | a3 = adapthisteq(seg_image); 68 | axes(handles.axes2); 69 | imshow(a3); 70 | 71 | 72 | 73 | % Update handles structure 74 | guidata(hObject, handles); 75 | 76 | % UIWAIT makes preprocessing_image wait for user response (see UIRESUME) 77 | % uiwait(handles.figure1); 78 | 79 | 80 | % --- Outputs from this function are returned to the command line. 81 | function varargout = preprocessing_image_OutputFcn(hObject, eventdata, handles) 82 | % varargout cell array for returning output args (see VARARGOUT); 83 | % hObject handle to figure 84 | % eventdata reserved - to be defined in a future version of MATLAB 85 | % handles structure with handles and user data (see GUIDATA) 86 | 87 | % Get default command line output from handles structure 88 | varargout{1} = handles.output; 89 | 90 | 91 | %% Main MENU 92 | % --- Executes on button press in pushbutton1. 93 | function pushbutton1_Callback(hObject, eventdata, handles) 94 | % hObject handle to pushbutton1 (see GCBO) 95 | % eventdata reserved - to be defined in a future version of MATLAB 96 | % handles structure with handles and user data (see GUIDATA) 97 | h = classify_image; 98 | close(preprocessing_image); 99 | 100 | 101 | %% Segmentation 102 | % --- Executes on button press in pushbutton2. 103 | function pushbutton2_Callback(hObject, eventdata, handles) 104 | % hObject handle to pushbutton2 (see GCBO) 105 | % eventdata reserved - to be defined in a future version of MATLAB 106 | % handles structure with handles and user data (see GUIDATA) 107 | global Image; 108 | global seg_image; 109 | global a3; 110 | h = segmentation_image(Image,seg_image,a3); 111 | close(preprocessing_image); 112 | -------------------------------------------------------------------------------- /code/code/pyramid_filter.m: -------------------------------------------------------------------------------- 1 | % This is a 2D separable low pass filter for constructing Gaussian and 2 | % Laplacian pyramids, built from a 1D 5-tap low pass filter. 
3 | % 4 | % tom.mertens@gmail.com, August 2007 5 | % sam.hasinoff@gmail.com, March 2011 [imfilter faster with 2D filter] 6 | % 7 | 8 | function f = pyramid_filter() 9 | f = [.05, .25, .4, .25, .05]; % original [Burt and Adelson, 1983] 10 | %f = [.0625, .25, .375, .25, .0625]; % binom-5 11 | f = f'*f; 12 | end -------------------------------------------------------------------------------- /code/code/reconstruct_laplacian_pyramid.m: -------------------------------------------------------------------------------- 1 | % Reconstruction of image from Laplacian pyramid 2 | % 3 | % Arguments: 4 | % pyramid 'pyr', as generated by function 'laplacian_pyramid' 5 | % subwindow indices 'subwindow', given as [r1 r2 c1 c2] (optional) 6 | % 7 | % tom.mertens@gmail.com, August 2007 8 | % sam.hasinoff@gmail.com, March 2011 [modified to handle subwindows] 9 | % 10 | % 11 | % More information: 12 | % 'The Laplacian Pyramid as a Compact Image Code' 13 | % Burt, P., and Adelson, E. H., 14 | % IEEE Transactions on Communication, COM-31:532-540 (1983). 15 | % 16 | 17 | function R = reconstruct_laplacian_pyramid(pyr,subwindow) 18 | 19 | r = size(pyr{1},1); 20 | c = size(pyr{1},2); 21 | nlev = length(pyr); 22 | 23 | subwindow_all = zeros(nlev,4); 24 | if ~exist('subwindow','var') 25 | subwindow_all(1,:) = [1 r 1 c]; 26 | else 27 | subwindow_all(1,:) = subwindow; 28 | end 29 | for lev = 2:nlev 30 | subwindow_all(lev,:) = child_window(subwindow_all(lev-1,:)); 31 | end 32 | 33 | % start with low pass residual 34 | R = pyr{nlev}; 35 | filter = pyramid_filter; 36 | for lev = nlev-1 : -1 : 1 37 | % upsample, and add to current level 38 | R = pyr{lev} + upsample(R,filter,subwindow_all(lev,:)); 39 | end 40 | -------------------------------------------------------------------------------- /code/code/remapping_function.m: -------------------------------------------------------------------------------- 1 | %This is just a toy example! 2 | function y=remapping_function(x) 3 | % y=(x-0.1).*(x>0.1)+(x+0.1).*(x<-0.1); %smoothing 4 | y=3.*x.*(abs(x)<0.1)+(x+0.2).*(x>0.1)+(x-0.2).*(x<-0.1); %enhancement 5 | end -------------------------------------------------------------------------------- /code/code/segmentation_image.fig: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/segmentation_image.fig -------------------------------------------------------------------------------- /code/code/segmentation_image.m: -------------------------------------------------------------------------------- 1 | function varargout = segmentation_image(varargin) 2 | gui_Singleton = 1; 3 | gui_State = struct('gui_Name', mfilename, ... 4 | 'gui_Singleton', gui_Singleton, ... 5 | 'gui_OpeningFcn', @segmentation_image_OpeningFcn, ... 6 | 'gui_OutputFcn', @segmentation_image_OutputFcn, ... 7 | 'gui_LayoutFcn', [] , ... 8 | 'gui_Callback', []); 9 | if nargin && ischar(varargin{1}) 10 | gui_State.gui_Callback = str2func(varargin{1}); 11 | end 12 | 13 | if nargout 14 | [varargout{1:nargout}] = gui_mainfcn(gui_State, varargin{:}); 15 | else 16 | gui_mainfcn(gui_State, varargin{:}); 17 | end 18 | % End initialization code - DO NOT EDIT 19 | 20 | 21 | % --- Executes just before segmentation_image is made visible. 22 | function segmentation_image_OpeningFcn(hObject, eventdata, handles, varargin) 23 | % This function has no output args, see OutputFcn. 
24 | % hObject handle to figure 25 | % eventdata reserved - to be defined in a future version of MATLAB 26 | % handles structure with handles and user data (see GUIDATA) 27 | % varargin command line arguments to segmentation_image (see VARARGIN) 28 | 29 | % Choose default command line output for segmentation_image 30 | handles.output = hObject; 31 | clc; 32 | warning off; 33 | 34 | global Image; 35 | global seg_image; 36 | global a3; 37 | global F5; 38 | global Fc; 39 | global F4; 40 | 41 | F = fft2(a3); % Perform FFT 42 | Fa = abs(F); % Get the magnitude 43 | Fb = log(Fa+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 44 | Fc = mat2gray(Fb); % Convert matrix to grayscale image 45 | axes(handles.axes1); 46 | imshow(Fc); 47 | 48 | F1 = fftshift(F); % Center FFT 49 | F2 = abs(F1); % Get the magnitude 50 | F3 = log(F2+1); % Use log, for perceptual scaling, and +1 since log(0) is undefined 51 | F4 = mat2gray(F3); % Convert matrix to grayscale image 52 | axes(handles.axes2); 53 | imshow(F4); 54 | 55 | [p3, p4] = size(F4); 56 | q1 = 400; 57 | i3_start = floor((p3-q1)/2); 58 | i3_stop = i3_start + q1; 59 | i4_start = floor((p4-q1)/2); 60 | i4_stop = i4_start + q1; 61 | F5 = F4(i3_start:i3_stop, i4_start:i4_stop, :); 62 | 63 | axes(handles.axes3); 64 | imshow(Image); 65 | 66 | axes(handles.axes4); 67 | imshow(a3); 68 | 69 | % Update handles structure 70 | guidata(hObject, handles); 71 | 72 | % UIWAIT makes segmentation_image wait for user response (see UIRESUME) 73 | % uiwait(handles.figure1); 74 | 75 | 76 | % --- Outputs from this function are returned to the command line. 77 | function varargout = segmentation_image_OutputFcn(hObject, eventdata, handles) 78 | % varargout cell array for returning output args (see VARARGOUT); 79 | % hObject handle to figure 80 | % eventdata reserved - to be defined in a future version of MATLAB 81 | % handles structure with handles and user data (see GUIDATA) 82 | 83 | % Get default command line output from handles structure 84 | varargout{1} = handles.output; 85 | 86 | 87 | %% Main MENU 88 | % --- Executes on button press in pushbutton1. 89 | function pushbutton1_Callback(hObject, eventdata, handles) 90 | % hObject handle to pushbutton1 (see GCBO) 91 | % eventdata reserved - to be defined in a future version of MATLAB 92 | % handles structure with handles and user data (see GUIDATA) 93 | h = classify_image; 94 | close(segmentation_image); 95 | 96 | 97 | %% Pre-Processing 98 | % --- Executes on button press in pushbutton2. 99 | function pushbutton2_Callback(hObject, eventdata, handles) 100 | % hObject handle to pushbutton2 (see GCBO) 101 | % eventdata reserved - to be defined in a future version of MATLAB 102 | % handles structure with handles and user data (see GUIDATA) 103 | h = preprocessing_image; 104 | close(segmentation_image); 105 | 106 | 107 | 108 | %% Feature Extraction 109 | % --- Executes on button press in pushbutton3. 
110 | function pushbutton3_Callback(hObject, eventdata, handles) 111 | % hObject handle to pushbutton3 (see GCBO) 112 | % eventdata reserved - to be defined in a future version of MATLAB 113 | % handles structure with handles and user data (see GUIDATA) 114 | global F5 115 | global Fc; 116 | global F4; 117 | h = featureextraction_image(F5,Fc,F4); 118 | close(segmentation_image); 119 | -------------------------------------------------------------------------------- /code/code/slic.m: -------------------------------------------------------------------------------- 1 | % SLIC Simple Linear Iterative Clustering SuperPixels 2 | % 3 | % Implementation of Achanta, Shaji, Smith, Lucchi, Fua and Susstrunk's 4 | % SLIC Superpixels 5 | % 6 | % Usage: [l, Am, Sp, d] = slic(im, k, m, seRadius, colopt, mw) 7 | % 8 | % Arguments: im - Image to be segmented. 9 | % k - Number of desired superpixels. Note that this is nominal 10 | % the actual number of superpixels generated will generally 11 | % be a bit larger, espiecially if parameter m is small. 12 | % m - Weighting factor between colour and spatial 13 | % differences. Values from about 5 to 40 are useful. Use a 14 | % large value to enforce superpixels with more regular and 15 | % smoother shapes. Try a value of 10 to start with. 16 | % seRadius - Regions morphologically smaller than this are merged with 17 | % adjacent regions. Try a value of 1 or 1.5. Use 0 to 18 | % disable. 19 | % colopt - String 'mean' or 'median' indicating how the cluster 20 | % colour centre should be computed. Defaults to 'mean' 21 | % mw - Optional median filtering window size. Image compression 22 | % can result in noticeable artifacts in the a*b* components 23 | % of the image. Median filtering can reduce this. mw can be 24 | % a single value in which case the same median filtering is 25 | % applied to each L* a* and b* components. Alternatively it 26 | % can be a 2-vector where mw(1) specifies the median 27 | % filtering window to be applied to L* and mw(2) is the 28 | % median filtering window to be applied to a* and b*. 29 | % 30 | % Returns: l - Labeled image of superpixels. Labels range from 1 to k. 31 | % Am - Adjacency matrix of segments. Am(i, j) indicates whether 32 | % segments labeled i and j are connected/adjacent 33 | % Sp - Superpixel attribute structure array with fields: 34 | % L - Mean L* value 35 | % a - Mean a* value 36 | % b - Mean b* value 37 | % r - Mean row value 38 | % c - Mean column value 39 | % stdL - Standard deviation of L* 40 | % stda - Standard deviation of a* 41 | % stdb - Standard deviation of b* 42 | % N - Number of pixels 43 | % edges - List of edge numbers that bound each 44 | % superpixel. This field is allocated, but not set, 45 | % by SLIC. Use SPEDGES for this. 46 | % d - Distance image giving the distance each pixel is from its 47 | % associated superpixel centre. 48 | % 49 | % It is suggested that use of this function is followed by SPDBSCAN to perform a 50 | % DBSCAN clustering of superpixels. This results in a simple and fast 51 | % segmentation of an image. 52 | % 53 | % Minor variations from the original algorithm as defined in Achanta et al's 54 | % paper: 55 | % 56 | % - SuperPixel centres are initialised on a hexagonal grid rather than a square 57 | % one. This results in a segmentation that will be nominally 6-connected 58 | % which hopefully facilitates any subsequent post-processing that seeks to 59 | % merge superpixels. 
60 | % - Initial cluster positions are not shifted to point of lowest gradient 61 | % within a 3x3 neighbourhood because this will be rendered irrelevant the 62 | % first time cluster centres are updated. 63 | % 64 | % Reference: R. Achanta, A. Shaji, K. Smith, A. Lucchi, P. Fua and 65 | % S. Susstrunk. "SLIC Superpixels Compared to State-of-the-Art Superpixel 66 | % Methods" PAMI. Vol 34 No 11. November 2012. pp 2274-2281. 67 | % 68 | % See also: SPDBSCAN, MCLEANUPREGIONS, REGIONADJACENCY, DRAWREGIONBOUNDARIES, RGB2LAB 69 | 70 | % Copyright (c) 2013 Peter Kovesi 71 | % Centre for Exploration Targeting 72 | % School of Earth and Environment 73 | % The University of Western Australia 74 | % peter.kovesi at uwa edu au 75 | % 76 | % Permission is hereby granted, free of charge, to any person obtaining a copy 77 | % of this software and associated documentation files (the "Software"), to deal 78 | % in the Software without restriction, subject to the following conditions: 79 | % 80 | % The above copyright notice and this permission notice shall be included in 81 | % all copies or substantial portions of the Software. 82 | % 83 | % The Software is provided "as is", without warranty of any kind. 84 | 85 | % Feb 2013 86 | % July 2013 Super pixel attributes returned as a structure array 87 | 88 | % Note that most of the computation time is not in the clustering, but rather 89 | % in the region cleanup process. 90 | 91 | 92 | function [l, Am, Sp, d] = slic(im, k, m, seRadius, colopt, mw, nItr, eim, We) 93 | 94 | if ~exist('colopt','var') || isempty(colopt), colopt = 'mean'; end 95 | if ~exist('mw','var') || isempty(mw), mw = 0; end 96 | if ~exist('nItr','var') || isempty(nItr), nItr = 10; end 97 | 98 | if exist('eim', 'var'), USEDIST = 0; else, USEDIST = 1; end 99 | 100 | MEANCENTRE = 1; 101 | MEDIANCENTRE = 2; 102 | 103 | if strcmp(colopt, 'mean') 104 | centre = MEANCENTRE; 105 | elseif strcmp(colopt, 'median') 106 | centre = MEDIANCENTRE; 107 | else 108 | error('Invalid colour centre computation option'); 109 | end 110 | 111 | [rows, cols, chan] = size(im); 112 | if chan ~= 3 113 | error('Image must be colour'); 114 | end 115 | 116 | % Convert image to L*a*b* colourspace. This gives us a colourspace that is 117 | % nominally perceptually uniform. This allows us to use the euclidean 118 | % distance between colour coordinates to measure differences between 119 | % colours. Note the image becomes double after conversion. We may want to 120 | % go to signed shorts to save memory. 121 | im = rgb2lab(im); 122 | 123 | % Apply median filtering to colour components if mw has been supplied 124 | % and/or non-zero 125 | if mw 126 | if length(mw) == 1 127 | mw(2) = mw(1); % Use same filtering for L and chrominance 128 | end 129 | for n = 1:3 130 | im(:,:,n) = medfilt2(im(:,:,n), [mw(1) mw(1)]); 131 | end 132 | end 133 | 134 | % Nominal spacing between grid elements assuming hexagonal grid 135 | S = sqrt(rows*cols / (k * sqrt(3)/2)); 136 | 137 | % Get nodes per row allowing a half column margin at one end that alternates 138 | % from row to row 139 | nodeCols = round(cols/S - 0.5); 140 | % Given an integer number of nodes per row recompute S 141 | S = cols/(nodeCols + 0.5); 142 | 143 | % Get number of rows of nodes allowing 0.5 row margin top and bottom 144 | nodeRows = round(rows/(sqrt(3)/2*S)); 145 | vSpacing = rows/nodeRows; 146 | 147 | % Recompute k 148 | k = nodeRows * nodeCols; 149 | 150 | % Allocate memory and initialise clusters, labels and distances. 
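% Worked example of the grid-spacing computation above (hypothetical numbers):
% for a 480x640 image with k = 200 requested, S = sqrt(480*640/(200*sqrt(3)/2)) ~ 42.1,
% nodeCols = round(640/42.1 - 0.5) = 15, S is then recomputed as 640/15.5 ~ 41.3,
% nodeRows = round(480/(sqrt(3)/2*41.3)) ~ 13, vSpacing ~ 36.9, and k becomes
% 13*15 = 195 seed clusters, close to (but not exactly) the number requested.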
151 | C = zeros(6,k); % Cluster centre data 1:3 is mean Lab value, 152 | % 4:5 is row, col of centre, 6 is No of pixels 153 | l = -ones(rows, cols); % Pixel labels. 154 | d = inf(rows, cols); % Pixel distances from cluster centres. 155 | 156 | % Initialise clusters on a hexagonal grid 157 | kk = 1; 158 | r = vSpacing/2; 159 | 160 | for ri = 1:nodeRows 161 | % Following code alternates the starting column for each row of grid 162 | % points to obtain a hexagonal pattern. Note S and vSpacing are kept 163 | % as doubles to prevent errors accumulating across the grid. 164 | if mod(ri,2), c = S/2; else, c = S; end 165 | 166 | for ci = 1:nodeCols 167 | cc = round(c); rr = round(r); 168 | C(1:5, kk) = [squeeze(im(rr,cc,:)); cc; rr]; 169 | c = c+S; 170 | kk = kk+1; 171 | end 172 | 173 | r = r+vSpacing; 174 | end 175 | 176 | % Now perform the clustering. 10 iterations is suggested but I suspect n 177 | % could be as small as 2 or even 1 178 | S = round(S); % We need S to be an integer from now on 179 | 180 | for n = 1:nItr 181 | for kk = 1:k % for each cluster 182 | 183 | % Get subimage around cluster 184 | rmin = max(C(5,kk)-S, 1); rmax = min(C(5,kk)+S, rows); 185 | cmin = max(C(4,kk)-S, 1); cmax = min(C(4,kk)+S, cols); 186 | subim = im(rmin:rmax, cmin:cmax, :); 187 | assert(numel(subim) > 0) 188 | 189 | % Compute distances D between C(:,kk) and subimage 190 | if USEDIST 191 | D = dist(C(:, kk), subim, rmin, cmin, S, m); 192 | else 193 | D = dist2(C(:, kk), subim, rmin, cmin, S, m, eim, We); 194 | end 195 | 196 | % If any pixel distance from the cluster centre is less than its 197 | % previous value update its distance and label 198 | subd = d(rmin:rmax, cmin:cmax); 199 | subl = l(rmin:rmax, cmin:cmax); 200 | updateMask = D < subd; 201 | subd(updateMask) = D(updateMask); 202 | subl(updateMask) = kk; 203 | 204 | d(rmin:rmax, cmin:cmax) = subd; 205 | l(rmin:rmax, cmin:cmax) = subl; 206 | end 207 | 208 | % Update cluster centres with mean values 209 | C(:) = 0; 210 | for r = 1:rows 211 | for c = 1:cols 212 | tmp = [im(r,c,1); im(r,c,2); im(r,c,3); c; r; 1]; 213 | C(:, l(r,c)) = C(:, l(r,c)) + tmp; 214 | end 215 | end 216 | 217 | % Divide by number of pixels in each superpixel to get mean values 218 | for kk = 1:k 219 | C(1:5,kk) = round(C(1:5,kk)/C(6,kk)); 220 | end 221 | 222 | % Note the residual error, E, is not calculated because we are using a 223 | % fixed number of iterations 224 | end 225 | 226 | % Cleanup small orphaned regions and 'spurs' on each region using 227 | % morphological opening on each labeled region. The cleaned up regions are 228 | % assigned to the nearest cluster. The regions are renumbered and the 229 | % adjacency matrix regenerated. This is needed because the cleanup is 230 | % likely to change the number of labeled regions. 231 | % [l, Am] = mcleanupregions(l, seRadius); 232 | Am = l; 233 | 234 | % Recompute the final superpixel attributes and write information into 235 | % the Sp struct array. 236 | N = length(Am); 237 | Sp = struct('L', cell(1,N), 'a', cell(1,N), 'b', cell(1,N), ... 238 | 'stdL', cell(1,N), 'stda', cell(1,N), 'stdb', cell(1,N), ... 
239 | 'r', cell(1,N), 'c', cell(1,N), 'N', cell(1,N)); 240 | [X,Y] = meshgrid(1:cols, 1:rows); 241 | L = im(:,:,1); 242 | A = im(:,:,2); 243 | B = im(:,:,3); 244 | for n = 1:N 245 | mask = l==n; 246 | nm = sum(mask(:)); 247 | if centre == MEANCENTRE 248 | Sp(n).L = sum(L(mask))/nm; 249 | Sp(n).a = sum(A(mask))/nm; 250 | Sp(n).b = sum(B(mask))/nm; 251 | 252 | elseif centre == MEDIANCENTRE 253 | Sp(n).L = median(L(mask)); 254 | Sp(n).a = median(A(mask)); 255 | Sp(n).b = median(B(mask)); 256 | end 257 | 258 | Sp(n).r = sum(Y(mask))/nm; 259 | Sp(n).c = sum(X(mask))/nm; 260 | 261 | % Compute standard deviations of the colour components of each super 262 | % pixel. This can be used by code seeking to merge superpixels into 263 | % image segments. Note these are calculated relative to the mean colour 264 | % component irrespective of the centre being calculated from the mean or 265 | % median colour component values. 266 | Sp(n).stdL = std(L(mask)); 267 | Sp(n).stda = std(A(mask)); 268 | Sp(n).stdb = std(B(mask)); 269 | 270 | Sp(n).N = nm; % Record number of pixels in superpixel too. 271 | end 272 | 273 | %-- dist ------------------------------------------- 274 | % 275 | % Usage: D = dist(C, im, r1, c1, S, m) 276 | % 277 | % Arguments: C - Cluster being considered 278 | % im - sub-image surrounding cluster centre 279 | % r1, c1 - row and column of top left corner of sub image within the 280 | % overall image. 281 | % S - grid spacing 282 | % m - weighting factor between colour and spatial differences. 283 | % 284 | % Returns: D - Distance image giving distance of every pixel in the 285 | % subimage from the cluster centre 286 | % 287 | % Distance = sqrt( dc^2 + (ds/S)^2*m^2 ) 288 | % where: 289 | % dc = sqrt(dl^2 + da^2 + db^2) % Colour distance 290 | % ds = sqrt(dx^2 + dy^2) % Spatial distance 291 | % 292 | % m is a weighting factor representing the nominal maximum colour distance 293 | % expected so that one can rank colour similarity relative to distance 294 | % similarity. try m in the range [1-40] for L*a*b* space 295 | % 296 | % ?? Might be worth trying the Geometric Mean instead ?? 297 | % Distance = sqrt(dc * ds) 298 | % but having a factor 'm' to play with is probably handy 299 | 300 | % This code could be more efficient 301 | 302 | function D = dist(C, im, r1, c1, S, m) 303 | 304 | % Squared spatial distance 305 | % ds is a fixed 'image' we should be able to exploit this 306 | % and use a fixed meshgrid for much of the time somehow... 307 | [rows, cols, chan] = size(im); 308 | [x,y] = meshgrid(c1:(c1+cols-1), r1:(r1+rows-1)); 309 | x = x-C(4); % x and y dist from cluster centre 310 | y = y-C(5); 311 | ds2 = x.^2 + y.^2; 312 | 313 | % Squared colour difference 314 | for n = 1:3 315 | im(:,:,n) = (im(:,:,n)-C(n)).^2; 316 | end 317 | dc2 = sum(im,3); 318 | 319 | D = sqrt(dc2 + ds2/S^2*m^2); 320 | 321 | 322 | 323 | %--- dist2 ------------------------------------------ 324 | % 325 | % Usage: D = dist2(C, im, r1, c1, S, m, eim) 326 | % 327 | % Arguments: C - Cluster being considered 328 | % im - sub-image surrounding cluster centre 329 | % r1, c1 - row and column of top left corner of sub image within the 330 | % overall image. 331 | % S - grid spacing 332 | % m - weighting factor between colour and spatial differences. 
333 | % eim - Edge strength sub-image corresponding to im 334 | % 335 | % Returns: D - Distance image giving distance of every pixel in the 336 | % subimage from the cluster centre 337 | % 338 | % Distance = sqrt( dc^2 + (ds/S)^2*m^2 ) 339 | % where: 340 | % dc = sqrt(dl^2 + da^2 + db^2) % Colour distance 341 | % ds = sqrt(dx^2 + dy^2) % Spatial distance 342 | % 343 | % m is a weighting factor representing the nominal maximum colour distance 344 | % expected so that one can rank colour similarity relative to distance 345 | % similarity. try m in the range [1-40] for L*a*b* space 346 | % 347 | 348 | function D = dist2(C, im, r1, c1, S, m, eim, We) 349 | 350 | % Squared spatial distance 351 | % ds is a fixed 'image' we should be able to exploit this 352 | % and use a fixed meshgrid for much of the time somehow... 353 | [rows, cols, chan] = size(im); 354 | [x,y] = meshgrid(c1:(c1+cols-1), r1:(r1+rows-1)); 355 | x = x-C(4); 356 | y = y-C(5); 357 | ds2 = x.^2 + y.^2; 358 | 359 | % Squared colour difference 360 | for n = 1:3 361 | im(:,:,n) = (im(:,:,n)-C(n)).^2; 362 | end 363 | dc2 = sum(im,3); 364 | 365 | % Combine colour and spatial distance measure 366 | D = sqrt(dc2 + ds2/S^2*m^2); 367 | 368 | % for every pixel in the subimage call improfile to the cluster centre 369 | % and use the largest value as the 'edge distance' 370 | rCentre = C(5)-r1; % Cluster centre coords relative to this sub-image 371 | cCentre = C(4)-c1; 372 | de = zeros(rows,cols); 373 | for r = 1:rows 374 | for c = 1:cols 375 | v = improfile(eim,[c cCentre], [r rCentre]); 376 | de(r,c) = max(v); 377 | end 378 | end 379 | 380 | % Combine edge distance with weight, We with total Distance. 381 | D = D + We * de; 382 | -------------------------------------------------------------------------------- /code/code/style_transfer.m: -------------------------------------------------------------------------------- 1 | function [Oi Og]=style_transfer(I,M,N,n_iterations) 2 | % transfer style from black and white images 3 | % input: 4 | % -I is the input image, M the model 5 | % -N is the number reference intensities used for the LLF 6 | % -n_iterations is the number of iterations of the transfer 7 | % output: 8 | % -Og is the output finihing with a gradient transfer 9 | % -Oi is the output finishing with an intensity transfer 10 | if nargin<4 11 | n_iterations=5; 12 | end 13 | if nargin<3 14 | N=20; 15 | end 16 | GM=sqrt((M(1:end-1,1:end-1)-M(2:end,1:end-1)).^2+(M(1:end-1,1:end-1)-M(1:end-1,2:end)).^2); 17 | f=get_transfer_function(I,M); 18 | Oi=apply_tranfer_function(I,f); 19 | for t=1:n_iterations 20 | fprintf('iteration %i ...\n',t); 21 | GI=sqrt((Oi(1:end-1,1:end-1)-Oi(2:end,1:end-1)).^2+(Oi(1:end-1,1:end-1)-Oi(1:end-1,2:end)).^2); 22 | f=get_transfer_function(GI,GM); 23 | Og=llf_discrete(Oi,f,N); 24 | Oi=(Og-min(Og(:)))./(max(Og(:))-min(Og(:))); 25 | f=get_transfer_function(Oi,M); 26 | Oi=apply_tranfer_function(Oi,f); 27 | end 28 | O=I; 29 | end 30 | -------------------------------------------------------------------------------- /code/code/sup2pixel.cpp: -------------------------------------------------------------------------------- 1 | #include "mex.h" 2 | using namespace std; 3 | 4 | 5 | void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) 6 | { 7 | if (nrhs!=3) mexErrMsgTxt("error :the input number error"); 8 | 9 | double * pixnum = (double* )mxGetPr(prhs[0]); 10 | double * label = (double* )mxGetPr(prhs[1]); 11 | double * sup = (double* )mxGetPr(prhs[2]); 12 | 13 | int pixelN = int( pixnum[0] ); 14 | 
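// Descriptive note on the mapping below: prhs[0] is the total pixel count,
// prhs[1] the per-pixel superpixel label vector, and prhs[2] a per-superpixel
// value vector. The output plhs[0] is a pixelN-by-1 vector in which every pixel
// receives the value of its superpixel, i.e. outlabel[j] = sup[label[j]].
// The raw label is used directly as the index, so labels appear to be assumed
// zero-based, and no bounds checking is performed on them.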
plhs[0] = mxCreateDoubleMatrix( pixelN, 1, mxREAL ); 15 | double * outlabel=(double *)mxGetPr(plhs[0]); 16 | 17 | for ( int j = 0; j < pixelN; j++ ){ 18 | 19 | outlabel[j] = sup[ int( label[j] ) ]; 20 | 21 | } 22 | } -------------------------------------------------------------------------------- /code/code/sup2pixel.mexw32: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/sup2pixel.mexw32 -------------------------------------------------------------------------------- /code/code/sup2pixel.mexw64: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/sup2pixel.mexw64 -------------------------------------------------------------------------------- /code/code/te.xls: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/code/code/te.xls -------------------------------------------------------------------------------- /code/code/upsample.m: -------------------------------------------------------------------------------- 1 | % Upsampling procedure. 2 | % 3 | % Argments: 4 | % 'I': image 5 | % 'filter': 2D separable upsampling filter 6 | % parent subwindow indices 'subwindow', given as [r1 r2 c1 c2] 7 | % 8 | % tom.mertens@gmail.com, August 2007 9 | % sam.hasinoff@gmail.com, March 2011 [handle subwindows, reweighted boundaries] 10 | % 11 | 12 | function R = upsample(I, filter, subwindow) 13 | 14 | % increase size to match dimensions of the parent subwindow, 15 | % about 2x in each dimension 16 | r = subwindow(2) - subwindow(1) + 1; 17 | c = subwindow(4) - subwindow(3) + 1; 18 | k = size(I,3); 19 | reven = mod(subwindow(1),2)==0; 20 | ceven = mod(subwindow(3),2)==0; 21 | 22 | border_mode = 'reweighted'; 23 | %border_mode = 'symmetric'; 24 | 25 | switch border_mode 26 | case 'reweighted' 27 | % interpolate, convolve with 2D separable filter 28 | R = zeros(r,c,k); 29 | R(1+reven:2:r, 1+ceven:2:c, :) = I; 30 | R = imfilter(R,filter); 31 | 32 | % reweight, brute force weights from 1's in valid image positions 33 | Z = zeros(r,c,k); 34 | Z(1+reven:2:r, 1+ceven:2:c, :) = 1; 35 | Z = imfilter(Z,filter); 36 | R = R./Z; 37 | 38 | otherwise 39 | % increase resolution 40 | I = padarray(I,[1 1 0],'replicate'); % pad the image with a 1-pixel border 41 | R = zeros(r+4,c+4,k); 42 | R(1+reven:2:end, 1+ceven:2:end, :) = 4*I; 43 | 44 | % interpolate, convolve with 2D separable filter 45 | R = imfilter(R,filter,border_mode); 46 | 47 | % remove the border 48 | R = R(3:end-2, 3:end-2, :); 49 | end 50 | end -------------------------------------------------------------------------------- /editing doc.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/editing doc.pdf -------------------------------------------------------------------------------- /object recognition of automated driving system (1).docx: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/sharath573/Object-Recognition-for-Autonomous-Driving-System-MATLAB-project/b56cf5a392423f4b0a0e47d69f0f47d92cd5d2dd/object recognition of automated driving system (1).docx --------------------------------------------------------------------------------
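For quick reference, a minimal usage sketch of slic.m following the interface documented in its header; the image filename is hypothetical and any RGB frame can be substituted:

    im = imread('street_scene.png');        % hypothetical 3-channel test image
    [l, Am, Sp, d] = slic(im, 200, 10, 1);  % roughly 200 superpixels, colour/space weight m = 10, seRadius = 1
    imagesc(l); axis image off;             % view the superpixel label map
    fprintf('superpixel 1: mean L* = %.2f over %d pixels\n', Sp(1).L, Sp(1).N);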