├── Datasets
│   ├── Indian_Pines-40.mat
│   ├── Indian_pines.mat
│   └── Indian_pines_gt.mat
├── README.md
├── RLFFC
│   ├── FOForth.m
│   ├── LP.m
│   ├── RLFFC.m
│   ├── constructW_PKN.m
│   └── get_data.m
├── cal_OA
│   ├── KNN_Classifier
│   │   └── KNN_Classifier.m
│   ├── LDA_Classifier
│   │   └── LDA_Classifier.m
│   ├── SVM_Classifier
│   │   └── SVM_Classifier.m
│   ├── randdivide.m
│   └── test_bs_accu.m
└── demo.m

--------------------------------------------------------------------------------
/Datasets/Indian_Pines-40.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WangJun2023/RLFFC/304ce943382592cca753146e61a7708f0a532e9d/Datasets/Indian_Pines-40.mat

--------------------------------------------------------------------------------
/Datasets/Indian_pines.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WangJun2023/RLFFC/304ce943382592cca753146e61a7708f0a532e9d/Datasets/Indian_pines.mat

--------------------------------------------------------------------------------
/Datasets/Indian_pines_gt.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/WangJun2023/RLFFC/304ce943382592cca753146e61a7708f0a532e9d/Datasets/Indian_pines_gt.mat

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
This is the source code for "Hyperspectral Band Selection via Region-aware Latent Features Fusion Based Clustering", Information Fusion 2022.

For the superpixel segmentation code, please refer to RLPA.
More datasets can be generated with the GRSC or downloaded from Baidu Netdisk (百度网盘), extraction code: vxr3.

If you have any questions about the code, please send an email to wang_jun@nudt.edu.cn

If you find this code helpful, please cite our paper, thanks!\
@article{wang2022hyperspectral, \
title={Hyperspectral band selection via region-aware latent features fusion based clustering}, \
author={Wang, Jun and Tang, Chang and Li, Zhenglai and Liu, Xinwang and Zhang, Wei and Zhu, En and Wang, Lizhe},\
journal={Information Fusion},\
volume={79},\
pages={162--173},\
year={2022},\
publisher={Elsevier}\
}
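A minimal quick start (a sketch; it assumes the Datasets and code folders are on the MATLAB path, and follows demo.m below):

```matlab
Dataset = get_data('Indian_Pines');
Dataset.train_ratio = 0.1;
load('Indian_Pines-40.mat');   % provides X1: the pixels of each segmented region
H = LP(X1);                    % per-region normalized graph matrices
% build HP, Y_ba and L_ba as in demo.m, then:
% [F,~,~,~] = RLFFC(HP, d, lambda, Y_ba, beta, L_ba);
```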
--------------------------------------------------------------------------------
/RLFFC/FOForth.m:
--------------------------------------------------------------------------------
function [X,Out] = FOForth(X, G, fun, opts, varargin)
% using a gradient reflection (or projection) method to solve
%
%     min f(X) := E(X) + trace(G'X)
%     s.t. X'X = I, where X in R^{n*p}.
%
% Assume the gradient of E(X) is H(X)X, where H(X) is an n-by-n symmetric matrix.
% ----------------------------------
% Input:
%        X --- n-by-p initial point such that X'X=I
%        G --- n-by-p matrix
%      fun --- a matlab function for f(X)
%              call: [funX,F] = fun(X,data1,data2);
%              funX: function value f(X)
%                 F: gradient of f(X)
%              data: extra data (can be more)
% varargin --- data1, data2
%
% Calling syntax:
%     [X, out] = FOForth(X0,G,@fun,opts,data1,data2);
%
%     opts --- option structure with fields:
%              solver:   1(gradient reflection method) 2(projection method) 3(QR retraction)
%              stepsize: 0(ABB stepsize) o.w.(fixed stepsize)
%              xtol:     stop control for ||X_k - X_{k+1}||/sqrt(n)
%              gtol:     stop control for ||kkt||/||kkt0||
%              ftol:     stop control for |f_k - f_{k+1}|/(|f_k| + 1)
%              maxit:    max iteration
%              info:     0(no print) o.w.(print)
%
% Output:
%        X --- solution
%      Out --- output information
%              kkt:     ||kkt|| (first-order optimality condition)
%              fval:    function value of solution
%              feaX:    ||I-X'X||_F (feasibility violation)
%              xerr:    ||X_k - X_{k+1}||/sqrt(n)
%              iter:    total iteration number
%              fvals:   history of function value
%              kkts:    history of kkt
%              message: convergence message
% --------------------------------------------------------------------
% Reference:
% B. Gao, X. Liu, X. Chen and Y. Yuan
% A new first-order algorithmic framework for optimization problems with
% orthogonality constraints, SIAM Journal on Optimization, 28 (2018), pp. 302--332.
% ----------------------------------
% Author: Bin Gao, Xin Liu (ICMSEC, AMSS, CAS)
%         gaobin@lsec.cc.ac.cn
%         liuxin@lsec.cc.ac.cn
% Version: 1.0 --- 2016/04/01
% Version: 1.1 --- 2017/10/16: support general function
% --------------------------------------------------------------------
%% default setting
if nargin < 4; opts = []; end

if isempty(X)
    error('Input X is an empty matrix');
else
    [n, p] = size(X);
end

if isempty(G); G = zeros(n,p); end

if isfield(opts, 'solver')
    if all(opts.solver ~= 1:3)
        opts.solver = 1;
    end
else
    opts.solver = 1;
end

if isfield(opts, 'stepsize')
    if opts.stepsize < 0
        opts.stepsize = 0;
    end
else
    opts.stepsize = 0;
end

if isfield(opts, 'xtol')
    if opts.xtol < 0 || opts.xtol > 1
        opts.xtol = 1e-8;
    end
else
    opts.xtol = 1e-8;
end

if isfield(opts, 'gtol')
    if opts.gtol < 0 || opts.gtol > 1
        opts.gtol = 1e-5;
    end
else
    opts.gtol = 1e-5;
end

if isfield(opts, 'ftol')
    if opts.ftol < 0 || opts.ftol > 1
        opts.ftol = 1e-10;
    end
else
    opts.ftol = 1e-10;
end

if isfield(opts, 'maxit')
    if opts.maxit < 0 || opts.maxit > 10000
        opts.maxit = 1000;
    end
else
    opts.maxit = 1000;
end

if ~isfield(opts, 'info'); opts.info = 0; end
%% ---------------------------------------------------------------
% copy parameters
solver   = opts.solver;
stepsize = opts.stepsize;
xtol     = opts.xtol;
gtol     = opts.gtol;
ftol     = opts.ftol;
maxit    = opts.maxit;
info     = opts.info;

global Ip
Ip = eye(p);

% successive information (successive T iterations)
T = 5; Terr = zeros(T,2);

%% ---------------------------------------------------------------
% Initialization
iter = 0; Out.fvals = []; Out.kkts = [];
% ensure X is orthogonal
if norm(X'*X-Ip,'fro') > 1e-13; [X,~] = qr(X,0); end
% evaluate function and gradient info.
[funX, F] = feval(fun, X, varargin{:});
[PF,kkt0,feaX] = getPG(X,F);
% save history
Out.fvals(1) = funX; Out.kkts(1) = kkt0;

% initial stepsize
if stepsize == 0
    tau = max(0.1, min(0.01*kkt0, 1));
else
    tau = stepsize;
end

% initial solver
switch solver
    case 1; mainsolver = @gradient_reflection;
    case 2; mainsolver = @projection;
    case 3; mainsolver = @QRretraction;
end

% info
if info ~= 0
    switch solver
        case 1
            % fprintf('------------------ FOForth with gradient reflection start ------------------\n');
        case 2
            % fprintf('------------------ FOForth with projection start ------------------\n');
        case 3
            % fprintf('------------------ Riemannian Opt with QR retraction start ------------------\n');
    end
    % fprintf('%4s | %15s | %10s | %10s | %8s | %8s\n', 'Iter ', 'F(X) ', 'KKT ', 'Xerr ', 'Feasi ', 'tau');
    % fprintf('%d \t %f \t %3.2e \t %3.2e \t %3.2e \t %3.2e\n', iter, funX, kkt0, 0, feaX, tau);
end

%% ---------------------------------------------------------------
% main iteration
for iter = 1:maxit
    Xk = X; Fk = F; PFk = PF; funXk = funX;

    % ---------- PART I: gradient step ----------
    % Riemannian or Euclidean gradient
    if solver == 3
        Grad = PFk;
    else
        Grad = Fk;
    end

    % gradient step
    V = Xk - tau * Grad;
    X = mainsolver(Xk,V);

    % ---------- PART II: symmetrization step ----------
    if feaX > 1e-12; [X,~] = qr(X,0); end
    if solver ~= 3
        if any(G(:) ~= 0)   % skip the symmetrization step when G is the zero matrix
            [tu,~,tv] = svd(X'*G,0);
            X = -X*(tu*tv');
        end
    end

    % ------------ evaluate error ------------
    [funX, F] = feval(fun, X, varargin{:});
    [PF,kkt,feaX] = getPG(X,F);
    Out.fvals(iter+1) = funX; Out.kkts(iter+1) = kkt;

    xerr = norm(Xk - X,'fro')/sqrt(n);
    ferr = abs(funXk - funX)/(abs(funXk)+1);

    % successive error
    Terr(2:T,:) = Terr(1:(T-1),:); Terr(1,:) = [xerr, ferr]';
    merr = mean(Terr(1:min(iter,T),:));
    % info
    if info ~= 0 && (mod(iter,15) == 0)
        % fprintf('%d \t %f \t %3.2e \t %3.2e \t %3.2e \t %3.2e\n', iter, funX, kkt, xerr, feaX, tau);
    end

    % ------------ update ABB stepsize ------------
    if stepsize == 0
        Sk = X - Xk;
        Vk = PF - PFk; % Vk = F - Fk;
        SV = sum(sum(Sk.*Vk));
        if mod(iter+1,2) == 0
            tau = abs(SV)/sum(sum(Vk.*Vk));   % SBB for odd
        else
            tau = sum(sum(Sk.*Sk))/abs(SV);   % LBB for even
        end
        tau = max(min(tau, 1e10), 1e-10);
    end

    % ------------------ stop criteria --------------------
    % if kkt/kkt0 < gtol
    % if kkt/kkt0 < gtol || (xerr < xtol || ferr < ftol)
    % if kkt/kkt0 < gtol || (xerr < xtol && ferr < ftol)
    % if (kkt/kkt0 < gtol && xerr < xtol) || ferr < ftol
    % if (kkt/kkt0 < gtol && ferr < ftol) || xerr < xtol
    if kkt/kkt0 < gtol || (xerr < xtol && ferr < ftol) || all(merr < 10*[xtol, ftol])
        Out.message = 'converge';
        break;
    end
end

if iter >= opts.maxit
    Out.message = 'exceed max iteration';
end

Out.feaX = feaX;
Out.fval = funX;
Out.iter = iter;
Out.xerr = xerr;
Out.kkt  = kkt;

if info ~= 0
    % fprintf('%s at...\n', Out.message);
    % fprintf('%d \t %f \t %3.2e \t %3.2e \t %3.2e \t %3.2e\n', iter, funX, kkt, xerr, feaX, tau);
    % fprintf('------------------------------------------------------------------------\n');
end

%% ---------------------------------------------------------------
% nested functions
% gradient reflection step
function X = gradient_reflection(X,V)
    VV = V'*V;
    VX = V'*X;
    TVX = VV\VX;
    X = -X + V*(2*TVX);
end

% projection step
function X = projection(~,V)
    % approach 1
    VV = V'*V;
    [Q,D] = eig(VV);
    DD = sqrt(D)\Ip;
    X = V*(Q*DD*Q');

    % approach 2
    % [UX,~,VX] = svd(V,0);
    % X = UX*VX';
end

% QR retraction
function X = QRretraction(~,V)
    % approach 1
    VV = V'*V;
    L = chol(VV,'lower');
    X = V*inv(L)';

    % approach 2
    % [X,~] = qr(V,0);
end

% get projected gradient and its norm
function [PF,normPF,feaX] = getPG(X,F)
    PF = F - X*(F'*X);
    normPF = norm(PF,'fro');
    feaX = norm(X'*X-Ip,'fro');
end

end
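A toy call against the interface documented above, mirroring how RLFFC.m drives this solver (the dimensions and the quadratic objective are illustrative assumptions, not part of the package):

% Illustrative only: f(X) = trace(X'*(A*X + G)) with symmetric A, as in RLFFC.m.
n = 50; p = 5;
A = randn(n); A = (A + A')/2;                 % symmetric matrix playing the role of H(X)
G = randn(n, p);
[X0,~] = qr(randn(n, p), 0);                  % feasible initial point, X0'*X0 = I
opts = []; opts.solver = 1; opts.gtol = 1e-5;
fun = @(X, A, G) deal(sum(sum(X .* (A*X + G))), A*X + G);   % returns [funX, F]
[X, out] = FOForth(X0, G, fun, opts, A, G);
norm(X'*X - eye(p), 'fro')                    % feasibility of the returned point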
--------------------------------------------------------------------------------
/RLFFC/LP.m:
--------------------------------------------------------------------------------
function LH = LP(X)

v = length(X);      % the number of segmented regions
n = size(X{1},2);   % the number of bands

% z-score each band within each region
for i = 1:v
    X{i} = X{i}';
    for j = 1:n
        X{i}(j,:) = ( X{i}(j,:) - mean( X{i}(j,:) ) ) / std( X{i}(j,:) );
    end
end

% build a symmetrically normalized graph matrix D^(-1/2)*A*D^(-1/2) per region
LH = zeros(n,n,v);
for idx = 1:v
    A0 = constructW_PKN(X{idx}', 10);
    A0 = A0 - diag(diag(A0));
    A10 = (A0 + A0')/2;
    D10 = diag(1./sqrt(sum(A10, 2)));
    LH(:,:,idx) = D10*A10*D10;
end

end
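The expected input is a cell array with one matrix per segmented region; a shape sketch (sizes are illustrative):

% Illustrative sizes: three regions with 120/80/150 pixels over 200 bands.
X = {randn(120, 200), randn(80, 200), randn(150, 200)};
LH = LP(X);      % 200 x 200 x 3 stack, one normalized graph matrix per region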
--------------------------------------------------------------------------------
/RLFFC/RLFFC.m:
--------------------------------------------------------------------------------
function [F,R,gamma,obj] = RLFFC(YP,k,lambda,F_ba,beta,L_ba)

num = size(YP, 1);       % the number of bands
view_num = size(YP, 3);  % the number of superpixels (regions)
maxIter = 100;           % the maximum number of iterations

%% Initialize gamma, R, F
gamma = ones(view_num,1)/view_num;

R = zeros(k,k,view_num);
for v = 1:view_num
    R(:,:,v) = eye(k);
end

[F,~] = qr(randn(num,k),0);

opts = []; opts.info = 1;
opts.gtol = 1e-5;

flag = 1;
iter = 0;

%% Iterative Update
while flag
    iter = iter + 1;

    % fuse the per-view latent features under the current rotations (reset every iteration)
    Y_ba = zeros(num,k);
    for v = 1:view_num
        Y_ba = Y_ba + gamma(v)*(YP(:,:,v)*R(:,:,v));
    end

    %% update F
    X = F;
    A = - beta * L_ba;
    G = - Y_ba - lambda * F_ba;
    [F,~] = FOForth(X,G,@fun,opts,A,G);   % solve the orthogonality-constrained subproblem

    %% update R (orthogonal Procrustes solution per view)
    for v = 1:view_num
        if gamma(v) > 1e-4
            temp_matrix = gamma(v)*YP(:,:,v)'*F;
            [Up,~,Vp] = svd(temp_matrix,'econ');
            R(:,:,v) = Up*Vp';
        end
    end

    %% update gamma
    coef = zeros(1,view_num);
    for v = 1:view_num
        coef(1,v) = trace(F'*YP(:,:,v)*R(:,:,v));
    end
    gamma = coef/norm(coef,2);

    %% calculate the objective function value
    temp = zeros(num,k);
    for v = 1:view_num
        temp = temp + gamma(v)*(YP(:,:,v)*R(:,:,v));
    end
    obj(iter) = trace(F'*temp + lambda * F' * F_ba + beta * F' * L_ba * F);

    %% check convergence
    if (iter > 2) && (abs((obj(iter-1)-obj(iter))/obj(iter-1)) < 1e-6 || iter > maxIter)
        flag = 0;
    end
end

% normalize the rows of F before clustering
F = F ./ repmat(sqrt(sum(F.^2, 2)), 1, k);

function [funX, F] = fun(X,A,G)
    F = A * X + G;
    funX = sum(sum(X.*F));
end

end
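The R update above is the orthogonal Procrustes solution; a quick standalone numerical check of that identity (dimensions illustrative):

% For M = gamma(v)*YP(:,:,v)'*F, the orthogonal R maximizing trace(R'*M)
% is Up*Vp' from the SVD of M, and the maximum equals the sum of singular values.
M = randn(5);
[Up, Sp, Vp] = svd(M);
R = Up*Vp';
abs(trace(R'*M) - sum(diag(Sp)))   % numerically zero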
--------------------------------------------------------------------------------
/RLFFC/constructW_PKN.m:
--------------------------------------------------------------------------------
% construct a similarity matrix with probabilistic k-nearest neighbors.
% It is a parameter-free, distance-consistent similarity.
function W = constructW_PKN(X, k, issymmetric)
% X: each column is a data point
% k: number of neighbors
% issymmetric: set W = (W+W')/2 if issymmetric=1
% W: similarity matrix

if nargin < 3
    issymmetric = 1;
end
if nargin < 2
    k = 5;
end

[dim, n] = size(X);
D = L2_distance_1(X, X);
[dumb, idx] = sort(D, 2);   % sort each row

W = zeros(n);
for i = 1:n
    id = idx(i, 2:k+2);
    di = D(i, id);
    W(i,id) = (di(k+1)-di)/(k*di(k+1)-sum(di(1:k))+eps);
end

if issymmetric == 1
    W = (W+W')/2;
end


% compute squared Euclidean distance
% ||A-B||^2 = ||A||^2 + ||B||^2 - 2*A'*B
function d = L2_distance_1(a,b)
% a,b: two matrices. each column is a data point
% d: distance matrix of a and b

if (size(a,1) == 1)
    a = [a; zeros(1,size(a,2))];
    b = [b; zeros(1,size(b,2))];
end

aa = sum(a.*a); bb = sum(b.*b); ab = a'*b;
d = repmat(aa',[1 size(bb,2)]) + repmat(bb,[size(aa,2) 1]) - 2*ab;

d = real(d);
d = max(d,0);

% % force 0 on the diagonal?
% if (df==1)
%     d = d.*(1-eye(size(d)));
% end
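A quick call on synthetic data (sizes illustrative); before symmetrization, each row of W holds at most k nonzero weights that sum to one:

X = randn(10, 200);            % 200 points in 10 dimensions, one per column
W = constructW_PKN(X, 10);     % the call used in LP.m: 10 neighbors, symmetrized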
--------------------------------------------------------------------------------
/RLFFC/get_data.m:
--------------------------------------------------------------------------------
function [Dataset] = get_data(dataset_name)
%% import the dataset
switch dataset_name
    case 'Indian_Pines'
        A = importdata('Indian_pines.mat');
        ground_truth = importdata('Indian_pines_gt.mat');
    case 'Salinas'
        A = importdata('Salinas.mat');
        ground_truth = importdata('Salinas_gt.mat');
    case 'Pavia_University'
        A = importdata('PaviaU.mat');
        ground_truth = importdata('PaviaU_gt.mat');
    case 'KSC'
        A = importdata('KSC.mat');
        ground_truth = importdata('KSC_gt.mat');
    case 'Botswana'
        A = importdata('Botswana.mat');
        ground_truth = importdata('Botswana_gt.mat');
end
%% normalize the cube to [0, 1]
A = double(A);
minv = min(A(:));
maxv = max(A(:));
A = double(A - minv) / double(maxv - minv);

%% generate the output
X = permute(A, [3, 1, 2]);
X = X(:, :);    % bands x pixels
Dataset.X = X;
Dataset.A = A;
Dataset.ground_truth = ground_truth;
end

--------------------------------------------------------------------------------
/cal_OA/KNN_Classifier/KNN_Classifier.m:
--------------------------------------------------------------------------------
function [OA,MA,Kappa,test_SL,predict_label] = KNN_Classifier(Dataset, band_set)

[train_X,train_labels,test_X,test_labels,test_SL] = randdivide(Dataset);
test_size = size(test_labels, 1);
C = max(test_labels);
bs_train_X = train_X(:, band_set);
bs_test_X = test_X(:, band_set);

mdl = fitcknn(bs_train_X, train_labels, 'NumNeighbors',5, 'Standardize',1);
predict_label = predict(mdl, bs_test_X);
cmat = confusionmat(test_labels, predict_label);

% overall accuracy (OA)
OA = sum(predict_label == test_labels) / size(predict_label, 1);

% mean per-class accuracy (MA)
sum_accu = 0;
for i = 1:C
    sum_accu = sum_accu + cmat(i, i) / sum(cmat(i, :), 2);
end
MA = sum_accu / C;

% Cohen's kappa
Pe = 0;
for i = 1:C
    Pe = Pe + cmat(i, :) * cmat(:, i);
end
Pe = Pe / (test_size*test_size);
Kappa = (OA - Pe) / (1 - Pe);

end

--------------------------------------------------------------------------------
/cal_OA/LDA_Classifier/LDA_Classifier.m:
--------------------------------------------------------------------------------
function [OA,MA,Kappa,test_SL,predict_label] = LDA_Classifier(Dataset, band_set)

[train_X,train_labels,test_X,test_labels,test_SL] = randdivide(Dataset);
test_size = size(test_labels, 1);
C = max(test_labels);
bs_train_X = train_X(:, band_set);
bs_test_X = test_X(:, band_set);

factor = fitcdiscr(bs_train_X, train_labels);
predict_label = double(factor.predict(bs_test_X));
cmat = confusionmat(test_labels, predict_label);

% overall accuracy (OA)
OA = sum(predict_label == test_labels) / length(test_labels);

% mean per-class accuracy (MA)
sum_accu = 0;
for i = 1:C
    sum_accu = sum_accu + cmat(i, i) / sum(cmat(i, :), 2);
end
MA = sum_accu / C;

% Cohen's kappa
Pe = 0;
for i = 1:C
    Pe = Pe + cmat(i, :) * cmat(:, i);
end
Pe = Pe / (test_size*test_size);
Kappa = (OA - Pe) / (1 - Pe);
end
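The classifier wrappers above and the SVM variant below share the same confusion-matrix bookkeeping; a compact equivalent, written as a hypothetical helper that is not part of this repo:

% hypothetical helper: metrics from a C x C confusion matrix
function [OA, MA, Kappa] = accuracy_metrics(cmat)
n = sum(cmat(:));                               % number of test samples
OA = sum(diag(cmat)) / n;                       % overall accuracy
MA = mean(diag(cmat) ./ sum(cmat, 2));          % mean per-class accuracy
Pe = sum(sum(cmat, 2) .* sum(cmat, 1)') / n^2;  % expected chance agreement
Kappa = (OA - Pe) / (1 - Pe);                   % Cohen's kappa
end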
--------------------------------------------------------------------------------
/cal_OA/SVM_Classifier/SVM_Classifier.m:
--------------------------------------------------------------------------------
function [OA,MA,Kappa,test_SL,predict_labels] = SVM_Classifier(Dataset, band_set)

[train_X,train_labels,test_X,test_labels,test_SL] = randdivide(Dataset);
test_size = size(test_labels, 1);
C = max(test_labels);
bs_train_X = train_X(:, band_set);
bs_test_X = test_X(:, band_set);

% requires the LIBSVM MATLAB interface (svmtrain/svmpredict)
model = svmtrain(train_labels, bs_train_X, Dataset.svm_para);
[predict_labels, corrected_num, ~] = svmpredict(test_labels, bs_test_X, model, '-q');
OA = corrected_num(1) / 100;   % svmpredict reports accuracy in percent
cmat = confusionmat(test_labels, predict_labels);

% mean per-class accuracy (MA)
sum_accu = 0;
for i = 1:C
    sum_accu = sum_accu + cmat(i, i) / sum(cmat(i, :), 2);
end
MA = sum_accu / C;

% Cohen's kappa
Pe = 0;
for i = 1:C
    Pe = Pe + cmat(i, :) * cmat(:, i);
end
Pe = Pe / (test_size * test_size);
Kappa = (OA - Pe) / (1 - Pe);
end

--------------------------------------------------------------------------------
/cal_OA/randdivide.m:
--------------------------------------------------------------------------------
function [train_X,train_labels,test_X,test_labels,test_SL] = randdivide(Dataset)
% per-class stratified random split of the labeled pixels
ground_truth = Dataset.ground_truth;
A = Dataset.A;
train_ratio = Dataset.train_ratio;
num_classes = max(max(ground_truth));
pixel_pos = cell(1, num_classes);
[M, N, ~] = size(A);
for i = 1:M
    for j = 1:N
        if ground_truth(i, j) ~= 0
            pixel_pos{ground_truth(i, j)} = [pixel_pos{ground_truth(i, j)}; [i j]];
        end
    end
end

train_X = [];
test_X = [];
train_labels = [];
test_labels = [];
test_pos = [];
row_rank = cell(num_classes, 1);
for i = 1:num_classes
    pos_mat = pixel_pos{i};
    row_rank{i} = randperm(size(pos_mat, 1));
    pos_mat = pos_mat(row_rank{i}, :);

    [m1, ~] = size(pos_mat);
    for j = 1:floor(m1 * train_ratio)
        temp = A(pos_mat(j, 1), pos_mat(j, 2), :);
        train_X = [train_X temp(:)];
        train_labels = [train_labels; i];
    end
end

for i = 1:num_classes
    pos_mat = pixel_pos{i};
    pos_mat = pos_mat(row_rank{i}, :);
    [m1, ~] = size(pos_mat);
    for j = floor(m1 * train_ratio) + 1 : m1
        temp = A(pos_mat(j, 1), pos_mat(j, 2), :);
        test_X = [test_X temp(:)];
        test_labels = [test_labels; i];
        test_pos = [test_pos; (pos_mat(j, 2)-1) * M + pos_mat(j, 1)];   % linear pixel index
    end
end

train_X = train_X';
test_X = test_X';
test_SL(1,:) = test_pos;
test_SL(2,:) = test_labels;
train_labels = double(train_labels);
test_labels = double(test_labels);
end
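How the split is driven (the fields mirror what demo.m sets up):

Dataset = get_data('Indian_Pines');
Dataset.train_ratio = 0.1;      % 10% of each class goes to training
[train_X, train_labels, test_X, test_labels, test_SL] = randdivide(Dataset);
% train_X/test_X: samples x bands; test_SL row 1: linear pixel index, row 2: label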
--------------------------------------------------------------------------------
/cal_OA/test_bs_accu.m:
--------------------------------------------------------------------------------
function [accu,Classify_map] = test_bs_accu(band_set, Dataset, classifier_type)
warning('off');
% average over 10 random train/test splits
for iter = 1:10
    switch classifier_type
        case 'SVM'
            [OA(iter),MA(iter),Kappa(iter),test_SL,predict_label] = SVM_Classifier(Dataset, band_set);
        case 'KNN'
            [OA(iter),MA(iter),Kappa(iter),test_SL,predict_label] = KNN_Classifier(Dataset, band_set);
        case 'LDA'
            [OA(iter),MA(iter),Kappa(iter),test_SL,predict_label] = LDA_Classifier(Dataset, band_set);
    end
end
accu.OA = mean(OA);
accu.MA = mean(MA);
accu.Kappa = mean(Kappa);

accu.STDOA = std(OA);
accu.STDMA = std(MA);
accu.STDKappa = std(Kappa);

[M,N,B] = size(Dataset.A);

% paint the predictions of the last split onto the ground-truth map
Classify_map = Dataset.ground_truth(:);
Classify_map(test_SL(1,:)) = predict_label;
Classify_map = reshape(Classify_map,[M N]);

end

--------------------------------------------------------------------------------
/demo.m:
--------------------------------------------------------------------------------
clear all
clc
dataset_names = {'Indian_Pines', 'Salinas', 'KSC', 'Botswana'};
classifier_names = {'KNN', 'SVM', 'LDA'};
svm_para = {'-c 5000.000000 -g 0.500000 -m 500 -t 2 -q',...
            '-c 100 -g 16 -m 500 -t 2 -q',...
            '-c 10000.000000 -g 16.000000 -m 500 -t 2 -q',...
            '-c 10000 -g 0.5 -m 500 -t 2 -q',...
            };
Dataset = get_data(dataset_names{1});
Dataset.train_ratio = 0.1;
Dataset.svm_para = svm_para{1, 1};
load('Indian_Pines-40.mat');   % provides X1: the pixels of each segmented region

H = LP(X1);          % the normalized graph matrix of each segmented region

d = 5;               % the latent feature dimension

clusternum = 10;     % the number of selected bands

view_num = size(H,3);
num = size(H,1);
L_ba = zeros(num,num);

for i = 1:view_num
    L_ba = L_ba + (1/view_num) * H(:,:,i);
end

H0 = zeros(num,num);
HP = zeros(num,d,view_num);

opt.disp = 0;
for p = 1:view_num
    H(:,:,p) = (H(:,:,p)+H(:,:,p)')/2;
    [Hp, ~] = eigs(H(:,:,p), d, 'la', opt);   % top-d eigenvectors: per-region latent features
    H0 = H0 + (1/view_num)*H(:,:,p);
    HP(:,:,p) = Hp;
end
[Y_ba,~] = eigs(H0, d, 'la', opt);
lambda = 2.^-15;
beta = 0.1;
% lambda = 2.^(-15:1:15);
% beta = 0.01:0.01:0.1;

[F,~,~,~] = RLFFC(HP,d,lambda,Y_ba,beta,L_ba);
for k = 1:length(clusternum)
    [IDX, C, SUMD, D] = kmeans(F,clusternum(k),'maxiter',100,'replicates',50,'emptyaction','singleton');
    [~,I] = min(D);   % I: the selected band subset
end
[acc,~] = test_bs_accu(I, Dataset, 'KNN');

--------------------------------------------------------------------------------
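demo.m leaves grids for lambda and beta commented out; one way to wire them into a search, reusing the variables built in demo.m above (the selection rule here is an illustrative choice, not from the paper):

lambdas = 2.^(-15:1:15);
betas = 0.01:0.01:0.1;
best = struct('OA', -inf, 'lambda', NaN, 'beta', NaN, 'bands', []);
for lambda = lambdas
    for beta = betas
        [F,~,~,~] = RLFFC(HP, d, lambda, Y_ba, beta, L_ba);
        [~,~,~,D] = kmeans(F, clusternum, 'maxiter',100, 'replicates',50, 'emptyaction','singleton');
        [~, I] = min(D);                          % bands closest to each centroid
        [acc,~] = test_bs_accu(I, Dataset, 'KNN');
        if acc.OA > best.OA                       % keep the best-scoring band subset
            best = struct('OA', acc.OA, 'lambda', lambda, 'beta', beta, 'bands', I);
        end
    end
end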