├── Cal_PSNRSSIM.m ├── Data └── data_download.txt ├── Demo_test_noiselevel100.m ├── Model ├── HSID-CNN.prototxt ├── HSID-CNN_Realdata_iter_1000000.caffemodel ├── HSID-CNN_noiseGau.caffemodel ├── HSID-CNN_noiselevel100_iter_600000.caffemodel ├── HSID-CNN_noiselevel25.caffemodel ├── HSID-CNN_noiselevel5.caffemodel ├── HSID-CNN_noiselevel50.caffemodel ├── HSID-CNN_noiselevel75.caffemodel ├── HSID-CNN_noiserand25.caffemodel └── More_Model ├── README.md └── SAM.m /Cal_PSNRSSIM.m: --------------------------------------------------------------------------------
1 | function [psnr_cur, ssim_cur, cc_cur] = Cal_PSNRSSIM(A,B,row,col)
2 | % Compare image B against reference A after trimming a border of `row`
3 | % pixels (top/bottom) and `col` pixels (left/right).
4 | % Inputs are assumed to be scaled to [0,1] (PSNR uses peak value 1).
5 | % Returns PSNR (Inf for identical images), SSIM (single-band input
6 | % only, -1 otherwise) and the correlation coefficient (CC) computed
7 | % over every pixel and band.
8 |
9 | [n,m,~]=size(B);
10 | A = A(row+1:n-row,col+1:m-col,:);
11 | B = B(row+1:n-row,col+1:m-col,:);
12 | A=double(A); % Ground-truth(GT)
13 | B=double(B); %
14 | [~,~,ch]=size(B);
15 |
16 | % Correlation coefficient, vectorized over all pixels and bands.
17 | % (Bug fix: the old element loop ran over the PRE-crop size n,m and
18 | % indexed out of bounds whenever row/col > 0; its 2-D indexing on a
19 | % 3-D array also only ever visited the first band.)
20 | da=A(:)-mean(A(:));
21 | db=B(:)-mean(B(:));
22 | cc_cur=sum(da.*db)/sqrt(sum(da.^2)*sum(db.^2));
23 |
24 | e=A(:)-B(:);
25 | mse=mean(e.^2);
26 | psnr_cur=10*log10(1^2/mse); % peak value 1 for [0,1] data; Inf if A == B
27 |
28 | if ch==1
29 | [ssim_cur, ~] = ssim_index(A, B); % subfunction below
30 | else
31 | ssim_cur = -1; % SSIM is only computed here for single-band input
32 | end
33 |
34 |
51 | function [mssim, ssim_map] = ssim_index(img1, img2, K, window, L)
52 |
53 | %========================================================================
54 | %SSIM Index, Version 1.0
55 | %Copyright(c) 2003 Zhou Wang
56 | %All Rights Reserved. 
57 | %
58 | %The author is with Howard Hughes Medical Institute, and Laboratory
59 | %for Computational Vision at Center for Neural Science and Courant
60 | %Institute of Mathematical Sciences, New York University.
61 | %
62 | %----------------------------------------------------------------------
63 | %Permission to use, copy, or modify this software and its documentation
64 | %for educational and research purposes only and without fee is hereby
65 | %granted, provided that this copyright notice and the original authors'
66 | %names appear on all copies and supporting documentation. This program
67 | %shall not be used, rewritten, or adapted as the basis of a commercial
68 | %software or hardware product without first obtaining permission of the
69 | %authors. The authors make no representations about the suitability of
70 | %this software for any purpose. It is provided "as is" without express
71 | %or implied warranty.
72 | %----------------------------------------------------------------------
73 | %
74 | %This is an implementation of the algorithm for calculating the
75 | %Structural SIMilarity (SSIM) index between two images. Please refer
76 | %to the following paper:
77 | %
78 | %Z. Wang, A. C. Bovik, H. R. Sheikh, and E. P. Simoncelli, "Image
79 | %quality assessment: From error measurement to structural similarity"
80 | %IEEE Transactions on Image Processing, vol. 13, no. 1, Jan. 2004.
81 | %
82 | %Kindly report any suggestions or corrections to zhouwang@ieee.org
83 | %
84 | %----------------------------------------------------------------------
85 | %
86 | %Input : (1) img1: the first image being compared
87 | % (2) img2: the second image being compared
88 | % (3) K: constants in the SSIM index formula (see the above
89 | % reference). default value: K = [0.01 0.03]
90 | % (4) window: local window for statistics (see the above
91 | % reference). 
% default window is Gaussian given by
92 | % window = fspecial('gaussian', 11, 1.5);
93 | % (5) L: dynamic range of the images. default: L = 255
94 | %
95 | %Output: (1) mssim: the mean SSIM index value between 2 images.
96 | % If one of the images being compared is regarded as
97 | % perfect quality, then mssim can be considered as the
98 | % quality measure of the other image.
99 | % If img1 = img2, then mssim = 1.
100 | % (2) ssim_map: the SSIM index map of the test image. The map
101 | % has a smaller size than the input images. The actual size:
102 | % size(img1) - size(window) + 1.
103 | %
104 | %Default Usage:
105 | % Given 2 test images img1 and img2, whose dynamic range is 0-255
106 | %
107 | % [mssim ssim_map] = ssim_index(img1, img2);
108 | %
109 | %Advanced Usage:
110 | % User defined parameters. For example
111 | %
112 | % K = [0.05 0.05];
113 | % window = ones(8);
114 | % L = 100;
115 | % [mssim ssim_map] = ssim_index(img1, img2, K, window, L);
116 | %
117 | %See the results:
118 | %
119 | % mssim %Gives the mssim value
120 | % imshow(max(0, ssim_map).^4) %Shows the SSIM index map
121 | %
122 | %========================================================================
123 |
124 |
125 | if (nargin < 2 || nargin > 5)
126 | mssim = -Inf; % bug fix: was "ssim_index = -Inf", leaving output mssim undefined on return
127 | ssim_map = -Inf;
128 | return;
129 | end
130 |
131 | if (any(size(img1) ~= size(img2))) % bug fix: "if (vector)" only fires when ALL dims differ
132 | mssim = -Inf;
133 | ssim_map = -Inf;
134 | return;
135 | end
136 |
137 | [M N] = size(img1);
138 |
139 | if (nargin == 2)
140 | if ((M < 11) || (N < 11))
141 | mssim = -Inf;
142 | ssim_map = -Inf;
143 | return
144 | end
145 | window = fspecial('gaussian', 11, 1.5); %
146 | K(1) = 0.01; % default settings
147 | K(2) = 0.03; %
148 | L = 255; %
149 | end
150 |
151 | if (nargin == 3)
152 | if ((M < 11) || (N < 11))
153 | mssim = -Inf;
154 | ssim_map = -Inf;
155 | return
156 | end
157 | window = fspecial('gaussian', 11, 1.5);
158 | L = 255;
159 | if (length(K) == 2)
160 | if (K(1) < 0 || K(2) < 0)
161 | mssim = -Inf;
162 | ssim_map = -Inf;
163 | return;
164 | end
165 | else
166 | mssim = -Inf;
167 | ssim_map = -Inf;
168 | return;
169 | end
170 | end
171 |
172 | if (nargin == 4)
173 | [H W] = size(window);
174 | if ((H*W) < 4 || (H > M) || (W > N))
175 | mssim = -Inf;
176 | ssim_map = -Inf;
177 | return
178 | end
179 | L = 255;
180 | if (length(K) == 2)
181 | if (K(1) < 0 || K(2) < 0)
182 | mssim = -Inf;
183 | ssim_map = -Inf;
184 | return;
185 | end
186 | else
187 | mssim = -Inf;
188 | ssim_map = -Inf;
189 | return;
190 | end
191 | end
192 |
193 | if (nargin == 5)
194 | [H W] = size(window);
195 | if ((H*W) < 4 || (H > M) || (W > N))
196 | mssim = -Inf;
197 | ssim_map = -Inf;
198 | return
199 | end
200 | if (length(K) == 2)
201 | if (K(1) < 0 || K(2) < 0)
202 | mssim = -Inf;
203 | ssim_map = -Inf;
204 | return;
205 | end
206 | else
207 | mssim = -Inf;
208 | ssim_map = -Inf;
209 | return;
210 | end
211 | end
212 |
213 | L=1; % NOTE(review): hard-coded override of the L argument for [0,1] data -- confirm before reusing with 0-255 images
214 | C1 = (K(1)*L)^2;
215 | C2 = (K(2)*L)^2;
216 | window = window/sum(sum(window));
217 | img1 = double(img1);
218 | img2 = double(img2);
219 |
220 | mu1 = filter2(window, img1, 'valid');
221 | mu2 = filter2(window, img2, 'valid');
222 | mu1_sq = mu1.*mu1;
223 | mu2_sq = mu2.*mu2;
224 | mu1_mu2 = mu1.*mu2;
225 | sigma1_sq = filter2(window, img1.*img1, 'valid') - mu1_sq;
226 | sigma2_sq = filter2(window, img2.*img2, 'valid') - mu2_sq;
227 | sigma12 = filter2(window, img1.*img2, 'valid') - mu1_mu2;
228 |
229 | if (C1 > 0 && C2 > 0) % scalar conditions: use short-circuit &&
230 | ssim_map = ((2*mu1_mu2 + C1).*(2*sigma12 + C2))./((mu1_sq + mu2_sq + C1).*(sigma1_sq + sigma2_sq + C2));
231 | else
232 | numerator1 = 2*mu1_mu2 + C1;
233 | numerator2 = 2*sigma12 + C2;
234 | denominator1 = mu1_sq + mu2_sq + C1;
235 | denominator2 = sigma1_sq + sigma2_sq + C2;
236 | ssim_map = ones(size(mu1));
237 | index = (denominator1.*denominator2 > 0);
238 | ssim_map(index) = (numerator1(index).*numerator2(index))./(denominator1(index).*denominator2(index));
239 | index = 
(denominator1 ~= 0) & (denominator2 == 0); 240 | ssim_map(index) = numerator1(index)./denominator1(index); 241 | end 242 | 243 | mssim = mean2(ssim_map); 244 | 245 | return 246 | 247 | 248 | 249 | 250 | 251 | 252 | 253 | -------------------------------------------------------------------------------- /Data/data_download.txt: -------------------------------------------------------------------------------- 1 | Downloading W.D.C HSI data(200*200*191), then place the "GT_crop.mat" into the folder "Data". 2 | 3 | Link: https://pan.baidu.com/s/1ilHQJ9-s9q9Pay_ykNZWBg 4 | Password: rkfg 5 | 6 | or 7 | 8 | Link: https://drive.google.com/file/d/1Iy03Ozz6jgvRefeihollkz5W6tWYnEq8/view?usp=sharing 9 | 10 | -------------------------------------------------------------------------------------------- 11 | 12 | Indian Pines and Pavia University: 13 | Link: 14 | https://pan.baidu.com/s/19HXlZnbONedfYKD9p8AnFw 15 | Password: 16 | giuq 17 | 18 | -------------------------------------------------------------------------------- /Demo_test_noiselevel100.m: -------------------------------------------------------------------------------- 1 | 2 | %% Qiang Zhang, Wuhan University. 
3 | %% whuqzhang@gmail.com
4 |
5 | clear;close all;
6 |
7 | %% Model
8 | def = 'Model/HSID-CNN.prototxt';
9 | model= 'Model/HSID-CNN_noiselevel100_iter_600000.caffemodel';
10 |
11 | %% noise level
12 | noiseSigma=100.0;
13 |
14 | %% get GT HSI data
15 | load('Data/GT_crop.mat');
16 | im_label=temp;
17 | [w,h, band] = size(im_label);
18 | im_input=zeros(w,h, band);
19 |
20 | %% add noise (same level for all bands)
21 | for i=1:band
22 |
23 | im_input(:, :, i) = im_label(:, :, i) + noiseSigma/255.0*randn(size(im_label(:, :, i)));
24 | end
25 |
26 | %% HSI-denoising
27 | caffe.reset_all();
28 | caffe.set_mode_gpu();
29 | net = caffe.Net(def, model, 'test');
30 |
31 | k=12; % adjacent bands taken per side; 2*k bands of spectral context are fed to the network
32 | im_output=zeros(w, h, band);
33 |
34 | for i=1 : 1 : k
35 | output1 = net.forward({im_input(:, :, 1:2*k),im_input(:, :, i)}); % generalized: was hard-coded 1:24
36 | output = output1{1,1};
37 | im_output(:, :, i) = im_input(:, :, i)+output;
38 | end
39 |
40 | for i=band-k+1 : 1 : band
41 | output1 = net.forward({im_input(:, :, band-2*k+1:band),im_input(:, :, i)}); % generalized: was hard-coded 168:191
42 | output = output1{1,1};
43 | im_output(:, :, i) = im_input(:, :, i)+output;
44 | end
45 |
46 | for i=1+k : 1 : band-k
47 | output1 = net.forward({im_input(:, :, [i-k:i-1, i+1:i+k]),im_input(:, :, i)});
48 | output = output1{1,1};
49 | im_output(:, :, i) = im_input(:, :, i)+output;
50 | end
51 |
52 |
53 | %% PSNR & SSIM
54 | PSNR=zeros(band, 1);
55 | SSIM=zeros(band, 1);
56 |
57 | for i=1:band
58 |
59 | [psnr_cur, ssim_cur, ~] = Cal_PSNRSSIM(im_output(:, :, i), im_label(:, :, i), 0, 0);
60 | PSNR(i,1)=psnr_cur;
61 | SSIM(i,1)=ssim_cur;
62 | end
63 |
64 | [SAM1, SAM2]=SAM(im_label, im_output);
65 | disp(SAM1);
66 |
67 | show_band=[57, 27, 17];
68 |
69 | subplot(131), imshow(im_label(:, :, show_band));
70 | title(['Original Band Number: ', num2str(show_band)])
71 |
72 | subplot(132), imshow(im_input(:, :, show_band));
73 | title(['Noise Level = ', num2str(floor(noiseSigma))])
74 |
75 | subplot(133), imshow(im_output(:, :, show_band));
76 | title(['MPSNR: 
',num2str(mean(PSNR),'%2.4f'),'dB',' MSSIM: ',num2str(mean(SSIM),'%2.4f'),' MSA: ',num2str(SAM1,'%2.4f')]) 77 | 78 | drawnow; 79 | 80 | disp([mean(PSNR), mean(SSIM), SAM1]); 81 | 82 | 83 | -------------------------------------------------------------------------------- /Model/HSID-CNN.prototxt: -------------------------------------------------------------------------------- 1 | name: "HSID-CNN" 2 | input: "data" 3 | input_dim: 1 4 | input_dim: 24 5 | input_dim: 200 6 | input_dim: 200 7 | 8 | input: "data_2" 9 | input_dim: 1 10 | input_dim: 1 11 | input_dim: 200 12 | input_dim: 200 13 | 14 | layer { 15 | name: "feature3" 16 | type: "Convolution" 17 | bottom: "data" 18 | top: "feature3" 19 | param { 20 | lr_mult: 1 21 | } 22 | param { 23 | lr_mult: 0.1 24 | } 25 | convolution_param { 26 | num_output: 20 27 | kernel_size: 3 28 | stride: 1 29 | pad: 1 30 | weight_filler { 31 | type: "msra" 32 | } 33 | bias_filler { 34 | type: "constant" 35 | } 36 | } 37 | } 38 | 39 | layer { 40 | name: "feature5" 41 | type: "Convolution" 42 | bottom: "data" 43 | top: "feature5" 44 | param { 45 | lr_mult: 1 46 | } 47 | param { 48 | lr_mult: 0.1 49 | } 50 | convolution_param { 51 | num_output: 20 52 | kernel_size: 5 53 | stride: 1 54 | pad: 2 55 | weight_filler { 56 | type: "msra" 57 | } 58 | bias_filler { 59 | type: "constant" 60 | } 61 | } 62 | } 63 | 64 | layer { 65 | name: "feature7" 66 | type: "Convolution" 67 | bottom: "data" 68 | top: "feature7" 69 | param { 70 | lr_mult: 1 71 | } 72 | param { 73 | lr_mult: 0.1 74 | } 75 | convolution_param { 76 | num_output: 20 77 | kernel_size: 7 78 | stride: 1 79 | pad: 3 80 | weight_filler { 81 | type: "msra" 82 | } 83 | bias_filler { 84 | type: "constant" 85 | } 86 | } 87 | } 88 | 89 | layer { 90 | name: "feature_3_5_7" 91 | type: "Concat" 92 | bottom: "feature3" 93 | bottom: "feature5" 94 | bottom: "feature7" 95 | top: "feature_3_5_7" 96 | } 97 | 98 | layer { 99 | name: "relu_feature" 100 | type: "ReLU" 101 | bottom: "feature_3_5_7" 102 | 
top: "feature_3_5_7" 103 | } 104 | 105 | 106 | 107 | layer { 108 | name: "feature3_2" 109 | type: "Convolution" 110 | bottom: "data_2" 111 | top: "feature3_2" 112 | param { 113 | lr_mult: 1 114 | } 115 | param { 116 | lr_mult: 0.1 117 | } 118 | convolution_param { 119 | num_output: 20 120 | kernel_size: 3 121 | stride: 1 122 | pad: 1 123 | weight_filler { 124 | type: "msra" 125 | } 126 | bias_filler { 127 | type: "constant" 128 | } 129 | } 130 | } 131 | 132 | layer { 133 | name: "feature5_2" 134 | type: "Convolution" 135 | bottom: "data_2" 136 | top: "feature5_2" 137 | param { 138 | lr_mult: 1 139 | } 140 | param { 141 | lr_mult: 0.1 142 | } 143 | convolution_param { 144 | num_output: 20 145 | kernel_size: 5 146 | stride: 1 147 | pad: 2 148 | weight_filler { 149 | type: "msra" 150 | } 151 | bias_filler { 152 | type: "constant" 153 | } 154 | } 155 | } 156 | 157 | layer { 158 | name: "feature7_2" 159 | type: "Convolution" 160 | bottom: "data_2" 161 | top: "feature7_2" 162 | param { 163 | lr_mult: 1 164 | } 165 | param { 166 | lr_mult: 0.1 167 | } 168 | convolution_param { 169 | num_output: 20 170 | kernel_size: 7 171 | stride: 1 172 | pad: 3 173 | weight_filler { 174 | type: "msra" 175 | } 176 | bias_filler { 177 | type: "constant" 178 | } 179 | } 180 | } 181 | 182 | layer { 183 | name: "feature_3_5_7_2" 184 | type: "Concat" 185 | bottom: "feature3_2" 186 | bottom: "feature5_2" 187 | bottom: "feature7_2" 188 | top: "feature_3_5_7_2" 189 | } 190 | 191 | layer { 192 | name: "relu_feature_2" 193 | type: "ReLU" 194 | bottom: "feature_3_5_7_2" 195 | top: "feature_3_5_7_2" 196 | } 197 | 198 | layer { 199 | name: "feature_all" 200 | type: "Concat" 201 | bottom: "feature_3_5_7" 202 | bottom: "feature_3_5_7_2" 203 | top: "feature_all" 204 | } 205 | 206 | layer { 207 | name: "conv1" 208 | type: "Convolution" 209 | bottom: "feature_all" 210 | top: "conv1" 211 | param { 212 | lr_mult: 1 213 | } 214 | param { 215 | lr_mult: 1 216 | } 217 | convolution_param { 218 | num_output: 60 
219 | kernel_size: 3 220 | stride: 1 221 | pad: 1 222 | weight_filler { 223 | type: "msra" 224 | } 225 | bias_filler { 226 | type: "constant" 227 | } 228 | } 229 | } 230 | 231 | layer { 232 | name: "relu1" 233 | type: "ReLU" 234 | bottom: "conv1" 235 | top: "conv1" 236 | } 237 | 238 | layer { 239 | name: "conv2" 240 | type: "Convolution" 241 | bottom: "conv1" 242 | top: "conv2" 243 | param { 244 | lr_mult: 1 245 | } 246 | param { 247 | lr_mult: 0 248 | } 249 | convolution_param { 250 | num_output: 60 251 | kernel_size: 3 252 | stride: 1 253 | pad: 1 254 | weight_filler { 255 | type: "msra" 256 | } 257 | bias_filler { 258 | type: "constant" 259 | } 260 | } 261 | } 262 | 263 | layer { 264 | name: "relu2" 265 | type: "ReLU" 266 | bottom: "conv2" 267 | top: "conv2" 268 | } 269 | 270 | layer { 271 | name: "conv3" 272 | type: "Convolution" 273 | bottom: "conv2" 274 | top: "conv3" 275 | param { 276 | lr_mult: 1 277 | } 278 | param { 279 | lr_mult: 0 280 | } 281 | convolution_param { 282 | num_output: 60 283 | kernel_size: 3 284 | stride: 1 285 | pad: 1 286 | weight_filler { 287 | type: "msra" 288 | } 289 | bias_filler { 290 | type: "constant" 291 | } 292 | } 293 | } 294 | 295 | layer { 296 | name: "relu3" 297 | type: "ReLU" 298 | bottom: "conv3" 299 | top: "conv3" 300 | } 301 | 302 | layer { 303 | name: "conv4" 304 | type: "Convolution" 305 | bottom: "conv3" 306 | top: "conv4" 307 | param { 308 | lr_mult: 1 309 | } 310 | param { 311 | lr_mult: 0 312 | } 313 | convolution_param { 314 | num_output: 60 315 | kernel_size: 3 316 | stride: 1 317 | pad: 1 318 | weight_filler { 319 | type: "msra" 320 | } 321 | bias_filler { 322 | type: "constant" 323 | } 324 | } 325 | } 326 | 327 | layer { 328 | name: "relu4" 329 | type: "ReLU" 330 | bottom: "conv4" 331 | top: "conv4" 332 | } 333 | 334 | layer { 335 | name: "conv5" 336 | type: "Convolution" 337 | bottom: "conv4" 338 | top: "conv5" 339 | param { 340 | lr_mult: 1 341 | } 342 | param { 343 | lr_mult: 0 344 | } 345 | 
convolution_param { 346 | num_output: 60 347 | kernel_size: 3 348 | stride: 1 349 | pad: 1 350 | weight_filler { 351 | type: "msra" 352 | } 353 | bias_filler { 354 | type: "constant" 355 | } 356 | } 357 | } 358 | 359 | layer { 360 | name: "relu5" 361 | type: "ReLU" 362 | bottom: "conv5" 363 | top: "conv5" 364 | } 365 | 366 | layer { 367 | name: "conv6" 368 | type: "Convolution" 369 | bottom: "conv5" 370 | top: "conv6" 371 | param { 372 | lr_mult: 1 373 | } 374 | param { 375 | lr_mult: 0 376 | } 377 | convolution_param { 378 | num_output: 60 379 | kernel_size: 3 380 | stride: 1 381 | pad: 1 382 | weight_filler { 383 | type: "msra" 384 | } 385 | bias_filler { 386 | type: "constant" 387 | } 388 | } 389 | } 390 | 391 | layer { 392 | name: "relu6" 393 | type: "ReLU" 394 | bottom: "conv6" 395 | top: "conv6" 396 | } 397 | 398 | layer { 399 | name: "conv7" 400 | type: "Convolution" 401 | bottom: "conv6" 402 | top: "conv7" 403 | param { 404 | lr_mult: 1 405 | } 406 | param { 407 | lr_mult: 0 408 | } 409 | convolution_param { 410 | num_output: 60 411 | kernel_size: 3 412 | stride: 1 413 | pad: 1 414 | weight_filler { 415 | type: "msra" 416 | } 417 | bias_filler { 418 | type: "constant" 419 | } 420 | } 421 | } 422 | 423 | layer { 424 | name: "relu7" 425 | type: "ReLU" 426 | bottom: "conv7" 427 | top: "conv7" 428 | } 429 | 430 | layer { 431 | name: "conv8" 432 | type: "Convolution" 433 | bottom: "conv7" 434 | top: "conv8" 435 | param { 436 | lr_mult: 1 437 | } 438 | param { 439 | lr_mult: 0 440 | } 441 | convolution_param { 442 | num_output: 60 443 | kernel_size: 3 444 | stride: 1 445 | pad: 1 446 | weight_filler { 447 | type: "msra" 448 | } 449 | bias_filler { 450 | type: "constant" 451 | } 452 | } 453 | } 454 | 455 | layer { 456 | name: "relu8" 457 | type: "ReLU" 458 | bottom: "conv8" 459 | top: "conv8" 460 | } 461 | 462 | layer { 463 | name: "conv9" 464 | type: "Convolution" 465 | bottom: "conv8" 466 | top: "conv9" 467 | param { 468 | lr_mult: 1 469 | } 470 | param { 471 | 
lr_mult: 0 472 | } 473 | convolution_param { 474 | num_output: 60 475 | kernel_size: 3 476 | stride: 1 477 | pad: 1 478 | weight_filler { 479 | type: "msra" 480 | } 481 | bias_filler { 482 | type: "constant" 483 | } 484 | } 485 | } 486 | 487 | layer { 488 | name: "relu9" 489 | type: "ReLU" 490 | bottom: "conv9" 491 | top: "conv9" 492 | } 493 | 494 | 495 | 496 | layer { 497 | name: "feature_conv3" 498 | type: "Convolution" 499 | bottom: "conv3" 500 | top: "feature_conv3" 501 | param { 502 | lr_mult: 1 503 | } 504 | param { 505 | lr_mult: 0.1 506 | } 507 | convolution_param { 508 | num_output: 15 509 | kernel_size: 3 510 | stride: 1 511 | pad: 1 512 | weight_filler { 513 | type: "msra" 514 | } 515 | bias_filler { 516 | type: "constant" 517 | } 518 | } 519 | } 520 | 521 | layer { 522 | name: "feature_conv5" 523 | type: "Convolution" 524 | bottom: "conv5" 525 | top: "feature_conv5" 526 | param { 527 | lr_mult: 1 528 | } 529 | param { 530 | lr_mult: 0.1 531 | } 532 | convolution_param { 533 | num_output: 15 534 | kernel_size: 3 535 | stride: 1 536 | pad: 1 537 | weight_filler { 538 | type: "msra" 539 | } 540 | bias_filler { 541 | type: "constant" 542 | } 543 | } 544 | } 545 | 546 | layer { 547 | name: "feature_conv7" 548 | type: "Convolution" 549 | bottom: "conv7" 550 | top: "feature_conv7" 551 | param { 552 | lr_mult: 1 553 | } 554 | param { 555 | lr_mult: 0.1 556 | } 557 | convolution_param { 558 | num_output: 15 559 | kernel_size: 3 560 | stride: 1 561 | pad: 1 562 | weight_filler { 563 | type: "msra" 564 | } 565 | bias_filler { 566 | type: "constant" 567 | } 568 | } 569 | } 570 | 571 | layer { 572 | name: "feature_conv9" 573 | type: "Convolution" 574 | bottom: "conv9" 575 | top: "feature_conv9" 576 | param { 577 | lr_mult: 1 578 | } 579 | param { 580 | lr_mult: 0.1 581 | } 582 | convolution_param { 583 | num_output: 15 584 | kernel_size: 3 585 | stride: 1 586 | pad: 1 587 | weight_filler { 588 | type: "msra" 589 | } 590 | bias_filler { 591 | type: "constant" 592 | } 
593 | } 594 | } 595 | 596 | layer { 597 | name: "feature_conv_3_5_7_9" 598 | type: "Concat" 599 | bottom: "feature_conv3" 600 | bottom: "feature_conv5" 601 | bottom: "feature_conv7" 602 | bottom: "feature_conv9" 603 | top: "feature_conv_3_5_7_9" 604 | } 605 | 606 | layer { 607 | name: "relu_feature_conv_3_5_7_9" 608 | type: "ReLU" 609 | bottom: "feature_conv_3_5_7_9" 610 | top: "feature_conv_3_5_7_9" 611 | } 612 | 613 | layer { 614 | name: "conv10" 615 | type: "Convolution" 616 | bottom: "feature_conv_3_5_7_9" 617 | top: "conv10" 618 | param { 619 | lr_mult: 1 620 | } 621 | param { 622 | lr_mult: 0 623 | } 624 | convolution_param { 625 | num_output: 1 626 | kernel_size: 3 627 | stride: 1 628 | pad: 1 629 | weight_filler { 630 | type: "msra" 631 | } 632 | bias_filler { 633 | type: "constant" 634 | } 635 | } 636 | } 637 | -------------------------------------------------------------------------------- /Model/HSID-CNN_Realdata_iter_1000000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_Realdata_iter_1000000.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiseGau.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiseGau.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiselevel100_iter_600000.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiselevel100_iter_600000.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiselevel25.caffemodel: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiselevel25.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiselevel5.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiselevel5.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiselevel50.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiselevel50.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiselevel75.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiselevel75.caffemodel -------------------------------------------------------------------------------- /Model/HSID-CNN_noiserand25.caffemodel: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/qzhang95/HSID-CNN/9932431f92dc35054add77fc3ef9a02ad5cd609b/Model/HSID-CNN_noiserand25.caffemodel -------------------------------------------------------------------------------- /Model/More_Model: -------------------------------------------------------------------------------- 1 | If you need more models for HSI denoising, please contact me: whuqzhang@gmail.com 2 | Qiang Zhang, 3 | Wuhan University. 
4 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **HSID-CNN** 2 | ==== 3 | *********************************************************************************************************** 4 | *********************************************************************************************************** 5 | **Matlab demo code for Hyperspectral Image Denoising Employing a Spatial–Spectral Deep Residual Convolutional Neural Network(HSID-CNN), IEEE TGRS, 2019.** 6 | 7 | By Qiang Zhang (whuqzhang@gmail.com) 8 | Wuhan University, China. 9 | 10 | If you use/adapt our code in your work (either as a stand-alone tool or as a component of any algorithm), **please cite our paper**. 11 | 12 | **Q. Yuan, Q. Zhang, J. Li, H. Shen, and L. Zhang**, "Hyperspectral Image Denoising Employing a Spatial–Spectral Deep Residual Convolutional Neural Network," ***IEEE Transactions on Geoscience and Remote Sensing***, vol. 57, no. 2, pp. 1205-1218, 2019. 13 | 14 | @ARTICLE{yuan2019, 15 | author={Q. {Yuan} and Q. {Zhang} and J. {Li} and H. {Shen} and L. {Zhang}}, 16 | journal={IEEE Trans. Geosci. Remote Sens.}, 17 | title={Hyperspectral Image Denoising Employing a Spatial-Spectral Deep Residual Convolutional Neural Network}, 18 | year={2019}, 19 | volume={57}, 20 | number={2}, 21 | pages={1205-1218}, 22 | month={Feb.},} 23 | 24 | 25 | 26 | This code is for academic purpose only. Not for commercial/industrial activities. 27 | 28 | 29 | 30 | 31 | **NOTE:** 32 | 33 | This Matlab version is a re-implementation with ***HSID-CNN*** (https://ieeexplore.ieee.org/document/8454887, IEEE TGRS, 2019), and is for the ease of understanding the algorithm. This code is not optimized, and the speed is not representative. The result can be slightly different from the paper due to transferring across platforms. 
34 |
35 |
36 |
37 | **Environment:**
38 |
39 | Windows 7, Cuda 7.5, Caffe framework (**Necessary, GPU mode better**), Matlab R2014b.
40 | Please place this folder into "($Caffe_Dir)/examples/"
41 |
42 |
43 |
44 | **Others:**
45 |
46 | If you need more models, please contact me. (whuqzhang@gmail.com)
47 | --------------------------------------------------------------------------------
/SAM.m:
--------------------------------------------------------------------------------
1 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
2 | % Description:
3 | % Spectral Angle Mapper (SAM).
4 | %
5 | % Interface:
6 | % [SAM_index,SAM_map] = SAM(I1,I2)
7 | %
8 | % Inputs:
9 | % I1: First multispectral image;
10 | % I2: Second multispectral image.
11 | %
12 | % Outputs:
13 | % SAM_index: SAM index;
14 | % SAM_map: Image of SAM values.
15 | %
16 | % References:
17 | % [Yuhas92] R. H. Yuhas, A. F. H. Goetz, and J. W. Boardman, "Discrimination among semi-arid landscape endmembers using the Spectral Angle Mapper (SAM) algorithm,"
18 | % in Proceeding Summaries 3rd Annual JPL Airborne Geoscience Workshop, 1992, pp. 147-149.
19 | % [Vivone14] G. Vivone, L. Alparone, J. Chanussot, M. Dalla Mura, A. Garzelli, G. Licciardi, R. Restaino, and L. Wald, "A Critical Comparison Among Pansharpening Algorithms,"
20 | % IEEE Transactions on Geoscience and Remote Sensing, 2014. 
(Accepted)
21 | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
22 | function [SAM_index,SAM_map] = SAM(I1,I2)
23 |
24 | [M,N,~] = size(I2); % spatial dimensions; the spectral (3rd) dimension is collapsed below
25 | prod_scal = dot(I1,I2,3); % per-pixel spectral inner product <I1,I2>
26 | norm_orig = dot(I1,I1,3); % per-pixel squared norm of I1
27 | norm_fusa = dot(I2,I2,3); % per-pixel squared norm of I2
28 | prod_norm = sqrt(norm_orig.*norm_fusa); % per-pixel product of spectral norms |I1|*|I2|
29 | prod_map = prod_norm;
30 | prod_map(prod_map==0)=eps; % guard against division by zero in the angle map
31 | SAM_map = acos(prod_scal./prod_map); % per-pixel spectral angle, in radians
32 | prod_scal = reshape(prod_scal,M*N,1);
33 | prod_norm = reshape(prod_norm, M*N,1);
34 | z=find(prod_norm==0); % zero-norm pixels carry no angle information
35 | prod_scal(z)=[];prod_norm(z)=[]; % exclude them from the global average
36 | angolo = sum(sum(acos(prod_scal./prod_norm)))/(size(prod_norm,1)); % mean spectral angle (radians)
37 | SAM_index = real(angolo)*180/pi; % degrees; real() guards acos of ratios marginally above 1 due to rounding
38 |
39 | end
40 | --------------------------------------------------------------------------------