├── README.md ├── methods ├── __pycache__ │ ├── GS.cpython-36.pyc │ ├── CNMF.cpython-36.pyc │ ├── GFPCA.cpython-36.pyc │ ├── GSA.cpython-36.pyc │ ├── IHS.cpython-36.pyc │ ├── PCA.cpython-36.pyc │ ├── PNN.cpython-36.pyc │ ├── SFIM.cpython-36.pyc │ ├── utils.cpython-36.pyc │ ├── Bicubic.cpython-36.pyc │ ├── Brovey.cpython-36.pyc │ ├── MTF_GLP.cpython-36.pyc │ ├── Wavelet.cpython-36.pyc │ ├── metrics.cpython-36.pyc │ ├── __init__.cpython-36.pyc │ └── MTF_GLP_HPM.cpython-36.pyc ├── __init__.py ├── eval.txt ├── Bicubic.py ├── Wavelet.py ├── GFPCA.py ├── PCA.py ├── SFIM.py ├── Brovey.py ├── IHS.py ├── test.py ├── GS.py ├── GSA.py ├── MTF_GLP_HPM.py ├── MTF_GLP.py ├── PNN.py ├── utils.py ├── PanNet.py ├── x.py ├── demo_all_methods.py-v2 ├── demo_all_methods-gf.py ├── demo_all_methods.py ├── metrics.py └── CNMF.py └── LICENSE /README.md: -------------------------------------------------------------------------------- 1 | # pan-sharpening 2 | this is a repostitory about pan-sharpening, for multispectral and panchromatic images only. 
3 | -------------------------------------------------------------------------------- /methods/__pycache__/GS.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/GS.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/CNMF.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/CNMF.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/GFPCA.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/GFPCA.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/GSA.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/GSA.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/IHS.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/IHS.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/PCA.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/PCA.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/PNN.cpython-36.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/PNN.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/SFIM.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/SFIM.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/utils.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/utils.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: Apache-2.0 4 | @author: gaj 5 | E-mail: anjing_guo@hnu.edu.cn 6 | """ 7 | 8 | -------------------------------------------------------------------------------- /methods/__pycache__/Bicubic.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/Bicubic.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/Brovey.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/Brovey.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/MTF_GLP.cpython-36.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/MTF_GLP.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/Wavelet.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/Wavelet.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/metrics.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/metrics.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/__init__.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/__init__.cpython-36.pyc -------------------------------------------------------------------------------- /methods/__pycache__/MTF_GLP_HPM.cpython-36.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/yisun98/pan-sharpening/HEAD/methods/__pycache__/MTF_GLP_HPM.cpython-36.pyc -------------------------------------------------------------------------------- /methods/eval.txt: -------------------------------------------------------------------------------- 1 | metrics: D_lamda, D_s, QNR 2 | Bicubic 0.02590.0871 0.8892 3 | Brovey 0.11290.2445 0.6702 4 | PCA 0.2730.3252 0.4905 5 | IHS 0.10050.2591 0.6664 6 | SFIM 0.11210.2191 0.6933 7 | GS 0.10870.2668 0.6535 8 | Wavelet 0.11930.2089 0.6968 9 | MTF_GLP 0.16630.188 0.6769 10 | MTF_GLP_HPM 0.16440.1808 0.6845 11 | GSA 0.10020.2286 0.6941 12 | CNMF 0.07240.1782 0.7623 13 | GFPCA 0.08680.0927 0.8285 14 | PNN 0.04810.0746 0.8809 15 | RSCNNCA 0.02830.0632 
def Bicubic(pan, hs):
    """Pansharpen by plain bicubic upsampling of the low-resolution image.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the bicubic-upsampled `hs`, shape (M, N, C), clipped to [0, 1].
    """
    M, N, _ = pan.shape
    m, n, _ = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    # The sharpening ratio must agree along both spatial axes.
    assert int(np.round(M / m)) == int(np.round(N / n))

    # Bicubic interpolation is the whole method: no detail injection at all,
    # so this serves as the baseline for the other fusion algorithms.
    fused = upsample_bicubic(hs, ratio)

    # Clamp to the valid reflectance range.
    fused[fused < 0] = 0
    fused[fused > 1] = 1

    return fused
def Wavelet(pan, hs):
    """Wavelet-based pansharpening (King & Wang, IGARSS 2001).

    A 2-level Haar decomposition of the PAN band supplies the detail
    sub-bands; each upsampled spectral band contributes only its coarse
    approximation before reconstruction.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].
    """
    M, N, _ = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M / m)) == int(np.round(N / n))

    u_hs = upsample_interp23(hs, ratio)

    # Decompose the PAN band once; its detail coefficients are reused for
    # every spectral band below.
    pan = np.squeeze(pan)
    pc = pywt.wavedec2(pan, 'haar', level=2)

    rec = []
    for band in range(C):
        # Swap in this band's approximation, keep the PAN detail sub-bands.
        band_dec = pywt.wavedec2(u_hs[:, :, band], 'haar', level=2)
        pc[0] = band_dec[0]
        rec.append(np.expand_dims(pywt.waverec2(pc, 'haar'), -1))

    fused = np.concatenate(rec, axis=-1)

    # Clamp to the valid range.
    fused[fused < 0] = 0
    fused[fused > 1] = 1

    return fused
def GFPCA(pan, hs):
    """Guided-filter PCA (GFPCA) pansharpening.

    Two-stage fusion after Liao et al., WHISPERS 2015: project the spectral
    bands into PCA space, sharpen every component with a PAN-guided filter,
    then invert the PCA transform.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].
    """

    M, N, c = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M/m))

    print('get sharpening ratio: ', ratio)
    # The sharpening ratio must agree along both spatial axes.
    assert int(np.round(M/m)) == int(np.round(N/n))

    # Full-rank PCA of the low-resolution bands (all C components kept).
    p = princomp(n_components=C)
    pca_hs = p.fit_transform(np.reshape(hs, (m*n, C)))

    pca_hs = np.reshape(pca_hs, (m, n, C))

    # Upsample the PCA scores to the PAN grid.
    pca_hs = upsample_interp23(pca_hs, ratio)

    # Sharpen each component with a guided filter steered by the PAN image
    # (radius 8, eps = 0.001**2 = 1e-6).
    gp_hs = []
    for i in range(C):
        temp = guidedFilter(np.float32(pan), np.float32(np.expand_dims(pca_hs[:, :, i], -1)), 8, eps = 0.001**2)
        temp = np.expand_dims(temp ,axis=-1)
        gp_hs.append(temp)

    gp_hs = np.concatenate(gp_hs, axis=-1)

    # Back-project to the spectral domain. sklearn's inverse_transform is a
    # dot with components_ plus mean_, which broadcasts over the leading
    # (M, N) axes of the 3-D score array.
    I_GFPCA = p.inverse_transform(gp_hs)

    # adjustment: clamp to the valid [0, 1] range
    I_GFPCA[I_GFPCA<0]=0
    I_GFPCA[I_GFPCA>1]=1

    return I_GFPCA
def PCA(pan, hs):
    """Principal-component-substitution pansharpening.

    The first principal component of the upsampled spectral image is
    replaced by the histogram-matched PAN band, after which the PCA
    transform is inverted.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].
    """
    M, N, _ = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M / m)) == int(np.round(N / n))

    image_hr = pan

    u_hs = upsample_interp23(hs, ratio)

    # Full-rank PCA of the upsampled bands.
    transform = princomp(n_components=C)
    scores = transform.fit_transform(np.reshape(u_hs, (M * N, C)))
    scores = np.reshape(scores, (M, N, C))

    # Match PAN statistics (mean/std) to the first component, then
    # substitute it.
    first_pc = scores[:, :, 0]
    image_hr = (image_hr - np.mean(image_hr)) * np.std(first_pc, ddof=1) / np.std(image_hr, ddof=1) + np.mean(first_pc)
    scores[:, :, 0] = image_hr[:, :, 0]

    fused = transform.inverse_transform(scores)

    # Equalization. NOTE(review): this adds the *global* scalar mean of
    # u_hs to every band, whereas GS/GSA restore per-band means -- kept
    # as-is to preserve behaviour; confirm whether per-band was intended.
    fused = fused - np.mean(fused, axis=(0, 1)) + np.mean(u_hs)

    # Clamp to the valid range.
    fused[fused < 0] = 0
    fused[fused > 1] = 1

    return fused
def SFIM(pan, hs):
    """Smoothing-filter-based intensity modulation (Liu, IJRS 2000).

    Each upsampled band is modulated by the ratio between the
    histogram-matched PAN band and a box-filtered (low-pass) copy of it.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].
    """
    M, N, _ = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M / m)) == int(np.round(N / n))

    # Bring the spectral image up to the PAN grid.
    u_hs = upsample_interp23(hs, ratio)

    # The mean filter needs an odd support so it is centred on a pixel.
    if np.mod(ratio, 2) == 0:
        ratio = ratio + 1

    # Replicate the PAN band per channel, then histogram-match it
    # (mean/std) to each upsampled band.
    pan = np.tile(pan, (1, 1, C))
    pan = (pan - np.mean(pan, axis=(0, 1))) * (np.std(u_hs, axis=(0, 1), ddof=1) / np.std(pan, axis=(0, 1), ddof=1)) + np.mean(u_hs, axis=(0, 1))

    # Normalised box (mean) kernel.
    kernel = np.ones((ratio, ratio))
    kernel = kernel / np.sum(kernel)

    fused = np.zeros((M, N, C))
    for band in range(C):
        # Low-pass PAN; the high/low ratio injects the spatial detail.
        lrpan = signal.convolve2d(pan[:, :, band], kernel, mode='same', boundary='wrap')
        # Epsilon guards the division at near-zero low-pass values.
        fused[:, :, band] = u_hs[:, :, band] * pan[:, :, band] / (lrpan + 1e-8)

    # Clamp to the valid range.
    fused[fused < 0] = 0
    fused[fused > 1] = 1

    return fused
def Brovey(pan, hs):
    """Brovey (chromaticity) transform pansharpening.

    Each upsampled band is scaled by the ratio between the
    histogram-matched PAN band and the synthetic intensity.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].
    """
    M, N, _ = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M / m)) == int(np.round(N / n))

    u_hs = upsample_interp23(hs, ratio)

    # Synthetic intensity: per-pixel band average.
    intensity = np.mean(u_hs, axis=-1)

    # Histogram-match the PAN band (mean/std) to the intensity.
    matched = (pan - np.mean(pan)) * (np.std(intensity, ddof=1) / np.std(pan, ddof=1)) + np.mean(intensity)
    matched = np.squeeze(matched)

    bands = []
    for i in range(C):
        # Epsilon guards against division by a zero-intensity pixel.
        scaled = matched * u_hs[:, :, i] / (intensity + 1e-8)
        bands.append(np.expand_dims(scaled, axis=-1))

    fused = np.concatenate(bands, axis=-1)

    # Clamp to the valid range.
    fused[fused < 0] = 0
    fused[fused > 1] = 1

    return fused
def IHS(pan, hs):
    """Fast IHS-family pansharpening via additive intensity substitution.

    The per-pixel band mean acts as the intensity; the histogram-matched
    PAN band replaces it by adding the difference to every band.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].
    """
    M, N, _ = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M / m)) == int(np.round(N / n))

    u_hs = upsample_interp23(hs, ratio)

    # Synthetic intensity: per-pixel band average (channel axis kept).
    intensity = np.mean(u_hs, axis=-1, keepdims=True)

    # Histogram-match the PAN band (mean/std) to the intensity.
    matched = (pan - np.mean(pan)) * np.std(intensity, ddof=1) / np.std(pan, ddof=1) + np.mean(intensity)

    # Inject the PAN detail into every band.
    fused = u_hs + np.tile(matched - intensity, (1, 1, C))

    # Clamp to the valid range.
    fused[fused < 0] = 0
    fused[fused > 1] = 1

    return fused
0.8808797287544271], 'RSCNNCA ': [0.028285061983203814, 0.06315653883658062, 0.9103447857958529]} 5 | 6 | filename = 'eval.txt' 7 | 8 | with open(filename, 'w') as f: 9 | 10 | ''''print result''' 11 | print('################## reference comparision #######################') 12 | for index, i in enumerate(m): # i=key 13 | if index == 0: 14 | print(i, m[i]) 15 | f.write(i + ' ' + m[i] + ' ') 16 | else: 17 | print(i, [round(j, 4) for j in m[i]]) 18 | x = 0 19 | for j in m[i]: 20 | if x == 0: 21 | f.write(i + ' ' + str(round(j, 4))) 22 | x+=1 23 | else: 24 | f.write(str(round(j, 4)) + ' ') 25 | f.write('\n') 26 | print('################## reference comparision #######################') 27 | 28 | print() 29 | print() 30 | print() 31 | 32 | 33 | -------------------------------------------------------------------------------- /methods/GS.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: MIT 4 | @author: gaj 5 | E-mail: anjing_guo@hnu.edu.cn 6 | Paper References: 7 | [1] C. A. Laben and B. V. Brower, “Process for enhancing the spatial resolution of multispectral imagery using pan-sharpening,” 8 | Eastman Kodak Company, Tech. Rep. US Patent # 6,011,875, 2000. 9 | [2] B. Aiazzi, S. Baronti, and M. Selva, “Improving component substitution Pansharpening through multivariate regression of MS+Pan data,” 10 | IEEE Transactions on Geoscience and Remote Sensing, vol. 45, no. 10, pp. 3230–3239, October 2007. 11 | [3] G. Vivone, L. Alparone, J. Chanussot, M. Dalla Mura, A. Garzelli, G. Licciardi, R. Restaino, and L. Wald, “A Critical Comparison Among Pansharpening Algorithms”, 12 | IEEE Transaction on Geoscience and Remote Sensing, 2014. 
def GS(pan, hs):
    """Gram-Schmidt pansharpening (Laben & Brower, 2000).

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].

    Fixes versus the original:
    - injection gains mixed np.cov(..., ddof=1) with np.var's default
      ddof=0, biasing every gain by n/(n-1); the variance now uses ddof=1
      to match the covariance estimator;
    - the covariance matrix no longer shadows the unpacked channel count
      `c`, and the loop-invariant variance is hoisted out of the loop.
    """
    M, N, c = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M / m))
    print('get sharpening ratio: ', ratio)
    # The sharpening ratio must agree along both spatial axes.
    assert int(np.round(M / m)) == int(np.round(N / n))

    # upsample
    u_hs = upsample_interp23(hs, ratio)

    # remove per-band means from u_hs (restored at the end)
    means = np.mean(u_hs, axis=(0, 1))
    image_lr = u_hs - means

    # synthetic intensity: per-pixel band average, zero-mean
    I = np.mean(u_hs, axis=2, keepdims=True)
    I0 = I - np.mean(I)

    # histogram-match PAN (mean/std) to the zero-mean intensity
    image_hr = (pan - np.mean(pan)) * (np.std(I0, ddof=1) / np.std(pan, ddof=1)) + np.mean(I0)

    # injection gains g_i = cov(I0, band_i) / var(I0); gain 1 for I0 itself.
    g = [1]
    I0_flat = np.reshape(I0, (-1,))
    var_I0 = np.var(I0_flat, ddof=1)  # ddof=1 matches np.cov below
    for i in range(C):
        band = np.reshape(image_lr[:, :, i], (-1,))
        cov_mat = np.cov(I0_flat, band, ddof=1)
        g.append(cov_mat[0, 1] / var_I0)
    g = np.array(g)

    # detail extraction, replicated for the [I0 | bands] stack
    delta = image_hr - I0
    deltam = np.tile(delta, (1, 1, C + 1))

    # fusion
    V = np.concatenate((I0, image_lr), axis=-1)

    g = np.expand_dims(g, 0)
    g = np.expand_dims(g, 0)
    g = np.tile(g, (M, N, 1))

    V_hat = V + g * deltam

    I_GS = V_hat[:, :, 1:]

    # restore per-band means
    I_GS = I_GS - np.mean(I_GS, axis=(0, 1)) + means

    # clamp to the valid range
    I_GS[I_GS < 0] = 0
    I_GS[I_GS > 1] = 1

    return I_GS
def estimation_alpha(pan, hs, mode='global'):
    """Estimate regression weights alpha with pan ~= hs . alpha.

    pan:  (h, w, 1) target band.
    hs:   (h, w, B) regressors (spectral bands plus a constant column).
    mode: 'global' solves one least-squares problem over the whole image;
          'local' averages per-patch solutions over 32x32 tiles.
    Returns alpha with shape (B, 1).

    Fix: np.linalg.lstsq is now called with rcond=None (the modern
    machine-precision cutoff), silencing NumPy's FutureWarning about the
    legacy rcond=-1 default.
    """
    if mode == 'global':
        IHC = np.reshape(pan, (-1, 1))
        ILRC = np.reshape(hs, (hs.shape[0]*hs.shape[1], hs.shape[2]))

        alpha = np.linalg.lstsq(ILRC, IHC, rcond=None)[0]

    elif mode == 'local':
        patch_size = 32
        all_alpha = []
        print(pan.shape)
        for i in range(0, hs.shape[0]-patch_size, patch_size):
            for j in range(0, hs.shape[1]-patch_size, patch_size):
                patch_pan = pan[i:i+patch_size, j:j+patch_size, :]
                patch_hs = hs[i:i+patch_size, j:j+patch_size, :]

                IHC = np.reshape(patch_pan, (-1, 1))
                ILRC = np.reshape(patch_hs, (-1, hs.shape[2]))

                local_alpha = np.linalg.lstsq(ILRC, IHC, rcond=None)[0]
                all_alpha.append(local_alpha)

        all_alpha = np.array(all_alpha)

        # average the per-patch solutions
        alpha = np.mean(all_alpha, axis=0, keepdims=False)

    return alpha

def GSA(pan, hs):
    """Gram-Schmidt Adaptive pansharpening (Aiazzi et al., 2007).

    Like GS, but the synthetic intensity is a regression-based weighted
    band combination (weights from `estimation_alpha`) instead of a plain
    band average.

    pan: high-resolution panchromatic image, shape (M, N, c).
    hs:  low-resolution spectral image, shape (m, n, C).
    Returns the fused (M, N, C) image clipped to [0, 1].

    Fixes versus the original:
    - cv2.resize was called with cv2.INTER_CUBIC as the third *positional*
      argument, which is the `dst` slot, so bicubic interpolation was
      silently ignored; the flag is now passed via `interpolation=`;
    - injection gains now use np.var(..., ddof=1) to match
      np.cov(..., ddof=1), and the covariance no longer shadows `c`.
    """
    M, N, c = pan.shape
    m, n, C = hs.shape

    ratio = int(np.round(M/m))
    print('get sharpening ratio: ', ratio)
    assert int(np.round(M/m)) == int(np.round(N/n))

    # upsample the spectral image to the PAN grid
    u_hs = upsample_interp23(hs, ratio)

    # zero-mean copies: upsampled bands (per-band means restored at the
    # end) and native-resolution bands (used for weight estimation)
    means = np.mean(u_hs, axis=(0, 1))
    image_lr = u_hs - means
    image_lr_lp = hs - np.mean(hs, axis=(0, 1))

    # PAN decimated to the spectral grid for the regression
    image_hr = pan - np.mean(pan)
    image_hr0 = cv2.resize(image_hr, (n, m), interpolation=cv2.INTER_CUBIC)
    image_hr0 = np.expand_dims(image_hr0, -1)

    # adaptive weights (with a bias column) mapping bands to PAN
    alpha = estimation_alpha(image_hr0, np.concatenate((image_lr_lp, np.ones((m, n, 1))), axis=-1), mode='global')

    # synthetic intensity at full resolution, zero-mean
    I = np.dot(np.concatenate((image_lr, np.ones((M, N, 1))), axis=-1), alpha)
    I0 = I - np.mean(I)

    # injection gains g_i = cov(I0, band_i) / var(I0); gain 1 for I0.
    g = []
    g.append(1)
    I0_flat = np.reshape(I0, (-1,))
    var_I0 = np.var(I0_flat, ddof=1)  # ddof matched with np.cov below
    for i in range(C):
        temp_h = image_lr[:, :, i]
        cov_mat = np.cov(I0_flat, np.reshape(temp_h, (-1,)), ddof=1)
        g.append(cov_mat[0, 1] / var_I0)
    g = np.array(g)

    # detail extraction, replicated for the [I0 | bands] stack
    delta = image_hr - I0
    deltam = np.tile(delta, (1, 1, C+1))

    # fusion
    V = np.concatenate((I0, image_lr), axis=-1)

    g = np.expand_dims(g, 0)
    g = np.expand_dims(g, 0)

    g = np.tile(g, (M, N, 1))

    V_hat = V + g*deltam

    I_GSA = V_hat[:, :, 1:]

    # restore per-band means and clamp to the valid range
    I_GSA = I_GSA - np.mean(I_GSA, axis=(0, 1)) + means

    I_GSA[I_GSA < 0] = 0
    I_GSA[I_GSA > 1] = 1

    return I_GSA
def gaussian2d(N, std):
    """Sampled 2-D Gaussian window used for the MTF-matched filter.

    NOTE(review): np.arange(-(N-1)/2, (N+2)/2) yields N+1 samples per
    axis, so the window is (N+1)x(N+1); kept as-is to preserve behaviour.
    """
    sigma = np.double(std)
    axis = np.arange(-(N - 1) / 2, (N + 2) / 2)
    gx, gy = np.meshgrid(axis, axis)
    # Separable product of two 1-D Gaussians.
    return np.exp(-0.5 * (gx / sigma) ** 2) * np.exp(-0.5 * (gy / sigma) ** 2)
def gaussian2d(N, std):
    """Sampled 2-D Gaussian window built as the outer product of a 1-D profile.

    NOTE(review): the sample grid np.arange(-(N-1)/2, (N+2)/2) produces
    N+1 points per axis; preserved to keep parity with the rest of the file.
    """
    grid = np.arange(-(N - 1) / 2, (N + 2) / 2)
    profile = np.exp(-0.5 * (grid / np.double(std)) ** 2)
    # The 2-D Gaussian is separable: outer product of the 1-D profile.
    return np.outer(profile, profile)
def psnr(y_true, y_pred):
    """Peak signal-to-noise ratio averaged over samples and channels."""
    # Inputs are assumed scaled to [0, 1] (hence the *255) -- TODO confirm
    # against the caller's normalisation.
    mse = K.mean(K.square(y_true*255 - y_pred*255), axis=(-3, -2, -1))
    # K.log is the natural log; dividing by ln(10) converts it to log10.
    return K.mean(20 * K.log(255 / K.sqrt(mse)) / np.log(10))

def pnn_net(lrhs_size=(32, 32, 3), hrms_size = (32, 32, 1)):
    """Build and compile the 3-layer PNN CNN (Masi et al., 2016).

    lrhs_size: (H, W, C) shape of the upsampled low-resolution spectral input.
    hrms_size: (H, W, c) shape of the high-resolution PAN input.
    Returns a compiled two-input Keras model (MSE loss, PSNR metric).
    """

    lrhs_inputs = Input(lrhs_size)
    hrms_inputs = Input(hrms_size)

    # Stack spectral and PAN inputs along the channel axis.
    mixed = Concatenate()([lrhs_inputs, hrms_inputs])

    # PNN layout: 9x9 feature extraction, 5x5 mapping, 5x5 reconstruction
    # back to the C spectral bands.
    mixed1 = Conv2D(64, (9, 9), strides=(1, 1), padding='same', activation='relu')(mixed)

    mixed1 = Conv2D(32, (5, 5), strides=(1, 1), padding='same', activation='relu')(mixed1)

    c6 = Conv2D(lrhs_size[2], (5, 5), strides=(1, 1), padding='same', activation='relu', name='model1_last1')(mixed1)

    model = Model(inputs = [lrhs_inputs, hrms_inputs], outputs = c6)

    model.compile(optimizer =Adam(lr = 5e-4), loss = 'mse', metrics=[psnr])

    model.summary()

    return model
reconstructing_size=320#reconstructing patch size 72 | left_pad = (testing_size-reconstructing_size)//2 73 | 74 | 75 | ''' 76 | testing 77 | --------------- 78 | | rec | 79 | | ------- | 80 | | | | | 81 | | | | | 82 | | ------- | 83 | | | 84 | --------------- 85 | |pad| 86 | 87 | ''' 88 | 89 | ratio = int(np.round(M/m)) 90 | 91 | print('get sharpening ratio: ', ratio) 92 | assert int(np.round(M/m)) == int(np.round(N/n)) 93 | 94 | train_hrhs_all = [] 95 | train_hrms_all = [] 96 | train_lrhs_all = [] 97 | 98 | used_hrhs = lrhs 99 | used_lrhs = lrhs 100 | 101 | used_lrhs, used_hrms = downgrade_images(used_lrhs, hrms, ratio, sensor=sensor) 102 | 103 | print(used_lrhs.shape, used_hrms.shape) 104 | 105 | used_lrhs = upsample_interp23(used_lrhs, ratio) 106 | 107 | """crop images""" 108 | print('croping images...') 109 | 110 | for j in range(0, used_hrms.shape[0]-training_size, stride): 111 | for k in range(0, used_hrms.shape[1]-training_size, stride): 112 | 113 | temp_hrhs = used_hrhs[j:j+training_size, k:k+training_size, :] 114 | temp_hrms = used_hrms[j:j+training_size, k:k+training_size, :] 115 | temp_lrhs = used_lrhs[j:j+training_size, k:k+training_size, :] 116 | 117 | train_hrhs_all.append(temp_hrhs) 118 | train_hrms_all.append(temp_hrms) 119 | train_lrhs_all.append(temp_lrhs) 120 | 121 | train_hrhs_all = np.array(train_hrhs_all, dtype='float16') 122 | train_hrms_all = np.array(train_hrms_all, dtype='float16') 123 | train_lrhs_all = np.array(train_lrhs_all, dtype='float16') 124 | 125 | index = [i for i in range(train_hrhs_all.shape[0])] 126 | # random.seed(2020) 127 | random.shuffle(index) 128 | train_hrhs = train_hrhs_all[index, :, :, :] 129 | train_hrms= train_hrms_all[index, :, :, :] 130 | train_lrhs = train_lrhs_all[index, :, :, :] 131 | 132 | print(train_hrhs.shape, train_hrms.shape, train_lrhs.shape) 133 | 134 | """train net""" 135 | print('training...') 136 | 137 | def lr_schedule(epoch): 138 | """Learning Rate Schedule 139 | 140 | # Arguments 141 | epoch 
(int): The number of epochs 142 | 143 | # Returns 144 | lr (float32): learning rate 145 | """ 146 | lr = 5e-4 147 | if epoch > 40: 148 | lr *= 1e-2 149 | elif epoch > 20: 150 | lr *= 1e-1 151 | return lr 152 | 153 | lr_scheduler = LearningRateScheduler(lr_schedule, verbose=1) 154 | checkpoint = ModelCheckpoint(filepath='./weights/PNN_model.h5', 155 | monitor='val_psnr', 156 | mode='max', 157 | verbose=1, 158 | save_best_only=True) 159 | callbacks = [lr_scheduler, checkpoint] 160 | 161 | model = pnn_net(lrhs_size=(training_size, training_size, C), hrms_size=(training_size, training_size, c)) 162 | 163 | model.fit( x=[train_lrhs, train_hrms], 164 | y=train_hrhs, 165 | validation_split=0.33, 166 | batch_size=32, 167 | epochs=50, 168 | verbose=1, 169 | callbacks=callbacks) 170 | 171 | model = pnn_net(lrhs_size=(testing_size, testing_size, C), hrms_size=(testing_size, testing_size, c)) 172 | 173 | model.load_weights('./weights/PNN_model.h5') 174 | 175 | """eval""" 176 | print('evaling...') 177 | 178 | new_M = min(M, m*ratio) 179 | new_N = min(N, n*ratio) 180 | 181 | print('output image size:', new_M, new_N) 182 | 183 | test_label = np.zeros((new_M, new_N, C), dtype = 'uint8') 184 | 185 | used_lrhs = lrhs[:new_M//ratio, :new_N//ratio, :] 186 | used_hrms = hrms[:new_M, :new_N, :] 187 | 188 | used_lrhs = upsample_interp23(used_lrhs, ratio) 189 | 190 | used_lrhs = np.expand_dims(used_lrhs, 0) 191 | used_hrms = np.expand_dims(used_hrms, 0) 192 | 193 | used_lrhs = np.pad(used_lrhs, ((0, 0), (left_pad, testing_size), (left_pad, testing_size), (0, 0)), mode='symmetric') 194 | used_hrms = np.pad(used_hrms, ((0, 0), (left_pad, testing_size), (left_pad, testing_size), (0, 0)), mode='symmetric') 195 | 196 | for h in tqdm(range(0, new_M, reconstructing_size)): 197 | for w in range(0, new_N, reconstructing_size): 198 | temp_lrhs = used_lrhs[:, h:h+testing_size, w:w+testing_size, :] 199 | temp_hrms = used_hrms[:, h:h+testing_size, w:w+testing_size, :] 200 | 201 | fake = 
model.predict([temp_lrhs, temp_hrms]) 202 | fake = np.clip(fake, 0, 1) 203 | fake.shape=(testing_size, testing_size, C) 204 | fake = fake[left_pad:(testing_size-left_pad), left_pad:(testing_size-left_pad)] 205 | fake = np.uint8(fake*255) 206 | 207 | if h+testing_size>new_M: 208 | fake = fake[:new_M-h, :, :] 209 | 210 | if w+testing_size>new_N: 211 | fake = fake[:, :new_N-w, :] 212 | 213 | test_label[h:h+reconstructing_size, w:w+reconstructing_size]=fake 214 | 215 | K.clear_session() 216 | gc.collect() 217 | del model 218 | 219 | return np.uint8(test_label) -------------------------------------------------------------------------------- /methods/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: MIT 4 | @author: gaj 5 | E-mail: anjing_guo@hnu.edu.cn 6 | """ 7 | 8 | import cv2 9 | import numpy as np 10 | from scipy import ndimage 11 | from scipy import signal 12 | import scipy.misc as misc 13 | 14 | def upsample_bilinear(image, ratio): 15 | 16 | h,w,c = image.shape 17 | re_image = cv2.resize(image, (w*ratio, h*ratio), cv2.INTER_LINEAR) 18 | 19 | return re_image 20 | 21 | def upsample_bicubic(image, ratio): 22 | 23 | h,w,c = image.shape 24 | re_image = cv2.resize(image, (w*ratio, h*ratio), cv2.INTER_CUBIC) 25 | 26 | return re_image 27 | 28 | def upsample_interp23(image, ratio): 29 | 30 | image = np.transpose(image, (2, 0, 1)) 31 | 32 | b,r,c = image.shape 33 | 34 | CDF23 = 2*np.array([0.5, 0.305334091185, 0, -0.072698593239, 0, 0.021809577942, 0, -0.005192756653, 0, 0.000807762146, 0, -0.000060081482]) 35 | d = CDF23[::-1] 36 | CDF23 = np.insert(CDF23, 0, d[:-1]) 37 | BaseCoeff = CDF23 38 | 39 | first = 1 40 | for z in range(1,np.int(np.log2(ratio))+1): 41 | I1LRU = np.zeros((b, 2**z*r, 2**z*c)) 42 | if first: 43 | I1LRU[:, 1:I1LRU.shape[1]:2, 1:I1LRU.shape[2]:2]=image 44 | first = 0 45 | else: 46 | I1LRU[:,0:I1LRU.shape[1]:2,0:I1LRU.shape[2]:2]=image 47 | 48 | for ii in 
def upsample_mat_interp23(image, ratio=4):
    """Polynomial (interp23-tap) upsampling, MATLAB-compatible layout.

    The result is shifted by 2 pixels with respect to the original MATLAB
    version (see `shift` below).

    :param image: h x w x c numpy array
    :param ratio: integer upsampling factor (default 4)
    :return: (ratio*h) x (ratio*w) x c numpy array
    """
    shift = 2

    h, w, c = image.shape

    # Pre-computed 1-D interpolation taps; the 2-D kernel is their outer product.
    basecoeff = np.array([[-4.63495665e-03, -3.63442646e-03, 3.84904063e-18,
                           5.76678319e-03, 1.08358664e-02, 1.01980790e-02,
                           -9.31747402e-18, -1.75033181e-02, -3.17660068e-02,
                           -2.84531643e-02, 1.85181518e-17, 4.42450253e-02,
                           7.71733386e-02, 6.70554910e-02, -2.85299239e-17,
                           -1.01548683e-01, -1.78708388e-01, -1.60004642e-01,
                           3.61741232e-17, 2.87940558e-01, 6.25431459e-01,
                           8.97067600e-01, 1.00107877e+00, 8.97067600e-01,
                           6.25431459e-01, 2.87940558e-01, 3.61741232e-17,
                           -1.60004642e-01, -1.78708388e-01, -1.01548683e-01,
                           -2.85299239e-17, 6.70554910e-02, 7.71733386e-02,
                           4.42450253e-02, 1.85181518e-17, -2.84531643e-02,
                           -3.17660068e-02, -1.75033181e-02, -9.31747402e-18,
                           1.01980790e-02, 1.08358664e-02, 5.76678319e-03,
                           3.84904063e-18, -3.63442646e-03, -4.63495665e-03]])

    coeff = np.dot(basecoeff.T, basecoeff)

    I1LRU = np.zeros((ratio * h, ratio * w, c))

    # Place the low-resolution samples on the upsampling lattice...
    I1LRU[shift::ratio, shift::ratio, :] = image

    # ...and interpolate each band with a wrap-around 2-D convolution.
    for i in range(c):
        I1LRU[:, :, i] = ndimage.convolve(I1LRU[:, :, i], coeff, mode='wrap')

    return I1LRU

def gaussian2d(N, std):
    """Separable 2-D Gaussian window.

    NOTE(review): the grid -(N-1)/2 .. (N+1)/2 yields N+1 samples per axis
    for odd N; kept unchanged because the copy in MTF_GLP.py uses the same
    grid — confirm against the MTF filter-design callers before altering.

    :param N: nominal window size
    :param std: Gaussian standard deviation
    :return: 2-D numpy array with the window values
    """
    t = np.arange(-(N - 1) / 2, (N + 2) / 2)
    t1, t2 = np.meshgrid(t, t)
    std = np.double(std)
    w = np.exp(-0.5 * (t1 / std) ** 2) * np.exp(-0.5 * (t2 / std) ** 2)
    return w

def kaiser2d(N, beta):
    """Radially interpolated 2-D Kaiser window of size N x N.

    Bug fix: the grid was previously built with
    np.arange(-(N-1)/2, (N+2)/2), producing N+1 abscissae while
    np.kaiser(N, beta) produces N ordinates, so np.interp raised
    ValueError ("fp and xp are not of the same length"). The grid now has
    exactly N points, matching the working copy in MTF_GLP.py.

    :param N: window size
    :param beta: Kaiser shape parameter
    :return: N x N numpy array with the window values
    """
    t = np.arange(-(N - 1) / 2, (N + 1) / 2) / np.double(N - 1)
    t1, t2 = np.meshgrid(t, t)
    t12 = np.sqrt(t1 * t1 + t2 * t2)
    w1 = np.kaiser(N, beta)
    w = np.interp(t12, t, w1)
    # Zero outside the radial support; t12 >= 0, so the lower bound never fires.
    w[t12 > t[-1]] = 0
    w[t12 < t[0]] = 0

    return w
# -*- coding: utf-8 -*-
"""
License: MIT
@author: gaj
E-mail: anjing_guo@hnu.edu.cn

Demo script: run every classical pan-sharpening method on one scene and
report reference and no-reference quality metrics.
"""

import numpy as np
import cv2
import os
from scipy import signal
from PIL import Image
import torch
from methods.Bicubic import Bicubic
from methods.Brovey import Brovey
from methods.PCA import PCA
from methods.IHS import IHS
from methods.SFIM import SFIM
from methods.GS import GS
from methods.Wavelet import Wavelet
from methods.MTF_GLP import MTF_GLP
from methods.MTF_GLP_HPM import MTF_GLP_HPM
from methods.GSA import GSA
from methods.CNMF import CNMF
from methods.GFPCA import GFPCA

from metrics import ref_evaluate, no_ref_evaluate

'''loading data'''

ms_path = r'E:/data/test/wv/lrms/151.tif'
pan_path = r'E:/data/test/wv/pan/151.tif'
gt_path = r'E:/data/test/wv/hrms/151.tif'
save_dir = r'E:/data/test/wv/wv-result/'

'''setting save parameters'''
save_images = True
save_channels = [0, 1, 2]  # BGR-NIR for GF2

if save_images and (not os.path.isdir(save_dir)):
    os.makedirs(save_dir)


def save_img(img, img_name, mode):
    """Clamp img to [0, 1], convert to uint8 and write it under save_dir."""
    img = torch.tensor(img)
    out = img.squeeze().clamp(0, 1).numpy()
    save_fn = save_dir + '/' + img_name
    out = np.uint8(out * 255).astype('uint8')
    Image.fromarray(out, mode).save(save_fn)


original_ms = np.array(Image.open(ms_path), dtype=np.float32)
original_pan = np.expand_dims(np.array(Image.open(pan_path), dtype=np.float32), -1)
original_gt = np.array(Image.open(gt_path), dtype=np.float32)

print('original ms', original_ms.shape)
print('original pan', original_pan.shape)
print('original gt', original_gt.shape)

'''normalization'''
used_ms = original_ms / 255.
used_pan = original_pan / 255.
gt = original_gt / 255.

gt = np.uint8(gt * 255).astype('uint8')
print('lrms shape: ', used_ms.shape, 'pan shape: ', used_pan.shape)

'''evaluating all methods'''
ref_results = {}
ref_results.update({'metrics: ': ' PSNR, SSIM, SAM, ERGAS, SCC, Q'})
no_ref_results = {}
no_ref_results.update({'metrics: ': ' D_lamda, D_s, QNR'})

# (display name, method callable, result-dict key, output filename).
# Refactored: the original repeated the same evaluate/save stanza twelve
# times; a table plus one loop performs the identical sequence of calls.
methods = [
    ('Bicubic', Bicubic, 'Bicubic ', 'bicubic.tif'),
    ('Brovey', Brovey, 'Brovey ', 'Brovey.tif'),
    ('PCA', PCA, 'PCA ', 'PCA.tif'),
    ('IHS', IHS, 'IHS ', 'IHS.tif'),
    ('SFIM', SFIM, 'SFIM ', 'SFIM.tif'),
    ('GS', GS, 'GS ', 'GS.tif'),
    ('Wavelet', Wavelet, 'Wavelet ', 'Wavelet.tif'),
    ('MTF_GLP', MTF_GLP, 'MTF_GLP ', 'MTF_GLP.tif'),
    ('MTF_GLP_HPM', MTF_GLP_HPM, 'MTF_GLP_HPM', 'MTF_GLP_HPM.tif'),
    ('GSA', GSA, 'GSA ', 'GSA.tif'),
    ('CNMF', CNMF, 'CNMF ', 'CNMF.tif'),
    ('GFPCA', GFPCA, 'GFPCA ', 'GFPCA.tif'),
]

for name, method, key, fname in methods:
    print('evaluating %s method' % name)
    fused_image = method(used_pan[:, :, :], used_ms[:, :, :])
    fused_image_uint8 = np.uint8(fused_image * 255)
    temp_ref_results = ref_evaluate(fused_image_uint8, gt)
    temp_no_ref_results = no_ref_evaluate(fused_image_uint8,
                                          np.uint8(used_pan * 255).astype('uint8'),
                                          np.uint8(used_ms * 255).astype('uint8'))
    ref_results.update({key: temp_ref_results})
    no_ref_results.update({key: temp_no_ref_results})
    print(name, fused_image.shape, fused_image.max(), fused_image.min())
    # save (4-channel BGR-NIR image stored via the CMYK raw mode)
    if save_images:
        save_img(fused_image, fname, mode='CMYK')

''''print result'''
print('################## reference comparision #######################')
for index, i in enumerate(ref_results):
    if index == 0:
        print(i, ref_results[i])
    else:
        print(i, [round(j, 4) for j in ref_results[i]])
print('################## reference comparision #######################')

print()
print()
print()

print('################## no reference comparision ####################')
for index, i in enumerate(no_ref_results):
    if index == 0:
        print(i, no_ref_results[i])
    else:
        print(i, [round(j, 4) for j in no_ref_results[i]])
print('################## no reference comparision ####################')
-------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /methods/demo_all_methods.py-v2: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: MIT 4 | @author: gaj 5 | E-mail: anjing_guo@hnu.edu.cn 6 | """ 7 | 8 | import numpy as np 9 | import cv2 10 | import os 11 | from scipy import signal 12 | from PIL import Image 13 | import torch 14 | from methods.Bicubic import Bicubic 15 | from methods.Brovey import Brovey 16 | from methods.PCA import PCA 17 | from methods.IHS import IHS 18 | from methods.SFIM import SFIM 19 | from methods.GS import GS 20 | from methods.Wavelet import Wavelet 21 | from methods.MTF_GLP import MTF_GLP 22 | from methods.MTF_GLP_HPM import MTF_GLP_HPM 23 | from methods.GSA import GSA 24 | from methods.CNMF import CNMF 25 | from methods.GFPCA import GFPCA 26 | 27 | from metrics import ref_evaluate, no_ref_evaluate 28 | 29 | '''loading data''' 30 | 31 | ms_path = r'E:/data/test/wv/lrms/151.tif' 32 | pan_path = r'E:/data/test/wv/pan/151.tif' 33 | gt_path = r'E:/data/test/wv/hrms/151.tif' 34 | save_dir= 
r'E:/data/test/wv/wv-result/' 35 | 36 | '''setting save parameters''' 37 | save_images = True 38 | save_channels = [0, 1, 2] # BGR-NIR for GF2 39 | 40 | if save_images and (not os.path.isdir(save_dir)): 41 | os.makedirs(save_dir) 42 | 43 | def save_img(img, img_name, mode): 44 | img = torch.tensor(img) 45 | save_img = img.squeeze().clamp(0, 1).numpy() 46 | # save img 47 | save_fn = save_dir + '/' + img_name 48 | save_img = np.uint8(save_img * 255).astype('uint8') 49 | save_img = Image.fromarray(save_img, mode) 50 | save_img.save(save_fn) 51 | 52 | original_ms = np.array(Image.open(ms_path), dtype=np.float32) 53 | original_pan = np.expand_dims(np.array(Image.open(pan_path), dtype=np.float32), -1) 54 | original_gt = np.array(Image.open(gt_path), dtype=np.float32) 55 | 56 | print('original ms', original_ms.shape) 57 | print('original pan', original_pan.shape) 58 | print('original gt', original_gt.shape) 59 | 60 | 61 | '''normalization''' 62 | # used_ms = original_ms / 255. 63 | # used_pan = original_pan / 255. 64 | # gt = original_gt / 255. 
65 | 66 | max_patch, min_patch = np.max(original_ms, axis=(0,1)), np.min(original_ms, axis=(0,1)) 67 | original_msi = np.float32(original_ms-min_patch) / (max_patch - min_patch) 68 | 69 | max_patch, min_patch = np.max(original_pan, axis=(0,1)), np.min(original_pan, axis=(0,1)) 70 | original_pan = np.float32(original_pan-min_patch) / (max_patch - min_patch) 71 | 72 | '''generating ms image with gaussian kernel''' 73 | sig = (1/(2*(2.772587)/4**2))**0.5 74 | kernel = np.multiply(cv2.getGaussianKernel(9, sig), cv2.getGaussianKernel(9,sig).T) 75 | new_lrhs = [] 76 | for i in range(original_msi.shape[-1]): 77 | temp = signal.convolve2d(original_msi[:,:, i], kernel, boundary='wrap',mode='same') 78 | temp = np.expand_dims(temp, -1) 79 | new_lrhs.append(temp) 80 | new_lrhs = np.concatenate(new_lrhs, axis=-1) 81 | used_ms = new_lrhs[0::4, 0::4, :] 82 | 83 | #'''generating ms image with bicubic interpolation''' 84 | #used_ms = cv2.resize(original_msi, (original_msi.shape[1]//4, original_msi.shape[0]//4), cv2.INTER_CUBIC) 85 | 86 | '''generating pan image with gaussian kernel''' 87 | used_pan = signal.convolve2d(original_pan, kernel, boundary='wrap',mode='same') 88 | used_pan = np.expand_dims(used_pan, -1) 89 | used_pan = used_pan[0::4, 0::4, :] 90 | 91 | gt = np.uint8(gt*255).astype('uint8') 92 | print('lrms shape: ', used_ms.shape, 'pan shape: ', used_pan.shape) 93 | 94 | 95 | '''evaluating all methods''' 96 | ref_results={} 97 | ref_results.update({'metrics: ':' PSNR, SSIM, SAM, ERGAS, SCC, Q'}) 98 | no_ref_results={} 99 | no_ref_results.update({'metrics: ':' D_lamda, D_s, QNR'}) 100 | 101 | '''Bicubic method''' 102 | print('evaluating Bicubic method') 103 | fused_image = Bicubic(used_pan, used_ms) 104 | fused_image_uint8 = np.uint8(fused_image*255) 105 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 106 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 107 | 
ref_results.update({'Bicubic ':temp_ref_results}) 108 | no_ref_results.update({'Bicubic ':temp_no_ref_results}) 109 | print('Bicubic ', fused_image.shape, fused_image.max(), fused_image.min()) 110 | #save 111 | if save_images: 112 | save_img(fused_image, 'bicubic.tif', mode='CMYK') 113 | 114 | '''Brovey method''' 115 | print('evaluating Brovey method') 116 | fused_image = Brovey(used_pan[:, :, :], used_ms[:, :, :]) 117 | fused_image_uint8 = np.uint8(fused_image*255) 118 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 119 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 120 | ref_results.update({'Brovey ':temp_ref_results}) 121 | no_ref_results.update({'Brovey ':temp_no_ref_results}) 122 | print('Brovey ', fused_image.shape, fused_image.max(), fused_image.min()) 123 | #save 124 | if save_images: 125 | save_img(fused_image, 'Brovey.tif', mode='CMYK') 126 | 127 | 128 | 129 | '''PCA method''' 130 | print('evaluating PCA method') 131 | fused_image = PCA(used_pan[:, :, :], used_ms[:, :, :]) 132 | fused_image_uint8 = np.uint8(fused_image*255) 133 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 134 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 135 | ref_results.update({'PCA ':temp_ref_results}) 136 | no_ref_results.update({'PCA ':temp_no_ref_results}) 137 | print('PCA ', fused_image.shape, fused_image.max(), fused_image.min()) 138 | #save 139 | if save_images: 140 | save_img(fused_image, 'PCA.tif', mode='CMYK') 141 | 142 | 143 | 144 | '''IHS method''' 145 | print('evaluating IHS method') 146 | fused_image = IHS(used_pan[:, :, :], used_ms[:, :, :]) 147 | fused_image_uint8 = np.uint8(fused_image*255) 148 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 149 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), 
np.uint8(used_ms*255).astype('uint8')) 150 | ref_results.update({'IHS ':temp_ref_results}) 151 | no_ref_results.update({'IHS ':temp_no_ref_results}) 152 | print('IHS ', fused_image.shape, fused_image.max(), fused_image.min()) 153 | #save 154 | if save_images: 155 | save_img(fused_image, 'IHS.tif', mode='CMYK') 156 | 157 | 158 | 159 | '''SFIM method''' 160 | print('evaluating SFIM method') 161 | fused_image = SFIM(used_pan[:, :, :], used_ms[:, :, :]) 162 | fused_image_uint8 = np.uint8(fused_image*255) 163 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 164 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 165 | ref_results.update({'SFIM ':temp_ref_results}) 166 | no_ref_results.update({'SFIM ':temp_no_ref_results}) 167 | print('SFIM ', fused_image.shape, fused_image.max(), fused_image.min()) 168 | #save 169 | if save_images: 170 | save_img(fused_image, 'SFIM.tif', mode='CMYK') 171 | 172 | 173 | 174 | '''GS method''' 175 | print('evaluating GS method') 176 | fused_image = GS(used_pan[:, :, :], used_ms[:, :, :]) 177 | fused_image_uint8 = np.uint8(fused_image*255) 178 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 179 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 180 | ref_results.update({'GS ':temp_ref_results}) 181 | no_ref_results.update({'GS ':temp_no_ref_results}) 182 | print('GS ', fused_image.shape, fused_image.max(), fused_image.min()) 183 | #save 184 | if save_images: 185 | save_img(fused_image, 'GS.tif', mode='CMYK') 186 | 187 | '''Wavelet method''' 188 | print('evaluating Wavelet method') 189 | fused_image = Wavelet(used_pan[:, :, :], used_ms[:, :, :]) 190 | fused_image_uint8 = np.uint8(fused_image*255) 191 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 192 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 193 | 
ref_results.update({'Wavelet ':temp_ref_results}) 194 | no_ref_results.update({'Wavelet ':temp_no_ref_results}) 195 | print('Wavelet ', fused_image.shape, fused_image.max(), fused_image.min()) 196 | #save 197 | if save_images: 198 | save_img(fused_image, 'Wavelet.tif', mode='CMYK') 199 | 200 | 201 | '''MTF_GLP method''' 202 | print('evaluating MTF_GLP method') 203 | fused_image = MTF_GLP(used_pan[:, :, :], used_ms[:, :, :]) 204 | fused_image_uint8 = np.uint8(fused_image*255) 205 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 206 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 207 | ref_results.update({'MTF_GLP ':temp_ref_results}) 208 | no_ref_results.update({'MTF_GLP ':temp_no_ref_results}) 209 | print('MTF_GLP ', fused_image.shape, fused_image.max(), fused_image.min()) 210 | #save 211 | if save_images: 212 | save_img(fused_image, 'MTF_GLP.tif', mode='CMYK') 213 | 214 | 215 | 216 | '''MTF_GLP_HPM method''' 217 | print('evaluating MTF_GLP_HPM method') 218 | fused_image = MTF_GLP_HPM(used_pan[:, :, :], used_ms[:, :, :]) 219 | fused_image_uint8 = np.uint8(fused_image*255) 220 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 221 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 222 | ref_results.update({'MTF_GLP_HPM':temp_ref_results}) 223 | no_ref_results.update({'MTF_GLP_HPM':temp_no_ref_results}) 224 | 225 | print('MTF_GLP_HPM ', fused_image.shape, fused_image.max(), fused_image.min()) 226 | #save 227 | if save_images: 228 | save_img(fused_image, 'MTF_GLP_HPM.tif', mode='CMYK') 229 | 230 | 231 | 232 | '''GSA method''' 233 | print('evaluating GSA method') 234 | fused_image = GSA(used_pan[:, :, :], used_ms[:, :, :]) 235 | fused_image_uint8 = np.uint8(fused_image*255) 236 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 237 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 
238 | ref_results.update({'GSA ':temp_ref_results}) 239 | no_ref_results.update({'GSA ':temp_no_ref_results}) 240 | 241 | print('GSA ', fused_image.shape, fused_image.max(), fused_image.min()) 242 | #save 243 | if save_images: 244 | save_img(fused_image, 'GSA.tif', mode='CMYK') 245 | 246 | 247 | '''CNMF method''' 248 | print('evaluating CNMF method') 249 | fused_image = CNMF(used_pan[:, :, :], used_ms[:, :, :]) 250 | fused_image_uint8 = np.uint8(fused_image*255) 251 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 252 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 253 | ref_results.update({'CNMF ':temp_ref_results}) 254 | no_ref_results.update({'CNMF ':temp_no_ref_results}) 255 | 256 | print('CNMF ', fused_image.shape, fused_image.max(), fused_image.min()) 257 | #save 258 | if save_images: 259 | save_img(fused_image, 'CNMF.tif', mode='CMYK') 260 | 261 | 262 | '''GFPCA method''' 263 | print('evaluating GFPCA method') 264 | fused_image = GFPCA(used_pan[:, :, :], used_ms[:, :, :]) 265 | fused_image_uint8 = np.uint8(fused_image*255) 266 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 267 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 268 | ref_results.update({'GFPCA ':temp_ref_results}) 269 | no_ref_results.update({'GFPCA ':temp_no_ref_results}) 270 | 271 | print('GFPCA ', fused_image.shape, fused_image.max(), fused_image.min()) 272 | #save 273 | if save_images: 274 | save_img(fused_image, 'GFPCA.tif', mode='CMYK') 275 | 276 | 277 | 278 | ''''print result''' 279 | print('################## reference comparision #######################') 280 | for index, i in enumerate(ref_results): 281 | if index == 0: 282 | print(i, ref_results[i]) 283 | else: 284 | print(i, [round(j, 4) for j in ref_results[i]]) 285 | print('################## reference comparision #######################') 286 | 287 | print() 288 | print() 289 | print() 290 | 291 | 
292 | print('################## no reference comparision ####################') 293 | for index, i in enumerate(no_ref_results): 294 | if index == 0: 295 | print(i, no_ref_results[i]) 296 | else: 297 | print(i, [round(j, 4) for j in no_ref_results[i]]) 298 | print('################## no reference comparision ####################') 299 | 300 | 301 | -------------------------------------------------------------------------------- /methods/demo_all_methods-gf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: MIT 4 | @author: gaj 5 | E-mail: anjing_guo@hnu.edu.cn 6 | """ 7 | 8 | import numpy as np 9 | import cv2 10 | import os 11 | from scipy import signal 12 | from PIL import Image 13 | import torch 14 | from methods.Bicubic import Bicubic 15 | from methods.Brovey import Brovey 16 | from methods.PCA import PCA 17 | from methods.IHS import IHS 18 | from methods.SFIM import SFIM 19 | from methods.GS import GS 20 | from methods.Wavelet import Wavelet 21 | from methods.MTF_GLP import MTF_GLP 22 | from methods.MTF_GLP_HPM import MTF_GLP_HPM 23 | from methods.GSA import GSA 24 | from methods.CNMF import CNMF 25 | from methods.GFPCA import GFPCA 26 | 27 | from metrics import ref_evaluate, no_ref_evaluate 28 | 29 | '''loading data''' 30 | 31 | ms_path = r'E:/data/test/gf/lrms/34.tif' 32 | pan_path = r'E:/data/test/gf/pan/34.tif' 33 | gt_path = r'E:/data/test/gf/hrms/34.tif' 34 | save_dir= r'E:/data/test/gf/gf-result/' 35 | pnn_path = r'E:/data/test/gf/gf-result/PNN.tif' 36 | rscnnca_path = r'E:/data/test/gf/gf-result/RSCNNCA.tif' 37 | 38 | '''setting save parameters''' 39 | save_images = True 40 | save_channels = [0, 1, 2] # BGR-NIR for GF2 41 | 42 | if save_images and (not os.path.isdir(save_dir)): 43 | os.makedirs(save_dir) 44 | 45 | def save_img(img, img_name, mode): 46 | img = torch.tensor(img) 47 | save_img = img.squeeze().clamp(0, 1).numpy() 48 | # save img 49 | save_fn = save_dir + '/' 
+ img_name 50 | save_img = np.uint8(save_img * 255).astype('uint8') 51 | save_img = Image.fromarray(save_img, mode) 52 | save_img.save(save_fn) 53 | 54 | original_ms = np.array(Image.open(ms_path)) 55 | original_pan = np.expand_dims(np.array(Image.open(pan_path), dtype=np.float32), -1) 56 | original_gt = np.array(Image.open(gt_path)) 57 | 58 | print('original ms', original_ms.shape) 59 | print('original pan', original_pan.shape) 60 | print('original gt', original_gt.shape) 61 | 62 | 63 | '''normalization''' 64 | used_ms = original_ms / 255. 65 | used_pan = original_pan / 255. 66 | gt = original_gt / 255. 67 | 68 | # max_patch, min_patch = np.max(original_ms, axis=(0,1)), np.min(original_ms, axis=(0,1)) 69 | # original_msi = np.float32(original_ms-min_patch) / (max_patch - min_patch) 70 | # 71 | # max_patch, min_patch = np.max(original_pan, axis=(0,1)), np.min(original_pan, axis=(0,1)) 72 | # original_pan = np.float32(original_pan-min_patch) / (max_patch - min_patch) 73 | # original_pan = np.expand_dims(original_pan, -1) 74 | # 75 | # max_patch, min_patch = np.max(original_gt, axis=(0,1)), np.min(original_gt, axis=(0,1)) 76 | # original_gt = np.float32(original_gt-min_patch) / (max_patch - min_patch) 77 | 78 | gt = np.uint8(gt*255) 79 | 80 | # used_ms = original_msi 81 | # used_pan = original_pan 82 | 83 | 84 | 85 | 86 | print('lrms shape: ', used_ms.shape, 'pan shape: ', used_pan.shape, 'gt', gt.shape) 87 | 88 | '''evaluating all methods''' 89 | ref_results={} 90 | ref_results.update({'metrics: ':' PSNR, SSIM, SAM, ERGAS, SCC, Q'}) 91 | no_ref_results={} 92 | no_ref_results.update({'metrics: ':' D_lamda, D_s, QNR'}) 93 | 94 | '''Bicubic method''' 95 | print('evaluating Bicubic method') 96 | fused_image = Bicubic(used_pan, used_ms) 97 | fused_image_uint8 = np.uint8(fused_image*255) 98 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 99 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), 
np.uint8(used_ms*255).astype('uint8')) 100 | ref_results.update({'Bicubic ':temp_ref_results}) 101 | no_ref_results.update({'Bicubic ':temp_no_ref_results}) 102 | print('Bicubic ', fused_image.shape, fused_image.max(), fused_image.min()) 103 | #save 104 | if save_images: 105 | save_img(fused_image, 'bicubic.tif', mode='CMYK') 106 | 107 | '''Brovey method''' 108 | print('evaluating Brovey method') 109 | fused_image = Brovey(used_pan[:, :, :], used_ms[:, :, :]) 110 | fused_image_uint8 = np.uint8(fused_image*255) 111 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 112 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 113 | ref_results.update({'Brovey ':temp_ref_results}) 114 | no_ref_results.update({'Brovey ':temp_no_ref_results}) 115 | print('Brovey ', fused_image.shape, fused_image.max(), fused_image.min()) 116 | #save 117 | if save_images: 118 | save_img(fused_image, 'Brovey.tif', mode='CMYK') 119 | 120 | 121 | 122 | '''PCA method''' 123 | print('evaluating PCA method') 124 | fused_image = PCA(used_pan[:, :, :], used_ms[:, :, :]) 125 | fused_image_uint8 = np.uint8(fused_image*255) 126 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 127 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 128 | ref_results.update({'PCA ':temp_ref_results}) 129 | no_ref_results.update({'PCA ':temp_no_ref_results}) 130 | print('PCA ', fused_image.shape, fused_image.max(), fused_image.min()) 131 | #save 132 | if save_images: 133 | save_img(fused_image, 'PCA.tif', mode='CMYK') 134 | 135 | 136 | 137 | '''IHS method''' 138 | print('evaluating IHS method') 139 | fused_image = IHS(used_pan[:, :, :], used_ms[:, :, :]) 140 | fused_image_uint8 = np.uint8(fused_image*255) 141 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 142 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, 
np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 143 | ref_results.update({'IHS ':temp_ref_results}) 144 | no_ref_results.update({'IHS ':temp_no_ref_results}) 145 | print('IHS ', fused_image.shape, fused_image.max(), fused_image.min()) 146 | #save 147 | if save_images: 148 | save_img(fused_image, 'IHS.tif', mode='CMYK') 149 | 150 | 151 | 152 | '''SFIM method''' 153 | print('evaluating SFIM method') 154 | fused_image = SFIM(used_pan[:, :, :], used_ms[:, :, :]) 155 | fused_image_uint8 = np.uint8(fused_image*255) 156 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 157 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255).astype('uint8'), np.uint8(used_ms*255).astype('uint8')) 158 | ref_results.update({'SFIM ':temp_ref_results}) 159 | no_ref_results.update({'SFIM ':temp_no_ref_results}) 160 | print('SFIM ', fused_image.shape, fused_image.max(), fused_image.min()) 161 | #save 162 | if save_images: 163 | save_img(fused_image, 'SFIM.tif', mode='CMYK') 164 | 165 | 166 | 167 | '''GS method''' 168 | print('evaluating GS method') 169 | fused_image = GS(used_pan[:, :, :], used_ms[:, :, :]) 170 | fused_image_uint8 = np.uint8(fused_image*255) 171 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 172 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 173 | ref_results.update({'GS ':temp_ref_results}) 174 | no_ref_results.update({'GS ':temp_no_ref_results}) 175 | print('GS ', fused_image.shape, fused_image.max(), fused_image.min()) 176 | #save 177 | if save_images: 178 | save_img(fused_image, 'GS.tif', mode='CMYK') 179 | 180 | '''Wavelet method''' 181 | print('evaluating Wavelet method') 182 | fused_image = Wavelet(used_pan[:, :, :], used_ms[:, :, :]) 183 | fused_image_uint8 = np.uint8(fused_image*255) 184 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 185 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), 
np.uint8(used_ms*255)) 186 | ref_results.update({'Wavelet ':temp_ref_results}) 187 | no_ref_results.update({'Wavelet ':temp_no_ref_results}) 188 | print('Wavelet ', fused_image.shape, fused_image.max(), fused_image.min()) 189 | #save 190 | if save_images: 191 | save_img(fused_image, 'Wavelet.tif', mode='CMYK') 192 | 193 | 194 | '''MTF_GLP method''' 195 | print('evaluating MTF_GLP method') 196 | fused_image = MTF_GLP(used_pan[:, :, :], used_ms[:, :, :]) 197 | fused_image_uint8 = np.uint8(fused_image*255) 198 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 199 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 200 | ref_results.update({'MTF_GLP ':temp_ref_results}) 201 | no_ref_results.update({'MTF_GLP ':temp_no_ref_results}) 202 | print('MTF_GLP ', fused_image.shape, fused_image.max(), fused_image.min()) 203 | #save 204 | if save_images: 205 | save_img(fused_image, 'MTF_GLP.tif', mode='CMYK') 206 | 207 | 208 | 209 | '''MTF_GLP_HPM method''' 210 | print('evaluating MTF_GLP_HPM method') 211 | fused_image = MTF_GLP_HPM(used_pan[:, :, :], used_ms[:, :, :]) 212 | fused_image_uint8 = np.uint8(fused_image*255) 213 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 214 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 215 | ref_results.update({'MTF_GLP_HPM':temp_ref_results}) 216 | no_ref_results.update({'MTF_GLP_HPM':temp_no_ref_results}) 217 | 218 | print('MTF_GLP_HPM ', fused_image.shape, fused_image.max(), fused_image.min()) 219 | #save 220 | if save_images: 221 | save_img(fused_image, 'MTF_GLP_HPM.tif', mode='CMYK') 222 | 223 | 224 | 225 | '''GSA method''' 226 | print('evaluating GSA method') 227 | fused_image = GSA(used_pan[:, :, :], used_ms[:, :, :]) 228 | fused_image_uint8 = np.uint8(fused_image*255) 229 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 230 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, 
np.uint8(used_pan*255), np.uint8(used_ms*255)) 231 | ref_results.update({'GSA ':temp_ref_results}) 232 | no_ref_results.update({'GSA ':temp_no_ref_results}) 233 | 234 | print('GSA ', fused_image.shape, fused_image.max(), fused_image.min()) 235 | #save 236 | if save_images: 237 | save_img(fused_image, 'GSA.tif', mode='CMYK') 238 | 239 | 240 | '''CNMF method''' 241 | print('evaluating CNMF method') 242 | fused_image = CNMF(used_pan[:, :, :], used_ms[:, :, :]) 243 | fused_image_uint8 = np.uint8(fused_image*255) 244 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 245 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 246 | ref_results.update({'CNMF ':temp_ref_results}) 247 | no_ref_results.update({'CNMF ':temp_no_ref_results}) 248 | 249 | print('CNMF ', fused_image.shape, fused_image.max(), fused_image.min()) 250 | #save 251 | if save_images: 252 | save_img(fused_image, 'CNMF.tif', mode='CMYK') 253 | 254 | 255 | '''GFPCA method''' 256 | print('evaluating GFPCA method') 257 | fused_image = GFPCA(used_pan[:, :, :], used_ms[:, :, :]) 258 | fused_image_uint8 = np.uint8(fused_image*255) 259 | temp_ref_results = ref_evaluate(fused_image_uint8, gt) 260 | temp_no_ref_results = no_ref_evaluate(fused_image_uint8, np.uint8(used_pan*255), np.uint8(used_ms*255)) 261 | ref_results.update({'GFPCA ':temp_ref_results}) 262 | no_ref_results.update({'GFPCA ':temp_no_ref_results}) 263 | 264 | print('GFPCA ', fused_image.shape, fused_image.max(), fused_image.min()) 265 | #save 266 | if save_images: 267 | save_img(fused_image, 'GFPCA.tif', mode='CMYK') 268 | 269 | '''PNN method''' 270 | print('evaluating PNN method') 271 | fused_image = np.array(Image.open(pnn_path)) 272 | temp_ref_results = ref_evaluate(fused_image, gt) 273 | temp_no_ref_results = no_ref_evaluate(fused_image, np.uint8(used_pan*255), np.uint8(used_ms*255)) 274 | ref_results.update({'PNN ':temp_ref_results}) 275 | no_ref_results.update({'PNN 
':temp_no_ref_results}) 276 | 277 | 278 | '''RSCNNCA method''' 279 | print('evaluating RSCNNCA method') 280 | fused_image = np.array(Image.open(rscnnca_path)) 281 | temp_ref_results = ref_evaluate(fused_image, gt) 282 | temp_no_ref_results = no_ref_evaluate(fused_image, np.uint8(used_pan*255), np.uint8(used_ms*255)) 283 | ref_results.update({'RSCNNCA ':temp_ref_results}) 284 | no_ref_results.update({'RSCNNCA ':temp_no_ref_results}) 285 | 286 | filename = 'eval-gf.txt' 287 | with open(filename, 'w') as f: 288 | 289 | ''''print result''' 290 | print('################## reference comparision #######################') 291 | for index, i in enumerate(ref_results): # i=key 292 | if index == 0: 293 | print(i, ref_results[i]) 294 | f.write(i + ' ' + ref_results[i] + ' ') 295 | else: 296 | print(i, [round(j, 4) for j in ref_results[i]]) 297 | x = 0 298 | for j in no_ref_results[i]: 299 | if x == 0: 300 | f.write(i + ' ' + str(round(j, 4))) 301 | x += 1 302 | else: 303 | f.write(str(round(j, 4)) + ' ') 304 | f.write('\n') 305 | print('################## reference comparision #######################') 306 | 307 | print() 308 | print() 309 | print() 310 | 311 | 312 | 313 | 314 | print('################## no reference comparision ####################') 315 | for index, i in enumerate(no_ref_results): 316 | if index == 0: 317 | print(i, no_ref_results[i]) 318 | f.write(i + ' ' + no_ref_results[i] + ' ') 319 | else: 320 | print(i, [round(j, 4) for j in no_ref_results[i]]) 321 | x = 0 322 | for j in no_ref_results[i]: 323 | if x == 0: 324 | f.write(i + ' ' + str(round(j, 4))) 325 | x += 1 326 | else: 327 | f.write(str(round(j, 4)) + ' ') 328 | f.write('\n') 329 | print('################## no reference comparision ####################') 330 | 331 | # print(ref_results) 332 | # print(no_ref_results) 333 | 334 | print('write done...') 335 | 336 | -------------------------------------------------------------------------------- /methods/demo_all_methods.py: 
-------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: MIT 4 | @author: gaj 5 | E-mail: anjing_guo@hnu.edu.cn 6 | """ 7 | 8 | import numpy as np 9 | import cv2 10 | import os 11 | from scipy import signal 12 | from PIL import Image 13 | import torch 14 | from methods.Bicubic import Bicubic 15 | from methods.Brovey import Brovey 16 | from methods.PCA import PCA 17 | from methods.IHS import IHS 18 | from methods.SFIM import SFIM 19 | from methods.GS import GS 20 | from methods.Wavelet import Wavelet 21 | from methods.MTF_GLP import MTF_GLP 22 | from methods.MTF_GLP_HPM import MTF_GLP_HPM 23 | from methods.GSA import GSA 24 | from methods.CNMF import CNMF 25 | from methods.GFPCA import GFPCA 26 | 27 | from metrics import ref_evaluate, no_ref_evaluate 28 | 29 | '''loading data''' 30 | 31 | ms_path = r'E:/data/test/wv/lrms/151.tif' 32 | pan_path = r'E:/data/test/wv/pan/151.tif' 33 | gt_path = r'E:/data/test/wv/hrms/151.tif' 34 | save_dir= r'E:/data/test/wv/wv-result/' 35 | pnn_path = r'E:/data/test/wv/wv-result/PNN.tif' 36 | rscnnca_path = r'E:/data/test/wv/wv-result/RSCNNCA.tif' 37 | 38 | '''setting save parameters''' 39 | save_images = True 40 | save_channels = [0, 1, 2] # BGR-NIR for GF2 41 | 42 | if save_images and (not os.path.isdir(save_dir)): 43 | os.makedirs(save_dir) 44 | 45 | def save_img(img, img_name, mode): 46 | img = torch.tensor(img) 47 | save_img = img.squeeze().clamp(0, 1).numpy() 48 | # save img 49 | save_fn = save_dir + '/' + img_name 50 | save_img = np.uint8(save_img * 255).astype('uint8') 51 | save_img = Image.fromarray(save_img, mode) 52 | save_img.save(save_fn) 53 | 54 | original_ms = np.array(Image.open(ms_path)) 55 | original_pan = np.expand_dims(np.array(Image.open(pan_path), dtype=np.float32), -1) 56 | original_gt = np.array(Image.open(gt_path)) 57 | 58 | print('original ms', original_ms.shape) 59 | print('original pan', original_pan.shape) 60 | print('original gt', 
'''normalization'''
# Inputs come from the loading section above this block:
# original_ms / original_pan / original_gt are assumed uint8-range — TODO confirm.
used_ms = original_ms / 255.
used_pan = original_pan / 255.
gt = original_gt / 255.

# Round-trip through [0, 1] and back so gt is a clean uint8 reference image.
gt = np.uint8(gt * 255)

print('lrms shape: ', used_ms.shape, 'pan shape: ', used_pan.shape, 'gt', gt.shape)

'''evaluating all methods'''
ref_results = {}
ref_results.update({'metrics: ': ' PSNR, SSIM, SAM, ERGAS, SCC, Q'})
no_ref_results = {}
no_ref_results.update({'metrics: ': ' D_lamda, D_s, QNR'})

# uint8 versions of the inputs, shared by every no-reference evaluation
pan_uint8 = np.uint8(used_pan * 255)
ms_uint8 = np.uint8(used_ms * 255)

# (result-dict key, fusion function, output filename) — keys kept byte-identical
# to the original so eval-wv.txt stays format-compatible.
_METHODS = [
    ('Bicubic ', Bicubic, 'bicubic.tif'),
    ('Brovey ', Brovey, 'Brovey.tif'),
    ('PCA ', PCA, 'PCA.tif'),
    ('IHS ', IHS, 'IHS.tif'),
    ('SFIM ', SFIM, 'SFIM.tif'),
    ('GS ', GS, 'GS.tif'),
    ('Wavelet ', Wavelet, 'Wavelet.tif'),
    ('MTF_GLP ', MTF_GLP, 'MTF_GLP.tif'),
    ('MTF_GLP_HPM', MTF_GLP_HPM, 'MTF_GLP_HPM.tif'),
    ('GSA ', GSA, 'GSA.tif'),
    ('CNMF ', CNMF, 'CNMF.tif'),
    ('GFPCA ', GFPCA, 'GFPCA.tif'),
]

# Evaluate every classical fusion method with the same recipe (this replaces
# twelve near-identical copy-pasted sections).
for label, method, out_name in _METHODS:
    name = label.strip()
    print('evaluating %s method' % name)
    fused_image = method(used_pan, used_ms)
    fused_image_uint8 = np.uint8(fused_image * 255)
    ref_results[label] = ref_evaluate(fused_image_uint8, gt)
    no_ref_results[label] = no_ref_evaluate(fused_image_uint8, pan_uint8, ms_uint8)
    print(name, ' ', fused_image.shape, fused_image.max(), fused_image.min())
    if save_images:
        save_img(fused_image, out_name, mode='CMYK')

# Deep-learning results are read from files produced elsewhere (already uint8,
# so no *255 rescale and no image save here — same as the original flow).
for label, path in (('PNN ', pnn_path), ('RSCNNCA ', rscnnca_path)):
    print('evaluating %s method' % label.strip())
    fused_image = np.array(Image.open(path))
    ref_results[label] = ref_evaluate(fused_image, gt)
    no_ref_results[label] = no_ref_evaluate(fused_image, pan_uint8, ms_uint8)


def _write_results(f, results):
    """Print one results table and write it to *f*, one line per method.

    The first dict entry is a header (its value is a plain string); every
    other value is a list of floats rounded to 4 decimals.
    """
    for index, key in enumerate(results):
        if index == 0:
            print(key, results[key])
            # BUGFIX: terminate the header line (it previously ran straight
            # into the first data row).
            f.write(key + ' ' + results[key] + '\n')
        else:
            rounded = [round(value, 4) for value in results[key]]
            print(key, rounded)
            # BUGFIX: values are now space-separated (the first two values
            # used to concatenate with no separator), and the reference table
            # now writes *its own* values — the original wrote
            # no_ref_results[i] inside the reference section.
            f.write(key + ' ' + ' '.join(str(value) for value in rounded) + '\n')


filename = 'eval-wv.txt'
with open(filename, 'w') as f:
    '''print result'''
    print('################## reference comparision #######################')
    _write_results(f, ref_results)
    print('################## reference comparision #######################')

    print()
    print()
    print()

    print('################## no reference comparision ####################')
    _write_results(f, no_ref_results)
    print('################## no reference comparision ####################')

print('write done...')

# =====================================================================
# methods/metrics.py (file boundary in the original dump)
# =====================================================================
# -*- coding: utf-8 -*-
"""
License: GNU-3.0
Code Reference:https://github.com/wasaCheney/IQA_pansharpening_python
"""

import numpy as np
from scipy import ndimage
import cv2
def sam(img1, img2):
    """SAM (Spectral Angle Mapper) for 3D image, shape (H, W, C); uint or float [0, 1].

    Returns the mean per-pixel spectral angle in radians (0 = identical spectra).
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    assert img1.ndim == 3 and img1.shape[2] > 1, "image n_channels should be greater than 1"
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    inner_product = (img1_ * img2_).sum(axis=2)
    img1_spectral_norm = np.sqrt((img1_**2).sum(axis=2))
    img2_spectral_norm = np.sqrt((img2_**2).sum(axis=2))
    # eps keeps the division stable on zero spectra; clip keeps arccos in-domain
    cos_theta = (inner_product / (img1_spectral_norm * img2_spectral_norm + np.finfo(np.float64).eps)).clip(min=0, max=1)
    return np.mean(np.arccos(cos_theta))


def psnr(img1, img2, dynamic_range=255):
    """PSNR metric; dynamic_range is 255 for uint8, 2047 for 11-bit uint16 data.

    Returns np.inf for (numerically) identical images.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    mse = np.mean((img1_ - img2_)**2)
    if mse <= 1e-10:
        return np.inf
    return 20 * np.log10(dynamic_range / (np.sqrt(mse) + np.finfo(np.float64).eps))


def scc(img1, img2):
    """SCC (spatial correlation coefficient) for 2D (H, W) or 3D (H, W, C) image.

    For 3D input, returns the mean Pearson correlation across bands.
    """
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    if img1_.ndim == 2:
        # BUGFIX: the original called img2_.rehshape(...) (typo) which raised
        # AttributeError whenever a 2D pair was evaluated.
        return np.corrcoef(img1_.reshape(1, -1), img2_.reshape(1, -1))[0, 1]
    elif img1_.ndim == 3:
        ccs = [np.corrcoef(img1_[..., i].reshape(1, -1), img2_[..., i].reshape(1, -1))[0, 1]
               for i in range(img1_.shape[2])]
        return np.mean(ccs)
    else:
        raise ValueError('Wrong input image dimensions.')


def _qindex(img1, img2, block_size=8):
    """Q-index for a 2D (one-band) image, shape (H, W); uint or float [0, 1].

    Local means/variances come from a block_size x block_size box filter,
    cropped to the 'valid' region; degenerate (zero-variance / zero-mean)
    blocks are handled case-by-case to avoid 0/0.
    """
    assert block_size > 1, 'block_size shold be greater than 1!'
    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    window = np.ones((block_size, block_size)) / (block_size**2)
    # crop offsets that emulate 'valid' filtering with cv2's 'same' output
    pad_topleft = int(np.floor(block_size/2))
    pad_bottomright = block_size - 1 - pad_topleft
    mu1 = cv2.filter2D(img1_, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright]
    mu2 = cv2.filter2D(img2_, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2

    sigma1_sq = cv2.filter2D(img1_**2, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu1_sq
    sigma2_sq = cv2.filter2D(img2_**2, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu2_sq
    sigma12 = cv2.filter2D(img1_ * img2_, -1, window)[pad_topleft:-pad_bottomright, pad_topleft:-pad_bottomright] - mu1_mu2

    # default 1 covers the fully-degenerate case sigma == mu == 0
    qindex_map = np.ones(sigma12.shape)
    # sigma == 0 and mu != 0: only the luminance term survives
    idx = ((sigma1_sq + sigma2_sq) < 1e-8) * ((mu1_sq + mu2_sq) > 1e-8)
    qindex_map[idx] = 2 * mu1_mu2[idx] / (mu1_sq + mu2_sq)[idx]
    # sigma != 0 and mu == 0: only the contrast/structure term survives
    idx = ((sigma1_sq + sigma2_sq) > 1e-8) * ((mu1_sq + mu2_sq) < 1e-8)
    qindex_map[idx] = 2 * sigma12[idx] / (sigma1_sq + sigma2_sq)[idx]
    # sigma != 0 and mu != 0: full Q formula
    idx = ((sigma1_sq + sigma2_sq) > 1e-8) * ((mu1_sq + mu2_sq) > 1e-8)
    qindex_map[idx] = ((2 * mu1_mu2[idx]) * (2 * sigma12[idx])) / (
        (mu1_sq + mu2_sq)[idx] * (sigma1_sq + sigma2_sq)[idx])

    return np.mean(qindex_map)


def qindex(img1, img2, block_size=8):
    """Q-index for 2D (H, W) or 3D (H, W, C) image; uint or float [0, 1]."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return _qindex(img1, img2, block_size)
    elif img1.ndim == 3:
        qindexs = [_qindex(img1[..., i], img2[..., i], block_size) for i in range(img1.shape[2])]
        return np.array(qindexs).mean()
    else:
        raise ValueError('Wrong input image dimensions.')


def _ssim(img1, img2, dynamic_range=255):
    """SSIM for a 2D (one-band) image, shape (H, W); dynamic_range 255 (uint8) or 2047 (uint16)."""
    C1 = (0.01 * dynamic_range)**2
    C2 = (0.03 * dynamic_range)**2

    img1_ = img1.astype(np.float64)
    img2_ = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)  # standard 11x11, sigma=1.5 window
    window = np.outer(kernel, kernel.transpose())

    # [5:-5, 5:-5] crops to the 'valid' region of the 11x11 filter
    mu1 = cv2.filter2D(img1_, -1, window)[5:-5, 5:-5]
    mu2 = cv2.filter2D(img2_, -1, window)[5:-5, 5:-5]
    mu1_sq = mu1**2
    mu2_sq = mu2**2
    mu1_mu2 = mu1 * mu2
    sigma1_sq = cv2.filter2D(img1_**2, -1, window)[5:-5, 5:-5] - mu1_sq
    sigma2_sq = cv2.filter2D(img2_**2, -1, window)[5:-5, 5:-5] - mu2_sq
    sigma12 = cv2.filter2D(img1_ * img2_, -1, window)[5:-5, 5:-5] - mu1_mu2

    ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / (
        (mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
    return ssim_map.mean()


def ssim(img1, img2, dynamic_range=255):
    """SSIM for 2D (H, W) or 3D (H, W, C) image; mean over bands for 3D input."""
    if not img1.shape == img2.shape:
        raise ValueError('Input images must have the same dimensions.')
    if img1.ndim == 2:
        return _ssim(img1, img2, dynamic_range)
    elif img1.ndim == 3:
        ssims = [_ssim(img1[..., i], img2[..., i], dynamic_range) for i in range(img1.shape[2])]
        return np.array(ssims).mean()
    else:
        raise ValueError('Wrong input image dimensions.')


def ergas(img_fake, img_real, scale=4):
    """ERGAS for 2D (H, W) or 3D (H, W, C) image; uint or float [0, 1].

    scale = spatial resolution of PAN / spatial resolution of MUL, default 4.
    """
    if not img_fake.shape == img_real.shape:
        raise ValueError('Input images must have the same dimensions.')
    img_fake_ = img_fake.astype(np.float64)
    img_real_ = img_real.astype(np.float64)
    if img_fake_.ndim == 2:
        mean_real = img_real_.mean()
        mse = np.mean((img_fake_ - img_real_)**2)
        return 100 / scale * np.sqrt(mse / (mean_real**2 + np.finfo(np.float64).eps))
    elif img_fake_.ndim == 3:
        means_real = img_real_.reshape(-1, img_real_.shape[2]).mean(axis=0)
        mses = ((img_fake_ - img_real_)**2).reshape(-1, img_fake_.shape[2]).mean(axis=0)
        return 100 / scale * np.sqrt((mses / (means_real**2 + np.finfo(np.float64).eps)).mean())
    else:
        raise ValueError('Wrong input image dimensions.')


####################
# observation model
####################


def gaussian2d(N, std):
    """Separable 2D Gaussian window on an N x N integer grid, peak value 1."""
    t = np.arange(-(N - 1) // 2, (N + 2) // 2)
    t1, t2 = np.meshgrid(t, t)
    std = np.double(std)
    w = np.exp(-0.5 * (t1 / std)**2) * np.exp(-0.5 * (t2 / std)**2)
    return w


def kaiser2d(N, beta):
    """Rotationally symmetric 2D Kaiser window built from the 1D np.kaiser profile."""
    t = np.arange(-(N - 1) // 2, (N + 2) // 2) / np.double(N - 1)
    t1, t2 = np.meshgrid(t, t)
    t12 = np.sqrt(t1 * t1 + t2 * t2)
    w1 = np.kaiser(N, beta)
    w = np.interp(t12, t, w1)
    # zero outside the radial support of the 1D profile
    w[t12 > t[-1]] = 0
    w[t12 < t[0]] = 0
    return w


def fir_filter_wind(Hd, w):
    """
    compute fir (finite impulse response) filter with window method
    Hd: desired frequency response (2D)
    w: window (2D)
    """
    hd = np.rot90(np.fft.fftshift(np.rot90(Hd, 2)), 2)
    h = np.fft.fftshift(np.fft.ifft2(hd))
    h = np.rot90(h, 2)
    h = h * w
    h = h / np.sum(h)  # normalize to unit DC gain
    return h


def GNyq2win(GNyq, scale=4, N=41):
    """Generate a 2D convolutional window from a given GNyq
    GNyq: Nyquist frequency gain
    scale: spatial size of PAN / spatial size of MS
    N: window size (odd)
    """
    # fir filter with window method: Gaussian whose gain at Nyquist is GNyq
    fcut = 1 / scale
    alpha = np.sqrt(((N - 1) * (fcut / 2))**2 / (-2 * np.log(GNyq)))
    H = gaussian2d(N, alpha)
    Hd = H / np.max(H)
    w = kaiser2d(N, 0.5)
    h = fir_filter_wind(Hd, w)
    return np.real(h)


def mtf_resize(img, satellite='QuickBird', scale=4):
    """MTF-matched low-pass + nearest-neighbour downsample by *scale*.

    Pan input (2D after squeeze) uses the sensor's pan GNyq; MS input (3D)
    uses one per-band window.
    """
    # satellite GNyq
    scale = int(scale)
    if satellite == 'QuickBird':
        GNyq = [0.34, 0.32, 0.30, 0.22]  # Band Order: B,G,R,NIR
        GNyqPan = 0.15
    elif satellite == 'IKONOS':
        GNyq = [0.26, 0.28, 0.29, 0.28]  # Band Order: B,G,R,NIR
        GNyqPan = 0.17
    else:
        raise NotImplementedError('satellite: QuickBird or IKONOS')
    # lowpass
    img_ = img.squeeze()
    img_ = img_.astype(np.float64)
    if img_.ndim == 2:  # Pan
        H, W = img_.shape
        lowpass = GNyq2win(GNyqPan, scale, N=41)
    elif img_.ndim == 3:  # MS
        # BUGFIX: read the squeezed array's shape (was img.shape, which can
        # differ when the input carries singleton dimensions)
        H, W, _ = img_.shape
        lowpass = [GNyq2win(gnyq, scale, N=41) for gnyq in GNyq]
        lowpass = np.stack(lowpass, axis=-1)
    # modern namespace: scipy.ndimage.filters was removed in SciPy >= 1.10
    img_ = ndimage.correlate(img_, lowpass, mode='nearest')
    # downsampling
    output_size = (H // scale, W // scale)
    img_ = cv2.resize(img_, dsize=output_size, interpolation=cv2.INTER_NEAREST)
    return img_
##################
# No reference IQA
##################


def D_lambda(img_fake, img_lm, block_size=32, p=1):
    """Spectral distortion index.

    img_fake, generated HRMS
    img_lm, LRMS
    Compares the inter-band Q-indexes of the fused image against those of
    the low-resolution original; 0 means no spectral distortion.
    """
    assert img_fake.ndim == img_lm.ndim == 3, 'Images must be 3D!'
    H_f, W_f, C_f = img_fake.shape
    H_r, W_r, C_r = img_lm.shape
    assert C_f == C_r, 'Fake and lm should have the same number of bands!'
    # every unordered band pair (u, v) with u < v
    band_pairs = [(u, v) for u in range(C_f) for v in range(u + 1, C_f)]
    Q_fake = np.array([_qindex(img_fake[..., u], img_fake[..., v], block_size=block_size)
                       for u, v in band_pairs])
    Q_lm = np.array([_qindex(img_lm[..., u], img_lm[..., v], block_size=block_size)
                     for u, v in band_pairs])
    D_lambda_index = (np.abs(Q_fake - Q_lm) ** p).mean()
    return D_lambda_index ** (1 / p)


def D_s(img_fake, img_lm, pan, satellite='QuickBird', scale=4, block_size=32, q=1):
    """Spatial distortion index.

    img_fake, generated HRMS
    img_lm, LRMS
    pan, HRPan (3D with a singleton band axis)
    Compares band-vs-pan Q-indexes at high resolution against the same
    comparison at low resolution (pan degraded by the sensor MTF).
    """
    assert img_fake.ndim == img_lm.ndim == 3, 'MS images must be 3D!'
    H_f, W_f, C_f = img_fake.shape
    H_r, W_r, C_r = img_lm.shape
    assert H_f // H_r == W_f // W_r == scale, 'Spatial resolution should be compatible with scale'
    assert C_f == C_r, 'Fake and lm should have the same number of bands!'
    assert pan.ndim == 3, 'Panchromatic image must be 3D!'
    H_p, W_p, C_p = pan.shape
    assert C_p == 1, 'size of 3rd dim of Panchromatic image must be 1'
    assert H_f == H_p and W_f == W_p, "Pan's and fake's spatial resolution should be the same"
    # MTF-degraded low-resolution pan, 2D
    pan_lr = mtf_resize(pan, satellite=satellite, scale=scale)
    pan_hr = pan[..., 0]  # drop the singleton band axis
    Q_hr = np.array([_qindex(img_fake[..., b], pan_hr, block_size=block_size)
                     for b in range(C_f)])
    Q_lr = np.array([_qindex(img_lm[..., b], pan_lr, block_size=block_size)
                     for b in range(C_f)])
    D_s_index = (np.abs(Q_hr - Q_lr) ** q).mean()
    return D_s_index ** (1 / q)


def qnr(img_fake, img_lm, pan, satellite='QuickBird', scale=4, block_size=32, p=1, q=1, alpha=1, beta=1):
    """QNR - No reference IQA; (1 - D_lambda)^alpha * (1 - D_s)^beta."""
    spectral = D_lambda(img_fake, img_lm, block_size, p)
    spatial = D_s(img_fake, img_lm, pan, satellite, scale, block_size, q)
    return (1 - spectral) ** alpha * (1 - spatial) ** beta


def ref_evaluate(pred, gt):
    """Full-reference metrics: [PSNR, SSIM, SAM, ERGAS, SCC, Q]."""
    return [
        psnr(pred, gt),
        ssim(pred, gt),
        sam(pred, gt),
        ergas(pred, gt),
        scc(pred, gt),
        qindex(pred, gt),
    ]


def no_ref_evaluate(pred, pan, hs):
    """No-reference metrics: [D_lambda, D_s, QNR]."""
    return [
        D_lambda(pred, hs),
        D_s(pred, hs, pan),
        qnr(pred, hs, pan),
    ]

# ---- end of methods/metrics.py ----
/methods/CNMF.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | License: GNU-3.0 4 | Referenc: http://www.naotoyokoya.com/ 5 | Paper References: 6 | [1] N. Yokoya, T. Yairi, and A. Iwasaki, "Coupled nonnegative matrix factorization unmixing for hyperspectral and multispectral data fusion," 7 | IEEE Trans. Geosci. Remote Sens., vol. 50, no. 2, pp. 528-537, 2012. 8 | [2] N. Yokoya, T. Yairi, and A. Iwasaki, "Hyperspectral, multispectral, and panchromatic data fusion based on non-negative matrix factorization," 9 | Proc. WHISPERS, Lisbon, Portugal, Jun. 6-9, 2011. 10 | [3] N. Yokoya, N. Mayumi, and A. Iwasaki, "Cross-calibration for data fusion of EO-1/Hyperion and Terra/ASTER," 11 | IEEE J. Sel. Topics Appl. Earth Observ. Remote Sens., vol. 6, no. 2, pp. 419-426, 2013. 12 | """ 13 | 14 | import numpy as np 15 | from scipy.special import erfinv 16 | 17 | def CNMF(MSI, HSI, mask=0, verbose='off',MEMs=0): 18 | ''' 19 | COUPLED NONNEGATIVE MATRIX FACTORIZATION (CNMF) 20 | 21 | Copyright (c) 2016 Naoto Yokoya 22 | Email: yokoya@sal.rcast.u-tokyo.ac.jp 23 | Update: 2016/04/01 24 | 25 | References: 26 | [1] N. Yokoya, T. Yairi, and A. Iwasaki, "Coupled nonnegative matrix 27 | factorization unmixing for hyperspectral and multispectral data fusion," 28 | IEEE Trans. Geosci. Remote Sens., vol. 50, no. 2, pp. 528-537, 2012. 29 | [2] N. Yokoya, N. Mayumi, and A. Iwasaki, "Cross-calibration for data fusion 30 | of EO-1/Hyperion and Terra/ASTER," IEEE J. Sel. Topics Appl. Earth Observ. 31 | Remote Sens., vol. 6, no. 2, pp. 419-426, 2013. 32 | [3] N. Yokoya, T. Yairi, and A. Iwasaki, "Hyperspectral, multispectral, 33 | and panchromatic data fusion based on non-negative matrix factorization," 34 | Proc. WHISPERS, Lisbon, Portugal, Jun. 6-9, 2011. 
35 | 36 | USAGE 37 | Out = CNMF_fusion(HSI,MSI,mask,verbose) 38 | 39 | INPUT 40 | HSI : Low-spatial-resolution HS image (rows2,cols2,bands2) 41 | MSI : MS image (rows1,cols1,bands1) 42 | mask : (optional) Binary mask for processing (rows2,cols2) (0: mask, 1: image) 43 | verbose : (optional) Print out processing status 44 | MEMs : (optional) Manually defined endmembers (bands2, num. of endmembers) 45 | 46 | OUTPUT 47 | Out : High-spatial-resolution HS image (rows1,cols1,bands2) 48 | ''' 49 | 50 | # masking mode 51 | if np.isscalar(mask): 52 | masking = 0 53 | else: 54 | masking = 1 55 | 56 | # image size 57 | rows1 = MSI.shape[0] 58 | cols1 = MSI.shape[1] 59 | bands1 = MSI.shape[2] 60 | rows2 = HSI.shape[0] 61 | cols2 = HSI.shape[1] 62 | bands2 = HSI.shape[2] 63 | 64 | w = int(rows1/rows2) 65 | 66 | # Estimation of R 67 | if verbose == 'on': 68 | print('Estimate R...') 69 | R = estR(HSI,MSI,mask) 70 | for b in range(bands1): 71 | msi = MSI[:,:,b].reshape(rows1,cols1).copy() 72 | msi = msi - R[b,-1] 73 | msi[np.nonzero(msi<0)] = 0 74 | MSI[:,:,b] = msi.copy() 75 | R = R[:,0:bands2] 76 | 77 | # parameters 78 | th_h = 1e-8 # Threshold of change ratio in inner loop for HS unmixing 79 | th_m = 1e-8 # Threshold of change ratio in inner loop for MS unmixing 80 | th2 = 1e-2 # Threshold of change ratio in outer loop 81 | sum2one = 2*( MSI.mean()/0.7455)**0.5 / bands1**3 # Parameter of sum to 1 constraint 82 | 83 | if bands1 == 1: 84 | I1 = 75 # Maximum iteration of inner loop 85 | I2 = 1 # Maximum iteration of outer loop 86 | else: 87 | I1 = 200 # Maximum iteration of inner loop (200-300) 88 | I2 = 1 # Maximum iteration of outer loop (1-3) 89 | 90 | # initialization of H_hyper 91 | # 0: constant (fast) 92 | # 1: nonnegative least squares (slow) 93 | init_mode = 0 94 | 95 | # avoid nonnegative values 96 | HSI[np.nonzero(HSI<0)] = 0 97 | MSI[np.nonzero(MSI<0)] = 0 98 | 99 | if masking == 0: 100 | HSI = HSI.reshape(rows2*cols2,bands2).transpose() 101 | MSI = 
MSI.reshape(rows1*cols1,bands1).transpose() 102 | else: 103 | HSI = HSI.reshape(rows2*cols2,bands2) 104 | MSI = MSI.reshape(rows1*cols1,bands1) 105 | 106 | mask2 = zoom_nn(mask,w) 107 | HSI = HSI[mask.reshape(rows2*cols2)==1,:].transpose() 108 | MSI = MSI[mask2.reshape(rows1*cols1)==1,:].transpose() 109 | 110 | # manually define endmembers 111 | if np.isscalar(MEMs) == False: 112 | if MEMs.shape[0] == bands2 and len(MEMs.shape) == 2: 113 | M_m = MEMs.shape[1] 114 | else: 115 | print('Please check the size of manually defined endmembers.') 116 | M_m = 0 117 | MEMs = 0 118 | else: 119 | M_m = 0 120 | 121 | # number of endmembers 122 | M_est = int(round(vd(HSI,5*10**-2))) 123 | M = max([min([30,bands2]), M_est]) # M can be automatically defined, for example, by VD 124 | if verbose == 'on': 125 | print('Number of endmembers: ', M+M_m) 126 | 127 | # CNMF Initializatioin 128 | HSI, MSI, W_hyper, H_hyper, W_multi, H_multi, RMSE_h, RMSE_m = CNMF_init(rows1,cols1,w,M,HSI,MSI,sum2one,I1,th_h,th_m,R,init_mode,mask,verbose,MEMs) 129 | 130 | cost = np.zeros((2,I2+1)) 131 | cost[0,0] = RMSE_h 132 | cost[1,0] = RMSE_m 133 | 134 | # CNMF Iteration 135 | for i in range(I2): 136 | W_hyper, H_hyper, W_multi1, H_multi1, W_multi2, H_multi2, RMSE_h, RMSE_m = CNMF_ite(rows1,cols1,w,M+M_m,HSI,MSI,W_hyper,H_hyper,W_multi,H_multi,I1,th_h,th_m,I2,i,R,mask,verbose) 137 | 138 | cost[0,i+1] = RMSE_h 139 | cost[1,i+1] = RMSE_m 140 | 141 | if (cost[0,i]-cost[0,i+1])/cost[0,i]>th2 and (cost[1,i]-cost[1,i+1])/cost[1,i]>th2 and i1]=1 162 | 163 | return Out 164 | 165 | 166 | def CNMF_init(xdata,ydata,w,M,hyper,multi,delta,I_in,delta_h,delta_m,srf,init_mode=0,mask=0,verbose='off',MEMs=0): 167 | ''' 168 | COUPLED NONNEGATIVE MATRIX FACTORIZATION (CNMF) 169 | 170 | Copyright (c) 2016 Naoto Yokoya 171 | Email: yokoya@sal.rcast.u-tokyo.ac.jp 172 | Update: 2016/04/01 173 | 174 | References: 175 | [1] N. Yokoya, T. Yairi, and A. 
    Iwasaki, "Coupled nonnegative matrix
    factorization unmixing for hyperspectral and multispectral data fusion,"
    IEEE Trans. Geosci. Remote Sens., vol. 50, no. 2, pp. 528-537, 2012.
    [2] N. Yokoya, T. Yairi, and A. Iwasaki, "Hyperspectral, multispectral,
    and panchromatic data fusion based on non-negative matrix factorization,"
    Proc. WHISPERS, Lisbon, Portugal, Jun. 6-9, 2011.

    This function is the initialization function of CNMF.

    USAGE
    hyper, multi, W_hyper, H_hyper, W_multi, H_multi, RMSE_h, RMSE_m =
        CNMF_init(xdata,ydata,w,M,hyper,multi,delta,I_in,delta_h,delta_m,srf,init_mode,mask,verbose)

    INPUT
    xdata     : image height
    ydata     : image width
    w         : multiple difference of ground sampling distance (scalar)
    M         : Number of endmembers
    hyper     : Low-spatial-resolution HS image (band, xdata/w*ydata/w)
    multi     : MS image (multi_band, xdata*ydata)
    delta     : Parameter of sum to one constraint
    I_in      : Maximum number of inner iteration
    delta_h   : Parameter for HS unmixing (convergence threshold)
    delta_m   : Parameter for MS unmixing (convergence threshold)
    srf       : Relative spectral response function
    init_mode : Initialization mode (0: const, 1: nnls)
    mask      : (optional) Binary mask for processing (xdata/w,ydata/w)
    verbose   : (optional) Print out processing status
    MEMs      : (optional) Manually defined endmembers (bands2, num. of endmembers)

    OUTPUT
    hyper   : Low-spatial-resolution HS image with ones (band+1, xdata/w*ydata/w)
    multi   : MS image with ones (multi_band+1, xdata*ydata)
    W_hyper : HS endmember matrix with ones (band+1, M)
    H_hyper : HS abundance matrix (M, xdata/w*ydata/w)
    W_multi : MS endmember matrix with ones (multi_band+1, M)
    H_multi : MS abundance matrix (M, xdata*ydata)
    RMSE_h  : RMSE of HS unmixing
    RMSE_m  : RMSE of MS unmixing
    '''

    # W-updates are skipped for MS data with this many bands or fewer
    MIN_MS_BANDS = 3

    band = np.size(hyper,0)
    multi_band = np.size(multi,0)
    # low-resolution grid size (hyper pixels)
    hx = int(xdata/w)
    hy = int(ydata/w)
    if verbose == 'on':
        print('Initialize Wh by VCA')
    # vca is a project helper defined elsewhere in this module — presumably
    # vertex component analysis endmember extraction; verify against its def.
    W_hyper, indices = vca( hyper, M )

    # Add manually defined endmembers
    if np.isscalar(MEMs) == False:
        W_hyper = np.hstack((W_hyper, MEMs))
        M = W_hyper.shape[1]

    # masking mode (scalar mask means "no mask": process every pixel)
    if np.isscalar(mask):
        masking = 0
        mask = np.ones((hy,hx))
    else:
        masking = 1

    # Initialize H_hyper: (M, N_h) — uniform abundances 1/M per endmember
    if masking == 0:
        H_hyper = np.ones((M, hx*hy))/M
    else:
        H_hyper = np.ones((M, hx*hy))/M
        # keep only the pixels inside the mask
        H_hyper = H_hyper[:,mask.reshape(hx*hy)==1]

    if init_mode == 1:
        if verbose == 'on':
            print('Initialize Hh by NLS')
        # initialize H_hyper by nonnegative least squares
        # (nls_su is a project helper defined elsewhere in this module)
        H_hyper = nls_su(hyper,W_hyper)

    # Sum-to-one constraint: append a constant row of delta to both the
    # endmember matrix and the data so the NMF softly enforces sum(H) = 1
    W_hyper = np.vstack((W_hyper, delta*np.ones((1,np.size(W_hyper, 1)))))
    hyper = np.vstack((hyper, delta*np.ones((1,np.size(hyper, 1)))))

    # NMF for Vh 1st: multiplicative-update NMF of the HS data
    if verbose == 'on':
        print ('NMF for Vh ( 1 )')
    for i in range(I_in):
        # Initialization of H_hyper (first outer pass updates H only)
        if i == 0:
            cost0 = 0
            for q in range(I_in*3):
                # Update H_hyper (multiplicative update rule)
                H_hyper_old = H_hyper
                H_hyper_n = np.dot(W_hyper.transpose(), hyper)
                H_hyper_d = np.dot(np.dot(W_hyper.transpose(), W_hyper), H_hyper)
                H_hyper = (H_hyper*H_hyper_n)/H_hyper_d
                # reconstruction error on the real bands only (sum-to-one row excluded)
                cost = np.sum((hyper[0:band, :] - np.dot(W_hyper[0:band, :], H_hyper))**2)
                if q > 1 and (cost0-cost)/cost < delta_h:
                    if verbose == 'on':
                        print('Initialization of H_hyper converged at the ', q, 'th iteration ')
                    # roll back to the last accepted iterate on convergence
                    H_hyper = H_hyper_old
                    break
                cost0 = cost
        else:
            # Update W_hyper
            W_hyper_old = W_hyper
            W_hyper_n = np.dot(hyper[0:band, :], (H_hyper.transpose()))
            W_hyper_d = np.dot(np.dot(W_hyper[0:band,:], H_hyper), H_hyper.transpose())
            W_hyper[0:band, :] = (W_hyper[0:band, :]*W_hyper_n)/W_hyper_d
            # Update H_hyper
            H_hyper_old = H_hyper
            H_hyper_n = np.dot(W_hyper.transpose(), hyper)
            H_hyper_d = np.dot(np.dot(W_hyper.transpose(), W_hyper), H_hyper)
            H_hyper = (H_hyper*H_hyper_n)/H_hyper_d
            cost = np.sum((hyper[0:band, :] - np.dot(W_hyper[0:band, :], H_hyper))**2)
            if (cost0-cost)/cost < delta_h:
                if verbose == 'on':
                    print('Optimization of HS unmixing converged at the ', i, 'th iteration ')
                W_hyper = W_hyper_old
                H_hyper = H_hyper_old
                break
            cost0 = cost

    RMSE_h = (cost0/(hyper.shape[1]*band))**0.5
    if verbose == 'on':
        print(' RMSE(Vh) = ', RMSE_h)

    # initialize W_multi: (multi_band, M) — project HS endmembers through the
    # spectral response function, then append the sum-to-one row as above
    W_multi = np.dot(srf, W_hyper[0:band,:])
    W_multi = np.vstack((W_multi, delta*np.ones((1, M))))
    multi = np.vstack((multi, delta*np.ones((1, multi.shape[1]))))

    # initialize H_multi by interpolation (bilinear upsampling of the HS
    # abundances via zoom_bi, a project helper defined elsewhere)
    if masking == 0:
        H_multi = np.ones((M, xdata*ydata))/M
        for i in range(M):
            tmp = zoom_bi(H_hyper[i,:].reshape(hx,hy).copy(),w)
            H_multi[i,:] = tmp.reshape(1,xdata*ydata)
        # clamp interpolation undershoot to keep abundances nonnegative
        H_multi[np.nonzero(H_multi<0)] = 0
    else:
        # zoom_nn: nearest-neighbour upsampling of the mask (project helper)
        mask2 = zoom_nn(mask,w)
        H_multi = np.ones((M,multi.shape[1]))/M
        for i in range(M):
            tmp = np.zeros((hx,hy))
            tmp[np.nonzero(mask>0)] = H_hyper[i,:].copy()
            tmp = zoom_bi(tmp,w)
            H_multi[i,:] = tmp[np.nonzero(mask2>0)].copy()
        H_multi[np.nonzero(H_multi<0)] = 0

    # NMF for Vm 1st: same multiplicative-update scheme on the MS data
    if verbose == 'on':
        print('NMF for Vm ( 1 )')
    for i in range(I_in):
        if i == 0:
            cost0 = 0
            for q in range(I_in):
                # Update H_multi
                H_multi_old = H_multi
                H_multi_n = np.dot(W_multi.transpose(), multi)
                H_multi_d = np.dot(np.dot(W_multi.transpose(), W_multi), H_multi)
                H_multi = (H_multi*H_multi_n)/H_multi_d
                cost = np.sum((multi[0:multi_band, :] - np.dot(W_multi[0:multi_band, :], H_multi))**2)
                if q > 1 and (cost0-cost)/cost < delta_m:
                    if verbose == 'on':
                        print('Initialization of H_multi converged at the ', q, 'th iteration ')
                    H_multi = H_multi_old
                    break
                cost0 = cost
        else:
            # Update W_multi — only when there are enough MS bands for the
            # update to be well-posed (NOTE(review): guard threshold is
            # MIN_MS_BANDS; confirm intent for exactly-4-band data)
            W_multi_old = W_multi
            if multi_band > MIN_MS_BANDS:
                W_multi_n = np.dot(multi[0:multi_band, :], H_multi.transpose())
                W_multi_d = np.dot(np.dot(W_multi[0:multi_band, :], H_multi), H_multi.transpose())
                W_multi[0:multi_band, :] = (W_multi[0:multi_band, :]*W_multi_n)/W_multi_d
            # Update H_multi
            H_multi_old = H_multi
            H_multi_n = np.dot(W_multi.transpose(), multi)
            H_multi_d = np.dot(np.dot(W_multi.transpose(), W_multi), H_multi)
            H_multi = H_multi*H_multi_n/H_multi_d
            cost = np.sum((multi[0:multi_band, :]-np.dot(W_multi[0:multi_band, :], H_multi))**2)
            if (cost0-cost)/cost < delta_m:
                if verbose == 'on':
                    print('Optimization of MS unmixing converged at the ', i, 'th iteration ')
                W_multi = W_multi_old
                H_multi = H_multi_old
                break
            cost0=cost

    RMSE_m = (cost0/((multi.shape[1])*multi_band))**0.5
    if verbose == 'on':
        print(' RMSE(Vm) = ', RMSE_m) # MSE(Mean Squared Error) in NMF of Vm

    return hyper, multi, W_hyper, H_hyper, W_multi, H_multi, RMSE_h, RMSE_m
def CNMF_ite(xdata, ydata, w, M, hyper, multi, W_hyper, H_hyper, W_multi, H_multi,
             I_in, delta_h, delta_m, I_out, i_out, srf, mask=0, verbose='off'):
    '''
    COUPLED NONNEGATIVE MATRIX FACTORIZATION (CNMF) -- one outer iteration.

    Alternates NMF updates on the low-resolution HS data (Vh) and the
    high-resolution MS data (Vm), coupling the two factorizations through
    the relative spectral response function `srf` and through a spatially
    downsampled abundance map.

    Copyright (c) 2016 Naoto Yokoya
    Email: yokoya@sal.rcast.u-tokyo.ac.jp
    Update: 2016/04/01

    References:
    [1] N. Yokoya, T. Yairi, and A. Iwasaki, "Coupled nonnegative matrix
    factorization unmixing for hyperspectral and multispectral data fusion,"
    IEEE Trans. Geosci. Remote Sens., vol. 50, no. 2, pp. 528-537, 2012.
    [2] N. Yokoya, T. Yairi, and A. Iwasaki, "Hyperspectral, multispectral,
    and panchromatic data fusion based on non-negative matrix factorization,"
    Proc. WHISPERS, Lisbon, Portugal, Jun. 6-9, 2011.

    USAGE
    W_hyper, H_hyper, W_multi1, H_multi1, W_multi2, H_multi2, RMSE_h, RMSE_m =
        CNMF_ite(xdata,ydata,w,M,hyper,multi,W_hyper,H_hyper,W_multi,H_multi,
                 I_in,delta_h,delta_m,I_out,i_out,srf,mask,verbose)

    INPUT
    xdata    : image height
    ydata    : image width
    w        : multiple difference of ground sampling distance (scalar)
    M        : number of endmembers
    hyper    : low-spatial-resolution HS image with ones row (band+1, xdata/w*ydata/w)
    multi    : MS image with ones row (multi_band+1, xdata*ydata)
    W_hyper  : HS endmember matrix with ones (band+1, M)
    H_hyper  : HS abundance matrix (M, xdata/w*ydata/w)
    W_multi  : MS endmember matrix with ones (multi_band+1, M)
    H_multi  : MS abundance matrix (M, xdata*ydata)
    I_in     : maximum number of inner iterations
    delta_h  : relative-cost convergence threshold for HS unmixing
    delta_m  : relative-cost convergence threshold for MS unmixing
    I_out    : maximum number of outer iterations (unused here; kept for API)
    i_out    : current outer-iteration index (used for logging only)
    srf      : relative spectral response function (multi_band, band)
    mask     : (optional) binary mask for processing (xdata/w, ydata/w)
    verbose  : 'on' to print progress

    OUTPUT
    W_hyper  : updated HS endmember matrix with ones (band+1, M)
    H_hyper  : updated HS abundance matrix (M, xdata/w*ydata/w)
    W_multi1 : MS endmember matrix before MS unmixing (multi_band+1, M)
    H_multi1 : MS abundance matrix before MS unmixing (M, xdata*ydata)
    W_multi2 : MS endmember matrix after MS unmixing (multi_band+1, M)
    H_multi2 : MS abundance matrix after MS unmixing (M, xdata*ydata)
    RMSE_h   : RMSE of HS unmixing
    RMSE_m   : RMSE of MS unmixing
    '''

    # If the MS image has no more than this many bands, the endmember
    # (W) factor is frozen during the guarded updates below.
    MIN_MS_BANDS = 3

    # Inputs carry an appended sum-to-one row, hence the -1.
    band = np.size(hyper, 0)-1
    multi_band = np.size(multi, 0)-1
    hx = int(xdata/w)
    hy = int(ydata/w)

    # masking mode
    if np.isscalar(mask):
        masking = 0
        mask = np.ones((hy, hx))
    else:
        masking = 1

    if verbose == 'on':
        print('Iteration', i_out)

    # Initialize H_hyper from H_multi by Gaussian spatial downsampling.
    if masking == 0:
        H_hyper = gaussian_down_sample(H_multi.transpose().reshape(xdata, ydata, M), w).reshape(hx*hy, M).transpose()
    else:
        mask2 = zoom_nn(mask, w)
        for q in range(M):
            # Scatter the masked abundance row back to a full-size image,
            # downsample, then gather the valid low-resolution pixels.
            tmp = np.zeros((xdata, ydata))
            tmp[mask2 > 0] = H_multi[q, :].copy()
            tmp = gaussian_down_sample(tmp.reshape(xdata, ydata, 1), w).reshape(hx, hy)
            # NOTE(review): mask.sum() is a float; relies on numpy accepting
            # float reshape sizes -- confirm on the targeted numpy version.
            H_hyper[q, :] = tmp[mask > 0].copy().reshape(1, mask.sum())

    # NMF for Vh: i == 0 refits W_hyper alone; later iterations alternate
    # H and W multiplicative updates until the relative cost drop < delta_h.
    if verbose == 'on':
        print('NMF for Vh (', i_out+2, ')')
    for i in range(I_in):
        if i == 0:
            cost0 = 0
            for q in range(I_in):
                # Update W_hyper (multiplicative rule)
                W_hyper_old = W_hyper
                W_hyper_n = np.dot(hyper[0:band, :], H_hyper.transpose())
                W_hyper_d = np.dot(np.dot(W_hyper[0:band, :], H_hyper), H_hyper.transpose())
                W_hyper[0:band, :] = (W_hyper[0:band, :]*W_hyper_n)/W_hyper_d
                cost = np.sum((hyper[0:band, :] - np.dot(W_hyper[0:band, :], H_hyper))**2)
                if q > 1 and (cost0-cost)/cost < delta_h:
                    if verbose == 'on':
                        print('Initialization of W_hyper converged at the ', q, 'th iteration ')
                    # Roll back the last (non-improving) update.
                    W_hyper = W_hyper_old
                    break
                cost0 = cost
        else:
            # Update H_hyper (skipped for few-band MS data)
            H_hyper_old = H_hyper
            if multi_band > MIN_MS_BANDS:
                H_hyper_n = np.dot(W_hyper.transpose(), hyper)
                H_hyper_d = np.dot(np.dot(W_hyper.transpose(), W_hyper), H_hyper)
                H_hyper = (H_hyper*H_hyper_n)/H_hyper_d
            # Update W_hyper
            W_hyper_old = W_hyper
            W_hyper_n = np.dot(hyper[0:band, :], H_hyper.transpose())
            W_hyper_d = np.dot(np.dot(W_hyper[0:band, :], H_hyper), H_hyper.transpose())
            W_hyper[0:band, :] = (W_hyper[0:band, :]*W_hyper_n)/W_hyper_d
            cost = np.sum((hyper[0:band, :] - np.dot(W_hyper[0:band, :], H_hyper))**2)
            if (cost0-cost)/cost < delta_h:
                if verbose == 'on':
                    print('Optimization of HS unmixing converged at the ', i, 'th iteration ')
                H_hyper = H_hyper_old
                W_hyper = W_hyper_old
                break
            cost0 = cost

    RMSE_h = (cost0/(hyper.shape[1]*band))**0.5
    if verbose == 'on':
        print(' RMSE(Vh) = ', RMSE_h)

    # Snapshot the MS factorization before re-coupling (returned as *1).
    W_multi1 = W_multi.copy()
    H_multi1 = H_multi.copy()

    # Re-couple: MS endmembers from updated HS endmembers via the SRF.
    W_multi[0:multi_band, :] = np.dot(srf, W_hyper[0:band, :])

    # NMF for Vm: i == 0 refits H_multi alone; later iterations alternate
    # W and H updates until the relative cost drop < delta_m.
    if verbose == 'on':
        print('NMF for Vm (', i_out+2, ')')
    for i in range(I_in):
        if i == 0:
            cost0 = 0
            for q in range(I_in):
                # Update H_multi
                H_multi_old = H_multi
                H_multi_n = np.dot(W_multi.transpose(), multi)
                H_multi_d = np.dot(np.dot(W_multi.transpose(), W_multi), H_multi)
                H_multi = (H_multi*H_multi_n)/H_multi_d
                cost = np.sum((multi[0:multi_band, :] - np.dot(W_multi[0:multi_band, :], H_multi))**2)
                if q > 1 and (cost0-cost)/cost < delta_m:
                    if verbose == 'on':
                        print('Initialization of H_multi converged at the ', q, 'th iteration ')
                    H_multi = H_multi_old
                    break
                cost0 = cost
        else:
            # Update W_multi (skipped for few-band MS data)
            W_multi_old = W_multi
            if multi_band > MIN_MS_BANDS:
                W_multi_n = np.dot(multi[0:multi_band, :], H_multi.transpose())
                W_multi_d = np.dot(np.dot(W_multi[0:multi_band, :], H_multi), H_multi.transpose())
                W_multi[0:multi_band, :] = (W_multi[0:multi_band, :]*W_multi_n)/W_multi_d
            # Update H_multi
            H_multi_old = H_multi
            H_multi_n = np.dot(W_multi.transpose(), multi)
            H_multi_d = np.dot(np.dot(W_multi.transpose(), W_multi), H_multi)
            H_multi = (H_multi*H_multi_n)/H_multi_d
            cost = np.sum((multi[0:multi_band, :] - np.dot(W_multi[0:multi_band, :], H_multi))**2)
            if (cost0-cost)/cost < delta_m:
                if verbose == 'on':
                    print('Optimization of MS unmixing converged at the ', i, 'th iteration ')
                W_multi = W_multi_old
                H_multi = H_multi_old
                break
            cost0 = cost

    RMSE_m = (cost0/(multi.shape[1]*multi_band))**0.5
    if verbose == 'on':
        print(' RMSE(Vm) = ', RMSE_m)

    W_multi2 = W_multi
    H_multi2 = H_multi

    return W_hyper, H_hyper, W_multi1, H_multi1, W_multi2, H_multi2, RMSE_h, RMSE_m

def gaussian_filter2d(shape=(3, 3), sigma=1):
    '''
    2-D Gaussian filter, normalized to sum to 1 (when the sum is nonzero).

    USAGE
    h = gaussian_filter2d(shape,sigma)

    INPUT
    shape : window size, e.g. (3,3)
    sigma : standard deviation (scalar)

    OUTPUT
    h : (shape[0], shape[1]) kernel
    '''
    m, n = [(ss-1.)/2. for ss in shape]
    y, x = np.ogrid[-m:m+1, -n:n+1]
    h = np.exp(-(x**2 + y**2) / (2.*sigma**2))
    # Zero out numerically negligible tails before normalizing.
    h[h < np.finfo(h.dtype).eps*h.max()] = 0
    sumh = h.sum()
    if sumh != 0:
        h /= sumh
    return h
def gaussian_down_sample(data, w, mask=0):
    '''
    Downsample an HS image by factor w with a Gaussian point spread function.

    USAGE
    HSI = gaussian_down_sample(data,w,mask)

    INPUT
    data : input HS image (xdata, ydata, band)
    w    : difference of ground sampling distance (FWHM = w)
    mask : (optional) binary mask (xdata, ydata); 0: masked, 1: valid

    OUTPUT
    HSI : downsampled HS image (xdata//w, ydata//w, band).
          With a mask, cells whose w*w window is not fully valid stay 0.
    '''
    masking = 0 if np.isscalar(mask) else 1

    xdata = data.shape[0]
    ydata = data.shape[1]
    band = data.shape[2]
    hx = int(np.floor(xdata/w))
    hy = int(np.floor(ydata/w))
    HSI = np.zeros((hx, hy, band))
    sig = w/2.35482  # FWHM -> sigma (2*sqrt(2*ln2) ~= 2.35482)

    # Interior cells use a larger window extended by `pad` on each side:
    # even w -> 2w kernel, odd w -> 2w-1 kernel (same as the original
    # even/odd branches, folded together).
    pad = w//2 if np.mod(w, 2) == 0 else (w-1)//2
    big = w + 2*pad
    H1 = gaussian_filter2d((w, w), sig).reshape(w, w, 1)
    H2 = gaussian_filter2d((big, big), sig).reshape(big, big, 1)

    for x in range(hx):
        for y in range(hy):
            # With a mask, only cells whose full w*w footprint is valid
            # are filled; the rest keep their zero initialization.
            if masking and mask[x*w:(x+1)*w, y*w:(y+1)*w].sum() != w**2:
                continue
            if x == 0 or x == hx-1 or y == 0 or y == hy-1:
                # Border cells: plain w*w window (no out-of-bounds reach).
                win = data[x*w:(x+1)*w, y*w:(y+1)*w, :]
                ker = H1
            else:
                # Interior cells: extended window for a smoother PSF.
                win = data[x*w-pad:(x+1)*w+pad, y*w-pad:(y+1)*w+pad, :]
                ker = H2
            HSI[x, y, :] = (np.double(win) * np.tile(ker, (1, 1, band))).sum(axis=0).sum(axis=0)

    return HSI

def zoom_nn(data, w):
    '''
    Zoom a 2-D array by integer factor w via nearest-neighbor interpolation.

    INPUT
    data : 2-D array (rows, cols)
    w    : integer zoom factor

    OUTPUT
    out : (rows*w, cols*w) array; each input pixel becomes a w*w block.
    '''
    # np.repeat along both axes replicates each pixel into a w*w block
    # (equivalent to the previous tile/transpose/reshape chain).
    # The stray debug print of data.shape was removed.
    return np.repeat(np.repeat(data, w, axis=0), w, axis=1)
def zoom_bi(data, w):
    '''
    Zoom a 2-D array by integer factor w via bilinear interpolation.

    INPUT
    data : 2-D array (rows, cols)
    w    : integer zoom factor

    OUTPUT
    out : (rows*w, cols*w) interpolated array
    '''
    rows = data.shape[0]
    cols = data.shape[1]
    # Fractional source coordinate of each target pixel center, clipped
    # to the valid source range.
    r = np.tile(((2*np.r_[0:rows*w]+1)/(2*w)-0.5).reshape(rows*w, 1), (1, cols*w))
    c = np.tile((2*np.r_[0:cols*w]+1)/(2*w)-0.5, (rows*w, 1))
    r[r < 0] = 0
    r[r > rows-1] = rows-1
    c[c < 0] = 0
    c[c > cols-1] = cols-1
    # Hoist the repeated floor computations out of the weight/gather
    # expressions (the original recomputed np.floor eight times).
    r0 = np.floor(r)
    c0 = np.floor(c)
    w4 = (r0+1-r)*(c0+1-c)
    w3 = (r0+1-r)*(c-c0)
    w2 = (r-r0)*(c0+1-c)
    w1 = (r-r0)*(c-c0)
    # Pad one zero row/column so the +1 neighbor index is always valid;
    # the padded cells always receive zero weight.
    data = np.hstack((np.vstack((data, np.zeros((1, cols)))), np.zeros((rows+1, 1))))
    ri = r0.astype(int)
    ci = c0.astype(int)
    out = (w4*data[ri, ci] + w3*data[ri, ci+1]
           + w2*data[ri+1, ci] + w1*data[ri+1, ci+1])

    return out

def lsqnonneg(y, A):
    '''
    Nonnegative least squares via the active set method.

    Solves   min |y-Ax|^2  s.t.  x >= 0
    (Lawson-Hanson style active-set iteration.)

    USAGE
    x = lsqnonneg(y,A)

    INPUT
    y : observation (m,1)
    A : mixing matrix (m,n)

    OUTPUT
    x : coefficients (n,1)
    '''
    # Tolerance scaled by the matrix magnitude and size (10*eps*|A|_1*max dim).
    t = 10*2.2204e-16*np.max(np.sum(np.abs(A), axis=0))*max([A.shape[0], A.shape[1]])

    n = A.shape[1]

    # P marks the passive (solved) set, R the active (zeroed) set.
    x = np.zeros((n, 1))
    s = x.copy()
    P = np.zeros((n, 1))
    R = np.ones((n, 1))
    w = np.dot(A.transpose(), (y - np.dot(A, x)))

    # main loop: keep moving the best-gradient variable into the passive set
    c = 0
    while R.sum() > 0 and w.max() > t:
        if c > 0:
            j_pre = j
        j = np.nonzero(w == w.max())
        if c > 0:
            # Stop if the same index is selected twice in a row (cycling).
            # NOTE(review): this tuple comparison only behaves as intended
            # while the maximum of w is unique -- confirm for degenerate A.
            if j == j_pre:
                break
        c = c+1

        P[j[0]] = 1
        R[j[0]] = 0
        # Unconstrained least squares on the passive set.
        Ap = A[:, np.nonzero(P == 1)[0]]
        sp = np.dot(np.linalg.inv(np.dot(Ap.transpose(), Ap)), np.dot(Ap.transpose(), y))
        s[np.nonzero(P == 1)] = sp.reshape(1, len(sp))[0, :]
        # Inner loop: back off along (s-x) until the passive set is feasible.
        while s[np.nonzero(P == 1)].min() <= 0:
            if sum((s <= 0)*((x-s) != 0)) != 0:
                alpha = (x[(s <= 0)*((x-s) != 0)] / (x[(s <= 0)*((x-s) != 0)]-s[(s <= 0)*((x-s) != 0)])).min()
                x = x + alpha*(s-x)
                # Variables driven to zero return to the active set.
                R[np.nonzero(x == 0)] = 1
                P[np.nonzero(x == 0)] = 0
                Ap = A[:, np.nonzero(P == 1)[0]]
                sp = np.dot(np.linalg.inv(np.dot(Ap.transpose(), Ap)), np.dot(Ap.transpose(), y))
                s[np.nonzero(P == 1)] = sp.reshape(1, len(sp))[0]
                s[np.nonzero(R == 1)] = 0
            else:
                break
        x = s.copy()
        w = np.dot(A.transpose(), (y - np.dot(A, x)))

    return x

def nls_su(Y, A):
    '''
    Nonnegative least squares for spectral unmixing.

    Solves   min |Y-AX|_F^2  s.t.  X >= 0
    column by column via lsqnonneg.

    USAGE
    X = nls_su(Y,A)

    INPUT
    Y : observation (m,p)
    A : mixing matrix (m,n)

    OUTPUT
    X : coefficients (n,p)
    '''
    n = A.shape[1]
    p = Y.shape[1]
    m = Y.shape[0]
    X = np.zeros((p, n))
    for i in range(p):
        y = Y[:, i].reshape(m, 1).copy()
        # (the stray per-iteration debug print of n, p was removed)
        X[i, :] = lsqnonneg(y, A).transpose().copy()

    return X.transpose()
def estR(HS, MS, mask=0):
    '''
    Estimation of relative spectral response functions (SRFs)
    via the nonnegative least squares method.

    USAGE
    R = estR(HS,MS,mask)

    INPUT
    HS  : low-spatial-resolution HS image (rows2, cols2, bands2)
    MS  : MS image (rows1, cols1, bands1)
    mask: (optional) binary mask for processing (rows2, cols2)
          (mainly for real data)

    OUTPUT
    R : relative SRFs (bands1, bands2+1) -- a ones/offset column is
        appended to HS below, so the last coefficient is an offset term.
    '''

    rows1 = MS.shape[0]
    cols1 = MS.shape[1]
    bands1 = MS.shape[2]
    rows2 = HS.shape[0]
    cols2 = HS.shape[1]
    bands2 = HS.shape[2]

    # masking mode
    if np.isscalar(mask):
        masking = 0
        mask = np.ones((rows2, cols2))
    else:
        masking = 1

    # Append the mask as an extra band so the NNLS fit includes an
    # offset column (all ones when no mask is given).
    HS = np.hstack((HS.reshape(rows2*cols2, bands2), mask.reshape(rows2*cols2, 1))).reshape(rows2, cols2, bands2+1)
    bands2 = HS.shape[2]

    R = np.zeros((bands1, bands2))

    # Downgrade the MS spatial resolution to match HS.
    # NOTE(review): assumes rows1/rows2 == cols1/cols2 (integer) -- confirm.
    w = int(rows1/rows2)
    mask2 = zoom_nn(mask, w)

    Y = gaussian_down_sample(MS, w, mask2).reshape(rows2*cols2, bands1)

    A = HS.reshape(rows2*cols2, bands2).copy()

    if masking == 1:
        # Restrict the fit to valid (unmasked) low-resolution pixels.
        Y = Y[mask.reshape(rows2*cols2) == 1, :]
        A = A[mask.reshape(rows2*cols2) == 1, :]

    # Solve one nonnegative least squares problem per MS band.
    for b in range(bands1):
        y = Y[:, b].reshape(Y.shape[0], 1).copy()
        r = lsqnonneg(y, A)
        R[b, :] = r.transpose().copy()

    return R

def vca(R, p):
    '''
    Vertex Component Analysis (VCA).

    USAGE
    U, indices = vca( R, p )

    INPUT
    R : hyperspectral data (bands, pixels)
    p : number of endmembers

    OUTPUT
    U : matrix of endmembers (bands, p)
    indices : indices of endmembers in R

    NOTE: the projection direction is drawn from np.random.rand, so
    results may vary between runs on data with ambiguous extrema.

    REFERENCE
    J. M. P. Nascimento and J. M. B. Dias, "Vertex component analysis: A
    fast algorithm to unmix hyperspectral data," IEEE Transactions on
    Geoscience and Remote Sensing, vol. 43, no. 4, pp. 898 - 910, Apr. 2005.
    '''

    N = R.shape[1]  # pixels
    L = R.shape[0]  # bands

    # Estimate SNR from the energy captured by the top-p subspace.
    r_m = R.mean(axis=1).reshape(L, 1)
    R_o = R - np.tile(r_m, (1, N))  # mean-centered data
    U, S, V = np.linalg.svd(np.dot(R_o, R_o.T) / N)
    Ud = U[:, :p]  # computes the p-projection matrix
    x_p = np.dot(Ud.T, R_o)
    P_y = (R**2).sum() / N
    P_x = (x_p**2).sum() / N + np.dot(r_m.T, r_m)
    SNR = np.abs(10*np.log10((P_x - (p/L)*P_y) / (P_y - P_x)))

    # Determine which projection to use.
    SNRth = 15 + 10*np.log(p) + 8
    #SNRth = 15 + 10*log(p) # threshold proposed in the original paper
    if SNR > SNRth:
        # High SNR: project onto the top-p subspace of the correlation matrix.
        d = p
        Ud, Sd, Vd = np.linalg.svd(np.dot(R, R.T)/N)
        # NOTE(review): this overwrites the SVD result above with columns of
        # the earlier covariance-based U -- possibly intended to be the fresh
        # Ud; confirm against the reference VCA implementation.
        Ud = U[:, :d]
        X = np.dot(Ud.T, R)
        u = X.mean(axis=1).reshape(X.shape[0], 1)
        # Projective normalization onto the simplex.
        Y = X / np.tile((X * np.tile(u, (1, N))).sum(axis=0), (d, 1))
    else:
        # Low SNR: project centered data onto p-1 dims and lift with a
        # constant coordinate.
        d = p-1
        r_m = (R.T).mean(axis=0).reshape((R.T).shape[1], 1)
        R_o = R - np.tile(r_m, (1, N))
        Ud, Sd, Vd = np.linalg.svd(np.dot(R_o, R_o.T)/N)
        Ud = U[:, :d]
        X = np.dot(Ud.T, R_o)
        c = np.sqrt((X**2).sum(axis=0).max())
        c = np.tile(c, (1, N))
        Y = np.vstack((X, c))

    e_u = np.zeros((p, 1))
    e_u[p-1, 0] = 1
    A = np.zeros((p, p))
    A[:, 0] = e_u[:, 0]

    I = np.eye(p)
    k = np.zeros((N, 1))

    # Iteratively find the vertex maximizing the projection onto a random
    # direction orthogonal to the span of already-found endmembers.
    indices = []
    for i in range(p):
        w = np.random.rand(p, 1)
        f = np.dot((I-np.dot(A, np.linalg.pinv(A))), w)
        f = f / np.linalg.norm(f)
        v = np.dot(f.T, Y)
        k = np.abs(v).argmax()
        A[:, i] = Y[:, k]
        indices.append(k)

    # Back-project the selected pixels to the original band space.
    if SNR > SNRth:
        U = np.dot(Ud, X[:, indices])
    else:
        U = np.dot(Ud, X[:, indices]) + np.tile(r_m, (1, p))

    return U, indices
def vd(data, alpha=10**(-3)):
    '''
    Virtual dimensionality (Harsanyi-Farrand-Chang / HFC method).

    USAGE
    out = vd(data,alpha)

    INPUT
    data : HSI data (bands, pixels)
    alpha: false alarm rate

    OUTPUT
    out : number of spectrally distinct signal sources in data

    REFERENCE
    J. Harsanyi, W. Farrand, and C.-I Chang, "Determining the number and
    identity of spectral endmembers: An integrated approach using
    Neyman-Pearson eigenthresholding and iterative constrained RMS error
    minimization," in Proc. 9th Thematic Conf. Geologic Remote Sensing,
    Feb. 1993.
    Chang, C.-I. and Du, Q., "Estimation of number of spectrally distinct
    signal sources in hyperspectral imagery," IEEE Transactions on Geoscience
    and Remote Sensing, vol. 42, pp. 608-619, 2004.
    '''
    data = np.double(data)
    N = data.shape[1]  # pixels
    L = data.shape[0]  # bands

    # Correlation matrix R and covariance matrix K.
    R = np.dot(data, data.T)/N
    K = np.cov(data)

    D_r, V_r = np.linalg.eig(R)
    D_k, V_k = np.linalg.eig(K)

    # Sorted eigenvalues, descending.
    e_r = np.sort(D_r)[::-1]
    e_k = np.sort(D_k)[::-1]

    # A source exists in band i when the correlation eigenvalue exceeds the
    # covariance eigenvalue by more than the Neyman-Pearson threshold.
    diff = e_r - e_k
    variance = (2*(e_r**2+e_k**2)/N)**0.5

    tau = -ppf(alpha, np.zeros(L), variance)

    out = sum(diff > tau)

    return out

def PSNR(ref, tar, mask=0):
    '''
    Peak signal to noise ratio (PSNR), per band.

    USAGE
    psnr_all, psnr_mean = PSNR(ref,tar)

    INPUT
    ref : reference HS data (rows, cols, bands)
    tar : target HS data (rows, cols, bands)
    mask: (optional) binary mask for processing (rows, cols) (0: mask, 1: image)

    OUTPUT
    psnr_all : PSNR per band (bands,)
    psnr_mean : average PSNR (scalar)
    '''
    rows = ref.shape[0]
    cols = ref.shape[1]
    bands = ref.shape[2]

    # masking mode
    if np.isscalar(mask):
        mask = np.ones((rows, cols))

    ref = ref.reshape(rows*cols, bands)
    tar = tar.reshape(rows*cols, bands)
    mask = mask.reshape(rows*cols)
    # Mean squared error restricted to the valid pixels.
    msr = ((ref[mask == 1, :]-tar[mask == 1, :])**2).mean(axis=0)
    # NOTE(review): peak is taken over ALL reference pixels, including
    # masked ones -- confirm this is the intended convention.
    max2 = ref.max(axis=0)**2

    psnr_all = 10*np.log10(max2/msr)
    psnr_mean = psnr_all.mean()

    return psnr_all, psnr_mean

def SAM(ref, tar, mask=0):
    '''
    Spectral angle mapper (SAM).

    USAGE
    sam_mean, map = SAM(ref,tar)

    INPUT
    ref : reference HS data (rows, cols, bands)
    tar : target HS data (rows, cols, bands)
    mask: (optional) binary mask for processing (rows, cols) (0: mask, 1: image)

    OUTPUT
    sam_mean : average value of SAM (scalar, degrees)
    map : 2-D angle map (degrees)
    '''
    rows = tar.shape[0]
    cols = tar.shape[1]

    # masking mode
    if np.isscalar(mask):
        mask = np.ones(rows*cols)
    else:
        mask = mask.reshape(rows*cols)

    prod_scal = (ref*tar).sum(axis=2)
    norm_orig = (ref*ref).sum(axis=2)
    norm_fusa = (tar*tar).sum(axis=2)
    prod_norm = np.sqrt(norm_orig*norm_fusa)
    # Deliberate alias: replacing zeros in prod_map also replaces them in
    # prod_norm, so zero-norm pixels enter the mean as 90-degree angles
    # (preserved from the original implementation).
    prod_map = prod_norm
    prod_map[prod_map == 0] = 2.2204e-16
    angle_map = np.real(np.arccos(prod_scal/prod_map))*180/np.pi
    prod_scal = prod_scal.reshape(rows*cols)
    prod_norm = prod_norm.reshape(rows*cols)
    # Mean angle over valid, nonzero-norm pixels.
    sam_mean = np.real(np.arccos(prod_scal[(prod_norm != 0)*(mask == 1)]/prod_norm[(prod_norm != 0)*(mask == 1)]).sum()/((prod_norm != 0)*(mask == 1)).sum())*180/np.pi

    return sam_mean, angle_map

def ppf(p, mu=0, sigma=1):
    '''
    Percent point function (inverse of cdf) for the normal distribution at p.

    USAGE
    out = ppf(p,mu,sigma)

    INPUT
    p : lower tail probability (scalar)
    mu : mean -- scalar or array (n,)
    sigma : standard deviation -- scalar or array (n,)

    OUTPUT
    out : quantile(s) corresponding to the lower tail probability p (n,)

    The previous implementation crashed on the scalar defaults
    (mu.shape on an int); scalars are now promoted to length-1 arrays,
    and the per-element loop is replaced by an equivalent vectorized
    expression.
    '''
    mu = np.atleast_1d(np.asarray(mu, dtype=np.double))
    sigma = np.atleast_1d(np.asarray(sigma, dtype=np.double))
    # Standard normal quantile scaled and shifted: sqrt(2)*erfinv(2p-1).
    return 2**0.5*sigma*erfinv(2*p-1) + mu