├── .DS_Store
├── Similarityfunction
│ ├── .DS_Store
│ ├── data
│ │ ├── .DS_Store
│ │ ├── cross-correlation.py
│ │ └── 1111.py
│ ├── .idea
│ │ ├── misc.xml
│ │ ├── modules.xml
│ │ └── Similarityfunction.iml
│ ├── wrapAffine
│ │ ├── input_Perspective_matrix.py
│ │ ├── 1111111.py
│ │ ├── similar_funtion.py
│ │ ├── sub_image_affine.py
│ │ ├── generator_perspective_fixed_moving_matrix.py
│ │ ├── sub_image.py
│ │ ├── image_wrapaffine.py
│ │ └── generator_cvs_path_affine_matrix.py
│ ├── similar_function
│ │ ├── rand_index.py
│ │ ├── band_to_band.py
│ │ ├── cross-correlation.py
│ │ ├── Silhouette_Coefficient轮廓系数.py
│ │ ├── Homogeneity.py
│ │ ├── 峰值信噪比PSNR.py
│ │ ├── HOG.py
│ │ ├── Mutual_Information.py
│ │ ├── ergodic_mliment.py
│ │ ├── LBP特征.py
│ │ ├── conv_impliment.py
│ │ ├── BRIEF特征描述子.py
│ │ ├── voxelmorph_loss.py
│ │ ├── FAST_impliment_9.py
│ │ ├── FAST_impliment.py
│ │ └── self_brief描述子.py
│ └── 111.py
├── image-20230109174009382.png
├── DenseNet_registration
│ ├── .DS_Store
│ ├── model
│ │ └── .DS_Store
│ ├── STN
│ │ ├── __pycache__
│ │ │ ├── STN_aff.cpython-36.pyc
│ │ │ ├── STN_proj.cpython-36.pyc
│ │ │ ├── TPS_STN.cpython-36.pyc
│ │ │ └── Proj_tr_matrix.cpython-36.pyc
│ │ ├── STN_proj.py
│ │ ├── TPS_STN.py
│ │ └── STN_aff.py
│ ├── Registration_model
│ │ ├── __pycache__
│ │ │ ├── loss.cpython-36.pyc
│ │ │ ├── loss.cpython-37.pyc
│ │ │ ├── densenet.cpython-36.pyc
│ │ │ ├── data_generator.cpython-36.pyc
│ │ │ ├── densenet_model.cpython-36.pyc
│ │ │ └── densenet_model.cpython-37.pyc
│ │ ├── data_generator.py
│ │ ├── predict.py
│ │ ├── train.py
│ │ ├── densenet.py
│ │ └── loss.py
│ ├── .idea
│ │ ├── modules.xml
│ │ ├── misc.xml
│ │ └── DenseNet_registration.iml
│ ├── experiment
│ │ ├── test_tps.py
│ │ ├── test_cal_prj_mat_proj_stn.py
│ │ ├── test_tps_pot.py
│ │ └── cal_Homography_batch.py
│ └── 111111111.py
├── generate_affine_pre_data
│ ├── .DS_Store
│ ├── .idea
│ │ ├── misc.xml
│ │ ├── modules.xml
│ │ └── generate_affine_pre_data.iml
│ ├── wrapAffine
│ │ ├── input_Perspective_matrix.py
│ │ ├── test_moving_pading.py
│ │ ├── 1111111.py
│ │ ├── displacement_4point_opencv.py
│ │ ├── similar_funtion.py
│ │ ├── different_4_4point_contrant.py
│ │ ├── sub_image_affine.py
│ │ ├── generator_perspective_fixed_moving_matrix.py
│ │ ├── sub_image.py
│ │ ├── test_4point_paper_random_landsat_256.py
│ │ ├── test_4point_random_random_landsat_256.py
│ │ ├── generator_cvs_path_affine_matrix.py
│ │ ├── displacement_4point.py
│ │ └── homography_matrix_numpy.py
│ ├── Verification_pre
│ │ └── Verification.py
│ ├── generate_train_data
│ │ ├── 4_point_displacement.py
│ │ ├── 4_point_displacement_fixed_random_landsat_256.py
│ │ ├── 4_point_displacement_fixed_random_overlap_landsat_256.py
│ │ ├── 4_point_displacement_random_random_landsat_256.py
│ │ └── 4_point_displacement_paper_random_256.py
│ └── generate_pre_test
│   └── pre_matrix.py
├── README.md
├── data_generator.py
├── predict.py
├── train.py
├── densenet.py
└── loss.py
/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/.DS_Store
--------------------------------------------------------------------------------
/Similarityfunction/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/Similarityfunction/.DS_Store
--------------------------------------------------------------------------------
/image-20230109174009382.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/image-20230109174009382.png
--------------------------------------------------------------------------------
/DenseNet_registration/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/.DS_Store
--------------------------------------------------------------------------------
/Similarityfunction/data/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/Similarityfunction/data/.DS_Store
--------------------------------------------------------------------------------
/generate_affine_pre_data/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/generate_affine_pre_data/.DS_Store
--------------------------------------------------------------------------------
/DenseNet_registration/model/.DS_Store:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/model/.DS_Store
--------------------------------------------------------------------------------
/DenseNet_registration/STN/__pycache__/STN_aff.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/STN/__pycache__/STN_aff.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/STN/__pycache__/STN_proj.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/STN/__pycache__/STN_proj.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/STN/__pycache__/TPS_STN.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/STN/__pycache__/TPS_STN.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/STN/__pycache__/Proj_tr_matrix.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/STN/__pycache__/Proj_tr_matrix.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/__pycache__/loss.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/Registration_model/__pycache__/loss.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/__pycache__/loss.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/Registration_model/__pycache__/loss.cpython-37.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/__pycache__/densenet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/Registration_model/__pycache__/densenet.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/__pycache__/data_generator.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/Registration_model/__pycache__/data_generator.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/__pycache__/densenet_model.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/Registration_model/__pycache__/densenet_model.cpython-36.pyc
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/__pycache__/densenet_model.cpython-37.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/liliangzhi110/E2EIR/HEAD/DenseNet_registration/Registration_model/__pycache__/densenet_model.cpython-37.pyc
--------------------------------------------------------------------------------
/Similarityfunction/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Similarityfunction/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/DenseNet_registration/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/DenseNet_registration/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/generate_affine_pre_data/.idea/misc.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/generate_affine_pre_data/.idea/modules.xml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/DenseNet_registration/.idea/DenseNet_registration.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Similarityfunction/.idea/Similarityfunction.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/generate_affine_pre_data/.idea/generate_affine_pre_data.iml:
--------------------------------------------------------------------------------
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/input_Perspective_matrix.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

# Print 100 random perspective matrices: the four corners of a 128x128 tile
# are mapped to randomly perturbed positions near each corner.
for i in range(100):
    points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
    points2 = np.float32([[0, np.random.randint(0, 50)],
                          [np.random.randint(80, 128), 0],
                          [128, np.random.randint(85, 128)],
                          [np.random.randint(0, 40), 128]])
    matrix = cv2.getPerspectiveTransform(points1, points2)

    print(matrix)
    print("")
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/input_Perspective_matrix.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

# Print 100 random perspective matrices: the four corners of a 128x128 tile
# are mapped to randomly perturbed positions near each corner.
for i in range(100):
    points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
    points2 = np.float32([[0, np.random.randint(0, 50)],
                          [np.random.randint(80, 128), 0],
                          [128, np.random.randint(85, 128)],
                          [np.random.randint(0, 40), 128]])
    matrix = cv2.getPerspectiveTransform(points1, points2)

    print(matrix)
    print("")
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/rand_index.py:
--------------------------------------------------------------------------------
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt

# Compare two bands of a Landsat stack with the adjusted Rand index,
# treating the flattened pixel values as cluster labels.
data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

image_cv2_band7 = data[0:1, 0:100, 0:100].reshape((100, 100))
image_cv2_band1 = data[1:2, 0:100, 0:100].reshape((100, 100))

plt.subplot(1, 2, 1)
plt.imshow(image_cv2_band7)

plt.subplot(1, 2, 2)
plt.imshow(image_cv2_band1)
plt.show()

value = metrics.adjusted_rand_score(image_cv2_band7.reshape((10000,)), image_cv2_band1.reshape((10000,)))
print(value)
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/band_to_band.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
import matplotlib.pyplot as plt

image_cv2_band7 = cv2.imread("D:\\ProgramData_second\\Similarityfunction\\data\\origin_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\ProgramData_second\\Similarityfunction\\data\\origin_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = image_cv2_band7[0:100, 0:100]
# image_cv2_band1 = image_cv2_band1[100:200, 100:200]/255
image_cv2_band1 = image_cv2_band1[0:100, 0:100]
--------------------------------------------------------------------------------
/Similarityfunction/data/cross-correlation.py:
--------------------------------------------------------------------------------
from sklearn import metrics
import cv2
import numpy as np
import matplotlib.pyplot as plt

data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

image_cv2_band7 = data[0:1, 0:100, 0:100].reshape((100, 100))
image_cv2_band1 = data[3:4, 100:200, 0:100].reshape((100, 100))
# image_cv2_band1 = data[5:6, 100:200, 100:200].reshape((100, 100))

plt.subplot(1, 2, 1)
plt.imshow(image_cv2_band7)

plt.subplot(1, 2, 2)
plt.imshow(image_cv2_band1)
plt.show()

value = np.correlate(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))
print(value)
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/cross-correlation.py:
--------------------------------------------------------------------------------
from sklearn import metrics
import cv2
import numpy as np
import matplotlib.pyplot as plt

image_cv2_band7 = cv2.imread("D:\\ProgramData_second\\Similarityfunction\\data\\origin_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\ProgramData_second\\Similarityfunction\\data\\origin_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = image_cv2_band7[0:100, 0:100]
# image_cv2_band1 = image_cv2_band1[100:200, 100:200]/255
# image_cv2_band1 = image_cv2_band1[100:200, 100:200]
image_cv2_band1 = image_cv2_band1[0:100, 0:100]

value = np.correlate(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))

print(value)
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/Silhouette_Coefficient轮廓系数.py:
--------------------------------------------------------------------------------
from sklearn import metrics
import cv2
import numpy as np
import matplotlib.pyplot as plt

data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

image_cv2_band7 = data[0:1, 0:100, 0:100].reshape((100, 100))
image_cv2_band1 = data[3:4, 100:200, 0:100].reshape((100, 100))
# image_cv2_band1 = data[5:6, 100:200, 100:200].reshape((100, 100))

plt.subplot(1, 2, 1)
plt.imshow(image_cv2_band7)

plt.subplot(1, 2, 2)
plt.imshow(image_cv2_band1)
plt.show()

value = metrics.silhouette_score(image_cv2_band1, image_cv2_band7)
print(value)
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/test_moving_pading.py:
--------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np

image_cv2 = cv2.imread("C:\\Users\\kylenate\\Desktop\\paper_registration1114\\santa_cruz_az-band7.tif", cv2.IMREAD_UNCHANGED)
plt.subplot(1, 3, 1)
plt.imshow(image_cv2)

# Zero-pad the moving image before warping so content is not clipped.
image_cv2 = np.pad(image_cv2, ((600, 300), (800, 600)), 'constant', constant_values=(0, 0))

plt.subplot(1, 3, 2)
plt.imshow(image_cv2)

pts1 = np.float32([[0, 0], [1000, 0], [1000, 1000]])
pts2 = np.float32([[0, 200], [870, 0], [1000, 750]])

M = cv2.getAffineTransform(pts1, pts2)

wrap = cv2.warpAffine(image_cv2, M, (1000, 1000))

plt.subplot(1, 3, 3)
plt.imshow(wrap)

plt.show()
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/Homogeneity.py:
--------------------------------------------------------------------------------
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt

data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

image_cv2_band7 = data[0:1, 0:100, 0:100].reshape((100, 100))
image_cv2_band1 = data[1:2, 0:100, 0:100].reshape((100, 100))

plt.subplot(1, 2, 1)
plt.imshow(image_cv2_band7)

plt.subplot(1, 2, 2)
plt.imshow(image_cv2_band1)
plt.show()

# Clustering-comparison scores, treating flattened pixel values as labels.
value = metrics.homogeneity_score(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))
value1 = metrics.completeness_score(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))
value2 = metrics.v_measure_score(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))
value3 = metrics.fowlkes_mallows_score(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))
# value4 = metrics.calinski_harabaz_score(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))

print(value, value1, value2, value3)
print(np.fabs(value * 100), np.fabs(value1 * 100), np.fabs(value2 * 100))
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/峰值信噪比PSNR.py:
--------------------------------------------------------------------------------
import matplotlib.pyplot as plt
import numpy as np
import math

data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

image_cv2_band7 = data[0:1, 0:100, 0:100].reshape((100, 100))
# image_cv2_band1 = data[5:6, 0:100, 0:100].reshape((100, 100))
image_cv2_band1 = data[3:4, 100:200, 0:100].reshape((100, 100))

plt.subplot(1, 2, 1)
plt.imshow(image_cv2_band7)

plt.subplot(1, 2, 2)
plt.imshow(image_cv2_band1)
plt.show()


def psnr(target, ref, scale):
    # target: target image; ref: reference image; scale: border width to crop
    # assumes intensities normalized to [0, 1]
    target_data = np.array(target)
    target_data = target_data[scale:-scale, scale:-scale]

    ref_data = np.array(ref)
    ref_data = ref_data[scale:-scale, scale:-scale]

    diff = ref_data - target_data
    diff = diff.flatten('C')
    rmse = math.sqrt(np.mean(diff ** 2.))
    return 20 * math.log10(1.0 / rmse)


out = psnr(image_cv2_band1, image_cv2_band7, 1)

print(out)
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Multimodal image fusion framework for end-to-end remote sensing image registration

![](image-20230109174009382.png)

## Contents

1. Dataset and Data generation
2. Training


## Dataset and Data generation

For training, validating, and testing the proposed network, we employ the PS-RGB (RGB), Multispectral (MS), and SAR-Intensity (SAR) datasets from the SpaceNet [32] dataset.

In addition to the dataset itself, we have provided the scripts for data generation. To generate data, use the .py files in the **E2EIR/generate_affine_pre_data/generate_train_data/** folder:

```Shell
E2EIR/generate_affine_pre_data/generate_train_data/
```
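
For example, one of the 4-point displacement generators can be run directly (a minimal sketch; the input image and output .npz paths are hard-coded inside each script and must be adapted to your environment first):

```Shell
python E2EIR/generate_affine_pre_data/generate_train_data/4_point_displacement.py
```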

Or you can use the already generated training data:

```Shell
dataset1: 0.23m resolution https://drive.google.com/drive/folders/1xyH2P1TRsRd9u2oXFGbYQ9zyQ4Ewu5U4?usp=sharing

dataset2: 3.75m resolution https://drive.google.com/drive/folders/14kMVwVdvZ9YEFrwieqqMW1Fkn5Ne381S?usp=sharing

dataset3: 30m resolution https://drive.google.com/drive/folders/1KL1wQ9-1oFthXH9YaCCNJ7KuRXozxu7M?usp=sharing
```


## Training

Once the dataset has been generated, you can train the network by running:

```Shell
python train.py
```
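
The repository also ships `predict.py` alongside `train.py`; assuming the model and data paths configured inside that script point at a trained checkpoint, inference is invoked the same way:

```Shell
python predict.py
```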
--------------------------------------------------------------------------------
/DenseNet_registration/experiment/test_tps.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import cv2
import matplotlib.pyplot as plt
from STN.TPS_STN import TPS_STN

img = cv2.imread("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\fixed.jpg", cv2.IMREAD_UNCHANGED)
out_size = list(img.shape)
shape = [1] + out_size + [1]

# 4x4 grid of thin-plate-spline control points with random offsets in [-1, 1]
nx = 4
ny = 4
# x, y = np.ones(shape=(4,)), np.ones(shape=(4,))

x, y = np.random.uniform(-1.0, 1.0, 4), np.random.uniform(-1.0, 1.0, 4)
# x, y = np.linspace(0, 1, 3), np.linspace(0, 1, 3)
x, y = np.meshgrid(x, y)
xs = x.flatten()
ys = y.flatten()
cps = np.vstack([xs, ys]).T
print(cps)

# (unused in this test) a regular 3x3 grid of reference points
v = np.array([
    [-1., -1.],
    [0., -1.],
    [1., -1.],
    [-1., 0.],
    [0., 0.],
    [1., 0.],
    [-1., 1.],
    [0., 1.],
    [1., 1.],
])

p = tf.constant(cps.reshape([1, nx * ny, 2]), dtype=tf.float32)
t_img = tf.constant(img.reshape(shape), dtype=tf.float32)
t_img = TPS_STN(t_img, nx, ny, p, out_size)

plt.subplot(1, 2, 1)
plt.imshow(img)

plt.subplot(1, 2, 2)
plt.imshow(t_img.numpy().reshape((128, 128)))
plt.show()
--------------------------------------------------------------------------------
/generate_affine_pre_data/Verification_pre/Verification.py:
--------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np
# np.set_printoptions(suppress=True)

matrixt_file = 'D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\pre_displacement_4_point.npz'

fixed_image = np.load(matrixt_file)['fixed'].reshape((128, 128))
moving_image = np.load(matrixt_file)['moving'].reshape((128, 128))
displacement_4_point = np.load(matrixt_file)['displacement_4_point'].reshape((4, 2))

# Ground-truth corner positions: the tile corners plus their displacements.
points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
displacement_4_point = points1 + displacement_4_point

# A hard-coded example of corner displacements predicted by the network.
matrix_pre = np.array([[10.363248, 13.420181],
                       [-13.995538, 6.0418906],
                       [-7.536574, -1.4410772],
                       [13.6674795, -6.705563]])
matrix_pre = (matrix_pre + points1).astype('float32')

print(displacement_4_point)
print('')
print(matrix_pre)

# Warp the moving image back with the homography implied by the prediction.
matrix = cv2.getPerspectiveTransform(matrix_pre, points1)
output = cv2.warpPerspective(moving_image, matrix, (128, 128))

plt.subplot(1, 3, 1)
plt.title('fixed')
plt.imshow(fixed_image)

plt.subplot(1, 3, 2)
plt.title('moving')
plt.imshow(moving_image)

plt.subplot(1, 3, 3)
plt.title('moved')
plt.imshow(output)

plt.show()
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/1111111.py:
--------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np

image_cv2 = cv2.imread("C:\\Users\\kylenate\\Desktop\\paper_registration1114\\santa_cruz_az-band7.tif", cv2.IMREAD_UNCHANGED)

plt.subplot(2, 2, 1)
plt.title('original image')
plt.imshow(image_cv2)

points1 = np.float32([[0, 0], [1000, 0], [1000, 1000], [0, 1000]])
points2 = np.float32([[0, 200], [870, 0], [1000, 750], [160, 1000]])
matrix = cv2.getPerspectiveTransform(points1, points2)
output = cv2.warpPerspective(image_cv2, matrix, (1000, 1000))

print(matrix)

# projective transform
plt.subplot(2, 2, 2)
plt.title('projective transform')
plt.imshow(output)

# inverse transform via swapped point sets
points1 = np.float32([[0, 200], [870, 0], [1000, 750], [160, 1000]])
points2 = np.float32([[0, 0], [1000, 0], [1000, 1000], [0, 1000]])

matrix1 = cv2.getPerspectiveTransform(points1, points2)
output1 = cv2.warpPerspective(output, matrix1, (1000, 1000))

print('')
print(matrix1)
plt.subplot(2, 2, 3)
plt.title('inverse via swapped points')
plt.imshow(output1)

# invert the matrix itself, then transform
output3 = cv2.warpPerspective(output, np.linalg.inv(matrix), (1000, 1000))
plt.subplot(2, 2, 4)
plt.title('inverse via matrix inversion')
plt.imshow(output3)
plt.show()
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/1111111.py:
--------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np

image_cv2 = cv2.imread("C:\\Users\\kylenate\\Desktop\\paper_registration1114\\santa_cruz_az-band7.tif", cv2.IMREAD_UNCHANGED)

plt.subplot(2, 2, 1)
plt.title('original image')
plt.imshow(image_cv2)

points1 = np.float32([[0, 0], [1000, 0], [1000, 1000], [0, 1000]])
points2 = np.float32([[0, 200], [870, 0], [1000, 750], [160, 1000]])
matrix = cv2.getPerspectiveTransform(points1, points2)
output = cv2.warpPerspective(image_cv2, matrix, (1000, 1000))

print(matrix)

# projective transform
plt.subplot(2, 2, 2)
plt.title('projective transform')
plt.imshow(output)

# inverse transform via swapped point sets
points1 = np.float32([[0, 200], [870, 0], [1000, 750], [160, 1000]])
points2 = np.float32([[0, 0], [1000, 0], [1000, 1000], [0, 1000]])

matrix1 = cv2.getPerspectiveTransform(points1, points2)
output1 = cv2.warpPerspective(output, matrix1, (1000, 1000))

print('')
print(matrix1)
plt.subplot(2, 2, 3)
plt.title('inverse via swapped points')
plt.imshow(output1)

# invert the matrix itself, then transform
output3 = cv2.warpPerspective(output, np.linalg.inv(matrix), (1000, 1000))
plt.subplot(2, 2, 4)
plt.title('inverse via matrix inversion')
plt.imshow(output3)
plt.show()
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/displacement_4point_opencv.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np
from skimage.draw import line

np.set_printoptions(suppress=True)

image_cv2 = cv2.imread("C:\\Users\\lilia\\Desktop\\data.tif", cv2.IMREAD_UNCHANGED)

# Pad the image, then draw a red reference line across the central tile.
image_cv2 = np.pad(np.array(image_cv2), ((200, 200), (200, 200), (0, 0)), constant_values=0)

rr, cc = line(400, 400, 400, 600)
image_cv2[rr, cc] = (255, 0, 0)

print(image_cv2.shape)

cv2.imshow('result.jpg', image_cv2)

# a different four-point transform
point1_256 = np.float32([[400, 400], [600, 400], [600, 600], [400, 600]])
point2_256 = np.float32([[402, 398], [603, 402], [598, 596], [400, 605]])
matrix1 = cv2.getPerspectiveTransform(point1_256, point2_256)
output1 = cv2.warpPerspective(image_cv2, matrix1, (1000, 1000))

rr, cc = line(400, 400, 400, 600)
output1[rr, cc] = (255, 0, 0)

# the inverse transform (swapped point order), then transform again
matrix2 = cv2.getPerspectiveTransform(point2_256, point1_256)
output2 = cv2.warpPerspective(image_cv2, matrix2, (1000, 1000))

matrix3 = cv2.getPerspectiveTransform(point1_256, point2_256)
output3 = cv2.warpPerspective(output2, matrix3, (1000, 1000))

images = np.hstack([output1, output2])

cv2.imshow('result.jpg', images)

cv2.waitKey(0)
--------------------------------------------------------------------------------
/DenseNet_registration/experiment/test_cal_prj_mat_proj_stn.py:
--------------------------------------------------------------------------------
import numpy as np
import cv2
import tensorflow as tf
import matplotlib.pyplot as plt
from STN.Proj_tr_matrix import getPerspectiveTransformMatrix, Matrix
from STN.STN_proj import spatial_transformer_network

np.set_printoptions(suppress=True)

if __name__ == '__main__':

    batch = 3

    # four corner displacements, tiled into a batch of identical samples
    i = tf.convert_to_tensor([[10., -15.], [-100., -50.], [-10., 10.], [-8., -10.]])
    i = tf.reshape(i, (1, 8))
    i = tf.tile(i, [batch, 1])

    H = Matrix(i)

    img = cv2.imread('C:\\Users\\kylenate\\Desktop\\panda1.jpg')
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255  # BGR -> RGB for matplotlib

    height = img.shape[0]
    width = img.shape[1]

    imgs = tf.convert_to_tensor(img, dtype='float32')
    imgs = tf.reshape(imgs, (1, 500, 500, 3))
    imgs = tf.tile(imgs, [batch, 1, 1, 1])

    out_image = spatial_transformer_network(imgs, H)
    out_image = out_image.numpy()[0]

    # grey out the source pixels wherever the warped output is (near-)black
    indix = np.where(out_image[:, :, 0] < 0.001)

    x = indix[0]
    y = indix[1]

    for i in range(len(x)):
        img[x[i], y[i]] = 0.5

    plt.subplot(121)
    plt.imshow(img)

    plt.subplot(122)
    plt.imshow(out_image)
    plt.show()
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/similar_funtion.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

# For every interior pixel, average the squared differences to its four
# neighbours three pixels away (a simple local-contrast descriptor).
# Cast to a signed type first so the uint8 differences do not wrap around.
image_cv2_band7 = image_cv2_band7.astype(np.int32)
image_cv2_band1 = image_cv2_band1.astype(np.int32)

image1 = []
for i in range(3, image_cv2_band7.shape[0] - 3):
    for j in range(3, image_cv2_band7.shape[1] - 3):
        temp = [np.square(image_cv2_band7[i][j + 3] - image_cv2_band7[i][j]),
                np.square(image_cv2_band7[i][j - 3] - image_cv2_band7[i][j]),
                np.square(image_cv2_band7[i - 3][j] - image_cv2_band7[i][j]),
                np.square(image_cv2_band7[i + 3][j] - image_cv2_band7[i][j])]
        image1.append(np.mean(np.array(temp)))

image2 = []
for i in range(3, image_cv2_band1.shape[0] - 3):
    for j in range(3, image_cv2_band1.shape[1] - 3):
        temp = [np.square(image_cv2_band1[i][j + 3] - image_cv2_band1[i][j]),
                np.square(image_cv2_band1[i][j - 3] - image_cv2_band1[i][j]),
                np.square(image_cv2_band1[i - 3][j] - image_cv2_band1[i][j]),
                np.square(image_cv2_band1[i + 3][j] - image_cv2_band1[i][j])]
        image2.append(np.mean(np.array(temp)))

image1 = np.array(image1)
image2 = np.array(image2)

print(image1 - image2)
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/HOG.py:
--------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np

data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

img = data[0:1, 0:200, 0:200].reshape((200, 200))
img1 = data[2:3, 0:200, 0:200].reshape((200, 200))

plt.subplot(1, 2, 1)
plt.imshow(img)

plt.subplot(1, 2, 2)
plt.imshow(img1)
plt.show()

# cv2's HOGDescriptor expects 8-bit input; cast in case the stack is stored as float
img = img.astype(np.uint8)
img1 = img1.astype(np.uint8)

# set the HOG parameters here
winSize = (128, 128)
blockSize = (64, 64)
blockStride = (8, 8)
cellSize = (16, 16)
nbins = 9

# create the hog object with these parameters; the remaining arguments keep their defaults
hog = cv2.HOGDescriptor(winSize, blockSize, blockStride, cellSize, nbins)

winStride = (8, 8)
padding = (8, 8)
descripor = hog.compute(img, winStride, padding).reshape((-1,))
descripor1 = hog.compute(img1, winStride, padding).reshape((-1,))

descripor = np.array(descripor)
descripor1 = np.array(descripor1)

print(descripor)
print(descripor1)

reduce = descripor - descripor1
squr = np.mean(reduce ** 2)

print('mean squared error of the HOG descriptors', squr)
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/similar_funtion.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

# For every interior pixel, average the squared differences to its four
# neighbours three pixels away (a simple local-contrast descriptor).
# Cast to a signed type first so the uint8 differences do not wrap around.
image_cv2_band7 = image_cv2_band7.astype(np.int32)
image_cv2_band1 = image_cv2_band1.astype(np.int32)

image1 = []
for i in range(3, image_cv2_band7.shape[0] - 3):
    for j in range(3, image_cv2_band7.shape[1] - 3):
        temp = [np.square(image_cv2_band7[i][j + 3] - image_cv2_band7[i][j]),
                np.square(image_cv2_band7[i][j - 3] - image_cv2_band7[i][j]),
                np.square(image_cv2_band7[i - 3][j] - image_cv2_band7[i][j]),
                np.square(image_cv2_band7[i + 3][j] - image_cv2_band7[i][j])]
        image1.append(np.mean(np.array(temp)))

image2 = []
for i in range(3, image_cv2_band1.shape[0] - 3):
    for j in range(3, image_cv2_band1.shape[1] - 3):
        temp = [np.square(image_cv2_band1[i][j + 3] - image_cv2_band1[i][j]),
                np.square(image_cv2_band1[i][j - 3] - image_cv2_band1[i][j]),
                np.square(image_cv2_band1[i - 3][j] - image_cv2_band1[i][j]),
                np.square(image_cv2_band1[i + 3][j] - image_cv2_band1[i][j])]
        image2.append(np.mean(np.array(temp)))

image1 = np.array(image1)
image2 = np.array(image2)

print(image1 - image2)
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/Mutual_Information.py:
--------------------------------------------------------------------------------
from sklearn import metrics
import numpy as np
import matplotlib.pyplot as plt

data = np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']

image_cv2_band7 = data[0:1, 0:100, 0:100].reshape((100, 100))
image_cv2_band1 = data[3:4, 100:200, 0:100].reshape((100, 100))
# image_cv2_band1 = data[5:6, 100:200, 100:200].reshape((100, 100))

plt.subplot(1, 2, 1)
plt.imshow(image_cv2_band7)

plt.subplot(1, 2, 2)
plt.imshow(image_cv2_band1)
plt.show()

value = metrics.adjusted_mutual_info_score(image_cv2_band1.reshape((10000,)), image_cv2_band7.reshape((10000,)))
print(value)

# Sketch of a normalized-mutual-information computation, kept for reference:
# p_y = tf.reduce_sum(p_y_on_x, axis=0, keepdim=True) / num_x  # 1-by-num_y
# h_y = -tf.reduce_sum(p_y * tf.math.log(p_y))
# p_c = tf.reduce_sum(p_c_on_x, axis=0) / num_x  # 1-by-num_c
# h_c = -tf.reduce_sum(p_c * tf.math.log(p_c))
# p_x_on_y = p_y_on_x / num_x / p_y  # num_x-by-num_y
# p_c_on_y = tf.matmul(p_c_on_x, p_x_on_y, transpose_a=True)  # num_c-by-num_y
# h_c_on_y = -tf.reduce_sum(tf.reduce_sum(p_c_on_y * tf.math.log(p_c_on_y), axis=0) * p_y)
# i_y_c = h_c - h_c_on_y
# nmi = 2 * i_y_c / (h_y + h_c)
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/ergodic_mliment.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\ProgramData_second\\Similarityfunction\\data\\origin_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\ProgramData_second\\Similarityfunction\\data\\origin_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = image_cv2_band7[0:100, 0:100] / 255
image_cv2_band1 = image_cv2_band1[0:100, 0:100] / 255
print(np.max(image_cv2_band7), np.min(image_cv2_band1))

# For every interior pixel, average the absolute differences to its four
# neighbours three pixels away.
image1 = []
for i in range(3, image_cv2_band7.shape[0] - 3):
    for j in range(3, image_cv2_band7.shape[1] - 3):
        temp = [np.fabs(image_cv2_band7[i][j + 3] - image_cv2_band7[i][j]),
                np.fabs(image_cv2_band7[i][j - 3] - image_cv2_band7[i][j]),
                np.fabs(image_cv2_band7[i - 3][j] - image_cv2_band7[i][j]),
                np.fabs(image_cv2_band7[i + 3][j] - image_cv2_band7[i][j])]
        image1.append(np.mean(np.array(temp)))

image2 = []
for i in range(3, image_cv2_band1.shape[0] - 3):
    for j in range(3, image_cv2_band1.shape[1] - 3):
        temp = [np.fabs(image_cv2_band1[i][j + 3] - image_cv2_band1[i][j]),
                np.fabs(image_cv2_band1[i][j - 3] - image_cv2_band1[i][j]),
                np.fabs(image_cv2_band1[i - 3][j] - image_cv2_band1[i][j]),
                np.fabs(image_cv2_band1[i + 3][j] - image_cv2_band1[i][j])]
        image2.append(np.mean(np.array(temp)))

image1 = np.array(image1)
image2 = np.array(image2)

print('direct subtraction', np.sum(image_cv2_band7 - image_cv2_band1))
print('similarity measure', np.sum(image1 - image2))
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/sub_image_affine.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = np.array(image_cv2_band7)
# transform matrix
print(image_cv2_band7.shape)

# Slide a 128x128 window over both bands; save the fixed/moved tiles, then
# warp the moved tile with a random perspective transform to get the moving tile.
points = []
for x in range(64, image_cv2_band7.shape[0] - 64, 128):
    for y in range(64, image_cv2_band7.shape[1] - 64, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64]
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\Affine_image\\fixed_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_fixed)
        cv2.imwrite("D:\\Second_paper_VAE_CNN\\Affine_image\\moved_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moved)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[0, np.random.randint(0, 50)],
                              [np.random.randint(80, 128), 0],
                              [128, np.random.randint(85, 128)],
                              [np.random.randint(0, 40), 128]])

        matrix = cv2.getPerspectiveTransform(points1, points2)
        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\Affine_image\\moving_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moving)

        print("next")
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/different_4_4point_contrant.py:
--------------------------------------------------------------------------------
import cv2
import matplotlib.pyplot as plt
import numpy as np

np.set_printoptions(suppress=True)

image_cv2 = cv2.imread("C:\\Users\\lilia\\Desktop\\santa_cruz_az-band1.tif", cv2.IMREAD_UNCHANGED)

plt.subplot(3, 2, 1)
plt.title('original image')
plt.imshow(image_cv2)

points1 = np.float32([[0, 0], [1000, 0], [1000, 1000], [0, 1000]])
points2 = np.float32([[0, 200], [870, 0], [1000, 750], [160, 1000]])
matrix = cv2.getPerspectiveTransform(points1, points2)
output = cv2.warpPerspective(image_cv2, matrix, (2000, 2000))

# projective transform
plt.subplot(3, 2, 2)
plt.title('projective transform')
plt.imshow(output)

# transform defined by a different set of four points
point1_256 = np.float32([[400, 400], [600, 400], [600, 600], [400, 600]])
point2_256 = np.float32([[405, 359], [610, 410], [606, 590], [400, 605]])
matrix1 = cv2.getPerspectiveTransform(point1_256, point2_256)
output1 = cv2.warpPerspective(image_cv2, matrix1, (2000, 2000))

plt.subplot(3, 2, 3)
plt.title('H(AB) 4-point transform')
plt.imshow(output1)

# the inverse transform, built from the swapped point order
matrix2 = cv2.getPerspectiveTransform(point2_256, point1_256)
output2 = cv2.warpPerspective(image_cv2, matrix2, (2000, 2000))

plt.subplot(3, 2, 4)
plt.title('H(BA) 4-point transform')
plt.imshow(output2)

matrix3 = cv2.getPerspectiveTransform(point1_256, point2_256)
output3 = cv2.warpPerspective(output2, matrix3, (2000, 2000))

plt.subplot(3, 2, 5)
plt.title('result of applying H(AB) to the H(BA) image')
plt.imshow(output3)

plt.show()
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/sub_image_affine.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = np.array(image_cv2_band7)
# transform matrix
print(image_cv2_band7.shape)

# Slide a 128x128 window over both bands; save the fixed/moved tiles, then
# warp the moved tile with a random perspective transform to get the moving tile.
points = []
for x in range(64, image_cv2_band7.shape[0] - 64, 128):
    for y in range(64, image_cv2_band7.shape[1] - 64, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64]
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\Affine_image\\fixed_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_fixed)
        cv2.imwrite("D:\\Second_paper_VAE_CNN\\Affine_image\\moved_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moved)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[0, np.random.randint(0, 50)],
                              [np.random.randint(80, 128), 0],
                              [128, np.random.randint(85, 128)],
                              [np.random.randint(0, 40), 128]])

        matrix = cv2.getPerspectiveTransform(points1, points2)
        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\Affine_image\\moving_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moving)

        print("next")
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/generator_perspective_fixed_moving_matrix.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

# Collect fixed tiles, randomly warped moving tiles, and the inverse
# perspective matrices into one .npz archive.
path_fixed = []
path_moving = []
Perpecttive = []
for x in range(64, 1024, 128):
    for y in range(64, 1024, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64]
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        path_fixed.append(sub_fixed)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[0, np.random.randint(0, 50)],
                              [np.random.randint(80, 128), 0],
                              [128, np.random.randint(85, 128)],
                              [np.random.randint(0, 40), 128]])
        matrix = cv2.getPerspectiveTransform(points1, points2)

        # store the matrix that maps the warped tile back to the original
        matrix2 = cv2.getPerspectiveTransform(points2, points1)
        Perpecttive.append(matrix2)

        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))
        path_moving.append(sub_moving)

path_fixed = np.array(path_fixed)
path_moving = np.array(path_moving)
Perpecttive = np.array(Perpecttive)

np.savez("C:\\Users\\kylenate\\Desktop\\Perspective_matrix.npz",
         path_fixed=path_fixed,
         path_moving=path_moving,
         Perpecttive=Perpecttive)
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/generator_perspective_fixed_moving_matrix.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

# Collect fixed tiles, randomly warped moving tiles, and the inverse
# perspective matrices into one .npz archive.
path_fixed = []
path_moving = []
Perpecttive = []
for x in range(64, 1024, 128):
    for y in range(64, 1024, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64]
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        path_fixed.append(sub_fixed)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[0, np.random.randint(0, 50)],
                              [np.random.randint(80, 128), 0],
                              [128, np.random.randint(85, 128)],
                              [np.random.randint(0, 40), 128]])
        matrix = cv2.getPerspectiveTransform(points1, points2)

        # store the matrix that maps the warped tile back to the original
        matrix2 = cv2.getPerspectiveTransform(points2, points1)
        Perpecttive.append(matrix2)

        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))
        path_moving.append(sub_moving)

path_fixed = np.array(path_fixed)
path_moving = np.array(path_moving)
Perpecttive = np.array(Perpecttive)

np.savez("C:\\Users\\kylenate\\Desktop\\Perspective_matrix.npz",
         path_fixed=path_fixed,
         path_moving=path_moving,
         Perpecttive=Perpecttive)
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/sub_image.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = np.array(image_cv2_band7)
# transform matrix
print(image_cv2_band7.shape)

# Cut 128x128 tiles from both bands and derive a randomly warped moving tile
# for each fixed/moved pair.
points = []
for x in range(64, image_cv2_band7.shape[0] - 64, 128):
    for y in range(64, image_cv2_band7.shape[1] - 64, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64]
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\fixed_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_fixed)
        cv2.imwrite("D:\\Second_paper_VAE_CNN\\moved_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moved)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[0, np.random.randint(0, 50)],
                              [np.random.randint(80, 128), 0],
                              [128, np.random.randint(85, 128)],
                              [np.random.randint(0, 40), 128]])
        matrix = cv2.getPerspectiveTransform(points1, points2)
        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\moving_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moving)

        print("next")
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/sub_image.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

image_cv2_band7 = np.array(image_cv2_band7)
# transform matrix
print(image_cv2_band7.shape)

# Cut 128x128 tiles from both bands and derive a randomly warped moving tile
# for each fixed/moved pair.
points = []
for x in range(64, image_cv2_band7.shape[0] - 64, 128):
    for y in range(64, image_cv2_band7.shape[1] - 64, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64]
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\fixed_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_fixed)
        cv2.imwrite("D:\\Second_paper_VAE_CNN\\moved_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moved)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[0, np.random.randint(0, 50)],
                              [np.random.randint(80, 128), 0],
                              [128, np.random.randint(85, 128)],
                              [np.random.randint(0, 40), 128]])
        matrix = cv2.getPerspectiveTransform(points1, points2)
        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))

        cv2.imwrite("D:\\Second_paper_VAE_CNN\\moving_image\\" + "%03d" % x + "_" + "%03d" % y + ".jpg", sub_moving)

        print("next")
--------------------------------------------------------------------------------
/generate_affine_pre_data/generate_train_data/4_point_displacement.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

image_cv2_band7 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif", cv2.IMREAD_UNCHANGED)
image_cv2_band1 = cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif", cv2.IMREAD_UNCHANGED)

# Build (fixed, moving, 4-point displacement) training triples: the moving
# tile is the moved tile warped so its corners land at random offsets of up
# to 15 px, and the label is the 8-vector of corner displacements.
fixed = []
moving = []
displacement_4_point = []
for x in range(64, image_cv2_band7.shape[0] - 64, 128):
    for y in range(64, image_cv2_band7.shape[1] - 64, 128):
        x_y = np.array([x, y, 1]).reshape(3, 1)
        sub_fixed = image_cv2_band7[x - 64:x + 64, y - 64:y + 64].reshape((128, 128, 1))
        sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]

        fixed.append(sub_fixed)

        points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
        points2 = np.float32([[np.random.randint(0, 15), np.random.randint(0, 15)],
                              [np.random.randint(113, 128), np.random.randint(0, 15)],
                              [np.random.randint(113, 128), np.random.randint(113, 128)],
                              [np.random.randint(0, 15), np.random.randint(113, 128)]])

        matrix = cv2.getPerspectiveTransform(points1, points2)

        temp = points2 - points1
        displacement_4_point.append(temp.reshape(1, 8))

        sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))
        moving.append(sub_moving.reshape((128, 128, 1)))

fixed = np.array(fixed)
moving = np.array(moving)
displacement_4_point = np.array(displacement_4_point)

np.savez("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point.npz",
         fixed=fixed,
         moving=moving,
         displacement_4_point=displacement_4_point)
--------------------------------------------------------------------------------
/generate_affine_pre_data/generate_train_data/4_point_displacement_fixed_random_landsat_256.py:
--------------------------------------------------------------------------------
import cv2
import numpy as np

one = np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\one\\landsat1_1.npz")['image'][0]
second = np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\second\\landsat2_2.npz")['image'][0]

# Min-max normalize both Landsat acquisitions to [0, 1].
one = (one - np.min(one)) / (np.max(one) - np.min(one))
second = (second - np.min(second)) / (np.max(second) - np.min(second))

# Build (fixed, moving, 4-point displacement) triples from overlapping
# 256x256 tiles; corners are perturbed by up to 30 px.
fixed = []
moving = []
displacement_4_point = []

for x in range(128, one.shape[0] - 128, 64):
    for y in range(128, one.shape[1] - 128, 64):

        sub_fixed = one[x - 128:x + 128, y - 128:y + 128].reshape((256, 256, 1))
        sub_moved = second[x - 128:x + 128, y - 128:y + 128]

        fixed.append(sub_fixed)

        points1 = np.float32([[0, 0], [256, 0], [256, 256], [0, 256]])
        points2 = np.float32([[np.random.randint(0, 30), np.random.randint(0, 30)],
                              [np.random.randint(226, 256), np.random.randint(0, 30)],
                              [np.random.randint(226, 256), np.random.randint(226, 256)],
                              [np.random.randint(0, 30), np.random.randint(226, 256)]])

        matrix = cv2.getPerspectiveTransform(points1, points2)

        temp = points2 - points1
        displacement_4_point.append(temp.reshape(1, 8))

        sub_moving = cv2.warpPerspective(sub_moved, matrix, (256, 256))
        moving.append(sub_moving.reshape((256, 256, 1)))

fixed = np.array(fixed)
moving = np.array(moving)
displacement_4_point = np.array(displacement_4_point)

np.savez("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point_fixed_random_landsat_256.npz",
         fixed=fixed,
         moving=moving,
         displacement_4_point=displacement_4_point)
--------------------------------------------------------------------------------
/data_generator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 |
5 | def vxm_data_generator(fixed_image,moving_image, per_matrix,batch_size=32):
6 |
7 |
8 | while True:
9 | # prepare inputs
10 | # inputs need to be of the size [batch_size, H, W, number_features]
11 | # number_features at input is 1 for us
12 | idx1 = np.random.randint(0, fixed_image.shape[0], size=batch_size)
13 | fixed_images = fixed_image[idx1, ..., ]
14 |
15 | # idx2 = np.random.randint(0, x_data.shape[0], size=batch_size)
16 | moving_images = moving_image[idx1, ..., ]
17 | tr_matrix=per_matrix[idx1, ...,]
18 |
19 |
20 | inputs1=fixed_images
21 | inputs2=moving_images
22 | outputs = tr_matrix
23 |
24 | yield [inputs1,inputs2], outputs
25 |
26 |
27 |
28 |
29 |
30 | # matrixt_file= 'D:/ProgramData/DenseNet_registration/Densenet/matrix.npz'
31 | # fixed_image=np.load(matrixt_file)['path_fixed']
32 | # moving_image=np.load(matrixt_file)['path_moving']
33 | # per_matrix=np.load(matrixt_file)['Perpecttive'].reshape(fixed_image.shape[0],9)
34 | #
35 | # fixed_image=fixed_image.astype('float')/255
36 | # moving_image=moving_image.astype('float')/255
37 | #
38 | # data=vxm_data_generator(fixed_image,moving_image,per_matrix)
39 | #
40 | # x,y,z=next(data)
41 |
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/test_4point_paper_random_landsat_256.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | np.set_printoptions(suppress=True)
6 | image_cv2=cv2.imread("C:\\Users\\kylenate\\Desktop\\paper_registration1114\\santa_cruz_az-band7.tif",cv2.IMREAD_UNCHANGED)
7 |
8 | image_cv2=image_cv2[0:512,0:512]
9 |
10 |
11 | sub_fixed_256 = image_cv2[128:384, 128:384]
12 |
13 | points1 = np.float32([[128, 128], [384, 128], [384, 384], [128, 384]])
14 | points2 = np.float32([[np.random.randint(112,144,size=(1,))[0],np.random.randint(112,144,size=(1,))[0]],
15 | [np.random.randint(365,396,size=(1,))[0], np.random.randint(112,146,size=(1,))[0]],
16 | [np.random.randint(362,396,size=(1,))[0], np.random.randint(362,396,size=(1,))[0]],
17 | [np.random.randint(112,144,size=(1,))[0], np.random.randint(362,396,size=(1,))[0]]])
18 |
19 | print(points2)
20 | print(points1)
21 |
22 | matrix=cv2.getPerspectiveTransform(points2,points1)
23 | bb=cv2.findHomography(points2,points1)
24 |
25 | print(points1-points2)
26 |
27 |
28 | print(matrix)
29 | print(bb)
30 |
31 | moving_image=cv2.warpPerspective(image_cv2,matrix,(512,512))
32 |
33 | moving_image_256=moving_image[128:384, 128:384]
34 |
35 |
36 | wrap=cv2.getPerspectiveTransform(points1,points2)
37 |
38 | moving_image_256_return=cv2.warpPerspective(moving_image_256,wrap,(256,256))
39 |
40 |
41 |
42 |
43 | plt.subplot(2,2,1)
44 | plt.title('256_fixed')
45 | plt.imshow(sub_fixed_256)
46 |
47 |
48 |
49 | plt.subplot(2,2,2)
50 | plt.title('256_moving')
51 | plt.imshow(moving_image_256)
52 |
53 | plt.subplot(2,2,3)
54 | plt.imshow(moving_image_256_return)
55 |
56 |
57 | wrap=cv2.getPerspectiveTransform(points1-128,points2-128)
58 |
59 | moving_image_256_return=cv2.warpPerspective(moving_image_256,wrap,(256,256))
60 |
61 | plt.subplot(2,2,4)
62 | plt.imshow(moving_image_256_return)
63 |
64 |
65 | plt.show()
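
The subtraction of 128 in the last panel is the point of this experiment: a homography estimated in full-image coordinates must be re-expressed in the crop's coordinate frame before it can warp the 256x256 sub-image. With T_c the translation by the crop origin c = (128, 128), shifting both point sets by -c is exactly the conjugation

    H_{\text{crop}} = T_{-c} \, H \, T_{c}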
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/data_generator.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 |
4 |
5 | def vxm_data_generator(fixed_image,moving_image, per_matrix,batch_size=32):
6 |
7 |
8 | while True:
9 | # prepare inputs
10 | # inputs need to be of the size [batch_size, H, W, number_features]
11 | # number_features at input is 1 for us
12 |         idx1 = np.random.randint(0, fixed_image.shape[0], size=batch_size)
13 |         fixed_images = fixed_image[idx1, ...]
14 |
15 |         # reuse idx1 so the fixed/moving/matrix samples stay aligned
16 |         moving_images = moving_image[idx1, ...]
17 |         tr_matrix = per_matrix[idx1, ...]
18 |
19 |
20 |         inputs1 = fixed_images
21 |         inputs2 = moving_images
22 |         outputs = tr_matrix
23 |
24 |         yield [inputs1, inputs2], outputs
25 |
26 |
27 |
28 |
29 |
30 | # matrixt_file= 'D:/ProgramData/DenseNet_registration/Densenet/matrix.npz'
31 | # fixed_image=np.load(matrixt_file)['path_fixed']
32 | # moving_image=np.load(matrixt_file)['path_moving']
33 | # per_matrix=np.load(matrixt_file)['Perpecttive'].reshape(fixed_image.shape[0],9)
34 | #
35 | # fixed_image=fixed_image.astype('float')/255
36 | # moving_image=moving_image.astype('float')/255
37 | #
38 | # data=vxm_data_generator(fixed_image,moving_image,per_matrix)
39 | #
40 | # x,y,z=next(data)
41 |
--------------------------------------------------------------------------------
/generate_affine_pre_data/generate_pre_test/pre_matrix.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | import numpy as np
4 | image_cv2_band7=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif",cv2.IMREAD_UNCHANGED)
5 | image_cv2_band1=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif",cv2.IMREAD_UNCHANGED)
6 |
7 |
8 | fixed=[]
9 | moving=[]
10 | displacement_4_point=[]
11 | for x in range(1024,1150,128):
12 | for y in range(1024,1150,128):
13 |
14 | sub_fixed=image_cv2_band7[x-64:x+64,y-64:y+64].reshape((128,128,1))
15 | sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]
16 |
17 | fixed.append(sub_fixed)
18 |
19 | points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
20 | points2 = np.float32([[np.random.randint(0,15,size=(1,))[0],np.random.randint(0,15,size=(1,))[0]],
21 | [np.random.randint(113,128,size=(1,))[0], np.random.randint(0,15,size=(1,))[0]],
22 | [np.random.randint(113,128,size=(1,))[0], np.random.randint(113,128,size=(1,))[0]],
23 | [np.random.randint(0,15,size=(1,))[0], np.random.randint(113,128,size=(1,))[0]]])
24 |
25 | matrix = cv2.getPerspectiveTransform(points1, points2)
26 |
27 | temp=points2-points1
28 | displacement_4_point.append(temp.reshape(1,8))
29 |
30 | sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))
31 | moving.append(sub_moving.reshape((128,128,1)))
32 |
33 |
34 | fixed=np.array(fixed)
35 | moving=np.array(moving)
36 | displacement_4_point=np.array(displacement_4_point)
37 |
38 | np.savez("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\pre_displacement_4_point.npz",
39 | fixed=fixed,
40 | moving=moving,
41 | displacement_4_point=displacement_4_point
42 | )
43 |
--------------------------------------------------------------------------------
/generate_affine_pre_data/generate_train_data/4_point_displacement_fixed_random_overlap_landsat_256.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | one=np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\one\\landsat1_1.npz")['image'][0]
5 | second=np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\second\\landsat2_2.npz")['image'][0]
6 |
7 | one=(one-np.min(one))/(np.max(one)-np.min(one))
8 | second=(second-np.min(second))/(np.max(second)-np.min(second))
9 |
10 | fixed=[]
11 | moving=[]
12 | displacement_4_point=[]
13 |
14 | for x in range(128,one.shape[0]-128,32):
15 | for y in range(128,one.shape[1]-128,64):
16 |
17 |         offset = np.random.randint(0, 128, (1,))[0]
18 |
19 |         sub_fixed = one[x - 128:x + 128, y - 128:y + 128]
20 |         sub_moved = second[x - offset:x + 256 - offset, y - offset:y + 256 - offset]
21 |
22 | fixed.append(sub_fixed.reshape((256, 256, 1)))
23 |
24 |
25 |
26 | points3 = np.float32([[0, 0], [256, 0], [256, 256], [0, 256]])
27 | points4 = np.float32([[np.random.randint(0,30,size=(1,))[0],np.random.randint(0,30,size=(1,))[0]],
28 | [np.random.randint(226,256,size=(1,))[0], np.random.randint(0,30,size=(1,))[0]],
29 | [np.random.randint(226,256,size=(1,))[0], np.random.randint(226,256,size=(1,))[0]],
30 | [np.random.randint(0,30,size=(1,))[0], np.random.randint(226,256,size=(1,))[0]]])
31 |
32 | matrix2 = cv2.getPerspectiveTransform(points3, points4)
33 | sub_moving = cv2.warpPerspective(sub_moved, matrix2, (256, 256))
34 | moving.append(sub_moving.reshape((256, 256, 1)))
35 |
36 |
37 |
38 | temp=points4-points3
39 |
40 | displacement_4_point.append(temp.reshape(1,8))
41 |
42 |
43 |
44 | fixed=np.array(fixed)
45 | moving=np.array(moving)
46 | displacement_4_point=np.array(displacement_4_point)
47 |
48 | np.savez("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point_fixed_random_overlap_landsat_256.npz",
49 | fixed=fixed,
50 | moving=moving,
51 | displacement_4_point=displacement_4_point
52 | )
--------------------------------------------------------------------------------
/Similarityfunction/111.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | x = tf.random_uniform([100, 784], minval=0, maxval=1.0)
4 | y = tf.random_uniform([100, 10], minval=0, maxval=1.0)
5 |
6 | GRIDS = 20
7 | def core_function1d(x, y, grids = GRIDS):
8 | return tf.maximum((1/(grids - 1)) - tf.abs(tf.subtract(x, y)), 0)
9 |
10 | def core_function2d(x1, x2, y1, y2, grids1 = GRIDS, grids2 = GRIDS):
11 |     return core_function1d(x1, y1, grids1) + core_function1d(x2, y2, grids2)
12 |
13 | def entropy1d(x, grids = GRIDS):
14 | shape1 = [x.get_shape().as_list()[0], 1, x.get_shape().as_list()[1]]
15 | shape2 = [1, grids, 1]
16 |
17 | gx = tf.linspace(0.0, 1.0, grids)
18 |
19 | X = tf.reshape(x, shape1)
20 | GX = tf.reshape(gx, shape2)
21 |
22 | mapping = core_function1d(GX, X, grids)
23 | mapping = tf.reduce_sum(mapping, 0)
24 | mapping = tf.add(mapping, 1e-10)
25 | mapping_normalized = tf.divide(mapping, tf.reduce_sum(mapping, 0, keepdims = True))
26 |
27 | entropy = tf.negative(tf.reduce_sum(tf.reduce_sum(tf.multiply(mapping_normalized, tf.log(mapping_normalized * grids)), 0)))
28 |
29 | return entropy
30 |
31 | def entropy2d(x, y, gridsx = GRIDS, gridsy = GRIDS):
32 | batch_size = x.get_shape().as_list()[0]
33 |     x_size = x.get_shape().as_list()[1]
34 | y_size = y.get_shape().as_list()[1]
35 |
36 | gx = tf.linspace(0.0, 1.0, gridsx)
37 | gy = tf.linspace(0.0, 1.0, gridsy)
38 |
39 |     X = tf.reshape(x, [batch_size, 1, 1, x_size, 1])
40 | Y = tf.reshape(y, [batch_size, 1, 1, 1, y_size])
41 |
42 | GX = tf.reshape(gx, [1, gridsx, 1, 1, 1])
43 | GY = tf.reshape(gy, [1, 1, gridsy, 1, 1])
44 |
45 | mapping = core_function2d(GX, GY, X, Y, gridsx, gridsy)
46 | mapping = tf.reduce_sum(mapping, 0)
47 | mapping = tf.add(mapping, 1e-10)
48 | mapping_normalized = tf.divide(mapping, tf.reduce_sum(mapping, [0, 1], keepdims = True))
49 |
50 | entropy = tf.negative(tf.reduce_sum(tf.reduce_sum(tf.multiply(mapping_normalized, tf.log(mapping_normalized * (gridsx *gridsy))), [0, 1])))
51 |
52 | return entropy
53 |
54 | def mutual_info(x, y):
55 | ex = entropy1d(x)
56 | ey = entropy1d(y)
57 | exy = entropy2d(x, y)
58 | return ex + ey - exy
59 |
60 | multi_info = entropy1d(x) + entropy1d(y) - entropy2d(x, y)
61 | with tf.Session():
62 | print(multi_info.eval())
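
The script evaluates the standard mutual-information identity, with entropy1d estimating the marginal entropies through the triangular Parzen window core_function1d and entropy2d estimating the joint entropy:

    I(X;Y) = H(X) + H(Y) - H(X,Y)

The helper mutual_info(x, y) above wraps the same three terms into one call.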
--------------------------------------------------------------------------------
/DenseNet_registration/experiment/test_tps_pot.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 | import numpy.linalg as nl
4 | import matplotlib.pyplot as plt
5 | from scipy.spatial.distance import pdist, cdist, squareform
6 |
7 | def makeT(cp):
8 | # cp: [K x 2] control points
9 | # T: [(K+3) x (K+3)]
10 | K = cp.shape[0]
11 | T = np.zeros((K+3, K+3))
12 | T[:K, 0] = 1
13 | T[:K, 1:3] = cp
14 | T[K, 3:] = 1
15 | T[K+1:, 3:] = cp.T
16 | R = squareform(pdist(cp, metric='euclidean'))
17 | R = R * R
18 | R[R == 0] = 1 # a trick to make R ln(R) 0
19 | R = R * np.log(R)
20 | np.fill_diagonal(R, 0)
21 | T[:K, 3:] = R
22 | return T
23 |
24 | def liftPts(p, cp):
25 | # p: [N x 2], input points
26 | # cp: [K x 2], control points
27 | # pLift: [N x (3+K)], lifted input points
28 | N, K = p.shape[0], cp.shape[0]
29 | pLift = np.zeros((N, K+3))
30 | pLift[:,0] = 1
31 | pLift[:,1:3] = p
32 | R = cdist(p, cp, 'euclidean')
33 | R = R * R
34 | R[R == 0] = 1
35 | R = R * np.log(R)
36 | pLift[:,3:] = R
37 | return pLift
38 |
39 | # source control points
40 | x, y = np.linspace(0, 1, 3), np.linspace(-1, 1, 3)
41 | x, y = np.meshgrid(x, y)
42 | xs = x.flatten()
43 | ys = y.flatten()
44 | cps = np.vstack([xs, ys]).T
45 | print(cps)
46 | # target control points
47 | xt = xs + np.random.uniform(-0.3, 0.3, size=xs.size)
48 | yt = ys + np.random.uniform(-0.3, 0.3, size=ys.size)
49 |
50 | # construct T
51 | T = makeT(cps)
52 |
53 | # solve cx, cy (coefficients for x and y)
54 | xtAug = np.concatenate([xt, np.zeros(3)])
55 | ytAug = np.concatenate([yt, np.zeros(3)])
56 | cx = nl.solve(T, xtAug) # [K+3]
57 | cy = nl.solve(T, ytAug)
58 |
59 | # dense grid
60 | N = 30
61 | x = np.linspace(-2, 2, N)
62 | y = np.linspace(-2, 2, N)
63 | x, y = np.meshgrid(x, y)
64 | xgs, ygs = x.flatten(), y.flatten()
65 | gps = np.vstack([xgs, ygs]).T
66 |
67 | # transform
68 | pgLift = liftPts(gps, cps) # [N x (K+3)]
69 | xgt = np.dot(pgLift, cx.T)
70 | ygt = np.dot(pgLift, cy.T)
71 |
72 | # display
73 | plt.xlim(-2.5, 2.5)
74 | plt.ylim(-2.5, 2.5)
75 | plt.subplot(1, 2, 1)
76 | plt.title('Source')
77 | plt.grid()
78 | plt.scatter(xs, ys, marker='+', c='r', s=40)
79 | plt.scatter(xgs, ygs, marker='.', c='r', s=5)
80 | plt.subplot(1, 2, 2)
81 | plt.title('Target')
82 | plt.grid()
83 | plt.scatter(xt, yt, marker='+', c='b', s=40)
84 | plt.scatter(xgt, ygt, marker='.', c='b', s=5)
85 | plt.show()
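
For reference, the interpolant solved for above is the standard thin-plate spline. With control points (x_i, y_i) and kernel U,

    f(x, y) = a_0 + a_1 x + a_2 y + \sum_{i=1}^{K} w_i \, U\big(\lVert (x_i, y_i) - (x, y) \rVert\big), \qquad U(r) = r^2 \log r^2,

which is why the code squares the pairwise distances first (R = R * R) and only then multiplies by np.log(R). The three extra rows of T enforce the side conditions \sum_i w_i = \sum_i w_i x_i = \sum_i w_i y_i = 0.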
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/LBP特征.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | from matplotlib.pyplot import *
5 | data=np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']
6 |
7 | image_cv2_band7=data[0:1,0:100,0:100].reshape((100,100))
8 | # image_cv2_band1=data[5:6,0:100,0:100].reshape((100,100))
9 | image_cv2_band1=data[3:4,100:200,0:100].reshape((100,100))
10 | # image_cv2_band1=image_cv2_band7
11 |
12 |
13 | plt.subplot(1,4,1)
14 | plt.imshow(image_cv2_band7,cmap=cm.gray)
15 |
16 | plt.subplot(1,4,2)
17 | plt.imshow(image_cv2_band1,cmap=cm.gray)
18 |
19 |
20 |
21 |
22 | def LBP(src):
23 |     '''
24 |     :param src: grayscale image
25 |     :return: LBP-coded image
26 |     '''
27 | height = src.shape[0]
28 | width = src.shape[1]
29 | dst = src.copy()
30 |
31 | lbp_value = np.zeros((1,8), dtype=np.uint8)
32 | neighbours = np.zeros((1,8), dtype=np.uint8)
33 | for x in range(1, width-1):
34 | for y in range(1, height-1):
35 | neighbours[0, 0] = src[y - 1, x - 1]
36 | neighbours[0, 1] = src[y - 1, x]
37 | neighbours[0, 2] = src[y - 1, x + 1]
38 | neighbours[0, 3] = src[y, x - 1]
39 | neighbours[0, 4] = src[y, x + 1]
40 | neighbours[0, 5] = src[y + 1, x - 1]
41 | neighbours[0, 6] = src[y + 1, x]
42 | neighbours[0, 7] = src[y + 1, x + 1]
43 |
44 | center = src[y, x]
45 |
46 | for i in range(8):
47 | if neighbours[0, i] > center:
48 | lbp_value[0, i] = 1
49 | else:
50 | lbp_value[0, i] = 0
51 |
52 |                 lbp = lbp_value[0, 0] * 1 + lbp_value[0, 1] * 2 + lbp_value[0, 2] * 4 + lbp_value[0, 3] * 8 \
53 |                     + lbp_value[0, 4] * 16 + lbp_value[0, 5] * 32 + lbp_value[0, 6] * 64 + lbp_value[0, 7] * 128
54 |
55 | dst[y, x] = lbp
56 |
57 | return dst
58 |
59 |
60 |
61 |
62 | img=LBP(image_cv2_band7)
63 | img1=LBP(image_cv2_band1)
64 |
65 | plt.subplot(1,4,3)
66 | plt.imshow(img,cmap=cm.gray)
67 |
68 | plt.subplot(1,4,4)
69 | plt.imshow(img1,cmap=cm.gray)
70 |
71 | plt.show()
72 |
73 |
74 |
75 |
76 |
77 |
78 | image1 = np.array(img)
79 | image2 = np.array(img1)
80 |
81 |
82 |
83 | # cast before subtracting so unsigned-integer images do not wrap around
84 | reduce = image1.astype(np.float64) - image2.astype(np.float64)
85 |
86 | print(reduce)
87 |
88 | squr = np.mean(reduce**2)
89 | print(squr)
90 |
91 | print('direct subtraction', np.sum(image_cv2_band7.astype(np.float64) - image_cv2_band1.astype(np.float64)))
92 | print('LBP', np.sum(squr))
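
The double loop in LBP above does all its work at Python speed. A vectorized sketch of the same operator (a hypothetical helper, not part of the original file; it uses the same neighbour order and bit weights, and drops the one-pixel border instead of copying it from the source):

    def LBP_vectorized(src):
        c = src[1:-1, 1:-1]  # all centre pixels at once
        # the 8 neighbours, in the same order as the loop version
        shifts = [(-1, -1), (-1, 0), (-1, 1), (0, -1),
                  (0, 1), (1, -1), (1, 0), (1, 1)]
        dst = np.zeros(c.shape, dtype=np.uint8)
        for k, (dy, dx) in enumerate(shifts):
            n = src[1 + dy:src.shape[0] - 1 + dy, 1 + dx:src.shape[1] - 1 + dx]
            dst |= (n > c).astype(np.uint8) << k  # bit k gets weight 2**k
        return dst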
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/conv_impliment.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | def numpy_conv(inputs,filter,padding="VALID"):
4 | H, W = inputs.shape
5 | filter_size = filter.shape[0]
6 |     # int() truncates (floor for positive values)
7 |     filter_center = int(filter_size / 2.0)
8 |     filter_center_ceil = int(np.ceil(filter_size / 2.0))
9 |
10 |     # SAME mode: output matches the input size, so zero-pad filter_center on every side
11 |     if padding == "SAME":
12 |         padding_inputs = np.zeros([H + 2 * filter_center, W + 2 * filter_center], np.float32)
13 |         padding_inputs[filter_center:-filter_center, filter_center:-filter_center] = inputs
14 |         inputs = padding_inputs
15 |     # allocate a buffer the size of the (padded) input; the border ring is cropped later
16 |     result = np.zeros((inputs.shape))
17 |     # refresh H and W, since SAME mode changed them
18 |     H, W = inputs.shape
19 |     #print("new size",H,W)
20 |     # slide the kernel with stride 1; output coordinates start at filter_center
21 |     for r in range(filter_center, H - filter_center):
22 |         for c in range(filter_center, W - filter_center):
23 |             # take the kernel-sized input window
24 |             cur_input = inputs[r - filter_center:r + filter_center_ceil,
25 |                         c - filter_center:c + filter_center_ceil]
26 |             # elementwise product with the kernel
27 |             cur_output = cur_input * filter
28 |             # sum to a scalar
29 |             conv_sum = np.sum(cur_output)
30 |             # output value at the current position
31 |             result[r, c] = conv_sum
32 |     # the outer ring was never written, crop it off
33 |     final_result = result[filter_center:result.shape[0] - filter_center,
34 |                    filter_center:result.shape[1] - filter_center]
35 |     return final_result
36 |
37 |
38 |
39 | image_cv2_band7=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif",cv2.IMREAD_UNCHANGED)
40 | image_cv2_band1=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif",cv2.IMREAD_UNCHANGED)
41 |
42 |
43 | image_cv2_band7=image_cv2_band7[0:100,0:100]
44 | image_cv2_band1=image_cv2_band1[0:100,0:100]
45 |
46 | filt=np.array([
47 | [0,0,0,1,0,0,0],
48 | [0,0,0,0,0,0,0],
49 | [0,0,0,0,0,0,0],
50 | [1,0,0,1,0,0,1],
51 | [0,0,0,0,0,0,0],
52 | [0,0,0,0,0,0,0],
53 | [0,0,0,1,0,0,0]
54 | ])
55 |
56 |
57 | im7=numpy_conv(image_cv2_band7,filt)
58 | im1=numpy_conv(image_cv2_band1,filt)
59 |
60 |
61 |
62 | print('direct subtraction', np.sum(image_cv2_band7.astype(np.float64) - image_cv2_band1.astype(np.float64)))
63 | print('similarity score', np.sum(im7 - im1))
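
numpy_conv never flips the kernel, so it actually computes a cross-correlation; with the symmetric filt above the distinction is invisible. If SciPy is available (an assumption, it is not imported anywhere in this repo), the default VALID path can be sanity-checked:

    from scipy.signal import correlate2d

    # 'valid' cross-correlation should reproduce numpy_conv exactly
    ref = correlate2d(image_cv2_band7.astype(np.float64), filt.astype(np.float64), mode='valid')
    print(np.allclose(im7, ref))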
64 |
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/BRIEF特征描述子.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import matplotlib.pyplot as plt
3 | import numpy as np
4 | import math
5 | data=np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']
6 |
7 | # image_cv2_band7=data[0:1,0:100,0:100].reshape((100,100))
8 | # # image_cv2_band1=data[5:6,0:100,0:100].reshape((100,100))
9 | # image_cv2_band1=data[3:4,100:200,0:100].reshape((100,100))
10 | #
11 | #
12 | #
13 | # plt.subplot(1,2,1)
14 | # plt.imshow(image_cv2_band7)
15 | #
16 | # plt.subplot(1,2,2)
17 | # plt.imshow(image_cv2_band1)
18 | # plt.show()
19 |
20 |
21 | img = data[0:1,0:200,0:200].reshape((200,200))
22 | img1 = data[2:3,100:300,100:300].reshape((200,200))
23 |
24 | plt.subplot(1,2,1)
25 | plt.imshow(img)
26 |
27 | plt.subplot(1,2,2)
28 | plt.imshow(img1)
29 | plt.show()
30 |
31 |
32 | # Initiate FAST detector
33 | star = cv2.xfeatures2d.StarDetector_create()
34 |
35 | # Initiate BRIEF extractor
36 | brief = cv2.xfeatures2d.BriefDescriptorExtractor_create()
37 |
38 | # find the keypoints with STAR
39 | kp = star.detect(img,None)
40 | print(kp)
41 | # compute the descriptors with BRIEF
42 |
43 | kp=cv2.KeyPoint(28.0,28.0,1)
44 |
45 | kp=[kp]
46 |
47 | kp, des = brief.compute(img, kp)
48 | kp1, des1 = brief.compute(img1, kp)
49 | # print( brief.descriptorSize() )
50 |
51 | print(kp)
52 | # print(kp[0].pt,kp[0].size,kp[0].angle,kp[0].response,kp[0].octave,kp[0].class_id)
53 | print(des)
54 |
55 | print(kp1)
56 |
57 | print(des1)
58 |
59 | out = des1.astype(np.int32) - des.astype(np.int32)  # cast so uint8 differences do not wrap
60 | print('')
61 | print(out)
62 |
63 | # img1 = cv2.drawKeypoints(img, kp, None, color=(255,0,0))
64 | # cv2.imshow('BRIEF',img1)
65 | # cv2.waitKey()
66 |
67 | descripor=[]
68 | for i in range(28,img.shape[0]-28):
69 | for j in range(28,img.shape[1]-28):
70 | kp = cv2.KeyPoint(i, j, 1)
71 |
72 | kp = [kp]
73 |
74 | kp, des = brief.compute(img, kp)
75 | descripor.append(des)
76 |
77 |
78 | descripor1=[]
79 | for i in range(28,img1.shape[0]-28):
80 | for j in range(28,img1.shape[1]-28):
81 | kp = cv2.KeyPoint(i, j, 1)
82 |
83 | kp = [kp]
84 |
85 | kp, des = brief.compute(img1, kp)
86 | descripor1.append(des)
87 |
88 |
89 | descripor=np.array(descripor)
90 | descripor1=np.array(descripor1)
91 |
92 | print(descripor)
93 | print(descripor1)
94 |
95 | reduce = descripor.astype(np.int32) - descripor1.astype(np.int32)  # avoid uint8 wrap-around
96 | squr = np.mean(reduce**2)
97 |
98 |
99 | print('MSE of the BRIEF descriptors', squr)
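
BRIEF descriptors are binary strings, so the natural comparison is Hamming distance rather than the mean squared difference used above. A minimal alternative for the two single-keypoint descriptors computed earlier:

    # count of differing bits between the two 32-byte descriptors
    print('Hamming distance:', cv2.norm(des, des1, cv2.NORM_HAMMING))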
--------------------------------------------------------------------------------
/DenseNet_registration/experiment/cal_Homography_batch.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import cv2
4 | np.set_printoptions(suppress=True)
5 |
6 |
7 |
8 |
9 | def getPerspectiveTransformMatrix(input):
10 |
11 | batch=tf.shape(input)[0]
12 |
13 | point1 = tf.convert_to_tensor([[0, 0], [256, 0], [256, 256], [0, 256]],dtype=tf.float32)
14 | point1 = tf.reshape(point1, (1, 8))
15 | point1 = tf.tile(point1, [batch, 1])
16 |
17 | point2 = tf.subtract(point1,input)
18 | print(batch.numpy())
19 |
20 | batch_A=[]
21 |     for i in range(0, int(batch)):  # int() so Python's range accepts the eager tensor
22 | print(i)
23 |
24 | x1, x2, x3, x4 = point1[i:(i+1), 0].numpy()[0], point1[i:(i+1), 2].numpy()[0], point1[i:(i+1), 4].numpy()[0], point1[i:(i+1), 6].numpy()[0]
25 | y1, y2, y3, y4 = point1[i:(i+1), 1].numpy()[0], point1[i:(i+1), 3].numpy()[0], point1[i:(i+1), 5].numpy()[0], point1[i:(i+1), 7].numpy()[0]
26 |
27 | u1, u2, u3, u4 = point2[i:(i+1), 0].numpy()[0], point2[i:(i+1), 2].numpy()[0], point2[i:(i+1), 4].numpy()[0], point2[i:(i+1), 6].numpy()[0]
28 | v1, v2, v3, v4 = point2[i:(i+1), 1].numpy()[0], point2[i:(i+1), 3].numpy()[0], point2[i:(i+1), 5].numpy()[0], point2[i:(i+1), 7].numpy()[0]
29 |
30 | A = [[x1, y1, 1, 0, 0, 0, -u1 * x1, -u1 * y1, -u1],
31 | [0, 0, 0, x1, y1, 1, -v1 * x1, -v1 * y1, -v1],
32 | [x2, y2, 1, 0, 0, 0, -u2 * x2, -u2 * y2, -u2],
33 | [0, 0, 0, x2, y2, 1, -v2 * x2, -v2 * y2, -v2],
34 | [x3, y3, 1, 0, 0, 0, -u3 * x3, -u3 * y3, -u3],
35 | [0, 0, 0, x3, y3, 1, -v3 * x3, -v3 * y3, -v3],
36 | [x4, y4, 1, 0, 0, 0, -u4 * x4, -u4 * y4, -u4],
37 | [0, 0, 0, x4, y4, 1, -v4 * x4, -v4 * y4, -v4]]
38 |
39 | batch_A.append(A)
40 |
41 | batch_A=tf.stack(batch_A)
42 |
43 | # arrayA = tf.convert_to_tensor(batch_A)
44 | arrayA=tf.reshape(batch_A,(batch,8,9))
45 |
46 |     # tf.linalg.svd returns (s, u, v) with arrayA = u @ diag(s) @ v^T
47 |     S, U, V = tf.linalg.svd(arrayA, full_matrices=True)
48 |
49 |     # the homography is the right-singular vector for the smallest singular
50 |     # value, i.e. the last column of V, normalised by its last entry
51 |     h = V[:, :, -1]
52 |     L = h / h[:, -1:]
53 |
54 |     return L
55 |
56 |
57 | i=tf.convert_to_tensor([[ 10. , -9.],
58 | [ -1., -5.],
59 | [-10. ,10.],
60 | [ -8. ,-10.]])
61 |
62 | i=tf.reshape(i,(1,8))
63 |
64 | i=tf.tile(i,[3,1])
65 |
66 | H=getPerspectiveTransformMatrix(i)
67 |
68 |
69 |
70 | p1=np.float32([[0, 0], [256, 0], [256, 256], [0, 256]])
71 |
72 | displace_4_point=np.array([[ 10. , -9.],[ -1., -5.],[-10. ,10.],[ -8. ,-10.]])
73 |
74 |
75 | p2=p1-displace_4_point
76 | p2=np.float32(p2)
77 |
78 | m=cv2.getPerspectiveTransform(p1,p2)
79 |
80 |
81 |
82 | print(H)
83 |
84 | print(m)
85 |
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/test_4point_random_random_landsat_256.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 |
6 | image_cv2=cv2.imread("C:\\Users\\kylenate\\Desktop\\paper_registration1114\\santa_cruz_az-band7.tif",cv2.IMREAD_UNCHANGED)
7 |
8 |
9 |
10 | pts1 = np.float32([[0,0],[1000,0],[1000,1000]])
11 | pts2 = np.float32([[0,np.random.randint(0,200,(1,))[0]],[np.random.randint(800,1000,(1,))[0],0],[1000,np.random.randint(800,1000,(1,))[0]]])
12 | M1 = cv2.getAffineTransform(pts1,pts2)
13 | wrap1 = cv2.warpAffine(image_cv2,M1,(1000,1000))
14 |
15 |
16 | plt.subplot(2,2,1)
17 | plt.imshow(wrap1)
18 |
19 |
20 | M2=cv2.getAffineTransform(pts2,pts1)
21 | wrap2 = cv2.warpAffine(wrap1,M2,(1000,1000))
22 |
23 | plt.subplot(2,2,2)
24 | plt.imshow(wrap2)
25 |
26 |
27 | pts3 = np.float32([[0,0],[1000,0],[1000,1000]])
28 | pts4 = np.float32([[0,np.random.randint(0,200,(1,))[0]],[np.random.randint(800,1000,(1,))[0],0],[1000,np.random.randint(800,1000,(1,))[0]]])
29 | M3 = cv2.getAffineTransform(pts3,pts4)
30 | wrap3 = cv2.warpAffine(wrap2,M3,(1000,1000))
31 |
32 | plt.subplot(2,2,3)
33 | plt.imshow(wrap3)
34 |
35 |
36 | pts5 = pts2
37 | pts6 = pts4
38 | M4 = cv2.getAffineTransform(pts6,pts5)
39 | wrap4 = cv2.warpAffine(wrap3,M4,(1000,1000))
40 |
41 |
42 | plt.subplot(2,2,4)
43 | plt.imshow(wrap4)
44 | plt.show()
45 |
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/image_wrapaffine.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 |
6 | image_cv2=cv2.imread("C:\\Users\\kylenate\\Desktop\\paper_registration1114\\santa_cruz_az-band7.tif",cv2.IMREAD_UNCHANGED)
7 |
8 |
9 |
10 | # pts1 = np.float32([[50,50],[200,50],[50,200]])
11 | # pts2 = np.float32([[10,100],[200,50],[100,250]])
12 | pts1 = np.float32([[0,0],[1000,0],[1000,1000]])
13 | pts2 = np.float32([[0,200],[870,0],[1000,750]])
14 |
15 | M = cv2.getAffineTransform(pts1,pts2)
16 | print(M)
17 |
18 | wrap = cv2.warpAffine(image_cv2,M,(1000,1000))
19 | plt.subplot(1,2,1)
20 | plt.imshow(wrap)
21 |
22 |
23 | arr=np.array([0,0,1]).reshape((1,3))
24 | con=np.concatenate((M,arr),axis=0)
25 | x_point=np.array([1000,0,1]).reshape((3,1))
26 |
27 |
28 | tra=np.matmul(con,x_point)
29 |
30 |
31 | print(con)
32 |
33 | print(tra)
34 |
35 |
36 | # points1 = np.float32([[56,65],[368,52],[28,387],[389,390]])
37 | # points2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
38 |
39 | points1 = np.float32([[0,0],[1000,0],[1000,1000],[0,1000]])
40 | points2 = np.float32([[1000,1000],[0,1000],[0,0],[1000,0]])
41 | matrix = cv2.getPerspectiveTransform(points1,points2)
42 | output = cv2.warpPerspective(image_cv2, matrix, (1000, 1000))
43 |
44 | print(matrix)
45 |
46 |
47 | plt.subplot(1,2,2)
48 | plt.imshow(output)
49 | plt.show()
50 |
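Padding the 2x3 affine matrix M with the row [0, 0, 1] (the con array above) turns the affine warp into a single matrix product on homogeneous coordinates, which is what the matmul check verifies:

    \begin{pmatrix} x' \\ y' \\ 1 \end{pmatrix} =
    \begin{pmatrix} M_{11} & M_{12} & M_{13} \\ M_{21} & M_{22} & M_{23} \\ 0 & 0 & 1 \end{pmatrix}
    \begin{pmatrix} x \\ y \\ 1 \end{pmatrix}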
--------------------------------------------------------------------------------
/generate_affine_pre_data/generate_train_data/4_point_displacement_random_random_landsat_256.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | one=np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\one\\landsat1_1.npz")['image'][0]
5 | second=np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\second\\landsat2_2.npz")['image'][0]
6 |
7 | one=(one-np.min(one))/(np.max(one)-np.min(one))
8 | second=(second-np.min(second))/(np.max(second)-np.min(second))
9 |
10 | fixed=[]
11 | moving=[]
12 | displacement_4_point=[]
13 |
14 | for x in range(128,one.shape[0]-128,32):
15 | for y in range(128,one.shape[1]-128,64):
16 |
17 |
18 | sub_fixed = one[x - 128:x + 128, y - 128:y + 128]
19 | sub_moved = second[x - 128:x + 128, y - 128:y + 128]
20 |
21 | points1 = np.float32([[0, 0], [256, 0], [256, 256], [0, 256]])
22 | points2 = np.float32([[np.random.randint(0, 30, size=(1,))[0], np.random.randint(0, 30, size=(1,))[0]],
23 | [np.random.randint(226, 256, size=(1,))[0], np.random.randint(0, 30, size=(1,))[0]],
24 | [np.random.randint(226, 256, size=(1,))[0], np.random.randint(226, 256, size=(1,))[0]],
25 | [np.random.randint(0, 30, size=(1,))[0], np.random.randint(226, 256, size=(1,))[0]]])
26 |
27 | matrix1 = cv2.getPerspectiveTransform(points1, points2)
28 |
29 | sub_fixed = cv2.warpPerspective(sub_fixed, matrix1, (256, 256))
30 |
31 | fixed.append(sub_fixed.reshape((256, 256, 1)))
32 |
33 |
34 | points3 = np.float32([[0, 0], [256, 0], [256, 256], [0, 256]])
35 | points4 = np.float32([[np.random.randint(0,30,size=(1,))[0],np.random.randint(0,30,size=(1,))[0]],
36 | [np.random.randint(226,256,size=(1,))[0], np.random.randint(0,30,size=(1,))[0]],
37 | [np.random.randint(226,256,size=(1,))[0], np.random.randint(226,256,size=(1,))[0]],
38 | [np.random.randint(0,30,size=(1,))[0], np.random.randint(226,256,size=(1,))[0]]])
39 |
40 | matrix2 = cv2.getPerspectiveTransform(points3, points4)
41 | sub_moving = cv2.warpPerspective(sub_moved, matrix2, (256, 256))
42 | moving.append(sub_moving.reshape((256, 256, 1)))
43 |
44 |
45 |
46 | temp=points4-points2
47 |
48 | displacement_4_point.append(temp.reshape(1,8))
49 |
50 |
51 |
52 | fixed=np.array(fixed)
53 | moving=np.array(moving)
54 | displacement_4_point=np.array(displacement_4_point)
55 |
56 | np.savez("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point_random_random_landsat_256.npz",
57 | fixed=fixed,
58 | moving=moving,
59 | displacement_4_point=displacement_4_point)
--------------------------------------------------------------------------------
/Similarityfunction/wrapAffine/generator_cvs_path_affine_matrix.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 |
6 |
7 | image_cv2_band7=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif",cv2.IMREAD_UNCHANGED)
8 | image_cv2_band1=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif",cv2.IMREAD_UNCHANGED)
9 |
10 | if not os.path.exists("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_fixed"): os.makedirs(
11 | "D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_fixed")
12 |
13 | if not os.path.exists("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_moving"): os.makedirs(
14 | "D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_moving")
15 |
16 | if not os.path.exists("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix"): os.makedirs(
17 | "D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix")
18 |
19 |
20 | path_fixed=[]
21 | path_moving=[]
22 | Perpecttive=[]
23 | for x in range(64,image_cv2_band7.shape[0]-64,128):
24 | for y in range(64,image_cv2_band7.shape[1]-64,128):
25 |
26 | sub_fixed=image_cv2_band7[x-64:x+64,y-64:y+64]
27 | sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]
28 |
29 | path_fixed.append(sub_fixed)
30 |
31 |
32 | points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
33 | points2 = np.float32([[0,np.random.randint(0,50,size=(1,))[0]],
34 | [np.random.randint(80,128,size=(1,))[0], 0],
35 | [128, np.random.randint(85,128,size=(1,))[0]],
36 | [np.random.randint(0,40,size=(1,))[0], 128]])
37 | matrix = cv2.getPerspectiveTransform(points1, points2)
38 |
39 |
40 | matrix2=cv2.getPerspectiveTransform(points2,points1)
41 |
42 | Perpecttive.append(matrix2)
43 | sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))
44 | path_moving.append(sub_moving)
45 |
46 |
47 | path_fixed=np.array(path_fixed)
48 | path_moving=np.array(path_moving)
49 | Perpecttive=np.array(Perpecttive)
50 |
51 | np.savez("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix\\Perspective_matrix.npz",
52 | path_fixed=path_fixed,
53 | path_moving=path_moving,
54 | Perpecttive=Perpecttive
55 | )
56 |
57 | # cvssave=pd.DataFrame({"path_fixed":path_fixed,"path_moving":path_moving,"Perpecttive":Perpecttive})
58 | #
59 | # cvssave.to_csv("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix\\matrix.csv")
60 | #
61 | #
62 | # read=pd.read_csv("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix\\matrix.csv")
63 | #
64 | # fixed=read["path_fixed"]
65 | #
66 | #
67 | # print(fixed)
68 |
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/generator_cvs_path_affine_matrix.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 |
6 |
7 | image_cv2_band7=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band1.tif",cv2.IMREAD_UNCHANGED)
8 | image_cv2_band1=cv2.imread("D:\\Second_paper_VAE_CNN\\xian_image\\xi_an_sub10000band3.tif",cv2.IMREAD_UNCHANGED)
9 |
10 | if not os.path.exists("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_fixed"): os.makedirs(
11 | "D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_fixed")
12 |
13 | if not os.path.exists("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_moving"): os.makedirs(
14 | "D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_train_moving")
15 |
16 | if not os.path.exists("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix"): os.makedirs(
17 | "D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix")
18 |
19 |
20 | path_fixed=[]
21 | path_moving=[]
22 | Perpecttive=[]
23 | for x in range(64,image_cv2_band7.shape[0]-64,128):
24 | for y in range(64,image_cv2_band7.shape[1]-64,128):
25 |
26 | sub_fixed=image_cv2_band7[x-64:x+64,y-64:y+64]
27 | sub_moved = image_cv2_band1[x - 64:x + 64, y - 64:y + 64]
28 |
29 | path_fixed.append(sub_fixed)
30 |
31 |
32 | points1 = np.float32([[0, 0], [128, 0], [128, 128], [0, 128]])
33 | points2 = np.float32([[0,np.random.randint(0,50,size=(1,))[0]],
34 | [np.random.randint(80,128,size=(1,))[0], 0],
35 | [128, np.random.randint(85,128,size=(1,))[0]],
36 | [np.random.randint(0,40,size=(1,))[0], 128]])
37 | matrix = cv2.getPerspectiveTransform(points1, points2)
38 |
39 |
40 | matrix2=cv2.getPerspectiveTransform(points2,points1)
41 |
42 | Perpecttive.append(matrix2)
43 | sub_moving = cv2.warpPerspective(sub_moved, matrix, (128, 128))
44 | path_moving.append(sub_moving)
45 |
46 |
47 | path_fixed=np.array(path_fixed)
48 | path_moving=np.array(path_moving)
49 | Perpecttive=np.array(Perpecttive)
50 |
51 | np.savez("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix\\Perspective_matrix.npz",
52 | path_fixed=path_fixed,
53 | path_moving=path_moving,
54 | Perpecttive=Perpecttive
55 | )
56 |
57 | # cvssave=pd.DataFrame({"path_fixed":path_fixed,"path_moving":path_moving,"Perpecttive":Perpecttive})
58 | #
59 | # cvssave.to_csv("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix\\matrix.csv")
60 | #
61 | #
62 | # read=pd.read_csv("D:\\ProgramData\\DenseNet_registration\\Densenet\\densenet_Affine_matrix\\matrix.csv")
63 | #
64 | # fixed=read["path_fixed"]
65 | #
66 | #
67 | # print(fixed)
68 |
--------------------------------------------------------------------------------
/DenseNet_registration/111111111.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | class CNN(tf.keras.Model):
4 | def __init__(self):
5 | super().__init__()
6 |         self.conv1 = tf.keras.layers.Conv2D(
7 |             filters=32,             # number of convolution kernels
8 |             kernel_size=[5, 5],     # receptive-field size
9 |             padding='same',         # padding strategy ('valid' or 'same')
10 |             activation=tf.nn.relu   # activation function
11 |         )
12 | self.pool1 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
13 | self.conv2 = tf.keras.layers.Conv2D(
14 | filters=64,
15 | kernel_size=[5, 5],
16 | padding='same',
17 | activation=tf.nn.relu
18 | )
19 | self.pool2 = tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
20 | self.flatten = tf.keras.layers.Flatten()
21 | self.dense1 = tf.keras.layers.Dense(units=1024, activation=tf.nn.relu)
22 | self.dense2 = tf.keras.layers.Dense(units=6)
23 |
24 | def call(self, inputs):
25 | x = self.conv1(inputs) # [batch_size, 28, 28, 32]
26 | x = self.pool1(x) # [batch_size, 14, 14, 32]
27 | x = self.conv2(x) # [batch_size, 14, 14, 64]
28 | x = self.pool2(x) # [batch_size, 7, 7, 64]
29 | x = self.flatten(x) # [batch_size, 7 * 7 * 64]
30 |         x = self.dense1(x)          # [batch_size, 1024]
31 |         x = self.dense2(x)          # [batch_size, 6]
32 |         # the 6 affine parameters are regressed directly; softmax would be wrong here
33 |         return x
34 |
35 |
36 |
37 | matrixt_file= 'D:/ProgramData/DenseNet_registration/Densenet/densenet_Affine_matrix/matrix.npz'
38 | fixed_image=np.load(matrixt_file)['path_fixed'].reshape((6084, 128, 128,1))
39 | moving_image=np.load(matrixt_file)['path_moving'].reshape((6084, 128, 128,1))
40 |
41 |
42 | per_matrix=np.load(matrixt_file)['Perpecttive'].reshape((fixed_image.shape[0],1,6))
43 |
44 | fixed_image=fixed_image.astype('float32')/255
45 | moving_image=moving_image.astype('float32')/255
46 | con=np.concatenate((fixed_image,moving_image),axis=3)
47 |
48 | per_matrix=per_matrix.astype('float32')
49 |
50 | train_ds = tf.data.Dataset.from_tensor_slices((con,per_matrix)).shuffle(10000).batch(4)
51 |
52 | print(train_ds)
53 | print('')
54 |
55 | model = CNN()
56 |
57 |
58 |
59 |
60 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001)
61 |
62 |
63 | def train_step(images,labels):
64 | with tf.GradientTape() as tape:
65 | predictions = model(images)
66 | loss = tf.reduce_mean(tf.square(labels-predictions))
67 | print(loss.numpy())
68 |
69 |     grads = tape.gradient(loss, model.variables)  # model.variables collects every variable in the model
70 | optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))
71 |
72 |
73 | EPOCHS =1
74 | for epoch in range(EPOCHS):
75 | for images1, label in train_ds:
76 | train_step(images1, label)
77 |
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/displacement_4point.py:
--------------------------------------------------------------------------------
1 | import cv2
2 |
3 | import matplotlib.pyplot as plt
4 | import numpy as np
5 | from skimage.draw import line
6 |
7 | np.set_printoptions(suppress=True)
8 | plt.rcParams['font.sans-serif']=['SimHei']
9 |
10 |
11 | image_cv2=cv2.imread("C:\\Users\\lilia\\Desktop\\dataresult\\origin\\data.png",cv2.IMREAD_UNCHANGED)
12 |
13 | image_cv2=np.pad(np.array(image_cv2),((200,300),(200,300),(0,0)),constant_values=0)
14 | print(image_cv2.shape)
15 |
16 |
17 | rr,cc=line(500,500,500,900)
18 | image_cv2[rr,cc]=(0,0,255)
19 |
20 | rr,cc=line(500,900,900,900)
21 | image_cv2[rr,cc]=(0,0,255)
22 |
23 | rr,cc=line(900,900,900,500)
24 | image_cv2[rr,cc]=(0,0,255)
25 |
26 | rr,cc=line(900,500,500,500)
27 | image_cv2[rr,cc]=(0,0,255)
28 |
29 | plt.subplot(2,2,1)
30 | plt.title('original image')
31 | plt.imshow(image_cv2)
32 | cv2.imwrite('C:\\Users\\lilia\\Desktop\\data\\origin.tif',image_cv2)
33 |
34 |
35 | # another 4-point transform
36 | # point1_256=np.float32([[400,400],[600,400],[600,600],[400,600]])
37 | # point2_256=np.float32([[402,398],[603,402],[598,596 ],[400,605]])
38 |
39 | point1_256=np.float32([[500,500],[900,500],[900,900],[500,900]])
40 | point2_256=np.float32([[485,480],[920,490],[880,880 ],[488,879]])
41 |
42 | matrix1 = cv2.getPerspectiveTransform(point1_256,point2_256)
43 | output1 = cv2.warpPerspective(image_cv2, matrix1, (1500, 1500))
44 |
45 |
46 | p_400_400_displace=np.matmul(matrix1,np.array([1200,1200,1]).reshape((3,1)))
47 | print(matrix1)
48 | print('')
49 | print(p_400_400_displace/p_400_400_displace[2,0])
50 |
51 |
52 |
53 |
54 | rr,cc=line(500,500,500,900)
55 | output1[rr,cc]=(0,255,0)
56 |
57 | rr,cc=line(500,900,900,900)
58 | output1[rr,cc]=(0,255,0)
59 |
60 | rr,cc=line(900,900,900,500)
61 | output1[rr,cc]=(0,255,0)
62 |
63 | rr,cc=line(900,500,500,500)
64 | output1[rr,cc]=(0,255,0)
65 |
66 |
67 | rr,cc=line(200,200,200,1200)
68 | output1[rr,cc]=(0,255,0)
69 |
70 | rr,cc=line(200,1200,1200,1200)
71 | output1[rr,cc]=(0,255,0)
72 |
73 | rr,cc=line(1200,1200,1200,200)
74 | output1[rr,cc]=(0,255,0)
75 |
76 | rr,cc=line(1200,200,200,200)
77 | output1[rr,cc]=(0,255,0)
78 |
79 | plt.subplot(2,2,2)
80 | plt.title('H(AB) 4-point transform')
81 | plt.imshow(output1)
82 | cv2.imwrite('C:\\Users\\lilia\\Desktop\\data\\origin_H(AB).tif',output1)
83 |
84 |
85 |
86 |
87 | # invert the matrix, then transform again
88 | matrix2 = cv2.getPerspectiveTransform(point2_256,point1_256)
89 | output2 = cv2.warpPerspective(image_cv2, matrix2, (1500, 1500))
90 |
91 |
92 | plt.subplot(2,2,3)
93 | plt.title('H(BA) 4-point transform')
94 | plt.imshow(output2)
95 | cv2.imwrite('C:\\Users\\lilia\\Desktop\\data\\origin_H(BA).tif',output2)
96 |
97 |
98 |
99 | matrix3 = cv2.getPerspectiveTransform(point1_256,point2_256)
100 | output3 = cv2.warpPerspective(output2, matrix3, (1500, 1500))
101 |
102 | plt.subplot(2,2,4)
103 | plt.title('result of applying H(AB) to the H(BA) output')
104 | plt.imshow(output3)
105 | cv2.imwrite('C:\\Users\\lilia\\Desktop\\data\\origin_H(AB)_H(BA).tif',output3)
106 |
107 | #
108 | # plt.show()
--------------------------------------------------------------------------------
/Similarityfunction/data/1111.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | """
3 | Created on Mon Sep 10 09:59:22 2018
4 |
5 | @author: zy
6 | """
7 |
8 | '''
9 | Demo of the BRIEF feature descriptor
10 | '''
11 | import cv2
12 | import numpy as np
13 |
14 |
15 | def brief_test():
16 |     # load the images in grayscale
17 | img1 = cv2.imread('C:\\Users\\kylenate\\Desktop\\test\\timg.jpg')
18 | img1 = cv2.resize(img1, dsize=(600, 400))
19 | gray1 = cv2.cvtColor(img1, cv2.COLOR_BGR2GRAY)
20 | img2 = cv2.imread('C:\\Users\\kylenate\\Desktop\\test\\timg2.jpg')
21 | img2 = cv2.resize(img2, dsize=(600, 400))
22 | gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY)
23 | image1 = gray1.copy()
24 | image2 = gray2.copy()
25 |
26 | image1 = cv2.medianBlur(image1, 5)
27 | image2 = cv2.medianBlur(image2, 5)
28 |
29 |     '''
30 |     1. Detect keypoints with the SURF algorithm
31 |     '''
32 |     # create a SURF object; a higher threshold yields fewer features, so tune it by trial
33 | surf = cv2.xfeatures2d.SURF_create(3000)
34 | keypoints1 = surf.detect(image1)
35 | keypoints2 = surf.detect(image2)
36 |     # draw the keypoints on the images
37 | image1 = cv2.drawKeypoints(image=image1, keypoints=keypoints1, outImage=image1, color=(255, 0, 255),
38 | flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
39 | image2 = cv2.drawKeypoints(image=image2, keypoints=keypoints2, outImage=image2, color=(255, 0, 255),
40 | flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
41 |     # show the images
42 | cv2.imshow('sift_keypoints1', image1)
43 | cv2.imshow('sift_keypoints2', image2)
44 | cv2.waitKey(20)
45 |
46 |     '''
47 |     2. Compute the feature descriptors
48 |     '''
49 | brief = cv2.xfeatures2d.BriefDescriptorExtractor_create(32)
50 | keypoints1, descriptors1 = brief.compute(image1, keypoints1)
51 | keypoints2, descriptors2 = brief.compute(image2, keypoints2)
52 |
53 | print('descriptors1:', len(descriptors1), 'descriptors2', len(descriptors2))
54 | print(descriptors1)
55 |
56 |     '''
57 |     3. Match the keypoints by Hamming distance
58 |     '''
59 |     matcher = cv2.BFMatcher_create(cv2.NORM_HAMMING)
60 | matchePoints = matcher.match(descriptors1, descriptors2)
61 | print(type(matchePoints), len(matchePoints), matchePoints[0])
62 |
63 |     # pick out the strongest matches
64 | minMatch = 1
65 | maxMatch = 0
66 | for i in range(len(matchePoints)):
67 | if minMatch > matchePoints[i].distance:
68 | minMatch = matchePoints[i].distance
69 | if maxMatch < matchePoints[i].distance:
70 | maxMatch = matchePoints[i].distance
71 |     print('best match distance:', minMatch)
72 |     print('worst match distance:', maxMatch)
73 |
74 |     # keep the top-ranked matches
75 | goodMatchePoints = []
76 | for i in range(len(matchePoints)):
77 | if matchePoints[i].distance < minMatch + (maxMatch - minMatch) / 3:
78 | goodMatchePoints.append(matchePoints[i])
79 |
80 |     # draw the best matches
81 | outImg = None
82 | outImg = cv2.drawMatches(img1, keypoints1, img2, keypoints2, goodMatchePoints, outImg, matchColor=(0, 255, 0),
83 | flags=cv2.DRAW_MATCHES_FLAGS_DEFAULT)
84 | cv2.imshow('matche', outImg)
85 | cv2.waitKey(0)
86 | cv2.destroyAllWindows()
87 |
88 |
89 | if __name__ == '__main__':
90 | brief_test()
--------------------------------------------------------------------------------
/generate_affine_pre_data/wrapAffine/homography_matrix_numpy.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | np.set_printoptions(suppress=True)
5 | # Setting matching points in first image
6 | xy_1 = np.array([[157, 32], # x1[0][0], y1[0][1]
7 | [211, 37], # x2[1][0], y2[1][1]
8 | [222, 107], # x3[2][0], y3[2][1]
9 | [147, 124]]).astype('float32') # x4[3][0], y4[3][1]
10 |
11 | # Setting matching points in second image
12 | xy_2 = np.array([[6, 38], # x'1[0][0], y'1[0][1]
13 | [56, 31], # x'2[1][0], y'2[1][1]
14 | [82, 85], # x'3[2][0], y'3[2][1]
15 | [22, 118]]).astype('float32') # x'4[3][0], y'4[3][1]
16 |
17 |
18 | arrayA = np.array([[xy_1[0][0], xy_1[0][1], 1, 0, 0, 0, -xy_1[0][0] * xy_2[0][0], -xy_1[0][1] * xy_2[0][0],-xy_2[0][0]],
19 | [0, 0, 0, xy_1[0][0], xy_1[0][1], 1, -xy_1[0][0] * xy_2[0][1], -xy_1[0][1] * xy_2[0][1],-xy_2[0][1]],
20 | [xy_1[1][0], xy_1[1][1], 1, 0, 0, 0, -xy_1[1][0] * xy_2[1][0], -xy_1[1][1] * xy_2[1][0],-xy_2[1][0]],
21 | [0, 0, 0, xy_1[1][0], xy_1[1][1], 1, -xy_1[1][0] * xy_2[1][1], -xy_1[1][1] * xy_2[1][1],-xy_2[1][1]],
22 | [xy_1[2][0], xy_1[2][1], 1, 0, 0, 0, -xy_1[2][0] * xy_2[2][0], -xy_1[2][1] * xy_2[2][0],-xy_2[2][0]],
23 | [0, 0, 0, xy_1[2][0], xy_1[2][1], 1, -xy_1[2][0] * xy_2[2][1], -xy_1[2][1] * xy_2[2][1],-xy_2[2][1]],
24 | [xy_1[3][0], xy_1[3][1], 1, 0, 0, 0, -xy_1[3][0] * xy_2[3][0], -xy_1[3][1] * xy_2[3][0],-xy_2[3][0]],
25 | [0, 0, 0, xy_1[3][0], xy_1[3][1], 1, -xy_1[3][0] * xy_2[3][1], -xy_1[3][1] * xy_2[3][1],-xy_2[3][1]]])
26 |
27 |
28 | def getPerspectiveTransformMatrix(p1, p2):
29 | arrayA = np.array(
30 | [[p1[0][0], p1[0][1], 1, 0, 0, 0, -p1[0][0] * p2[0][0], -p1[0][1] * p2[0][0], -p2[0][0]],
31 | [0, 0, 0, p1[0][0], p1[0][1], 1, -p1[0][0] * p2[0][1], -p1[0][1] * p2[0][1], -p2[0][1]],
32 | [p1[1][0], p1[1][1], 1, 0, 0, 0, -p1[1][0] * p2[1][0], -p1[1][1] * p2[1][0], -p2[1][0]],
33 | [0, 0, 0, p1[1][0], p1[1][1], 1, -p1[1][0] * p2[1][1], -p1[1][1] * p2[1][1], -p2[1][1]],
34 | [p1[2][0], p1[2][1], 1, 0, 0, 0, -p1[2][0] * p2[2][0], -p1[2][1] * p2[2][0], -p2[2][0]],
35 | [0, 0, 0, p1[2][0], p1[2][1], 1, -p1[2][0] * p2[2][1], -p1[2][1] * p2[2][1], -p2[2][1]],
36 | [p1[3][0], p1[3][1], 1, 0, 0, 0, -p1[3][0] * p2[3][0], -p1[3][1] * p2[3][0], -p2[3][0]],
37 | [0, 0, 0, p1[3][0], p1[3][1], 1, -p1[3][0] * p2[3][1], -p1[3][1] * p2[3][1], -p2[3][1]]])
38 |
39 | print(arrayA.shape)
40 | U, S, Vh = np.linalg.svd(arrayA)
41 | print(Vh,'==========================')
42 | print(Vh[-1, :],'-----------------')
43 |
44 | print(Vh[-1, -1],'+++++++++++++++++++++++')
45 |
46 | L = Vh[-1, :] / Vh[-1, -1]
47 | H = L.reshape(3, 3)
48 | return H
49 |
50 |
51 |
52 | a=cv2.getPerspectiveTransform(xy_1,xy_2)
53 | b=getPerspectiveTransformMatrix(xy_1,xy_2)
54 | print('')
55 | print(arrayA)
56 | print('')
57 | print(a)
58 | print('')
59 | print(b)
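
For reference, each row pair of arrayA is one correspondence written in direct-linear-transform (DLT) form: a point (x, y) mapping to (u, v) under the homography h = (h_1, ..., h_9), read row-wise, gives

    \begin{pmatrix} x & y & 1 & 0 & 0 & 0 & -ux & -uy & -u \\ 0 & 0 & 0 & x & y & 1 & -vx & -vy & -v \end{pmatrix} h = 0,

so four correspondences stack into the 8x9 system A h = 0. Its nontrivial solution is the right-singular vector of A for the smallest singular value, which is exactly the last row of Vh taken above, normalised so that h_9 = 1.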
60 |
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/voxelmorph_loss.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 |
3 | import cv2
4 | import numpy as np
5 | import matplotlib.pyplot as plt
6 | data=np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']
7 |
8 | image_cv2_band7=data[0:1,0:100,0:100].reshape((100,100))
9 | image_cv2_band1=data[3:4,0:100,0:100].reshape((100,100))
10 | # image_cv2_band1=data[5:6,100:200,100:200].reshape((100,100))
11 |
12 | # image_cv2_band1=image_cv2_band1[100:200,100:200]
13 | #image_cv2_band1=image_cv2_band1[100:200,100:200]/255
14 |
15 |
16 |
17 | plt.subplot(1,2,1)
18 | plt.imshow(image_cv2_band7)
19 |
20 | plt.subplot(1,2,2)
21 | plt.imshow(image_cv2_band1)
22 | plt.show()
23 |
24 |
25 |
26 | class NCC():
27 | """
28 | local (over window) normalized cross correlation
29 | """
30 |
31 | def __init__(self, win=None, eps=1e-5):
32 | self.win = win
33 | self.eps = eps
34 |
35 | def ncc(self, I, J):
36 | # get dimension of volume
37 | # assumes I, J are sized [batch_size, *vol_shape, nb_feats]
38 | ndims = len(I.get_shape().as_list()) - 2
39 | assert ndims in [1, 2, 3], "volumes should be 1 to 3 dimensions. found: %d" % ndims
40 |
41 | # set window size
42 | if self.win is None:
43 | self.win = [9] * ndims
44 |
45 | # get convolution function
46 | conv_fn = getattr(tf.nn, 'conv%dd' % ndims)
47 |
48 | # compute CC squares
49 | I2 = I * I
50 | J2 = J * J
51 | IJ = I * J
52 |
53 | # compute filters
54 | sum_filt = tf.ones([*self.win, 1, 1])
55 | strides = 1
56 | if ndims > 1:
57 | strides = [1] * (ndims + 2)
58 | padding = 'SAME'
59 |
60 | # compute local sums via convolution
61 | I_sum = conv_fn(I, sum_filt, strides, padding)
62 | J_sum = conv_fn(J, sum_filt, strides, padding)
63 | I2_sum = conv_fn(I2, sum_filt, strides, padding)
64 | J2_sum = conv_fn(J2, sum_filt, strides, padding)
65 | IJ_sum = conv_fn(IJ, sum_filt, strides, padding)
66 |
67 | # compute cross correlation
68 | win_size = np.prod(self.win)
69 | u_I = I_sum / win_size
70 | u_J = J_sum / win_size
71 |
72 | cross = IJ_sum - u_J * I_sum - u_I * J_sum + u_I * u_J * win_size
73 | I_var = I2_sum - 2 * u_I * I_sum + u_I * u_I * win_size
74 | J_var = J2_sum - 2 * u_J * J_sum + u_J * u_J * win_size
75 |
76 | cc = cross * cross / (I_var * J_var + self.eps)
77 |
78 |         # mean cc over the volume (loss() returns its negative)
79 | return tf.reduce_mean(cc)
80 |
81 | def loss(self, I, J):
82 | return - self.ncc(I, J)
83 |
84 |
85 |
86 | image_cv2_band7=np.reshape(image_cv2_band7,(1,100,100,1))
87 | image_cv2_band1=np.reshape(image_cv2_band1,(1,100,100,1))
88 |
89 | image_cv2_band7=tf.convert_to_tensor(image_cv2_band7,dtype='float32')
90 | image_cv2_band1=tf.convert_to_tensor(image_cv2_band1,dtype='float32')
91 |
92 | T=NCC()
93 |
94 | output=T.loss(image_cv2_band7,image_cv2_band1)
95 | tf.InteractiveSession()
96 | output = output.eval()
97 |
98 |
99 | print(output)
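
For reference, per window w the quantity accumulated above is the squared local normalized cross-correlation,

    CC(I, J) = \frac{\big(\sum_{w}(I - \bar I)(J - \bar J)\big)^2}{\sum_{w}(I - \bar I)^2 \; \sum_{w}(J - \bar J)^2 + \epsilon},

with all window sums obtained by convolving with a 9x9 box filter; loss() negates the mean so that minimising the loss maximises correlation.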
100 |
--------------------------------------------------------------------------------
/predict.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from Registration_model.densenet_model import Registration_model
4 | from Registration_model.data_generator import vxm_data_generator
5 | np.set_printoptions(suppress=True)
6 |
7 | import os
8 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
9 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
10 |
11 |
12 |
13 | class regis_model(tf.keras.Model):
14 | def __init__(self):
15 | super().__init__()
16 | self.net1=Registration_model()
17 | self.net2=Registration_model()
18 |
19 | self.fully_con1 = tf.keras.layers.Dense(units=1024,activation=tf.nn.relu,dtype='float32')
20 | self.fully_con2 = tf.keras.layers.Dense(units=512,activation=tf.nn.relu,dtype='float32')
21 | self.fully_con3 = tf.keras.layers.Dense(units=128,activation=tf.nn.relu,dtype='float32')
22 | self.fully_con4 = tf.keras.layers.Dense(units=64,activation=tf.nn.relu,dtype='float32')
23 | self.out_Affine = tf.keras.layers.Dense(units=8,dtype='float32')
24 |
25 |
26 | def call(self,Inputtensor):
27 | input1=Inputtensor[0]
28 | input2=Inputtensor[1]
29 | ou1=self.net1(input1)
30 | ou2=self.net2(input2)
31 | output = tf.keras.layers.concatenate([ou1, ou2])
32 |
33 | output=self.fully_con1(output)
34 | output=self.fully_con2(output)
35 | output = self.fully_con3(output)
36 | output = self.fully_con4(output)
37 | output=self.out_Affine(output)
38 |
39 |
40 | return output
41 |
42 | matrixt_file= 'D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\pre_displacement_4_point.npz'
43 |
44 | fixed_image=np.load(matrixt_file)['fixed'].reshape((1, 128, 128,1))
45 | moving_image=np.load(matrixt_file)['moving'].reshape((1, 128, 128,1))
46 | per_matrix=np.load(matrixt_file)['displacement_4_point'].reshape((fixed_image.shape[0],8))
47 |
48 |
49 | fixed_image=fixed_image.astype('float32')/255
50 | moving_image=moving_image.astype('float32')/255
51 | per_matrix=per_matrix.astype('float32')
52 |
53 | data=vxm_data_generator(fixed_image,moving_image,per_matrix,batch_size=1)
54 | con,m=next(data)
55 | print(m)
56 | model=regis_model()
57 | model.compile(loss='MSE',
58 | optimizer=tf.keras.optimizers.Adam(0.0001)
59 | )
60 |
61 | # Load the state of the old model
62 | model.load_weights('D:/ProgramData_second/DenseNet_registration/save_model_3/model')
63 |
64 | # Check that the model state has been preserved
65 | new_predictions = model.predict(con)
66 | temp=m-new_predictions
67 |
68 | print(new_predictions)
69 | print('')
70 | print(np.reshape(temp,(8,)))
71 |
72 |
109 | # with tf.device("/cpu:0"):
110 | # model = regis_model()
111 | #
112 | # model.load_weights('D:/ProgramData/DenseNet_registration/Densenet/model.h5',by_name=True)
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from Registration_model.densenet_model import Registration_model
4 | from Registration_model.densenet_model import getPerspectiveTransformMatrix
5 | from Registration_model.densenet_model import spatial_transformer_network
6 | from Registration_model.loss import matix_loss_mes,image_loss_mes
7 |
8 |
9 | class regis_model(tf.keras.Model):
10 | def __init__(self):
11 | super().__init__()
12 | self.net1=Registration_model()
13 | self.net2=Registration_model()
14 |
15 | self.fully_con1 = tf.keras.layers.Dense(units=1024,activation=tf.nn.relu,dtype='float32')
16 | self.fully_con2 = tf.keras.layers.Dense(units=512,activation=tf.nn.relu,dtype='float32')
17 | self.fully_con3 = tf.keras.layers.Dense(units=128,activation=tf.nn.relu,dtype='float32')
18 | self.fully_con4 = tf.keras.layers.Dense(units=64,activation=tf.nn.relu,dtype='float32')
19 | self.out_Affine = tf.keras.layers.Dense(units=8,dtype='float32')
20 | self.matrix = getPerspectiveTransformMatrix(name='spatial_transformer')  # turns the 8 predicted offsets into a perspective matrix
21 | self.image = spatial_transformer_network()
22 |
23 | def call(self,Inputtensor):
24 |
25 | input1=Inputtensor[:,:,:,0:1]
26 | input2=Inputtensor[:,:,:,1:2]
27 |
28 | ou1=self.net1(input1)
29 | ou2=self.net2(input2)
30 | output = tf.keras.layers.concatenate([ou1, ou2])
31 | output=self.fully_con1(output)
32 | output=self.fully_con2(output)
33 | output = self.fully_con3(output)
34 | output = self.fully_con4(output)
35 | output1=self.out_Affine(output)
36 |
37 | output2 = self.matrix(output1)           # 4-point offsets -> perspective transform
38 | output3 = self.image([input2,output2])   # warp the moving image with the predicted transform
39 |
40 | return [output1,output3]
41 |
42 | import os
43 |
44 | # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
45 | # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
46 |
47 | # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
48 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
49 |
50 | matrix_file = 'E:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point_paper_random_landsat_256.npz'
51 | fixed_image=np.load(matrix_file)['fixed'][0:300,:,:,:]
52 | moving_image=np.load(matrix_file)['moving'][0:300,:,:,:]
53 | per_matrix=np.load(matrix_file)['displacement_4_point'][0:300,:].reshape((300,8))
54 |
55 |
56 | fixed_image=fixed_image.astype('float32')
57 | moving_image=moving_image.astype('float32')
58 | matrix=per_matrix.astype('float32')
59 |
60 |
61 | con_fixed_moving_image=np.concatenate((fixed_image,moving_image),axis=3)
62 | train_ds = tf.data.Dataset.from_tensor_slices((con_fixed_moving_image,(matrix,fixed_image))).shuffle(100).batch(4)
63 | train_ds=train_ds.prefetch(tf.data.experimental.AUTOTUNE)
64 |
65 |
66 |
67 | losses=[matix_loss_mes(),image_loss_mes()]
68 |
69 | model=regis_model()
70 |
71 | model.compile(loss=losses,
72 | optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
73 | loss_weights=[1., 0.2]
74 | )
75 |
76 | model.fit(train_ds,epochs=50,verbose = 1)
77 |
78 | # model.fit_generator(vxm_data_generator(fixed_image,moving_image,per_matrix), epochs=1, steps_per_epoch=4, verbose = 1)
79 | model.save_weights('E:/ProgramData_second/DenseNet_registration/save_model/model',save_format='tf')
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
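The matix_loss_mes and image_loss_mes classes imported above live in Registration_model/loss.py, which is not reproduced at this point in the listing. Judging by how they are compiled with loss_weights=[1., 0.2], they act as mean-squared errors on the 8-vector of corner offsets and on the warped image respectively. A rough stand-in sketch under that assumption (not the repository's actual implementation):

import tensorflow as tf

class matix_loss_mes(tf.keras.losses.Loss):
    # assumed: MSE between true and predicted 4-point displacements
    def call(self, y_true, y_pred):
        return tf.reduce_mean(tf.square(y_true - y_pred))

class image_loss_mes(tf.keras.losses.Loss):
    # assumed: MSE between the fixed image and the warped moving image
    def call(self, y_true, y_pred):
        return tf.reduce_mean(tf.square(y_true - y_pred))

With the weights above, the total objective is 1.0 * matrix term + 0.2 * image term.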
/DenseNet_registration/Registration_model/predict.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from Registration_model.densenet_model import Registration_model
4 | from Registration_model.data_generator import vxm_data_generator
5 | np.set_printoptions(suppress=True)
6 |
7 | import os
8 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
9 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
10 |
11 |
12 |
13 | class regis_model(tf.keras.Model):
14 | def __init__(self):
15 | super().__init__()
16 | self.net1=Registration_model()
17 | self.net2=Registration_model()
18 |
19 | self.fully_con1 = tf.keras.layers.Dense(units=1024,activation=tf.nn.relu,dtype='float32')
20 | self.fully_con2 = tf.keras.layers.Dense(units=512,activation=tf.nn.relu,dtype='float32')
21 | self.fully_con3 = tf.keras.layers.Dense(units=128,activation=tf.nn.relu,dtype='float32')
22 | self.fully_con4 = tf.keras.layers.Dense(units=64,activation=tf.nn.relu,dtype='float32')
23 | self.out_Affine = tf.keras.layers.Dense(units=8,dtype='float32')
24 |
25 |
26 | def call(self,Inputtensor):
27 | input1=Inputtensor[0]
28 | input2=Inputtensor[1]
29 | ou1=self.net1(input1)
30 | ou2=self.net2(input2)
31 | output = tf.keras.layers.concatenate([ou1, ou2])
32 |
33 | output=self.fully_con1(output)
34 | output=self.fully_con2(output)
35 | output = self.fully_con3(output)
36 | output = self.fully_con4(output)
37 | output=self.out_Affine(output)
38 |
39 |
40 | return output
41 |
42 | matrix_file = 'D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\pre_displacement_4_point.npz'
43 | npz = np.load(matrix_file)  # open the archive once instead of re-reading it for every key
44 | fixed_image = npz['fixed'].reshape((1, 128, 128, 1))
45 | moving_image = npz['moving'].reshape((1, 128, 128, 1))
46 | per_matrix = npz['displacement_4_point'].reshape((fixed_image.shape[0], 8))
47 |
48 |
49 | fixed_image=fixed_image.astype('float32')/255  # scale 8-bit intensities to [0, 1]
50 | moving_image=moving_image.astype('float32')/255
51 | per_matrix=per_matrix.astype('float32')
52 |
53 | data=vxm_data_generator(fixed_image,moving_image,per_matrix,batch_size=1)
54 | con,m=next(data)
55 | print(m)
56 | model=regis_model()
57 | model.compile(loss='MSE',
58 | optimizer=tf.keras.optimizers.Adam(0.0001)
59 | )
60 |
61 | # Load the state of the old model
62 | model.load_weights('D:/ProgramData_second/DenseNet_registration/save_model_3/model')
63 |
64 | # Check that the model state has been preserved
65 | new_predictions = model.predict(con)
66 | temp=m-new_predictions
67 |
68 | print(new_predictions)
69 | print('')
70 | print(np.reshape(temp,(8,)))
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
96 |
97 |
98 |
99 |
100 |
101 |
102 |
103 |
104 |
105 |
106 |
107 |
108 |
109 | # with tf.device("/cpu:0"):
110 | # model = regis_model()
111 | #
112 | # model.load_weights('D:/ProgramData/DenseNet_registration/Densenet/model.h5',by_name=True)
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/train.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | from Registration_model.densenet_model import Registration_model
4 | from Registration_model.densenet_model import getPerspectiveTransformMatrix
5 | from Registration_model.densenet_model import spatial_transformer_network
6 | from Registration_model.loss import matix_loss_mes,image_loss_mes
7 |
8 |
9 | class regis_model(tf.keras.Model):
10 | def __init__(self):
11 | super().__init__()
12 | self.net1=Registration_model()
13 | self.net2=Registration_model()
14 |
15 | self.fully_con1 = tf.keras.layers.Dense(units=1024,activation=tf.nn.relu,dtype='float32')
16 | self.fully_con2 = tf.keras.layers.Dense(units=512,activation=tf.nn.relu,dtype='float32')
17 | self.fully_con3 = tf.keras.layers.Dense(units=128,activation=tf.nn.relu,dtype='float32')
18 | self.fully_con4 = tf.keras.layers.Dense(units=64,activation=tf.nn.relu,dtype='float32')
19 | self.out_Affine = tf.keras.layers.Dense(units=8,dtype='float32')
20 | self.matrix = getPerspectiveTransformMatrix(name='spatial_transformer')  # turns the 8 predicted offsets into a perspective matrix
21 | self.image = spatial_transformer_network()
22 |
23 | def call(self,Inputtensor):
24 |
25 | input1=Inputtensor[:,:,:,0:1]
26 | input2=Inputtensor[:,:,:,1:2]
27 |
28 | ou1=self.net1(input1)
29 | ou2=self.net2(input2)
30 | output = tf.keras.layers.concatenate([ou1, ou2])
31 | output=self.fully_con1(output)
32 | output=self.fully_con2(output)
33 | output = self.fully_con3(output)
34 | output = self.fully_con4(output)
35 | output1=self.out_Affine(output)
36 |
37 | output2 = self.matrix(output1)           # 4-point offsets -> perspective transform
38 | output3 = self.image([input2,output2])   # warp the moving image with the predicted transform
39 |
40 | return [output1,output3]
41 |
42 | import os
43 |
44 | # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
45 | # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
46 |
47 | # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
48 | # os.environ["CUDA_VISIBLE_DEVICES"] = "0"
49 |
50 | matrix_file = 'E:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point_paper_random_landsat_256.npz'
51 | fixed_image=np.load(matrix_file)['fixed'][0:300,:,:,:]
52 | moving_image=np.load(matrix_file)['moving'][0:300,:,:,:]
53 | per_matrix=np.load(matrix_file)['displacement_4_point'][0:300,:].reshape((300,8))
54 |
55 |
56 | fixed_image=fixed_image.astype('float32')
57 | moving_image=moving_image.astype('float32')
58 | matrix=per_matrix.astype('float32')
59 |
60 |
61 | con_fixed_moving_image=np.concatenate((fixed_image,moving_image),axis=3)
62 | train_ds = tf.data.Dataset.from_tensor_slices((con_fixed_moving_image,(matrix,fixed_image))).shuffle(100).batch(4)
63 | train_ds=train_ds.prefetch(tf.data.experimental.AUTOTUNE)
64 |
65 |
66 |
67 | losses=[matix_loss_mes(),image_loss_mes()]
68 |
69 | model=regis_model()
70 |
71 | model.compile(loss=losses,
72 | optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
73 | loss_weights=[1., 0.2]
74 | )
75 |
76 | model.fit(train_ds,epochs=50,verbose = 1)
77 |
78 | # model.fit_generator(vxm_data_generator(fixed_image,moving_image,per_matrix), epochs=1, steps_per_epoch=4, verbose = 1)
79 | model.save_weights('E:/ProgramData_second/DenseNet_registration/save_model/model',save_format='tf')
80 |
81 |
82 |
83 |
84 |
85 |
86 |
87 |
88 |
89 |
90 |
91 |
92 |
93 |
94 |
95 |
--------------------------------------------------------------------------------
/Similarityfunction/similar_function/FAST_impliment_9.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | data=np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']
6 |
7 | image_cv2_band7=data[0:1,0:100,0:100].reshape((100,100))
8 | #image_cv2_band1=data[5:6,0:100,0:100].reshape((100,100))
9 |
10 | image_cv2_band1=data[3:4,0:100,0:100].reshape((100,100))
11 |
12 | plt.subplot(1,3,1)
13 | plt.imshow(image_cv2_band7)
14 |
15 | plt.subplot(1,3,2)
16 | plt.imshow(image_cv2_band1)
17 |
18 | plt.subplot(1,3,3)
19 | plt.plot(np.arange(100).reshape(100,),image_cv2_band7[50:51,:].reshape(100,))
20 | plt.plot(np.arange(100).reshape(100,),image_cv2_band1[50:51,:].reshape(100,))
21 |
22 |
23 | plt.show()
24 |
25 |
26 |
27 | origin=130   # intensity-difference threshold: circle pixels differing from the center by more than this count toward the score
28 | image1=[]
29 |
30 | for i in range(3,image_cv2_band7.shape[0]-3):
31 | for j in range(3,image_cv2_band7.shape[1]-3):   # start at 3: with j=2, j-3 would wrap around to the last column
32 |
33 | temp=[
34 |
35 | np.fabs(image_cv2_band7[i-3][j]-image_cv2_band7[i][j]),
36 | np.fabs(image_cv2_band7[i-3][j+1]-image_cv2_band7[i][j]),
37 | np.fabs(image_cv2_band7[i-2][j+2]-image_cv2_band7[i][j]),
38 | np.fabs(image_cv2_band7[i-1][j+3]-image_cv2_band7[i][j]),
39 | np.fabs(image_cv2_band7[i][j+3] - image_cv2_band7[i][j]),
40 | np.fabs(image_cv2_band7[i +1][j+3] - image_cv2_band7[i][j]),
41 | np.fabs(image_cv2_band7[i + 2][j+2] - image_cv2_band7[i][j]),
42 | np.fabs(image_cv2_band7[i + 3][j+1] - image_cv2_band7[i][j]),
43 | np.fabs(image_cv2_band7[i + 3][j] - image_cv2_band7[i][j]),
44 | np.fabs(image_cv2_band7[i + 3][j-1] - image_cv2_band7[i][j]),
45 | np.fabs(image_cv2_band7[i + 2][j-2] - image_cv2_band7[i][j]),
46 | np.fabs(image_cv2_band7[i + 1][j-3] - image_cv2_band7[i][j]),
47 | np.fabs(image_cv2_band7[i ][j-3] - image_cv2_band7[i][j]),
48 | np.fabs(image_cv2_band7[i -1][j-3] - image_cv2_band7[i][j]),
49 | np.fabs(image_cv2_band7[i -2][j-2] - image_cv2_band7[i][j]),
50 | np.fabs(image_cv2_band7[i -3][j-1] - image_cv2_band7[i][j])
51 |
52 | ]
53 | temp=np.array(temp)
54 |
55 | # image1.append(np.mean(temp))
56 | t=[]
57 | for k in temp:
58 | if k >origin:
59 | t.append(1)
60 | else:t.append(0)
61 | image1.append(np.sum(t))
62 |
63 |
64 | image2=[]
65 |
66 | for i in range(3,image_cv2_band1.shape[0]-3):
67 | for j in range(3,image_cv2_band1.shape[1]-3):   # start at 3, and iterate over band1's own width
68 |
69 | temp=[np.fabs(image_cv2_band1[i-3][j]-image_cv2_band1[i][j]),
70 | np.fabs(image_cv2_band1[i-3][j+1]-image_cv2_band1[i][j]),
71 | np.fabs(image_cv2_band1[i-2][j+2]-image_cv2_band1[i][j]),
72 | np.fabs(image_cv2_band1[i-1][j+3]-image_cv2_band1[i][j]),
73 | np.fabs(image_cv2_band1[i][j+3] - image_cv2_band1[i][j]),
74 | np.fabs(image_cv2_band1[i +1][j+3] - image_cv2_band1[i][j]),
75 | np.fabs(image_cv2_band1[i + 2][j+2] - image_cv2_band1[i][j]),
76 | np.fabs(image_cv2_band1[i + 3][j+1] - image_cv2_band1[i][j]),
77 | np.fabs(image_cv2_band1[i + 3][j] - image_cv2_band1[i][j]),
78 | np.fabs(image_cv2_band1[i + 3][j-1] - image_cv2_band1[i][j]),
79 | np.fabs(image_cv2_band1[i + 2][j-2] - image_cv2_band1[i][j]),
80 | np.fabs(image_cv2_band1[i + 1][j-3] - image_cv2_band1[i][j]),
81 | np.fabs(image_cv2_band1[i ][j-3] - image_cv2_band1[i][j]),
82 | np.fabs(image_cv2_band1[i -1][j-3] - image_cv2_band1[i][j]),
83 | np.fabs(image_cv2_band1[i -2][j-2] - image_cv2_band1[i][j]),
84 | np.fabs(image_cv2_band1[i -3][j-1] - image_cv2_band1[i][j])]
85 | temp=np.array(temp)
86 |
87 | t=[]
88 | # image2.append(np.mean(temp))
89 | for k in temp:
90 | if k >origin:
91 | t.append(1)
92 | else:t.append(0)
93 | image2.append(np.sum(t))
94 |
95 |
96 |
97 |
98 | image1=np.array(image1)
99 | image2=np.array(image2)
100 |
101 | print(image1)
102 | print(image2)
103 |
104 |
105 | reduce=image2-image1
106 | squr=np.mean(reduce**2)
107 |
108 | print('direct subtraction:',np.sum(image_cv2_band7-image_cv2_band1))
109 | print('MSE of the FAST scores:',squr)
110 |
111 |
112 |
113 |
114 |
115 |
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
--------------------------------------------------------------------------------
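The double loop above is O(H*W*16) in pure Python and slow on large rasters. The same 16-offset ring comparison vectorizes cleanly with array slicing; a sketch with the same threshold semantics (the input should be float or signed int, not uint8, so the differences do not wrap):

import numpy as np

OFFSETS = [(-3, 0), (-3, 1), (-2, 2), (-1, 3), (0, 3), (1, 3), (2, 2), (3, 1),
           (3, 0), (3, -1), (2, -2), (1, -3), (0, -3), (-1, -3), (-2, -2), (-3, -1)]

def fast_score_map(img, threshold=130):
    # for every interior pixel, count ring pixels whose absolute difference from the center exceeds the threshold
    h, w = img.shape
    core = img[3:h - 3, 3:w - 3]
    score = np.zeros_like(core, dtype=np.int32)
    for di, dj in OFFSETS:
        ring = img[3 + di:h - 3 + di, 3 + dj:w - 3 + dj]   # ring pixel at this offset, aligned with core
        score += (np.abs(ring - core) > threshold).astype(np.int32)
    return score

score7 = fast_score_map(image_cv2_band7.astype(np.float64))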
/Similarityfunction/similar_function/FAST_impliment.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 | data=np.load('C:\\Users\\kylenate\\Desktop\\paper_registration1114\\land_sat.npz')['data_image']
5 |
6 | image_cv2_band7=data[0:1,0:100,0:100].reshape((100,100))
7 | # image_cv2_band1=data[5:6,0:100,0:100].reshape((100,100))
8 | image_cv2_band1=data[1:2,100:200,0:100].reshape((100,100))
9 |
10 |
11 |
12 | plt.subplot(1,2,1)
13 | plt.imshow(image_cv2_band7)
14 |
15 | plt.subplot(1,2,2)
16 | plt.imshow(image_cv2_band1)
17 | plt.show()
18 |
19 |
20 | def circle(row,col):   # the 16 pixels of the FAST Bresenham circle around (row, col); defined for reference, not called below
21 | point1 = (row -3, col)
22 | point2 = (row -3, col +1)
23 | point3 = (row -2, col +2)
24 | point4 = (row -1, col +3)
25 | point5 = (row, col +3)
26 | point6 = (row +1, col +3)
27 | point7 = (row +2, col +2)
28 | point8 = (row +3, col +1)
29 | point9 = (row +3, col)
30 | point10 = (row +3, col -1)
31 | point11 = (row +2, col -2)
32 | point12 = (row +1, col -3)
33 | point13 = (row, col -3)
34 | point14 = (row -1, col -3)
35 | point15 = (row -2, col -2)
36 | point16 = (row -3, col -1)
37 |
38 | con_point=[point1, point2 ,point3 ,point4 ,point5 ,point6 ,point7 ,point8 ,point9 ,point10 ,point11 ,point12, point13
39 | ,point14 ,point15 ,point16]
40 |
41 | return con_point
42 |
43 |
44 | image1=[]
45 |
46 | for i in range(3,image_cv2_band7.shape[0]-3):
47 | for j in range(3,image_cv2_band7.shape[1]-3):   # start at 3: with j=2, j-3 would wrap around to the last column
48 |
49 | temp=[
50 |
51 | np.fabs(image_cv2_band7[i-3][j]-image_cv2_band7[i][j]),
52 | np.fabs(image_cv2_band7[i-3][j+1]-image_cv2_band7[i][j]),
53 | np.fabs(image_cv2_band7[i-2][j+2]-image_cv2_band7[i][j]),
54 | np.fabs(image_cv2_band7[i-1][j+3]-image_cv2_band7[i][j]),
55 | np.fabs(image_cv2_band7[i][j+3] - image_cv2_band7[i][j]),
56 | np.fabs(image_cv2_band7[i +1][j+3] - image_cv2_band7[i][j]),
57 | np.fabs(image_cv2_band7[i + 2][j+2] - image_cv2_band7[i][j]),
58 | np.fabs(image_cv2_band7[i + 3][j+1] - image_cv2_band7[i][j]),
59 | np.fabs(image_cv2_band7[i + 3][j] - image_cv2_band7[i][j]),
60 | np.fabs(image_cv2_band7[i + 3][j-1] - image_cv2_band7[i][j]),
61 | np.fabs(image_cv2_band7[i + 2][j-2] - image_cv2_band7[i][j]),
62 | np.fabs(image_cv2_band7[i + 1][j-3] - image_cv2_band7[i][j]),
63 | np.fabs(image_cv2_band7[i ][j-3] - image_cv2_band7[i][j]),
64 | np.fabs(image_cv2_band7[i -1][j-3] - image_cv2_band7[i][j]),
65 | np.fabs(image_cv2_band7[i -2][j-2] - image_cv2_band7[i][j]),
66 | np.fabs(image_cv2_band7[i -3][j-1] - image_cv2_band7[i][j])
67 | ]
68 | temp=np.array(temp)
69 |
70 | image1.append(np.mean(temp))
71 |
72 |
73 |
74 | image2=[]
75 |
76 | for i in range(3,image_cv2_band1.shape[0]-3):
77 | for j in range(3,image_cv2_band1.shape[1]-3):   # start at 3, and iterate over band1's own width
78 |
79 | temp=[np.fabs(image_cv2_band1[i-3][j]-image_cv2_band1[i][j]),
80 | np.fabs(image_cv2_band1[i-3][j+1]-image_cv2_band1[i][j]),
81 | np.fabs(image_cv2_band1[i-2][j+2]-image_cv2_band1[i][j]),
82 | np.fabs(image_cv2_band1[i-1][j+3]-image_cv2_band1[i][j]),
83 | np.fabs(image_cv2_band1[i][j+3] - image_cv2_band1[i][j]),
84 | np.fabs(image_cv2_band1[i +1][j+3] - image_cv2_band1[i][j]),
85 | np.fabs(image_cv2_band1[i + 2][j+2] - image_cv2_band1[i][j]),
86 | np.fabs(image_cv2_band1[i + 3][j+1] - image_cv2_band1[i][j]),
87 | np.fabs(image_cv2_band1[i + 3][j] - image_cv2_band1[i][j]),
88 | np.fabs(image_cv2_band1[i + 3][j-1] - image_cv2_band1[i][j]),
89 | np.fabs(image_cv2_band1[i + 2][j-2] - image_cv2_band1[i][j]),
90 | np.fabs(image_cv2_band1[i + 1][j-3] - image_cv2_band1[i][j]),
91 | np.fabs(image_cv2_band1[i ][j-3] - image_cv2_band1[i][j]),
92 | np.fabs(image_cv2_band1[i -1][j-3] - image_cv2_band1[i][j]),
93 | np.fabs(image_cv2_band1[i -2][j-2] - image_cv2_band1[i][j]),
94 | np.fabs(image_cv2_band1[i -3][j-1] - image_cv2_band1[i][j])]
95 |
96 | temp=np.array(temp)
97 |
98 | image2.append(np.mean(temp))
99 |
100 |
101 |
102 |
103 |
104 | image1=np.array(image1)
105 | image2=np.array(image2)
106 |
107 |
108 | reduce=image1-image2
109 |
110 | print(reduce)
111 | squr=np.mean(reduce**2)
112 | print(squr)
113 |
114 | print('direct subtraction:',np.sum(image_cv2_band7-image_cv2_band1))
115 | print('MSE of the FAST point scores:',np.sum(squr)/10000)
116 |
117 |
118 |
119 |
120 |
121 |
122 |
123 |
124 |
125 |
126 |
127 |
128 |
129 |
130 |
131 |
132 |
133 |
134 |
135 |
136 |
137 |
138 |
139 |
140 |
141 |
142 |
143 |
144 |
145 |
146 |
147 |
148 |
149 |
150 |
151 |
152 |
--------------------------------------------------------------------------------
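For a cross-check of the hand-rolled ring scores, OpenCV ships a reference FAST detector. A small usage sketch (the detector wants an 8-bit image, and its threshold is not directly comparable to the `origin` value used above):

import cv2
import numpy as np

img8 = cv2.normalize(image_cv2_band7, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)
fast = cv2.FastFeatureDetector_create(threshold=30, nonmaxSuppression=True)
keypoints = fast.detect(img8, None)
print('OpenCV FAST keypoints:', len(keypoints))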
/generate_affine_pre_data/generate_train_data/4_point_displacement_paper_random_256.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 |
4 | one=np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\one\\landsat1_1.npz")['image'][0]
5 | second=np.load("D:\\Second_paper_VAE_CNN\\xi_an_landsat8\\second\\landsat2_2.npz")['image'][0]
6 |
7 | one=(one-np.min(one))/(np.max(one)-np.min(one))
8 | second=(second-np.min(second))/(np.max(second)-np.min(second))
9 | one=one.astype('float32')
10 | second=second.astype('float32')
11 |
12 | fixed0=[]
13 | moving0=[]
14 | displacement_4_point0=[]
15 |
16 |
17 | for x in range(256,one.shape[0]-256,32):
18 | for y in range(256,one.shape[1]-256,64):
19 | print(x,y)
20 |
21 | sub_fixed_512=one[x-256:x+256,y-256:y+256]
22 | sub_moved_512 = second[x - 256:x + 256, y - 256:y + 256]
23 |
24 | sub_fixed = sub_fixed_512[128:384, 128:384]
25 |
26 | fixed0.append(sub_fixed.reshape((256, 256, 1)))
27 |
28 | points1 = np.float32([[128, 128], [384, 128], [384, 384], [128, 384]])
29 | points2 = np.float32([[np.random.randint(118,138,size=(1,))[0],np.random.randint(118,138,size=(1,))[0]],
30 | [np.random.randint(374,394,size=(1,))[0], np.random.randint(118,138,size=(1,))[0]],
31 | [np.random.randint(374,394,size=(1,))[0], np.random.randint(374,394,size=(1,))[0]],
32 | [np.random.randint(118,138,size=(1,))[0], np.random.randint(374,394,size=(1,))[0]]])
33 |
34 | wrap_moving_matrix=cv2.getPerspectiveTransform(points2,points1)
35 |
36 | moving_image=cv2.warpPerspective(sub_moved_512,wrap_moving_matrix,(512,512))
37 |
38 | sub_moving_image=moving_image[128:384,128:384]
39 |
40 | moving0.append(sub_moving_image.reshape((256, 256, 1)))
41 |
42 | temp=points1-points2
43 |
44 | displacement_4_point0.append(temp.reshape(1,8))
45 |
46 |
47 | fixed0=np.array(fixed0)
48 | moving0=np.array(moving0)
49 | displacement_4_point0=np.array(displacement_4_point0)
50 |
51 |
52 | fixed1=[]
53 | moving1=[]
54 | displacement_4_point1=[]
55 |
56 |
57 | for x in range(256,one.shape[0]-256,64):
58 | for y in range(256,one.shape[1]-256,64):
59 | print(x,y)
60 |
61 | sub_fixed_512=one[x-256:x+256,y-256:y+256]
62 | sub_moved_512 = second[x - 256:x + 256, y - 256:y + 256]
63 |
64 | sub_fixed = sub_fixed_512[128:384, 128:384]
65 |
66 | fixed1.append(sub_fixed.reshape((256, 256, 1)))
67 |
68 | points1 = np.float32([[128, 128], [384, 128], [384, 384], [128, 384]])
69 | points2 = np.float32([[np.random.randint(118,138,size=(1,))[0],np.random.randint(118,138,size=(1,))[0]],
70 | [np.random.randint(374,394,size=(1,))[0], np.random.randint(118,138,size=(1,))[0]],
71 | [np.random.randint(374,394,size=(1,))[0], np.random.randint(374,394,size=(1,))[0]],
72 | [np.random.randint(118,138,size=(1,))[0], np.random.randint(374,394,size=(1,))[0]]])
73 |
74 | wrap_moving_matrix=cv2.getPerspectiveTransform(points2,points1)
75 |
76 | moving_image=cv2.warpPerspective(sub_moved_512,wrap_moving_matrix,(512,512))
77 |
78 | sub_moving_image=moving_image[128:384,128:384]
79 |
80 | moving1.append(sub_moving_image.reshape((256, 256, 1)))
81 |
82 | temp=points1-points2
83 |
84 | displacement_4_point1.append(temp.reshape(1,8))
85 |
86 |
87 | fixed1=np.array(fixed1)
88 | moving1=np.array(moving1)
89 | displacement_4_point1=np.array(displacement_4_point1)
90 |
91 |
92 |
93 | fixed2=[]
94 | moving2=[]
95 | displacement_4_point2=[]
96 |
97 |
98 | for x in range(256,one.shape[0]-256,64):
99 | for y in range(256,one.shape[1]-256,64):
100 | print(x,y)
101 |
102 | sub_fixed_512=one[x-256:x+256,y-256:y+256]
103 | sub_moved_512 = second[x - 256:x + 256, y - 256:y + 256]
104 |
105 | sub_fixed = sub_fixed_512[128:384, 128:384]
106 |
107 | fixed2.append(sub_fixed.reshape((256, 256, 1)))
108 |
109 | points1 = np.float32([[128, 128], [384, 128], [384, 384], [128, 384]])
110 | points2 = np.float32([[np.random.randint(118,138,size=(1,))[0],np.random.randint(118,138,size=(1,))[0]],
111 | [np.random.randint(374,394,size=(1,))[0], np.random.randint(118,138,size=(1,))[0]],
112 | [np.random.randint(374,394,size=(1,))[0], np.random.randint(374,394,size=(1,))[0]],
113 | [np.random.randint(118,138,size=(1,))[0], np.random.randint(374,394,size=(1,))[0]]])
114 |
115 | wrap_moving_matrix=cv2.getPerspectiveTransform(points2,points1)
116 |
117 | moving_image=cv2.warpPerspective(sub_moved_512,wrap_moving_matrix,(512,512))
118 |
119 | sub_moving_image=moving_image[128:384,128:384]
120 |
121 | moving2.append(sub_moving_image.reshape((256, 256, 1)))
122 |
123 | temp=points1-points2
124 |
125 | displacement_4_point2.append(temp.reshape(1,8))
126 |
127 |
128 | fixed2=np.array(fixed2)
129 | moving2=np.array(moving2)
130 | displacement_4_point2=np.array(displacement_4_point2)
131 |
132 |
133 |
134 | fixed=np.concatenate((fixed0,fixed1,fixed2),axis=0)
135 | moving=np.concatenate((moving0,moving1,moving2),axis=0)
136 | displacement_4_point=np.concatenate((displacement_4_point0,displacement_4_point1,displacement_4_point2),axis=0)
137 |
138 |
139 |
140 |
141 |
142 | np.savez("D:\\ProgramData_second\\generate_affine_pre_data\\data\\generated_npz_image\\displacement_4_point_paper_random_landsat_256.npz",
143 | fixed=fixed,
144 | moving=moving,
145 | displacement_4_point=displacement_4_point)
--------------------------------------------------------------------------------
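A quick integrity check for the saved archive: rebuild the homography from a stored displacement exactly the way the generator produced it. Sketch mirroring the corner layout used above:

import cv2
import numpy as np

def rebuild_homography(disp8):
    points1 = np.float32([[128, 128], [384, 128], [384, 384], [128, 384]])
    points2 = points1 - disp8.reshape(4, 2).astype(np.float32)   # disp was stored as points1 - points2
    return cv2.getPerspectiveTransform(points2, points1)         # same call the generator used

H = rebuild_homography(displacement_4_point[0].reshape(8,))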
/densenet.py:
--------------------------------------------------------------------------------
1 |
2 | import tensorflow as tf
3 | l2 = tf.keras.regularizers.l2
4 |
5 |
6 | class ConvBlock(tf.keras.Model):
7 |
8 | def __init__(self, num_filters, data_format, bottleneck, weight_decay=1e-4,
9 | dropout_rate=0):
10 | super(ConvBlock, self).__init__()
11 | self.bottleneck = bottleneck
12 |
13 | axis = -1 if data_format == "channels_last" else 1
14 | inter_filter = num_filters * 4 # the bottleneck width is 4x the growth rate
15 | # don't forget to set use_bias=False when using batchnorm
16 | self.conv2 = tf.keras.layers.Conv2D(num_filters,
17 | (3, 3),
18 | padding="same",
19 | use_bias=False,
20 | data_format=data_format,
21 | kernel_initializer="he_normal",
22 | kernel_regularizer=l2(weight_decay))
23 |
24 | self.batchnorm1 = tf.keras.layers.BatchNormalization(axis=axis)
25 | self.dropout = tf.keras.layers.Dropout(dropout_rate)
26 |
27 | if self.bottleneck:
28 | self.conv1 = tf.keras.layers.Conv2D(inter_filter, # with bottleneck enabled, a 1x1 conv first reduces the maps to 4 * growth_rate before conv2
29 | (1, 1),
30 | padding="same",
31 | use_bias=False,
32 | data_format=data_format,
33 | kernel_initializer="he_normal",
34 | kernel_regularizer=l2(weight_decay))
35 | self.batchnorm2 = tf.keras.layers.BatchNormalization(axis=axis)
36 |
37 | def call(self, x, training=True):
38 | output = self.batchnorm1(x, training=training)
39 |
40 | if self.bottleneck:
41 | output = self.conv1(tf.nn.relu(output))
42 | output = self.batchnorm2(output, training=training)
43 |
44 | output = self.conv2(tf.nn.relu(output))
45 | output = self.dropout(output, training=training)
46 |
47 | return output
48 |
49 |
50 |
51 | class transition_block(tf.keras.Model):
52 |
53 |
54 | def __init__(self, num_filters, data_format,
55 | weight_decay=1e-4, dropout_rate=0):
56 | super(transition_block, self).__init__()
57 | axis = -1 if data_format == "channels_last" else 1
58 |
59 | self.batchnorm = tf.keras.layers.BatchNormalization(axis=axis)
60 | self.conv = tf.keras.layers.Conv2D(num_filters,
61 | (1, 1),
62 | padding="same",
63 | use_bias=False,
64 | data_format=data_format,
65 | kernel_initializer="he_normal",
66 | kernel_regularizer=l2(weight_decay))
67 | self.avg_pool = tf.keras.layers.AveragePooling2D(data_format=data_format)
68 |
69 | def call(self, x, training=True):
70 | output = self.batchnorm(x, training=training)
71 | output = self.conv(tf.nn.relu(output))
72 | output = self.avg_pool(output)
73 | return output
74 |
75 |
76 | class dense_block(tf.keras.Model):
77 |
78 |
79 | def __init__(self, num_layers, growth_rate, data_format, bottleneck,
80 | weight_decay=1e-4, dropout_rate=0):
81 | super(dense_block, self).__init__()
82 | self.num_layers = num_layers
83 | self.axis = -1 if data_format == "channels_last" else 1
84 |
85 | self.blocks = []
86 | for _ in range(int(self.num_layers)):
87 | self.blocks.append(ConvBlock(growth_rate, # feature maps each layer adds (not counting the concatenated earlier outputs)
88 | data_format,
89 | bottleneck,
90 | weight_decay, # weight-decay coefficient for this layer
91 | dropout_rate))
92 |
93 | def call(self, x, training=True):
94 |
95 | for i in range(int(self.num_layers)):
96 | output = self.blocks[i](x, training=training) # output of the current layer
97 | x = tf.concat([x, output], axis=self.axis) # stack the layer's output onto the outputs of all preceding layers
98 |
99 | return x
100 |
101 |
102 | class Registration_model(tf.keras.Model):
103 |
104 | def __init__(self):
105 | super(Registration_model,self).__init__()
106 |
107 | self.conv1=tf.keras.layers.Conv2D(filters=64,kernel_size=[3,3],padding='same')
108 | self.pool1=tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
109 |
110 | self.densenet1=dense_block(num_layers=1,growth_rate=8,data_format="channels_last",bottleneck=True)
111 | self.transition1=transition_block(num_filters=64,data_format="channels_last")
112 | #
113 | self.densenet2=dense_block(num_layers=2,growth_rate=8,data_format="channels_last",bottleneck=True)
114 | self.transition2=transition_block(num_filters=64,data_format="channels_last")
115 | #
116 | self.densenet3=dense_block(num_layers=4,growth_rate=8,data_format="channels_last",bottleneck=True)
117 | self.transition3=transition_block(num_filters=64,data_format="channels_last")
118 | #
119 | self.densenet4=dense_block(num_layers=8,growth_rate=8,data_format="channels_last",bottleneck=True)
120 | self.transition4=transition_block(num_filters=64,data_format="channels_last")
121 |
122 | self.densenet5=dense_block(num_layers=16,growth_rate=8,data_format="channels_last",bottleneck=True)
123 | self.transition5=transition_block(num_filters=64,data_format="channels_last")
124 |
125 | # self.densenet6=dense_block(nb_layers=32,nb_filter=64,growth_rate=8)
126 | # self.transition6=transition_block(nb_filter=64)
127 | # self.conv2=tf.keras.layers.Conv2D(filters=128,kernel_size=[1,1])
128 | # self.pool2=tf.keras.layers.MaxPool2D(pool_size=[1,1])
129 |
130 | self.flatten=tf.keras.layers.Flatten()
131 |
132 |
133 | def call(self,Inputensor_1):
134 |
135 | x=self.conv1(Inputensor_1)
136 | x=self.pool1(x)
137 | x=self.densenet1(x)
138 |
139 | x=self.transition1(x)
140 | x=self.densenet2(x)
141 | x=self.transition2(x)
142 | x = self.densenet3(x)
143 | x = self.transition3(x)
144 | x = self.densenet4(x)
145 | x = self.transition4(x)
146 | x = self.densenet5(x)
147 | x = self.transition5(x)
148 |
149 |
150 | # x = self.densenet6(x)
151 | # x = self.transition6(x)
152 | # x=self.conv2(x)
153 | # x=self.pool2(x)
154 |
155 | x=self.flatten(x)
156 | return x
157 |
--------------------------------------------------------------------------------
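Channel arithmetic for the network above: each dense_block adds num_layers * growth_rate feature maps, and each transition_block projects back to 64 channels and halves the resolution. For a 256x256x1 input the resolution goes 128 -> 64 -> 32 -> 16 -> 8 -> 4 while the final transition leaves 64 channels, so the flattened feature has 4 * 4 * 64 = 1024 values. A smoke-test sketch under that assumption:

import tensorflow as tf

model = Registration_model()
features = model(tf.zeros((1, 256, 256, 1)))
print(features.shape)   # expected: (1, 1024)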
/DenseNet_registration/Registration_model/densenet.py:
--------------------------------------------------------------------------------
1 |
2 | import tensorflow as tf
3 | l2 = tf.keras.regularizers.l2
4 |
5 |
6 | class ConvBlock(tf.keras.Model):
7 |
8 | def __init__(self, num_filters, data_format, bottleneck, weight_decay=1e-4,
9 | dropout_rate=0):
10 | super(ConvBlock, self).__init__()
11 | self.bottleneck = bottleneck
12 |
13 | axis = -1 if data_format == "channels_last" else 1
14 | inter_filter = num_filters * 4 # 每一层的输出特征图数目是growth_rate的4倍
15 | # don't forget to set use_bias=False when using batchnorm
16 | self.conv2 = tf.keras.layers.Conv2D(num_filters,
17 | (3, 3),
18 | padding="same",
19 | use_bias=False,
20 | data_format=data_format,
21 | kernel_initializer="he_normal",
22 | kernel_regularizer=l2(weight_decay))
23 |
24 | self.batchnorm1 = tf.keras.layers.BatchNormalization(axis=axis)
25 | self.dropout = tf.keras.layers.Dropout(dropout_rate)
26 |
27 | if self.bottleneck:
28 | self.conv1 = tf.keras.layers.Conv2D(inter_filter, # 这里可以看出如果加入bottleneck操作的话,在conv2之前特征图会降到4 * grow_rate
29 | (1, 1),
30 | padding="same",
31 | use_bias=False,
32 | data_format=data_format,
33 | kernel_initializer="he_normal",
34 | kernel_regularizer=l2(weight_decay))
35 | self.batchnorm2 = tf.keras.layers.BatchNormalization(axis=axis)
36 |
37 | def call(self, x, training=True):
38 | output = self.batchnorm1(x, training=training)
39 |
40 | if self.bottleneck:
41 | output = self.conv1(tf.nn.relu(output))
42 | output = self.batchnorm2(output, training=training)
43 |
44 | output = self.conv2(tf.nn.relu(output))
45 | output = self.dropout(output, training=training)
46 |
47 | return output
48 |
49 |
50 |
51 | class transition_block(tf.keras.Model):
52 |
53 |
54 | def __init__(self, num_filters, data_format,
55 | weight_decay=1e-4, dropout_rate=0):
56 | super(transition_block, self).__init__()
57 | axis = -1 if data_format == "channels_last" else 1
58 |
59 | self.batchnorm = tf.keras.layers.BatchNormalization(axis=axis)
60 | self.conv = tf.keras.layers.Conv2D(num_filters,
61 | (1, 1),
62 | padding="same",
63 | use_bias=False,
64 | data_format=data_format,
65 | kernel_initializer="he_normal",
66 | kernel_regularizer=l2(weight_decay))
67 | self.avg_pool = tf.keras.layers.AveragePooling2D(data_format=data_format)
68 |
69 | def call(self, x, training=True):
70 | output = self.batchnorm(x, training=training)
71 | output = self.conv(tf.nn.relu(output))
72 | output = self.avg_pool(output)
73 | return output
74 |
75 |
76 | class dense_block(tf.keras.Model):
77 |
78 |
79 | def __init__(self, num_layers, growth_rate, data_format, bottleneck,
80 | weight_decay=1e-4, dropout_rate=0):
81 | super(dense_block, self).__init__()
82 | self.num_layers = num_layers
83 | self.axis = -1 if data_format == "channels_last" else 1
84 |
85 | self.blocks = []
86 | for _ in range(int(self.num_layers)):
87 | self.blocks.append(ConvBlock(growth_rate, # 每一层输出的特征图数目(不包括前面层的concatenate)
88 | data_format,
89 | bottleneck,
90 | weight_decay, # 当前层的权重衰减系数
91 | dropout_rate))
92 |
93 | def call(self, x, training=True):
94 |
95 | for i in range(int(self.num_layers)):
96 | output = self.blocks[i](x, training=training) # 每一层自身的输出
97 | x = tf.concat([x, output], axis=self.axis) # 每一层自身的输出堆叠上前面层的输出
98 |
99 | return x
100 |
101 |
102 | class Registration_model(tf.keras.Model):
103 |
104 | def __init__(self):
105 | super(Registration_model,self).__init__()
106 |
107 | self.conv1=tf.keras.layers.Conv2D(filters=64,kernel_size=[3,3],padding='same')
108 | self.pool1=tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=2)
109 |
110 | self.densenet1=dense_block(num_layers=1,growth_rate=8,data_format="channels_last",bottleneck=True)
111 | self.transition1=transition_block(num_filters=64,data_format="channels_last")
112 | #
113 | self.densenet2=dense_block(num_layers=2,growth_rate=8,data_format="channels_last",bottleneck=True)
114 | self.transition2=transition_block(num_filters=64,data_format="channels_last")
115 | #
116 | self.densenet3=dense_block(num_layers=4,growth_rate=8,data_format="channels_last",bottleneck=True)
117 | self.transition3=transition_block(num_filters=64,data_format="channels_last")
118 | #
119 | self.densenet4=dense_block(num_layers=8,growth_rate=8,data_format="channels_last",bottleneck=True)
120 | self.transition4=transition_block(num_filters=64,data_format="channels_last")
121 |
122 | self.densenet5=dense_block(num_layers=16,growth_rate=8,data_format="channels_last",bottleneck=True)
123 | self.transition5=transition_block(num_filters=64,data_format="channels_last")
124 |
125 | # self.densenet6=dense_block(nb_layers=32,nb_filter=64,growth_rate=8)
126 | # self.transition6=transition_block(nb_filter=64)
127 | # self.conv2=tf.keras.layers.Conv2D(filters=128,kernel_size=[1,1])
128 | # self.pool2=tf.keras.layers.MaxPool2D(pool_size=[1,1])
129 |
130 | self.flatten=tf.keras.layers.Flatten()
131 |
132 |
133 | def call(self,Inputensor_1):
134 |
135 | x=self.conv1(Inputensor_1)
136 | x=self.pool1(x)
137 | x=self.densenet1(x)
138 |
139 | x=self.transition1(x)
140 | x=self.densenet2(x)
141 | x=self.transition2(x)
142 | x = self.densenet3(x)
143 | x = self.transition3(x)
144 | x = self.densenet4(x)
145 | x = self.transition4(x)
146 | x = self.densenet5(x)
147 | x = self.transition5(x)
148 |
149 |
150 | # x = self.densenet6(x)
151 | # x = self.transition6(x)
152 | # x=self.conv2(x)
153 | # x=self.pool2(x)
154 |
155 | x=self.flatten(x)
156 | return x
157 |
--------------------------------------------------------------------------------
/DenseNet_registration/STN/STN_proj.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import cv2
4 | import matplotlib.pyplot as plt
5 | from STN.Proj_tr_matrix import Matrix
6 |
7 | def spatial_transformer_network(input_fmap, theta, out_dims=None, **kwargs):
8 |
9 | # grab input dimensions
10 | B = tf.shape(input_fmap)[0]
11 | H = tf.shape(input_fmap)[1]
12 | W = tf.shape(input_fmap)[2]
13 |
14 |
15 | # reshape theta to (B, 3, 3)
16 | theta = tf.reshape(theta, [B, 3, 3])
17 |
18 | # generate grids of same size or upsample/downsample if specified
19 | if out_dims:
20 | out_H = out_dims[0]
21 | out_W = out_dims[1]
22 | batch_grids = affine_grid_generator(out_H, out_W, theta)
23 | else:
24 | batch_grids = affine_grid_generator(H, W, theta)
25 |
26 | x_s = batch_grids[:, 0, :, :]/batch_grids[:, 2, :, :]   # perspective division by the homogeneous coordinate w
27 | y_s = batch_grids[:, 1, :, :]/batch_grids[:, 2, :, :]
28 |
29 |
30 | # sample input with grid to get output
31 | out_fmap = bilinear_sampler(input_fmap, x_s, y_s)
32 |
33 |
34 | return out_fmap
35 |
36 |
37 | def get_pixel_value(img, x, y):
38 |
39 | shape = tf.shape(x)
40 | batch_size = shape[0]
41 | height = shape[1]
42 | width = shape[2]
43 |
44 | batch_idx = tf.range(0, batch_size)
45 | batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))
46 | b = tf.tile(batch_idx, (1, height, width))
47 |
48 | indices = tf.stack([b, y, x], 3)
49 |
50 | return tf.gather_nd(img, indices)
51 |
52 |
53 | def affine_grid_generator(height, width, theta):
54 |
55 | num_batch = tf.shape(theta)[0]
56 |
57 | # create normalized 2D grid
58 | x = tf.linspace(0.0, 500.0, width)   # grid in absolute pixel coordinates; 500 is hardcoded for the demo image below
59 | y = tf.linspace(0.0, 500.0, height)
60 | x_t, y_t = tf.meshgrid(x, y)
61 |
62 |
63 | # flatten
64 | x_t_flat = tf.reshape(x_t, [-1])
65 | y_t_flat = tf.reshape(y_t, [-1])
66 |
67 |
68 | # reshape to [x_t, y_t , 1] - (homogeneous form)
69 | ones = tf.ones_like(x_t_flat)
70 | sampling_grid = tf.stack([x_t_flat, y_t_flat, ones])
71 |
72 | # repeat grid num_batch times
73 | sampling_grid = tf.expand_dims(sampling_grid, axis=0)#(1, 3, 250000)
74 | sampling_grid = tf.tile(sampling_grid, tf.stack([num_batch, 1, 1]))#(2, 3, 250000)
75 |
76 |
77 | # cast to float32 (required for matmul)
78 | theta = tf.cast(theta, 'float32')#(B, 3, 3)
79 |
80 | sampling_grid = tf.cast(sampling_grid, 'float32')#(2, 3, 250000)
81 |
82 |
83 | # transform the sampling grid - batch multiply
84 | batch_grids = tf.matmul(theta, sampling_grid)#(B, 3, H*W)
85 |
86 | # batch grid has shape (num_batch, 3, H*W)
87 |
88 | # reshape to (num_batch, 3, H, W)
89 | batch_grids = tf.reshape(batch_grids, [num_batch, 3, height, width])
90 |
91 |
92 | return batch_grids
93 |
94 |
95 | def bilinear_sampler(img, x, y):
96 |
97 | H = tf.shape(img)[1]
98 | W = tf.shape(img)[2]
99 | max_y = tf.cast(H - 1, 'int32')
100 | max_x = tf.cast(W - 1, 'int32')
101 | zero = tf.zeros([], dtype='int32')
102 |
103 | # rescale x and y to [0, W-1/H-1]
104 | x = tf.cast(x, 'float32')
105 | y = tf.cast(y, 'float32')
106 | # x = 0.5 * ((x + 1.0) * tf.cast(max_x-1, 'float32'))
107 | # y = 0.5 * ((y + 1.0) * tf.cast(max_y-1, 'float32'))
108 |
109 |
110 | # grab 4 nearest corner points for each (x_i, y_i)
111 | x0 = tf.cast(tf.floor(x), 'int32')
112 | x1 = x0 + 1
113 | y0 = tf.cast(tf.floor(y), 'int32')
114 | y1 = y0 + 1
115 |
116 | # clip to range [0, H-1/W-1] to not violate img boundaries
117 | x0 = tf.clip_by_value(x0, zero, max_x)
118 | x1 = tf.clip_by_value(x1, zero, max_x)
119 | y0 = tf.clip_by_value(y0, zero, max_y)
120 | y1 = tf.clip_by_value(y1, zero, max_y)
121 |
122 | # get pixel value at corner coords
123 | Ia = get_pixel_value(img, x0, y0)
124 | Ib = get_pixel_value(img, x0, y1)
125 | Ic = get_pixel_value(img, x1, y0)
126 | Id = get_pixel_value(img, x1, y1)
127 |
128 | # recast as float for delta calculation
129 | x0 = tf.cast(x0, 'float32')
130 | x1 = tf.cast(x1, 'float32')
131 | y0 = tf.cast(y0, 'float32')
132 | y1 = tf.cast(y1, 'float32')
133 |
134 | # calculate deltas
135 | wa = (x1-x) * (y1-y)
136 | wb = (x1-x) * (y-y0)
137 | wc = (x-x0) * (y1-y)
138 | wd = (x-x0) * (y-y0)
139 |
140 | # add dimension for addition
141 | wa = tf.expand_dims(wa, axis=3)
142 | wb = tf.expand_dims(wb, axis=3)
143 | wc = tf.expand_dims(wc, axis=3)
144 | wd = tf.expand_dims(wd, axis=3)
145 |
146 | # compute output
147 | out = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
148 |
149 | return out
150 |
151 | if __name__=='__main__':
152 | img = cv2.imread('C:\\Users\\kylenate\\Desktop\\panda1.jpg')
153 |
154 | img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)/255  # IMREAD_COLOR is an imread flag, not a conversion code; convert BGR->RGB for matplotlib
155 |
156 | height = img.shape[0]
157 | width = img.shape[1]
158 |
159 | imgs = np.concatenate((np.expand_dims(img, 0), np.expand_dims(img, 0)))
160 | imgs = tf.convert_to_tensor(imgs, dtype='float32')
161 |
162 | points1 = np.float32([[0, 0], [500, 0], [500, 500],[0,500]])
163 | points2 = np.float32([[0, 100], [400, 0], [500, 400],[50,500]])
164 |
165 | affine_matrix1 = cv2.getPerspectiveTransform(points2, points1)
166 | print(affine_matrix1)
167 |
168 | affine_matrix2 = cv2.getPerspectiveTransform(points1, points2)
169 | print(affine_matrix2)
170 |
171 | # thetas = [
172 | # [[1.28034883e+00, - 1.60043603e-01 , 1.60043603e+01],
173 | # [3.48330195e-01, 1.39332078e+00 ,- 1.39332078e+02],
174 | # [1.40719453e-04 , 2.84907343e-04 , 1.00000000e+00]],
175 | #
176 | # [[7.78947368e-01, 8.94736842e-02, 0.00000000e+00],
177 | # [-2.00000000e-01 , 6.94736842e-01 , 1.00000000e+02],
178 | # [-5.26315789e-05, - 2.10526316e-04, 1.00000000e+00]]
179 | # ]
180 | # thetas=np.reshape(thetas,(2,9))
181 | # thetas = tf.convert_to_tensor(thetas, dtype='float32')
182 |
183 | t=points1-points2
184 | t=t.reshape((1,8))
185 | t=tf.convert_to_tensor(t)
186 | t=tf.tile(t,[2,1])
187 |
188 | matrix=Matrix(t)
189 | print(matrix)
190 |
191 | output = spatial_transformer_network(imgs, matrix, (height, width))
192 | # output = spatial_transformer_network(imgs, thetas,(height, width))
193 |
194 | cv_out = cv2.warpPerspective(img, affine_matrix2, (width, height))
195 |
196 | plt.figure()
197 | plt.subplot(221)
198 | plt.title('origin')
199 | plt.imshow(img)
200 |
201 | plt.subplot(222)
202 | plt.title('network__point2-point1')
203 | plt.imshow(output[0])
204 |
205 | plt.subplot(223)
206 | plt.title('network__point1-point2')
207 | plt.imshow(output[1])
208 |
209 | plt.subplot(224)
210 | plt.title('point1-point2')
211 | plt.imshow(cv_out)
212 | plt.show()
213 |
214 |
215 |
216 |
217 |
218 |
219 |
220 |
221 |
222 |
223 |
224 |
225 |
--------------------------------------------------------------------------------
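The only difference from the affine STN is the perspective division: grid points are pushed through a full 3x3 homography in homogeneous coordinates and then normalized by the third component. The same arithmetic on a single point, in NumPy (matrix values made up for illustration):

import numpy as np

H = np.array([[1.2, 0.1, 5.0],
              [0.0, 0.9, -3.0],
              [1e-4, 2e-4, 1.0]])      # an arbitrary perspective matrix
p = np.array([100.0, 200.0, 1.0])     # pixel (x, y) in homogeneous form
q = H @ p
x_s, y_s = q[0] / q[2], q[1] / q[2]   # divide by w, exactly as x_s and y_s are computed above
print(x_s, y_s)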
/DenseNet_registration/STN/TPS_STN.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 |
4 |
5 | def TPS_STN(U, nx, ny, cp, out_size):
6 | """Thin Plate Spline Spatial Transformer Layer
7 | TPS control points are arranged in a regular grid.
8 | U : float Tensor
9 | shape [num_batch, height, width, num_channels].
10 | nx : int
11 | The number of control points on x-axis
12 | ny : int
13 | The number of control points on y-axis
14 | cp : float Tensor
15 | control points. shape [num_batch, nx*ny, 2].
16 | out_size: tuple of two ints
17 | The size of the output of the network (height, width)
18 | ----------
19 | Reference :
20 | https://github.com/daviddao/spatial-transformer-tensorflow/blob/master/spatial_transformer.py
21 | """
22 |
23 | def _repeat(x, n_repeats):
24 | rep = tf.transpose(
25 | tf.expand_dims(tf.ones(shape=tf.stack([n_repeats, ])), 1), [1, 0])
26 | rep = tf.cast(rep, 'int32')
27 | x = tf.matmul(tf.reshape(x, (-1, 1)), rep)
28 | return tf.reshape(x, [-1])
29 |
30 | def _interpolate(im, x, y, out_size):
31 | # constants
32 | num_batch = tf.shape(im)[0]
33 | height = tf.shape(im)[1]
34 | width = tf.shape(im)[2]
35 | channels = tf.shape(im)[3]
36 |
37 | x = tf.cast(x, 'float32')
38 | y = tf.cast(y, 'float32')
39 | height_f = tf.cast(height, 'float32')
40 | width_f = tf.cast(width, 'float32')
41 | out_height = out_size[0]
42 | out_width = out_size[1]
43 | zero = tf.zeros([], dtype='int32')
44 | max_y = tf.cast(tf.shape(im)[1] - 1, 'int32')
45 | max_x = tf.cast(tf.shape(im)[2] - 1, 'int32')
46 |
47 | # scale indices from [-1, 1] to [0, width/height]
48 | x = (x + 1.0) * (width_f) / 2.0
49 | y = (y + 1.0) * (height_f) / 2.0
50 |
51 | # do sampling
52 | x0 = tf.cast(tf.floor(x), 'int32')
53 | x1 = x0 + 1
54 | y0 = tf.cast(tf.floor(y), 'int32')
55 | y1 = y0 + 1
56 |
57 | x0 = tf.clip_by_value(x0, zero, max_x)
58 | x1 = tf.clip_by_value(x1, zero, max_x)
59 | y0 = tf.clip_by_value(y0, zero, max_y)
60 | y1 = tf.clip_by_value(y1, zero, max_y)
61 | dim2 = width
62 | dim1 = width * height
63 | base = _repeat(tf.range(num_batch) * dim1, out_height * out_width)
64 | base_y0 = base + y0 * dim2
65 | base_y1 = base + y1 * dim2
66 | idx_a = base_y0 + x0
67 | idx_b = base_y1 + x0
68 | idx_c = base_y0 + x1
69 | idx_d = base_y1 + x1
70 |
71 | # use indices to lookup pixels in the flat image and restore
72 | # channels dim
73 | im_flat = tf.reshape(im, tf.stack([-1, channels]))
74 | im_flat = tf.cast(im_flat, 'float32')
75 | Ia = tf.gather(im_flat, idx_a)
76 | Ib = tf.gather(im_flat, idx_b)
77 | Ic = tf.gather(im_flat, idx_c)
78 | Id = tf.gather(im_flat, idx_d)
79 |
80 | # and finally calculate interpolated values
81 | x0_f = tf.cast(x0, 'float32')
82 | x1_f = tf.cast(x1, 'float32')
83 | y0_f = tf.cast(y0, 'float32')
84 | y1_f = tf.cast(y1, 'float32')
85 | wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
86 | wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
87 | wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
88 | wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
89 | output = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])
90 | return output
91 |
92 | def _meshgrid(height, width, fp):
93 | x_t = tf.matmul(
94 | tf.ones(shape=tf.stack([height, 1])),
95 | tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, width), 1), [1, 0]))
96 | y_t = tf.matmul(
97 | tf.expand_dims(tf.linspace(-1.0, 1.0, height), 1),
98 | tf.ones(shape=tf.stack([1, width])))
99 |
100 | x_t_flat = tf.reshape(x_t, (1, -1))
101 | y_t_flat = tf.reshape(y_t, (1, -1))
102 |
103 | x_t_flat_b = tf.expand_dims(x_t_flat, 0) # [1, 1, h*w]
104 | y_t_flat_b = tf.expand_dims(y_t_flat, 0) # [1, 1, h*w]
105 |
106 | num_batch = tf.shape(fp)[0]
107 | px = tf.expand_dims(fp[:, :, 0], 2) # [n, nx*ny, 1]
108 | py = tf.expand_dims(fp[:, :, 1], 2) # [n, nx*ny, 1]
109 | d = tf.sqrt(tf.pow(x_t_flat_b - px, 2.) + tf.pow(y_t_flat_b - py, 2.))
110 | r = tf.pow(d, 2) * tf.math.log(d + 1e-6) # [n, nx*ny, h*w]
111 | x_t_flat_g = tf.tile(x_t_flat_b, tf.stack([num_batch, 1, 1])) # [n, 1, h*w]
112 | y_t_flat_g = tf.tile(y_t_flat_b, tf.stack([num_batch, 1, 1])) # [n, 1, h*w]
113 | ones = tf.ones_like(x_t_flat_g) # [n, 1, h*w]
114 |
115 | grid = tf.concat([ones, x_t_flat_g, y_t_flat_g, r], 1) # [n, nx*ny+3, h*w]
116 | return grid
117 |
118 | def _transform(T, fp, input_dim, out_size):
119 | num_batch = tf.shape(input_dim)[0]
120 | height = tf.shape(input_dim)[1]
121 | width = tf.shape(input_dim)[2]
122 | num_channels = tf.shape(input_dim)[3]
123 |
124 | # grid of (x_t, y_t, 1), eq (1) in ref [1]
125 | height_f = tf.cast(height, 'float32')
126 | width_f = tf.cast(width, 'float32')
127 | out_height = out_size[0]
128 | out_width = out_size[1]
129 | grid = _meshgrid(out_height, out_width, fp) # [2, h*w]
130 |
131 | # transform A x (1, x_t, y_t, r1, r2, ..., rn) -> (x_s, y_s)
132 | T_g = tf.matmul(T, grid) # MARK
133 | x_s = tf.slice(T_g, [0, 0, 0], [-1, 1, -1])
134 | y_s = tf.slice(T_g, [0, 1, 0], [-1, 1, -1])
135 | x_s_flat = tf.reshape(x_s, [-1])
136 | y_s_flat = tf.reshape(y_s, [-1])
137 |
138 | input_transformed = _interpolate(
139 | input_dim, x_s_flat, y_s_flat, out_size)
140 |
141 | output = tf.reshape(
142 | input_transformed,
143 | tf.stack([num_batch, out_height, out_width, num_channels]))
144 | return output
145 |
146 | def _solve_system(cp, nx, ny):
147 | gx = 2. / nx # grid x size
148 | gy = 2. / ny # grid y size
149 | cx = -1. + gx / 2. # x coordinate
150 | cy = -1. + gy / 2. # y coordinate
151 |
152 | p_ = np.empty([nx * ny, 3], dtype='float32')
153 | i = 0
154 | for _ in range(ny):
155 | for _ in range(nx):
156 | p_[i, :] = 1, cx, cy
157 | i += 1
158 | cx += gx
159 | cx = -1. + gx / 2
160 | cy += gy
161 |
162 | p_1 = p_.reshape([nx * ny, 1, 3])
163 | p_2 = p_.reshape([1, nx * ny, 3])
164 | d = np.sqrt(np.sum((p_1 - p_2) ** 2, 2)) # [nx*ny, nx*ny]
165 | r = d * d * np.log(d * d + 1e-5)
166 | W = np.zeros([nx * ny + 3, nx * ny + 3], dtype='float32')
167 | W[:nx * ny, 3:] = r
168 | W[:nx * ny, :3] = p_
169 | W[nx * ny:, 3:] = p_.T
170 |
171 | num_batch = tf.shape(cp)[0]
172 | fp = tf.constant(p_[:, 1:], dtype='float32') # [nx*ny, 2]
173 | fp = tf.expand_dims(fp, 0) # [1, nx*ny, 2]
174 | fp = tf.tile(fp, tf.stack([num_batch, 1, 1])) # [n, nx*ny, 2]
175 |
176 | W_inv = np.linalg.inv(W)
177 | W_inv_t = tf.constant(W_inv, dtype='float32') # [nx*ny+3, nx*ny+3]
178 | W_inv_t = tf.expand_dims(W_inv_t, 0) # [1, nx*ny+3, nx*ny+3]
179 | W_inv_t = tf.tile(W_inv_t, tf.stack([num_batch, 1, 1]))
180 |
181 | cp_pad = tf.pad(cp + fp, [[0, 0], [0, 3], [0, 0]], "CONSTANT")
182 | T = tf.matmul(W_inv_t, cp_pad)
183 | T = tf.transpose(T, [0, 2, 1])
184 |
185 | return T, fp
186 |
187 | T, fp = _solve_system(cp, nx, ny)
188 | output = _transform(T, fp, U, out_size)
189 | return output
--------------------------------------------------------------------------------
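A minimal usage sketch for TPS_STN: with a 2x2 control grid and zero control-point offsets the solved spline interpolates zero displacement, so the output should reproduce the input up to bilinear interpolation. The values here are made up; the shapes follow the docstring:

import tensorflow as tf

U = tf.random.uniform((1, 64, 64, 1))
cp = tf.zeros((1, 4, 2))               # nx * ny = 4 control points, zero offsets
out = TPS_STN(U, 2, 2, cp, (64, 64))   # near-identity warp
print(out.shape)                       # (1, 64, 64, 1)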
/DenseNet_registration/STN/STN_aff.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf
2 | import numpy as np
3 | import cv2
4 | import matplotlib.pyplot as plt
5 |
6 | def spatial_transformer_network(input_fmap, theta, out_dims=None, **kwargs):
7 | """
8 | Spatial Transformer Network layer implementation as described in [1].
9 | The layer is composed of 3 elements:
10 | - localization_net: takes the original image as input and outputs
11 | the parameters of the affine transformation that should be applied
12 | to the input image.
13 | - affine_grid_generator: generates a grid of (x,y) coordinates that
14 | correspond to a set of points where the input should be sampled
15 | to produce the transformed output.
16 | - bilinear_sampler: takes as input the original image and the grid
17 | and produces the output image using bilinear interpolation.
18 | Input
19 | -----
20 | - input_fmap: output of the previous layer. Can be input if spatial
21 | transformer layer is at the beginning of architecture. Should be
22 | a tensor of shape (B, H, W, C).
23 | - theta: affine transform tensor of shape (B, 6). Permits cropping,
24 | translation and isotropic scaling. Initialize to identity matrix.
25 | It is the output of the localization network.
26 | Returns
27 | -------
28 | - out_fmap: transformed input feature map. Tensor of size (B, H, W, C).
29 | Notes
30 | -----
31 | [1]: 'Spatial Transformer Networks', Jaderberg et. al,
32 | (https://arxiv.org/abs/1506.02025)
33 | """
34 | # grab input dimensions
35 | B = tf.shape(input_fmap)[0]
36 | H = tf.shape(input_fmap)[1]
37 | W = tf.shape(input_fmap)[2]
38 |
39 |
40 |
41 | # reshape theta to (B, 2, 3)
42 | theta = tf.reshape(theta, [B, 2, 3])
43 |
44 | # generate grids of same size or upsample/downsample if specified
45 | if out_dims:
46 | out_H = out_dims[0]
47 | out_W = out_dims[1]
48 | batch_grids = affine_grid_generator(out_H, out_W, theta)
49 | else:
50 | batch_grids = affine_grid_generator(H, W, theta)
51 |
52 | x_s = batch_grids[:, 0, :, :]
53 | y_s = batch_grids[:, 1, :, :]
54 |
55 | # sample input with grid to get output
56 | out_fmap = bilinear_sampler(input_fmap, x_s, y_s)
57 |
58 | return out_fmap
59 |
60 |
61 | def get_pixel_value(img, x, y):
62 | """
63 | Utility function to get pixel value for coordinate
64 | vectors x and y from a 4D tensor image.
65 | Input
66 | -----
67 | - img: tensor of shape (B, H, W, C)
68 | - x: flattened tensor of shape (B*H*W,)
69 | - y: flattened tensor of shape (B*H*W,)
70 | Returns
71 | -------
72 | - output: tensor of shape (B, H, W, C)
73 | """
74 | shape = tf.shape(x)
75 | batch_size = shape[0]
76 | height = shape[1]
77 | width = shape[2]
78 |
79 | batch_idx = tf.range(0, batch_size)
80 | batch_idx = tf.reshape(batch_idx, (batch_size, 1, 1))
81 | b = tf.tile(batch_idx, (1, height, width))
82 |
83 | indices = tf.stack([b, y, x], 3)
84 |
85 | return tf.gather_nd(img, indices)
86 |
87 |
88 | def affine_grid_generator(height, width, theta):
89 | """
90 | This function returns a sampling grid, which when
91 | used with the bilinear sampler on the input feature
92 | map, will create an output feature map that is an
93 | affine transformation [1] of the input feature map.
94 | Input
95 | -----
96 | - height: desired height of grid/output. Used
97 | to downsample or upsample.
98 | - width: desired width of grid/output. Used
99 | to downsample or upsample.
100 | - theta: affine transform matrices of shape (num_batch, 2, 3).
101 | For each image in the batch, we have 6 theta parameters of
102 | the form (2x3) that define the affine transformation T.
103 | Returns
104 | -------
105 | - normalized grid (-1, 1) of shape (num_batch, 2, H, W).
106 | The 2nd dimension has 2 components: (x, y) which are the
107 | sampling points of the original image for each point in the
108 | target image.
109 | Note
110 | ----
111 | [1]: the affine transformation allows cropping, translation,
112 | and isotropic scaling.
113 | """
114 | num_batch = tf.shape(theta)[0]
115 |
116 | # create normalized 2D grid
117 | x = tf.linspace(-1.0, 1.0, width)
118 | y = tf.linspace(-1.0, 1.0, height)
119 | x_t, y_t = tf.meshgrid(x, y)
120 |
121 |
122 |
123 | # flatten
124 | x_t_flat = tf.reshape(x_t, [-1])
125 | y_t_flat = tf.reshape(y_t, [-1])
126 |
127 |
128 | # reshape to [x_t, y_t , 1] - (homogeneous form)
129 | ones = tf.ones_like(x_t_flat)
130 | sampling_grid = tf.stack([x_t_flat, y_t_flat, ones])
131 |
132 | # repeat grid num_batch times
133 | sampling_grid = tf.expand_dims(sampling_grid, axis=0)#(1, 3, 250000)
134 | sampling_grid = tf.tile(sampling_grid, tf.stack([num_batch, 1, 1]))#(2, 3, 250000)
135 |
136 |
137 |
138 | # cast to float32 (required for matmul)
139 | theta = tf.cast(theta, 'float32')#(B, 2, 3)
140 |
141 | sampling_grid = tf.cast(sampling_grid, 'float32')#(2, 3, 250000)
142 |
143 |
144 | # transform the sampling grid - batch multiply
145 | batch_grids = tf.matmul(theta, sampling_grid)#(2, 2, 250000)
146 |
147 | # batch grid has shape (num_batch, 2, H*W)
148 |
149 | # reshape to (num_batch, H, W, 2)
150 | batch_grids = tf.reshape(batch_grids, [num_batch, 2, height, width])
151 |
152 |
153 | return batch_grids
154 |
155 |
156 | def bilinear_sampler(img, x, y):
157 | """
158 | Performs bilinear sampling of the input images according to the
159 | normalized coordinates provided by the sampling grid. Note that
160 | the sampling is done identically for each channel of the input.
161 | To test if the function works properly, output image should be
162 | identical to input image when theta is initialized to identity
163 | transform.
164 | Input
165 | -----
166 | - img: batch of images in (B, H, W, C) layout.
167 | - grid: x, y which is the output of affine_grid_generator.
168 | Returns
169 | -------
170 | - out: interpolated images according to grids. Same size as grid.
171 | """
172 | H = tf.shape(img)[1]
173 | W = tf.shape(img)[2]
174 | max_y = tf.cast(H - 1, 'int32')
175 | max_x = tf.cast(W - 1, 'int32')
176 | zero = tf.zeros([], dtype='int32')
177 |
178 | # rescale x and y to [0, W-1/H-1]
179 | x = tf.cast(x, 'float32')
180 | y = tf.cast(y, 'float32')
181 |
182 | x = 0.5 * ((x + 1.0) * tf.cast(max_x-1, 'float32'))
183 | y = 0.5 * ((y + 1.0) * tf.cast(max_y-1, 'float32'))
184 |
185 |
186 |
187 | # grab 4 nearest corner points for each (x_i, y_i)
188 | x0 = tf.cast(tf.floor(x), 'int32')
189 | x1 = x0 + 1
190 | y0 = tf.cast(tf.floor(y), 'int32')
191 | y1 = y0 + 1
192 |
193 | # clip to range [0, H-1/W-1] to not violate img boundaries
194 | x0 = tf.clip_by_value(x0, zero, max_x)
195 | x1 = tf.clip_by_value(x1, zero, max_x)
196 | y0 = tf.clip_by_value(y0, zero, max_y)
197 | y1 = tf.clip_by_value(y1, zero, max_y)
198 |
199 | # get pixel value at corner coords
200 | Ia = get_pixel_value(img, x0, y0)
201 | Ib = get_pixel_value(img, x0, y1)
202 | Ic = get_pixel_value(img, x1, y0)
203 | Id = get_pixel_value(img, x1, y1)
204 |
205 | # recast as float for delta calculation
206 | x0 = tf.cast(x0, 'float32')
207 | x1 = tf.cast(x1, 'float32')
208 | y0 = tf.cast(y0, 'float32')
209 | y1 = tf.cast(y1, 'float32')
210 |
211 | # calculate deltas
212 | wa = (x1-x) * (y1-y)
213 | wb = (x1-x) * (y-y0)
214 | wc = (x-x0) * (y1-y)
215 | wd = (x-x0) * (y-y0)
216 |
217 | # add dimension for addition
218 | wa = tf.expand_dims(wa, axis=3)
219 | wb = tf.expand_dims(wb, axis=3)
220 | wc = tf.expand_dims(wc, axis=3)
221 | wd = tf.expand_dims(wd, axis=3)
222 |
223 | # compute output
224 | out = tf.add_n([wa*Ia, wb*Ib, wc*Ic, wd*Id])
225 |
226 | return out
227 |
228 |
229 | if __name__ == '__main__':
230 | img = cv2.imread('C:\\Users\\kylenate\\Desktop\\panda.jpg')
231 | img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)/255  # IMREAD_COLOR is an imread flag, not a conversion code; convert BGR->RGB for matplotlib
232 |
233 |
234 | height = img.shape[0]
235 | width = img.shape[1]
236 |
237 | imgs = np.concatenate((np.expand_dims(img,0), np.expand_dims(img,0)))
238 | imgs = tf.convert_to_tensor(imgs, dtype='float32')
239 |
240 |
241 | thetas = [
242 | [[1, 0., -0.2],
243 | [0, 1., -0.4]],
244 | [[1., 0.2, 0],
245 | [0.1, 1., 0]],
246 | ]
247 | thetas = np.reshape(thetas, (2, 6))
248 | thetas = tf.convert_to_tensor(thetas, dtype='float32')
249 |
250 |
251 | output = spatial_transformer_network(imgs, thetas, (500, 500))
252 |
253 |
254 | plt.figure()
255 | plt.subplot(221)
256 | plt.imshow(img)
257 | plt.subplot(222)
258 | plt.imshow(output[0])
259 | plt.subplot(223)
260 | plt.imshow(output[1])
261 |
262 |
263 | plt.show()
264 |
265 | # test_get_pixel_value()
266 |
267 |
--------------------------------------------------------------------------------
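As the docstring says, an identity theta should return the input unchanged, which makes a handy sanity check before wiring the layer into a model. A sketch reusing the `imgs` batch from the demo above:

import numpy as np
import tensorflow as tf

identity = np.tile(np.array([[1., 0., 0., 0., 1., 0.]], dtype='float32'), (2, 1))   # (B, 6)
out = spatial_transformer_network(imgs, tf.convert_to_tensor(identity))
# out should match imgs up to bilinear-interpolation error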
/Similarityfunction/similar_function/self_brief描述子.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import cv2
3 | import os
4 | from scipy.spatial.distance import cdist
5 | from keypointDetect import DoGdetector
6 | import random
7 |
8 | import matplotlib.pyplot as plt
9 |
10 |
11 | def makeTestPattern(patch_width=9, nbits=256):
12 | '''
13 | Creates Test Pattern for BRIEF
14 | Run this routine for the given parameters patch_width = 9 and n = 256
15 | INPUTS
16 | patch_width - the width of the image patch (usually 9)
17 | nbits - the number of tests n in the BRIEF descriptor
18 | OUTPUTS
19 | compareX and compareY - LINEAR indices into the patch_width x patch_width image
20 | patch and are each (nbits,) vectors.
21 | '''
22 | #############################
23 | # TO DO ...
24 | # Generate testpattern here
25 |
26 |     # compareX and compareY hold linear indices in [0, patch_width**2 - 1], i.e. 0..80 for a 9x9 patch
27 | 
28 |     # the BRIEF paper uses sigma^2 = (1/25) * S^2 with sample window S = 9,
29 |     # so sigma = 9/5 = 1.8
30 |     sigma, k = 1.8, 9  # build a k x k (9x9) Gaussian kernel with mean 0 and this sigma
31 | probs = [np.exp(-z * z / (2 * sigma * sigma)) / np.sqrt(2 * np.pi * sigma * sigma) for z in range(-4, 4 + 1)]
32 | kernel = np.outer(probs, probs)
33 | kernel = np.multiply(kernel, 256)
34 | kernel = kernel.round()
35 | # print(kernel)
36 | # print(kernel.sum())
37 |
38 |     half = patch_width // 2
39 |
40 | kernel = kernel.astype(int)
41 |
42 | kernel[half, half] -= 1
43 | kernel[half, half - 1] -= 1
44 | kernel[half, half + 1] -= 1
45 | kernel[half - 1, half] -= 1
46 | kernel[half + 1, half] -= 1
47 |
48 | # print(kernel)
49 | # print(kernel.sum())
50 |
51 | # plt.imshow(kernel)
52 | # plt.colorbar()
53 | # plt.show()
54 |
55 | compareX, compareY = [], []
56 |
57 | counter = 0
58 | for i in range(0, patch_width):
59 | for j in range(0, patch_width):
60 |             for _ in range(kernel[i, j]):  # repeat the index in proportion to the Gaussian weight
61 | compareX.append(counter)
62 | compareY.append(counter)
63 | counter += 1
64 |
65 | # print(compareX)
66 | # print(compareY)
67 |
68 | random.shuffle(compareX)
69 | random.shuffle(compareY)
70 |
71 | print('compareX: ', compareX)
72 | print('compareY: ', compareY)
73 |
74 | test_pattern_file = '../results/testPattern.npy'
75 | if not os.path.isdir('../results'):
76 | os.mkdir('../results')
77 | np.save(test_pattern_file, [compareX, compareY])
78 | print('test_pattern_file saved')
79 |
80 | return compareX, compareY
81 |
82 |
83 | # load test pattern for Brief
84 | test_pattern_file = '../results/testPattern.npy'
85 | if os.path.isfile(test_pattern_file):
86 | # load from file if exists
87 | compareX, compareY = np.load(test_pattern_file)
88 | print('loaded compare X and Y from results ')
89 | # print('loaded compare X: ', compareX)
90 | # print('loaded compare Y: ', compareY)
91 | else:
92 | # produce and save patterns if not exist
93 | compareX, compareY = makeTestPattern()
94 | if not os.path.isdir('../results'):
95 | os.mkdir('../results')
96 | np.save(test_pattern_file, [compareX, compareY])
97 | print('test_pattern_file saved2')
98 |
99 | compareX, compareY = np.load(test_pattern_file)
100 |
101 |
102 | def computeBrief(im, gaussian_pyramid, locsDoG, k, levels,
103 | compareX, compareY):
104 | '''
105 | Compute Brief feature
106 | INPUT
107 | locsDoG - locsDoG are the keypoint locations returned by the DoG
108 | detector.
109 | levels - Gaussian scale levels that were given in Section1.
110 | compareX and compareY - linear indices into the
111 | (patch_width x patch_width) image patch and are
112 | each (nbits,) vectors.
113 |
114 |
115 | OUTPUT
116 | locs - an m x 3 vector, where the first two columns are the image
117 | coordinates of keypoints and the third column is the pyramid
118 | level of the keypoints.
119 | desc - an m x n bits matrix of stacked BRIEF descriptors. m is the number
120 | of valid descriptors in the image and will vary.
121 | '''
122 | ##############################
123 | # TO DO ...
124 | # compute locs, desc here
125 | locs = []
126 | desc = []
127 |
128 | # print('locsDoG shape: ', locsDoG.shape)
129 | H = im.shape[0]
130 | W = im.shape[1]
131 |
132 | for key_point in locsDoG:
133 | column = key_point[0]
134 | row = key_point[1]
135 | depth = key_point[2]
136 |         k_half = k // 2
137 | if row >= k_half and column >= k_half and row <= (H - 1) - k_half and column <= (W - 1) - k_half:
138 |             # use depth + 1 because intensities are compared in the smoothed image (the pyramid includes extra smoothed levels)
139 | patch = gaussian_pyramid[row - k_half:row + k_half + 1, column - k_half:column + k_half + 1, depth + 1]
140 | patch = np.asarray(patch)
141 | compare_vector = []
142 | patch_flat = patch.flatten()
143 | # print(patch_flat.shape)
144 | for i in range(0, len(compareX)):
145 | compare_vector.append(int(patch_flat[compareX[i]] < patch_flat[compareY[i]]))
146 | locs.append(key_point)
147 | desc.append(compare_vector)
148 |
149 | locs = np.asarray(locs)
150 | desc = np.asarray(desc)
151 | print('locs shape: ', locs.shape)
152 | print('desc shape: ', desc.shape)
153 |
154 | return locs, desc
155 |
156 |
157 | def briefLite(im):
158 | '''
159 | INPUTS
160 | im - gray image with values between 0 and 1
161 | OUTPUTS
162 | locs - an m x 3 vector, where the first two columns are the image coordinates
163 | of keypoints and the third column is the pyramid level of the keypoints
164 | desc - an m x n bits matrix of stacked BRIEF descriptors.
165 | m is the number of valid descriptors in the image and will vary
166 | n is the number of bits for the BRIEF descriptor
167 | '''
168 | ###################
169 | # TO DO ...
170 |
171 |     # grayscale conversion and normalization are handled inside the Gaussian pyramid code
172 | locsDoG, gaussian_pyramid = DoGdetector(im)
173 | print('locsDoG shape: ', locsDoG.shape)
174 |
175 | compareX, compareY = np.load(test_pattern_file)
176 |
177 | locs, desc = computeBrief(im, gaussian_pyramid, locsDoG, k=9, levels=gaussian_pyramid.shape[2],
178 | compareX=compareX, compareY=compareY)
179 | return locs, desc
180 |
181 |
182 | def briefMatch(desc1, desc2, ratio=0.8):
183 | '''
184 | performs the descriptor matching
185 | inputs : desc1 , desc2 - m1 x n and m2 x n matrix. m1 and m2 are the number of keypoints in image 1 and 2.
186 | n is the number of bits in the brief
187 | outputs : matches - p x 2 matrix. where the first column are indices
188 | into desc1 and the second column are indices into desc2
189 | '''
190 | D = cdist(np.float32(desc1), np.float32(desc2), metric='hamming')
191 | # find smallest distance
192 | ix2 = np.argmin(D, axis=1)
193 | d1 = D.min(1)
194 | # find second smallest distance
195 | d12 = np.partition(D, 2, axis=1)[:, 0:2]
196 | d2 = d12.max(1)
197 | r = d1 / (d2 + 1e-10)
198 | is_discr = r < ratio
199 | ix2 = ix2[is_discr]
200 | ix1 = np.arange(D.shape[0])[is_discr]
201 |
202 | matches = np.stack((ix1, ix2), axis=-1)
203 | return matches
204 |
205 |
206 | def plotMatches(im1, im2, matches, locs1, locs2):
207 | fig = plt.figure()
208 | # draw two images side by side
209 | imH = max(im1.shape[0], im2.shape[0])
210 | im = np.zeros((imH, im1.shape[1] + im2.shape[1]), dtype='uint8')
211 | im[0:im1.shape[0], 0:im1.shape[1]] = cv2.cvtColor(im1, cv2.COLOR_BGR2GRAY)
212 | im[0:im2.shape[0], im1.shape[1]:] = cv2.cvtColor(im2, cv2.COLOR_BGR2GRAY)
213 | plt.imshow(im, cmap='gray')
214 | for i in range(matches.shape[0]):
215 | pt1 = locs1[matches[i, 0], 0:2]
216 | pt2 = locs2[matches[i, 1], 0:2].copy()
217 | pt2[0] += im1.shape[1]
218 | x = np.asarray([pt1[0], pt2[0]])
219 | y = np.asarray([pt1[1], pt2[1]])
220 | plt.plot(x, y, 'r', linewidth=0.8)
221 | plt.plot(x, y, 'g.')
222 | plt.show()
223 |
224 |
225 | if __name__ == '__main__':
226 | # test makeTestPattern
227 | compareX, compareY = makeTestPattern()
228 |
229 | # # ------------------------- TEST BRIEF_LIGHT ---------------------------------
230 | # im = cv2.imread('../data/model_chickenbroth.jpg')
231 | # locs, desc = briefLite(im)
232 | # fig = plt.figure()
233 | # plt.imshow(cv2.cvtColor(im, cv2.COLOR_BGR2GRAY), cmap='gray')
234 | # plt.plot(locs[:,0], locs[:,1], 'r.')
235 | # plt.draw()
236 | # plt.waitforbuttonpress(0)
237 | # plt.close(fig)
238 |
239 | # # ------------------------- TEST MATCHING OF SAME IMAGES -----------------------
240 | # im = cv2.imread('../data/model_chickenbroth.jpg')
241 | # locs, desc = briefLite(im)
242 | # matches = briefMatch(desc, desc)
243 | # plotMatches(im,im,matches,locs,locs)
244 |
245 | # ---------------------- TEST MATCHING OF DIFFERENT IMAGES ---------------------------------------------
246 | print('------------------------- IMAGE 1 -----------------------------')
247 | im1 = cv2.imread('../data/model_chickenbroth.jpg')
248 | # im1 = cv2.imread('../data/incline_L.png')
249 | # im1 = cv2.imread('../data/pf_scan_scaled.jpg')
250 | locs1, desc1 = briefLite(im1)
251 |
252 | print('------------------------- IMAGE 2 -----------------------------')
253 | im2 = cv2.imread('../data/chickenbroth_01.jpg')
254 | # im2 = cv2.imread('../data/incline_R.png')
255 | # im2 = cv2.imread('../data/pf_desk.jpg')
256 | # im2 = cv2.imread('../data/pf_floor.jpg')
257 | # im2 = cv2.imread('../data/pf_pile.jpg')
258 | # im2 = cv2.imread('../data/pf_stand.jpg')
259 | # im2 = cv2.imread('../data/pf_floor_rot.jpg')
260 | locs2, desc2 = briefLite(im2)
261 |
262 | matches = briefMatch(desc1, desc2)
263 | print('matches shape: ', matches.shape)
264 | plotMatches(im1, im2, matches, locs1, locs2)
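265 | 
266 |     # Toy check of the ratio-test matcher on hand-made 8-bit descriptors
267 |     # (a sketch; the real BRIEF descriptors above are 256-bit). Only the
268 |     # exact match passes the ratio test, so the expected output is [[0 0]].
269 |     toy1 = np.array([[0, 0, 0, 0, 1, 1, 1, 1]])
270 |     toy2 = np.array([[0, 0, 0, 0, 1, 1, 1, 1],   # identical -> distance 0
271 |                      [1, 1, 1, 1, 0, 0, 0, 0],   # all bits flipped -> distance 1
272 |                      [0, 0, 0, 1, 1, 1, 1, 1]])  # one bit off -> distance 1/8
273 |     print('toy matches:', briefMatch(toy1, toy2))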
--------------------------------------------------------------------------------
/loss.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.keras import backend as K
4 | import neurite as ne  # ne.utils.soft_quantize is required by the NMI class below
5 | 
6 | 
7 | class image_loss_mes(tf.keras.losses.Loss):
8 | 
9 |     def call(self, y_true, y_pred):
10 |         # keep only non-empty warped pixels; reshape to the [bs, V, 1] layout volumes() expects
11 |         index = tf.where(y_pred > 0.001)
12 |         return NMInformation().loss(tf.reshape(tf.gather_nd(y_true, index), (1, -1, 1)), tf.reshape(tf.gather_nd(y_pred, index), (1, -1, 1)))
13 |
14 |
15 | class matix_loss_mes(tf.keras.losses.Loss):
16 |
17 |     def call(self, y_true, y_predict):
18 |         # mean squared error between predicted and true transform parameters
19 |         loss = tf.reduce_mean(tf.square(y_predict - y_true))
20 | 
21 |         return loss
22 |
23 | # NOTE: NMInformation subclasses NMI, so it is defined after the NMI class
24 | # at the bottom of this file (a base class must exist before its subclass
25 | # is declared, otherwise this module raises a NameError on import).
26 | 
27 | 
28 | class NMI:
29 | """
30 | Soft Mutual Information approximation for intensity volumes and probabilistic volumes
31 |     (e.g. probabilistic segmentations)
32 | More information/citation:
33 | - Courtney K Guo.
34 | Multi-modal image registration with unsupervised deep learning.
35 | PhD thesis, Massachusetts Institute of Technology, 2019.
36 | - M Hoffmann, B Billot, DN Greve, JE Iglesias, B Fischl, AV Dalca
37 | SynthMorph: learning contrast-invariant registration without acquired images
38 | IEEE Transactions on Medical Imaging (TMI), 41 (3), 543-558, 2022
39 | https://doi.org/10.1109/TMI.2021.3116879
40 | # TODO: add local MI by using patches. This is quite memory consuming, though.
41 | Includes functions that can compute mutual information between volumes,
42 | between segmentations, or between a volume and a segmentation map
43 |     mi = NMI()
44 | mi.volumes
45 | mi.segs
46 | mi.volume_seg
47 | mi.channelwise
48 | mi.maps
49 | """
50 |
51 | def __init__(self,
52 | bin_centers=None,
53 | nb_bins=None,
54 | soft_bin_alpha=None,
55 | min_clip=None,
56 | max_clip=None):
57 | """
58 | Initialize the mutual information class
59 | Arguments below are related to soft quantizing of volumes, which is done automatically
60 |         in functions that compute MI over volumes (e.g. volumes(), volume_seg(), channelwise())
61 | using these parameters
62 | Args:
63 | bin_centers (np.float32, optional): Array or list of bin centers. Defaults to None.
64 | nb_bins (int, optional): Number of bins. Defaults to 16 if bin_centers
65 | is not specified.
66 |             soft_bin_alpha (float, optional): Alpha in RBF of soft quantization. Defaults
67 |                 to `1 / (2 * square(sigma))`.
68 | min_clip (float, optional): Lower value to clip data. Defaults to -np.inf.
69 | max_clip (float, optional): Upper value to clip data. Defaults to np.inf.
70 | """
71 |
72 | self.bin_centers = None
73 | if bin_centers is not None:
74 | self.bin_centers = tf.convert_to_tensor(bin_centers, dtype=tf.float32)
75 | assert nb_bins is None, 'cannot provide both bin_centers and nb_bins'
76 | nb_bins = bin_centers.shape[0]
77 |
78 | self.nb_bins = nb_bins
79 | if bin_centers is None and nb_bins is None:
80 | self.nb_bins = 16
81 |
82 | self.min_clip = min_clip
83 | if self.min_clip is None:
84 | self.min_clip = -np.inf
85 |
86 | self.max_clip = max_clip
87 | if self.max_clip is None:
88 | self.max_clip = np.inf
89 |
90 | self.soft_bin_alpha = soft_bin_alpha
91 | if self.soft_bin_alpha is None:
92 | sigma_ratio = 0.5
93 | if self.bin_centers is None:
94 | sigma = sigma_ratio / (self.nb_bins - 1)
95 | else:
96 | sigma = sigma_ratio * tf.reduce_mean(tf.experimental.numpy.diff(bin_centers))
97 | self.soft_bin_alpha = 1 / (2 * tf.square(sigma))
98 |         # print(self.soft_bin_alpha)  # debug
99 |
100 | def volumes(self, x, y):
101 | """
102 | Mutual information for each item in a batch of volumes.
103 | Algorithm:
104 | - use neurite.utils.soft_quantize() to create a soft quantization (binning) of
105 | intensities in each channel
106 | - channelwise()
107 | Parameters:
108 | x and y: [bs, ..., 1]
109 | Returns:
110 | Tensor of size [bs]
111 | """
112 | # check shapes
113 | tensor_channels_x = K.shape(x)[-1]
114 | tensor_channels_y = K.shape(y)[-1]
115 | msg = 'volume_mi requires two single-channel volumes. See channelwise().'
116 | tf.debugging.assert_equal(tensor_channels_x, 1, msg)
117 | tf.debugging.assert_equal(tensor_channels_y, 1, msg)
118 |
119 | # volume mi
120 | return K.flatten(self.channelwise(x, y))
121 |
122 | def segs(self, x, y):
123 | """
124 | Mutual information between two probabilistic segmentation maps.
125 | Wraps maps()
126 | Parameters:
127 | x and y: [bs, ..., nb_labels]
128 | Returns:
129 | Tensor of size [bs]
130 | """
131 | # volume mi
132 | return self.maps(x, y)
133 |
134 | def volume_seg(self, x, y):
135 | """
136 |         Mutual information between a volume and a probabilistic segmentation map.
137 | Wraps maps()
138 | Parameters:
139 | x and y: a volume and a probabilistic (soft) segmentation. Either:
140 | - x: [bs, ..., 1] and y: [bs, ..., nb_labels], Or:
141 | - x: [bs, ..., nb_labels] and y: [bs, ..., 1]
142 | Returns:
143 | Tensor of size [bs]
144 | """
145 | # check shapes
146 | tensor_channels_x = K.shape(x)[-1]
147 | tensor_channels_y = K.shape(y)[-1]
148 | msg = 'volume_seg_mi requires one single-channel volume.'
149 | tf.debugging.assert_equal(tf.minimum(tensor_channels_x, tensor_channels_y), 1, msg)
150 | # otherwise we don't know which one is which
151 | msg = 'volume_seg_mi requires one multi-channel segmentation.'
152 | tf.debugging.assert_greater(tf.maximum(tensor_channels_x, tensor_channels_y), 1, msg)
153 |
154 | # transform volume to soft-quantized volume
155 | if tensor_channels_x == 1:
156 | x = self._soft_sim_map(x[..., 0]) # [bs, ..., B]
157 | else:
158 | y = self._soft_sim_map(y[..., 0]) # [bs, ..., B]
159 |
160 | return self.maps(x, y) # [bs]
161 |
162 | def channelwise(self, x, y):
163 | """
164 |         Mutual information for each channel in x and y. For each item and channel this
165 |         returns MI(x[..., i], y[..., i]). To do this, we use neurite.utils.soft_quantize() to
166 | create a soft quantization (binning) of the intensities in each channel
167 | Parameters:
168 | x and y: [bs, ..., C]
169 | Returns:
170 | Tensor of size [bs, C]
171 | """
172 | # check shapes
173 | tensor_shape_x = K.shape(x)
174 | tensor_shape_y = K.shape(y)
175 | tf.debugging.assert_equal(tensor_shape_x, tensor_shape_y, 'volume shapes do not match')
176 |
177 | # reshape to [bs, V, C]
178 | if tensor_shape_x.shape[0] != 3:
179 | new_shape = K.stack([tensor_shape_x[0], -1, tensor_shape_x[-1]])
180 | x = tf.reshape(x, new_shape) # [bs, V, C]
181 | y = tf.reshape(y, new_shape) # [bs, V, C]
182 |
183 | # move channels to first dimension
184 | ndims_k = len(x.shape)
185 | permute = [ndims_k - 1] + list(range(ndims_k - 1))
186 | cx = tf.transpose(x, permute) # [C, bs, V]
187 | cy = tf.transpose(y, permute) # [C, bs, V]
188 |
189 | # soft quantize
190 | cxq = self._soft_sim_map(cx) # [C, bs, V, B]
191 | cyq = self._soft_sim_map(cy) # [C, bs, V, B]
192 |
193 | # get mi
194 | map_fn = lambda x: self.maps(*x)
195 | cout = tf.map_fn(map_fn, [cxq, cyq], dtype=tf.float32) # [C, bs]
196 |
197 | # permute back
198 | return tf.transpose(cout, [1, 0]) # [bs, C]
199 |
200 | def maps(self, x, y):
201 | """
202 | Computes mutual information for each entry in batch, assuming each item contains
203 | probability or similarity maps *at each voxel*. These could be e.g. from a softmax output
204 |         (e.g. when performing segmentation) or from soft quantization of an intensity image.
205 |         Note: the MI is computed separately for each item in the batch, so the joint probabilities
206 |         might be different across inputs. In some cases, computing MI across the whole batch
207 |         might be desirable (TODO).
208 | Parameters:
209 | x and y are probability maps of size [bs, ..., B], where B is the size of the
210 | discrete probability domain grid (e.g. bins/labels). B can be different for x and y.
211 | Returns:
212 | Tensor of size [bs]
213 | """
214 |
215 | # check shapes
216 | tensor_shape_x = K.shape(x)
217 | tensor_shape_y = K.shape(y)
218 | tf.debugging.assert_equal(tensor_shape_x, tensor_shape_y)
219 | tf.debugging.assert_non_negative(x)
220 | tf.debugging.assert_non_negative(y)
221 |
222 | eps = K.epsilon()
223 |
224 | # reshape to [bs, V, B]
225 | if tensor_shape_x.shape[0] != 3:
226 | new_shape = K.stack([tensor_shape_x[0], -1, tensor_shape_x[-1]])
227 | x = tf.reshape(x, new_shape) # [bs, V, B1]
228 | y = tf.reshape(y, new_shape) # [bs, V, B2]
229 |
230 | # joint probability for each batch entry
231 | x_trans = tf.transpose(x, (0, 2, 1)) # [bs, B1, V]
232 | pxy = K.batch_dot(x_trans, y) # [bs, B1, B2]
233 | pxy = pxy / (K.sum(pxy, axis=[1, 2], keepdims=True) + eps) # [bs, B1, B2]
234 |
235 | # x probability for each batch entry
236 | px = K.sum(x, 1, keepdims=True) # [bs, 1, B1]
237 | px = px / (K.sum(px, 2, keepdims=True) + eps) # [bs, 1, B1]
238 |
239 | # y probability for each batch entry
240 | py = K.sum(y, 1, keepdims=True) # [bs, 1, B2]
241 | py = py / (K.sum(py, 2, keepdims=True) + eps) # [bs, 1, B2]
242 |
243 | # independent xy probability
244 | px_trans = K.permute_dimensions(px, (0, 2, 1)) # [bs, B1, 1]
245 | pxpy = K.batch_dot(px_trans, py) # [bs, B1, B2]
246 | pxpy_eps = pxpy + eps
247 |
248 | # mutual information
249 | log_term = K.log(pxy / pxpy_eps + eps) # [bs, B1, B2]
250 | mi = K.sum(pxy * log_term, axis=[1, 2]) # [bs]
251 | return mi
252 |
253 | def _soft_log_sim_map(self, x):
254 | """
255 | soft quantization of intensities (values) in a given volume
256 | See neurite.utils.soft_quantize
257 | Parameters:
258 | x [bs, ...]: intensity image.
259 | Returns:
260 | volume with one more dimension [bs, ..., B]
261 | """
262 |
263 | return ne.utils.soft_quantize(x,
264 | alpha=self.soft_bin_alpha,
265 | bin_centers=self.bin_centers,
266 | nb_bins=self.nb_bins,
267 | min_clip=self.min_clip,
268 | max_clip=self.max_clip,
269 | return_log=True) # [bs, ..., B]
270 |
271 | def _soft_sim_map(self, x):
272 | """
273 | See neurite.utils.soft_quantize
274 | Parameters:
275 | x [bs, ...]: intensity image.
276 | Returns:
277 | volume with one more dimension [bs, ..., B]
278 | """
279 | return ne.utils.soft_quantize(x,
280 | alpha=self.soft_bin_alpha,
281 | bin_centers=self.bin_centers,
282 | nb_bins=self.nb_bins,
283 | min_clip=self.min_clip,
284 | max_clip=self.max_clip,
285 | return_log=False) # [bs, ..., B]
286 |
287 | def _soft_prob_map(self, x, **kwargs):
288 | """
289 | normalize a soft_quantized volume at each voxel, so that each voxel now holds a prob. map
290 | Parameters:
291 | x [bs, ..., B]: soft quantized volume
292 | Returns:
293 | x [bs, ..., B]: renormalized so that each voxel adds to 1 across last dimension
294 | """
295 | x_hist = self._soft_sim_map(x, **kwargs) # [bs, ..., B]
296 |         x_hist_sum = K.sum(x_hist, -1, keepdims=True) + K.epsilon()  # [bs, ..., 1]
297 |         x_prob = x_hist / x_hist_sum  # [bs, ..., B]
298 | return x_prob
299 | 
300 | class NMInformation(NMI):
301 |     """Negative soft mutual information, usable directly as a training loss."""
302 | 
303 |     def loss(self, y_true, y_pred):
304 |         return -self.volumes(y_true, y_pred)
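305 | 
306 | 
307 | if __name__ == '__main__':
308 |     # Smoke test (a sketch, not part of the training pipeline): feed maps()
309 |     # two random per-voxel probability maps of shape [batch, voxels, bins].
310 |     # Independent random maps should give a small MI value per batch item.
311 |     rng = np.random.default_rng(0)
312 |     p = rng.random((2, 64, 16)).astype('float32')
313 |     q = rng.random((2, 64, 16)).astype('float32')
314 |     p /= p.sum(-1, keepdims=True)  # normalize bins at every voxel
315 |     q /= q.sum(-1, keepdims=True)
316 |     print('MI per batch item:', NMI().maps(tf.constant(p), tf.constant(q)).numpy())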
--------------------------------------------------------------------------------
/DenseNet_registration/Registration_model/loss.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.keras import backend as K
4 | import neurite as ne  # ne.utils.soft_quantize is required by the NMI class below
5 | 
6 | 
7 | class image_loss_mes(tf.keras.losses.Loss):
8 | 
9 |     def call(self, y_true, y_pred):
10 |         # keep only non-empty warped pixels; reshape to the [bs, V, 1] layout volumes() expects
11 |         index = tf.where(y_pred > 0.001)
12 |         return NMInformation().loss(tf.reshape(tf.gather_nd(y_true, index), (1, -1, 1)), tf.reshape(tf.gather_nd(y_pred, index), (1, -1, 1)))
13 |
14 |
15 | class matix_loss_mes(tf.keras.losses.Loss):
16 |
17 |     def call(self, y_true, y_predict):
18 |         # mean squared error between predicted and true transform parameters
19 |         loss = tf.reduce_mean(tf.square(y_predict - y_true))
20 | 
21 |         return loss
22 |
23 | # NOTE: NMInformation subclasses NMI, so it is defined after the NMI class
24 | # at the bottom of this file (a base class must exist before its subclass
25 | # is declared, otherwise this module raises a NameError on import).
26 | 
27 | 
28 | class NMI:
29 | """
30 | Soft Mutual Information approximation for intensity volumes and probabilistic volumes
31 |     (e.g. probabilistic segmentations)
32 | More information/citation:
33 | - Courtney K Guo.
34 | Multi-modal image registration with unsupervised deep learning.
35 | PhD thesis, Massachusetts Institute of Technology, 2019.
36 | - M Hoffmann, B Billot, DN Greve, JE Iglesias, B Fischl, AV Dalca
37 | SynthMorph: learning contrast-invariant registration without acquired images
38 | IEEE Transactions on Medical Imaging (TMI), 41 (3), 543-558, 2022
39 | https://doi.org/10.1109/TMI.2021.3116879
40 | # TODO: add local MI by using patches. This is quite memory consuming, though.
41 | Includes functions that can compute mutual information between volumes,
42 | between segmentations, or between a volume and a segmentation map
43 |     mi = NMI()
44 | mi.volumes
45 | mi.segs
46 | mi.volume_seg
47 | mi.channelwise
48 | mi.maps
49 | """
50 |
51 | def __init__(self,
52 | bin_centers=None,
53 | nb_bins=None,
54 | soft_bin_alpha=None,
55 | min_clip=None,
56 | max_clip=None):
57 | """
58 | Initialize the mutual information class
59 | Arguments below are related to soft quantizing of volumes, which is done automatically
60 |         in functions that compute MI over volumes (e.g. volumes(), volume_seg(), channelwise())
61 | using these parameters
62 | Args:
63 | bin_centers (np.float32, optional): Array or list of bin centers. Defaults to None.
64 | nb_bins (int, optional): Number of bins. Defaults to 16 if bin_centers
65 | is not specified.
66 |             soft_bin_alpha (float, optional): Alpha in RBF of soft quantization. Defaults
67 |                 to `1 / (2 * square(sigma))`.
68 | min_clip (float, optional): Lower value to clip data. Defaults to -np.inf.
69 | max_clip (float, optional): Upper value to clip data. Defaults to np.inf.
70 | """
71 |
72 | self.bin_centers = None
73 | if bin_centers is not None:
74 | self.bin_centers = tf.convert_to_tensor(bin_centers, dtype=tf.float32)
75 | assert nb_bins is None, 'cannot provide both bin_centers and nb_bins'
76 | nb_bins = bin_centers.shape[0]
77 |
78 | self.nb_bins = nb_bins
79 | if bin_centers is None and nb_bins is None:
80 | self.nb_bins = 16
81 |
82 | self.min_clip = min_clip
83 | if self.min_clip is None:
84 | self.min_clip = -np.inf
85 |
86 | self.max_clip = max_clip
87 | if self.max_clip is None:
88 | self.max_clip = np.inf
89 |
90 | self.soft_bin_alpha = soft_bin_alpha
91 | if self.soft_bin_alpha is None:
92 | sigma_ratio = 0.5
93 | if self.bin_centers is None:
94 | sigma = sigma_ratio / (self.nb_bins - 1)
95 | else:
96 | sigma = sigma_ratio * tf.reduce_mean(tf.experimental.numpy.diff(bin_centers))
97 | self.soft_bin_alpha = 1 / (2 * tf.square(sigma))
98 |         # print(self.soft_bin_alpha)  # debug
99 |
100 | def volumes(self, x, y):
101 | """
102 | Mutual information for each item in a batch of volumes.
103 | Algorithm:
104 | - use neurite.utils.soft_quantize() to create a soft quantization (binning) of
105 | intensities in each channel
106 | - channelwise()
107 | Parameters:
108 | x and y: [bs, ..., 1]
109 | Returns:
110 | Tensor of size [bs]
111 | """
112 | # check shapes
113 | tensor_channels_x = K.shape(x)[-1]
114 | tensor_channels_y = K.shape(y)[-1]
115 | msg = 'volume_mi requires two single-channel volumes. See channelwise().'
116 | tf.debugging.assert_equal(tensor_channels_x, 1, msg)
117 | tf.debugging.assert_equal(tensor_channels_y, 1, msg)
118 |
119 | # volume mi
120 | return K.flatten(self.channelwise(x, y))
121 |
122 | def segs(self, x, y):
123 | """
124 | Mutual information between two probabilistic segmentation maps.
125 | Wraps maps()
126 | Parameters:
127 | x and y: [bs, ..., nb_labels]
128 | Returns:
129 | Tensor of size [bs]
130 | """
131 | # volume mi
132 | return self.maps(x, y)
133 |
134 | def volume_seg(self, x, y):
135 | """
136 |         Mutual information between a volume and a probabilistic segmentation map.
137 | Wraps maps()
138 | Parameters:
139 | x and y: a volume and a probabilistic (soft) segmentation. Either:
140 | - x: [bs, ..., 1] and y: [bs, ..., nb_labels], Or:
141 | - x: [bs, ..., nb_labels] and y: [bs, ..., 1]
142 | Returns:
143 | Tensor of size [bs]
144 | """
145 | # check shapes
146 | tensor_channels_x = K.shape(x)[-1]
147 | tensor_channels_y = K.shape(y)[-1]
148 | msg = 'volume_seg_mi requires one single-channel volume.'
149 | tf.debugging.assert_equal(tf.minimum(tensor_channels_x, tensor_channels_y), 1, msg)
150 | # otherwise we don't know which one is which
151 | msg = 'volume_seg_mi requires one multi-channel segmentation.'
152 | tf.debugging.assert_greater(tf.maximum(tensor_channels_x, tensor_channels_y), 1, msg)
153 |
154 | # transform volume to soft-quantized volume
155 | if tensor_channels_x == 1:
156 | x = self._soft_sim_map(x[..., 0]) # [bs, ..., B]
157 | else:
158 | y = self._soft_sim_map(y[..., 0]) # [bs, ..., B]
159 |
160 | return self.maps(x, y) # [bs]
161 |
162 | def channelwise(self, x, y):
163 | """
164 |         Mutual information for each channel in x and y. For each item and channel this
165 |         returns MI(x[..., i], y[..., i]). To do this, we use neurite.utils.soft_quantize() to
166 | create a soft quantization (binning) of the intensities in each channel
167 | Parameters:
168 | x and y: [bs, ..., C]
169 | Returns:
170 | Tensor of size [bs, C]
171 | """
172 | # check shapes
173 | tensor_shape_x = K.shape(x)
174 | tensor_shape_y = K.shape(y)
175 | tf.debugging.assert_equal(tensor_shape_x, tensor_shape_y, 'volume shapes do not match')
176 |
177 | # reshape to [bs, V, C]
178 | if tensor_shape_x.shape[0] != 3:
179 | new_shape = K.stack([tensor_shape_x[0], -1, tensor_shape_x[-1]])
180 | x = tf.reshape(x, new_shape) # [bs, V, C]
181 | y = tf.reshape(y, new_shape) # [bs, V, C]
182 |
183 | # move channels to first dimension
184 | ndims_k = len(x.shape)
185 | permute = [ndims_k - 1] + list(range(ndims_k - 1))
186 | cx = tf.transpose(x, permute) # [C, bs, V]
187 | cy = tf.transpose(y, permute) # [C, bs, V]
188 |
189 | # soft quantize
190 | cxq = self._soft_sim_map(cx) # [C, bs, V, B]
191 | cyq = self._soft_sim_map(cy) # [C, bs, V, B]
192 |
193 | # get mi
194 | map_fn = lambda x: self.maps(*x)
195 | cout = tf.map_fn(map_fn, [cxq, cyq], dtype=tf.float32) # [C, bs]
196 |
197 | # permute back
198 | return tf.transpose(cout, [1, 0]) # [bs, C]
199 |
200 | def maps(self, x, y):
201 | """
202 | Computes mutual information for each entry in batch, assuming each item contains
203 | probability or similarity maps *at each voxel*. These could be e.g. from a softmax output
204 |         (e.g. when performing segmentation) or from soft quantization of an intensity image.
205 |         Note: the MI is computed separately for each item in the batch, so the joint probabilities
206 |         might be different across inputs. In some cases, computing MI across the whole batch
207 |         might be desirable (TODO).
208 | Parameters:
209 | x and y are probability maps of size [bs, ..., B], where B is the size of the
210 | discrete probability domain grid (e.g. bins/labels). B can be different for x and y.
211 | Returns:
212 | Tensor of size [bs]
213 | """
214 |
215 | # check shapes
216 | tensor_shape_x = K.shape(x)
217 | tensor_shape_y = K.shape(y)
218 | tf.debugging.assert_equal(tensor_shape_x, tensor_shape_y)
219 | tf.debugging.assert_non_negative(x)
220 | tf.debugging.assert_non_negative(y)
221 |
222 | eps = K.epsilon()
223 |
224 | # reshape to [bs, V, B]
225 | if tensor_shape_x.shape[0] != 3:
226 | new_shape = K.stack([tensor_shape_x[0], -1, tensor_shape_x[-1]])
227 | x = tf.reshape(x, new_shape) # [bs, V, B1]
228 | y = tf.reshape(y, new_shape) # [bs, V, B2]
229 |
230 | # joint probability for each batch entry
231 | x_trans = tf.transpose(x, (0, 2, 1)) # [bs, B1, V]
232 | pxy = K.batch_dot(x_trans, y) # [bs, B1, B2]
233 | pxy = pxy / (K.sum(pxy, axis=[1, 2], keepdims=True) + eps) # [bs, B1, B2]
234 |
235 | # x probability for each batch entry
236 | px = K.sum(x, 1, keepdims=True) # [bs, 1, B1]
237 | px = px / (K.sum(px, 2, keepdims=True) + eps) # [bs, 1, B1]
238 |
239 | # y probability for each batch entry
240 | py = K.sum(y, 1, keepdims=True) # [bs, 1, B2]
241 | py = py / (K.sum(py, 2, keepdims=True) + eps) # [bs, 1, B2]
242 |
243 | # independent xy probability
244 | px_trans = K.permute_dimensions(px, (0, 2, 1)) # [bs, B1, 1]
245 | pxpy = K.batch_dot(px_trans, py) # [bs, B1, B2]
246 | pxpy_eps = pxpy + eps
247 |
248 | # mutual information
249 | log_term = K.log(pxy / pxpy_eps + eps) # [bs, B1, B2]
250 | mi = K.sum(pxy * log_term, axis=[1, 2]) # [bs]
251 | return mi
252 |
253 | def _soft_log_sim_map(self, x):
254 | """
255 | soft quantization of intensities (values) in a given volume
256 | See neurite.utils.soft_quantize
257 | Parameters:
258 | x [bs, ...]: intensity image.
259 | Returns:
260 | volume with one more dimension [bs, ..., B]
261 | """
262 |
263 | return ne.utils.soft_quantize(x,
264 | alpha=self.soft_bin_alpha,
265 | bin_centers=self.bin_centers,
266 | nb_bins=self.nb_bins,
267 | min_clip=self.min_clip,
268 | max_clip=self.max_clip,
269 | return_log=True) # [bs, ..., B]
270 |
271 | def _soft_sim_map(self, x):
272 | """
273 | See neurite.utils.soft_quantize
274 | Parameters:
275 | x [bs, ...]: intensity image.
276 | Returns:
277 | volume with one more dimension [bs, ..., B]
278 | """
279 | return ne.utils.soft_quantize(x,
280 | alpha=self.soft_bin_alpha,
281 | bin_centers=self.bin_centers,
282 | nb_bins=self.nb_bins,
283 | min_clip=self.min_clip,
284 | max_clip=self.max_clip,
285 | return_log=False) # [bs, ..., B]
286 |
287 | def _soft_prob_map(self, x, **kwargs):
288 | """
289 | normalize a soft_quantized volume at each voxel, so that each voxel now holds a prob. map
290 | Parameters:
291 | x [bs, ..., B]: soft quantized volume
292 | Returns:
293 | x [bs, ..., B]: renormalized so that each voxel adds to 1 across last dimension
294 | """
295 | x_hist = self._soft_sim_map(x, **kwargs) # [bs, ..., B]
296 |         x_hist_sum = K.sum(x_hist, -1, keepdims=True) + K.epsilon()  # [bs, ..., 1]
297 |         x_prob = x_hist / x_hist_sum  # [bs, ..., B]
298 | return x_prob
299 | 
300 | class NMInformation(NMI):
301 |     """Negative soft mutual information, usable directly as a training loss."""
302 | 
303 |     def loss(self, y_true, y_pred):
304 |         return -self.volumes(y_true, y_pred)
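305 | 
306 | 
307 | if __name__ == '__main__':
308 |     # Quick check of the matrix loss on dummy 6-parameter affine vectors
309 |     # (a sketch; the values are arbitrary). The printed value is the mean
310 |     # squared difference between the two parameter vectors.
311 |     y_true = tf.constant([[1.0, 0.0, 0.0, 0.0, 1.0, 0.0]])
312 |     y_pred = tf.constant([[1.1, 0.0, 0.0, 0.0, 0.9, 0.0]])
313 |     print('matrix mse:', matix_loss_mes()(y_true, y_pred).numpy())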
--------------------------------------------------------------------------------