├── .gitignore ├── Img ├── test.png ├── output.png ├── output_new.png └── learning_curve.png ├── README.md ├── Other files ├── test_saver.py ├── my_test.py ├── Automap_chongduan ├── test_gpu.py ├── test_fft2_normalize.py ├── myAutomap_recon.py ├── myAutomap_recon_cduan.py ├── myAutomap_cpu.py ├── myAutomap_recon_cduan_v2.py ├── myAutomap.py ├── myAutomap_cduan.py └── myAutomap_cduan_v2.py ├── generate_input_motion.py ├── generate_input.py ├── myAutomap_recon.py ├── myAutomap_gpu.py └── myAutomap_cpu.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | -------------------------------------------------------------------------------- /Img/test.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chongduan/MRI-AUTOMAP/HEAD/Img/test.png -------------------------------------------------------------------------------- /Img/output.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chongduan/MRI-AUTOMAP/HEAD/Img/output.png -------------------------------------------------------------------------------- /Img/output_new.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chongduan/MRI-AUTOMAP/HEAD/Img/output_new.png -------------------------------------------------------------------------------- /Img/learning_curve.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/chongduan/MRI-AUTOMAP/HEAD/Img/learning_curve.png -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # MRI-AUTOMAP 2 | This is an implementation of the AUTOMAP (automated transform by manifold approximation) algorithm described in the following paper: 3 | Zhu, Bo, et al. "Image reconstruction by domain-transform manifold learning." Nature 555.7697 (2018): 487. 4 | 5 | ## Sample results 6 | ![](https://raw.githubusercontent.com/chongduan/MRI-AUTOMAP/master/Img/output_new.png) 7 | 8 | Figure 1. The first row contains MRI k-space data, which is the input to the network. The second row is the inverse Fourier transform of the k-space data, and the third row shows the network-reconstructed MRI images. 9 | 10 | Note that the network outputs are blurry, most likely because training was performed on a small dataset (~5,000 cardiac MRI images). 11 | 12 | -------------------------------------------------------------------------------- /Other files/test_saver.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Oct 22 11:40:33 2018 5 | 6 | @author: chongduan 7 | """ 8 | 9 | import tensorflow as tf 10 | 11 | #Prepare to feed input, i.e. 
feed_dict and placeholders 12 | w1 = tf.placeholder("float", name="w1") 13 | w2 = tf.placeholder("float", name="w2") 14 | b1 = tf.Variable(2.0, name="bias") 15 | feed_dict = {w1: 4, w2: 8} 16 | 17 | #Define a test operation that we will restore 18 | w3 = tf.add(w1,w2) 19 | w4 = tf.multiply(w3,b1,name="op_to_restore") 20 | sess = tf.Session() 21 | sess.run(tf.global_variables_initializer()) 22 | 23 | #Create a saver object which will save all the variables 24 | saver = tf.train.Saver() 25 | 26 | #Run the operation by feeding input 27 | print(sess.run(w4,feed_dict)) 28 | #Prints 24, which is (w1+w2)*b1 = (4+8)*2 29 | 30 | #Now, save the graph 31 | saver.save(sess, 'path to save model/my_test_model',global_step=1000) 32 | 33 | 34 | -------------------------------------------------------------------------------- /Other files/my_test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Created on Wed Oct 17 14:27:50 2018 4 | 5 | @author: Chong 6 | """ 7 | 8 | import tensorflow as tf 9 | import matplotlib.pyplot as plt 10 | from scipy.io import loadmat 11 | import os 12 | import numpy as np 13 | from generate_input import create_x_motion 14 | 15 | dir_train = "/home/chongduan/Documents/Automap-MRI/Dataset" 16 | data = loadmat(os.path.join(dir_train, 'Stone_all_crop_64'))['crop_data_resize'] 17 | 18 | img = np.abs(data[:,:,1,4,1]) 19 | plt.imshow(img, cmap='gray') 20 | plt.show() 21 | 22 | X = create_x_motion(img) 23 | 24 | ### Plot images 25 | X_compl = X[:, :, :, 0] + X[:, :, :, 1] * 1j 26 | 27 | im_artif0 = np.fft.ifft2(X_compl[0, :, :]) 28 | 29 | img_artif_M0 = np.abs(im_artif0) 30 | 31 | plt.figure() 32 | plt.subplot(131), plt.imshow(np.abs(X_compl[0,:,:]), cmap='gray') 33 | plt.title('k-space'), plt.xticks([]), plt.yticks([]) 34 | plt.subplot(132), plt.imshow(img_artif_M0, cmap='gray') 35 | plt.title('ifft'), plt.xticks([]), plt.yticks([]) 36 | plt.subplot(133), plt.imshow(img, cmap='gray') 37 | plt.title('groundTruth'), plt.xticks([]), plt.yticks([]) -------------------------------------------------------------------------------- /Other files/Automap_chongduan: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Tue Oct 23 09:10:16 2018 5 | 6 | @author: chongduan 7 | """ 8 | 9 | import numpy as np 10 | import tensorflow as tf 11 | from tensorflow.python.framework import ops 12 | import math 13 | import time 14 | from generate_input import load_images_from_folder 15 | 16 | 17 | # Load training data: 18 | tic1 = time.time() 19 | # Folder with images 20 | dir_train = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon" 21 | n_cases = (0,1) # load case 0 (the loader slices cases n_cases[0]:n_cases[1]) 22 | X_train, Y_train = load_images_from_folder( # Load images for training 23 | dir_train, 24 | n_cases, 25 | normalize=False, 26 | imrotate=False) 27 | toc1 = time.time() 28 | print('Time to load data = ', (toc1 - tic1)) 29 | print('X_train.shape at input = ', X_train.shape) 30 | print('Y_train.shape at input = ', Y_train.shape) 31 | 32 | # NOTE: the model below is an MNIST-style template and is not wired up to the MRI data loaded above; the fit/evaluate calls are therefore left commented out 33 | model = tf.keras.models.Sequential([ 34 | tf.keras.layers.Flatten(), 35 | tf.keras.layers.Dense(512, activation=tf.nn.relu), 36 | tf.keras.layers.Dropout(0.2), 37 | tf.keras.layers.Dense(10, activation=tf.nn.softmax)]) 38 | 39 | model.compile(optimizer='adam', 40 | loss='sparse_categorical_crossentropy', 41 | metrics=['accuracy']) 42 | # model.fit(X_train, 
Y_train, epochs=5) 43 | # model.evaluate(X_test, Y_test) -------------------------------------------------------------------------------- /Other files/test_gpu.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Nov 5 13:04:46 2018 5 | 6 | @author: cduan 7 | """ 8 | import tensorflow as tf 9 | 10 | #### Logging Device placement 11 | # 12 | ## Creates a graph. 13 | #a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a') 14 | #b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b') 15 | #c = tf.matmul(a, b) 16 | ## Creates a session with log_device_placement set to True. 17 | #sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 18 | ## Runs the op. 19 | #print(sess.run(c)) 20 | #print('success!!!') 21 | 22 | 23 | ### Manual device placement 24 | # In the following example: a and b will be run on the cpu, while c (the matmul) will be run on the gpu (default) 25 | # Creates a graph. 26 | with tf.device('/cpu:0'): 27 | a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a') 28 | b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b') 29 | c = tf.matmul(a, b) 30 | # Creates a session with log_device_placement set to True. 31 | # sess = tf.Session() 32 | sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 33 | # Runs the op. 34 | print(sess.run(c)) 35 | 36 | 37 | #### Using a single GPU on a multi-GPU system 38 | ## Creates a graph. 39 | #with tf.device('/device:GPU:2'): 40 | # a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3], name='a') 41 | # b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2], name='b') 42 | # c = tf.matmul(a, b) 43 | ## Creates a session with log_device_placement set to True; if the specified gpu device doesn't exist, allow_soft_placement=True will automatically choose an available device 44 | #sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True)) 45 | ## Runs the op. 46 | #print(sess.run(c)) 47 | 48 | 49 | ### Using multiple GPUs 50 | # Creates a graph. 51 | c = [] 52 | for d in ['/device:GPU:2', '/device:GPU:3']: 53 | with tf.device(d): 54 | a = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[2, 3]) 55 | b = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], shape=[3, 2]) 56 | c.append(tf.matmul(a, b)) 57 | with tf.device('/cpu:0'): 58 | total = tf.add_n(c) # renamed from 'sum' to avoid shadowing the Python built-in 59 | # Creates a session with log_device_placement set to True. 60 | sess = tf.Session(config=tf.ConfigProto(log_device_placement=True)) 61 | # Runs the op. 
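# (Editor's note, added for illustration) With a = [[1, 2, 3], [4, 5, 6]] and
# b = [[1, 2], [3, 4], [5, 6]], each per-GPU matmul above evaluates to
# [[22. 28.] [49. 64.]]; tf.add_n sums the two products, so the print below
# should output [[44. 56.] [98. 128.]] when both GPUs are available.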
62 | print(sess.run(total)) 63 | 64 | 65 | 66 | -------------------------------------------------------------------------------- /Other files/test_fft2_normalize.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Tue Oct 30 13:15:50 2018 5 | 6 | @author: chongduan 7 | """ 8 | import numpy as np; import matplotlib.pyplot as plt # added imports; this scratch file assumes X_dev, Y_dev and the indices im1-im4 are already defined (e.g., by myAutomap_recon.py) 9 | ### Without normalization 10 | X_dev_compl = X_dev[:, :, :, 0] + X_dev[:, :, :, 1] * 1j 11 | 12 | #iFFT 13 | X_iFFT0 = np.fft.ifft2(X_dev_compl[im1, :, :]) 14 | X_iFFT1 = np.fft.ifft2(X_dev_compl[im2, :, :]) 15 | X_iFFT2 = np.fft.ifft2(X_dev_compl[im3, :, :]) 16 | X_iFFT3 = np.fft.ifft2(X_dev_compl[im4, :, :]) 17 | 18 | # Magnitude of complex image 19 | X_iFFT_M1 = np.sqrt(np.power(X_iFFT0.real, 2) 20 | + np.power(X_iFFT0.imag, 2)) 21 | X_iFFT_M2 = np.sqrt(np.power(X_iFFT1.real, 2) 22 | + np.power(X_iFFT1.imag, 2)) 23 | X_iFFT_M3 = np.sqrt(np.power(X_iFFT2.real, 2) 24 | + np.power(X_iFFT2.imag, 2)) 25 | X_iFFT_M4 = np.sqrt(np.power(X_iFFT3.real, 2) 26 | + np.power(X_iFFT3.imag, 2)) 27 | 28 | # SHOW 29 | # Show Y - input images 30 | plt.subplot(241), plt.imshow(Y_dev[im1, :, :], cmap='gray') 31 | plt.title('Y_dev1'), plt.xticks([]), plt.yticks([]) 32 | plt.subplot(242), plt.imshow(Y_dev[im2, :, :], cmap='gray') 33 | plt.title('Y_dev2'), plt.xticks([]), plt.yticks([]) 34 | plt.subplot(243), plt.imshow(Y_dev[im3, :, :], cmap='gray') 35 | plt.title('Y_dev3'), plt.xticks([]), plt.yticks([]) 36 | plt.subplot(244), plt.imshow(Y_dev[im4, :, :], cmap='gray') 37 | plt.title('Y_dev4'), plt.xticks([]), plt.yticks([]) 38 | 39 | # Show images reconstructed using iFFT 40 | plt.subplot(245), plt.imshow(X_iFFT_M1, cmap='gray') 41 | plt.title('X_iFFT1'), plt.xticks([]), plt.yticks([]) 42 | plt.subplot(246), plt.imshow(X_iFFT_M2, cmap='gray') 43 | plt.title('X_iFFT2'), plt.xticks([]), plt.yticks([]) 44 | plt.subplot(247), plt.imshow(X_iFFT_M3, cmap='gray') 45 | plt.title('X_iFFT3'), plt.xticks([]), plt.yticks([]) 46 | plt.subplot(248), plt.imshow(X_iFFT_M4, cmap='gray') 47 | plt.title('X_iFFT4'), plt.xticks([]), plt.yticks([]) 48 | 49 | 50 | 51 | ### With normalization 52 | X_dev_norm = X_dev / np.max(X_dev) 53 | X_dev_compl = X_dev_norm[:, :, :, 0] + X_dev_norm[:, :, :, 1] * 1j 54 | 55 | #iFFT 56 | X_iFFT0 = np.fft.ifft2(X_dev_compl[im1, :, :]) 57 | X_iFFT1 = np.fft.ifft2(X_dev_compl[im2, :, :]) 58 | X_iFFT2 = np.fft.ifft2(X_dev_compl[im3, :, :]) 59 | X_iFFT3 = np.fft.ifft2(X_dev_compl[im4, :, :]) 60 | 61 | # Magnitude of complex image 62 | X_iFFT_M1 = np.sqrt(np.power(X_iFFT0.real, 2) 63 | + np.power(X_iFFT0.imag, 2)) 64 | X_iFFT_M2 = np.sqrt(np.power(X_iFFT1.real, 2) 65 | + np.power(X_iFFT1.imag, 2)) 66 | X_iFFT_M3 = np.sqrt(np.power(X_iFFT2.real, 2) 67 | + np.power(X_iFFT2.imag, 2)) 68 | X_iFFT_M4 = np.sqrt(np.power(X_iFFT3.real, 2) 69 | + np.power(X_iFFT3.imag, 2)) 70 | 71 | # SHOW 72 | # Show Y - input images 73 | plt.subplot(241), plt.imshow(Y_dev[im1, :, :], cmap='gray') 74 | plt.title('Y_dev1'), plt.xticks([]), plt.yticks([]) 75 | plt.subplot(242), plt.imshow(Y_dev[im2, :, :], cmap='gray') 76 | plt.title('Y_dev2'), plt.xticks([]), plt.yticks([]) 77 | plt.subplot(243), plt.imshow(Y_dev[im3, :, :], cmap='gray') 78 | plt.title('Y_dev3'), plt.xticks([]), plt.yticks([]) 79 | plt.subplot(244), plt.imshow(Y_dev[im4, :, :], cmap='gray') 80 | plt.title('Y_dev4'), plt.xticks([]), plt.yticks([]) 81 | 82 | # Show images reconstructed using iFFT 83 | plt.subplot(245), plt.imshow(X_iFFT_M1, cmap='gray') 84 | plt.title('X_iFFT1'), 
plt.xticks([]), plt.yticks([]) 85 | plt.subplot(246), plt.imshow(X_iFFT_M2, cmap='gray') 86 | plt.title('X_iFFT2'), plt.xticks([]), plt.yticks([]) 87 | plt.subplot(247), plt.imshow(X_iFFT_M3, cmap='gray') 88 | plt.title('X_iFFT3'), plt.xticks([]), plt.yticks([]) 89 | plt.subplot(248), plt.imshow(X_iFFT_M4, cmap='gray') 90 | plt.title('X_iFFT4'), plt.xticks([]), plt.yticks([]) 91 | -------------------------------------------------------------------------------- /generate_input_motion.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | import os 4 | from matplotlib import pyplot as plt 5 | 6 | 7 | def load_images_from_folder(folder, n_im, normalize=False, imrotate=False): 8 | """ Loads n_im images from the folder and puts them in an array bigy of 9 | size (n_im, im_size1, im_size2), where (im_size1, im_size2) is an image 10 | size. 11 | Performs FFT of every input image and puts it in an array bigx of size 12 | (n_im, im_size1, im_size2, 2), where "2" represents real and imaginary 13 | dimensions 14 | :param folder: path to the folder, which contains images 15 | :param n_im: number of images to load from the folder 16 | :param normalize: if True - the bigx data will be normalized 17 | :param imrotate: if True - each input image will be rotated by 90, 180, 18 | and 270 degrees 19 | :return: 20 | bigx: 4D array of frequency data of size (n_im, im_size1, im_size2, 2) 21 | bigy: 3D array of images of size (n_im, im_size1, im_size2) 22 | """ 23 | 24 | # Initialize the arrays: 25 | if imrotate: # number of images is 4 * n_im 26 | bigy = np.empty((n_im * 4, 80, 80)) 27 | bigx = np.empty((n_im * 4, 80, 80, 2)) 28 | else: 29 | bigy = np.empty((n_im, 80, 80)) 30 | bigx = np.empty((n_im, 80, 80, 2)) 31 | 32 | im = 0 # image counter 33 | for filename in os.listdir(folder): 34 | if not filename.startswith('.'): 35 | bigy_temp = cv2.imread(os.path.join(folder, filename), 36 | cv2.IMREAD_GRAYSCALE) 37 | bigy_padded = np.zeros((80, 80)) 38 | bigy_padded[8:72, 8:72] = bigy_temp 39 | bigy[im, :, :] = bigy_padded 40 | bigx[im, :, :, :] = create_x(bigy_temp, normalize) 41 | im += 1 42 | if imrotate: 43 | for angle in [90, 180, 270]: 44 | bigy_rot = im_rotate(bigy_temp, angle) 45 | bigx_rot = create_x(bigy_rot, normalize) 46 | 47 | bigy_rot_padded = np.zeros((80, 80)) 48 | bigy_rot_padded[8:72, 8:72] = bigy_rot 49 | 50 | bigy[im, :, :] = bigy_rot_padded 51 | bigx[im, :, :, :] = bigx_rot 52 | im += 1 53 | 54 | if imrotate: 55 | if im > (n_im * 4 - 1): # how many images to load 56 | break 57 | else: 58 | if im > (n_im - 1): # how many images to load 59 | break 60 | 61 | if normalize: 62 | bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx)) 63 | 64 | return bigx, bigy 65 | 66 | 67 | def create_x(y, normalize=False): 68 | """ 69 | Prepares frequency data from image data: first, image y is padded by 8 70 | pixels of value zero on each side (y_pad_loc1); then a second image is 71 | created by moving the input image (64x64) 8 pixels down -> two copies of 72 | the same image at different locations are created; then both images are 73 | transformed to frequency space, and their frequency spaces are combined 74 | as if the image moved half-way through the acquisition (upper part of 75 | freq space from one image and lower part of freq space from the other). 76 | Expands the dimensions from 3D to 4D, and normalizes if normalize=True 77 | :param y: input image 78 | :param normalize: if True - the frequency data will be normalized 79 | :return: 
"Motion corrupted" frequency-space data of the input image, 80 | 4D array of size (1, im_size1, im_size2, 2), third dimension (size: 2) 81 | contains real and imaginary part 82 | """ 83 | 84 | # Pad y and move 8 pixels 85 | y_pad_loc1 = np.zeros((80, 80)) 86 | y_pad_loc2 = np.zeros((80, 80)) 87 | y_pad_loc1[8:72, 8:72] = y 88 | y_pad_loc2[0:64, 8:72] = y 89 | 90 | # FFT of both images 91 | img_f1 = np.fft.fft2(y_pad_loc1) # FFT 92 | img_fshift1 = np.fft.fftshift(img_f1) # FFT shift 93 | img_f2 = np.fft.fft2(y_pad_loc2) # FFT 94 | img_fshift2 = np.fft.fftshift(img_f2) # FFT shift 95 | 96 | # Combine halfs of both k-space - as if subject moved 8 pixels in the 97 | # middle of acquisition 98 | x_compl = np.zeros((80, 80), dtype=np.complex_) 99 | x_compl[0:41, :] = img_fshift1[0:41, :] 100 | x_compl[41:81, :] = img_fshift2[41:81, :] 101 | 102 | # Finally, separate into real and imaginary channels 103 | x_real = x_compl.real 104 | x_imag = x_compl.imag 105 | x = np.dstack((x_real, x_imag)) 106 | 107 | x = np.expand_dims(x, axis=0) 108 | 109 | if normalize: 110 | x = x - np.mean(x) 111 | 112 | return x 113 | 114 | 115 | def im_rotate(img, angle): 116 | """ Rotates an image by angle degrees 117 | :param img: input image 118 | :param angle: angle by which the image is rotated, in degrees 119 | :return: rotated image 120 | """ 121 | 122 | rows, cols = img.shape 123 | rotM = cv2.getRotationMatrix2D((cols/2-0.5, rows/2-0.5), angle, 1) 124 | imrotated = cv2.warpAffine(img, rotM, (cols, rows)) 125 | 126 | return imrotated 127 | 128 | 129 | # For debugging: show the images and their frequency space 130 | dir_temp = 'path to folder with images' 131 | X, Y = load_images_from_folder(dir_temp, 5, normalize=False, imrotate=True) 132 | 133 | print(Y.shape) 134 | print(X.shape) 135 | 136 | # Image 137 | plt.subplot(221), plt.imshow(Y[8, :, :], cmap='gray') 138 | plt.title('Y_rot0'), plt.xticks([]), plt.yticks([]) 139 | plt.subplot(222), plt.imshow(Y[9, :, :], cmap='gray') 140 | plt.title('Y_rot90'), plt.xticks([]), plt.yticks([]) 141 | plt.subplot(223), plt.imshow(Y[10, :, :], cmap='gray') 142 | plt.title('Y_rot180'), plt.xticks([]), plt.yticks([]) 143 | plt.subplot(224), plt.imshow(Y[11, :, :], cmap='gray') 144 | plt.title('Y_rot270'), plt.xticks([]), plt.yticks([]) 145 | plt.show() 146 | 147 | # Corresponding frequency space (magnitude) 148 | X_m = np.sqrt(np.power(X[:, :, :, 0], 2) 149 | + np.power(X[:, :, :, 1], 2)) 150 | plt.subplot(221), plt.imshow(X_m[8, :, :], cmap='gray') 151 | plt.title('X_freq_rot0'), plt.xticks([]), plt.yticks([]) 152 | plt.subplot(222), plt.imshow(X_m[9, :, :], cmap='gray') 153 | plt.title('X_freq_rot90'), plt.xticks([]), plt.yticks([]) 154 | plt.subplot(223), plt.imshow(X_m[10, :, :], cmap='gray') 155 | plt.title('X_freq_rot180'), plt.xticks([]), plt.yticks([]) 156 | plt.subplot(224), plt.imshow(X_m[11, :, :], cmap='gray') 157 | plt.title('X_freq_rot270'), plt.xticks([]), plt.yticks([]) 158 | plt.show() 159 | 160 | 161 | # iFFT back to image from corrupted frequency space 162 | X_compl = X[:, :, :, 0] + X[:, :, :, 1] * 1j 163 | 164 | im_artif0 = np.fft.ifft2(X_compl[8, :, :]) 165 | im_artif1 = np.fft.ifft2(X_compl[9, :, :]) 166 | im_artif2 = np.fft.ifft2(X_compl[10, :, :]) 167 | im_artif3 = np.fft.ifft2(X_compl[11, :, :]) 168 | 169 | img_artif_M0 = np.sqrt(np.power(im_artif0.real, 2) 170 | + np.power(im_artif0.imag, 2)) 171 | img_artif_M1 = np.sqrt(np.power(im_artif1.real, 2) 172 | + np.power(im_artif1.imag, 2)) 173 | img_artif_M2 = np.sqrt(np.power(im_artif2.real, 2) 174 | + 
np.power(im_artif2.imag, 2)) 175 | img_artif_M3 = np.sqrt(np.power(im_artif3.real, 2) 176 | + np.power(im_artif3.imag, 2)) 177 | 178 | plt.subplot(221), plt.imshow(img_artif_M0, cmap='gray') 179 | plt.title('X_rot0'), plt.xticks([]), plt.yticks([]) 180 | plt.subplot(222), plt.imshow(img_artif_M1, cmap='gray') 181 | plt.title('X_rot1'), plt.xticks([]), plt.yticks([]) 182 | plt.subplot(223), plt.imshow(img_artif_M2, cmap='gray') 183 | plt.title('X_rot2'), plt.xticks([]), plt.yticks([]) 184 | plt.subplot(224), plt.imshow(img_artif_M3, cmap='gray') 185 | plt.title('X_rot3'), plt.xticks([]), plt.yticks([]) 186 | plt.show() 187 | -------------------------------------------------------------------------------- /Other files/myAutomap_recon.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.framework import ops 4 | from matplotlib import pyplot as plt 5 | from generate_input_motion import load_images_from_folder 6 | 7 | 8 | # Load development/test data: 9 | dir_dev = "path to the folder with dev/test images" 10 | n_im_dev = 60 # How many images to load 11 | # Load images and create motion-corrupted frequency space 12 | # No normalization or rotations: 13 | X_dev, Y_dev = load_images_from_folder( # Load images for evaluating model 14 | dir_dev, 15 | n_im_dev, 16 | normalize=False, 17 | imrotate=False) 18 | print('X_dev.shape at input = ', X_dev.shape) 19 | print('Y_dev.shape at input = ', Y_dev.shape) 20 | 21 | 22 | def create_placeholders(n_H0, n_W0): 23 | """ Creates placeholders for x and y for tf.session 24 | :param n_H0: image height 25 | :param n_W0: image width 26 | :return: x and y - tf placeholders 27 | """ 28 | 29 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 30 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 31 | 32 | return x, y 33 | 34 | 35 | def initialize_parameters(): 36 | """ Initializes filters for the convolutional and de-convolutional layers 37 | :return: parameters - a dictionary of filters (W1 - first convolutional 38 | layer, W2 - second convolutional layer, W3 - de-convolutional layer 39 | """ 40 | 41 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 42 | initializer=tf.contrib.layers.xavier_initializer 43 | (seed=0)) 44 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 45 | initializer=tf.contrib.layers.xavier_initializer 46 | (seed=0)) 47 | W3 = tf.get_variable("W3", [7, 7, 1, 64], # 64 filters of size 7x7 48 | initializer=tf.contrib.layers.xavier_initializer 49 | (seed=0)) # conv2d_transpose 50 | 51 | parameters = {"W1": W1, 52 | "W2": W2, 53 | "W3": W3} 54 | 55 | return parameters 56 | 57 | 58 | def forward_propagation(x, parameters): 59 | """ Defines all layers for forward propagation: 60 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 61 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 62 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 63 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 64 | -> De-convolutional: size (n_im, n_H0, n_W0) 65 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 66 | :param parameters: parameters of the layers (e.g. 
filters) 67 | :return: output of the last layer of the neural network 68 | """ 69 | 70 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 71 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 72 | 73 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 74 | FC1 = tf.contrib.layers.fully_connected( 75 | x_temp, 76 | n_out, 77 | activation_fn=tf.tanh, 78 | normalizer_fn=None, 79 | normalizer_params=None, 80 | weights_initializer=tf.contrib.layers.xavier_initializer(), 81 | weights_regularizer=None, 82 | biases_initializer=None, 83 | biases_regularizer=None, 84 | reuse=tf.AUTO_REUSE, # was reuse=True, which fails on the first call before the variables exist 85 | variables_collections=None, 86 | outputs_collections=None, 87 | trainable=True, 88 | scope='fc1') 89 | 90 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 91 | FC2 = tf.contrib.layers.fully_connected( 92 | FC1, 93 | n_out, 94 | activation_fn=tf.tanh, 95 | normalizer_fn=None, 96 | normalizer_params=None, 97 | weights_initializer=tf.contrib.layers.xavier_initializer(), 98 | weights_regularizer=None, 99 | biases_initializer=None, 100 | biases_regularizer=None, 101 | reuse=tf.AUTO_REUSE, 102 | variables_collections=None, 103 | outputs_collections=None, 104 | trainable=True, 105 | scope='fc2') 106 | 107 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 108 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 109 | 110 | # Retrieve the parameters from the dictionary "parameters": 111 | W1 = parameters['W1'] 112 | W2 = parameters['W2'] 113 | W3 = parameters['W3'] 114 | 115 | # CONV2D: filters W1, stride of 1, padding 'SAME' 116 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 117 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 118 | # RELU 119 | CONV1 = tf.nn.relu(Z1) 120 | 121 | # CONV2D: filters W2, stride 1, padding 'SAME' 122 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 123 | Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 124 | # RELU 125 | CONV2 = tf.nn.relu(Z2) 126 | 127 | # DE-CONV2D: filters W3, stride 1, padding 'SAME' 128 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 129 | batch_size = tf.shape(x)[0] 130 | deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 131 | DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 132 | strides=[1, 1, 1, 1], padding='SAME') 133 | DECONV = tf.squeeze(DECONV) 134 | 135 | return DECONV 136 | 137 | 138 | def model(X_dev): 139 | """ Runs the forward propagation to reconstruct images using trained model 140 | :param X_dev: input development frequency-space data 141 | :return: returns the image, reconstructed using a trained model 142 | """ 143 | 144 | ops.reset_default_graph() # to not overwrite tf variables 145 | (_, n_H0, n_W0, _) = X_dev.shape 146 | 147 | # Create Placeholders 148 | X, Y = create_placeholders(n_H0, n_W0) 149 | 150 | # Initialize parameters 151 | parameters = initialize_parameters() 152 | 153 | # Build the forward propagation in the tf graph 154 | forward_propagation(X, parameters) 155 | 156 | # Add ops to save and restore all the variables 157 | saver = tf.train.Saver() 158 | 159 | # Start the session to compute the tf graph 160 | with tf.Session() as sess: 161 | 162 | saver.restore(sess, "path to saved model/model_name.ckpt") 163 | 164 | print("Model restored") 165 | 166 | Y_recon_temp = forward_propagation(X, parameters) 167 | Y_recon = Y_recon_temp.eval({X: X_dev}) 168 | 169 | return 
parameters, Y_recon 170 | 171 | 172 | # Reconstruct the image using trained model 173 | _, Y_recon = model(X_dev) 174 | print('Y_recon.shape = ', Y_recon.shape) 175 | print('Y_dev.shape = ', Y_dev.shape) 176 | 177 | 178 | # Visualize the images, their reconstruction using iFFT and using trained model 179 | # 4 images to visualize: 180 | im1 = 32 181 | im2 = 33 182 | im3 = 34 183 | im4 = 35 184 | 185 | # iFFT back to image from corrupted frequency space 186 | # Complex image from real and imaginary part 187 | X_dev_compl = X_dev[:, :, :, 0] + X_dev[:, :, :, 1] * 1j 188 | 189 | #iFFT 190 | X_iFFT0 = np.fft.ifft2(X_dev_compl[im1, :, :]) 191 | X_iFFT1 = np.fft.ifft2(X_dev_compl[im2, :, :]) 192 | X_iFFT2 = np.fft.ifft2(X_dev_compl[im3, :, :]) 193 | X_iFFT3 = np.fft.ifft2(X_dev_compl[im4, :, :]) 194 | 195 | # Magnitude of complex image 196 | X_iFFT_M1 = np.sqrt(np.power(X_iFFT0.real, 2) 197 | + np.power(X_iFFT0.imag, 2)) 198 | X_iFFT_M2 = np.sqrt(np.power(X_iFFT1.real, 2) 199 | + np.power(X_iFFT1.imag, 2)) 200 | X_iFFT_M3 = np.sqrt(np.power(X_iFFT2.real, 2) 201 | + np.power(X_iFFT2.imag, 2)) 202 | X_iFFT_M4 = np.sqrt(np.power(X_iFFT3.real, 2) 203 | + np.power(X_iFFT3.imag, 2)) 204 | 205 | # SHOW 206 | # Show Y - input images 207 | plt.subplot(341), plt.imshow(Y_dev[im1, :, :], cmap='gray') 208 | plt.title('Y_dev1'), plt.xticks([]), plt.yticks([]) 209 | plt.subplot(342), plt.imshow(Y_dev[im2, :, :], cmap='gray') 210 | plt.title('Y_dev2'), plt.xticks([]), plt.yticks([]) 211 | plt.subplot(343), plt.imshow(Y_dev[im3, :, :], cmap='gray') 212 | plt.title('Y_dev3'), plt.xticks([]), plt.yticks([]) 213 | plt.subplot(344), plt.imshow(Y_dev[im4, :, :], cmap='gray') 214 | plt.title('Y_dev4'), plt.xticks([]), plt.yticks([]) 215 | 216 | # Show images reconstructed using iFFT 217 | plt.subplot(345), plt.imshow(X_iFFT_M1, cmap='gray') 218 | plt.title('X_iFFT1'), plt.xticks([]), plt.yticks([]) 219 | plt.subplot(346), plt.imshow(X_iFFT_M2, cmap='gray') 220 | plt.title('X_iFFT2'), plt.xticks([]), plt.yticks([]) 221 | plt.subplot(347), plt.imshow(X_iFFT_M3, cmap='gray') 222 | plt.title('X_iFFT3'), plt.xticks([]), plt.yticks([]) 223 | plt.subplot(348), plt.imshow(X_iFFT_M4, cmap='gray') 224 | plt.title('X_iFFT4'), plt.xticks([]), plt.yticks([]) 225 | 226 | # Show images reconstructed using model 227 | plt.subplot(349), plt.imshow(Y_recon[im1, :, :], cmap='gray') 228 | plt.title('Y_recon1'), plt.xticks([]), plt.yticks([]) 229 | plt.subplot(3, 4, 10), plt.imshow(Y_recon[im2, :, :], cmap='gray') 230 | plt.title('Y_recon2'), plt.xticks([]), plt.yticks([]) 231 | plt.subplot(3, 4, 11), plt.imshow(Y_recon[im3, :, :], cmap='gray') 232 | plt.title('Y_recon3'), plt.xticks([]), plt.yticks([]) 233 | plt.subplot(3, 4, 12), plt.imshow(Y_recon[im4, :, :], cmap='gray') 234 | plt.title('Y_recon4'), plt.xticks([]), plt.yticks([]) 235 | plt.show() 236 | 237 | 238 | 239 | -------------------------------------------------------------------------------- /Other files/myAutomap_recon_cduan.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.framework import ops 4 | from matplotlib import pyplot as plt 5 | from generate_input import load_images_from_folder 6 | #from generate_input_motion import load_images_from_folder 7 | 8 | # Load development/test data: 9 | dir_dev = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon" 10 | #n_im_dev = 60 # How many images to 
load 11 | # Load images and create motion-corrupted frequency space 12 | # No normalization or rotations: 13 | n_cases = (3,4) # load case 3 (the loader slices cases n_cases[0]:n_cases[1]) 14 | X_dev, Y_dev = load_images_from_folder( # Load images for evaluation 15 | dir_dev, 16 | n_cases, 17 | normalize=False, 18 | imrotate=False) 19 | print('X_dev.shape at input = ', X_dev.shape) 20 | print('Y_dev.shape at input = ', Y_dev.shape) 21 | 22 | def create_placeholders(n_H0, n_W0): 23 | """ Creates placeholders for x and y for tf.session 24 | :param n_H0: image height 25 | :param n_W0: image width 26 | :return: x and y - tf placeholders 27 | """ 28 | 29 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 30 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 31 | 32 | return x, y 33 | 34 | 35 | def initialize_parameters(): 36 | """ Initializes filters for the convolutional and de-convolutional layers 37 | :return: parameters - a dictionary of filters (W1 - first convolutional 38 | layer, W2 - second convolutional layer, W3 - de-convolutional layer) 39 | """ 40 | 41 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 42 | initializer=tf.contrib.layers.xavier_initializer 43 | (seed=0)) 44 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 45 | initializer=tf.contrib.layers.xavier_initializer 46 | (seed=0)) 47 | W3 = tf.get_variable("W3", [7, 7, 1, 64], # 64 filters of size 7x7 48 | initializer=tf.contrib.layers.xavier_initializer 49 | (seed=0)) # conv2d_transpose 50 | 51 | parameters = {"W1": W1, 52 | "W2": W2, 53 | "W3": W3} 54 | 55 | return parameters 56 | 57 | 58 | def forward_propagation(x, parameters): 59 | """ Defines all layers for forward propagation: 60 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 61 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 62 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 63 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 64 | -> De-convolutional: size (n_im, n_H0, n_W0) 65 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 66 | :param parameters: parameters of the layers (e.g. 
filters) 67 | :return: output of the last layer of the neural network 68 | """ 69 | 70 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 71 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 72 | 73 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 74 | FC1 = tf.contrib.layers.fully_connected( 75 | x_temp, 76 | n_out, 77 | activation_fn=tf.tanh, 78 | normalizer_fn=None, 79 | normalizer_params=None, 80 | weights_initializer=tf.contrib.layers.xavier_initializer(), 81 | weights_regularizer=None, 82 | biases_initializer=None, 83 | biases_regularizer=None, 84 | # reuse=True, 85 | reuse = tf.AUTO_REUSE, 86 | variables_collections=None, 87 | outputs_collections=None, 88 | trainable=True, 89 | scope='fc1') 90 | 91 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 92 | FC2 = tf.contrib.layers.fully_connected( 93 | FC1, 94 | n_out, 95 | activation_fn=tf.tanh, 96 | normalizer_fn=None, 97 | normalizer_params=None, 98 | weights_initializer=tf.contrib.layers.xavier_initializer(), 99 | weights_regularizer=None, 100 | biases_initializer=None, 101 | biases_regularizer=None, 102 | # reuse=True, 103 | reuse = tf.AUTO_REUSE, 104 | variables_collections=None, 105 | outputs_collections=None, 106 | trainable=True, 107 | scope='fc2') 108 | 109 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 110 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 111 | 112 | # Retrieve the parameters from the dictionary "parameters": 113 | W1 = parameters['W1'] 114 | W2 = parameters['W2'] 115 | W3 = parameters['W3'] 116 | 117 | # CONV2D: filters W1, stride of 1, padding 'SAME' 118 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 119 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 120 | # RELU 121 | CONV1 = tf.nn.relu(Z1) 122 | 123 | # CONV2D: filters W2, stride 1, padding 'SAME' 124 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 125 | Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 126 | # RELU 127 | CONV2 = tf.nn.relu(Z2) 128 | 129 | # DE-CONV2D: filters W3, stride 1, padding 'SAME' 130 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 131 | batch_size = tf.shape(x)[0] 132 | deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 133 | DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 134 | strides=[1, 1, 1, 1], padding='SAME') 135 | DECONV = tf.squeeze(DECONV) 136 | 137 | return DECONV 138 | 139 | 140 | def model(X_dev): 141 | """ Runs the forward propagation to reconstruct images using trained model 142 | :param X_dev: input development frequency-space data 143 | :return: returns the image, reconstructed using a trained model 144 | """ 145 | with tf.device('/cpu:0'): 146 | ops.reset_default_graph() # to not overwrite tf variables 147 | (_, n_H0, n_W0, _) = X_dev.shape 148 | 149 | # Create Placeholders 150 | X, Y = create_placeholders(n_H0, n_W0) 151 | 152 | # Initialize parameters 153 | parameters = initialize_parameters() 154 | 155 | # Build the forward propagation in the tf graph 156 | forward_propagation(X, parameters) 157 | 158 | # Add ops to save and restore all the variables 159 | saver = tf.train.Saver() 160 | 161 | # Start the session to compute the tf graph 162 | with tf.Session() as sess: 163 | 164 | # Chong Duan 165 | ckpt = tf.train.get_checkpoint_state('./model/') 166 | saver.restore(sess, ckpt.model_checkpoint_path) 167 | 168 | 
print("Model restored") 169 | 170 | Y_recon_temp = forward_propagation(X, parameters) 171 | Y_recon = Y_recon_temp.eval({X: X_dev}) 172 | 173 | return parameters, Y_recon 174 | 175 | 176 | # Reconstruct the image using trained model 177 | _, Y_recon = model(X_dev) 178 | print('Y_recon.shape = ', Y_recon.shape) 179 | print('Y_dev.shape = ', Y_dev.shape) 180 | 181 | 182 | # Visualize the images, their reconstruction using iFFT and using trained model 183 | # 4 images to visualize: 184 | im1 = 32 185 | im2 = 33 186 | im3 = 34 187 | im4 = 35 188 | 189 | # iFFT back to image from corrupted frequency space 190 | # Complex image from real and imaginary part 191 | X_dev_compl = X_dev[:, :, :, 0] + X_dev[:, :, :, 1] * 1j 192 | 193 | #iFFT 194 | X_iFFT0 = np.fft.ifft2(X_dev_compl[im1, :, :]) 195 | X_iFFT1 = np.fft.ifft2(X_dev_compl[im2, :, :]) 196 | X_iFFT2 = np.fft.ifft2(X_dev_compl[im3, :, :]) 197 | X_iFFT3 = np.fft.ifft2(X_dev_compl[im4, :, :]) 198 | 199 | # Magnitude of complex image 200 | X_iFFT_M1 = np.sqrt(np.power(X_iFFT0.real, 2) 201 | + np.power(X_iFFT0.imag, 2)) 202 | X_iFFT_M2 = np.sqrt(np.power(X_iFFT1.real, 2) 203 | + np.power(X_iFFT1.imag, 2)) 204 | X_iFFT_M3 = np.sqrt(np.power(X_iFFT2.real, 2) 205 | + np.power(X_iFFT2.imag, 2)) 206 | X_iFFT_M4 = np.sqrt(np.power(X_iFFT3.real, 2) 207 | + np.power(X_iFFT3.imag, 2)) 208 | 209 | # SHOW 210 | # Show Y - input images 211 | plt.subplot(341), plt.imshow(Y_dev[im1, :, :], cmap='gray') 212 | plt.title('Y_dev1'), plt.xticks([]), plt.yticks([]) 213 | plt.subplot(342), plt.imshow(Y_dev[im2, :, :], cmap='gray') 214 | plt.title('Y_dev2'), plt.xticks([]), plt.yticks([]) 215 | plt.subplot(343), plt.imshow(Y_dev[im3, :, :], cmap='gray') 216 | plt.title('Y_dev3'), plt.xticks([]), plt.yticks([]) 217 | plt.subplot(344), plt.imshow(Y_dev[im4, :, :], cmap='gray') 218 | plt.title('Y_dev4'), plt.xticks([]), plt.yticks([]) 219 | 220 | # Show images reconstructed using iFFT 221 | plt.subplot(345), plt.imshow(X_iFFT_M1, cmap='gray') 222 | plt.title('X_iFFT1'), plt.xticks([]), plt.yticks([]) 223 | plt.subplot(346), plt.imshow(X_iFFT_M2, cmap='gray') 224 | plt.title('X_iFFT2'), plt.xticks([]), plt.yticks([]) 225 | plt.subplot(347), plt.imshow(X_iFFT_M3, cmap='gray') 226 | plt.title('X_iFFT3'), plt.xticks([]), plt.yticks([]) 227 | plt.subplot(348), plt.imshow(X_iFFT_M4, cmap='gray') 228 | plt.title('X_iFFT4'), plt.xticks([]), plt.yticks([]) 229 | 230 | # Show images reconstructed using model 231 | plt.subplot(349), plt.imshow(Y_recon[im1, :, :], cmap='gray') 232 | plt.title('Y_recon1'), plt.xticks([]), plt.yticks([]) 233 | plt.subplot(3, 4, 10), plt.imshow(Y_recon[im2, :, :], cmap='gray') 234 | plt.title('Y_recon2'), plt.xticks([]), plt.yticks([]) 235 | plt.subplot(3, 4, 11), plt.imshow(Y_recon[im3, :, :], cmap='gray') 236 | plt.title('Y_recon3'), plt.xticks([]), plt.yticks([]) 237 | plt.subplot(3, 4, 12), plt.imshow(Y_recon[im4, :, :], cmap='gray') 238 | plt.title('Y_recon4'), plt.xticks([]), plt.yticks([]) 239 | plt.show() -------------------------------------------------------------------------------- /Other files/myAutomap_cpu.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Nov 5 13:04:46 2018 5 | 6 | @author: cduan 7 | """ 8 | 9 | import numpy as np 10 | import tensorflow as tf 11 | from tensorflow.python.framework import ops 12 | import math 13 | import time 14 | import matplotlib.pyplot as plt 15 | from generate_input import load_STONE_data 
16 | 17 | 18 | # Load training data, cropped and resized from MATLAB 19 | tic1 = time.time() 20 | dir_train = "/home/chongduan/Documents/Automap-MRI/Dataset" 21 | n_cases = (0,1) 22 | X_train, Y_train = load_STONE_data( 23 | dir_train, 24 | n_cases, 25 | normalize=False, 26 | imrotate=False, 27 | motion=True) 28 | toc1 = time.time() 29 | print('Time to load data = ', (toc1 - tic1)) 30 | print('X_train.shape at input = ', X_train.shape) 31 | print('Y_train.shape at input = ', Y_train.shape) 32 | 33 | 34 | def create_placeholders(n_H0, n_W0): 35 | """ Creates placeholders for x and y for tf.session 36 | :param n_H0: image height 37 | :param n_W0: image width 38 | :return: x and y - tf placeholders 39 | """ 40 | 41 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 42 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 43 | 44 | return x, y 45 | 46 | def forward_propagation(x): 47 | """ Defines all layers for forward propagation: 48 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 49 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 50 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 51 | -> Convolutional -> ReLU activation with l1 regularization: size (n_im, n_H0, n_W0, 64) 52 | -> De-convolutional: size (n_im, n_H0, n_W0) 53 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 54 | :param parameters: parameters of the layers (e.g. filters) 55 | :return: output of the last layer of the neural network 56 | """ 57 | 58 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 59 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 60 | 61 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 62 | FC1 = tf.contrib.layers.fully_connected( 63 | x_temp, 64 | n_out, 65 | activation_fn=tf.tanh, 66 | normalizer_fn=None, 67 | normalizer_params=None, 68 | weights_initializer=tf.contrib.layers.xavier_initializer(), 69 | weights_regularizer=None, 70 | biases_initializer=None, 71 | biases_regularizer=None, 72 | reuse=tf.AUTO_REUSE, 73 | variables_collections=None, 74 | outputs_collections=None, 75 | trainable=True, 76 | scope='fc1') 77 | 78 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 79 | FC2 = tf.contrib.layers.fully_connected( 80 | FC1, 81 | n_out, 82 | activation_fn=tf.tanh, 83 | normalizer_fn=None, 84 | normalizer_params=None, 85 | weights_initializer=tf.contrib.layers.xavier_initializer(), 86 | weights_regularizer=None, 87 | biases_initializer=None, 88 | biases_regularizer=None, 89 | reuse=tf.AUTO_REUSE, 90 | variables_collections=None, 91 | outputs_collections=None, 92 | trainable=True, 93 | scope='fc2') 94 | 95 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 96 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 97 | 98 | CONV1 = tf.layers.conv2d( 99 | FC_M, 100 | filters=64, 101 | kernel_size=5, 102 | strides=(1, 1), 103 | padding='same', 104 | data_format='channels_last', 105 | dilation_rate=(1, 1), 106 | activation=tf.nn.relu, 107 | use_bias=True, 108 | kernel_initializer=None, 109 | bias_initializer=tf.zeros_initializer(), 110 | kernel_regularizer=None, 111 | bias_regularizer=None, 112 | activity_regularizer=None, 113 | kernel_constraint=None, 114 | bias_constraint=None, 115 | trainable=True, 116 | name='conv1', 117 | reuse=tf.AUTO_REUSE) 118 | 119 | CONV2 = tf.layers.conv2d( 120 | CONV1, 121 | filters=64, 122 | kernel_size=5, 123 | strides=(1, 
1), 124 | padding='same', 125 | data_format='channels_last', 126 | dilation_rate=(1, 1), 127 | activation=tf.nn.relu, 128 | use_bias=True, 129 | kernel_initializer=None, 130 | bias_initializer=tf.zeros_initializer(), 131 | kernel_regularizer=None, 132 | bias_regularizer=None, 133 | activity_regularizer=None, 134 | kernel_constraint=None, 135 | bias_constraint=None, 136 | trainable=True, 137 | name='conv2', 138 | reuse=tf.AUTO_REUSE) 139 | 140 | # Apply L1-norm on last hidden layer to the activation as described in the paper 141 | CONV3 = tf.layers.conv2d( 142 | CONV2, 143 | filters=1, 144 | kernel_size=7, 145 | strides=(1, 1), 146 | padding='same', 147 | data_format='channels_last', 148 | dilation_rate=(1, 1), 149 | activation=tf.nn.relu, 150 | use_bias=True, 151 | kernel_initializer=None, 152 | bias_initializer=tf.zeros_initializer(), 153 | kernel_regularizer=None, 154 | bias_regularizer=None, 155 | # activity_regularizer = None, 156 | activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001), 157 | kernel_constraint=None, 158 | bias_constraint=None, 159 | trainable=True, 160 | name='conv3', 161 | reuse=tf.AUTO_REUSE) 162 | 163 | OUTPUT = tf.squeeze(CONV3) 164 | 165 | return OUTPUT 166 | 167 | def compute_cost(OUTPUT, Y): 168 | """ 169 | Computes cost (squared loss) between the output of forward propagation and 170 | the label image 171 | :param DECONV: output of forward propagation 172 | :param Y: label image 173 | :return: cost (squared loss) 174 | """ 175 | 176 | cost = tf.square(OUTPUT - Y) 177 | 178 | return cost 179 | 180 | def random_mini_batches(x, y, mini_batch_size=64, seed=0): 181 | """ Shuffles training examples and partitions them into mini-batches 182 | to speed up the gradient descent 183 | :param x: input frequency space data 184 | :param y: input image space data 185 | :param mini_batch_size: mini-batch size 186 | :param seed: can be chosen to keep the random choice consistent 187 | :return: a mini-batch of size mini_batch_size of training examples 188 | """ 189 | 190 | m = x.shape[0] # number of input images 191 | mini_batches = [] 192 | np.random.seed(seed) 193 | 194 | # Shuffle (x, y) 195 | permutation = list(np.random.permutation(m)) 196 | shuffled_X = x[permutation, :] 197 | shuffled_Y = y[permutation, :] 198 | 199 | # Partition (shuffled_X, shuffled_Y). Minus the end case. 
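    # Worked example (editor's note): with m = 23 examples and mini_batch_size = 5,
    # floor(23/5) = 4 full mini-batches are built below, and the end-case branch
    # adds one final mini-batch holding the remaining 23 - 4*5 = 3 examples.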
200 | num_complete_minibatches = int(math.floor( 201 | m / mini_batch_size)) # number of mini batches of size mini_batch_size 202 | 203 | for k in range(0, num_complete_minibatches): 204 | mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size 205 | + mini_batch_size, :, :, :] 206 | mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size 207 | + mini_batch_size, :, :] 208 | mini_batch = (mini_batch_X, mini_batch_Y) 209 | mini_batches.append(mini_batch) 210 | 211 | # Handling the end case (last mini-batch < mini_batch_size) 212 | if m % mini_batch_size != 0: 213 | mini_batch_X = shuffled_X[num_complete_minibatches 214 | * mini_batch_size: m, :, :, :] 215 | mini_batch_Y = shuffled_Y[num_complete_minibatches 216 | * mini_batch_size: m, :, :] 217 | mini_batch = (mini_batch_X, mini_batch_Y) 218 | mini_batches.append(mini_batch) 219 | 220 | return mini_batches 221 | 222 | 223 | def model(X_train, Y_train, learning_rate=0.0001, 224 | num_epochs=100, minibatch_size=5, print_cost=True): 225 | """ Runs the forward and backward propagation 226 | :param X_train: input training frequency-space data 227 | :param Y_train: input training image-space data 228 | :param learning_rate: learning rate of gradient descent 229 | :param num_epochs: number of epochs 230 | :param minibatch_size: size of mini-batch 231 | :param print_cost: if True - the cost will be printed every epoch, as well 232 | as how long it took to run the epoch 233 | :return: this function saves the model to a file. The model can then 234 | be used to reconstruct the image from frequency space 235 | """ 236 | 237 | ops.reset_default_graph() # to not overwrite tf variables 238 | seed = 3 239 | (m, n_H0, n_W0, _) = X_train.shape 240 | 241 | # Create Placeholders 242 | X, Y = create_placeholders(n_H0, n_W0) 243 | 244 | # # Initialize parameters 245 | # parameters = initialize_parameters() 246 | 247 | # Build the forward propagation in the tf graph 248 | OUTPUT = forward_propagation(X) 249 | 250 | # Add cost function to tf graph 251 | cost = compute_cost(OUTPUT, Y) 252 | 253 | # Add global_step variable for save training models - Chong Duan 254 | my_global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step') 255 | 256 | # Backpropagation 257 | optimizer = tf.train.RMSPropOptimizer(learning_rate, 258 | decay=0.9, 259 | momentum=0.0).minimize(cost, global_step = my_global_step) 260 | 261 | # Initialize all the variables globally 262 | init = tf.global_variables_initializer() 263 | 264 | # Add ops to save and restore all the variables 265 | saver = tf.train.Saver(save_relative_paths=True) 266 | 267 | # Memory config 268 | config = tf.ConfigProto(log_device_placement=True) 269 | config.gpu_options.allow_growth = True # fixed: creating a second ConfigProto here previously discarded allow_growth 270 | 271 | 272 | # Start the session to compute the tf graph 273 | with tf.Session(config=config) as sess: 274 | 275 | # Initialization 276 | sess.run(init) 277 | 278 | # Training loop 279 | learning_curve = [] 280 | for epoch in range(num_epochs): 281 | tic = time.time() 282 | 283 | minibatch_cost = 0. 
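            # (Editor's note) Each mini-batch adds np.mean(temp_cost) / num_minibatches,
            # so after the loop minibatch_cost holds (approximately) the epoch-average
            # per-pixel squared error.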
284 | num_minibatches = int(m / minibatch_size) # number of minibatches 285 | seed += 1 286 | minibatches = random_mini_batches(X_train, Y_train, 287 | minibatch_size, seed) 288 | # Minibatch loop 289 | for minibatch in minibatches: 290 | # Select a minibatch 291 | (minibatch_X, minibatch_Y) = minibatch 292 | # Run the session to execute the optimizer and the cost 293 | _, temp_cost = sess.run( 294 | [optimizer, cost], 295 | feed_dict={X: minibatch_X, Y: minibatch_Y}) 296 | 297 | cost_mean = np.mean(temp_cost) / num_minibatches 298 | minibatch_cost += cost_mean 299 | 300 | # Print the cost every epoch 301 | learning_curve.append(minibatch_cost) 302 | if print_cost: 303 | toc = time.time() 304 | print ('EPOCH = ', epoch, 'COST = ', minibatch_cost, 'Elapsed time = ', (toc - tic)) 305 | 306 | if (epoch + 1) % 2 == 0: 307 | save_path = saver.save(sess, '../checkpoints/model.ckpt', global_step = my_global_step) 308 | print("Model saved in file: %s" % save_path) 309 | 310 | 311 | # Plot learning curve 312 | plt.plot(learning_curve) 313 | plt.title('Learning Curve') 314 | plt.xlabel('Epoch') 315 | plt.ylabel('Cost') 316 | plt.show() 317 | 318 | # Close sess 319 | sess.close() 320 | 321 | # Finally run the model! 322 | model(X_train, Y_train, 323 | learning_rate=0.0001, 324 | num_epochs=5, 325 | minibatch_size=11, # must be smaller than the number of input examples 326 | print_cost=True) -------------------------------------------------------------------------------- /Other files/myAutomap_recon_cduan_v2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.framework import ops 4 | from matplotlib import pyplot as plt 5 | from generate_input import load_images_from_folder,load_STONE_data 6 | #from generate_input_motion import load_images_from_folder 7 | 8 | ## Load development/test data: 9 | #dir_dev = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon" 10 | ##n_im_dev = 60 # How many images to load 11 | ## Load images and create motion-corrupted frequency space 12 | ## No normalization or rotations: 13 | #n_cases = (0,1) # load image data from 0 to 2 14 | #X_dev, Y_dev = load_images_from_folder( # Load images for training 15 | # dir_dev, 16 | # n_cases, 17 | # normalize=False, 18 | # imrotate=False) 19 | #print('X_dev.shape at input = ', X_dev.shape) 20 | #print('Y_dev.shape at input = ', Y_dev.shape) 21 | 22 | 23 | # Load training data, cropped and resized from MATLAB 24 | # Folder with images 25 | dir_train = "/home/chongduan/Documents/11_AUTOMAP/Dataset" 26 | n_cases = (0, 3) # load cases 0-2 (load_STONE_data slices cases n_cases[0]:n_cases[1]) 27 | X_dev, Y_dev = load_STONE_data( # Load images for evaluation 28 | dir_train, 29 | n_cases, 30 | normalize=False, 31 | imrotate=False) 32 | print('X_dev.shape at input = ', X_dev.shape) 33 | print('Y_dev.shape at input = ', Y_dev.shape) 34 | 35 | 36 | def create_placeholders(n_H0, n_W0): 37 | """ Creates placeholders for x and y for tf.session 38 | :param n_H0: image height 39 | :param n_W0: image width 40 | :return: x and y - tf placeholders 41 | """ 42 | 43 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 44 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 45 | 46 | return x, y 47 | 48 | 49 | def initialize_parameters(): 50 | """ Initializes filters for the convolutional layers 51 | :return: parameters - a dictionary of filters (W1 - first convolutional 52 | layer, W2 - 
second convolutional layer, W3 - final convolutional layer) 53 | """ 54 | 55 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 56 | initializer=tf.contrib.layers.xavier_initializer 57 | (seed=0)) 58 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 59 | initializer=tf.contrib.layers.xavier_initializer 60 | (seed=0)) 61 | W3 = tf.get_variable("W3", [7, 7, 64, 1], # 1 filter of size 7x7 62 | initializer=tf.contrib.layers.xavier_initializer 63 | (seed=0)) # set to std conv2d, Chong Duan 64 | 65 | parameters = {"W1": W1, 66 | "W2": W2, 67 | "W3": W3} 68 | 69 | return parameters 70 | 71 | 72 | def forward_propagation(x, parameters): 73 | """ Defines all layers for forward propagation: 74 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 75 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 76 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 77 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 78 | -> Convolutional (1 filter) -> ReLU: size (n_im, n_H0, n_W0) 79 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 80 | :param parameters: parameters of the layers (e.g. filters) 81 | :return: output of the last layer of the neural network 82 | """ 83 | 84 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 85 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 86 | 87 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 88 | FC1 = tf.contrib.layers.fully_connected( 89 | x_temp, 90 | n_out, 91 | activation_fn=tf.tanh, 92 | normalizer_fn=None, 93 | normalizer_params=None, 94 | weights_initializer=tf.contrib.layers.xavier_initializer(), 95 | weights_regularizer=None, 96 | biases_initializer=None, 97 | biases_regularizer=None, 98 | # reuse=True, 99 | reuse = tf.AUTO_REUSE, 100 | variables_collections=None, 101 | outputs_collections=None, 102 | trainable=True, 103 | scope='fc1') 104 | 105 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 106 | FC2 = tf.contrib.layers.fully_connected( 107 | FC1, 108 | n_out, 109 | activation_fn=tf.tanh, 110 | normalizer_fn=None, 111 | normalizer_params=None, 112 | weights_initializer=tf.contrib.layers.xavier_initializer(), 113 | weights_regularizer=None, 114 | biases_initializer=None, 115 | biases_regularizer=None, 116 | # reuse=True, 117 | reuse = tf.AUTO_REUSE, 118 | variables_collections=None, 119 | outputs_collections=None, 120 | trainable=True, 121 | scope='fc2') 122 | 123 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 124 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 125 | 126 | # Retrieve the parameters from the dictionary "parameters": 127 | W1 = parameters['W1'] 128 | W2 = parameters['W2'] 129 | W3 = parameters['W3'] 130 | 131 | # CONV2D: filters W1, stride of 1, padding 'SAME' 132 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 133 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 134 | # RELU 135 | CONV1 = tf.nn.relu(Z1) 136 | 137 | # CONV2D: filters W2, stride 1, padding 'SAME' 138 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 139 | Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 140 | # RELU 141 | CONV2 = tf.nn.relu(Z2) 142 | 143 | # # DE-CONV2D: filters W3, stride 1, padding 'SAME' 144 | # # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 145 | # batch_size = tf.shape(x)[0] 146 | # deconv_shape 
= tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 147 | # DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 148 | # strides=[1, 1, 1, 1], padding='SAME') 149 | 150 | # Use conv for the last layer, Chong Duan 151 | Z2 = tf.nn.conv2d(CONV2, W3, strides=[1, 1, 1, 1], padding='SAME') 152 | # RELU 153 | CONV3 = tf.nn.relu(Z2) 154 | 155 | DECONV = tf.squeeze(CONV3) 156 | 157 | return DECONV 158 | 159 | 160 | def model(X_dev): 161 | """ Runs the forward propagation to reconstruct images using trained model 162 | :param X_dev: input development frequency-space data 163 | :return: returns the image, reconstructed using a trained model 164 | """ 165 | with tf.device('/cpu:0'): 166 | ops.reset_default_graph() # to not overwrite tf variables 167 | (_, n_H0, n_W0, _) = X_dev.shape 168 | 169 | # Create Placeholders 170 | X, Y = create_placeholders(n_H0, n_W0) 171 | 172 | # Initialize parameters 173 | parameters = initialize_parameters() 174 | 175 | # Build the forward propagation in the tf graph 176 | forward_propagation(X, parameters) 177 | 178 | # Add ops to save and restore all the variables 179 | saver = tf.train.Saver() 180 | 181 | # Start the session to compute the tf graph 182 | with tf.Session() as sess: 183 | 184 | # Chong Duan 185 | ckpt = tf.train.get_checkpoint_state('./model/') 186 | saver.restore(sess, ckpt.model_checkpoint_path) 187 | 188 | print("Model restored") 189 | 190 | Y_recon_temp = forward_propagation(X, parameters) 191 | Y_recon = Y_recon_temp.eval({X: X_dev}) 192 | 193 | return parameters, Y_recon 194 | 195 | 196 | # Reconstruct the image using trained model 197 | _, Y_recon = model(X_dev) 198 | print('Y_recon.shape = ', Y_recon.shape) 199 | print('Y_dev.shape = ', Y_dev.shape) 200 | 201 | 202 | # Visualize the images, their reconstruction using iFFT and using trained model 203 | # 4 images to visualize: 204 | im1 = 4 205 | im2 = 15 206 | im3 = 26 207 | im4 = 37 208 | 209 | # iFFT back to image from corrupted frequency space 210 | # Complex image from real and imaginary part 211 | X_dev_compl = X_dev[:, :, :, 0] + X_dev[:, :, :, 1] * 1j 212 | 213 | #iFFT 214 | X_iFFT0 = np.fft.ifft2(X_dev_compl[im1, :, :]) 215 | X_iFFT1 = np.fft.ifft2(X_dev_compl[im2, :, :]) 216 | X_iFFT2 = np.fft.ifft2(X_dev_compl[im3, :, :]) 217 | X_iFFT3 = np.fft.ifft2(X_dev_compl[im4, :, :]) 218 | 219 | # Magnitude of complex image 220 | X_iFFT_M1 = np.sqrt(np.power(X_iFFT0.real, 2) 221 | + np.power(X_iFFT0.imag, 2)) 222 | X_iFFT_M2 = np.sqrt(np.power(X_iFFT1.real, 2) 223 | + np.power(X_iFFT1.imag, 2)) 224 | X_iFFT_M3 = np.sqrt(np.power(X_iFFT2.real, 2) 225 | + np.power(X_iFFT2.imag, 2)) 226 | X_iFFT_M4 = np.sqrt(np.power(X_iFFT3.real, 2) 227 | + np.power(X_iFFT3.imag, 2)) 228 | 229 | # SHOW 230 | # Show Y - input images 231 | plt.subplot(341), plt.imshow(Y_dev[im1, :, :], cmap='gray') 232 | plt.title('Y_dev1'), plt.xticks([]), plt.yticks([]) 233 | plt.subplot(342), plt.imshow(Y_dev[im2, :, :], cmap='gray') 234 | plt.title('Y_dev2'), plt.xticks([]), plt.yticks([]) 235 | plt.subplot(343), plt.imshow(Y_dev[im3, :, :], cmap='gray') 236 | plt.title('Y_dev3'), plt.xticks([]), plt.yticks([]) 237 | plt.subplot(344), plt.imshow(Y_dev[im4, :, :], cmap='gray') 238 | plt.title('Y_dev4'), plt.xticks([]), plt.yticks([]) 239 | 240 | # Show images reconstructed using iFFT 241 | plt.subplot(345), plt.imshow(X_iFFT_M1, cmap='gray') 242 | plt.title('X_iFFT1'), plt.xticks([]), plt.yticks([]) 243 | plt.subplot(346), plt.imshow(X_iFFT_M2, cmap='gray') 244 | plt.title('X_iFFT2'), plt.xticks([]), 
plt.yticks([]) 245 | plt.subplot(347), plt.imshow(X_iFFT_M3, cmap='gray') 246 | plt.title('X_iFFT3'), plt.xticks([]), plt.yticks([]) 247 | plt.subplot(348), plt.imshow(X_iFFT_M4, cmap='gray') 248 | plt.title('X_iFFT4'), plt.xticks([]), plt.yticks([]) 249 | 250 | # Show images reconstructed using model 251 | plt.subplot(349), plt.imshow(Y_recon[im1, :, :], cmap='gray') 252 | plt.title('Y_recon1'), plt.xticks([]), plt.yticks([]) 253 | plt.subplot(3, 4, 10), plt.imshow(Y_recon[im2, :, :], cmap='gray') 254 | plt.title('Y_recon2'), plt.xticks([]), plt.yticks([]) 255 | plt.subplot(3, 4, 11), plt.imshow(Y_recon[im3, :, :], cmap='gray') 256 | plt.title('Y_recon3'), plt.xticks([]), plt.yticks([]) 257 | plt.subplot(3, 4, 12), plt.imshow(Y_recon[im4, :, :], cmap='gray') 258 | plt.title('Y_recon4'), plt.xticks([]), plt.yticks([]) 259 | plt.subplots_adjust(hspace=0.3) 260 | plt.show() 261 | 262 | # Chong Duan - Display results 263 | # Show X - input k-space 264 | plt.subplot(341), plt.imshow(np.abs(X_dev_compl[im1, :, :]), cmap='gray') 265 | plt.title('Input-im1'), plt.xticks([]), plt.yticks([]) 266 | plt.subplot(342), plt.imshow(np.abs(X_dev_compl[im2, :, :]), cmap='gray') 267 | plt.title('Input-im2'), plt.xticks([]), plt.yticks([]) 268 | plt.subplot(343), plt.imshow(np.abs(X_dev_compl[im3, :, :]), cmap='gray') 269 | plt.title('Input-im3'), plt.xticks([]), plt.yticks([]) 270 | plt.subplot(344), plt.imshow(np.abs(X_dev_compl[im4, :, :]), cmap='gray') 271 | plt.title('Input-im4'), plt.xticks([]), plt.yticks([]) 272 | 273 | # Show images reconstructed using iFFT 274 | plt.subplot(345), plt.imshow(X_iFFT_M1, cmap='gray') 275 | plt.title('iFFT_im1'), plt.xticks([]), plt.yticks([]) 276 | plt.subplot(346), plt.imshow(X_iFFT_M2, cmap='gray') 277 | plt.title('iFFT_im2'), plt.xticks([]), plt.yticks([]) 278 | plt.subplot(347), plt.imshow(X_iFFT_M3, cmap='gray') 279 | plt.title('iFFT_im3'), plt.xticks([]), plt.yticks([]) 280 | plt.subplot(348), plt.imshow(X_iFFT_M4, cmap='gray') 281 | plt.title('iFFT_im4'), plt.xticks([]), plt.yticks([]) 282 | 283 | # Show images reconstructed using model 284 | plt.subplot(349), plt.imshow(Y_recon[im1, :, :], cmap='gray') 285 | plt.title('Output-im1'), plt.xticks([]), plt.yticks([]) 286 | plt.subplot(3, 4, 10), plt.imshow(Y_recon[im2, :, :], cmap='gray') 287 | plt.title('Output-im2'), plt.xticks([]), plt.yticks([]) 288 | plt.subplot(3, 4, 11), plt.imshow(Y_recon[im3, :, :], cmap='gray') 289 | plt.title('Output-im3'), plt.xticks([]), plt.yticks([]) 290 | plt.subplot(3, 4, 12), plt.imshow(Y_recon[im4, :, :], cmap='gray') 291 | plt.title('Output-im4'), plt.xticks([]), plt.yticks([]) 292 | plt.subplots_adjust(hspace=0.3) 293 | plt.show() -------------------------------------------------------------------------------- /generate_input.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import cv2 3 | import os 4 | 5 | # Chong Duan 6 | from scipy.io import loadmat 7 | from matplotlib import pyplot as plt 8 | 9 | 10 | def load_STONE_data(folder, n_cases, normalize=False, imrotate=False, motion=False): 11 | """ Loads the cropped/resized STONE MRI data ('Stone_all_crop_64.mat') for cases n_cases[0]:n_cases[1], optionally normalizes each case, augments by rotation, simulates motion, and converts every image to frequency space. Returns bigx (k-space data) and bigy (magnitude images). 12 | """ 13 | temp = loadmat(os.path.join(folder, 'Stone_all_crop_64'))['crop_data_resize'] 14 | row, col, t1w, sli, n = temp.shape 15 | temp = np.reshape(temp[:,:,:,:,n_cases[0]:n_cases[1]], (row, col, -1)) 16 | bigy = np.transpose(temp, (2,0,1)) 17 | 18 | 19 | # normalize 20 | if normalize: 21 | bigy = np.abs(bigy) 22 | temp_bigy = np.reshape(bigy, (55, n_cases[1] - n_cases[0], row, col)) # 55 = t1w * sli images per case 23 | for i in range(n_cases[1] - 
n_cases[0]): 24 | temp_bigy[:,i,:,:] = (temp_bigy[:,i,:,:] - np.min(temp_bigy[:,i,:,:])) / (np.max(temp_bigy[:,i,:,:]) -np.min(temp_bigy[:,i,:,:])) 25 | 26 | bigy = np.reshape(temp_bigy, (-1, row, col)) 27 | 28 | # convert to k-space 29 | imgs, row, col = bigy.shape 30 | if imrotate: 31 | bigx_rot_all = [] 32 | bigy_rot_all = [] 33 | # cv2 rotate does not work on complex data 34 | bigy = np.abs(bigy) 35 | for i in range(imgs): 36 | temp_image = np.squeeze(bigy[i,:,:]) 37 | bigy_rot_all.append(temp_image) 38 | bigx_rot_all.append(create_x(temp_image, motion)) 39 | for angle in [45, 90, 135, 180, 225, 270]: 40 | bigy_rot = im_rotate(temp_image, angle) 41 | bigy_rot_all.append(bigy_rot) 42 | bigx_rot_all.append(create_x(bigy_rot, motion)) 43 | 44 | 45 | bigx_rot_all = np.asarray(np.squeeze(bigx_rot_all)) 46 | bigy_rot_all = np.asarray(np.abs(bigy_rot_all)) 47 | 48 | # Pad bigy_rot_all if motion is included 49 | if motion: 50 | temp_bigy = bigy_rot_all.copy() 51 | bigy_rot_all = np.zeros((temp_bigy.shape[0], 80, 80)) 52 | bigy_rot_all[:,8:72, 8:72] = temp_bigy 53 | 54 | return bigx_rot_all, bigy_rot_all 55 | 56 | else: 57 | bigx = [] 58 | bigy = np.abs(bigy) 59 | for i in range(imgs): 60 | bigx.append(create_x(np.squeeze(bigy[i,:,:]), motion)) 61 | 62 | bigx = np.asarray(np.squeeze(bigx)) 63 | 64 | # Pad bigy if motion is included 65 | if motion: 66 | temp_bigy = bigy.copy() 67 | bigy = np.zeros((temp_bigy.shape[0], 80, 80)) 68 | bigy[:,8:72, 8:72] = temp_bigy 69 | 70 | return bigx, bigy 71 | 72 | 73 | def load_images_from_folder(folder, n_cases, normalize=False, imrotate=False): 74 | """ Loads n_im images from the folder and puts them in an array bigy of 75 | size (n_im, im_size1, im_size2), where (im_size1, im_size2) is an image 76 | size. 77 | Performs FFT of every input image and puts it in an array bigx of size 78 | (n_im, im_size1, im_size2, 2), where "2" represents real and imaginary 79 | dimensions 80 | :param folder: path to the folder, which contains images 81 | :param n_im: number of images to load from the folder 82 | :param normalize: if True - the xbig data will be normalized 83 | :param imrotate: if True - the each input image will be rotated by 90, 180, 84 | and 270 degrees 85 | :return: 86 | bigx: 4D array of frequency data of size (n_im, im_size1, im_size2, 2) 87 | bigy: 3D array of images of size (n_im, im_size1, im_size2) 88 | 89 | 90 | Modified by Chong Duan, 10/17/2018 91 | """ 92 | 93 | # # Initialize the arrays: 94 | # if imrotate: # number of images is 4 * n_im 95 | # bigy = np.empty((n_im * 4, 64, 64)) 96 | # bigx = np.empty((n_im * 4, 64, 64, 2)) 97 | # else: 98 | # bigy = np.empty((n_im, 64, 64)) 99 | # bigx = np.empty((n_im, 64, 64, 2)) 100 | 101 | # im = 0 # image counter 102 | bigy = [] 103 | filenames = os.listdir(folder) 104 | for filename in filenames[n_cases[0]:n_cases[1]]: 105 | if not filename.startswith('.'): 106 | temp = loadmat(os.path.join(folder, filename))['res'] 107 | 108 | # Clean the STONE sense recon data 109 | row, col, t1w, sli = temp.shape 110 | temp = np.reshape(temp, (row, col, -1)) 111 | valid_mask = (np.abs(np.squeeze(temp[int(row/2), int(col/2), :])) != 0) 112 | final_images = temp[:,:,valid_mask] 113 | 114 | # # Resize images 115 | final_images = np.abs(final_images) 116 | final_images_resized = np.zeros((64,64,final_images.shape[2])) 117 | for i in range(final_images.shape[2]): 118 | final_images_resized[:,:,i] = cv2.resize(final_images[:,:,i], (64,64)) 119 | 120 | # # Only take a small part of the data 121 | # final_images = 
final_images[140:180,140:180,:] 122 | 123 | # # Convert to abs values 124 | # final_images = np.abs(final_images) 125 | # 126 | # # Normalize based on single patient case 127 | # final_images = (final_images - np.mean(final_images)) / np.std(final_images) 128 | 129 | # bigy_temp = cv2.imread(os.path.join(folder, filename), 130 | # cv2.IMREAD_GRAYSCALE) 131 | 132 | 133 | bigy.append(final_images_resized) 134 | 135 | bigy = np.asarray(bigy) 136 | cases, row, col, imgs = bigy.shape 137 | bigy = np.transpose(np.reshape(np.transpose(bigy, (1,2,3,0)), (row, col, -1)), (2,0,1)) 138 | 139 | # convert to k-space 140 | imgs, row, col = bigy.shape 141 | bigx = np.empty((imgs, row, col, 2)) 142 | for i in range(imgs): 143 | bigx[i, :, :, :] = create_x(np.squeeze(bigy[i,:,:]), normalize=False) 144 | 145 | # convert bigx from complex to abs values 146 | bigy = np.abs(bigy) 147 | 148 | # im += 1 149 | # if imrotate: 150 | # for angle in [90, 180, 270]: 151 | # bigy_rot = im_rotate(bigy_temp, angle) 152 | # bigx_rot = create_x(bigy_rot, normalize) 153 | # bigy[im, :, :] = bigy_rot 154 | # bigx[im, :, :, :] = bigx_rot 155 | # im += 1 156 | 157 | # if imrotate: 158 | # if im > (n_im * 4 - 1): # how many images to load 159 | # break 160 | # else: 161 | # if im > (n_im - 1): # how many images to load 162 | # break 163 | 164 | # if normalize: 165 | # bigx = (bigx - np.amin(bigx)) / (np.amax(bigx) - np.amin(bigx)) 166 | 167 | return bigx, bigy 168 | 169 | 170 | def create_x_motion(y, normalize=False): 171 | """ 172 | Prepares frequency data from image data: first image y is padded by 8 173 | pixels of value zero from each side (y_pad_loc1), then second image is 174 | created by moving the input image (64x64) 8 pixels down -> two same images 175 | at different locations are created; then both images are transformed to 176 | frequency space and their frequency space is combined as if the image 177 | moved half-way through the acquisition (upper part of freq space from one 178 | image and lower part of freq space from another image) 179 | expands the dimensions from 3D to 4D, and normalizes if normalize=True 180 | :param y: input image 181 | :param normalize: if True - the frequency data will be normalized 182 | :return: "Motion corrupted" frequency-space data of the input image, 183 | 4D array of size (1, im_size1, im_size2, 2), third dimension (size: 2) 184 | contains real and imaginary part 185 | """ 186 | 187 | # Pad y and move 8 pixels 188 | y_pad_loc1 = np.zeros((80, 80)) 189 | y_pad_loc2 = np.zeros((80, 80)) 190 | y_pad_loc1[8:72, 8:72] = y 191 | y_pad_loc2[0:64, 8:72] = y 192 | 193 | # FFT of both images 194 | img_f1 = np.fft.fft2(y_pad_loc1) # FFT 195 | img_fshift1 = np.fft.fftshift(img_f1) # FFT shift 196 | img_f2 = np.fft.fft2(y_pad_loc2) # FFT 197 | img_fshift2 = np.fft.fftshift(img_f2) # FFT shift 198 | 199 | # Combine halfs of both k-space - as if subject moved 8 pixels in the 200 | # middle of acquisition 201 | x_compl = np.zeros((80, 80), dtype=np.complex_) 202 | x_compl[0:41, :] = img_fshift1[0:41, :] 203 | x_compl[41:81, :] = img_fshift2[41:81, :] 204 | 205 | # Finally, separate into real and imaginary channels 206 | x_real = x_compl.real 207 | x_imag = x_compl.imag 208 | x = np.dstack((x_real, x_imag)) 209 | 210 | x = np.expand_dims(x, axis=0) 211 | 212 | if normalize: 213 | x = x - np.mean(x) 214 | 215 | return x 216 | 217 | def create_x(y, motion=False): 218 | """ 219 | Prepares frequency data from image data: applies to_freq_space, 220 | expands the dimensions from 3D to 4D, and normalizes if 
normalize=True 221 | :param y: input image 222 | :param motion: if True - "motion-corrupted" frequency data is created (same padding/shift scheme as create_x_motion); note the function has no normalize parameter 223 | :return: frequency data 4D array of size (1, im_size1, im_size2, 2) 224 | """ 225 | 226 | if motion: 227 | # Pad y and move 8 pixels 228 | y_pad_loc1 = np.zeros((80, 80)) 229 | y_pad_loc2 = np.zeros((80, 80)) 230 | y_pad_loc1[8:72, 8:72] = y 231 | y_pad_loc2[0:64, 8:72] = y 232 | 233 | # FFT of both images 234 | img_f1 = np.fft.fft2(y_pad_loc1) # FFT 235 | img_fshift1 = np.fft.fftshift(img_f1) # FFT shift 236 | img_f2 = np.fft.fft2(y_pad_loc2) # FFT 237 | img_fshift2 = np.fft.fftshift(img_f2) # FFT shift 238 | 239 | # Combine halves of both k-spaces - as if subject moved 8 pixels in the 240 | # middle of acquisition 241 | x_compl = np.zeros((80, 80), dtype=np.complex_) 242 | x_compl[0:41, :] = img_fshift1[0:41, :] 243 | x_compl[41:, :] = img_fshift2[41:, :] 244 | 245 | # Finally, separate into real and imaginary channels 246 | x_real = x_compl.real 247 | x_imag = x_compl.imag 248 | x = np.dstack((x_real, x_imag)) 249 | 250 | x = np.expand_dims(x, axis=0) 251 | else: 252 | x = to_freq_space(y) 253 | x = np.expand_dims(x, axis=0) 254 | 255 | return x 256 | 257 | 258 | def to_freq_space(img): 259 | """ Performs FFT of an image 260 | :param img: input 2D image 261 | :return: Frequency-space data of the input image, third dimension (size: 2) 262 | contains real and imaginary part 263 | """ 264 | 265 | img_f = np.fft.fft2(img) # FFT 266 | img_fshift = np.fft.fftshift(img_f) # FFT shift 267 | img_real = img_fshift.real # Real part: (im_size1, im_size2) 268 | img_imag = img_fshift.imag # Imaginary part: (im_size1, im_size2) 269 | img_real_imag = np.dstack((img_real, img_imag)) # (im_size1, im_size2, 2) 270 | 271 | return img_real_imag 272 | 273 | 274 | def im_rotate(img, angle): 275 | """ Rotates an image by angle degrees 276 | :param img: input image 277 | :param angle: angle by which the image is rotated, in degrees 278 | :return: rotated image 279 | """ 280 | rows, cols = img.shape 281 | rotM = cv2.getRotationMatrix2D((cols/2-0.5, rows/2-0.5), angle, 1) 282 | imrotated = cv2.warpAffine(img, rotM, (cols, rows)) 283 | 284 | return imrotated 285 | 286 | 287 | ''' 288 | # For debugging: show the images and their frequency space 289 | 290 | dir_temp = 'path to folder with images' 291 | X, Y = load_images_from_folder(dir_temp, 5, normalize=False, imrotate=True) 292 | 293 | print(Y.shape) 294 | print(X.shape) 295 | 296 | 297 | plt.subplot(221), plt.imshow(Y[12, :, :], cmap='gray') 298 | plt.xticks([]), plt.yticks([]) 299 | plt.subplot(222), plt.imshow(Y[13, :, :], cmap='gray') 300 | plt.xticks([]), plt.yticks([]) 301 | plt.subplot(223), plt.imshow(Y[14, :, :], cmap='gray') 302 | plt.xticks([]), plt.yticks([]) 303 | plt.subplot(224), plt.imshow(Y[15, :, :], cmap='gray') 304 | plt.xticks([]), plt.yticks([]) 305 | plt.show() 306 | 307 | X_m = 20*np.log(np.sqrt(np.power(X[:, :, :, 0], 2) + 308 | np.power(X[:, :, :, 1], 2))) # Magnitude 309 | plt.subplot(221), plt.imshow(X_m[12, :, :], cmap='gray') 310 | plt.xticks([]), plt.yticks([]) 311 | plt.subplot(222), plt.imshow(X_m[13, :, :], cmap='gray') 312 | plt.xticks([]), plt.yticks([]) 313 | plt.subplot(223), plt.imshow(X_m[14, :, :], cmap='gray') 314 | plt.xticks([]), plt.yticks([]) 315 | plt.subplot(224), plt.imshow(X_m[15, :, :], cmap='gray') 316 | plt.xticks([]), plt.yticks([]) 317 | plt.show() 318 | ''' 319 | -------------------------------------------------------------------------------- /Other files/myAutomap.py: 
-------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.framework import ops 4 | import math 5 | import time 6 | from generate_input import load_images_from_folder 7 | 8 | 9 | # Load training data: 10 | tic1 = time.time() 11 | dir_train = 'path to the folder with images for training' # Folder with images 12 | n_im = 10000 # How many images to load 13 | X_train, Y_train = load_images_from_folder( # Load images for training 14 | dir_train, 15 | n_im, 16 | normalize=False, 17 | imrotate=True) 18 | toc1 = time.time() 19 | print('Time to load data = ', (toc1 - tic1)) 20 | print('X_train.shape at input = ', X_train.shape) 21 | print('Y_train.shape at input = ', Y_train.shape) 22 | 23 | 24 | def create_placeholders(n_H0, n_W0): 25 | """ Creates placeholders for x and y for tf.session 26 | :param n_H0: image height 27 | :param n_W0: image width 28 | :return: x and y - tf placeholders 29 | """ 30 | 31 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 32 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 33 | 34 | return x, y 35 | 36 | 37 | def initialize_parameters(): 38 | """ Initializes filters for the convolutional and de-convolutional layers 39 | :return: parameters - a dictionary of filters (W1 - first convolutional 40 | layer, W2 - second convolutional layer, W3 - de-convolutional layer 41 | """ 42 | 43 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 44 | initializer=tf.contrib.layers.xavier_initializer 45 | (seed=0)) 46 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 47 | initializer=tf.contrib.layers.xavier_initializer 48 | (seed=0)) 49 | W3 = tf.get_variable("W3", [7, 7, 1, 64], # 64 filters of size 7x7 50 | initializer=tf.contrib.layers.xavier_initializer 51 | (seed=0)) # conv2d_transpose 52 | 53 | parameters = {"W1": W1, 54 | "W2": W2, 55 | "W3": W3} 56 | 57 | return parameters 58 | 59 | 60 | def forward_propagation(x, parameters): 61 | """ Defines all layers for forward propagation: 62 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 63 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 64 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 65 | -> Convolutional -> ReLU activation with l1 regularization: size (n_im, n_H0, n_W0, 64) 66 | -> De-convolutional: size (n_im, n_H0, n_W0) 67 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 68 | :param parameters: parameters of the layers (e.g. 
filters) 69 | :return: output of the last layer of the neural network 70 | """ 71 | 72 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 73 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 74 | 75 | with tf.device('/gpu:0'): 76 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 77 | FC1 = tf.contrib.layers.fully_connected( 78 | x_temp, 79 | n_out, 80 | activation_fn=tf.tanh, 81 | normalizer_fn=None, 82 | normalizer_params=None, 83 | weights_initializer=tf.contrib.layers.xavier_initializer(), 84 | weights_regularizer=None, 85 | biases_initializer=None, 86 | biases_regularizer=None, 87 | reuse=tf.AUTO_REUSE, 88 | variables_collections=None, 89 | outputs_collections=None, 90 | trainable=True, 91 | scope='fc1') 92 | 93 | with tf.device('/cpu:0'): 94 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 95 | FC2 = tf.contrib.layers.fully_connected( 96 | FC1, 97 | n_out, 98 | activation_fn=tf.tanh, 99 | normalizer_fn=None, 100 | normalizer_params=None, 101 | weights_initializer=tf.contrib.layers.xavier_initializer(), 102 | weights_regularizer=None, 103 | biases_initializer=None, 104 | biases_regularizer=None, 105 | reuse=tf.AUTO_REUSE, 106 | variables_collections=None, 107 | outputs_collections=None, 108 | trainable=True, 109 | scope='fc2') 110 | 111 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 112 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 113 | 114 | # Retrieve the parameters from the dictionary "parameters": 115 | W1 = parameters['W1'] 116 | W2 = parameters['W2'] 117 | W3 = parameters['W3'] 118 | 119 | # CONV2D: filters W1, stride of 1, padding 'SAME' 120 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 121 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 122 | # RELU 123 | CONV1 = tf.nn.relu(Z1) 124 | 125 | # CONV2D: filters W2, stride 1, padding 'SAME' 126 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 127 | # Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 128 | # RELU 129 | # CONV2 = tf.nn.relu(Z2) 130 | CONV2 = tf.layers.conv2d( 131 | CONV1, 132 | filters=64, 133 | kernel_size=5, 134 | strides=(1, 1), 135 | padding='same', 136 | data_format='channels_last', 137 | dilation_rate=(1, 1), 138 | activation=tf.nn.relu, 139 | use_bias=True, 140 | kernel_initializer=None, 141 | bias_initializer=tf.zeros_initializer(), 142 | kernel_regularizer=tf.contrib.layers.l1_regularizer(0.0001), 143 | bias_regularizer=None, 144 | activity_regularizer=None, 145 | kernel_constraint=None, 146 | bias_constraint=None, 147 | trainable=True, 148 | name='conv2', 149 | reuse=tf.AUTO_REUSE) 150 | 151 | # DE-CONV2D: filters W3, stride 1, padding 'SAME' 152 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 153 | batch_size = tf.shape(x)[0] 154 | deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 155 | DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 156 | strides=[1, 1, 1, 1], padding='SAME') 157 | DECONV = tf.squeeze(DECONV) 158 | 159 | return DECONV 160 | 161 | 162 | def compute_cost(DECONV, Y): 163 | """ 164 | Computes cost (squared loss) between the output of forward propagation and 165 | the label image 166 | :param DECONV: output of forward propagation 167 | :param Y: label image 168 | :return: cost (squared loss) 169 | """ 170 | 171 | cost = tf.square(DECONV - Y) 172 | 173 | return cost 174 | 175 | 176 | def 
random_mini_batches(x, y, mini_batch_size=64, seed=0): 177 | """ Shuffles training examples and partitions them into mini-batches 178 | to speed up the gradient descent 179 | :param x: input frequency space data 180 | :param y: input image space data 181 | :param mini_batch_size: mini-batch size 182 | :param seed: can be chosen to keep the random choice consistent 183 | :return: a mini-batch of size mini_batch_size of training examples 184 | """ 185 | 186 | m = x.shape[0] # number of input images 187 | mini_batches = [] 188 | np.random.seed(seed) 189 | 190 | # Shuffle (x, y) 191 | permutation = list(np.random.permutation(m)) 192 | shuffled_X = x[permutation, :] 193 | shuffled_Y = y[permutation, :] 194 | 195 | # Partition (shuffled_X, shuffled_Y). Minus the end case. 196 | num_complete_minibatches = int(math.floor( 197 | m / mini_batch_size)) # number of mini batches of size mini_batch_size 198 | 199 | for k in range(0, num_complete_minibatches): 200 | mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size 201 | + mini_batch_size, :, :, :] 202 | mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size 203 | + mini_batch_size, :, :] 204 | mini_batch = (mini_batch_X, mini_batch_Y) 205 | mini_batches.append(mini_batch) 206 | 207 | # Handling the end case (last mini-batch < mini_batch_size) 208 | if m % mini_batch_size != 0: 209 | mini_batch_X = shuffled_X[num_complete_minibatches 210 | * mini_batch_size: m, :, :, :] 211 | mini_batch_Y = shuffled_Y[num_complete_minibatches 212 | * mini_batch_size: m, :, :] 213 | mini_batch = (mini_batch_X, mini_batch_Y) 214 | mini_batches.append(mini_batch) 215 | 216 | return mini_batches 217 | 218 | 219 | def model(X_train, Y_train, learning_rate=0.0001, 220 | num_epochs=100, minibatch_size=64, print_cost=True): 221 | """ Runs the forward and backward propagation 222 | :param X_train: input training frequency-space data 223 | :param Y_train: input training image-space data 224 | :param learning_rate: learning rate of gradient descent 225 | :param num_epochs: number of epochs 226 | :param minibatch_size: size of mini-batch 227 | :param print_cost: if True - the cost will be printed every epoch, as well 228 | as how long it took to run the epoch 229 | :return: this function saves the model to a file. 
The model can then 230 | be used to reconstruct the image from frequency space 231 | """ 232 | 233 | with tf.device('/gpu:0'): 234 | ops.reset_default_graph() # to not overwrite tf variables 235 | seed = 3 236 | (m, n_H0, n_W0, _) = X_train.shape 237 | 238 | # Create Placeholders 239 | X, Y = create_placeholders(n_H0, n_W0) 240 | 241 | # Initialize parameters 242 | parameters = initialize_parameters() 243 | 244 | # Build the forward propagation in the tf graph 245 | DECONV = forward_propagation(X, parameters) 246 | 247 | # Add cost function to tf graph 248 | cost = compute_cost(DECONV, Y) 249 | 250 | # Backpropagation 251 | optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost) 252 | 253 | # Initialize all the variables globally 254 | init = tf.global_variables_initializer() 255 | 256 | # Add ops to save and restore all the variables 257 | saver = tf.train.Saver() 258 | 259 | # For memory 260 | config = tf.ConfigProto() 261 | config.gpu_options.allow_growth = True 262 | 263 | # Memory config 264 | #config = tf.ConfigProto() 265 | #config.gpu_options.allow_growth = True 266 | config = tf.ConfigProto(log_device_placement=True) 267 | 268 | # Start the session to compute the tf graph 269 | with tf.Session(config=config) as sess: 270 | 271 | # Initialization 272 | sess.run(init) 273 | 274 | # Training loop 275 | for epoch in range(num_epochs): 276 | tic = time.time() 277 | 278 | minibatch_cost = 0. 279 | num_minibatches = int(m / minibatch_size) # number of minibatches 280 | seed += 1 281 | minibatches = random_mini_batches(X_train, Y_train, 282 | minibatch_size, seed) 283 | # Minibatch loop 284 | for minibatch in minibatches: 285 | # Select a minibatch 286 | (minibatch_X, minibatch_Y) = minibatch 287 | # Run the session to execute the optimizer and the cost 288 | _, temp_cost = sess.run( 289 | [optimizer, cost], 290 | feed_dict={X: minibatch_X, Y: minibatch_Y}) 291 | 292 | cost_mean = np.mean(temp_cost) / num_minibatches 293 | minibatch_cost += cost_mean 294 | 295 | # Print the cost every epoch 296 | if print_cost: 297 | toc = time.time() 298 | print ('EPOCH = ', epoch, 'COST = ', minibatch_cost, 'Elapsed time = ', (toc - tic)) 299 | 300 | # Save the variables to disk. 301 | save_path = saver.save(sess, "path to save model/model_name.ckpt") 302 | print("Model saved in file: %s" % save_path) 303 | 304 | sess.close() 305 | 306 | 307 | # Finally run the model! 
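
# Aside: compute_cost() above returns a per-pixel tensor rather than a scalar;
# RMSPropOptimizer.minimize() differentiates its implicit sum, and the training
# loop re-averages the printed value with np.mean. A minimal sketch of an
# explicitly scalar alternative (a hypothetical helper, not used below; it only
# rescales the gradients by a constant factor):
def compute_cost_scalar(deconv, y):
    """Scalar mean-squared error between reconstruction and label image."""
    return tf.reduce_mean(tf.square(deconv - y))
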
308 | model(X_train, Y_train, 309 | learning_rate=0.00002, 310 | num_epochs=30, 311 | minibatch_size=64, # should be smaller than the number of input examples 312 | print_cost=True) 313 | 314 | 315 | -------------------------------------------------------------------------------- /Other files/myAutomap_cduan.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.framework import ops 4 | import math 5 | import time 6 | import matplotlib.pyplot as plt 7 | from generate_input import load_images_from_folder 8 | 9 | # Load training data: 10 | tic1 = time.time() 11 | # Folder with images 12 | dir_train = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon" 13 | n_cases = (0,10) # load cases 0 through 9 14 | X_train, Y_train = load_images_from_folder( # Load images for training 15 | dir_train, 16 | n_cases, 17 | normalize=False, 18 | imrotate=False) 19 | toc1 = time.time() 20 | print('Time to load data = ', (toc1 - tic1)) 21 | print('X_train.shape at input = ', X_train.shape) 22 | print('Y_train.shape at input = ', Y_train.shape) 23 | 24 | 25 | def create_placeholders(n_H0, n_W0): 26 | """ Creates placeholders for x and y for tf.session 27 | :param n_H0: image height 28 | :param n_W0: image width 29 | :return: x and y - tf placeholders 30 | """ 31 | 32 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 33 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 34 | 35 | return x, y 36 | 37 | def initialize_parameters(): 38 | """ Initializes filters for the convolutional and de-convolutional layers 39 | :return: parameters - a dictionary of filters (W1 - first convolutional 40 | layer, W2 - second convolutional layer, W3 - de-convolutional layer) 41 | """ 42 | 43 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 44 | initializer=tf.contrib.layers.xavier_initializer 45 | (seed=0)) 46 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 47 | initializer=tf.contrib.layers.xavier_initializer 48 | (seed=0)) 49 | W3 = tf.get_variable("W3", [7, 7, 1, 64], # one 7x7 transpose-conv filter: 1 output, 64 input channels 50 | initializer=tf.contrib.layers.xavier_initializer 51 | (seed=0)) # conv2d_transpose 52 | 53 | parameters = {"W1": W1, 54 | "W2": W2, 55 | "W3": W3} 56 | 57 | return parameters 58 | 59 | 60 | def forward_propagation(x, parameters): 61 | """ Defines all layers for forward propagation: 62 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 63 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 64 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 65 | -> Convolutional -> ReLU activation with l1 regularization: size (n_im, n_H0, n_W0, 64) 66 | -> De-convolutional: size (n_im, n_H0, n_W0) 67 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 68 | :param parameters: parameters of the layers (e.g. 
filters) 69 | :return: output of the last layer of the neural network 70 | """ 71 | 72 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 73 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 74 | 75 | # with tf.device('/gpu:0'): 76 | with tf.device('/cpu:0'): 77 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 78 | FC1 = tf.contrib.layers.fully_connected( 79 | x_temp, 80 | n_out, 81 | activation_fn=tf.tanh, 82 | normalizer_fn=None, 83 | normalizer_params=None, 84 | weights_initializer=tf.contrib.layers.xavier_initializer(), 85 | weights_regularizer=None, 86 | biases_initializer=None, 87 | biases_regularizer=None, 88 | reuse=tf.AUTO_REUSE, 89 | variables_collections=None, 90 | outputs_collections=None, 91 | trainable=True, 92 | scope='fc1') 93 | 94 | with tf.device('/cpu:0'): 95 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 96 | FC2 = tf.contrib.layers.fully_connected( 97 | FC1, 98 | n_out, 99 | activation_fn=tf.tanh, 100 | normalizer_fn=None, 101 | normalizer_params=None, 102 | weights_initializer=tf.contrib.layers.xavier_initializer(), 103 | weights_regularizer=None, 104 | biases_initializer=None, 105 | biases_regularizer=None, 106 | reuse=tf.AUTO_REUSE, 107 | variables_collections=None, 108 | outputs_collections=None, 109 | trainable=True, 110 | scope='fc2') 111 | 112 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 113 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 114 | 115 | # Retrieve the parameters from the dictionary "parameters": 116 | W1 = parameters['W1'] 117 | W2 = parameters['W2'] 118 | W3 = parameters['W3'] 119 | 120 | # CONV2D: filters W1, stride of 1, padding 'SAME' 121 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 122 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 123 | # RELU 124 | CONV1 = tf.nn.relu(Z1) 125 | 126 | # CONV2D: filters W2, stride 1, padding 'SAME' 127 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 128 | # Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 129 | # # RELU 130 | # CONV2 = tf.nn.relu(Z2) 131 | CONV2 = tf.layers.conv2d( 132 | CONV1, 133 | filters=64, 134 | kernel_size=5, 135 | strides=(1, 1), 136 | padding='same', 137 | data_format='channels_last', 138 | dilation_rate=(1, 1), 139 | activation=tf.nn.relu, 140 | use_bias=True, 141 | kernel_initializer=None, 142 | bias_initializer=tf.zeros_initializer(), 143 | kernel_regularizer=tf.contrib.layers.l1_regularizer(0.0001), 144 | bias_regularizer=None, 145 | activity_regularizer=None, 146 | kernel_constraint=None, 147 | bias_constraint=None, 148 | trainable=True, 149 | name='conv2', 150 | reuse=tf.AUTO_REUSE) 151 | 152 | # DE-CONV2D: filters W3, stride 1, padding 'SAME' 153 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 154 | batch_size = tf.shape(x)[0] 155 | deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 156 | DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 157 | strides=[1, 1, 1, 1], padding='SAME') 158 | DECONV = tf.squeeze(DECONV) 159 | 160 | return DECONV 161 | 162 | 163 | def compute_cost(DECONV, Y): 164 | """ 165 | Computes cost (squared loss) between the output of forward propagation and 166 | the label image 167 | :param DECONV: output of forward propagation 168 | :param Y: label image 169 | :return: cost (squared loss) 170 | """ 171 | 172 | cost = tf.square(DECONV - Y) 173 | 174 | return 
cost 175 | 176 | 177 | def random_mini_batches(x, y, mini_batch_size=64, seed=0): 178 | """ Shuffles training examples and partitions them into mini-batches 179 | to speed up the gradient descent 180 | :param x: input frequency space data 181 | :param y: input image space data 182 | :param mini_batch_size: mini-batch size 183 | :param seed: can be chosen to keep the random choice consistent 184 | :return: a mini-batch of size mini_batch_size of training examples 185 | """ 186 | 187 | m = x.shape[0] # number of input images 188 | mini_batches = [] 189 | np.random.seed(seed) 190 | 191 | # Shuffle (x, y) 192 | permutation = list(np.random.permutation(m)) 193 | shuffled_X = x[permutation, :] 194 | shuffled_Y = y[permutation, :] 195 | 196 | # Partition (shuffled_X, shuffled_Y). Minus the end case. 197 | num_complete_minibatches = int(math.floor( 198 | m / mini_batch_size)) # number of mini batches of size mini_batch_size 199 | 200 | for k in range(0, num_complete_minibatches): 201 | mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size 202 | + mini_batch_size, :, :, :] 203 | mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size 204 | + mini_batch_size, :, :] 205 | mini_batch = (mini_batch_X, mini_batch_Y) 206 | mini_batches.append(mini_batch) 207 | 208 | # Handling the end case (last mini-batch < mini_batch_size) 209 | if m % mini_batch_size != 0: 210 | mini_batch_X = shuffled_X[num_complete_minibatches 211 | * mini_batch_size: m, :, :, :] 212 | mini_batch_Y = shuffled_Y[num_complete_minibatches 213 | * mini_batch_size: m, :, :] 214 | mini_batch = (mini_batch_X, mini_batch_Y) 215 | mini_batches.append(mini_batch) 216 | 217 | return mini_batches 218 | 219 | 220 | def model(X_train, Y_train, learning_rate=0.0001, 221 | num_epochs=100, minibatch_size=5, print_cost=True): 222 | """ Runs the forward and backward propagation 223 | :param X_train: input training frequency-space data 224 | :param Y_train: input training image-space data 225 | :param learning_rate: learning rate of gradient descent 226 | :param num_epochs: number of epochs 227 | :param minibatch_size: size of mini-batch 228 | :param print_cost: if True - the cost will be printed every epoch, as well 229 | as how long it took to run the epoch 230 | :return: this function saves the model to a file. 
The model can then 231 | be used to reconstruct the image from frequency space 232 | """ 233 | 234 | with tf.device('/cpu:0'): 235 | ops.reset_default_graph() # to not overwrite tf variables 236 | seed = 3 237 | (m, n_H0, n_W0, _) = X_train.shape 238 | 239 | # Create Placeholders 240 | X, Y = create_placeholders(n_H0, n_W0) 241 | 242 | # Initialize parameters 243 | parameters = initialize_parameters() 244 | 245 | # Build the forward propagation in the tf graph 246 | DECONV = forward_propagation(X, parameters) 247 | 248 | # Add cost function to tf graph 249 | cost = compute_cost(DECONV, Y) 250 | 251 | # Backpropagation 252 | optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost) 253 | 254 | # Initialize all the variables globally 255 | init = tf.global_variables_initializer() 256 | 257 | # Add ops to save and restore all the variables 258 | saver = tf.train.Saver(save_relative_paths=True) 259 | 260 | # For memory 261 | config = tf.ConfigProto() 262 | config.gpu_options.allow_growth = True 263 | 264 | # Memory config 265 | #config = tf.ConfigProto() 266 | #config.gpu_options.allow_growth = True 267 | config = tf.ConfigProto(log_device_placement=True) 268 | 269 | # Start the session to compute the tf graph 270 | with tf.Session(config=config) as sess: 271 | 272 | # Initialization 273 | sess.run(init) 274 | 275 | # Training loop 276 | for epoch in range(num_epochs): 277 | tic = time.time() 278 | 279 | minibatch_cost = 0. 280 | num_minibatches = int(m / minibatch_size) # number of minibatches 281 | seed += 1 282 | minibatches = random_mini_batches(X_train, Y_train, 283 | minibatch_size, seed) 284 | # Minibatch loop 285 | for minibatch in minibatches: 286 | # Select a minibatch 287 | (minibatch_X, minibatch_Y) = minibatch 288 | # Run the session to execute the optimizer and the cost 289 | _, temp_cost = sess.run( 290 | [optimizer, cost], 291 | feed_dict={X: minibatch_X, Y: minibatch_Y}) 292 | 293 | cost_mean = np.mean(temp_cost) / num_minibatches 294 | minibatch_cost += cost_mean 295 | 296 | # Print the cost every epoch 297 | if print_cost: 298 | toc = time.time() 299 | print ('EPOCH = ', epoch, 'COST = ', minibatch_cost, 'Elapsed time = ', (toc - tic)) 300 | 301 | # Save the variables to disk. 302 | save_path = saver.save(sess, './model/' + 'model.ckpt') 303 | print("Model saved in file: %s" % save_path) 304 | 305 | sess.close() 306 | 307 | 308 | # Finally run the model! 
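
# Aside: inside model() above, the later assignment
# config = tf.ConfigProto(log_device_placement=True) replaces the config
# object on which allow_growth was set, so GPU memory growth is silently
# dropped. A minimal sketch of one config carrying both options (assuming
# TF 1.x semantics):
#
#     config = tf.ConfigProto(log_device_placement=True)
#     config.gpu_options.allow_growth = True
#     with tf.Session(config=config) as sess:
#         ...
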
309 | model(X_train, Y_train, 310 | learning_rate=0.0001, 311 | num_epochs=200, 312 | minibatch_size=11, # should be smaller than the number of input examples 313 | print_cost=True) -------------------------------------------------------------------------------- /myAutomap_recon.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: utf-8 -*- 3 | """ 4 | Created on Mon Nov 5 13:04:46 2018 5 | 6 | @author: cduan 7 | """ 8 | import numpy as np 9 | import tensorflow as tf 10 | from tensorflow.python.framework import ops 11 | from matplotlib import pyplot as plt 12 | from generate_input import load_images_from_folder,load_STONE_data 13 | #from generate_input_motion import load_images_from_folder 14 | 15 | ## Load development/test data: 16 | #dir_dev = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon" 17 | ##n_im_dev = 60 # How many images to load 18 | ## Load images and create motion-corrupted frequency space 19 | ## No normalization or rotations: 20 | #n_cases = (0,1) # load image data from 0 to 2 21 | #X_dev, Y_dev = load_images_from_folder( # Load images for training 22 | # dir_dev, 23 | # n_cases, 24 | # normalize=False, 25 | # imrotate=False) 26 | #print('X_dev.shape at input = ', X_dev.shape) 27 | #print('Y_dev.shape at input = ', Y_dev.shape) 28 | 29 | 30 | # Load training data, cropped and resized from MATLAB 31 | # Folder with images 32 | dir_train = "/home/cduan/Documents/Dataset" 33 | n_cases = (76,77) # load one case (case 76) 34 | X_dev, Y_dev = load_STONE_data( # Load images for evaluation 35 | dir_train, 36 | n_cases, 37 | normalize=True, 38 | imrotate=False) 39 | print('X_dev.shape at input = ', X_dev.shape) 40 | print('Y_dev.shape at input = ', Y_dev.shape) 41 | 42 | 43 | ## Reduce precision point 44 | #X_dev = X_dev.astype(np.float32) 45 | #Y_dev = Y_dev.astype(np.float32) 46 | 47 | 48 | def create_placeholders(n_H0, n_W0): 49 | """ Creates placeholders for x and y for tf.session 50 | :param n_H0: image height 51 | :param n_W0: image width 52 | :return: x and y - tf placeholders 53 | """ 54 | 55 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 56 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 57 | 58 | return x, y 59 | 60 | 61 | def initialize_parameters(): 62 | """ Initializes filters for the convolutional and de-convolutional layers 63 | :return: parameters - a dictionary of filters (W1 - first convolutional 64 | layer, W2 - second convolutional layer, W3 - de-convolutional layer) 65 | """ 66 | 67 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 68 | initializer=tf.contrib.layers.xavier_initializer 69 | (seed=0)) 70 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 71 | initializer=tf.contrib.layers.xavier_initializer 72 | (seed=0)) 73 | W3 = tf.get_variable("W3", [7, 7, 64, 1], # one 7x7 filter over 64 input channels 74 | initializer=tf.contrib.layers.xavier_initializer 75 | (seed=0)) # set to std conv2d, Chong Duan 76 | 77 | parameters = {"W1": W1, 78 | "W2": W2, 79 | "W3": W3} 80 | 81 | return parameters 82 | 83 | 84 | def forward_propagation(x, parameters): 85 | """ Defines all layers for forward propagation: 86 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 87 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 88 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 89 | -> Convolutional -> ReLU activation: size (n_im, 
n_H0, n_W0, 64) 90 | -> De-convolutional: size (n_im, n_H0, n_W0) 91 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 92 | :param parameters: parameters of the layers (e.g. filters) 93 | :return: output of the last layer of the neural network 94 | """ 95 | 96 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 97 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 98 | 99 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 100 | FC1 = tf.contrib.layers.fully_connected( 101 | x_temp, 102 | n_out, 103 | activation_fn=tf.tanh, 104 | normalizer_fn=None, 105 | normalizer_params=None, 106 | weights_initializer=tf.contrib.layers.xavier_initializer(), 107 | weights_regularizer=None, 108 | biases_initializer=None, 109 | biases_regularizer=None, 110 | # reuse=True, 111 | reuse = tf.AUTO_REUSE, 112 | variables_collections=None, 113 | outputs_collections=None, 114 | trainable=True, 115 | scope='fc1') 116 | 117 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 118 | FC2 = tf.contrib.layers.fully_connected( 119 | FC1, 120 | n_out, 121 | activation_fn=tf.tanh, 122 | normalizer_fn=None, 123 | normalizer_params=None, 124 | weights_initializer=tf.contrib.layers.xavier_initializer(), 125 | weights_regularizer=None, 126 | biases_initializer=None, 127 | biases_regularizer=None, 128 | # reuse=True, 129 | reuse = tf.AUTO_REUSE, 130 | variables_collections=None, 131 | outputs_collections=None, 132 | trainable=True, 133 | scope='fc2') 134 | 135 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 136 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 137 | 138 | # Retrieve the parameters from the dictionary "parameters": 139 | W1 = parameters['W1'] 140 | W2 = parameters['W2'] 141 | W3 = parameters['W3'] 142 | 143 | # CONV2D: filters W1, stride of 1, padding 'SAME' 144 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 145 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 146 | # RELU 147 | CONV1 = tf.nn.relu(Z1) 148 | 149 | # CONV2D: filters W2, stride 1, padding 'SAME' 150 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 151 | Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 152 | # RELU 153 | CONV2 = tf.nn.relu(Z2) 154 | 155 | # # DE-CONV2D: filters W3, stride 1, padding 'SAME' 156 | # # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 157 | # batch_size = tf.shape(x)[0] 158 | # deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 159 | # DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 160 | # strides=[1, 1, 1, 1], padding='SAME') 161 | 162 | # # Use conv for the last layer, Chong Duan 163 | # Z2 = tf.nn.conv2d(CONV2, W3, strides=[1, 1, 1, 1], padding='SAME') 164 | # # RELU 165 | # CONV3 = tf.nn.relu(Z2) 166 | 167 | 168 | # Apply L1-norm on last hidden layer to the activation as described in the paper 169 | CONV3 = tf.layers.conv2d( 170 | CONV2, 171 | filters=1, 172 | kernel_size=7, 173 | strides=(1, 1), 174 | padding='same', 175 | data_format='channels_last', 176 | dilation_rate=(1, 1), 177 | activation=tf.nn.relu, 178 | use_bias=True, 179 | kernel_initializer=None, 180 | bias_initializer=tf.zeros_initializer(), 181 | kernel_regularizer=None, 182 | bias_regularizer=None, 183 | # activity_regularizer = None, 184 | activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001), 185 | kernel_constraint=None, 186 | 
bias_constraint=None, 187 | trainable=True, 188 | name='conv3', 189 | reuse=tf.AUTO_REUSE) 190 | 191 | DECONV = tf.squeeze(CONV3) 192 | 193 | return DECONV 194 | 195 | 196 | def model(X_dev): 197 | """ Runs the forward propagation to reconstruct images using trained model 198 | :param X_dev: input development frequency-space data 199 | :return: returns the image, reconstructed using a trained model 200 | """ 201 | with tf.device('/cpu:0'): 202 | ops.reset_default_graph() # to not overwrite tf variables 203 | (_, n_H0, n_W0, _) = X_dev.shape 204 | 205 | # Create Placeholders 206 | X, Y = create_placeholders(n_H0, n_W0) 207 | 208 | # Initialize parameters 209 | parameters = initialize_parameters() 210 | 211 | # Build the forward propagation in the tf graph 212 | forward_propagation(X, parameters) 213 | 214 | # Add ops to save and restore all the variables 215 | saver = tf.train.Saver() 216 | 217 | # Start the session to compute the tf graph 218 | with tf.Session() as sess: 219 | 220 | # Chong Duan 221 | ckpt = tf.train.get_checkpoint_state('./checkpoints/') 222 | saver.restore(sess, ckpt.model_checkpoint_path) 223 | 224 | print("Model restored") 225 | 226 | Y_recon_temp = forward_propagation(X, parameters) 227 | Y_recon = Y_recon_temp.eval({X: X_dev}) 228 | 229 | return parameters, Y_recon 230 | 231 | 232 | # Reconstruct the image using trained model 233 | _, Y_recon = model(X_dev) 234 | print('Y_recon.shape = ', Y_recon.shape) 235 | print('Y_dev.shape = ', Y_dev.shape) 236 | 237 | 238 | # Visualize the images, their reconstruction using iFFT and using trained model 239 | # 4 images to visualize: 240 | im1 = 15 241 | im2 = 16 242 | im3 = 17 243 | im4 = 18 244 | 245 | # iFFT back to image from corrupted frequency space 246 | # Complex image from real and imaginary part 247 | X_dev_compl = X_dev[:, :, :, 0] + X_dev[:, :, :, 1] * 1j 248 | 249 | #iFFT 250 | X_iFFT0 = np.fft.ifft2(X_dev_compl[im1, :, :]) 251 | X_iFFT1 = np.fft.ifft2(X_dev_compl[im2, :, :]) 252 | X_iFFT2 = np.fft.ifft2(X_dev_compl[im3, :, :]) 253 | X_iFFT3 = np.fft.ifft2(X_dev_compl[im4, :, :]) 254 | 255 | # Magnitude of complex image 256 | X_iFFT_M1 = np.sqrt(np.power(X_iFFT0.real, 2) 257 | + np.power(X_iFFT0.imag, 2)) 258 | X_iFFT_M2 = np.sqrt(np.power(X_iFFT1.real, 2) 259 | + np.power(X_iFFT1.imag, 2)) 260 | X_iFFT_M3 = np.sqrt(np.power(X_iFFT2.real, 2) 261 | + np.power(X_iFFT2.imag, 2)) 262 | X_iFFT_M4 = np.sqrt(np.power(X_iFFT3.real, 2) 263 | + np.power(X_iFFT3.imag, 2)) 264 | 265 | # SHOW 266 | # Show Y - input images 267 | plt.subplot(341), plt.imshow(Y_dev[im1, :, :], cmap='gray') 268 | plt.title('Y_dev1'), plt.xticks([]), plt.yticks([]) 269 | plt.subplot(342), plt.imshow(Y_dev[im2, :, :], cmap='gray') 270 | plt.title('Y_dev2'), plt.xticks([]), plt.yticks([]) 271 | plt.subplot(343), plt.imshow(Y_dev[im3, :, :], cmap='gray') 272 | plt.title('Y_dev3'), plt.xticks([]), plt.yticks([]) 273 | plt.subplot(344), plt.imshow(Y_dev[im4, :, :], cmap='gray') 274 | plt.title('Y_dev4'), plt.xticks([]), plt.yticks([]) 275 | 276 | # Show images reconstructed using iFFT 277 | plt.subplot(345), plt.imshow(X_iFFT_M1, cmap='gray') 278 | plt.title('X_iFFT1'), plt.xticks([]), plt.yticks([]) 279 | plt.subplot(346), plt.imshow(X_iFFT_M2, cmap='gray') 280 | plt.title('X_iFFT2'), plt.xticks([]), plt.yticks([]) 281 | plt.subplot(347), plt.imshow(X_iFFT_M3, cmap='gray') 282 | plt.title('X_iFFT3'), plt.xticks([]), plt.yticks([]) 283 | plt.subplot(348), plt.imshow(X_iFFT_M4, cmap='gray') 284 | plt.title('X_iFFT4'), plt.xticks([]), plt.yticks([]) 285 | 
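
# Aside: the sqrt(real**2 + imag**2) expressions above are the complex
# magnitude; np.abs on a complex array computes the same thing. A one-line
# sanity check (safe to remove):
assert np.allclose(X_iFFT_M1, np.abs(X_iFFT0))
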
286 | # Show images reconstructed using model 287 | plt.subplot(349), plt.imshow(Y_recon[im1, :, :], cmap='gray') 288 | plt.title('Y_recon1'), plt.xticks([]), plt.yticks([]) 289 | plt.subplot(3, 4, 10), plt.imshow(Y_recon[im2, :, :], cmap='gray') 290 | plt.title('Y_recon2'), plt.xticks([]), plt.yticks([]) 291 | plt.subplot(3, 4, 11), plt.imshow(Y_recon[im3, :, :], cmap='gray') 292 | plt.title('Y_recon3'), plt.xticks([]), plt.yticks([]) 293 | plt.subplot(3, 4, 12), plt.imshow(Y_recon[im4, :, :], cmap='gray') 294 | plt.title('Y_recon4'), plt.xticks([]), plt.yticks([]) 295 | plt.subplots_adjust(hspace=0.3) 296 | plt.show() 297 | 298 | # Chong Duan - Display results 299 | # Show X - input k-space 300 | plt.subplot(341), plt.imshow(np.abs(X_dev_compl[im1, :, :]), cmap='gray') 301 | plt.title('Input-im1'), plt.xticks([]), plt.yticks([]) 302 | plt.subplot(342), plt.imshow(np.abs(X_dev_compl[im2, :, :]), cmap='gray') 303 | plt.title('Input-im2'), plt.xticks([]), plt.yticks([]) 304 | plt.subplot(343), plt.imshow(np.abs(X_dev_compl[im3, :, :]), cmap='gray') 305 | plt.title('Input-im3'), plt.xticks([]), plt.yticks([]) 306 | plt.subplot(344), plt.imshow(np.abs(X_dev_compl[im4, :, :]), cmap='gray') 307 | plt.title('Input-im4'), plt.xticks([]), plt.yticks([]) 308 | 309 | # Show images reconstructed using iFFT 310 | plt.subplot(345), plt.imshow(X_iFFT_M1, cmap='gray') 311 | plt.title('iFFT_im1'), plt.xticks([]), plt.yticks([]) 312 | plt.subplot(346), plt.imshow(X_iFFT_M2, cmap='gray') 313 | plt.title('iFFT_im2'), plt.xticks([]), plt.yticks([]) 314 | plt.subplot(347), plt.imshow(X_iFFT_M3, cmap='gray') 315 | plt.title('iFFT_im3'), plt.xticks([]), plt.yticks([]) 316 | plt.subplot(348), plt.imshow(X_iFFT_M4, cmap='gray') 317 | plt.title('iFFT_im4'), plt.xticks([]), plt.yticks([]) 318 | 319 | # Show images reconstructed using model 320 | plt.subplot(349), plt.imshow(Y_recon[im1, :, :], cmap='gray') 321 | plt.title('Output-im1'), plt.xticks([]), plt.yticks([]) 322 | plt.subplot(3, 4, 10), plt.imshow(Y_recon[im2, :, :], cmap='gray') 323 | plt.title('Output-im2'), plt.xticks([]), plt.yticks([]) 324 | plt.subplot(3, 4, 11), plt.imshow(Y_recon[im3, :, :], cmap='gray') 325 | plt.title('Output-im3'), plt.xticks([]), plt.yticks([]) 326 | plt.subplot(3, 4, 12), plt.imshow(Y_recon[im4, :, :], cmap='gray') 327 | plt.title('Output-im4'), plt.xticks([]), plt.yticks([]) 328 | plt.subplots_adjust(hspace=0.3) 329 | plt.show() -------------------------------------------------------------------------------- /Other files/myAutomap_cduan_v2.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from tensorflow.python.framework import ops 4 | import math 5 | import time 6 | import matplotlib.pyplot as plt 7 | from generate_input import load_images_from_folder, load_STONE_data 8 | 9 | ## Load training data: 10 | #tic1 = time.time() 11 | ## Folder with images 12 | #dir_train = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon" 13 | #n_cases = (0,1) # load image data from 0 to 1 14 | #X_train, Y_train = load_images_from_folder( # Load images for training 15 | # dir_train, 16 | # n_cases, 17 | # normalize=False, 18 | # imrotate=False) 19 | #toc1 = time.time() 20 | #print('Time to load data = ', (toc1 - tic1)) 21 | #print('X_train.shape at input = ', X_train.shape) 22 | #print('Y_train.shape at input = ', Y_train.shape) 23 | 24 | 25 | # Load training data, cropped and resized from 
MATLAB 26 | tic1 = time.time() 27 | # Folder with images 28 | dir_train = "/home/chongduan/Documents/11_AUTOMAP/Dataset" 29 | n_cases = (0, 1) # load one case; load_STONE_data expects a (start, stop) tuple 30 | X_train, Y_train = load_STONE_data( # Load images for training 31 | dir_train, 32 | n_cases, 33 | normalize=False, 34 | imrotate=False) 35 | toc1 = time.time() 36 | print('Time to load data = ', (toc1 - tic1)) 37 | print('X_train.shape at input = ', X_train.shape) 38 | print('Y_train.shape at input = ', Y_train.shape) 39 | 40 | 41 | ## Reduce precision point 42 | #X_train = X_train.astype(np.float32) 43 | #Y_train = Y_train.astype(np.float32) 44 | 45 | 46 | def create_placeholders(n_H0, n_W0): 47 | """ Creates placeholders for x and y for tf.session 48 | :param n_H0: image height 49 | :param n_W0: image width 50 | :return: x and y - tf placeholders 51 | """ 52 | 53 | x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x') 54 | y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y') 55 | 56 | return x, y 57 | 58 | def initialize_parameters(): 59 | """ Initializes filters for the convolutional and de-convolutional layers 60 | :return: parameters - a dictionary of filters (W1 - first convolutional 61 | layer, W2 - second convolutional layer, W3 - de-convolutional layer) 62 | """ 63 | 64 | W1 = tf.get_variable("W1", [5, 5, 1, 64], # 64 filters of size 5x5 65 | initializer=tf.contrib.layers.xavier_initializer 66 | (seed=0)) 67 | W2 = tf.get_variable("W2", [5, 5, 64, 64], # 64 filters of size 5x5 68 | initializer=tf.contrib.layers.xavier_initializer 69 | (seed=0)) 70 | W3 = tf.get_variable("W3", [5, 5, 64, 1], # one 5x5 filter over 64 input channels 71 | initializer=tf.contrib.layers.xavier_initializer 72 | (seed=0)) # set to std conv2d, Chong Duan 73 | 74 | parameters = {"W1": W1, 75 | "W2": W2, 76 | "W3": W3} 77 | 78 | return parameters 79 | 80 | 81 | def forward_propagation(x, parameters): 82 | """ Defines all layers for forward propagation: 83 | Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0) 84 | -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0) 85 | -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64) 86 | -> Convolutional -> ReLU activation with l1 regularization: size (n_im, n_H0, n_W0, 64) 87 | -> De-convolutional: size (n_im, n_H0, n_W0) 88 | :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2) 89 | :param parameters: parameters of the layers (e.g. 
filters) 90 | :return: output of the last layer of the neural network 91 | """ 92 | 93 | x_temp = tf.contrib.layers.flatten(x) # size (n_im, n_H0 * n_W0 * 2) 94 | n_out = np.int(x.shape[1] * x.shape[2]) # size (n_im, n_H0 * n_W0) 95 | 96 | # with tf.device('/gpu:0'): 97 | with tf.device('/cpu:0'): 98 | # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0) 99 | FC1 = tf.contrib.layers.fully_connected( 100 | x_temp, 101 | n_out, 102 | activation_fn=tf.tanh, 103 | normalizer_fn=None, 104 | normalizer_params=None, 105 | weights_initializer=tf.contrib.layers.xavier_initializer(), 106 | weights_regularizer=None, 107 | biases_initializer=None, 108 | biases_regularizer=None, 109 | reuse=tf.AUTO_REUSE, 110 | variables_collections=None, 111 | outputs_collections=None, 112 | trainable=True, 113 | scope='fc1') 114 | 115 | with tf.device('/cpu:0'): 116 | # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0) 117 | FC2 = tf.contrib.layers.fully_connected( 118 | FC1, 119 | n_out, 120 | activation_fn=tf.tanh, 121 | normalizer_fn=None, 122 | normalizer_params=None, 123 | weights_initializer=tf.contrib.layers.xavier_initializer(), 124 | weights_regularizer=None, 125 | biases_initializer=None, 126 | biases_regularizer=None, 127 | reuse=tf.AUTO_REUSE, 128 | variables_collections=None, 129 | outputs_collections=None, 130 | trainable=True, 131 | scope='fc2') 132 | 133 | # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1): 134 | FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1]) 135 | 136 | # Retrieve the parameters from the dictionary "parameters": 137 | W1 = parameters['W1'] 138 | W2 = parameters['W2'] 139 | W3 = parameters['W3'] 140 | 141 | # CONV2D: filters W1, stride of 1, padding 'SAME' 142 | # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64) 143 | Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME') 144 | # RELU 145 | CONV1 = tf.nn.relu(Z1) 146 | 147 | # CONV2D: filters W2, stride 1, padding 'SAME' 148 | # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64) 149 | # Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME') 150 | # # RELU 151 | # CONV2 = tf.nn.relu(Z2) 152 | CONV2 = tf.layers.conv2d( 153 | CONV1, 154 | filters=64, 155 | kernel_size=5, 156 | strides=(1, 1), 157 | padding='same', 158 | data_format='channels_last', 159 | dilation_rate=(1, 1), 160 | activation=tf.nn.relu, 161 | use_bias=True, 162 | kernel_initializer=None, 163 | bias_initializer=tf.zeros_initializer(), 164 | kernel_regularizer=tf.contrib.layers.l1_regularizer(0.0001), 165 | bias_regularizer=None, 166 | activity_regularizer=None, 167 | kernel_constraint=None, 168 | bias_constraint=None, 169 | trainable=True, 170 | name='conv2', 171 | reuse=tf.AUTO_REUSE) 172 | 173 | # # DE-CONV2D: filters W3, stride 1, padding 'SAME' 174 | # # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1) 175 | # batch_size = tf.shape(x)[0] 176 | # deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1]) 177 | # DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape, 178 | # strides=[1, 1, 1, 1], padding='SAME') 179 | 180 | 181 | # Use conv for the last layer, Chong Duan 182 | Z2 = tf.nn.conv2d(CONV2, W3, strides=[1, 1, 1, 1], padding='SAME') 183 | # RELU 184 | CONV3 = tf.nn.relu(Z2) 185 | 186 | DECONV = tf.squeeze(CONV3) 187 | 188 | return DECONV 189 | 190 | 191 | def compute_cost(DECONV, Y): 192 | """ 193 | Computes cost (squared loss) between the output of forward propagation 
and 194 | the label image 195 | :param DECONV: output of forward propagation 196 | :param Y: label image 197 | :return: cost (squared loss) 198 | """ 199 | 200 | cost = tf.square(DECONV - Y) 201 | 202 | return cost 203 | 204 | 205 | def random_mini_batches(x, y, mini_batch_size=64, seed=0): 206 | """ Shuffles training examples and partitions them into mini-batches 207 | to speed up the gradient descent 208 | :param x: input frequency space data 209 | :param y: input image space data 210 | :param mini_batch_size: mini-batch size 211 | :param seed: can be chosen to keep the random choice consistent 212 | :return: a mini-batch of size mini_batch_size of training examples 213 | """ 214 | 215 | m = x.shape[0] # number of input images 216 | mini_batches = [] 217 | np.random.seed(seed) 218 | 219 | # Shuffle (x, y) 220 | permutation = list(np.random.permutation(m)) 221 | shuffled_X = x[permutation, :] 222 | shuffled_Y = y[permutation, :] 223 | 224 | # Partition (shuffled_X, shuffled_Y). Minus the end case. 225 | num_complete_minibatches = int(math.floor( 226 | m / mini_batch_size)) # number of mini batches of size mini_batch_size 227 | 228 | for k in range(0, num_complete_minibatches): 229 | mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size 230 | + mini_batch_size, :, :, :] 231 | mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size 232 | + mini_batch_size, :, :] 233 | mini_batch = (mini_batch_X, mini_batch_Y) 234 | mini_batches.append(mini_batch) 235 | 236 | # Handling the end case (last mini-batch < mini_batch_size) 237 | if m % mini_batch_size != 0: 238 | mini_batch_X = shuffled_X[num_complete_minibatches 239 | * mini_batch_size: m, :, :, :] 240 | mini_batch_Y = shuffled_Y[num_complete_minibatches 241 | * mini_batch_size: m, :, :] 242 | mini_batch = (mini_batch_X, mini_batch_Y) 243 | mini_batches.append(mini_batch) 244 | 245 | return mini_batches 246 | 247 | 248 | def model(X_train, Y_train, learning_rate=0.0001, 249 | num_epochs=100, minibatch_size=5, print_cost=True): 250 | """ Runs the forward and backward propagation 251 | :param X_train: input training frequency-space data 252 | :param Y_train: input training image-space data 253 | :param learning_rate: learning rate of gradient descent 254 | :param num_epochs: number of epochs 255 | :param minibatch_size: size of mini-batch 256 | :param print_cost: if True - the cost will be printed every epoch, as well 257 | as how long it took to run the epoch 258 | :return: this function saves the model to a file. 
205 | def random_mini_batches(x, y, mini_batch_size=64, seed=0):
206 |     """ Shuffles training examples and partitions them into mini-batches
207 |     to speed up the gradient descent
208 |     :param x: input frequency space data
209 |     :param y: input image space data
210 |     :param mini_batch_size: mini-batch size
211 |     :param seed: can be chosen to keep the random choice consistent
212 |     :return: a list of mini-batches of size mini_batch_size of training examples
213 |     """
214 | 
215 |     m = x.shape[0]  # number of input images
216 |     mini_batches = []
217 |     np.random.seed(seed)
218 | 
219 |     # Shuffle (x, y)
220 |     permutation = list(np.random.permutation(m))
221 |     shuffled_X = x[permutation, :]
222 |     shuffled_Y = y[permutation, :]
223 | 
224 |     # Partition (shuffled_X, shuffled_Y), leaving the remainder for the end case.
225 |     num_complete_minibatches = int(math.floor(
226 |         m / mini_batch_size))  # number of mini-batches of size mini_batch_size
227 | 
228 |     for k in range(0, num_complete_minibatches):
229 |         mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size
230 |                                   + mini_batch_size, :, :, :]
231 |         mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size
232 |                                   + mini_batch_size, :, :]
233 |         mini_batch = (mini_batch_X, mini_batch_Y)
234 |         mini_batches.append(mini_batch)
235 | 
236 |     # Handle the end case (last mini-batch < mini_batch_size)
237 |     if m % mini_batch_size != 0:
238 |         mini_batch_X = shuffled_X[num_complete_minibatches
239 |                                   * mini_batch_size: m, :, :, :]
240 |         mini_batch_Y = shuffled_Y[num_complete_minibatches
241 |                                   * mini_batch_size: m, :, :]
242 |         mini_batch = (mini_batch_X, mini_batch_Y)
243 |         mini_batches.append(mini_batch)
244 | 
245 |     return mini_batches
246 | 
247 | 
248 | def model(X_train, Y_train, learning_rate=0.0001,
249 |           num_epochs=100, minibatch_size=5, print_cost=True):
250 |     """ Runs the forward and backward propagation
251 |     :param X_train: input training frequency-space data
252 |     :param Y_train: input training image-space data
253 |     :param learning_rate: learning rate of gradient descent
254 |     :param num_epochs: number of epochs
255 |     :param minibatch_size: size of mini-batch
256 |     :param print_cost: if True, the cost is printed every epoch, along with
257 |         how long the epoch took to run
258 |     :return: None; this function saves the model to a file. The model can then
259 |         be used to reconstruct the image from frequency space
260 |     """
261 | 
262 |     with tf.device('/cpu:0'):
263 |         ops.reset_default_graph()  # to not overwrite tf variables
264 |         seed = 3
265 |         (m, n_H0, n_W0, _) = X_train.shape
266 | 
267 |         # Create Placeholders
268 |         X, Y = create_placeholders(n_H0, n_W0)
269 | 
270 |         # Initialize parameters
271 |         parameters = initialize_parameters()
272 | 
273 |         # Build the forward propagation in the tf graph
274 |         DECONV = forward_propagation(X, parameters)
275 | 
276 |         # Add cost function to tf graph
277 |         cost = compute_cost(DECONV, Y)
278 | 
279 |         # Backpropagation
280 |         optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
281 | 
282 |         # Initialize all the variables globally
283 |         init = tf.global_variables_initializer()
284 | 
285 |         # Add ops to save and restore all the variables
286 |         saver = tf.train.Saver(save_relative_paths=True)
287 | 
288 |     # Session configuration: log device placement (useful for verifying
289 |     # which ops run on which device) and allow GPU memory to grow as
290 |     # needed. Note that re-creating ConfigProto would silently discard
291 |     # the allow_growth setting, so both options are set on one object.
292 |     config = tf.ConfigProto(log_device_placement=True)
293 |     config.gpu_options.allow_growth = True
294 | 
295 | 
296 | 
297 |     # Start the session to compute the tf graph
298 |     with tf.Session(config=config) as sess:
299 | 
300 |         # Initialization
301 |         sess.run(init)
302 | 
303 |         # Training loop
304 |         learning_curve = []
305 |         for epoch in range(num_epochs):
306 |             tic = time.time()
307 | 
308 |             minibatch_cost = 0.
309 |             num_minibatches = int(m / minibatch_size)  # number of minibatches
310 |             seed += 1
311 |             minibatches = random_mini_batches(X_train, Y_train,
312 |                                               minibatch_size, seed)
313 |             # Minibatch loop
314 |             for minibatch in minibatches:
315 |                 # Select a minibatch
316 |                 (minibatch_X, minibatch_Y) = minibatch
317 |                 # Run the session to execute the optimizer and the cost
318 |                 _, temp_cost = sess.run(
319 |                     [optimizer, cost],
320 |                     feed_dict={X: minibatch_X, Y: minibatch_Y})
321 | 
322 |                 cost_mean = np.mean(temp_cost) / num_minibatches
323 |                 minibatch_cost += cost_mean
324 | 
325 |             # Print the cost every epoch
326 |             learning_curve.append(minibatch_cost)
327 |             if print_cost:
328 |                 toc = time.time()
329 |                 print('EPOCH =', epoch, 'COST =', minibatch_cost, 'Elapsed time =', (toc - tic))
330 | 
331 |         # Save the variables to disk.
332 |         save_path = saver.save(sess, './model/' + 'model.ckpt')
333 |         print("Model saved in file: %s" % save_path)
334 | 
335 |         # Plot learning curve
336 |         plt.plot(learning_curve)
337 |         plt.title('Learning Curve')
338 |         plt.xlabel('Epoch')
339 |         plt.ylabel('Cost')
340 |         plt.show()
341 | 
342 |         # Close sess
343 |         sess.close()  # redundant inside a with-block, but harmless
344 | 
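# Editor's sketch: once model() has written './model/model.ckpt', the trained
# graph can be rebuilt and restored in a fresh session and run on new k-space
# data. X_new is a hypothetical array of shape (n, n_H0, n_W0, 2); everything
# else reuses the functions defined above.
def reconstruct_sketch(X_new):
    ops.reset_default_graph()
    x, _ = create_placeholders(X_new.shape[1], X_new.shape[2])
    output = forward_propagation(x, initialize_parameters())
    with tf.Session() as sess:
        tf.train.Saver().restore(sess, './model/model.ckpt')
        return sess.run(output, feed_dict={x: X_new})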
345 | # Finally run the model!
346 | model(X_train, Y_train,
347 |       learning_rate=0.00002,
348 |       num_epochs=500,
349 |       minibatch_size=10,  # should be smaller than the number of input examples
350 |       print_cost=True)
--------------------------------------------------------------------------------
/myAutomap_gpu.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python3
2 | # -*- coding: utf-8 -*-
3 | """
4 | Created on Mon Nov 5 13:04:46 2018
5 | 
6 | @author: cduan
7 | """
8 | import numpy as np
9 | import tensorflow as tf
10 | from tensorflow.python.framework import ops
11 | import math
12 | import time
13 | import matplotlib.pyplot as plt
14 | from generate_input import load_STONE_data
15 | 
16 | 
17 | # Load training data, cropped and resized from MATLAB
18 | tic1 = time.time()
19 | # Folder with images
20 | dir_train = "/home/chongduan/Documents/Automap-MRI/Dataset"
21 | n_cases = (0, 3)  # load 3 cases
22 | X_train, Y_train = load_STONE_data(  # Load images for training
23 |     dir_train,
24 |     n_cases,
25 |     normalize=True,
26 |     imrotate=True,
27 |     motion=True)
28 | toc1 = time.time()
29 | print('Time to load data = ', (toc1 - tic1))
30 | print('X_train.shape at input = ', X_train.shape)
31 | print('Y_train.shape at input = ', Y_train.shape)
32 | 
33 | 
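# Editor's sketch of the assumed input encoding: the placeholders below take
# k-space as two channels. Assuming these are the real and imaginary parts
# (which is how the rest of the code treats them), a single magnitude image
# `img` would be converted roughly like this:
def to_kspace_channels(img):
    k = np.fft.fft2(img)  # 2D discrete Fourier transform of the image
    return np.stack([k.real, k.imag], axis=-1)  # shape (n_H0, n_W0, 2)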
34 | def create_placeholders(n_H0, n_W0):
35 |     """ Creates placeholders for x and y for tf.session
36 |     :param n_H0: image height
37 |     :param n_W0: image width
38 |     :return: x and y - tf placeholders
39 |     """
40 | 
41 |     x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x')
42 |     y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y')
43 | 
44 |     return x, y
45 | 
46 | def initialize_parameters():
47 |     """ Initializes filters for the convolutional layers
48 |     :return: parameters - a dictionary of filters (W1 - first convolutional
49 |     layer, W2 - second convolutional layer, W3 - final convolutional layer)
50 |     """
51 | 
52 |     W1 = tf.get_variable("W1", [5, 5, 1, 64],  # 64 filters of size 5x5
53 |                          initializer=tf.contrib.layers.xavier_initializer
54 |                          (seed=0))
55 |     W2 = tf.get_variable("W2", [5, 5, 64, 64],  # 64 filters of size 5x5
56 |                          initializer=tf.contrib.layers.xavier_initializer
57 |                          (seed=0))
58 |     W3 = tf.get_variable("W3", [7, 7, 64, 1],  # 1 filter of size 7x7 (64 input channels)
59 |                          initializer=tf.contrib.layers.xavier_initializer
60 |                          (seed=0))  # set to std conv2d, Chong Duan
61 | 
62 |     parameters = {"W1": W1,
63 |                   "W2": W2,
64 |                   "W3": W3}
65 | 
66 |     return parameters
67 | 
68 | 
69 | def forward_propagation(x, parameters):
70 |     """ Defines all layers for forward propagation:
71 |     Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0)
72 |     -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0)
73 |     -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64)
74 |     -> Convolutional -> ReLU activation with l1 regularization: size (n_im, n_H0, n_W0, 64)
75 |     -> Final convolution (1 filter): size (n_im, n_H0, n_W0)
76 |     :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2)
77 |     :param parameters: parameters of the layers (e.g. filters)
78 |     :return: output of the last layer of the neural network
79 |     """
80 | 
81 |     x_temp = tf.contrib.layers.flatten(x)  # size (n_im, n_H0 * n_W0 * 2)
82 |     n_out = int(x.shape[1] * x.shape[2])  # size (n_im, n_H0 * n_W0)
83 | 
84 |     with tf.device('/gpu:0'):
85 |     # with tf.device('/cpu:0'):
86 |         # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0)
87 |         FC1 = tf.contrib.layers.fully_connected(
88 |             x_temp,
89 |             n_out,
90 |             activation_fn=tf.tanh,
91 |             normalizer_fn=None,
92 |             normalizer_params=None,
93 |             weights_initializer=tf.contrib.layers.xavier_initializer(),
94 |             weights_regularizer=None,
95 |             biases_initializer=None,
96 |             biases_regularizer=None,
97 |             reuse=tf.AUTO_REUSE,
98 |             variables_collections=None,
99 |             outputs_collections=None,
100 |             trainable=True,
101 |             scope='fc1')
102 | 
103 |     with tf.device('/gpu:0'):
104 |         # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0)
105 |         FC2 = tf.contrib.layers.fully_connected(
106 |             FC1,
107 |             n_out,
108 |             activation_fn=tf.tanh,
109 |             normalizer_fn=None,
110 |             normalizer_params=None,
111 |             weights_initializer=tf.contrib.layers.xavier_initializer(),
112 |             weights_regularizer=None,
113 |             biases_initializer=None,
114 |             biases_regularizer=None,
115 |             reuse=tf.AUTO_REUSE,
116 |             variables_collections=None,
117 |             outputs_collections=None,
118 |             trainable=True,
119 |             scope='fc2')
120 | 
121 |     # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1):
122 |     FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1])
123 | 
124 |     # Retrieve the parameters from the dictionary "parameters":
125 |     W1 = parameters['W1']
126 |     W2 = parameters['W2']
127 |     W3 = parameters['W3']
128 | 
129 |     # CONV2D: filters W1, stride of 1, padding 'SAME'
130 |     # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64)
131 |     Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME')
132 |     # RELU
133 |     CONV1 = tf.nn.relu(Z1)
134 | 
135 |     # CONV2D: filters W2, stride 1, padding 'SAME'
136 |     # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64)
137 |     Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME')
138 |     # RELU
139 |     CONV2 = tf.nn.relu(Z2)
140 | 
141 |     # CONV2 = tf.layers.conv2d(
142 |     #     CONV1,
143 |     #     filters=64,
144 |     #     kernel_size=5,
145 |     #     strides=(1, 1),
146 |     #     padding='same',
147 |     #     data_format='channels_last',
148 |     #     dilation_rate=(1, 1),
149 |     #     activation=tf.nn.relu,
150 |     #     use_bias=True,
151 |     #     kernel_initializer=None,
152 |     #     bias_initializer=tf.zeros_initializer(),
153 |     #     kernel_regularizer=None,
154 |     #     bias_regularizer=None,
155 |     #     activity_regularizer=None,
156 |     #     # activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001),
157 |     #     kernel_constraint=None,
158 |     #     bias_constraint=None,
159 |     #     trainable=True,
160 |     #     name='conv2',
161 |     #     reuse=tf.AUTO_REUSE)
162 | 
163 |     # # DE-CONV2D: filters W3, stride 1, padding 'SAME'
164 |     # # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1)
165 |     # batch_size = tf.shape(x)[0]
166 |     # deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1])
167 |     # DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape,
168 |     #                                 strides=[1, 1, 1, 1], padding='SAME')
169 | 
170 | 
171 |     # # Use conv for the last layer, Chong Duan
172 |     # Z2 = tf.nn.conv2d(CONV2, W3, strides=[1, 1, 1, 1], padding='SAME')
173 |     # # RELU
174 |     # CONV3 = tf.nn.relu(Z2)
175 | 
176 |     # Apply an L1 penalty to the activations of the last hidden layer, as described in the paper
177 |     CONV3 = tf.layers.conv2d(
178 |         CONV2,
179 |         filters=1,
180 |         kernel_size=7,
181 |         strides=(1, 1),
182 |         padding='same',
183 |         data_format='channels_last',
184 |         dilation_rate=(1, 1),
185 |         activation=tf.nn.relu,
186 |         use_bias=True,
187 |         kernel_initializer=None,
188 |         bias_initializer=tf.zeros_initializer(),
189 |         kernel_regularizer=None,
190 |         bias_regularizer=None,
191 |         # activity_regularizer=None,
192 |         activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001),
193 |         kernel_constraint=None,
194 |         bias_constraint=None,
195 |         trainable=True,
196 |         name='conv3',
197 |         reuse=tf.AUTO_REUSE)
198 | 
199 |     DECONV = tf.squeeze(CONV3)
200 | 
201 |     return DECONV
202 | 
203 | 
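# Note (editor's sketch): tf.layers only *collects* activity_regularizer
# penalties in tf.GraphKeys.REGULARIZATION_LOSSES; they are not applied
# unless added to the training loss. Wiring the L1 term into the cost would
# look roughly like:
#
#     reg_loss = tf.losses.get_regularization_loss()
#     total_cost = tf.reduce_mean(tf.square(DECONV - Y)) + reg_loss
#     optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(total_cost)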
204 | def compute_cost(DECONV, Y):
205 |     """
206 |     Computes cost (squared loss) between the output of forward propagation and
207 |     the label image
208 |     :param DECONV: output of forward propagation
209 |     :param Y: label image
210 |     :return: cost (squared loss)
211 |     """
212 | 
213 |     cost = tf.square(DECONV - Y)  # element-wise squared error
214 | 
215 |     return cost
216 | 
217 | 
218 | def random_mini_batches(x, y, mini_batch_size=64, seed=0):
219 |     """ Shuffles training examples and partitions them into mini-batches
220 |     to speed up the gradient descent
221 |     :param x: input frequency space data
222 |     :param y: input image space data
223 |     :param mini_batch_size: mini-batch size
224 |     :param seed: can be chosen to keep the random choice consistent
225 |     :return: a list of mini-batches of size mini_batch_size of training examples
226 |     """
227 | 
228 |     m = x.shape[0]  # number of input images
229 |     mini_batches = []
230 |     np.random.seed(seed)
231 | 
232 |     # Shuffle (x, y)
233 |     permutation = list(np.random.permutation(m))
234 |     shuffled_X = x[permutation, :]
235 |     shuffled_Y = y[permutation, :]
236 | 
237 |     # Partition (shuffled_X, shuffled_Y), leaving the remainder for the end case.
238 |     num_complete_minibatches = int(math.floor(
239 |         m / mini_batch_size))  # number of mini-batches of size mini_batch_size
240 | 
241 |     for k in range(0, num_complete_minibatches):
242 |         mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size
243 |                                   + mini_batch_size, :, :, :]
244 |         mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size
245 |                                   + mini_batch_size, :, :]
246 |         mini_batch = (mini_batch_X, mini_batch_Y)
247 |         mini_batches.append(mini_batch)
248 | 
249 |     # Handle the end case (last mini-batch < mini_batch_size)
250 |     if m % mini_batch_size != 0:
251 |         mini_batch_X = shuffled_X[num_complete_minibatches
252 |                                   * mini_batch_size: m, :, :, :]
253 |         mini_batch_Y = shuffled_Y[num_complete_minibatches
254 |                                   * mini_batch_size: m, :, :]
255 |         mini_batch = (mini_batch_X, mini_batch_Y)
256 |         mini_batches.append(mini_batch)
257 | 
258 |     return mini_batches
259 | 
260 | 
261 | def model(X_train, Y_train, learning_rate=0.0001,
262 |           num_epochs=100, minibatch_size=5, print_cost=True):
263 |     """ Runs the forward and backward propagation
264 |     :param X_train: input training frequency-space data
265 |     :param Y_train: input training image-space data
266 |     :param learning_rate: learning rate of gradient descent
267 |     :param num_epochs: number of epochs
268 |     :param minibatch_size: size of mini-batch
269 |     :param print_cost: if True, the cost is printed every epoch, along with
270 |         how long the epoch took to run
271 |     :return: None; this function saves the model to a file. The model can then
272 |         be used to reconstruct the image from frequency space
273 |     """
274 | 
275 |     with tf.device('/gpu:0'):
276 |         ops.reset_default_graph()  # to not overwrite tf variables
277 |         seed = 3
278 |         (m, n_H0, n_W0, _) = X_train.shape
279 | 
280 |         # Create Placeholders
281 |         X, Y = create_placeholders(n_H0, n_W0)
282 | 
283 |         # Initialize parameters
284 |         parameters = initialize_parameters()
285 | 
286 |         # Build the forward propagation in the tf graph
287 |         DECONV = forward_propagation(X, parameters)
288 | 
289 |         # Add cost function to tf graph
290 |         cost = compute_cost(DECONV, Y)
291 | 
292 |         # # Backpropagation
293 |         # optimizer = tf.train.RMSPropOptimizer(learning_rate,
294 |         #                                       decay=0.9,
295 |         #                                       momentum=0.0).minimize(cost)
296 | 
297 |         # Backpropagation
298 |         # Add global_step variable for saving training models - Chong Duan
299 |         my_global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
300 | 
301 |         optimizer = tf.train.RMSPropOptimizer(learning_rate,
302 |                                               decay=0.9,
303 |                                               momentum=0.0).minimize(cost, global_step=my_global_step)
304 | 
305 |         # Initialize all the variables globally
306 |         init = tf.global_variables_initializer()
307 | 
308 |         # Add ops to save and restore all the variables
309 |         saver = tf.train.Saver(save_relative_paths=True)
310 | 
311 |     # Session configuration: log device placement (useful for verifying
312 |     # which ops run on which device) and allow GPU memory to grow as
313 |     # needed. Note that re-creating ConfigProto would silently discard
314 |     # the allow_growth setting, so both options are set on one object.
315 |     config = tf.ConfigProto(log_device_placement=True)
316 |     config.gpu_options.allow_growth = True
317 | 
318 | 
319 | 
320 |     # Start the session to compute the tf graph
321 |     with tf.Session(config=config) as sess:
322 | 
323 |         # Initialization
324 |         sess.run(init)
325 | 
326 |         # Training loop
327 |         learning_curve = []
328 |         for epoch in range(num_epochs):
329 |             tic = time.time()
330 | 
331 |             minibatch_cost = 0.
332 |             num_minibatches = int(m / minibatch_size)  # number of minibatches
333 |             seed += 1
334 |             minibatches = random_mini_batches(X_train, Y_train,
335 |                                               minibatch_size, seed)
336 |             # Minibatch loop
337 |             for minibatch in minibatches:
338 |                 # Select a minibatch
339 |                 (minibatch_X, minibatch_Y) = minibatch
340 |                 # Run the session to execute the optimizer and the cost
341 |                 _, temp_cost = sess.run(
342 |                     [optimizer, cost],
343 |                     feed_dict={X: minibatch_X, Y: minibatch_Y})
344 | 
345 |                 cost_mean = np.mean(temp_cost) / num_minibatches
346 |                 minibatch_cost += cost_mean
347 | 
348 |             # Print the cost every epoch
349 |             learning_curve.append(minibatch_cost)
350 |             if print_cost:
351 |                 toc = time.time()
352 |                 print('EPOCH =', epoch, 'COST =', minibatch_cost, 'Elapsed time =', (toc - tic))
353 | 
354 |             if (epoch + 1) % 100 == 0:
355 |                 save_path = saver.save(sess, '../checkpoints/model.ckpt', global_step=my_global_step)
356 |                 print("Model saved in file: %s" % save_path)
357 | 
358 | 
359 |         # # Save the variables to disk.
360 |         # save_path = saver.save(sess, './model/' + 'model.ckpt')
361 |         # print("Model saved in file: %s" % save_path)
362 | 
363 |         # Plot learning curve
364 |         plt.plot(learning_curve)
365 |         plt.title('Learning Curve')
366 |         plt.xlabel('Epoch')
367 |         plt.ylabel('Cost')
368 |         plt.show()
369 | 
370 |         # Close sess
371 |         sess.close()  # redundant inside a with-block, but harmless
372 | 
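# Editor's sketch: the checkpoints above are written as
# '../checkpoints/model.ckpt-<global_step>' every 100 epochs; the most
# recent one can be located and restored with
#
#     ckpt = tf.train.latest_checkpoint('../checkpoints')
#     saver.restore(sess, ckpt)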
373 | # Finally run the model!
374 | model(X_train, Y_train,
375 |       # learning_rate=0.00002,
376 |       learning_rate=0.0001,
377 |       num_epochs=2000,
378 |       minibatch_size=66,  # should be smaller than the number of input examples
379 |       print_cost=True)
380 | 
--------------------------------------------------------------------------------
/myAutomap_cpu.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from tensorflow.python.framework import ops
4 | import math
5 | import time
6 | import matplotlib.pyplot as plt
7 | from generate_input import load_images_from_folder, load_STONE_data
8 | 
9 | ## Load training data:
10 | #tic1 = time.time()
11 | ## Folder with images
12 | #dir_train = "/home/chongduan/Documents/6_DeepLearning_CMR-FT_Strain/Deep-MRI-Reconstruction-master/load_raw_T1_Map_data/sense_recon"
13 | #n_cases = (0,1) # load image data from 0 to 1
14 | #X_train, Y_train = load_images_from_folder( # Load images for training
15 | #    dir_train,
16 | #    n_cases,
17 | #    normalize=False,
18 | #    imrotate=False)
19 | #toc1 = time.time()
20 | #print('Time to load data = ', (toc1 - tic1))
21 | #print('X_train.shape at input = ', X_train.shape)
22 | #print('Y_train.shape at input = ', Y_train.shape)
23 | 
24 | 
25 | # Load training data, cropped and resized from MATLAB
26 | tic1 = time.time()
27 | # Folder with images
28 | dir_train = "/home/mnezafat/Documents/11_AUTOMAP/Dataset"
29 | n_cases = (0, 70)  # load 70 cases
30 | X_train, Y_train = load_STONE_data(  # Load images for training
31 |     dir_train,
32 |     n_cases,
33 |     normalize=False,
34 |     imrotate=False)
35 | toc1 = time.time()
36 | print('Time to load data = ', (toc1 - tic1))
37 | print('X_train.shape at input = ', X_train.shape)
38 | print('Y_train.shape at input = ', Y_train.shape)
39 | 
40 | 
41 | ## Reduce precision to float32
42 | #X_train = X_train.astype(np.float32)
43 | #Y_train = Y_train.astype(np.float32)
44 | 
45 | 
46 | def create_placeholders(n_H0, n_W0):
47 |     """ Creates placeholders for x and y for tf.session
48 |     :param n_H0: image height
49 |     :param n_W0: image width
50 |     :return: x and y - tf placeholders
51 |     """
52 | 
53 |     x = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0, 2], name='x')
54 |     y = tf.placeholder(tf.float32, shape=[None, n_H0, n_W0], name='y')
55 | 
56 |     return x, y
57 | 
58 | def initialize_parameters():
59 |     """ Initializes filters for the convolutional layers
60 |     :return: parameters - a dictionary of filters (W1 - first convolutional
61 |     layer, W2 - second convolutional layer, W3 - final convolutional layer)
62 |     """
63 | 
64 |     W1 = tf.get_variable("W1", [5, 5, 1, 64],  # 64 filters of size 5x5
65 |                          initializer=tf.contrib.layers.xavier_initializer
66 |                          (seed=0))
67 |     W2 = tf.get_variable("W2", [5, 5, 64, 64],  # 64 filters of size 5x5
68 |                          initializer=tf.contrib.layers.xavier_initializer
69 |                          (seed=0))
70 |     W3 = tf.get_variable("W3", [7, 7, 64, 1],  # 1 filter of size 7x7 (64 input channels)
71 |                          initializer=tf.contrib.layers.xavier_initializer
72 |                          (seed=0))  # set to std conv2d, Chong Duan
73 | 
74 |     parameters = {"W1": W1,
75 |                   "W2": W2,
76 |                   "W3": W3}
77 | 
78 |     return parameters
79 | 
80 | 
81 | def forward_propagation(x, parameters):
82 |     """ Defines all layers for forward propagation:
83 |     Fully connected (FC1) -> tanh activation: size (n_im, n_H0 * n_W0)
84 |     -> Fully connected (FC2) -> tanh activation: size (n_im, n_H0 * n_W0)
85 |     -> Convolutional -> ReLU activation: size (n_im, n_H0, n_W0, 64)
86 |     -> Convolutional -> ReLU activation (l1 regularization disabled in this version): size (n_im,
n_H0, n_W0, 64)
87 |     -> Final convolution (1 filter): size (n_im, n_H0, n_W0)
88 |     :param x: Input - images in frequency space, size (n_im, n_H0, n_W0, 2)
89 |     :param parameters: parameters of the layers (e.g. filters)
90 |     :return: output of the last layer of the neural network
91 |     """
92 | 
93 |     x_temp = tf.contrib.layers.flatten(x)  # size (n_im, n_H0 * n_W0 * 2)
94 |     n_out = int(x.shape[1] * x.shape[2])  # size (n_im, n_H0 * n_W0)
95 | 
96 |     # with tf.device('/gpu:0'):
97 |     with tf.device('/cpu:0'):
98 |         # FC: input size (n_im, n_H0 * n_W0 * 2), output size (n_im, n_H0 * n_W0)
99 |         FC1 = tf.contrib.layers.fully_connected(
100 |             x_temp,
101 |             n_out,
102 |             activation_fn=tf.tanh,
103 |             normalizer_fn=None,
104 |             normalizer_params=None,
105 |             weights_initializer=tf.contrib.layers.xavier_initializer(),
106 |             weights_regularizer=None,
107 |             biases_initializer=None,
108 |             biases_regularizer=None,
109 |             reuse=tf.AUTO_REUSE,
110 |             variables_collections=None,
111 |             outputs_collections=None,
112 |             trainable=True,
113 |             scope='fc1')
114 | 
115 |     with tf.device('/cpu:0'):
116 |         # FC: input size (n_im, n_H0 * n_W0), output size (n_im, n_H0 * n_W0)
117 |         FC2 = tf.contrib.layers.fully_connected(
118 |             FC1,
119 |             n_out,
120 |             activation_fn=tf.tanh,
121 |             normalizer_fn=None,
122 |             normalizer_params=None,
123 |             weights_initializer=tf.contrib.layers.xavier_initializer(),
124 |             weights_regularizer=None,
125 |             biases_initializer=None,
126 |             biases_regularizer=None,
127 |             reuse=tf.AUTO_REUSE,
128 |             variables_collections=None,
129 |             outputs_collections=None,
130 |             trainable=True,
131 |             scope='fc2')
132 | 
133 |     # Reshape output from FC layers into array of size (n_im, n_H0, n_W0, 1):
134 |     FC_M = tf.reshape(FC2, [tf.shape(x)[0], tf.shape(x)[1], tf.shape(x)[2], 1])
135 | 
136 |     # Retrieve the parameters from the dictionary "parameters":
137 |     W1 = parameters['W1']
138 |     W2 = parameters['W2']
139 |     W3 = parameters['W3']
140 | 
141 |     # CONV2D: filters W1, stride of 1, padding 'SAME'
142 |     # Input size (n_im, n_H0, n_W0, 1), output size (n_im, n_H0, n_W0, 64)
143 |     Z1 = tf.nn.conv2d(FC_M, W1, strides=[1, 1, 1, 1], padding='SAME')
144 |     # RELU
145 |     CONV1 = tf.nn.relu(Z1)
146 | 
147 |     # CONV2D: filters W2, stride 1, padding 'SAME'
148 |     # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 64)
149 |     Z2 = tf.nn.conv2d(CONV1, W2, strides=[1, 1, 1, 1], padding='SAME')
150 |     # RELU
151 |     CONV2 = tf.nn.relu(Z2)
152 | 
153 |     # CONV2 = tf.layers.conv2d(
154 |     #     CONV1,
155 |     #     filters=64,
156 |     #     kernel_size=5,
157 |     #     strides=(1, 1),
158 |     #     padding='same',
159 |     #     data_format='channels_last',
160 |     #     dilation_rate=(1, 1),
161 |     #     activation=tf.nn.relu,
162 |     #     use_bias=True,
163 |     #     kernel_initializer=None,
164 |     #     bias_initializer=tf.zeros_initializer(),
165 |     #     kernel_regularizer=None,
166 |     #     bias_regularizer=None,
167 |     #     activity_regularizer=None,
168 |     #     # activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001),
169 |     #     kernel_constraint=None,
170 |     #     bias_constraint=None,
171 |     #     trainable=True,
172 |     #     name='conv2',
173 |     #     reuse=tf.AUTO_REUSE)
174 | 
175 |     # # DE-CONV2D: filters W3, stride 1, padding 'SAME'
176 |     # # Input size (n_im, n_H0, n_W0, 64), output size (n_im, n_H0, n_W0, 1)
177 |     # batch_size = tf.shape(x)[0]
178 |     # deconv_shape = tf.stack([batch_size, x.shape[1], x.shape[2], 1])
179 |     # DECONV = tf.nn.conv2d_transpose(CONV2, W3, output_shape=deconv_shape,
180 |     #                                 strides=[1, 1, 1, 1], padding='SAME')
181 | 
182 | 
183 |     # # Use conv for the last layer, Chong Duan
184 |     # Z2 = tf.nn.conv2d(CONV2, W3, strides=[1, 1, 1, 1], padding='SAME')
185 |     # # RELU
186 |     # CONV3 = tf.nn.relu(Z2)
187 | 
188 |     # Final convolution; the paper's L1 activity penalty is commented out in this version
189 |     CONV3 = tf.layers.conv2d(
190 |         CONV2,
191 |         filters=1,
192 |         kernel_size=7,
193 |         strides=(1, 1),
194 |         padding='same',
195 |         data_format='channels_last',
196 |         dilation_rate=(1, 1),
197 |         activation=tf.nn.relu,
198 |         use_bias=True,
199 |         kernel_initializer=None,
200 |         bias_initializer=tf.zeros_initializer(),
201 |         kernel_regularizer=None,
202 |         bias_regularizer=None,
203 |         activity_regularizer=None,
204 |         # activity_regularizer=tf.contrib.layers.l1_regularizer(0.0001),
205 |         kernel_constraint=None,
206 |         bias_constraint=None,
207 |         trainable=True,
208 |         name='conv3',
209 |         reuse=tf.AUTO_REUSE)
210 | 
211 |     DECONV = tf.squeeze(CONV3)
212 | 
213 |     return DECONV
214 | 
215 | 
216 | def compute_cost(DECONV, Y):
217 |     """
218 |     Computes cost (squared loss) between the output of forward propagation and
219 |     the label image
220 |     :param DECONV: output of forward propagation
221 |     :param Y: label image
222 |     :return: cost (squared loss)
223 |     """
224 | 
225 |     cost = tf.square(DECONV - Y)  # element-wise squared error
226 | 
227 |     return cost
228 | 
229 | 
230 | def random_mini_batches(x, y, mini_batch_size=64, seed=0):
231 |     """ Shuffles training examples and partitions them into mini-batches
232 |     to speed up the gradient descent
233 |     :param x: input frequency space data
234 |     :param y: input image space data
235 |     :param mini_batch_size: mini-batch size
236 |     :param seed: can be chosen to keep the random choice consistent
237 |     :return: a list of mini-batches of size mini_batch_size of training examples
238 |     """
239 | 
240 |     m = x.shape[0]  # number of input images
241 |     mini_batches = []
242 |     np.random.seed(seed)
243 | 
244 |     # Shuffle (x, y)
245 |     permutation = list(np.random.permutation(m))
246 |     shuffled_X = x[permutation, :]
247 |     shuffled_Y = y[permutation, :]
248 | 
249 |     # Partition (shuffled_X, shuffled_Y), leaving the remainder for the end case.
250 |     num_complete_minibatches = int(math.floor(
251 |         m / mini_batch_size))  # number of mini-batches of size mini_batch_size
252 | 
253 |     for k in range(0, num_complete_minibatches):
254 |         mini_batch_X = shuffled_X[k * mini_batch_size:k * mini_batch_size
255 |                                   + mini_batch_size, :, :, :]
256 |         mini_batch_Y = shuffled_Y[k * mini_batch_size:k * mini_batch_size
257 |                                   + mini_batch_size, :, :]
258 |         mini_batch = (mini_batch_X, mini_batch_Y)
259 |         mini_batches.append(mini_batch)
260 | 
261 |     # Handle the end case (last mini-batch < mini_batch_size)
262 |     if m % mini_batch_size != 0:
263 |         mini_batch_X = shuffled_X[num_complete_minibatches
264 |                                   * mini_batch_size: m, :, :, :]
265 |         mini_batch_Y = shuffled_Y[num_complete_minibatches
266 |                                   * mini_batch_size: m, :, :]
267 |         mini_batch = (mini_batch_X, mini_batch_Y)
268 |         mini_batches.append(mini_batch)
269 | 
270 |     return mini_batches
271 | 
272 | 
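# Usage sketch for the batching helper (shapes are illustrative, matching
# the placeholder layout used throughout these scripts):
#
#     batches = random_mini_batches(X_train, Y_train, mini_batch_size=55, seed=3)
#     bx, by = batches[0]  # bx: (55, n_H0, n_W0, 2), by: (55, n_H0, n_W0)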
273 | def model(X_train, Y_train, learning_rate=0.0001,
274 |           num_epochs=100, minibatch_size=5, print_cost=True):
275 |     """ Runs the forward and backward propagation
276 |     :param X_train: input training frequency-space data
277 |     :param Y_train: input training image-space data
278 |     :param learning_rate: learning rate of gradient descent
279 |     :param num_epochs: number of epochs
280 |     :param minibatch_size: size of mini-batch
281 |     :param print_cost: if True, the cost is printed every epoch, along with
282 |         how long the epoch took to run
283 |     :return: None; this function saves the model to a file. The model can then
284 |         be used to reconstruct the image from frequency space
285 |     """
286 | 
287 |     with tf.device('/cpu:0'):
288 |         ops.reset_default_graph()  # to not overwrite tf variables
289 |         seed = 3
290 |         (m, n_H0, n_W0, _) = X_train.shape
291 | 
292 |         # Create Placeholders
293 |         X, Y = create_placeholders(n_H0, n_W0)
294 | 
295 |         # Initialize parameters
296 |         parameters = initialize_parameters()
297 | 
298 |         # Build the forward propagation in the tf graph
299 |         DECONV = forward_propagation(X, parameters)
300 | 
301 |         # Add cost function to tf graph
302 |         cost = compute_cost(DECONV, Y)
303 | 
304 |         # # Backpropagation
305 |         # optimizer = tf.train.RMSPropOptimizer(learning_rate,
306 |         #                                       decay=0.9,
307 |         #                                       momentum=0.0).minimize(cost)
308 | 
309 |         # Backpropagation
310 |         # Add global_step variable for saving training models - Chong Duan
311 |         my_global_step = tf.Variable(0, dtype=tf.int32, trainable=False, name='global_step')
312 | 
313 |         optimizer = tf.train.RMSPropOptimizer(learning_rate,
314 |                                               decay=0.9,
315 |                                               momentum=0.0).minimize(cost, global_step=my_global_step)
316 | 
317 |         # Initialize all the variables globally
318 |         init = tf.global_variables_initializer()
319 | 
320 |         # Add ops to save and restore all the variables
321 |         saver = tf.train.Saver(save_relative_paths=True)
322 | 
323 |     # Session configuration: log device placement (useful for verifying
324 |     # which ops run on which device) and allow GPU memory to grow as
325 |     # needed. Note that re-creating ConfigProto would silently discard
326 |     # the allow_growth setting, so both options are set on one object.
327 |     config = tf.ConfigProto(log_device_placement=True)
328 |     config.gpu_options.allow_growth = True
329 | 
330 | 
331 | 
332 |     # Start the session to compute the tf graph
333 |     with tf.Session(config=config) as sess:
334 | 
335 |         # Initialization
336 |         sess.run(init)
337 | 
338 |         # Training loop
339 |         learning_curve = []
340 |         for epoch in range(num_epochs):
341 |             tic = time.time()
342 | 
343 |             minibatch_cost = 0.
344 |             num_minibatches = int(m / minibatch_size)  # number of minibatches
345 |             seed += 1
346 |             minibatches = random_mini_batches(X_train, Y_train,
347 |                                               minibatch_size, seed)
348 |             # Minibatch loop
349 |             for minibatch in minibatches:
350 |                 # Select a minibatch
351 |                 (minibatch_X, minibatch_Y) = minibatch
352 |                 # Run the session to execute the optimizer and the cost
353 |                 _, temp_cost = sess.run(
354 |                     [optimizer, cost],
355 |                     feed_dict={X: minibatch_X, Y: minibatch_Y})
356 | 
357 |                 cost_mean = np.mean(temp_cost) / num_minibatches
358 |                 minibatch_cost += cost_mean
359 | 
360 |             # Print the cost every epoch
361 |             learning_curve.append(minibatch_cost)
362 |             if print_cost:
363 |                 toc = time.time()
364 |                 print('EPOCH =', epoch, 'COST =', minibatch_cost, 'Elapsed time =', (toc - tic))
365 | 
366 |             if (epoch + 1) % 10 == 0:
367 |                 save_path = saver.save(sess, './checkpoints/model.ckpt', global_step=my_global_step)
368 |                 print("Model saved in file: %s" % save_path)
369 | 
370 | 
371 |         # # Save the variables to disk.
372 |         # save_path = saver.save(sess, './model/' + 'model.ckpt')
373 |         # print("Model saved in file: %s" % save_path)
374 | 
375 |         # Plot learning curve
376 |         plt.plot(learning_curve)
377 |         plt.title('Learning Curve')
378 |         plt.xlabel('Epoch')
379 |         plt.ylabel('Cost')
380 |         plt.show()
381 | 
382 |         # Close sess
383 |         sess.close()  # redundant inside a with-block, but harmless
384 | 
385 | # Finally run the model!
386 | model(X_train, Y_train,
387 |       # learning_rate=0.00002,
388 |       learning_rate=0.0001,
389 |       num_epochs=500,
390 |       minibatch_size=55,  # should be smaller than the number of input examples
391 |       print_cost=True)
--------------------------------------------------------------------------------
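# Closing sketch: a zero-filled inverse-FFT baseline for comparing against the
# network reconstruction, assuming (as above) that the two input channels are
# the real and imaginary parts of k-space and X has shape (n, n_H0, n_W0, 2).
def ifft_baseline(X):
    k = X[..., 0] + 1j * X[..., 1]                 # recombine complex k-space
    return np.abs(np.fft.ifft2(k, axes=(-2, -1)))  # magnitude images, (n, n_H0, n_W0)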