├── README.md
├── compared_networks
│   ├── DD_Net_tf2.py
│   ├── fbpconv.py
│   └── red_cnn.py
├── fan-beam
│   ├── Readme
│   ├── new2.py
│   └── testnew2.py
├── fan2para
│   ├── Readme
│   ├── new1.py
│   └── testnew1.py
└── parabeam
    ├── Readme
    ├── make_ini.py
    ├── newmodel.py
    ├── testnew.py
    ├── train
    │   └── traindata
    └── utilize.py
/README.md:
--------------------------------------------------------------------------------
1 | # CT-image-reconstruction
2 | This code implements the CT reconstruction networks described in our peer-reviewed work
3 | "An end-to-end deep network for reconstructing CT images directly from sparse sinograms", submitted to IEEE TCI.
4 | The code is written with TensorFlow 2.0.
5 |
6 | The sparse matrix AT and the input sinograms can be downloaded from the Baidu Yun link: https://pan.baidu.com/s/1Vey42hWPz-myxnHZOZFnVQ (extraction code: jaj2).
7 |
8 |
--------------------------------------------------------------------------------
/compared_networks/DD_Net_tf2.py:
--------------------------------------------------------------------------------
1 | import tensorflow as tf 2 | import numpy as np 3 | import datetime 4 | import os 5 | import glob 6 | 7 | 8 | def BN(img): 9 | # batch_mean, batch_var = tf.nn.moments(img, [0, 1, 2], name='moments') 10 | # img = tf.nn.batch_normalization(img, batch_mean, batch_var, 0, 1, 1e-3) 11 | img=tf.keras.layers.BatchNormalization()(img) 12 | return img 13 | 14 | # def conv2d(x, W): 15 | # tf.keras.layers.Conv2D(64, 3, strides=[1, 1, 1, 1], padding='same')(output) 16 | # return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME') 17 | 18 | 19 | def max_pool_2x1(x): 20 | return tf.keras.layers.MaxPool2D([1, 2], strides=[1, 2], padding='same')(x) 21 | # return tf.nn.max_pool(x, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME') 22 | 23 | 24 | def max_pool_2x2(x): 25 | # return tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME') 26 | return tf.keras.layers.MaxPool2D([2, 2], strides=[2, 2], padding='same')(x) 27 | 28 | 29 | def max_pool(x, n): 30 | return tf.keras.layers.MaxPool2D([n, n], strides=[1, 2], padding='same')(x) 31 | # return tf.nn.max_pool(x, ksize=[1, n, n, 1], strides=[1, n, n, 1], padding='VALID') 32 | 33 | 34 | def build_unpool(source, kernel_shape): 35 | # input_shape = source.get_shape().as_list() 36 | input_shape=tf.shape(source) 37 | # return tf.reshape(source,[input_shape[1] * kernel_shape[1], input_shape[2] * kernel_shape[2]]) 38 | return tf.image.resize(source, [input_shape[1] * kernel_shape[1], input_shape[2] * kernel_shape[2]]) 39 | 40 | 41 | 42 | def DenseNet(input, growth_rate=16, nb_filter=16, filter_wh=5): 43 | # shape = input.get_shape().as_list() 44 | shape=tf.shape(input) 45 | with tf.name_scope('layer1'): 46 | input = BN(input) 47 | input = tf.nn.relu(input) 48 | 49 | # w1_1 = weight_variable([1, 1, shape[3], nb_filter * 4]) 50 | # b1_1 = bias_variable([nb_filter * 4]) 51 | # c1_1 = tf.nn.conv2d(input, w1_1, strides=[1, 1, 1, 1], padding='SAME') + b1_1 52 | c1_1 = tf.keras.layers.Conv2D(nb_filter * 4, [1,1],1, padding='same')(input) 53 | ## 54 | 55 | c1_1 = BN(c1_1) 56 | c1_1 = tf.nn.relu(c1_1) 57 | 58 | # w1 = weight_variable([filter_wh, filter_wh, nb_filter * 4, nb_filter]) 59 | # b1 = bias_variable([nb_filter]) 60 | # c1 = tf.nn.conv2d(c1_1, w1, strides=1, padding='SAME') + b1 61 | c1 = tf.keras.layers.Conv2D(nb_filter,[filter_wh, filter_wh], 1, padding='same')(c1_1) 62 | 63 | h_concat1 = tf.concat([input, c1], 3) 64 | 65 | with tf.name_scope('layer2'): 66 | h_concat1 = BN(h_concat1) 67 | h_concat1 =
tf.nn.relu(h_concat1) 68 | 69 | # w2_1 = weight_variable([1, 1, shape[3] + nb_filter, nb_filter * 4]) 70 | # b2_1 = bias_variable([nb_filter * 4]) 71 | # c2_1 = tf.nn.conv2d(h_concat1, w2_1, strides=[1, 1, 1, 1], padding='SAME') + b2_1 72 | c2_1 = tf.keras.layers.Conv2D(nb_filter * 4, [1, 1], 1, padding='same')(h_concat1) 73 | ## 74 | 75 | c2_1 = BN(c2_1) 76 | c2_1 = tf.nn.relu(c2_1) 77 | 78 | # w2 = weight_variable([filter_wh, filter_wh, nb_filter * 4, nb_filter]) 79 | # b2 = bias_variable([nb_filter]) 80 | # c2 = tf.nn.conv2d(c2_1, w2, strides=[1, 1, 1, 1], padding='SAME') + b2 81 | c2 = tf.keras.layers.Conv2D(nb_filter, [filter_wh, filter_wh], 1, padding='same')(c2_1) 82 | 83 | h_concat2 = tf.concat([input, c1, c2], 3) 84 | 85 | with tf.name_scope('layer3'): 86 | h_concat2 = BN(h_concat2) 87 | h_concat2 = tf.nn.relu(h_concat2) 88 | 89 | # w3_1 = weight_variable([1, 1, shape[3] + nb_filter + nb_filter, nb_filter * 4]) 90 | # b3_1 = bias_variable([nb_filter * 4]) 91 | # c3_1 = tf.nn.conv2d(h_concat2, w3_1, strides=[1, 1, 1, 1], padding='SAME') + b3_1 92 | c3_1 = tf.keras.layers.Conv2D(nb_filter * 4, [1, 1], 1, padding='same')(h_concat2) 93 | ## 94 | 95 | c3_1 = BN(c3_1) 96 | c3_1 = tf.nn.relu(c3_1) 97 | 98 | # w3 = weight_variable([filter_wh, filter_wh, nb_filter * 4, nb_filter]) 99 | # b3 = bias_variable([nb_filter]) 100 | # c3 = tf.nn.conv2d(c3_1, w3, strides=[1, 1, 1, 1], padding='SAME') + b3 101 | c3 = tf.keras.layers.Conv2D(nb_filter, [filter_wh, filter_wh], 1, padding='same')(c3_1) 102 | 103 | h_concat3 = tf.concat([input, c1, c2, c3], 3) 104 | 105 | with tf.name_scope('layer4'): 106 | h_concat3 = BN(h_concat3) 107 | h_concat3 = tf.nn.relu(h_concat3) 108 | 109 | # w4_1 = weight_variable([1, 1, shape[3] + nb_filter + nb_filter + nb_filter, nb_filter * 4]) 110 | # b4_1 = bias_variable([nb_filter * 4]) 111 | # c4_1 = tf.nn.conv2d(h_concat3, w4_1, strides=[1, 1, 1, 1], padding='SAME') + b4_1 112 | c4_1 = tf.keras.layers.Conv2D(nb_filter * 4, [1, 1], 1, padding='same')(h_concat3) 113 | ## 114 | 115 | c4_1 = BN(c4_1) 116 | c4_1 = tf.nn.relu(c4_1) 117 | 118 | # w4 = weight_variable([filter_wh, filter_wh, nb_filter * 4, nb_filter]) 119 | # b4 = bias_variable([nb_filter]) 120 | # c4 = tf.nn.conv2d(c4_1, w4, strides=[1, 1, 1, 1], padding='SAME') + b4 121 | c4 = tf.keras.layers.Conv2D(nb_filter, [filter_wh, filter_wh], 1, padding='same')(c4_1) 122 | 123 | return tf.concat([input, c1, c2, c3, c4], 3) 124 | 125 | def mix(input_image): 126 | nb_filter = 16 127 | # W_conv1 = weight_variable([7, 7, 1, nb_filter]) 128 | # b_conv1 = bias_variable([nb_filter]) 129 | # h_conv1 = (tf.nn.conv2d(input_image, W_conv1, strides=[1, 1, 1, 1], 130 | # padding='SAME') + b_conv1) # 256*256**(nb_filter) 131 | h_conv1 =tf.keras.layers.Conv2D(nb_filter, [7, 7], 1, padding='same')(input_image) 132 | 133 | h_pool1 = tf.nn.max_pool(h_conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], 134 | padding='SAME') # 128*128*(nb_filter) 135 | 136 | 137 | D1 = DenseNet(h_pool1, growth_rate=16, nb_filter=nb_filter, filter_wh=5) # 128*128*(nb_filter*4+nb_filter) 138 | 139 | D1 = BN(D1) 140 | D1 = tf.nn.relu(D1) 141 | # W_conv1_T = weight_variable([1, 1, nb_filter + nb_filter * 4, nb_filter]) 142 | # b_conv1_T = bias_variable([nb_filter]) 143 | # h_conv1_T = ( 144 | # tf.nn.conv2d(D1, W_conv1_T, strides=[1, 1, 1, 1], padding='SAME') + b_conv1_T) # 128*128*(nb_filter) 145 | h_conv1_T = tf.keras.layers.Conv2D(nb_filter, [1, 1], 1, padding='same')(D1) 146 | 147 | h_pool1_T = tf.nn.max_pool(h_conv1_T, ksize=[1, 2, 2, 1], strides=[1, 2, 
2, 1], 148 | padding='SAME') # 64*64*(nb_filter) 149 | 150 | ## 151 | D2 = DenseNet(h_pool1_T, growth_rate=16, nb_filter=nb_filter, filter_wh=5) # 64*64*(4*nb_filter + nb_filter) 152 | D2 = BN(D2) 153 | D2 = tf.nn.relu(D2) 154 | 155 | # W_conv2_T = weight_variable([1, 1, nb_filter + nb_filter * 4, nb_filter]) 156 | # b_conv2_T = bias_variable([nb_filter]) 157 | # h_conv2_T = ( 158 | # tf.nn.conv2d(D2, W_conv2_T, strides=[1, 1, 1, 1], padding='SAME') + b_conv2_T) # 64*64*(nb_filter) 159 | 160 | h_conv2_T = tf.keras.layers.Conv2D(nb_filter, [1, 1], 1, padding='same')(D2) 161 | h_pool2_T = tf.nn.max_pool(h_conv2_T, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], 162 | padding='SAME') # 32*32*(nb_filter) 163 | 164 | ## 165 | D3 = DenseNet(h_pool2_T, growth_rate=16, nb_filter=nb_filter, filter_wh=5) # 32*32*(4*nb_filter + nb_filter) 166 | D3 = BN(D3) 167 | D3 = tf.nn.relu(D3) 168 | # W_conv3_T = weight_variable([1, 1, nb_filter + nb_filter * 4, nb_filter]) 169 | # b_conv3_T = bias_variable([nb_filter]) 170 | # h_conv3_T = ( 171 | # tf.nn.conv2d(D3, W_conv3_T, strides=[1, 1, 1, 1], padding='SAME') + b_conv3_T) # 32*32*(nb_filter) 172 | 173 | h_conv3_T = tf.keras.layers.Conv2D(nb_filter, [1, 1], 1, padding='same')(D3) 174 | h_pool3_T = tf.nn.max_pool(h_conv3_T, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], 175 | padding='SAME') # 16*16*(nb_filter) 176 | 177 | ## 178 | D4 = DenseNet(h_pool3_T, growth_rate=16, nb_filter=nb_filter, filter_wh=5) # 16*16*(4*nb_filter + nb_filter) 179 | D4 = BN(D4) 180 | D4 = tf.nn.relu(D4) 181 | # W_conv4_T = weight_variable([1, 1, nb_filter + nb_filter * 4, nb_filter]) 182 | # b_conv4_T = bias_variable([nb_filter]) 183 | # h_conv4_T = ( 184 | # tf.nn.conv2d(D4, W_conv4_T, strides=[1, 1, 1, 1], padding='SAME') + b_conv4_T) # 16*16*(nb_filter) 185 | h_conv4_T = tf.keras.layers.Conv2D(nb_filter, [1, 1], 1, padding='same')(D4) 186 | 187 | ## 188 | 189 | # W_conv40 = weight_variable([5, 5, 2 * nb_filter, 2 * nb_filter]) 190 | # b_conv40 = bias_variable([2 * nb_filter]) 191 | # h_conv40 = tf.nn.relu( 192 | # tf.nn.conv2d_transpose(tf.concat([build_unpool(h_conv4_T, [1, 2, 2, 1]), h_conv3_T], 3), W_conv40, 193 | # [batch, 64, 64, 2 * nb_filter], strides=[1, 1, 1, 1], 194 | # padding='SAME') + b_conv40) # 32*32*40 195 | h_conv40 = tf.concat([build_unpool(h_conv4_T, [1, 2, 2, 1]), h_conv3_T], 3) 196 | h_conv40 = tf.keras.layers.Conv2DTranspose(2 * nb_filter,[5,5],strides=1,padding='SAME')(h_conv40) 197 | h_conv40 = tf.nn.relu(h_conv40) 198 | batch_mean, batch_var = tf.nn.moments(h_conv40, [0, 1, 2], name='moments') 199 | h_conv40 = tf.nn.batch_normalization(h_conv40, batch_mean, batch_var, 0, 1, 1e-3) # 32*32 200 | 201 | # W_conv40_T = weight_variable([1, 1, nb_filter, (2 * nb_filter)]) 202 | # b_conv40_T = bias_variable([nb_filter]) 203 | # h_conv40_T = tf.nn.relu( 204 | # tf.nn.conv2d_transpose(h_conv40, W_conv40_T, [batch, 64, 64, nb_filter], strides=[1, 1, 1, 1], 205 | # padding='SAME') + b_conv40_T) # 32*32*40 206 | h_conv40_T = tf.keras.layers.Conv2DTranspose(nb_filter, [1, 1], strides=1, padding='SAME')(h_conv40) 207 | h_conv40_T = tf.nn.relu(h_conv40_T) 208 | batch_mean, batch_var = tf.nn.moments(h_conv40_T, [0, 1, 2], name='moments') 209 | h_conv40_T = tf.nn.batch_normalization(h_conv40_T, batch_mean, batch_var, 0, 1, 1e-3) 210 | 211 | ## 212 | # W_conv5 = weight_variable([5, 5, 2 * nb_filter, 2 * nb_filter]) 213 | # b_conv5 = bias_variable([2 * nb_filter]) 214 | # h_conv5 = tf.nn.relu( 215 | # tf.nn.conv2d_transpose(tf.concat([build_unpool(h_conv40_T, [1, 2, 2, 1]), 
h_conv2_T], 3), W_conv5, 216 | # [batch, 128, 128, 2 * nb_filter], strides=[1, 1, 1, 1], 217 | # padding='SAME') + b_conv5) # 64*64*20 218 | h_conv5 = tf.concat([build_unpool(h_conv40_T, [1, 2, 2, 1]), h_conv2_T], 3) 219 | h_conv5 = tf.keras.layers.Conv2DTranspose(2 * nb_filter, [5, 5], strides=1, padding='SAME')(h_conv5) 220 | h_conv5 = tf.nn.relu(h_conv5) 221 | batch_mean, batch_var = tf.nn.moments(h_conv5, [0, 1, 2], name='moments') 222 | h_conv5 = tf.nn.batch_normalization(h_conv5, batch_mean, batch_var, 0, 1, 1e-3) 223 | 224 | # W_conv5_T = weight_variable([1, 1, nb_filter, 2 * nb_filter]) 225 | # b_conv5_T = bias_variable([nb_filter]) 226 | # h_conv5_T = tf.nn.relu( 227 | # tf.nn.conv2d_transpose(h_conv5, W_conv5_T, [batch, 128, 128, nb_filter], strides=[1, 1, 1, 1], 228 | # padding='SAME') + b_conv5_T) # 64*64*20 229 | h_conv5_T = tf.keras.layers.Conv2DTranspose(nb_filter, [1, 1], strides=1, padding='SAME')(h_conv5) 230 | h_conv5_T = tf.nn.relu(h_conv5_T) 231 | batch_mean, batch_var = tf.nn.moments(h_conv5_T, [0, 1, 2], name='moments') 232 | h_conv5_T = tf.nn.batch_normalization(h_conv5_T, batch_mean, batch_var, 0, 1, 1e-3) 233 | 234 | ## 235 | # W_conv6 = weight_variable([5, 5, 2 * nb_filter, 2 * nb_filter]) 236 | # b_conv6 = bias_variable([2 * nb_filter]) 237 | # h_conv6 = tf.nn.relu( 238 | # tf.nn.conv2d_transpose(tf.concat([build_unpool(h_conv5_T, [1, 2, 2, 1]), h_conv1_T], 3), W_conv6, 239 | # [batch, 256, 256, 2 * nb_filter], strides=[1, 1, 1, 1], 240 | # padding='SAME') + b_conv6) 241 | h_conv6 = tf.concat([build_unpool(h_conv5_T, [1, 2, 2, 1]), h_conv1_T], 3) 242 | h_conv6 = tf.keras.layers.Conv2DTranspose(2 * nb_filter, [5, 5], strides=1, padding='SAME')(h_conv6) 243 | h_conv6 = tf.nn.relu(h_conv6) 244 | batch_mean, batch_var = tf.nn.moments(h_conv6, [0, 1, 2], name='moments') 245 | h_conv6 = tf.nn.batch_normalization(h_conv6, batch_mean, batch_var, 0, 1, 1e-3) 246 | 247 | # W_conv6_T = weight_variable([1, 1, nb_filter, 2 * nb_filter]) 248 | # b_conv6_T = bias_variable([nb_filter]) 249 | # h_conv6_T = tf.nn.relu( 250 | # tf.nn.conv2d_transpose(h_conv6, W_conv6_T, [batch, 256, 256, nb_filter], strides=[1, 1, 1, 1], 251 | # padding='SAME') + b_conv6_T) # 64*64*20 252 | h_conv6_T = tf.keras.layers.Conv2DTranspose(nb_filter, [1, 1], strides=1, padding='SAME')(h_conv6) 253 | h_conv6_T = tf.nn.relu(h_conv6_T) 254 | batch_mean, batch_var = tf.nn.moments(h_conv6_T, [0, 1, 2], name='moments') 255 | h_conv6_T = tf.nn.batch_normalization(h_conv6_T, batch_mean, batch_var, 0, 1, 1e-3) 256 | 257 | # W_conv7 = weight_variable([5, 5, 2 * nb_filter, 2 * nb_filter]) 258 | # b_conv7 = bias_variable([2 * nb_filter]) 259 | # h_conv7 = tf.nn.relu( 260 | # tf.nn.conv2d_transpose(tf.concat([build_unpool(h_conv6_T, [1, 2, 2, 1]), h_conv1], 3), W_conv7, 261 | # [batch, 512, 512, 2 * nb_filter], strides=[1, 1, 1, 1], 262 | # padding='SAME') + b_conv7) 263 | h_conv7 = tf.concat([build_unpool(h_conv6_T, [1, 2, 2, 1]), h_conv1], 3) 264 | h_conv7 = tf.keras.layers.Conv2DTranspose(2 * nb_filter, [5, 5], strides=1, padding='SAME')(h_conv7) 265 | h_conv7 = tf.nn.relu(h_conv7) 266 | 267 | # W_conv8 = weight_variable([1, 1, 1, 2 * nb_filter]) 268 | # b_conv8 = bias_variable([1]) 269 | # h_conv8 = tf.nn.relu(tf.nn.conv2d_transpose(h_conv7, W_conv8, [batch, 512, 512, 1], strides=[1, 1, 1, 1], 270 | # padding='SAME') + b_conv8) 271 | h_conv8 = tf.keras.layers.Conv2DTranspose(1, [1, 1], strides=1, padding='SAME')(h_conv7) 272 | h_conv8 = tf.nn.relu(h_conv8) 273 | return h_conv8 274 | 275 | def 
make_model(batch,ux=None,uy=None): 276 | inputs = tf.keras.Input(shape=(ux,uy,1),batch_size=batch) 277 | outputs=mix(inputs) 278 | model=tf.keras.Model(inputs=inputs,outputs=outputs) 279 | return model 280 | 281 | def train(epoch, udir,batch, theta, iternum, restore=0, ckpt='./weights/CT_tf2_4'): 282 | u_img = np.load(udir + 'u_CT_img_no_scale.npy') 283 | print('shape of u_img:', u_img.shape) 284 | # f_img = np.load(udir + '/f,angle=' + str(angles) + '_255.0_0.002.npy') 285 | ini_u_img = np.load(udir + 'ini,angle=60_no_scale__0.5.npy') 286 | 287 | M = np.max(np.max(ini_u_img, 1), 1) 288 | M = np.reshape(M, [np.shape(M)[0], 1, 1, 1]) 289 | u_img = u_img / M * 255 290 | ini_u_img = ini_u_img / M * 255 291 | 292 | 293 | current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") 294 | train_log_dir = 'logs/gradient_tape/' + current_time + '/train' 295 | test_log_dir = 'logs/gradient_tape/' + current_time + '/test' 296 | 297 | optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.001) 298 | Model = make_model(batch,ux=256,uy=256) 299 | if restore == 1: 300 | # call the build function in the layers since do not use tf.keras.Input 301 | ##maybe move the functions in build function to _ini_ need not do this 302 | _=Model(ini_u_img[0:1]) 303 | Model.load_weights(ckpt) 304 | print('load weights, done') 305 | tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=train_log_dir) 306 | 307 | u_img=tf.cast(u_img,tf.float32) 308 | ini_u_img = tf.cast(ini_u_img, tf.float32) 309 | N=tf.shape(u_img)[0] 310 | vx=ini_u_img[N-5:N] 311 | vy=u_img[N-5:N] 312 | # vx=tf.cast(vx,tf.float32) 313 | # vy = tf.cast(vy, tf.float32) 314 | train_data = tf.data.Dataset.from_tensor_slices((u_img[0:N-5], ini_u_img[0:N-5])).batch(batch) 315 | for i in range(epoch): 316 | for iter, ufini in enumerate(train_data): 317 | u, ini_u = ufini 318 | Loss, m1, m2,m3 = train_step(ini_u, Model, u, loss, psnr, optimizer,vx,vy) 319 | print(iter, "/", i, ":", Loss.numpy(), 320 | "psnr1:", m1.numpy(), 321 | "psnr2:", m2.numpy(), 322 | 'psnr3:', m3.numpy() 323 | ) 324 | if i%10==0: 325 | Model.save_weights(ckpt) 326 | # Model.compile(optimizer=optimizer, loss=[loss], metrics=[psnr]) 327 | # Model.fit(x, y, batch_size=batch, epochs=epoch, callbacks=[tensorboard_callback], 328 | # validation_split=1/80) 329 | Model.save_weights(ckpt) 330 | # tf.keras.utils.plot_model(Model, 'multi_input_and_output_model.png', show_shapes=True) 331 | 332 | 333 | @tf.function 334 | def train_step(inputs, model, labels, Loss, Metric, optimizer,vx,vy): 335 | with tf.GradientTape() as tape: 336 | predictions = model(inputs, training=1) 337 | loss = Loss(labels, predictions) 338 | grads = tape.gradient(loss, model.trainable_variables) 339 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 340 | m1 = Metric(labels, inputs) 341 | m2 = Metric(labels, model(inputs, training=0)) 342 | m3 = Metric(vy, model(vx, training=0)) 343 | return loss, m1, m2, m3 344 | 345 | 346 | def loss(x, y): 347 | x1 = tf.cast(x, tf.float32) 348 | y1 = tf.cast(y, tf.float32) 349 | shape = tf.cast(tf.shape(x), tf.float32) 350 | return tf.reduce_sum(tf.math.square(x1 - y1)) / shape[0] / shape[1] / shape[2] / shape[3] 351 | 352 | 353 | def psnr(x, y,max_val=255): 354 | x = tf.cast(x, tf.float32) 355 | y = tf.cast(y, tf.float32) 356 | batch = tf.cast(tf.shape(x)[0], tf.float32) 357 | return tf.reduce_sum(tf.image.psnr(x, y, max_val=tf.reduce_max(x))) / batch 358 | 359 | 360 | 361 | if __name__ == '__main__': 362 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 363 | iternum 
= 20 364 | epoch = 200 365 | batch = 5 366 | angles = 180 367 | theta = np.linspace(0, 180, angles, endpoint=False) 368 | udir = "./train/" 369 | vdir = "validate" 370 | train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./512x512/weights/DD_NET') -------------------------------------------------------------------------------- /compared_networks/fbpconv.py: -------------------------------------------------------------------------------- 1 | 2 | import tensorflow as tf 3 | from collections import OrderedDict 4 | import numpy as np 5 | import datetime 6 | import os 7 | 8 | 9 | def crop_and_concat(x1,x2): 10 | x1_shape = tf.shape(x1) 11 | x2_shape = tf.shape(x2) 12 | # offsets for the top left corner of the crop 13 | offsets = [0, (x1_shape[1] - x2_shape[1]) // 2, (x1_shape[2] - x2_shape[2]) // 2, 0] 14 | size = [x1_shape[0], x2_shape[1], x2_shape[2], x1_shape[3]] 15 | x1_crop = tf.slice(x1, offsets, size) 16 | # return tf.concat([x1_crop, x2], 3) 17 | return tf.keras.layers.Concatenate(3)([x1_crop, x2]) 18 | 19 | 20 | def create_conv_net(x, channels=1, n_class=1, layers=3, features_root=16, filter_size=3, pool_size=2, Ngpu=1, 21 | maxpool=True, summaries=True): 22 | """ 23 | Creates a new convolutional U-Net for the given parametrization. 24 | 25 | :param x: input tensor, shape [?,nx,ny,channels] 26 | :param maxpool: use max pooling if True, otherwise average pooling 27 | :param channels: number of channels in the input image 28 | :param n_class: number of output labels 29 | :param layers: number of layers in the net 30 | :param features_root: number of features in the first layer 31 | :param filter_size: size of the convolution filter 32 | :param pool_size: size of the max pooling operation 33 | :param summaries: Flag if summaries should be created 34 | """ 35 | 36 | # Placeholder for the input image 37 | nx = tf.shape(x)[1] 38 | ny = tf.shape(x)[2] 39 | x_image = tf.reshape(x, [-1, nx, ny, channels]) 40 | in_node = x_image 41 | # batch_size = tf.shape(x_image)[0] 42 | 43 | # weights = [] 44 | # weights_d = [] 45 | # biases = [] 46 | # biases_d = [] 47 | dw_h_convs = OrderedDict() 48 | 49 | in_size = 1000 50 | size = in_size 51 | padding = 'same' 52 | if Ngpu == 1: 53 | gname = '0' 54 | else: 55 | gname = '1' 56 | # down layers 57 | with tf.device('/gpu:0'): 58 | for layer in range(0, layers): 59 | features = 2 ** layer * features_root 60 | filters = features 61 | if layer == 0: 62 | # w1 = weight_variable([filter_size, filter_size, channels, features], stddev) 63 | w1_kernel_size=[filter_size, filter_size] 64 | else: 65 | # w1 = weight_variable([filter_size, filter_size, features // 2, features], stddev) 66 | w1_kernel_size = [filter_size, filter_size] 67 | 68 | # w2 = weight_variable([filter_size, filter_size, features, features], stddev) 69 | w2_kernel_size=[filter_size, filter_size] 70 | # b1 = bias_variable([features]) 71 | # b2 = bias_variable([features]) 72 | 73 | # conv = conv2d(in_node, w1, keep_prob, padding) 74 | # in_node = tf.nn.relu(conv + b1) 75 | conv=tf.keras.layers.Conv2D(filters,w1_kernel_size,padding=padding)(in_node) 76 | in_node=tf.keras.layers.ReLU()(conv) 77 | 78 | # conv = conv2d(in_node, w2, keep_prob, padding) 79 | # in_node = tf.nn.relu(conv + b2) 80 | conv = tf.keras.layers.Conv2D(filters, w2_kernel_size, padding=padding)(in_node) 81 | in_node = tf.keras.layers.ReLU()(conv) 82 | 83 | dw_h_convs[layer] = in_node 84 | # dw_h_convs[layer] = tf.nn.relu(conv2 + b2) 85 | # convs.append((conv1, conv2)) 86 | 87 | size -= 4 88 | if layer < layers - 1: 89 | if maxpool: 90 | in_node =
tf.keras.layers.MaxPool2D(pool_size)(dw_h_convs[layer]) 91 | else: 92 | in_node = tf.keras.layers.AveragePooling2D(pool_size)(dw_h_convs[layer]) 93 | 94 | # pools[layer] = max_pool(dw_h_convs[layer], pool_size) 95 | # in_node = pools[layer] 96 | size /= 2 97 | 98 | in_node = dw_h_convs[layers - 1] 99 | 100 | with tf.device('/gpu:0'): 101 | # up layers 102 | for layer in range(layers - 2, -1, -1): 103 | features = 2 ** (layer + 1) * features_root 104 | # stddev = np.sqrt(2 / (filter_size ** 2 * features)) 105 | 106 | # wd = weight_variable_devonc([pool_size, pool_size, features // 2, features], stddev) 107 | # bd = bias_variable([features // 2]) 108 | # in_node = tf.nn.relu(deconv2d(in_node, wd, pool_size, padding) + bd) 109 | in_node = tf.keras.layers.Conv2DTranspose(features, pool_size,strides=2, padding=padding)(in_node) 110 | in_node = tf.keras.layers.ReLU()(in_node) 111 | 112 | conv = crop_and_concat(dw_h_convs[layer], in_node) 113 | 114 | # w1 = weight_variable([filter_size, filter_size, features, features // 2], stddev) 115 | # w2 = weight_variable([filter_size, filter_size, features // 2, features // 2], stddev) 116 | # b1 = bias_variable([features // 2]) 117 | # b2 = bias_variable([features // 2]) 118 | filters=features // 2 119 | w1_kernel_size=[filter_size, filter_size] 120 | w2_kernel_size=[filter_size, filter_size] 121 | 122 | # conv = conv2d(conv, w1, keep_prob, padding) 123 | conv=tf.keras.layers.Conv2D(filters, w1_kernel_size, padding=padding)(conv) 124 | # conv = tf.nn.relu(conv + b1) 125 | conv=tf.keras.layers.ReLU()(conv) 126 | 127 | # conv = conv2d(conv, w2, keep_prob, padding) 128 | # in_node = tf.nn.relu(conv + b2) 129 | conv = tf.keras.layers.Conv2D(filters, w2_kernel_size, padding=padding)(conv) 130 | in_node =tf.keras.layers.ReLU()(conv) 131 | 132 | # weights.append((w1, w2)) 133 | # weights_d.append((wd)) 134 | # biases.append((b1, b2)) 135 | # biases_d.append((bd)) 136 | 137 | # convs.append((conv1, conv2)) 138 | 139 | size *= 2 140 | size -= 4 141 | 142 | # with tf.device('/gpu:1'): 143 | # Output Map 144 | # weight = weight_variable([1, 1, features_root, n_class], stddev) 145 | # bias = bias_variable([n_class]) 146 | # conv = conv2d(in_node, weight, tf.constant(1.0), padding) 147 | conv=tf.keras.layers.Conv2D(n_class, [1, 1], padding=padding)(in_node) 148 | # output_map = conv + bias + x_image # tf.nn.relu(conv + bias) 149 | output_map = conv + x_image 150 | return output_map 151 | 152 | 153 | def make_model(batch,ux=256,uy=256): 154 | inputs = tf.keras.Input(shape=(ux,uy,1),batch_size=batch) 155 | outputs=create_conv_net(inputs) 156 | model=tf.keras.Model(inputs=inputs,outputs=outputs) 157 | return model 158 | 159 | def train(epoch, udir,batch, theta, iternum, restore=0, ckpt='./weights/CT_tf2_4'): 160 | max_val = 255 161 | angles = np.shape(theta)[0] 162 | u_img = np.load(udir + 'u_CT_img_no_scale.npy') 163 | print('shape of u_img:', u_img.shape) 164 | # f_img = np.load(udir + '/f,angle=' + str(angles) + '_255.0_0.002.npy') 165 | ini_u_img = np.load(udir + 'ini,angle=60_no_scale__0.5.npy') 166 | 167 | M = np.max(np.max(ini_u_img, 1), 1) 168 | M = np.reshape(M, [np.shape(M)[0], 1, 1, 1]) 169 | u_img = u_img / M * 255 170 | ini_u_img = ini_u_img / M * 255 171 | 172 | 173 | current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") 174 | train_log_dir = 'logs/gradient_tape/' + current_time + '/train' 175 | test_log_dir = 'logs/gradient_tape/' + current_time + '/test' 176 | 177 | optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.001) 178 | Model = 
make_model(batch) 179 | if restore == 1: 180 | # call the build function in the layers since do not use tf.keras.Input 181 | ##maybe move the functions in build function to _ini_ need not do this 182 | _=Model(ini_u_img[0:1]) 183 | Model.load_weights(ckpt) 184 | print('load weights, done') 185 | tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=train_log_dir) 186 | 187 | u_img = tf.cast(u_img, tf.float32) 188 | ini_u_img = tf.cast(ini_u_img, tf.float32) 189 | N=tf.shape(u_img)[0] 190 | vx=ini_u_img[N-5:N] 191 | vy=u_img[N-5:N] 192 | # vx=tf.cast(vx,tf.float32) 193 | # vy = tf.cast(vy, tf.float32) 194 | train_data = tf.data.Dataset.from_tensor_slices((u_img[0:N-5], ini_u_img[0:N-5])).batch(batch) 195 | for i in range(epoch): 196 | for iter, ufini in enumerate(train_data): 197 | u, ini_u = ufini 198 | Loss, m1, m2,m3 = train_step(ini_u, Model, u, loss, psnr, optimizer,vx,vy) 199 | print(iter, "/", i, ":", Loss.numpy(), 200 | "psnr1:", m1.numpy(), 201 | "psnr2:", m2.numpy(), 202 | 'psnr3:', m3.numpy() 203 | ) 204 | if i%10==0: 205 | Model.save_weights(ckpt) 206 | # Model.compile(optimizer=optimizer, loss=[loss], metrics=[psnr]) 207 | # Model.fit(x, y, batch_size=batch, epochs=epoch, callbacks=[tensorboard_callback], 208 | # validation_split=1/80) 209 | Model.save_weights(ckpt) 210 | # tf.keras.utils.plot_model(Model, 'multi_input_and_output_model.png', show_shapes=True) 211 | 212 | 213 | # @tf.function 214 | def train_step(inputs, model, labels, Loss, Metric, optimizer,vx,vy): 215 | with tf.GradientTape() as tape: 216 | predictions = model(inputs, training=1) 217 | loss = Loss(labels, predictions) 218 | grads = tape.gradient(loss, model.trainable_variables) 219 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 220 | m1 = Metric(labels, inputs) 221 | m2 = Metric(labels, model(inputs, training=0)) 222 | m3 = Metric(vy, model(vx, training=0)) 223 | return loss, m1, m2, m3 224 | 225 | 226 | def loss(x, y): 227 | x1 = tf.cast(x, tf.float32) 228 | y1 = tf.cast(y, tf.float32) 229 | shape = tf.cast(tf.shape(x), tf.float32) 230 | return tf.reduce_sum(tf.math.square(x1 - y1)) / shape[0] / shape[1] / shape[2] / shape[3] 231 | 232 | 233 | def psnr(x, y,max_val=255): 234 | x = tf.cast(x, tf.float32) 235 | y = tf.cast(y, tf.float32) 236 | batch = tf.cast(tf.shape(x)[0], tf.float32) 237 | return tf.reduce_sum(tf.image.psnr(x, y, max_val=tf.reduce_max(x))) / batch 238 | 239 | 240 | if __name__ == '__main__': 241 | # tf.debugging.set_log_device_placement(True) 242 | os.environ["CUDA_VISIBLE_DEVICES"] = "1" 243 | iternum = 20 244 | epoch = 200 245 | batch = 5 246 | angles = 180 247 | theta = np.linspace(0, 180, angles, endpoint=False) 248 | # udir = "/home/wangwei/ct-compare/CPTAC-LUAD//npy/" 249 | udir = "./train/" 250 | vdir = "validate" 251 | train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./512x512/weights/fbpconv') -------------------------------------------------------------------------------- /compared_networks/red_cnn.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import cv2 as cv 3 | import numpy as np 4 | import os 5 | import datetime 6 | 7 | 8 | def redcnn(in_image, kernel_size=[5, 5], filter_size=96, conv_stride=1, initial_std=0.01): 9 | # conv layer1 10 | conv1 = tf.keras.layers.Conv2D(filter_size, kernel_size, conv_stride, padding='valid')(in_image) 11 | conv1=tf.keras.layers.ReLU()(conv1) 12 | # conv layer2 13 | conv2 = tf.keras.layers.Conv2D( filter_size, kernel_size, conv_stride, 
padding='valid')(conv1) 14 | conv2 = shortcut_deconv8 = tf.keras.layers.ReLU()(conv2) 15 | # conv layer3 16 | conv3 = tf.keras.layers.Conv2D(filter_size, kernel_size, conv_stride, padding='valid')(conv2) 17 | conv3 = tf.keras.layers.ReLU()(conv3) 18 | # conv layer4 19 | conv4 = tf.keras.layers.Conv2D(filter_size, kernel_size, conv_stride, padding='valid')(conv3) 20 | conv4 = shortcut_deconv6 = tf.keras.layers.ReLU()(conv4) 21 | # conv layer5 22 | conv5 = tf.keras.layers.Conv2D(filter_size, kernel_size, conv_stride, padding='valid')(conv4) 23 | conv5 = tf.keras.layers.ReLU()(conv5) 24 | 25 | """ 26 | decoder 27 | """ 28 | # deconv 6 + shortcut (residual style) 29 | deconv6 = tf.keras.layers.Conv2DTranspose(filter_size, kernel_size, conv_stride, padding='valid')(conv5) 30 | deconv6 += shortcut_deconv6 31 | deconv6 = tf.keras.layers.ReLU()(deconv6) 32 | # deconv 7 33 | deconv7 = tf.keras.layers.Conv2DTranspose(filter_size, kernel_size, conv_stride, padding='valid')(deconv6) 34 | deconv7 = tf.keras.layers.ReLU()(deconv7) 35 | # deconv 8 + shortcut 36 | deconv8 = tf.keras.layers.Conv2DTranspose(filter_size, kernel_size, conv_stride, padding='valid')(deconv7) 37 | deconv8 += shortcut_deconv8 38 | deconv8 = tf.keras.layers.ReLU()(deconv8) 39 | # deconv 9 40 | deconv9 = tf.keras.layers.Conv2DTranspose(filter_size, kernel_size, conv_stride, padding='valid')(deconv8) 41 | deconv9 = tf.keras.layers.ReLU()(deconv9) 42 | # deconv 10 + shortcut 43 | deconv10 = tf.keras.layers.Conv2DTranspose(1, kernel_size, conv_stride, padding='valid')(deconv9) 44 | deconv10 += in_image 45 | output = tf.keras.layers.ReLU()(deconv10) 46 | return output 47 | 48 | 49 | def make_model(batch): 50 | inputs = tf.keras.Input(shape=(None,None,1),batch_size=batch) 51 | outputs=redcnn(inputs) 52 | model=tf.keras.Model(inputs=inputs,outputs=outputs) 53 | return model 54 | 55 | def train(epoch, udir,batch, theta, iternum, restore=0, ckpt='./weights/CT_tf2_4'): 56 | max_val = 255 57 | angles = np.shape(theta)[0] 58 | u_img = np.load(udir + 'u_CT_img_no_scale.npy') 59 | print('shape of u_img:', u_img.shape) 60 | # f_img = np.load(udir + '/f,angle=' + str(angles) + '_255.0_0.002.npy') 61 | ini_u_img = np.load(udir + 'ini,angle=60_no_scale__0.5.npy') 62 | 63 | M = np.max(np.max(ini_u_img, 1), 1) 64 | M = np.reshape(M, [np.shape(M)[0], 1, 1, 1]) 65 | u_img = u_img / M * 255 66 | ini_u_img = ini_u_img / M * 255 67 | 68 | 69 | current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") 70 | train_log_dir = 'logs/gradient_tape/' + current_time + '/train' 71 | test_log_dir = 'logs/gradient_tape/' + current_time + '/test' 72 | 73 | optimizer = tf.keras.optimizers.Adagrad(learning_rate=0.001) 74 | Model = make_model(batch) 75 | if restore == 1: 76 | # call the build function in the layers since do not use tf.keras.Input 77 | ##maybe move the functions in build function to _ini_ need not do this 78 | _=Model(ini_u_img[0:1]) 79 | Model.load_weights(ckpt) 80 | print('load weights, done') 81 | tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=train_log_dir) 82 | 83 | u_img = tf.cast(u_img, tf.float32) 84 | ini_u_img = tf.cast(ini_u_img, tf.float32) 85 | N=tf.shape(u_img)[0] 86 | vx=ini_u_img[N-5:N] 87 | vy=u_img[N-5:N] 88 | vx=tf.cast(vx,tf.float32) 89 | vy = tf.cast(vy, tf.float32) 90 | train_data = tf.data.Dataset.from_tensor_slices((u_img[0:N-5], ini_u_img[0:N-5])).batch(batch) 91 | for i in range(epoch): 92 | for iter, ufini in enumerate(train_data): 93 | u, ini_u = ufini 94 | Loss, m1, m2,m3 = train_step(ini_u, Model, u, 
loss, psnr, optimizer,vx,vy) 95 | print(iter, "/", i, ":", Loss.numpy(), 96 | "psnr1:", m1.numpy(), 97 | "psnr2:", m2.numpy(), 98 | 'psnr3:', m3.numpy() 99 | ) 100 | if i%2==0: 101 | Model.save_weights(ckpt) 102 | # Model.compile(optimizer=optimizer, loss=[loss], metrics=[psnr]) 103 | # Model.fit(x, y, batch_size=batch, epochs=epoch, callbacks=[tensorboard_callback], 104 | # validation_split=1/80) 105 | Model.save_weights(ckpt) 106 | # tf.keras.utils.plot_model(Model, 'multi_input_and_output_model.png', show_shapes=True) 107 | 108 | 109 | @tf.function 110 | def train_step(inputs, model, labels, Loss, Metric, optimizer,vx,vy): 111 | with tf.GradientTape() as tape: 112 | predictions = model(inputs, training=1) 113 | loss = Loss(labels, predictions) 114 | grads = tape.gradient(loss, model.trainable_variables) 115 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 116 | m1 = Metric(labels, inputs) 117 | m2 = Metric(labels, model(inputs, training=0)) 118 | m3 = Metric(vy, model(vx, training=0)) 119 | return loss, m1, m2, m3 120 | 121 | 122 | def loss(x, y): 123 | x1 = tf.cast(x, tf.float32) 124 | y1 = tf.cast(y, tf.float32) 125 | shape = tf.cast(tf.shape(x), tf.float32) 126 | return tf.reduce_sum(tf.math.square(x1 - y1)) / shape[0] / shape[1] / shape[2] / shape[3] 127 | 128 | 129 | def psnr(x, y,max_val=255): 130 | x = tf.cast(x, tf.float32) 131 | y = tf.cast(y, tf.float32) 132 | batch = tf.cast(tf.shape(x)[0], tf.float32) 133 | return tf.reduce_sum(tf.image.psnr(x, y, max_val=tf.reduce_max(x))) / batch 134 | 135 | 136 | 137 | if __name__ == '__main__': 138 | os.environ["CUDA_VISIBLE_DEVICES"] = "2" 139 | iternum = 20 140 | epoch = 200 141 | batch = 5 142 | angles = 60 143 | theta = np.linspace(0, 180, angles, endpoint=False) 144 | # udir = "/home/wangwei/ct-compare/CPTAC-LUAD//npy/" 145 | udir = "./train/" 146 | vdir = "validate" 147 | train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./512x512/weights/red_cnn') -------------------------------------------------------------------------------- /fan-beam/Readme: -------------------------------------------------------------------------------- 1 | Run testnew2.py to test our model. 
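testnew2.py loads three files by name: the trained weights ./fan-beam/weights/new2_model_lambda=0.5, the sparse system matrix AT_fan_512x512_theta=0_0.5_175.5_alpha=-40_0.05_40_beta=0_1_359_R=600.npz, and the test sinograms test_fan_data.npz. The sparse matrix and sinograms are distributed via the Baidu Yun link in the top-level README; place the two .npz files in the directory you run from before testing.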
2 | -------------------------------------------------------------------------------- /fan-beam/new2.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import cv2 as cv 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import os 6 | import datetime 7 | from skimage.transform import radon,iradon,rotate 8 | # from utilize import CT_uitil 9 | from scipy.fftpack import fft, ifft, fftfreq, fftshift 10 | import random 11 | 12 | class sinLayer(tf.keras.Model): 13 | def __init__(self, AT,alpha,w_c,s_shape = (360,1601),out_size=(512,512)): 14 | super(sinLayer, self).__init__() 15 | self.AT = AT 16 | self.w_c=w_c 17 | self.alpha=alpha 18 | self.h=alpha[1]-alpha[0] 19 | self.s_shape=s_shape 20 | self.out_size = out_size 21 | self.sinLayer=[] 22 | self.sinLayer_1 = [] 23 | self.ctLayer=[] 24 | self.ctLayer_1 = [] 25 | self.sinLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sinconv1', activation=tf.nn.relu)) 26 | self.M1=3 27 | for layers in range(1,self.M1+1): 28 | self.sinLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sinconv%d' % layers, use_bias=False)) 29 | self.sinLayer.append(tf.keras.layers.BatchNormalization()) 30 | self.sinLayer.append(tf.keras.layers.ReLU()) 31 | self.sinLayer.append(tf.keras.layers.Conv2D(1, 5, name='sinconv6',padding='same')) 32 | self.sinLayer_1.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sin1conv1', activation=tf.nn.relu)) 33 | ###CTLayer### 34 | self.ctLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ctconv1', activation=tf.nn.relu)) 35 | self.M2=5 36 | for layers in range(1,self.M2+1): 37 | self.ctLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ctconv%d' % layers, use_bias=False)) 38 | self.ctLayer.append(tf.keras.layers.BatchNormalization()) 39 | self.ctLayer.append(tf.keras.layers.ReLU()) 40 | self.ctLayer.append(tf.keras.layers.Conv2D(1, 5, name='ctconv6', padding='same')) 41 | self.ctLayer_1.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ct1conv1', activation=tf.nn.relu)) 42 | 43 | def decode(self, sin_fan): 44 | AT, alpha, h, w_c=self.AT,self.alpha,self.h,self.w_c 45 | cos_alpha = tf.math.cos(alpha) 46 | s_fan_shape = self.s_shape 47 | batch=tf.shape(sin_fan)[0] 48 | sin_fan1 = h * tf.expand_dims(sin_fan[:, :, :, 0] * cos_alpha, -1) 49 | sin_fan1 = tf.reshape(sin_fan1, [-1, s_fan_shape[1], 1]) 50 | filter_s_fan = tf.nn.conv1d(sin_fan1, tf.expand_dims(w_c, -1), stride=1, padding='SAME') 51 | # filter_s_fan1=tf.reshape(filter_s_fan,s_fan_shape) 52 | filter_s_fan2 = tf.reshape(filter_s_fan, [batch, -1]) 53 | filter_s_fan2 = tf.transpose(filter_s_fan2) 54 | rf = tf.sparse.sparse_dense_matmul(AT, filter_s_fan2) 55 | rf = tf.transpose(rf) 56 | rf = tf.reshape(rf, [batch, 512, 512, 1]) 57 | return rf 58 | 59 | def call(self, inputs): 60 | de_sin = self.sinLayer[0](inputs) 61 | pp = de_sin 62 | for i in range(1, self.M1 + 1): 63 | for j in range(0, 3): 64 | de_sin = self.sinLayer[3 * i + j - 2](de_sin) 65 | pp = de_sin + pp 66 | de_sin = self.sinLayer[3 * self.M1 + 1](pp/self.M1) + inputs 67 | 68 | fbp = self.decode(de_sin) 69 | 70 | outputs = self.ctLayer[0](fbp) 71 | qq = outputs 72 | for i in range(1, self.M2 + 1): 73 | for j in range(0, 3): 74 | outputs = self.ctLayer[3 * i + j - 2](outputs) 75 | qq = qq + outputs 76 | outputs = self.ctLayer[3 * self.M2 + 1](qq/self.M2) + fbp 77 | return [de_sin, outputs, fbp] 78 | 79 | def interp(f,xp,x): 80 | # f_img=f[:,:,::2,:] 81 | f_img=f 82 | shape=f.shape 
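# interp() linearly resamples the detector axis (axis 2) of the sinogram stack f
# from the original sample positions xp to the new positions x; query points left
# of xp[0] or right of xp[-1] are clamped to the first/last detector sample.
# Note that L is first set to the output length len(x) just below, and is then
# reused for the left-hand interpolation distances xx - xp[id - 1].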
83 | L=len(x) 84 | f_interp = np.zeros(shape=[shape[0],shape[1],L,shape[3]]) 85 | idL = np.where(x <= xp[0])[0] 86 | idR = np.where(x >= xp[-1])[0] 87 | xx = x[idL[-1] + 1:idR[0]] 88 | id = np.searchsorted(xp, xx) 89 | L = xx - xp[id - 1] 90 | R = xp[id] - xx 91 | w1 = R / (L + R) 92 | w2 = 1 - w1 93 | val1 = f_img[:, :, id - 1, :] 94 | val2 = f_img[:, :, id, :] 95 | val1 = val1.transpose([0, 1, 3, 2]) 96 | val2 = val2.transpose([0, 1, 3, 2]) 97 | temp = val1 * w1 + val2 * w2 98 | f_interp[:, :, idL[-1] + 1:idR[0], :] = temp.transpose([0, 1, 3, 2]) 99 | for i in idL: 100 | f_interp[:, :, i, :] = f_img[:, :, 0, :] 101 | for j in idR: 102 | f_interp[:, :, j, :] = f_img[:, :, -1, :] 103 | return f_interp 104 | 105 | @tf.function 106 | def decode(sin_fan,AT,alpha,h,w_c): 107 | cos_alpha=tf.math.cos(alpha) 108 | s_fan_shape = tf.shape(sin_fan) 109 | sin_fan1 = h * tf.expand_dims(sin_fan[:, :, :, 0] * cos_alpha, -1) 110 | sin_fan1 = tf.reshape(sin_fan1, [-1, s_fan_shape[2], 1]) 111 | filter_s_fan = tf.nn.conv1d(sin_fan1, tf.expand_dims(w_c, -1), stride=1, padding='SAME') 112 | # filter_s_fan1=tf.reshape(filter_s_fan,s_fan_shape) 113 | filter_s_fan2 = tf.reshape(filter_s_fan, [s_fan_shape[0], -1]) 114 | filter_s_fan2 = tf.transpose(filter_s_fan2) 115 | rf = tf.sparse.sparse_dense_matmul(AT, filter_s_fan2) 116 | rf = tf.transpose(rf) 117 | rf = tf.reshape(rf, [s_fan_shape[0], 512, 512, 1]) 118 | return rf 119 | 120 | 121 | def make_model_3(AT,alpha,w_c,s_shape=(720, 1601),out_size=(512,512)): 122 | CT=sinLayer(AT,alpha,w_c,s_shape,out_size) 123 | inputs = tf.keras.Input(shape=(s_shape[0],s_shape[1],1)) 124 | [de_sin, outputs, fbp]=CT(inputs) 125 | model = tf.keras.Model(inputs=inputs, outputs=[de_sin, outputs, fbp]) 126 | return model 127 | 128 | 129 | def train(epoch, udir,batch, theta, iternum, restore=0, ckpt='./weights/CT_tf2_4'): 130 | #####do not use tf.cast in train function since it will exhaust/use GPU memory 131 | s_shape = (360, 1601) 132 | out_size = (512, 512) 133 | 134 | data = np.load('train' + '_fan_data.npz') 135 | u_img=data['u'].astype('float32') 136 | f_img=data['sin_fan'].astype('float32') 137 | f_noisy_img=data['sin_fan_ini'].astype('float32') 138 | # u_ini=data['u_ini'].astype('float32') 139 | del data 140 | 141 | # M = np.max(np.max(u_ini, 1), 1) 142 | # M=np.reshape(M, [np.shape(M)[0],1,1,1]) 143 | # u_img=u_img/M*255 144 | # f_noisy_img=f_noisy_img/M*255 145 | # f_img=f_img/M*255 146 | # # u_ini=u_ini/M*255 147 | # del u_ini 148 | 149 | 150 | AT = np.load('AT_fan_512x512_theta=0_0.5_175.5_alpha=-40:0.05:40_beta=0:1:359_R=600.npz') 151 | val = AT['val'].astype('float32') 152 | index = AT['index'] 153 | shape = AT['shape'] 154 | w_c = AT['w_c'].astype('float32') 155 | AT = tf.sparse.SparseTensor(index, val, shape) 156 | AT = tf.sparse.reorder(AT) 157 | # AT = tf.cast(AT, tf.float32) 158 | del val 159 | del index 160 | max_alpha = 40 161 | k1 = 0.05 162 | k2=0.1 163 | n1=1601 164 | n2=801 165 | alpha = np.linspace(-max_alpha, max_alpha, n1) * np.pi / 180 166 | alpha = alpha.astype('float32') 167 | # alpha_1=np.linspace(-max_alpha, max_alpha, n2) * np.pi / 180 168 | # f_noisy_img = interp(f_noisy_img, alpha_1, alpha) 169 | # np.savez('./512x512/fan-beam/f_noisy_interp',f_noisy_img=f_noisy_img) 170 | # f_noisy_img =np.load('./512x512/fan-beam/f_noisy_interp.npz')['f_noisy_img'] 171 | print('shape of u_img:', u_img.shape) 172 | 173 | current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") 174 | train_log_dir = 'logs/gradient_tape/' + current_time + '/train' 175 | 
test_log_dir = 'logs/gradient_tape/' + current_time + '/test' 176 | 177 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) 178 | Model = make_model_3(AT, alpha, w_c, s_shape, out_size) 179 | # Model=sinLayer(AT,alpha,w_c,s_shape,out_size) 180 | # tf.keras.backend.clear_session() 181 | # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=train_log_dir) 182 | 183 | 184 | N=u_img.shape[0] 185 | vx=f_noisy_img[N-5:N] 186 | vy=[f_img[N-5:N],u_img[N-5:N]] 187 | 188 | train_data = tf.data.Dataset.from_tensor_slices((u_img[0:N-5],f_img[0:N-5], f_noisy_img[0:N-5])).shuffle(tf.cast(N-5,tf.int64)).batch(batch) 189 | if restore == 1: 190 | # call the build function in the layers since do not use tf.keras.Input 191 | ##maybe move the functions in build function to _ini_ need not do this 192 | _=Model(vx[0:1]) 193 | Model.load_weights(ckpt) 194 | print('load weights, done') 195 | for i in range(epoch): 196 | for iter, ufini in enumerate(train_data): 197 | u,f, f_noisy = ufini 198 | # Loss, m1, m2, m3 = train_step(f_noisy, Model, [f, u], loss, psnr, optimizer, vx, vy,epochnum=i) 199 | Loss, m1, m2, m3 = train_step(f_noisy, Model, [f, u], loss_1, psnr, optimizer, vx, vy, epochnum=i) 200 | print(iter, "/", i, ":", Loss.numpy(), 201 | "psnr_f_fnoisy:", m1.numpy(), 202 | "psnr1", [m2[0].numpy(), m2[1].numpy(), m2[2].numpy()], 203 | ###psnr of f and f_noisy, u and fbp, u and reconstructe,respectively 204 | 'psnr3:', [m3[0].numpy(), m3[1].numpy(), m3[2].numpy()] 205 | ) 206 | 207 | if i%2==0: 208 | Model.save_weights(ckpt) 209 | # Model.compile(optimizer=optimizer, loss=[loss], metrics=[psnr]) 210 | # Model.fit(x, y, batch_size=batch, epochs=epoch, callbacks=[tensorboard_callback], 211 | # validation_split=1/80) 212 | Model.save_weights(ckpt) 213 | # tf.keras.utils.plot_model(Model, 'multi_input_and_output_model.png', show_shapes=True) 214 | 215 | 216 | @tf.function 217 | def train_step(inputs, model, labels, Loss, Metric, optimizer,vx,vy,epochnum): 218 | # if epochnum<1000: 219 | # weights = 0.9999 220 | # else: 221 | # weights = 0.0001 222 | with tf.GradientTape() as tape: 223 | predictions = model(inputs, training=1) 224 | loss = Loss(labels, predictions) 225 | grads = tape.gradient(loss, model.trainable_variables) 226 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 227 | # m1 = Metric(labels, inputs) 228 | m1 = tf.reduce_sum(tf.image.psnr(labels[0], inputs, max_val=tf.reduce_max(labels[0]))) / tf.cast(tf.shape(inputs)[0],tf.float32) 229 | m2 = Metric(labels, model(inputs, training=0)) 230 | m3 = Metric(vy, model(vx, training=0)) 231 | return loss, m1, m2, m3 232 | 233 | 234 | def loss_1(x, y,weights=0.5): 235 | x0 = tf.cast(x[0], tf.float32) 236 | x1 = tf.cast(x[1], tf.float32) 237 | y0 = tf.cast(y[0], tf.float32) 238 | y1 = tf.cast(y[1], tf.float32) 239 | shape = tf.cast(tf.shape(x[0]), tf.float32) 240 | shape1 = tf.cast(tf.shape(x[1]), tf.float32) 241 | return weights*tf.reduce_sum(tf.math.square(x0 - y0)) / shape[0] / shape[1] / shape[2] / shape[3]\ 242 | +(1-weights)*tf.reduce_sum(tf.math.square(x1 - y1))/shape1[0] / shape1[1] / shape1[2] / shape1[3] 243 | # return tf.reduce_sum(tf.math.square(x0 - y0)) / shape[0] / shape[1] / shape[2] / shape[3] 244 | # return tf.reduce_sum(tf.math.square(x1 - y1))/shape1[0] / shape1[1] / shape1[2] / shape1[3] 245 | 246 | def psnr(x, y,max_val=255): 247 | x0 = tf.cast(x[0], tf.float32) 248 | x1 = tf.cast(x[1], tf.float32) 249 | y0 = tf.cast(y[0], tf.float32) 250 | y1 = tf.cast(y[1], tf.float32) 251 | y2 = tf.cast(y[2], 
tf.float32) 252 | batch = tf.cast(tf.shape(x[1])[0], tf.float32) 253 | psnr1=tf.reduce_sum(tf.image.psnr(x0, y0, max_val=tf.reduce_max(x0))) / batch######psnr of f and de_sin 254 | psnr2=tf.reduce_sum(tf.image.psnr(x1, y2, max_val=tf.reduce_max(x1))) / batch######psnr of u and fbp 255 | psnr3 = tf.reduce_sum(tf.image.psnr(x1, y1, max_val=tf.reduce_max(x1))) / batch#####psnr of u and reconstructed 256 | return [psnr1,psnr2,psnr3] 257 | 258 | 259 | if __name__ == '__main__': 260 | os.environ["CUDA_VISIBLE_DEVICES"] = "1" 261 | iternum = 20 262 | epoch = 100 263 | 264 | batch = 2 265 | angles = 60 266 | theta = np.linspace(0, 180, angles, endpoint=False) 267 | udir = "./train/" 268 | vdir = "validate" 269 | train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./512x512/fan-beam/weights/new1_model_lambda=0.5') -------------------------------------------------------------------------------- /fan-beam/testnew2.py: -------------------------------------------------------------------------------- 1 | import new2 as net 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import tensorflow as tf 5 | import os 6 | def interp(f,xp,x): 7 | # f_img=f[:,:,::2,:] 8 | f_img=f 9 | shape=f.shape 10 | L=len(x) 11 | f_interp = np.zeros(shape=[shape[0],shape[1],L,shape[3]]) 12 | idL = np.where(x <= xp[0])[0] 13 | idR = np.where(x >= xp[-1])[0] 14 | xx = x[idL[-1] + 1:idR[0]] 15 | id = np.searchsorted(xp, xx) 16 | L = xx - xp[id - 1] 17 | R = xp[id] - xx 18 | w1 = R / (L + R) 19 | w2 = 1 - w1 20 | val1 = f_img[:, :, id - 1, :] 21 | val2 = f_img[:, :, id, :] 22 | val1 = val1.transpose([0, 1, 3, 2]) 23 | val2 = val2.transpose([0, 1, 3, 2]) 24 | temp = val1 * w1 + val2 * w2 25 | f_interp[:, :, idL[-1] + 1:idR[0], :] = temp.transpose([0, 1, 3, 2]) 26 | for i in idL: 27 | f_interp[:, :, i, :] = f_img[:, :, 0, :] 28 | for j in idR: 29 | f_interp[:, :, j, :] = f_img[:, :, -1, :] 30 | return f_interp 31 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 32 | #AT = np.load('AT_fan_512x512_theta=0_0.5_175.5_alpha=-40:0.05:40_beta=0:1:359_R=600.npz') 33 | AT = np.load('AT_fan_512x512_theta=0_0.5_175.5_alpha=-40_0.05_40_beta=0_1_359_R=600.npz') 34 | val = AT['val'].astype('float32') 35 | index = AT['index'] 36 | shape = AT['shape'] 37 | w_c = AT['w_c'].astype('float32') 38 | AT = tf.sparse.SparseTensor(index, val, shape) 39 | AT = tf.sparse.reorder(AT)####### 40 | del val 41 | del index 42 | 43 | batch=5 44 | s_shape = (360, 1601) 45 | out_size = (512, 512) 46 | max_alpha = 40 47 | n1=1601 48 | alpha = np.linspace(-max_alpha, max_alpha, n1) * np.pi / 180 49 | alpha = alpha.astype('float32') 50 | Model=net.make_model_3(AT, alpha, w_c, s_shape, out_size) 51 | data=np.load('test'+'_fan_data.npz') 52 | f_noisy_img=data['sin_fan_ini'].astype('float32') 53 | ckpt='./fan-beam/weights'+'/new2_model_lambda=0.5' 54 | L=500 55 | f_noisy=f_noisy_img[0:L] 56 | def inimodel(f_noisy,Model,ckpt=ckpt): 57 | _ = Model(f_noisy[0:1]) 58 | Model.load_weights(ckpt) 59 | 60 | 61 | def evaluate(f_noisy, batch, Model): 62 | _ = inimodel(f_noisy, Model) 63 | prediction = np.zeros([L,512,512,1]) 64 | iter = list(range(0, L, batch)) 65 | for i in range(len(iter)): 66 | prediction[iter[i]:iter[i] + batch] = Model(f_noisy[iter[i]:iter[i] + batch])[1].numpy() 67 | print(i) 68 | return prediction 69 | 70 | prediction=evaluate(f_noisy,batch,Model) 71 | ii=np.random.randint(0,L) 72 | print('show figure:',ii) 73 | plt.imshow(f_noisy[ii,:,:,0],cmap='gray') 74 | plt.figure() 75 | plt.imshow(prediction[ii,:,:,0],cmap='gray') 76 | plt.show() 77 | 78 | # 
vy=data['u'].astype('float32') 79 | # vy=vy[0:L] 80 | # vy=tf.cast(vy,tf.float32) 81 | # pp=tf.image.psnr(tf.cast(prediction,tf.float32),vy,tf.reduce_max(prediction)).numpy() 82 | # qq=tf.image.ssim(tf.cast(prediction,tf.float32),vy,tf.reduce_max(prediction)).numpy() 83 | # print('average psnr:',tf.reduce_mean(pp).numpy()) 84 | # print('average ssim:',tf.reduce_mean(qq).numpy()) 85 | 86 | 87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /fan2para/Readme: -------------------------------------------------------------------------------- 1 | run testnew1.py to test the fan2para network 2 | -------------------------------------------------------------------------------- /fan2para/new1.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import cv2 as cv 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import os 6 | import datetime 7 | from skimage.transform import radon,iradon,rotate 8 | from scipy.fftpack import fft, ifft, fftfreq, fftshift 9 | class sinLayer(tf.keras.Model): 10 | def __init__(self, AT,s_shape = (725, 360),out_size=(512,512)): 11 | super(sinLayer, self).__init__() 12 | self.AT = AT 13 | self.s_shape=s_shape 14 | self.out_size = out_size 15 | w_b=w_bfunction(np.pi,np.linspace(-np.floor(s_shape[0]/2),s_shape[0]-np.floor(s_shape[0]/2)-1,s_shape[0])) 16 | self.w_b =w_b.astype('float32') 17 | self.sinLayer=[] 18 | self.sinLayer_1 = [] 19 | 20 | self.ctLayer=[] 21 | self.ctLayer_1 = [] 22 | ###sinLayer### 23 | self.sinLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sinconv1', activation=tf.nn.relu)) 24 | self.M1=4 25 | for layers in range(1,self.M1+1): 26 | self.sinLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sinconv%d' % layers, use_bias=False)) 27 | self.sinLayer.append(tf.keras.layers.BatchNormalization()) 28 | self.sinLayer.append(tf.keras.layers.ReLU()) 29 | self.sinLayer.append(tf.keras.layers.Conv2D(1, 5, name='sinconv6',padding='same')) 30 | ###CTLayer### 31 | self.ctLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ctconv1', activation=tf.nn.relu)) 32 | self.M2=5 33 | for layers in range(1,self.M2+1): 34 | self.ctLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ctconv%d' % layers, use_bias=False)) 35 | self.ctLayer.append(tf.keras.layers.BatchNormalization()) 36 | self.ctLayer.append(tf.keras.layers.ReLU()) 37 | self.ctLayer.append(tf.keras.layers.Conv2D(1, 5, name='ctconv6', padding='same')) 38 | 39 | def decode(self, sin_fan): 40 | # AT, alpha, h, w_c=self.AT,self.alpha,self.h,self.w_c 41 | AT, w_b = self.AT, self.w_b 42 | sin_fan = tf.transpose(sin_fan, perm=[0, 2, 1, 3]) 43 | # cos_alpha = tf.math.cos(alpha) 44 | s_fan_shape = sin_fan.shape 45 | batch = tf.shape(sin_fan)[0] 46 | sin_fan1 = tf.reshape(sin_fan, [-1, s_fan_shape[2], 1]) 47 | filter_s_fan = tf.nn.conv1d(sin_fan1, tf.expand_dims(tf.expand_dims(w_b, -1), -1), stride=1, padding='SAME') 48 | # filter_s_fan1=tf.reshape(filter_s_fan,s_fan_shape) 49 | filter_s_fan2 = tf.reshape(filter_s_fan, [batch, -1]) 50 | filter_s_fan2 = tf.transpose(filter_s_fan2) 51 | rf = tf.sparse.sparse_dense_matmul(AT, filter_s_fan2) 52 | rf = tf.transpose(rf) 53 | rf = tf.reshape(rf, [batch, 512, 512, 1]) 54 | return 4 * rf 55 | 56 | # @tf.function 57 | def call(self, inputs): 58 | de_sin = self.sinLayer[0](inputs) 59 | pp = de_sin 60 | for i in range(1, self.M1 + 1): 61 | for j in range(0, 3): 62 | de_sin = self.sinLayer[3 * i + j - 
2](de_sin) 63 | pp = de_sin + pp 64 | de_sin = self.sinLayer[3 * self.M1 + 1](pp/self.M1) + inputs 65 | 66 | fbp = self.decode(de_sin) 67 | 68 | outputs = self.ctLayer[0](fbp) 69 | qq = outputs 70 | for i in range(1, self.M2 + 1): 71 | for j in range(0, 3): 72 | outputs = self.ctLayer[3 * i + j - 2](outputs) 73 | qq = qq + outputs 74 | outputs = self.ctLayer[3 * self.M2 + 1](qq/self.M2) + fbp 75 | return [de_sin, outputs, fbp] 76 | 77 | def interp(f,xp,x): 78 | # f_img=f[:,:,::2,:] 79 | f_img=f 80 | shape=f.shape 81 | L=len(x) 82 | f_interp = np.zeros(shape=[shape[0],shape[1],L,shape[3]]) 83 | idL = np.where(x <= xp[0])[0] 84 | idR = np.where(x >= xp[-1])[0] 85 | xx = x[idL[-1] + 1:idR[0]] 86 | id = np.searchsorted(xp, xx) 87 | L = xx - xp[id - 1] 88 | R = xp[id] - xx 89 | w1 = R / (L + R) 90 | w2 = 1 - w1 91 | val1 = f_img[:, :, id - 1, :] 92 | val2 = f_img[:, :, id, :] 93 | val1 = val1.transpose([0, 1, 3, 2]) 94 | val2 = val2.transpose([0, 1, 3, 2]) 95 | temp = val1 * w1 + val2 * w2 96 | f_interp[:, :, idL[-1] + 1:idR[0], :] = temp.transpose([0, 1, 3, 2]) 97 | for i in idL: 98 | f_interp[:, :, i, :] = f_img[:, :, 0, :] 99 | for j in idR: 100 | f_interp[:, :, j, :] = f_img[:, :, -1, :] 101 | return f_interp 102 | 103 | def u_function(s): 104 | u=np.zeros(s.shape) 105 | index_1=np.where(s==0)[0] 106 | u[index_1]=1/2 107 | index=np.where(s!=0)[0] 108 | v=s[index] 109 | u[index]=(np.cos(v)-1)/(v**2)+np.sin(v)/v 110 | return u 111 | def w_bfunction(b,s): 112 | return u_function(b*s)*(b**2)/(4*np.pi**2) 113 | 114 | def decode(sin_fan,AT,w_b): 115 | # AT, alpha, h, w_c=self.AT,self.alpha,self.h,self.w_c 116 | sin_fan=tf.transpose(sin_fan,perm=[0,2,1,3]) 117 | # cos_alpha = tf.math.cos(alpha) 118 | s_fan_shape =sin_fan.shape 119 | batch=tf.shape(sin_fan)[0] 120 | sin_fan1 = tf.reshape(sin_fan, [-1, s_fan_shape[2], 1]) 121 | filter_s_fan = tf.nn.conv1d(sin_fan1, tf.expand_dims(tf.expand_dims(w_b,-1),-1), stride=1, padding='SAME') 122 | # filter_s_fan1=tf.reshape(filter_s_fan,s_fan_shape) 123 | filter_s_fan2 = tf.reshape(filter_s_fan, [batch, -1]) 124 | filter_s_fan2 = tf.transpose(filter_s_fan2) 125 | rf = tf.sparse.sparse_dense_matmul(AT, filter_s_fan2) 126 | rf = tf.transpose(rf) 127 | rf = tf.reshape(rf, [batch, 512, 512, 1]) 128 | return 4*rf 129 | 130 | 131 | 132 | 133 | def make_model_3(AT,s_shape=(725, 360),out_size=(512,512)): 134 | CT=sinLayer(AT,s_shape,out_size) 135 | inputs = tf.keras.Input(shape=(s_shape[0],s_shape[1],1)) 136 | [de_sin, outputs, fbp]=CT(inputs) 137 | model = tf.keras.Model(inputs=inputs, outputs=[de_sin, outputs, fbp]) 138 | return model 139 | 140 | 141 | def train(epoch, udir,batch, theta, iternum, restore=0, ckpt='./weights/CT_tf2_4'): 142 | angles = np.shape(theta)[0] 143 | 144 | s_shape = (725, 180) 145 | out_size = (512, 512) 146 | AT = np.load('AT_' + str(180) + '_512x512' + '.npz') 147 | 148 | data = np.load('train' + '_fan2para.npz') 149 | u_img = data['u'].astype('float32') 150 | f_noisy_img = data['f_noisy'].astype('float32') 151 | f_img = data['f'].astype('float32') 152 | # u_ini = data['ini_u'].astype('float32') 153 | # M = np.max(np.max(u_ini, 1), 1) 154 | # M=np.reshape(M, [np.shape(M)[0],1,1,1]) 155 | # u_img=u_img/M*255 156 | # f_noisy_img=f_noisy_img/M*255 157 | # f_img=f_img/M*255 158 | # u_ini=u_ini/M*255 159 | 160 | val = AT['name1'].astype('float32') 161 | index = AT['name2'] 162 | shape = AT['name3'] 163 | # del u_ini 164 | 165 | AT = tf.sparse.SparseTensor(index, val, shape) 166 | # AT = tf.cast(AT, tf.float32) 167 | del val 168 | del index 169 
| 170 | # u_img = np.load(udir + 'u_CT_img_no_scale.npy') 171 | print('shape of u_img:', u_img.shape) 172 | # f_noisy_img = np.load(udir + '/f_noisy,angle=' + str(180) + '_ no_scale__0.5.npy') 173 | 174 | # f_img=np.load(udir + '/f,angle=' + str(180) + '_ no_scale__0.5.npy') 175 | # ini_u_img = np.load(udir + '/ini,angle=' + str(angles) + '_255.0_0.002.npy') 176 | 177 | 178 | current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") 179 | train_log_dir = 'logs/gradient_tape/' + current_time + '/train' 180 | test_log_dir = 'logs/gradient_tape/' + current_time + '/test' 181 | 182 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) 183 | # Model = make_model(batch, AT,w_b,s_shape,out_size) 184 | # Model=sinLayer(AT,s_shape,out_size) 185 | Model=make_model_3(AT,s_shape,out_size) 186 | tf.keras.backend.clear_session() 187 | # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=train_log_dir) 188 | 189 | # u_img = tf.cast(u_img, tf.float32) 190 | # f_noisy_img = tf.cast(f_noisy_img, tf.float32) 191 | # f_img=tf.cast(f_img, tf.float32) 192 | 193 | N=tf.shape(u_img)[0] 194 | vx=f_noisy_img[N-5:N] 195 | vy=[f_img[N-5:N],u_img[N-5:N]] 196 | train_data = tf.data.Dataset.from_tensor_slices((u_img[0:N-5],f_img[0:N-5], f_noisy_img[0:N-5])).shuffle(tf.cast(N-5,tf.int64)).batch(batch) 197 | # _ = Model(vx[0:1]) 198 | if restore == 1: 199 | # call the build function in the layers since do not use tf.keras.Input 200 | ##maybe move the functions in build function to _ini_ need not do this 201 | _=Model(vx[0:1]) 202 | Model.load_weights(ckpt) 203 | print('load weights, done') 204 | for i in range(epoch): 205 | for iter, ufini in enumerate(train_data): 206 | u,f, f_noisy = ufini 207 | # Loss, m1, m2, m3 = train_step(f_noisy, Model, [f, u], loss, psnr, optimizer, vx, vy,epochnum=i) 208 | Loss, m1, m2, m3 = train_step(f_noisy, Model, [f, u], loss_1, psnr, optimizer, vx, vy, epochnum=i) 209 | print(iter, "/", i, ":", Loss.numpy(), 210 | "psnr_f_fnoisy:", m1.numpy(), 211 | "psnr1", [m2[0].numpy(), m2[1].numpy(), m2[2].numpy()], 212 | ###psnr of f and f_noisy, u and fbp, u and reconstructe,respectively 213 | 'psnr3:', [m3[0].numpy(), m3[1].numpy(), m3[2].numpy()] 214 | ) 215 | 216 | if i%2==0: 217 | Model.save_weights(ckpt) 218 | # Model.compile(optimizer=optimizer, loss=[loss], metrics=[psnr]) 219 | # Model.fit(x, y, batch_size=batch, epochs=epoch, callbacks=[tensorboard_callback], 220 | # validation_split=1/80) 221 | Model.save_weights(ckpt) 222 | # tf.keras.utils.plot_model(Model, 'multi_input_and_output_model.png', show_shapes=True) 223 | 224 | 225 | @tf.function 226 | def train_step(inputs, model, labels, Loss, Metric, optimizer,vx,vy,epochnum): 227 | # if epochnum<1000: 228 | # weights = 0.9999 229 | # else: 230 | # weights = 0.0001 231 | with tf.GradientTape() as tape: 232 | predictions = model(inputs, training=1) 233 | loss = Loss(labels, predictions) 234 | grads = tape.gradient(loss, model.trainable_variables) 235 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 236 | # m1 = Metric(labels, inputs) 237 | m1 = tf.reduce_sum(tf.image.psnr(labels[0], inputs, max_val=tf.reduce_max(labels[0]))) / tf.cast(tf.shape(inputs)[0],tf.float32) 238 | m2 = Metric(labels, model(inputs, training=0)) 239 | m3 = Metric(vy, model(vx, training=0)) 240 | return loss, m1, m2, m3 241 | 242 | 243 | def loss_1(x, y,weights=0.5): 244 | x0 = tf.cast(x[0], tf.float32) 245 | x1 = tf.cast(x[1], tf.float32) 246 | y0 = tf.cast(y[0], tf.float32) 247 | y1 = tf.cast(y[1], tf.float32) 248 | shape = 
tf.cast(tf.shape(x[0]), tf.float32) 249 | shape1 = tf.cast(tf.shape(x[1]), tf.float32) 250 | return weights*tf.reduce_sum(tf.math.square(x0 - y0)) / shape[0] / shape[1] / shape[2] / shape[3]\ 251 | +(1-weights)*tf.reduce_sum(tf.math.square(x1 - y1))/shape1[0] / shape1[1] / shape1[2] / shape1[3] 252 | # return tf.reduce_sum(tf.math.square(x0 - y0)) / shape[0] / shape[1] / shape[2] / shape[3] 253 | # return tf.reduce_sum(tf.math.square(x1 - y1))/shape1[0] / shape1[1] / shape1[2] / shape1[3] 254 | 255 | def psnr(x, y,max_val=255): 256 | x0 = tf.cast(x[0], tf.float32) 257 | x1 = tf.cast(x[1], tf.float32) 258 | y0 = tf.cast(y[0], tf.float32) 259 | y1 = tf.cast(y[1], tf.float32) 260 | y2 = tf.cast(y[2], tf.float32) 261 | batch = tf.cast(tf.shape(x[1])[0], tf.float32) 262 | psnr1=tf.reduce_sum(tf.image.psnr(x0, y0, max_val=tf.reduce_max(x0))) / batch  # PSNR of f vs de_sin 263 | psnr2=tf.reduce_sum(tf.image.psnr(x1, y2, max_val=tf.reduce_max(x1))) / batch  # PSNR of u vs fbp 264 | psnr3 = tf.reduce_sum(tf.image.psnr(x1, y1, max_val=tf.reduce_max(x1))) / batch  # PSNR of u vs the reconstruction 265 | return [psnr1,psnr2,psnr3] 266 | 267 | 268 | if __name__ == '__main__': 269 | os.environ["CUDA_VISIBLE_DEVICES"] = "2" 270 | iternum = 20 271 | epoch = 100 272 | 273 | batch = 5 274 | theta = np.linspace(0, 180, 180, endpoint=False) 275 | udir = "./train/" 276 | vdir = "validate" 277 | # train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./weights/two_stage_4_2') 278 | train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./512x512/weights/new1_model_lambda=0.5') -------------------------------------------------------------------------------- /fan2para/testnew1.py: -------------------------------------------------------------------------------- 1 | import new1 as net 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import tensorflow as tf 5 | import os 6 | 7 | def interp(f,xp,x): 8 | # f_img=f[:,:,::2,:] 9 | f_img=f 10 | shape=f.shape 11 | L=len(x) 12 | f_interp = np.zeros(shape=[shape[0],shape[1],L,shape[3]]) 13 | idL = np.where(x <= xp[0])[0] 14 | idR = np.where(x >= xp[-1])[0] 15 | xx = x[idL[-1] + 1:idR[0]] 16 | id = np.searchsorted(xp, xx) 17 | L = xx - xp[id - 1] 18 | R = xp[id] - xx 19 | w1 = R / (L + R) 20 | w2 = 1 - w1 21 | val1 = f_img[:, :, id - 1, :] 22 | val2 = f_img[:, :, id, :] 23 | val1 = val1.transpose([0, 1, 3, 2]) 24 | val2 = val2.transpose([0, 1, 3, 2]) 25 | temp = val1 * w1 + val2 * w2 26 | f_interp[:, :, idL[-1] + 1:idR[0], :] = temp.transpose([0, 1, 3, 2]) 27 | for i in idL: 28 | f_interp[:, :, i, :] = f_img[:, :, 0, :] 29 | for j in idR: 30 | f_interp[:, :, j, :] = f_img[:, :, -1, :] 31 | return f_interp 32 | os.environ["CUDA_VISIBLE_DEVICES"] = "1" 33 | Aangles=180 34 | AT = np.load('AT_' + str(Aangles) + '_512x512' + '.npz') 35 | val = AT['name1'].astype('float32') 36 | index = AT['name2'] 37 | shape = AT['name3'] 38 | AT = tf.sparse.SparseTensor(index, val, shape) 39 | # AT = tf.cast(AT, tf.float32) 40 | theta=np.linspace(0, 180, Aangles, endpoint=False) 41 | Model=net.make_model_3(AT,(725, 180),(512, 512)) 42 | ckpt='./weights'+'/new1_model_lambda=0.5' 43 | batch=5 44 | data = np.load('test' + '_fan2para.npz') 45 | f_noisy_img = data['f_noisy'].astype('float32') 46 | L=500 47 | f_noisy_img=f_noisy_img[0:L] 48 | def inimodel(f_noisy,Model,ckpt=ckpt): 49 | _ = Model(f_noisy[0:1]) 50 | Model.load_weights(ckpt) 51 | 52 | 53 | def evaluate(f_noisy, batch, Model): 54 | _ = inimodel(f_noisy, Model) 55 | prediction = np.zeros([L,512,512,1]) 56 | iter = 
list(range(0, L, batch)) 57 | for i in range(len(iter)): 58 | prediction[iter[i]:iter[i] + batch] = Model(f_noisy[iter[i]:iter[i] + batch])[1].numpy() 59 | print(i) 60 | return prediction 61 | 62 | prediction=evaluate(f_noisy_img,batch,Model) 63 | ii=np.random.randint(0,L) 64 | print('show figure:',ii) 65 | plt.imshow(f_noisy_img[ii,:,:,0],cmap='gray') 66 | plt.figure() 67 | plt.imshow(prediction[ii,:,:,0],cmap='gray') 68 | plt.show() 69 | 70 | # vy=data['u'].astype('float32') 71 | # vy=vy[0:L] 72 | # vy=tf.cast(vy,tf.float32) 73 | # pp=tf.image.psnr(tf.cast(prediction,tf.float32),vy,tf.reduce_max(prediction)).numpy() 74 | # qq=tf.image.ssim(tf.cast(prediction,tf.float32),vy,tf.reduce_max(prediction)).numpy() 75 | # print('average psnr:',tf.reduce_mean(pp).numpy()) 76 | # print('average ssim:',tf.reduce_mean(qq).numpy()) 77 | 78 | 79 | 80 | -------------------------------------------------------------------------------- /parabeam/Readme: -------------------------------------------------------------------------------- 1 | To test our network, run testnew.py 2 | 3 | To train our model, 4 | run make_ini.py to prepare the training data, where the input CT images have size [L,512,512,1]. 5 | After producing the training data, run newmodel.py to begin training the model. -------------------------------------------------------------------------------- /parabeam/make_ini.py: -------------------------------------------------------------------------------- 1 | import glob 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | import tensorflow as tf 5 | from skimage.transform import radon,iradon 6 | from make_sin_noise import add_sin_noise 7 | from utilize import CT_uitil 8 | import os 9 | from skimage.measure import compare_psnr  # removed in newer scikit-image; use skimage.metrics.peak_signal_noise_ratio there 10 | 11 | 12 | 13 | def make_ini(u_img,angles,udir): 14 | # u_img = readimage(dir, ux, uy) 15 | # np.save(udir + '/npy/' + '/u_CT_img_'+str(np.max(u_img)), u_img) 16 | print('shape of u_img:', u_img.shape) 17 | print('maximum of u_img:', np.max(u_img)) 18 | # np.save(udir + '/npy/' + '/u_CT_img_no_scale', u_img) 19 | theta = np.linspace(0, 180, angles, endpoint=False) 20 | ini_u_img = np.zeros(u_img.shape) 21 | temp = radon(u_img[0, :, :, 0], theta=theta, circle=False) 22 | shape = temp.shape 23 | ct_sin_img = np.zeros([u_img.shape[0], shape[0], shape[1], u_img.shape[3]]) 24 | ct_sin_img_noisy = np.zeros([u_img.shape[0], shape[0], shape[1], u_img.shape[3]]) 25 | 26 | var=0.5 27 | inter=54 28 | iter=list(range(0,u_img.shape[0],inter)) 29 | ct = CT_uitil(u_img.shape, theta) 30 | for i in range(len(iter)): 31 | ct_sin_img[iter[i]:iter[i]+inter] = ct.radon(u_img[iter[i]:iter[i]+inter]).numpy() 32 | # ct_sin_img_noisy[iter[i]:iter[i]+inter] = add_sin_noise(ct_sin_img[iter[i]:iter[i]+inter],var=var) 33 | ct_sin_img_noisy[iter[i]:iter[i]+inter] = ct_sin_img[iter[i]:iter[i]+inter]  # no noise added here 34 | ini_u_img[iter[i]:iter[i]+inter] = ct.iradon(ct_sin_img_noisy[iter[i]:iter[i]+inter]).numpy() 35 | print(i) 36 | 37 | 38 | # np.save(udir + '/npy//512x512/' + '/ini,angle=' + str(angles) + '_no_scale_' + '_' + str(var), 39 | # ini_u_img) 40 | # np.save(udir + '/npy//512x512/' + '/f,angle=' + str(angles) + '_no_scale_' + '_' + str(var), 41 | # ct_sin_img) 42 | # np.save(udir + '/npy//512x512/' + '/f_noisy,angle=' + str(angles) + '_no_scale_' + '_' + str(var), 43 | # ct_sin_img_noisy) 44 | 45 | np.save(udir + '/ini,angle=' + str(angles) + '_no_scale_' + '_' + str(var), 46 | ini_u_img) 47 | np.save(udir + '/f,angle=' + str(angles) + '_no_scale_' + '_' 
+ str(var), 48 | ct_sin_img) 49 | np.save(udir + '/f_noisy,angle=' + str(angles) + '_no_scale_' + '_' + str(var), 50 | ct_sin_img_noisy) 51 | 52 | print('save_complete') 53 | print('min of ct_sin_img_noisy:', np.min(ct_sin_img_noisy)) 54 | psnr=np.zeros([1,u_img.shape[0]]) 55 | psnr1=np.zeros([1,u_img.shape[0]]) 56 | for i in range( u_img.shape[0]): 57 | psnr[0,i]=compare_psnr(u_img[i],ini_u_img[i],np.max(u_img[i])) 58 | # psnr1[0, i] = compare_psnr(ct_sin_img[i], ct_sin_img_noisy[i], np.max(ct_sin_img[i])) 59 | print('psnr:',psnr) 60 | print('psnr1:', psnr1) 61 | 62 | # print(tf.image.psnr(u_img, ini_u_img, np.max(u_img)).numpy()) 63 | # print(tf.image.psnr(ct_sin_img, ct_sin_img_noisy, np.max(ct_sin_img)).numpy()) 64 | 65 | if __name__ == "__main__": 66 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 67 | # set='train' 68 | set = 'test' 69 | udir = 'E:\CT_image\AMP\sparse_angles\\'+set+'/npy' 70 | ux, uy = 512, 512 71 | # u_img=CT.make_CT(udir,ux,uy) 72 | # u_img=np.load(udir+'/npy/'+'u_CT_img_test_no_scale.npy') 73 | u_img = np.load(udir + '/u_CT_img_no_scale.npy') 74 | if set=='train': 75 | L=1000 76 | if set=='test': 77 | L=500 78 | L = np.minimum(L, len(u_img)) 79 | u_img=u_img[0:L] 80 | np.save(udir + '//u_CT_img_no_scale', u_img) 81 | # print('shape of u_img:', u_img.shape) 82 | angles = 60 83 | make_ini(u_img, angles,udir) 84 | 85 | -------------------------------------------------------------------------------- /parabeam/newmodel.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import cv2 as cv 3 | import matplotlib.pyplot as plt 4 | import numpy as np 5 | import os 6 | import datetime 7 | from skimage.transform import radon,iradon,rotate 8 | from scipy.fftpack import fft, ifft, fftfreq, fftshift 9 | class sinLayer(tf.keras.Model): 10 | def __init__(self, AT,s_shape = (725, 360),out_size=(512,512)): 11 | super(sinLayer, self).__init__() 12 | self.AT = AT 13 | self.s_shape=s_shape 14 | self.out_size = out_size 15 | w_b=w_bfunction(np.pi,np.linspace(-np.floor(s_shape[0]/2),s_shape[0]-np.floor(s_shape[0]/2)-1,s_shape[0])) 16 | self.w_b =w_b.astype('float32') 17 | self.sinLayer=[] 18 | self.sinLayer_1 = [] 19 | 20 | self.ctLayer=[] 21 | self.ctLayer_1 = [] 22 | ###sinLayer### 23 | self.sinLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sinconv1', activation=tf.nn.relu)) 24 | self.M1=4 25 | for layers in range(1,self.M1+1): 26 | self.sinLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='sinconv%d' % layers, use_bias=False)) 27 | self.sinLayer.append(tf.keras.layers.BatchNormalization()) 28 | self.sinLayer.append(tf.keras.layers.ReLU()) 29 | self.sinLayer.append(tf.keras.layers.Conv2D(1, 5, name='sinconv6',padding='same')) 30 | 31 | ###CTLayer### 32 | self.ctLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ctconv1', activation=tf.nn.relu)) 33 | self.M2=5 34 | for layers in range(1,self.M2+1): 35 | self.ctLayer.append(tf.keras.layers.Conv2D(64, 5, padding='same', name='ctconv%d' % layers, use_bias=False)) 36 | self.ctLayer.append(tf.keras.layers.BatchNormalization()) 37 | self.ctLayer.append(tf.keras.layers.ReLU()) 38 | self.ctLayer.append(tf.keras.layers.Conv2D(1, 5, name='ctconv6', padding='same')) 39 | def decode(self,sin_fan): 40 | # AT, alpha, h, w_c=self.AT,self.alpha,self.h,self.w_c 41 | AT, w_b=self.AT,self.w_b 42 | sin_fan=tf.transpose(sin_fan,perm=[0,2,1,3]) 43 | # cos_alpha = tf.math.cos(alpha) 44 | s_fan_shape =sin_fan.shape 45 | batch=tf.shape(sin_fan)[0] 46 | 
sin_fan1 = tf.reshape(sin_fan, [-1, s_fan_shape[2], 1]) 47 | filter_s_fan = tf.nn.conv1d(sin_fan1, tf.expand_dims(tf.expand_dims(w_b,-1),-1), stride=1, padding='SAME') 48 | # filter_s_fan1=tf.reshape(filter_s_fan,s_fan_shape) 49 | filter_s_fan2 = tf.reshape(filter_s_fan, [batch, -1]) 50 | filter_s_fan2 = tf.transpose(filter_s_fan2) 51 | rf = tf.sparse.sparse_dense_matmul(AT, filter_s_fan2) 52 | rf = tf.transpose(rf) 53 | rf = tf.reshape(rf, [batch, 512, 512, 1]) 54 | return 4*rf 55 | # @tf.function 56 | # def call(self, inputs): 57 | # M1=int(self.M1/2) 58 | # M2 = int(self.M2 / 2) 59 | # pp=[] 60 | # qq=[] 61 | # de_sin = self.sinLayer[0](inputs) 62 | # for i in range(1,M1+1): 63 | # pp.append(de_sin) 64 | # for j in range(0, 3): 65 | # de_sin=self.sinLayer[3*i+j-2](de_sin) 66 | # for i in range(M1+1, self.M1+1): 67 | # for j in range(0, 3): 68 | # de_sin = self.sinLayer[3 * i + j - 2](de_sin) 69 | # de_sin=de_sin+pp[i-M1-1] 70 | # de_sin=self.sinLayer[3*self.M1+1](de_sin)+inputs 71 | # 72 | # fbp=self.decode(de_sin) 73 | # 74 | # outputs=self.ctLayer[0](fbp) 75 | # for i in range(1,M2+1): 76 | # qq.append(outputs) 77 | # for j in range(0, 3): 78 | # outputs=self.ctLayer[3*i+j-2](outputs) 79 | # for i in range(M2+1, self.M2+1): 80 | # for j in range(0, 3): 81 | # outputs = self.ctLayer[3 * i + j - 2](outputs) 82 | # outputs=outputs+qq[i-M2-1] 83 | # outputs=self.ctLayer[3 * self.M2+1](outputs)+fbp 84 | # return [de_sin,outputs,fbp] 85 | def call(self, inputs): 86 | de_sin = self.sinLayer[0](inputs) 87 | pp = de_sin 88 | for i in range(1,self.M1+1): 89 | for j in range(0, 3): 90 | de_sin=self.sinLayer[3*i+j-2](de_sin) 91 | pp = de_sin +pp 92 | de_sin=self.sinLayer[3*self.M1+1](pp/self.M1)+inputs 93 | 94 | fbp=self.decode(de_sin) 95 | 96 | outputs=self.ctLayer[0](fbp) 97 | qq = outputs 98 | for i in range(1,self.M2+1): 99 | for j in range(0, 3): 100 | outputs=self.ctLayer[3*i+j-2](outputs) 101 | qq=qq+outputs 102 | outputs=self.ctLayer[3 * self.M2+1](qq/self.M2)+fbp 103 | return [de_sin,outputs,fbp] 104 | 105 | def interp(f,xp,x): 106 | # f_img=f[:,:,::2,:] 107 | f_img=f 108 | shape=f.shape 109 | L=len(x) 110 | f_interp = np.zeros(shape=[shape[0],shape[1],L,shape[3]]) 111 | idL = np.where(x <= xp[0])[0] 112 | idR = np.where(x >= xp[-1])[0] 113 | xx = x[idL[-1] + 1:idR[0]] 114 | id = np.searchsorted(xp, xx) 115 | L = xx - xp[id - 1] 116 | R = xp[id] - xx 117 | w1 = R / (L + R) 118 | w2 = 1 - w1 119 | val1 = f_img[:, :, id - 1, :] 120 | val2 = f_img[:, :, id, :] 121 | val1 = val1.transpose([0, 1, 3, 2]) 122 | val2 = val2.transpose([0, 1, 3, 2]) 123 | temp = val1 * w1 + val2 * w2 124 | f_interp[:, :, idL[-1] + 1:idR[0], :] = temp.transpose([0, 1, 3, 2]) 125 | for i in idL: 126 | f_interp[:, :, i, :] = f_img[:, :, 0, :] 127 | for j in idR: 128 | f_interp[:, :, j, :] = f_img[:, :, -1, :] 129 | return f_interp 130 | 131 | def u_function(s): 132 | u=np.zeros(s.shape) 133 | index_1=np.where(s==0)[0] 134 | u[index_1]=1/2 135 | index=np.where(s!=0)[0] 136 | v=s[index] 137 | u[index]=(np.cos(v)-1)/(v**2)+np.sin(v)/v 138 | return u 139 | def w_bfunction(b,s): 140 | return u_function(b*s)*(b**2)/(4*np.pi**2) 141 | 142 | def decode(sin_fan,AT,w_b): 143 | # AT, alpha, h, w_c=self.AT,self.alpha,self.h,self.w_c 144 | sin_fan=tf.transpose(sin_fan,perm=[0,2,1,3]) 145 | # cos_alpha = tf.math.cos(alpha) 146 | s_fan_shape =sin_fan.shape 147 | batch=tf.shape(sin_fan)[0] 148 | sin_fan1 = tf.reshape(sin_fan, [-1, s_fan_shape[2], 1]) 149 | filter_s_fan = tf.nn.conv1d(sin_fan1, 
tf.expand_dims(tf.expand_dims(w_b,-1),-1), stride=1, padding='SAME') 150 | # filter_s_fan1=tf.reshape(filter_s_fan,s_fan_shape) 151 | filter_s_fan2 = tf.reshape(filter_s_fan, [batch, -1]) 152 | filter_s_fan2 = tf.transpose(filter_s_fan2) 153 | rf = tf.sparse.sparse_dense_matmul(AT, filter_s_fan2) 154 | rf = tf.transpose(rf) 155 | rf = tf.reshape(rf, [batch, 512, 512, 1]) 156 | return 4*rf 157 | 158 | 159 | 160 | def make_model_3(AT,s_shape=(725, 360),out_size=(512,512)): 161 | CT=sinLayer(AT,s_shape,out_size) 162 | inputs = tf.keras.Input(shape=(s_shape[0],s_shape[1],1)) 163 | [de_sin, outputs, fbp]=CT(inputs) 164 | model = tf.keras.Model(inputs=inputs, outputs=[de_sin, outputs, fbp]) 165 | return model 166 | 167 | 168 | def train(epoch, udir,batch, theta, iternum, restore=0, ckpt='./weights/CT_tf2_4'): 169 | angles = np.shape(theta)[0] 170 | 171 | s_shape = (725, 180) 172 | out_size = (512, 512) 173 | AT = np.load('AT_' + str(180) + '_512x512' + '.npz') 174 | 175 | 176 | u_img = np.load(udir + 'u_CT_img_no_scale.npy').astype('float32') 177 | f_noisy_img = np.load(udir + 'f_noisy,angle='+str(angles)+'_no_scale__0.5.npy').astype('float32') 178 | f_img = np.load(udir + '/f,angle=' + str(180) + '_no_scale__0.5.npy').astype('float32') 179 | u_ini = np.load(udir + 'ini,angle='+str(angles)+'_no_scale__0.5.npy').astype('float32') 180 | # M = np.max(np.max(u_ini, 1), 1) 181 | # M=np.reshape(M, [np.shape(M)[0],1,1,1]) 182 | # u_img=u_img/M*255 183 | # f_noisy_img=f_noisy_img/M*255 184 | # f_img=f_img/M*255 185 | 186 | 187 | val = AT['name1'].astype('float32') 188 | index = AT['name2'] 189 | shape = AT['name3'] 190 | del u_ini 191 | 192 | AT = tf.sparse.SparseTensor(index, val, shape) 193 | # AT = tf.cast(AT, tf.float32) 194 | del val 195 | del index 196 | 197 | # u_img = np.load(udir + 'u_CT_img_no_scale.npy') 198 | print('shape of u_img:', u_img.shape) 199 | # f_noisy_img = np.load(udir + '/f_noisy,angle=' + str(180) + '_ no_scale__0.5.npy') 200 | f_noisy_img = interp(f_noisy_img, np.linspace(0, 180, angles, endpoint=False), np.linspace(0, 180, 180, endpoint=False))  # upsample the sparse-view sinogram to the dense 180-view grid 201 | # f_img=np.load(udir + '/f,angle=' + str(180) + '_ no_scale__0.5.npy') 202 | # ini_u_img = np.load(udir + '/ini,angle=' + str(angles) + '_255.0_0.002.npy') 203 | 204 | 205 | current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S") 206 | train_log_dir = 'logs/gradient_tape/' + current_time + '/train' 207 | test_log_dir = 'logs/gradient_tape/' + current_time + '/test' 208 | 209 | optimizer = tf.keras.optimizers.Adam(learning_rate=0.001) 210 | 211 | # Model=sinLayer(AT,s_shape,out_size) 212 | Model=make_model_3(AT,s_shape,out_size) 213 | tf.keras.backend.clear_session() 214 | # tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=train_log_dir) 215 | 216 | # u_img = tf.cast(u_img, tf.float32) 217 | # f_noisy_img = tf.cast(f_noisy_img, tf.float32) 218 | # f_img=tf.cast(f_img, tf.float32) 219 | 220 | N=tf.shape(u_img)[0] 221 | vx=f_noisy_img[N-5:N] 222 | vy=[f_img[N-5:N],u_img[N-5:N]] 223 | train_data = tf.data.Dataset.from_tensor_slices((u_img[0:N-5],f_img[0:N-5], f_noisy_img[0:N-5])).shuffle(tf.cast(N-5,tf.int64)).batch(batch) 224 | # _ = Model(vx[0:1]) 225 | if restore == 1: 226 | # run one dummy forward pass to build the layers, since tf.keras.Input is not used here 227 | ## alternatively, move the build logic into __init__ so this call would be unnecessary 228 | _=Model(vx[0:1]) 229 | Model.load_weights(ckpt) 230 | print('load weights, done') 231 | for i in range(epoch): 232 | for iter, ufini in enumerate(train_data): 233 | u, f, f_noisy = ufini 
234 | # Loss, m1, m2, m3 = train_step(f_noisy, Model, [f, u], loss, psnr, optimizer, vx, vy,epochnum=i) 235 | Loss, m1, m2, m3 = train_step(f_noisy, Model, [f, u], loss_1, psnr, optimizer, vx, vy, epochnum=i) 236 | print(iter, "/", i, ":", Loss.numpy(), 237 | "psnr_f_fnoisy:", m1.numpy(), 238 | "psnr1", [m2[0].numpy(), m2[1].numpy(), m2[2].numpy()], 239 | ### each triplet: PSNR of f vs de_sin, u vs fbp, and u vs the reconstruction, respectively 240 | 'psnr3:', [m3[0].numpy(), m3[1].numpy(), m3[2].numpy()] 241 | ) 242 | 243 | if i%2==0: 244 | Model.save_weights(ckpt) 245 | # Model.compile(optimizer=optimizer, loss=[loss], metrics=[psnr]) 246 | # Model.fit(x, y, batch_size=batch, epochs=epoch, callbacks=[tensorboard_callback], 247 | # validation_split=1/80) 248 | Model.save_weights(ckpt) 249 | # tf.keras.utils.plot_model(Model, 'multi_input_and_output_model.png', show_shapes=True) 250 | 251 | 252 | @tf.function 253 | def train_step(inputs, model, labels, Loss, Metric, optimizer,vx,vy,epochnum): 254 | # if epochnum<1000: 255 | # weights = 0.9999 256 | # else: 257 | # weights = 0.0001 258 | with tf.GradientTape() as tape: 259 | predictions = model(inputs, training=1) 260 | loss = Loss(labels, predictions) 261 | grads = tape.gradient(loss, model.trainable_variables) 262 | optimizer.apply_gradients(zip(grads, model.trainable_variables)) 263 | # m1 = Metric(labels, inputs) 264 | m1 = tf.reduce_sum(tf.image.psnr(labels[0], inputs, max_val=tf.reduce_max(labels[0]))) / tf.cast(tf.shape(inputs)[0],tf.float32) 265 | m2 = Metric(labels, model(inputs, training=0)) 266 | m3 = Metric(vy, model(vx, training=0)) 267 | return loss, m1, m2, m3 268 | 269 | 270 | def loss_1(x, y,weights=0.5): 271 | x0 = tf.cast(x[0], tf.float32) 272 | x1 = tf.cast(x[1], tf.float32) 273 | y0 = tf.cast(y[0], tf.float32) 274 | y1 = tf.cast(y[1], tf.float32) 275 | shape = tf.cast(tf.shape(x[0]), tf.float32) 276 | shape1 = tf.cast(tf.shape(x[1]), tf.float32) 277 | return weights*tf.reduce_sum(tf.math.square(x0 - y0)) / shape[0] / shape[1] / shape[2] / shape[3]\ 278 | +(1-weights)*tf.reduce_sum(tf.math.square(x1 - y1))/shape1[0] / shape1[1] / shape1[2] / shape1[3] 279 | # return tf.reduce_sum(tf.math.square(x0 - y0)) / shape[0] / shape[1] / shape[2] / shape[3] 280 | # return tf.reduce_sum(tf.math.square(x1 - y1))/shape1[0] / shape1[1] / shape1[2] / shape1[3] 281 | 282 | def psnr(x, y,max_val=255): 283 | x0 = tf.cast(x[0], tf.float32) 284 | x1 = tf.cast(x[1], tf.float32) 285 | y0 = tf.cast(y[0], tf.float32) 286 | y1 = tf.cast(y[1], tf.float32) 287 | y2 = tf.cast(y[2], tf.float32) 288 | batch = tf.cast(tf.shape(x[1])[0], tf.float32) 289 | psnr1=tf.reduce_sum(tf.image.psnr(x0, y0, max_val=tf.reduce_max(x0))) / batch  # PSNR of f vs de_sin 290 | psnr2=tf.reduce_sum(tf.image.psnr(x1, y2, max_val=tf.reduce_max(x1))) / batch  # PSNR of u vs fbp 291 | psnr3 = tf.reduce_sum(tf.image.psnr(x1, y1, max_val=tf.reduce_max(x1))) / batch  # PSNR of u vs the reconstruction 292 | return [psnr1,psnr2,psnr3] 293 | 294 | 295 | if __name__ == '__main__': 296 | os.environ["CUDA_VISIBLE_DEVICES"] = "3" 297 | iternum = 20 298 | epoch = 100 299 | 300 | batch = 5 301 | angles = 60 302 | theta = np.linspace(0, 180, angles, endpoint=False) 303 | udir = "./train/" 304 | vdir = "validate" 305 | # train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./weights/two_stage_4_2') 306 | train(epoch, udir, batch, theta, iternum, restore=0, ckpt='./512x512/weights/new_model_lambda=0.5') --------------------------------------------------------------------------------
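Note on the angular interpolation used by train() above: interp() linearly interpolates the sparse-view sinogram along the angle axis (axis 2 of a [batch, detectors, angles, channels] array) onto the dense 180-view grid, clamping out-of-range angles to the nearest measured view. The snippet below is a minimal, hypothetical sanity check (the toy shapes and the import of interp from newmodel.py are assumptions, not part of the repository; newmodel.py's own imports must be available) that compares one detector row against np.interp:

import numpy as np
from newmodel import interp  # assumed import; interp() is defined in newmodel.py above

angles, superangles = 60, 180
xp = np.linspace(0, 180, angles, endpoint=False)          # measured view angles
x = np.linspace(0, 180, superangles, endpoint=False)      # dense target grid
f = np.random.rand(2, 725, angles, 1).astype('float32')   # toy sinogram batch

f_up = interp(f, xp, x)                                   # -> shape (2, 725, 180, 1)
ref = np.interp(x, xp, f[0, 0, :, 0])                     # 1-D reference for one detector row
print(np.abs(f_up[0, 0, :, 0] - ref).max())               # ~0 up to floating-point error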
/parabeam/testnew.py: -------------------------------------------------------------------------------- 1 | import newmodel as net4 2 | import numpy as np 3 | import matplotlib.pyplot as plt 4 | import tensorflow as tf 5 | import os 6 | import cv2 as cv 7 | 8 | def interp(f,xp,x): 9 | # f_img=f[:,:,::2,:] 10 | f_img=f 11 | shape=f.shape 12 | L=len(x) 13 | f_interp = np.zeros(shape=[shape[0],shape[1],L,shape[3]]) 14 | idL = np.where(x <= xp[0])[0] 15 | idR = np.where(x >= xp[-1])[0] 16 | xx = x[idL[-1] + 1:idR[0]] 17 | id = np.searchsorted(xp, xx) 18 | L = xx - xp[id - 1] 19 | R = xp[id] - xx 20 | w1 = R / (L + R) 21 | w2 = 1 - w1 22 | val1 = f_img[:, :, id - 1, :] 23 | val2 = f_img[:, :, id, :] 24 | val1 = val1.transpose([0, 1, 3, 2]) 25 | val2 = val2.transpose([0, 1, 3, 2]) 26 | temp = val1 * w1 + val2 * w2 27 | f_interp[:, :, idL[-1] + 1:idR[0], :] = temp.transpose([0, 1, 3, 2]) 28 | for i in idL: 29 | f_interp[:, :, i, :] = f_img[:, :, 0, :] 30 | for j in idR: 31 | f_interp[:, :, j, :] = f_img[:, :, -1, :] 32 | return f_interp 33 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 34 | Aangles=180 35 | AT = np.load('AT_' + str(Aangles) + '_512x512' + '.npz') 36 | val = AT['name1'].astype('float32') 37 | index = AT['name2'] 38 | shape = AT['name3'] 39 | AT = tf.sparse.SparseTensor(index, val, shape) 40 | # AT = tf.cast(AT, tf.float32) 41 | theta=np.linspace(0, 180, Aangles, endpoint=False) 42 | ckpt='./weights'+'/new_model_lambda=0.5' 43 | batch=5 44 | Model=net4.make_model_3(AT,(725, 180),(512, 512)) 45 | udir = "./test/" 46 | angles=60 47 | superangles=180 48 | f_noisy_img = np.load(udir + '/f_noisy,angle=' + str(angles) + '_no_scale__0.5.npy') 49 | f_noisy_img=interp(f_noisy_img, np.linspace(0, 180, angles, endpoint=False), np.linspace(0, 180, superangles, endpoint=False))  # upsample the sparse-view sinogram to 180 views 50 | L=500 51 | f_noisy_img=f_noisy_img[0:L] 52 | f_noisy=tf.cast(f_noisy_img,tf.float32) 53 | def inimodel(f_noisy, Model, ckpt=ckpt): 54 | _ = Model(f_noisy[0:1]) 55 | Model.load_weights(ckpt) 56 | 57 | 58 | def evaluate(f_noisy, batch, Model): 59 | _ = inimodel(f_noisy, Model) 60 | prediction = np.zeros([tf.shape(f_noisy).numpy()[0],512,512,1]) 61 | iter = list(range(0, tf.shape(f_noisy).numpy()[0], batch)) 62 | for i in range(len(iter)): 63 | prediction[iter[i]:iter[i] + batch] = Model(f_noisy[iter[i]:iter[i] + batch])[ 64 | 1].numpy()  # use the f_noisy argument, not the global f_noisy_img 65 | print(i) 66 | return prediction 67 | 68 | prediction=evaluate(f_noisy,batch,Model) 69 | ii=np.random.randint(0,L) 70 | print('show figure:',ii) 71 | plt.imshow(f_noisy[ii,:,:,0],cmap='gray') 72 | plt.figure() 73 | plt.imshow(prediction[ii,:,:,0],cmap='gray') 74 | plt.show() 75 | 76 | # vy=np.load(udir+ 'u_CT_img_no_scale.npy') 77 | # vy=vy[0:L] 78 | # vy=tf.cast(vy,tf.float32) 79 | # pp=tf.image.psnr(tf.cast(prediction,tf.float32),vy,tf.reduce_max(prediction)).numpy() 80 | # qq=tf.image.ssim(tf.cast(prediction,tf.float32),vy,tf.reduce_max(prediction)).numpy() 81 | # print('average psnr:',tf.reduce_mean(pp).numpy()) 82 | # print('average ssim:',tf.reduce_mean(qq).numpy()) 83 | -------------------------------------------------------------------------------- /parabeam/train/traindata: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /parabeam/utilize.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | from scipy.fftpack import fft, ifft, fftfreq,fftshift 4 | from functools import partial 5 | from 
scipy.interpolate import interp1d 6 | from skimage.transform import radon,iradon,rotate 7 | import pydicom 8 | import tensorflow as tf 9 | from skimage.measure import compare_psnr 10 | import math 11 | import glob 12 | import os 13 | import matplotlib.pyplot as plt 14 | class CT_uitil: 15 | def __init__(self,img_size,theta=None,filter="ramp"): 16 | self.img_size=img_size 17 | if theta is None: 18 | theta=np.arange(180) 19 | self.theta = theta * np.pi / 180.0 20 | else: 21 | self.theta = theta * np.pi / 180.0 22 | self.filter=filter 23 | self.pad_width, self.diagonal = self.shape_radon() 24 | self.sin_size = [img_size[0], self.diagonal, len(self.theta), img_size[3]] 25 | self.fourier_filter=self.get_fourier_filter() 26 | self.index_w=self.make_cor_rotate() 27 | 28 | def get_fourier_filter(self): 29 | img_shape=self.sin_size[1] 30 | size = max(64, int(2 ** np.ceil(np.log2(2 * img_shape)))) 31 | filter_name=self.filter 32 | filter_types = ('ramp', 'shepp-logan', 'cosine', 'hamming', 'hann', None) 33 | if filter_name not in filter_types: 34 | raise ValueError("Unknown filter: %s" % filter_name) 35 | n = np.concatenate((np.arange(1, size / 2 + 1, 2, dtype=int),  # int instead of the removed np.int alias 36 | np.arange(size / 2 - 1, 0, -2, dtype=int))) 37 | f = np.zeros(size) 38 | f[0] = 0.25 39 | f[1::2] = -1 / (np.pi * n) ** 2 40 | 41 | fourier_filter = 2 * np.real(fft(f)) # ramp filter 42 | if filter_name == "ramp": 43 | pass 44 | elif filter_name == "shepp-logan": 45 | # Start from first element to avoid divide by zero 46 | omega = np.pi * fftfreq(size)[1:] 47 | fourier_filter[1:] *= tf.sin(omega) / omega 48 | elif filter_name == "cosine": 49 | freq = np.linspace(0, np.pi, size, endpoint=False) 50 | cosine_filter = tf.signal.fftshift(tf.sin(freq)) 51 | fourier_filter *= cosine_filter 52 | elif filter_name == "hamming": 53 | fourier_filter *= tf.signal.fftshift(np.hamming(size)) 54 | elif filter_name == "hann": 55 | fourier_filter *= tf.signal.fftshift(np.hanning(size)) 56 | elif filter_name is None: 57 | fourier_filter[:] = 1 58 | fourier_filter=fourier_filter[:, np.newaxis] 59 | fourier_filter = np.expand_dims(np.transpose(fourier_filter, [1, 0]).astype(np.complex128), 0) 60 | fourier_filter = np.expand_dims(fourier_filter, 0) 61 | return fourier_filter 62 | 63 | # @tf.function 64 | def iradon(self, radon_image, output_size=None,interpolation="linear"): 65 | shape = self.sin_size 66 | fourier_filter = self.fourier_filter 67 | theta=self.theta 68 | angles_count = len(theta) 69 | if angles_count != shape[2]: 70 | raise ValueError("The given ``theta`` does not match the number of " 71 | "projections in ``radon_image``.") 72 | 73 | img_shape = shape[1] 74 | if output_size is None: 75 | # If output size not specified, estimate from input radon image 76 | output_size = int(np.floor(np.sqrt((img_shape) ** 2 / 2.0))) 77 | 78 | projection_size_padded = max(64, int(2 ** np.ceil(np.log2(2 * img_shape)))) 79 | pad_width = ((0, 0), (0, projection_size_padded - img_shape), (0, 0), (0, 0)) 80 | img = tf.pad(radon_image, pad_width, mode='constant', constant_values=0) 81 | # fourier_filter = get_fourier_filter(projection_size_padded, filter) 82 | projection = tf.signal.fft(tf.cast(tf.transpose(img, [0, 3, 2, 1]), tf.complex128)) * fourier_filter 83 | radon_filtered = tf.math.real(tf.signal.ifft(projection)[:, :, :, :img_shape]) 84 | # 85 | radon_filtered = tf.transpose(radon_filtered, [3, 2, 0, 1]) 86 | radon_filtered = tf.cast(radon_filtered, tf.float64) 87 | 88 | # Reconstruct image by interpolation 89 | reconstructed = 
tf.zeros((tf.shape(radon_image)[0], output_size, output_size, tf.shape(radon_image)[3])) 90 | reconstructed = tf.cast(reconstructed,tf.float64) 91 | radius = output_size // 2 92 | xpr, ypr = np.mgrid[:output_size, :output_size] - radius 93 | x = np.arange(img_shape) - img_shape // 2 94 | 95 | thetad = tf.cast(theta, tf.float64) 96 | # for col, angle in dd: 97 | for i in range(len(theta)): 98 | col, angle = radon_filtered[:, i, :, :], thetad[i] 99 | t = ypr * tf.math.cos(angle) - xpr * tf.math.sin(angle) 100 | temp = tf.gather(col, tf.cast(tf.math.ceil(t), tf.int32) + img_shape // 2) 101 | temp1 = tf.gather(col, tf.cast(tf.math.floor(t), tf.int32) + img_shape // 2) 102 | w = t - tf.math.floor(t) 103 | w = tf.expand_dims(w, -1) 104 | w = tf.expand_dims(w, -1) 105 | w = tf.broadcast_to(w, tf.shape(temp)) 106 | temp2 = w * temp + (1 - w) * temp1 107 | temp3 = tf.transpose(temp2, [2, 0, 1, 3]) 108 | reconstructed += temp3 109 | return reconstructed * np.pi / (2 * angles_count) 110 | 111 | # @tf.function 112 | def radon(self, img): 113 | input_shape=self.img_size 114 | # assert tf.constant(input_shape)==tf.shape(img) 115 | theta=self.theta 116 | numAngles = len(theta) 117 | pad_width, diagonal = self.pad_width, self.diagonal 118 | img=tf.cast(img,tf.float64) 119 | img1 = tf.pad(img, pad_width, mode='constant', constant_values=0) 120 | # sinogram = np.zeros((input_shape[0], diagonal, len(theta), input_shape[3])) 121 | pp=[] 122 | for n in range(numAngles): 123 | rotated = self.imrotate(img1, n) 124 | # sinogram[:, :, n, :] = tf.reduce_sum(rotated, axis=1) 125 | pp.append(tf.reduce_sum(rotated, axis=1)) 126 | # pp=np.array(pp) 127 | pp=tf.stack(pp) 128 | sinogram = tf.transpose(pp,[1,2,0,3]) 129 | return sinogram 130 | 131 | def shape_radon(self): 132 | # numAngles = len(theta) 133 | # shape = tf.shape(img) 134 | # shape1 = tf.cast(shape, tf.float32) 135 | shape=shape1 = self.img_size 136 | diagonal = np.sqrt(2) * np.max([shape1[1],shape1[2]]) 137 | pad = [np.ceil(diagonal - shape1[1]), np.ceil(diagonal - shape1[2])] 138 | # pad = tf.cast(pad, tf.int32) 139 | pad = np.array(pad).astype(np.int32) 140 | new_center = [(shape[1] + pad[0]) // 2, (shape[2] + pad[1]) // 2] 141 | old_center = [shape[1] // 2, shape[2] // 2] 142 | pad_before = [new_center[0] - old_center[0], new_center[1] - old_center[1]] 143 | pad_width = [(0, 0), (pad_before[0], pad[0] - pad_before[0]), (pad_before[1], pad[1] - pad_before[1]), (0, 0)] 144 | # img1 = np.pad(img, pad_width, mode='constant', constant_values=0) 145 | assert pad[0]+shape[1]==pad[1]+shape[2] 146 | pad_width = np.array(pad_width).astype(np.int32) 147 | return pad_width, pad[0] + shape[1] 148 | 149 | def imrotate(self, img, theta_i): 150 | index11, index12, index21, index22, w11, w12, w21, w22 = self.index_w[theta_i] 151 | img1 = tf.cast(tf.transpose(img, [1, 2, 3, 0]),tf.float64) 152 | f11, f12, f21, f22 = tf.gather_nd(img1, index11), tf.gather_nd(img1, index12), tf.gather_nd(img1, 153 | index21), tf.gather_nd( 154 | img1, index22) 155 | bilinear = w11 * tf.transpose(f11, [2, 1, 0]) + w12 * tf.transpose(f12, [2, 1, 0]) + w21 * tf.transpose(f21, 156 | [2, 1, 157 | 0]) + w22 * tf.transpose( 158 | f22, [2, 1, 0]) 159 | rotate = tf.reshape(bilinear, 160 | [tf.shape(bilinear)[0], tf.shape(bilinear)[1], tf.shape(img)[1], tf.shape(img)[2]]) 161 | rotate = tf.transpose(rotate, [0, 2, 3, 1]) 162 | 163 | return rotate 164 | 165 | def cor_rotate(self, theta_i): 166 | theta=self.theta[theta_i] 167 | cos = math.cos(theta) 168 | sin = math.sin(theta) 169 | ux=uy=self.diagonal 
170 | semicorx = math.floor(ux / 2) 171 | semicory = math.floor(uy / 2) 172 | x = np.arange(ux) - semicorx 173 | y = np.arange(uy) - semicory 174 | XY = np.meshgrid(x, y) 175 | X, Y = XY[0], XY[1] 176 | sx = (cos * Y - sin * X) + semicorx 177 | sy = (sin * Y + cos * X) + semicory 178 | sx = np.reshape(sx, [-1]) 179 | sy = np.reshape(sy, [-1]) 180 | x1 = np.floor(sx) 181 | x2 = x1+1 182 | y1 = np.floor(sy) 183 | y2 = y1+1 184 | # index = np.stack([sx, sy], 1) 185 | index11 = np.stack([x1, y1], 1).astype(np.int32) 186 | index12 = np.stack([x1, y2], 1).astype(np.int32) 187 | index21 = np.stack([x2, y1], 1).astype(np.int32) 188 | index22 = np.stack([x2, y2], 1).astype(np.int32) 189 | w11 = ((x2 - sx) * (y2 - sy)).astype(np.float64) 190 | w12 = ((x2 - sx) * (sy - y1)).astype(np.float64) 191 | w21 = ((sx - x1) * (y2 - sy)).astype(np.float64) 192 | w22 = ((sx - x1) * (sy - y1)).astype(np.float64) 193 | return index11, index12, index21, index22, w11, w12, w21, w22 194 | 195 | def make_cor_rotate(self): 196 | cor=[] 197 | theta=self.theta 198 | for i in range(len(theta)): 199 | cor.append(self.cor_rotate(i)) 200 | return cor 201 | 202 | 203 | if __name__=='__main__': 204 | os.environ["CUDA_VISIBLE_DEVICES"] = "0" 205 | # udir = '/home/wangwei/ct-compare/CT_image/Pancreas-CT/PANCREAS_0002//11-24-2015-PANCREAS0002-Pancreas-23046/Pancreas-63502' 206 | udir='E:/CT_image/Pancreas-CT/PANCREAS_0002//11-24-2015-PANCREAS0002-Pancreas-23046/Pancreas-63502' 207 | # udir='/home/wangwei/ct-compare/CPTAC-LUAD/CPTAC-LUAD' 208 | dd = glob.glob(udir + "/**/*.dcm", recursive=True) 209 | dd.sort() 210 | L = len(dd) 211 | L = min(5000, L) 212 | f = [] 213 | for i in range(L): 214 | name = dd[i] 215 | dc = pydicom.dcmread(name) 216 | temp = dc.pixel_array 217 | temp = temp.astype(np.float32) 218 | divider = np.max(temp) - np.min(temp) 219 | if divider == 0: 220 | print('divider being zero: index ', i) 221 | continue  # skip constant slices to avoid dividing by zero 222 | temp = (temp - np.min(temp)) / divider 223 | f.append(temp) 224 | f = np.array(f) 225 | f = np.expand_dims(f, -1) 226 | cen=f.shape[1]//2 227 | # f=f[0:10,:,:,:] 228 | batch = 2 229 | M = N = 256 230 | LL=M//2 231 | f = f[0:batch, cen-LL:cen+LL, cen-LL:cen+LL, :]*255 232 | angles = 180 233 | theta = np.linspace(0, 180, angles, endpoint=False) 234 | 235 | s = radon(f[0, :, :, 0], circle=False,theta=theta) 236 | rf = iradon(s, theta) 237 | pp = compare_psnr(f[0, :, :, 0], rf, np.max(f[0, :, :, 0])) 238 | 239 | shape=f.shape 240 | ct=CT_uitil([0,shape[1],shape[2],0],theta=theta) 241 | s1=ct.radon(f) 242 | ss=tf.expand_dims(tf.expand_dims(s,0),-1) 243 | rf1=ct.iradon(ss) 244 | # rf1 = ct.iradon(s1) 245 | pp1=tf.image.psnr(tf.cast(f[:1,:,:,:],tf.float64),rf1,np.max(f))  # cast to float64 to match rf1 246 | print('f_shape:', f.shape) 247 | print('s-s1:', np.sum(np.abs(s - s1[0, :, :, 0]))) 248 | print('rf-rf1:', np.sum(np.abs(rf-rf1[0, :, :, 0]))) 249 | print('f-rf:', np.sum(np.abs(f[0, :, :, 0] - rf))) 250 | print('f-rf1:', np.sum(np.abs(f[0,:,:,0] - rf1[0, :, :, 0]))) 251 | print('psnr between f and rf:', pp) 252 | print('psnr between f1 and rf1:',pp1.numpy()) 253 | print('debug') 254 | 255 | 256 | 257 | 258 | 259 | --------------------------------------------------------------------------------
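As a usage note, CT_uitil implements the forward Radon transform in TensorFlow via precomputed bilinear-interpolation rotation grids, and its iradon() performs filtered back-projection with the Fourier-domain ramp filter built in get_fourier_filter(); the __main__ block above validates both against skimage on DICOM slices. Below is a minimal, hypothetical round-trip check on a synthetic phantom (the phantom, sizes, and import path are assumptions, and utilize.py's own imports, e.g. pydicom and the old skimage.measure.compare_psnr, must be installed); it mirrors the rf1 = ct.iradon(ss) path above without any DICOM data:

import numpy as np
import tensorflow as tf
from skimage.transform import radon
from utilize import CT_uitil  # assumed import; class defined above

theta = np.linspace(0, 180, 180, endpoint=False)
f = np.zeros([1, 256, 256, 1], dtype=np.float32)
f[0, 96:160, 96:160, 0] = 255.0                        # simple square phantom

s = radon(f[0, :, :, 0], theta=theta, circle=False)    # full-view sinogram over the padded diagonal
ss = tf.expand_dims(tf.expand_dims(s, 0), -1)          # -> [1, diagonal, 180, 1]

ct = CT_uitil([1, 256, 256, 1], theta=theta)           # dims 0 and 3 are only bookkeeping
rf = ct.iradon(ss)                                     # FBP reconstruction -> [1, 256, 256, 1]
print(tf.image.psnr(f, tf.cast(rf, tf.float32), 255.0).numpy())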