├── README.md
└── utils.py

/README.md:
--------------------------------------------------------------------------------
# U-Net - Image Segmentation

NEWS: see [u-net-brain-tumor](https://github.com/zsdonghao/u-net-brain-tumor) for a training example.

You can find the figure and method description [here](http://lmb.informatik.uni-freiburg.de/people/ronneber/u-net/).

Enjoy!
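
As a quick orientation, below is a minimal sketch of how the pieces in `utils.py` fit together. The placeholder shapes (a batch of ten 240×240 single-channel slices) and the loss weight are illustrative assumptions, not fixed requirements:

```python
import tensorflow as tf
from utils import u_net_2d_64_1024_deconv, cross_entropy_weight

x = tf.placeholder(tf.float32, [10, 240, 240, 1])   # input images
y_ = tf.placeholder(tf.float32, [10, 240, 240, 2])  # one-hot target maps

network, outputs = u_net_2d_64_1024_deconv(x, n_out=2)  # `outputs` is already softmaxed
loss = cross_entropy_weight(y_, outputs, weight=20)
train_op = tf.train.AdamOptimizer(1e-5).minimize(loss)  # learning rate recommended in utils.py
```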
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
#! /usr/bin/python
# -*- coding: utf8 -*-


"""
License
=======

Copyright (c) 2016 Data Science Institute, Imperial College London. All rights reserved.

Oct 2016

Contact
=======
Questions? Please contact hao.dong11@imperial.ac.uk

"""

import tensorflow as tf
import tensorlayer as tl
import nibabel as nib
import numpy as np
import os
# from tensorlayer.activation import pixel_wise_softmax
# import sys # this line added
# sys.setrecursionlimit(1000000) # this line added

import skimage
# from skimage.transform import swirl


## Loss and metrics
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been averaged
        across all towers.

    References
    -----------
    - https://github.com/tensorflow/tensorflow/blob/master/tensorflow/models/image/cifar10/cifar10_multi_gpu_train.py
    """
    average_grads = []
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(0, grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
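
# Example of average_gradients (a sketch of multi-GPU tower training; `build_tower_loss`,
# `num_gpus` and the per-GPU input splits are hypothetical names for illustration, and
# variable sharing between the towers is elided):
#     opt = tf.train.GradientDescentOptimizer(0.01)
#     tower_grads = []
#     for i in range(num_gpus):
#         with tf.device('/gpu:%d' % i):
#             loss = build_tower_loss(x_splits[i], y_splits[i])
#             tower_grads.append(opt.compute_gradients(loss))
#     grads = average_gradients(tower_grads)
#     apply_op = opt.apply_gradients(grads)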

def cross_entropy_weight(y_, output_map, weight=1, name="cross_entropy_weight"):
    """Compute weighted cross entropy between two images; note that it does not
    apply softmax internally, and it uses tf.clip_by_value to avoid NaN.

    Fangde: haven't used.

    Parameters
    -----------
    y_ : 4D tensor [batch_size, height, width, channel]
        target outputs
    output_map : 4D tensor [batch_size, height, width, channel]
        predicted outputs
    weight : float
        scaling factor applied to the loss

    Examples
    ---------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> wc = cross_entropy_weight(y_, outputs, weight=20)
    """
    return -tf.reduce_mean(weight * y_ * tf.log(tf.clip_by_value(output_map, 1e-10, 1.0)), name=name)


## Load file
def read_Nifti1Image(file_dir, name):
    """
    http://nipy.org/nibabel/gettingstarted.html
    """
    img_dir = os.path.join(file_dir, name)
    img = nib.load(img_dir)
    print(" *Name: %s Shape: %s " % (name, img.shape))
    # print(img.get_data_dtype() == np.dtype(np.int16)) # True
    # return np.array(img, dtype=np.float32)
    return img

def prepare_data(file_dir, file_list, shape=(), threshold=None):
    print(" * Preparing %s" % file_list)
    data = np.empty(shape=(0, shape[1], shape[2], 1))
    for f in file_list:
        img = read_Nifti1Image(file_dir, f)
        X = img.get_data()
        X = np.transpose(X, (1, 0, 2))
        X = X[:, :, :, np.newaxis]
        if threshold:
            X = (X > threshold).astype(int)
        else:
            X = X / np.max(X)
        if X.shape == shape:
            data = np.vstack((data, X))
        else:
            print(" *shapes don't match")
    return data

def prepare_data2(file_dir, file_list, label_list, shape=(), dim_order=(1, 0, 2)):
    print(" * Preparing %s %s" % (file_list, label_list))
    # data = np.empty(shape=(0,shape[1],shape[2],1))  ######## Akara: slower than list append
    # data2 = np.empty(shape=(0,shape[1],shape[2],1))
    data = []
    data2 = []
    for f, f2 in zip(file_list, label_list):
        print("%s - %s" % (f, f2))
        ## read original image
        img = read_Nifti1Image(file_dir, f)
        X = img.get_data()
        X = np.transpose(X, dim_order)
        X = X[:, :, :, np.newaxis]
        ## read label image
        img = read_Nifti1Image(file_dir, f2)
        Y = img.get_data()
        Y = np.transpose(Y, dim_order)
        Y = Y[:, :, :, np.newaxis]
        ## if the shape is correct
        if X.shape == shape:
            for i in range(Y.shape[0]):
                ## if a label exists in this slice
                if np.max(Y[i]) > 0:
                    ## display data values
                    print('%d X max:%.3f min:%.3f' % (i, np.max(X[i]), np.min(X[i])))
                    ## make image [0,1]
                    # X[i] = (X[i] - np.min(X[i])) / (np.max(X[i]) - np.min(X[i]))
                    ## make label binary, then mask out label value 2
                    Y[i] = (Y[i] > 0.5).astype(int)
                    mask = (Y[i] != 2).astype(int)
                    Y[i] = Y[i] * mask
                    # Y[i] = (Y[i] == 4).astype(int)
                    ## stack data
                    # data = np.vstack((data, [X[i]]))    ###### Akara: slower than list append
                    # data2 = np.vstack((data2, [Y[i]]))
                    data.append(X[i].astype(np.float32))
                    data2.append(Y[i].astype(np.float32))
        else:
            print(" *shape doesn't match")
    ## plot an example
    # for i in range(0, data.shape[0], 1):
    #     tl.visualize.frame(X[i,:,:,0], second=0.01, saveable=False, name='slice x:'+str(i), cmap='gray')
    #     tl.visualize.images2d(images=np.asarray([data[i,:,:,:], data2[i,:,:,:]]), second=0.01, saveable=False, name='slice x:'+str(i), dtype=None)
    return np.asarray(data, dtype=np.float32), np.asarray(data2, dtype=np.float32)
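
# Example of prepare_data2 (a sketch; the file names and the volume shape are hypothetical,
# and `shape` must match the transposed volume shape including the channel axis):
#     X_train, y_train = prepare_data2('data/', ['img1.nii'], ['label1.nii'],
#                                      shape=(240, 240, 155, 1))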

def train(total_loss, global_step, init_lr, decay_factor, num_batches_per_epoch, num_epoch_per_decay, mode='sgd'):
    """Build the training op (adapted from the TensorFlow CIFAR-10 example).
    Create an optimizer, apply it to all trainable variables, and add a moving
    average for all trainable variables.
    Args:
        total_loss: Total loss from loss().
        global_step: Integer Variable counting the number of training steps
            processed.
    Returns:
        train_op: op for training.
    """
    MOVING_AVERAGE_DECAY = 0.9999
    def _add_loss_summaries(total_loss):
        """Add summaries for losses in the model.
        Generates a moving average for all losses and associated summaries for
        visualizing the performance of the network.
        Args:
            total_loss: Total loss from loss().
        Returns:
            loss_averages_op: op for generating moving averages of losses.

        https://github.com/tensorflow/tensorflow/blob/r0.11/tensorflow/models/image/cifar10/cifar10.py
        """
        # Compute the moving average of all individual losses and the total loss.
        loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
        losses = tf.get_collection('losses')
        loss_averages_op = loss_averages.apply(losses + [total_loss])

        # Attach a scalar summary to all individual losses and the total loss; do the
        # same for the averaged version of the losses.
        for l in losses + [total_loss]:
            # Name each loss as '(raw)' and name the moving average version of the loss
            # as the original loss name.
            tf.scalar_summary(l.op.name + ' (raw)', l)
            tf.scalar_summary(l.op.name, loss_averages.average(l))

        return loss_averages_op

    # Variables that affect learning rate.
    # num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / FLAGS.batch_size
    decay_steps = int(num_batches_per_epoch * num_epoch_per_decay)

    # Decay the learning rate exponentially based on the number of steps.
    lr = tf.train.exponential_decay(init_lr,
                                    global_step,
                                    decay_steps,
                                    decay_factor,
                                    staircase=True)
    tf.scalar_summary('learning_rate', lr)

    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients.
    with tf.control_dependencies([loss_averages_op]):
        if mode == 'sgd':
            opt = tf.train.GradientDescentOptimizer(lr)
        elif mode == 'adam':
            opt = tf.train.AdamOptimizer(lr, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False)
        else:
            raise Exception("%s is not supported" % mode)
        grads = opt.compute_gradients(total_loss)

    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    for var in tf.trainable_variables():
        tf.histogram_summary(var.op.name, var)

    # Add histograms for gradients.
    for grad, var in grads:
        if grad is not None:
            tf.histogram_summary(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
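
# Example of train() (a sketch; `loss` comes from e.g. cross_entropy_weight, and losses
# added to the 'losses' collection also get moving-average summaries):
#     global_step = tf.Variable(0, trainable=False)
#     tf.add_to_collection('losses', loss)
#     train_op = train(loss, global_step, init_lr=1e-4, decay_factor=0.1,
#                      num_batches_per_epoch=100, num_epoch_per_decay=50, mode='adam')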


## Model
def u_net_2d_64_1024_deconv_pro(x, n_out=2):
    """ 2-D U-Net for image segmentation.

    Parameters
    -----------
    x : tensor or placeholder of input with shape [batch_size, row, col, channel];
        the batch size and image size are inferred from it
    n_out : int
        number of output channels; the default of 2 gives foreground and background for binary segmentation

    Returns
    --------
    network : TensorLayer layer class with identity output
    outputs : tensor, the output with pixel-wise softmax

    Notes
    -----
    - Adam with a learning rate of 1e-5 is recommended.
    """
    batch_size = int(x._shape[0])
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))
    ## define initializers
    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    ## u-net model
    # convolution (contracting path)
    # with tf.device('/gpu:0'):
    net_in = tl.layers.InputLayer(x, name='input')
    conv1 = tl.layers.Conv2dLayer(net_in, act=tf.nn.relu,
            shape=[3,3,nz,64], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv1')
    conv2 = tl.layers.Conv2dLayer(conv1, act=tf.nn.relu,
            shape=[3,3,64,64], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv2')
    pool1 = tl.layers.PoolLayer(conv2, ksize=[1,2,2,1],
            strides=[1,2,2,1], padding='SAME',
            pool=tf.nn.max_pool, name='pool1')
    conv3 = tl.layers.Conv2dLayer(pool1, act=tf.nn.relu,
            shape=[3,3,64,128], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv3')
    conv4 = tl.layers.Conv2dLayer(conv3, act=tf.nn.relu,
            shape=[3,3,128,128], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv4')
    pool2 = tl.layers.PoolLayer(conv4, ksize=[1,2,2,1],
            strides=[1,2,2,1], padding='SAME',
            pool=tf.nn.max_pool, name='pool2')
    conv5 = tl.layers.Conv2dLayer(pool2, act=tf.nn.relu,
            shape=[3,3,128,256], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv5')
    conv6 = tl.layers.Conv2dLayer(conv5, act=tf.nn.relu,
            shape=[3,3,256,256], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv6')
    pool3 = tl.layers.PoolLayer(conv6, ksize=[1,2,2,1],
            strides=[1,2,2,1], padding='SAME',
            pool=tf.nn.max_pool, name='pool3')
    conv7 = tl.layers.Conv2dLayer(pool3, act=tf.nn.relu,
            shape=[3,3,256,512], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv7')
    conv8 = tl.layers.Conv2dLayer(conv7, act=tf.nn.relu,
            shape=[3,3,512,512], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv8')
    # print(conv8.outputs) # (10, 30, 30, 512)
    pool4 = tl.layers.PoolLayer(conv8, ksize=[1,2,2,1],
            strides=[1,2,2,1], padding='SAME',
            pool=tf.nn.max_pool, name='pool4')
    conv9 = tl.layers.Conv2dLayer(pool4, act=tf.nn.relu,
            shape=[3,3,512,1024], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv9')
    conv10 = tl.layers.Conv2dLayer(conv9, act=tf.nn.relu,
            shape=[3,3,1024,1024], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv10')
    print(" * After conv: %s" % conv10.outputs)    # (batch_size, 32, 32, 1024)
    # deconvolution (expanding path)
    deconv1 = tl.layers.DeConv2dLayer(conv10, act=tf.identity,    # act=tf.nn.relu,
            shape=[3,3,512,1024], strides=[1,2,2,1], output_shape=[batch_size, nx//8, ny//8, 512],
            padding='SAME', W_init=w_init, b_init=b_init, name='deconv1_1')
    # print(deconv1.outputs) # (10, 30, 30, 512)
    deconv1_2 = tl.layers.ConcatLayer([conv8, deconv1], concat_dim=3, name='concat1_2')
    deconv1_3 = tl.layers.Conv2dLayer(deconv1_2, act=tf.nn.relu,
            shape=[3,3,1024,512], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv1_3')
    deconv1_4 = tl.layers.Conv2dLayer(deconv1_3, act=tf.nn.relu,
            shape=[3,3,512,512], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv1_4')
    deconv2 = tl.layers.DeConv2dLayer(deconv1_4, act=tf.identity,    # act=tf.nn.relu,
            shape=[3,3,256,512], strides=[1,2,2,1], output_shape=[batch_size, nx//4, ny//4, 256],
            padding='SAME', W_init=w_init, b_init=b_init, name='deconv2_1')
    deconv2_2 = tl.layers.ConcatLayer([conv6, deconv2], concat_dim=3, name='concat2_2')
    deconv2_3 = tl.layers.Conv2dLayer(deconv2_2, act=tf.nn.relu,
            shape=[3,3,512,256], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv2_3')
    deconv2_4 = tl.layers.Conv2dLayer(deconv2_3, act=tf.nn.relu,
            shape=[3,3,256,256], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv2_4')
    deconv3 = tl.layers.DeConv2dLayer(deconv2_4, act=tf.identity,    # act=tf.nn.relu,
            shape=[3,3,128,256], strides=[1,2,2,1], output_shape=[batch_size, nx//2, ny//2, 128],
            padding='SAME', W_init=w_init, b_init=b_init, name='deconv3_1')
    deconv3_2 = tl.layers.ConcatLayer([conv4, deconv3], concat_dim=3, name='concat3_2')
    deconv3_3 = tl.layers.Conv2dLayer(deconv3_2, act=tf.identity,    # act=tf.nn.relu,
            shape=[3,3,256,128], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv3_3')
    deconv3_4 = tl.layers.Conv2dLayer(deconv3_3, act=tf.nn.relu,
            shape=[3,3,128,128], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv3_4')
    deconv4 = tl.layers.DeConv2dLayer(deconv3_4, act=tf.identity,    # act=tf.nn.relu,
            shape=[3,3,64,128], strides=[1,2,2,1], output_shape=[batch_size, nx, ny, 64],
            padding='SAME', W_init=w_init, b_init=b_init, name='deconv4_1')
    deconv4_2 = tl.layers.ConcatLayer([conv2, deconv4], concat_dim=3, name='concat4_2')
    deconv4_3 = tl.layers.Conv2dLayer(deconv4_2, act=tf.nn.relu,
            shape=[3,3,128,64], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv4_3')
    deconv4_4 = tl.layers.Conv2dLayer(deconv4_3, act=tf.nn.relu,
            shape=[3,3,64,64], strides=[1,1,1,1], padding='SAME',
            W_init=w_init, b_init=b_init, name='conv4_4')
    network = tl.layers.Conv2dLayer(deconv4_4,
            act=tf.identity,
            shape=[1,1,64,n_out],    # [0]: foreground prob; [1]: background prob
            strides=[1,1,1,1],
            padding='SAME',
            W_init=w_init, b_init=b_init, name='conv4_5')
    # compute the softmax output
    print(" * Output: %s" % network.outputs)
    outputs = tl.act.pixel_wise_softmax(network.outputs)
    return network, outputs

def u_net_2d_64_1024_deconv(x, n_out=2):
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))

    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')

    print(" * After conv: %s" % conv5.outputs)

    up4 = DeConv2d(conv5, 512, (3, 3), out_size=(nx//8, ny//8), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv4')
    up4 = ConcatLayer([up4, conv4], concat_dim=3, name='concat4')
    conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_2')

    up3 = DeConv2d(conv4, 256, (3, 3), out_size=(nx//4, ny//4), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv3')
    up3 = ConcatLayer([up3, conv3], concat_dim=3, name='concat3')
    conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_2')

    up2 = DeConv2d(conv3, 128, (3, 3), out_size=(nx//2, ny//2), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv2')
    up2 = ConcatLayer([up2, conv2], concat_dim=3, name='concat2')
    conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_2')

    up1 = DeConv2d(conv2, 64, (3, 3), out_size=(nx, ny), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv1')
    up1 = ConcatLayer([up1, conv1], concat_dim=3, name='concat1')
    conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_2')

    conv1 = Conv2d(conv1, n_out, (1, 1), act=None, name='uconv1')
    print(" * Output: %s" % conv1.outputs)
    outputs = tl.act.pixel_wise_softmax(conv1.outputs)
    return conv1, outputs

def u_net_2d_64_1024_deconv_resnet(x, n_out=2):
    # TODO: add residual connections; currently identical to u_net_2d_64_1024_deconv
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer
    # batch_size = int(x._shape[0])
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))

    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')

    print(" * After conv: %s" % conv5.outputs)

    up4 = DeConv2d(conv5, 512, (3, 3), out_size=(nx//8, ny//8), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv4')
    up4 = ConcatLayer([up4, conv4], concat_dim=3, name='concat4')
    conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_2')

    up3 = DeConv2d(conv4, 256, (3, 3), out_size=(nx//4, ny//4), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv3')
    up3 = ConcatLayer([up3, conv3], concat_dim=3, name='concat3')
    conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_2')

    up2 = DeConv2d(conv3, 128, (3, 3), out_size=(nx//2, ny//2), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv2')
    up2 = ConcatLayer([up2, conv2], concat_dim=3, name='concat2')
    conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_2')

    up1 = DeConv2d(conv2, 64, (3, 3), out_size=(nx, ny), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv1')
    up1 = ConcatLayer([up1, conv1], concat_dim=3, name='concat1')
    conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_2')

    conv1 = Conv2d(conv1, n_out, (1, 1), act=None, name='uconv1')
    print(" * Output: %s" % conv1.outputs)
    outputs = tl.act.pixel_wise_softmax(conv1.outputs)
    return conv1, outputs

def u_net_2d_64_2048_deconv(x, n_out=2):
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer
    # batch_size = int(x._shape[0])
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))

    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')
    pool5 = MaxPool2d(conv5, (2, 2), padding='SAME', name='pool5')

    conv6 = Conv2d(pool5, 2048, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_1')
    conv6 = Conv2d(conv6, 2048, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_2')

    print(" * After conv: %s" % conv6.outputs)

    up5 = DeConv2d(conv6, 1024, (3, 3), out_size=(nx//16, ny//16), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv5')
    up5 = ConcatLayer([up5, conv5], concat_dim=3, name='concat5')
    conv5 = Conv2d(up5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv5_1')
    conv5 = Conv2d(conv5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv5_2')

    up4 = DeConv2d(conv5, 512, (3, 3), out_size=(nx//8, ny//8), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv4')
    up4 = ConcatLayer([up4, conv4], concat_dim=3, name='concat4')
    conv4 = Conv2d(up4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_1')
    conv4 = Conv2d(conv4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv4_2')

    up3 = DeConv2d(conv4, 256, (3, 3), out_size=(nx//4, ny//4), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv3')
    up3 = ConcatLayer([up3, conv3], concat_dim=3, name='concat3')
    conv3 = Conv2d(up3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_1')
    conv3 = Conv2d(conv3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv3_2')

    up2 = DeConv2d(conv3, 128, (3, 3), out_size=(nx//2, ny//2), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv2')
    up2 = ConcatLayer([up2, conv2], concat_dim=3, name='concat2')
    conv2 = Conv2d(up2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_1')
    conv2 = Conv2d(conv2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv2_2')

    up1 = DeConv2d(conv2, 64, (3, 3), out_size=(nx, ny), strides=(2, 2),
                   padding='SAME', act=None, W_init=w_init, b_init=b_init, name='deconv1')
    up1 = ConcatLayer([up1, conv1], concat_dim=3, name='concat1')
    conv1 = Conv2d(up1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_1')
    conv1 = Conv2d(conv1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='uconv1_2')

    conv1 = Conv2d(conv1, n_out, (1, 1), act=None, name='uconv1')
    print(" * Output: %s" % conv1.outputs)
    outputs = tl.act.pixel_wise_softmax(conv1.outputs)
    return conv1, outputs

def u_net_2d_32_512_upsam(x, n_out=2):
    """
    Ported from https://github.com/jocicmarko/ultrasound-nerve-segmentation
    """
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer, UpSampling2dLayer
    batch_size = int(x._shape[0])
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))
    ## define initializers
    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')
    # inputs = Input((1, img_rows, img_cols))
    conv1 = Conv2d(inputs, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    # print(conv1.outputs) # (10, 240, 240, 32)
    # conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)
    conv1 = Conv2d(conv1, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    # print(conv1.outputs) # (10, 240, 240, 32)
    # conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')
    # pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
    # print(pool1.outputs) # (10, 120, 120, 32)
    conv2 = Conv2d(pool1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    # conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Conv2d(conv2, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    # conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')
    # pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Conv2d(pool2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    # conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Conv2d(conv3, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    # conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')
    # pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
    # print(pool3.outputs) # (10, 30, 30, 64)

    conv4 = Conv2d(pool3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    # print(conv4.outputs) # (10, 30, 30, 256)
    # conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Conv2d(conv4, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    # print(conv4.outputs) # (10, 30, 30, 256) != (10, 30, 30, 512)
    # conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')
    # pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Conv2d(pool4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    # conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Conv2d(conv5, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')
    # conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)
    # print(conv5.outputs) # (10, 15, 15, 512)
    print(" * After conv: %s" % conv5.outputs)
    # print(nx//8, ny//8) # 30 30
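    # Decoder: upsampling followed by 3x3 convs stands in for the transposed
    # convolutions used in the *_deconv variants above; each ConcatLayer below
    # merges the corresponding encoder feature map (the U-Net skip connection).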
    up6 = UpSampling2dLayer(conv5, (2, 2), name='up6')
    # print(up6.outputs) # (10, 30, 30, 512) == (10, 30, 30, 512)
    up6 = ConcatLayer([up6, conv4], concat_dim=3, name='concat6')
    # print(up6.outputs) # (10, 30, 30, 768)
    # up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Conv2d(up6, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_1')
    # conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Conv2d(conv6, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_2')
    # conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = UpSampling2dLayer(conv6, (2, 2), name='up7')
    up7 = ConcatLayer([up7, conv3], concat_dim=3, name='concat7')
    # up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Conv2d(up7, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_1')
    # conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Conv2d(conv7, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_2')
    # conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = UpSampling2dLayer(conv7, (2, 2), name='up8')
    up8 = ConcatLayer([up8, conv2], concat_dim=3, name='concat8')
    # up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Conv2d(up8, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_1')
    # conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Conv2d(conv8, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_2')
    # conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = UpSampling2dLayer(conv8, (2, 2), name='up9')
    up9 = ConcatLayer([up9, conv1], concat_dim=3, name='concat9')
    # up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Conv2d(up9, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_1')
    # conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Conv2d(conv9, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_2')
    # conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Conv2d(conv9, n_out, (1, 1), act=None, name='conv10')
    # conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)
    print(" * Output: %s" % conv10.outputs)
    outputs = tl.act.pixel_wise_softmax(conv10.outputs)
    return conv10, outputs

def u_net_2d_32_1024_upsam(x, n_out=2):
    """
    Ported from https://github.com/jocicmarko/ultrasound-nerve-segmentation
    """
    from tensorlayer.layers import InputLayer, Conv2d, MaxPool2d, DeConv2d, ConcatLayer, UpSampling2dLayer
    batch_size = int(x._shape[0])
    nx = int(x._shape[1])
    ny = int(x._shape[2])
    nz = int(x._shape[3])
    print(" * Input: size of image: %d %d %d" % (nx, ny, nz))
    ## define initializers
    w_init = tf.truncated_normal_initializer(stddev=0.01)
    b_init = tf.constant_initializer(value=0.0)
    inputs = InputLayer(x, name='inputs')

    conv1 = Conv2d(inputs, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_1')
    conv1 = Conv2d(conv1, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv1_2')
    pool1 = MaxPool2d(conv1, (2, 2), padding='SAME', name='pool1')

    conv2 = Conv2d(pool1, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_1')
    conv2 = Conv2d(conv2, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv2_2')
    pool2 = MaxPool2d(conv2, (2, 2), padding='SAME', name='pool2')

    conv3 = Conv2d(pool2, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_1')
    conv3 = Conv2d(conv3, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv3_2')
    pool3 = MaxPool2d(conv3, (2, 2), padding='SAME', name='pool3')

    conv4 = Conv2d(pool3, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_1')
    conv4 = Conv2d(conv4, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv4_2')
    pool4 = MaxPool2d(conv4, (2, 2), padding='SAME', name='pool4')

    conv5 = Conv2d(pool4, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_1')
    conv5 = Conv2d(conv5, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv5_2')
    pool5 = MaxPool2d(conv5, (2, 2), padding='SAME', name='pool5')

    # hao add
    conv6 = Conv2d(pool5, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_1')
    conv6 = Conv2d(conv6, 1024, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv6_2')

    print(" * After conv: %s" % conv6.outputs)

    # hao add
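    # Note: with is_scale=False the (15, 15) below is an absolute output size, not a
    # scale factor; it assumes 240x240 inputs (240 / 2^4 = 15). method=1 should select
    # nearest-neighbour resizing in this TensorLayer version (an assumption worth verifying).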
    up7 = UpSampling2dLayer(conv6, (15, 15), is_scale=False, method=1, name='up7')
    up7 = ConcatLayer([up7, conv5], concat_dim=3, name='concat7')
    conv7 = Conv2d(up7, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_1')
    conv7 = Conv2d(conv7, 512, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv7_2')

    # print(nx//8, ny//8) # 30 30
    up8 = UpSampling2dLayer(conv7, (2, 2), method=1, name='up8')
    up8 = ConcatLayer([up8, conv4], concat_dim=3, name='concat8')
    conv8 = Conv2d(up8, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_1')
    conv8 = Conv2d(conv8, 256, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv8_2')

    up9 = UpSampling2dLayer(conv8, (2, 2), method=1, name='up9')
    up9 = ConcatLayer([up9, conv3], concat_dim=3, name='concat9')
    conv9 = Conv2d(up9, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_1')
    conv9 = Conv2d(conv9, 128, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv9_2')

    up10 = UpSampling2dLayer(conv9, (2, 2), method=1, name='up10')
    up10 = ConcatLayer([up10, conv2], concat_dim=3, name='concat10')
    conv10 = Conv2d(up10, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv10_1')
    conv10 = Conv2d(conv10, 64, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv10_2')

    up11 = UpSampling2dLayer(conv10, (2, 2), method=1, name='up11')
    up11 = ConcatLayer([up11, conv1], concat_dim=3, name='concat11')
    conv11 = Conv2d(up11, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv11_1')
    conv11 = Conv2d(conv11, 32, (3, 3), act=tf.nn.relu, padding='SAME', W_init=w_init, b_init=b_init, name='conv11_2')

    conv12 = Conv2d(conv11, n_out, (1, 1), act=None, name='conv12')
    print(" * Output: %s" % conv12.outputs)
    outputs = tl.act.pixel_wise_softmax(conv12.outputs)
    return conv12, outputs
--------------------------------------------------------------------------------