├── 35.mp4
├── 38.mp4
├── 3dVNet+liver.PNG
├── 3dVNet.png
├── 51.mp4
├── GTvsVNet.bmp
├── LICENSE
├── LiTS
│   ├── Vnet
│   │   ├── __init__.py
│   │   ├── layer.py
│   │   ├── model_vnet3d.py
│   │   └── util.py
│   ├── data_input
│   │   ├── __init__.py
│   │   ├── dataset_input.py
│   │   ├── puppy.jpg
│   │   ├── puppy_mask.jpg
│   │   ├── testimage.jpg
│   │   ├── testmask.jpg
│   │   ├── trainX.csv
│   │   └── trainY.csv
│   ├── dataprocess
│   │   ├── getPatchImageAndMask.py
│   │   ├── preprocessing.py
│   │   └── utils.py
│   ├── vnet3d_predict.py
│   ├── vnet3d_train.py
│   └── vnet3d_train_predict.py
├── LiTS_header.jpg
├── README.md
├── diceloss.PNG
├── liverleadboard.PNG
├── livertop30.PNG
└── tumortop34.PNG

/35.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/35.mp4
--------------------------------------------------------------------------------
/38.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/38.mp4
--------------------------------------------------------------------------------
/3dVNet+liver.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/3dVNet+liver.PNG
--------------------------------------------------------------------------------
/3dVNet.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/3dVNet.png
--------------------------------------------------------------------------------
/51.mp4:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/51.mp4
--------------------------------------------------------------------------------
/GTvsVNet.bmp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/GTvsVNet.bmp
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 | 
3 | Copyright (c) 2018 junqiangchen
4 | 
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 | 
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 | 
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 | 
--------------------------------------------------------------------------------
/LiTS/Vnet/__init__.py:
--------------------------------------------------------------------------------
1 | __author__ = 'junqiang chen'
2 | __version__ = '1.1.0'
3 | __company__ = 'Neusoft Medical System company'
4 | 
--------------------------------------------------------------------------------
/LiTS/Vnet/layer.py:
--------------------------------------------------------------------------------
1 | '''
2 | convolution layer, pool layer, initialization...
3 | '''
4 | from __future__ import division
5 | import tensorflow as tf
6 | import numpy as np
7 | import cv2
8 | 
9 | 
10 | # Weight initialization (Xavier's init)
11 | def weight_xavier_init(shape, n_inputs, n_outputs, activefunction='sigomd', uniform=True, variable_name=None):
12 |     with tf.device('/cpu:0'):
13 |         if activefunction == 'sigomd':
14 |             if uniform:
15 |                 init_range = tf.sqrt(6.0 / (n_inputs + n_outputs))
16 |                 initial = tf.random_uniform(shape, -init_range, init_range)
17 |                 return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
18 |             else:
19 |                 stddev = tf.sqrt(2.0 / (n_inputs + n_outputs))
20 |                 initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
21 |                 return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
22 |         elif activefunction == 'relu':
23 |             if uniform:
24 |                 init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * np.sqrt(2)
25 |                 initial = tf.random_uniform(shape, -init_range, init_range)
26 |                 return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
27 |             else:
28 |                 stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * np.sqrt(2)
29 |                 initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
30 |                 return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
31 |         elif activefunction == 'tan':
32 |             if uniform:
33 |                 init_range = tf.sqrt(6.0 / (n_inputs + n_outputs)) * 4
34 |                 initial = tf.random_uniform(shape, -init_range, init_range)
35 |                 return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
36 |             else:
37 |                 stddev = tf.sqrt(2.0 / (n_inputs + n_outputs)) * 4
38 |                 initial = tf.truncated_normal(shape, mean=0.0, stddev=stddev)
39 |                 return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
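# A minimal usage sketch (illustration only), matching how model_vnet3d.py calls this helper
# for a 3x3x3 conv kernel with 16 input and 32 output channels:
#
#   W = weight_xavier_init(shape=(3, 3, 3, 16, 32), n_inputs=3 * 3 * 3 * 16,
#                          n_outputs=32, activefunction='relu', variable_name='demo_W')
#
# Note: 'sigomd' (sic) above is kept as-is because every caller in this repo passes that
# exact string for sigmoid-style initialization.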
40 | 
41 | 
42 | # Bias initialization
43 | def bias_variable(shape, variable_name=None):
44 |     with tf.device('/cpu:0'):
45 |         initial = tf.constant(0.1, shape=shape)
46 |         return tf.get_variable(name=variable_name, initializer=initial, trainable=True)
47 | 
48 | 
49 | # 3D convolution
50 | def conv3d(x, W, stride=1):
51 |     conv_3d = tf.nn.conv3d(x, W, strides=[1, stride, stride, stride, 1], padding='SAME')
52 |     return conv_3d
53 | 
54 | 
55 | # 3D upsampling
56 | def upsample3d(x, scale_factor, scope=None):
57 |     '''
58 |     X shape is [nsample, dim, rows, cols, channel]
59 |     out shape is [nsample, dim*scale_factor, rows*scale_factor, cols*scale_factor, channel]
60 |     '''
61 |     x_shape = tf.shape(x)
62 |     k = tf.ones([scale_factor, scale_factor, scale_factor, x_shape[-1], x_shape[-1]])
63 |     # note k.shape = [dim, rows, cols, depth_in, depth_output]
64 |     output_shape = tf.stack(
65 |         [x_shape[0], x_shape[1] * scale_factor, x_shape[2] * scale_factor, x_shape[3] * scale_factor, x_shape[4]])
66 |     upsample = tf.nn.conv3d_transpose(value=x, filter=k, output_shape=output_shape,
67 |                                       strides=[1, scale_factor, scale_factor, scale_factor, 1],
68 |                                       padding='SAME', name=scope)
69 |     return upsample
70 | 
71 | 
72 | # 3D deconvolution
73 | def deconv3d(x, W, samefeature=False, depth=False):
74 |     """
75 |     depth flag: False means the z axis is the same size between input and output; True means the output z axis is twice the input
76 |     """
77 |     x_shape = tf.shape(x)
78 |     if depth:
79 |         if samefeature:
80 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4]])
81 |         else:
82 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3] * 2, x_shape[4] // 2])
83 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 2, 1], padding='SAME')
84 |     else:
85 |         if samefeature:
86 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4]])
87 |         else:
88 |             output_shape = tf.stack([x_shape[0], x_shape[1] * 2, x_shape[2] * 2, x_shape[3], x_shape[4] // 2])
89 |         deconv = tf.nn.conv3d_transpose(x, W, output_shape, strides=[1, 2, 2, 1, 1], padding='SAME')
90 |     return deconv
91 | 
92 | 
93 | # Max Pooling
94 | def max_pool3d(x, depth=False):
95 |     """
96 |     depth flag: False means the z axis is the same size between input and output; True means the input z axis is twice the output (pooled by 2)
97 |     """
98 |     if depth:
99 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1], padding='SAME')
100 |     else:
101 |         pool3d = tf.nn.max_pool3d(x, ksize=[1, 2, 2, 1, 1], strides=[1, 2, 2, 1, 1], padding='SAME')
102 |     return pool3d
103 | 
104 | 
105 | # Unet crop and concat
106 | def crop_and_concat(x1, x2):
107 |     x1_shape = tf.shape(x1)
108 |     x2_shape = tf.shape(x2)
109 |     # offsets for the top left corner of the crop
110 |     offsets = [0, (x1_shape[1] - x2_shape[1]) // 2,
111 |                (x1_shape[2] - x2_shape[2]) // 2, (x1_shape[3] - x2_shape[3]) // 2, 0]
112 |     size = [-1, x2_shape[1], x2_shape[2], x2_shape[3], -1]
113 |     x1_crop = tf.slice(x1, offsets, size)
114 |     return tf.concat([x1_crop, x2], 4)
115 | 
116 | 
117 | # Batch Normalization
118 | def normalizationlayer(x, is_train, height=None, width=None, image_z=None, norm_type=None, G=16, esp=1e-5, scope=None):
119 |     """
120 |     :param x: input data with shape of [batch, height, width, channel]
121 |     :param is_train: flag for normalizationlayer, True for training, False for testing
122 |     :param height: in some conditions the data height is only determined at runtime, e.g. after deconv and conv2d layers
123 |     :param width: in some conditions the data width is only determined at runtime
124 |     :param image_z:
125 |     :param norm_type: normalization type, supports "batch", "group" and None
126 |     :param G: in group normalization, the channels are separated into G groups
127 |     :param esp: small constant that prevents the divisor from being zero
128 |     :param scope: normalizationlayer scope
129 |     :return:
130 |     """
131 |     with tf.name_scope(scope + norm_type):
132 |         if norm_type == None:
133 |             output = x
134 |         elif norm_type == 'batch':
135 |             output = tf.contrib.layers.batch_norm(x, center=True, scale=True, is_training=is_train)  # bug fix: the keyword is is_training, not is_train
136 |         elif norm_type == "group":
137 |             # transpose: [bs, z, h, w, c] to [bs, c, z, h, w] following the paper
138 |             x = tf.transpose(x, [0, 4, 1, 2, 3])
139 |             N, C, Z, H, W = x.get_shape().as_list()
140 |             G = min(G, C)
141 |             if H == None and W == None and Z == None:
142 |                 Z, H, W = image_z, height, width
143 |             x = tf.reshape(x, [-1, G, C // G, Z, H, W])
144 |             mean, var = tf.nn.moments(x, [2, 3, 4, 5], keep_dims=True)
145 |             x = (x - mean) /
tf.sqrt(var + esp) 146 | gama = tf.get_variable(scope + norm_type + 'group_gama', [C], initializer=tf.constant_initializer(1.0)) 147 | beta = tf.get_variable(scope + norm_type + 'group_beta', [C], initializer=tf.constant_initializer(0.0)) 148 | gama = tf.reshape(gama, [1, C, 1, 1, 1]) 149 | beta = tf.reshape(beta, [1, C, 1, 1, 1]) 150 | output = tf.reshape(x, [-1, C, Z, H, W]) * gama + beta 151 | # tranpose:[bs,c,z,h,w]to[bs,z,h,w,c]following the paper 152 | output = tf.transpose(output, [0, 2, 3, 4, 1]) 153 | return output 154 | 155 | 156 | # resnet add_connect 157 | def resnet_Add(x1, x2): 158 | if x1.get_shape().as_list()[4] != x2.get_shape().as_list()[4]: 159 | # Option A: Zero-padding 160 | residual_connection = x2 + tf.pad(x1, [[0, 0], [0, 0], [0, 0], [0, 0], 161 | [0, x2.get_shape().as_list()[4] - 162 | x1.get_shape().as_list()[4]]]) 163 | else: 164 | residual_connection = x2 + x1 165 | return residual_connection 166 | 167 | 168 | def save_images(images, size, path): 169 | img = (images + 1.0) / 2.0 170 | h, w = img.shape[1], img.shape[2] 171 | merge_img = np.zeros((h * size[0], w * size[1])) 172 | for idx, image in enumerate(images): 173 | i = idx % size[1] 174 | j = idx // size[1] 175 | merge_img[j * h:j * h + h, i * w:i * w + w] = image 176 | result = merge_img * 255. 177 | result = np.clip(result, 0, 255).astype('uint8') 178 | return cv2.imwrite(path, result) 179 | -------------------------------------------------------------------------------- /LiTS/Vnet/model_vnet3d.py: -------------------------------------------------------------------------------- 1 | ''' 2 | 3 | ''' 4 | from Vnet.layer import (conv3d, deconv3d, normalizationlayer, crop_and_concat, resnet_Add, 5 | weight_xavier_init, bias_variable, save_images) 6 | import tensorflow as tf 7 | import numpy as np 8 | import cv2 9 | import os 10 | 11 | 12 | def conv_bn_relu_drop(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 13 | with tf.name_scope(scope): 14 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 15 | n_outputs=kernal[-1], activefunction='relu', variable_name=scope + 'conv_W') 16 | B = bias_variable([kernal[-1]], variable_name=scope + 'conv_B') 17 | conv = conv3d(x, W) + B 18 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 19 | scope=scope) 20 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 21 | return conv 22 | 23 | 24 | def down_sampling(x, kernal, phase, drop, image_z=None, height=None, width=None, scope=None): 25 | with tf.name_scope(scope): 26 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 27 | n_outputs=kernal[-1], 28 | activefunction='relu', variable_name=scope + 'W') 29 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 30 | conv = conv3d(x, W, 2) + B 31 | conv = normalizationlayer(conv, is_train=phase, height=height, width=width, image_z=image_z, norm_type='group', 32 | scope=scope) 33 | conv = tf.nn.dropout(tf.nn.relu(conv), drop) 34 | return conv 35 | 36 | 37 | def deconv_relu(x, kernal, samefeture=False, scope=None): 38 | with tf.name_scope(scope): 39 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[-1], 40 | n_outputs=kernal[-2], activefunction='relu', variable_name=scope + 'W') 41 | B = bias_variable([kernal[-2]], variable_name=scope + 'B') 42 | conv = deconv3d(x, W, samefeture, True) + B 43 | conv = tf.nn.relu(conv) 44 | return conv 45 | 46 | 47 | def conv_sigmod(x, 
kernal, scope=None): 48 | with tf.name_scope(scope): 49 | W = weight_xavier_init(shape=kernal, n_inputs=kernal[0] * kernal[1] * kernal[2] * kernal[3], 50 | n_outputs=kernal[-1], activefunction='sigomd', variable_name=scope + 'W') 51 | B = bias_variable([kernal[-1]], variable_name=scope + 'B') 52 | conv = conv3d(x, W) + B 53 | conv = tf.nn.sigmoid(conv) 54 | return conv 55 | 56 | 57 | def _create_conv_net(X, image_z, image_width, image_height, image_channel, phase, drop, n_class=1): 58 | inputX = tf.reshape(X, [-1, image_z, image_width, image_height, image_channel]) # shape=(?, 32, 32, 1) 59 | # Vnet model 60 | # layer1->convolution 61 | layer0 = conv_bn_relu_drop(x=inputX, kernal=(3, 3, 3, image_channel, 16), phase=phase, drop=drop, 62 | scope='layer0') 63 | layer1 = conv_bn_relu_drop(x=layer0, kernal=(3, 3, 3, 16, 16), phase=phase, drop=drop, 64 | scope='layer1') 65 | layer1 = resnet_Add(x1=layer0, x2=layer1) 66 | # down sampling1 67 | down1 = down_sampling(x=layer1, kernal=(3, 3, 3, 16, 32), phase=phase, drop=drop, scope='down1') 68 | # layer2->convolution 69 | layer2 = conv_bn_relu_drop(x=down1, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, 70 | scope='layer2_1') 71 | layer2 = conv_bn_relu_drop(x=layer2, kernal=(3, 3, 3, 32, 32), phase=phase, drop=drop, 72 | scope='layer2_2') 73 | layer2 = resnet_Add(x1=down1, x2=layer2) 74 | # down sampling2 75 | down2 = down_sampling(x=layer2, kernal=(3, 3, 3, 32, 64), phase=phase, drop=drop, scope='down2') 76 | # layer3->convolution 77 | layer3 = conv_bn_relu_drop(x=down2, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, 78 | scope='layer3_1') 79 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, 80 | scope='layer3_2') 81 | layer3 = conv_bn_relu_drop(x=layer3, kernal=(3, 3, 3, 64, 64), phase=phase, drop=drop, 82 | scope='layer3_3') 83 | layer3 = resnet_Add(x1=down2, x2=layer3) 84 | # down sampling3 85 | down3 = down_sampling(x=layer3, kernal=(3, 3, 3, 64, 128), phase=phase, drop=drop, scope='down3') 86 | # layer4->convolution 87 | layer4 = conv_bn_relu_drop(x=down3, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, 88 | scope='layer4_1') 89 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, 90 | scope='layer4_2') 91 | layer4 = conv_bn_relu_drop(x=layer4, kernal=(3, 3, 3, 128, 128), phase=phase, drop=drop, 92 | scope='layer4_3') 93 | layer4 = resnet_Add(x1=down3, x2=layer4) 94 | # down sampling4 95 | down4 = down_sampling(x=layer4, kernal=(3, 3, 3, 128, 256), phase=phase, drop=drop, scope='down4') 96 | # layer5->convolution 97 | layer5 = conv_bn_relu_drop(x=down4, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 98 | scope='layer5_1') 99 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 100 | scope='layer5_2') 101 | layer5 = conv_bn_relu_drop(x=layer5, kernal=(3, 3, 3, 256, 256), phase=phase, drop=drop, 102 | scope='layer5_3') 103 | layer5 = resnet_Add(x1=down4, x2=layer5) 104 | 105 | # layer9->deconvolution 106 | deconv1 = deconv_relu(x=layer5, kernal=(3, 3, 3, 128, 256), scope='deconv1') 107 | # layer8->convolution 108 | layer6 = crop_and_concat(layer4, deconv1) 109 | _, Z, H, W, _ = layer4.get_shape().as_list() 110 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 256, 128), image_z=Z, height=H, width=W, phase=phase, 111 | drop=drop, scope='layer6_1') 112 | layer6 = conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase, 113 | drop=drop, scope='layer6_2') 114 | layer6 = 
conv_bn_relu_drop(x=layer6, kernal=(3, 3, 3, 128, 128), image_z=Z, height=H, width=W, phase=phase, 115 | drop=drop, scope='layer6_3') 116 | layer6 = resnet_Add(x1=deconv1, x2=layer6) 117 | # layer9->deconvolution 118 | deconv2 = deconv_relu(x=layer6, kernal=(3, 3, 3, 64, 128), scope='deconv2') 119 | # layer8->convolution 120 | layer7 = crop_and_concat(layer3, deconv2) 121 | _, Z, H, W, _ = layer3.get_shape().as_list() 122 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 128, 64), image_z=Z, height=H, width=W, phase=phase, 123 | drop=drop, scope='layer7_1') 124 | layer7 = conv_bn_relu_drop(x=layer7, kernal=(3, 3, 3, 64, 64), image_z=Z, height=H, width=W, phase=phase, 125 | drop=drop, scope='layer7_2') 126 | layer7 = resnet_Add(x1=deconv2, x2=layer7) 127 | # layer9->deconvolution 128 | deconv3 = deconv_relu(x=layer7, kernal=(3, 3, 3, 32, 64), scope='deconv3') 129 | # layer8->convolution 130 | layer8 = crop_and_concat(layer2, deconv3) 131 | _, Z, H, W, _ = layer2.get_shape().as_list() 132 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 64, 32), image_z=Z, height=H, width=W, phase=phase, 133 | drop=drop, scope='layer10_1') 134 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 135 | drop=drop, scope='layer10_2') 136 | layer8 = conv_bn_relu_drop(x=layer8, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 137 | drop=drop, scope='layer10_3') 138 | layer8 = resnet_Add(x1=deconv3, x2=layer8) 139 | # layer9->deconvolution 140 | deconv4 = deconv_relu(x=layer8, kernal=(3, 3, 3, 16, 32), scope='deconv4') 141 | # layer8->convolution 142 | layer9 = crop_and_concat(layer1, deconv4) 143 | _, Z, H, W, _ = layer1.get_shape().as_list() 144 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 145 | drop=drop, scope='layer11_1') 146 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 147 | drop=drop, scope='layer11_2') 148 | layer9 = conv_bn_relu_drop(x=layer9, kernal=(3, 3, 3, 32, 32), image_z=Z, height=H, width=W, phase=phase, 149 | drop=drop, scope='layer11_3') 150 | layer9 = resnet_Add(x1=deconv4, x2=layer9) 151 | # layer14->output 152 | output_map = conv_sigmod(x=layer9, kernal=(1, 1, 1, 32, n_class), scope='output') 153 | return output_map 154 | 155 | 156 | # Serve data by batches 157 | def _next_batch(train_images, train_labels, batch_size, index_in_epoch): 158 | start = index_in_epoch 159 | index_in_epoch += batch_size 160 | 161 | num_examples = train_images.shape[0] 162 | # when all trainig data have been already used, it is reorder randomly 163 | if index_in_epoch > num_examples: 164 | # shuffle the data 165 | perm = np.arange(num_examples) 166 | np.random.shuffle(perm) 167 | train_images = train_images[perm] 168 | train_labels = train_labels[perm] 169 | # start next epoch 170 | start = 0 171 | index_in_epoch = batch_size 172 | assert batch_size <= num_examples 173 | end = index_in_epoch 174 | return train_images[start:end], train_labels[start:end], index_in_epoch 175 | 176 | 177 | class Vnet3dModule(object): 178 | """ 179 | A unet2d implementation 180 | 181 | :param image_height: number of height in the input image 182 | :param image_width: number of width in the input image 183 | :param image_depth: number of depth in the input image 184 | :param channels: number of channels in the input image 185 | :param costname: name of the cost function.Default is "dice coefficient" 186 | """ 187 | 188 | def 
__init__(self, image_height, image_width, image_depth, channels=1, costname=("dice coefficient",), 189 | inference=False, model_path=None): 190 | self.image_width = image_width 191 | self.image_height = image_height 192 | self.image_depth = image_depth 193 | self.channels = channels 194 | 195 | self.X = tf.placeholder("float", shape=[None, self.image_depth, self.image_height, self.image_width, 196 | self.channels]) 197 | self.Y_gt = tf.placeholder("float", shape=[None, self.image_depth, self.image_height, self.image_width, 198 | self.channels]) 199 | self.lr = tf.placeholder('float') 200 | self.phase = tf.placeholder(tf.bool) 201 | self.drop = tf.placeholder('float') 202 | 203 | self.Y_pred = _create_conv_net(self.X, self.image_depth, self.image_width, self.image_height, self.channels, 204 | self.phase, self.drop) 205 | self.cost = self.__get_cost(costname[0]) 206 | self.accuracy = -self.__get_cost(costname[0]) 207 | if inference: 208 | init = tf.global_variables_initializer() 209 | saver = tf.train.Saver() 210 | self.sess = tf.InteractiveSession() 211 | self.sess.run(init) 212 | saver.restore(self.sess, model_path) 213 | 214 | def __get_cost(self, cost_name): 215 | Z, H, W, C = self.Y_gt.get_shape().as_list()[1:] 216 | if cost_name == "dice coefficient": 217 | smooth = 1e-5 218 | pred_flat = tf.reshape(self.Y_pred, [-1, H * W * C * Z]) 219 | true_flat = tf.reshape(self.Y_gt, [-1, H * W * C * Z]) 220 | intersection = 2 * tf.reduce_sum(pred_flat * true_flat, axis=1) + smooth 221 | denominator = tf.reduce_sum(pred_flat, axis=1) + tf.reduce_sum(true_flat, axis=1) + smooth 222 | loss = -tf.reduce_mean(intersection / denominator) 223 | return loss 224 | 225 | def train(self, train_images, train_lanbels, model_path, logs_path, learning_rate, 226 | dropout_conv=0.8, train_epochs=5, batch_size=1): 227 | if not os.path.exists(logs_path): 228 | os.makedirs(logs_path) 229 | if not os.path.exists(logs_path + "model\\"): 230 | os.makedirs(logs_path + "model\\") 231 | model_path = logs_path + "model\\" + model_path 232 | train_op = tf.train.AdamOptimizer(self.lr).minimize(self.cost) 233 | 234 | init = tf.global_variables_initializer() 235 | saver = tf.train.Saver(tf.all_variables(), max_to_keep=10) 236 | 237 | tf.summary.scalar("loss", self.cost) 238 | tf.summary.scalar("accuracy", self.accuracy) 239 | merged_summary_op = tf.summary.merge_all() 240 | sess = tf.InteractiveSession(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) 241 | summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) 242 | sess.run(init) 243 | 244 | DISPLAY_STEP = 1 245 | index_in_epoch = 0 246 | 247 | train_epochs = train_images.shape[0] * train_epochs 248 | for i in range(train_epochs): 249 | # get new batch 250 | batch_xs_path, batch_ys_path, index_in_epoch = _next_batch(train_images, train_lanbels, batch_size, 251 | index_in_epoch) 252 | batch_xs = np.empty((len(batch_xs_path), self.image_depth, self.image_height, self.image_width, 253 | self.channels)) 254 | batch_ys = np.empty((len(batch_ys_path), self.image_depth, self.image_height, self.image_width, 255 | self.channels)) 256 | for num in range(len(batch_xs_path)): 257 | index = 0 258 | for _ in os.listdir(batch_xs_path[num][0]): 259 | image = cv2.imread(batch_xs_path[num][0] + "/" + str(index) + ".bmp", cv2.IMREAD_GRAYSCALE) 260 | label = cv2.imread(batch_ys_path[num][0] + "/" + str(index) + ".bmp", cv2.IMREAD_GRAYSCALE) 261 | batch_xs[num, index, :, :, :] = np.reshape(image, (self.image_height, self.image_width, 262 | 
self.channels)) 263 | batch_ys[num, index, :, :, :] = np.reshape(label, (self.image_height, self.image_width, 264 | self.channels)) 265 | index += 1 266 | # Extracting images and labels from given data 267 | batch_xs = batch_xs.astype(np.float) 268 | batch_ys = batch_ys.astype(np.float) 269 | # Normalize from [0:255] => [0.0:1.0] 270 | batch_xs = np.multiply(batch_xs, 1.0 / 255.0) 271 | batch_ys = np.multiply(batch_ys, 1.0 / 255.0) 272 | # check progress on every 1st,2nd,...,10th,20th,...,100th... step 273 | if i % DISPLAY_STEP == 0 or (i + 1) == train_epochs: 274 | train_loss, train_accuracy = sess.run([self.cost, self.accuracy], 275 | feed_dict={self.X: batch_xs, 276 | self.Y_gt: batch_ys, 277 | self.lr: learning_rate, 278 | self.phase: 1, 279 | self.drop: dropout_conv}) 280 | print('epochs %d training_loss ,Training_accuracy => %.5f,%.5f ' % (i, train_loss, train_accuracy)) 281 | 282 | pred = sess.run(self.Y_pred, feed_dict={self.X: batch_xs, 283 | self.Y_gt: batch_ys, 284 | self.phase: 1, 285 | self.drop: 1}) 286 | 287 | gt = np.reshape(batch_xs[0], (self.image_depth, self.image_height, self.image_width)) 288 | gt = gt.astype(np.float32) 289 | save_images(gt, [4, 4], path=logs_path + 'src_%d_epoch.png' % (i)) 290 | 291 | gt = np.reshape(batch_ys[0], (self.image_depth, self.image_height, self.image_width)) 292 | gt = gt.astype(np.float32) 293 | save_images(gt, [4, 4], path=logs_path + 'gt_%d_epoch.png' % (i)) 294 | 295 | result = np.reshape(pred[0], (self.image_depth, self.image_height, self.image_width)) 296 | result = result.astype(np.float32) 297 | save_images(result, [4, 4], path=logs_path + 'predict_%d_epoch.png' % (i)) 298 | 299 | save_path = saver.save(sess, model_path, global_step=i) 300 | print("Model saved in file:", save_path) 301 | if i % (DISPLAY_STEP * 10) == 0 and i: 302 | DISPLAY_STEP *= 10 303 | 304 | # train on batch 305 | _, summary = sess.run([train_op, merged_summary_op], feed_dict={self.X: batch_xs, 306 | self.Y_gt: batch_ys, 307 | self.lr: learning_rate, 308 | self.phase: 1, 309 | self.drop: dropout_conv}) 310 | summary_writer.add_summary(summary, i) 311 | summary_writer.close() 312 | 313 | save_path = saver.save(sess, model_path) 314 | print("Model saved in file:", save_path) 315 | 316 | def prediction(self, test_images): 317 | test_images = np.reshape(test_images, (test_images.shape[0], test_images.shape[1], test_images.shape[2], 1)) 318 | test_images = test_images.astype(np.float) 319 | test_images = np.multiply(test_images, 1.0 / 255.0) 320 | y_dummy = test_images 321 | pred = self.sess.run(self.Y_pred, feed_dict={self.X: [test_images], 322 | self.Y_gt: [y_dummy], 323 | self.phase: 1, 324 | self.drop: 1}) 325 | result = pred.astype(np.float32) * 255. 
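        # scale sigmoid probabilities from [0, 1] up to the [0, 255] grayscale range; the next lines clip and cast for image output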
326 | result = np.clip(result, 0, 255).astype('uint8') 327 | result = np.reshape(result, (test_images.shape[0], test_images.shape[1], test_images.shape[2])) 328 | return result 329 | -------------------------------------------------------------------------------- /LiTS/Vnet/util.py: -------------------------------------------------------------------------------- 1 | from tensorflow.python.framework import graph_util 2 | from tensorflow.python.framework import graph_io 3 | import tensorflow as tf 4 | import numpy as np 5 | 6 | 7 | def getdice(Y_pred,Y_gt,K=255): 8 | intersection=2*np.sum(Y_pred[Y_gt==K]) 9 | denominator=np.sum(Y_pred)+np.sum(Y_gt) 10 | loss=(intersection/denominator) 11 | return loss 12 | 13 | 14 | 15 | def convertMetaModelToPbModel(meta_model, pb_model): 16 | # Step 1 17 | # import the model metagraph 18 | saver = tf.train.import_meta_graph(meta_model + '.meta', clear_devices=True) 19 | # make that as the default graph 20 | graph = tf.get_default_graph() 21 | sess = tf.Session() 22 | # now restore the variables 23 | saver.restore(sess, meta_model) 24 | # Step 2 25 | # Find the output name 26 | for op in graph.get_operations(): 27 | print(op.name) 28 | # Step 3 29 | output_graph_def = graph_util.convert_variables_to_constants( 30 | sess, # The session 31 | sess.graph_def, # input_graph_def is useful for retrieving the nodes 32 | ["Placeholder", "output/Sigmoid"]) 33 | 34 | # Step 4 35 | # output folder 36 | output_fld = './' 37 | # output pb file name 38 | output_model_file = 'model.pb' 39 | # write the graph 40 | graph_io.write_graph(output_graph_def, pb_model + output_fld, output_model_file, as_text=False) 41 | -------------------------------------------------------------------------------- /LiTS/data_input/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /LiTS/data_input/dataset_input.py: -------------------------------------------------------------------------------- 1 | #coding=utf-8 2 | 3 | import pandas as pd 4 | import tensorflow as tf 5 | import random 6 | 7 | preprocessing_dict = {'resize_shape':[512, 512], 8 | 'rotate':True, 9 | 'rotate_fix':True, 10 | 'flip':True, 11 | 'brightness':True, 12 | 'brightness_range':0.2, 13 | 'saturation':True, 14 | 'saturation_range':[0.5, 1.5], 15 | 'contrast':True, 16 | 'contrast_range':[0.5, 1.5]} 17 | 18 | image_type = 'jpg' 19 | 20 | #TODO how to add image_type and preprocessing_dict as addition arg in map function 21 | 22 | def _parse_function(image, mask): 23 | image_string = tf.read_file(image) 24 | mask_string = tf.read_file(mask) 25 | if image_type == 'jpg': 26 | image_decoded = tf.image.decode_jpeg(image_string, 0) 27 | mask_decoded = tf.image.decode_jpeg(mask_string, 1) 28 | elif image_type == 'png': 29 | image_decoded = tf.image.decode_png(image_string, 0) 30 | mask_decoded = tf.image.decode_png(mask_string, 1) 31 | elif image_type == 'bmp': 32 | image_decoded = tf.image.decode_bmp(image_string, 0) 33 | mask_decoded = tf.image.decode_bmp(mask_string, 1) 34 | else: 35 | raise TypeError('==> Error: Only support jpg, png and bmp.') 36 | 37 | # already in 0~1 38 | image_decoded = tf.image.convert_image_dtype(image_decoded, tf.float32) 39 | mask_decoded = tf.image.convert_image_dtype(mask_decoded, tf.float32) 40 | 41 | return image_decoded, mask_decoded 42 | 43 | def _preprocess_function(image_decoded, mask_decoded): 44 | shape = preprocessing_dict['resize_shape'] 45 | assert len(shape) == 2 
and isinstance(shape, list), '==> Error: shape error.'
46 |     image = tf.image.resize_images(image_decoded, shape)
47 |     mask = tf.image.resize_images(mask_decoded, shape)
48 | 
49 |     # randomly rotate
50 |     if preprocessing_dict['rotate'] == True:
51 |         if preprocessing_dict['rotate_fix'] == True:
52 |             k = random.sample([1, 2, 3], 1)[0]  # note: evaluated once when the map function is traced, so k is fixed for the whole dataset
53 |             image = tf.image.rot90(image, k)
54 |             mask = tf.image.rot90(mask, k)
55 |         else:
56 |             raise ValueError('==> Error: Only support rotate 90, 180 and 270 degree.')
57 | 
58 |     # randomly flip
59 |     if preprocessing_dict['flip'] == True:
60 |         k = [1, 2]
61 |         if random.sample(k, 1) == [1]:
62 |             image = tf.image.flip_left_right(image)
63 |             mask = tf.image.flip_left_right(mask)
64 |         else:
65 |             image = tf.image.flip_up_down(image)
66 |             mask = tf.image.flip_up_down(mask)
67 | 
68 |     # adjust the brightness of images by a random factor
69 |     if preprocessing_dict['brightness'] == True:
70 |         delta = preprocessing_dict['brightness_range']  # bug fix: read the numeric range, not the boolean 'brightness' flag
71 |         # delta randomly picked in the interval [-delta, delta)
72 |         image = tf.image.random_brightness(image, max_delta=delta)
73 | 
74 |     # adjust the saturation of an RGB image by a random factor
75 |     if preprocessing_dict['saturation'] == True:
76 |         saturation_range = preprocessing_dict['saturation_range']
77 |         assert len(saturation_range) == 2 and isinstance(saturation_range, list), '==> Error: saturation_range error.'
78 |         image = tf.image.random_saturation(image, *saturation_range)
79 | 
80 |     # adjust the contrast of an image by a random factor
81 |     if preprocessing_dict['contrast'] == True:
82 |         contrast_range = preprocessing_dict['contrast_range']
83 |         assert len(contrast_range) == 2 and isinstance(contrast_range, list), '==> Error: contrast_range error.'
84 |         image = tf.image.random_contrast(image, *contrast_range)
85 | 
86 |     # make sure pixel values stay in 0~1
87 |     image = tf.clip_by_value(image, 0.0, 1.0)
88 | 
89 |     return image, mask
90 | 
91 | def datagenerator(imagecsv_path, maskcsv_path, batch_size):
92 |     """
93 |     return: data iterator
94 |     """
95 |     df_image = pd.read_csv(imagecsv_path)
96 |     df_mask = pd.read_csv(maskcsv_path)
97 | 
98 |     try:
99 |         image_filenames = tf.constant(df_image['filename'].tolist())
100 |         mask_filenames = tf.constant(df_mask['filename'].tolist())
101 |     except:
102 |         raise ValueError('==> csv error')
103 | 
104 |     dataset = tf.data.Dataset.from_tensor_slices((image_filenames, mask_filenames))
105 |     dataset = dataset.shuffle(buffer_size=10000)
106 |     dataset = dataset.repeat()
107 |     dataset = dataset.map(_parse_function)
108 |     dataset = dataset.map(_preprocess_function)
109 |     dataset = dataset.batch(batch_size)
110 |     data_iterator = dataset.make_initializable_iterator()
111 | 
112 |     return data_iterator
113 | 
114 | def main():
115 |     # test
116 |     import cv2
117 |     import numpy as np
118 | 
119 |     data_iterator = datagenerator('trainX.csv', 'trainY.csv', 1)
120 |     with tf.Session() as sess:
121 |         sess.run(data_iterator.initializer)
122 |         next_batch = data_iterator.get_next()
123 |         image, mask = sess.run(next_batch)
124 |         cv2.imwrite('testimage.jpg', cv2.cvtColor(np.squeeze(image) * 255, cv2.COLOR_BGR2RGB))
125 |         cv2.imwrite('testmask.jpg', np.squeeze(mask) * 255)
126 | 
127 | if __name__ == '__main__':
128 |     main()
129 | 
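# One possible answer to the TODO near the top of this file (a sketch, not original code):
# extra arguments such as image_type can be bound into the map function with functools.partial:
#
#   from functools import partial
#
#   def _parse_function_v2(image, mask, image_type='jpg'):
#       ...  # same body as _parse_function, but reading image_type from the argument
#
#   dataset = dataset.map(partial(_parse_function_v2, image_type='png'))
#
# The same pattern works for passing preprocessing_dict into _preprocess_function.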
--------------------------------------------------------------------------------
/LiTS/data_input/puppy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/LiTS/data_input/puppy.jpg
--------------------------------------------------------------------------------
/LiTS/data_input/puppy_mask.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/LiTS/data_input/puppy_mask.jpg
--------------------------------------------------------------------------------
/LiTS/data_input/testimage.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/LiTS/data_input/testimage.jpg
--------------------------------------------------------------------------------
/LiTS/data_input/testmask.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/LiTS/data_input/testmask.jpg
--------------------------------------------------------------------------------
/LiTS/data_input/trainX.csv:
--------------------------------------------------------------------------------
1 | ,filename
2 | 0,puppy.jpg
3 | 
--------------------------------------------------------------------------------
/LiTS/data_input/trainY.csv:
--------------------------------------------------------------------------------
1 | ,filename
2 | 0,puppy_mask.jpg
3 | 
--------------------------------------------------------------------------------
/LiTS/dataprocess/getPatchImageAndMask.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function, division
2 | import SimpleITK as sitk
3 | import numpy as np
4 | import cv2
5 | import os
6 | 
7 | trainImage = "D:\Data\LIST\\3dPatchdata_25625616\Image"
8 | trainLiverMask = "D:\Data\LIST\\3dPatchdata_25625616\MaskLiver"
9 | trainTumorMask = "D:\Data\LIST\\3dPatchdata_25625616\MaskTumor"
10 | 
11 | 
12 | def getRangImageDepth(image):
13 |     """
14 |     :param image:
15 |     :return: (startposition, endposition), the first and last slice indices along z that contain non-zero voxels
16 |     """
17 |     firstflag = True  # renamed from 'fistflag' (typo)
18 |     startposition = 0
19 |     endposition = 0
20 |     for z in range(image.shape[0]):
21 |         notzeroflag = np.max(image[z])
22 |         if notzeroflag and firstflag:
23 |             startposition = z
24 |             firstflag = False
25 |         if notzeroflag:
26 |             endposition = z
27 |     return startposition, endposition
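# A tiny worked example of getRangImageDepth (illustration only): for a 10-slice mask that is
# zero everywhere except slices 3..7, the function returns (3, 7), the first and last
# slice indices containing foreground:
#
#   mask = np.zeros((10, 4, 4))
#   mask[3:8] = 1
#   getRangImageDepth(mask)  # -> (3, 7)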
28 | 
29 | 
30 | def subimage_generator(image, mask, patch_block_size, numberxy, numberz):
31 |     """
32 |     generate the sub images and masks with patch_block_size
33 |     :param image:
34 |     :param patch_block_size:
35 |     :param numberxy, numberz: sliding-window steps in the xy plane and along z
36 |     :return:
37 |     """
38 |     width = np.shape(image)[1]
39 |     height = np.shape(image)[2]
40 |     imagez = np.shape(image)[0]
41 |     block_width = np.array(patch_block_size)[1]
42 |     block_height = np.array(patch_block_size)[2]
43 |     blockz = np.array(patch_block_size)[0]
44 |     stridewidth = (width - block_width) // numberxy
45 |     strideheight = (height - block_height) // numberxy
46 |     stridez = (imagez - blockz) // numberz
47 |     # step 1: if all strides are at least 1, slide a window over the volume and keep every block whose mask is non-empty
48 |     if stridez >= 1 and stridewidth >= 1 and strideheight >= 1:
49 |         step_width = width - (stridewidth * numberxy + block_width)
50 |         step_width = step_width // 2
51 |         step_height = height - (strideheight * numberxy + block_height)
52 |         step_height = step_height // 2
53 |         step_z = imagez - (stridez * numberz + blockz)
54 |         step_z = step_z // 2
55 |         hr_samples_list = []
56 |         hr_mask_samples_list = []
57 |         for z in range(step_z, numberz * (stridez + 1) + step_z, numberz):
58 |             for x in range(step_width, numberxy * (stridewidth + 1) + step_width, numberxy):
59 |                 for y in range(step_height, numberxy * (strideheight + 1) + step_height, numberxy):
60 |                     if np.max(mask[z:z + blockz, x:x + block_width, y:y + block_height]) != 0:
61 |                         hr_samples_list.append(image[z:z + blockz, x:x + block_width, y:y + block_height])
62 |                         hr_mask_samples_list.append(mask[z:z + blockz, x:x + block_width, y:y + block_height])
63 |         hr_samples = np.array(hr_samples_list).reshape((len(hr_samples_list), blockz, block_width, block_height))
64 |         hr_mask_samples = np.array(hr_mask_samples_list).reshape(
65 |             (len(hr_mask_samples_list), blockz, block_width, block_height))
66 |         return hr_samples, hr_mask_samples
67 |     # step 2: otherwise, return a single zero-padded sample
68 |     else:
69 |         nb_sub_images = 1 * 1 * 1
70 |         hr_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float)
71 |         hr_mask_samples = np.zeros(shape=(nb_sub_images, blockz, block_width, block_height), dtype=np.float)
72 |         rangz = min(imagez, blockz)
73 |         rangwidth = min(width, block_width)
74 |         rangheight = min(height, block_height)
75 |         hr_samples[0, 0:rangz, 0:rangwidth, 0:rangheight] = image[0:rangz, 0:rangwidth, 0:rangheight]
76 |         hr_mask_samples[0, 0:rangz, 0:rangwidth, 0:rangheight] = mask[0:rangz, 0:rangwidth, 0:rangheight]
77 |         return hr_samples, hr_mask_samples
78 | 
79 | 
80 | def make_patch(image, mask, patch_block_size, numberxy, numberz, startpostion, endpostion):
81 |     """
82 |     make number patch
83 |     :param image: [depth, 512, 512]
84 |     :param patch_block: such as [64, 128, 128]
85 |     :return: [samples, 64, 128, 128]
86 |     expand the z range of the subimage: [startpostion - blockz//2 : endpostion + blockz//2, :, :]
87 |     """
88 |     blockz = np.array(patch_block_size)[0]
89 |     imagezsrc = np.shape(image)[0]
90 |     subimage_startpostion = startpostion - blockz // 2
91 |     subimage_endpostion = endpostion + blockz // 2
92 |     if subimage_startpostion < 0:
93 |         subimage_startpostion = 0
94 |     if subimage_endpostion > imagezsrc:
95 |         subimage_endpostion = imagezsrc
96 |     if (subimage_endpostion - subimage_startpostion) < blockz:
97 |         subimage_startpostion = 0
98 |         subimage_endpostion = imagezsrc
99 |     imageroi = image[subimage_startpostion:subimage_endpostion, :, :]
100 |     image_subsample, mask_subsample = subimage_generator(image=imageroi, mask=mask[subimage_startpostion:subimage_endpostion, :, :],  # bug fix: pass the cropped z-range ROI, not the full volume
101 |                                                          patch_block_size=patch_block_size, numberxy=numberxy, numberz=numberz)
102 |     return image_subsample, mask_subsample
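# A small sketch of the z-range expansion above (numbers are illustrative): with blockz = 16
# and a liver spanning slices 40..90 of a 120-slice volume, the ROI becomes slices
# 32..98 (startpostion - blockz // 2 to endpostion + blockz // 2, clamped to the volume),
# so every extracted patch keeps some context above and below the organ.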
103 | 
104 | 
105 | '''
106 | This function reads a '.mhd' file using SimpleITK and returns the image array, origin and spacing of the image.
107 | read_Image_mask function gets image and mask
108 | '''
109 | 
110 | 
111 | def load_itk(filename):
112 |     """
113 |     load mhd files and rescale the intensity to 0-255
114 |     :param filename:
115 |     :return:
116 |     """
117 |     rescalFilt = sitk.RescaleIntensityImageFilter()
118 |     rescalFilt.SetOutputMaximum(255)
119 |     rescalFilt.SetOutputMinimum(0)
120 |     # Reads the image using SimpleITK
121 |     itkimage = rescalFilt.Execute(sitk.Cast(sitk.ReadImage(filename), sitk.sitkFloat32))
122 |     return itkimage
123 | 
124 | 
125 | def gen_image_mask(srcimg, seg_image, index, shape, numberxy, numberz):
126 |     # step 1 get the effective z range of the mask (startpostion:endpostion)
127 |     startpostion, endpostion = getRangImageDepth(seg_image)
128 |     # step 2 get subimages (numberxy*numberxy*numberz, 16, 256, 256)
129 |     sub_srcimages, sub_liverimages = make_patch(srcimg, seg_image, patch_block_size=shape, numberxy=numberxy, numberz=numberz,
130 |                                                 startpostion=startpostion,
131 |                                                 endpostion=endpostion)
132 |     # step 3 only save subimages (numberxy*numberxy*numberz, 16, 256, 256) whose mask is non-empty
133 |     samples, imagez = np.shape(sub_srcimages)[0], np.shape(sub_srcimages)[1]
134 |     for j in range(samples):
135 |         sub_masks = sub_liverimages.astype(np.float32)
136 |         sub_masks = np.clip(sub_masks, 0, 255).astype('uint8')
137 |         if np.max(sub_masks[j, :, :, :]) == 255:
138 |             filepath = trainImage + "\\" + str(index) + "_" + str(j) + "\\"
139 |             filepath2 = trainLiverMask + "\\" + str(index) + "_" + str(j) + "\\"
140 |             if not os.path.exists(filepath) and not os.path.exists(filepath2):
141 |                 os.makedirs(filepath)
142 |                 os.makedirs(filepath2)
143 |             for z in range(imagez):
144 |                 image = sub_srcimages[j, z, :, :]
145 |                 image = image.astype(np.float32)
146 |                 image = np.clip(image, 0, 255).astype('uint8')
147 |                 cv2.imwrite(filepath + str(z) + ".bmp", image)
148 |                 cv2.imwrite(filepath2 + str(z) + ".bmp", sub_masks[j, z, :, :])
149 | 
150 | 
151 | def preparetraindata():
152 |     for i in range(0, 131, 1):
153 |         seg = sitk.ReadImage("D:\Data\LIST\src_data\segmentation-" + str(i) + ".nii", sitk.sitkUInt8)
154 |         segimg = sitk.GetArrayFromImage(seg)
155 |         src = load_itk("D:\Data\LIST\src_data\\volume-" + str(i) + ".nii")
156 |         srcimg = sitk.GetArrayFromImage(src)
157 | 
158 |         seg_liverimage = segimg.copy()
159 |         seg_liverimage[segimg > 0] = 255
160 | 
161 |         seg_tumorimage = segimg.copy()
162 |         seg_tumorimage[segimg == 1] = 0
163 |         seg_tumorimage[segimg == 2] = 255
164 |         gen_image_mask(srcimg, seg_liverimage, i, shape=(16, 256, 256), numberxy=5, numberz=10)
165 |         # gen_image_mask(srcimg, seg_tumorimage, i, shape=(16, 256, 256), numberxy=5, numberz=10)
166 | 
167 | 
168 | preparetraindata()
--------------------------------------------------------------------------------
/LiTS/dataprocess/preprocessing.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import numpy as np
3 | 
4 | class LITSPreprocessor(object):
5 |     def __init__(self, image):
6 |         assert len(image.shape) == 3, '==> InputError'
7 |         self.image = image
8 |         self.shape = image.shape
9 |         self.depth = image.shape[-1]
10 | 
11 |     def transform_ctdata(self, windowWidth, windowCenter, normal=False):
12 |         """
13 |         return: truncated image according to window center and window width
14 |         """
15 |         minWindow = float(windowCenter) - 0.5 * float(windowWidth)
16 |         newimg = (self.image - minWindow) / float(windowWidth)
17 |         newimg[newimg < 0] = 0
18 |         newimg[newimg > 1] = 1
19 |         if not normal:
20 |             newimg = (newimg * 255).astype('uint8')
21 |         return newimg
22 | 
23 |     def resize_3d(self, width,
height): 24 | """ 25 | return: resized image in shape [depth, width, height] 26 | """ 27 | if not self.shape[:2] == (width, height): 28 | newimg = [cv2.resize(self.image[:,:,i], (height, width)) for i in range(self.depth)] 29 | newimg = np.array(newimg) 30 | else: 31 | newimg = self.image.transpose(2,0,1) 32 | return newimg 33 | 34 | def main(): 35 | # test 36 | image = np.load('/data/LITS2017/patch_test/volume-2_patch_1.npy') 37 | print(image.shape) 38 | lits = LITSPreprocessor(image) 39 | # the proper ct value for observe liver is 50~70 40 | image = lits.transform_ctdata(20, 60) 41 | image = lits.resize_3d(128, 128) 42 | print(image.shape) 43 | 44 | if __name__ == '__main__': 45 | main() 46 | -------------------------------------------------------------------------------- /LiTS/dataprocess/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | 4 | def file_name_path(file_dir): 5 | """ 6 | get root path,sub_dirs,all_sub_files 7 | :param file_dir: 8 | :return: 9 | """ 10 | for root, dirs, files in os.walk(file_dir): 11 | if len(dirs): 12 | print("sub_dirs:", dirs) 13 | return dirs 14 | 15 | 16 | def save_file2csv(file_dir, file_name): 17 | """ 18 | save file path to csv,this is for segmentation 19 | :param file_dir:preprocess data path 20 | :param file_name:output csv name 21 | :return: 22 | """ 23 | out = open(file_name, 'w') 24 | sub_dirs = file_name_path(file_dir) 25 | out.writelines("filename" + "\n") 26 | for index in range(len(sub_dirs)): 27 | out.writelines(file_dir + "/" + sub_dirs[index] + "\n") 28 | 29 | 30 | save_file2csv("G:\Data\LIST\\3dliver_25625616\Image", "train_X.csv") 31 | -------------------------------------------------------------------------------- /LiTS/vnet3d_predict.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 4 | os.environ["CUDA_VISIBLE_DEVICES"] = "-1" 5 | from tensorflow.python.client import device_lib 6 | 7 | print(device_lib.list_local_devices()) 8 | 9 | from Vnet.model_vnet3d import Vnet3dModule 10 | import numpy as np 11 | import pandas as pd 12 | import cv2 13 | 14 | 15 | def predict(): 16 | height = 512 17 | width = 512 18 | dimension = 32 19 | Vnet3d = Vnet3dModule(height, width, dimension, channels=1, costname=("dice coefficient",), inference=True, 20 | model_path="log\\diceVnet3d\\model\Vnet3d.pd") 21 | srcimagepath = "D:\Data\LIST\\test\Image\\111" 22 | predictpath = "D:\Data\LIST\\test\PredictMask" 23 | index = 0 24 | imagelist = [] 25 | for _ in os.listdir(srcimagepath): 26 | image = cv2.imread(srcimagepath + "/" + str(index) + ".bmp", cv2.IMREAD_GRAYSCALE) 27 | tmpimage = np.reshape(image, (height, width, 1)) 28 | imagelist.append(tmpimage) 29 | index += 1 30 | 31 | imagearray = np.array(imagelist) 32 | imagearray = np.reshape(imagearray, (index, height, width, 1)) 33 | imagemask = np.zeros((index, height, width), np.int32) 34 | 35 | for i in range(0, index + dimension, dimension // 2): 36 | if (i + dimension) <= index: 37 | imagedata = imagearray[i:i + dimension, :, :, :] 38 | imagemask[i:i + dimension, :, :] = Vnet3d.prediction(imagedata) 39 | elif (i < index): 40 | imagedata = imagearray[index - dimension:index, :, :, :] 41 | imagemask[index - dimension:index, :, :] = Vnet3d.prediction(imagedata) 42 | 43 | mask = imagemask.copy() 44 | mask[imagemask > 0] = 255 45 | result = np.clip(mask, 0, 255).astype('uint8') 46 | for i in range(0, index): 47 | cv2.imwrite(predictpath + "/" + 
str(i) + ".bmp", result[i])
48 | 
49 | 
50 | predict()
51 | 
--------------------------------------------------------------------------------
/LiTS/vnet3d_train.py:
--------------------------------------------------------------------------------
1 | import os
2 | 
3 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
4 | os.environ["CUDA_VISIBLE_DEVICES"] = "0"
5 | from tensorflow.python.client import device_lib
6 | 
7 | print(device_lib.list_local_devices())
8 | 
9 | from Vnet.model_vnet3d import Vnet3dModule
10 | import numpy as np
11 | import pandas as pd
12 | 
13 | 
14 | def train():
15 |     '''
16 |     Preprocessing for dataset
17 |     '''
18 |     # Read data set (Train data from CSV file)
19 |     csvmaskdata = pd.read_csv('trainY25625616.csv')
20 |     csvimagedata = pd.read_csv('trainX25625616.csv')
21 |     maskdata = csvmaskdata.iloc[:, :].values
22 |     imagedata = csvimagedata.iloc[:, :].values
23 |     # shuffle imagedata and maskdata together
24 |     perm = np.arange(len(csvimagedata))
25 |     np.random.shuffle(perm)
26 |     imagedata = imagedata[perm]
27 |     maskdata = maskdata[perm]
28 | 
29 |     Vnet3d = Vnet3dModule(256, 256, 16, channels=1, costname=("dice coefficient",))
30 |     Vnet3d.train(imagedata, maskdata, "Vnet3d.pd", "log\\diceVnet3d\\", 0.001, 0.7, 10, 1)
31 | 
32 | 
33 | train()
34 | 
--------------------------------------------------------------------------------
/LiTS/vnet3d_train_predict.py:
--------------------------------------------------------------------------------
1 | from Vnet.model_vnet3d import Vnet3dModule  # fixed import: this repo's package is Vnet, not promise2012.Vnet
2 | from Vnet.util import convertMetaModelToPbModel
3 | import numpy as np
4 | import pandas as pd
5 | import cv2
6 | 
7 | 
8 | def train():
9 |     '''
10 |     Preprocessing for dataset
11 |     '''
12 |     # Read data set (Train data from CSV file)
13 |     csvmaskdata = pd.read_csv('trainY.csv')
14 |     csvimagedata = pd.read_csv('trainX.csv')
15 |     maskdata = csvmaskdata.iloc[:, :].values
16 |     imagedata = csvimagedata.iloc[:, :].values
17 |     # shuffle imagedata and maskdata together
18 |     perm = np.arange(len(csvimagedata))
19 |     np.random.shuffle(perm)
20 |     imagedata = imagedata[perm]
21 |     maskdata = maskdata[perm]
22 | 
23 |     Vnet3d = Vnet3dModule(128, 128, 64, channels=1, costname=("dice coefficient",))  # bug fix: costname must be a tuple, __get_cost reads costname[0]
24 |     Vnet3d.train(imagedata, maskdata, "model\\Vnet3dModule.pd", "log\\", 0.001, 0.7, 100000, 1)
25 | 
26 | 
27 | def predict0():
28 |     Vnet3d = Vnet3dModule(256, 256, 64, inference=True, model_path="model\\Vnet3dModule.pd")
29 |     for filenumber in range(30):
30 |         batch_xs = np.zeros(shape=(64, 256, 256))
31 |         for index in range(64):
32 |             imgs = cv2.imread(
33 |                 "D:\Data\PROMISE2012\Vnet3d_data\\test\image\\" + str(filenumber) + "\\" + str(index) + ".bmp", 0)
34 |             batch_xs[index, :, :] = imgs[128:384, 128:384]
35 | 
36 |         predictvalue = Vnet3d.prediction(batch_xs)
37 | 
38 |         for index in range(64):
39 |             result = np.zeros(shape=(512, 512), dtype=np.uint8)
40 |             result[128:384, 128:384] = predictvalue[index]
41 |             kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
42 |             result = cv2.morphologyEx(result, cv2.MORPH_CLOSE, kernel)
43 |             cv2.imwrite(
44 |                 "D:\Data\PROMISE2012\Vnet3d_data\\test\image\\" + str(filenumber) + "\\" + str(index) + "mask.bmp",
45 |                 result)
46 | 
47 | 
48 | def meta2pd():
49 |     convertMetaModelToPbModel(meta_model="model\\Vnet3dModule.pd", pb_model="model")
50 | 
51 | train()
52 | #predict0()
53 | #meta2pd()
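# Usage note (a sketch of the intended workflow): run one entry point per invocation by
# toggling the comments above. train() fits the model from the csv-listed patch folders,
# predict0() runs inference on the prepared test .bmp stacks and writes mask images, and
# meta2pd() freezes the latest checkpoint into a standalone model.pb graph.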
54 | 
--------------------------------------------------------------------------------
/LiTS_header.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/LiTS_header.jpg
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # ImageSegmentation With Vnet3D
2 | > An example of CT image segmentation for the LiTS---Liver-Tumor-Segmentation-Challenge
3 | ![](LiTS_header.jpg)
4 | 
5 | ## Prerequisites
6 | The following dependencies are needed:
7 | - numpy >= 1.11.1
8 | - SimpleITK >= 1.0.1
9 | - opencv-python >= 3.3.0
10 | - tensorflow-gpu == 1.8.0
11 | - pandas >= 0.20.1
12 | - scikit-learn >= 0.17.1
13 | 
14 | ## How to Use
15 | This is a TensorFlow (re)implementation of the model from the paper "Milletari, F., Navab, N., & Ahmadi, S. A. (2016). V-Net: Fully convolutional neural networks for volumetric medical image segmentation. 3DV 2016".
16 | 
17 | **1、Preprocess**
18 | * The LiTS images and masks are all .nii files; in order to train and visualize, convert the .nii files to .bmp files.
19 | * Liver data preparation: I have tried many patch sizes and finally use the patch (256, 256, 16); if you have a better GPU, you can change 16 to 24 or 32. Run getPatchImageAndMask.py.
20 | * Tumor data preparation uses the same patch (256, 256, 16): in getPatchImageAndMask.py, disable the line gen_image_mask(srcimg, seg_liverimage, i, shape=(16, 256, 256), numberxy=5, numberz=10), enable the line gen_image_mask(srcimg, seg_tumorimage, i, shape=(16, 256, 256), numberxy=5, numberz=10), and change trainLiverMask to trainTumorMask.
21 | * Last, save all the data folder paths into a csv file: run utils.py.
22 | 
23 | The file looks like this:
24 | 
25 | G:\Data\segmentation\Image/0_161
26 | 
27 | G:\Data\segmentation\Image/0_162
28 | 
29 | G:\Data\segmentation\Image/0_163
30 | 
31 | **2、Liver and Tumor Segmentation**
32 | * the VNet model
33 | 
34 | ![](3dVNet.png)
35 | 
36 | * train and predict with the scripts vnet3d_train.py and vnet3d_predict.py (a minimal usage sketch follows below)
37 | 
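A minimal training sketch, condensed from vnet3d_train.py (csv names and patch size follow the liver setup above; adjust paths to your setup):

```python
from Vnet.model_vnet3d import Vnet3dModule
import pandas as pd
import numpy as np

# read the patch-folder lists written by utils.py and shuffle them together
imagedata = pd.read_csv('trainX25625616.csv').iloc[:, :].values
maskdata = pd.read_csv('trainY25625616.csv').iloc[:, :].values
perm = np.arange(len(imagedata))
np.random.shuffle(perm)

# build the 3D V-Net on (256, 256, 16) patches and train with the dice loss
vnet3d = Vnet3dModule(256, 256, 16, channels=1, costname=("dice coefficient",))
vnet3d.train(imagedata[perm], maskdata[perm], "Vnet3d.pd", "log\\diceVnet3d\\",
             learning_rate=0.001, dropout_conv=0.7, train_epochs=10, batch_size=1)
```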
38 | **3、Download resources**
39 | * The trained liver segmentation model, logs and test data can be downloaded here: https://pan.baidu.com/s/1ijK6BG3vZM4nHwZ6S2yFiw, password: 74j5
40 | * The LiTS data set has 130 cases; cases 0-110 are used for training and the rest for testing. The test results can be downloaded here: https://pan.baidu.com/s/1A_-u7tJcn7rIqnrLaSqi4A password: 22es
41 | * The LiTS train and test source data can be downloaded here: https://pan.baidu.com/s/1-kxJ7reS4kq5ypitfmQxeg password: nlrd
42 | 
43 | ## Result
44 | Training loss
45 | ![](diceloss.PNG)
46 | 
47 | Liver segmentation result
48 | 
49 | Liver leaderboard
50 | ![](livertop30.PNG)
51 | 
52 | Test case segmentation results can be seen in the files 35.mp4, 38.mp4 and 51.mp4.
53 | 
54 | The first column is the source image, the second column is the ground-truth mask, and the third column is the V-Net segmentation.
55 | ![](GTvsVNet.bmp)
56 | 
57 | Lesion leaderboard
58 | ![](tumortop34.PNG)
59 | 
60 | ## Contact
61 | * https://github.com/junqiangchen
62 | * email: 1207173174@qq.com, ydx0902@gmail.com, 188123134@qq.com
63 | * Contact: junqiangChen, dexianYe, xingTao
64 | * WeChat Public number: 最新医学影像技术
65 | 
--------------------------------------------------------------------------------
/diceloss.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/diceloss.PNG
--------------------------------------------------------------------------------
/liverleadboard.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/liverleadboard.PNG
--------------------------------------------------------------------------------
/livertop30.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/livertop30.PNG
--------------------------------------------------------------------------------
/tumortop34.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/junqiangchen/LiTS---Liver-Tumor-Segmentation-Challenge/b547eb42cab6e09a1a4bd86ad428c9e7d8a31cf8/tumortop34.PNG
--------------------------------------------------------------------------------