├── BRATS2015.py
├── BraTS2018.ipynb
├── README.md
└── images
    ├── MRI.png
    ├── cropping.png
    ├── flowchart.png
    ├── my_unet.jpg
    ├── my_unet7.jpg
    ├── overview.png
    ├── result1.png
    ├── result2.png
    ├── result3.png
    └── result4.png

--------------------------------------------------------------------------------
/BRATS2015.py:
--------------------------------------------------------------------------------
#%%

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import skimage.io as io
import skimage.transform as trans
import random as r
from keras.models import Sequential, load_model, Model, model_from_json
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, concatenate, Conv2D, MaxPooling2D, Conv2DTranspose
from keras.layers import Input, merge, UpSampling2D
from keras.callbacks import ModelCheckpoint
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras import backend as K
K.tensorflow_backend._get_available_gpus()
import SimpleITK as sitk
#K.set_image_data_format("channels_first")
K.set_image_dim_ordering("th")

img_size = 120    # original image size is 240*240
smooth = 1
num_of_aug = 1
num_epoch = 20
#%%

import glob

def create_data(src, mask, label=False, resize=(155, img_size, img_size)):
    files = glob.glob(src + mask, recursive=True)
    imgs = []
    print('Processing---', mask)
    for file in files:
        img = io.imread(file, plugin='simpleitk')
        img = trans.resize(img, resize, mode='constant')
        if label:
            #img[img == 4] = 1    # turn enhancing tumor into necrosis
            #img[img != 1] = 0    # only keep enhancing tumor + necrosis
            img[img != 0] = 1     # Region 1 => 1+2+3+4 complete tumor
            img = img.astype('float32')
        else:
            img = (img - img.mean()) / img.std()   # normalization => zero mean !!! care for the std=0 problem
        for slice in range(50, 130):
            img_t = img[slice, :, :]
            img_t = img_t.reshape((1,) + img_t.shape)
            img_t = img_t.reshape((1,) + img_t.shape)   # become rank 4
            img_g = augmentation(img_t, num_of_aug)
            for n in range(img_g.shape[0]):
                imgs.append(img_g[n, :, :, :])
    name = 'y_' + str(img_size) if label else 'x_' + str(img_size)
    np.save(name, np.array(imgs).astype('float32'))   # save at home
    print('Saved', len(files), 'to', name)

#%%

def n4itk(img):   # must input an sitk image object
    img = sitk.Cast(img, sitk.sitkFloat32)
    img_mask = sitk.BinaryNot(sitk.BinaryThreshold(img, 0, 0))   ## Create a mask spanning the part containing the brain, as we want to apply the filter to the brain image
    corrected_img = sitk.N4BiasFieldCorrection(img, img_mask)
    return corrected_img

#%%

def augmentation(scans, n):   # input img must be rank 4
    datagen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=25,
        #width_shift_range=0.3,
        #height_shift_range=0.3,
        horizontal_flip=True,
        vertical_flip=True,
        zoom_range=False)
    i = 0
    scans_g = scans.copy()
    for batch in datagen.flow(scans, batch_size=1, seed=1000):
        scans_g = np.vstack([scans_g, batch])
        i += 1
        if i == n:
            break
    ''' remember to augment the labels together with the images
    i = 0
    labels_g = labels.copy()
    for batch in datagen.flow(labels, batch_size=1, seed=1000):
        labels_g = np.vstack([labels_g, batch])
        i += 1
        if i > n:
            break
    return ((scans_g, labels_g))'''
    return scans_g

#scans_g, labels_g = augmentation(img, img1, 10)
#X_train = X_train.reshape(X_train.shape[0], 1, img_size, img_size)
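#%%
# --- Not part of the original script: a sketch of how the commented-out block
# --- above could be completed so that images and labels receive identical
# --- augmentations. Streaming both arrays through the same generator with the
# --- same seed keeps the random transforms in sync; the helper name is illustrative.
def augmentation_pair(scans, labels, n):   # both inputs must be rank 4
    datagen = ImageDataGenerator(rotation_range=25,
                                 horizontal_flip=True,
                                 vertical_flip=True)
    scans_g = scans.copy()
    labels_g = labels.copy()
    scan_flow = datagen.flow(scans, batch_size=1, seed=1000)
    label_flow = datagen.flow(labels, batch_size=1, seed=1000)
    for i, (scan_batch, label_batch) in enumerate(zip(scan_flow, label_flow)):
        scans_g = np.vstack([scans_g, scan_batch])
        # rotation interpolates label values, so re-binarize the augmented mask
        labels_g = np.vstack([labels_g, (label_batch > 0.5).astype('float32')])
        if i + 1 == n:
            break
    return scans_g, labels_g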
#%%

'''
Model -

structure:

'''

def dice_coef(y_true, y_pred):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)


def dice_coef_loss(y_true, y_pred):
    return -dice_coef(y_true, y_pred)


def unet_model():
    inputs = Input((1, img_size, img_size))
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(inputs)   # 3x3 kernel, stride 1, same padding
    conv1 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv1)
    pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)

    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(pool1)
    conv2 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv2)
    pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)

    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(pool2)
    conv3 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv3)
    pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)

    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(pool3)
    conv4 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv4)
    pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)

    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(pool4)
    conv5 = Convolution2D(512, 3, 3, activation='relu', border_mode='same')(conv5)

    up6 = merge([UpSampling2D(size=(2, 2))(conv5), conv4], mode='concat', concat_axis=1)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(up6)
    conv6 = Convolution2D(256, 3, 3, activation='relu', border_mode='same')(conv6)

    up7 = merge([UpSampling2D(size=(2, 2))(conv6), conv3], mode='concat', concat_axis=1)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(up7)
    conv7 = Convolution2D(128, 3, 3, activation='relu', border_mode='same')(conv7)

    up8 = merge([UpSampling2D(size=(2, 2))(conv7), conv2], mode='concat', concat_axis=1)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(up8)
    conv8 = Convolution2D(64, 3, 3, activation='relu', border_mode='same')(conv8)

    up9 = merge([UpSampling2D(size=(2, 2))(conv8), conv1], mode='concat', concat_axis=1)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(up9)
    conv9 = Convolution2D(32, 3, 3, activation='relu', border_mode='same')(conv9)

    conv10 = Convolution2D(1, 1, 1, activation='sigmoid')(conv9)

    model = Model(input=inputs, output=conv10)

    model.compile(optimizer=Adam(lr=1e-5), loss=dice_coef_loss, metrics=[dice_coef])

    return model


#%%
# gather all Flair volumes and ground-truth (OT) labels from BRATS2015
create_data('/home/andy/Brain_tumor/BRATS2015/BRATS2015_Training/HGG/', '**/*Flair*.mha', label=False, resize=(155, img_size, img_size))
create_data('/home/andy/Brain_tumor/BRATS2015/BRATS2015_Training/HGG/', '**/*OT*.mha', label=True, resize=(155, img_size, img_size))

#%%
# gather BRATS2017 data
create_data('/home/andy/Brain_tumor/BRATS2017/Pre-operative_TCGA_GBM_NIfTI_and_Segmentations/', '**/*_flair.nii.gz', label=False, resize=(155, img_size, img_size))
create_data('/home/andy/Brain_tumor/BRATS2017/Pre-operative_TCGA_GBM_NIfTI_and_Segmentations/', '**/*_GlistrBoost_ManuallyCorrected.nii.gz', label=True, resize=(155, img_size, img_size))

#%%
# load numpy array data
x = np.load('/home/andy/x_{}.npy'.format(img_size))
y = np.load('/home/andy/y_{}.npy'.format(img_size))

#%%
# training
num = 31100   # index of the first slice held out for visual inspection

model = unet_model()
history = model.fit(x, y, batch_size=16, validation_split=0.2, nb_epoch=num_epoch, verbose=1, shuffle=True)
pred = model.predict(x[num:num + 100])

#%%
# save model and weights
model.save('aug{}_{}_epoch{}'.format(num_of_aug, img_size, num_epoch))
model.save_weights('weights_{}_{}.h5'.format(img_size, num_epoch))
#model.load_weights('weights.h5')

#%%
# list all data in history
print(history.history.keys())
# summarize history for the Dice coefficient
plt.plot(history.history['dice_coef'])
plt.plot(history.history['val_dice_coef'])
plt.title('model dice_coef')
plt.ylabel('dice_coef')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'validation'], loc='upper left')
plt.show()

#%%
# show results
for n in range(2):
    i = int(r.random() * pred.shape[0])
    plt.figure(figsize=(15, 10))

    plt.subplot(131)
    plt.title('Input ' + str(i + num))
    plt.imshow(x[i + num, 0, :, :], cmap='gray')

    plt.subplot(132)
    plt.title('Ground Truth')
    plt.imshow(y[i + num, 0, :, :], cmap='gray')

    plt.subplot(133)
    plt.title('Prediction')
    plt.imshow(pred[i, 0, :, :], cmap='gray')

    plt.show()

#%%
'''
animation
'''
import matplotlib.animation as animation

def animate(pat, gifname):
    # Based on @Zombie's code
    fig = plt.figure()
    anim = plt.imshow(pat[50])
    def update(i):
        anim.set_array(pat[i])
        return anim,

    a = animation.FuncAnimation(fig, update, frames=range(len(pat)), interval=50, blit=True)
    a.save(gifname, writer='imagemagick')

#animate(pat, 'test.gif')
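#%%
# --- Not part of the original script: a quick numeric check of the predictions
# --- plotted above. The sigmoid output `pred` is thresholded at 0.5 and compared
# --- with the ground-truth slices y[num:num+100] using a plain NumPy Dice score.
def dice_score(gt, seg, eps=1.0):
    gt = gt.astype(bool)
    seg = seg.astype(bool)
    return (2.0 * np.logical_and(gt, seg).sum() + eps) / (gt.sum() + seg.sum() + eps)

pred_bin = (pred > 0.5)
scores = [dice_score(y[num + k, 0], pred_bin[k, 0]) for k in range(pred_bin.shape[0])]
print('mean Dice over {} slices: {:.3f}'.format(len(scores), float(np.mean(scores))))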
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Brain Tumor Segmentation Using U-net Model in Multi-Contrast MRI Image

Download link for trained model weights:
https://drive.google.com/file/d/1hE9It0ZOOeIuSFvt6GdiR_0cq9inWdTy/view?usp=sharing

## Abstract

With the huge success of deep learning in the field of computer vision, there is rising interest in its application to medical imaging. Detecting brain tumors with a segmentation approach is critical in cases where the survival of a subject depends on an accurate and timely clinical diagnosis. Gliomas are the most common tumors; their irregular shapes and ambiguous boundaries make them among the hardest tumors to detect. We present a fully automatic deep learning approach for brain tumor segmentation in multi-contrast magnetic resonance images. The approach is based on a fully convolutional network (FCN), with U-net as the model, and is trained on the dataset provided for the Brain Tumor Segmentation (BraTS) Challenge by the Medical Image Computing and Computer Assisted Intervention (MICCAI) society. It was validated on the BraTS2018 leaderboard dataset and achieved mean Dice Similarity Coefficients of 0.87 for the full tumor region, 0.76 for the tumor core region and 0.71 for the enhancing tumor region, with median Dice Similarity Coefficients of 0.90, 0.84 and 0.80 for the full tumor, tumor core and enhancing tumor, respectively.

## MRI Contrasts

Magnetic resonance imaging (MRI) is commonly used in medical imaging for the analysis of brain tumors. MRI is non-invasive and can be used alongside other imaging modalities, such as computed tomography (CT) and positron emission tomography (PET), to give accurate information about brain tumor structure. However, using these systems alongside MRI is expensive and, in some cases (e.g. PET), invasive. Therefore, MR sequences, which are non-invasive and image both structure and function, are most often used for brain imaging. MRI machines come in different configurations and produce images with varying intensities: the magnetic field strength (1.5, 3 or 7 Tesla) and the acquisition protocol (field of view, voxel resolution, gradient strength, b0 value, etc.) both affect the voxel intensities, so the same tumorous tissue may have different grayscale values when imaged at different hospitals, which makes tumor detection difficult. MRI can show different tissue contrasts through different pulse sequences, making it an adaptable and widely used imaging technique for visualizing regions of interest in the human brain. MRI modalities are combined to produce multi-modal images that give more information about irregularly shaped tumors, which are difficult to localize with a single modality. These modalities include T1-weighted MRI (T1), T1-weighted MRI with contrast enhancement (T1c), T2-weighted MRI (T2) and T2-weighted MRI with fluid attenuated inversion recovery (T2-Flair). This multi-modal data contains information that can be used for tumor segmentation with a significant improvement in performance.

Figure 1 shows an axial slice of the four standard sequences for a glioma patient, including manually drawn tumor regions. T1-weighting is the most commonly used sequence for structural analysis; it also allows for an easy annotation of the healthy tissues. In T1-weighted contrast-enhanced images (gadolinium-DTPA), the tumor borders appear brighter because the contrast agent accumulates there due to the disruption of the blood-brain barrier in the proliferative tumor region. In this sequence, the necrotic and the active tumor regions can be distinguished easily. In T2-weighted MRI, the edema region surrounding the tumor appears bright. T2-FLAIR (FLAIR) is a special sequence that helps separate the edema region from the cerebrospinal fluid (CSF) because the free water signal is suppressed. The radiological definition of the tumor margins in the clinical context is often determined manually by the radiologist on the T2 and post-gadolinium T1 images, by thresholding the boundaries between T2 hyperintense/T1 contrast-enhanced lesions and the surrounding healthy tissue to define the outer margins of the tumor.

![MRI contrast example](images/MRI.png)

Figure 1: One axial slice of an MR image of a high-grade glioma patient. From left to right in row 1: T1-weighted image, T2-weighted image, T2-FLAIR-weighted image, T1-weighted image with contrast enhancement. Row 2 shows the ground truth of the three sub-regions labeled by experts.

## Method

![Overview](images/overview.png)

Figure 2: The overview of the proposed approach for brain tumor segmentation.

At first, we used a single U-net model for the three segmentation tasks (full tumor, tumor core, enhancing tumor). The full-tumor result was promising, but the other two were not. The problem is that the tumor core and the enhancing tumor are very small compared with the whole brain, so very few pixels carry these two labels (on average 0.75% per slice for the tumor core and 0.45% for the enhancing tumor). Because of this data imbalance, the model tends to predict "no tumor" and still scores well. As Figure 1 shows, the tumor core must lie inside the edema, and the enhancing tumor is part of the tumor core. To solve the problem, we take advantage of the full-tumor prediction: we calculate the center point of the predicted full tumor and use it to crop the training data for the tumor core and the enhancing tumor (shown in Figure 3). The number of crops depends on the size of the full tumor, and overlapping patches are also cropped as data augmentation; the cropping size is fixed at 64x64. After cropping, the tumor core and enhancing tumor labels cover on average 12.7% and 7.6% of each training patch.

![Cropping method](images/cropping.png)

Figure 3: The cropping method we propose for improving the performance of tumor core and enhancing tumor segmentation. The T1c image is cropped around the center point of the full-tumor prediction. The patch size is 64x64, and if the tumor is bigger than 64x64 we crop more than one patch.
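As a rough illustration of this cropping step (not code from this repository; it assumes NumPy/SciPy, a non-empty binary full-tumor mask and a 240x240 T1c slice, and the function name is ours), the center point and a 64x64 patch could be obtained like this:

```python
import numpy as np
from scipy import ndimage

def crop_around_tumor(t1c_slice, full_tumor_mask, patch_size=64):
    """Crop one patch_size x patch_size patch centered on the predicted full tumor."""
    # center of mass of the binary prediction; assumes at least one tumor pixel
    cy, cx = ndimage.center_of_mass(full_tumor_mask)
    half = patch_size // 2
    # clamp the center so the patch stays inside the slice
    cy = int(np.clip(cy, half, t1c_slice.shape[0] - half))
    cx = int(np.clip(cx, half, t1c_slice.shape[1] - half))
    patch = t1c_slice[cy - half:cy + half, cx - half:cx + half]
    return patch, (cy, cx)
```

Larger tumors would simply be covered by several (possibly overlapping) calls with shifted center points, which is what the data augmentation described above amounts to.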
Then we feed the 64x64 training data into another U-net model for training and prediction. The tumor core and enhancing tumor predictions are pasted back onto the original full-tumor prediction at the center point obtained earlier. Figure 4 presents the flowchart of the proposed approach. Instead of using all MRI modalities, we only use the T2 and Flair data for full tumor segmentation and only T1c for tumor core/enhancing tumor segmentation, which accelerates training. For the radiologists who annotated the data, the edema was segmented primarily from the T2 images, and FLAIR was used to cross-check the extent of the edema and discriminate it from ventricles and other fluid-filled structures; both the tumor core and the enhancing tumor were segmented by evaluating the hyper-intensities in the T1c images.

![Flowchart](images/flowchart.png)

Figure 4: We use the T2 and Flair images (after pre-processing) as input to a 9-layer U-net for full tumor segmentation. The full-tumor prediction is then used to crop training patches from the T1c image (after pre-processing). These patches are fed into two 7-layer U-nets for tumor core segmentation and enhancing tumor segmentation. Finally, the three predictions are combined by post-processing to give the final result.
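A minimal sketch of the paste-back step described above (again not repository code; it assumes the center returned by the cropping sketch, and the label value 2 used for the pasted sub-region is arbitrary):

```python
import numpy as np

def paste_back(full_tumor_mask, patch_pred, center, patch_size=64):
    """Write a 64x64 sub-region prediction back into the full-size label map."""
    half = patch_size // 2
    cy, cx = center
    combined = full_tumor_mask.copy()
    region = combined[cy - half:cy + half, cx - half:cx + half]
    # keep the sub-region label only where it falls inside the predicted full tumor
    combined[cy - half:cy + half, cx - half:cx + half] = np.where(
        (patch_pred > 0.5) & (region > 0), 2, region)
    return combined
```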
## Pre-processing

The MRI images were already skull-stripped and resampled to 1 mm³ resolution when we received the dataset. No non-parametric non-uniform intensity normalization (bias field correction) was applied to compensate for the intensity non-uniformities caused by the inhomogeneity of the scanner's magnetic field, because such correction would obliterate the T2-Flair signal. We use SimpleITK to read the NIfTI data and convert it to NumPy arrays. The volume of each subject is 240x240x155; we only keep the 60th-120th axial slices as training data, because the remaining parts of the brain are very unlikely to contain any tumor. The slices are then zero-mean normalized using the mean and standard deviation.
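A minimal sketch of the slice selection and normalization described above (NumPy only; the guard against a zero standard deviation is our addition and not in the repository script):

```python
import numpy as np

def preprocess_volume(volume, lo=60, hi=120):
    """Keep axial slices [lo, hi) of a (155, 240, 240) volume and z-score normalize them."""
    slices = volume[lo:hi].astype('float32')
    std = slices.std()
    if std == 0:                  # constant (e.g. empty) volume: only remove the mean
        return slices - slices.mean()
    return (slices - slices.mean()) / std
```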
## Proposed Network Architecture

Our brain tumor segmentation model consists of two stages. First, we use a 9-layer U-net-like architecture to segment the full tumor. Second, the segmentation result is used as input to two 7-layer U-net-like architectures that segment the tumor core and the enhancing tumor. The proposed 9-layer architecture is shown in Figure 5. The architecture can be seen as an auto-encoder, in which a contracting network learns the image features and an expanding network uses these features to reconstruct a low-dimensional representation of the image, such as the ground-truth segmentation. The contracting path consists of stacked convolution/pooling layers, while the expanding path is made up of up-sampling/transposed-convolution layers. To localize, high-resolution features from the contracting path are concatenated with the up-sampled outputs at the corresponding stages; these are called "skip connections". The advantages of skip connections are analyzed in detail by Drozdzal et al., who distinguish between long skip connections and short skip connections spanning a single residual block. The difference between the original U-net and ours is that we add a batch normalization layer after each convolution layer to keep the gradients controlled, speed up convergence and reduce the effect of internal covariate shift, so that the parameters of the network do not change too rapidly during backpropagation.

The filter size of the convolution layers is 3x3, and we use same padding to keep the output size of each convolution layer unchanged, which also differs from the original U-net. To increase the receptive field at the lowest resolution, two convolution/batch-normalization layers are stacked, an idea taken from VGG16. After the two convolution layers and batch normalization, a 2x2 max-pooling with stride 2 halves the image size. At each down-sampling step we double the number of feature channels. Every step in the expansive path consists of an up-sampling of the feature map, followed by a 2x2 transposed convolution ("up-convolution") that halves the number of feature channels, a concatenation with the corresponding feature map from the contracting path, and two 3x3 convolutions. Since each model performs binary segmentation, the final operation is a 1x1 convolution with a single filter, producing a binary prediction in which 1 is the target and 0 is background.
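To make this concrete, here is a sketch of the convolution/batch-normalization block and the up-sampling step described above, written against the current tf.keras API (the repository script BRATS2015.py uses an older Keras API and does not include batch normalization, so this is illustrative rather than the exact training code):

```python
from tensorflow.keras import layers

def conv_block(x, filters):
    """Two 3x3 convolutions, each followed by batch normalization and ReLU."""
    for _ in range(2):
        x = layers.Conv2D(filters, (3, 3), padding='same')(x)   # same padding keeps the size
        x = layers.BatchNormalization()(x)
        x = layers.Activation('relu')(x)
    return x

def up_block(x, skip, filters):
    """2x2 up-convolution, concatenation with the skip feature map, then a conv block."""
    x = layers.Conv2DTranspose(filters, (2, 2), strides=(2, 2), padding='same')(x)
    x = layers.concatenate([x, skip])
    return conv_block(x, filters)
```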
![my U-net](images/my_unet.jpg)

Figure 5: The 9-layer U-net structure we propose for full tumor segmentation. There are four differences between our U-net and the original U-net (shown by red arrows) [16]. (a) We add a batch-normalization layer after each convolution layer. (b) We use same padding in the convolution layers to keep the feature map size unchanged. (c) The final convolution has a single filter, for binary segmentation. (d) There are two input channels, because we use the T2 and Flair images for full tumor segmentation.

## Result

![result1](images/result1.png)

Figure 6: The first row shows the 4 MRI modalities of an HGG subject from the BraTS2015 dataset. The second row shows the ground truth of the three tasks and the combination of all three. The third row shows the prediction from our method.

![result2](images/result2.png)

![result3](images/result3.png)

![result4](images/result4.png)

Figure 7: Results for all three tasks (green = edema, yellow = enhancing tumor, blue = necrosis and non-enhancing). These are all the 100th slice from different HGG subjects, which is most likely to be the central part of the brain tumor. The results show that our method performs well on the core of the brain tumor.

--------------------------------------------------------------------------------
/images/MRI.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/MRI.png
--------------------------------------------------------------------------------
/images/cropping.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/cropping.png
--------------------------------------------------------------------------------
/images/flowchart.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/flowchart.png
--------------------------------------------------------------------------------
/images/my_unet.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/my_unet.jpg
--------------------------------------------------------------------------------
/images/my_unet7.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/my_unet7.jpg
--------------------------------------------------------------------------------
/images/overview.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/overview.png
--------------------------------------------------------------------------------
/images/result1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/result1.png
--------------------------------------------------------------------------------
/images/result2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/result2.png
--------------------------------------------------------------------------------
/images/result3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/result3.png
--------------------------------------------------------------------------------
/images/result4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/AndyWangON/Brain-tumor-segmentation-using-deep-learning/78a4bdec8475478b9428f1d576480587628ca957/images/result4.png
--------------------------------------------------------------------------------