├── .gitattributes
├── figures
│   └── res.png
├── net
│   ├── __pycache__
│   │   └── ResUNet.cpython-36.pyc
│   └── ResUNet.py
├── requirements.txt
├── loss
│   ├── BCE.py
│   ├── Dice.py
│   ├── ELDice.py
│   └── Jaccard.py
├── rmsd.py
├── asd.py
├── LICENSE
├── README.md
├── difference.py
├── dataset
│   └── dataset.py
├── VOE.py
├── data_prepare
│   └── get_training_set.py
├── utilities
│   └── calculate_metrics.py
├── parameter.py
├── metrics.py
├── train.py
└── test.py

--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
# Auto detect text files and perform LF normalization
* text=auto

--------------------------------------------------------------------------------
/figures/res.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/safamathl/3D-ResUnet/HEAD/figures/res.png

--------------------------------------------------------------------------------
/net/__pycache__/ResUNet.cpython-36.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/safamathl/3D-ResUnet/HEAD/net/__pycache__/ResUNet.cpython-36.pyc

--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
numpy==1.14.2
torch==1.0.1.post2
visdom==0.1.8.8
pandas==0.23.3
scipy==1.0.0
tqdm==4.40.2
scikit-image==0.13.1
SimpleITK==1.0.1
pydensecrf==1.0rc3
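These pins date from the Python 3.6 era (see the README), so they are best installed into a matching environment via `pip install -r requirements.txt`. A quick sanity check of the two heaviest pins (illustrative, not part of the repo):

```python
# Illustrative: verify the pinned core libraries are the ones installed
import numpy
import torch

print(numpy.__version__, torch.__version__)  # expected: 1.14.2 1.0.1.post2
```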
--------------------------------------------------------------------------------
/loss/BCE.py:
--------------------------------------------------------------------------------
"""

Binary cross-entropy loss function
"""

import torch.nn as nn


class BCELoss(nn.Module):

    def __init__(self):
        super().__init__()

        self.bce_loss = nn.BCELoss()

    def forward(self, pred, target):

        pred = pred.squeeze(dim=1)

        return self.bce_loss(pred, target)

--------------------------------------------------------------------------------
/rmsd.py:
--------------------------------------------------------------------------------
import numpy as np
from rmsd.calculate_rmsd import rmsd as rm

import SimpleITK as sitk


y_true = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/seg/segmentation1.nii")
y_pred = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/liver_pred/pred1.nii")

# rm() expects numpy arrays of identical shape, not SimpleITK image objects,
# so convert the images to their voxel arrays first
t = sitk.GetArrayFromImage(y_true).astype(np.float64)
p = sitk.GetArrayFromImage(y_pred).astype(np.float64)

rmsd_value = rm(t, p)

print("RMSD", rmsd_value)

--------------------------------------------------------------------------------
/asd.py:
--------------------------------------------------------------------------------
import SimpleITK as sitk
from medpy.metric import binary as bn
import numpy as np


y_true = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/seg/segmentation1.nii")
y_pred = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/liver_pred/pred1.nii")

# medpy expects binary numpy arrays; np.array() on a SimpleITK image does not
# yield the voxel data, so use GetArrayFromImage and binarize
t = sitk.GetArrayFromImage(y_true) > 0
p = sitk.GetArrayFromImage(y_pred) > 0

# voxelspacing=None treats voxels as isotropic; the image spacing could be
# passed instead to obtain distances in millimetres
asd = bn.assd(t, p, voxelspacing=None, connectivity=1)

print("ASD", asd)

--------------------------------------------------------------------------------
/loss/Dice.py:
--------------------------------------------------------------------------------
"""

Dice loss
"""

import torch
import torch.nn as nn


class DiceLoss(nn.Module):

    def __init__(self):
        super().__init__()

    def forward(self, pred, target):

        pred = pred.squeeze(dim=1)

        smooth = 1

        # Dice coefficient
        dice = 2 * (pred * target).sum(dim=1).sum(dim=1).sum(dim=1) / (pred.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) +
                                                                       target.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) + smooth)

        return torch.clamp((1 - dice).mean(), 0, 1)
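A quick sanity check for `DiceLoss` (illustrative, not part of the repo): a prediction identical to the target should give a loss close to zero.

```python
import torch
from loss.Dice import DiceLoss

target = torch.zeros(2, 8, 16, 16)   # (batch, depth, H, W) binary masks
target[:, 2:6, 4:12, 4:12] = 1.0
pred = target.unsqueeze(1)           # (batch, 1, depth, H, W), as the net outputs

loss = DiceLoss()(pred, target)
print(loss.item())                   # close to 0 for a perfect match
```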
--------------------------------------------------------------------------------
/loss/ELDice.py:
--------------------------------------------------------------------------------
"""

Exponential Logarithmic Dice loss
"""

import torch
import torch.nn as nn


class ELDiceLoss(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, pred, target):

        pred = pred.squeeze(dim=1)

        smooth = 1

        # Definition of the Dice coefficient
        dice = 2 * (pred * target).sum(dim=1).sum(dim=1).sum(dim=1) / (pred.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) +
                                                                       target.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) + smooth)

        # Return the exponential-logarithmic Dice distance
        return torch.clamp((torch.pow(-torch.log(dice + 1e-5), 0.3)).mean(), 0, 2)

--------------------------------------------------------------------------------
/loss/Jaccard.py:
--------------------------------------------------------------------------------
"""

Jaccard loss
"""

import torch
import torch.nn as nn


class JaccardLoss(nn.Module):
    def __init__(self):
        super().__init__()

    def forward(self, pred, target):

        pred = pred.squeeze(dim=1)

        smooth = 1

        # Definition of the Jaccard index
        jaccard = (pred * target).sum(dim=1).sum(dim=1).sum(dim=1) / (pred.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) +
                                                                      target.pow(2).sum(dim=1).sum(dim=1).sum(dim=1) - (pred * target).sum(dim=1).sum(dim=1).sum(dim=1) + smooth)

        # Return the Jaccard distance
        return torch.clamp((1 - jaccard).mean(), 0, 1)

--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2020 safamathl

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# 3D knee segmentation using deep learning

This repository contains code for 3D deep residual U-Net networks for volumetric knee cartilage segmentation.

![test result](./figures/res.png)

## Dataset

3D knee volumes of size `255 * 255 * 216` are used, together with manually created 3D segmentation labels validated by experts.


Training data are divided into two sub-directories:

* __ct__: contains the input images.

* __seg__: contains the output images (ground truth).

## Getting Started

### Requirements

```
python 3.6
SimpleITK
numpy
pandas
scipy
```

### Step 1: Preparing dataset

```bash
python data_prepare/get_training_set.py
```

### Step 2: Train ResUnet

```bash
python train.py
```

### Step 3: Test ResUnet

```bash
python test.py
```

> **parameter.py** contains all the parameters, so first set the dataset paths to your own (see the sketch below).
> After the model is trained, run **test.py** to evaluate it.
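For reference, the path variables in **parameter.py** look like the following; the values here are placeholders for your own layout:

```python
# parameter.py (excerpt) -- placeholder values, adapt to your own machine
train_ct_path = '/path/to/train/ct'            # CT volumes of the training set
train_seg_path = '/path/to/train/seg'          # training labels
test_ct_path = '/path/to/test/ct'              # CT volumes of the test set
test_seg_path = '/path/to/test/seg'            # test labels
training_set_path = '/path/to/prepared/train'  # written by get_training_set.py
pred_path = '/path/to/test/pred'               # where test.py saves predictions
module_path = '/path/to/model.pth'             # trained weights loaded by test.py
```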
## Author :
Mathlouthi Safa

## Contact :
mathlouthisafa94@gmail.com

--------------------------------------------------------------------------------
/difference.py:
--------------------------------------------------------------------------------
import SimpleITK as sitk
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.metrics import confusion_matrix


y_true = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/seg/segmentation1.nii")
y_pred = sitk.ReadImage("/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/liver_pred/pred1.nii")

# sklearn metrics expect 1-D label arrays, so convert the SimpleITK images to
# numpy, binarize, and flatten them
y_true = (sitk.GetArrayFromImage(y_true) > 0).astype(int).ravel()
y_pred = (sitk.GetArrayFromImage(y_pred) > 0).astype(int).ravel()

# accuracy: (tp + tn) / (p + n)
accuracy = accuracy_score(y_true, y_pred)
print('Accuracy: %f' % accuracy)
# precision: tp / (tp + fp)
precision = precision_score(y_true, y_pred)
print('Precision: %f' % precision)
# recall: tp / (tp + fn)
recall = recall_score(y_true, y_pred)
print('Recall: %f' % recall)
# f1: 2 tp / (2 tp + fp + fn)
f1 = f1_score(y_true, y_pred)
print('F1 score: %f' % f1)
# confusion matrix
matrix = confusion_matrix(y_true, y_pred)
print(matrix)

--------------------------------------------------------------------------------
/dataset/dataset.py:
--------------------------------------------------------------------------------
"""

Dataset definition script in pytorch
"""

import os
import sys
sys.path.append(os.path.split(sys.path[0])[0])

import random

import numpy as np
import SimpleITK as sitk

import torch
from torch.utils.data import Dataset as dataset

import parameter as para


class Dataset(dataset):
    def __init__(self, ct_dir, seg_dir):

        self.ct_list = os.listdir(ct_dir)
        self.seg_list = list(map(lambda x: x.replace('volume', 'segmentation').replace('.nii', '.nii.gz'), self.ct_list))

        self.ct_list = list(map(lambda x: os.path.join(ct_dir, x), self.ct_list))
        self.seg_list = list(map(lambda x: os.path.join(seg_dir, x), self.seg_list))

    def __getitem__(self, index):

        ct_path = self.ct_list[index]
        seg_path = self.seg_list[index]

        # Read CT and gold standard into memory
        ct = sitk.ReadImage(ct_path, sitk.sitkInt16)
        seg = sitk.ReadImage(seg_path, sitk.sitkUInt8)

        ct_array = sitk.GetArrayFromImage(ct)
        seg_array = sitk.GetArrayFromImage(seg)

        # Scale the truncated gray values into roughly [-1, 1]
        # (preprocessing clipped them to the window [-200, 200])
        ct_array = ct_array.astype(np.float32)
        ct_array = ct_array / 200

        # Randomly select para.size (48) consecutive slices along the z-axis
        start_slice = random.randint(0, ct_array.shape[0] - para.size)
        end_slice = start_slice + para.size - 1

        ct_array = ct_array[start_slice:end_slice + 1, :, :]
        seg_array = seg_array[start_slice:end_slice + 1, :, :]

        # After processing, convert the arrays to tensors
        ct_array = torch.FloatTensor(ct_array).unsqueeze(0)
        seg_array = torch.FloatTensor(seg_array)

        return ct_array, seg_array

    def __len__(self):

        return len(self.ct_list)
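A minimal sketch of how this `Dataset` is consumed (it mirrors the wiring in train.py):

```python
import os
from torch.utils.data import DataLoader
from dataset.dataset import Dataset
import parameter as para

train_ds = Dataset(os.path.join(para.training_set_path, 'ct'),
                   os.path.join(para.training_set_path, 'seg'))
train_dl = DataLoader(train_ds, batch_size=para.batch_size, shuffle=True,
                      num_workers=para.num_workers, pin_memory=para.pin_memory)

ct, seg = next(iter(train_dl))
print(ct.shape, seg.shape)  # (B, 1, para.size, H, W) and (B, para.size, H, W)
```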
--------------------------------------------------------------------------------
/VOE.py:
--------------------------------------------------------------------------------
import numpy as np
import SimpleITK as sitk


def voe(y_true, y_pred):
    # Volumetric Overlap Error (in %) between two boolean masks
    assert y_true.dtype == bool and y_pred.dtype == bool
    y_true_f = y_true.flatten()
    y_pred_f = y_pred.flatten()
    return 100 * (1. - np.logical_and(y_true_f, y_pred_f).sum() / float(np.logical_or(y_true_f, y_pred_f).sum()))


y_true = sitk.ReadImage("IBSR_11_segTRI_ana.nii")
y_pred = sitk.ReadImage("IBSR_11_segTRI_predict.nii")

t1 = sitk.GetArrayViewFromImage(y_true)
p1 = sitk.GetArrayViewFromImage(y_pred)

t1 = t1.astype(int)
p1 = p1.astype(int)

# Volumetric Overlap Error and Volume Difference (both in %)
voe_value = 100 * (1. - np.logical_and(t1, p1).sum() / float(np.logical_or(t1, p1).sum()))
vd = 100 * (t1.sum() - p1.sum()) / float(p1.sum())

print("VOE", voe_value)
print("VD", vd)
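A worked toy example of these two measures (illustrative, not part of the repo):

```python
import numpy as np

t = np.array([1, 1, 1, 0], dtype=bool)
p = np.array([1, 1, 0, 1], dtype=bool)

# intersection = 2, union = 4  ->  VOE = 100 * (1 - 2/4) = 50%
print(100 * (1. - np.logical_and(t, p).sum() / float(np.logical_or(t, p).sum())))

# VD = 100 * (|T| - |P|) / |P| = 100 * (3 - 3) / 3 = 0%
print(100 * (t.sum() - p.sum()) / float(p.sum()))
```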
--------------------------------------------------------------------------------
/data_prepare/get_training_set.py:
--------------------------------------------------------------------------------
"""
Obtain a training data set that can be used to train the network

"""

import os
import sys
sys.path.append(os.path.split(sys.path[0])[0])
import shutil
from time import time

import numpy as np
from tqdm import tqdm
import SimpleITK as sitk
import scipy.ndimage as ndimage

import parameter as para


if os.path.exists(para.training_set_path):
    shutil.rmtree(para.training_set_path)

new_ct_path = os.path.join(para.training_set_path, 'ct')
new_seg_dir = os.path.join(para.training_set_path, 'seg')

os.mkdir(para.training_set_path)
os.mkdir(new_ct_path)
os.mkdir(new_seg_dir)

start = time()
for file in tqdm(os.listdir(para.train_ct_path)):

    # Load CT and gold standard into memory
    ct = sitk.ReadImage(os.path.join(para.train_ct_path, file), sitk.sitkInt16)
    ct_array = sitk.GetArrayFromImage(ct)

    seg = sitk.ReadImage(os.path.join(para.train_seg_path, file.replace('volume', 'segmentation')), sitk.sitkUInt8)
    seg_array = sitk.GetArrayFromImage(seg)

    # Fuse the liver and liver-tumor labels in the gold standard into one
    seg_array[seg_array > 0] = 1

    # Truncate gray values outside the threshold window
    ct_array[ct_array > para.upper] = para.upper
    ct_array[ct_array < para.lower] = para.lower

    # Downsample the CT data in the cross-section and resample so that the
    # z-axis spacing of all data becomes 1 mm; the CT must be zoomed along z
    # as well so that it stays aligned slice-for-slice with the label volume
    ct_array = ndimage.zoom(ct_array, (ct.GetSpacing()[-1] / para.slice_thickness, para.down_scale, para.down_scale), order=3)
    seg_array = ndimage.zoom(seg_array, (ct.GetSpacing()[-1] / para.slice_thickness, 1, 1), order=0)

    # Find the first and last slices of the liver region, then expand outwards
    z = np.any(seg_array, axis=(1, 2))
    start_slice, end_slice = np.where(z)[0][[0, -1]]

    # Expand slices in both directions
    start_slice = max(0, start_slice - para.expand_slice)
    end_slice = min(seg_array.shape[0] - 1, end_slice + para.expand_slice)

    # If the number of remaining slices is smaller than the required size,
    # discard this volume; only very few cases are affected
    if end_slice - start_slice + 1 < para.size:
        print(file, 'has too few slices:', ct_array.shape[0])
        continue

    ct_array = ct_array[start_slice:end_slice + 1, :, :]
    seg_array = seg_array[start_slice:end_slice + 1, :, :]

    # Finally save the data as nii
    new_ct = sitk.GetImageFromArray(ct_array)

    new_ct.SetDirection(ct.GetDirection())
    new_ct.SetOrigin(ct.GetOrigin())
    new_ct.SetSpacing((ct.GetSpacing()[0] * int(1 / para.down_scale), ct.GetSpacing()[1] * int(1 / para.down_scale), para.slice_thickness))

    new_seg = sitk.GetImageFromArray(seg_array)

    new_seg.SetDirection(ct.GetDirection())
    new_seg.SetOrigin(ct.GetOrigin())
    new_seg.SetSpacing((ct.GetSpacing()[0], ct.GetSpacing()[1], para.slice_thickness))

    sitk.WriteImage(new_ct, os.path.join(new_ct_path, file))
    sitk.WriteImage(new_seg, os.path.join(new_seg_dir, file.replace('volume', 'segmentation').replace('.nii', '.nii.gz')))
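An illustrative example of the z-axis resampling arithmetic used above: with a 2.5 mm slice spacing and `slice_thickness = 1`, the zoom factor is 2.5, so 100 slices become 250.

```python
import numpy as np
import scipy.ndimage as ndimage

seg = np.zeros((100, 256, 256), dtype=np.uint8)
zoomed = ndimage.zoom(seg, (2.5 / 1.0, 1, 1), order=0)  # order=0 keeps labels intact
print(zoomed.shape)  # (250, 256, 256)
```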
--------------------------------------------------------------------------------
/utilities/calculate_metrics.py:
--------------------------------------------------------------------------------
import math

import numpy as np
import scipy.spatial as spatial
import scipy.ndimage.morphology as morphology


class Metirc():

    def __init__(self, real_mask, pred_mask, voxel_spacing):
        self.real_mask = real_mask
        self.pred_mask = pred_mask
        self.voxel_spacing = voxel_spacing

        self.real_mask_surface_pts = self.get_surface(real_mask, voxel_spacing)
        self.pred_mask_surface_pts = self.get_surface(pred_mask, voxel_spacing)

        self.real2pred_nn = self.get_real2pred_nn()
        self.pred2real_nn = self.get_pred2real_nn()

    def get_surface(self, mask, voxel_spacing):

        # Surface voxels are the mask minus its binary erosion
        kernel = morphology.generate_binary_structure(3, 2)
        surface = morphology.binary_erosion(mask, kernel) ^ mask

        surface_pts = surface.nonzero()

        surface_pts = np.array(list(zip(surface_pts[0], surface_pts[1], surface_pts[2])))

        # Scale voxel indices into physical coordinates; spacing is (x, y, z)
        # while the array axes are (z, y, x), hence the reversal
        return surface_pts * np.array(self.voxel_spacing[::-1]).reshape(1, 3)

    def get_pred2real_nn(self):

        # Distance from each predicted surface point to the nearest real surface point
        tree = spatial.cKDTree(self.real_mask_surface_pts)
        nn, _ = tree.query(self.pred_mask_surface_pts)

        return nn

    def get_real2pred_nn(self):

        # Distance from each real surface point to the nearest predicted surface point
        tree = spatial.cKDTree(self.pred_mask_surface_pts)
        nn, _ = tree.query(self.real_mask_surface_pts)

        return nn

    # Dice coefficient (also returns the numerator and denominator used for the global Dice)
    def get_dice_coefficient(self):

        intersection = (self.real_mask * self.pred_mask).sum()
        union = self.real_mask.sum() + self.pred_mask.sum()

        return 2 * intersection / union, 2 * intersection, union

    # Jaccard index
    def get_jaccard_index(self):

        intersection = (self.real_mask * self.pred_mask).sum()
        union = (self.real_mask | self.pred_mask).sum()

        return intersection / union

    # Volumetric Overlap Error = 1 - Jaccard
    def get_VOE(self):

        return 1 - self.get_jaccard_index()

    # Relative Volume Difference
    def get_RVD(self):

        return float(self.pred_mask.sum() - self.real_mask.sum()) / float(self.real_mask.sum())

    # False negative rate, relative to the union
    def get_FNR(self):

        fn = self.real_mask.sum() - (self.real_mask * self.pred_mask).sum()
        union = (self.real_mask | self.pred_mask).sum()

        return fn / union

    # False positive rate, relative to the union
    def get_FPR(self):

        fp = self.pred_mask.sum() - (self.real_mask * self.pred_mask).sum()
        union = (self.real_mask | self.pred_mask).sum()

        return fp / union

    # Average symmetric surface distance
    def get_ASSD(self):

        return (self.pred2real_nn.sum() + self.real2pred_nn.sum()) / \
               (self.real_mask_surface_pts.shape[0] + self.pred_mask_surface_pts.shape[0])

    # Root mean square symmetric surface distance
    def get_RMSD(self):

        return math.sqrt((np.power(self.pred2real_nn, 2).sum() + np.power(self.real2pred_nn, 2).sum()) /
                         (self.real_mask_surface_pts.shape[0] + self.pred_mask_surface_pts.shape[0]))

    # Maximum symmetric surface distance (Hausdorff distance)
    def get_MSD(self):

        return max(self.pred2real_nn.max(), self.real2pred_nn.max())
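A minimal usage sketch for `Metirc` on two toy binary masks (illustrative; assumes the repo root is on the Python path):

```python
import numpy as np
from utilities.calculate_metrics import Metirc

real = np.zeros((20, 32, 32), dtype=np.uint8)
pred = np.zeros((20, 32, 32), dtype=np.uint8)
real[5:15, 8:24, 8:24] = 1
pred[6:16, 8:24, 8:24] = 1

# voxel_spacing is (x, y, z), as returned by SimpleITK's GetSpacing()
m = Metirc(real, pred, (1.0, 1.0, 1.0))
print('dice:', m.get_dice_coefficient()[0])
print('voe :', m.get_VOE())
print('assd:', m.get_ASSD())
```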
--------------------------------------------------------------------------------
/parameter.py:
--------------------------------------------------------------------------------
# -----------------------Path related parameters---------------------------------------

train_ct_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/train/ct'  # CT data path of the original training set

train_seg_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/train/seg'  # Label data path of the original training set

test_ct_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/ct'  # CT data path of the original test set

test_seg_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/seg/'  # Label data path of the original test set

training_set_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/data_prepare/train/'  # Storage path of the data used to train the network

pred_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/dataset/test/liver_pred'  # Save path of network prediction results

crf_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/test/crf'  # CRF optimization result save path

module_path = '/content/gdrive/My Drive/MICCAI-LITS2017-master/module/net550-0.251-0.231.pth'  # Test model path

# -----------------------Path related parameters---------------------------------------


# ---------------------Training data preparation parameters-----------------------------------

size = 48  # Use 48 consecutive slices as input to the network

down_scale = 0.5  # Cross-sectional downsampling factor

expand_slice = 20  # Number of slices by which the liver region is expanded in both directions

slice_thickness = 1  # Normalize the z-axis spacing of all data to 1mm

upper, lower = 200, -200  # Gray-value truncation window for the CT data

# ---------------------Training data preparation parameters-----------------------------------


# -----------------------Network structure related parameters------------------------------------

drop_rate = 0.3  # dropout random drop probability

# -----------------------Network structure related parameters------------------------------------


# ---------------------Network training related parameters--------------------------------------

gpu = '0'  # Index of the graphics card to use

Epoch = 20

learning_rate = 1e-3

learning_rate_decay = [500, 750]

alpha = 0.33  # Deep supervision attenuation coefficient

batch_size = 1

num_workers = 3

pin_memory = True

cudnn_benchmark = True

# ---------------------Network training related parameters--------------------------------------


# ----------------------Model test related parameters-------------------------------------

threshold = 0.5  # Probability threshold for binarizing the network output

stride = 12  # Sliding sampling step

maximum_hole = 5e4  # Largest hole area to fill (in voxels)

# ----------------------Model test related parameters-------------------------------------


# ---------------------CRF post-processing optimization related parameters----------------------------------

z_expand, x_expand, y_expand = 10, 30, 30  # Number of expansions in the three directions based on the predicted result

max_iter = 20  # CRF iterations

s1, s2, s3 = 1, 10, 10  # CRF Gaussian kernel parameters

# ---------------------CRF post-processing optimization related parameters----------------------------------
--------------------------------------------------------------------------------
/metrics.py:
--------------------------------------------------------------------------------
"""
Evaluation script: computes segmentation metrics for predictions already saved by test.py
"""

import os
import collections
from time import time

import pandas as pd
import SimpleITK as sitk

from utilities.calculate_metrics import Metirc

import parameter as para

# Two variables defined in order to compute the global Dice
dice_intersection = 0.0
dice_union = 0.0

file_name = []  # file names
time_pre_case = []  # per-case processing time

# Define evaluation indicators
liver_score = collections.OrderedDict()
liver_score['dice'] = []
liver_score['jaccard'] = []
liver_score['voe'] = []
liver_score['fnr'] = []
liver_score['fpr'] = []
liver_score['assd'] = []
liver_score['rmsd'] = []
liver_score['msd'] = []


for file_index, file in enumerate(os.listdir(para.test_ct_path)):

    start = time()
    file_name.append(file)

    # Read the gold standard into memory
    seg = sitk.ReadImage(os.path.join(para.test_seg_path, file.replace('volume', 'segmentation')), sitk.sitkUInt8)
    seg_array = sitk.GetArrayFromImage(seg)
    seg_array[seg_array > 0] = 1

    # Read the saved prediction (largest-connected-component extraction and
    # hole filling were already performed by test.py)
    pred = sitk.ReadImage(os.path.join(para.pred_path, file.replace('volume', 'pred')), sitk.sitkUInt8)
    liver_seg = sitk.GetArrayFromImage(pred)
    liver_seg[liver_seg > 0] = 1

    # Calculate the segmentation evaluation indices; the gold standard carries
    # the same voxel spacing as the original CT
    liver_metric = Metirc(seg_array, liver_seg, seg.GetSpacing())

    liver_score['dice'].append(liver_metric.get_dice_coefficient()[0])
    liver_score['jaccard'].append(liver_metric.get_jaccard_index())
    liver_score['voe'].append(liver_metric.get_VOE())
    liver_score['fnr'].append(liver_metric.get_FNR())
    liver_score['fpr'].append(liver_metric.get_FPR())
    liver_score['assd'].append(liver_metric.get_ASSD())
    liver_score['rmsd'].append(liver_metric.get_RMSD())
    liver_score['msd'].append(liver_metric.get_MSD())

    dice_intersection += liver_metric.get_dice_coefficient()[1]
    dice_union += liver_metric.get_dice_coefficient()[2]

    time_pre_case.append(time() - start)


# Write the evaluation indicators into an Excel file
liver_data = pd.DataFrame(liver_score, index=file_name)
liver_data['time'] = time_pre_case

liver_statistics = pd.DataFrame(index=['mean', 'std', 'min', 'max'], columns=list(liver_data.columns))
liver_statistics.loc['mean'] = liver_data.mean()
liver_statistics.loc['std'] = liver_data.std()
liver_statistics.loc['min'] = liver_data.min()
liver_statistics.loc['max'] = liver_data.max()

writer = pd.ExcelWriter('./result.xlsx')
liver_data.to_excel(writer, 'liver')
liver_statistics.to_excel(writer, 'liver_statistics')
writer.save()

# dice global
print('dice global:', dice_intersection / dice_union)
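The "dice global" printed at the end pools intersections and unions over all cases rather than averaging per-case scores; a small illustration of the difference (with hypothetical values):

```python
inter = [30.0, 2.0]   # 2 * |A ∩ B| per case (hypothetical)
union = [40.0, 10.0]  # |A| + |B| per case

mean_dice = sum(i / u for i, u in zip(inter, union)) / 2  # (0.75 + 0.2) / 2 = 0.475
global_dice = sum(inter) / sum(union)                     # 32 / 50 = 0.64
print(mean_dice, global_dice)
```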
--------------------------------------------------------------------------------
/train.py:
--------------------------------------------------------------------------------
"""
Training script
"""

import os
from time import time

import numpy as np

import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader

from dataset.dataset import Dataset

from loss.Dice import DiceLoss
from loss.ELDice import ELDiceLoss
from loss.WBCE import WCELoss
from loss.Jaccard import JaccardLoss
from loss.SS import SSLoss
from loss.Tversky import TverskyLoss
from loss.Hybrid import HybridLoss
from loss.BCE import BCELoss

from net.ResUNet import net

import parameter as para

import matplotlib.pyplot as plt


step_list = [0]
epoch_loss_list = []

# GPU-related settings
os.environ['CUDA_VISIBLE_DEVICES'] = para.gpu
cudnn.benchmark = para.cudnn_benchmark

# Define the network
net = torch.nn.DataParallel(net).cuda()
net.train()

# Define the Dataset
train_ds = Dataset(os.path.join(para.training_set_path, 'ct'), os.path.join(para.training_set_path, 'seg'))

# Define the data loader
train_dl = DataLoader(train_ds, para.batch_size, True, num_workers=para.num_workers, pin_memory=para.pin_memory)

# Loss function
loss_func_list = [DiceLoss(), ELDiceLoss(), WCELoss(), JaccardLoss(), SSLoss(), TverskyLoss(), HybridLoss(), BCELoss()]
loss_func = loss_func_list[5]

# Define the optimizer
opt = torch.optim.Adam(net.parameters(), lr=0.01)
# opt = torch.optim.SGD(net.parameters(), lr=0.00001, momentum=0.9)

# Learning rate decay
lr_decay = torch.optim.lr_scheduler.MultiStepLR(opt, para.learning_rate_decay)

# Deep supervision attenuation coefficient
alpha = para.alpha

# Train the network
start = time()
for epoch in range(para.Epoch):

    lr_decay.step()

    mean_loss = []

    for step, (ct, seg) in enumerate(train_dl):

        ct = ct.cuda()
        seg = seg.cuda()

        outputs = net(ct)

        loss1 = loss_func(outputs[0], seg)
        loss2 = loss_func(outputs[1], seg)
        loss3 = loss_func(outputs[2], seg)
        loss4 = loss_func(outputs[3], seg)

        # Deep supervision: the three auxiliary outputs are weighted by alpha
        loss = (loss1 + loss2 + loss3) * alpha + loss4

        mean_loss.append(loss4.item())

        opt.zero_grad()
        loss.backward()
        opt.step()

        if step % 5 == 0:

            step_list.append(step_list[-1] + 1)

            print('epoch:{}, step:{}, loss1:{:.3f}, loss2:{:.3f}, loss3:{:.3f}, loss4:{:.3f}, time:{:.3f} min'
                  .format(epoch, step, loss1.item(), loss2.item(), loss3.item(), loss4.item(), (time() - start) / 60))

    mean_loss = sum(mean_loss) / len(mean_loss)
    epoch_loss_list.append(mean_loss)

    # Save the model
    if epoch % 50 == 0 and epoch != 0:

        # The saved model is named: epoch count + current minibatch loss + mean loss of the current epoch
        # torch.save(net.state_dict(), './module/net{}-{:.3f}-{:.3f}.pth'.format(epoch, loss, mean_loss))
        print('epoch {}: model-saving interval reached'.format(epoch))

    # Decay the deep supervision coefficient
    if epoch % 40 == 0 and epoch != 0:
        alpha *= 0.8
        print('epoch {}: alpha decayed to {:.4f}'.format(epoch, alpha))


# Plot the mean loss per epoch
plt.plot(range(len(epoch_loss_list)), epoch_loss_list)
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.show()
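An illustration of the alpha decay schedule above: starting from `para.alpha = 0.33` and multiplying by 0.8 every 40 epochs, the auxiliary deep-supervision losses gradually fade out.

```python
alpha = 0.33
for epoch in (40, 80, 120):
    alpha *= 0.8
    print(epoch, round(alpha, 4))  # 40 0.264, 80 0.2112, 120 0.169
```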
--------------------------------------------------------------------------------
/net/ResUNet.py:
--------------------------------------------------------------------------------
"""

network definition
"""

import os
import sys
sys.path.append(os.path.split(sys.path[0])[0])

import torch
import torch.nn as nn
import torch.nn.functional as F

import parameter as para


class ResUNet(nn.Module):

    def __init__(self, training):
        super().__init__()

        self.training = training

        self.encoder_stage1 = nn.Sequential(
            nn.Conv3d(1, 16, 3, 1, padding=1),
            nn.PReLU(16),

            nn.Conv3d(16, 16, 3, 1, padding=1),
            nn.PReLU(16),
        )

        self.encoder_stage2 = nn.Sequential(
            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),

            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),

            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
        )

        self.encoder_stage3 = nn.Sequential(
            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),

            nn.Conv3d(64, 64, 3, 1, padding=2, dilation=2),
            nn.PReLU(64),

            nn.Conv3d(64, 64, 3, 1, padding=4, dilation=4),
            nn.PReLU(64),
        )

        self.encoder_stage4 = nn.Sequential(
            nn.Conv3d(128, 128, 3, 1, padding=3, dilation=3),
            nn.PReLU(128),

            nn.Conv3d(128, 128, 3, 1, padding=4, dilation=4),
            nn.PReLU(128),

            nn.Conv3d(128, 128, 3, 1, padding=5, dilation=5),
            nn.PReLU(128),
        )

        self.decoder_stage1 = nn.Sequential(
            nn.Conv3d(128, 256, 3, 1, padding=1),
            nn.PReLU(256),

            nn.Conv3d(256, 256, 3, 1, padding=1),
            nn.PReLU(256),

            nn.Conv3d(256, 256, 3, 1, padding=1),
            nn.PReLU(256),
        )

        self.decoder_stage2 = nn.Sequential(
            nn.Conv3d(128 + 64, 128, 3, 1, padding=1),
            nn.PReLU(128),

            nn.Conv3d(128, 128, 3, 1, padding=1),
            nn.PReLU(128),

            nn.Conv3d(128, 128, 3, 1, padding=1),
            nn.PReLU(128),
        )

        self.decoder_stage3 = nn.Sequential(
            nn.Conv3d(64 + 32, 64, 3, 1, padding=1),
            nn.PReLU(64),

            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),

            nn.Conv3d(64, 64, 3, 1, padding=1),
            nn.PReLU(64),
        )

        self.decoder_stage4 = nn.Sequential(
            nn.Conv3d(32 + 16, 32, 3, 1, padding=1),
            nn.PReLU(32),

            nn.Conv3d(32, 32, 3, 1, padding=1),
            nn.PReLU(32),
        )

        self.down_conv1 = nn.Sequential(
            nn.Conv3d(16, 32, 2, 2),
            nn.PReLU(32)
        )

        self.down_conv2 = nn.Sequential(
            nn.Conv3d(32, 64, 2, 2),
            nn.PReLU(64)
        )

        self.down_conv3 = nn.Sequential(
            nn.Conv3d(64, 128, 2, 2),
            nn.PReLU(128)
        )

        self.down_conv4 = nn.Sequential(
            nn.Conv3d(128, 256, 3, 1, padding=1),
            nn.PReLU(256)
        )

        self.up_conv2 = nn.Sequential(
            nn.ConvTranspose3d(256, 128, 2, 2),
            nn.PReLU(128)
        )

        self.up_conv3 = nn.Sequential(
            nn.ConvTranspose3d(128, 64, 2, 2),
            nn.PReLU(64)
        )

        self.up_conv4 = nn.Sequential(
            nn.ConvTranspose3d(64, 32, 2, 2),
            nn.PReLU(32)
        )

        # Mapping at 256*256 (full in-plane resolution)
        self.map4 = nn.Sequential(
            nn.Conv3d(32, 1, 1, 1),
            nn.Upsample(scale_factor=(1, 2, 2), mode='trilinear'),
            nn.Sigmoid()
        )

        # Mapping at 128*128
        self.map3 = nn.Sequential(
            nn.Conv3d(64, 1, 1, 1),
            nn.Upsample(scale_factor=(2, 4, 4), mode='trilinear'),
            nn.Sigmoid()
        )

        # Mapping at 64*64
        self.map2 = nn.Sequential(
            nn.Conv3d(128, 1, 1, 1),
            nn.Upsample(scale_factor=(4, 8, 8), mode='trilinear'),
            nn.Sigmoid()
        )

        # Mapping at 32*32
        self.map1 = nn.Sequential(
            nn.Conv3d(256, 1, 1, 1),
            nn.Upsample(scale_factor=(8, 16, 16), mode='trilinear'),
            nn.Sigmoid()
        )

    def forward(self, inputs):

        long_range1 = self.encoder_stage1(inputs) + inputs

        short_range1 = self.down_conv1(long_range1)

        long_range2 = self.encoder_stage2(short_range1) + short_range1
        long_range2 = F.dropout(long_range2, para.drop_rate, self.training)

        short_range2 = self.down_conv2(long_range2)

        long_range3 = self.encoder_stage3(short_range2) + short_range2
        long_range3 = F.dropout(long_range3, para.drop_rate, self.training)

        short_range3 = self.down_conv3(long_range3)

        long_range4 = self.encoder_stage4(short_range3) + short_range3
        long_range4 = F.dropout(long_range4, para.drop_rate, self.training)

        short_range4 = self.down_conv4(long_range4)

        outputs = self.decoder_stage1(long_range4) + short_range4
        outputs = F.dropout(outputs, para.drop_rate, self.training)

        output1 = self.map1(outputs)

        short_range6 = self.up_conv2(outputs)

        outputs = self.decoder_stage2(torch.cat([short_range6, long_range3], dim=1)) + short_range6
        outputs = F.dropout(outputs, 0.3, self.training)

        output2 = self.map2(outputs)

        short_range7 = self.up_conv3(outputs)

        outputs = self.decoder_stage3(torch.cat([short_range7, long_range2], dim=1)) + short_range7
        outputs = F.dropout(outputs, 0.3, self.training)

        output3 = self.map3(outputs)

        short_range8 = self.up_conv4(outputs)

        outputs = self.decoder_stage4(torch.cat([short_range8, long_range1], dim=1)) + short_range8

        output4 = self.map4(outputs)

        if self.training is True:
            return output1, output2, output3, output4
        else:
            return output4


def init(module):
    if isinstance(module, nn.Conv3d) or isinstance(module, nn.ConvTranspose3d):
        nn.init.kaiming_normal_(module.weight.data, 0.25)
        nn.init.constant_(module.bias.data, 0)


net = ResUNet(training=True)
net.apply(init)

# network parameters
print('net total parameters:', sum(param.numel() for param in net.parameters()))
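A quick shape sanity check (illustrative, not part of the repo): with `para.size = 48` slices and 256*256 in-plane inputs (as produced by the downscaling in test.py), every output map is upsampled to 512*512 in-plane.

```python
import torch
from net.ResUNet import ResUNet

model = ResUNet(training=True)
x = torch.randn(1, 1, 48, 256, 256)
with torch.no_grad():
    out1, out2, out3, out4 = model(x)
print(out4.shape)  # torch.Size([1, 1, 48, 512, 512])
```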
--------------------------------------------------------------------------------
/test.py:
--------------------------------------------------------------------------------
"""
Test script
"""

import os
import copy
import collections
from time import time

import torch
import numpy as np
import pandas as pd
import scipy.ndimage as ndimage
import SimpleITK as sitk
import skimage.measure as measure
import skimage.morphology as morphology

from net.ResUNet import ResUNet
from utilities.calculate_metrics import Metirc

import parameter as para

os.environ['CUDA_VISIBLE_DEVICES'] = para.gpu

# Two variables defined in order to compute the global Dice
dice_intersection = 0.0
dice_union = 0.0

file_name = []  # file names
time_pre_case = []  # per-case processing time

# Define evaluation indicators
liver_score = collections.OrderedDict()
liver_score['dice'] = []
liver_score['jaccard'] = []
liver_score['voe'] = []
liver_score['fnr'] = []
liver_score['fpr'] = []
liver_score['assd'] = []
liver_score['rmsd'] = []
liver_score['msd'] = []

# Define the network and load the trained parameters
net = torch.nn.DataParallel(ResUNet(training=False)).cuda()
net.load_state_dict(torch.load(para.module_path))
net.eval()

for file_index, file in enumerate(os.listdir(para.test_ct_path)):

    start = time()

    file_name.append(file)

    # Read the CT into memory
    ct = sitk.ReadImage(os.path.join(para.test_ct_path, file), sitk.sitkInt16)
    ct_array = sitk.GetArrayFromImage(ct)

    origin_shape = ct_array.shape

    # Truncate gray values outside the threshold window
    ct_array[ct_array > para.upper] = para.upper
    ct_array[ct_array < para.lower] = para.lower

    # Scale the truncated gray values into roughly [-1, 1]
    ct_array = ct_array.astype(np.float32)
    ct_array = ct_array / 200

    # Downsample the cross-section with order-3 spline interpolation
    # (the array is float32 at this point)
    ct_array = ndimage.zoom(ct_array, (1, para.down_scale, para.down_scale), order=3)

    # Use padding for data with too few slices
    too_small = False
    if ct_array.shape[0] < para.size:
        depth = ct_array.shape[0]
        # Pad with the lower bound of the gray window, on the same scale as
        # the normalized data
        temp = np.ones((para.size, int(512 * para.down_scale), int(512 * para.down_scale))) * (para.lower / 200)
        temp[0: depth] = ct_array
        ct_array = temp
        too_small = True

    # Sliding window sampling prediction
    start_slice = 0
    end_slice = start_slice + para.size - 1
    count = np.zeros((ct_array.shape[0], 512, 512), dtype=np.int16)
    probability_map = np.zeros((ct_array.shape[0], 512, 512), dtype=np.float32)

    with torch.no_grad():
        while end_slice < ct_array.shape[0]:

            ct_tensor = torch.FloatTensor(ct_array[start_slice: end_slice + 1]).cuda()
            ct_tensor = ct_tensor.unsqueeze(dim=0).unsqueeze(dim=0)

            outputs = net(ct_tensor)

            count[start_slice: end_slice + 1] += 1
            probability_map[start_slice: end_slice + 1] += np.squeeze(outputs.cpu().detach().numpy())

            # Due to limited video memory, keep only the ndarray data and
            # destroy the computation graph immediately after saving it
            del outputs

            start_slice += para.stride
            end_slice = start_slice + para.size - 1

        if end_slice != ct_array.shape[0] - 1:
            end_slice = ct_array.shape[0] - 1
            start_slice = end_slice - para.size + 1

            ct_tensor = torch.FloatTensor(ct_array[start_slice: end_slice + 1]).cuda()
            ct_tensor = ct_tensor.unsqueeze(dim=0).unsqueeze(dim=0)
            outputs = net(ct_tensor)

            count[start_slice: end_slice + 1] += 1
            probability_map[start_slice: end_slice + 1] += np.squeeze(outputs.cpu().detach().numpy())

            del outputs

    # Average the overlapping windows and binarize at the threshold
    pred_seg = np.zeros_like(probability_map)
    pred_seg[probability_map >= (para.threshold * count)] = 1

    if too_small:
        temp = np.zeros((depth, 512, 512), dtype=np.float32)
        temp += pred_seg[0: depth]
        pred_seg = temp

    # Read the gold standard into memory
    seg = sitk.ReadImage(os.path.join(para.test_seg_path, file.replace('volume', 'segmentation')), sitk.sitkUInt8)
    seg_array = sitk.GetArrayFromImage(seg)
    seg_array[seg_array > 0] = 1

    # Extract the largest connected component of the liver, remove small
    # regions, and fill the internal holes
    pred_seg = pred_seg.astype(np.uint8)
    liver_seg = copy.deepcopy(pred_seg)
    liver_seg = measure.label(liver_seg, 4)
    props = measure.regionprops(liver_seg)

    max_area = 0
    max_index = 0
    for index, prop in enumerate(props, start=1):
        if prop.area > max_area:
            max_area = prop.area
            max_index = index

    liver_seg[liver_seg != max_index] = 0
    liver_seg[liver_seg == max_index] = 1

    liver_seg = liver_seg.astype(np.bool)
    morphology.remove_small_holes(liver_seg, para.maximum_hole, connectivity=2, in_place=True)
    liver_seg = liver_seg.astype(np.uint8)

    # Calculate the segmentation evaluation indices
    liver_metric = Metirc(seg_array, liver_seg, ct.GetSpacing())

    liver_score['dice'].append(liver_metric.get_dice_coefficient()[0])
    liver_score['jaccard'].append(liver_metric.get_jaccard_index())
    liver_score['voe'].append(liver_metric.get_VOE())
    liver_score['fnr'].append(liver_metric.get_FNR())
    liver_score['fpr'].append(liver_metric.get_FPR())
    liver_score['assd'].append(liver_metric.get_ASSD())
    liver_score['rmsd'].append(liver_metric.get_RMSD())
    liver_score['msd'].append(liver_metric.get_MSD())
    dice_intersection += liver_metric.get_dice_coefficient()[1]
    dice_union += liver_metric.get_dice_coefficient()[2]

    # Save the predicted result as nii data
    pred_seg = sitk.GetImageFromArray(liver_seg)

    pred_seg.SetDirection(ct.GetDirection())
    pred_seg.SetOrigin(ct.GetOrigin())
    pred_seg.SetSpacing(ct.GetSpacing())

    sitk.WriteImage(pred_seg, os.path.join(para.pred_path, file.replace('volume', 'pred')))

    speed = time() - start
    time_pre_case.append(speed)

    print(file_index, 'this case used {:.3f} s'.format(speed))
    print('-----------------------')


# Write the evaluation indicators into an Excel file
liver_data = pd.DataFrame(liver_score, index=file_name)
liver_data['time'] = time_pre_case

liver_statistics = pd.DataFrame(index=['mean', 'std', 'min', 'max'], columns=list(liver_data.columns))
liver_statistics.loc['mean'] = liver_data.mean()
liver_statistics.loc['std'] = liver_data.std()
liver_statistics.loc['min'] = liver_data.min()
liver_statistics.loc['max'] = liver_data.max()

writer = pd.ExcelWriter('./result.xlsx')
liver_data.to_excel(writer, 'liver')
liver_statistics.to_excel(writer, 'liver_statistics')
writer.save()

# dice global
print('dice global:', dice_intersection / dice_union)

--------------------------------------------------------------------------------
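For reference, a toy illustration (not part of the repo) of the sliding-window averaging used in test.py: overlapping windows accumulate probabilities in `probability_map` and visit counts in `count`, and thresholding at `para.threshold * count` averages the overlaps.

```python
import numpy as np

prob = np.zeros(6)
cnt = np.zeros(6)
for s in (0, 2):          # two windows of length 4, stride 2
    prob[s:s + 4] += 0.8  # pretend the net outputs 0.8 everywhere
    cnt[s:s + 4] += 1

pred = (prob >= 0.5 * cnt).astype(int)
print(cnt)   # [1. 1. 2. 2. 1. 1.]
print(pred)  # [1 1 1 1 1 1], since 0.8 >= 0.5 on average
```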