├── Dialogs.py ├── GUI.py ├── Instructions.docx ├── README.md ├── breast_needed_functions.py ├── data_rw.py ├── density_map_feature_based.py ├── execute_libra_code.py ├── get_info.py ├── initialize_variables.py ├── just_original_image_preprocessing.py ├── libra.py ├── load_models.py ├── metrics.py ├── needed_functions_CPU.py ├── needed_functions_GPU.py ├── postprocessing.py ├── preprocessing.py ├── preprocessing_pec.py ├── pyradiomics_features.py ├── requirements.txt ├── run_GUI.py ├── seg_losses.py ├── seg_metrics.py ├── segmentation_tools.py ├── timeout.py └── weight_selection.py /Dialogs.py: -------------------------------------------------------------------------------- 1 | from PyQt5.QtWidgets import QWidget, QFileDialog 2 | 3 | 4 | class Get_File(QWidget): 5 | 6 | def __init__(self): 7 | super().__init__() 8 | self.title = 'Direct to the file' 9 | self.left = 10 10 | self.top = 10 11 | self.width = 640 12 | self.height = 480 13 | self.initUI() 14 | 15 | def initUI(self): 16 | self.setWindowTitle(self.title) 17 | self.setGeometry(self.left, self.top, self.width, self.height) 18 | 19 | self.openFileNameDialog() 20 | 21 | self.show() 22 | 23 | 24 | def openFileNameDialog(self): 25 | options = QFileDialog.Options() 26 | options |= QFileDialog.DontUseNativeDialog 27 | self.fileName, _ = QFileDialog.getOpenFileName(self,"QFileDialog.getOpenFileName()", "","Saved Model (*.h5)", options=options) 28 | self.close() 29 | 30 | 31 | class Get_Folder(QWidget): 32 | 33 | def __init__(self): 34 | super().__init__() 35 | self.title = 'Direct to the path' 36 | self.left = 10 37 | self.top = 10 38 | self.width = 640 39 | self.height = 480 40 | self.initUI() 41 | 42 | def initUI(self): 43 | self.setWindowTitle(self.title) 44 | self.setGeometry(self.left, self.top, self.width, self.height) 45 | self.openFileNameDialog() 46 | self.show() 47 | 48 | 49 | def openFileNameDialog(self): 50 | options = QFileDialog.Options() 51 | options |= QFileDialog.DontUseNativeDialog 52 | self.folderpath = str(QFileDialog.getExistingDirectory(self, "Select Directory")) 53 | self.close() 54 | -------------------------------------------------------------------------------- /Instructions.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CBICA/Deep-LIBRA/10aff70caa1f1dec6fa1a000eebe8739e1bd6913/Instructions.docx -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | The toolkit has been tested on Ubuntu 20.04, Ubuntu 18.04, Centos 7, cluster based on Centos 7, and MacOX. It is expected to work on Windows too. Python 3 is needed for running the toolkit. Please follow the following instructions to install and use software: 2 | 3 | 1. Install Conda from the following link: https://docs.conda.io/projects/conda/en/latest/user-guide/install/ 4 | 2. Set up a virtual environment: 5 | 6 | conda create --name DeepLIBRA python=3.6 7 | 3. Activate the environment for the first time and anytime you need to work with the toolkit: 8 | 9 | conda activate DeepLIBRA 10 | 4. Install the required packages: 11 | 12 | pip install -r requirements.txt 13 | 5. The toolkit can be run either from the command line or using a graphical used interface: 14 | * Run toolkit using command line (there more options for running the toolkit, you can see them using `-h`): 15 | 16 | python3 Path_to_code/execute_libra_code.py -i ${Path_to_Images} -o ${Path_to_Output} -m ${Path_to_Networks} 17 | 18 | Example: 19 | 20 | python3 ~/Documents/MyPapers/A_Breast_Deep_LIBRA/ShortVersion/Code/execute_libra_code.py -i ~/Desktop/image -o ~/Desktop/image/output -m ~/Desktop/Nets 21 | * Run toolkit using graphical user interface: 22 | 23 | python3 ${Path_to_code}/run_GUI.py 24 | 25 | The pretrained networks can be found from this link: https://upenn.box.com/s/y08cpr0soxxu05x5godw7h5czpn7oak8 26 | -------------------------------------------------------------------------------- /breast_needed_functions.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pandas as pd 3 | import cv2, pdb, os 4 | from scipy import signal 5 | from copy import deepcopy 6 | from scipy.ndimage import label 7 | from skimage.filters import threshold_otsu 8 | from scipy.ndimage import gaussian_filter1d 9 | 10 | 11 | ################################################################################ 12 | ################################################################################ 13 | def air_Libra(obj): 14 | # LIBRA method for raw images 15 | row_range = obj.image.max(axis=1)-obj.image.min(axis=1) 16 | row_range = row_range/row_range.max() 17 | 18 | C1 = np.where(row_range>0.001)[0][0] 19 | C2 = np.where(row_range>0.001)[0][-1] 20 | I0_center = obj.image[C1:C2, :] 21 | 22 | img_pixs = I0_center.reshape([1,-1]) 23 | x = np.arange(img_pixs.min(), img_pixs.max(), 24 | (img_pixs.max()-img_pixs.min())/1000.0 ) 25 | n_elements = np.histogram(img_pixs, bins=1000, range=(img_pixs.min(), 26 | img_pixs.max())) 27 | c_elements = np.insert(np.cumsum(n_elements[0]),[0], [0]) 28 | dd = np.diff(c_elements) 29 | dd = gaussian_filter1d(dd, sigma=1) 30 | peaklocation = np.where(dd>dd.max()*0.05)[0][0] 31 | 32 | 33 | ddd = dd[1:] - dd[:-1] 34 | ddd_neg = peaklocation + np.where(ddd[peaklocation+1:]<=0)[0][0] 35 | threhsold_opt1 = ddd_neg + np.where(ddd[ddd_neg+1:]>=0)[0][0]-4 36 | threhsold = min(x[threhsold_opt1], x[peaklocation+30]) 37 | 38 | 39 | mask = obj.image <= threhsold 40 | 41 | if hasattr(obj, "Shrinking_ratio"): 42 | mask = cv2.resize(mask.astype("int")*1.0, 43 | (round(obj.image.shape[1]/obj.Shrinking_ratio), 44 | round(obj.image.shape[0]/obj.Shrinking_ratio)), 45 | interpolation = cv2.INTER_AREA) 46 | mask = mask>0 47 | 48 | return(mask) 49 | 50 | 51 | ################################################################################ 52 | ################################################################################ 53 | def air(obj): 54 | # # OTSU method for raw images 55 | threhsold = threshold_otsu(obj.img_norm) 56 | mask = obj.img_norm <= threhsold 57 | return(mask) 58 | 59 | 60 | 61 | ################################################################################ 62 | ################################################################################ 63 | def find_logical_background_objs(Mask): 64 | # the background should be zero and object one 65 | temp_mask = Mask.astype("int") 66 | 67 | temp_mask[0,:] = 0 68 | labeled_obj = label(temp_mask)[0] 69 | BG_Label = labeled_obj[0,-1] 70 | 71 | if labeled_obj.max()>1: 72 | for num in range(labeled_obj.max()+1): 73 | if num != BG_Label: 74 | Loc = np.where(labeled_obj==num) 75 | if not( Loc[0].min()==0 or Loc[0].max()==Mask.shape[0] or 76 | Loc[1].min()==0 or Loc[1].max()==Mask.shape[1] ): 77 | Mask[labeled_obj==num] = True 78 | 79 | return(Mask) 80 | 81 | 82 | 83 | ################################################################################ 84 | ################################################################################ 85 | def find_logical_pec_objs(Mask): 86 | # the pectoral should be one and rests zero (background called here) 87 | temp_mask = Mask.astype("int") 88 | 89 | temp_mask[0,:] = 0 90 | labeled_obj = label(temp_mask)[0] 91 | BG_Label = labeled_obj[0, -1] 92 | 93 | if labeled_obj.max()>1: 94 | for num in range(labeled_obj.max()+1): 95 | if num != BG_Label: 96 | Loc = np.where(labeled_obj==num) 97 | if not( Loc[0].min()==0 or Loc[1].min()==0 ): 98 | Mask[labeled_obj==num] = False 99 | 100 | return(Mask) 101 | 102 | 103 | 104 | ################################################################################ 105 | ################################################################################ 106 | def find_largest_obj(Mask): 107 | # zero shows background and one is objects 108 | temp_mask = Mask.astype("int") 109 | out_mask = deepcopy(Mask) 110 | 111 | # make the first row zero to make sure it is not affected by noise 112 | temp_mask[0,:] = 0 113 | 114 | labeled_obj = label(temp_mask)[0] 115 | 116 | if labeled_obj.max()>1: 117 | BG_Label = labeled_obj[0, -1] 118 | Unique_labels, counts = np.unique(labeled_obj, return_counts=True) 119 | counts = np.delete(counts, np.where(Unique_labels==BG_Label), None) 120 | Unique_labels = np.delete(Unique_labels, np.where(Unique_labels==BG_Label), None) 121 | Max = Unique_labels[counts.argmax()] 122 | out_mask[labeled_obj!=Max] = False 123 | 124 | return(out_mask) 125 | 126 | 127 | 128 | ################################################################################ 129 | ################################################################################ 130 | def detect_buttom_portion(obj, Mask): 131 | mask = deepcopy(Mask) 132 | mask[:int(mask.shape[0]/2), :] = 0 133 | non_zero_index = np.argwhere(mask>0) 134 | # careful X is to bottom and y is to right 135 | y_direction_indexes = non_zero_index[:,1] 136 | x_direction_indexes = non_zero_index[:,0] 137 | 138 | y_max_indexes = np.argwhere(y_direction_indexes==y_direction_indexes.max(axis=0)) 139 | x_max_y_max_index = x_direction_indexes[y_max_indexes].max() 140 | 141 | # remove anything above 1.3max_x 142 | mask[:min(int(1.3*x_max_y_max_index), mask.shape[0]), :] = 0 143 | 144 | if (mask>0).any(): 145 | # Now repeat it to be Safe 146 | non_zero_index = np.argwhere(mask>0) 147 | # careful X is to bottom and y is to right 148 | y_direction_indexes = non_zero_index[:,1] 149 | x_direction_indexes = non_zero_index[:,0] 150 | 151 | y_max_indexes = np.argwhere(y_direction_indexes==y_direction_indexes.max(axis=0)) 152 | x_max_y_max_index = x_direction_indexes[y_max_indexes].max() 153 | 154 | # remove anything above max_x 155 | mask[:x_max_y_max_index, :] = 0 156 | 157 | 158 | if (mask>0).any(): 159 | contours, hierarchy = cv2.findContours((mask*255).astype("uint8"), 160 | cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) 161 | 162 | # make sure taht there is just one contour 163 | if len(contours)==1: 164 | contour = contours[0].reshape([-1,2]) 165 | 166 | # careful X is to bottom and y is to right 167 | Y_coordinates = contour[:,0] 168 | X_coordinates = contour[:,1] 169 | 170 | Indexes_touching_top_Xs = np.argwhere(X_coordinates==x_max_y_max_index) 171 | Indexe_max_Y = Y_coordinates[Indexes_touching_top_Xs].argmax() 172 | Index_top_right_corner = Indexes_touching_top_Xs[Indexe_max_Y] 173 | 174 | Indexes_touching_bottom_Xs = np.argwhere(X_coordinates==X_coordinates.max()) 175 | if len(Indexes_touching_bottom_Xs) == 1: 176 | Index_bottom_right_corner = Indexes_touching_bottom_Xs[0] 177 | else: 178 | Indexe_max_Y = Y_coordinates[Indexes_touching_bottom_Xs].argmax() 179 | Index_bottom_right_corner = Indexes_touching_bottom_Xs[Indexe_max_Y] 180 | 181 | 182 | contour = contour[int(min(Index_top_right_corner,Index_bottom_right_corner)): 183 | int(max(Index_top_right_corner,Index_bottom_right_corner)),:] 184 | 185 | if len(contour)!=0: 186 | # sort the coodrinates if needed 187 | if contour[0,1]>contour[-1,1]: 188 | contour = contour[::-1] 189 | 190 | # careful X is to bottom and y is to right 191 | Y_coordinates = contour[:,0] 192 | X_coordinates = contour[:,1] 193 | 194 | b, a = signal.butter(5, 0.05, 'low') 195 | try: 196 | Y_coordinates = signal.filtfilt(b, a, Y_coordinates) 197 | except: 198 | Y_coordinates 199 | Y_diff = np.diff(Y_coordinates) 200 | 201 | try: 202 | X_coordinates = signal.filtfilt(b, a, X_coordinates) 203 | except: 204 | X_coordinates 205 | X_diff = np.diff(X_coordinates) 206 | 207 | Threshold_for_changes = 0.1 208 | Threshold_for_come_back = 0.5 209 | 210 | # turn back for breast 211 | if (X_diff<-Threshold_for_come_back).any(): 212 | first_index = np.where(X_diff<-Threshold_for_come_back)[0][0] 213 | cut_diff = X_diff[first_index:] 214 | 215 | # larger threshold for come back if needed 216 | removing_index = np.where(cut_diff>Threshold_for_come_back)[0][0] 217 | contour[removing_index+first_index,:] 218 | Mask[:contour[removing_index+first_index,:][0], 219 | contour[removing_index+first_index,:][1]:]=0 220 | 221 | 222 | elif (Y_diff>Threshold_for_changes).any(): 223 | first_index = np.where(Y_diff>Threshold_for_changes)[0][0] 224 | removing_loc = contour[first_index+2,:] 225 | 226 | range_to_remove_x = np.arange(removing_loc[1], contour[first_index:,1].max()+1) 227 | range_to_remove_y = np.interp(range_to_remove_x, contour[first_index:,1], 228 | contour[first_index:,0]).astype(int)+2 229 | 230 | removing_indexes = np.concatenate((range_to_remove_x.reshape([-1,1]), 231 | range_to_remove_y.reshape([-1,1])), axis=1) 232 | 233 | for removing_index in removing_indexes: 234 | Mask[removing_index[0], :removing_index[1]] = 0 235 | 236 | 237 | Mask = Mask>0 238 | 239 | return(Mask) 240 | 241 | ################################################################################ 242 | ################################################################################ 243 | def Normalize_Image(IMAGE, Range, Min=None, Max=None, flag_max_edition=None, 244 | flag_min_edition=None, bits_conversion=None, Name=None): 245 | IMAGE = IMAGE.astype('float') 246 | 247 | if Min==None: Min = IMAGE.min() 248 | if Max==None: Max = IMAGE.max() 249 | 250 | if Min != Max: 251 | Out_Img = (IMAGE-Min)/(Max-Min) 252 | 253 | Out_Img = Out_Img*Range 254 | 255 | if flag_max_edition == None: 256 | try: 257 | Out_Img[Out_Img>Range] = Range 258 | except: 259 | Out_Img = Out_Img 260 | 261 | if flag_min_edition == None: 262 | try: 263 | Out_Img[Out_Img<0] = 0 264 | except: 265 | Out_Img = Out_Img 266 | 267 | if bits_conversion == None: 268 | if Range == 2**16-1: 269 | Out_Img = Out_Img.astype('uint16') 270 | else: 271 | Out_Img = Out_Img.astype('uint8') 272 | else: 273 | Out_Img = Out_Img.astype(bits_conversion) 274 | 275 | else: 276 | if Name == None: 277 | print("ERROR: SOMTHING WENT WRONG") 278 | else: 279 | print("ERROR: SOMTHING WENT WRONG for "+Name) 280 | 281 | return(Out_Img) 282 | 283 | 284 | 285 | ################################################################################ 286 | ################################################################################ 287 | def get_headers(ds, List): 288 | values = [] 289 | for item in List: 290 | if hasattr(ds, item): 291 | temp = getattr(ds, item) 292 | try: 293 | if item == "PatientAge" and temp[-1]=="Y": 294 | temp = temp[:-1] 295 | try: 296 | values.append(int(temp)) 297 | except: 298 | values.append(temp) 299 | except: 300 | values.append(np.nan) 301 | else: 302 | values.append(np.nan) 303 | 304 | Data = pd.DataFrame(data=[values], index=[0], columns=List) 305 | 306 | return(Data) 307 | 308 | 309 | 310 | ################################################################################ 311 | ################################################################################ 312 | def object_oriented_preprocessing(obj, metal_threshold=30000, 313 | max_image_threshold=64000): 314 | # max_image_threshold never meets! just in case. metal_threshold is useful 315 | image = deepcopy(obj.image) 316 | image_metal = deepcopy(image) 317 | 318 | if (hasattr(obj, "find_pacemaker") and obj.find_pacemaker==1 319 | and obj.ds.PhotometricInterpretation!='MONOCHROME1'): 320 | image_metal[image_metal>max_image_threshold]=max_image_threshold 321 | if obj.find_pacemaker==1: 322 | MIN = image_metal.min() 323 | if (image_metal>metal_threshold).any(): 324 | image_metal[image_metal>metal_threshold]=MIN 325 | 326 | if obj.ds.PresentationIntentType=='FOR PROCESSING': 327 | if image.min() < 1: 328 | image = image + abs(image.min()) + 1 329 | image_metal = image_metal + abs(image_metal.min()) + 1 330 | image = np.log(image) 331 | image_metal = np.log(image_metal) 332 | 333 | if obj.ds.PhotometricInterpretation=='MONOCHROME1': 334 | image = abs(image-image.max()) 335 | image_metal = abs(image_metal-image_metal.max()) 336 | 337 | if hasattr(obj, "find_pacemaker") and obj.find_pacemaker==1: 338 | image_metal = np.exp(image_metal) 339 | MIN = image_metal.min() 340 | image_metal[image_metal>metal_threshold]=MIN 341 | image_metal = np.log(image_metal) 342 | 343 | if obj.ds.PresentationIntentType=='FOR PROCESSING': 344 | image = image**2 345 | image_metal = image_metal**2 346 | 347 | 348 | if not(hasattr(obj.ds,'ImageLaterality')) and hasattr(obj.ds,'Laterality'): 349 | obj.ds.ImageLaterality = obj.ds.Laterality 350 | elif not(hasattr(obj.ds,'ImageLaterality')) and not(hasattr(obj.ds,'Laterality')): 351 | left = image[:, :int(image.shape[1]/2)].sum() 352 | right = image[:, int(image.shape[1]/2):].sum() 353 | if right>left: 354 | obj.ds.ImageLaterality = "R" 355 | else: 356 | obj.ds.ImageLaterality = "L" 357 | 358 | 359 | obj.fliping_flag = 0 360 | if hasattr(obj.ds,'FieldOfViewHorizontalFlip') and obj.ds.FieldOfViewHorizontalFlip =='YES': 361 | if obj.ds.ImageLaterality == 'L': 362 | image=np.fliplr(image) 363 | image_metal=np.fliplr(image_metal) 364 | obj.fliping_flag = 1 365 | else: 366 | if obj.ds.ImageLaterality == 'R': 367 | image=np.fliplr(image) 368 | image_metal=np.fliplr(image_metal) 369 | obj.fliping_flag = 1 370 | 371 | obj.image = image 372 | 373 | return (obj, image_metal) 374 | 375 | 376 | 377 | ################################################################################ 378 | ################################################################################ 379 | def Remove_Top_Below_Side_effect(obj): 380 | MIN = obj.image.min() 381 | # Top 382 | for n, row in enumerate(obj.image): 383 | if not( (row==MIN).all() or (row==row[0]).all()): 384 | obj.top_n = n 385 | break 386 | 387 | # Bottom 388 | for n, row in enumerate(reversed(obj.image)): 389 | if not( (row==MIN).all() or (row==row[0]).all()): 390 | obj.bottom_n = n 391 | break 392 | 393 | for n, cols in enumerate(reversed(obj.image.T)): 394 | if not( (row==MIN).all() ): 395 | obj.side_n = n 396 | break 397 | 398 | if obj.bottom_n == 0: 399 | bottom_n = 1 400 | else: 401 | bottom_n = obj.bottom_n 402 | 403 | if obj.side_n == 0: 404 | side_n = 1 405 | else: 406 | side_n = obj.side_n 407 | 408 | Non_min_image = obj.image[obj.top_n:-bottom_n, :-side_n] 409 | MIN = Non_min_image.min() 410 | 411 | if obj.top_n>0: 412 | obj.image[:obj.top_n,:] = MIN 413 | 414 | if obj.bottom_n>0: 415 | obj.image[-obj.bottom_n:,:] = MIN 416 | 417 | if obj.side_n>0: 418 | obj.image[:,:-obj.side_n] = MIN 419 | 420 | return(obj) 421 | 422 | 423 | 424 | ################################################################################ 425 | ################################################################################ 426 | def fix_ratio(IMG, height, width, method="area"): 427 | Flag = 0 428 | if np.array_equal(IMG, IMG.astype(bool)): 429 | IMG = IMG.astype("uint8") 430 | Flag = 1 431 | 432 | MIN = IMG.min() 433 | 434 | if IMG.shape[0] > IMG.shape[1]: 435 | IMG = np.concatenate((IMG, np.ones([IMG.shape[0], 436 | IMG.shape[0]-IMG.shape[1]])*MIN), axis=1) 437 | else: 438 | IMG = np.concatenate((IMG, np.ones([IMG.shape[1]-IMG.shape[0], 439 | IMG.shape[1]])*MIN), axis=0) 440 | 441 | if method=="area": 442 | IMG = cv2.resize(IMG, (height, width), interpolation=cv2.INTER_AREA) 443 | if method=="linear": 444 | IMG = cv2.resize(IMG, (height, width), interpolation=cv2.INTER_LINEAR) 445 | if method=="cubic": 446 | IMG = cv2.resize(IMG, (height, width), interpolation=cv2.INTER_CUBIC) 447 | if method=="nearest": 448 | IMG = cv2.resize(IMG, (height, width), interpolation=cv2.INTER_NEAREST) 449 | elif method=="lanc": 450 | IMG = cv2.resize(IMG, (height, width), interpolation=cv2.INTER_LANCZOS4) 451 | 452 | return(IMG) 453 | 454 | 455 | 456 | ################################################################################ 457 | ################################################################################ 458 | def fix_ratio_to_csv(IMG, obj): 459 | Image_Dimension = IMG.shape 460 | if IMG.shape[0] > IMG.shape[1]: 461 | Image_needed_side_extention="V" 462 | Needed_addition = IMG.shape[0]-IMG.shape[1] 463 | else: 464 | Image_needed_side_extention="H" 465 | Needed_addition = IMG.shape[0]-IMG.shape[1] 466 | 467 | Data = pd.DataFrame({"Image_needed_side_extention":Image_needed_side_extention, 468 | "Needed_addition":Needed_addition, "Image_Dimension_X": Image_Dimension[0], 469 | "Image_Dimension_Y": Image_Dimension[1]}, index=[0]) 470 | 471 | Path, File = os.path.split(obj.Case) 472 | if File[-4:] == ".dcm": File = File[:-4] 473 | saving_path = os.path.join(obj.output_path, File, "air_breast_mask") 474 | if not(os.path.isdir(saving_path)): os.makedirs(saving_path) 475 | Data.to_csv(os.path.join(saving_path, "fixing_ratio.csv")) 476 | 477 | 478 | 479 | ################################################################################ 480 | ################################################################################ 481 | def bring_back_images_to_orginal_size(Path_to_csv, IMG, type="mask"): 482 | image_reset_info = pd.read_csv(Path_to_csv, sep=",", index_col=0) 483 | 484 | Image_needed_side_extention = image_reset_info["Image_needed_side_extention"].iloc[0] 485 | Needed_addition = image_reset_info["Needed_addition"].iloc[0] 486 | Image_Dimension_X = image_reset_info["Image_Dimension_X"].iloc[0] 487 | Image_Dimension_Y = image_reset_info["Image_Dimension_Y"].iloc[0] 488 | 489 | Max = max(Image_Dimension_X, Image_Dimension_Y) 490 | if type=="mask": 491 | IMG = cv2.resize((IMG*255).astype("uint8"), (Max, Max), interpolation = cv2.INTER_NEAREST) 492 | else: 493 | IMG = cv2.resize(IMG.astype("uint8"), (Max, Max), interpolation = cv2.INTER_NEAREST) 494 | 495 | if Image_needed_side_extention == "V": 496 | IMG = IMG[:, :Image_Dimension_Y] 497 | else: 498 | IMG = IMG[:Image_Dimension_X, :] 499 | 500 | if type=="mask": 501 | IMG[IMG>0] = 1 502 | IMG = (IMG*255).astype("uint8") 503 | 504 | return(IMG) 505 | 506 | 507 | 508 | ################################################################################ 509 | ################################################################################ 510 | def bring_back_images_to_orginal_orientation(Path_to_csv, IMG): 511 | image_reset_orientation = pd.read_csv(Path_to_csv, sep=",", index_col=0) 512 | if ('FieldOfViewHorizontalFlip' in image_reset_orientation.columns) and not( 513 | pd.isnull(image_reset_orientation["FieldOfViewHorizontalFlip"].item())): 514 | if image_reset_orientation["ImageLaterality"].item() == 'L': 515 | IMG=np.fliplr(IMG) 516 | if image_reset_orientation["ImageLaterality"].item() == 'R': 517 | IMG=np.fliplr(IMG) 518 | 519 | return(IMG) 520 | 521 | 522 | 523 | ################################################################################ 524 | ################################################################################ 525 | def Z_scoring(IMG, mask=[]): 526 | if len(mask)==0: 527 | mask = np.ones(IMG.shape) 528 | 529 | try: 530 | MEAN = IMG[mask].mean() 531 | STD = IMG[mask].std() 532 | 533 | IMG = (IMG-MEAN)/STD 534 | 535 | except: 536 | MEAN = IMG.mean() 537 | STD = IMG.std() 538 | 539 | IMG = (IMG-MEAN)/STD 540 | 541 | return(IMG) 542 | -------------------------------------------------------------------------------- /data_rw.py: -------------------------------------------------------------------------------- 1 | # This is read and write functions needed for deeplearning 2 | import numpy as np 3 | from glob import glob 4 | from copy import deepcopy 5 | import os, pdb, json, cv2 6 | from shutil import copyfile 7 | import matplotlib.pyplot as plt 8 | from keras.callbacks import BaseLogger 9 | from keras.preprocessing.image import ImageDataGenerator 10 | 11 | 12 | 13 | def get_image_info(obj): 14 | # first image info 15 | if hasattr(obj, "image_folder"): 16 | Image_Path = os.path.join(obj.train_path, obj.image_folder) 17 | else: 18 | Image_Path = obj.train_path 19 | 20 | Image_Path = os.path.join(Image_Path, "*"+obj.image_format) 21 | 22 | Images_files = sorted(glob(Image_Path)) 23 | 24 | if len(Images_files)>=1: 25 | Image_sample = cv2.imread(Images_files[0], -1) 26 | else: 27 | Image_sample = cv2.imread(Images_files, -1) 28 | 29 | obj.target_size = (Image_sample.shape[0], Image_sample.shape[1]) 30 | if len(Image_sample.shape)>2: 31 | obj.image_dimension = Image_sample.shape[2] 32 | obj.image_color_mode = "rgb" 33 | else: 34 | obj.image_dimension = 1 35 | obj.image_color_mode = "grayscale" 36 | 37 | 38 | obj.num_training_image = len(Images_files) 39 | 40 | # then mask info 41 | if hasattr(obj, "mask_folder"): 42 | Mask_Path = os.path.join(obj.train_path, obj.mask_folder) 43 | Mask_Path = os.path.join(Mask_Path, "*"+obj.image_format) 44 | 45 | Mask_Files = sorted(glob(Mask_Path)) 46 | Mask_sample = cv2.imread(Mask_Files[0]) 47 | 48 | if len(Image_sample.shape)>2: 49 | obj.mask_color_mode = "rgb" 50 | else: 51 | obj.mask_color_mode = "grayscale" 52 | 53 | return(obj) 54 | 55 | 56 | 57 | def adjustData(image, obj, mask=[]): 58 | # make image between zero to one 59 | # plt.imshow(image[0,...,0]);plt.show() 60 | 61 | if image.max()>1: 62 | image = image/image.max() 63 | 64 | if len(image.shape)!=4: 65 | temp = deepcopy(image) 66 | image = np.zeros(image.shape + (1, )) 67 | image[..., 0] = temp 68 | 69 | 70 | if len(mask)!=0: 71 | mask = mask[:, :, :, 0] if (len(mask.shape) == 4) else mask[:, :, 0] 72 | 73 | Steps_fidning_classes = 256/obj.num_class 74 | Bottom_range_class = 0 75 | 76 | # add a dimension for mask 77 | if len(mask.shape) == 3: 78 | new_mask = np.zeros(mask.shape + (obj.num_class, )) 79 | 80 | for i in range(obj.num_class): 81 | new_mask[np.logical_and(mask>=Bottom_range_class, 82 | mask 0: 195 | # loop over the entries in the history log and 196 | # trim any entries that are past the starting 197 | # epoch 198 | for k in self.H.keys(): 199 | self.H[k] = self.H[k][:self.startAt] 200 | 201 | def on_epoch_end(self, epoch, logs={}): 202 | # loop over the logs and update the loss, accuracy, etc. 203 | # for the entire training process 204 | for (k, v) in logs.items(): 205 | l = self.H.get(k, []) 206 | l.append(v) 207 | self.H[k] = l 208 | 209 | # check to see if the training history should be serialized 210 | # to file 211 | if self.jsonPath is not None: 212 | f = open(self.jsonPath, "w") 213 | f.write(json.dumps(self.H)) 214 | f.close() 215 | 216 | # ensure at least two epochs have passed before plotting 217 | # (epoch starts at zero) 218 | # plot the training loss and accuracy 219 | colors = ['g', 'b', 'm', 'r', 'c', 'y', "lime", "orange", "gray", "navy", "brown", "pink"] 220 | number_color = 0 221 | font_size = 10 222 | if len(self.H["loss"]) > 1 and len(self.H["loss"])%2 == 0.0: 223 | N = np.arange(0, len(self.H["loss"])) 224 | fig = plt.figure() 225 | fig.set_size_inches(10, 4) 226 | plt.rcParams['savefig.facecolor'] = "0.6" 227 | plt.rcParams["font.weight"] = "bold" 228 | plt.rcParams["axes.labelweight"] = "bold" 229 | ax = fig.add_subplot(1, 1, 1) 230 | 231 | for key in self.H.keys(): 232 | if key!='loss' and key!='val_loss': 233 | ax.plot(N, self.H[key], label=key, color=colors[number_color]) 234 | number_color += 1 235 | 236 | for tick in ax.xaxis.get_major_ticks(): 237 | tick.label.set_fontsize(font_size) 238 | for tick in ax.xaxis.get_major_ticks(): 239 | tick.label.set_fontsize(font_size) 240 | 241 | plt.title("Performance Measures [Epoch {}]".format(len(self.H["loss"])), color='k', fontweight='bold') 242 | plt.xlabel("Epoch #", color='k') 243 | plt.ylabel("Measures Values", color='k') 244 | plt.legend() 245 | 246 | plt.savefig(self.figPath, dpi=400) 247 | plt.close() 248 | 249 | 250 | 251 | def testGenerator(obj): 252 | image_datagen = ImageDataGenerator() 253 | General_path_Image = obj.train_path 254 | 255 | Path, file_name = os.path.split(General_path_Image) 256 | if file_name == "image": 257 | General_path_Image = Path 258 | 259 | image_generator = image_datagen.flow_from_directory( 260 | General_path_Image, 261 | classes = ["image"], # always assuming that images are in path/image 262 | color_mode = obj.image_color_mode, 263 | class_mode = None, 264 | target_size = (obj.image_final_size, obj.image_final_size), 265 | batch_size = obj.batch_size, 266 | shuffle = False) 267 | 268 | return(image_generator) 269 | 270 | 271 | 272 | def eval_image_Generator(obj): 273 | image_datagen = ImageDataGenerator() 274 | General_path_Image = obj.train_path 275 | 276 | Path, file_name = os.path.split(General_path_Image) 277 | if file_name == "image": 278 | General_path_Image = Path 279 | 280 | image_generator = image_datagen.flow_from_directory( 281 | General_path_Image, 282 | classes = ["image"], # always assuming that images are in image folder 283 | class_mode = None, 284 | color_mode = obj.image_color_mode, 285 | target_size = (obj.image_final_size, obj.image_final_size), 286 | batch_size = obj.batch_size, 287 | shuffle = False) 288 | 289 | for image, M in zip(image_generator, image_generator): 290 | image = image / image.max() 291 | 292 | yield image 293 | 294 | 295 | def eval_mask_Generator(obj): 296 | mask_datagen = ImageDataGenerator() 297 | General_path_Image = obj.train_path 298 | 299 | mask_generator = mask_datagen.flow_from_directory( 300 | General_path_Image, 301 | classes = [obj.mask_folder], 302 | class_mode = None, 303 | color_mode = obj.image_color_mode, 304 | target_size = (obj.image_final_size, obj.image_final_size), 305 | batch_size = obj.batch_size, 306 | shuffle = False) 307 | 308 | for mask, M in zip(mask_generator, mask_generator): 309 | mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0] 310 | mask = np.round( mask/mask.max()*(obj.num_class-1) ) 311 | 312 | # add a dimension for mask 313 | new_mask = np.zeros(mask.shape + (obj.num_class, )) 314 | for i in range(obj.num_class): 315 | new_mask[mask==i, i] = 1 316 | mask = new_mask 317 | 318 | yield mask 319 | 320 | 321 | 322 | def saveResult(obj, saving_name="_predict"): 323 | if hasattr(obj, "image_folder"): 324 | Image_Path = os.path.join(obj.train_path, obj.image_folder) 325 | else: 326 | Image_Path = obj.train_path 327 | 328 | Images_paths = sorted(glob(os.path.join(Image_Path, "*"+obj.image_format))) 329 | for i, (image, input_image_path) in enumerate( zip(obj.results, Images_paths) ): 330 | if obj.num_class>3: 331 | image[:, :, 0] = 0 332 | for Class in range(image.shape[-1]-1): 333 | image[:, :, 0] = image[:, :, 0]+image[:, :, Class+1]*(Class+1) 334 | image = image[:, :, 0] 335 | elif obj.num_class==2: 336 | image = image[:, :, 1] 337 | 338 | image = np.round(image) 339 | image = np.uint8(image/image.max()*obj.A_Range) 340 | _, file_name = os.path.split(input_image_path) 341 | cv2.imwrite(os.path.join(obj.saving_path, file_name[:-4]+saving_name+obj.image_format), image) 342 | 343 | 344 | def saveResults_batch_based(obj, results, image_names, saving_name="_predict"): 345 | for image, input_image_path in zip(results, image_names): 346 | if obj.num_class>3: 347 | for Class in range(image.shape[-1]-1): 348 | image[:, :, 0] = image[:, :, 0]+image[:, :, Class+1]*(Class+1) 349 | image = image[:, :, 0] 350 | elif obj.num_class==2: 351 | image = image[:, :, -1] 352 | 353 | image = np.uint8(image/image.max()*obj.A_Range) 354 | image = (image>200).astype("uint8") 355 | 356 | _, file_name = os.path.split(input_image_path) 357 | file_name = file_name[:-4] 358 | 359 | cv2.imwrite(os.path.join(obj.saving_path, file_name+saving_name+obj.image_format), image) 360 | # cv2.imwrite(os.path.join(obj.output_path, file_name, file_name+saving_name+obj.image_format), image) 361 | -------------------------------------------------------------------------------- /density_map_feature_based.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | # From python packages 5 | import numpy as np 6 | import pandas as pd 7 | from time import time 8 | from termcolor import colored 9 | from collections import Counter 10 | import platform 11 | if platform.system() == "Darwin": 12 | import matplotlib 13 | matplotlib.use('TkAgg') 14 | import matplotlib.pyplot as plt 15 | import cv2, os, argparse, pdb, logging, pickle, json 16 | 17 | # From my packages 18 | from timeout import timeout 19 | from segmentation_tools import FSLIC 20 | from pyradiomics_features import extract_breast_radiomics_features 21 | from breast_needed_functions import bring_back_images_to_orginal_orientation, Z_scoring 22 | from breast_needed_functions import bring_back_images_to_orginal_size, Normalize_Image, fix_ratio 23 | 24 | 25 | 26 | 27 | ################################## This script is for training the svm 28 | ap = argparse.ArgumentParser() 29 | ap.add_argument("-o", "--output_path", required=False, default='./', 30 | help="path for saving results file") 31 | 32 | ap.add_argument("-i", "--input", required=False, default='./file.dcm', 33 | help="path for input files") 34 | 35 | ap.add_argument("-if", "--image_format", required=False, default='.png', 36 | help="The image format for saving") 37 | 38 | ap.add_argument("-po", "--print_off", type=int, default=0, 39 | help="If this is one, it turns off printing") 40 | 41 | ap.add_argument("-ar", "--A_Range", type=int, default=2**8-1, 42 | help="The number of bits for saving image") 43 | 44 | ap.add_argument("-fis", "--final_image_size", type=int, default=512, 45 | help="The final size of image") 46 | 47 | ap.add_argument("-sfn", "--saving_folder_name", default="breast_density", 48 | help="The name of folder that the resutls to be saved for batch processing") 49 | 50 | ap.add_argument("-cn", "--case_name", default="Case_ID", 51 | help="This name defines the saving path") 52 | 53 | ap.add_argument("-lt", "--libra_training", default="0", 54 | help="Zero means to masking and one means training") 55 | 56 | ap.add_argument("-pttm", "--Path_to_trained_model", default="/cbica/home/hajimago/Net/density/model.pkl", 57 | help="This is path to trained model for density prediction") 58 | 59 | ap.add_argument("-rii", "--remove_intermediate_images", 60 | default="K", help="R is removing and K is keeping them") 61 | 62 | ap.add_argument("-to", "--timeout_sec", type=int, default=1800, 63 | help="timeout for each batch") 64 | 65 | args = vars(ap.parse_args()) 66 | 67 | 68 | 69 | 70 | 71 | class Segmentor(object): # The main class 72 | def __init__(self): 73 | ######################################################################## Initial 74 | ######################################################################## Values 75 | self.Case_path = args["input"] 76 | self.image_format = args["image_format"] 77 | self.saving_folder_name = args["saving_folder_name"] 78 | self.case_name = args["case_name"] 79 | self.output_path = args["output_path"] 80 | self.libra_training = args["libra_training"] 81 | 82 | self.A_Range = args["A_Range"] 83 | self.final_image_size = args["final_image_size"] 84 | self.print_off = int(args["print_off"]) 85 | 86 | self.remove_intermediate_images = args["remove_intermediate_images"] 87 | 88 | if self.A_Range==2**16-1: 89 | self.bits_conversion = "uint16" 90 | elif self.A_Range==2**32-1: 91 | self.bits_conversion = "uint32" 92 | else: 93 | self.bits_conversion = "uint8" 94 | 95 | 96 | self.Path_to_trained_model = args["Path_to_trained_model"] 97 | self.path_to_svm, _ = os.path.split(self.Path_to_trained_model) 98 | self.Path_to_final_feature_list = os.path.join(self.path_to_svm, "Final_Features_List.txt") 99 | self.Path_to_max_min = os.path.join(self.path_to_svm, "Max_Min.csv") 100 | 101 | self.multi_svm = True 102 | 103 | 104 | 105 | @timeout(args["timeout_sec"]) 106 | def Main_Loop_Function(self): 107 | ######################################################################## Couple of 108 | ######################################################################## initializations 109 | T_Start = time() 110 | 111 | log_path = os.path.join(self.output_path, self.case_name, "LIBRA_"+self.case_name+".log") 112 | logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', filename=log_path, level=logging.INFO) 113 | logging.info('All segmentation tasks are done.') 114 | logging.info('Trying to calculate breast density.') 115 | 116 | 117 | #################################################################### Loading Image 118 | #################################################################### & files 119 | self.image = cv2.imread(self.Case_path, 0) 120 | self.mask = self.image>self.image.min() 121 | 122 | 123 | org_image_path = os.path.join(self.output_path, self.case_name, 124 | "air_breast_mask", self.case_name+"_16bits_Orginal"+self.image_format) 125 | org_image = cv2.imread(org_image_path, -1) 126 | org_image = fix_ratio(org_image, 127 | self.final_image_size, self.final_image_size) 128 | 129 | air_mask_path = os.path.join(self.output_path, self.case_name, 130 | "air_breast_mask", self.case_name+"_air_breast_mask"+self.image_format) 131 | air_mask = cv2.imread(air_mask_path, -1) 132 | 133 | 134 | org_image = Normalize_Image(org_image, 2**13-1, 135 | bits_conversion="uint16", 136 | flag_min_edition=True, 137 | flag_max_edition=True, 138 | Min=org_image.min(), 139 | Max=org_image[air_mask>0].max()) 140 | 141 | 142 | self.image_temp = org_image.copy() 143 | self.image_temp[np.logical_not(self.mask)] = self.image_temp[self.mask].min() 144 | self.image_temp = Normalize_Image(self.image_temp, 2**8-1, bits_conversion="uint8") 145 | self.image_slic = np.concatenate((self.image_temp.reshape([self.final_image_size, self.final_image_size, 1]), 146 | self.image_temp.reshape([self.final_image_size, self.final_image_size, 1])), axis=2) 147 | self.image_slic = np.concatenate((self.image_slic, 148 | self.image_temp.reshape([self.final_image_size, self.final_image_size, 1])), axis=2) 149 | 150 | 151 | Header_csv_path = os.path.join(self.output_path, self.case_name, "Headers.csv") 152 | Header_csv = pd.read_csv(Header_csv_path, sep=',', index_col=0) 153 | 154 | 155 | NumSLIC = 512; ComSLIC = 1.5; SigSLIC = 0.1 156 | if (Header_csv["PresentationIntentType"]=="FOR PRESENTATION").any(): 157 | ComSLIC = 5 158 | 159 | self.segments, Fusied_Image = FSLIC(self.image_slic, self.image_slic, 160 | NumSLIC=NumSLIC, ComSLIC=ComSLIC, SigSLIC=SigSLIC, Initial=True) 161 | 162 | super_pixels = os.path.join(self.output_path, "superpixels") 163 | if not(os.path.isdir(super_pixels)): os.makedirs(super_pixels) 164 | super_pixels = os.path.join(super_pixels, self.case_name+self.image_format) 165 | cv2.imwrite(super_pixels, (Fusied_Image*255).astype("uint8")) 166 | 167 | 168 | 169 | self, self.segments = extract_breast_radiomics_features(self, org_image, self.mask, self.segments, self.case_name) 170 | 171 | 172 | 173 | Saving_Path_All = os.path.join(self.output_path, self.saving_folder_name) 174 | if not(os.path.isdir(Saving_Path_All)): os.makedirs(Saving_Path_All) 175 | 176 | Unq_columns_listed = (np.unique(self.FEATUREs.columns)).tolist() 177 | columns_listed = self.FEATUREs.columns.tolist() 178 | Repeated_Columns = (Counter(columns_listed) - Counter(set(columns_listed))).keys() 179 | 180 | 181 | #fix repeated columns names 182 | for Column in Repeated_Columns: 183 | Indexes = np.argwhere(self.FEATUREs.columns==Column)[1:] 184 | for N, Index in enumerate(Indexes): 185 | self.FEATUREs.columns.values[Index] = self.FEATUREs.columns[Index]+"."+str(N+1) 186 | 187 | 188 | if self.libra_training != "1": 189 | ##### do density mapping 190 | Pixel_Spacing = Header_csv["ImagerPixelSpacing"] # remember this format "['0.094090909', '0.094090909']" 191 | Coma_loc = Pixel_Spacing[0].find(',') 192 | Pixel_Spacing_X = float(Pixel_Spacing[0][2:Coma_loc-1]) 193 | Pixel_Spacing_Y = float(Pixel_Spacing[0][Coma_loc+3:-2]) 194 | pixel_to_cm_conversion = Pixel_Spacing_X * Pixel_Spacing_Y * 0.1 * 0.1 195 | 196 | 197 | if self.multi_svm: 198 | for SVM_INDEX in range(3): 199 | SVM_INDEX += 1 200 | Base, File = os.path.split(self.Path_to_trained_model) 201 | with open(os.path.join(Base, str(SVM_INDEX)+File), 'rb') as pickle_file: 202 | loaded_model = pickle.load(pickle_file) 203 | Base, File = os.path.split(self.Path_to_final_feature_list) 204 | with open(os.path.join(Base, str(SVM_INDEX)+File), 'r') as json_file: 205 | feature_list = json.load(json_file) 206 | Base, File = os.path.split(self.Path_to_max_min) 207 | max_min = pd.read_csv(os.path.join(Base, str(SVM_INDEX)+File), sep=',', index_col=0) 208 | max_min = max_min.loc[feature_list] 209 | 210 | self.normalized_features_svm = (self.FEATUREs[feature_list]-max_min["Min"])/(max_min["Max"]-max_min["Min"]) 211 | temp_segment_Classes = loaded_model.predict(self.normalized_features_svm) 212 | 213 | if SVM_INDEX==1: 214 | segment_Classes = temp_segment_Classes.copy() 215 | else: 216 | segment_Classes += temp_segment_Classes 217 | segment_Classes = segment_Classes/3.0 218 | segment_Classes = np.int16(np.round(segment_Classes)) 219 | 220 | else: 221 | with open(self.Path_to_trained_model, 'rb') as pickle_file: 222 | loaded_model = pickle.load(pickle_file) 223 | with open(self.Path_to_final_feature_list, 'r') as json_file: 224 | feature_list = json.load(json_file) 225 | max_min = pd.read_csv(self.Path_to_max_min, sep=',', index_col=0) 226 | max_min = max_min.loc[feature_list] 227 | 228 | self.normalized_features_svm = (self.FEATUREs[feature_list]-max_min["Min"])/(max_min["Max"]-max_min["Min"]) 229 | segment_Classes = loaded_model.predict(self.normalized_features_svm) 230 | 231 | 232 | self.FEATUREs["Segment_Class"] = segment_Classes 233 | 234 | ## This is where I might need to modify the density if it is really low 235 | # np.argwhere(segment_Classes==1) 236 | 237 | breast_area = self.FEATUREs["Breast_area"].iloc[0] 238 | 239 | BD = np.sum(self.FEATUREs["Seg_area"][segment_Classes>0]/breast_area) 240 | 241 | self.FEATUREs["Breast_Density_Percentage"] = BD 242 | 243 | print(self.case_name, BD) 244 | 245 | # save image desnity map 246 | self.mask_density = np.zeros(self.image.shape) 247 | Indexes = self.FEATUREs["Seg_index"] 248 | for Index in Indexes[self.FEATUREs["Segment_Class"]==1]: 249 | self.mask_density[self.segments==Index] = 255 250 | 251 | 252 | Path_to_csv_ori = os.path.join(self.output_path, self.case_name, "Headers.csv") 253 | Path_to_csv_size = os.path.join(self.output_path, self.case_name, "air_breast_mask", "fixing_ratio.csv") 254 | 255 | 256 | self.mask = self.mask*255 257 | contours_mask, _ = cv2.findContours(self.mask.astype("uint8"), 258 | cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) 259 | fig = plt.figure(frameon=False) 260 | fig.set_size_inches(5, 5) 261 | ax = plt.gca() 262 | for contour in contours_mask: 263 | if len(contour)>1: 264 | ax.imshow(self.image, 'gray') 265 | contour = np.concatenate((contour[:,:,0].T, contour[:,:,1].T), axis=0) 266 | ax.plot(contour[0], contour[1], linewidth=3, color='r') 267 | 268 | contours_density, _ = cv2.findContours(self.mask_density.astype("uint8"), 269 | cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) 270 | for contour in contours_density: 271 | contour = np.concatenate((contour[:,:,0].T, contour[:,:,1].T), axis=0) 272 | ax.plot(contour[0], contour[1], linewidth=2, color='lime') 273 | ax.set_axis_off() 274 | plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, hspace = 0, wspace = 0) 275 | ax.margins(0,0) 276 | ax.xaxis.set_major_locator(plt.NullLocator()) 277 | ax.yaxis.set_major_locator(plt.NullLocator()) 278 | image_path = os.path.join(self.output_path, self.case_name, self.case_name+ 279 | "_dense_tissue_overlay_on_image"+self.image_format) 280 | fig.canvas.draw() 281 | plt.close() 282 | 283 | image_returned = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8) 284 | image_returned = image_returned.reshape(fig.canvas.get_width_height()[::-1] + (3,)).astype("uint8") 285 | image_returned = cv2.cvtColor(image_returned, cv2.COLOR_BGR2RGB) 286 | image_returned= bring_back_images_to_orginal_size(Path_to_csv_size, image_returned, "image") 287 | image_returned = bring_back_images_to_orginal_orientation(Path_to_csv_ori, image_returned) 288 | 289 | Path_to_org_image = os.path.join(self.output_path, self.case_name, "air_breast_mask", 290 | self.case_name+"_Normalized"+self.image_format) 291 | original_image = cv2.imread(Path_to_org_image, 0) 292 | original_image = cv2.cvtColor(original_image,cv2.COLOR_GRAY2RGB) 293 | original_image = bring_back_images_to_orginal_size(Path_to_csv_size, original_image, "image") 294 | original_image = bring_back_images_to_orginal_orientation(Path_to_csv_ori, original_image) 295 | 296 | original_image[image_returned==255] = self.A_Range 297 | original_image[image_returned>0] = image_returned[image_returned>0] 298 | 299 | 300 | 301 | Ch1 = original_image[...,0] 302 | Ch2 = original_image[...,1] 303 | Ch3 = original_image[...,2] 304 | Ch3[image_returned[...,1]==255] = 0 305 | Ch2[image_returned[...,2]==255] = 0 306 | Ch1[np.logical_or(image_returned[...,2]==255, image_returned[...,1]==255)] = 0 307 | 308 | Ch1[np.logical_and(image_returned[...,0]==255, image_returned[...,2]==255, image_returned[...,1]==255)] = 255 309 | Ch2[np.logical_and(image_returned[...,0]==255, image_returned[...,2]==255, image_returned[...,1]==255)] = 255 310 | Ch3[np.logical_and(image_returned[...,0]==255, image_returned[...,2]==255, image_returned[...,1]==255)] = 255 311 | 312 | original_image[...,0] = Ch1 313 | original_image[...,1] = Ch2 314 | original_image[...,2] = Ch3 315 | cv2.imwrite(image_path, original_image) 316 | 317 | final_mask = os.path.join(Saving_Path_All, self.case_name+self.image_format) 318 | cv2.imwrite(final_mask, original_image) 319 | 320 | 321 | dense_mask_file_name = os.path.join(self.output_path, self.case_name, self.case_name+ 322 | "_dense_tissue_mask"+self.image_format) 323 | self.mask_density = bring_back_images_to_orginal_size(Path_to_csv_size, self.mask_density) 324 | self.mask_density = bring_back_images_to_orginal_orientation(Path_to_csv_ori, self.mask_density) 325 | cv2.imwrite(dense_mask_file_name, self.mask_density) 326 | 327 | 328 | final_mask = os.path.join(self.output_path, self.case_name, self.case_name+ 329 | "_final_breask_mask_image_size"+self.image_format) 330 | self.mask = bring_back_images_to_orginal_size(Path_to_csv_size, self.mask) 331 | self.mask = bring_back_images_to_orginal_orientation(Path_to_csv_ori, self.mask) 332 | cv2.imwrite(final_mask, self.mask) 333 | 334 | 335 | self.FEATUREs["Breast_area"] *= pixel_to_cm_conversion 336 | self.FEATUREs["Seg_area"] *= pixel_to_cm_conversion 337 | 338 | 339 | features_file_name = os.path.join(Saving_Path_All, self.case_name+"_Features.csv") 340 | self.FEATUREs_new = self.FEATUREs[self.FEATUREs.columns[:102]].iloc[[0]] 341 | self.FEATUREs_new[self.FEATUREs.columns[-1]] = self.FEATUREs[self.FEATUREs.columns[-1]].iloc[0] 342 | self.FEATUREs_new["Dense_area"] = np.sum(self.FEATUREs["Seg_area"][self.FEATUREs["Segment_Class"]==1]) 343 | self.FEATUREs_new.index = [self.case_name] 344 | self.FEATUREs_new.to_csv(features_file_name) 345 | 346 | else: 347 | features_file_name = os.path.join(Saving_Path_All, self.case_name+"_Features.csv") 348 | self.FEATUREs.to_csv(features_file_name) 349 | 350 | 351 | features_file_name = os.path.join(self.output_path, self.case_name, self.case_name+"_Features.csv") 352 | self.FEATUREs.to_csv(features_file_name) 353 | 354 | 355 | 356 | if self.remove_intermediate_images=="R": 357 | try: 358 | os.remove(org_image_path) 359 | except: 360 | org_image_path 361 | 362 | 363 | 364 | T_End = time() 365 | if self.print_off==0: print("[INFO] Elapsed Time (for this file): "+'\033[1m'+ \ 366 | colored(str(round(T_End-T_Start, 2)), 'blue')+'\033[0m'+" seconds") 367 | 368 | logging.info("The process for this case is done.") 369 | if self.print_off==0: print(colored("[INFO]", 'green')+" The process for this case is done.") 370 | 371 | 372 | 373 | ############################################################################### 374 | ############################################################################### 375 | ############################################################################### 376 | ############################################################################### 377 | ############################################################################### 378 | ############################################################################### Running the code 379 | if __name__ == "__main__": 380 | Info = Segmentor() 381 | Info.Main_Loop_Function() 382 | -------------------------------------------------------------------------------- /execute_libra_code.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | #!/usr/bin/python3 5 | from libra import * 6 | from initialize_variables import set_argparse, get_variables 7 | 8 | 9 | # python3 ~/github/LIBRA/final/execute_libra_code.py -i ~/comp_space/dataset/ -o ~/comp_space/dataset/libra_new2 -m ~/Net 10 | 11 | 12 | class run_libra(object): 13 | def __init__(self): 14 | args = set_argparse(argv=None) 15 | self = get_variables(self, args) 16 | 17 | 18 | def main_function(self): 19 | Info = LIBRA() 20 | print(colored("[INFO] Starting LIBRA "+Info.version, 'green')) 21 | 22 | 23 | Info.parse_args(["-i", self.input_data, 24 | "-po", self.print_off, 25 | "-o", self.output_path, 26 | "-ng", str(self.num_gpu), 27 | "-mc", str(self.multi_cpu), 28 | "-fb", str(self.find_bottom), 29 | "-m", self.general_model_path, 30 | "-lt", str(self.libra_training), 31 | "-cm", str(self.core_multiplier), 32 | "-fpm", str(self.find_pacemaker), 33 | "-tow",str(self.timeout_waiting), 34 | "-tbs", str(self.test_batch_size), 35 | "-fis", str(self.final_image_size), 36 | "-not", str(self.number_of_threads), 37 | "-wsm", self.weight_selection_method, 38 | "-wttbd", self.which_task_to_be_done, 39 | "-rii", self.remove_intermediate_images, 40 | "-lsm", str(self.libra_segmentation_method)]) 41 | 42 | 43 | if Info.which_task_to_be_done == "all" or \ 44 | Info.which_task_to_be_done.find("a_pre")>-1 or \ 45 | Info.which_task_to_be_done.find("a_cnn")>-1 or \ 46 | Info.which_task_to_be_done.find("j_org")>-1: 47 | Info.get_info_based_on_air_cnn() 48 | 49 | 50 | if Info.which_task_to_be_done == "j_org": 51 | Info.run_just_orginal_image_preprocessing() 52 | 53 | 54 | if Info.which_task_to_be_done == "all" or \ 55 | Info.which_task_to_be_done.find("a_pre")>-1: 56 | Info.run_air_preprocessing() 57 | 58 | 59 | if Info.which_task_to_be_done == "all" or \ 60 | Info.which_task_to_be_done.find("a_cnn")>-1: 61 | Info.run_air_cnn() 62 | 63 | 64 | if Info.which_task_to_be_done == "all" or \ 65 | Info.which_task_to_be_done.find("p_pre")>-1 or \ 66 | Info.which_task_to_be_done.find("p_cnn")>-1 or \ 67 | Info.which_task_to_be_done.find("b_pos")>-1 or \ 68 | Info.which_task_to_be_done.find("b_den")>-1: 69 | Info.get_info_based_on_pec_cnn() 70 | 71 | 72 | if Info.which_task_to_be_done == "all" or \ 73 | Info.which_task_to_be_done.find("p_pre")>-1: 74 | Info.run_pec_preprocessing() 75 | 76 | 77 | if Info.which_task_to_be_done == "all" or \ 78 | Info.which_task_to_be_done.find("p_cnn")>-1: 79 | Info.run_pec_cnn() 80 | 81 | 82 | if Info.which_task_to_be_done == "all" or \ 83 | Info.which_task_to_be_done.find("b_pos")>-1: 84 | Info.run_breast_postprocessing() 85 | 86 | 87 | if Info.which_task_to_be_done == "all" or \ 88 | Info.which_task_to_be_done.find("b_den")>-1: 89 | Info.run_feature_extraction() 90 | 91 | 92 | T_End = time() 93 | print("[INFO] The total elapsed time (for all files): "+'\033[1m'+ \ 94 | colored(str(round(T_End-Info.T_Start, 2)), 'red')+'\033[0m'+" seconds") 95 | print(colored("[INFO] *** The LIBRA is performed SUCCESSFULY and the results are SAVED ***", 'green')) 96 | 97 | 98 | 99 | ############################################################################### 100 | ############################################################################### 101 | ############################################################################### 102 | ############################################################################### 103 | ############################################################################### 104 | ############################################################################### Running the code 105 | if __name__ == "__main__": 106 | RUN = run_libra() 107 | RUN.main_function() 108 | -------------------------------------------------------------------------------- /get_info.py: -------------------------------------------------------------------------------- 1 | import os, json 2 | 3 | 4 | 5 | def read_json(obj): 6 | Path, File = os.path.split(obj.model_path) 7 | if File.find("_model.h5")>-1: 8 | File = File[:File.find("_model.h5")] 9 | else: 10 | File = File[:File.find("_Model.h5")] 11 | json_path = os.path.join(Path, File+"_data.json") 12 | obj.json = json.loads(open(json_path).read()) 13 | 14 | return(obj) 15 | 16 | 17 | 18 | def get_info_from_network(obj, model_path, Keys_input=[], Keys_output=[]): 19 | obj = read_json(obj) 20 | 21 | Path, _ = os.path.split(model_path) 22 | 23 | parameters = dict() 24 | Parameters_file = open(os.path.join(Path, "parameters.txt"), "r") 25 | data_in_param_file = Parameters_file.readlines() 26 | 27 | for item in data_in_param_file: 28 | if item.find('\n')>-1: 29 | item = item[:item.find('\n')] 30 | key, value = item.split(':', 1) 31 | parameters[key]=value 32 | 33 | 34 | for Key_i, Key_o in zip(Keys_input, Keys_output): 35 | try: 36 | value = parameters[Key_i] 37 | try: 38 | setattr(obj, Key_o, int( value )) 39 | except: 40 | setattr(obj, Key_o, value) 41 | except: 42 | Key_o = "NA" 43 | 44 | return(obj) 45 | -------------------------------------------------------------------------------- /initialize_variables.py: -------------------------------------------------------------------------------- 1 | import argparse, os 2 | 3 | 4 | def set_argparse(argv): 5 | ap = argparse.ArgumentParser() 6 | ap.add_argument("-i", "--input_dataset", 7 | default='home/ohm/Desktop/data/train/', 8 | help="Path to input dataset") 9 | 10 | ap.add_argument("-o", "--output_path", 11 | default='home/ohm/Desktop/data/train/', 12 | help="Path for saving results file") 13 | 14 | 15 | # models info 16 | ap.add_argument("-wsm", "--weight_selection_method", default="bv", 17 | help="How to select the best weights") 18 | 19 | ap.add_argument("-m", "--general_model_path", 20 | default="/home/ohm/Desktop/Net", 21 | help="A general path to where the models are saved") 22 | 23 | ap.add_argument("-ma", "--model_path_air", 24 | default="network_model.h5", 25 | help="The name of the saved and trained air_pec_breast model") 26 | 27 | ap.add_argument("-mp", "--model_path_pec", 28 | default="network_model.h5", 29 | help="The name of the saved and trained pec_breast model") 30 | 31 | ap.add_argument("-md", "--model_path_density", 32 | default="network_model.pkl", 33 | help="The name of the saved and trained breast density model") 34 | 35 | 36 | # all images to be saved in; you can go with defaults 37 | ap.add_argument("-sfnna", "--saving_folder_name_net_air", 38 | default="air_net_data", 39 | help="Foldername for saving the preprocessed air results") 40 | 41 | ap.add_argument("-sfnnp", "--saving_folder_name_net_pec", 42 | default="pec_net_data", 43 | help="Foldername for saving preprocessing pectoral results") 44 | 45 | ap.add_argument("-sfntbm", "--saving_folder_name_temp_breast_masks", 46 | default="breast_temp_masks", 47 | help="Foldername for saving temp resutls out of the pectoral cnn") 48 | 49 | ap.add_argument("-sfnfni", "--saving_folder_name_final_masked_normalized_images", 50 | default="final_images", 51 | help="Foldername to final masked and normalized images") 52 | 53 | ap.add_argument("-sfnbd", "--saving_folder_name_breast_density", 54 | default="breast_density", 55 | help="Foldernam to breast density results") 56 | 57 | 58 | # GPU CPU conditions 59 | ap.add_argument("-mc", "--multi_cpu", type=int, default=0, 60 | help="If you want to use maximum power of PC using multi core CPUs, ."+ 61 | "should be one. The defualt (zero) is using just one core.") 62 | 63 | ap.add_argument("-not", "--number_of_threads", type=int, default=10, 64 | help="How many threads for each CPU core to be considered.") 65 | 66 | ap.add_argument("-cm", "--core_multiplier", type=int, default=4, 67 | help="How many batches to be open to wait for clsoign Queue.") 68 | 69 | ap.add_argument("-ng", "--num_gpu", type=int, default=1, 70 | help="Number of GPU for being used in training. 0 means run by CPU.") 71 | 72 | ap.add_argument("-tbs", "--test_batch_size", type=int, default=10, 73 | help="The number of images in test batch size.") 74 | 75 | ap.add_argument("-tow", "--timeout_waiting", type=int, default=180, 76 | help="Timeout waiting value that if the time exceed than this "+ 77 | "number the tasks will break for density map generation step. "+ 78 | "The default is 3 minutes for each job.") 79 | 80 | 81 | # Other parametersget_network_segmentation 82 | ap.add_argument("-lt", "--libra_training", type=int, default=0, 83 | help="If one, then, its for training.") 84 | 85 | ap.add_argument("-fb", "--find_bottom", default="1", 86 | help="if this is one, it tries to remove the bottom.") 87 | 88 | ap.add_argument("-fpm", "--find_pacemaker", default=0, 89 | help="If this is one, it will remove the pacemakers by replacing it with minimum.") 90 | 91 | ap.add_argument("-lsm", "--libra_segmentation_method", default="Libra", 92 | help="It can be Libra or Exaturated.") 93 | 94 | ap.add_argument("-po", "--print_off", default="1", 95 | help="I just limits the printing to log if it si one; this is needed for batch processing.") 96 | 97 | ap.add_argument("-fis", "--final_image_size", type=int, default=512, 98 | help="This number should be selected based on the trained network. Keep it constant!") 99 | 100 | 101 | # if you want to use just one specific part of the method 102 | ap.add_argument("-wttbd", "--which_task_to_be_done", 103 | default="all", 104 | help="This is a flag to show which task/s to be performed. It is really useful " + 105 | "in training or for running specific part. The options are: " + 106 | "all, a_a (after preprocessing_air air), a_c_a (after cnn air), " + 107 | "a_p (after pectoral preprocessing), a_c_p (after pectoral cnn), " + 108 | "j_s (just segmentation)") 109 | 110 | ap.add_argument("-rii", "--remove_intermediate_images", 111 | default="K", help="R is removing and K is keeping them") 112 | 113 | args = vars(ap.parse_args(argv)) 114 | 115 | return(args) 116 | 117 | 118 | 119 | def get_variables(obj, args): 120 | obj.output_path = args["output_path"] 121 | obj.input_data = args["input_dataset"] 122 | 123 | 124 | obj.general_model_path = args["general_model_path"] 125 | obj.model_path_air = args["model_path_air"] 126 | obj.model_path_air = os.path.join(obj.general_model_path, "air", obj.model_path_air) 127 | obj.model_path_pec = args["model_path_pec"] 128 | obj.model_path_pec = os.path.join(obj.general_model_path, "pectoral", obj.model_path_pec) 129 | obj.model_path_density = args["model_path_density"] 130 | obj.model_path_density = os.path.join(obj.general_model_path, "density", obj.model_path_density) 131 | 132 | 133 | obj.saving_folder_name_net_air = args["saving_folder_name_net_air"] 134 | obj.saving_folder_name_net_air = os.path.join(obj.saving_folder_name_net_air, "image") 135 | obj.saving_folder_name_net_pec = args["saving_folder_name_net_pec"] 136 | obj.saving_folder_name_net_pec = os.path.join(obj.saving_folder_name_net_pec, "image") 137 | obj.saving_folder_name_breast_density = args["saving_folder_name_breast_density"] 138 | obj.saving_folder_name_temp_breast_masks = args["saving_folder_name_temp_breast_masks"] 139 | obj.saving_folder_name_final_masked_normalized_images = args["saving_folder_name_final_masked_normalized_images"] 140 | obj.saving_folder_name_final_masked_normalized_images = os.path.join(obj.saving_folder_name_final_masked_normalized_images, "image") 141 | 142 | 143 | obj.air_seg_prefix = "_final_air_predict" 144 | obj.pec_seg_prefix = "_final_pec_predict" 145 | 146 | 147 | obj.num_gpu = args["num_gpu"] 148 | obj.print_off = args["print_off"] 149 | obj.find_bottom = args["find_bottom"] 150 | obj.find_pacemaker = args["find_pacemaker"] 151 | obj.test_batch_size = args["test_batch_size"] 152 | obj.final_image_size = args["final_image_size"] 153 | obj.libra_training = int(args["libra_training"]) 154 | obj.which_task_to_be_done = args["which_task_to_be_done"] 155 | obj.weight_selection_method = args["weight_selection_method"] 156 | obj.libra_segmentation_method = args["libra_segmentation_method"] 157 | obj.remove_intermediate_images = args["remove_intermediate_images"] 158 | 159 | 160 | obj.timeout_waiting = args["timeout_waiting"] 161 | 162 | obj.multi_cpu = args["multi_cpu"] 163 | obj.core_multiplier = args["core_multiplier"] 164 | obj.number_of_threads = args["number_of_threads"] 165 | 166 | return(obj) 167 | -------------------------------------------------------------------------------- /just_original_image_preprocessing.py: -------------------------------------------------------------------------------- 1 | # From python packages 2 | from time import time 3 | from termcolor import colored 4 | import cv2, os, argparse, pydicom, logging 5 | 6 | 7 | # From my packages 8 | from breast_needed_functions import Normalize_Image 9 | from breast_needed_functions import object_oriented_preprocessing, Remove_Top_Below_Side_effect 10 | 11 | ################################## This script is for training the svm 12 | ap = argparse.ArgumentParser() 13 | ap.add_argument("-o", "--output_path", required=False, default='./output', 14 | help="path for saving results file") 15 | 16 | ap.add_argument("-i", "--input_dicom", required=False, default='Full_path_to_dicom_file', 17 | help="path for input files") 18 | 19 | ap.add_argument("-if", "--image_format", required=False, default='.png', 20 | help="The image format for saving") 21 | 22 | 23 | args = vars(ap.parse_args()) 24 | 25 | 26 | 27 | class Segmentor(object): # The main class 28 | def __init__(self): 29 | ######################################################################## Initial 30 | ######################################################################## Values 31 | self.Case = args["input_dicom"] 32 | self.output_path = args["output_path"] 33 | self.image_format = args["image_format"] 34 | 35 | 36 | 37 | def Main_Loop_Function(self): 38 | ######################################################################## Couple of 39 | ######################################################################## initializations 40 | T_Start = time() 41 | # no output path = return the results in the same path as dataset 42 | if self.output_path == '0': 43 | self.output_path = self.PATH 44 | 45 | if not os.path.exists(self.output_path): 46 | os.makedirs(self.output_path) 47 | 48 | 49 | #################################################################### Loading Image 50 | #################################################################### & files 51 | # Read Dicom file 52 | try: 53 | self.ds = pydicom.dcmread(self.Case) 54 | self.image = (self.ds.pixel_array).astype("float") 55 | except: 56 | ############ FIX THIS 57 | from medpy.io import load 58 | self.image, self.ds = load(self.Case) 59 | 60 | 61 | # Preprocessing step 62 | self = object_oriented_preprocessing(self) 63 | self = Remove_Top_Below_Side_effect(self) 64 | 65 | 66 | #################################################################### making 67 | #################################################################### the mask and original image 68 | self.image_16bits = Normalize_Image(self.image, 2**16-1, 69 | bits_conversion="uint16", flag_min_edition=True, Min=self.image.min()) 70 | 71 | 72 | Save_name_img = os.path.join(Image_Path, "air_breast_mask", 73 | File+"_16bits_Orginal"+self.image_format) 74 | cv2.imwrite(Save_name_img, self.image_16bits) 75 | 76 | 77 | 78 | 79 | ############################################################################### 80 | ############################################################################### 81 | ############################################################################### 82 | ############################################################################### 83 | ############################################################################### 84 | ############################################################################### Running the code 85 | if __name__ == "__main__": 86 | Info = Segmentor() 87 | Info.Main_Loop_Function() 88 | -------------------------------------------------------------------------------- /libra.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | import warnings 3 | warnings.filterwarnings("ignore") 4 | 5 | from time import time 6 | from glob import glob 7 | from copy import deepcopy 8 | from subprocess import call 9 | from termcolor import colored 10 | import os, pdb, multiprocessing 11 | 12 | from get_info import get_info_from_network 13 | from initialize_variables import set_argparse, get_variables 14 | from needed_functions_CPU import run_loop_multi_cpu_just_org_image 15 | from needed_functions_CPU import run_loop_multi_cpu_pec, run_loop_multi_cpu 16 | from needed_functions_CPU import run_loop_multi_cpu_post, run_loop_multi_cpu_denisty_map 17 | 18 | 19 | ################################################################################ 20 | ################################################################################ 21 | class LIBRA(object): # The main class 22 | def __init__(self): 23 | self.version = "version-1.0" 24 | 25 | 26 | ############################################################################ 27 | ############################################################################ 28 | def parse_args(self, argv=None): 29 | args = set_argparse(argv) 30 | self = get_variables(self, args) 31 | 32 | 33 | if self.multi_cpu == 1: 34 | self.number_cpu_cores = multiprocessing.cpu_count() 35 | else: 36 | self.number_cpu_cores = 1 37 | self.core_multiplier = 1 38 | self.number_of_threads = 1 39 | 40 | self.number_cpu_cores = self.number_cpu_cores*self.number_of_threads 41 | self.max_number_of_process = int(self.core_multiplier*self.number_cpu_cores) 42 | 43 | 44 | self.Keys_txt_file_input = ['image_format', 'num_class', 'save_period', 45 | 'model', 'backbone', 'training_mode', 46 | 'flag_multi_class', 'A_Range', 'image_final_size'] 47 | self.Keys_object = self.Keys_txt_file_input #### this is to name the output keys/ keep it the same 48 | 49 | 50 | self.saving_folder_name_net_pec_temp, folder_name = os.path.split(self.saving_folder_name_net_pec) 51 | self.saving_folder_name_net_pec_temp = self.saving_folder_name_net_pec_temp+"_temp" 52 | self.saving_folder_name_net_pec_temp = os.path.join(self.saving_folder_name_net_pec_temp, folder_name) 53 | 54 | 55 | if self.num_gpu == 0: 56 | self.test_batch_size = 1 57 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" 58 | os.environ["CUDA_VISIBLE_DEVICES"] = "" 59 | 60 | 61 | self.batch_size = self.test_batch_size 62 | self.image_final_size = self.final_image_size 63 | 64 | 65 | self.code_path = os.path.abspath(__file__) 66 | self.code_path,C = os.path.split(self.code_path) 67 | 68 | if not os.path.exists(self.output_path): os.makedirs(self.output_path) 69 | 70 | self.T_Start = time() 71 | 72 | 73 | ############################################################################ 74 | ############################################################################ 75 | def get_info_based_on_air_cnn(self): 76 | print(colored("[INFO] Loading required info.", 'cyan')) 77 | self.model_path = self.model_path_air 78 | self = get_info_from_network(self, self.model_path, 79 | self.Keys_txt_file_input, self.Keys_object) 80 | 81 | 82 | 83 | ############################################################################ 84 | ############################################################################ 85 | def run_just_orginal_image_preprocessing(self): 86 | print(colored("[INFO] Just original image preprocessing.", 'cyan')) 87 | T_Start = time() 88 | 89 | if self.input_data[-4:]!=".dcm": 90 | self.Cases = sorted(glob(os.path.join(self.input_data, "*dcm"))) 91 | else: 92 | self.Cases = [deepcopy(self.input_data)] 93 | 94 | Image_Path = os.path.join(self.output_path, self.saving_folder_name_net_air) 95 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 96 | 97 | if self.multi_cpu == 1: 98 | batch_processes = [] 99 | Q = multiprocessing.Queue() 100 | Total_processes = 0 101 | for N, self.Case in enumerate(self.Cases): 102 | p = multiprocessing.Process(target=run_loop_multi_cpu_just_org_image, 103 | args=(self, self.Case, self.code_path, )) 104 | batch_processes.append(p) 105 | p.start() 106 | 107 | if len(batch_processes)>self.number_cpu_cores or N+1 == len(self.Cases): 108 | Total_processes += self.number_cpu_cores 109 | for j in batch_processes: 110 | j.join() 111 | if Total_processes > self.max_number_of_process: 112 | Q.close() 113 | Q.join_thread() 114 | Q = multiprocessing.Queue() 115 | Total_processes = 0 116 | batch_processes = [] 117 | 118 | else: 119 | for self.Case in self.Cases: 120 | Path, File = os.path.split(self.Case) 121 | File = File[:-4] 122 | 123 | call(["python3", os.path.join(self.code_path, "just_original_image_preprocessing.py"), "-i", 124 | self.Case, "-o", self.output_path, "-if", self.image_format]) 125 | 126 | T_End = time() 127 | print("[INFO] The total elapsed time (for all files in just original image preprocessing step): "+'\033[1m'+ \ 128 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 129 | 130 | 131 | ############################################################################ 132 | ############################################################################ 133 | def run_air_preprocessing(self): 134 | print(colored("[INFO] Air segmentation preprocessing.", 'cyan')) 135 | 136 | T_Start = time() 137 | 138 | if self.input_data[-4:]!=".dcm": 139 | self.Cases = sorted(glob(os.path.join(self.input_data, "*dcm"))) 140 | else: 141 | self.Cases = [deepcopy(self.input_data)] 142 | 143 | Image_Path = os.path.join(self.output_path, self.saving_folder_name_net_air) 144 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 145 | print("[INFO] Saving path for the summary of this step is "+Image_Path) 146 | 147 | if self.multi_cpu == 1: 148 | batch_processes = [] 149 | Q = multiprocessing.Queue() 150 | Total_processes = 0 151 | for N, self.Case in enumerate(self.Cases): 152 | p = multiprocessing.Process(target=run_loop_multi_cpu, 153 | args=(self, self.Case, self.code_path, )) 154 | batch_processes.append(p) 155 | p.start() 156 | 157 | if len(batch_processes)>self.number_cpu_cores or N+1 == len(self.Cases): 158 | Total_processes += self.number_cpu_cores 159 | for j in batch_processes: 160 | j.join() 161 | if Total_processes > self.max_number_of_process: 162 | Q.close() 163 | Q.join_thread() 164 | Q = multiprocessing.Queue() 165 | Total_processes = 0 166 | batch_processes = [] 167 | 168 | else: 169 | for self.Case in self.Cases: 170 | Path, File = os.path.split(self.Case) 171 | File = File[:-4] 172 | 173 | call(["python3", os.path.join(self.code_path, "preprocessing.py"), "-i", 174 | self.Case, "-o", self.output_path, "-if", self.image_format, 175 | "-po", self.print_off, "-sfn", self.saving_folder_name_net_air, 176 | "-ar", str(self.A_Range), "-fis", str(self.final_image_size), 177 | "-lsm", self.libra_segmentation_method, "-fpm", self.find_pacemaker]) 178 | 179 | T_End = time() 180 | print("[INFO] The total elapsed time (for all files in air preprocessing step): "+'\033[1m'+ \ 181 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 182 | 183 | 184 | ############################################################################ 185 | ############################################################################ 186 | def run_air_cnn(self): 187 | from load_models import get_network_segmentation 188 | from needed_functions_GPU import test_network_air 189 | 190 | T_Start = time() 191 | print(colored("[INFO] Air segmentation using CNN is started.", 'cyan')) 192 | self = get_network_segmentation(self, self.model_path, 193 | self.Keys_txt_file_input, self.Keys_object) 194 | 195 | self = test_network_air(self) 196 | 197 | T_End = time() 198 | print("[INFO] The total elapsed time (for all files in air CNN step): "+'\033[1m'+ \ 199 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 200 | 201 | print(colored("[INFO] Air segmentation using CNN is done.", 'green')) 202 | 203 | 204 | ############################################################################ 205 | ############################################################################ 206 | def get_info_based_on_pec_cnn(self): 207 | print(colored("[INFO] Loading required info.", 'cyan')) 208 | self.model_path = self.model_path_pec 209 | self = get_info_from_network(self, self.model_path, 210 | self.Keys_txt_file_input, self.Keys_object) 211 | 212 | 213 | ############################################################################ 214 | ############################################################################ 215 | def run_pec_preprocessing(self): 216 | print(colored("[INFO] Preprocessing for breast vs pectroal segmentation.", 'cyan')) 217 | T_Start = time() 218 | 219 | Path_segmented_air = os.path.join(self.output_path, self.saving_folder_name_net_pec_temp) 220 | if self.input_data[-4:]!=".dcm": 221 | self.Cases = sorted(glob(os.path.join(Path_segmented_air, "*"+self.image_format))) 222 | else: 223 | _, file_name = os.path.split(self.input_data) 224 | temp_path = os.path.join(Path_segmented_air, file_name[:-4]+ 225 | self.air_seg_prefix+self.image_format) 226 | self.Cases = [temp_path] 227 | 228 | Image_Path = os.path.join(self.output_path, self.saving_folder_name_net_pec) 229 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 230 | print("[INFO] Saving path for the summary of this step is "+Image_Path) 231 | 232 | if self.multi_cpu == 1: 233 | batch_processes = [] 234 | Q = multiprocessing.Queue() 235 | Total_processes = 0 236 | for N, self.Case in enumerate(self.Cases): 237 | p = multiprocessing.Process(target=run_loop_multi_cpu_pec, 238 | args=(self, self.Case, self.code_path, )) 239 | batch_processes.append(p) 240 | p.start() 241 | 242 | if len(batch_processes)>self.number_cpu_cores or N+1 == len(self.Cases): 243 | Total_processes += self.number_cpu_cores 244 | for j in batch_processes: 245 | j.join() 246 | if Total_processes > self.max_number_of_process: 247 | Q.close() 248 | Q.join_thread() 249 | Q = multiprocessing.Queue() 250 | Total_processes = 0 251 | batch_processes = [] 252 | else: 253 | for self.Case in self.Cases: 254 | _, File = os.path.split(self.Case) 255 | self.File = File[:File.find(self.air_seg_prefix)] 256 | 257 | call(["python3", os.path.join(self.code_path, "preprocessing_pec.py"), 258 | "-i", self.Case, "-if", self.image_format, "-cn", self.File, 259 | "-po", self.print_off, "-sfn", self.saving_folder_name_net_pec, 260 | "-ar", str(self.A_Range), "-fis", str(self.final_image_size), 261 | "-o", self.output_path]) 262 | 263 | T_End = time() 264 | print("[INFO] The total elapsed time (for all files in pectroal preprocessing step): "+'\033[1m'+ \ 265 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 266 | 267 | 268 | ############################################################################ 269 | ############################################################################ 270 | def run_pec_cnn(self): 271 | from load_models import get_network_segmentation 272 | from needed_functions_GPU import test_network_pec 273 | 274 | T_Start = time() 275 | 276 | print(colored("[INFO] Pectoral segmentation using CNN is started.", 'cyan')) 277 | self = get_network_segmentation(self, self.model_path, 278 | self.Keys_txt_file_input, self.Keys_object) 279 | self = test_network_pec(self) 280 | 281 | T_End = time() 282 | print("[INFO] The total elapsed time (for all files in pectroal CNN): "+'\033[1m'+ \ 283 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 284 | 285 | print(colored("[INFO] Pectoral segmentation using CNN is done.", 'green')) 286 | 287 | 288 | ############################################################################ 289 | ############################################################################ 290 | def run_breast_postprocessing(self): 291 | print(colored("[INFO] Postprocessing for breast vs pectroal segmentation.", 'cyan')) 292 | T_Start = time() 293 | 294 | Path_segmented_pectoral = os.path.join(self.output_path, self.saving_folder_name_temp_breast_masks) 295 | if self.input_data[-4:]!=".dcm": 296 | self.Cases = sorted(glob(os.path.join(Path_segmented_pectoral, "*"+self.image_format))) 297 | else: 298 | _, file_name = os.path.split(self.input_data) 299 | temp_path = os.path.join(Path_segmented_pectoral, file_name[:-4]+ 300 | self.pec_seg_prefix+self.image_format) 301 | self.Cases = [temp_path] 302 | 303 | Image_Path = os.path.join(self.output_path, self.saving_folder_name_final_masked_normalized_images) 304 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 305 | print("[INFO] Saving path for the summary of this step is "+Image_Path) 306 | 307 | if self.multi_cpu == 1: 308 | batch_processes = [] 309 | Q = multiprocessing.Queue() 310 | Total_processes = 0 311 | for N, self.Case in enumerate(self.Cases): 312 | p = multiprocessing.Process(target=run_loop_multi_cpu_post, 313 | args=(self, self.Case, self.code_path, )) 314 | batch_processes.append(p) 315 | p.start() 316 | 317 | if len(batch_processes)>self.number_cpu_cores or N+1 == len(self.Cases): 318 | Total_processes += self.number_cpu_cores 319 | for j in batch_processes: 320 | j.join() 321 | if Total_processes > self.max_number_of_process: 322 | Q.close() 323 | Q.join_thread() 324 | Q = multiprocessing.Queue() 325 | Total_processes = 0 326 | batch_processes = [] 327 | 328 | else: 329 | for self.Case in self.Cases: 330 | _, File = os.path.split(self.Case) 331 | self.File = File[:File.find(self.pec_seg_prefix)] 332 | 333 | call(["python3", os.path.join(self.code_path, "postprocessing.py"), 334 | "-i", self.Case, "-if", self.image_format, "-cn", self.File, 335 | "-po", self.print_off, "-sfn", self.saving_folder_name_final_masked_normalized_images, 336 | "-ar", str(self.A_Range), "-fis", str(self.final_image_size), 337 | "-o", self.output_path, "-fb", self.find_bottom]) 338 | 339 | T_End = time() 340 | print("[INFO] The total elapsed time (for all files in breast postprocessing step): "+'\033[1m'+ \ 341 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 342 | 343 | 344 | ############################################################################ 345 | ############################################################################ 346 | def get_info_based_on_density_cnn(self): 347 | from needed_functions_GPU import test_birads 348 | from load_models import get_network_classification 349 | 350 | print(colored("[INFO] Loading breast density CNN.", 'cyan')) 351 | self.model_path = self.model_path_density 352 | self = get_network_classification(self, self.model_path, 353 | self.Keys_txt_file_input, self.Keys_object) 354 | 355 | 356 | ############################################################################ 357 | ############################################################################ 358 | def run_BIRADS_cnn(self): 359 | T_Start = time() 360 | 361 | print(colored("[INFO] BIRADS Prediction using CNN is started.", 'cyan')) 362 | self = test_birads(self) 363 | print(colored("[INFO] BIRADS Prediction using CNN is done.", 'green')) 364 | 365 | T_End = time() 366 | print("[INFO] The total elapsed time (for all files in BIRADS CNN): "+'\033[1m'+ \ 367 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 368 | 369 | 370 | ############################################################################ 371 | ############################################################################ 372 | def run_feature_extraction(self): 373 | print(colored("[INFO] Extarcing featrues for feature maps.", 'cyan')) 374 | print("[INFO] Saving summary path is "+self.saving_folder_name_breast_density) 375 | T_Start = time() 376 | 377 | Path_density_map = os.path.join(self.output_path, self.saving_folder_name_final_masked_normalized_images) 378 | if self.input_data[-4:]!=".dcm": 379 | self.Cases = sorted(glob(os.path.join(Path_density_map, "*"+self.image_format))) 380 | else: 381 | _, file_name = os.path.split(self.input_data) 382 | temp_path = os.path.join(Path_density_map, file_name[:-4]+self.image_format) 383 | self.Cases = [temp_path] 384 | 385 | Image_Path = os.path.join(self.output_path, self.saving_folder_name_breast_density) 386 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 387 | print("[INFO] Saving path for the summary of this step is "+Image_Path) 388 | 389 | 390 | if self.multi_cpu == 1: 391 | batch_processes = [] 392 | Q = multiprocessing.Queue() 393 | Total_processes = 0 394 | self.timeout_waiting *= self.max_number_of_process 395 | 396 | for N, self.Case in enumerate(self.Cases): 397 | p = multiprocessing.Process(target=run_loop_multi_cpu_denisty_map, 398 | args=(self, self.Case, self.code_path, )) 399 | batch_processes.append(p) 400 | p.start() 401 | 402 | if len(batch_processes)>self.number_cpu_cores or N+1 == len(self.Cases): 403 | Total_processes += self.number_cpu_cores 404 | for j in batch_processes: 405 | j.join() 406 | if Total_processes > self.max_number_of_process: 407 | Q.close() 408 | Q.join_thread() 409 | Q = multiprocessing.Queue() 410 | Total_processes = 0 411 | batch_processes = [] 412 | 413 | else: 414 | for self.Case in self.Cases: 415 | _, File = os.path.split(self.Case) 416 | self.File = File[:-4] 417 | 418 | call(["python3", os.path.join(self.code_path, "density_map_feature_based.py"), 419 | "-i", self.Case, "-if", self.image_format, "-cn", self.File, 420 | "-po", self.print_off, "-sfn", self.saving_folder_name_breast_density, 421 | "-ar", str(self.A_Range), "-fis", str(self.final_image_size), 422 | "-o", self.output_path, "-lt", str(self.libra_training), 423 | "-pttm", self.model_path_density, "-rii", self.remove_intermediate_images, 424 | "-to", str(self.timeout_waiting)]) 425 | 426 | T_End = time() 427 | print("[INFO] The total elapsed time (for all files in density mask step): "+'\033[1m'+ \ 428 | colored(str(round(T_End-T_Start, 2)), 'red')+'\033[0m'+" seconds") 429 | -------------------------------------------------------------------------------- /load_models.py: -------------------------------------------------------------------------------- 1 | import os, pdb 2 | from keras.models import load_model 3 | from keras.metrics import categorical_accuracy 4 | from keras.losses import categorical_crossentropy 5 | # other packages 6 | from get_info import get_info_from_network 7 | from weight_selection import weight_selection 8 | # the metrics 9 | from metrics import sensitivity, sensitivity_weighted 10 | from metrics import dice_weighted, loss_dice_weighted, dice 11 | from metrics import generalised_dice, generalised_dice_loss, loss_dice 12 | from metrics import dice_weighted_traditional, loss_dice_weighted_traditional 13 | from metrics import generalised_wasserstein_dice, generalised_wasserstein_dice_loss, generalised_wasserstein_dice_loss2 14 | # new metrics and losses 15 | from seg_metrics import Dice, IOU 16 | from seg_losses import jaccard_loss, bce_jaccard_loss, dice_loss, bce_dice_loss 17 | 18 | 19 | 20 | def get_network_segmentation(obj, model_path, Keys_input=[], Keys_output=[], max_index=-1): 21 | obj = get_info_from_network(obj, model_path, Keys_input, Keys_output) 22 | 23 | if obj.training_mode == "3_Class_Breast": 24 | obj.model = load_model(model_path, custom_objects={"dice_weighted": dice_weighted, 25 | "loss_dice_weighted": loss_dice_weighted, "sensitivity": sensitivity, 26 | "sensitivity_weighted": sensitivity_weighted, "dice":dice}) 27 | obj = weight_selection(obj, "dice_weighted", max_index) 28 | 29 | elif obj.training_mode == "3_Class_Breast_N": 30 | obj.model = load_model(model_path, custom_objects={"dice_weighted": dice_weighted, 31 | "loss_dice_weighted": loss_dice_weighted, "sensitivity": sensitivity, 32 | "sensitivity_weighted": sensitivity_weighted, "dice":dice, 33 | "dice_weighted_traditional": dice_weighted_traditional}) 34 | obj = weight_selection(obj, "dice_weighted", max_index) 35 | 36 | elif obj.training_mode == "3_Class_Breast_Traditional": 37 | obj.model = load_model(model_path, custom_objects={"dice_weighted_traditional": dice_weighted_traditional, 38 | "loss_dice_weighted_traditional": loss_dice_weighted_traditional, "sensitivity": sensitivity, 39 | "sensitivity_weighted": sensitivity_weighted, "dice":dice, "dice_weighted": dice_weighted}) 40 | obj = weight_selection(obj, "dice_weighted_traditional", max_index) 41 | 42 | elif obj.training_mode == "N_Class_General": 43 | obj.model = load_model(model_path, custom_objects={"loss_dice": loss_dice, "sensitivity": sensitivity, 44 | "dice":dice}) 45 | obj = weight_selection(obj, "dice", max_index) 46 | 47 | elif obj.training_mode == "N_Class_Generalize_2017": 48 | obj.model = load_model(model_path, custom_objects={"generalised_dice": generalised_dice, 49 | "generalised_dice_loss": generalised_dice_loss, "sensitivity": sensitivity, 50 | "dice":dice}) 51 | obj = weight_selection(obj, "generalised_dice", max_index) 52 | 53 | elif obj.training_mode == "Categorical_loss": 54 | obj.model = load_model(model_path, custom_objects={"categorical_accuracy": categorical_accuracy, 55 | "categorical_crossentropy": categorical_crossentropy, "sensitivity": sensitivity, 56 | "dice":dice}) 57 | obj = weight_selection(obj, "categorical_accuracy", max_index) 58 | 59 | elif obj.training_mode == "jaccard_loss": 60 | obj.model = load_model(model_path, custom_objects={"jaccard_loss": jaccard_loss, 61 | "Dice": Dice, "IOU": IOU, "sensitivity":sensitivity}) 62 | obj = weight_selection(obj, "IOU", max_index) 63 | 64 | elif obj.training_mode == "dice_loss": 65 | obj.model = load_model(model_path, custom_objects={"dice_loss": dice_loss, 66 | "Dice": Dice, "IOU": IOU, "sensitivity":sensitivity}) 67 | obj = weight_selection(obj, "Dice", max_index) 68 | 69 | elif obj.training_mode == "wasserstein_3_Class_Breast": 70 | obj.model = load_model(model_path, custom_objects={"generalised_wasserstein_dice": generalised_wasserstein_dice, 71 | "generalised_wasserstein_dice_loss": generalised_wasserstein_dice_loss, "sensitivity": sensitivity, 72 | "dice_weighted": dice_weighted, "dice":dice, 73 | "sensitivity_weighted": sensitivity_weighted}) 74 | obj = weight_selection(obj, "generalised_wasserstein_dice", max_index) 75 | 76 | elif obj.training_mode == "wasserstein": 77 | obj.model = load_model(model_path, custom_objects={"generalised_wasserstein_dice": generalised_wasserstein_dice, 78 | "generalised_wasserstein_dice_loss2": generalised_wasserstein_dice_loss2, "sensitivity": sensitivity, 79 | "dice":dice, "sensitivity_weighted": sensitivity_weighted}) 80 | obj = weight_selection(obj, "generalised_wasserstein_dice", max_index) 81 | 82 | return(obj) 83 | 84 | 85 | 86 | 87 | def get_network_classification(obj, model_path, Keys_input, Keys_output, max_index=-1): 88 | obj = get_info_from_network(obj, model_path, Keys_input, Keys_output, default_values=None) 89 | 90 | obj.model = load_model(model_path, custom_objects={"categorical_crossentropy": categorical_crossentropy}) 91 | obj = weight_selection(obj, "acc", max_index) 92 | 93 | return(obj) 94 | -------------------------------------------------------------------------------- /metrics.py: -------------------------------------------------------------------------------- 1 | import pdb 2 | import numpy as np 3 | import tensorflow as tf 4 | from keras import backend as K 5 | 6 | # this package is for metrics and related loss fucntions 7 | 8 | # all these functions are for two classes even if there is a third class 9 | # it will be ignored as it is background we do not care about it 10 | 11 | 12 | class Class_weighting: 13 | def __init__(self, y_true, y_pred): 14 | self.y_true = y_true 15 | self.y_pred = y_pred 16 | self.dimension = K.int_shape(y_pred) 17 | 18 | def general_weighting(self): 19 | for i in range(self.dimension[-1]): 20 | desired_class = K.sum(self.y_true[:,:,:,i], axis=(1,2)) 21 | all = K.sum(self.y_true, axis=(1,2,3)) 22 | weight_factor = (all-desired_class+ K.epsilon())/(all+ K.epsilon()) 23 | 24 | setattr(self, "weight"+str(i), weight_factor) 25 | setattr(self, "y_t"+str(i), self.y_true[:,:,:,i]) 26 | setattr(self, "y_p"+str(i), self.y_pred[:,:,:,i]) 27 | 28 | 29 | 30 | def general_dice_weighted(y_true, y_pred): 31 | Weights = Class_weighting(y_true, y_pred) 32 | Weights.general_weighting() 33 | 34 | for i in range(Weights.dimension[-1]): 35 | w = getattr(Weights, "weight"+str(i)) 36 | y1 = getattr(Weights, "y_t"+str(i)) 37 | y2 = getattr(Weights, "y_p"+str(i)) 38 | 39 | Sum = K.sum(y1 * y2, axis=(1,2)) 40 | Sum_true = K.sum(y1, axis=(1,2)) 41 | Sum_pred = K.sum(y2, axis=(1,2)) 42 | 43 | if i == 0: 44 | Nominator = w*K.sum(Sum) 45 | Denominator = w*K.sum(Sum_true) + w*K.sum(Sum_pred) 46 | 47 | else: 48 | Nominator += w*K.sum(Sum) 49 | Denominator += w*K.sum(Sum_true) 50 | 51 | Nominator += K.epsilon() 52 | Denominator += K.epsilon() 53 | 54 | return tf.keras.backend.mean((Nominator/Denominator)) 55 | 56 | def general_loss_dice_weighted(y_true, y_pred): 57 | return ( 1-general_dice_weighted(y_true, y_pred) ) 58 | 59 | 60 | 61 | def dice(y_true, y_pred, smooth=K.epsilon()): 62 | y_true = y_true[..., 1:] 63 | y_pred = y_pred[..., 1:] 64 | intersection = K.sum(y_true * y_pred) 65 | # return (2. * intersection + smooth) / (K.sum(K.square(y_t),-1) + K.sum(K.square(y_p),-1) + smooth) 66 | return (2. * intersection + smooth) / (K.sum(y_true) + K.sum(y_pred) + smooth) 67 | 68 | def loss_dice(y_true, y_pred): 69 | return 1-dice(y_true, y_pred) 70 | 71 | 72 | 73 | def categorical_crossentropy(y_true, y_pred): 74 | return K.categorical_crossentropy(y_true, y_pred) 75 | 76 | 77 | 78 | def weighting_no_background(y_true, y_pred): 79 | Coef = K.int_shape(y_pred)[1]/5 80 | Coef = K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2]/Coef 81 | Coef = 1/Coef 82 | 83 | Weights = [] 84 | Y1 = [] 85 | Y2 = [] 86 | if K.int_shape(y_pred)[-1]==2: 87 | y1 = y_true[:,:,:,-1] 88 | y2 = y_pred[:,:,:,-1] 89 | y_true_class1_w = K.sum(y1, axis=(1,2)) 90 | weight = (y_true_class1_w + 1)/(y_true_class1_w + 1) 91 | 92 | Y1=y1 93 | Y2=y2 94 | Weights=weight 95 | 96 | else: 97 | for Class in range(K.int_shape(y_pred)[-1]-1): 98 | Class += 1 99 | y1 = y_true[:,:,:,Class] 100 | y2 = y_pred[:,:,:,Class] 101 | 102 | y_true_class1_w = K.sum(y1, axis=(1,2)) 103 | y_true_others = K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2] - y_true_class1_w 104 | 105 | weights = (y_true_others)/(K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2]) 106 | 107 | weights = K.cast(weights,'float32') 108 | Condition_inverse = K.greater(weights, Coef) 109 | weights = K.cast(Condition_inverse,'float32')*weights 110 | Condition = K.equal(weights, 0) 111 | weight = weights+K.cast(Condition,'float32') 112 | 113 | Y1.append(y1) 114 | Y2.append(y2) 115 | Weights.append(weight) 116 | return (Y1, Y2, Weights) 117 | 118 | def dice_weighted(y_true, y_pred): 119 | Y1, Y2, Weights = weighting_no_background(y_true, y_pred) 120 | 121 | Sum_weights = 0 122 | if K.int_shape(y_pred)[-1]==2: 123 | Sum = K.sum(Y1 * Y2, axis=(1,2)) 124 | Sum_true = K.sum(Y1, axis=(1,2)) 125 | Sum_pred = K.sum(Y2, axis=(1,2)) 126 | 127 | Nominator = 2*( K.sum(Sum) ) 128 | Denominator = ( K.sum(Sum_true)+K.sum(Sum_pred)+K.epsilon() ) 129 | DICE = tf.keras.backend.mean((Nominator/Denominator)) 130 | Sum_weights = 1 131 | else: 132 | for N, (y1, y2, weight) in enumerate(zip(Y1, Y2, Weights)): 133 | Sum = K.sum(y1 * y2, axis=(1,2)) 134 | Sum_true = K.sum(y1, axis=(1,2)) 135 | Sum_pred = K.sum(y2, axis=(1,2)) 136 | Sum_weights += weight 137 | 138 | Nominator = weight*2*( K.sum(Sum) ) 139 | Denominator = ( K.sum(Sum_true) + K.sum(Sum_pred)+K.epsilon() ) 140 | 141 | if N==0: 142 | DICE = tf.keras.backend.mean((Nominator/Denominator)) 143 | else: 144 | DICE += tf.keras.backend.mean((Nominator/Denominator)) 145 | return DICE/Sum_weights 146 | 147 | def loss_dice_weighted(y_true, y_pred): 148 | return 1-dice_weighted(y_true, y_pred) 149 | 150 | 151 | 152 | def weighting_traditional(y_true, y_pred): 153 | Coef = K.int_shape(y_pred)[1]/5 154 | Coef = K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2]/Coef 155 | Coef = 1/Coef 156 | 157 | Weights = [] 158 | Y1 = [] 159 | Y2 = [] 160 | for Class in range(K.int_shape(y_pred)[-1]): 161 | y1 = y_true[:,:,:,Class] 162 | y2 = y_pred[:,:,:,Class] 163 | 164 | y_true_class1_w = K.sum(y1, axis=(1,2)) 165 | y_true_others = K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2] - y_true_class1_w 166 | 167 | weights = ( y_true_others )/( K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2] ) 168 | 169 | weight = weights 170 | # weights = K.cast(weights,'float32') 171 | # Condition_inverse = K.greater(weights, Coef) 172 | # weights = K.cast(Condition_inverse,'float32')*weights 173 | # Condition = K.equal(weights, 0) 174 | # weight = weights+K.cast(Condition,'float32') 175 | 176 | Y1.append(y1) 177 | Y2.append(y2) 178 | Weights.append(weight) 179 | return (Y1, Y2, Weights) 180 | 181 | def dice_weighted_traditional(y_true, y_pred): 182 | Y1, Y2, Weights = weighting_traditional(y_true, y_pred) 183 | 184 | Sum_weights = 0 185 | for N, (y1, y2, weight) in enumerate(zip(Y1, Y2, Weights)): 186 | Sum = K.sum(y1 * y2, axis=(1,2)) 187 | Sum_true = K.sum(y1, axis=(1,2)) 188 | Sum_pred = K.sum(y2, axis=(1,2)) 189 | Sum_weights += weight 190 | 191 | Nominator = weight*2*( K.sum(Sum) ) 192 | Denominator = ( K.sum(Sum_true) + K.sum(Sum_pred)+K.epsilon() ) 193 | result = tf.keras.backend.mean((Nominator/Denominator)) 194 | 195 | if N==0: 196 | DICE = result 197 | else: 198 | DICE += result 199 | return DICE/Sum_weights 200 | 201 | def loss_dice_weighted_traditional(y_true, y_pred): 202 | return 1-dice_weighted_traditional(y_true, y_pred) 203 | 204 | 205 | 206 | def labels_to_one_hot(ground_truth, num_classes=1): 207 | """ 208 | Converts ground truth labels to one-hot, sparse tensors. 209 | Used extensively in segmentation losses. 210 | 211 | :param ground_truth: ground truth categorical labels (rank `N`) 212 | :param num_classes: A scalar defining the depth of the one hot dimension 213 | (see `depth` of `tf.one_hot`) 214 | :return: one-hot sparse tf tensor 215 | (rank `N+1`; new axis appended at the end) 216 | """ 217 | # read input/output shapes 218 | if isinstance(num_classes, tf.Tensor): 219 | num_classes_tf = tf.to_int32(num_classes) 220 | else: 221 | num_classes_tf = tf.constant(num_classes, tf.int32) 222 | input_shape = tf.shape(ground_truth) 223 | output_shape = tf.concat( 224 | [input_shape, tf.reshape(num_classes_tf, (1,))], 0) 225 | 226 | if num_classes == 1: 227 | # need a sparse representation? 228 | return tf.reshape(ground_truth, output_shape) 229 | 230 | # squeeze the spatial shape 231 | ground_truth = tf.reshape(ground_truth, (-1,)) 232 | # shape of squeezed output 233 | dense_shape = tf.stack([tf.shape(ground_truth)[0], num_classes_tf], 0) 234 | 235 | # create a rank-2 sparse tensor 236 | ground_truth = tf.to_int64(ground_truth) 237 | ids = tf.range(tf.to_int64(dense_shape[0]), dtype=tf.int64) 238 | ids = tf.stack([ids, ground_truth], axis=1) 239 | one_hot = tf.SparseTensor( 240 | indices=ids, 241 | values=tf.ones_like(ground_truth, dtype=tf.float32), 242 | dense_shape=tf.to_int64(dense_shape)) 243 | 244 | # resume the spatial dims 245 | one_hot = tf.sparse_reshape(one_hot, output_shape) 246 | return one_hot 247 | 248 | 249 | 250 | def generalised_dice(y_true, y_pred): 251 | ground_truth = y_true 252 | prediction = y_pred 253 | 254 | prediction = tf.cast(prediction, tf.float32) 255 | if len(ground_truth.shape) == len(prediction.shape): 256 | ground_truth = ground_truth[..., -1] 257 | one_hot = labels_to_one_hot(ground_truth, tf.shape(prediction)[-1]) 258 | 259 | 260 | ref_vol = tf.sparse_reduce_sum(one_hot, reduction_axes=[0]) 261 | intersect = tf.sparse_reduce_sum(one_hot * prediction, 262 | reduction_axes=[0]) 263 | seg_vol = tf.reduce_sum(prediction, 0) 264 | 265 | 266 | weights = tf.reciprocal(tf.square(ref_vol)) 267 | 268 | new_weights = tf.where(tf.is_inf(weights), tf.zeros_like(weights), weights) 269 | weights = tf.where(tf.is_inf(weights), tf.ones_like(weights) * 270 | tf.reduce_max(new_weights), weights) 271 | generalised_dice_numerator = \ 272 | 2 * tf.reduce_sum(tf.multiply(weights, intersect)) 273 | # generalised_dice_denominator = \ 274 | # tf.reduce_sum(tf.multiply(weights, seg_vol + ref_vol)) + 1e-6 275 | generalised_dice_denominator = tf.reduce_sum( 276 | tf.multiply(weights, tf.maximum(seg_vol + ref_vol, 1))) 277 | generalised_dice_score = \ 278 | generalised_dice_numerator / generalised_dice_denominator 279 | generalised_dice_score = tf.where(tf.is_nan(generalised_dice_score), 1.0, 280 | generalised_dice_score) 281 | return generalised_dice_score 282 | 283 | def generalised_dice_loss(y_true, y_pred): 284 | return 1-generalised_dice(y_true, y_pred) 285 | 286 | 287 | 288 | def wasserstein_disagreement_map(prediction, ground_truth, M): 289 | n_classes = K.int_shape(prediction)[-1] 290 | ground_truth = tf.cast(ground_truth, dtype=tf.float64) 291 | prediction = tf.cast(prediction, dtype=tf.float64) 292 | pairwise_correlations = [] 293 | for i in range(n_classes): 294 | for j in range(n_classes): 295 | pairwise_correlations.append( 296 | M[i, j] * tf.multiply(prediction[:,i], ground_truth[:,j])) 297 | wass_dis_map = tf.add_n(pairwise_correlations) 298 | return wass_dis_map 299 | 300 | def generalised_wasserstein_dice(y_true, y_pred): 301 | M_tree_4 = np.array([[0., 1., 1., 1.,], 302 | [1., 0., 0.6, 0.5], 303 | [1., 0.6, 0., 0.7], 304 | [1., 0.5, 0.7, 0.]], dtype=np.float64) 305 | n_classes = K.int_shape(y_pred)[-1] 306 | 307 | ground_truth = tf.cast(tf.reshape(y_true,(-1,n_classes)), dtype=tf.int64) 308 | pred_proba = tf.cast(tf.reshape(y_pred,(-1,n_classes)), dtype=tf.float64) 309 | 310 | M = M_tree_4 311 | delta = wasserstein_disagreement_map(pred_proba, ground_truth, M) 312 | all_error = tf.reduce_sum(delta) 313 | one_hot = tf.cast(ground_truth, dtype=tf.float64) 314 | true_pos = tf.reduce_sum( 315 | tf.multiply(tf.constant(M[0, :n_classes], dtype=tf.float64), one_hot), 316 | axis=1) 317 | true_pos = tf.reduce_sum(tf.multiply(true_pos, 1. - delta), axis=0) 318 | WGDL = (2. * true_pos) / (2. * true_pos + all_error) 319 | return tf.cast(WGDL, dtype=tf.float32) 320 | 321 | def generalised_wasserstein_dice_loss(y_true, y_pred): 322 | return 1- (generalised_wasserstein_dice(y_true, y_pred)+dice_weighted(y_true, y_pred))/2 323 | 324 | def generalised_wasserstein_dice_loss2(y_true, y_pred): 325 | return 1- (generalised_wasserstein_dice(y_true, y_pred)) 326 | 327 | 328 | 329 | def get_iou( gt , pr , n_classes ): 330 | EPS = K.epsilon() 331 | class_wise = np.zeros(n_classes) 332 | for cl in range(n_classes): 333 | intersection = np.sum(( gt == cl )*( pr == cl )) 334 | union = np.sum(np.maximum( ( gt == cl ) , ( pr == cl ) )) 335 | iou = float(intersection)/( union + EPS ) 336 | class_wise[ cl ] = iou 337 | return class_wise 338 | 339 | 340 | 341 | def sensitivity(y_true, y_pred): 342 | y_true = y_true[..., 1:] 343 | y_pred = y_pred[..., 1:] 344 | 345 | true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1))) 346 | possible_positives = K.sum(K.round(K.clip(y_true, 0, 1))) 347 | return true_positives / (possible_positives + K.epsilon()) 348 | 349 | 350 | 351 | def specificity(y_true, y_pred): 352 | true_negatives = K.sum(K.round(K.clip((1-y_true) * (1-y_pred), 0, 1))) 353 | possible_negatives = K.sum(K.round(K.clip(1-y_true, 0, 1))) 354 | return true_negatives / (possible_negatives + K.epsilon()) 355 | 356 | 357 | 358 | def weighting(y_true, y_pred): 359 | if K.int_shape(y_pred)[-1] > 2: 360 | y1 = y_true[:,:,:,1:] 361 | y2 = y_pred[:,:,:,1:] 362 | 363 | y_true_class1_w = K.sum(y1[:,:,:,0], axis=(1,2)) 364 | y_true_class2_w = K.sum(y1[:,:,:,1], axis=(1,2)) 365 | 366 | else: 367 | y1 = y_true[:,:,:,:] 368 | y2 = y_pred[:,:,:,:] 369 | 370 | y_true_class1_w = K.sum(y1, axis=(1,2)) 371 | y_true_class2_w = K.int_shape(y_pred)[1]*K.int_shape(y_pred)[2] - y_true_class1_w 372 | 373 | weight1 = (y_true_class1_w+ 1)/(y_true_class2_w+y_true_class1_w+ 1) 374 | weight2 = (y_true_class2_w+ 1)/(y_true_class2_w+y_true_class1_w+ 1) 375 | return (y1, y2, weight1, weight2) 376 | 377 | def sensitivity_weighted(y_true, y_pred): 378 | y_true, y_pred, weight1, weight2 = weighting(y_true, y_pred) 379 | 380 | true_positives = K.sum(K.round(K.clip(y_true[:,:,:,0] * y_pred[:,:,:,0], 0, 1))) 381 | possible_positives = K.sum(K.round(K.clip(y_true[:,:,:,0], 0, 1))) 382 | sensitivity = weight2 * true_positives / (possible_positives + K.epsilon()) 383 | 384 | true_positives = K.sum(K.round(K.clip(y_true[:,:,:,1] * y_pred[:,:,:,1], 0, 1))) 385 | possible_positives = K.sum(K.round(K.clip(y_true[:,:,:,1], 0, 1))) 386 | sensitivity = sensitivity+weight1 * true_positives / (possible_positives + K.epsilon()) 387 | return K.minimum(sensitivity, K.ones(shape=1)) 388 | 389 | def specificity_weighted(y_true, y_pred): 390 | y_true = y_true[..., 1:] 391 | y_pred = y_pred[..., 1:] 392 | 393 | y_true, y_pred, weight1, weight2 = weighting(y_true, y_pred) 394 | 395 | true_negatives = K.sum(K.round(K.clip( (1-y_true[:,:,:,0]) * (1-y_pred[:,:,:,0]), 0, 1))) 396 | possible_negatives = K.sum(K.round(K.clip(1-y_true[:,:,:,0], 0, 1))) 397 | specifcity = weight2 * true_negatives / (possible_negatives + K.epsilon()) 398 | 399 | true_negatives = K.sum(K.round(K.clip( (1-y_true[:,:,:,1]) * (1-y_pred[:,:,:,1]), 0, 1))) 400 | possible_negatives = K.sum(K.round(K.clip(1-y_true[:,:,:,1], 0, 1))) 401 | specifcity = specifcity + weight1 * true_negatives / (possible_negatives + K.epsilon()) 402 | return K.minimum(specifcity, K.ones(shape=1)) 403 | -------------------------------------------------------------------------------- /needed_functions_CPU.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os, pdb, cv2 3 | import pandas as pd 4 | from subprocess import call 5 | from termcolor import colored 6 | import matplotlib.pyplot as plt 7 | from segmentation_tools import Normalize_Image, find_largest_obj 8 | 9 | 10 | 11 | 12 | def run_loop_multi_cpu(obj, image_path, code_path): 13 | Path, File = os.path.split(obj.Case) 14 | if File[-4:] == ".dcm": obj.File = File[:-4] 15 | 16 | call(["python3", os.path.join(code_path, "preprocessing.py"), "-i", 17 | image_path, "-o", obj.output_path, "-if", obj.image_format, 18 | "-po", obj.print_off, "-sfn", obj.saving_folder_name_net_air, 19 | "-ar", str(obj.A_Range), "-fis", str(obj.final_image_size), 20 | "-lsm", obj.libra_segmentation_method, "-fpm", obj.find_pacemaker]) 21 | 22 | 23 | 24 | def run_loop_multi_cpu_just_org_image(obj, image_path, code_path): 25 | call(["python3", os.path.join(code_path, "run_loop_multi_cpu_just_org_image.py"), "-i", 26 | image_path, "-o", obj.output_path, "-if", obj.image_format]) 27 | 28 | 29 | 30 | def run_loop_multi_cpu_pec(obj, image_path, code_path): 31 | _, File = os.path.split(obj.Case) 32 | obj.File = File[:File.find(obj.air_seg_prefix)] 33 | 34 | call(["python3", os.path.join(code_path, "preprocessing_pec.py"), 35 | "-i", image_path, "-if", obj.image_format, "-cn", obj.File, 36 | "-po", obj.print_off, "-sfn", obj.saving_folder_name_net_pec, 37 | "-ar", str(obj.A_Range), "-fis", str(obj.final_image_size), 38 | "-o", obj.output_path]) 39 | 40 | 41 | 42 | def run_loop_multi_cpu_post(obj, image_path, code_path): 43 | _, File = os.path.split(obj.Case) 44 | obj.File = File[:File.find(obj.pec_seg_prefix)] 45 | 46 | call(["python3", os.path.join(code_path, "postprocessing.py"), 47 | "-i", image_path, "-if", obj.image_format, "-cn", obj.File, 48 | "-sfn", obj.saving_folder_name_final_masked_normalized_images, 49 | "-ar", str(obj.A_Range), "-fis", str(obj.final_image_size), 50 | "-o", obj.output_path, "-po", obj.print_off, "-fb", obj.find_bottom]) 51 | 52 | 53 | 54 | def run_loop_multi_cpu_denisty_map(obj, image_path, code_path): 55 | _, File = os.path.split(obj.Case) 56 | obj.File = File[:-4] 57 | 58 | call(["python3", os.path.join(obj.code_path, "density_map_feature_based.py"), 59 | "-i", obj.Case, "-if", obj.image_format, "-cn", obj.File, 60 | "-po", obj.print_off, "-sfn", obj.saving_folder_name_breast_density, 61 | "-ar", str(obj.A_Range), "-fis", str(obj.final_image_size), 62 | "-o", obj.output_path, "-lt", str(obj.libra_training), 63 | "-pttm", obj.model_path_density, "-rii", obj.remove_intermediate_images, 64 | "-to", str(obj.timeout_waiting)]) 65 | 66 | 67 | 68 | def get_the_image_reset_info(obj): 69 | Output_file_path_mask = os.path.join(obj.output_path, obj.File, "final_breast_mask", obj.File+"_final_mask"+obj.image_format) 70 | Output_file_path_image = os.path.join(obj.output_path, obj.File, obj.File+"_masked_image"+obj.image_format) 71 | csv_file = os.path.join(obj.output_path, obj.File, "air_breast_mask", "fixing_ratio.csv") 72 | 73 | image_reset_info = pd.read_csv(csv_file, sep=",", index_col=0) 74 | 75 | Image_needed_side_extention = image_reset_info["Image_needed_side_extention"].iloc[0] 76 | Image_Dimension_X = image_reset_info["Image_Dimension_X"].iloc[0] 77 | Image_Dimension_Y = image_reset_info["Image_Dimension_Y"].iloc[0] 78 | 79 | Max = max(Image_Dimension_X, Image_Dimension_Y) 80 | Mask = cv2.resize(obj.mask, (Max, Max), interpolation = cv2.INTER_NEAREST) 81 | 82 | if Image_needed_side_extention == "V": 83 | Mask = Mask[:, :Image_Dimension_Y] 84 | else: 85 | Mask = Mask[:Image_Dimension_X, :] 86 | 87 | Mask = np.uint8( Mask*255/Mask.max() ) 88 | Mask = np.logical_not( find_largest_obj( np.logical_not(Mask>1) ) ) 89 | cv2.imwrite(Output_file_path_mask, np.uint8(Mask*255)) 90 | obj.mask = Mask>0 91 | 92 | 93 | Min_not_masked = obj.image[obj.mask].min() 94 | obj.image[np.logical_not(obj.mask)] = Min_not_masked 95 | obj.image = Normalize_Image(obj.image, obj.A_Range, bits_conversion=obj.bits_conversion) 96 | cv2.imwrite(Output_file_path_image) 97 | 98 | return(obj) 99 | -------------------------------------------------------------------------------- /needed_functions_GPU.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import os, pdb, cv2 3 | import pandas as pd 4 | from copy import deepcopy 5 | from termcolor import colored 6 | from keras.models import Model 7 | import matplotlib.pyplot as plt 8 | 9 | # My packages 10 | from data_rw import testGenerator, get_image_info, saveResults_batch_based 11 | 12 | 13 | 14 | def test_network_air(obj): 15 | obj.train_path = os.path.join(obj.output_path, obj.saving_folder_name_net_air) 16 | obj.saving_path = os.path.join(obj.output_path, obj.saving_folder_name_net_pec_temp) 17 | 18 | print("[INFO] The testing path is " + obj.train_path) 19 | print("[INFO] The saving path is " + obj.saving_path) 20 | print("[INFO] The model path is " + obj.model_path) 21 | 22 | if not(os.path.exists(obj.saving_path)): os.makedirs(obj.saving_path) 23 | 24 | if obj.print_off == "1": 25 | Verbose = 0 26 | else: 27 | Verbose = 1 28 | 29 | 30 | obj = get_image_info(obj) 31 | Test_set = testGenerator(obj) 32 | 33 | 34 | while True: 35 | indexes = next(Test_set.index_generator) 36 | images = Test_set._get_batches_of_transformed_samples(indexes) 37 | images = images/images.max() 38 | 39 | results = obj.model.predict(images, verbose=Verbose) 40 | image_names = [] 41 | for index in indexes: 42 | image_names.append(Test_set.filenames[index]) 43 | 44 | saveResults_batch_based(obj, results, image_names, obj.air_seg_prefix) 45 | 46 | if indexes[-1]==Test_set.n-1: 47 | break 48 | 49 | return(obj) 50 | 51 | 52 | def test_network_pec(obj): 53 | obj.train_path = os.path.join(obj.output_path, obj.saving_folder_name_net_pec) 54 | obj.saving_path = os.path.join(obj.output_path, obj.saving_folder_name_temp_breast_masks) 55 | 56 | print("[INFO] The testing path is " + obj.train_path) 57 | print("[INFO] The saving path is " + obj.saving_path) 58 | print("[INFO] The model path is " + obj.model_path) 59 | 60 | if not(os.path.exists(obj.saving_path)): os.makedirs(obj.saving_path) 61 | 62 | if obj.print_off == "1": 63 | Verbose = 0 64 | else: 65 | Verbose = 1 66 | 67 | 68 | obj = get_image_info(obj) 69 | Test_set = testGenerator(obj) 70 | 71 | while True: 72 | indexes = next(Test_set.index_generator) 73 | images = Test_set._get_batches_of_transformed_samples(indexes) 74 | images = images/images.max() 75 | 76 | results = obj.model.predict(images, verbose=Verbose) 77 | image_names = [] 78 | for index in indexes: 79 | image_names.append(Test_set.filenames[index]) 80 | 81 | saveResults_batch_based(obj, results, image_names, obj.pec_seg_prefix) 82 | 83 | if indexes[-1]==Test_set.n-1: 84 | break 85 | 86 | return(obj) 87 | 88 | 89 | 90 | def test_birads(obj): 91 | obj.train_path = os.path.join(obj.output_path, obj.saving_folder_name_final_masked_normalized_images) 92 | obj.saving_path = os.path.join(obj.output_path, obj.saving_folder_name_breast_density) 93 | 94 | 95 | if obj.print_off == "0": 96 | print(colored("[INFO]", "cyan") + " BIRADS assessment by network is started; please wait ...") 97 | print("[INFO] The testing path is " + obj.train_path) 98 | print("[INFO] The saving path is " + obj.saving_path) 99 | print("[INFO] The model path is " + obj.model_path) 100 | 101 | 102 | if not(os.path.exists(obj.saving_path)): os.makedirs(obj.saving_path) 103 | 104 | if obj.print_off == "1": 105 | Verbose = 0 106 | else: 107 | Verbose = 1 108 | 109 | 110 | obj = get_image_info(obj) 111 | Test_set = testGenerator(obj) 112 | 113 | # feature extractor model 114 | obj.Density_map_model = Model(obj.model.input, obj.model.layers[-6].output) 115 | 116 | 117 | Loop_counter = 0 118 | BIRADS_list = ["1", "2", "3", "4"] 119 | while True: 120 | indexes = next(Test_set.index_generator) 121 | images = Test_set._get_batches_of_transformed_samples(indexes) 122 | images = images/images.max() 123 | 124 | results = obj.model.predict(images, verbose=Verbose) 125 | features = obj.Density_map_model.predict(images, verbose=Verbose) 126 | features = features.reshape([len(indexes),-1]) 127 | 128 | if len(indexes)==1: 129 | Image_BIRADS = pd.DataFrame(data=[results[0]], 130 | index=[Test_set.filenames[int(indexes.item())]], 131 | columns=BIRADS_list) 132 | else: 133 | Image_BIRADS = pd.DataFrame(data=results, 134 | index=Test_set.filenames[0:len(indexes)], 135 | columns=BIRADS_list) 136 | if Loop_counter == 0: 137 | BIRADS = deepcopy(Image_BIRADS) 138 | else: 139 | temp = [BIRADS, Image_BIRADS] 140 | BIRADS = pd.concat(temp) 141 | 142 | 143 | if len(indexes)==1: 144 | Image_features = pd.DataFrame(data=[features[0]], 145 | index=[Test_set.filenames[int(indexes.item())]]) 146 | else: 147 | Image_features = pd.DataFrame(data=features, 148 | index=Test_set.filenames[0:len(indexes)]) 149 | if Loop_counter == 0: 150 | Features = deepcopy(Image_features) 151 | else: 152 | temp = [Features, Image_features] 153 | Features = pd.concat(temp) 154 | 155 | 156 | Loop_counter += 1 157 | if indexes[-1]==Test_set.n-1: 158 | break 159 | 160 | 161 | BIRADS.to_csv(os.path.join(obj.saving_path, "Predicted_BIRADS_All_Images.csv")) 162 | Features.to_hdf(os.path.join(obj.saving_path, 163 | "Extracted_Features_All_images.h5"), key="Features", mode="w") 164 | return(obj) 165 | -------------------------------------------------------------------------------- /postprocessing.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | # From python packages 5 | import numpy as np 6 | from time import time 7 | from termcolor import colored 8 | import cv2, os, argparse, pdb, logging 9 | 10 | # From my packages 11 | from breast_needed_functions import Normalize_Image, detect_buttom_portion 12 | from breast_needed_functions import find_logical_pec_objs, find_largest_obj, fix_ratio 13 | from breast_needed_functions import bring_back_images_to_orginal_size, bring_back_images_to_orginal_orientation 14 | 15 | import matplotlib.pyplot as plt 16 | 17 | ################################## This script is for training the svm 18 | ap = argparse.ArgumentParser() 19 | ap.add_argument("-o", "--output_path", required=False, default='./output', 20 | help="path for saving results file") 21 | 22 | ap.add_argument("-i", "--input", required=False, default='Full_path_to_image_name', 23 | help="path for input files") 24 | 25 | ap.add_argument("-if", "--image_format", required=False, default='.png', 26 | help="The image format for saving") 27 | 28 | ap.add_argument("-po", "--print_off", type=int, default=0, 29 | help="If this is one, it turns off printing") 30 | 31 | ap.add_argument("-ar", "--A_Range", type=int, default=2**8-1, 32 | help="The number of bits for saving image") 33 | 34 | ap.add_argument("-fis", "--final_image_size", type=int, default=512, 35 | help="The final size of image") 36 | 37 | ap.add_argument("-sfn", "--saving_folder_name", default="final_images/image", 38 | help="The name of folder that the resutls to be saved for batch processing") 39 | 40 | ap.add_argument("-cn", "--case_name", default="Case_ID", 41 | help="This name defines the saving path") 42 | 43 | ap.add_argument("-fb", "--find_bottom", default="1", 44 | help="if this is one, it tries to remove the bottom.") 45 | 46 | ap.add_argument("-rii", "--remove_intermediate_images", 47 | default="K", help="R is removing and K is keeping them") 48 | 49 | args = vars(ap.parse_args()) 50 | 51 | 52 | 53 | class Segmentor(object): # The main class 54 | def __init__(self): 55 | ######################################################################## Initial 56 | ######################################################################## Values 57 | self.Case_path = args["input"] 58 | self.image_format = args["image_format"] 59 | self.saving_folder_name = args["saving_folder_name"] 60 | self.case_name = args["case_name"] 61 | self.output_path = args["output_path"] 62 | 63 | self.A_Range = args["A_Range"] 64 | self.final_image_size = args["final_image_size"] 65 | self.print_off = int(args["print_off"]) 66 | self.find_bottom = args["find_bottom"] 67 | 68 | if self.A_Range==2**16-1: 69 | self.bits_conversion = "uint16" 70 | elif self.A_Range==2**32-1: 71 | self.bits_conversion = "uint32" 72 | else: 73 | self.bits_conversion = "uint8" 74 | 75 | 76 | 77 | def Main_Loop_Function(self): 78 | ######################################################################## Couple of 79 | ######################################################################## initializations 80 | T_Start = time() 81 | 82 | log_path = os.path.join(self.output_path, self.case_name, "LIBRA_"+self.case_name+".log") 83 | logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', filename=log_path, level=logging.INFO) 84 | logging.info('Segmentation of pectoral from breast is done.') 85 | logging.info('Masking final masks and normalized image.') 86 | 87 | 88 | #################################################################### Loading Image 89 | #################################################################### & files 90 | try: 91 | org_image_path = os.path.join(self.output_path, self.case_name, 92 | "air_breast_mask", self.case_name+"_16bits_Orginal"+self.image_format) 93 | self.org_image = cv2.imread(org_image_path, -1) 94 | self.org_image = fix_ratio(self.org_image, 95 | self.final_image_size, self.final_image_size) 96 | 97 | except: 98 | org_image_path = os.path.join(self.output_path, self.case_name, 99 | "air_breast_mask", self.case_name+"_Normalized"+self.image_format) 100 | self.org_image = cv2.imread(org_image_path, -1) 101 | 102 | air_mask_path = os.path.join(self.output_path, self.case_name, 103 | "air_breast_mask", self.case_name+"_air_breast_mask"+self.image_format) 104 | self.mask = cv2.imread(air_mask_path, -1) 105 | 106 | self.pec_mask = cv2.imread(self.Case_path, -1) 107 | if len(self.pec_mask.shape)>2: 108 | self.pec_mask = self.pec_mask[...,-1] 109 | 110 | self.pec_mask = find_logical_pec_objs(self.pec_mask>0) 111 | self.mask[self.pec_mask>0] = 0 112 | self.mask = self.mask>0 113 | 114 | self.mask[:5, :] = False 115 | self.mask[-1, :] = False 116 | self.mask[:, 0] = False 117 | self.mask = find_largest_obj(self.mask) 118 | 119 | # im_floodfill = self.mask.copy() 120 | # im_floodfill[:3,:] = False # to make it safe 121 | # im_floodfill[-3:,:] = False 122 | # im_floodfill[:,:3] = False 123 | # im_floodfill[:,-3:] = False 124 | # loc = np.where(im_floodfill) 125 | # h, w = im_floodfill.shape[:2] 126 | # mask = np.zeros((h+2, w+2), np.uint8) 127 | # im_floodfill = cv2.floodFill((im_floodfill*255).astype("uint8"), 128 | # mask, (loc[0][0], loc[1][0]), 255)[1] 129 | # im_floodfill = cv2.bitwise_not(im_floodfill) 130 | # im_floodfill = im_floodfill>0 131 | # if np.array_equal(im_floodfill, im_floodfill.astype(bool)) and im_floodfill.any(): 132 | # self.mask = self.mask | np.logical_not(im_floodfill) 133 | 134 | if self.find_bottom == "1": 135 | try: 136 | self.mask = detect_buttom_portion(self, self.mask) 137 | except: 138 | self.mask = self.mask 139 | self.mask = find_largest_obj(self.mask) 140 | 141 | 142 | 143 | self.org_image[np.logical_not(self.mask)] = 0 144 | Min = self.org_image[self.mask].min() 145 | self.org_image[np.logical_not(self.mask)] = Min 146 | 147 | 148 | # replace small and too bright spots 149 | top_one = np.percentile(self.org_image, 99.9) 150 | if (self.org_image>top_one).any(): 151 | self.org_image[self.org_image>top_one] = int(top_one) 152 | 153 | 154 | self.org_image = Normalize_Image(self.org_image, self.A_Range-1, 155 | bits_conversion=self.bits_conversion, Name=self.case_name)+1 156 | self.org_image[np.logical_not(self.mask)] = 0 157 | self.image_main = self.image_he = self.image = self.org_image 158 | 159 | 160 | self.image = np.concatenate((self.image.reshape([self.final_image_size, self.final_image_size, 1]), 161 | self.image_he.reshape([self.final_image_size, 162 | self.final_image_size,1])), axis=2) 163 | self.image = np.concatenate((self.image, 164 | self.image_main.reshape([self.final_image_size, 165 | self.final_image_size,1])), axis=2) 166 | self.image = self.image.astype(self.bits_conversion) 167 | 168 | 169 | Image_Path = os.path.join(self.output_path, self.saving_folder_name) 170 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 171 | Save_name_img = os.path.join(Image_Path, self.case_name+self.image_format) 172 | cv2.imwrite(Save_name_img, self.image) 173 | 174 | 175 | Image_Path = os.path.join(self.output_path, self.case_name) 176 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 177 | Save_name_img = os.path.join(Image_Path, self.case_name+"_final_breast_notmalized_image"+self.image_format) 178 | 179 | Path_to_csv_size = os.path.join(self.output_path, self.case_name, "air_breast_mask", "fixing_ratio.csv") 180 | self.image = bring_back_images_to_orginal_size(Path_to_csv_size, self.image, type="image") 181 | Path_to_csv_ori = os.path.join(self.output_path, self.case_name, "Headers.csv") 182 | self.image = bring_back_images_to_orginal_orientation(Path_to_csv_ori, self.image) 183 | cv2.imwrite(Save_name_img, self.image) 184 | 185 | 186 | Image_Path = os.path.join(Image_Path, "breast_mask") 187 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 188 | Save_name_mask = os.path.join(Image_Path, self.case_name+"_final_breast_mask"+self.image_format) 189 | cv2.imwrite(Save_name_mask, (self.mask*255).astype("uint8")) 190 | 191 | 192 | logging.info("The path of saved image is: "+Save_name_img) 193 | if self.print_off==0: print("[INFO] The path of saved image is: "+Save_name_img) 194 | 195 | 196 | T_End = time() 197 | if self.print_off==0: print("[INFO] Elapsed Time (for this file): "+'\033[1m'+ \ 198 | colored(str(round(T_End-T_Start, 2)), 'blue')+'\033[0m'+" seconds") 199 | 200 | logging.info("The process for this case is done.") 201 | if self.print_off==0: print(colored("[INFO]", 'green')+" The process for this case is done.") 202 | 203 | 204 | 205 | ############################################################################### 206 | ############################################################################### 207 | ############################################################################### 208 | ############################################################################### 209 | ############################################################################### 210 | ############################################################################### Running the code 211 | if __name__ == "__main__": 212 | Info = Segmentor() 213 | Info.Main_Loop_Function() 214 | -------------------------------------------------------------------------------- /preprocessing.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | # From python packages 5 | import numpy as np 6 | from time import time 7 | from copy import deepcopy 8 | from termcolor import colored 9 | import cv2, os, argparse, pydicom, pdb, logging, shutil 10 | 11 | 12 | # From my packages 13 | from breast_needed_functions import air_Libra, get_headers, air 14 | from breast_needed_functions import fix_ratio, fix_ratio_to_csv 15 | from breast_needed_functions import Normalize_Image, find_largest_obj 16 | from breast_needed_functions import object_oriented_preprocessing, Remove_Top_Below_Side_effect 17 | 18 | ################################## This script is for training the svm 19 | ap = argparse.ArgumentParser() 20 | ap.add_argument("-o", "--output_path", required=False, default='./output', 21 | help="path for saving results file") 22 | 23 | ap.add_argument("-i", "--input_dicom", required=False, default='Full_path_to_dicom_file', 24 | help="path for input files") 25 | 26 | ap.add_argument("-if", "--image_format", required=False, default='.png', 27 | help="The image format for saving") 28 | 29 | ap.add_argument("-po", "--print_off", type=int, default=0, 30 | help="If this is one, it turns off printing") 31 | 32 | ap.add_argument("-ar", "--A_Range", type=int, default=2**8-1, 33 | help="The number of bits for saving image") 34 | 35 | ap.add_argument("-fis", "--final_image_size", type=int, default=512, 36 | help="The final size of image") 37 | 38 | ap.add_argument("-sfn", "--saving_folder_name", default="air_net_data/image", 39 | help="The name of folder that the resutls to be saved for batch processing") 40 | 41 | ap.add_argument("-lsm", "--libra_segmentation_method", default="Libra", 42 | help="The segmentation method can be Libra or Exaturated") 43 | 44 | ap.add_argument("-fpm", "--find_pacemaker", type=int, default=0, 45 | help="If this is one, it will remove the pacemakers by replacing it with minimum") 46 | 47 | args = vars(ap.parse_args()) 48 | 49 | 50 | 51 | class Segmentor(object): # The main class 52 | def __init__(self): 53 | ######################################################################## Initial 54 | ######################################################################## Values 55 | self.Case = args["input_dicom"] 56 | self.output_path = args["output_path"] 57 | self.image_format = args["image_format"] 58 | self.saving_folder_name = args["saving_folder_name"] 59 | 60 | self.A_Range = args["A_Range"] 61 | self.final_image_size = args["final_image_size"] 62 | self.print_off = int(args["print_off"]) 63 | 64 | self.list_dicom_headers = ["PatientID", "PatientAge", "KVP", "Exposure", 65 | "PresentationIntentType", "Modality", "Manufacturer", 66 | "ImagerPixelSpacing", "BodyPartThickness", 67 | "ImageLaterality", "PhotometricInterpretation", 68 | "Rows", "Columns", "ViewPosition", "FieldOfViewHorizontalFlip"] 69 | 70 | if self.A_Range==2**16-1: 71 | self.bits_conversion = "uint16" 72 | elif self.A_Range==2**32-1: 73 | self.bits_conversion = "uint32" 74 | else: 75 | self.bits_conversion = "uint8" 76 | 77 | self.libra_segmentation_method = args["libra_segmentation_method"] 78 | self.find_pacemaker = args["find_pacemaker"] 79 | 80 | 81 | 82 | def Main_Loop_Function(self): 83 | ######################################################################## Couple of 84 | ######################################################################## initializations 85 | T_Start = time() 86 | 87 | 88 | # no output path = return the results in the same path as dataset 89 | if self.output_path == '0': 90 | self.output_path = self.PATH 91 | 92 | if not os.path.exists(self.output_path): 93 | os.makedirs(self.output_path) 94 | 95 | # Log file loading 96 | Path, File = os.path.split(self.Case) 97 | if File[-4:] == ".dcm": File = File[:-4] 98 | log_path = os.path.join(self.output_path, "LIBRA_"+File+".log") 99 | logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', 100 | filename=log_path, level=logging.INFO) 101 | logging.info('The preprocessing is starting.') 102 | 103 | 104 | #################################################################### Loading Image 105 | #################################################################### & files 106 | logging.info("The dicom file path is: " +self.Case) 107 | if self.print_off==0: print(colored("[INFO]", "yellow") + " The dicom file path is: " + 108 | colored(self.Case, "yellow")) 109 | 110 | # Read Dicom file 111 | try: 112 | self.ds = pydicom.dcmread(self.Case) 113 | self.image = (self.ds.pixel_array).astype("float") 114 | except: 115 | ############ FIX THIS 116 | from medpy.io import load 117 | self.image, self.ds = load(self.Case) 118 | 119 | 120 | fix_ratio_to_csv(self.ds.pixel_array, self) 121 | dicom_headers = get_headers(self.ds, self.list_dicom_headers) 122 | 123 | 124 | # Preprocessing step 125 | self, self.image_metal = object_oriented_preprocessing(self) 126 | self = Remove_Top_Below_Side_effect(self) 127 | self.temp_image = deepcopy(self.image) 128 | 129 | 130 | #################################################################### making 131 | #################################################################### the mask and original image 132 | logging.info("Saving image") 133 | if self.print_off==0: print("[INFO] Saving image") 134 | 135 | 136 | self.image_16bits = Normalize_Image(self.image, 2**16-1, 137 | bits_conversion="uint16", flag_min_edition=True, Min=self.image.min()) 138 | 139 | if self.find_pacemaker==1: 140 | self.image = self.image_metal 141 | self.image = fix_ratio(self.image, 142 | self.final_image_size, self.final_image_size) 143 | self.image = Normalize_Image(self.image, self.A_Range, 144 | bits_conversion=self.bits_conversion, flag_min_edition=True, Min=self.temp_image.min()) 145 | 146 | 147 | Image_Path = os.path.join(self.output_path, File) 148 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 149 | Save_name_img = os.path.join(Image_Path, "air_breast_mask", 150 | File+"_Normalized"+self.image_format) 151 | cv2.imwrite(Save_name_img, self.image) 152 | 153 | Save_name_img = os.path.join(Image_Path, "air_breast_mask", 154 | File+"_16bits_Orginal"+self.image_format) 155 | cv2.imwrite(Save_name_img, self.image_16bits) 156 | 157 | dicom_headers.to_csv(os.path.join(Image_Path, "Headers.csv")) 158 | 159 | 160 | Image_Path2 = os.path.join(self.output_path, self.saving_folder_name) 161 | if not(os.path.isdir(Image_Path2)): os.makedirs(Image_Path2) 162 | Save_name_img = os.path.join(Image_Path2, File+self.image_format) 163 | cv2.imwrite(Save_name_img, self.image) 164 | 165 | 166 | logging.info("The path of saved image (original normalized image) is: "+Save_name_img) 167 | if self.print_off==0: print("[INFO] The path of saved image is: "+Save_name_img) 168 | 169 | 170 | T_End = time() 171 | if self.print_off==0: print("[INFO] Elapsed Time (for this file): "+'\033[1m'+ \ 172 | colored(str(round(T_End-T_Start, 2)), 'blue')+'\033[0m'+" seconds") 173 | 174 | logging.info("Preprocessing was successfully done this case.") 175 | _, new_log_path = os.path.split(log_path) 176 | new_log_path = os.path.join(Image_Path, new_log_path) 177 | shutil.move(log_path, new_log_path) 178 | if self.print_off==0: print(colored("[INFO]", 'green')+" The breast-air segmentation was successfully processed for this case.") 179 | 180 | logging.info('Segmentation of breast+pectrocal from air (background) starting for ALL CASES using CNN.') 181 | 182 | 183 | ############################################################################### 184 | ############################################################################### 185 | ############################################################################### 186 | ############################################################################### 187 | ############################################################################### 188 | ############################################################################### Running the code 189 | if __name__ == "__main__": 190 | Info = Segmentor() 191 | Info.Main_Loop_Function() 192 | -------------------------------------------------------------------------------- /preprocessing_pec.py: -------------------------------------------------------------------------------- 1 | import warnings 2 | warnings.filterwarnings("ignore") 3 | 4 | # From python packages 5 | import numpy as np 6 | from time import time 7 | from skimage import exposure 8 | from termcolor import colored 9 | import cv2, os, argparse, pdb, logging 10 | 11 | 12 | # From my packages 13 | from breast_needed_functions import find_logical_background_objs 14 | from breast_needed_functions import Normalize_Image, find_largest_obj 15 | 16 | 17 | ################################## This script is for training the svm 18 | ap = argparse.ArgumentParser() 19 | ap.add_argument("-o", "--output_path", required=False, default='./output', 20 | help="path for saving results file") 21 | 22 | ap.add_argument("-i", "--input", required=False, default='Full_path_to_image_name', 23 | help="path for input files") 24 | 25 | ap.add_argument("-if", "--image_format", required=False, default='.png', 26 | help="The image format for saving") 27 | 28 | ap.add_argument("-po", "--print_off", type=int, default=0, 29 | help="If this is one, it turns off printing") 30 | 31 | ap.add_argument("-ar", "--A_Range", type=int, default=2**8-1, 32 | help="The number of bits for saving image") 33 | 34 | ap.add_argument("-fis", "--final_image_size", type=int, default=512, 35 | help="The final size of image") 36 | 37 | ap.add_argument("-sfn", "--saving_folder_name", default="pec_net_data/image", 38 | help="The name of folder that the resutls to be saved for batch processing") 39 | 40 | ap.add_argument("-cn", "--case_name", default="Case_ID", 41 | help="This name defines the saving path") 42 | 43 | args = vars(ap.parse_args()) 44 | 45 | 46 | 47 | class Segmentor(object): # The main class 48 | def __init__(self): 49 | ######################################################################## Initial 50 | ######################################################################## Values 51 | self.Case_path = args["input"] 52 | self.image_format = args["image_format"] 53 | self.saving_folder_name = args["saving_folder_name"] 54 | self.case_name = args["case_name"] 55 | self.output_path = args["output_path"] 56 | 57 | self.A_Range = args["A_Range"] 58 | self.final_image_size = args["final_image_size"] 59 | self.print_off = int(args["print_off"]) 60 | 61 | if self.A_Range==2**16-1: 62 | self.bits_conversion = "uint16" 63 | elif self.A_Range==2**32-1: 64 | self.bits_conversion = "uint32" 65 | else: 66 | self.bits_conversion = "uint8" 67 | 68 | self.threshold_percentile = 0.5 69 | 70 | 71 | 72 | def Main_Loop_Function(self): 73 | ######################################################################## Couple of 74 | ######################################################################## initializations 75 | T_Start = time() 76 | 77 | log_path = os.path.join(self.output_path, self.case_name, "LIBRA_"+self.case_name+".log") 78 | logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s', filename=log_path, level=logging.INFO) 79 | logging.info('Segmentation of background from breast+pectrocal is done.') 80 | logging.info('Segmentation of breast from pectrocal starting.') 81 | 82 | 83 | #################################################################### Loading Image 84 | #################################################################### & files 85 | org_image_path = os.path.join(self.output_path, self.case_name, 86 | "air_breast_mask", self.case_name+"_Normalized"+self.image_format) 87 | self.org_image = cv2.imread(org_image_path, -1) 88 | 89 | 90 | self.mask = cv2.imread(self.Case_path, 0) 91 | self.mask = self.mask>0 92 | 93 | 94 | try: 95 | # join masks 96 | self.mask = find_logical_background_objs(self.mask) 97 | self.mask = find_largest_obj(self.mask) 98 | 99 | except: 100 | if self.print_off==0: 101 | print("1 THIS IMAGE HAD ISSUE FOR PEC PREPROCESSING: "+self.case_name) 102 | logging.info("The breast air CNN mask had an issue. Warning.") 103 | 104 | self.mask = find_largest_obj(self.mask) 105 | 106 | 107 | self.org_image[np.logical_not(self.mask)] = 0 108 | try: 109 | Min = self.org_image[self.mask].min() 110 | except: 111 | Min = self.org_image.min() 112 | self.org_image[np.logical_not(self.mask)] = Min 113 | 114 | 115 | non_zero = self.org_image[self.org_image>Min] 116 | if len(non_zero)>0: 117 | self.image = (self.org_image-(np.percentile(non_zero,self.threshold_percentile)-1/self.A_Range))/ ( 118 | non_zero.max()-(np.percentile(non_zero,self.threshold_percentile)-1/self.A_Range)) 119 | else: 120 | self.image = self.org_image/self.org_image.max() 121 | 122 | 123 | self.image[self.image<0] = 0 124 | self.image = self.image * self.A_Range 125 | self.image = self.image.astype(self.bits_conversion) 126 | 127 | 128 | self.image_he = exposure.equalize_hist(self.image, nbins=self.A_Range, 129 | mask=self.mask>0) 130 | self.image_he[self.mask==0]=0 131 | self.image_he = Normalize_Image(self.image_he, self.A_Range-1, 132 | bits_conversion=self.bits_conversion) 133 | self.image_he += 1 134 | self.image_he[self.mask==0]=0 135 | 136 | 137 | self.org_image[self.mask==0] = 0 138 | self.image_main = Normalize_Image(self.org_image, self.A_Range-1, 139 | bits_conversion=self.bits_conversion) 140 | self.image_main += 1 141 | self.image_main[self.mask==0] = 0 142 | 143 | self.image = np.concatenate((self.image.reshape([self.final_image_size, self.final_image_size, 1]), 144 | self.image_he.reshape([self.final_image_size, 145 | self.final_image_size,1])), axis=2) 146 | self.image = np.concatenate((self.image, 147 | self.image_main.reshape([self.final_image_size, 148 | self.final_image_size,1])), axis=2) 149 | self.image = self.image.astype(self.bits_conversion) 150 | 151 | 152 | Image_Path = os.path.join(self.output_path, self.case_name) 153 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 154 | Mask_path = os.path.join(Image_Path, "air_breast_mask") 155 | if not(os.path.isdir(Mask_path)): os.makedirs(Mask_path) 156 | Save_name_mask = os.path.join(Mask_path, self.case_name+"_air_breast_mask"+self.image_format) 157 | cv2.imwrite(Save_name_mask, self.mask.astype(self.bits_conversion)*self.A_Range) 158 | 159 | 160 | Image_Path = os.path.join(Image_Path, "breast_mask") 161 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 162 | Save_name_img = os.path.join(Image_Path, self.case_name+"_pec_breast_preprocessed"+self.image_format) 163 | cv2.imwrite(Save_name_img, self.image) 164 | 165 | 166 | Image_Path = os.path.join(self.output_path, self.saving_folder_name) 167 | if not(os.path.isdir(Image_Path)): os.makedirs(Image_Path) 168 | Save_name_img = os.path.join(Image_Path, self.case_name+self.image_format) 169 | cv2.imwrite(Save_name_img, self.image) 170 | 171 | 172 | logging.info("The path of saved image is: "+Save_name_img) 173 | if self.print_off==0: print("[INFO] The path of saved image is: "+Save_name_img) 174 | 175 | 176 | T_End = time() 177 | if self.print_off==0: print("[INFO] Elapsed Time (for this file): "+'\033[1m'+ \ 178 | colored(str(round(T_End-T_Start, 2)), 'blue')+'\033[0m'+" seconds") 179 | 180 | logging.info("The breast-air segmentation was successfully processed for this case.") 181 | if self.print_off==0: print(colored("[INFO]", 'green')+" The breast-air segmentation was successfully processed for this case.") 182 | 183 | 184 | 185 | ############################################################################### 186 | ############################################################################### 187 | ############################################################################### 188 | ############################################################################### 189 | ############################################################################### 190 | ############################################################################### Running the code 191 | if __name__ == "__main__": 192 | Info = Segmentor() 193 | Info.Main_Loop_Function() 194 | -------------------------------------------------------------------------------- /pyradiomics_features.py: -------------------------------------------------------------------------------- 1 | import pdb, six 2 | import numpy as np 3 | import pandas as pd 4 | import SimpleITK as sitk 5 | from copy import deepcopy 6 | from scipy.stats import skew, kurtosis 7 | from skimage.feature import local_binary_pattern 8 | from radiomics import firstorder, glcm, ngtdm, gldm, glrlm, glszm 9 | 10 | 11 | # turn off logging notices 12 | import logging 13 | logger = logging.getLogger("radiomics") 14 | logger.setLevel(logging.ERROR) 15 | 16 | 17 | 18 | def extract_radiomics_features(obj, image, mask=[]): 19 | if len(mask)==0: 20 | mask = np.zeros(image.shape) 21 | 22 | try: 23 | bits_conversion = obj.bits_conversion 24 | except: 25 | bits_conversion = "uint8" 26 | 27 | 28 | features = [] 29 | features_name = [] 30 | 31 | List_featrues = ["avg", "std", "ske", "kur"] 32 | LBPParams = [{"radius":1, "points":8}, {"radius":3, "points":24}] 33 | for Param in LBPParams: 34 | for F in List_featrues: 35 | features_name.append("LBP_"+ F +"_R" + str(Param["radius"]) 36 | +"_P"+str(Param["points"])) 37 | 38 | # LBP features maps 39 | METHOD = 'uniform' 40 | LBP_filtered_Images = [] 41 | for LBP in LBPParams: 42 | lbp = local_binary_pattern(image, LBP["points"], LBP["radius"], METHOD) 43 | LBP_filtered_Images.append(lbp) 44 | 45 | for LBPI in LBP_filtered_Images: 46 | features.append(LBPI[mask>0].mean()) 47 | features.append(LBPI[mask>0].std()) 48 | features.append(skew(LBPI[mask>0])) 49 | features.append(kurtosis(LBPI[mask>0])) 50 | 51 | 52 | if image.shape[-1]==3: 53 | image = np.reshape(image, (3, image.shape[0], image.shape[1])) 54 | mask = np.reshape(mask, (3, mask.shape[0], image.shape[1])) 55 | else: 56 | image = np.reshape(image, (1, image.shape[0], image.shape[1])) 57 | mask = np.reshape(mask, (1, mask.shape[0], image.shape[1])) 58 | image = image.astype(bits_conversion) 59 | 60 | 61 | 62 | mask = mask.astype(bits_conversion) 63 | 64 | 65 | Image_ITK = sitk.GetImageFromArray(image) 66 | Mask_ITK = sitk.GetImageFromArray(mask) 67 | 68 | 69 | List_features = ["firstorder", "glcm", "ngtdm", "gldm", "glrlm", "glszm"] 70 | List_radiomics_labels = ["RadiomicsFirstOrder", "RadiomicsGLCM", 71 | "RadiomicsNGTDM", "RadiomicsGLDM", "RadiomicsGLRLM", "RadiomicsGLSZM"] 72 | for feature, function_to_load in zip(List_features, List_radiomics_labels): 73 | code = feature+"."+function_to_load+"(Image_ITK, Mask_ITK)" 74 | active_feature=eval(code) 75 | exec('active_feature.enableAllFeatures()') 76 | exec('active_feature.execute()') 77 | # pdb.set_trace() 78 | for (key, val) in six.iteritems(eval('active_feature.featureValues')): 79 | features.append(val.item()) 80 | features_name.append(key) 81 | 82 | obj.FEATUREs = pd.DataFrame(data=[features], columns=features_name) 83 | 84 | return(obj) 85 | 86 | 87 | 88 | ################################################################################ 89 | ################################################################################ 90 | ################################################################################ 91 | ################################################################################ 92 | def set_slic_features(image, mask, segments, Image_ITK, itk_mask, itk_segments, 93 | seg, LBP_filtered_Images, features, features_names, counter): 94 | seg_features = deepcopy(features) 95 | List_features = ["firstorder", "glcm"] 96 | List_radiomics_labels = ["RadiomicsFirstOrder", "RadiomicsGLCM"] 97 | 98 | if counter==0: 99 | List_featrues = ["avg", "std", "ske", "kur"] 100 | LBPParams = [{"radius":1, "points":8}, {"radius":3, "points":24}] 101 | for Param in LBPParams: 102 | for F in List_featrues: 103 | features_names.append("Seg_LBP_"+ F +"_R" + str(Param["radius"]) 104 | +"_P"+str(Param["points"])) 105 | 106 | seg_mask = np.zeros(mask.shape) 107 | seg_mask[segments==seg] = 1 108 | for LBPI in LBP_filtered_Images: 109 | seg_features.append(LBPI[seg_mask>0].mean()) 110 | seg_features.append(LBPI[seg_mask>0].std()) 111 | seg_features.append(skew(LBPI[seg_mask>0])) 112 | seg_features.append(kurtosis(LBPI[seg_mask>0])) 113 | 114 | 115 | seg_mask = np.zeros(itk_mask.shape) 116 | seg_mask[itk_segments==seg]=1 117 | Mask_ITK = sitk.GetImageFromArray(seg_mask) 118 | for feature, function_to_load in zip(List_features, List_radiomics_labels): 119 | code = feature+"."+function_to_load+"(Image_ITK, Mask_ITK)" 120 | active_feature=eval(code) 121 | exec('active_feature.enableAllFeatures()') 122 | exec('active_feature.execute()') 123 | 124 | for (key, val) in six.iteritems(eval('active_feature.featureValues')): 125 | seg_features.append(val.item()) 126 | features_names.append("Seg_"+key) 127 | 128 | features_names.append("Seg_area") 129 | features_names.append("Seg_index") 130 | 131 | 132 | else: 133 | seg_mask = np.zeros(mask.shape) 134 | seg_mask[segments==seg] = 1 135 | for LBPI in LBP_filtered_Images: 136 | seg_features.append(LBPI[seg_mask>0].mean()) 137 | seg_features.append(LBPI[seg_mask>0].std()) 138 | seg_features.append(skew(LBPI[seg_mask>0])) 139 | seg_features.append(kurtosis(LBPI[seg_mask>0])) 140 | 141 | 142 | seg_mask = np.zeros(itk_mask.shape) 143 | seg_mask[itk_segments==seg]=1 144 | Mask_ITK = sitk.GetImageFromArray(seg_mask) 145 | for feature, function_to_load in zip(List_features, List_radiomics_labels): 146 | code = feature+"."+function_to_load+"(Image_ITK, Mask_ITK)" 147 | active_feature=eval(code) 148 | exec('active_feature.enableAllFeatures()') 149 | exec('active_feature.execute()') 150 | 151 | for (key, val) in six.iteritems(eval('active_feature.featureValues')): 152 | seg_features.append(val.item()) 153 | 154 | Seg_area = np.sum(np.logical_and(segments==seg, mask>0)) 155 | seg_features.append(Seg_area) 156 | 157 | seg_features.append(seg) 158 | 159 | return(seg_features, features_names) 160 | 161 | 162 | 163 | 164 | 165 | 166 | ################################################################################ 167 | def extract_breast_radiomics_features(obj, image, mask=[], segments=[], 168 | case_name=np.nan, Minimum_acceptable_number_of_pixels_in_segment=49): 169 | if len(mask)==0: 170 | mask = np.zeros(image.shape) 171 | 172 | try: 173 | bits_conversion = obj.bits_conversion 174 | except: 175 | bits_conversion = "uint8" 176 | 177 | # image = image.astype(bits_conversion) 178 | mask = mask.astype(bits_conversion) 179 | 180 | 181 | features = [] 182 | features_names = [] 183 | 184 | 185 | Breast_area_total = np.sum(mask>0) 186 | features.append(Breast_area_total) 187 | features_names.append("Breast_area") 188 | 189 | 190 | List_featrues = ["avg", "std", "ske", "kur"] 191 | LBPParams = [{"radius":1, "points":8}, {"radius":3, "points":24}] 192 | for Param in LBPParams: 193 | for F in List_featrues: 194 | features_names.append("LBP_"+ F +"_R" + str(Param["radius"]) 195 | +"_P"+str(Param["points"])) 196 | 197 | # LBP features maps 198 | METHOD = 'uniform' 199 | LBP_filtered_Images = [] 200 | for LBP in LBPParams: 201 | lbp = local_binary_pattern(image, LBP["points"], LBP["radius"], METHOD) 202 | LBP_filtered_Images.append(lbp) 203 | 204 | for LBPI in LBP_filtered_Images: 205 | features.append(LBPI[mask>0].mean()) 206 | features.append(LBPI[mask>0].std()) 207 | features.append(skew(LBPI[mask>0])) 208 | features.append(kurtosis(LBPI[mask>0])) 209 | 210 | 211 | if image.shape[-1]==3: 212 | itk_image = np.reshape(image, (3, image.shape[0], image.shape[1])) 213 | itk_mask = np.reshape(mask, (3, mask.shape[0], image.shape[1])) 214 | itk_segments = np.reshape(segments, (3, segments.shape[0], segments.shape[1])) 215 | else: 216 | itk_image = np.reshape(image, (1, image.shape[0], image.shape[1])) 217 | itk_mask = np.reshape(mask, (1, mask.shape[0], image.shape[1])) 218 | itk_segments = np.reshape(segments, (1, segments.shape[0], segments.shape[1])) 219 | 220 | 221 | Image_ITK = sitk.GetImageFromArray(itk_image) 222 | Mask_ITK = sitk.GetImageFromArray(itk_mask) 223 | 224 | List_features = ["firstorder", "glcm", "ngtdm", "gldm", "glrlm", "glszm"] 225 | List_radiomics_labels = ["RadiomicsFirstOrder", "RadiomicsGLCM", 226 | "RadiomicsNGTDM", "RadiomicsGLDM", "RadiomicsGLRLM", "RadiomicsGLSZM"] 227 | for feature, function_to_load in zip(List_features, List_radiomics_labels): 228 | code = feature+"."+function_to_load+"(Image_ITK, Mask_ITK)" 229 | active_feature=eval(code) 230 | exec('active_feature.enableAllFeatures()') 231 | exec('active_feature.execute()') 232 | 233 | for (key, val) in six.iteritems(eval('active_feature.featureValues')): 234 | features.append(val.item()) 235 | features_names.append(key) 236 | 237 | N = 0 238 | for seg in np.unique(itk_segments[itk_mask>0]): 239 | if (np.sum(np.logical_and(segments==seg, mask>0))>Minimum_acceptable_number_of_pixels_in_segment): 240 | condition_to_remove = np.logical_and(segments==seg, np.logical_not(mask>0)) 241 | if condition_to_remove.any(): 242 | segments[condition_to_remove] = -1 243 | segments[np.logical_and(segments==seg, np.logical_not(mask>0))] = -1 244 | seg_features, features_names = set_slic_features(image, mask, segments, 245 | Image_ITK, itk_mask, itk_segments, seg, LBP_filtered_Images, 246 | features, features_names, N) 247 | 248 | if N==0: 249 | obj.FEATUREs = pd.DataFrame(data=[seg_features], columns=features_names) 250 | else: 251 | temp = pd.DataFrame(data=[seg_features], columns=features_names) 252 | obj.FEATUREs = [obj.FEATUREs, temp] 253 | obj.FEATUREs = pd.concat(obj.FEATUREs, ignore_index=True) 254 | 255 | N += 1 256 | 257 | return(obj, segments) 258 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | h5py==2.9.0 2 | Keras==2.2.4 3 | Keras-Applications==1.0.8 4 | Keras-Preprocessing==1.1.0 5 | matplotlib==3.1.0 6 | MedPy==0.4.0 7 | numpy==1.16.4 8 | opencv-python==4.1.0.25 9 | pandas==0.24.2 10 | Pillow==6.0.0 11 | ptyprocess==0.6.0 12 | pydicom==1.2.2 13 | pymediainfo==4.0 14 | PyQt5==5.12.2 15 | PyQt5-sip==4.19.17 16 | pyradiomics==2.2.0 17 | pytz==2019.1 18 | scikit-image==0.15.0 19 | scikit-learn==0.21.2 20 | scipy==1.3.0 21 | SimpleITK==1.2.0 22 | six==1.12.0 23 | tensorboard==1.13.1 24 | tensorflow-estimator==1.13.0 25 | termcolor==1.1.0 26 | truecolor==1.0b2 27 | tensorflow-gpu==1.12.0 28 | tensorflow==1.12.0 29 | -------------------------------------------------------------------------------- /run_GUI.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import sys 4 | from GUI import Ui_Form 5 | from PyQt5 import QtWidgets 6 | 7 | 8 | class ApplicationWindow(QtWidgets.QMainWindow): 9 | def __init__(self): 10 | super(ApplicationWindow, self).__init__() 11 | 12 | self.ui = Ui_Form() 13 | self.ui.setupUi(self) 14 | 15 | 16 | def main(): 17 | app = QtWidgets.QApplication(sys.argv) 18 | application = ApplicationWindow() 19 | application.show() 20 | sys.exit(app.exec_()) 21 | 22 | if __name__ == "__main__": 23 | main() 24 | -------------------------------------------------------------------------------- /seg_losses.py: -------------------------------------------------------------------------------- 1 | import keras.backend as K 2 | from keras.losses import binary_crossentropy 3 | from keras.losses import categorical_crossentropy 4 | from keras.utils.generic_utils import get_custom_objects 5 | 6 | from seg_metrics import iou_score, f_score 7 | 8 | SMOOTH = 1. 9 | 10 | __all__ = [ 11 | 'jaccard_loss', 'bce_jaccard_loss', 'cce_jaccard_loss', 12 | 'dice_loss', 'bce_dice_loss', 'cce_dice_loss', 13 | ] 14 | 15 | 16 | # ============================== Jaccard Losses ============================== 17 | 18 | def jaccard_loss(gt, pr): 19 | r"""Jaccard loss function for imbalanced datasets: 20 | 21 | .. math:: L(A, B) = 1 - \frac{A \cap B}{A \cup B} 22 | 23 | Args: 24 | gt: ground truth 4D keras tensor (B, H, W, C) 25 | pr: prediction 4D keras tensor (B, H, W, C) 26 | class_weights: 1. or list of class weights, len(weights) = C 27 | smooth: value to avoid division by zero 28 | per_image: if ``True``, metric is calculated as mean over images in batch (B), 29 | else over whole batch 30 | 31 | Returns: 32 | Jaccard loss in range [0, 1] 33 | 34 | """ 35 | return 1 - iou_score(gt, pr) 36 | 37 | 38 | def bce_jaccard_loss(gt, pr): 39 | r"""Sum of binary crossentropy and jaccard losses: 40 | 41 | .. math:: L(A, B) = bce_weight * binary_crossentropy(A, B) + jaccard_loss(A, B) 42 | 43 | Args: 44 | gt: ground truth 4D keras tensor (B, H, W, C) 45 | pr: prediction 4D keras tensor (B, H, W, C) 46 | class_weights: 1. or list of class weights for jaccard loss, len(weights) = C 47 | smooth: value to avoid division by zero 48 | per_image: if ``True``, jaccard loss is calculated as mean over images in batch (B), 49 | else over whole batch (only for jaccard loss) 50 | 51 | Returns: 52 | loss 53 | 54 | """ 55 | bce = K.mean(binary_crossentropy(gt, pr)) 56 | loss = bce_weight * bce + jaccard_loss(gt, pr) 57 | return loss 58 | 59 | 60 | def cce_jaccard_loss(gt, pr): 61 | r"""Sum of categorical crossentropy and jaccard losses: 62 | 63 | .. math:: L(A, B) = cce_weight * categorical_crossentropy(A, B) + jaccard_loss(A, B) 64 | 65 | Args: 66 | gt: ground truth 4D keras tensor (B, H, W, C) 67 | pr: prediction 4D keras tensor (B, H, W, C) 68 | class_weights: 1. or list of class weights for jaccard loss, len(weights) = C 69 | smooth: value to avoid division by zero 70 | per_image: if ``True``, jaccard loss is calculated as mean over images in batch (B), 71 | else over whole batch 72 | 73 | Returns: 74 | loss 75 | 76 | """ 77 | cce = categorical_crossentropy(gt, pr) * class_weights 78 | cce = K.mean(cce) 79 | return 1 * cce + jaccard_loss(gt, pr) 80 | 81 | 82 | # Update custom objects 83 | get_custom_objects().update({ 84 | 'jaccard_loss': jaccard_loss, 85 | 'bce_jaccard_loss': bce_jaccard_loss, 86 | 'cce_jaccard_loss': cce_jaccard_loss, 87 | }) 88 | 89 | 90 | # ============================== Dice Losses ================================ 91 | 92 | def dice_loss(gt, pr): 93 | r"""Dice loss function for imbalanced datasets: 94 | 95 | .. math:: L(precision, recall) = 1 - (1 + \beta^2) \frac{precision \cdot recall} 96 | {\beta^2 \cdot precision + recall} 97 | 98 | Args: 99 | gt: ground truth 4D keras tensor (B, H, W, C) 100 | pr: prediction 4D keras tensor (B, H, W, C) 101 | class_weights: 1. or list of class weights, len(weights) = C 102 | smooth: value to avoid division by zero 103 | per_image: if ``True``, metric is calculated as mean over images in batch (B), 104 | else over whole batch 105 | beta: coefficient for precision recall balance 106 | 107 | Returns: 108 | Dice loss in range [0, 1] 109 | 110 | """ 111 | return 1 - f_score(gt, pr) 112 | 113 | 114 | def bce_dice_loss(gt, pr): 115 | r"""Sum of binary crossentropy and dice losses: 116 | 117 | .. math:: L(A, B) = bce_weight * binary_crossentropy(A, B) + dice_loss(A, B) 118 | 119 | Args: 120 | gt: ground truth 4D keras tensor (B, H, W, C) 121 | pr: prediction 4D keras tensor (B, H, W, C) 122 | class_weights: 1. or list of class weights for dice loss, len(weights) = C 123 | smooth: value to avoid division by zero 124 | per_image: if ``True``, dice loss is calculated as mean over images in batch (B), 125 | else over whole batch 126 | beta: coefficient for precision recall balance 127 | 128 | Returns: 129 | loss 130 | 131 | """ 132 | bce = K.mean(binary_crossentropy(gt, pr)) 133 | loss = bce_weight * bce + dice_loss(gt, pr) 134 | return loss 135 | 136 | 137 | def cce_dice_loss(gt, pr): 138 | r"""Sum of categorical crossentropy and dice losses: 139 | 140 | .. math:: L(A, B) = cce_weight * categorical_crossentropy(A, B) + dice_loss(A, B) 141 | 142 | Args: 143 | gt: ground truth 4D keras tensor (B, H, W, C) 144 | pr: prediction 4D keras tensor (B, H, W, C) 145 | class_weights: 1. or list of class weights for dice loss, len(weights) = C 146 | smooth: value to avoid division by zero 147 | per_image: if ``True``, dice loss is calculated as mean over images in batch (B), 148 | else over whole batch 149 | beta: coefficient for precision recall balance 150 | 151 | Returns: 152 | loss 153 | 154 | """ 155 | cce = categorical_crossentropy(gt, pr) * class_weights 156 | cce = K.mean(cce) 157 | return 1 * cce + dice_loss(gt, pr) 158 | 159 | 160 | # Update custom objects 161 | get_custom_objects().update({ 162 | 'dice_loss': dice_loss, 163 | 'bce_dice_loss': bce_dice_loss, 164 | 'cce_dice_loss': cce_dice_loss, 165 | }) 166 | -------------------------------------------------------------------------------- /seg_metrics.py: -------------------------------------------------------------------------------- 1 | import keras.backend as K 2 | 3 | 4 | __all__ = [ 5 | 'iou_score', 'jaccard_score', 'f1_score', 'f2_score', 'dice_score', 6 | 'get_f_score', 'get_iou_score', 'get_jaccard_score', 7 | ] 8 | 9 | SMOOTH = 1. 10 | 11 | 12 | # ============================ Jaccard/IoU score ============================ 13 | 14 | 15 | def iou_score(gt, pr): 16 | class_weights=1.; smooth=1.; per_image=True; threshold=None 17 | r""" The `Jaccard index`_, also known as Intersection over Union and the Jaccard similarity coefficient 18 | (originally coined coefficient de communauté by Paul Jaccard), is a statistic used for comparing the 19 | similarity and diversity of sample sets. The Jaccard coefficient measures similarity between finite sample sets, 20 | and is defined as the size of the intersection divided by the size of the union of the sample sets: 21 | 22 | .. math:: J(A, B) = \frac{A \cap B}{A \cup B} 23 | 24 | Args: 25 | gt: ground truth 4D keras tensor (B, H, W, C) 26 | pr: prediction 4D keras tensor (B, H, W, C) 27 | class_weights: 1. or list of class weights, len(weights) = C 28 | smooth: value to avoid division by zero 29 | per_image: if ``True``, metric is calculated as mean over images in batch (B), 30 | else over whole batch 31 | threshold: value to round predictions (use ``>`` comparison), if ``None`` prediction prediction will not be round 32 | 33 | Returns: 34 | IoU/Jaccard score in range [0, 1] 35 | 36 | .. _`Jaccard index`: https://en.wikipedia.org/wiki/Jaccard_index 37 | 38 | """ 39 | if per_image: 40 | axes = [1, 2] 41 | else: 42 | axes = [0, 1, 2] 43 | 44 | if threshold is not None: 45 | pr = K.greater(pr, threshold) 46 | pr = K.cast(pr, K.floatx()) 47 | 48 | intersection = K.sum(gt * pr, axis=axes) 49 | union = K.sum(gt + pr, axis=axes) - intersection 50 | iou = (intersection + smooth) / (union + smooth) 51 | 52 | # mean per image 53 | if per_image: 54 | iou = K.mean(iou, axis=0) 55 | 56 | # weighted mean per class 57 | iou = K.mean(iou * class_weights) 58 | 59 | return iou 60 | 61 | 62 | def IOU(gt, pr): 63 | """Change default parameters of IoU/Jaccard score 64 | 65 | Args: 66 | class_weights: 1. or list of class weights, len(weights) = C 67 | smooth: value to avoid division by zero 68 | per_image: if ``True``, metric is calculated as mean over images in batch (B), 69 | else over whole batch 70 | threshold: value to round predictions (use ``>`` comparison), if ``None`` prediction prediction will not be round 71 | 72 | Returns: 73 | ``callable``: IoU/Jaccard score 74 | """ 75 | return iou_score(gt, pr) 76 | 77 | 78 | # ============================== F/Dice - score ============================== 79 | 80 | def f_score(gt, pr): 81 | class_weights=1; beta=1; smooth=1.; per_image=True; threshold=None 82 | r"""The F-score (Dice coefficient) can be interpreted as a weighted average of the precision and recall, 83 | where an F-score reaches its best value at 1 and worst score at 0. 84 | The relative contribution of ``precision`` and ``recall`` to the F1-score are equal. 85 | The formula for the F score is: 86 | 87 | .. math:: F_\beta(precision, recall) = (1 + \beta^2) \frac{precision \cdot recall} 88 | {\beta^2 \cdot precision + recall} 89 | 90 | The formula in terms of *Type I* and *Type II* errors: 91 | 92 | .. math:: F_\beta(A, B) = \frac{(1 + \beta^2) TP} {(1 + \beta^2) TP + \beta^2 FN + FP} 93 | 94 | 95 | where: 96 | TP - true positive; 97 | FP - false positive; 98 | FN - false negative; 99 | 100 | Args: 101 | gt: ground truth 4D keras tensor (B, H, W, C) 102 | pr: prediction 4D keras tensor (B, H, W, C) 103 | class_weights: 1. or list of class weights, len(weights) = C 104 | beta: f-score coefficient 105 | smooth: value to avoid division by zero 106 | per_image: if ``True``, metric is calculated as mean over images in batch (B), 107 | else over whole batch 108 | threshold: value to round predictions (use ``>`` comparison), if ``None`` prediction prediction will not be round 109 | 110 | Returns: 111 | F-score in range [0, 1] 112 | 113 | """ 114 | if per_image: 115 | axes = [1, 2] 116 | else: 117 | axes = [0, 1, 2] 118 | 119 | if threshold is not None: 120 | pr = K.greater(pr, threshold) 121 | pr = K.cast(pr, K.floatx()) 122 | 123 | tp = K.sum(gt * pr, axis=axes) 124 | fp = K.sum(pr, axis=axes) - tp 125 | fn = K.sum(gt, axis=axes) - tp 126 | 127 | score = ((1 + beta ** 2) * tp + smooth) \ 128 | / ((1 + beta ** 2) * tp + beta ** 2 * fn + fp + smooth) 129 | 130 | # mean per image 131 | if per_image: 132 | score = K.mean(score, axis=0) 133 | 134 | # weighted mean per class 135 | score = K.mean(score * class_weights) 136 | 137 | return score 138 | 139 | 140 | def Dice(gt, pr): 141 | """Change default parameters of F-score score 142 | 143 | Args: 144 | class_weights: 1. or list of class weights, len(weights) = C 145 | smooth: value to avoid division by zero 146 | beta: f-score coefficient 147 | per_image: if ``True``, metric is calculated as mean over images in batch (B), 148 | else over whole batch 149 | threshold: value to round predictions (use ``>`` comparison), if ``None`` prediction prediction will not be round 150 | 151 | Returns: 152 | ``callable``: F-score 153 | """ 154 | return f_score(gt, pr) 155 | -------------------------------------------------------------------------------- /segmentation_tools.py: -------------------------------------------------------------------------------- 1 | import pdb 2 | import numpy as np 3 | from scipy.ndimage import label 4 | from skimage.util import img_as_float 5 | from skimage.segmentation import slic 6 | from skimage.segmentation import mark_boundaries 7 | 8 | 9 | # helpful slic tool 10 | def FSLIC(IMAGE, IM, NumSLIC, ComSLIC, SigSLIC, Initial=False): 11 | IMAGE = img_as_float(IMAGE) 12 | Segments = slic(IMAGE, n_segments=NumSLIC, sigma=SigSLIC, compactness=ComSLIC) 13 | if Initial == True: # if initial is true, it returns the fusedimage showing the segments 14 | Fusied_Image = mark_boundaries(IM, Segments, color = (1, 1, 1)) 15 | return (Segments, Fusied_Image[...,0]) 16 | else: 17 | return (Segments) 18 | 19 | 20 | 21 | # normalize image between zero and range provided 22 | def Normalize_Image(IMAGE, Range, Min=None, Max=None, flag_max_edition=None, flag_min_edition=None, bits_conversion=None): 23 | IMAGE = IMAGE.astype('float') 24 | 25 | if Min==None: Min = IMAGE.min() 26 | if Max==None: Max = IMAGE.max() 27 | 28 | if Min != Max: 29 | Out_Img = (IMAGE-Min)/(Max-Min) 30 | else: 31 | Out_Img = np.ones(Out_Img.shape) 32 | 33 | Out_Img = Out_Img*Range 34 | 35 | if flag_max_edition == None: 36 | try: 37 | Out_Img[Out_Img>Range] = Range 38 | except: 39 | Out_Img = Out_Img 40 | 41 | if flag_min_edition == None: 42 | try: 43 | Out_Img[Out_Img<0] = 0 44 | except: 45 | Out_Img = Out_Img 46 | 47 | if bits_conversion == None: 48 | if Range == 2**16-1: 49 | Out_Img = Out_Img.astype('uint16') 50 | else: 51 | Out_Img = Out_Img.astype('uint8') 52 | else: 53 | Out_Img = Out_Img.astype(bits_conversion) 54 | 55 | return(Out_Img) 56 | 57 | 58 | 59 | # trimming the image and its mask if it is given 60 | def cutting_image(IMG, skipping_rows, MASK=[], x_ratio=1, y_ratio=1, skip_columns_to=[]): 61 | if len(skip_columns_to) == 0: 62 | if round(IMG.shape[0]*x_ratio) * round(IMG.shape[1]*y_ratio) %2 == 0: 63 | IMG = IMG[skipping_rows:round(IMG.shape[0]*x_ratio), 64 | :round(IMG.shape[1]*y_ratio)] 65 | 66 | if len(MASK)>0: 67 | MASK = MASK[skipping_rows:round(MASK.shape[0]*x_ratio), 68 | :round(MASK.shape[1]*y_ratio)] 69 | else: 70 | IMG = IMG[skipping_rows:round(IMG.shape[0]*x_ratio), 71 | :round(IMG.shape[1]*y_ratio)-1] 72 | 73 | if len(MASK)>0: 74 | MASK = MASK[skipping_rows:round(MASK.shape[0]*x_ratio), 75 | :round(MASK.shape[1]*y_ratio)-1] 76 | 77 | else: 78 | IMG = IMG[skipping_rows:round(IMG.shape[0]*x_ratio), 79 | :int(skip_columns_to[0])] 80 | 81 | if len(MASK)>0: 82 | MASK = MASK[skipping_rows:round(MASK.shape[0]*x_ratio), 83 | :int(skip_columns_to[0])] 84 | 85 | return(IMG, MASK) 86 | 87 | 88 | 89 | # in a mask find the largest object 90 | def find_largest_obj(Mask): 91 | # one shows background and zero objects 92 | temp_mask = (np.logical_not(Mask)).astype("int") 93 | # make the first and last row zero to make sure it is not affected by noise 94 | temp_mask[0,:] = 0 95 | labeled_obj = label(temp_mask)[0] 96 | 97 | if labeled_obj.max()>1: 98 | BG_Label = labeled_obj[0, -1] 99 | Unique_labels, counts = np.unique(labeled_obj, return_counts=True) 100 | counts = np.delete(counts, np.where(Unique_labels==BG_Label), None) 101 | Unique_labels = np.delete(Unique_labels, np.where(Unique_labels==BG_Label), None) 102 | Max = Unique_labels[counts.argmax()] 103 | Mask[labeled_obj!=Max] = True 104 | return(Mask) 105 | -------------------------------------------------------------------------------- /timeout.py: -------------------------------------------------------------------------------- 1 | from functools import wraps 2 | import errno 3 | import os 4 | import signal 5 | 6 | class TimeoutError(Exception): 7 | pass 8 | 9 | def timeout(seconds=60, error_message=os.strerror(errno.ETIME)): 10 | def decorator(func): 11 | def _handle_timeout(signum, frame): 12 | raise TimeoutError(error_message) 13 | 14 | def wrapper(*args, **kwargs): 15 | signal.signal(signal.SIGALRM, _handle_timeout) 16 | signal.alarm(seconds) 17 | try: 18 | result = func(*args, **kwargs) 19 | finally: 20 | signal.alarm(0) 21 | return result 22 | 23 | return wraps(func)(wrapper) 24 | 25 | return decorator 26 | -------------------------------------------------------------------------------- /weight_selection.py: -------------------------------------------------------------------------------- 1 | # This is read and write functions needed for deeplearning 2 | import pdb, os 3 | import numpy as np 4 | from termcolor import colored 5 | 6 | # it finds the best weight for 20 percent of epochs and after 7 | def weight_selection(obj, parameter_for_selection, final_index=-1, max_index=-1): 8 | if obj.weight_selection_method != "NA": 9 | val_param = "val_"+parameter_for_selection 10 | tra_param = parameter_for_selection 11 | 12 | val_param = np.array(obj.json[val_param]) 13 | tra_param = np.array(obj.json[tra_param]) 14 | if max_index!=-1: 15 | val_param = val_param[:max_index] 16 | tra_param = tra_param[:max_index] 17 | 18 | if obj.weight_selection_method == "bvtw": # best val and train (train(50%)) 19 | 20 | 21 | final_param = (2*val_param+tra_param)/3 22 | final_param[:int(0.2*final_param.shape[0])] = 0 23 | 24 | max_index = np.argmax(final_param[::obj.save_period])*obj.save_period 25 | 26 | 27 | elif obj.weight_selection_method == "bvt": # best val and train (train(50%)) 28 | val_param = np.array(obj.json[val_param]) 29 | tra_param = np.array(obj.json[tra_param]) 30 | 31 | final_param = (val_param+tra_param)/2 32 | final_param[:int(0.2*final_param.shape[0])] = 0 33 | 34 | max_index = np.argmax(final_param[::obj.save_period])*obj.save_period 35 | 36 | 37 | elif obj.weight_selection_method == "bv": # best val 38 | val_param[:int(0.2*val_param.shape[0])] = 0 39 | max_index = np.argmax(val_param[::obj.save_period])*obj.save_period 40 | 41 | 42 | elif obj.weight_selection_method == "bt": # best training 43 | tra_param[:int(0.2*tra_param.shape[0])] = 0 44 | max_index = np.argmax(tra_param[::obj.save_period])*obj.save_period 45 | 46 | 47 | if final_index==-1: 48 | final_index = max_index 49 | 50 | Path, net_name = os.path.split(obj.model_path) 51 | if net_name.find("_model.h5")>-1: 52 | weight_name = net_name[:net_name.find("_model.h5")] 53 | else: 54 | weight_name = net_name[:net_name.find("_Model.h5")] 55 | weight_name = weight_name+"_weights_M_%08d.h5" % final_index 56 | 57 | print(colored("The best model is: "+weight_name % final_index, 'yellow')) 58 | 59 | obj.model.load_weights( os.path.join(Path, weight_name) ) 60 | 61 | print("The validation "+parameter_for_selection+" was "+ 62 | colored(str(round(val_param[final_index], 4)),'red')+ 63 | " and the training "+parameter_for_selection+" was "+ 64 | colored(str(round(tra_param[final_index], 4)),'red')) 65 | 66 | return(obj) 67 | --------------------------------------------------------------------------------