├── AGREEMENTS
│   └── tdsc_abus2023_agreement.pdf
├── Final_Evaluation
│   ├── cls_score.py
│   ├── det_score.py
│   └── seg_score.py
├── Images
│   ├── abus_image.png
│   ├── banner.png
│   └── timeline.png
├── LICENSE
├── README.md
├── Valid_Metrics_On_Grand_Challenge
│   ├── classification.py
│   ├── detection.py
│   └── segmentation.py
└── how to build a docker image for your algorithm.pdf

/AGREEMENTS/tdsc_abus2023_agreement.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PerceptionComputingLab/TDSC-ABUS2023/a9eebe689fa6360399457d5f32a6f93a19f3d4bc/AGREEMENTS/tdsc_abus2023_agreement.pdf
--------------------------------------------------------------------------------
/Final_Evaluation/cls_score.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import os
3 | 
4 | import pandas as pd
5 | from sklearn.metrics import accuracy_score, roc_auc_score
6 | 
7 | parser = argparse.ArgumentParser()
8 | parser.add_argument("team_name", type=str, help="The team name")
9 | 
10 | 
11 | def cls_score(gt_csv_path, pred_csv_path):
12 |     # Calculate accuracy (at a 0.5 threshold) and AUC over all cases
13 | 
14 |     gt_info = pd.read_csv(gt_csv_path)
15 |     gt_info = gt_info["label"]
16 |     pred_info = pd.read_csv(pred_csv_path)
17 |     pred_info = pred_info.sort_values("case")
18 |     pred_info = pred_info["prob"]
19 |     label_map = {"M": 1, "B": 0}
20 |     y_true = [label_map[label] for label in gt_info]
21 |     y_pred = pred_info.apply(lambda x: 1 if x >= 0.5 else 0)
22 |     acc = accuracy_score(y_true, y_pred)
23 |     auc = roc_auc_score(y_true, pred_info)
24 |     return {
25 |         "accuracy": acc,
26 |         "auc": auc,
27 |         "score": (acc + auc) / 2,
28 |     }
29 | 
30 | 
31 | if __name__ == "__main__":
32 |     args = parser.parse_args()
33 |     team = args.team_name
34 |     if len(os.listdir(f"{team}/predict/Classification")) != 0:
35 |         score = cls_score(
36 |             "./Test/labels.csv",
37 |             os.path.join(
38 |                 team,
39 |                 "predict",
40 |                 "Classification",
41 |                 os.listdir(f"{team}/predict/Classification")[0],
42 |             ),
43 |         )
44 |         print(f"{team} cls score: {score}")
--------------------------------------------------------------------------------
/Final_Evaluation/det_score.py:
--------------------------------------------------------------------------------
1 | # This code is from https://github.com/M3DV/MELA-Challenge/tree/main/MELA
2 | # -*- coding: utf-8 -*-
3 | 
4 | import os
5 | 
6 | import numpy as np
7 | import pandas as pd
8 | from matplotlib import pyplot as plt
9 | from tqdm import tqdm
10 | 
11 | DEFAULT_KEY_FP = (0.125, 0.25, 0.5, 1, 2, 4, 8)
12 | 
13 | 
14 | def iou_3d(bbox1, bbox2):
15 |     """
16 |     Calculates the intersection-over-union of two 3D bounding boxes.
17 | """ 18 | bbox1 = [float(x) for x in bbox1] 19 | bbox2 = [float(x) for x in bbox2] 20 | (x1_c, y1_c, z1_c, w1, h1, d1) = bbox1 21 | (x2_c, y2_c, z2_c, w2, h2, d2) = bbox2 22 | 23 | x1_1 = x1_c - w1 / 2 24 | x1_2 = x1_c + w1 / 2 25 | y1_1 = y1_c - h1 / 2 26 | y1_2 = y1_c + h1 / 2 27 | z1_1 = z1_c - d1 / 2 28 | z1_2 = z1_c + d1 / 2 29 | 30 | x2_1 = x2_c - w2 / 2 31 | x2_2 = x2_c + w2 / 2 32 | y2_1 = y2_c - h2 / 2 33 | y2_2 = y2_c + h2 / 2 34 | z2_1 = z2_c - d2 / 2 35 | z2_2 = z2_c + d2 / 2 36 | 37 | # get the overlap rectangle 38 | overlap_x1 = max(x1_1, x2_1) 39 | overlap_y1 = max(y1_1, y2_1) 40 | overlap_z1 = max(z1_1, z2_1) 41 | overlap_x2 = min(x1_2, x2_2) 42 | overlap_y2 = min(y1_2, y2_2) 43 | overlap_z2 = min(z1_2, z2_2) 44 | 45 | # check if there is an overlap 46 | if ( 47 | overlap_x2 - overlap_x1 <= 0 48 | or overlap_y2 - overlap_y1 <= 0 49 | or overlap_z2 - overlap_z1 <= 0 50 | ): 51 | return 0 52 | 53 | size_1 = (x1_2 - x1_1) * (y1_2 - y1_1) * (z1_2 - z1_1) 54 | size_2 = (x2_2 - x2_1) * (y2_2 - y2_1) * (z2_2 - z2_1) 55 | size_intersection = ( 56 | (overlap_x2 - overlap_x1) 57 | * (overlap_y2 - overlap_y1) 58 | * (overlap_z2 - overlap_z1) 59 | ) 60 | size_union = size_1 + size_2 - size_intersection 61 | return size_intersection / size_union 62 | 63 | 64 | def _compile_pred_metrics(iou_matrix, gt_info, pred_info): 65 | """ 66 | Compile prediction metrics into a Pandas DataFrame 67 | 68 | Parameters 69 | ---------- 70 | iou_matrix : numpy.ndarray 71 | IoU array with shape of (n_pred, n_gt). 72 | gt_info : pandas.DataFrame 73 | DataFrame containing GT information. 74 | pred_info : pandas.DataFrame 75 | DataFrame containing prediction information. 76 | 77 | Returns 78 | ------- 79 | pred_metrics : pandas.DataFrame 80 | A dataframe of prediction metrics. 81 | """ 82 | # meanings of each column: 83 | # pred_label -- The index of prediction 84 | # max_iou -- The highest IoU this prediction has with any certain GT 85 | # hit_label -- The GT label with which this prediction has the highest IoU 86 | # prob -- The confidence prediction of this prediction 87 | # num_gt -- Total number of GT in this volume 88 | 89 | pred_metrics = pd.DataFrame( 90 | np.zeros((iou_matrix.shape[0], 3)), 91 | columns=["pred_label", "max_iou", "hit_label"], 92 | ) 93 | pred_metrics["pred_label"] = np.arange(1, iou_matrix.shape[0] + 1) 94 | pred_metrics["max_iou"] = iou_matrix.max(axis=1) 95 | pred_metrics["hit_label"] = iou_matrix.argmax(axis=1) + 1 96 | 97 | pred_metrics["hit_label"] = pred_metrics.apply( 98 | lambda x: x["hit_label"] if x["max_iou"] > 0 else 0, axis=1 99 | ) 100 | # fill in the detection confidence 101 | 102 | pred_metrics = pred_metrics.merge( 103 | pred_info[["label_id", "probability"]], 104 | how="left", 105 | left_on="pred_label", 106 | right_on="label_id", 107 | ) 108 | pred_metrics.rename({"probability": "prob"}, axis=1, inplace=True) 109 | pred_metrics.drop("label_id", axis=1, inplace=True) 110 | 111 | pred_metrics = pred_metrics.merge( 112 | gt_info["label_id"], how="left", left_on="hit_label", right_on="label_id" 113 | ) 114 | pred_metrics.drop("label_id", axis=1, inplace=True) 115 | pred_metrics["num_gt"] = iou_matrix.shape[1] 116 | 117 | return pred_metrics 118 | 119 | 120 | def evaluate_single_prediction(gt_info, pred_info): 121 | """ 122 | Evaluate a single prediction. 123 | 124 | Parameters 125 | ---------- 126 | gt_info : pandas.DataFrame 127 | DataFrame containing GT information. 128 | pred_info : pandas.DataFrame 129 | DataFrame containing prediction information. 
130 | 131 | Returns 132 | ------- 133 | pred_metrics : pandas.DataFrame 134 | A dataframe of prediction metrics. 135 | num_gt : int 136 | Number of GT in this case. 137 | """ 138 | # GT and prediction 139 | 140 | num_gt = len(gt_info) 141 | num_pred = len(pred_info) 142 | 143 | # if the prediction is empty, return empty pred_metrics 144 | if num_pred == 0: 145 | pred_metrics = pd.DataFrame() 146 | return pred_metrics, num_gt 147 | 148 | # if GT is empty 149 | if num_gt == 0: 150 | pred_metrics = pd.DataFrame( 151 | [ 152 | { 153 | "pred_label": i, 154 | "max_iou": 0, 155 | "hit_label": 0, 156 | "gt_class": "FP", 157 | "num_gt": 0, 158 | } 159 | for i in range(1, num_pred + 1) 160 | ] 161 | ) 162 | pred_metrics = pred_metrics.merge( 163 | pred_info[["label_id", "probability"]], 164 | how="left", 165 | left_on="pred_label", 166 | right_on="label_id", 167 | ) 168 | pred_metrics.rename({"probability": "prob"}, axis=1, inplace=True) 169 | pred_metrics.drop(["label_id"], axis=1, inplace=True) 170 | 171 | return pred_metrics, num_gt 172 | 173 | iou_matrix = np.zeros((num_gt, num_pred)) 174 | 175 | # iterate through all gt and prediction of seriesuid and evaluate predictions 176 | for gt_idx in range(num_gt): 177 | # get gt bbox info 178 | gt_bbox_info = gt_info.iloc[gt_idx] 179 | gt_bbox = [ 180 | gt_bbox_info["coordX"], 181 | gt_bbox_info["coordY"], 182 | gt_bbox_info["coordZ"], 183 | gt_bbox_info["x_length"], 184 | gt_bbox_info["y_length"], 185 | gt_bbox_info["z_length"], 186 | ] 187 | 188 | for pred_idx in range(num_pred): 189 | # get prediction bbox info 190 | pred_bbox_info = pred_info.iloc[pred_idx] 191 | pred_bbox = [ 192 | pred_bbox_info["coordX"], 193 | pred_bbox_info["coordY"], 194 | pred_bbox_info["coordZ"], 195 | pred_bbox_info["x_length"], 196 | pred_bbox_info["y_length"], 197 | pred_bbox_info["z_length"], 198 | ] 199 | # get iou of gt and pred bboxes 200 | gt_pred_iou = iou_3d(gt_bbox, pred_bbox) 201 | iou_matrix[gt_idx, pred_idx] = gt_pred_iou 202 | 203 | # get corresponding GT index, pred index and union index 204 | 205 | iou_matrix = iou_matrix.T 206 | pred_metrics = _compile_pred_metrics(iou_matrix, gt_info, pred_info) 207 | 208 | return pred_metrics, num_gt 209 | 210 | 211 | def _froc_single_thresh(df_list, num_gts, p_thresh, iou_thresh): 212 | """ 213 | Calculate the FROC for a single confidence threshold. 214 | 215 | Parameters 216 | ---------- 217 | df_list : list of pandas.DataFrame 218 | List of Pandas DataFrame of prediction metrics. 219 | num_gts : list of int 220 | List of number of GT in each volume. 221 | p_thresh : float 222 | The probability threshold of positive predictions. 223 | iou_thresh : float 224 | The IoU threshold of predictions being considered as "hit". 225 | 226 | Returns 227 | ------- 228 | fp : float 229 | False positives per scan for this threshold. 230 | recall : float 231 | Recall rate for this threshold. 
232 |     """
233 |     EPS = 1e-8
234 | 
235 |     total_gt = sum(num_gts)
236 |     # collect all predictions above the probability threshold
237 |     df_pos_pred = [df.loc[df["prob"] >= p_thresh] for df in df_list if len(df) > 0]
238 | 
239 |     # calculate total true positives
240 |     total_tp = sum(
241 |         [
242 |             len(df.loc[df["max_iou"] > iou_thresh, "hit_label"].unique())
243 |             for df in df_pos_pred
244 |         ]
245 |     )
246 | 
247 |     # calculate total false positives
248 |     total_fp = sum(
249 |         [len(df) - len(df.loc[df["max_iou"] > iou_thresh]) for df in df_pos_pred]
250 |     )
251 | 
252 |     fp = (total_fp + EPS) / (len(df_list) + EPS)  # average FP per sample
253 |     recall = (total_tp + EPS) / (total_gt + EPS)
254 | 
255 |     return fp, recall
256 | 
257 | 
258 | def _interpolate_recall_at_fp(fp_recall, key_fp):
259 |     """
260 |     Calculate recall at key_fp using interpolation.
261 | 
262 |     Parameters
263 |     ----------
264 |     fp_recall : pandas.DataFrame
265 |         DataFrame of FP and recall.
266 |     key_fp : float
267 |         Key FP threshold at which the recall will be calculated.
268 | 
269 |     Returns
270 |     -------
271 |     recall_at_fp : float
272 |         Recall at key_fp.
273 |     """
274 |     # get fp/recall interpolation points
275 |     fp_recall_less_fp = fp_recall.loc[fp_recall.fp <= key_fp]
276 |     fp_recall_more_fp = fp_recall.loc[fp_recall.fp >= key_fp]
277 | 
278 |     # if key_fp < min_fp, recall = 0
279 |     if len(fp_recall_less_fp) == 0:
280 |         return 0
281 | 
282 |     # if key_fp > max_fp, recall = max_recall
283 |     if len(fp_recall_more_fp) == 0:
284 |         return fp_recall.recall.max()
285 | 
286 |     fp_0 = fp_recall_less_fp["fp"].values[-1]
287 |     fp_1 = fp_recall_more_fp["fp"].values[0]
288 |     recall_0 = fp_recall_less_fp["recall"].values[-1]
289 |     recall_1 = fp_recall_more_fp["recall"].values[0]
290 |     recall_at_fp = recall_0 + (recall_1 - recall_0) * (
291 |         (key_fp - fp_0) / (fp_1 - fp_0 + 1e-8)
292 |     )
293 | 
294 |     return recall_at_fp
295 | 
296 | 
297 | def _get_key_recall(fp, recall, key_fp_list):
298 |     """
299 |     Calculate recall at a series of FP thresholds.
300 | 
301 |     Parameters
302 |     ----------
303 |     fp : list of float
304 |         List of FP at different probability thresholds.
305 |     recall : list of float
306 |         List of recall at different probability thresholds.
307 |     key_fp_list : list of float
308 |         List of key FP values.
309 | 
310 |     Returns
311 |     -------
312 |     key_recall : list of float
313 |         List of key recall at each key FP.
314 |     """
315 |     fp_recall = pd.DataFrame({"fp": fp, "recall": recall}).sort_values("fp")
316 |     key_recall = [
317 |         _interpolate_recall_at_fp(fp_recall, key_fp) for key_fp in key_fp_list
318 |     ]
319 | 
320 |     return key_recall
321 | 
322 | 
323 | def froc(df_list, num_gts, iou_thresh=0.3, key_fp=DEFAULT_KEY_FP):
324 |     """
325 |     Calculate the FROC curve.
326 |     Parameters
327 |     ----------
328 |     df_list : list of pandas.DataFrame
329 |         List of prediction metrics.
330 |     num_gts : list of int
331 |         List of number of GT in each volume.
332 |     iou_thresh : float
333 |         The IoU threshold of predictions being considered as "hit".
334 |     key_fp : tuple of float
335 |         The key false positive per scan used in evaluating the sensitivity
336 |         of the model.
337 | 
338 |     Returns
339 |     -------
340 |     fp : list of float
341 |         List of false positives per scan at different probability thresholds.
342 |     recall : list of float
343 |         List of recall at different probability thresholds.
344 |     key_recall : list of float
345 |         List of key recall corresponding to key FPs.
346 |     avg_recall : float
347 |         Average recall at key FPs.
This is the evaluation metric we use
348 |         in the detection track.
349 |     """
350 |     fp_recall = [
351 |         _froc_single_thresh(df_list, num_gts, p_thresh, iou_thresh)
352 |         for p_thresh in np.arange(0, 1, 0.005)
353 |     ]
354 |     fp = [x[0] for x in fp_recall]
355 |     recall = [x[1] for x in fp_recall]
356 |     key_recall = _get_key_recall(fp, recall, key_fp)
357 |     avg_recall = np.mean(key_recall)
358 | 
359 |     return fp, recall, key_recall, avg_recall
360 | 
361 | 
362 | def plot_froc(fp, recall):
363 |     """
364 |     Plot the FROC curve.
365 | 
366 |     Parameters
367 |     ----------
368 |     fp : list of float
369 |         List of false positives per scan at different confidence thresholds.
370 |     recall : list of float
371 |         List of recall at different confidence thresholds.
372 |     """
373 |     _, ax = plt.subplots()
374 |     ax.plot(fp, recall)
375 |     ax.set_title("FROC")
376 |     plt.savefig("froc.jpg")
377 | 
378 | 
379 | def evaluate(gt_csv_path, pred_csv_path):
380 |     """
381 |     Evaluate predictions against the ground-truth.
382 | 
383 |     Parameters
384 |     ----------
385 |     gt_csv_path : str
386 |         The ground-truth csv.
387 |     pred_csv_path : str
388 |         The prediction csv.
389 | 
390 |     Returns
391 |     -------
392 |     eval_results : dict
393 |         Dictionary containing detection results.
394 |     """
395 |     # GT and prediction information
396 |     gt_info = pd.read_csv(gt_csv_path)
397 |     pred_info = pd.read_csv(pred_csv_path)
398 |     gt_pids = list(gt_info["public_id"].unique())
399 |     pred_pids = list(pred_info["public_id"].unique())
400 | 
401 |     # GT and prediction directory sanity check
402 |     for i in pred_pids:
403 |         assert i in gt_pids, "Unmatched seriesuid (not included in test set)."
404 | 
405 |     eval_results = []
406 |     progress = tqdm(total=len(gt_pids))
407 |     for pid in gt_pids:
408 |         # get GT array and information
409 |         cur_gt_info = gt_info.loc[gt_info.public_id == pid].reset_index(drop=True)
410 |         cur_gt_info["label_id"] = np.arange(1, len(cur_gt_info) + 1)
411 | 
412 |         # get prediction array and information
413 |         cur_pred_info = pred_info.loc[pred_info.public_id == pid].reset_index(drop=True)
414 |         cur_pred_info["label_id"] = np.arange(1, len(cur_pred_info) + 1)
415 | 
416 |         # perform evaluation
417 |         eval_results.append(evaluate_single_prediction(cur_gt_info, cur_pred_info))
418 | 
419 |         progress.update(1)
420 | 
421 |     progress.close()
422 | 
423 |     # detection results
424 |     det_results = [x[0] for x in eval_results]
425 |     num_gts = [x[1] for x in eval_results]
426 | 
427 |     # calculate the detection FROC
428 |     fp, recall, key_recall, avg_recall = froc(det_results, num_gts)
429 | 
430 |     eval_results = {
431 |         "detection": {
432 |             "fp": fp,
433 |             "recall": recall,
434 |             "key_recall": key_recall,
435 |             "average_recall": avg_recall,
436 |             "max_recall": max(recall),
437 |             "average_fp_at_max_recall": max(fp),
438 |         }
439 |     }
440 | 
441 |     return eval_results
442 | 
443 | 
444 | if __name__ == "__main__":
445 |     import argparse
446 | 
447 |     parser = argparse.ArgumentParser()
448 |     parser.add_argument("team_name", type=str, help="The team name")
449 | 
450 |     args = parser.parse_args()
451 |     team = args.team_name
452 | 
453 |     eval_results = evaluate(
454 |         "./Test/bbx_labels.csv",
455 |         os.path.join(
456 |             team,
457 |             "predict",
458 |             "Detection",
459 |             os.listdir(f"{team}/predict/Detection")[0],
460 |         ),
461 |     )
462 | 
463 |     # detection metrics
464 |     print("\nDetection metrics")
465 |     print("=" * 64)
466 |     print("Recall at key FP")
467 |     froc_recall = pd.DataFrame(
468 |         np.array(eval_results["detection"]["key_recall"]).reshape(1, -1),
469
| index=["Recall"], 470 | columns=[f"FP={str(x)}" for x in DEFAULT_KEY_FP], 471 | ) 472 | print(froc_recall) 473 | print("Average recall: {:.4f}".format(eval_results["detection"]["average_recall"])) 474 | print("Maximum recall: {:.4f}".format(eval_results["detection"]["max_recall"])) 475 | print( 476 | "Average FP per scan at maximum recall: {:.4f}".format( 477 | eval_results["detection"]["average_fp_at_max_recall"] 478 | ) 479 | ) 480 | 481 | # # plot/print FROC curve 482 | # print("FPR, Recall in FROC") 483 | # for fp, recall in zip( 484 | # reversed(eval_results["detection"]["fp"]), 485 | # reversed(eval_results["detection"]["recall"]), 486 | # ): 487 | # print(f"({fp:.8f}, {recall:.8f})") 488 | # plot_froc(eval_results["detection"]["fp"], eval_results["detection"]["recall"]) 489 | -------------------------------------------------------------------------------- /Final_Evaluation/seg_score.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import csv 3 | import os 4 | 5 | import SimpleITK 6 | from alive_progress import alive_it 7 | 8 | parser = argparse.ArgumentParser() 9 | parser.add_argument("team_name", type=str, help="The team name") 10 | 11 | 12 | def seg_score_per_case(gt_file, pred_file): 13 | # Calculate Dice HD score per case 14 | 15 | # Load the images for this case 16 | gt = SimpleITK.ReadImage(gt_file) 17 | pred = SimpleITK.ReadImage(pred_file) 18 | 19 | # Cast to the same type 20 | caster = SimpleITK.CastImageFilter() 21 | caster.SetOutputPixelType(SimpleITK.sitkUInt8) 22 | caster.SetNumberOfThreads(1) 23 | gt = caster.Execute(gt) 24 | pred = caster.Execute(pred) 25 | 26 | # Score the case 27 | overlap_measures = SimpleITK.LabelOverlapMeasuresImageFilter() 28 | overlap_measures.SetNumberOfThreads(1) 29 | overlap_measures.Execute(gt, pred) 30 | dice = overlap_measures.GetDiceCoefficient() 31 | try: 32 | hausdorff_filter = SimpleITK.HausdorffDistanceImageFilter() 33 | hausdorff_filter.Execute(gt, pred) 34 | except Exception: 35 | HD = float("inf") 36 | else: 37 | HD = hausdorff_filter.GetHausdorffDistance() 38 | 39 | return { 40 | "DiceCoefficient": dice, 41 | "HDCoefficient": HD, 42 | "score": dice - HD, 43 | } 44 | 45 | 46 | def calculate_seg_score_all(gt_path, pred_path, team): 47 | # Iterate over all cases in the test set and calculate the score 48 | 49 | def sort_filename(name): 50 | key = int(name.split("_")[1].split(".", 1)[0]) 51 | return key 52 | 53 | # Get the list of cases 54 | cases = os.listdir(gt_path) 55 | cases.sort(key=sort_filename) 56 | 57 | # Get the predict file format 58 | pred_cases = os.listdir(pred_path) 59 | pred_cases.sort(key=sort_filename) 60 | 61 | assert len(cases) == len(pred_cases) 62 | 63 | # Calculate the score for each case 64 | scores = [] 65 | csvfile = open(f"{team}/seg_score.csv", "w", encoding="utf-8") 66 | writer = csv.DictWriter( 67 | csvfile, fieldnames=[ 68 | "case", "DiceCoefficient", "HDCoefficient", "score"] 69 | ) 70 | writer.writeheader() 71 | for gt, pred in alive_it(zip(cases, pred_cases), total=len(pred_cases), title=team): 72 | res = seg_score_per_case( 73 | os.path.join(gt_path, gt), os.path.join(pred_path, pred) 74 | ) 75 | writer.writerow({**res, "case": gt}) 76 | scores.append(res) 77 | 78 | # Calculate the mean score 79 | mean_dice = sum([s["DiceCoefficient"] for s in scores]) / len(scores) 80 | mean_HD = sum([s["HDCoefficient"] for s in scores]) / len(scores) 81 | mean_score = sum([s["score"] for s in scores]) / len(scores) 82 | writer.writerow( 83 | { 84 | 
"case": team, 85 | "DiceCoefficient": mean_dice, 86 | "HDCoefficient": mean_HD, 87 | "score": mean_score, 88 | } 89 | ) 90 | csvfile.close() 91 | return {"dice": mean_dice, "HD": mean_HD, "score": mean_score} 92 | 93 | 94 | if __name__ == "__main__": 95 | args = parser.parse_args() 96 | team = args.team_name 97 | if len(os.listdir(f"{team}/predict/Segmentation")) != 0: 98 | seg_score = calculate_seg_score_all( 99 | "./Test/MASK", 100 | os.path.join(team, "predict", "Segmentation"), 101 | team, 102 | ) 103 | print(f"{team} seg score: {seg_score}") 104 | -------------------------------------------------------------------------------- /Images/abus_image.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PerceptionComputingLab/TDSC-ABUS2023/a9eebe689fa6360399457d5f32a6f93a19f3d4bc/Images/abus_image.png -------------------------------------------------------------------------------- /Images/banner.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PerceptionComputingLab/TDSC-ABUS2023/a9eebe689fa6360399457d5f32a6f93a19f3d4bc/Images/banner.png -------------------------------------------------------------------------------- /Images/timeline.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PerceptionComputingLab/TDSC-ABUS2023/a9eebe689fa6360399457d5f32a6f93a19f3d4bc/Images/timeline.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # TDSC-ABUS2023 2 | 3 | > This is the official repository of [MICCAI 2023 TDSC-ABUS challenge](https://tdsc-abus2023.grand-challenge.org/) 4 | 5 | ![tdsc banner](/Images/banner.png) 6 | 7 | --- 8 | ## About the Challenge 9 | Breast cancer is one of the most common causes of death among women worldwide. Early detection helps in reducing the number of deaths. 
Automated 3D Breast Ultrasound (ABUS) is a newer approach to breast screening, with many advantages over conventional handheld ultrasound and mammography, such as safety, speed, and a higher detection rate of breast cancer. It is therefore likely to see much wider adoption over the next several years.
10 | 
11 | Tumor segmentation, classification and detection are three basic tasks in medical image analysis. These tasks are very challenging on 3D ABUS volumes because of large variation in tumor size and shape, irregular and ambiguous tumor boundaries, and low signal-to-noise ratio. Furthermore, openly accessible ABUS datasets with well-labeled tumors are rare, which hinders the development of breast tumor segmentation, classification and detection systems.
12 | 
13 | Thus, we host the first Tumor Detection, Segmentation and Classification Challenge on Automated 3D Breast Ultrasound 2023 (named TDSC-ABUS2023) to open a new research topic and establish a solid benchmark for 3D ABUS image segmentation, classification and detection tasks.
14 | 
15 | We have collected 200 3D volumes, with both tumor boundaries and tumor categories carefully labeled by an experienced clinician: 100 for the training dataset, 70 for the closed testing dataset and 30 for the open validation dataset. Dice and HD are adopted as evaluation metrics for segmentation, accuracy and AUC are used for classification, and mAP@0.75 is used for detection. This challenge will also promote breast cancer treatment, interactions between researchers, and interdisciplinary communication.
16 | 
17 | ![abus_data](/Images/abus_image.png)
18 | 
19 | ---
20 | ## Task
21 | Participants are required to **Detect**, **Segment** and **Classify** tumors in 3D ABUS images.
22 | 
23 | ---
24 | 
25 | ## Schedule
26 | * Registration Opens: March 28, 2023 (11:59 PM GMT)
27 | * Training Dataset Release: April 6, 2023 (11:59 PM GMT)
28 | * Validation Dataset Release & Open Validation Leaderboard Submission: July 15, 2023 (11:59 PM GMT)
29 | * Validation Leaderboard Submission Deadline: August 20, 2023 (11:59 PM GMT)
30 | * Opening of Docker and Short Paper Submission for Testing Phase: August 20, 2023 (11:59 PM GMT)
31 | * Docker and Short Paper Submission Deadline: August 30, 2023 (11:59 PM GMT)
32 | * Winner Announcement & Invitation to Speakers: October 8, 2023 (11:59 PM GMT)
33 | 
34 | ![timeline](/Images/timeline.png)
35 | 
36 | ---
37 | 
38 | ## Registration
39 | 
40 | Please refer to the grand challenge site [https://tdsc-abus2023.grand-challenge.org/](https://tdsc-abus2023.grand-challenge.org/)
41 | 
42 | ---
43 | 
44 | ## Award
45 | 
46 | Successful-participation awards (electronic certificates) will be given to all teams that obtain valid test scores on the challenge leaderboard and complete a technical paper submission reviewed by the organizing committee.
47 | 
48 | The team with the best score on the overall leaderboard will receive 300 dollars or electronic products of similar value. Exquisite certificates will be awarded to all members of this top-1 team.
49 | 
50 | Each team that wins first place on a single-task (Segmentation, Classification or Detection) leaderboard will receive 200 dollars or electronic products of similar value. Exquisite certificates will be awarded to all members of these top-1 teams.
51 | 
52 | ---
53 | 
54 | ## Citation
55 | 
56 | Any publication related to this challenge should reference our summary paper as provided below.
57 | 
58 | [1] Gongning Luo, Mingwang Xu, Hongyu Chen, Xinjie Liang, Xing Tao, Dong Ni, Hyunsu Jeong, Chulhong Kim, Raphael Stock, Michael Baumgartner, Yannick Kirchhoff, Maximilian Rokuss, Klaus Maier-Hein, Zhikai Yang, Tianyu Fan, Nicolas Boutry, Dmitry Tereshchenko, Arthur Moine, Maximilien Charmetant, Jan Sauer, Hao Du, Xiang-Hui Bai, Vipul Pai Raikar, Ricardo Montoya-del-Angel, Robert Marti, Miguel Luna, Dongmin Lee, Abdul Qayyum, Moona Mazher, Qihui Guo, Changyan Wang, Navchetan Awasthi, Qiaochu Zhao, Wei Wang, Kuanquan Wang, Qiucheng Wang, Suyu Dong (2025). Tumor Detection, Segmentation and Classification Challenge on Automated 3D Breast Ultrasound: The TDSC-ABUS Challenge. arXiv:2501.15588. [ArXiv](https://arxiv.org/abs/2501.15588)
--------------------------------------------------------------------------------
/Valid_Metrics_On_Grand_Challenge/classification.py:
--------------------------------------------------------------------------------
1 | from sklearn.metrics import accuracy_score, roc_auc_score
2 | 
3 | import pandas as pd
4 | 
5 | 
6 | def score_aggregates(gt_csv_path, pred_csv_path):
7 |     gt_info = pd.read_csv(gt_csv_path)
8 |     gt_info = gt_info["class"]
9 |     pred_info = pd.read_csv(pred_csv_path)
10 |     pred_info = pred_info["prob"]
11 |     label_map = {'M': 1, 'B': 0}
12 |     y_true = [label_map[label] for label in gt_info]
13 |     # threshold the probabilities at 0.5 for accuracy; AUC uses the raw scores
14 |     y_pred = (pred_info >= 0.5).astype(int)
15 |     acc = accuracy_score(y_true, y_pred)
16 |     auc = roc_auc_score(y_true, pred_info)
17 |     return {
18 |         "accuracy": acc,
19 |         "auc": auc,
20 |         "score": (acc + auc) / 2,
21 |     }
22 | 
23 | 
24 | if __name__ == "__main__":
25 |     eval_results = score_aggregates("gt_path", "pred_path")
--------------------------------------------------------------------------------
/Valid_Metrics_On_Grand_Challenge/detection.py:
--------------------------------------------------------------------------------
1 | # This code is from https://github.com/M3DV/MELA-Challenge/tree/main/MELA
2 | # -*- coding: utf-8 -*-
3 | 
4 | import numpy as np
5 | import pandas as pd
6 | from matplotlib import pyplot as plt
7 | from tqdm import tqdm
8 | 
9 | 
10 | # FROC recall is evaluated at these false-positive-per-scan rates
11 | DEFAULT_KEY_FP = (0.125, 0.25, 0.5, 1, 2, 4, 8)
12 | 
13 | 
14 | def iou_3d(bbox1, bbox2):
15 |     """
16 |     Calculates the intersection-over-union of two 3D bounding boxes.
17 | """ 18 | bbox1 = [float(x) for x in bbox1] 19 | bbox2 = [float(x) for x in bbox2] 20 | (x1_c, y1_c, z1_c, w1, h1, d1) = bbox1 21 | (x2_c, y2_c, z2_c, w2, h2, d2) = bbox2 22 | 23 | x1_1 = x1_c - w1 / 2 24 | x1_2 = x1_c + w1 / 2 25 | y1_1 = y1_c - h1 / 2 26 | y1_2 = y1_c + h1 / 2 27 | z1_1 = z1_c - d1 / 2 28 | z1_2 = z1_c + d1 / 2 29 | 30 | x2_1 = x2_c - w2 / 2 31 | x2_2 = x2_c + w2 / 2 32 | y2_1 = y2_c - h2 / 2 33 | y2_2 = y2_c + h2 / 2 34 | z2_1 = z2_c - d2 / 2 35 | z2_2 = z2_c + d2 / 2 36 | 37 | # get the overlap rectangle 38 | overlap_x1 = max(x1_1, x2_1) 39 | overlap_y1 = max(y1_1, y2_1) 40 | overlap_z1 = max(z1_1, z2_1) 41 | overlap_x2 = min(x1_2, x2_2) 42 | overlap_y2 = min(y1_2, y2_2) 43 | overlap_z2 = min(z1_2, z2_2) 44 | 45 | # check if there is an overlap 46 | if overlap_x2 - overlap_x1 <= 0 or overlap_y2 - overlap_y1 <= 0 or overlap_z2 - overlap_z1 <= 0: 47 | return 0 48 | 49 | size_1 = (x1_2 - x1_1) * (y1_2 - y1_1) * (z1_2 - z1_1) 50 | size_2 = (x2_2 - x2_1) * (y2_2 - y2_1) * (z2_2 - z2_1) 51 | size_intersection = (overlap_x2 - overlap_x1) * (overlap_y2 - overlap_y1) * (overlap_z2 - overlap_z1) 52 | size_union = size_1 + size_2 - size_intersection 53 | return size_intersection / size_union 54 | 55 | def _compile_pred_metrics(iou_matrix, gt_info, pred_info): 56 | """ 57 | Compile prediction metrics into a Pandas DataFrame 58 | 59 | Parameters 60 | ---------- 61 | iou_matrix : numpy.ndarray 62 | IoU array with shape of (n_pred, n_gt). 63 | gt_info : pandas.DataFrame 64 | DataFrame containing GT information. 65 | pred_info : pandas.DataFrame 66 | DataFrame containing prediction information. 67 | 68 | Returns 69 | ------- 70 | pred_metrics : pandas.DataFrame 71 | A dataframe of prediction metrics. 72 | """ 73 | # meanings of each column: 74 | # pred_label -- The index of prediction 75 | # max_iou -- The highest IoU this prediction has with any certain GT 76 | # hit_label -- The GT label with which this prediction has the highest IoU 77 | # prob -- The confidence prediction of this prediction 78 | # num_gt -- Total number of GT in this volume 79 | 80 | pred_metrics = pd.DataFrame(np.zeros((iou_matrix.shape[0], 3)), 81 | columns=["pred_label", "max_iou", "hit_label"]) 82 | pred_metrics["pred_label"] = np.arange(1, iou_matrix.shape[0] + 1) 83 | pred_metrics["max_iou"] = iou_matrix.max(axis=1) 84 | pred_metrics["hit_label"] = iou_matrix.argmax(axis=1) + 1 85 | 86 | pred_metrics["hit_label"] = pred_metrics.apply(lambda x: x["hit_label"] if x["max_iou"] > 0 else 0, axis=1) 87 | # fill in the detection confidence 88 | 89 | pred_metrics = pred_metrics.merge( 90 | pred_info[["label_id", "probability"]], 91 | how="left", left_on="pred_label", right_on="label_id") 92 | pred_metrics.rename({"probability": "prob"}, 93 | axis=1, inplace=True) 94 | pred_metrics.drop("label_id", axis=1, inplace=True) 95 | 96 | pred_metrics = pred_metrics.merge(gt_info["label_id"], 97 | how="left", left_on="hit_label", right_on="label_id") 98 | pred_metrics.drop("label_id", axis=1, inplace=True) 99 | pred_metrics["num_gt"] = iou_matrix.shape[1] 100 | 101 | return pred_metrics 102 | 103 | 104 | def evaluate_single_prediction(gt_info, pred_info): 105 | """ 106 | Evaluate a single prediction. 107 | 108 | Parameters 109 | ---------- 110 | gt_info : pandas.DataFrame 111 | DataFrame containing GT information. 112 | pred_info : pandas.DataFrame 113 | DataFrame containing prediction information. 114 | 115 | Returns 116 | ------- 117 | pred_metrics : pandas.DataFrame 118 | A dataframe of prediction metrics. 
119 | num_gt : int 120 | Number of GT in this case. 121 | """ 122 | # GT and prediction 123 | 124 | num_gt = len(gt_info) 125 | num_pred = len(pred_info) 126 | 127 | # if the prediction is empty, return empty pred_metrics 128 | if num_pred == 0: 129 | pred_metrics = pd.DataFrame() 130 | return pred_metrics, num_gt 131 | 132 | # if GT is empty 133 | if num_gt == 0: 134 | pred_metrics = pd.DataFrame([ 135 | { 136 | "pred_label": i, 137 | "max_iou": 0, 138 | "hit_label": 0, 139 | "gt_class": "FP", 140 | "num_gt": 0 141 | } 142 | for i in range(1, num_pred + 1)]) 143 | pred_metrics = pred_metrics.merge( 144 | pred_info[["label_id", "probability"]], 145 | how="left", left_on="pred_label", right_on="label_id") 146 | pred_metrics.rename( 147 | {"probability": "prob"}, axis=1, 148 | inplace=True) 149 | pred_metrics.drop(["label_id"], axis=1, inplace=True) 150 | 151 | return pred_metrics, num_gt 152 | 153 | iou_matrix = np.zeros((num_gt, num_pred)) 154 | 155 | # iterate through all gt and prediction of seriesuid and evaluate predictions 156 | for gt_idx in range(num_gt): 157 | # get gt bbox info 158 | gt_bbox_info = gt_info.iloc[gt_idx] 159 | gt_bbox = [gt_bbox_info['coordX'], gt_bbox_info['coordY'], gt_bbox_info['coordZ'], 160 | gt_bbox_info['x_length'], gt_bbox_info['y_length'], gt_bbox_info['z_length']] 161 | 162 | for pred_idx in range(num_pred): 163 | # get prediction bbox info 164 | pred_bbox_info = pred_info.iloc[pred_idx] 165 | pred_bbox = [pred_bbox_info['coordX'], pred_bbox_info['coordY'], pred_bbox_info['coordZ'], 166 | pred_bbox_info['x_length'], pred_bbox_info['y_length'], pred_bbox_info['z_length']] 167 | # get iou of gt and pred bboxes 168 | gt_pred_iou = iou_3d(gt_bbox, pred_bbox) 169 | iou_matrix[gt_idx, pred_idx] = gt_pred_iou 170 | 171 | # get corresponding GT index, pred index and union index 172 | 173 | iou_matrix = iou_matrix.T 174 | pred_metrics = _compile_pred_metrics(iou_matrix, gt_info, pred_info) 175 | 176 | return pred_metrics, num_gt 177 | 178 | 179 | def _froc_single_thresh(df_list, num_gts, p_thresh, iou_thresh): 180 | """ 181 | Calculate the FROC for a single confidence threshold. 182 | 183 | Parameters 184 | ---------- 185 | df_list : list of pandas.DataFrame 186 | List of Pandas DataFrame of prediction metrics. 187 | num_gts : list of int 188 | List of number of GT in each volume. 189 | p_thresh : float 190 | The probability threshold of positive predictions. 191 | iou_thresh : float 192 | The IoU threshold of predictions being considered as "hit". 193 | 194 | Returns 195 | ------- 196 | fp : float 197 | False positives per scan for this threshold. 198 | recall : float 199 | Recall rate for this threshold. 200 | """ 201 | EPS = 1e-8 202 | 203 | total_gt = sum(num_gts) 204 | # collect all predictions above the probability threshold 205 | df_pos_pred = [df.loc[df["prob"] >= p_thresh] for df in df_list 206 | if len(df) > 0] 207 | 208 | # calculate total true positives 209 | total_tp = sum([len(df.loc[df["max_iou"] > iou_thresh, "hit_label"] \ 210 | .unique()) for df in df_pos_pred]) 211 | 212 | # calculate total false positives 213 | total_fp = sum([len(df) - len(df.loc[df["max_iou"] > iou_thresh]) 214 | for df in df_pos_pred]) 215 | 216 | fp = (total_fp + EPS) / (len(df_list) + EPS) # average fp in every sample 217 | recall = (total_tp + EPS) / (total_gt + EPS) 218 | 219 | return fp, recall 220 | 221 | 222 | def _interpolate_recall_at_fp(fp_recall, key_fp): 223 | """ 224 | Calculate recall at key_fp using interpolation. 
225 | 
226 |     Parameters
227 |     ----------
228 |     fp_recall : pandas.DataFrame
229 |         DataFrame of FP and recall.
230 |     key_fp : float
231 |         Key FP threshold at which the recall will be calculated.
232 | 
233 |     Returns
234 |     -------
235 |     recall_at_fp : float
236 |         Recall at key_fp.
237 |     """
238 |     # get fp/recall interpolation points
239 |     fp_recall_less_fp = fp_recall.loc[fp_recall.fp <= key_fp]
240 |     fp_recall_more_fp = fp_recall.loc[fp_recall.fp >= key_fp]
241 | 
242 |     # if key_fp < min_fp, recall = 0
243 |     if len(fp_recall_less_fp) == 0:
244 |         return 0
245 | 
246 |     # if key_fp > max_fp, recall = max_recall
247 |     if len(fp_recall_more_fp) == 0:
248 |         return fp_recall.recall.max()
249 | 
250 |     fp_0 = fp_recall_less_fp["fp"].values[-1]
251 |     fp_1 = fp_recall_more_fp["fp"].values[0]
252 |     recall_0 = fp_recall_less_fp["recall"].values[-1]
253 |     recall_1 = fp_recall_more_fp["recall"].values[0]
254 |     recall_at_fp = recall_0 + (recall_1 - recall_0) \
255 |         * ((key_fp - fp_0) / (fp_1 - fp_0 + 1e-8))
256 | 
257 |     return recall_at_fp
258 | 
259 | 
260 | def _get_key_recall(fp, recall, key_fp_list):
261 |     """
262 |     Calculate recall at a series of FP thresholds.
263 | 
264 |     Parameters
265 |     ----------
266 |     fp : list of float
267 |         List of FP at different probability thresholds.
268 |     recall : list of float
269 |         List of recall at different probability thresholds.
270 |     key_fp_list : list of float
271 |         List of key FP values.
272 | 
273 |     Returns
274 |     -------
275 |     key_recall : list of float
276 |         List of key recall at each key FP.
277 |     """
278 |     fp_recall = pd.DataFrame({"fp": fp, "recall": recall}).sort_values("fp")
279 |     key_recall = [_interpolate_recall_at_fp(fp_recall, key_fp)
280 |                   for key_fp in key_fp_list]
281 | 
282 |     return key_recall
283 | 
284 | 
285 | def froc(df_list, num_gts, iou_thresh=0.3, key_fp=DEFAULT_KEY_FP):
286 |     """
287 |     Calculate the FROC curve.
288 |     Parameters
289 |     ----------
290 |     df_list : list of pandas.DataFrame
291 |         List of prediction metrics.
292 |     num_gts : list of int
293 |         List of number of GT in each volume.
294 |     iou_thresh : float
295 |         The IoU threshold of predictions being considered as "hit".
296 |     key_fp : tuple of float
297 |         The key false positive per scan used in evaluating the sensitivity
298 |         of the model.
299 | 
300 |     Returns
301 |     -------
302 |     fp : list of float
303 |         List of false positives per scan at different probability thresholds.
304 |     recall : list of float
305 |         List of recall at different probability thresholds.
306 |     key_recall : list of float
307 |         List of key recall corresponding to key FPs.
308 |     avg_recall : float
309 |         Average recall at key FPs. This is the evaluation metric we use
310 |         in the detection track.
311 |     """
312 |     fp_recall = [_froc_single_thresh(df_list, num_gts, p_thresh, iou_thresh)
313 |                  for p_thresh in np.arange(0, 1, 0.005)]
314 |     fp = [x[0] for x in fp_recall]
315 |     recall = [x[1] for x in fp_recall]
316 |     key_recall = _get_key_recall(fp, recall, key_fp)
317 |     avg_recall = np.mean(key_recall)
318 | 
319 |     return fp, recall, key_recall, avg_recall
320 | 
321 | 
322 | def plot_froc(fp, recall):
323 |     """
324 |     Plot the FROC curve.
325 | 
326 |     Parameters
327 |     ----------
328 |     fp : list of float
329 |         List of false positives per scan at different confidence thresholds.
330 |     recall : list of float
331 |         List of recall at different confidence thresholds.
332 | """ 333 | _, ax = plt.subplots() 334 | ax.plot(fp, recall) 335 | ax.set_title("FROC") 336 | plt.savefig("froc.jpg") 337 | 338 | 339 | def evaluate(gt_csv_path, pred_csv_path): 340 | """ 341 | Evaluate predictions against the ground-truth. 342 | 343 | Parameters 344 | ---------- 345 | gt_csv_path : str 346 | The ground-truth csv. 347 | pred_csv_path : str 348 | The prediction csv. 349 | 350 | Returns 351 | ------- 352 | eval_results : dict 353 | Dictionary containing detection results. 354 | """ 355 | # GT and prediction information 356 | gt_info = pd.read_csv(gt_csv_path) 357 | pred_info = pd.read_csv(pred_csv_path) 358 | gt_pids = list(gt_info["public_id"].unique()) 359 | pred_pids = list(pred_info["public_id"].unique()) 360 | 361 | # GT and prediction directory sanity check 362 | for i in pred_pids: 363 | assert i in gt_pids, \ 364 | "Unmatched seriesuid (not included in test set)." 365 | 366 | eval_results = [] 367 | progress = tqdm(total=len(gt_pids)) 368 | for pid in gt_pids: 369 | # get GT array and information 370 | cur_gt_info = gt_info.loc[gt_info.public_id == pid] \ 371 | .reset_index(drop=True) 372 | cur_gt_info['label_id'] = np.arange(1, len(cur_gt_info) + 1) 373 | 374 | # get prediction array and information 375 | cur_pred_info = pred_info.loc[pred_info.public_id == pid] \ 376 | .reset_index(drop=True) 377 | cur_pred_info['label_id'] = np.arange(1, len(cur_pred_info) + 1) 378 | 379 | # perform evaluation 380 | eval_results.append(evaluate_single_prediction(cur_gt_info, cur_pred_info)) 381 | 382 | progress.update(1) 383 | 384 | progress.close() 385 | 386 | # detection results 387 | det_results = [x[0] for x in eval_results] 388 | num_gts = [x[1] for x in eval_results] 389 | 390 | # calculate the detection FROC 391 | fp, recall, key_recall, avg_recall = froc(det_results, num_gts) 392 | 393 | eval_results = { 394 | "detection": { 395 | "fp": fp, 396 | "recall": recall, 397 | "key_recall": key_recall, 398 | "average_recall": avg_recall, 399 | "max_recall": max(recall), 400 | "average_fp_at_max_recall": max(fp), 401 | } 402 | } 403 | 404 | return eval_results 405 | 406 | 407 | if __name__ == "__main__": 408 | import argparse 409 | 410 | parser = argparse.ArgumentParser() 411 | parser.add_argument("--gt_dir", required=True) 412 | parser.add_argument("--pred_dir", required=True) 413 | parser.add_argument("--clf", default="True") 414 | args = parser.parse_args() 415 | eval_results = evaluate(args.gt_dir, args.pred_dir) 416 | 417 | # detection metrics 418 | print("\nDetection metrics") 419 | print("=" * 64) 420 | print("Recall at key FP") 421 | froc_recall = pd.DataFrame(np.array(eval_results["detection"]["key_recall"]) \ 422 | .reshape(1, -1), index=["Recall"], 423 | columns=[f"FP={str(x)}" for x in DEFAULT_KEY_FP]) 424 | print(froc_recall) 425 | print("Average recall: {:.4f}".format( 426 | eval_results["detection"]["average_recall"])) 427 | print("Maximum recall: {:.4f}".format( 428 | eval_results["detection"]["max_recall"] 429 | )) 430 | print("Average FP per scan at maximum recall: {:.4f}".format( 431 | eval_results["detection"]["average_fp_at_max_recall"] 432 | )) 433 | 434 | # plot/print FROC curve 435 | print("FPR, Recall in FROC") 436 | for fp, recall in zip(reversed(eval_results["detection"]["fp"]), 437 | reversed(eval_results["detection"]["recall"])): 438 | print(f"({fp:.8f}, {recall:.8f})") 439 | plot_froc(eval_results["detection"]["fp"], eval_results["detection"]["recall"]) 440 | -------------------------------------------------------------------------------- 
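A note on the FROC computation above: froc() consumes per-volume DataFrames of prediction metrics (as produced by evaluate_single_prediction), not raw boxes. The following is a minimal sketch (not part of the repository) of its expected input, assuming detection.py and its numpy/pandas/matplotlib/tqdm dependencies are importable from the working directory; the toy numbers are illustrative only.

import pandas as pd

from detection import DEFAULT_KEY_FP, froc

# One volume: predictions 1 and 2 hit distinct GT boxes (IoU > 0.3),
# prediction 3 overlaps nothing and counts as a false positive.
pred_metrics = pd.DataFrame(
    {
        "pred_label": [1, 2, 3],
        "max_iou": [0.85, 0.40, 0.05],  # best IoU against any GT box
        "hit_label": [1, 2, 0],         # 0 means no GT overlap
        "prob": [0.95, 0.60, 0.30],     # detection confidence
        "num_gt": [2, 2, 2],
    }
)

fp, recall, key_recall, avg_recall = froc([pred_metrics], num_gts=[2])
print(dict(zip([f"FP={k}" for k in DEFAULT_KEY_FP], key_recall)))
print(f"average recall: {avg_recall:.4f}")
--------------------------------------------------------------------------------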
/Valid_Metrics_On_Grand_Challenge/segmentation.py:
--------------------------------------------------------------------------------
1 | import SimpleITK
2 | 
3 | 
4 | def score_case(gt_path, pred_path):
5 |     # Load the images for this case
6 |     gt = SimpleITK.ReadImage(gt_path)
7 |     pred = SimpleITK.ReadImage(pred_path)
8 | 
9 |     # Cast to the same type
10 |     caster = SimpleITK.CastImageFilter()
11 |     caster.SetOutputPixelType(SimpleITK.sitkUInt8)
12 |     caster.SetNumberOfThreads(1)
13 |     gt = caster.Execute(gt)
14 |     pred = caster.Execute(pred)
15 | 
16 |     # Score the case
17 |     overlap_measures = SimpleITK.LabelOverlapMeasuresImageFilter()
18 |     overlap_measures.SetNumberOfThreads(1)
19 |     overlap_measures.Execute(gt, pred)
20 |     dice = overlap_measures.GetDiceCoefficient()
21 |     hausdorff_filter = SimpleITK.HausdorffDistanceImageFilter()
22 |     hausdorff_filter.Execute(gt, pred)
23 |     hd = hausdorff_filter.GetHausdorffDistance()
24 | 
25 |     return {
26 |         'DiceCoefficient': dice,
27 |         'HDCoefficient': hd,
28 |         'score': dice - hd,
29 |     }
30 | 
31 | 
32 | if __name__ == "__main__":
33 |     # Evaluate a single case
34 |     eval_results = score_case("gt_path", "pred_path")
--------------------------------------------------------------------------------
/how to build a docker image for your algorithm.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PerceptionComputingLab/TDSC-ABUS2023/a9eebe689fa6360399457d5f32a6f93a19f3d4bc/how to build a docker image for your algorithm.pdf
--------------------------------------------------------------------------------
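For reference, a minimal sketch of the prediction files the evaluation scripts above read. The column names come straight from the code (cls_score.py expects "case" and "prob"; det_score.py and detection.py expect "public_id", "probability" and the center/size box columns); the case IDs, values, and output file names here are hypothetical.

import pandas as pd

# Classification: one malignancy probability per case.
pd.DataFrame(
    {"case": ["CASE_1", "CASE_2"], "prob": [0.91, 0.12]}
).to_csv("classification_pred.csv", index=False)

# Detection: one row per predicted box, given as center coordinates
# (coordX/Y/Z) plus side lengths (x/y/z_length), with a confidence score.
pd.DataFrame(
    {
        "public_id": ["CASE_1", "CASE_1"],
        "probability": [0.88, 0.35],
        "coordX": [120.0, 60.0],
        "coordY": [80.0, 40.0],
        "coordZ": [30.0, 20.0],
        "x_length": [24.0, 10.0],
        "y_length": [18.0, 8.0],
        "z_length": [12.0, 6.0],
    }
).to_csv("detection_pred.csv", index=False)

# Segmentation is evaluated directly on mask volumes (see seg_score.py and
# segmentation.py above): one predicted mask file per case, matched to the
# ground-truth masks by sorted case order.
--------------------------------------------------------------------------------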