├── EcoLocator
│   ├── __init__.py
│   ├── requirements.txt
│   ├── __pycache__
│   │   ├── __init__.cpython-311.pyc
│   │   └── class_ecolocator.cpython-311.pyc
│   └── class_ecolocator.py
├── InputEco
│   ├── TEST_01.png
│   ├── TEST_02.png
│   └── TEST_03_col.png
├── ImageToPreProcess
│   ├── TEST_01.png
│   ├── TEST_02.png
│   └── TEST_03_col.png
├── Outputs
│   ├── 1_NoWatermark
│   │   ├── TEST_01.png
│   │   ├── TEST_02.png
│   │   └── TEST_03_col.png
│   ├── 2_CroppedOutput
│   │   ├── TEST_01.png
│   │   ├── TEST_02.png
│   │   └── TEST_03_col.png
│   └── 3_SquaredOutput
│       ├── TEST_01.png
│       ├── TEST_02.png
│       └── TEST_03_col.png
├── README.md
├── test_ecolocator.py
└── LICENSE

/EcoLocator/__init__.py:
--------------------------------------------------------------------------------
from .class_ecolocator import EcoLoc
--------------------------------------------------------------------------------
/EcoLocator/requirements.txt:
--------------------------------------------------------------------------------
pillow
matplotlib
numpy
tqdm
--------------------------------------------------------------------------------
/InputEco/TEST_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/InputEco/TEST_01.png
--------------------------------------------------------------------------------
/InputEco/TEST_02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/InputEco/TEST_02.png
--------------------------------------------------------------------------------
/InputEco/TEST_03_col.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/InputEco/TEST_03_col.png
--------------------------------------------------------------------------------
/ImageToPreProcess/TEST_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/ImageToPreProcess/TEST_01.png
--------------------------------------------------------------------------------
/ImageToPreProcess/TEST_02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/ImageToPreProcess/TEST_02.png
--------------------------------------------------------------------------------
/ImageToPreProcess/TEST_03_col.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/ImageToPreProcess/TEST_03_col.png
--------------------------------------------------------------------------------
/Outputs/1_NoWatermark/TEST_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/1_NoWatermark/TEST_01.png
--------------------------------------------------------------------------------
/Outputs/1_NoWatermark/TEST_02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/1_NoWatermark/TEST_02.png
--------------------------------------------------------------------------------
/Outputs/2_CroppedOutput/TEST_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/2_CroppedOutput/TEST_01.png
--------------------------------------------------------------------------------
/Outputs/2_CroppedOutput/TEST_02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/2_CroppedOutput/TEST_02.png
--------------------------------------------------------------------------------
/Outputs/3_SquaredOutput/TEST_01.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/3_SquaredOutput/TEST_01.png
--------------------------------------------------------------------------------
/Outputs/3_SquaredOutput/TEST_02.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/3_SquaredOutput/TEST_02.png
--------------------------------------------------------------------------------
/Outputs/1_NoWatermark/TEST_03_col.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/1_NoWatermark/TEST_03_col.png
--------------------------------------------------------------------------------
/Outputs/2_CroppedOutput/TEST_03_col.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/2_CroppedOutput/TEST_03_col.png
--------------------------------------------------------------------------------
/Outputs/3_SquaredOutput/TEST_03_col.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/Outputs/3_SquaredOutput/TEST_03_col.png
--------------------------------------------------------------------------------
/EcoLocator/__pycache__/__init__.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/EcoLocator/__pycache__/__init__.cpython-311.pyc
--------------------------------------------------------------------------------
/EcoLocator/__pycache__/class_ecolocator.cpython-311.pyc:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/GDelCorso/EchoLocator/HEAD/EcoLocator/__pycache__/class_ecolocator.cpython-311.pyc
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# EchoLocator
A package for cleaning echographic images. It provides a watermark-removal strategy and an adaptive cropping methodology.
Implementation details are reported in the paper "EchoLocator: an Open Source Python Package for the Standardisation of Echographic Images in Multicentre Analysis".
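
## Minimal usage

A minimal usage sketch, mirroring `test_ecolocator.py` in this repository; the folder names are the package defaults shown in the tree above:

```python
import EcoLocator

eco = EcoLocator.EcoLoc('InputEco')        # folder with the input echo images
eco.pre_remove()                           # optional colour/watermark pre-processing
eco.no_watermark()                         # remove watermarks
eco.standard_crops(resolution_output=360)  # crop and centre the echo cone
eco.square_crops(resolution_output=360)    # polar transform to a square image
```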
--------------------------------------------------------------------------------
/test_ecolocator.py:
--------------------------------------------------------------------------------
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Test the EcoLocator package, which provides a normalization strategy
for echographic images.
"""

#%% Import the package:
import EcoLocator

#%% Define the local input path
input_path = 'InputEco'

#%% Initialize the class
my_EcoLoc = EcoLocator.EcoLoc(input_path)


#%% Apply the pre-processing removal
im_temp_np = my_EcoLoc.pre_remove()


#%% Apply the method which removes the watermarks
my_EcoLoc.no_watermark()

#%% Cropping method to crop and center the image
my_EcoLoc.standard_crops(resolution_output = 360)

#%% Square crops using polar coordinates
my_EcoLoc.square_crops(resolution_output=360)
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2024 Giulio Del Corso

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/EcoLocator/class_ecolocator.py:
--------------------------------------------------------------------------------
################################################################
# Libraries
################################################################
import os
import numpy as np
from PIL import Image
from PIL import ImageFilter
from tqdm import tqdm
################################################################



################################################################
class EcoLoc():
    '''
    Main class to produce cropped and cleaned echographic images.
    It requires the location of the input echo images to crop and analyse.
    '''

    ############################################################
    # __init__ method:
    def __init__(self,
                 input_local_path,
                 no_watermark_local_path = '1_NoWatermark',
                 cropped_local_path = '2_CroppedOutput',
                 squared_local_path = '3_SquaredOutput'
                 ):
        '''
        Parameters
        ----------
        input_local_path : str
            Local path to the input folder containing the echo images.
        no_watermark_local_path : str, optional
            Local path to the output folder containing the echo images
            without watermarks.
        cropped_local_path : str, optional
            Local path to the output folder containing the cropped
            echo images.
        squared_local_path : str, optional
            Local path to the output folder containing the echo images
            cropped and transformed to remove the black borders.
        '''

        # Define the class attributes
        self.input_path = os.path.join(os.getcwd(), str(input_local_path))
        self.no_watermark_path = os.path.join(os.getcwd(), 'Outputs',
                                              str(no_watermark_local_path))
        self.cropped_path = os.path.join(os.getcwd(), 'Outputs',
                                         str(cropped_local_path))
        self.cropped_path_res = os.path.join(os.getcwd(), 'Outputs',
                                             str(cropped_local_path)+"_squared")
        self.squared_path = os.path.join(os.getcwd(), 'Outputs',
                                         str(squared_local_path))

        # Warn if the input directory does not exist:
        if os.path.isdir(self.input_path) == False:
            print("Warning: the input directory does not exist. \
                  Check the spelling.")

        # Make the output folder (if not existent)
        if os.path.isdir(os.path.join(os.getcwd(), 'Outputs')) == False:
            os.mkdir(os.path.join(os.getcwd(), 'Outputs'))

        # Generate the list of candidate images:
        self.input_image_list = os.listdir(self.input_path)

        try:
            self.input_image_list = [x for x in self.input_image_list if x.split('.')[-1]
                                     in ['jpeg', 'png', 'jpg']]
        except:
            pass


        # Check dimensions
        if len(self.input_image_list) == 0:
            print("Warning: the input folder seems to contain no echo \
                  images.")
    ############################################################



    ############################################################
    def pre_remove(self,
                   path_pre_remove = 'ImageToPreProcess',
                   black_threshold = 5,
                   reconstruct = True,
                   color_threshold = 5):
        '''
        Method for the preliminary removal of colour from images
        (used when the watermarks overlap the echographic cone).


        Parameters
        ----------
        path_pre_remove : string, optional
            Folder containing the images to pre-process.
        reconstruct : boolean, optional
            If True, it tries to reconstruct the removed pixels
            from the surrounding ones.
        black_threshold : int, optional
            Convert each pixel with value in [0, black_threshold] to 0.
        color_threshold : int, optional
            Threshold used to decide whether a pixel is a grayscale
            value or an RGB (coloured) one.
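
        Notes
        -----
        Worked example for the default color_threshold = 5: a pixel with
        channels (120, 126, 119) has a maximum pairwise channel difference
        of max(|120-126|, |120-119|, |126-119|) = 7 > 5, so it is treated
        as a coloured (watermark) pixel, whereas (120, 123, 119) gives a
        maximum difference of 4 and is kept as grayscale.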
        '''

        # Initialize the path
        self.path_pre_remove = os.path.join(os.getcwd(), str(path_pre_remove))

        list_images = os.listdir(os.path.join(os.getcwd(), self.path_pre_remove))

        try:
            list_images = [x for x in list_images if x.split('.')[-1]
                           in ['jpeg', 'png', 'jpg']]
        except:
            pass

        if os.path.isdir('InputEco')==False:
            os.mkdir('InputEco')

        for temp_image_name in tqdm(list_images):
            try:
                im_temp = Image.open(os.path.join(self.path_pre_remove,
                                    temp_image_name)).convert('RGB')

                # Operations:
                im_temp_np = np.array(im_temp)      # Original image
                im_mask = np.array(im_temp)[:,:,0]  # Mask to find colors


                # Aux function to distinguish colour and gray pixels
                def test_color(a, b, c, threshold=color_threshold):
                    if max(abs(round(a)-round(b)), abs(round(a)-round(c)),
                           abs(round(b)-round(c))) > threshold:
                        return True
                    else:
                        return False

                # Aux function to find the first original pixel
                def close_pixel(i, j, im_temp_np, im_mask, direction):
                    i_selected = i
                    j_selected = j


                    found = False
                    try:
                        if direction == 'bottom':
                            # Bottom
                            while found == False and i_selected < np.shape(im_mask)[0]-1:
                                i_selected += 1
                                if im_mask[i_selected, j_selected] == 0:
                                    found = True
                        elif direction == 'top':
                            # Top
                            while found == False and i_selected > 0:
                                i_selected -= 1
                                if im_mask[i_selected, j_selected] == 0:
                                    found = True
                        elif direction == 'left':
                            # Left
                            while found == False and j_selected > 0:
                                j_selected -= 1
                                if im_mask[i_selected, j_selected] == 0:
                                    found = True
                        elif direction == 'right':
                            # Right
                            while found == False and j_selected0):
                                fun_iterative_worm(starting_x-1, starting_y,
                                                   start = False, actual_rec = actual_rec)

                    # Go right:
                    if visited_mask[starting_x, starting_y+1]==0 and \
                        (starting_y+1 < np.shape(temp_image_np)[1]-1):
                        fun_iterative_worm(starting_x, starting_y+1,
                                           start = False, actual_rec = actual_rec)

                    # Go left:
                    if visited_mask[starting_x, starting_y-1]==0 and \
                        (starting_y-1 > 0):
                        fun_iterative_worm(starting_x, starting_y-1,
                                           start = False, actual_rec = actual_rec)

                elif start == False:
                    # This is a candidate, mark it as visited to speed up
                    visited_mask[starting_x, starting_y] = 1

                    # If it is over the threshold, save it and iterate:
                    if temp_image_np[starting_x, starting_y] > threshold:
                        mask_array_np[starting_x, starting_y] = \
                            temp_image_np[starting_x, starting_y]

                        # Go bottom:
                        if visited_mask[starting_x+1, starting_y]==0 and \
                            (starting_x+1 < np.shape(temp_image_np)[0]-1):
                            fun_iterative_worm(starting_x+1, starting_y,
                                               start = False, actual_rec = actual_rec)

                        # Go top
                        if visited_mask[starting_x-1, starting_y]==0 and \
                            (starting_x-1 > 0):
                            fun_iterative_worm(starting_x-1, starting_y,
                                               start = False, actual_rec = actual_rec)

                        # Go right:
                        if visited_mask[starting_x, starting_y+1]==0 and \
                            (starting_y+1 < np.shape(temp_image_np)[1]-1):
                            fun_iterative_worm(starting_x, starting_y+1,
                                               start = False, actual_rec = actual_rec)

                        # Go left:
                        if visited_mask[starting_x, starting_y-1]==0 and \
                            (starting_y-1 > 0):
                            fun_iterative_worm(starting_x, starting_y-1,
                                               start = False, actual_rec = actual_rec)
                else:
                    # Append the new starting seeds:
                    global_last_seeds_list.append([starting_x, starting_y])
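
            # --- Illustrative sketch (an assumption, not part of the original
            # package): an equivalent, stack-based flood fill expressing the
            # same region-growing idea as fun_iterative_worm without relying
            # on Python recursion. Defined only for clarity and never called.
            def _flood_fill_sketch(image_np, seed, threshold=0):
                mask = np.zeros_like(image_np)
                visited = np.zeros(np.shape(image_np))
                stack = [seed]
                while stack:
                    x, y = stack.pop()
                    if visited[x, y] == 1:
                        continue
                    visited[x, y] = 1
                    if image_np[x, y] > threshold:
                        mask[x, y] = image_np[x, y]
                        # Push the 4-connected neighbours that stay in bounds:
                        if x + 1 < np.shape(image_np)[0]:
                            stack.append((x + 1, y))
                        if x - 1 >= 0:
                            stack.append((x - 1, y))
                        if y + 1 < np.shape(image_np)[1]:
                            stack.append((x, y + 1))
                        if y - 1 >= 0:
                            stack.append((x, y - 1))
                return mask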
            # First worm iteration:
            fun_iterative_worm(center_coordinates_x, center_coordinates_y)

            # Next worm iterations
            for temp_it in range(iteration):
                # Check over the seeds
                temp_list = global_last_seeds_list

                for seeds in temp_list:
                    fun_iterative_worm(seeds[0], seeds[1])

            # Convert back to image:
            watershed_image = Image.fromarray(np.uint8(mask_array_np))

            # Save the clean image
            watershed_image.save(os.path.join(self.no_watermark_path,
                                              temp_image_name))
    ############################################################



    ############################################################
    def standard_crops(self,
                       threshold = 0,
                       path_input = None,
                       resolution_output = None,
                       correct_center=10):
        '''
        Method to crop the input image (with one single connected component),
        keeping a percentage (threshold) of the non-null values, to provide
        a final image without noisy/pixellated contours.

        Parameters
        ----------
        threshold : int, optional
            Value in [0, 100]. The percentage of non-zero pixels that it is
            admissible to cut away.

        path_input : str, optional
            If given, it is the absolute path of the images to be cropped.
            Otherwise the no_watermark folder is used as input.
            It expects vertical echo images without watermarks.

        resolution_output : int, optional
            The resolution of the squared output image; if None, no rescaling
            is applied.

        correct_center : int, optional
            The tolerance, in pixels, around the centre within which a new
            centre estimate is accepted.
        '''

        if os.path.isdir(self.cropped_path) == False:
            # Generate the folder:
            os.mkdir(self.cropped_path)

        if path_input != None:
            # Overwrite the starting path
            self.no_watermark_path = str(path_input)

        # Define the list of images to be analyzed:
        input_image_list = os.listdir(self.no_watermark_path)

        try:
            input_image_list = [x for x in input_image_list if x.split('.')[-1]
                                in ['jpeg', 'png', 'jpg']]
        except:
            pass



        # As a first step, it defines a 0/1 matrix: 1 for each pixel whose
        # value is higher than the threshold. This matrix is used to
        # calculate two distribution vectors (per row and per column).
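
        # --- Illustrative sketch (an assumption, not part of the original
        # package): the same row/column profiles and crop box can be obtained
        # with vectorised numpy operations. Defined only for clarity, never
        # called.
        def _profile_bounding_box_sketch(img, threshold=0):
            binary = img > threshold
            vertical_np = binary.sum(axis=1)    # non-zero count per row
            horizontal_np = binary.sum(axis=0)  # non-zero count per column
            rows = np.where(vertical_np > 0)[0]
            cols = np.where(horizontal_np > 0)[0]
            # First/last non-empty row and column delimit the crop box.
            return rows[0], rows[-1] + 1, cols[0], cols[-1] + 1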
        for temp_image_name in tqdm(input_image_list):
            # Import image
            temp_image = Image.open(os.path.join(self.no_watermark_path,
                                                 temp_image_name))

            # Convert to grey if RGB image
            try:
                temp_image_np = np.asarray(temp_image)[:,:,0]
            except:
                temp_image_np = np.asarray(temp_image)[:,:]

            vertical_np = np.zeros(np.shape(temp_image_np)[0])
            for i_row in range(np.shape(temp_image_np)[0]):
                for j_col in range(np.shape(temp_image_np)[1]):
                    if temp_image_np[i_row, j_col] > threshold:
                        vertical_np[i_row] += 1

            # We use this profile to find the minimum values:
            # First reduce only vertically
            min_top = 0     # 0 is top

            found = False
            try:
                while found == False:
                    if vertical_np[min_top+1] > 0:
                        found = True
                    else:
                        min_top += 1
                        if min_top > np.shape(temp_image_np)[0]-1:
                            found = True
                            min_top = 0
                            print("namefile mintop: ", temp_image_name)
            except:
                print("problems namefile mintop: ", temp_image_name)

            max_bottom = np.shape(temp_image_np)[0]

            found = False
            try:
                while found == False:
                    if vertical_np[max_bottom-1] > 0:
                        found = True
                    else:
                        max_bottom -= 1
                        if max_bottom < 1:
                            found = True
                            max_bottom = np.shape(temp_image_np)[0]
                            print("namefile maxbottom: ", temp_image_name)
            except:
                print("problems namefile maxbottom: ", temp_image_name)

            temp_image_np = temp_image_np[min_top:max_bottom, :]

            # On the reduced image we have to find the minimum x and y
            number_of_points_to_check = 5

            # Find the left:
            center_left = 0

            # See from left to half
            i_temp = 0
            found = False
            while i_temp < int(np.shape(temp_image_np)[1]/2) and found == False:
                center_left = i_temp
                # Check for the next value for several lines:
                for j_temp in range(number_of_points_to_check):
                    if temp_image_np[j_temp, i_temp+1] > 0:
                        found = True
                i_temp += 1

            # Find the right:
            center_right = np.shape(temp_image_np)[1]

            # See from right to half
            i_temp = 0
            found = False
            while i_temp < int(np.shape(temp_image_np)[1]/2) and found == False:
                center_right = np.shape(temp_image_np)[1] - i_temp
                # Check for the next value for several lines:
                for j_temp in range(number_of_points_to_check):
                    if temp_image_np[j_temp,
                                     np.shape(temp_image_np)[1]-i_temp-1] > 0:
                        found = True
                i_temp += 1

            # Check on additional vertical points
            temp_number_of_points_to_check = number_of_points_to_check
            while center_right <= int(np.shape(temp_image_np)[1]/2)+correct_center or \
                center_left >= int(np.shape(temp_image_np)[1]/2)-correct_center:
                temp_number_of_points_to_check = 2*temp_number_of_points_to_check

                # Find the left:
                center_left = 0

                # See from left to half
                i_temp = 0
                found = False
                while i_temp < int(np.shape(temp_image_np)[1]/2) \
                    and found == False:
                    center_left = i_temp
                    # Check for the next value for several lines:
                    for j_temp in range(temp_number_of_points_to_check):
                        if temp_image_np[j_temp, i_temp+1] > 0:
                            found = True
                    i_temp += 1

                # Find the right:
                center_right = np.shape(temp_image_np)[1]

                # See from right to half
                i_temp = 0
                found = False
                while i_temp < int(np.shape(temp_image_np)[1]/2) and \
                    found == False:
                    #print("i_temp", i_temp)
                    center_right = np.shape(temp_image_np)[1] - i_temp
                    # Check for the next value for several lines:
                    for j_temp in range(temp_number_of_points_to_check):
                        if temp_image_np[j_temp,
                                         np.shape(temp_image_np)[1]-i_temp-1] > 0:
                            found = True
                    i_temp += 1
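
            # --- Illustrative sketch (an assumption, not part of the original
            # package): the "first bright column seen from the left/right over
            # the top rows" search above, written with numpy for clarity.
            # Never called.
            def _cone_top_edges_sketch(img, n_rows=5, threshold=0):
                top_band = img[:n_rows, :] > threshold
                bright_cols = np.where(top_band.any(axis=0))[0]
                if bright_cols.size == 0:
                    return None, None
                # Leftmost and rightmost bright columns of the top band
                # approximate center_left and center_right.
                return bright_cols[0], bright_cols[-1]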

            # Second step: calculate the horizontal distribution vector:
            horizontal_np = np.zeros(np.shape(temp_image_np)[1])
            for j_col in range(np.shape(temp_image_np)[1]):
                for i_row in range(np.shape(temp_image_np)[0]):
                    if temp_image_np[i_row, j_col] > threshold:
                        horizontal_np[j_col] += 1

            min_left = 0

            found = False
            while found == False:
                if horizontal_np[min_left+1] > 0:
                    found = True
                else:
                    min_left += 1

            max_right = np.shape(temp_image_np)[1]

            found = False
            while found == False:
                if horizontal_np[max_right-1] > 0:
                    found = True
                else:
                    max_right -= 1

            # We choose if we want to keep them or not:
            # We have to calculate the two distances:
            left_distance = abs(min_left-center_left)
            right_distance = abs(max_right-center_right)
            best_distance = max(left_distance, right_distance)
            min_left = max(min(min_left, int(center_left-best_distance)), 0)
            max_right = min(max(max_right, int(center_right+best_distance)),
                            np.shape(temp_image_np)[1]-1)

            # These values are used to crop the image horizontally:
            temp_reduced_image_np = temp_image_np[:, min_left:max_right]


            # Convert back to image:
            cropped_image = Image.fromarray(np.uint8(temp_reduced_image_np))

            # Apply a different resolution (if requested):
            if resolution_output != None:
                cropped_image_res = cropped_image.resize((resolution_output,
                                                          resolution_output))
            else:
                cropped_image_res = cropped_image

            # Save the clean image
            try:
                cropped_image_res.save(os.path.join(self.cropped_path, temp_image_name))
            except:
                print("filename: ", temp_image_name, "dimension cropped: ",
                      np.shape(temp_reduced_image_np))
    ############################################################



    ############################################################
    def square_crops(self,
                     delta_pixel_radius = 5,
                     path_input = None,
                     number_of_points_to_check = 10,
                     resolution_input_x = None,
                     resolution_input_y = None,
                     reduced_resolution_x = 0.7,
                     reduced_resolution_y = 0.9,
                     resolution_output = None):

        '''
        Method to convert the cropped images to polar coordinates.

        Parameters
        ----------

        delta_pixel_radius : int, optional
            The granularity (in pixels) of the radius search, used to speed
            up computation. The optimal value is 1, but it is slower.

        path_input : str, optional
            Alternative global input path.

        number_of_points_to_check : int, optional
            Number of top rows checked when locating the cone edges.

        resolution_input_x : int, optional
            The number of pixels of the final image (along x).

        resolution_input_y : int, optional
            The number of pixels of the final image (along y).

        reduced_resolution_x/y : float, optional
            Fraction of the original resolution kept (to improve the quality
            of the transform).

        resolution_output : int, optional
            If given, the final squared resolution of the output image.
        '''

        # Generate the folder:
        if os.path.isdir(self.squared_path) == False:
            os.mkdir(self.squared_path)

        # Overwrite the starting path
        if path_input != None:
            self.cropped_path = path_input

        # If a resolution is given, use it:
        if resolution_input_x != None:
            resolution_x = resolution_input_x

        if resolution_input_y != None:
            resolution_y = resolution_input_y

        # Define the list of images to be analyzed:
        input_image_list = os.listdir(self.cropped_path)

        try:
            input_image_list = [x for x in input_image_list if x.split('.')[-1]
                                in ['jpeg', 'png', 'jpg']]
        except:
            pass

        for temp_image_name in tqdm(input_image_list):
            # Import image
            temp_image = Image.open(os.path.join(self.cropped_path, temp_image_name))

            # Convert to grey
            try:
                temp_image_np = np.asarray(temp_image)[:,:,0]
            except:
                temp_image_np = np.asarray(temp_image)[:,:]


            try:
                # If the resolution is not set, the method defines it as:
                if resolution_input_x == None:
                    resolution_x = \
                        int(np.shape(temp_image_np)[1]*reduced_resolution_x)

                if resolution_input_y == None:
                    resolution_y = \
                        int(np.shape(temp_image_np)[0]*reduced_resolution_y)

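                # Worked example with illustrative numbers: for a 400x500
                # (rows x columns) cropped image and the defaults
                # reduced_resolution_x = 0.7, reduced_resolution_y = 0.9,
                # the defaults become resolution_x = int(500*0.7) = 350 and
                # resolution_y = int(400*0.9) = 360.
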
                # We assume that the two starting points have y value (row)
                # equal to 0 (the cone is oriented towards the base).
                # We find the value of the columns.
                # There is the risk that the first row is incorrect,
                # so we check on a few pixels (number_of_points_to_check).

                # Find the left:
                center_left = 0
                y_center_left = 0

                # See from left to half (to define x1_l and y1_l)
                i_temp = 0
                found = False
                while i_temp < int(np.shape(temp_image_np)[1]/2) and \
                    found == False:
                    center_left = i_temp
                    # Check for the next value for several lines:
                    for j_temp in range(number_of_points_to_check):
                        if temp_image_np[j_temp, i_temp+1] > 0:
                            if found != True:
                                y_center_left = j_temp
                            found = True
                    i_temp += 1

                # Find the right:
                center_right = np.shape(temp_image_np)[1]
                y_center_right = 0

                # Look from right to half
                i_temp = 0
                found = False
                while i_temp < int(np.shape(temp_image_np)[1]/2) and \
                    found == False:

                    center_right = np.shape(temp_image_np)[1] - i_temp

                    # Check for the next value for several lines:
                    for j_temp in range(number_of_points_to_check):
                        if temp_image_np[j_temp,
                                         np.shape(temp_image_np)[1]-i_temp-1] > 0:
                            if found != True:
                                y_center_right = j_temp
                            found = True
                    i_temp += 1

                # See from top to bottom: right vertical border point
                i_temp = 0
                found = False
                while i_temp < int(np.shape(temp_image_np)[0])-1 \
                    -number_of_points_to_check and found == False:
                    top_vert_r = i_temp
                    # Check for the next value for several lines:
                    for j_temp in range(number_of_points_to_check):
                        if temp_image_np[i_temp+1,
                                         int(np.shape(temp_image_np)[1])-1-j_temp] > 0:
                            found = True
                    i_temp += 1

                # Given the two top points (0, center_left) and
                # (0, center_right) we have to choose the optimal candidate
                # for the radius. The "radius" is a value from 0 up to the
                # length of the image. We use bisection to speed up:
                min_radius_temp = delta_pixel_radius
                max_radius_temp = np.shape(temp_image_np)[1]

                n_iter = 0
                should_I_stop = False
                # Iterate the bisection until the maximum number of
                # iterations is reached or the radius is found:
                while n_iter < 10 and should_I_stop == False:
                    candidate_radius = abs(max_radius_temp+min_radius_temp)/2

                    n_iter += 1
                    how_many_found = 0

                    x1 = center_left
                    y1 = 0
                    x2 = center_left - candidate_radius
                    y2 = int(np.shape(temp_image_np)[0])

                    x1_r = center_right
                    y1_r = 0
                    x2_r = center_right + candidate_radius
                    y2_r = int(np.shape(temp_image_np)[0])

                    # Check how many bright points fall outside the cone
                    for y_row in range(int(np.shape(temp_image_np)[0])):
                        for x_col in range(int(np.shape(temp_image_np)[1])):
                            if temp_image_np[y_row, x_col] > 0:

                                if (y_row < (x_col-x1)*(y2-y1)/(x2-x1)+y1) or \
                                    (y_row < (x_col-x1_r)*(y2_r-y1_r)/(x2_r-x1_r)+y1_r):
                                    how_many_found += 1

                    # If how_many_found == 0 -> the cone is too wide:
                    # if we did not find any point, the radius is too big
                    if how_many_found == 0:
                        max_radius_temp = candidate_radius
                    # Found something, we have to increase the range:
                    elif how_many_found > 0:
                        min_radius_temp = candidate_radius

                    if abs(max_radius_temp-min_radius_temp) < 2:
                        should_I_stop = True

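                # --- Illustrative sketch (an assumption, not part of the
                # original package): the loop above is a bisection on a
                # monotone predicate ("does some bright pixel still fall
                # outside a cone of this width?"). A generic version of the
                # same idea, never called:
                def _bisection_sketch(lo, hi, predicate, tol=2.0, max_iter=10):
                    for _ in range(max_iter):
                        mid = (lo + hi) / 2.0
                        if predicate(mid):
                            lo = mid   # still points outside: widen further
                        else:
                            hi = mid   # nothing outside: wide enough
                        if abs(hi - lo) < tol:
                            break
                    return (lo + hi) / 2.0
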
                # Define the center, inner radius, outer radius and angle.

                # Two bottom points:
                x2_r = int(np.shape(temp_image_np)[1])
                y2_r = top_vert_r   # int(np.shape(temp_image_np)[0])

                # Median point
                x_m = int((center_left+center_right)/2)
                y_m = int(np.shape(temp_image_np)[0])

                # Two top points:
                x1_r = max(center_right, 2*x_m - center_left)
                y1_r = y_center_right

                x1_l = min(center_left, 2*x_m - center_right)
                y1_l = y_center_left

                # We calculate the center of the cone; the x value is easy,
                # the y value is given by the intersection:
                x_c = x_m
                y_c = (x_m-x1_r)*(y2_r-y1_r)/(x2_r-x1_r) + y1_r

                # Here we have to modify the image to add black pixels
                # at the bottom:
                # candidate radii are the distances between the center and
                # the various pixels.

                # We calculate the maximum radius:
                R_max = np.sqrt((x_c-x_m)**2 + (y_c-y_m)**2)

                # We calculate the minimum radius:
                # NOTE: this assumes that y1_r is at 0, which is risky!
                R_min = np.sqrt((x_c-x1_r)**2 + (y_c-y1_r)**2)

                # We calculate the aperture angle (in radians):
                hypothenuse = np.sqrt((x_c-x1_r)**2 + (y_c-y1_r)**2)
                alpha_top_right = abs(np.arccos(abs(y1_r-y_c)/hypothenuse))

                hypothenuse = np.sqrt((x_c-x1_l)**2 + (y_c-y1_l)**2)
                alpha_top_left = abs(np.arccos(abs(y1_l-y_c)/hypothenuse))
                alpha = min(alpha_top_right, alpha_top_left)

                # We can now define an empty matrix of the given resolution:
                new_squared_image = np.zeros((resolution_y, resolution_x))

                # For each pixel of this square image we find the closest
                # original value in the polar space (the radius grows with
                # the row, the angle spans [-alpha, alpha] along the columns):
                for y_row in range(resolution_y):
                    for x_col in range(resolution_x):
                        corresponding_radius = \
                            (R_max-R_min)*(y_row/resolution_y) + R_min
                        corresponding_angle = \
                            (x_col/resolution_x)*2*alpha - alpha

                        if (int(corresponding_radius*\
                                np.cos(corresponding_angle)+y_c) > 0) and \
                            (int(corresponding_radius\
                                 *np.cos(corresponding_angle)+y_c) < \
                             np.shape(temp_image_np)[0]) and \
                            (int(x_m+corresponding_radius\
                                 *np.sin(corresponding_angle)) > 0) and \
                            (int(x_m+corresponding_radius\
                                 *np.sin(corresponding_angle)) < \
                             np.shape(temp_image_np)[1]):
                            y_row_polar = min(max(int(corresponding_radius\
                                *np.cos(corresponding_angle)+y_c), 0),
                                np.shape(temp_image_np)[0]-1)
                            x_col_polar = min(max(int(x_m+corresponding_radius\
                                *np.sin(corresponding_angle)), 0), \
                                np.shape(temp_image_np)[1]-1)
                        else:
                            y_row_polar = 0
                            x_col_polar = 0
                        new_squared_image[y_row, x_col] = \
                            temp_image_np[y_row_polar, x_col_polar]

                # Convert back to image:
                polar_image = Image.fromarray(np.uint8(new_squared_image))

                # Set up the squared resolution:
                if resolution_output != None:
                    polar_image = polar_image.resize\
                        ((resolution_output, resolution_output))

                # Save the clean image
                polar_image.save(os.path.join(self.squared_path, \
                                              temp_image_name))
            except:
                print("problematic image: ", temp_image_name)
    ############################################################
--------------------------------------------------------------------------------