├── Matlab-Troubleshooting
│   └── remove4thDimension.py
├── README.md
├── codalab.png
├── evaluation_notebook.ipynb
├── requirements.txt
├── submission-guide.md
└── surface.py

/Matlab-Troubleshooting/remove4thDimension.py:
--------------------------------------------------------------------------------
# Script to fix corrupted Matlab .nii files. Fixes shape mismatches caused by a
# spurious fourth (singleton) dimension that Matlab sometimes adds.
# Expects volumes and segmentations in the current folder with the correct naming convention.

import nibabel as nb
import numpy as np


if __name__ == "__main__":
    # Iterate over all test volumes
    for case in range(70):
        print('Starting with case %s' % case)
        pred = nb.load('test-segmentation-' + str(case) + '.nii')
        volume = nb.load('test-volume-' + str(case) + '.nii')

        # Load the segmentation data
        pred_data = pred.get_data()

        # Squeeze singleton dimensions
        pred_data = np.squeeze(pred_data)

        # Check if shapes match after squeezing
        if volume.shape != pred_data.shape:
            print('Shape of volume %s and predictions %s do not match for case %s'
                  % (volume.shape, pred_data.shape, case))

        # Make sure the lesion label is 2 (rescale binary 0/1 masks)
        if np.max(pred_data) != 2:
            pred_data = pred_data * 2

        # Construct a valid Nifti image using the affine and header of the volume
        img_to_save = nb.Nifti1Image(pred_data.astype(volume.get_data_dtype()),
                                     volume.affine, header=volume.header)
        save_name = 'test-segmentation-' + str(case) + '_fixed.nii'
        # Save the fixed segmentation
        print('Saving %s' % save_name)
        nb.save(img_to_save, save_name)

--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# LITS-CHALLENGE
This repository contains information about the LITS Challenge.
## Metrics
Please look into evaluation_notebook.ipynb to see how submissions are evaluated on lits-challenge.com.
## Installation
Install all required packages using:
```
pip install -r requirements.txt
```
Start a Jupyter notebook server to open evaluation_notebook.ipynb:
```
jupyter notebook
```
Then go to localhost:8888 in your browser.
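## Scoring a single case
For orientation, the snippet below sketches how a single case is scored, mirroring the calls made in evaluation_notebook.ipynb. It assumes medpy, nibabel and the surface.py module from this repository are importable; the file names label-0.nii and probs-0.nii are placeholders for a ground-truth segmentation and a prediction of identical shape and voxel spacing.
```python
from medpy import metric
from surface import Surface
import nibabel as nb
import numpy as np

label = nb.load('label-0.nii')   # ground truth: 0 background, 1 liver, 2 lesion (placeholder path)
pred = nb.load('probs-0.nii')    # prediction with the same shape and spacing (placeholder path)
spacing = label.header.get_zooms()[:3]

# Lesion masks (label value 2), as evaluated on lits-challenge.com
lesion_gt = label.get_data() == 2
lesion_pred = pred.get_data() == 2

dice = metric.dc(lesion_pred, lesion_gt)
voe = 1. - metric.binary.jc(lesion_pred, lesion_gt)
rvd = metric.ravd(lesion_gt, lesion_pred)

if np.count_nonzero(lesion_pred) == 0 or np.count_nonzero(lesion_gt) == 0:
    assd = msd = 0  # surface distances are undefined for empty masks
else:
    surf = Surface(lesion_pred, lesion_gt, physical_voxel_spacing=spacing)
    assd = surf.get_average_symmetric_surface_distance()
    msd = metric.hd(lesion_gt, lesion_pred, voxelspacing=spacing)

print('Dice %.3f, VOE %.3f, RVD %.3f, ASSD %.2f mm, MSD %.2f mm' % (dice, voe, rvd, assd, msd))
```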

--------------------------------------------------------------------------------
/codalab.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/PatrickChrist/LITS-CHALLENGE/c0648928131970cc2a053d0be8c1cf4d6b7e934e/codalab.png
--------------------------------------------------------------------------------
/evaluation_notebook.ipynb:
--------------------------------------------------------------------------------
{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "# Evaluation script for LITS Challenge"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "from medpy import metric\n",
    "from surface import Surface\n",
    "import glob\n",
    "import nibabel as nb\n",
    "import numpy as np\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "def get_scores(pred,label,vxlspacing):\n",
    "\tvolscores = {}\n",
    "\n",
    "\tvolscores['dice'] = metric.dc(pred,label)\n",
    "\tvolscores['jaccard'] = metric.binary.jc(pred,label)\n",
    "\tvolscores['voe'] = 1. - volscores['jaccard']\n",
    "\tvolscores['rvd'] = metric.ravd(label,pred)\n",
    "\n",
    "\tif np.count_nonzero(pred) ==0 or np.count_nonzero(label)==0:\n",
    "\t\tvolscores['assd'] = 0\n",
    "\t\tvolscores['msd'] = 0\n",
    "\telse:\n",
    "\t\tevalsurf = Surface(pred,label,physical_voxel_spacing = vxlspacing,mask_offset = [0.,0.,0.], reference_offset = [0.,0.,0.])\n",
    "\t\tvolscores['assd'] = evalsurf.get_average_symmetric_surface_distance()\n",
    "\n",
    "\t\tvolscores['msd'] = metric.hd(label,pred,voxelspacing=vxlspacing)\n",
    "\n",
    "\treturn volscores"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "## Load Labels and Predictions"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "label_path = ''\n",
    "prob_path = ''\n",
    "\n",
    "labels = sorted(glob.glob(label_path+'label*.nii'))\n",
    "probs = sorted(glob.glob(prob_path+'probs*.nii'))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "deletable": true,
    "editable": true
   },
   "source": [
    "# Loop through all volumes"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true,
    "deletable": true,
    "editable": true
   },
   "outputs": [],
   "source": [
    "results = []\n",
    "outpath = '/data/results.csv'\n",
    "\n",
    "\n",
    "for label, prob in zip(labels,probs):\n",
    "    loaded_label = nb.load(label)\n",
    "    loaded_prob = nb.load(prob)\n",
    "    \n",
    "    liver_scores = get_scores(loaded_prob.get_data()>=1,loaded_label.get_data()>=1,loaded_label.header.get_zooms()[:3])\n",
    "    lesion_scores = get_scores(loaded_prob.get_data()==2,loaded_label.get_data()==2,loaded_label.header.get_zooms()[:3])\n",
    "    print \"Liver dice\",liver_scores['dice'], \"Lesion dice\", lesion_scores['dice']\n",
    "    \n",
    "    results.append([label, liver_scores, lesion_scores])\n",
    "\n",
    "    #create line for csv file\n",
    "    outstr = str(label) + ','\n",
    "    for l in [liver_scores, lesion_scores]:\n",
    "        for k,v in l.iteritems():\n",
    "            outstr += str(v) + ','\n",
    "    outstr += '\\n'\n",
    "\n",
    "    #create header for csv file if necessary\n",
    "    if not os.path.isfile(outpath):\n",
    "        headerstr = 'Volume,'\n",
    "        for k,v in liver_scores.iteritems():\n",
    "            headerstr += 'Liver_' + k + ','\n",
    "        for k,v in lesion_scores.iteritems():\n",
    "            headerstr += 'Lesion_' + k + ','\n",
    "        headerstr += '\\n'\n",
    "        outstr = headerstr + outstr\n",
    "\n",
    "    #write to file\n",
    "    f = open(outpath, 'a+')\n",
    "    f.write(outstr)\n",
    "    f.close()\n",
    "    "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 2",
   "language": "python",
   "name": "python2"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
appdirs==1.4.0
appnope==0.1.0
backports-abc==0.5
backports.shutil-get-terminal-size==1.0.0
bleach==1.5.0
certifi==2017.1.23
configparser==3.5.0
decorator==4.0.11
entrypoints==0.2.2
enum34==1.1.6
functools32==3.2.3.post2
html5lib==0.9999999
ipykernel==4.5.2
ipython==5.2.1
ipython-genutils==0.1.0
ipywidgets==5.2.2
Jinja2==2.9.5
jsonschema==2.5.1
jupyter==1.0.0
jupyter-client==4.4.0
jupyter-console==5.0.0
jupyter-core==4.2.1
MarkupSafe==0.23
MedPy==0.2.2
mistune==0.7.3
nbconvert==5.1.1
nbformat==4.2.0
nibabel==2.1.0
notebook==4.3.2
numpy==1.12.0
packaging==16.8
pandocfilters==1.4.1
pathlib2==2.2.1
pexpect==4.2.1
pickleshare==0.7.4
prompt-toolkit==1.0.10
ptyprocess==0.5.1
Pygments==2.2.0
pyparsing==2.1.10
pyzmq==16.0.2
qtconsole==4.2.1
scandir==1.4
scipy==0.18.1
simplegeneric==0.8.1
singledispatch==3.4.0.3
six==1.10.0
terminado==0.6
testpath==0.3
tornado==4.4.2
traitlets==4.3.1
wcwidth==0.1.7
widgetsnbextension==1.2.6
--------------------------------------------------------------------------------
/submission-guide.md:
--------------------------------------------------------------------------------
# Submission guide for the Liver Tumor Segmentation LITS Challenge
This guide helps you successfully submit to the LITS Challenge www.lits-challenge.com hosted on CodaLab.
## Table of Contents
1. Getting the test data
2. Preparing your submission
3. Upload your submission
4. Check your results

## 1. Getting the test data
The test data will be available from March 4th, 2017 on the CodaLab platform. You will find the link to download the test data after logging in [here](https://competitions.codalab.org/competitions/15595#participate). The test data was drawn randomly from the overall contributed data pool and was evaluated independently by three experienced radiologists.
## 2. Preparing your submission
In order to process your submission automatically, the evaluation program expects your submission to have a certain form:
### 2.1. Data format
Your submission files should be in the NIfTI .nii data format. Tools exist for Python, Matlab and C++ to produce valid .nii files.
### 2.2. Segmentation Value
Since we evaluate the lesion/tumor class 2 only, our program expects the submitted segmentation to contain 0 for background/non-lesion and 2 for lesion/tumor. We will not consider the liver class 1, since it is not part of this challenge.
### 2.3. Number of Pixels and Voxel Spacing
The submitted segmentation needs to have the same dimensions and the same voxel spacing as the medical volume. A volume mismatch will lead to an invalid submission.
### 2.4. Naming convention
The automatic evaluation script expects the submitted segmentations to follow this naming convention:
```
test-segmentation-X.nii
```
where X is the number of the test volume. For example, the segmentation corresponding to the test volume test-volume-7.nii must be named test-segmentation-7.nii.
### 2.5. Zip Archive of all your files
CodaLab only accepts zip archives. Please zip all your test-segmentation-X.nii files into a single zip archive. The zip archive must not contain any other files or subfolder structures. A sketch of a pre-submission check is shown at the end of this guide.
## 3. Upload your submission
After applying steps 2.1-2.5 you are able to upload your submission to the CodaLab submission system. Go to https://competitions.codalab.org/competitions/15595#participate-submit_results and upload your zip file.
## 4. Check your results
After uploading your zip file, the platform unzips your data and runs the evaluation program. This can take up to several hours.
You can check your current status and error reports here: ![Image of Codalab Submission](https://github.com/PatrickChrist/LITS-CHALLENGE/blob/PatrickChrist-guide/codalab.png)
## Important note from the evaluation page as a reminder
However, to foster performance we provide liver masks during training time. During test time, only the medical volumes and no liver masks will be available.
That means there will be no liver mask for the test data.
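## Appendix: pre-submission sanity check (sketch)
The following is a minimal sketch of how the checks from sections 2.2-2.5 could be verified locally before uploading; it is not the official validator. It assumes the corresponding test-volume-X.nii files are available in the current folder, and the archive name submission.zip is an arbitrary placeholder.
```python
# Sketch of a pre-submission sanity check (not the official validator).
import glob
import os
import zipfile

import nibabel as nb
import numpy as np

with zipfile.ZipFile('submission.zip', 'w', zipfile.ZIP_DEFLATED) as archive:
    for volume_path in sorted(glob.glob('test-volume-*.nii')):
        case = volume_path.replace('test-volume-', '').replace('.nii', '')
        seg_path = 'test-segmentation-%s.nii' % case

        volume = nb.load(volume_path)
        seg = nb.load(seg_path)

        # 2.3: same dimensions and voxel spacing as the medical volume
        assert seg.shape == volume.shape, 'Shape mismatch for case %s' % case
        assert np.allclose(seg.header.get_zooms()[:3], volume.header.get_zooms()[:3]), \
            'Voxel spacing mismatch for case %s' % case

        # 2.2: only background (0) and lesion (2) labels
        labels = np.unique(seg.get_data())
        assert set(labels.tolist()) <= {0, 2}, 'Unexpected labels %s in case %s' % (labels, case)

        # 2.5: flat zip archive, no subfolders
        archive.write(seg_path, arcname=os.path.basename(seg_path))
```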
--------------------------------------------------------------------------------
/surface.py:
--------------------------------------------------------------------------------
"""
@package medpy.metric.surface
Holds a metrics class computing surface metrics over two 3D images, each containing a binary object.

Classes:
    - Surface: Computes different surface metrics between two 3D images, each containing an object.

@author Oskar Maier
@version r0.4.1
@since 2011-12-01
@status Release
"""

# build-in modules
import math

# third-party modules
import scipy.spatial
import scipy.ndimage.morphology

# own modules

# code
class Surface(object):
    """
    Computes different surface metrics between two 3D images, each containing an object.
    The surface of the objects is computed using an 18-neighbourhood edge detection.
    The distance metrics are computed over all points of the surfaces using the nearest
    neighbour approach.
    Besides this, it provides a number of statistics of the two images.

    During the initialization the edge detection is run for both images, taking up to
    5 min (on 512^3 images). The first call to one of the metric measures triggers the
    computation of the nearest neighbours, taking up to 7 minutes (based on 250,000 edge
    points for each of the objects, which corresponds to a typical liver mask). All
    subsequent calls to one of the metric measures can be expected to be in the
    sub-millisecond range.

    Metrics defined in:
    Heimann, T.; van Ginneken, B.; Styner, M.A.; Arzhaeva, Y.; Aurich, V.; Bauer, C.; Beck, A.; Becker, C.; Beichel, R.; Bekes, G.; Bello, F.; Binnig, G.; Bischof, H.; Bornik, A.; Cashman, P.; Ying Chi; Cordova, A.; Dawant, B.M.; Fidrich, M.; Furst, J.D.; Furukawa, D.; Grenacher, L.; Hornegger, J.; Kainmuller, D.; Kitney, R.I.; Kobatake, H.; Lamecker, H.; Lange, T.; Jeongjin Lee; Lennon, B.; Rui Li; Senhu Li; Meinzer, H.-P.; Nemeth, G.; Raicu, D.S.; Rau, A.-M.; van Rikxoort, E.M.; Rousson, M.; Rusko, L.; Saddi, K.A.; Schmidt, G.; Seghers, D.; Shimizu, A.; Slagmolen, P.; Sorantin, E.; Soza, G.; Susomboon, R.; Waite, J.M.; Wimmer, A.; Wolf, I.; "Comparison and Evaluation of Methods for Liver Segmentation From CT Datasets," Medical Imaging, IEEE Transactions on, vol.28, no.8, pp.1251-1265, Aug. 2009
    doi: 10.1109/TMI.2009.2013851
    """

    # The edge points of the mask object.
    __mask_edge_points = None
    # The edge points of the reference object.
    __reference_edge_points = None
    # The nearest neighbours distances between mask and reference edge points.
    __mask_reference_nn = None
    # The nearest neighbours distances between reference and mask edge points.
    __reference_mask_nn = None
    # Distances of the two objects surface points.
    __distance_matrix = None

    def __init__(self, mask, reference, physical_voxel_spacing = [1,1,1], mask_offset = [0,0,0], reference_offset = [0,0,0]):
        """
        Initialize the class with two binary images, each containing a single object.
        Assumes the input to be a representation of a 3D image, that fits one of the
        following formats:
            - 1. all 0 values denoting background, all others the foreground/object
            - 2. all False values denoting the background, all others the foreground/object
        The first image passed is referred to as 'mask', the second as 'reference'. This
        is only important for some metrics that are not symmetric (and therefore not
        really metrics).
        @param mask binary mask as a scipy array (3D image)
        @param reference binary reference as a scipy array (3D image)
        @param physical_voxel_spacing The physical voxel spacing of the two images
            (must be the same for both)
        @param mask_offset offset of the mask array to 0,0,0-origin
        @param reference_offset offset of the reference array to 0,0,0-origin
        """
        # compute edge images
        mask_edge_image = Surface.compute_contour(mask)
        reference_edge_image = Surface.compute_contour(reference)

        # collect the object edge voxel positions
        # !TODO: When the distance matrix is already calculated here
        # these points don't have to be actually stored, only their number.
        # But there might be some later metric implementation that requires the
        # points and then it would be good to have them. What is better?
        mask_pts = mask_edge_image.nonzero()
        # store the edge voxel coordinates as float arrays so that the offsets and the
        # physical voxel spacing below can be applied element-wise
        mask_edge_points = scipy.array(zip(mask_pts[0], mask_pts[1], mask_pts[2]), dtype=float)
        reference_pts = reference_edge_image.nonzero()
        reference_edge_points = scipy.array(zip(reference_pts[0], reference_pts[1], reference_pts[2]), dtype=float)

        # check if there is actually an object present
        if 0 >= len(mask_edge_points):
            raise Exception('The mask image does not seem to contain an object.')
        if 0 >= len(reference_edge_points):
            raise Exception('The reference image does not seem to contain an object.')

        # add offsets to the voxel positions and multiply with the physical voxel spacing
        # to get the real positions in millimeters
        physical_voxel_spacing = scipy.array(physical_voxel_spacing)
        mask_edge_points += scipy.array(mask_offset)
        mask_edge_points *= physical_voxel_spacing
        reference_edge_points += scipy.array(reference_offset)
        reference_edge_points *= physical_voxel_spacing

        # set member vars
        self.__mask_edge_points = mask_edge_points
        self.__reference_edge_points = reference_edge_points

    def get_maximum_symmetric_surface_distance(self):
        """
        Computes the maximum symmetric surface distance, also known as Hausdorff
        distance, between the two objects surfaces.

        @return the maximum symmetric surface distance in millimeters

        For a perfect segmentation this distance is 0. This metric is sensitive to
        outliers and returns the true maximum error.

        Metric definition:
        Let \f$S(A)\f$ denote the set of surface voxels of \f$A\f$. The shortest
        distance of an arbitrary voxel \f$v\f$ to \f$S(A)\f$ is defined as:
        \f[
            d(v,S(A)) = \min_{s_A\in S(A)} ||v-s_A||
        \f]
        where \f$||.||\f$ denotes the Euclidean distance. The maximum symmetric
        surface distance is then given by:
        \f[
            MSD(A,B) = \max
                \left\{
                    \max_{s_A\in S(A)} d(s_A,S(B)),
                    \max_{s_B\in S(B)} d(s_B,S(A))
                \right\}
        \f]
        """
        # Get the maximum of the nearest neighbour distances
        A_B_distance = self.get_mask_reference_nn().max()
        B_A_distance = self.get_reference_mask_nn().max()

        # compute result and return
        return max(A_B_distance, B_A_distance)

    def get_root_mean_square_symmetric_surface_distance(self):
        """
        Computes the root mean square symmetric surface distance between the
        two objects surfaces.

        @return root mean square symmetric surface distance in millimeters

        For a perfect segmentation this distance is 0. This metric punishes large
        deviations from the true contour stronger than the average symmetric surface
        distance.

        Metric definition:
        Let \f$S(A)\f$ denote the set of surface voxels of \f$A\f$. The shortest
        distance of an arbitrary voxel \f$v\f$ to \f$S(A)\f$ is defined as:
        \f[
            d(v,S(A)) = \min_{s_A\in S(A)} ||v-s_A||
        \f]
        where \f$||.||\f$ denotes the Euclidean distance. The root mean square
        symmetric surface distance is then given by:
        \f[
            RMSD(A,B) =
                \sqrt{\frac{1}{|S(A)|+|S(B)|}}
                \times
                \sqrt{
                    \sum_{s_A\in S(A)} d^2(s_A,S(B))
                    +
                    \sum_{s_B\in S(B)} d^2(s_B,S(A))
                }
        \f]
        """
        # get object sizes
        mask_surface_size = len(self.get_mask_edge_points())
        reference_surface_size = len(self.get_reference_edge_points())

        # get minimal nearest neighbours distances
        A_B_distances = self.get_mask_reference_nn()
        B_A_distances = self.get_reference_mask_nn()

        # square the distances
        A_B_distances_sqrt = A_B_distances * A_B_distances
        B_A_distances_sqrt = B_A_distances * B_A_distances

        # sum the squared minimal distances
        A_B_distances_sum = A_B_distances_sqrt.sum()
        B_A_distances_sum = B_A_distances_sqrt.sum()

        # compute result and return
        return math.sqrt(1. / (mask_surface_size + reference_surface_size)) * math.sqrt(A_B_distances_sum + B_A_distances_sum)

    def get_average_symmetric_surface_distance(self):
        """
        Computes the average symmetric surface distance between the
        two objects surfaces.

        @return average symmetric surface distance in millimeters

        For a perfect segmentation this distance is 0.

        Metric definition:
        Let \f$S(A)\f$ denote the set of surface voxels of \f$A\f$. The shortest
        distance of an arbitrary voxel \f$v\f$ to \f$S(A)\f$ is defined as:
        \f[
            d(v,S(A)) = \min_{s_A\in S(A)} ||v-s_A||
        \f]
        where \f$||.||\f$ denotes the Euclidean distance. The average symmetric
        surface distance is then given by:
        \f[
            ASD(A,B) =
                \frac{1}{|S(A)|+|S(B)|}
                \left(
                    \sum_{s_A\in S(A)} d(s_A,S(B))
                    +
                    \sum_{s_B\in S(B)} d(s_B,S(A))
                \right)
        \f]
        """
        # get object sizes
        mask_surface_size = len(self.get_mask_edge_points())
        reference_surface_size = len(self.get_reference_edge_points())

        # get minimal nearest neighbours distances
        A_B_distances = self.get_mask_reference_nn()
        B_A_distances = self.get_reference_mask_nn()

        # sum the minimal distances
        A_B_distances = A_B_distances.sum()
        B_A_distances = B_A_distances.sum()

        # compute result and return
        return 1. / (mask_surface_size + reference_surface_size) * (A_B_distances + B_A_distances)

    def get_mask_reference_nn(self):
        """
        @return The distances of the nearest neighbours of all mask edge points to all
            reference edge points.
        """
        # Note: see note for @see get_reference_mask_nn
        if self.__mask_reference_nn is None:
            tree = scipy.spatial.cKDTree(self.get_mask_edge_points())
            self.__mask_reference_nn, _ = tree.query(self.get_reference_edge_points())
        return self.__mask_reference_nn

    def get_reference_mask_nn(self):
        """
        @return The distances of the nearest neighbours of all reference edge points
            to all mask edge points.

        The underlying algorithm used for the scipy.spatial.cKDTree implementation is
        based on:
        Sunil Arya, David M. Mount, Nathan S. Netanyahu, Ruth Silverman, and
        Angela Y. Wu. 1998. An optimal algorithm for approximate nearest neighbor
        searching fixed dimensions. J. ACM 45, 6 (November 1998), 891-923
        """
        # Note: cKDTree is faster than scipy.spatial.distance.cdist when the number of
        # voxels exceeds 10,000 (computationally tested). The maximum complexity is
        # O(D*N^2) vs. O(D*N*log(N)), where D=3 and N=number of voxels
        if self.__reference_mask_nn is None:
            tree = scipy.spatial.cKDTree(self.get_reference_edge_points())
            self.__reference_mask_nn, _ = tree.query(self.get_mask_edge_points())
        return self.__reference_mask_nn

    def get_mask_edge_points(self):
        """
        @return The edge points of the mask object.
        """
        return self.__mask_edge_points

    def get_reference_edge_points(self):
        """
        @return The edge points of the reference object.
        """
        return self.__reference_edge_points

    @staticmethod
    def compute_contour(array):
        """
        Uses an 18-neighbourhood filter to create an edge image of the input object.
        Assumes the input to be a representation of a 3D image, that fits one of the
        following formats:
            - 1. all 0 values denoting background, all others the foreground/object
            - 2. all False values denoting the background, all others the foreground/object
        The area outside the array is assumed to contain background voxels. The method
        does not ensure that the object voxels are actually connected, this is silently
        assumed.

        @param array a numpy array with only 0/N\{0} or False/True values.
        @return a boolean numpy array with the input objects edges
        """
        # set 18-neighbourhood/connectivity (for 3D images) alias face-and-edge kernel
        # all values covered by 1/True passed to the function
        # as a 1D array in order left-right, top-down
        # Note: all in all 19 ones, as the center value
        # also has to be checked (if it is a masked pixel)
        # [[[0, 1, 0], [[1, 1, 1], [[0, 1, 0],
        #   [1, 1, 1],  [1, 1, 1],  [1, 1, 1],
        #   [0, 1, 0]], [1, 1, 1]], [0, 1, 0]]]
        footprint = scipy.ndimage.morphology.generate_binary_structure(3, 2)

        # create an eroded version of the array
        erode_array = scipy.ndimage.morphology.binary_erosion(array, footprint)

        # xor the erode_array with the original and return
        return array ^ erode_array
--------------------------------------------------------------------------------
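
For orientation, here is a minimal, stand-alone sketch of how the Surface class from surface.py can be used on its own. The file names are placeholders following the challenge naming conventions, the lesion label is assumed to be 2 as described in the submission guide, and the class raises an exception if either mask is empty.
```python
# Hypothetical usage sketch for surface.py; paths are placeholders.
import nibabel as nb
from surface import Surface

reference_img = nb.load('segmentation-0.nii')        # reference annotation (placeholder path)
predicted_img = nb.load('test-segmentation-0.nii')   # predicted segmentation (placeholder path)

reference = reference_img.get_data() == 2            # lesion voxels only
predicted = predicted_img.get_data() == 2
spacing = reference_img.header.get_zooms()[:3]       # voxel spacing in mm

# Surface raises an exception if either mask contains no object.
surf = Surface(predicted, reference, physical_voxel_spacing=spacing)
print('ASSD %.2f mm' % surf.get_average_symmetric_surface_distance())
print('RMSD %.2f mm' % surf.get_root_mean_square_symmetric_surface_distance())
print('MSD  %.2f mm' % surf.get_maximum_symmetric_surface_distance())
```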