├── docs
│   ├── coffee.png
│   ├── simpleITKAtEMBC19Logo.png
│   ├── simpleitkSetupAndSchedule.pptx
│   ├── simpleitkFundamentalConcepts.pptx
│   ├── simpleitkHistoricalOverview.pptx
│   ├── simpleitkNotebookDevelopmentTesting.pptx
│   └── index.html
├── figures
│   ├── hkaAngle.png
│   └── ImageOriginAndSpacing.png
├── tests
│   ├── requirements_testing.txt
│   ├── additional_dictionary.txt
│   └── test_notebooks.py
├── environment.yml
├── output
│   └── .gitignore
├── README.md
├── .circleci
│   └── config.yml
├── data
│   └── manifest.json
├── setup.ipynb
├── registration_gui.py
├── utilities.py
├── downloaddata.py
├── LICENSE
├── 07_segmentation_and_shape_analysis.ipynb
├── 08_segmentation_evaluation.ipynb
├── 01_spatial_transformations.ipynb
├── 05_advanced_registration.ipynb
└── 06_registration_application.ipynb

--------------------------------------------------------------------------------
/docs/coffee.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/docs/coffee.png

--------------------------------------------------------------------------------
/figures/hkaAngle.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/figures/hkaAngle.png

--------------------------------------------------------------------------------
/docs/simpleITKAtEMBC19Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/docs/simpleITKAtEMBC19Logo.png

--------------------------------------------------------------------------------
/figures/ImageOriginAndSpacing.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/figures/ImageOriginAndSpacing.png

--------------------------------------------------------------------------------
/docs/simpleitkSetupAndSchedule.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/docs/simpleitkSetupAndSchedule.pptx

--------------------------------------------------------------------------------
/docs/simpleitkFundamentalConcepts.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/docs/simpleitkFundamentalConcepts.pptx

--------------------------------------------------------------------------------
/docs/simpleitkHistoricalOverview.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/docs/simpleitkHistoricalOverview.pptx

--------------------------------------------------------------------------------
/docs/simpleitkNotebookDevelopmentTesting.pptx:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/SimpleITK/EMBC2019_WORKSHOP/master/docs/simpleitkNotebookDevelopmentTesting.pptx

--------------------------------------------------------------------------------
/tests/requirements_testing.txt:
--------------------------------------------------------------------------------
 1 | pytest
 2 | markdown
 3 | lxml
 4 | pyenchant
 5 | jupyter
 6 | matplotlib
 7 | ipywidgets
 8 | numpy
 9 | scipy
10 | pandas
11 | ipympl
12 | SimpleITK>=1.2.0

--------------------------------------------------------------------------------
/environment.yml:
-------------------------------------------------------------------------------- 1 | name: sitkpyEMBC19 2 | 3 | channels: 4 | - simpleitk 5 | 6 | dependencies: 7 | - python=3.7 8 | - jupyter 9 | - matplotlib 10 | - ipywidgets 11 | - numpy 12 | - scipy 13 | - pandas 14 | - SimpleITK>=1.2.0 15 | 16 | -------------------------------------------------------------------------------- /output/.gitignore: -------------------------------------------------------------------------------- 1 | # 2 | #Maintain an empty directory in the git repository, where all files in this 3 | #directory will always be ignored by git: 4 | #http://stackoverflow.com/questions/115983/how-can-i-add-an-empty-directory-to-a-git-repository 5 | # 6 | * 7 | # Except this file 8 | !.gitignore 9 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > :warning: This repository has been archived. For up to date information, please see the [SimpleITK tutorial](https://simpleitk.org/TUTORIAL/) or the [SimpleITK notebooks repository](https://github.com/InsightSoftwareConsortium/SimpleITK-Notebooks). 2 | 3 | # SimpleITK: IEEE EMBC 2019 Workshop 4 | 5 | [![CircleCI](https://circleci.com/gh/SimpleITK/EMBC2019_WORKSHOP/tree/master.svg?style=svg)](https://circleci.com/gh/SimpleITK/EMBC2019_WORKSHOP/tree/master) 6 | 7 | This repository contains all of the material presented at the 8 | IEEE Engineering in Medicine and Biology Conference (EMBC) 2019, and the course's website. 9 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | workflows: 4 | version: 2 5 | test: 6 | jobs: 7 | - test-3.7 8 | jobs: 9 | test-3.7: &test-template 10 | docker: 11 | - image: circleci/python:3.7-stretch 12 | environment: 13 | ExternalData_OBJECT_STORES: /home/circleci/.ExternalData 14 | SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT: 1 15 | steps: 16 | - checkout 17 | 18 | - restore_cache: 19 | keys: 20 | - simpleitk-embc2019-{{ checksum "data/manifest.json" }} 21 | - simpleitk-embc2019- #use previous cache when the manifest changes 22 | 23 | - run: 24 | name: Data setup (if cache is not empty then symbolic link to it) 25 | command: | 26 | mkdir -p "${ExternalData_OBJECT_STORES}" 27 | if [ ! 
-z "$(ls -A ${ExternalData_OBJECT_STORES})" ]; then 28 | cp -as /home/circleci/.ExternalData/* data 29 | fi 30 | python downloaddata.py "${ExternalData_OBJECT_STORES}" data/manifest.json 31 | 32 | - run: 33 | name: Setup of Python environment 34 | command: | 35 | sudo apt-get update; sudo apt-get install enchant 36 | sudo pip install virtualenv 37 | virtualenv ~/sitkpy --no-site-packages 38 | ~/sitkpy/bin/pip install -r tests/requirements_testing.txt 39 | ~/sitkpy/bin/jupyter nbextension enable --py --sys-prefix widgetsnbextension 40 | 41 | - run: 42 | name: Activate environment and run the test 43 | command: | 44 | source ~/sitkpy/bin/activate 45 | ~/sitkpy/bin/pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook 46 | 47 | - save_cache: 48 | key: simpleitk-embc2019-{{ checksum "data/manifest.json" }} 49 | paths: 50 | - /home/circleci/.ExternalData 51 | 52 | 53 | -------------------------------------------------------------------------------- /data/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "SimpleITK.jpg" : { 3 | "sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46" 4 | }, 5 | "CIRS057A_MR_CT_DICOM/readme.txt" : { 6 | "sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef", 7 | "archive" : "true" 8 | }, 9 | "training_001_ct.mha" : { 10 | "sha512": "1b950bc42fddfcefc76b9203d5dd6c45960c4fa8dcb69b839d3d083270d3d4c9a9d378de3d3f914e432dc18fb44c9b9770d4db5580a70265f3e24e6cdb83015d" 11 | }, 12 | "training_001_mr_T1.mha" : { 13 | "sha512": "3d15477962fef5851207964c381ffe77c586a6f70f2a373ecd3b6b4dc50d51dc6cd893eb1bedabcd382a96f0dafac893ae9e5a7c2b7333f9ff3f0c6b7016c7bc" 14 | }, 15 | "POPI/meta/00-P.mhd" : { 16 | "sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc", 17 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar", 18 | "archive" : "true" 19 | }, 20 | "POPI/meta/70-P.mhd" : { 21 | "sha512": "87c256ff441429babceab5f9886397f7c4b4f85525dfb5a786ed64b97f4779d3b313b3faf1449dddb7ba5ed49719ff0eea296a3367cdc98e753f597028a6f0e0", 22 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/70-MetaImage.tar", 23 | "archive" : "true" 24 | }, 25 | "POPI/landmarks/00-Landmarks.pts" : { 26 | "sha512": "7c2120b1f6d4b855aa11bf05dd987d677c219ca4bdfbd39234e7835285c45082c229fb5cc09e00e6bd91b339eeb1f700c597f4166633421a133c21ce773b25ad", 27 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Landmarks/00-Landmarks.pts" 28 | }, 29 | "POPI/landmarks/70-Landmarks.pts" : { 30 | "sha512": "5bbcb192a275b30510fb1badcd12c9110ed7909d4353c76567ebb0aae61fb944a9c4f3d8cd8ffa0519d8621626d06db333c456eda9c68c3a03991e291760f44c", 31 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Landmarks/70-Landmarks.pts" 32 | }, 33 | "POPI/masks/00-air-body-lungs.mhd" : { 34 | "sha512": "e20e93b316390ea53c59427a8ab770bb3ebda1f2e4c911382b753ec024d812de8a6c13d1919b77a1687c4f611acdb62ea92c05b2cc3ed065046fbdbe139538c8", 35 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Masks/00Mask-MetaImage.tar", 36 | "archive" : "true" 37 | }, 38 | "POPI/masks/70-air-body-lungs.mhd" : { 39 | "sha512": "cbbd4b71b9771b36dc71fe6c564c96fde363878713680a624b9b307c4d9959835731c841be6b9304457d212350cc0ffac44385994b9bc9b2d8523e2463c664f8", 40 | "url": 
"http://tux.creatis.insa-lyon.fr/~srit/POPI/Masks/70Mask-MetaImage.tar", 41 | "archive" : "true" 42 | }, 43 | "fib_sem_bacillus_subtilis.mha": { 44 | "sha512": "5f7c34428362434c4ff3353307f8401ea38a18a68e9fc1705138232b4c70da2fcf3e2e8560ba917620578093edb392b418702edca3be0eafa23d6f52ced73314" 45 | }, 46 | "leg_panorama/readme.txt": { 47 | "archive": "true", 48 | "sha512":"0771b63d7f8ed19d16ca36de144d6570cc3f8d604be56626ceb932f6bbf60857282f52aad4158f85e8a01bb1c84222da5b23fd3df91ec46ebe625341f56d6bf9" 49 | }, 50 | "liverTumorSegmentations/Patient01Homo.mha": { 51 | "sha512": "c57e6c51bdd9dd46034df3c01e3187d884526dbcfcf8e056221205bac1a09098142692a1bc76f3834a78a809570e64544dbec9b9d264747383ee71e20b21d054" 52 | }, 53 | "liverTumorSegmentations/Patient01Homo_Rad01.mha": { 54 | "sha512": "e94fb4d96e5cc5dca3c68fc67f63e895b8a71011f5343b4399e122b8f6a43ec5d5055f939299e3d9955e59cd841ebeb2d2395568c10ce29a597c518db784a337" 55 | }, 56 | "liverTumorSegmentations/Patient01Homo_Rad02.mha": { 57 | "sha512": "e055aff99a1c05ab90b84af048dd94d32236dcb4e4b8ce0a99ba3658fe85cc7c8505b806a92611bcf5ecf4cd0dbe6cafc336efdb9fe49753d1fc4aed174ed8ba" 58 | }, 59 | "liverTumorSegmentations/Patient01Homo_Rad03.mha": { 60 | "sha512": "89e4040e17aba2fae50e0b59b2043203ab33ce3ae6ef90af8ebc8c032a6aaee35084bf1e34ce1a390d157b8fadae2fa7694203a0886f54cc9da5293dbaa5d0e7" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /setup.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
SimpleITK: A Tool for Biomedical Image Processing, from Cells to Anatomical Structures
\n", 8 | "\n", 9 | "## Newcomers to Jupyter notebooks:\n", 10 | "1. We use two types of cells, code and markdown.\n", 11 | "2. To run a code cell, select it (mouse or arrow key so that it is highlighted) and then press shift+enter, which also moves focus to the next cell, or ctrl+enter, which doesn't.\n", 12 | "3. Closing the browser window does not close the Jupyter server. To close the server, go to the terminal where you ran it and press ctrl+c twice.\n", 13 | "\n", 14 | "For additional details see the [Jupyter Notebook Quick Start Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/index.html).\n", 15 | "\n", 16 | "\n", 17 | "## Environment Setup for Course\n", 18 | "\n", 19 | "This notebook should be run prior to arriving at the course venue, as it requires network connectivity." 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "First, let's check that you have the SimpleITK version you expect." 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "import SimpleITK as sitk\n", 36 | "from downloaddata import fetch_data, fetch_data_all\n", 37 | "\n", 38 | "from ipywidgets import interact\n", 39 | "\n", 40 | "print(sitk.Version())" 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "metadata": {}, 46 | "source": [ 47 | "Next, we check that the auxiliary program(s) are correctly installed in your environment.\n", 48 | "\n", 49 | "We expect that you have an external image viewer installed. The default viewer is Fiji. If you have another viewer (e.g. ITK-SNAP or 3D Slicer) you will need to set an environment variable to point to it. This variable can also be set from within a notebook, as shown below." 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": { 56 | "simpleitk_error_allowed": "Exception thrown in SimpleITK ImageViewer_Execute:" 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "# Uncomment the line below to change the default external viewer to your viewer of choice and test that it works.\n", 61 | "#%env SITK_SHOW_COMMAND path_to_program/ITK-SNAP \n", 62 | "\n", 63 | "# Retrieve an image from the network, read it and display using the external viewer\n", 64 | "image_viewer = sitk.ImageViewer()\n", 65 | "image_viewer.Execute(sitk.ReadImage(fetch_data(\"SimpleITK.jpg\")))" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "Now we check that the ipywidgets will display correctly. When you run the following cell you should see a slider.\n", 73 | "\n", 74 | "If you don't see a slider please shut down the Jupyter server, at the Anaconda command line prompt press Control-c twice, and then run the following command:\n", 75 | "\n", 76 | "```jupyter nbextension enable --py --sys-prefix widgetsnbextension```" 77 | ] 78 | }, 79 | { 80 | "cell_type": "code", 81 | "execution_count": null, 82 | "metadata": {}, 83 | "outputs": [], 84 | "source": [ 85 | "interact(lambda x: x, x=(0,10));" 86 | ] 87 | }, 88 | { 89 | "cell_type": "markdown", 90 | "metadata": {}, 91 | "source": [ 92 | "Finally, we download all of the data used in the notebooks in advance. This step is necessary as we will be running the notebooks without network connectivity.\n", 93 | "\n", 94 | "This may take a couple of minutes depending on your network."
95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": {}, 101 | "outputs": [], 102 | "source": [ 103 | "import os\n", 104 | "\n", 105 | "fetch_data_all('data', os.path.join('data','manifest.json'))" 106 | ] 107 | }, 108 | { 109 | "cell_type": "markdown", 110 | "metadata": {}, 111 | "source": [ 112 | "
Next »
" 113 | ] 114 | } 115 | ], 116 | "metadata": { 117 | "kernelspec": { 118 | "display_name": "Python 3", 119 | "language": "python", 120 | "name": "python3" 121 | }, 122 | "language_info": { 123 | "codemirror_mode": { 124 | "name": "ipython", 125 | "version": 3 126 | }, 127 | "file_extension": ".py", 128 | "mimetype": "text/x-python", 129 | "name": "python", 130 | "nbconvert_exporter": "python", 131 | "pygments_lexer": "ipython3", 132 | "version": "3.7.3" 133 | } 134 | }, 135 | "nbformat": 4, 136 | "nbformat_minor": 2 137 | } 138 | -------------------------------------------------------------------------------- /registration_gui.py: -------------------------------------------------------------------------------- 1 | import SimpleITK as sitk 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | # 6 | # Set of methods used for displaying the registration metric during the optimization. 7 | # 8 | 9 | # Callback invoked when the StartEvent happens, sets up our new data. 10 | def start_plot(): 11 | global metric_values, multires_iterations, ax, fig 12 | fig, ax = plt.subplots(1,1, figsize=(8,4)) 13 | 14 | metric_values = [] 15 | multires_iterations = [] 16 | plt.show() 17 | 18 | 19 | # Callback invoked when the EndEvent happens, do cleanup of data and figure. 20 | def end_plot(): 21 | global metric_values, multires_iterations, ax, fig 22 | 23 | del metric_values 24 | del multires_iterations 25 | del ax 26 | del fig 27 | 28 | # Callback invoked when the IterationEvent happens, update our data and display new figure. 29 | def plot_values(registration_method): 30 | global metric_values, multires_iterations, ax, fig 31 | 32 | metric_values.append(registration_method.GetMetricValue()) 33 | # Plot the similarity metric values 34 | ax.plot(metric_values, 'r') 35 | ax.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*') 36 | ax.set_xlabel('Iteration Number',fontsize=12) 37 | ax.set_ylabel('Metric Value',fontsize=12) 38 | fig.canvas.draw() 39 | 40 | # Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the 41 | # metric_values list. 42 | def update_multires_iterations(): 43 | global metric_values, multires_iterations 44 | multires_iterations.append(len(metric_values)) 45 | 46 | 47 | def overlay_binary_segmentation_contours(image, mask, window_min, window_max): 48 | """ 49 | Given a 2D image and mask: 50 | a. resample the image and mask into isotropic grid (required for display). 51 | b. rescale the image intensities using the given window information. 52 | c. overlay the contours computed from the mask onto the image. 53 | """ 54 | # Resample the image (linear interpolation) and mask (nearest neighbor interpolation) into an isotropic grid, 55 | # required for display. 
56 | original_spacing = image.GetSpacing() 57 | original_size = image.GetSize() 58 | min_spacing = min(original_spacing) 59 | new_spacing = [min_spacing, min_spacing] 60 | new_size = [int(round(original_size[0]*(original_spacing[0]/min_spacing))), 61 | int(round(original_size[1]*(original_spacing[1]/min_spacing)))] 62 | resampled_img = sitk.Resample(image, new_size, sitk.Transform(), 63 | sitk.sitkLinear, image.GetOrigin(), 64 | new_spacing, image.GetDirection(), 0.0, 65 | image.GetPixelID()) 66 | resampled_msk = sitk.Resample(mask, new_size, sitk.Transform(), 67 | sitk.sitkNearestNeighbor, mask.GetOrigin(), 68 | new_spacing, mask.GetDirection(), 0.0, 69 | mask.GetPixelID()) 70 | 71 | # Create the overlay: cast the mask to the expected label pixel type, and do the same for the image after 72 | # window-level adjustment, accounting for the high dynamic range of the CT. 73 | return sitk.LabelMapContourOverlay(sitk.Cast(resampled_msk, sitk.sitkLabelUInt8), 74 | sitk.Cast(sitk.IntensityWindowing(resampled_img, 75 | windowMinimum=window_min, 76 | windowMaximum=window_max), 77 | sitk.sitkUInt8), 78 | opacity = 1, 79 | contourThickness=[2,2]) 80 | 81 | 82 | def display_coronal_with_overlay(temporal_slice, coronal_slice, images, masks, label, window_min, window_max): 83 | """ 84 | Display a coronal slice from the 4D (3D+time) CT with a contour overlaid onto it. The contour is the edge of 85 | the specific label. 86 | """ 87 | img = images[temporal_slice][:,coronal_slice,:] 88 | msk = masks[temporal_slice][:,coronal_slice,:]==label 89 | 90 | overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) 91 | # Flip the image so that it corresponds to the correct radiological view. 92 | plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img))) 93 | plt.axis('off') 94 | plt.show() 95 | 96 | 97 | def display_coronal_with_label_maps_overlay(coronal_slice, mask_index, image, masks, label, window_min, window_max): 98 | """ 99 | Display a coronal slice from a 3D CT with a contour overlaid onto it. The contour is the edge of 100 | the specific label from the specific mask. The function is used to display the results of transforming a segmentation 101 | using registration. 102 | """ 103 | img = image[:,coronal_slice,:] 104 | msk = masks[mask_index][:,coronal_slice,:]==label 105 | 106 | overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) 107 | # Flip the image so that it corresponds to the correct radiological view.
108 | plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img))) 109 | plt.axis('off') 110 | plt.show() 111 | -------------------------------------------------------------------------------- /tests/additional_dictionary.txt: -------------------------------------------------------------------------------- 1 | ANTSNeighborhoodCorrelation 2 | API 3 | Acknowledgements 4 | AddTransform 5 | AffineTransform 6 | Args 7 | BFGS 8 | BMP 9 | BSpline 10 | BSplineTransform 11 | BSplineTransformInitializer 12 | BSplineTransformInitializerFilter 13 | Baum 14 | Biancardi 15 | BinaryMorphologicalClosing 16 | BinaryMorphologicalOpening 17 | BinaryThreshold 18 | Biomechanics 19 | Broyden 20 | Bérard 21 | CBCT 22 | CIRS 23 | CREATIS 24 | CTs 25 | CastImageFilter 26 | CenteredTransformInitializer 27 | CenteredTransformInitializerFilter 28 | Centre 29 | CheckerBoardImageFilter 30 | Clarysse 31 | Clin 32 | ComposeImageFilter 33 | ConfidenceConnected 34 | ConjugateGradientLineSearch 35 | ConnectedComponentImageFilter 36 | ConnectedThreshold 37 | DAPI 38 | DICOM 39 | DTransform 40 | Decubitus 41 | DemonsMetric 42 | DemonsRegistrationFilter 43 | Diff 44 | DiffeomorphicDemonsRegistrationFilter 45 | DisplacementField 46 | DisplacementFieldTransform 47 | Docstring 48 | Docstrings 49 | Doxygen 50 | EachIteration 51 | EndEvent 52 | ExhaustiveOptimizer 53 | ExpandImageFilter 54 | FFD 55 | FFDL 56 | FFDR 57 | FFF 58 | FFP 59 | FFS 60 | FLE 61 | FRE 62 | FREs 63 | FastMarchingImageFilter 64 | FastSymmetricForcesDemonsRegistrationFilter 65 | FilterName 66 | FixedParameters 67 | Flickr 68 | FlipImageFilter 69 | GIF 70 | GaborSource 71 | Geissbuehler 72 | GeodesicActiveContour 73 | GetArrayFromImage 74 | GetArrayViewFromImage 75 | GetCenter 76 | GetHeight 77 | GetImageFromArray 78 | GetInverse 79 | GetMetaData 80 | GetMetaDataKeys 81 | GetPixel 82 | Goldfarb 83 | GradientDescent 84 | GradientDescentLineSearch 85 | HDF 86 | HDF5ImageIO 87 | HFDL 88 | HFDR 89 | HFP 90 | HFS 91 | HKA 92 | HU 93 | HasMetaDataKey 94 | HausdorffDistanceImageFilter 95 | Hein 96 | Hounsfield 97 | ICCR 98 | ID's 99 | IPython 100 | ITK 101 | ITK's 102 | ITKv 103 | ImageFileReader 104 | ImageFileReader's 105 | ImageIO 106 | ImageIOs 107 | ImageJ 108 | ImageRegistrationMethod 109 | ImageSeriesReader 110 | IntensityWindowingImageFilter 111 | InterpolatorEnum 112 | Interspeech 113 | IterationEvent 114 | JPEG 115 | JPEGImageIO 116 | JPEGs 117 | Jaccard 118 | Jirapatnakul 119 | JoinSeries 120 | JointHistogram 121 | JointHistogramMutualInformation 122 | Joskowicz 123 | Jupyter 124 | Kamath 125 | LBFGS 126 | LSMImageIO 127 | LabelContourImageFilter 128 | LabelMapContourOverlayImageFilter 129 | LabelOverlayImageFilter 130 | LabelShapeStatisticsImageFilter 131 | LabelToRGBImageFilter 132 | LandmarkBasedTransformInitializer 133 | LaplacianSegmentation 134 | Lasser 135 | Lim 136 | Lingala 137 | Linte 138 | LoadPrivateTagsOn 139 | Lobb 140 | Léon 141 | MATLAB 142 | MATLAB's 143 | MacCallum 144 | MacOS 145 | Mahalanobis 146 | Malpica 147 | Marschner 148 | MattesMutualInformation 149 | Maurer 150 | MaximumEntropy 151 | MeanSquares 152 | MetaDataDictionaryArrayUpdateOn 153 | MetaImageIO 154 | MetricEvaluate 155 | Narayanan 156 | Nayak 157 | NeighborhoodConnected 158 | Nelder 159 | NiftiImageIO 160 | Orthop 161 | Otsu's 162 | PNG 163 | POPI 164 | PairedPointDataManipulation 165 | Photogrammetric 166 | PixelIDValueEnum 167 | Popa 168 | Proc 169 | Pythonic 170 | RGB 171 | RIRE 172 | RLE 173 | ROIs 174 | ReadImage 175 | ReadImageInformation 176 | ReadTransform 177 | 
RegularStepGradientDescent 178 | Relat 179 | ResampleImageFilter 180 | Rheumatol 181 | Rueda 182 | SEM 183 | SPIE 184 | Sarrut 185 | ScalarChanAndVese 186 | ScalarToRGBColormapImageFilter 187 | ScaleSkewVersor 188 | ScaleTransform 189 | ScaleVersor 190 | SetAngle 191 | SetCenter 192 | SetDirection 193 | SetFileName 194 | SetFixedInitialTransform 195 | SetInitialTransform 196 | SetInitialTransformAsBSpline 197 | SetInterpolator 198 | SetMean 199 | SetMetricAsDemons 200 | SetMetricAsX 201 | SetMovingInitialTransform 202 | SetOptimizerAsConjugateGradientLineSearch 203 | SetOptimizerAsX 204 | SetOptimizerScalesFromIndexShift 205 | SetOptimizerScalesFromJacobian 206 | SetOptimizerScalesFromPhysicalShift 207 | SetOrigin 208 | SetParameters 209 | SetPixel 210 | SetProbability 211 | SetScale 212 | SetShrinkFactorsPerLevel 213 | SetSmoothingSigmasPerLevel 214 | SetSpacing 215 | SetStandardDeviation 216 | SetStandardDeviation 217 | ShapeDetection 218 | SimpleITK 219 | SimpleITK's 220 | SimpleITKv 221 | SmoothingSigmasAreSpecifiedInPhysicalUnitsOn 222 | StartEvent 223 | StatisticsImageFilter 224 | Subsampling 225 | SymmetricForcesDemonsRegistrationFilter 226 | TIFFImageIO 227 | TRE 228 | TREs 229 | Thirion 230 | ThresholdSegmentation 231 | TileImageFilter 232 | Toger 233 | Toutios 234 | TransformContinuousIndexToPhysicalPoint 235 | TransformPoint 236 | TranslationTransform 237 | UInt 238 | VGG 239 | VTKImageIO 240 | Valgus 241 | Vandemeulebroucke 242 | Varus 243 | Vaz 244 | VectorConfidenceConnected 245 | VersorRigid 246 | VersorTransform 247 | VolView 248 | WriteImage 249 | XC 250 | XVth 251 | XX 252 | YCbCr 253 | YY 254 | Yaniv 255 | ZYX 256 | Zhu 257 | Zikri 258 | al 259 | app 260 | argmin 261 | atol 262 | aug 263 | ay 264 | az 265 | backgroundValue 266 | behaviour 267 | bio 268 | booktabs 269 | bspline 270 | ccc 271 | characterisation 272 | circ 273 | colour 274 | colourmap 275 | condylar 276 | const 277 | convergenceMinimumValue 278 | convergenceWindowSize 279 | cryosectioning 280 | cthead 281 | ctrl 282 | dataframe 283 | debugOn 284 | defaultPixelValue 285 | dev 286 | disp 287 | displaystyle 288 | documentclass 289 | doi 290 | dropdown 291 | dy 292 | eikonal 293 | endospore 294 | endospores 295 | env 296 | estimateLearningRate 297 | euler 298 | faux 299 | fdata 300 | fiducial's 301 | fiducials 302 | floordiv 303 | fp 304 | frac 305 | fronto 306 | geq 307 | ggplot 308 | greyscale 309 | gui 310 | hausdorff 311 | homography 312 | honours 313 | iff 314 | img 315 | init 316 | initialNeighborhoodRadius 317 | inline 318 | inlined 319 | interp 320 | interpolator 321 | interpolators 322 | ipywidgets 323 | iso 324 | jn 325 | jpg 326 | jupyter 327 | jupyterlab 328 | labelForUndecidedPixels 329 | labelled 330 | labelling 331 | labextension 332 | lapply 333 | ldots 334 | learningRate 335 | leq 336 | linspace 337 | mathbb 338 | mathbf 339 | matplotlib 340 | meshgrid 341 | meshlab 342 | metricvalue 343 | mha 344 | minima 345 | multiscale 346 | myshow 347 | nD 348 | nbagg 349 | nbextension 350 | nms 351 | np 352 | num 353 | numberOfIterations 354 | numberOfSteps 355 | numpy 356 | offline 357 | optimizerScales 358 | originalControlPointDisplacements 359 | originalDisplacements 360 | otsu 361 | outlier 362 | outputDirection 363 | outputOrigin 364 | outputPixelType 365 | outputSpacing 366 | outputfile 367 | overcomplete 368 | overfit 369 | overfitting 370 | param 371 | pixelated 372 | plafond 373 | pn 374 | png 375 | pre 376 | prefixi 377 | pretrained 378 | py 379 | pyplot 380 | qs 381 | recognised 382 | 
referenceImage 383 | resample 384 | resampled 385 | resamples 386 | resampling 387 | rgb 388 | roi 389 | sagittal 390 | scaleFactors 391 | scipy 392 | segBlobs 393 | segChannel 394 | shrinkFactors 395 | sitk 396 | sitkAnnulus 397 | sitkBSpline 398 | sitkBall 399 | sitkBlackmanWindowedSinc 400 | sitkBox 401 | sitkComplexFloat 402 | sitkCosineWindowedSinc 403 | sitkCross 404 | sitkFloat 405 | sitkGaussian 406 | sitkHammingWindowedSinc 407 | sitkInt 408 | sitkLabelUInt 409 | sitkLanczosWindowedSinc 410 | sitkLinear 411 | sitkMultiResolutionIterationEvent 412 | sitkNearestNeighbor 413 | sitkUInt 414 | sitkUnknown 415 | sitkVectorFloat 416 | sitkVectorInt 417 | sitkVectorUInt 418 | sitkWelchWindowedSinc 419 | smoothingSigmas 420 | spatio 421 | spc 422 | stepLength 423 | subsampling 424 | subtilis 425 | supersampling 426 | sz 427 | textrm 428 | tfm 429 | thetaX 430 | thetaY 431 | thetaZ 432 | ticklabels 433 | tidyr 434 | tif 435 | timeit 436 | toolbar 437 | tranforms 438 | transform's 439 | truediv 440 | ttest 441 | tx 442 | txt 443 | ty 444 | tz 445 | uint 446 | usepackage 447 | vdots 448 | versor 449 | vertices 450 | vm 451 | voxel 452 | voxel's 453 | voxels 454 | vx 455 | vy 456 | vz 457 | widgetsnbextension 458 | wikimedia 459 | xn 460 | xpixels 461 | xtable 462 | xx 463 | xxx 464 | xy 465 | xz 466 | yn 467 | ypixels 468 | yy 469 | yyy 470 | yz 471 | zz 472 | zzz 473 | -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | SimpleITK Course @ IEEE EMBC 2019 4 | 5 | 6 | 7 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 55 | 64 | 65 | 66 | 67 | 269 | 270 |
Dear workshop participants,

To gain the most benefit from the workshop you will need to run code on your
personal laptops. Detailed instructions on the expected software installation
and configuration are found below.

You are expected to configure your computer and download all of the course
material and data before arriving at the conference venue. For those of you
arriving from outside of Germany, please don't forget to bring a power plug
adapter. As there are limited electrical outlets in the room, please make sure
to fully charge your laptop before the workshop.

To contact us with problems or questions, please post to this repository's
GitHub issue reporting system (requires a GitHub user account).

We look forward to seeing you in Berlin.
         Brad and Ziv

Overview

SimpleITK is a simplified programming interface to the algorithms and data
structures of the Insight Toolkit (ITK). It supports bindings for multiple
programming languages including C++, Python, R, Java, C#, Lua, Ruby and TCL.
These bindings enable scientists to develop image analysis workflows in the
programming language they are most familiar with. The toolkit supports more
than 15 different image file formats, provides over 280 image analysis
filters, and implements a unified interface to the ITK intensity-based
registration framework. The SimpleITK user base is rapidly growing, with more
than 100,000 downloads of the Python bindings in the past year. Finally, by
combining SimpleITK's Python bindings with the Jupyter notebook web
application one can create an environment which facilitates collaborative and
reproducible development of biomedical image analysis workflows.
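To give a concrete feel for this interface, here is a minimal Python sketch;
the file names are illustrative placeholders, not files shipped with this
repository:

```python
import SimpleITK as sitk

# Read an image; the format is deduced from the file extension.
image = sitk.ReadImage("input_image.mha")

# Apply one of the toolkit's image analysis filters, here Gaussian smoothing.
smoothed = sitk.SmoothingRecursiveGaussian(image, sigma=2.0)

# Images interoperate with numpy; note the reversed (z,y,x) index order.
array = sitk.GetArrayFromImage(smoothed)
print(image.GetSize(), array.shape)

# Write the result; again the format is deduced from the extension.
sitk.WriteImage(smoothed, "smoothed_image.mha")
```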

In this workshop, we will use a hands-on approach utilizing Jupyter notebooks
to explore and experiment with various SimpleITK features in the Python
programming language. Participants will follow along using their personal
laptops, enabling them to explore the effects of code changes and parameter
settings not covered by the instructor. Examples using anatomical and
microscopy images will highlight the various capabilities of the toolkit.

The workshop starts by introducing the toolkit's two basic data elements,
Images and Transformations. Combining the two, we illustrate how to perform
image resampling and how to use SimpleITK components for image preparation and
data augmentation in the context of deep learning. We then explore the
features available in the toolkit's registration framework. Finally, we
illustrate the use of a variety of SimpleITK filters to perform segmentation
and to evaluate segmentation results.
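As a small taste of these first sessions, the following sketch combines the
two data elements by resampling an image through a rigid transformation, the
basic operation underlying the data augmentation examples (a minimal sketch
assuming a 2D input image; the file name is a placeholder):

```python
import SimpleITK as sitk

image = sitk.ReadImage("input_image.mha")

# Rotate by roughly 10 degrees around the image's physical center.
transform = sitk.Euler2DTransform()
transform.SetCenter(image.TransformContinuousIndexToPhysicalPoint(
    [(size - 1) / 2.0 for size in image.GetSize()]))
transform.SetAngle(0.17)

# Resample onto the original grid: linear interpolation, zero for pixels
# mapped from outside the input image.
augmented = sitk.Resample(image, image, transform, sitk.sitkLinear,
                          0.0, image.GetPixelID())
```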

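In the same spirit, a hedged sketch of the unified registration interface,
assuming two 3D volumes; the file names and parameter values are illustrative,
not course data:

```python
import SimpleITK as sitk

fixed = sitk.ReadImage("fixed_image.mha", sitk.sitkFloat32)
moving = sitk.ReadImage("moving_image.mha", sitk.sitkFloat32)

# Initialize with a rigid transformation aligning the geometric centers.
initial_transform = sitk.CenteredTransformInitializer(
    fixed, moving, sitk.Euler3DTransform(),
    sitk.CenteredTransformInitializerFilter.GEOMETRY)

registration_method = sitk.ImageRegistrationMethod()
registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)
registration_method.SetOptimizerAsGradientDescent(learningRate=1.0,
                                                  numberOfIterations=100)
registration_method.SetInterpolator(sitk.sitkLinear)
registration_method.SetInitialTransform(initial_transform, inPlace=False)

final_transform = registration_method.Execute(fixed, moving)
print(registration_method.GetOptimizerStopConditionDescription())
```

The registration_gui.py module in this repository provides the start_plot,
end_plot, plot_values and update_multires_iterations callbacks, which can be
attached to such a method with AddCommand to plot the metric during
optimization.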

Beyond the notebooks used in this course, you can find the main SimpleITK
notebooks repository on GitHub.


Organizers

  • Bradley C. Lowekamp, Medical Science & Computing and National Institutes of Health.
  • Ziv Yaniv, Medical Science & Computing and National Institutes of Health.

Prerequisites

If you encounter problems or have questions, please post using this
repository's GitHub issue reporting system (requires a GitHub user account).

In this course we will use the Anaconda Python distribution. Please follow the
instructions below to set up the environment we will use during the course.
All commands below are issued on the command line (Linux/Mac - terminal,
Windows - Anaconda Prompt).
    167 |
  1. 168 | Download and install the Fiji image viewer. This is the default image viewer used by SimpleITK: 169 |
      170 |
    • 171 | On Windows: Install into your user directory (e.g. C:\Users\[your_user_name]\). 172 |
    • 173 |
    • 174 | On Linux: Install into ~/bin/ . 175 |
    • 176 |
    • 177 | On Mac: Install into /Applications/ . 178 |
    • 179 |
    180 | 181 |
  2. 182 | 183 |
  3. 184 | Download and install the most 185 | recent version of Anaconda for your operating system. We assume it is installed 186 | in a directory named anaconda3. Regardless of the installer, we will be working 187 | with Python 3.7 188 |
  4. 189 | 190 |
  5. 191 |
      192 |
    • On Windows: open the Anaconda Prompt (found under the Anaconda3 start menu).
    • 193 |
    • On Linux/Mac: on the command line source path_to_anaconda3/bin/activate base
    • 194 |
    195 |
  6. 196 | 197 |
  7. 198 | Update the base anaconda environment and install the git version control system into it. 199 |
    conda update conda
    200 | conda update anaconda
    201 | conda install git
    202 | 
    203 |
  8. 204 | 205 |
  9. 206 | Clone this repository: 207 |
    git clone https://github.com/SimpleITK/EMBC2019_WORKSHOP.git
    208 | 
    209 |
  10. 210 | 211 |
  11. 212 | Create the virtual environment containing all packages required for the course: 213 |
    conda env create -f EMBC2019_WORKSHOP/environment.yml
    214 | 
    215 |
  12. 216 | 217 |
  13. 218 | Activate the virtual environment: 219 |
      220 |
    • On Windows: open the Anaconda Prompt (found under the Anaconda3 start menu)
      conda activate sitkpyEMBC19
    • 221 |
    • On Linux/Mac: on the command line
      source path_to_anaconda3/bin/activate sitkpyEMBC19
    • 222 |
    223 |
  14. 224 | 225 |
  15. 226 | Go over the setup notebook (requires internet connectivity). This notebook checks the environment setup and downloads 227 | all of the required data. 228 |
    cd EMBC2019_WORKSHOP
    229 | jupyter notebook setup.ipynb
    230 |
  16. 231 | 232 |
233 | 234 | 235 | 236 |

Program

Click the launch binder button to try it out without installing (some display
functions will not work):

  • [8:30AM - 9:00AM] History and overview [ppt: setup and schedule, history, overview]
  • [9:00AM - 10:00AM] Fundamentals: spatial transformations, images and resampling, data augmentation for deep learning.
  • [10:00AM - 10:30AM] Coffee break [coffee]
  • [10:30AM - 11:30AM] Registration: basic, advanced, application.
  • [11:30AM - 12:15PM] Segmentation: workflow, evaluation.
  • [12:15PM - 12:30PM] Notebook development and testing [ppt: development], concluding remarks.

Supplementary Material

For those interested in reading more about SimpleITK (Python and beyond):
271 | 272 | 273 | -------------------------------------------------------------------------------- /utilities.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | popi_body_label = 0 5 | popi_air_label = 1 6 | popi_lung_label = 2 7 | 8 | def read_POPI_points(file_name): 9 | """ 10 | Read the Point-validated Pixel-based Breathing Thorax Model (POPI) landmark points file. 11 | The file is an ASCII file with X Y Z coordinates in each line and the first line is a header. 12 | 13 | Args: 14 | file_name: full path to the file. 15 | Returns: 16 | (list(tuple)): List of points as tuples. 17 | """ 18 | with open(file_name,'r') as fp: 19 | lines = fp.readlines() 20 | points = [] 21 | # First line in the file is #X Y Z which we ignore. 22 | for line in lines[1:]: 23 | coordinates = line.split() 24 | if coordinates: 25 | points.append((float(coordinates[0]), float(coordinates[1]), float(coordinates[2]))) 26 | return points 27 | 28 | 29 | def point2str(point, precision=1): 30 | """ 31 | Format a point for printing, based on specified precision with trailing zeros. Uniform printing for vector-like data 32 | (tuple, numpy array, list). 33 | 34 | Args: 35 | point (vector-like): nD point with floating point coordinates. 36 | precision (int): Number of digits after the decimal point. 37 | Return: 38 | String represntation of the given point "xx.xxx yy.yyy zz.zzz...". 39 | """ 40 | return ' '.join(format(c, '.{0}f'.format(precision)) for c in point) 41 | 42 | 43 | def uniform_random_points(bounds, num_points): 44 | """ 45 | Generate random (uniform withing bounds) nD point cloud. Dimension is based on the number of pairs in the bounds input. 46 | 47 | Args: 48 | bounds (list(tuple-like)): list where each tuple defines the coordinate bounds. 49 | num_points (int): number of points to generate. 50 | 51 | Returns: 52 | list containing num_points numpy arrays whose coordinates are within the given bounds. 53 | """ 54 | internal_bounds = [sorted(b) for b in bounds] 55 | # Generate rows for each of the coordinates according to the given bounds, stack into an array, 56 | # and split into a list of points. 57 | mat = np.vstack([np.random.uniform(b[0], b[1], num_points) for b in internal_bounds]) 58 | return list(mat[:len(bounds)].T) 59 | 60 | 61 | def target_registration_errors(tx, point_list, reference_point_list, 62 | display_errors = False, min_err= None, max_err=None, figure_size=(8,6)): 63 | """ 64 | Distances between points transformed by the given transformation and their 65 | location in another coordinate system. When the points are only used to 66 | evaluate registration accuracy (not used in the registration) this is the 67 | Target Registration Error (TRE). 68 | 69 | Args: 70 | tx (SimpleITK.Transform): The transform we want to evaluate. 71 | point_list (list(tuple-like)): Points in fixed image 72 | cooredinate system. 73 | reference_point_list (list(tuple-like)): Points in moving image 74 | cooredinate system. 75 | display_errors (boolean): Display a 3D figure with the points from 76 | point_list color corresponding to the error. 77 | min_err, max_err (float): color range is linearly stretched between min_err 78 | and max_err. If these values are not given then 79 | the range of errors computed from the data is used. 80 | figure_size (tuple): Figure size in inches. 81 | 82 | Returns: 83 | (errors) [float]: list of TRE values. 
84 | """ 85 | transformed_point_list = [tx.TransformPoint(p) for p in point_list] 86 | 87 | errors = [np.linalg.norm(np.array(p_fixed) - np.array(p_moving)) 88 | for p_fixed,p_moving in zip(transformed_point_list, reference_point_list)] 89 | if display_errors: 90 | from mpl_toolkits.mplot3d import Axes3D 91 | import matplotlib.pyplot as plt 92 | import matplotlib 93 | fig = plt.figure(figsize=figure_size) 94 | ax = fig.add_subplot(111, projection='3d') 95 | if not min_err: 96 | min_err = np.min(errors) 97 | if not max_err: 98 | max_err = np.max(errors) 99 | 100 | collection = ax.scatter(list(np.array(point_list).T)[0], 101 | list(np.array(point_list).T)[1], 102 | list(np.array(point_list).T)[2], 103 | marker = 'o', 104 | c = errors, 105 | vmin = min_err, 106 | vmax = max_err, 107 | cmap = matplotlib.cm.hot, 108 | label = 'original points') 109 | plt.colorbar(collection, shrink=0.8) 110 | plt.title('registration errors in mm', x=0.7, y=1.05) 111 | ax.set_xlabel('X') 112 | ax.set_ylabel('Y') 113 | ax.set_zlabel('Z') 114 | plt.show() 115 | 116 | return errors 117 | 118 | 119 | 120 | def print_transformation_differences(tx1, tx2): 121 | """ 122 | Check whether two transformations are "equivalent" in an arbitrary spatial region 123 | either 3D or 2D, [x=(-10,10), y=(-100,100), z=(-1000,1000)]. This is just a sanity check, 124 | as we are just looking at the effect of the transformations on a random set of points in 125 | the region. 126 | """ 127 | if tx1.GetDimension()==2 and tx2.GetDimension()==2: 128 | bounds = [(-10,10),(-100,100)] 129 | elif tx1.GetDimension()==3 and tx2.GetDimension()==3: 130 | bounds = [(-10,10),(-100,100), (-1000,1000)] 131 | else: 132 | raise ValueError('Transformation dimensions mismatch, or unsupported transformation dimensionality') 133 | num_points = 10 134 | point_list = uniform_random_points(bounds, num_points) 135 | tx1_point_list = [ tx1.TransformPoint(p) for p in point_list] 136 | differences = target_registration_errors(tx2, point_list, tx1_point_list) 137 | print('Differences - min: {:.2f}, max: {:.2f}, mean: {:.2f}, std: {:.2f}'.format(np.min(differences), np.max(differences), np.mean(differences), np.std(differences))) 138 | 139 | 140 | def display_displacement_scaling_effect(s, original_x_mat, original_y_mat, tx, original_control_point_displacements): 141 | """ 142 | This function displays the effects of the deformable transformation on a grid of points by scaling the 143 | initial displacements (either of control points for BSpline or the deformation field itself). It does 144 | assume that all points are contained in the range(-2.5,-2.5), (2.5,2.5). 145 | """ 146 | if tx.GetDimension() !=2: 147 | raise ValueError('display_displacement_scaling_effect only works in 2D') 148 | 149 | plt.scatter(original_x_mat, 150 | original_y_mat, 151 | marker='o', 152 | color='blue', label='original points') 153 | pointsX = [] 154 | pointsY = [] 155 | tx.SetParameters(s*original_control_point_displacements) 156 | 157 | for index, value in np.ndenumerate(original_x_mat): 158 | px,py = tx.TransformPoint((value, original_y_mat[index])) 159 | pointsX.append(px) 160 | pointsY.append(py) 161 | 162 | plt.scatter(pointsX, 163 | pointsY, 164 | marker='^', 165 | color='red', label='transformed points') 166 | plt.legend(loc=(0.25,1.01)) 167 | plt.xlim((-2.5,2.5)) 168 | plt.ylim((-2.5,2.5)) 169 | 170 | 171 | def parameter_space_regular_grid_sampling(*transformation_parameters): 172 | ''' 173 | Create a list representing a regular sampling of the parameter space. 
174 | Args: 175 | *transformation_parameters : two or more numpy ndarrays representing parameter values. The order 176 | of the arrays should match the ordering of the SimpleITK transformation 177 | parameterization (e.g. Similarity2DTransform: scaling, rotation, tx, ty) 178 | Return: 179 | List of lists representing the regular grid sampling. 180 | 181 | Examples: 182 | #parameterization for 2D translation transform (tx,ty): [[1.0,1.0], [1.5,1.0], [2.0,1.0]] 183 | >>> parameter_space_regular_grid_sampling(np.linspace(1.0,2.0,3), np.linspace(1.0,1.0,1)) 184 | ''' 185 | return [[np.asscalar(p) for p in parameter_values] 186 | for parameter_values in np.nditer(np.meshgrid(*transformation_parameters))] 187 | 188 | 189 | def similarity3D_parameter_space_regular_sampling(thetaX, thetaY, thetaZ, tx, ty, tz, scale): 190 | ''' 191 | Create a list representing a regular sampling of the 3D similarity transformation parameter space. As the 192 | SimpleITK rotation parameterization uses the vector portion of a versor, we don't have an 193 | intuitive way of specifying rotations. We therefore use the ZYX Euler angle parametrization and convert to 194 | versor. 195 | Args: 196 | thetaX, thetaY, thetaZ: numpy ndarrays with the Euler angle values to use. 197 | tx, ty, tz: numpy ndarrays with the translation values to use. 198 | scale: numpy array with the scale values to use. 199 | Return: 200 | List of lists representing the parameter space sampling (vx,vy,vz,tx,ty,tz,s). 201 | ''' 202 | return [list(eul2quat(parameter_values[0],parameter_values[1], parameter_values[2])) + 203 | [np.asscalar(p) for p in parameter_values[3:]] for parameter_values in np.nditer(np.meshgrid(thetaX, thetaY, thetaZ, tx, ty, tz, scale))] 204 | 205 | 206 | def eul2quat(ax, ay, az, atol=1e-8): 207 | ''' 208 | Translate between Euler angle (ZYX) order and quaternion representation of a rotation. 209 | Args: 210 | ax: X rotation angle in radians. 211 | ay: Y rotation angle in radians. 212 | az: Z rotation angle in radians. 213 | atol: tolerance used for stable quaternion computation (qs==0 within this tolerance). 214 | Return: 215 | Numpy array with three entries representing the vectorial component of the quaternion. 216 | 217 | ''' 218 | # Create the rotation matrix using ZYX Euler angles and then compute the quaternion from its entries.
219 | cx = np.cos(ax) 220 | cy = np.cos(ay) 221 | cz = np.cos(az) 222 | sx = np.sin(ax) 223 | sy = np.sin(ay) 224 | sz = np.sin(az) 225 | r=np.zeros((3,3)) 226 | r[0,0] = cz*cy 227 | r[0,1] = cz*sy*sx - sz*cx 228 | r[0,2] = cz*sy*cx+sz*sx 229 | 230 | r[1,0] = sz*cy 231 | r[1,1] = sz*sy*sx + cz*cx 232 | r[1,2] = sz*sy*cx - cz*sx 233 | 234 | r[2,0] = -sy 235 | r[2,1] = cy*sx 236 | r[2,2] = cy*cx 237 | 238 | # Compute quaternion: 239 | qs = 0.5*np.sqrt(r[0,0] + r[1,1] + r[2,2] + 1) 240 | qv = np.zeros(3) 241 | # If the scalar component of the quaternion is close to zero, we 242 | # compute the vector part using a numerically stable approach 243 | if np.isclose(qs,0.0,atol): 244 | i= np.argmax([r[0,0], r[1,1], r[2,2]]) 245 | j = (i+1)%3 246 | k = (j+1)%3 247 | w = np.sqrt(r[i,i] - r[j,j] - r[k,k] + 1) 248 | qv[i] = 0.5*w 249 | qv[j] = (r[i,j] + r[j,i])/(2*w) 250 | qv[k] = (r[i,k] + r[k,i])/(2*w) 251 | else: 252 | denom = 4*qs 253 | qv[0] = (r[2,1] - r[1,2])/denom; 254 | qv[1] = (r[0,2] - r[2,0])/denom; 255 | qv[2] = (r[1,0] - r[0,1])/denom; 256 | return qv 257 | -------------------------------------------------------------------------------- /downloaddata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Since we do not want to store large binary data files in our Git repository, 5 | we fetch_data_all from a network resource. 6 | 7 | The data we download is described in a json file. The file format is a dictionary 8 | of dictionaries. The top level key is the file name. The returned dictionary 9 | contains a sha512 checksum and possibly a url and boolean flag indicating 10 | the file is part of an archive. The sha512 checksum is mandatory. 11 | When the optional url is given, we attempt to download from that url, otherwise 12 | we attempt to download from the list of servers returned by the 13 | get_servers() function. Files that are contained in archives are 14 | identified by the archive flag. 15 | 16 | Example json file contents: 17 | 18 | { 19 | "SimpleITK.jpg": { 20 | "sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46" 21 | }, 22 | "POPI/meta/00-P.mhd": { 23 | "url": "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar", 24 | "archive": "true", 25 | "sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc" 26 | }, 27 | "CIRS057A_MR_CT_DICOM/readme.txt": { 28 | "archive": "true", 29 | "sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef" 30 | } 31 | } 32 | 33 | Notes: 34 | 1. The file we download can be inside an archive. In this case, the sha512 35 | checksum is that of the archive. 36 | 37 | """ 38 | 39 | import hashlib 40 | import sys 41 | import os 42 | import json 43 | 44 | import errno 45 | import warnings 46 | 47 | # http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook 48 | 49 | def url_download_report(bytes_so_far, url_download_size, total_size): 50 | percent = float(bytes_so_far) / total_size 51 | percent = round(percent * 100, 2) 52 | if bytes_so_far > url_download_size: 53 | # Note that the carriage return is at the begining of the 54 | # string and not the end. This accomodates usage in 55 | # IPython usage notebooks. Otherwise the string is not 56 | # displayed in the output. 
57 | sys.stdout.write("\rDownloaded %d of %d bytes (%0.2f%%)" % 58 | (bytes_so_far, total_size, percent)) 59 | sys.stdout.flush() 60 | if bytes_so_far >= total_size: 61 | sys.stdout.write("\rDownloaded %d of %d bytes (%0.2f%%)\n" % 62 | (bytes_so_far, total_size, percent)) 63 | sys.stdout.flush() 64 | 65 | 66 | def url_download_read(url, outputfile, url_download_size=8192 * 2, report_hook=None): 67 | # Use the urllib2 to download the data. The Requests package, highly 68 | # recommended for this task, doesn't support the file scheme so we opted 69 | # for urllib2 which does. 70 | 71 | try: 72 | # Python 3 73 | from urllib.request import urlopen, URLError, HTTPError 74 | except ImportError: 75 | from urllib2 import urlopen, URLError, HTTPError 76 | from xml.dom import minidom 77 | 78 | # Open the url 79 | try: 80 | url_response = urlopen(url) 81 | except HTTPError as e: 82 | return "HTTP Error: {0} {1}\n".format(e.code, url) 83 | except URLError as e: 84 | return "URL Error: {0} {1}\n".format(e.reason, url) 85 | 86 | # We download all content types - the assumption is that the sha512 ensures 87 | # that what we received is the expected data. 88 | try: 89 | # Python 3 90 | content_length = url_response.info().get("Content-Length") 91 | except AttributeError: 92 | content_length = url_response.info().getheader("Content-Length") 93 | total_size = content_length.strip() 94 | total_size = int(total_size) 95 | bytes_so_far = 0 96 | with open(outputfile, "wb") as local_file: 97 | while 1: 98 | try: 99 | url_download = url_response.read(url_download_size) 100 | bytes_so_far += len(url_download) 101 | if not url_download: 102 | break 103 | local_file.write(url_download) 104 | # handle errors 105 | except HTTPError as e: 106 | return "HTTP Error: {0} {1}\n".format(e.code, url) 107 | except URLError as e: 108 | return "URL Error: {0} {1}\n".format(e.reason, url) 109 | if report_hook: 110 | report_hook(bytes_so_far, url_download_size, total_size) 111 | return "Downloaded Successfully" 112 | 113 | # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python?rq=1 114 | def mkdir_p(path): 115 | try: 116 | os.makedirs(path) 117 | except OSError as exc: # Python >2.5 118 | if exc.errno == errno.EEXIST and os.path.isdir(path): 119 | pass 120 | else: 121 | raise 122 | 123 | #http://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically 124 | def deprecated(func): 125 | """This is a decorator which can be used to mark functions 126 | as deprecated. 
It will result in a warning being emmitted 127 | when the function is used.""" 128 | 129 | def new_func(*args, **kwargs): 130 | warnings.simplefilter('always', DeprecationWarning) #turn off filter 131 | warnings.warn("Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2) 132 | warnings.simplefilter('default', DeprecationWarning) #reset filter 133 | return func(*args, **kwargs) 134 | 135 | new_func.__name__ = func.__name__ 136 | new_func.__doc__ = func.__doc__ 137 | new_func.__dict__.update(func.__dict__) 138 | return new_func 139 | 140 | def get_servers(): 141 | import os 142 | servers = list() 143 | # NIAID S3 data store 144 | servers.append( "https://s3.amazonaws.com/simpleitk/public/notebooks/SHA512/%(hash)" ) 145 | # Girder server hosted by kitware 146 | servers.append("https://data.kitware.com/api/v1/file/hashsum/sha512/%(hash)/download") 147 | # Local file store 148 | if 'ExternalData_OBJECT_STORES' in os.environ.keys(): 149 | local_object_stores = os.environ['ExternalData_OBJECT_STORES'] 150 | for local_object_store in local_object_stores.split(";"): 151 | servers.append( "file://{0}/SHA512/%(hash)".format(local_object_store) ) 152 | return servers 153 | 154 | 155 | def output_hash_is_valid(known_sha512, output_file): 156 | sha512 = hashlib.sha512() 157 | if not os.path.exists(output_file): 158 | return False 159 | with open(output_file, 'rb') as fp: 160 | for url_download in iter(lambda: fp.read(128 * sha512.block_size), b''): 161 | sha512.update(url_download) 162 | retreived_sha512 = sha512.hexdigest() 163 | return retreived_sha512 == known_sha512 164 | 165 | 166 | def fetch_data_one(onefilename, output_directory, manifest_file, verify=True, force=False): 167 | import tarfile, zipfile 168 | 169 | with open(manifest_file, 'r') as fp: 170 | manifest = json.load(fp) 171 | assert onefilename in manifest, "ERROR: {0} does not exist in {1}".format(onefilename, manifest_file) 172 | 173 | sys.stdout.write("Fetching {0}\n".format(onefilename)) 174 | output_file = os.path.realpath(os.path.join(output_directory, onefilename)) 175 | data_dictionary = manifest[onefilename] 176 | sha512 = data_dictionary['sha512'] 177 | # List of places where the file can be downloaded from 178 | all_urls = [] 179 | for url_base in get_servers(): 180 | all_urls.append(url_base.replace("%(hash)", sha512)) 181 | if "url" in data_dictionary: 182 | all_urls.append(data_dictionary["url"]) 183 | 184 | new_download = False 185 | 186 | for url in all_urls: 187 | # Only download if force is true or the file does not exist. 188 | if force or not os.path.exists(output_file): 189 | mkdir_p(os.path.dirname(output_file)) 190 | url_download_read(url, output_file, report_hook=url_download_report) 191 | # Check if a file was downloaded and has the correct hash 192 | if output_hash_is_valid(sha512, output_file): 193 | new_download = True 194 | # Stop looking once found 195 | break 196 | # If the file exists this means the hash is invalid we have a problem. 197 | elif os.path.exists(output_file): 198 | error_msg = "File " + output_file 199 | error_msg += " has incorrect hash value, " + sha512 + " was expected." 200 | raise Exception(error_msg) 201 | 202 | # Did not find the file anywhere. 
203 | if not os.path.exists(output_file): 204 | error_msg = "File " + "\'" + os.path.basename(output_file) +"\'" 205 | error_msg += " could not be found in any of the following locations:\n" 206 | error_msg += ", ".join(all_urls) 207 | raise Exception(error_msg) 208 | 209 | if not new_download and verify: 210 | # If the file was part of an archive then we don't verify it. These 211 | # files are only verfied on download 212 | if ( not "archive" in data_dictionary) and ( not output_hash_is_valid(sha512, output_file) ): 213 | # Attempt to download if sha512 is incorrect. 214 | fetch_data_one(onefilename, output_directory, manifest_file, verify, 215 | force=True) 216 | # If the file is in an archive, unpack it. 217 | if tarfile.is_tarfile(output_file) or zipfile.is_zipfile(output_file): 218 | tmp_output_file = output_file + ".tmp" 219 | os.rename(output_file, tmp_output_file) 220 | if tarfile.is_tarfile(tmp_output_file): 221 | archive = tarfile.open(tmp_output_file) 222 | if zipfile.is_zipfile(tmp_output_file): 223 | archive = zipfile.ZipFile(tmp_output_file, 'r') 224 | archive.extractall(os.path.dirname(tmp_output_file)) 225 | archive.close() 226 | os.remove(tmp_output_file) 227 | 228 | return output_file 229 | 230 | 231 | def fetch_data_all(output_directory, manifest_file, verify=True): 232 | with open(manifest_file, 'r') as fp: 233 | manifest = json.load(fp) 234 | for filename in manifest: 235 | fetch_data_one(filename, output_directory, manifest_file, verify, 236 | force=False) 237 | 238 | def fetch_data(cache_file_name, verify=False, cache_directory_name="data"): 239 | """ 240 | fetch_data is a simplified interface that requires 241 | relative pathing with a manifest.json file located in the 242 | same cache_directory_name name. 243 | 244 | By default the cache_directory_name is "Data" relative to the current 245 | python script. An absolute path can also be given. 246 | """ 247 | if not os.path.isabs(cache_directory_name): 248 | cache_root_directory_name = os.path.dirname(__file__) 249 | cache_directory_name = os.path.join(cache_root_directory_name, cache_directory_name) 250 | cache_manifest_file = os.path.join(cache_directory_name, 'manifest.json') 251 | assert os.path.exists(cache_manifest_file), "ERROR, {0} does not exist".format(cache_manifest_file) 252 | return fetch_data_one(cache_file_name, cache_directory_name, cache_manifest_file, verify=verify) 253 | 254 | 255 | if __name__ == '__main__': 256 | 257 | 258 | if len(sys.argv) < 3: 259 | print('Usage: ' + sys.argv[0] + ' output_directory manifest.json') 260 | sys.exit(1) 261 | output_directory = sys.argv[1] 262 | if not os.path.exists(output_directory): 263 | os.makedirs(output_directory) 264 | manifest = sys.argv[2] 265 | fetch_data_all(output_directory, manifest) 266 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /tests/test_notebooks.py: --------------------------------------------------------------------------------
1 | import os
2 | import subprocess
3 | import tempfile
4 | import nbformat
5 | import pytest
6 | import markdown
7 | import re
8 |
9 | from enchant.checker import SpellChecker
10 | from enchant.tokenize import Filter, EmailFilter, URLFilter
11 | from enchant import DictWithPWL
12 |
13 | from lxml.html import document_fromstring, etree
14 | try:
15 | # Python 3
16 | from urllib.request import urlopen, URLError
17 | except ImportError:
18 | from urllib2 import urlopen, URLError
19 |
20 |
21 |
22 | """
23 | run all tests:
24 | pytest -v --tb=short
25 |
26 | run Python tests:
27 | pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook
28 |
29 | run a specific Python test:
30 | pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook[setup.ipynb]
31 |
32 | -s : disable all capturing of output.
33 | """
34 |
35 | class Test_notebooks(object):
36 | """
37 | Testing of SimpleITK Jupyter notebooks:
38 | 1. Static analysis:
39 | Check that notebooks do not contain output (sanity check as these should
40 | not have been pushed to the repository).
41 | Check that all the URLs in the markdown cells are not broken.
42 | 2. Dynamic analysis:
43 | Run the notebook and check for errors. In some notebooks we
44 | intentionally cause errors to illustrate certain features of the toolkit.
45 | All code cells that intentionally generate an error are expected to be
46 | marked using the cell's metadata. In the notebook go to
47 | "View->Cell Toolbar->Edit Metadata" and add the following json entry:
48 |
49 | "simpleitk_error_expected": simpleitk_error_message
50 |
51 | with the appropriate "simpleitk_error_message" text.
52 | Cells where an error is allowed, but not necessarily expected, should be
53 | marked with the following json:
54 |
55 | "simpleitk_error_allowed": simpleitk_error_message
56 |
57 | The simpleitk_error_message is a substring of the generated error
58 | message, such as 'Exception thrown in SimpleITK Show:'
59 |
60 | To test notebooks that use too much memory (exceed the 4 GB allocated for the testing
61 | machine):
62 | 1. Create an environment variable named SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT
63 | 2. Import the setup_for_testing.py at the top of the notebook. This module will
64 | decorate sitk.ReadImage so that after reading the initial image it is
65 | resampled by a factor of 4 in each dimension.
66 |
67 | Adding a test:
68 | Simply add the new notebook file name to the list of files decorating the test_python_notebook
69 | or test_r_notebook functions. DON'T FORGET THE COMMA.
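    For example (illustrative only, not taken from a specific notebook), the
    complete metadata of a cell where a failure of sitk.Show is allowed, but
    not required, would contain:

        "metadata": {
            "simpleitk_error_allowed": "Exception thrown in SimpleITK Show:"
        }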
70 | """
71 |
72 | _allowed_error_markup = 'simpleitk_error_allowed'
73 | _expected_error_markup = 'simpleitk_error_expected'
74 |
75 | @pytest.mark.parametrize('notebook_file_name',
76 | ['setup.ipynb',
77 | '01_spatial_transformations.ipynb',
78 | '02_images_and_resampling.ipynb',
79 | '03_data_augmentation.ipynb',
80 | '04_basic_registration.ipynb',
81 | '05_advanced_registration.ipynb',
82 | '06_registration_application.ipynb',
83 | pytest.param('07_segmentation_and_shape_analysis.ipynb', marks=pytest.mark.skipif(os.environ.get('CIRCLECI')=='true', \
84 | reason="runtime too long for CircleCI")),
85 | '08_segmentation_evaluation.ipynb'])
86 | def test_python_notebook(self, notebook_file_name):
87 | self.evaluate_notebook(self.absolute_path_python(notebook_file_name), 'python')
88 |
89 |
90 | def evaluate_notebook(self, path, kernel_name):
91 | """
92 | Perform static and dynamic analysis of the notebook.
93 | Execute a notebook via nbconvert and print the results of the test (errors etc.)
94 | Args:
95 | path (string): Name of notebook to run.
96 | kernel_name (string): Which jupyter kernel to use to run the test.
97 | Relevant values are: 'python2', 'python3', 'ir'.
98 | """
99 |
100 | dir_name, file_name = os.path.split(path)
101 | if dir_name:
102 | os.chdir(dir_name)
103 |
104 | print('-------- begin (kernel {0}) {1} --------'.format(kernel_name,file_name))
105 | no_static_errors = self.static_analysis(path)
106 | no_dynamic_errors = self.dynamic_analysis(path, kernel_name)
107 | print('-------- end (kernel {0}) {1} --------'.format(kernel_name,file_name))
108 | assert(no_static_errors and no_dynamic_errors)
109 |
110 |
111 | def static_analysis(self, path):
112 | """
113 | Perform static analysis of the notebook.
114 | Read the notebook and check that there is no output and that the links
115 | in the markdown cells are not broken.
116 | Args:
117 | path (string): Name of notebook.
118 | Return:
119 | boolean: True if static analysis succeeded, otherwise False.
120 | """
121 |
122 | nb = nbformat.read(path, nbformat.current_nbformat)
123 |
124 | #######################
125 | # Check that the notebook does not contain output from code cells
126 | # (should not be in the repository, but well...).
127 | #######################
128 | no_unexpected_output = True
129 |
130 | # Check that the cell dictionary has an 'outputs' key and that it is
131 | # empty; relies on Python using short-circuit evaluation so that we
132 | # don't get KeyError when retrieving the 'outputs' entry.
133 | cells_with_output = [c.source for c in nb.cells if 'outputs' in c and c.outputs]
134 | if cells_with_output:
135 | no_unexpected_output = False
136 | print('Cells with unexpected output:\n_____________________________')
137 | for cell in cells_with_output:
138 | print(cell+'\n---')
139 | else:
140 | print('no unexpected output')
141 |
142 | #######################
143 | # Check that all the links in the markdown cells are valid/accessible.
144 | #######################
145 | no_broken_links = True
146 |
147 | cells_and_broken_links = []
148 | for c in nb.cells:
149 | if c.cell_type == 'markdown':
150 | html_tree = document_fromstring(markdown.markdown(c.source))
151 | broken_links = []
152 | # iterlinks() returns tuples of the form (element, attribute, link, pos)
153 | for document_link in html_tree.iterlinks():
154 | try:
155 | if 'http' not in document_link[2]: # Local file.
156 | url = 'file://' + os.path.abspath(document_link[2])
157 | else: # Remote file.
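# (urlopen accepts the file:// URLs constructed above as well as ordinary
# http(s) URLs, so a single code path validates local and remote links.)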
158 | url = document_link[2] 159 | urlopen(url) 160 | except URLError: 161 | broken_links.append(url) 162 | if broken_links: 163 | cells_and_broken_links.append((broken_links,c.source)) 164 | if cells_and_broken_links: 165 | no_broken_links = False 166 | print('Cells with broken links:\n________________________') 167 | for links, cell in cells_and_broken_links: 168 | print(cell+'\n') 169 | print('\tBroken links:') 170 | print('\t'+'\n\t'.join(links)+'\n---') 171 | else: 172 | print('no broken links') 173 | 174 | ####################### 175 | # Spell check all markdown cells and comments in code cells using the pyenchant spell checker. 176 | ####################### 177 | no_spelling_mistakes = True 178 | simpleitk_notebooks_dictionary = DictWithPWL('en_US', os.path.join(os.path.dirname(os.path.abspath(__file__)), 179 | 'additional_dictionary.txt')) 180 | spell_checker = SpellChecker(simpleitk_notebooks_dictionary, filters = [EmailFilter, URLFilter]) 181 | cells_and_spelling_mistakes = [] 182 | for c in nb.cells: 183 | spelling_mistakes = [] 184 | if c.cell_type == 'markdown': 185 | # Get the text as a string from the html without the markup which is replaced by space. 186 | spell_checker.set_text(' '.join(etree.XPath('//text()')(document_fromstring(markdown.markdown(c.source))))) 187 | elif c.cell_type == 'code': 188 | # Get all the comments and concatenate them into a single string separated by newlines. 189 | comment_lines = re.findall('#+.*',c.source) 190 | spell_checker.set_text('\n'.join(comment_lines)) 191 | for error in spell_checker: 192 | error_message = 'error: '+ '\'' + error.word +'\', ' + 'suggestions: ' + str(spell_checker.suggest()) 193 | spelling_mistakes.append(error_message) 194 | if spelling_mistakes: 195 | cells_and_spelling_mistakes.append((spelling_mistakes, c.source)) 196 | if cells_and_spelling_mistakes: 197 | no_spelling_mistakes = False 198 | print('Cells with spelling mistakes:\n________________________') 199 | for misspelled_words, cell in cells_and_spelling_mistakes: 200 | print(cell+'\n') 201 | print('\tMisspelled words and suggestions:') 202 | print('\t'+'\n\t'.join(misspelled_words)+'\n---') 203 | else: 204 | print('no spelling mistakes') 205 | 206 | return(no_unexpected_output and no_broken_links and no_spelling_mistakes) 207 | 208 | 209 | def dynamic_analysis(self, path, kernel_name): 210 | """ 211 | Perform dynamic analysis of the notebook. 212 | Execute a notebook via nbconvert and print the results of the test 213 | (errors etc.) 214 | Args: 215 | path (string): Name of notebook to run. 216 | kernel_name (string): Which jupyter kernel to use to run the test. 217 | Relevant values are:'python', 'ir'. 218 | Return: 219 | boolean: True if dynamic analysis succeeded, otherwise False. 220 | """ 221 | 222 | # Execute the notebook and allow errors (run all cells), output is 223 | # written to a temporary file which is automatically deleted. 
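# (allow_errors=True lets nbconvert continue executing after a cell raises,
# capturing the error output in the cells; timeout is the per-cell
# execution limit in seconds.)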
224 | with tempfile.NamedTemporaryFile(suffix='.ipynb') as fout: 225 | args = ['jupyter', 'nbconvert', 226 | '--to', 'notebook', 227 | '--execute', 228 | '--ExecutePreprocessor.kernel_name='+kernel_name, 229 | '--ExecutePreprocessor.allow_errors=True', 230 | '--ExecutePreprocessor.timeout=600', # seconds till timeout 231 | '--output', fout.name, path] 232 | subprocess.check_call(args) 233 | nb = nbformat.read(fout.name, nbformat.current_nbformat) 234 | 235 | # Get all of the unexpected errors (logic: cell has output with an error 236 | # and no error is expected or the allowed/expected error is not the one which 237 | # was generated.) 238 | unexpected_errors = [(output.evalue, c.source) for c in nb.cells \ 239 | if 'outputs' in c for output in c.outputs \ 240 | if (output.output_type=='error') and \ 241 | (((Test_notebooks._allowed_error_markup not in c.metadata) and (Test_notebooks._expected_error_markup not in c.metadata))or \ 242 | ((Test_notebooks._allowed_error_markup in c.metadata) and (c.metadata[Test_notebooks._allowed_error_markup] not in output.evalue)) or \ 243 | ((Test_notebooks._expected_error_markup in c.metadata) and (c.metadata[Test_notebooks._expected_error_markup] not in output.evalue)))] 244 | 245 | no_unexpected_errors = True 246 | if unexpected_errors: 247 | no_unexpected_errors = False 248 | print('Cells with unexpected errors:\n_____________________________') 249 | for e, src in unexpected_errors: 250 | print(src) 251 | print('unexpected error: '+e) 252 | else: 253 | print('no unexpected errors') 254 | 255 | # Get all of the missing expected errors (logic: cell has output 256 | # but expected error was not generated.) 257 | missing_expected_errors = [] 258 | for c in nb.cells: 259 | if Test_notebooks._expected_error_markup in c.metadata: 260 | missing_error = True 261 | if 'outputs' in c: 262 | for output in c.outputs: 263 | if (output.output_type=='error') and (c.metadata[Test_notebooks._expected_error_markup] in output.evalue): 264 | missing_error = False 265 | if missing_error: 266 | missing_expected_errors.append((c.metadata[Test_notebooks._expected_error_markup],c.source)) 267 | 268 | no_missing_expected_errors = True 269 | if missing_expected_errors: 270 | no_missing_expected_errors = False 271 | print('\nCells with missing expected errors:\n___________________________________') 272 | for e, src in missing_expected_errors: 273 | print(src) 274 | print('missing expected error: '+e) 275 | else: 276 | print('no missing expected errors') 277 | 278 | return(no_unexpected_errors and no_missing_expected_errors) 279 | 280 | 281 | def absolute_path_python(self, notebook_file_name): 282 | return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', notebook_file_name)) 283 | -------------------------------------------------------------------------------- /07_segmentation_and_shape_analysis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

# Focused Ion Beam Scanning Electron Microscopy Image Segmentation
\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n",
11 | "1. SimpleITK supports a large number of filters that facilitate classical segmentation algorithms (a variety of thresholding algorithms, watersheds, ...).\n",
12 | "2. Once your data is segmented, SimpleITK enables you to efficiently post-process the segmentation (e.g. label distinct objects, analyze object shapes).\n",
13 | "\n",
14 | "This notebook will illustrate the use of SimpleITK for segmentation of bacteria from a 3D Focused Ion Beam Scanning Electron Microscopy (FIB-SEM) image. The specific bacterium is *Bacillus subtilis*, a rod-shaped organism naturally found in soil and plants. The bacteria have been subjected to stress to initiate the process of forming an endospore. These endospores can be seen as generally dark ellipsoids inside the individual bacteria."
15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [
23 | "import SimpleITK as sitk\n",
24 | "import pandas as pd\n",
25 | "\n",
26 | "%matplotlib notebook\n",
27 | "\n",
28 | "import matplotlib.pyplot as plt\n",
29 | "import gui\n",
30 | "from math import ceil\n",
31 | "from downloaddata import fetch_data as fdata"
32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [
38 | "# Load data\n",
39 | "\n",
40 | "Load the 3D volume and display it."
41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [
49 | "img = sitk.ReadImage(fdata(\"fib_sem_bacillus_subtilis.mha\"))\n",
50 | "gui.MultiImageDisplay(image_list = [img], figure_size=(8,4));"
51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [
57 | "# Segmentation\n",
58 | "\n",
59 | "To allow us to analyze the shape of whole bacteria we first need to segment them. We will do this in several steps:\n",
60 | "1. Separate the bacteria from the embedding resin background.\n",
61 | "2. Mark each potential bacterium with a unique label, to evaluate the segmentation.\n",
62 | "3. Remove small components and fill small holes using binary morphology operators (opening and closing).\n",
63 | "4. Use seed-based watersheds to perform the final segmentation.\n",
64 | "5. Remove bacteria that are connected to the image boundary."
65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [
71 | "## Separate the bacteria from the background\n",
72 | "\n",
73 | "Based on the visualization of the data above, it intuitively appears that the background and foreground are separable using a single intensity threshold. Our first step towards validating this observation is to plot the intensity distribution."
74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [
82 | "plt.figure()\n",
83 | "plt.hist(sitk.GetArrayViewFromImage(img).flatten(), bins=100)\n",
84 | "plt.show()"
85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [
91 | "The histogram is bi-modal with a clear separation, which we have manually identified as occurring at an intensity value of 120.\n",
92 | "\n",
93 | "We can also use one of several binary threshold selection filters available in SimpleITK.
" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "threshold_filters = {'Otsu': sitk.OtsuThresholdImageFilter(),\n", 103 | " 'Triangle' : sitk.TriangleThresholdImageFilter(),\n", 104 | " 'Huang' : sitk.HuangThresholdImageFilter(),\n", 105 | " 'MaxEntropy' : sitk.MaximumEntropyThresholdImageFilter()}\n", 106 | "\n", 107 | "filter_selection = 'Manual'\n", 108 | "try:\n", 109 | " thresh_filter = threshold_filters[filter_selection]\n", 110 | " thresh_filter.SetInsideValue(0)\n", 111 | " thresh_filter.SetOutsideValue(1)\n", 112 | " thresh_img = thresh_filter.Execute(img)\n", 113 | " thresh_value = thresh_filter.GetThreshold()\n", 114 | "except KeyError:\n", 115 | " thresh_value = 120\n", 116 | " thresh_img = img>thresh_value\n", 117 | "\n", 118 | "print(\"Threshold used: \" + str(thresh_value)) \n", 119 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, thresh_img)], \n", 120 | " title_list = ['Binary Segmentation'], figure_size=(8,4));" 121 | ] 122 | }, 123 | { 124 | "cell_type": "markdown", 125 | "metadata": {}, 126 | "source": [ 127 | "# Mark each potential bacterium with unique label and evaluate" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "stats = sitk.LabelShapeStatisticsImageFilter()\n", 137 | "stats.Execute(sitk.ConnectedComponent(thresh_img))\n", 138 | "\n", 139 | "# Look at the distribution of sizes of connected components (bacteria).\n", 140 | "label_sizes = [ stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1]\n", 141 | "\n", 142 | "plt.figure()\n", 143 | "plt.hist(label_sizes,bins=200)\n", 144 | "plt.title(\"Distribution of Object Sizes\")\n", 145 | "plt.xlabel(\"size in pixels\")\n", 146 | "plt.ylabel(\"number of objects\")\n", 147 | "plt.show()" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "The histogram above shows tens of thousands of very small labels which are not visually detected by looking at the segmentation." 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "## Remove small islands and holes\n", 162 | "\n", 163 | "Using binary morphological operations we remove small objects using the opening operation and fill small holes using the closing operation. The use of opening and closing by reconstruction maintains the boundary of the original objects." 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "cleaned_thresh_img = sitk.BinaryOpeningByReconstruction(thresh_img, [10, 10, 10])\n", 173 | "cleaned_thresh_img = sitk.BinaryClosingByReconstruction(cleaned_thresh_img, [10, 10, 10])\n", 174 | "\n", 175 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, cleaned_thresh_img)], \n", 176 | " title_list = ['Cleaned Binary Segmentation'], figure_size=(8,4));" 177 | ] 178 | }, 179 | { 180 | "cell_type": "markdown", 181 | "metadata": {}, 182 | "source": [ 183 | "Check that the number of objects defined by the binary image is more reasonable." 
184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": null, 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [ 192 | "stats = sitk.LabelShapeStatisticsImageFilter()\n", 193 | "stats.Execute(sitk.ConnectedComponent(cleaned_thresh_img))\n", 194 | "\n", 195 | "# Look at the distribution of sizes of connected components (bacteria).\n", 196 | "label_sizes = [ stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1]\n", 197 | "\n", 198 | "plt.figure()\n", 199 | "plt.hist(label_sizes,bins=200)\n", 200 | "plt.title(\"Distribution of Object Sizes\")\n", 201 | "plt.xlabel(\"size in pixels\")\n", 202 | "plt.ylabel(\"number of objects\")\n", 203 | "plt.show()" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "After the morphological operations, our binary image seems to have a reasonable number of objects, but is this true? We next look at the unique objects defined by this binary segmentation (each object is marked with a unique color)." 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": null, 216 | "metadata": {}, 217 | "outputs": [], 218 | "source": [ 219 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, sitk.ConnectedComponent(cleaned_thresh_img))], \n", 220 | " title_list = ['Cleaned Binary Segmentation'],figure_size=(8,4));" 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "metadata": {}, 226 | "source": [ 227 | "## Seed based watershed segmentation\n", 228 | "\n", 229 | "The bacteria appear to be segmented correctly from the background but not from each other. Using the visualization and histogram above we see that in 3D many of them are connected, even if on a slice by slice inspection they appear separate. " 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "dist_img = sitk.SignedMaurerDistanceMap(cleaned_thresh_img != 0, insideIsPositive=False, squaredDistance=False, useImageSpacing=False)\n", 239 | "radius = 10\n", 240 | "# Seeds have a distance of \"radius\" or more to the object boundary, they are uniquely labelled.\n", 241 | "seeds = sitk.ConnectedComponent(dist_img < -radius)\n", 242 | "# Relabel the seed objects using consecutive object labels while removing all objects with less than 15 pixels.\n", 243 | "seeds = sitk.RelabelComponent(seeds, minimumObjectSize=15)\n", 244 | "# Run the watershed segmentation using the distance map and seeds.\n", 245 | "ws = sitk.MorphologicalWatershedFromMarkers(dist_img, seeds, markWatershedLine=True)\n", 246 | "ws = sitk.Mask( ws, sitk.Cast(cleaned_thresh_img, ws.GetPixelID()))" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "metadata": {}, 252 | "source": [ 253 | "Visualize the distance map, the unique seeds and final object segmentation." 
254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": null, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "gui.MultiImageDisplay(image_list = [dist_img,\n", 263 | " sitk.LabelOverlay(img, seeds),\n", 264 | " sitk.LabelOverlay(img, ws)], \n", 265 | " title_list = ['Segmentation Distance',\n", 266 | " 'Watershed Seeds',\n", 267 | " 'Binary Watershed Labeling'],\n", 268 | " shared_slider=True,\n", 269 | " horizontal=False,\n", 270 | " figure_size=(6,12));" 271 | ] 272 | }, 273 | { 274 | "cell_type": "markdown", 275 | "metadata": {}, 276 | "source": [ 277 | "## Removal of objects touching the image boundary\n", 278 | "\n", 279 | "We are not sure objects touching the image boundary are whole bacteria, so we remove them." 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": null, 285 | "metadata": {}, 286 | "outputs": [], 287 | "source": [ 288 | "# The image has a small black border which we account for here.\n", 289 | "bgp = sitk.BinaryGrindPeak( (ws!=0)| (img==0))\n", 290 | "non_border_seg = sitk.Mask( ws, bgp==0)\n", 291 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, non_border_seg)], \n", 292 | " title_list = ['Final Segmentation'],figure_size=(8,4));" 293 | ] 294 | }, 295 | { 296 | "cell_type": "markdown", 297 | "metadata": {}, 298 | "source": [ 299 | "# Object Analysis\n", 300 | "\n", 301 | "Once we have the segmented objects we look at their shapes and the intensity distributions inside the objects.\n", 302 | "\n", 303 | "Note that sizes are in nanometers. ITK and consequently SimpleITK are agnostic of the actual measurement units. It is up to you as the developer to explicitly use the correct units and more importantly, DO NOT MIX UNITS.\n", 304 | "\n", 305 | "We first compute all of the measurements we are interested in." 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [ 314 | "shape_stats = sitk.LabelShapeStatisticsImageFilter()\n", 315 | "shape_stats.ComputeOrientedBoundingBoxOn()\n", 316 | "shape_stats.Execute(non_border_seg)\n", 317 | "\n", 318 | "intensity_stats = sitk.LabelIntensityStatisticsImageFilter()\n", 319 | "intensity_stats.Execute(non_border_seg,img) " 320 | ] 321 | }, 322 | { 323 | "cell_type": "markdown", 324 | "metadata": {}, 325 | "source": [ 326 | "Insert the values into a pandas dataframe and display some descriptive statistics." 
327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "metadata": {}, 333 | "outputs": [], 334 | "source": [ 335 | "stats_list = [ (shape_stats.GetPhysicalSize(i),\n", 336 | " shape_stats.GetElongation(i),\n", 337 | " shape_stats.GetFlatness(i),\n", 338 | " shape_stats.GetOrientedBoundingBoxSize(i)[0],\n", 339 | " shape_stats.GetOrientedBoundingBoxSize(i)[2],\n", 340 | " intensity_stats.GetMean(i),\n", 341 | " intensity_stats.GetStandardDeviation(i),\n", 342 | " intensity_stats.GetSkewness(i)) for i in shape_stats.GetLabels()]\n", 343 | "cols=[\"Volume (nm^3)\",\n", 344 | " \"Elongation\",\n", 345 | " \"Flatness\",\n", 346 | " \"Oriented Bounding Box Minimum Size(nm)\",\n", 347 | " \"Oriented Bounding Box Maximum Size(nm)\",\n", 348 | " \"Intensity Mean\",\n", 349 | " \"Intensity Standard Deviation\",\n", 350 | " \"Intensity Skewness\"]\n", 351 | "\n", 352 | "# Create the pandas data frame and display descriptive statistics.\n", 353 | "stats = pd.DataFrame(data=stats_list, index=shape_stats.GetLabels(), columns=cols)\n", 354 | "stats.describe()" 355 | ] 356 | }, 357 | { 358 | "cell_type": "markdown", 359 | "metadata": {}, 360 | "source": [ 361 | "Create a plot to investigate the relationship, possible correlations, between volume and object shape characteristics (elongation, flatness, principal moments). " 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "execution_count": null, 367 | "metadata": {}, 368 | "outputs": [], 369 | "source": [ 370 | "fig, axes = plt.subplots(nrows=len(cols), ncols=2, figsize=(6,4*len(cols)))\n", 371 | "axes[0,0].axis('off')\n", 372 | "\n", 373 | "stats.loc[:,cols[0]].plot.hist(ax=axes[0,1], bins=25)\n", 374 | "axes[0,1].set_xlabel(cols[0])\n", 375 | "axes[0,1].xaxis.set_label_position(\"top\")\n", 376 | "\n", 377 | "for i in range(1,len(cols)):\n", 378 | " c = cols[i]\n", 379 | " bar = stats.loc[:,[c]].plot.hist(ax=axes[i,0], bins=20,orientation='horizontal',legend=False)\n", 380 | " bar.set_ylabel(stats.loc[:,[c]].columns.values[0]) \n", 381 | " scatter = stats.plot.scatter(ax=axes[i,1],y=c,x=cols[0])\n", 382 | " scatter.set_ylabel('')\n", 383 | " # Remove axis labels from all plots except the last (they all share the labels)\n", 384 | " if(i

" 440 | ] 441 | } 442 | ], 443 | "metadata": { 444 | "kernelspec": { 445 | "display_name": "Python 3", 446 | "language": "python", 447 | "name": "python3" 448 | }, 449 | "language_info": { 450 | "codemirror_mode": { 451 | "name": "ipython", 452 | "version": 3 453 | }, 454 | "file_extension": ".py", 455 | "mimetype": "text/x-python", 456 | "name": "python", 457 | "nbconvert_exporter": "python", 458 | "pygments_lexer": "ipython3", 459 | "version": "3.6.8" 460 | } 461 | }, 462 | "nbformat": 4, 463 | "nbformat_minor": 2 464 | } 465 | -------------------------------------------------------------------------------- /08_segmentation_evaluation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

# Segmentation Evaluation
\n", 8 | "\n", 9 | "**Summary:**\n", 10 | "\n",
11 | "1. SimpleITK supports two ways of combining expert segmentations to obtain a reference segmentation.\n",
12 | "2. A variety of criteria used for evaluating a segmentation result are readily available or implemented in SimpleITK.\n",
13 | "\n",
14 | "**Reference Segmentation**\n",
15 | "\n",
16 | "Evaluating segmentation algorithms is most often done using reference data to which you compare your results. In the medical domain reference data is commonly obtained via manual segmentation by an expert (don't forget to thank your clinical colleagues for their hard work). When you are resource-limited, the reference data may be defined by a single expert. This is less than ideal. When multiple experts provide you with their input then you can potentially combine them to obtain reference data that is closer to the ever-elusive \"ground truth\". In this notebook we show two approaches to combining input from multiple observers: majority vote and the Simultaneous Truth and Performance Level\n",
17 | "Estimation [(STAPLE)](https://www.ncbi.nlm.nih.gov/pubmed/15250643) algorithm.\n",
18 | "\n",
19 | "**Segmentation Evaluation**\n",
20 | "\n",
21 | "Once we have a reference, we compare the algorithm's performance using multiple criteria, as usually there is no single evaluation measure that conveys all of the relevant information. In this notebook we illustrate the use of the following evaluation criteria:\n",
22 | "* Overlap measures:\n",
23 | "  * Jaccard and Dice coefficients\n",
24 | "  * false negative and false positive errors\n",
25 | "* Surface distance measures:\n",
26 | "  * Hausdorff distance (symmetric)\n",
27 | "  * mean, median, max and standard deviation of the distances between surfaces\n",
28 | "* Volume measures:\n",
29 | "  * volume similarity $\frac{2(v_1-v_2)}{v_1+v_2}$\n",
30 | "\n",
31 | "The relevant criteria are task-dependent, so you need to ask yourself whether you are interested in detecting spurious errors or not (mean or max surface distance), whether over/under-segmentation should be differentiated (volume similarity and Dice, or just Dice), and what the ratio is between acceptable errors and the size of the segmented object (the Dice coefficient may be too sensitive to small errors when the segmented object is small, and not sensitive enough to large errors when the segmented object is large).\n",
32 | "\n",
33 | "In the context of segmentation challenges, algorithm rankings are often based on a weighted combination of these criteria. These ranking schemes are not necessarily robust, as discussed in \"[Why rankings of biomedical image analysis competitions should be interpreted with care](https://www.nature.com/articles/s41467-018-07619-7)\", L. Maier-Hein et al.\n",
34 | "\n",
35 | "The data we use in this notebook is a set of manually segmented liver tumors from a single clinical CT scan. A larger dataset (four scans) is freely available from this [MIDAS repository](http://www.insight-journal.org/midas/collection/view/38). The relevant publication is: T.
Popa et al., \"Tumor Volume Measurement and Volume Measurement Comparison Plug-ins for VolView Using ITK\", SPIE Medical Imaging: Visualization, Image-Guided Procedures, and Display, 2006.\n" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "import SimpleITK as sitk\n", 45 | "\n", 46 | "import numpy as np\n", 47 | "\n", 48 | "from downloaddata import fetch_data as fdata\n", 49 | "%matplotlib inline\n", 50 | "import matplotlib.pyplot as plt\n", 51 | "import gui\n", 52 | "\n", 53 | "from ipywidgets import interact, fixed" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "## Utility method for display" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": { 67 | "code_folding": [] 68 | }, 69 | "outputs": [], 70 | "source": [ 71 | "def display_with_overlay(segmentation_number, slice_number, image, segs, window_min, window_max):\n", 72 | " \"\"\"\n", 73 | " Display a CT slice with segmented contours overlaid onto it. The contours are the edges of \n", 74 | " the labeled regions.\n", 75 | " \"\"\"\n", 76 | " img = image[:,:,slice_number]\n", 77 | " msk = segs[segmentation_number][:,:,slice_number]\n", 78 | " overlay_img = sitk.LabelMapContourOverlay(sitk.Cast(msk, sitk.sitkLabelUInt8), \n", 79 | " sitk.Cast(sitk.IntensityWindowing(img,\n", 80 | " windowMinimum=window_min, \n", 81 | " windowMaximum=window_max), \n", 82 | " sitk.sitkUInt8), \n", 83 | " opacity = 1, \n", 84 | " contourThickness=[2,2])\n", 85 | " #We assume the original slice is isotropic, otherwise the display would be distorted \n", 86 | " plt.imshow(sitk.GetArrayViewFromImage(overlay_img))\n", 87 | " plt.axis('off')\n", 88 | " plt.show()" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "## Fetch the data\n", 96 | "\n", 97 | "Retrieve a single CT scan and three manual delineations of a liver tumor. Visual inspection of the data highlights the variability between experts. " 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "image = sitk.ReadImage(fdata(\"liverTumorSegmentations/Patient01Homo.mha\"))\n", 107 | "segmentation_file_names = [\"liverTumorSegmentations/Patient01Homo_Rad01.mha\", \n", 108 | " \"liverTumorSegmentations/Patient01Homo_Rad02.mha\",\n", 109 | " \"liverTumorSegmentations/Patient01Homo_Rad03.mha\"]\n", 110 | " \n", 111 | "segmentations = [sitk.ReadImage(fdata(file_name), sitk.sitkUInt8) for file_name in segmentation_file_names]\n", 112 | " \n", 113 | "interact(display_with_overlay, segmentation_number=(0,len(segmentations)-1), \n", 114 | " slice_number = (0, image.GetSize()[2]-1), image = fixed(image),\n", 115 | " segs = fixed(segmentations), window_min = fixed(-1024), window_max=fixed(976));" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "## Derive a reference\n", 123 | "\n", 124 | "There are a variety of ways to derive a reference segmentation from multiple expert inputs (\"[A comparison of ground truth estimation methods](https://www.ncbi.nlm.nih.gov/pubmed/20033494)\", A. M. Biancardi, A. C. Jirapatnakul, A. P. 
Reeves).\n",
125 | "\n",
126 | "Two methods that are available in SimpleITK are [majority vote](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LabelVotingImageFilter.html) and the STAPLE algorithm ([single label](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1STAPLEImageFilter.html) or [multi label](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1MultiLabelSTAPLEImageFilter.html))."
127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [
135 | "# Use the STAPLE algorithm to obtain the reference segmentation. This implementation of the original algorithm\n",
136 | "# combines a single label from multiple segmentations; the label is user-specified. The result of the\n",
137 | "# filter is the voxel's probability of belonging to the foreground. We then have to threshold the result to obtain\n",
138 | "# a reference binary segmentation.\n",
139 | "foregroundValue = 1\n",
140 | "threshold = 0.95\n",
141 | "reference_segmentation_STAPLE_probabilities = sitk.STAPLE(segmentations, foregroundValue) \n",
142 | "# We use the overloaded operator to perform thresholding; another option is to use the BinaryThreshold function.\n",
143 | "reference_segmentation = reference_segmentation_STAPLE_probabilities > threshold\n",
144 | "\n",
145 | "manual_plus_staple = list(segmentations) \n",
146 | "# Append the reference segmentation to the list of manual segmentations.\n",
147 | "manual_plus_staple.append(reference_segmentation)\n",
148 | "\n",
149 | "interact(display_with_overlay, segmentation_number=(0,len(manual_plus_staple)-1), \n",
150 | " slice_number = (0, image.GetSize()[2]-1), image = fixed(image),\n",
151 | " segs = fixed(manual_plus_staple), window_min = fixed(-1024), window_max=fixed(976));"
152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [
158 | "## Evaluate segmentations using the reference\n",
159 | "\n",
160 | "Once we derive a reference from our experts' input we can compare segmentation results to it.\n",
161 | "\n",
162 | "Note that in this notebook we compare the expert segmentations to the reference derived from them. This is not relevant for algorithm evaluation, but it can potentially be used to rank your experts.\n",
163 | "\n",
164 | "In this specific implementation we take advantage of the fact that we have a binary segmentation with 1 for foreground and 0 for background."
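As an aside, the majority vote approach mentioned above is essentially a one-liner. The sketch below is illustrative rather than part of the original notebook; it assumes the `segmentations` list loaded earlier, and the value used to mark undecided voxels is an arbitrary choice:

```python
# Majority vote: each voxel is assigned the label that the majority of the
# experts agree on. Voxels with no clear majority are marked with a
# user-chosen "undecided" label (here 10, an arbitrary value outside the
# label range of the input segmentations).
labelForUndecidedPixels = 10
reference_segmentation_majority_vote = sitk.LabelVoting(segmentations, labelForUndecidedPixels)
```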
165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": null, 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [ 173 | "from enum import Enum\n", 174 | "\n", 175 | "# Use enumerations to represent the various evaluation measures\n", 176 | "class OverlapMeasures(Enum):\n", 177 | " jaccard, dice, volume_similarity, false_negative, false_positive = range(5)\n", 178 | "\n", 179 | "class SurfaceDistanceMeasures(Enum):\n", 180 | " hausdorff_distance, mean_surface_distance, median_surface_distance, std_surface_distance, max_surface_distance = range(5)\n", 181 | " \n", 182 | "# Empty numpy arrays to hold the results \n", 183 | "overlap_results = np.zeros((len(segmentations),len(OverlapMeasures.__members__.items()))) \n", 184 | "surface_distance_results = np.zeros((len(segmentations),len(SurfaceDistanceMeasures.__members__.items()))) \n", 185 | "\n", 186 | "# Compute the evaluation criteria\n", 187 | "\n", 188 | "# Note that for the overlap measures filter, because we are dealing with a single label we \n", 189 | "# use the combined, all labels, evaluation measures without passing a specific label to the methods.\n", 190 | "overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n", 191 | "\n", 192 | "hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n", 193 | "\n", 194 | "# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside \n", 195 | "# relationship, is irrelevant)\n", 196 | "label = 1\n", 197 | "reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(reference_segmentation, squaredDistance=False, useImageSpacing=True))\n", 198 | "reference_surface = sitk.LabelContour(reference_segmentation)\n", 199 | "\n", 200 | "statistics_image_filter = sitk.StatisticsImageFilter()\n", 201 | "# Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 202 | "statistics_image_filter.Execute(reference_surface)\n", 203 | "num_reference_surface_pixels = int(statistics_image_filter.GetSum()) \n", 204 | "\n", 205 | "for i, seg in enumerate(segmentations):\n", 206 | " # Overlap measures\n", 207 | " overlap_measures_filter.Execute(reference_segmentation, seg)\n", 208 | " overlap_results[i,OverlapMeasures.jaccard.value] = overlap_measures_filter.GetJaccardCoefficient()\n", 209 | " overlap_results[i,OverlapMeasures.dice.value] = overlap_measures_filter.GetDiceCoefficient()\n", 210 | " overlap_results[i,OverlapMeasures.volume_similarity.value] = overlap_measures_filter.GetVolumeSimilarity()\n", 211 | " overlap_results[i,OverlapMeasures.false_negative.value] = overlap_measures_filter.GetFalseNegativeError()\n", 212 | " overlap_results[i,OverlapMeasures.false_positive.value] = overlap_measures_filter.GetFalsePositiveError()\n", 213 | " # Hausdorff distance\n", 214 | " hausdorff_distance_filter.Execute(reference_segmentation, seg)\n", 215 | " \n", 216 | " surface_distance_results[i,SurfaceDistanceMeasures.hausdorff_distance.value] = hausdorff_distance_filter.GetHausdorffDistance()\n", 217 | " # Symmetric surface distance measures\n", 218 | " segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(seg, squaredDistance=False, useImageSpacing=True))\n", 219 | " segmented_surface = sitk.LabelContour(seg)\n", 220 | " \n", 221 | " # Multiply the binary surface segmentations with the distance maps. 
The resulting distance\n",
222 | " # maps contain non-zero values only on the surface (they can also contain zero on the surface)\n",
223 | " seg2ref_distance_map = reference_distance_map*sitk.Cast(segmented_surface, sitk.sitkFloat32)\n",
224 | " ref2seg_distance_map = segmented_distance_map*sitk.Cast(reference_surface, sitk.sitkFloat32)\n",
225 | " \n",
226 | " # Get the number of pixels in the segmented surface by counting all pixels that are 1.\n",
227 | " statistics_image_filter.Execute(segmented_surface)\n",
228 | " num_segmented_surface_pixels = int(statistics_image_filter.GetSum())\n",
229 | " \n",
230 | " # Get all non-zero distances and then add zero distances if required.\n",
231 | " seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n",
232 | " seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr!=0]) \n",
233 | " seg2ref_distances = seg2ref_distances + \\\n",
234 | " list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))\n",
235 | " ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n",
236 | " ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr!=0]) \n",
237 | " ref2seg_distances = ref2seg_distances + \\\n",
238 | " list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))\n",
239 | " \n",
240 | " all_surface_distances = seg2ref_distances + ref2seg_distances\n",
241 | "\n",
242 | " # The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In \n",
243 | " # general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two \n",
244 | " # segmentations, though in our case it is. More on this below.\n",
245 | " surface_distance_results[i,SurfaceDistanceMeasures.mean_surface_distance.value] = np.mean(all_surface_distances)\n",
246 | " surface_distance_results[i,SurfaceDistanceMeasures.median_surface_distance.value] = np.median(all_surface_distances)\n",
247 | " surface_distance_results[i,SurfaceDistanceMeasures.std_surface_distance.value] = np.std(all_surface_distances)\n",
248 | " surface_distance_results[i,SurfaceDistanceMeasures.max_surface_distance.value] = np.max(all_surface_distances)\n",
249 | " \n",
250 | "# Print the matrices\n",
251 | "np.set_printoptions(precision=3)\n",
252 | "print(overlap_results)\n",
253 | "print(surface_distance_results)"
254 | ] 255 | }, 256 | { 257 | "cell_type": "markdown", 258 | "metadata": {}, 259 | "source": [
260 | "## Improved output\n",
261 | "\n",
262 | "Using the [pandas](http://pandas.pydata.org/) package we can easily produce high-quality output.
" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": null, 268 | "metadata": {}, 269 | "outputs": [], 270 | "source": [ 271 | "import pandas as pd\n", 272 | "from IPython.display import display, HTML \n", 273 | "\n", 274 | "# Graft our results matrix into pandas data frames \n", 275 | "overlap_results_df = pd.DataFrame(data=overlap_results, index = list(range(len(segmentations))), \n", 276 | " columns=[name for name, _ in OverlapMeasures.__members__.items()]) \n", 277 | "surface_distance_results_df = pd.DataFrame(data=surface_distance_results, index = list(range(len(segmentations))), \n", 278 | " columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()]) \n", 279 | "\n", 280 | "# Display the data as HTML tables and graphs\n", 281 | "display(HTML(overlap_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 282 | "display(HTML(surface_distance_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 283 | "overlap_results_df.plot(kind='bar').legend(bbox_to_anchor=(1.6,0.9))\n", 284 | "surface_distance_results_df.plot(kind='bar').legend(bbox_to_anchor=(1.6,0.9))" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "You can also export the data as a table for your LaTeX manuscript using the [to_latex](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_latex.html) function.\n", 292 | "Note: You will need to add the \\usepackage{booktabs} to your LaTeX document's preamble. \n", 293 | "\n", 294 | "To create the minimal LaTeX document which will allow you to see the difference between the tables below, copy paste:\n", 295 | "\n", 296 | "\\documentclass{article}\n", 297 | "\n", 298 | "\\usepackage{booktabs}\n", 299 | "\n", 300 | "\\begin{document}\n", 301 | "\n", 302 | "paste the tables here\n", 303 | "\n", 304 | "\\end{document}\n", 305 | "\n" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [ 314 | "# The formatting of the table using the default settings is less than ideal \n", 315 | "print(overlap_results_df.to_latex())\n", 316 | "\n", 317 | "# We can improve on this by specifying the table's column format and the float format\n", 318 | "print(overlap_results_df.to_latex(column_format='ccccccc', float_format=lambda x: '%.3f' % x))" 319 | ] 320 | }, 321 | { 322 | "cell_type": "markdown", 323 | "metadata": {}, 324 | "source": [ 325 | "## Visual Diff\n", 326 | "\n", 327 | "It is always nice to have a figure with a visual display of the difference between the segmentation and ground truth." 
328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": null, 333 | "metadata": {"simpleitk_error_allowed": "Exception thrown in SimpleITK Show:"}, 334 | "outputs": [], 335 | "source": [ 336 | "# Use the first segmentation \n", 337 | "segmentation = segmentations[0]\n", 338 | "\n", 339 | "# Save ink, the differences will be in black and background is white \n", 340 | "segmentation_diff = (segmentation==reference_segmentation)*255\n", 341 | "\n", 342 | "# Flatten for 2D presentation, create a montage from the volume\n", 343 | "num_slices = segmentation_diff.GetDepth()\n", 344 | "tile_w = int(np.sqrt(num_slices))\n", 345 | "tile_h = int(np.ceil(num_slices/tile_w))\n", 346 | "default_background_color = 255\n", 347 | "tile_image = sitk.Tile([segmentation_diff[:,:,i] for i in range(num_slices)], (tile_w, tile_h), default_background_color)\n", 348 | "sitk.Show(tile_image)" 349 | ] 350 | } 351 | ], 352 | "metadata": { 353 | "kernelspec": { 354 | "display_name": "Python 3", 355 | "language": "python", 356 | "name": "python3" 357 | }, 358 | "language_info": { 359 | "codemirror_mode": { 360 | "name": "ipython", 361 | "version": 3 362 | }, 363 | "file_extension": ".py", 364 | "mimetype": "text/x-python", 365 | "name": "python", 366 | "nbconvert_exporter": "python", 367 | "pygments_lexer": "ipython3", 368 | "version": "3.6.8" 369 | } 370 | }, 371 | "nbformat": 4, 372 | "nbformat_minor": 1 373 | } 374 | -------------------------------------------------------------------------------- /01_spatial_transformations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

SimpleITK Spatial Transformations

\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "\n", 12 | "1. Points are represented by vector-like data types: Tuple, Numpy array, List.\n", 13 | "2. Matrices are represented by vector-like data types in row major order.\n", 14 | "3. Default transformation initialization as the identity transform.\n", 15 | "4. Angles specified in radians, distances specified in unknown but consistent units (nm,mm,m,km...).\n", 16 | "5. All global transformations **except translation** are of the form:\n", 17 | "$$T(\\mathbf{x}) = A(\\mathbf{x}-\\mathbf{c}) + \\mathbf{t} + \\mathbf{c}$$\n", 18 | "\n", 19 | " Nomenclature (when printing your transformation):\n", 20 | "\n", 21 | " * Matrix: the matrix $A$\n", 22 | " * Center: the point $\\mathbf{c}$\n", 23 | " * Translation: the vector $\\mathbf{t}$\n", 24 | " * Offset: $\\mathbf{t} + \\mathbf{c} - A\\mathbf{c}$\n", 25 | "6. Bounded transformations, BSplineTransform and DisplacementFieldTransform, behave as the identity transform outside the defined bounds.\n", 26 | "7. DisplacementFieldTransform:\n", 27 | " * Initializing the DisplacementFieldTransform using an image requires that the image's pixel type be sitk.sitkVectorFloat64.\n", 28 | " * Initializing the DisplacementFieldTransform using an image will \"clear out\" your image (your alias to the image will point to an empty, zero sized, image).\n", 29 | "8. Composite transformations are applied in stack order (first added, last applied)." 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## Transformation Types\n", 37 | "\n", 38 | "SimpleITK supports the following transformation types.\n", 39 | "\n", 40 | "\n", 41 | "\n", 42 | " \n", 43 | " \n", 44 | " \n", 45 | " \n", 46 | " \n", 47 | " \n", 48 | " \n", 49 | " \n", 50 | " \n", 51 | " \n", 52 | " \n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | "
TranslationTransform2D or 3D, translation
VersorTransform3D, rotation represented by a versor
VersorRigid3DTransform3D, rigid transformation with rotation represented by a versor
Euler2DTransform2D, rigid transformation with rotation represented by an Euler angle
Euler3DTransform3D, rigid transformation with rotation represented by Euler angles
Similarity2DTransform2D, composition of isotropic scaling and rigid transformation with rotation represented by an Euler angle
Similarity3DTransform3D, composition of isotropic scaling and rigid transformation with rotation represented by a versor
ScaleTransform2D or 3D, anisotropic scaling
ScaleVersor3DTransform3D, rigid transformation with anisotropic scale added to the rotation matrix part (not composed as one would expect)
ScaleSkewVersor3DTransform3D, rigid transformation with anisotropic scale and skew matrices added to the rotation matrix part (not composed as one would expect)
AffineTransform2D or 3D, affine transformation.
BSplineTransform2D or 3D, deformable transformation represented by a sparse regular grid of control points.
DisplacementFieldTransform2D or 3D, deformable transformation represented as a dense regular grid of vectors.
TransformA generic transformation. Can represent any of the SimpleITK transformations, and a composite transformation (stack of transformations concatenated via composition, last added, first applied).
" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "import SimpleITK as sitk\n", 66 | "import utilities as util\n", 67 | "\n", 68 | "import numpy as np\n", 69 | "%matplotlib inline \n", 70 | "import matplotlib.pyplot as plt\n", 71 | "from ipywidgets import interact, fixed\n", 72 | "\n", 73 | "OUTPUT_DIR = \"output\"" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "We will introduce the transformation types, starting with translation and illustrating how to move from a lower to higher parameter space (e.g. translation to rigid). \n", 81 | "\n", 82 | "We start with the global transformations. All of them except translation are of the form:\n", 83 | "$$T(\\mathbf{x}) = A(\\mathbf{x}-\\mathbf{c}) + \\mathbf{t} + \\mathbf{c}$$\n", 84 | "\n", 85 | "In ITK speak (when printing your transformation):\n", 86 | "" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "## TranslationTransform\n", 99 | "\n", 100 | "Create a translation and then transform a point and use the inverse transformation to get the original back." 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "dimension = 2 \n", 110 | "offset = [2]*dimension # use a Python trick to create the offset list based on the dimension\n", 111 | "translation = sitk.TranslationTransform(dimension, offset)\n", 112 | "print(translation)" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "point = [10, 11] if dimension==2 else [10, 11, 12] # set point to match dimension\n", 122 | "transformed_point = translation.TransformPoint(point)\n", 123 | "translation_inverse = translation.GetInverse()\n", 124 | "print('original point: ' + util.point2str(point) + '\\n'\n", 125 | " 'transformed point: ' + util.point2str(transformed_point) + '\\n'\n", 126 | " 'back to original: ' + util.point2str(translation_inverse.TransformPoint(transformed_point)))" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "## Euler2DTransform\n", 134 | "\n", 135 | "Rigidly transform a 2D point using a Euler angle parameter specification.\n", 136 | "\n", 137 | "Notice that the dimensionality of the Euler angle based rigid transformation is associated with the class, unlike the translation which is set at construction.\n" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "point = [10, 11]\n", 147 | "rotation2D = sitk.Euler2DTransform()\n", 148 | "rotation2D.SetTranslation((7.2, 8.4))\n", 149 | "rotation2D.SetAngle(np.pi/2)\n", 150 | "print('original point: ' + util.point2str(point) + '\\n'\n", 151 | " 'transformed point: ' + util.point2str(rotation2D.TransformPoint(point)))" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [ 158 | "## VersorTransform (rotation in 3D)\n", 159 | "\n", 160 | "Rotation using a versor, vector part of unit quaternion, parameterization. Quaternion defined by rotation of $\\theta$ radians around axis $n$, is $q = [n*\\sin(\\frac{\\theta}{2}), \\cos(\\frac{\\theta}{2})]$." 
161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "metadata": {}, 167 | "outputs": [], 168 | "source": [ 169 | "# Use a versor:\n", 170 | "rotation1 = sitk.VersorTransform([0,0,1,0])\n", 171 | "\n", 172 | "# Use axis-angle:\n", 173 | "rotation2 = sitk.VersorTransform((0,0,1), np.pi)\n", 174 | "\n", 175 | "# Use a matrix:\n", 176 | "rotation3 = sitk.VersorTransform()\n", 177 | "rotation3.SetMatrix([-1, 0, 0, 0, -1, 0, 0, 0, 1]);\n", 178 | "\n", 179 | "point = (10, 100, 1000)\n", 180 | "\n", 181 | "p1 = rotation1.TransformPoint(point)\n", 182 | "p2 = rotation2.TransformPoint(point)\n", 183 | "p3 = rotation3.TransformPoint(point)\n", 184 | "\n", 185 | "print('Points after transformation:\np1=' + str(p1) + \n", 186 | " '\np2='+ str(p2) + '\np3='+ str(p3))" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "## Translation to Rigid [3D]\n", 194 | "\n", 195 | "We only need to copy the translational component." 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": {}, 202 | "outputs": [], 203 | "source": [ 204 | "dimension = 3 \n", 205 | "t = (1,2,3) \n", 206 | "translation = sitk.TranslationTransform(dimension, t)\n", 207 | "\n", 208 | "# Copy the translational component.\n", 209 | "rigid_euler = sitk.Euler3DTransform()\n", 210 | "rigid_euler.SetTranslation(translation.GetOffset())\n", 211 | "\n", 212 | "# Apply the transformations to the same set of random points and compare the results.\n", 213 | "util.print_transformation_differences(translation, rigid_euler)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": {}, 219 | "source": [ 220 | "## Rotation to Rigid [3D]\n", 221 | "Copy the matrix or versor and center of rotation." 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": null, 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [ 230 | "rotation_center = (10, 10, 10)\n", 231 | "rotation = sitk.VersorTransform([0,0,1,0], rotation_center)\n", 232 | "\n", 233 | "rigid_versor = sitk.VersorRigid3DTransform()\n", 234 | "rigid_versor.SetRotation(rotation.GetVersor())\n", 235 | "#rigid_versor.SetCenter(rotation.GetCenter()) #intentional error, not copying center of rotation\n", 236 | "\n", 237 | "# Apply the transformations to the same set of random points and compare the results.\n", 238 | "util.print_transformation_differences(rotation, rigid_versor)" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": {}, 244 | "source": [ 245 | "In the cell above, when we don't copy the center of rotation we have a constant error vector, $\mathbf{c} - A\mathbf{c}$." 246 | ] 247 | }, 248 | { 249 | "cell_type": "markdown", 250 | "metadata": {}, 251 | "source": [ 252 | "## Similarity [2D]\n", 253 | "\n", 254 | "When the center of the similarity transformation is not at the origin, the effect of the transformation is not what most of us expect. This is readily visible if we limit the transformation to scaling: $T(\mathbf{x}) = s\mathbf{x}-s\mathbf{c} + \mathbf{c}$. Changing the transformation's center results in scale + translation."
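As a sanity check of the scaling formula above, the following added sketch (not part of the original notebook) scales a point about a non-origin center and compares the result with a direct evaluation of $s\mathbf{x}-s\mathbf{c}+\mathbf{c}$:

import numpy as np
import SimpleITK as sitk

s = 2.0
c = np.array([10.0, 10.0])  # center of the similarity transformation
x = np.array([1.0, 1.0])

similarity = sitk.Similarity2DTransform()
similarity.SetScale(s)
similarity.SetCenter(tuple(c))

# Scaling by 2 about (10,10) maps (1,1) to (-8,-8), i.e. scale + translation.
print(similarity.TransformPoint(tuple(x)), s * x - s * c + c)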
255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": null, 260 | "metadata": {}, 261 | "outputs": [], 262 | "source": [ 263 | "def display_center_effect(x, y, tx, point_list, xlim, ylim):\n", 264 | " tx.SetCenter((x,y))\n", 265 | " transformed_point_list = [ tx.TransformPoint(p) for p in point_list]\n", 266 | "\n", 267 | " plt.scatter(list(np.array(transformed_point_list).T)[0],\n", 268 | " list(np.array(transformed_point_list).T)[1],\n", 269 | " marker='^', \n", 270 | " color='red', label='transformed points')\n", 271 | " plt.scatter(list(np.array(point_list).T)[0],\n", 272 | " list(np.array(point_list).T)[1],\n", 273 | " marker='o', \n", 274 | " color='blue', label='original points')\n", 275 | " plt.xlim(xlim)\n", 276 | " plt.ylim(ylim)\n", 277 | " plt.legend(loc=(0.25,1.01))\n", 278 | "\n", 279 | "# 2D square centered on (0,0)\n", 280 | "points = [np.array((-1.0,-1.0)), np.array((-1.0,1.0)), np.array((1.0,1.0)), np.array((1.0,-1.0))]\n", 281 | "\n", 282 | "# Scale by 2 \n", 283 | "similarity = sitk.Similarity2DTransform();\n", 284 | "similarity.SetScale(2)\n", 285 | "\n", 286 | "interact(display_center_effect, x=(-10,10), y=(-10,10),tx = fixed(similarity), point_list = fixed(points), \n", 287 | " xlim = fixed((-10,10)),ylim = fixed((-10,10)));" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "## Rigid to Similarity [3D]\n", 295 | "Copy the translation, center, and matrix or versor." 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": null, 301 | "metadata": {}, 302 | "outputs": [], 303 | "source": [ 304 | "rotation_center = (100, 100, 100)\n", 305 | "theta_x = 0.0\n", 306 | "theta_y = 0.0\n", 307 | "theta_z = np.pi/2.0\n", 308 | "translation = (1,2,3)\n", 309 | "\n", 310 | "rigid_euler = sitk.Euler3DTransform(rotation_center, theta_x, theta_y, theta_z, translation)\n", 311 | "\n", 312 | "similarity = sitk.Similarity3DTransform()\n", 313 | "similarity.SetMatrix(rigid_euler.GetMatrix())\n", 314 | "similarity.SetTranslation(rigid_euler.GetTranslation())\n", 315 | "similarity.SetCenter(rigid_euler.GetCenter())\n", 316 | "\n", 317 | "# Apply the transformations to the same set of random points and compare the results.\n", 318 | "util.print_transformation_differences(rigid_euler, similarity)" 319 | ] 320 | }, 321 | { 322 | "cell_type": "markdown", 323 | "metadata": {}, 324 | "source": [ 325 | "## Similarity to Affine [3D]\n", 326 | "Copy the translation, center and matrix." 
327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "metadata": {}, 333 | "outputs": [], 334 | "source": [ 335 | "rotation_center = (100, 100, 100)\n", 336 | "axis = (0,0,1)\n", 337 | "angle = np.pi/2.0\n", 338 | "translation = (1,2,3)\n", 339 | "scale_factor = 2.0\n", 340 | "similarity = sitk.Similarity3DTransform(scale_factor, axis, angle, translation, rotation_center)\n", 341 | "\n", 342 | "affine = sitk.AffineTransform(3)\n", 343 | "affine.SetMatrix(similarity.GetMatrix())\n", 344 | "affine.SetTranslation(similarity.GetTranslation())\n", 345 | "affine.SetCenter(similarity.GetCenter())\n", 346 | "\n", 347 | "# Apply the transformations to the same set of random points and compare the results.\n", 348 | "util.print_transformation_differences(similarity, affine)" 349 | ] 350 | }, 351 | { 352 | "cell_type": "markdown", 353 | "metadata": {}, 354 | "source": [ 355 | "## Scale Transform\n", 356 | "\n", 357 | "Just as was the case for the similarity transformation above, when the transformation's center is not at the origin, instead of a pure anisotropic scaling we also have translation ($T(\mathbf{x}) = S\mathbf{x}-S\mathbf{c} + \mathbf{c}$, where $S=\textrm{diag}(\mathbf{s})$)." 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": null, 363 | "metadata": {}, 364 | "outputs": [], 365 | "source": [ 366 | "# 2D square centered on (0,0).\n", 367 | "points = [np.array((-1.0,-1.0)), np.array((-1.0,1.0)), np.array((1.0,1.0)), np.array((1.0,-1.0))]\n", 368 | "\n", 369 | "# Scale by half in x and 2 in y.\n", 370 | "scale = sitk.ScaleTransform(2, (0.5,2));\n", 371 | "\n", 372 | "# Interactively change the location of the center.\n", 373 | "interact(display_center_effect, x=(-10,10), y=(-10,10),tx = fixed(scale), point_list = fixed(points), \n", 374 | " xlim = fixed((-10,10)),ylim = fixed((-10,10)));" 375 | ] 376 | }, 377 | { 378 | "cell_type": "markdown", 379 | "metadata": {}, 380 | "source": [ 381 | "## Unintentional Misnomers (originally from ITK)\n", 382 | "\n", 383 | "Two transformation types whose names may mislead you are ScaleVersor and ScaleSkewVersor. Basing your choices on expectations without reading the documentation will surprise you.\n", 384 | "\n", 385 | "ScaleVersor - based on the name one would expect a composition of transformations; in practice it is:\n", 386 | "$$T(x) = (R+S)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S= \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]$$ \n", 387 | "\n", 388 | "ScaleSkewVersor - based on the name one would expect a composition of transformations; in practice it is:\n", 389 | "$$T(x) = (R+S+K)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S = \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]\;\; \textrm{and } K = \left[\begin{array}{ccc} 0 & k_0 & k_1 \\ k_2 & 0 & k_3 \\ k_4 & k_5 & 0 \end{array}\right]$$ \n", 390 | "\n", 391 | "Note that ScaleSkewVersor is an over-parametrized version of the affine transform, 15 parameters (scale, skew, versor, translation) vs. 12 parameters (matrix, translation)."
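The additive, rather than composed, structure can be verified numerically. A minimal sketch, assuming ScaleVersor3DTransform exposes the same SetScale and axis-angle SetRotation overloads as the other versor-based transforms: the transform's matrix equals $R+S$ and differs from the composition $R\,\textrm{diag}(\mathbf{s})$.

import numpy as np
import SimpleITK as sitk

s = np.array([2.0, 3.0, 4.0])
tx = sitk.ScaleVersor3DTransform()
tx.SetScale(tuple(s))
tx.SetRotation((0.0, 0.0, 1.0), np.pi / 2)  # 90 degrees around z

R = np.array(sitk.VersorTransform((0.0, 0.0, 1.0), np.pi / 2).GetMatrix()).reshape(3, 3)
M = np.array(tx.GetMatrix()).reshape(3, 3)

print(np.allclose(M, R + np.diag(s - 1)))  # True: additive scale, the matrix is R+S
print(np.allclose(M, R @ np.diag(s)))      # False: not the composition R*diag(s)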
392 | ] 393 | }, 394 | { 395 | "cell_type": "markdown", 396 | "metadata": {}, 397 | "source": [ 398 | "## Bounded Transformations\n", 399 | "\n", 400 | "SimpleITK supports two types of bounded non-rigid transformations, BSplineTransform (sparse representation) and DisplacementFieldTransform (dense representation).\n", 401 | "\n", 402 | "Transforming a point that is outside the bounds will return the original point - identity transform." 403 | ] 404 | }, 405 | { 406 | "cell_type": "markdown", 407 | "metadata": {}, 408 | "source": [ 409 | "## BSpline\n", 410 | "A sparse set of control points is used to control a free-form deformation. Experimenting with the cell below makes it clear that the BSplineTransform allows for folding and tearing." 411 | ] 412 | }, 413 | { 414 | "cell_type": "code", 415 | "execution_count": null, 416 | "metadata": {}, 417 | "outputs": [], 418 | "source": [ 419 | "# Create the transformation (when working with images it is easier to use the BSplineTransformInitializer function\n", 420 | "# or its object oriented counterpart BSplineTransformInitializerFilter).\n", 421 | "dimension = 2\n", 422 | "spline_order = 3\n", 423 | "direction_matrix_row_major = [1.0,0.0,0.0,1.0] # identity, mesh is axis aligned\n", 424 | "origin = [-1.0,-1.0] \n", 425 | "domain_physical_dimensions = [2,2]\n", 426 | "\n", 427 | "bspline = sitk.BSplineTransform(dimension, spline_order)\n", 428 | "bspline.SetTransformDomainOrigin(origin)\n", 429 | "bspline.SetTransformDomainDirection(direction_matrix_row_major)\n", 430 | "bspline.SetTransformDomainPhysicalDimensions(domain_physical_dimensions)\n", 431 | "bspline.SetTransformDomainMeshSize((4,3))\n", 432 | "\n", 433 | "# Random displacement of the control points.\n", 434 | "originalControlPointDisplacements = np.random.random(len(bspline.GetParameters()))\n", 435 | "bspline.SetParameters(originalControlPointDisplacements)\n", 436 | "\n", 437 | "# Apply the BSpline transformation to a grid of points \n", 438 | "# starting the point set exactly at the origin of the BSpline mesh is problematic as\n", 439 | "# these points are considered outside the transformation's domain,\n", 440 | "# remove epsilon below and see what happens.\n", 441 | "numSamplesX = 10\n", 442 | "numSamplesY = 20\n", 443 | " \n", 444 | "coordsX = np.linspace(origin[0]+np.finfo(float).eps, origin[0] + domain_physical_dimensions[0], numSamplesX)\n", 445 | "coordsY = np.linspace(origin[1]+np.finfo(float).eps, origin[1] + domain_physical_dimensions[1], numSamplesY)\n", 446 | "XX, YY = np.meshgrid(coordsX, coordsY)\n", 447 | "\n", 448 | "interact(util.display_displacement_scaling_effect, s= (-1.5,1.5), original_x_mat = fixed(XX), original_y_mat = fixed(YY),\n", 449 | " tx = fixed(bspline), original_control_point_displacements = fixed(originalControlPointDisplacements)); " 450 | ] 451 | }, 452 | { 453 | "cell_type": "markdown", 454 | "metadata": {}, 455 | "source": [ 456 | "## DisplacementField\n", 457 | "\n", 458 | "A dense set of vectors representing the displacement inside the given domain. The most generic representation of a transformation." 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": {}, 465 | "outputs": [], 466 | "source": [ 467 | "# Create the displacement field. 
\n", 468 | " \n", 469 | "# When working with images the safer thing to do is use the image based constructor,\n", 470 | "# sitk.DisplacementFieldTransform(my_image), all the fixed parameters will be set correctly and the displacement\n", 471 | "# field is initialized using the vectors stored in the image. SimpleITK requires that the image's pixel type be \n", 472 | "# sitk.sitkVectorFloat64.\n", 473 | "displacement = sitk.DisplacementFieldTransform(2)\n", 474 | "field_size = [10,20]\n", 475 | "field_origin = [-1.0,-1.0] \n", 476 | "field_spacing = [2.0/9.0,2.0/19.0] \n", 477 | "field_direction = [1,0,0,1] # direction cosine matrix (row major order) \n", 478 | "\n", 479 | "# Concatenate all the information into a single list\n", 480 | "displacement.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 481 | "# Set the interpolator, either sitkLinear which is default or nearest neighbor\n", 482 | "displacement.SetInterpolator(sitk.sitkNearestNeighbor)\n", 483 | "\n", 484 | "originalDisplacements = np.random.random(len(displacement.GetParameters()))\n", 485 | "displacement.SetParameters(originalDisplacements)\n", 486 | "\n", 487 | "coordsX = np.linspace(field_origin[0], field_origin[0]+(field_size[0]-1)*field_spacing[0], field_size[0])\n", 488 | "coordsY = np.linspace(field_origin[1], field_origin[1]+(field_size[1]-1)*field_spacing[1], field_size[1])\n", 489 | "XX, YY = np.meshgrid(coordsX, coordsY)\n", 490 | "\n", 491 | "interact(util.display_displacement_scaling_effect, s= (-1.5,1.5), original_x_mat = fixed(XX), original_y_mat = fixed(YY),\n", 492 | " tx = fixed(displacement), original_control_point_displacements = fixed(originalDisplacements)); " 493 | ] 494 | }, 495 | { 496 | "cell_type": "markdown", 497 | "metadata": {}, 498 | "source": [ 499 | "## Composite transform (Transform)\n", 500 | "\n", 501 | "The generic SimpleITK transform class. This class can represent both a single transformation (global, local), or a composite transformation (multiple transformations applied one after the other). This is the output typed returned by the SimpleITK registration framework. \n", 502 | "\n", 503 | "The choice of whether to use a composite transformation or compose transformations on your own has subtle differences in the registration framework.\n", 504 | "\n", 505 | "Composite transforms enable a combination of a global transformation with multiple local/bounded transformations. This is useful if we want to apply deformations only in regions that deform while other regions are only effected by the global transformation.\n", 506 | "\n", 507 | "The following code illustrates this, where the whole region is translated and subregions have different deformations." 
508 | ] 509 | }, 510 | { 511 | "cell_type": "code", 512 | "execution_count": null, 513 | "metadata": {}, 514 | "outputs": [], 515 | "source": [ 516 | "# Global transformation.\n", 517 | "translation = sitk.TranslationTransform(2,(1.0,0.0))\n", 518 | "\n", 519 | "# Displacement in region 1.\n", 520 | "displacement1 = sitk.DisplacementFieldTransform(2)\n", 521 | "field_size = [10,20]\n", 522 | "field_origin = [-1.0,-1.0] \n", 523 | "field_spacing = [2.0/9.0,2.0/19.0] \n", 524 | "field_direction = [1,0,0,1] # direction cosine matrix (row major order) \n", 525 | "\n", 526 | "# Concatenate all the information into a single list.\n", 527 | "displacement1.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 528 | "displacement1.SetParameters(np.ones(len(displacement1.GetParameters())))\n", 529 | "\n", 530 | "# Displacement in region 2.\n", 531 | "displacement2 = sitk.DisplacementFieldTransform(2)\n", 532 | "field_size = [10,20]\n", 533 | "field_origin = [1.0,-3] \n", 534 | "field_spacing = [2.0/9.0,2.0/19.0] \n", 535 | "field_direction = [1,0,0,1] #direction cosine matrix (row major order) \n", 536 | "\n", 537 | "# Concatenate all the information into a single list.\n", 538 | "displacement2.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 539 | "displacement2.SetParameters(-1.0*np.ones(len(displacement2.GetParameters())))\n", 540 | "\n", 541 | "# Composite transform which applies the global and local transformations.\n", 542 | "composite = sitk.Transform(translation)\n", 543 | "composite.AddTransform(displacement1)\n", 544 | "composite.AddTransform(displacement2)\n", 545 | "\n", 546 | "# Apply the composite transformation to points in ([-1,-3],[3,1]) and \n", 547 | "# display the deformation using a quiver plot.\n", 548 | " \n", 549 | "# Generate points.\n", 550 | "numSamplesX = 10\n", 551 | "numSamplesY = 10 \n", 552 | "coordsX = np.linspace(-1.0, 3.0, numSamplesX)\n", 553 | "coordsY = np.linspace(-3.0, 1.0, numSamplesY)\n", 554 | "XX, YY = np.meshgrid(coordsX, coordsY)\n", 555 | "\n", 556 | "# Transform points and compute deformation vectors.\n", 557 | "pointsX = np.zeros(XX.shape)\n", 558 | "pointsY = np.zeros(XX.shape)\n", 559 | "for index, value in np.ndenumerate(XX):\n", 560 | " px,py = composite.TransformPoint((value, YY[index]))\n", 561 | " pointsX[index]=px - value \n", 562 | " pointsY[index]=py - YY[index]\n", 563 | " \n", 564 | "plt.quiver(XX, YY, pointsX, pointsY); " 565 | ] 566 | }, 567 | { 568 | "cell_type": "markdown", 569 | "metadata": {}, 570 | "source": [ 571 | "## Writing and Reading\n", 572 | "\n", 573 | "The SimpleITK.ReadTransform() returns a SimpleITK.Transform . The content of the file can be any of the SimpleITK transformations or a composite (set of transformations). 
" 574 | ] 575 | }, 576 | { 577 | "cell_type": "code", 578 | "execution_count": null, 579 | "metadata": {}, 580 | "outputs": [], 581 | "source": [ 582 | "import os\n", 583 | "\n", 584 | "# Create a 2D rigid transformation, write it to disk and read it back.\n", 585 | "basic_transform = sitk.Euler2DTransform()\n", 586 | "basic_transform.SetTranslation((1.0,2.0))\n", 587 | "basic_transform.SetAngle(np.pi/2)\n", 588 | "\n", 589 | "full_file_name = os.path.join(OUTPUT_DIR, 'euler2D.tfm')\n", 590 | "\n", 591 | "sitk.WriteTransform(basic_transform, full_file_name)\n", 592 | "\n", 593 | "# The ReadTransform function returns an sitk.Transform no matter the type of the transform \n", 594 | "# found in the file (global, bounded, composite).\n", 595 | "read_result = sitk.ReadTransform(full_file_name)\n", 596 | "\n", 597 | "print('Different types: '+ str(type(read_result) != type(basic_transform)))\n", 598 | "util.print_transformation_differences(basic_transform, read_result)\n", 599 | "\n", 600 | "\n", 601 | "# Create a composite transform then write and read.\n", 602 | "displacement = sitk.DisplacementFieldTransform(2)\n", 603 | "field_size = [10,20]\n", 604 | "field_origin = [-10.0,-100.0] \n", 605 | "field_spacing = [20.0/(field_size[0]-1),200.0/(field_size[1]-1)] \n", 606 | "field_direction = [1,0,0,1] #direction cosine matrix (row major order)\n", 607 | "\n", 608 | "# Concatenate all the information into a single list.\n", 609 | "displacement.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 610 | "displacement.SetParameters(np.random.random(len(displacement.GetParameters())))\n", 611 | "\n", 612 | "composite_transform = sitk.Transform(basic_transform)\n", 613 | "composite_transform.AddTransform(displacement)\n", 614 | "\n", 615 | "full_file_name = os.path.join(OUTPUT_DIR, 'composite.tfm')\n", 616 | "\n", 617 | "sitk.WriteTransform(composite_transform, full_file_name)\n", 618 | "read_result = sitk.ReadTransform(full_file_name)\n", 619 | "\n", 620 | "util.print_transformation_differences(composite_transform, read_result) " 621 | ] 622 | }, 623 | { 624 | "cell_type": "markdown", 625 | "metadata": {}, 626 | "source": [ 627 | "

Next »

" 628 | ] 629 | } 630 | ], 631 | "metadata": { 632 | "kernelspec": { 633 | "display_name": "Python 3", 634 | "language": "python", 635 | "name": "python3" 636 | }, 637 | "language_info": { 638 | "codemirror_mode": { 639 | "name": "ipython", 640 | "version": 3 641 | }, 642 | "file_extension": ".py", 643 | "mimetype": "text/x-python", 644 | "name": "python", 645 | "nbconvert_exporter": "python", 646 | "pygments_lexer": "ipython3", 647 | "version": "3.6.8" 648 | } 649 | }, 650 | "nbformat": 4, 651 | "nbformat_minor": 2 652 | } 653 | -------------------------------------------------------------------------------- /05_advanced_registration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

Advanced Registration

\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "1. SimpleITK provides two flavors of non-rigid registration:\n", 12 | " * Free Form Deformation, BSpline based, and Demons using the ITKv4 registration framework.\n", 13 | " * A set of Demons filters that are independent of the registration framework (`DemonsRegistrationFilter, DiffeomorphicDemonsRegistrationFilter, FastSymmetricForcesDemonsRegistrationFilter, SymmetricForcesDemonsRegistrationFilter`).\n", 14 | "2. Registration evaluation:\n", 15 | " * Registration accuracy, the quantity of interest is the Target Registration Error (TRE).\n", 16 | " * TRE is spatially variant.\n", 17 | " * Surrogate metrics for evaluating registration accuracy such as segmentation overlaps are relevant, but are potentially deficient.\n", 18 | " * Registration time.\n", 19 | " * Acceptable values for TRE and runtime are context dependent." 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "import SimpleITK as sitk\n", 29 | "import registration_gui as rgui\n", 30 | "import utilities \n", 31 | "\n", 32 | "from downloaddata import fetch_data as fdata\n", 33 | "\n", 34 | "from ipywidgets import interact, fixed\n", 35 | "\n", 36 | "%matplotlib inline\n", 37 | "import matplotlib.pyplot as plt\n", 38 | "\n", 39 | "import numpy as np" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "## Data and Registration Task\n", 47 | "\n", 48 | "In this notebook we will use the Point-validated Pixel-based Breathing Thorax Model (POPI). This is a 4D (3D+time) thoracic-abdominal CT (10 CTs representing the respiratory cycle) with masks segmenting each of the CTs to air/body/lung, and a set of corresponding landmarks localized in each of the CT volumes.\n", 49 | "\n", 50 | "The registration problem we deal with is non-rigid alignment of the lungs throughout the respiratory cycle. This information is relevant for radiation therapy planning and execution.\n", 51 | "\n", 52 | "\n", 53 | "The POPI model is provided by the Léon Bérard Cancer Center & CREATIS Laboratory, Lyon, France. The relevant publication is:\n", 54 | "\n", 55 | "J. Vandemeulebroucke, D. Sarrut, P. Clarysse, \"The POPI-model, a point-validated pixel-based breathing thorax model\",\n", 56 | "Proc. XVth International Conference on the Use of Computers in Radiation Therapy (ICCR), Toronto, Canada, 2007.\n", 57 | "\n", 58 | "Additional 4D CT data sets with reference points are available from the CREATIS Laboratory here. 
" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "images = []\n", 68 | "masks = []\n", 69 | "points = []\n", 70 | "image_indexes = [0,7]\n", 71 | "for i in image_indexes:\n", 72 | " image_file_name = 'POPI/meta/{0}0-P.mhd'.format(i)\n", 73 | " mask_file_name = 'POPI/masks/{0}0-air-body-lungs.mhd'.format(i)\n", 74 | " points_file_name = 'POPI/landmarks/{0}0-Landmarks.pts'.format(i)\n", 75 | " images.append(sitk.ReadImage(fdata(image_file_name), sitk.sitkFloat32)) \n", 76 | " masks.append(sitk.ReadImage(fdata(mask_file_name)))\n", 77 | " points.append(utilities.read_POPI_points(fdata(points_file_name)))\n", 78 | " \n", 79 | "interact(rgui.display_coronal_with_overlay, temporal_slice=(0,len(images)-1), \n", 80 | " coronal_slice = (0, images[0].GetSize()[1]-1), \n", 81 | " images = fixed(images), masks = fixed(masks), \n", 82 | " label=fixed(utilities.popi_lung_label), window_min = fixed(-1024), window_max=fixed(976));" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Free Form Deformation\n", 90 | "\n", 91 | "Define a BSplineTransform using a sparse set of grid points overlaid onto the fixed image's domain to deform it.\n", 92 | "\n", 93 | "For the current registration task we are fortunate in that we have a unique setting. The images are of the same patient during respiration so we can initialize the registration using the identity transform. Additionally, we have masks demarcating the lungs.\n", 94 | "\n", 95 | "We use the registration framework taking advantage of its ability to use masks that limit the similarity metric estimation to points lying inside our region of interest, the lungs." 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "fixed_index = 0\n", 105 | "moving_index = 1\n", 106 | "\n", 107 | "fixed_image = images[fixed_index]\n", 108 | "fixed_image_mask = masks[fixed_index] == utilities.popi_lung_label\n", 109 | "fixed_points = points[fixed_index]\n", 110 | "\n", 111 | "moving_image = images[moving_index]\n", 112 | "moving_image_mask = masks[moving_index] == utilities.popi_lung_label\n", 113 | "moving_points = points[moving_index]" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": null, 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "# Define a simple callback which allows us to monitor registration progress.\n", 123 | "def iteration_callback(filter):\n", 124 | " print('\\r{0:.2f}'.format(filter.GetMetricValue()), end='')\n", 125 | "\n", 126 | "registration_method = sitk.ImageRegistrationMethod()\n", 127 | " \n", 128 | "# Determine the number of BSpline control points using the physical \n", 129 | "# spacing we want for the finest resolution control grid. 
\n", 130 | "grid_physical_spacing = [50.0, 50.0, 50.0] # A control point every 50mm\n", 131 | "image_physical_size = [size*spacing for size,spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing())]\n", 132 | "mesh_size = [int(image_size/grid_spacing + 0.5) \\\n", 133 | " for image_size,grid_spacing in zip(image_physical_size,grid_physical_spacing)]\n", 134 | "# The starting mesh size will be 1/4 of the original, it will be refined by \n", 135 | "# the multi-resolution framework.\n", 136 | "mesh_size = [int(sz/4 + 0.5) for sz in mesh_size]\n", 137 | "\n", 138 | "initial_transform = sitk.BSplineTransformInitializer(image1 = fixed_image, \n", 139 | " transformDomainMeshSize = mesh_size, order=3) \n", 140 | "# Instead of the standard SetInitialTransform we use the BSpline specific method which also\n", 141 | "# accepts the scaleFactors parameter to refine the BSpline mesh. In this case we start with \n", 142 | "# the given mesh_size at the highest pyramid level then we double it in the next lower level and\n", 143 | "# in the full resolution image we use a mesh that is four times the original size.\n", 144 | "registration_method.SetInitialTransformAsBSpline(initial_transform,\n", 145 | " inPlace=False,\n", 146 | " scaleFactors=[1,2,4])\n", 147 | "\n", 148 | "registration_method.SetMetricAsMeanSquares()\n", 149 | "registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 150 | "registration_method.SetMetricSamplingPercentage(0.01)\n", 151 | "registration_method.SetMetricFixedMask(fixed_image_mask)\n", 152 | " \n", 153 | "registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])\n", 154 | "registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])\n", 155 | "registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n", 156 | "\n", 157 | "registration_method.SetInterpolator(sitk.sitkLinear)\n", 158 | "registration_method.SetOptimizerAsLBFGS2(solutionAccuracy=1e-2, numberOfIterations=100, deltaConvergenceTolerance=0.01)\n", 159 | "\n", 160 | "registration_method.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(registration_method))\n", 161 | "\n", 162 | "final_transformation = registration_method.Execute(fixed_image, moving_image)\n", 163 | "print('\\nOptimizer\\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))" 164 | ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "## Qualitative evaluation via segmentation transfer\n", 171 | "\n", 172 | "Transfer the segmentation from the moving image to the fixed image before and after registration and visually evaluate overlap." 
173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": null, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "transformed_segmentation = sitk.Resample(moving_image_mask,\n", 182 | " fixed_image,\n", 183 | " final_transformation, \n", 184 | " sitk.sitkNearestNeighbor,\n", 185 | " 0.0, \n", 186 | " moving_image_mask.GetPixelID())\n", 187 | "\n", 188 | "interact(rgui.display_coronal_with_overlay, temporal_slice=(0,1), \n", 189 | " coronal_slice = (0, fixed_image.GetSize()[1]-1), \n", 190 | " images = fixed([fixed_image,fixed_image]), masks = fixed([moving_image_mask, transformed_segmentation]), \n", 191 | " label=fixed(1), window_min = fixed(-1024), window_max=fixed(976));" 192 | ] 193 | }, 194 | { 195 | "cell_type": "markdown", 196 | "metadata": {}, 197 | "source": [ 198 | "### Quantitative evaluation \n", 199 | "\n", 200 | "The most appropriate evaluation is based on analysis of Target Registration Errors (TRE), defined as follows:\n", 201 | "\n", 202 | "Given the transformation $T_f^m$ and corresponding points in the two coordinate systems, $^fp,^mp$, points which were not used in the registration process, TRE is defined as $\|T_f^m(^fp) - ^mp\|$. \n", 203 | "\n", 204 | "We start by looking at some descriptive statistics of TRE:" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "initial_TRE = utilities.target_registration_errors(sitk.Transform(), fixed_points, moving_points)\n", 214 | "final_TRE = utilities.target_registration_errors(final_transformation, fixed_points, moving_points)\n", 215 | "\n", 216 | "print('Initial alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(np.mean(initial_TRE), \n", 217 | " np.std(initial_TRE), \n", 218 | " np.max(initial_TRE)))\n", 219 | "print('Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(np.mean(final_TRE), \n", 220 | " np.std(final_TRE), \n", 221 | " np.max(final_TRE)))" 222 | ] 223 | }, 224 | { 225 | "cell_type": "markdown", 226 | "metadata": {}, 227 | "source": [ 228 | "The above descriptive statistics do not convey the whole picture; we should also look at the TRE distributions before and after registration." 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [ 237 | "plt.hist(initial_TRE, bins=20, alpha=0.5, label='before registration', color='blue')\n", 238 | "plt.hist(final_TRE, bins=20, alpha=0.5, label='after registration', color='green')\n", 239 | "plt.legend()\n", 240 | "plt.title('TRE histogram');" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "Finally, we should also take into account the fact that TRE is spatially variant (think center of rotation). We therefore should also explore the distribution of errors as a function of the point location."
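The next cell does this with the workshop's utilities.target_registration_errors helper (display_errors=True plots the errors at their spatial locations); at its core a TRE computation is just the following (a hypothetical re-implementation for illustration, assuming matching lists of (x,y,z) tuples):

import numpy as np

def target_registration_errors_sketch(tx, fixed_point_list, reference_point_list):
    # TRE: distance between each transformed fixed point and its moving counterpart.
    return [np.linalg.norm(np.array(tx.TransformPoint(p)) - np.array(p_ref))
            for p, p_ref in zip(fixed_point_list, reference_point_list)]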
248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": null, 253 | "metadata": {}, 254 | "outputs": [], 255 | "source": [ 256 | "initial_errors = utilities.target_registration_errors(sitk.Transform(), fixed_points, moving_points, display_errors = True)\n", 257 | "utilities.target_registration_errors(final_transformation, fixed_points, moving_points, \n", 258 | " min_err=min(initial_errors), max_err=max(initial_errors), display_errors = True);" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "metadata": {}, 264 | "source": [ 265 | "Deciding whether a registration algorithm is appropriate for a specific problem is context dependent and is defined by the clinical/research needs both in terms of accuracy and computational complexity." 266 | ] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": {}, 271 | "source": [ 272 | "## Demons Based Registration\n", 273 | "\n", 274 | "SimpleITK includes a number of filters from the Demons registration family (originally introduced by J. P. Thirion):\n", 275 | "1. DemonsRegistrationFilter.\n", 276 | "2. DiffeomorphicDemonsRegistrationFilter.\n", 277 | "3. FastSymmetricForcesDemonsRegistrationFilter.\n", 278 | "4. SymmetricForcesDemonsRegistrationFilter.\n", 279 | "\n", 280 | "These are appropriate for mono-modal registration. As these filters are independent of the ImageRegistrationMethod we do not have access to the multiscale framework. Luckily it is easy to implement our own multiscale framework in SimpleITK, which is what we do in the next cell." 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": null, 286 | "metadata": {}, 287 | "outputs": [], 288 | "source": [ 289 | "def smooth_and_resample(image, shrink_factor, smoothing_sigma):\n", 290 | " \"\"\"\n", 291 | " Args:\n", 292 | " image: The image we want to resample.\n", 293 | " shrink_factor: A number greater than one, such that the new image's size is original_size/shrink_factor.\n", 294 | " smoothing_sigma: Sigma for Gaussian smoothing, this is in physical (image spacing) units, not pixels.\n", 295 | " Return:\n", 296 | " Image which is a result of smoothing the input and then resampling it using the given sigma and shrink factor.\n", 297 | " \"\"\"\n", 298 | " smoothed_image = sitk.SmoothingRecursiveGaussian(image, smoothing_sigma)\n", 299 | " \n", 300 | " original_spacing = image.GetSpacing()\n", 301 | " original_size = image.GetSize()\n", 302 | " new_size = [int(sz/float(shrink_factor) + 0.5) for sz in original_size]\n", 303 | " new_spacing = [((original_sz-1)*original_spc)/(new_sz-1) \n", 304 | " for original_sz, original_spc, new_sz in zip(original_size, original_spacing, new_size)]\n", 305 | " return sitk.Resample(smoothed_image, new_size, sitk.Transform(), \n", 306 | " sitk.sitkLinear, image.GetOrigin(),\n", 307 | " new_spacing, image.GetDirection(), 0.0, \n", 308 | " image.GetPixelID())\n", 309 | "\n", 310 | "\n", 311 | " \n", 312 | "def multiscale_demons(registration_algorithm,\n", 313 | " fixed_image, moving_image, initial_transform = None, \n", 314 | " shrink_factors=None, smoothing_sigmas=None):\n", 315 | " \"\"\"\n", 316 | " Run the given registration algorithm in a multiscale fashion. 
The original scale should not be given as input as the\n", 317 | " original images are implicitly incorporated as the base of the pyramid.\n", 318 | " Args:\n", 319 | " registration_algorithm: Any registration algorithm that has an Execute(fixed_image, moving_image, displacement_field_image)\n", 320 | " method.\n", 321 | " fixed_image: Resulting transformation maps points from this image's spatial domain to the moving image spatial domain.\n", 322 | " moving_image: Resulting transformation maps points from the fixed_image's spatial domain to this image's spatial domain.\n", 323 | " initial_transform: Any SimpleITK transform, used to initialize the displacement field.\n", 324 | " shrink_factors: Shrink factors relative to the original image's size.\n", 325 | " smoothing_sigmas: Amount of smoothing which is done prior to resampling the image using the given shrink factor. These\n", 326 | " are in physical (image spacing) units.\n", 327 | " Returns: \n", 328 | " SimpleITK.DisplacementFieldTransform\n", 329 | " \"\"\"\n", 330 | " # Create image pyramid.\n", 331 | " fixed_images = [fixed_image]\n", 332 | " moving_images = [moving_image]\n", 333 | " if shrink_factors:\n", 334 | " for shrink_factor, smoothing_sigma in reversed(list(zip(shrink_factors, smoothing_sigmas))):\n", 335 | " fixed_images.append(smooth_and_resample(fixed_images[0], shrink_factor, smoothing_sigma))\n", 336 | " moving_images.append(smooth_and_resample(moving_images[0], shrink_factor, smoothing_sigma))\n", 337 | " \n", 338 | " # Create initial displacement field at lowest resolution. \n", 339 | " # Currently, the pixel type is required to be sitkVectorFloat64 because of a constraint imposed by the Demons filters.\n", 340 | " if initial_transform:\n", 341 | " initial_displacement_field = sitk.TransformToDisplacementField(initial_transform, \n", 342 | " sitk.sitkVectorFloat64,\n", 343 | " fixed_images[-1].GetSize(),\n", 344 | " fixed_images[-1].GetOrigin(),\n", 345 | " fixed_images[-1].GetSpacing(),\n", 346 | " fixed_images[-1].GetDirection())\n", 347 | " else:\n", 348 | " initial_displacement_field = sitk.Image(fixed_images[-1].GetWidth(), \n", 349 | " fixed_images[-1].GetHeight(),\n", 350 | " fixed_images[-1].GetDepth(),\n", 351 | " sitk.sitkVectorFloat64)\n", 352 | " initial_displacement_field.CopyInformation(fixed_images[-1])\n", 353 | " \n", 354 | " # Run the registration. \n", 355 | " initial_displacement_field = registration_algorithm.Execute(fixed_images[-1], \n", 356 | " moving_images[-1], \n", 357 | " initial_displacement_field)\n", 358 | " # Start at the top of the pyramid and work our way down. \n", 359 | " for f_image, m_image in reversed(list(zip(fixed_images[0:-1], moving_images[0:-1]))):\n", 360 | " initial_displacement_field = sitk.Resample(initial_displacement_field, f_image)\n", 361 | " initial_displacement_field = registration_algorithm.Execute(f_image, m_image, initial_displacement_field)\n", 362 | " return sitk.DisplacementFieldTransform(initial_displacement_field)" 363 | ] 364 | }, 365 | { 366 | "cell_type": "markdown", 367 | "metadata": {}, 368 | "source": [ 369 | "Now we will use our newly minted multiscale framework to perform registration with the Demons filters. Some things you can easily try out by editing the code below:\n", 370 | "1. Is there really a need for multiscale - just call the multiscale_demons method without the shrink_factors and smoothing_sigmas parameters.\n", 371 | "2. 
Which Demons filter should you use - configure the other filters and see if our selection is the best choice (accuracy/time)." 372 | ] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "execution_count": null, 377 | "metadata": {}, 378 | "outputs": [], 379 | "source": [ 380 | "# Define a simple callback which allows us to monitor registration progress.\n", 381 | "def iteration_callback(filter):\n", 382 | " print('\\r{0}: {1:.2f}'.format(filter.GetElapsedIterations(), filter.GetMetric()), end='')\n", 383 | " \n", 384 | "# Select a Demons filter and configure it.\n", 385 | "demons_filter = sitk.FastSymmetricForcesDemonsRegistrationFilter()\n", 386 | "demons_filter.SetNumberOfIterations(20)\n", 387 | "# Regularization (update field - viscous, total field - elastic).\n", 388 | "demons_filter.SetSmoothDisplacementField(True)\n", 389 | "demons_filter.SetStandardDeviations(2.0)\n", 390 | "\n", 391 | "# Add our simple callback to the registration filter.\n", 392 | "demons_filter.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(demons_filter))\n", 393 | "\n", 394 | "# Run the registration.\n", 395 | "tx = multiscale_demons(registration_algorithm=demons_filter, \n", 396 | " fixed_image = fixed_image, \n", 397 | " moving_image = moving_image,\n", 398 | " shrink_factors = [4,2],\n", 399 | " smoothing_sigmas = [8,4])\n", 400 | "\n", 401 | "# look at the final TREs.\n", 402 | "final_TRE = utilities.target_registration_errors(tx, fixed_points, moving_points, display_errors = True)\n", 403 | "\n", 404 | "print('Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(np.mean(final_TRE), \n", 405 | " np.std(final_TRE), \n", 406 | " np.max(final_TRE)))" 407 | ] 408 | }, 409 | { 410 | "cell_type": "markdown", 411 | "metadata": {}, 412 | "source": [ 413 | "## Quantitative Evaluation II (Segmentation)\n", 414 | "\n", 415 | "While the use of corresponding points to evaluate registration is the desired approach, it is often not applicable. In many cases there are only a few distinct points which can be localized in the two images, possibly too few to serve as a metric for evaluating the registration result across the whole region of interest. \n", 416 | "\n", 417 | "An alternative approach is to use segmentation. In this approach, we independently segment the structures of interest in the two images. After registration we transfer the segmentation from one image to the other and compare the original and registration induced segmentations.\n" 418 | ] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "execution_count": null, 423 | "metadata": {}, 424 | "outputs": [], 425 | "source": [ 426 | "# Transfer the segmentation via the estimated transformation. \n", 427 | "# Nearest Neighbor interpolation so we don't introduce new labels.\n", 428 | "transformed_labels = sitk.Resample(masks[moving_index],\n", 429 | " fixed_image,\n", 430 | " tx, \n", 431 | " sitk.sitkNearestNeighbor,\n", 432 | " 0.0, \n", 433 | " masks[moving_index].GetPixelID())" 434 | ] 435 | }, 436 | { 437 | "cell_type": "markdown", 438 | "metadata": {}, 439 | "source": [ 440 | "We have now replaced the task of evaluating registration with that of evaluating segmentation." 
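The next cells compute overlap and surface-distance measures with SimpleITK filters. As a cross-check, the Dice coefficient, $2|A\cap B|/(|A|+|B|)$, can also be computed directly from the binary label arrays (a minimal numpy sketch, not part of the original notebook):

import numpy as np
import SimpleITK as sitk

def dice_coefficient(mask1, mask2):
    # Work on boolean numpy views of the binary masks (no copies).
    a = sitk.GetArrayViewFromImage(mask1) > 0
    b = sitk.GetArrayViewFromImage(mask2) > 0
    return 2.0 * np.logical_and(a, b).sum() / (a.sum() + b.sum())

# e.g. dice_coefficient(reference_segmentation, moving_image_mask)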
441 | ] 442 | }, 443 | { 444 | "cell_type": "code", 445 | "execution_count": null, 446 | "metadata": {}, 447 | "outputs": [], 448 | "source": [ 449 | "# Often referred to as ground truth, but we prefer reference as the truth is never known.\n", 450 | "reference_segmentation = fixed_image_mask\n", 451 | "# Segmentations before and after registration\n", 452 | "segmentations = [moving_image_mask, transformed_labels == utilities.popi_lung_label]" 453 | ] 454 | }, 455 | { 456 | "cell_type": "code", 457 | "execution_count": null, 458 | "metadata": {}, 459 | "outputs": [], 460 | "source": [ 461 | "from enum import Enum\n", 462 | "\n", 463 | "# Use enumerations to represent the various evaluation measures\n", 464 | "class OverlapMeasures(Enum):\n", 465 | " jaccard, dice, volume_similarity, false_negative, false_positive = range(5)\n", 466 | "\n", 467 | "class SurfaceDistanceMeasures(Enum):\n", 468 | " hausdorff_distance, mean_surface_distance, median_surface_distance, std_surface_distance, max_surface_distance = range(5)\n", 469 | " \n", 470 | "# Empty numpy arrays to hold the results \n", 471 | "overlap_results = np.zeros((len(segmentations),len(OverlapMeasures.__members__.items()))) \n", 472 | "surface_distance_results = np.zeros((len(segmentations),len(SurfaceDistanceMeasures.__members__.items()))) \n", 473 | "\n", 474 | "# Compute the evaluation criteria\n", 475 | "\n", 476 | "# Note that for the overlap measures filter, because we are dealing with a single label we \n", 477 | "# use the combined, all labels, evaluation measures without passing a specific label to the methods.\n", 478 | "overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n", 479 | "\n", 480 | "hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n", 481 | "\n", 482 | "# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside \n", 483 | "# relationship, is irrelevant)\n", 484 | "label = 1\n", 485 | "reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(reference_segmentation, squaredDistance=False))\n", 486 | "reference_surface = sitk.LabelContour(reference_segmentation)\n", 487 | "\n", 488 | "statistics_image_filter = sitk.StatisticsImageFilter()\n", 489 | "# Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 490 | "statistics_image_filter.Execute(reference_surface)\n", 491 | "num_reference_surface_pixels = int(statistics_image_filter.GetSum()) \n", 492 | "\n", 493 | "for i, seg in enumerate(segmentations):\n", 494 | " # Overlap measures\n", 495 | " overlap_measures_filter.Execute(reference_segmentation, seg)\n", 496 | " overlap_results[i,OverlapMeasures.jaccard.value] = overlap_measures_filter.GetJaccardCoefficient()\n", 497 | " overlap_results[i,OverlapMeasures.dice.value] = overlap_measures_filter.GetDiceCoefficient()\n", 498 | " overlap_results[i,OverlapMeasures.volume_similarity.value] = overlap_measures_filter.GetVolumeSimilarity()\n", 499 | " overlap_results[i,OverlapMeasures.false_negative.value] = overlap_measures_filter.GetFalseNegativeError()\n", 500 | " overlap_results[i,OverlapMeasures.false_positive.value] = overlap_measures_filter.GetFalsePositiveError()\n", 501 | " # Hausdorff distance\n", 502 | " hausdorff_distance_filter.Execute(reference_segmentation, seg)\n", 503 | " surface_distance_results[i,SurfaceDistanceMeasures.hausdorff_distance.value] = hausdorff_distance_filter.GetHausdorffDistance()\n", 504 | " # Symmetric surface distance measures\n", 505 | " segmented_distance_map = 
sitk.Abs(sitk.SignedMaurerDistanceMap(seg, squaredDistance=False))\n", 506 | " segmented_surface = sitk.LabelContour(seg)\n", 507 | " \n", 508 | " # Multiply the binary surface segmentations with the distance maps. The resulting distance\n", 509 | " # maps contain non-zero values only on the surface (they can also contain zero on the surface)\n", 510 | " seg2ref_distance_map = reference_distance_map*sitk.Cast(segmented_surface, sitk.sitkFloat32)\n", 511 | " ref2seg_distance_map = segmented_distance_map*sitk.Cast(reference_surface, sitk.sitkFloat32)\n", 512 | " \n", 513 | " # Get the number of pixels in the segmented surface by counting all pixels that are 1.\n", 514 | " statistics_image_filter.Execute(segmented_surface)\n", 515 | " num_segmented_surface_pixels = int(statistics_image_filter.GetSum())\n", 516 | " \n", 517 | " # Get all non-zero distances and then add zero distances if required.\n", 518 | " seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n", 519 | " seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr!=0]) \n", 520 | " seg2ref_distances = seg2ref_distances + \\\n", 521 | " list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))\n", 522 | " ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n", 523 | " ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr!=0]) \n", 524 | " ref2seg_distances = ref2seg_distances + \\\n", 525 | " list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))\n", 526 | " \n", 527 | " all_surface_distances = seg2ref_distances + ref2seg_distances\n", 528 | " \n", 529 | " surface_distance_results[i,SurfaceDistanceMeasures.mean_surface_distance.value] = np.mean(all_surface_distances)\n", 530 | " surface_distance_results[i,SurfaceDistanceMeasures.median_surface_distance.value] = np.median(all_surface_distances)\n", 531 | " surface_distance_results[i,SurfaceDistanceMeasures.std_surface_distance.value] = np.std(all_surface_distances)\n", 532 | " surface_distance_results[i,SurfaceDistanceMeasures.max_surface_distance.value] = np.max(all_surface_distances)\n", 533 | "\n", 534 | "import pandas as pd\n", 535 | "from IPython.display import display, HTML \n", 536 | "\n", 537 | "# Graft our results matrix into pandas data frames \n", 538 | "overlap_results_df = pd.DataFrame(data=overlap_results, index=[\"before registration\", \"after registration\"], \n", 539 | " columns=[name for name, _ in OverlapMeasures.__members__.items()]) \n", 540 | "surface_distance_results_df = pd.DataFrame(data=surface_distance_results, index=[\"before registration\", \"after registration\"], \n", 541 | " columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()]) \n", 542 | "\n", 543 | "# Display the data as HTML tables and graphs\n", 544 | "display(HTML(overlap_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 545 | "display(HTML(surface_distance_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 546 | "overlap_results_df.plot(kind='bar', rot=1).legend(bbox_to_anchor=(1.6,0.9))\n", 547 | "surface_distance_results_df.plot(kind='bar', rot=1).legend(bbox_to_anchor=(1.6,0.9)); " 548 | ] 549 | }, 550 | { 551 | "cell_type": "markdown", 552 | "metadata": { 553 | "collapsed": true 554 | }, 555 | "source": [ 556 | "

Next »

" 557 | ] 558 | } 559 | ], 560 | "metadata": { 561 | "kernelspec": { 562 | "display_name": "Python 3", 563 | "language": "python", 564 | "name": "python3" 565 | }, 566 | "language_info": { 567 | "codemirror_mode": { 568 | "name": "ipython", 569 | "version": 3 570 | }, 571 | "file_extension": ".py", 572 | "mimetype": "text/x-python", 573 | "name": "python", 574 | "nbconvert_exporter": "python", 575 | "pygments_lexer": "ipython3", 576 | "version": "3.6.8" 577 | } 578 | }, 579 | "nbformat": 4, 580 | "nbformat_minor": 2 581 | } 582 | -------------------------------------------------------------------------------- /06_registration_application.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

Clinical Application: Creating a Lower Limb Panoramic X-ray

\n", 8 | "\n", 9 | "**Summary:**\n", 10 | "1. Successful registration is highly dependent on initialization. Use your domain knowledge to obtain plausible initializations.\n", 11 | "\n", 12 | "Measurement of knee alignment is useful for diagnosis of arthritic conditions and for planning and evaluation of surgical interventions. Alignment is measured by the hip-knee-ankle ($HKA$) angle in standing, load bearing, x-ray images. The angle is defined by the femoral and tibial mechanical axes. The femoral axis is defined by the center of the femur head and the mid condylar point. The tibial axis is defined by the center of the tibial plateau to the center of the tibial plafond. \n", 13 | "\n", 14 | "\n", 15 | "\n", 16 | "
\n", 17 | "Figure: Hip-Knee-Ankle angle defined by the femoral mechanical axis (solid red line with dashed extension) and the tibial mechanical axis (solid blue line).\n", 18 | "\n", 19 | "
\n", 20 | "\n", 21 | "\n", 22 | "The three stances defined by the $HKA$ angle are:\n", 23 | " 1. Neutral alignment, $HKA=0^{\\circ}$.\n", 24 | " 2. Varus, bow-legged, $HKA<0^{\\circ}$.\n", 25 | " 3. Valgus, knock-kneed, $HKA>0^{\\circ}$.\n", 26 | "\n", 27 | "For additional information, see:\n", 28 | "1. T. D. Cooke et al., \"[Frontal plane knee alignment: a call for standardized measurement](https://www.ncbi.nlm.nih.gov/pubmed/17787049)\", J Rheumatol. 2007.\n", 29 | "2. A. F. Kamath et al., \"[What is Varus or Valgus Knee Alignment?: A Call for a Uniform Radiographic Classification](https://www.ncbi.nlm.nih.gov/pubmed/20361279)\", Clin Orthop Relat Res. 2010.\n", 30 | "\n", 31 | "For a robust estimate of the $HKA$ angle we would like to use a single image that contains the anatomy from the femoral head down to the ankle. Acquisition of such an image with standard x-ray imaging devices is not possible. It is achievable by acquiring multiple partially overlapping images and aligning (registering) them to the same coordinate system, which is the subject of this notebook. \n", 32 | "\n", 33 | "This notebook is based in part on the work described in: \"A marker-free registration method for standing X-ray panorama reconstruction for hip-knee-ankle axis deformity assessment\", Y. K. Ben-Zikri, Z. Yaniv, K. Baum, C. A. Linte, *Computer Methods in Biomechanics and Biomedical Engineering: Imaging & Visualization*, DOI:10.1080/21681163.2018.1537859.\n", 34 | "\n" 35 | ] 36 | }, 37 | { 38 | "cell_type": "code", 39 | "execution_count": null, 40 | "metadata": {}, 41 | "outputs": [], 42 | "source": [ 43 | "import SimpleITK as sitk\n", 44 | "import numpy as np\n", 45 | "import os.path\n", 46 | "import copy\n", 47 | "\n", 48 | "%matplotlib notebook\n", 49 | "import gui\n", 50 | "import matplotlib.pyplot as plt\n", 51 | "\n", 52 | "# Utility method that either downloads data from the Girder repository or,\n", 53 | "# if already downloaded, returns the file name for reading from disk (cached data).\n", 54 | "from downloaddata import fetch_data as fdata" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "metadata": {}, 60 | "source": [ 61 | "## Loading data" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": null, 67 | "metadata": {}, 68 | "outputs": [], 69 | "source": [ 70 | "# Fetch all of the data associated with this example.\n", 71 | "data_directory = os.path.dirname(fdata(\"leg_panorama/readme.txt\"))\n", 72 | "\n", 73 | "hip_image = sitk.ReadImage(os.path.join(data_directory,'hip.mha'))\n", 74 | "knee_image = sitk.ReadImage(os.path.join(data_directory,'knee.mha'))\n", 75 | "ankle_image = sitk.ReadImage(os.path.join(data_directory,'ankle.mha'))\n", 76 | "\n", 77 | "gui.multi_image_display2D([hip_image, knee_image, ankle_image], figure_size=(10,4));" 78 | ] 79 | }, 80 | { 81 | "cell_type": "markdown", 82 | "metadata": {}, 83 | "source": [ 84 | "## Getting to know your data\n", 85 | "\n", 86 | "As our goal is to register the images, we need to identify an appropriate **similarity metric** and **transformation type**. \n", 87 | "\n", 88 | "### Similarity metric\n", 89 | "\n", 90 | "Given that we are using the same device to acquire multiple partially overlapping images, we would expect that the intensities for the same anatomical structures are the same in all images. We start by visually inspecting the images displayed above. If you hover the cursor over the images you will see the intensity value on the bottom right.\n", 91 | "\n", 92 | "We next plot the histogram for one of the images."
93 | ] 94 | }, 95 | { 96 | "cell_type": "code", 97 | "execution_count": null, 98 | "metadata": {}, 99 | "outputs": [], 100 | "source": [ 101 | "intensity_profile_image = knee_image\n", 102 | "fig = plt.figure()\n", 103 | "plt.hist(sitk.GetArrayViewFromImage(intensity_profile_image).flatten(), bins=100);" 104 | ] 105 | }, 106 | { 107 | "cell_type": "markdown", 108 | "metadata": {}, 109 | "source": [ 110 | "Notice that the image has a high dynamic range, which is mapped to a low dynamic range when displayed, so we cannot observe all underlying intensity variations. Ideally, intensity variations in x-ray images only occur when there are variations in the imaged object. In practice, we can observe non-uniform intensities due to the structure of the x-ray device (e.g. absorption of photons by the x-ray anode, known as the [heel effect](https://en.wikipedia.org/wiki/Heel_effect)).\n", 111 | "\n", 112 | "In the next code cells we define a rectangular region of interest (use the left mouse button, click and drag, to define it) in an area that is expected to have uniform intensity values (air) and plot the mean intensity per row. You can readily notice that there are intensity variations in what is expected to be a uniform region." 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": { 119 | "scrolled": false 120 | }, 121 | "outputs": [], 122 | "source": [ 123 | "# The ROI we specify is in a region that is expected to have uniform intensities.\n", 124 | "# You can clear this ROI and specify your own in the GUI below.\n", 125 | "roi_list = [((396, 436), (52, 1057))]\n", 126 | "roi_gui = gui.ROIDataAquisition(intensity_profile_image, figure_size=(8,4))\n", 127 | "roi_gui.set_rois(roi_list)" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "# Get the region of interest (first entry in the list of ROIs).\n", 137 | "roi = roi_gui.get_rois()[0]\n", 138 | "intensities = sitk.GetArrayFromImage(intensity_profile_image[roi[0][0]:roi[0][1], roi[1][0]:roi[1][1]])\n", 139 | "\n", 140 | "fig, axes = plt.subplots(1, 2, sharey=True)\n", 141 | "fig.suptitle('intensity variations (mean row value)')\n", 142 | "axes[0].imshow(intensities, cmap=plt.cm.Greys_r)\n", 143 | "axes[0].set_axis_off()\n", 144 | "axes[1].plot(intensities.mean(axis=1), range(intensities.shape[0]))\n", 145 | "axes[1].get_yaxis().set_visible(False)\n", 146 | "axes[1].tick_params(axis='x', rotation=-90)\n", 147 | "plt.box(on=None)" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "Given our observations above, we will use **correlation** as our similarity measure rather than mean squares." 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "### Transformation type\n", 162 | "\n", 163 | "In general, the x-ray machine is modeled as a pinhole camera, with our images acquired using a fronto-parallel setup and the camera undergoing translation. This simplifies the general model from a homography transformation between images to a **planar translation**. For a detailed derivation, see Z. Yaniv, L. Joskowicz, \"[Long Bone Panoramas from Fluoroscopic X-ray Images](https://www.ncbi.nlm.nih.gov/pubmed/14719684)\", IEEE Trans Med Imaging. 2004. 
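The choice of correlation over mean squares made above can be sanity-checked numerically: a global linear intensity change, similar in spirit to the smooth variation measured in the ROI, barely affects the correlation metric but inflates mean squares. A minimal sketch using synthetic images rather than the workshop data (`MetricEvaluate` only queries the metric value; no optimization is run):

```python
# Sketch: correlation is invariant to a linear intensity change, mean squares is not.
import SimpleITK as sitk
import numpy as np

rng = np.random.default_rng(1234)
fixed = sitk.GetImageFromArray(rng.random((128, 128)).astype(np.float32))
moving = fixed * 1.5 + 100.0  # same content, different gain and offset

for metric_setter in ['SetMetricAsCorrelation', 'SetMetricAsMeanSquares']:
    reg = sitk.ImageRegistrationMethod()
    getattr(reg, metric_setter)()  # configure the metric under comparison
    reg.SetInterpolator(sitk.sitkLinear)
    reg.SetInitialTransform(sitk.TranslationTransform(2))  # identity translation
    print(metric_setter, reg.MetricEvaluate(fixed, moving))
```

Correlation reports an essentially perfect match despite the intensity change, while mean squares reports a very large discrepancy, which is why it would mislead the optimizer on these x-ray images.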
\n", 164 | "\n", 165 | "While our transformation type is translation, looking at multiple triplets of images we observed that the size of the overlapping regions, i.e. the expected translations, varies significantly. Consequently, we will use a heuristic **exploration-exploitation** approach to improve the robustness of our registration.\n", 166 | "\n", 167 | "\n", 168 | "\n", 169 | "## Registration - Exploration Step\n", 170 | "\n", 171 | "As the image overlap varies considerably, we will use the ExhaustiveOptimizer to obtain several starting points, our exploration step. We then run a standard registration from each of these initial transformation estimates, our exploitation step. Finally, we select the best transformation obtained in the exploitation step." 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "metadata": {}, 178 | "outputs": [], 179 | "source": [ 180 | "class Evaluate2DTranslationCorrelation:\n", 181 | " '''\n", 182 | " Class for evaluating the correlation value for a given set of \n", 183 | " 2D translations between two images. A general relationship between\n", 184 | " the fixed and moving images is assumed (the fixed image is \"below\" the moving one).\n", 185 | " We use the Exhaustive optimizer to sample the possible set of translations\n", 186 | " and an observer to tabulate the results.\n", 187 | " \n", 188 | " In this class we abuse the Python dictionary by using a float\n", 189 | " value as the key. This is safe here because the floating point\n", 190 | " values are set once (they are not the results of computations), so \n", 191 | " comparing them for exact equality is well defined and equal values \n", 192 | " hash to the same dictionary entry.\n", 193 | " '''\n", 194 | " def __init__(self, metric_sampling_percentage, min_row_overlap, \n", 195 | " max_row_overlap, column_overlap, dx_step_num,\n", 196 | " dy_step_num):\n", 197 | " '''\n", 198 | " Args:\n", 199 | " metric_sampling_percentage: Percentage of samples to use\n", 200 | " when computing correlation.\n", 201 | " min_row_overlap: Minimal number of rows that overlap between \n", 202 | " the two images.\n", 203 | " max_row_overlap: Maximal number of rows that overlap between \n", 204 | " the two images.\n", 205 | " column_overlap: Maximal translation along the columns, in both \n", 206 | " the positive and negative directions.\n", 207 | " dx_step_num: The number of samples in parameter space for translation \n", 208 | " along the x axis is 2*dx_step_num+1.\n", 209 | " dy_step_num: The number of samples in parameter space for translation \n", 210 | " along the y axis is 2*dy_step_num+1.\n", 211 | " \n", 212 | " '''\n", 213 | " self._registration_values_dict = {}\n", 214 | " self.X = None\n", 215 | " self.Y = None\n", 216 | " self.C = None\n", 217 | " self._metric_sampling_percentage = metric_sampling_percentage\n", 218 | " self._min_row_overlap = min_row_overlap\n", 219 | " self._max_row_overlap = max_row_overlap\n", 220 | " self._column_overlap = column_overlap\n", 221 | " self._dx_step_num = dx_step_num\n", 222 | " self._dy_step_num = dy_step_num\n", 223 | " \n", 224 | " def _start_observer(self):\n", 225 | " self._registration_values_dict = {}\n", 226 | " self.X = None\n", 227 | " self.Y = None\n", 228 | " self.C = None\n", 229 | " \n", 230 | "\n", 231 | " def _iteration_observer(self, registration_method):\n", 232 | " x, y = registration_method.GetOptimizerPosition()\n", 233 | " if y in self._registration_values_dict.keys():\n", 234 | "
self._registration_values_dict[y].append((x, registration_method.GetMetricValue()))\n", 235 | " else:\n", 236 | " self._registration_values_dict[y] = [(x, registration_method.GetMetricValue())]\n", 237 | "\n", 238 | " \n", 239 | " def evaluate(self, fixed_image, moving_image):\n", 240 | " '''\n", 241 | " Assume the fixed image is lower than the moving image (e.g. fixed=knee, moving=hip).\n", 242 | " The transformations map points in the fixed_image to the moving_image.\n", 243 | " Args:\n", 244 | " fixed_image: Image to use as fixed image in the registration.\n", 245 | " moving_image: Image to use as moving image in the registration.\n", 246 | " '''\n", 247 | " minimal_overlap = np.array(moving_image.TransformContinuousIndexToPhysicalPoint((-self._column_overlap, moving_image.GetHeight()-self._min_row_overlap))) - np.array(fixed_image.GetOrigin())\n", 248 | " maximal_overlap = np.array(moving_image.TransformContinuousIndexToPhysicalPoint((self._column_overlap, moving_image.GetHeight()-self._max_row_overlap))) - np.array(fixed_image.GetOrigin())\n", 249 | " transform = sitk.TranslationTransform(2, ((maximal_overlap[0]+minimal_overlap[0])/2.0, (maximal_overlap[1]+minimal_overlap[1])/2.0))\n", 250 | " \n", 251 | " # The exhaustive optimizer samples 2*step_num+1 translations per axis, in both\n", 252 | " # directions around the initial value.\n", 253 | " dy_step_length = (maximal_overlap[1] - minimal_overlap[1])/(2*self._dy_step_num)\n", 254 | " dx_step_length = (maximal_overlap[0] - minimal_overlap[0])/(2*self._dx_step_num)\n", 255 | " step_length = dx_step_length\n", 256 | " parameter_scales = [1, dy_step_length/dx_step_length]\n", 257 | "\n", 258 | " registration_method = sitk.ImageRegistrationMethod()\n", 259 | " registration_method.SetMetricAsCorrelation()\n", 260 | " registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 261 | " registration_method.SetMetricSamplingPercentage(self._metric_sampling_percentage)\n", 262 | " registration_method.SetInitialTransform(transform, inPlace=True)\n", 263 | " registration_method.SetOptimizerAsExhaustive(numberOfSteps=[self._dx_step_num, self._dy_step_num], \n", 264 | " stepLength=step_length)\n", 265 | " registration_method.SetOptimizerScales(parameter_scales)\n", 266 | "\n", 267 | " registration_method.AddCommand(sitk.sitkIterationEvent, lambda: self._iteration_observer(registration_method))\n", 268 | " registration_method.AddCommand(sitk.sitkStartEvent, self._start_observer)\n", 269 | " registration_method.Execute(fixed_image, moving_image)\n", 270 | "\n", 271 | " # Convert the data obtained by the observer to three numpy arrays X, Y, C.\n", 272 | " x_lists = []\n", 273 | " val_lists = []\n", 274 | " for k in self._registration_values_dict.keys():\n", 275 | " x_list, val_list = zip(*(sorted(self._registration_values_dict[k])))\n", 276 | " x_lists.append(x_list)\n", 277 | " val_lists.append(val_list)\n", 278 | " \n", 279 | " self.X = np.array(x_lists)\n", 280 | " self.C = np.array(val_lists)\n", 281 | " self.Y = np.array([list(self._registration_values_dict.keys()),]*self.X.shape[1]).transpose()\n", 282 | "\n", 283 | " def get_raw_data(self):\n", 284 | " '''\n", 285 | " Get the raw data, the translations and corresponding correlation values.\n", 286 | " Returns:\n", 287 | " A tuple of three numpy arrays (X,Y,C) where (X[i], Y[i]) are the translation\n", 288 | " and C[i] is the correlation value for that translation.\n", 289 | " '''\n", 290 | " return (np.copy(self.X), np.copy(self.Y), np.copy(self.C)) \n", 291 | " \n",
292 | " \n", 293 | " def get_candidates(self, num_candidates, correlation_threshold, nms_radius=2):\n", 294 | " '''\n", 295 | " Get the best (most correlated, i.e. minimal metric value, as the optimizer\n", 296 | " negates the correlation) transformations from the sample set.\n", 297 | " Args:\n", 298 | " num_candidates: Maximal number of candidates to return.\n", 299 | " correlation_threshold: Minimal correlation value required for returning\n", 300 | " a candidate.\n", 301 | " nms_radius: Non-minima suppression radius, the region around a local\n", 302 | " minimum that is suppressed.\n", 303 | " Returns: \n", 304 | " List of tuples containing (transform, correlation). The order of the transformations\n", 305 | " in the list is based on the correlation value (best correlation is entry zero). \n", 306 | " '''\n", 307 | " candidates = []\n", 308 | " _C = np.copy(self.C)\n", 309 | " done = num_candidates - len(candidates) <= 0\n", 310 | " while not done:\n", 311 | " min_index = np.unravel_index(_C.argmin(), _C.shape)\n", 312 | " if(-_C[min_index]
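The dump breaks off at this point, in the middle of the threshold comparison; the remainder of `get_candidates` and all subsequent cells of the notebook are missing here. Based only on the docstrings above, the exploration step might be driven as in the following sketch; every parameter value is an illustrative assumption, not a value recovered from the notebook:

```python
# Hedged usage sketch of the exploration step (all values are illustrative
# assumptions; they are not the notebook's actual parameter choices).
# Assumes knee_image and hip_image from the "Loading data" cell above, and that
# get_candidates returns (transform, correlation) tuples as its docstring states.
explorer = Evaluate2DTranslationCorrelation(metric_sampling_percentage=0.2,
                                            min_row_overlap=20,
                                            max_row_overlap=0.5 * hip_image.GetHeight(),
                                            column_overlap=0.1 * hip_image.GetWidth(),
                                            dx_step_num=4,
                                            dy_step_num=10)

# Exploration: tabulate the correlation over the sampled grid of translations
# (fixed=knee is "below" moving=hip, matching the class's assumption).
explorer.evaluate(fixed_image=knee_image, moving_image=hip_image)

# Keep the most promising translations as starting points; each would then seed
# a standard registration (the exploitation step), and the best result is kept.
for transform, correlation in explorer.get_candidates(num_candidates=5,
                                                      correlation_threshold=0.8):
    print(transform.GetOffset(), correlation)
```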


" 560 | ] 561 | } 562 | ], 563 | "metadata": { 564 | "kernelspec": { 565 | "display_name": "Python 3", 566 | "language": "python", 567 | "name": "python3" 568 | }, 569 | "language_info": { 570 | "codemirror_mode": { 571 | "name": "ipython", 572 | "version": 3 573 | }, 574 | "file_extension": ".py", 575 | "mimetype": "text/x-python", 576 | "name": "python", 577 | "nbconvert_exporter": "python", 578 | "pygments_lexer": "ipython3", 579 | "version": "3.6.8" 580 | } 581 | }, 582 | "nbformat": 4, 583 | "nbformat_minor": 2 584 | } 585 | --------------------------------------------------------------------------------