├── figures ├── hkaAngle.png └── ImageOriginAndSpacing.png ├── docs ├── simpleITKAtNIH19Logo.png ├── simpleitkFundamentalConcepts.pptx ├── simpleitkHistoricalOverview.pptx ├── simpleitkNotebookDevelopmentTesting.pptx ├── citations.bib └── index.html ├── tests ├── requirements_testing.txt ├── additional_dictionary.txt └── test_notebooks.py ├── environment.yml ├── output └── .gitignore ├── clean_cit.sh ├── README.md ├── .circleci └── config.yml ├── data └── manifest.json ├── setup.ipynb ├── registration_gui.py ├── utilities.py ├── downloaddata.py ├── LICENSE ├── 07_segmentation_and_shape_analysis.ipynb ├── 08_segmentation_evaluation.ipynb ├── 01_spatial_transformations.ipynb ├── 09_results_visualization.ipynb └── 05_advanced_registration.ipynb /figures/hkaAngle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/NIH2019_COURSE/master/figures/hkaAngle.png -------------------------------------------------------------------------------- /docs/simpleITKAtNIH19Logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/NIH2019_COURSE/master/docs/simpleITKAtNIH19Logo.png -------------------------------------------------------------------------------- /figures/ImageOriginAndSpacing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/NIH2019_COURSE/master/figures/ImageOriginAndSpacing.png -------------------------------------------------------------------------------- /docs/simpleitkFundamentalConcepts.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/NIH2019_COURSE/master/docs/simpleitkFundamentalConcepts.pptx -------------------------------------------------------------------------------- /docs/simpleitkHistoricalOverview.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/NIH2019_COURSE/master/docs/simpleitkHistoricalOverview.pptx -------------------------------------------------------------------------------- /docs/simpleitkNotebookDevelopmentTesting.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/NIH2019_COURSE/master/docs/simpleitkNotebookDevelopmentTesting.pptx -------------------------------------------------------------------------------- /tests/requirements_testing.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | markdown 3 | lxml 4 | pyenchant 5 | jupyter 6 | matplotlib 7 | ipywidgets 8 | numpy 9 | scipy 10 | pandas 11 | SimpleITK>=1.2.2 12 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: sitkpyNIH19 2 | 3 | channels: 4 | - simpleitk 5 | 6 | dependencies: 7 | - python=3.7 8 | - jupyter 9 | - matplotlib 10 | - ipywidgets 11 | - numpy 12 | - scipy 13 | - pandas 14 | - SimpleITK>=1.2.0 15 | 16 | -------------------------------------------------------------------------------- /output/.gitignore: -------------------------------------------------------------------------------- 1 | # 2 | #Maintain an empty directory in the git repository, where all files in this 3 | #directory will always be ignored by git: 4 | 
#http://stackoverflow.com/questions/115983/how-can-i-add-an-empty-directory-to-a-git-repository 5 | # 6 | * 7 | # Except this file 8 | !.gitignore 9 | -------------------------------------------------------------------------------- /clean_cit.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | # Cleanup script when a course uses shared computing resources. 5 | # Remove the anaconda environment we created and remove the notebooks directory. 6 | # 7 | 8 | ANACONDA_ACTIVATE_PATH=${HOME}/"anaconda3/bin/activate" 9 | ENV_NAME="sitkpyNIH19" 10 | NOTEBOOKS_LOCATION=${HOME}/"Desktop/NIH2019_COURSE" 11 | 12 | source ${ANACONDA_ACTIVATE_PATH} 13 | conda env remove --name ${ENV_NAME} 14 | \rm -rf ${NOTEBOOKS_LOCATION} 15 | 16 | 17 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | > :warning: This repository has been archived. For up to date information, please see the [SimpleITK tutorial](https://simpleitk.org/TUTORIAL/) or the [SimpleITK notebooks repository](https://github.com/InsightSoftwareConsortium/SimpleITK-Notebooks). 2 | 3 | # SimpleITK: NIH 2019 Course 4 | 5 | [![CircleCI](https://circleci.com/gh/SimpleITK/NIH2019_COURSE.svg?style=svg)](https://circleci.com/gh/SimpleITK/NIH2019_COURSE) 6 | 7 | This repository contains all of the material presented at the 8 | NIH Introduction to SimpleITK 2019, and the course's website. 9 | -------------------------------------------------------------------------------- /docs/citations.bib: -------------------------------------------------------------------------------- 1 | 2 | @article{JSSv086i08, 3 | author = {Richard Beare and Bradley Lowekamp and Ziv Yaniv}, 4 | title = {Image Segmentation, Registration and Characterization in R with SimpleITK}, 5 | journal = {Journal of Statistical Software, Articles}, 6 | volume = {86}, 7 | number = {8}, 8 | year = {2018}, 9 | keywords = {image processing; image segmentation; image registration; medical imaging; R}, 10 | issn = {1548-7660}, 11 | pages = {1--35}, 12 | doi = {10.18637/jss.v086.i08}, 13 | url = {https://www.jstatsoft.org/v086/i08} 14 | } 15 | 16 | @article{Yaniv2018, 17 | author = {Yaniv, Ziv and Lowekamp, Bradley C. and Johnson, Hans J. 
and Beare, Richard}, 18 | title = {SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research}, 19 | journal = {Journal of Digital Imaging}, 20 | year = {2018}, 21 | month = {Jun}, 22 | day = {01}, 23 | volume = {31}, 24 | number = {3}, 25 | pages = {290--303}, 26 | issn = {1618-727X}, 27 | doi = {10.1007/s10278-017-0037-8}, 28 | url = {https://doi.org/10.1007/s10278-017-0037-8} 29 | } 30 | 31 | @article{10.3389/fninf.2013.00045, 32 | author = {Lowekamp, Bradley and Chen, David and Ibanez, Luis and Blezek, Daniel}, 33 | title = {The Design of SimpleITK}, 34 | journal = {Frontiers in Neuroinformatics}, 35 | volume = {7}, 36 | pages = {45}, 37 | year = {2013}, 38 | url = {https://www.frontiersin.org/article/10.3389/fninf.2013.00045}, 39 | doi = {10.3389/fninf.2013.00045}, 40 | issn = {1662-5196}, 41 | } 42 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | workflows: 4 | version: 2 5 | test: 6 | jobs: 7 | - test-3.7 8 | jobs: 9 | test-3.7: &test-template 10 | docker: 11 | - image: circleci/python:3.7-stretch 12 | environment: 13 | ExternalData_OBJECT_STORES: /home/circleci/.ExternalData 14 | SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT: 1 15 | steps: 16 | - checkout 17 | 18 | - restore_cache: 19 | keys: 20 | - simpleitk-nih2019-{{ checksum "data/manifest.json" }} 21 | - simpleitk-nih2019- #use previous cache when the manifest changes 22 | 23 | - run: 24 | name: Data setup (if cache is not empty then symbolic link to it) 25 | command: | 26 | mkdir -p "${ExternalData_OBJECT_STORES}" 27 | if [ ! -z "$(ls -A ${ExternalData_OBJECT_STORES})" ]; then 28 | cp -as /home/circleci/.ExternalData/* data 29 | fi 30 | python downloaddata.py "${ExternalData_OBJECT_STORES}" data/manifest.json 31 | 32 | - run: 33 | name: Setup of Python environment 34 | command: | 35 | sudo apt-get update; sudo apt-get install enchant 36 | sudo pip install virtualenv 37 | virtualenv ~/sitkpy --no-site-packages 38 | ~/sitkpy/bin/pip install -r tests/requirements_testing.txt 39 | ~/sitkpy/bin/jupyter nbextension enable --py --sys-prefix widgetsnbextension 40 | 41 | - run: 42 | name: Activate environment and run the test 43 | command: | 44 | source ~/sitkpy/bin/activate 45 | ~/sitkpy/bin/pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook 46 | 47 | - save_cache: 48 | key: simpleitk-nih2019-{{ checksum "data/manifest.json" }} 49 | paths: 50 | - /home/circleci/.ExternalData 51 | 52 | 53 | -------------------------------------------------------------------------------- /data/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "SimpleITK.jpg" : { 3 | "sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46" 4 | }, 5 | "CIRS057A_MR_CT_DICOM/readme.txt" : { 6 | "sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef", 7 | "archive" : "true" 8 | }, 9 | "training_001_ct.mha" : { 10 | "sha512": "1b950bc42fddfcefc76b9203d5dd6c45960c4fa8dcb69b839d3d083270d3d4c9a9d378de3d3f914e432dc18fb44c9b9770d4db5580a70265f3e24e6cdb83015d" 11 | }, 12 | "training_001_mr_T1.mha" : { 13 | "sha512": 
"3d15477962fef5851207964c381ffe77c586a6f70f2a373ecd3b6b4dc50d51dc6cd893eb1bedabcd382a96f0dafac893ae9e5a7c2b7333f9ff3f0c6b7016c7bc" 14 | }, 15 | "POPI/meta/00-P.mhd" : { 16 | "sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc", 17 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar", 18 | "archive" : "true" 19 | }, 20 | "POPI/meta/70-P.mhd" : { 21 | "sha512": "87c256ff441429babceab5f9886397f7c4b4f85525dfb5a786ed64b97f4779d3b313b3faf1449dddb7ba5ed49719ff0eea296a3367cdc98e753f597028a6f0e0", 22 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/70-MetaImage.tar", 23 | "archive" : "true" 24 | }, 25 | "POPI/landmarks/00-Landmarks.pts" : { 26 | "sha512": "7c2120b1f6d4b855aa11bf05dd987d677c219ca4bdfbd39234e7835285c45082c229fb5cc09e00e6bd91b339eeb1f700c597f4166633421a133c21ce773b25ad", 27 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Landmarks/00-Landmarks.pts" 28 | }, 29 | "POPI/landmarks/70-Landmarks.pts" : { 30 | "sha512": "5bbcb192a275b30510fb1badcd12c9110ed7909d4353c76567ebb0aae61fb944a9c4f3d8cd8ffa0519d8621626d06db333c456eda9c68c3a03991e291760f44c", 31 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Landmarks/70-Landmarks.pts" 32 | }, 33 | "POPI/masks/00-air-body-lungs.mhd" : { 34 | "sha512": "e20e93b316390ea53c59427a8ab770bb3ebda1f2e4c911382b753ec024d812de8a6c13d1919b77a1687c4f611acdb62ea92c05b2cc3ed065046fbdbe139538c8", 35 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Masks/00Mask-MetaImage.tar", 36 | "archive" : "true" 37 | }, 38 | "POPI/masks/70-air-body-lungs.mhd" : { 39 | "sha512": "cbbd4b71b9771b36dc71fe6c564c96fde363878713680a624b9b307c4d9959835731c841be6b9304457d212350cc0ffac44385994b9bc9b2d8523e2463c664f8", 40 | "url": "http://tux.creatis.insa-lyon.fr/~srit/POPI/Masks/70Mask-MetaImage.tar", 41 | "archive" : "true" 42 | }, 43 | "fib_sem_bacillus_subtilis.mha": { 44 | "sha512": "5f7c34428362434c4ff3353307f8401ea38a18a68e9fc1705138232b4c70da2fcf3e2e8560ba917620578093edb392b418702edca3be0eafa23d6f52ced73314" 45 | }, 46 | "leg_panorama/readme.txt": { 47 | "archive": "true", 48 | "sha512":"0771b63d7f8ed19d16ca36de144d6570cc3f8d604be56626ceb932f6bbf60857282f52aad4158f85e8a01bb1c84222da5b23fd3df91ec46ebe625341f56d6bf9" 49 | }, 50 | "liverTumorSegmentations/Patient01Homo.mha": { 51 | "sha512": "c57e6c51bdd9dd46034df3c01e3187d884526dbcfcf8e056221205bac1a09098142692a1bc76f3834a78a809570e64544dbec9b9d264747383ee71e20b21d054" 52 | }, 53 | "liverTumorSegmentations/Patient01Homo_Rad01.mha": { 54 | "sha512": "e94fb4d96e5cc5dca3c68fc67f63e895b8a71011f5343b4399e122b8f6a43ec5d5055f939299e3d9955e59cd841ebeb2d2395568c10ce29a597c518db784a337" 55 | }, 56 | "liverTumorSegmentations/Patient01Homo_Rad02.mha": { 57 | "sha512": "e055aff99a1c05ab90b84af048dd94d32236dcb4e4b8ce0a99ba3658fe85cc7c8505b806a92611bcf5ecf4cd0dbe6cafc336efdb9fe49753d1fc4aed174ed8ba" 58 | }, 59 | "liverTumorSegmentations/Patient01Homo_Rad03.mha": { 60 | "sha512": "89e4040e17aba2fae50e0b59b2043203ab33ce3ae6ef90af8ebc8c032a6aaee35084bf1e34ce1a390d157b8fadae2fa7694203a0886f54cc9da5293dbaa5d0e7" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /setup.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

# SimpleITK: A Tool for Biomedical Image Processing, from Cells to Anatomical Structures
\n", 8 | "\n", 9 | "## Newcomers to Jupyter notebooks:\n", 10 | "1. We use two types of cells, code and markdown.\n", 11 | "2. To run a code cell, select it (mouse or arrow key so that it is highlighted) and then press shift+enter which also moves focus to the next cell or ctrl+enter which doesn't.\n", 12 | "3. Closing the browser window does not close the Jupyter server. To close the server, go to the terminal where you ran it and press ctrl+c twice.\n", 13 | "\n", 14 | "For additional details see the [Jupyter Notebook Quick Start Guide](https://jupyter-notebook-beginner-guide.readthedocs.io/en/latest/index.html).\n", 15 | "\n", 16 | "\n", 17 | "## Environment Setup for Course\n", 18 | "\n", 19 | "This notebook should be run prior to arriving at the course venue, as it requires network connectivity." 20 | ] 21 | }, 22 | { 23 | "cell_type": "markdown", 24 | "metadata": {}, 25 | "source": [ 26 | "First, lets check that you have the SimpleITK version which you expect." 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "import SimpleITK as sitk\n", 36 | "from downloaddata import fetch_data, fetch_data_all\n", 37 | "\n", 38 | "from ipywidgets import interact\n", 39 | "\n", 40 | "print(sitk.Version())" 41 | ] 42 | }, 43 | { 44 | "cell_type": "markdown", 45 | "metadata": {}, 46 | "source": [ 47 | "Next, we check that the auxiliary program(s) are correctly installed in your environment.\n", 48 | "\n", 49 | "We expect that you have an external image viewer installed. The default viewer is Fiji. If you have another viewer (i.e. ITK-SNAP or 3D Slicer) you will need to set an environment variable to point to it. This is done using an environment variable which can also be set from within a notebook as shown below." 50 | ] 51 | }, 52 | { 53 | "cell_type": "code", 54 | "execution_count": null, 55 | "metadata": { 56 | "simpleitk_error_allowed": "Exception thrown in SimpleITK ImageViewer_Execute:" 57 | }, 58 | "outputs": [], 59 | "source": [ 60 | "# Uncomment the line below to change the default external viewer to your viewer of choice and test that it works.\n", 61 | "#%env SITK_SHOW_COMMAND path_to_program/ITK-SNAP \n", 62 | "\n", 63 | "# Retrieve an image from the network, read it and display using the external viewer\n", 64 | "image_viewer = sitk.ImageViewer()\n", 65 | "image_viewer.Execute(sitk.ReadImage(fetch_data(\"SimpleITK.jpg\")))" 66 | ] 67 | }, 68 | { 69 | "cell_type": "markdown", 70 | "metadata": {}, 71 | "source": [ 72 | "Now we check that the ipywidgets will display correctly. When you run the following cell you should see a slider.\n", 73 | "\n", 74 | "If you don't see a slider please shutdown the Jupyter server, at the Anaconda command line prompt press Control-c twice, and then run the following command:\n", 75 | "\n", 76 | "1. If using jupyter notebook\n", 77 | "```jupyter nbextension enable --py --sys-prefix widgetsnbextension```\n", 78 | "\n", 79 | "2. If using jupyter lab\n", 80 | "```jupyter labextension install @jupyter-widgets/jupyterlab-manager```" 81 | ] 82 | }, 83 | { 84 | "cell_type": "code", 85 | "execution_count": null, 86 | "metadata": {}, 87 | "outputs": [], 88 | "source": [ 89 | "interact(lambda x: x, x=(0,10));" 90 | ] 91 | }, 92 | { 93 | "cell_type": "markdown", 94 | "metadata": {}, 95 | "source": [ 96 | "Finally, we download all of the data used in the notebooks in advance. 
This step is necessary as we will be running the notebooks without network connectivity.\n", 97 | "\n", 98 | "This may take a couple of minutes depending on your network." 99 | ] 100 | }, 101 | { 102 | "cell_type": "code", 103 | "execution_count": null, 104 | "metadata": {}, 105 | "outputs": [], 106 | "source": [ 107 | "import os\n", 108 | "\n", 109 | "fetch_data_all('data', os.path.join('data','manifest.json'))" 110 | ] 111 | }, 112 | { 113 | "cell_type": "markdown", 114 | "metadata": {}, 115 | "source": [ 116 | "

" 117 | ] 118 | } 119 | ], 120 | "metadata": { 121 | "kernelspec": { 122 | "display_name": "Python 3", 123 | "language": "python", 124 | "name": "python3" 125 | }, 126 | "language_info": { 127 | "codemirror_mode": { 128 | "name": "ipython", 129 | "version": 3 130 | }, 131 | "file_extension": ".py", 132 | "mimetype": "text/x-python", 133 | "name": "python", 134 | "nbconvert_exporter": "python", 135 | "pygments_lexer": "ipython3", 136 | "version": "3.7.4" 137 | } 138 | }, 139 | "nbformat": 4, 140 | "nbformat_minor": 2 141 | } 142 | -------------------------------------------------------------------------------- /registration_gui.py: -------------------------------------------------------------------------------- 1 | import SimpleITK as sitk 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | # 6 | # Set of methods used for displaying the registration metric during the optimization. 7 | # 8 | 9 | # Callback invoked when the StartEvent happens, sets up our new data. 10 | def start_plot(): 11 | global metric_values, multires_iterations, ax, fig 12 | fig, ax = plt.subplots(1,1, figsize=(8,4)) 13 | 14 | metric_values = [] 15 | multires_iterations = [] 16 | plt.show() 17 | 18 | 19 | # Callback invoked when the EndEvent happens, do cleanup of data and figure. 20 | def end_plot(): 21 | global metric_values, multires_iterations, ax, fig 22 | 23 | del metric_values 24 | del multires_iterations 25 | del ax 26 | del fig 27 | 28 | # Callback invoked when the IterationEvent happens, update our data and display new figure. 29 | def plot_values(registration_method): 30 | global metric_values, multires_iterations, ax, fig 31 | 32 | metric_values.append(registration_method.GetMetricValue()) 33 | # Plot the similarity metric values 34 | ax.plot(metric_values, 'r') 35 | ax.plot(multires_iterations, [metric_values[index] for index in multires_iterations], 'b*') 36 | ax.set_xlabel('Iteration Number',fontsize=12) 37 | ax.set_ylabel('Metric Value',fontsize=12) 38 | fig.canvas.draw() 39 | 40 | # Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the 41 | # metric_values list. 42 | def update_multires_iterations(): 43 | global metric_values, multires_iterations 44 | multires_iterations.append(len(metric_values)) 45 | 46 | 47 | def overlay_binary_segmentation_contours(image, mask, window_min, window_max): 48 | """ 49 | Given a 2D image and mask: 50 | a. resample the image and mask into isotropic grid (required for display). 51 | b. rescale the image intensities using the given window information. 52 | c. overlay the contours computed from the mask onto the image. 53 | """ 54 | # Resample the image (linear interpolation) and mask (nearest neighbor interpolation) into an isotropic grid, 55 | # required for display. 
56 | original_spacing = image.GetSpacing() 57 | original_size = image.GetSize() 58 | min_spacing = min(original_spacing) 59 | new_spacing = [min_spacing, min_spacing] 60 | new_size = [int(round(original_size[0]*(original_spacing[0]/min_spacing))), 61 | int(round(original_size[1]*(original_spacing[1]/min_spacing)))] 62 | resampled_img = sitk.Resample(image, new_size, sitk.Transform(), 63 | sitk.sitkLinear, image.GetOrigin(), 64 | new_spacing, image.GetDirection(), 0.0, 65 | image.GetPixelID()) 66 | resampled_msk = sitk.Resample(mask, new_size, sitk.Transform(), 67 | sitk.sitkNearestNeighbor, mask.GetOrigin(), 68 | new_spacing, mask.GetDirection(), 0.0, 69 | mask.GetPixelID()) 70 | 71 | # Create the overlay: cast the mask to expected label pixel type, and do the same for the image after 72 | # window-level, accounting for the high dynamic range of the CT. 73 | return sitk.LabelMapContourOverlay(sitk.Cast(resampled_msk, sitk.sitkLabelUInt8), 74 | sitk.Cast(sitk.IntensityWindowing(resampled_img, 75 | windowMinimum=window_min, 76 | windowMaximum=window_max), 77 | sitk.sitkUInt8), 78 | opacity = 1, 79 | contourThickness=[2,2]) 80 | 81 | 82 | def display_coronal_with_overlay(temporal_slice, coronal_slice, images, masks, label, window_min, window_max): 83 | """ 84 | Display a coronal slice from the 4D (3D+time) CT with a contour overlaid onto it. The contour is the edge of 85 | the specific label. 86 | """ 87 | img = images[temporal_slice][:,coronal_slice,:] 88 | msk = masks[temporal_slice][:,coronal_slice,:]==label 89 | 90 | overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) 91 | # Flip the image so that corresponds to correct radiological view. 92 | plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img))) 93 | plt.axis('off') 94 | plt.show() 95 | 96 | 97 | def display_coronal_with_label_maps_overlay(coronal_slice, mask_index, image, masks, label, window_min, window_max): 98 | """ 99 | Display a coronal slice from a 3D CT with a contour overlaid onto it. The contour is the edge of 100 | the specific label from the specific mask. Function is used to display results of transforming a segmentation 101 | using registration. 102 | """ 103 | img = image[:,coronal_slice,:] 104 | msk = masks[mask_index][:,coronal_slice,:]==label 105 | 106 | overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) 107 | # Flip the image so that corresponds to correct radiological view. 
108 | plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img))) 109 | plt.axis('off') 110 | plt.show() 111 | -------------------------------------------------------------------------------- /tests/additional_dictionary.txt: -------------------------------------------------------------------------------- 1 | Acknowledgements 2 | AddTransform 3 | AffineTransform 4 | al 5 | ANTSNeighborhoodCorrelation 6 | API 7 | app 8 | argmin 9 | Args 10 | atol 11 | aug 12 | ay 13 | az 14 | backgroundValue 15 | Baum 16 | behaviour 17 | Bérard 18 | BFGS 19 | Biancardi 20 | BinaryMorphologicalClosing 21 | BinaryMorphologicalOpening 22 | BinaryThreshold 23 | bio 24 | Biomechanics 25 | BMP 26 | booktabs 27 | Broyden 28 | bspline 29 | BSpline 30 | BSplineTransform 31 | BSplineTransformInitializer 32 | BSplineTransformInitializerFilter 33 | CastImageFilter 34 | CBCT 35 | ccc 36 | CenteredTransformInitializer 37 | CenteredTransformInitializerFilter 38 | Centre 39 | characterisation 40 | CheckerBoardImageFilter 41 | circ 42 | CIRS 43 | Clarysse 44 | Clin 45 | colour 46 | colourmap 47 | ComposeImageFilter 48 | condylar 49 | ConfidenceConnected 50 | ConjugateGradientLineSearch 51 | ConnectedComponentImageFilter 52 | ConnectedThreshold 53 | const 54 | convergenceMinimumValue 55 | convergenceWindowSize 56 | CREATIS 57 | cryosectioning 58 | cthead 59 | ctrl 60 | CTs 61 | DAPI 62 | dataframe 63 | debugOn 64 | Decubitus 65 | defaultPixelValue 66 | DemonsMetric 67 | DemonsRegistrationFilter 68 | dev 69 | DICOM 70 | Diff 71 | DiffeomorphicDemonsRegistrationFilter 72 | disp 73 | DisplacementField 74 | DisplacementFieldTransform 75 | displaystyle 76 | Docstring 77 | Docstrings 78 | documentclass 79 | doi 80 | Doxygen 81 | dropdown 82 | DTransform 83 | dy 84 | EachIteration 85 | eikonal 86 | EndEvent 87 | endospore 88 | endospores 89 | env 90 | estimateLearningRate 91 | euler 92 | ExhaustiveOptimizer 93 | ExpandImageFilter 94 | FastMarchingImageFilter 95 | FastSymmetricForcesDemonsRegistrationFilter 96 | faux 97 | fdata 98 | FFD 99 | FFDL 100 | FFDR 101 | FFF 102 | FFP 103 | FFS 104 | fiducials 105 | fiducial's 106 | FilterName 107 | FixedParameters 108 | FLE 109 | Flickr 110 | FlipImageFilter 111 | floordiv 112 | fp 113 | frac 114 | FRE 115 | FREs 116 | fronto 117 | GaborSource 118 | Geissbuehler 119 | GeodesicActiveContour 120 | geq 121 | GetArrayFromImage 122 | GetArrayViewFromImage 123 | GetCenter 124 | GetHeight 125 | GetImageFromArray 126 | GetInverse 127 | GetMetaData 128 | GetMetaDataKeys 129 | GetPixel 130 | ggplot 131 | GIF 132 | Goldfarb 133 | GradientDescent 134 | GradientDescentLineSearch 135 | greyscale 136 | gui 137 | HasMetaDataKey 138 | hausdorff 139 | HausdorffDistanceImageFilter 140 | HDF 141 | HDF5ImageIO 142 | Hein 143 | HFDL 144 | HFDR 145 | HFP 146 | HFS 147 | HKA 148 | homography 149 | honours 150 | Hounsfield 151 | HU 152 | ICCR 153 | ID's 154 | iff 155 | ImageFileReader 156 | ImageFileReader's 157 | ImageIO 158 | ImageIOs 159 | ImageJ 160 | ImageRegistrationMethod 161 | ImageSeriesReader 162 | img 163 | init 164 | initialNeighborhoodRadius 165 | inline 166 | inlined 167 | IntensityWindowingImageFilter 168 | interp 169 | interpolator 170 | InterpolatorEnum 171 | interpolators 172 | Interspeech 173 | IPython 174 | ipywidgets 175 | iso 176 | IterationEvent 177 | ITK 178 | ITK's 179 | ITKv 180 | Jaccard 181 | Jirapatnakul 182 | jn 183 | JoinSeries 184 | JointHistogram 185 | JointHistogramMutualInformation 186 | Joskowicz 187 | JPEG 188 | JPEGImageIO 189 | JPEGs 190 | jpg 191 | jupyter 192 | Jupyter 193 | 
jupyterlab 194 | Kamath 195 | LabelContourImageFilter 196 | labelForUndecidedPixels 197 | labelled 198 | labelling 199 | LabelMapContourOverlayImageFilter 200 | LabelOverlayImageFilter 201 | LabelShapeStatisticsImageFilter 202 | LabelToRGBImageFilter 203 | labextension 204 | LandmarkBasedTransformInitializer 205 | LaplacianSegmentation 206 | lapply 207 | Lasser 208 | LBFGS 209 | ldots 210 | learningRate 211 | Léon 212 | leq 213 | Lim 214 | Lingala 215 | linspace 216 | Linte 217 | LoadPrivateTagsOn 218 | Lobb 219 | LSMImageIO 220 | MacCallum 221 | MacOS 222 | Mahalanobis 223 | Malpica 224 | Marschner 225 | mathbb 226 | mathbf 227 | MATLAB 228 | MATLAB's 229 | matplotlib 230 | MattesMutualInformation 231 | Maurer 232 | MaximumEntropy 233 | MeanSquares 234 | meshgrid 235 | meshlab 236 | MetaDataDictionaryArrayUpdateOn 237 | MetaImageIO 238 | MetricEvaluate 239 | metricvalue 240 | mha 241 | minima 242 | multiscale 243 | myshow 244 | Narayanan 245 | Nayak 246 | nbagg 247 | nbextension 248 | nD 249 | NeighborhoodConnected 250 | Nelder 251 | NiftiImageIO 252 | nms 253 | np 254 | num 255 | numberOfIterations 256 | numberOfSteps 257 | numpy 258 | offline 259 | optimizerScales 260 | originalControlPointDisplacements 261 | originalDisplacements 262 | Orthop 263 | otsu 264 | Otsu's 265 | outlier 266 | outputDirection 267 | outputfile 268 | outputOrigin 269 | outputPixelType 270 | outputSpacing 271 | overcomplete 272 | overfit 273 | overfitting 274 | PairedPointDataManipulation 275 | param 276 | Photogrammetric 277 | pixelated 278 | PixelIDValueEnum 279 | plafond 280 | pn 281 | png 282 | PNG 283 | Popa 284 | popi 285 | POPI 286 | pre 287 | prefixi 288 | pretrained 289 | Proc 290 | py 291 | pyplot 292 | Pythonic 293 | qs 294 | ReadImage 295 | ReadImageInformation 296 | ReadTransform 297 | recognised 298 | referenceImage 299 | RegularStepGradientDescent 300 | Relat 301 | resample 302 | resampled 303 | ResampleImageFilter 304 | resamples 305 | resampling 306 | rgb 307 | RGB 308 | Rheumatol 309 | RIRE 310 | RLE 311 | roi 312 | ROIs 313 | Rueda 314 | sagittal 315 | Sarrut 316 | ScalarChanAndVese 317 | ScalarToRGBColormapImageFilter 318 | scaleFactors 319 | ScaleSkewVersor 320 | ScaleTransform 321 | ScaleVersor 322 | scipy 323 | segBlobs 324 | segChannel 325 | SEM 326 | SetAngle 327 | SetCenter 328 | SetDirection 329 | SetFileName 330 | SetFixedInitialTransform 331 | SetInitialTransform 332 | SetInitialTransformAsBSpline 333 | SetInterpolator 334 | SetMean 335 | SetMetricAsDemons 336 | SetMetricAsX 337 | SetMovingInitialTransform 338 | SetOptimizerAsConjugateGradientLineSearch 339 | SetOptimizerAsX 340 | SetOptimizerScalesFromIndexShift 341 | SetOptimizerScalesFromJacobian 342 | SetOptimizerScalesFromPhysicalShift 343 | SetOrigin 344 | SetParameters 345 | SetPixel 346 | SetProbability 347 | SetScale 348 | SetShrinkFactorsPerLevel 349 | SetSmoothingSigmasPerLevel 350 | SetSpacing 351 | SetStandardDeviation 352 | SetStandardDeviation 353 | ShapeDetection 354 | shrinkFactors 355 | SimpleITK 356 | SimpleITK's 357 | SimpleITKv 358 | sitk 359 | sitkAnnulus 360 | sitkBall 361 | sitkBlackmanWindowedSinc 362 | sitkBox 363 | sitkBSpline 364 | sitkComplexFloat 365 | sitkCosineWindowedSinc 366 | sitkCross 367 | sitkFloat 368 | sitkGaussian 369 | sitkHammingWindowedSinc 370 | sitkInt 371 | sitkLabelUInt 372 | sitkLanczosWindowedSinc 373 | sitkLinear 374 | sitkMultiResolutionIterationEvent 375 | sitkNearestNeighbor 376 | sitkUInt 377 | sitkUnknown 378 | sitkVectorFloat 379 | sitkVectorInt 380 | sitkVectorUInt 381 | 
sitkWelchWindowedSinc 382 | smoothingSigmas 383 | SmoothingSigmasAreSpecifiedInPhysicalUnitsOn 384 | spatio 385 | spc 386 | SPIE 387 | StartEvent 388 | StatisticsImageFilter 389 | stepLength 390 | subsampling 391 | Subsampling 392 | subtilis 393 | supersampling 394 | SymmetricForcesDemonsRegistrationFilter 395 | sz 396 | textrm 397 | tfm 398 | thetaX 399 | thetaY 400 | thetaZ 401 | Thirion 402 | ThresholdSegmentation 403 | ticklabels 404 | tidyr 405 | tif 406 | TIFFImageIO 407 | TileImageFilter 408 | timeit 409 | Toger 410 | toolbar 411 | Toutios 412 | tranforms 413 | TransformContinuousIndexToPhysicalPoint 414 | TransformPoint 415 | transform's 416 | TranslationTransform 417 | TRE 418 | TREs 419 | truediv 420 | ttest 421 | tx 422 | txt 423 | ty 424 | tz 425 | uint 426 | UInt 427 | usepackage 428 | Valgus 429 | Vandemeulebroucke 430 | Varus 431 | Vaz 432 | vdots 433 | VectorConfidenceConnected 434 | versor 435 | VersorRigid 436 | VersorTransform 437 | vertices 438 | VGG 439 | vm 440 | VolView 441 | voxel 442 | voxels 443 | voxel's 444 | VTKImageIO 445 | vx 446 | vy 447 | vz 448 | widgetsnbextension 449 | wikimedia 450 | WriteImage 451 | XC 452 | xn 453 | xpixels 454 | xtable 455 | XVth 456 | xx 457 | XX 458 | xxx 459 | xy 460 | xz 461 | Yaniv 462 | YCbCr 463 | yn 464 | ypixels 465 | yy 466 | YY 467 | yyy 468 | yz 469 | Zhu 470 | Zikri 471 | ZYX 472 | zz 473 | zzz 474 | -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | Introduction to SimpleITK - NIH Course 4 | 5 | 6 | 7 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 55 | 64 | 65 | 66 | 67 | 250 | 251 |

Overview

76 | 77 |

SimpleITK is a simplified programming interface to the algorithms and data structures of the Insight Toolkit (ITK). It supports bindings for multiple programming languages including C++, Python, R, Java, C#, Lua, Ruby and Tcl. These bindings enable scientists to develop image analysis workflows in the programming language they are most familiar with. The toolkit supports more than 15 different image file formats, provides over 280 image analysis filters, and implements a unified interface to the ITK intensity-based registration framework. The SimpleITK user base is rapidly growing, with more than 100,000 downloads of the Python bindings in the past year. Finally, by combining SimpleITK’s Python bindings with the Jupyter notebook web application, one can create an environment that facilitates collaborative and reproducible development of biomedical image analysis workflows.

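To give a flavor of the interface, here is a minimal Python sketch, not taken from the course material, that reads an image, smooths it, and writes the result (the file names are placeholders):

    import SimpleITK as sitk

    # Read an image (the format is deduced from the file extension), apply
    # edge-preserving smoothing, and write the result. The file names below
    # are placeholders, not files shipped with this repository.
    image = sitk.ReadImage('input.nii.gz')
    smoothed = sitk.CurvatureFlow(image, timeStep=0.125, numberOfIterations=5)
    sitk.WriteImage(smoothed, 'output.nii.gz')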

In this course, we will use a hands-on approach utilizing Jupyter notebooks to explore and experiment with various SimpleITK features in the Python programming language. Participants will follow along using the provided computers or their own laptops, enabling them to explore the effects of code changes and parameter settings not covered by the instructor. Examples using anatomical and microscopy images will highlight the various capabilities of the toolkit.

103 | 104 |

The course starts by introducing the toolkit’s two basic data elements, Images and Transformations. Combining the two, we illustrate how to perform image resampling and how to use SimpleITK components for image preparation and data augmentation in the context of deep learning. We then explore the features available in the toolkit’s registration framework and illustrate the use of a variety of SimpleITK filters for segmentation and segmentation evaluation. Finally, we illustrate how to use toolkit components to visualize the results of registration and segmentation.

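As a small taste of the resampling material, the sketch below rotates a synthetic image about its physical center; the image source and the 30 degree angle are illustrative choices, not values from the course notebooks:

    import numpy as np
    import SimpleITK as sitk

    # Create a synthetic 2D image and rotate it 30 degrees about its physical
    # center by resampling onto the original image grid.
    image = sitk.GaussianSource(sitk.sitkFloat32, size=[128, 128])
    center = image.TransformContinuousIndexToPhysicalPoint(
        [(sz - 1) / 2.0 for sz in image.GetSize()])
    transform = sitk.Euler2DTransform()
    transform.SetCenter(center)
    transform.SetAngle(np.pi / 6.0)
    rotated = sitk.Resample(image, image, transform, sitk.sitkLinear, 0.0,
                            image.GetPixelID())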

Beyond the notebooks used in this course, you can find the main SimpleITK notebooks repository on GitHub.

119 | 120 |

Instructors

121 |
    122 |
  • David T. Chen, Bioinformatics and Computational Biosciences Branch, NIAID and Medical Science & Computing LLC.
  • Bradley C. Lowekamp, Bioinformatics and Computational Biosciences Branch, NIAID and Medical Science & Computing LLC.
  • Ziv Yaniv, Bioinformatics and Computational Biosciences Branch, NIAID and Medical Science & Computing LLC.
126 | 127 |

Prerequisites

128 | 129 |

If you intend to work on your own laptop you will need to follow the instructions below; otherwise, all you need to do is show up for the course.

If you encounter problems or have questions, please post using this repository's GitHub issue reporting system (requires a GitHub user account).

137 | 138 |

In this course we will use the Anaconda Python distribution. Please follow the instructions below to set up the environment we will use during the course. All commands below are issued on the command line (Linux/Mac - terminal, Windows - Anaconda Prompt).

144 | 145 |
    146 |
  1. Download and install the Fiji image viewer. This is the default image viewer used by SimpleITK:
     • On Windows: Install into your user directory (e.g. C:\Users\[your_user_name]\).
     • On Linux: Install into ~/bin/.
     • On Mac: Install into /Applications/.
  2. Download and install the most recent version of Anaconda for your operating system. We assume it is installed in a directory named anaconda3. Regardless of the installer, we will be working with Python 3.7.
  3. Activate the base conda environment:
     • On Windows: open the Anaconda Prompt (found under the Anaconda3 start menu).
     • On Linux/Mac: on the command line
       source path_to_anaconda3/bin/activate base
  4. Update the base anaconda environment and install the git version control system into it:
     conda update conda
     conda update anaconda
     conda install git
  5. Clone this repository:
     git clone https://github.com/SimpleITK/NIH2019_COURSE.git
  6. Create the virtual environment containing all packages required for the course:
     conda env create -f NIH2019_COURSE/environment.yml
  7. Activate the virtual environment:
     • On Windows: open the Anaconda Prompt (found under the Anaconda3 start menu)
       conda activate sitkpyNIH19
     • On Linux/Mac: on the command line
       source path_to_anaconda3/bin/activate sitkpyNIH19
  8. Go over the setup notebook (requires internet connectivity). This notebook checks the environment setup and downloads all of the required data. You can either go the traditional route, Jupyter Notebook (guaranteed to work), or the modern route, JupyterLab (should work):
     cd NIH2019_COURSE
     jupyter notebook setup.ipynb
211 | 212 | 213 | 214 |

Program

215 |

Click the launch binder button to try things out without installing (some display functions will not work).

218 | 219 |
    220 |
  • [9:30AM - 10:45AM] History and overview [ppt]. Fundamentals [ppt]: spatial transformations, images and resampling.
  • [10:45AM - 11:00AM] Break.
  • [11:00AM - 12:30PM] Data augmentation for deep learning. Registration: basic and advanced.
  • [12:30PM - 1:15PM] Lunch.
  • [1:15PM - 2:30PM] Registration: example workflow. Segmentation: example workflow.
  • [2:30PM - 2:45PM] Break.
  • [2:45PM - 4:00PM] Segmentation evaluation. Results visualization. Notebook development and testing [ppt].
228 | 229 |

Supplementary Material

230 | 231 |

For those interested in reading more about SimpleITK (Python and beyond):

  • R. Beare, B. Lowekamp, Z. Yaniv, "Image Segmentation, Registration and Characterization in R with SimpleITK", Journal of Statistical Software, 86(8), 2018.
  • Z. Yaniv, B. C. Lowekamp, H. J. Johnson, R. Beare, "SimpleITK Image-Analysis Notebooks: a Collaborative Environment for Education and Reproducible Research", Journal of Digital Imaging, 31(3): 290-303, 2018.
  • B. Lowekamp, D. Chen, L. Ibanez, D. Blezek, "The Design of SimpleITK", Frontiers in Neuroinformatics, 7:45, 2013.

If you find that SimpleITK has been useful in your research, you can cite it via citations.bib.
252 | 253 | 254 | -------------------------------------------------------------------------------- /utilities.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | popi_body_label = 0 5 | popi_air_label = 1 6 | popi_lung_label = 2 7 | 8 | def read_POPI_points(file_name): 9 | """ 10 | Read the Point-validated Pixel-based Breathing Thorax Model (POPI) landmark points file. 11 | The file is an ASCII file with X Y Z coordinates in each line and the first line is a header. 12 | 13 | Args: 14 | file_name: full path to the file. 15 | Returns: 16 | (list(tuple)): List of points as tuples. 17 | """ 18 | with open(file_name,'r') as fp: 19 | lines = fp.readlines() 20 | points = [] 21 | # First line in the file is #X Y Z which we ignore. 22 | for line in lines[1:]: 23 | coordinates = line.split() 24 | if coordinates: 25 | points.append((float(coordinates[0]), float(coordinates[1]), float(coordinates[2]))) 26 | return points 27 | 28 | 29 | def point2str(point, precision=1): 30 | """ 31 | Format a point for printing, based on specified precision with trailing zeros. Uniform printing for vector-like data 32 | (tuple, numpy array, list). 33 | 34 | Args: 35 | point (vector-like): nD point with floating point coordinates. 36 | precision (int): Number of digits after the decimal point. 37 | Return: 38 | String represntation of the given point "xx.xxx yy.yyy zz.zzz...". 39 | """ 40 | return ' '.join(format(c, '.{0}f'.format(precision)) for c in point) 41 | 42 | 43 | def uniform_random_points(bounds, num_points): 44 | """ 45 | Generate random (uniform withing bounds) nD point cloud. Dimension is based on the number of pairs in the bounds input. 46 | 47 | Args: 48 | bounds (list(tuple-like)): list where each tuple defines the coordinate bounds. 49 | num_points (int): number of points to generate. 50 | 51 | Returns: 52 | list containing num_points numpy arrays whose coordinates are within the given bounds. 53 | """ 54 | internal_bounds = [sorted(b) for b in bounds] 55 | # Generate rows for each of the coordinates according to the given bounds, stack into an array, 56 | # and split into a list of points. 57 | mat = np.vstack([np.random.uniform(b[0], b[1], num_points) for b in internal_bounds]) 58 | return list(mat[:len(bounds)].T) 59 | 60 | 61 | def target_registration_errors(tx, point_list, reference_point_list, 62 | display_errors = False, min_err= None, max_err=None, figure_size=(8,6)): 63 | """ 64 | Distances between points transformed by the given transformation and their 65 | location in another coordinate system. When the points are only used to 66 | evaluate registration accuracy (not used in the registration) this is the 67 | Target Registration Error (TRE). 68 | 69 | Args: 70 | tx (SimpleITK.Transform): The transform we want to evaluate. 71 | point_list (list(tuple-like)): Points in fixed image 72 | cooredinate system. 73 | reference_point_list (list(tuple-like)): Points in moving image 74 | cooredinate system. 75 | display_errors (boolean): Display a 3D figure with the points from 76 | point_list color corresponding to the error. 77 | min_err, max_err (float): color range is linearly stretched between min_err 78 | and max_err. If these values are not given then 79 | the range of errors computed from the data is used. 80 | figure_size (tuple): Figure size in inches. 81 | 82 | Returns: 83 | (errors) [float]: list of TRE values. 
84 | """ 85 | transformed_point_list = [tx.TransformPoint(p) for p in point_list] 86 | 87 | errors = [np.linalg.norm(np.array(p_fixed) - np.array(p_moving)) 88 | for p_fixed,p_moving in zip(transformed_point_list, reference_point_list)] 89 | if display_errors: 90 | from mpl_toolkits.mplot3d import Axes3D 91 | import matplotlib.pyplot as plt 92 | import matplotlib 93 | fig = plt.figure(figsize=figure_size) 94 | ax = fig.add_subplot(111, projection='3d') 95 | if not min_err: 96 | min_err = np.min(errors) 97 | if not max_err: 98 | max_err = np.max(errors) 99 | 100 | collection = ax.scatter(list(np.array(point_list).T)[0], 101 | list(np.array(point_list).T)[1], 102 | list(np.array(point_list).T)[2], 103 | marker = 'o', 104 | c = errors, 105 | vmin = min_err, 106 | vmax = max_err, 107 | cmap = matplotlib.cm.hot, 108 | label = 'original points') 109 | plt.colorbar(collection, shrink=0.8) 110 | plt.title('registration errors in mm', x=0.7, y=1.05) 111 | ax.set_xlabel('X') 112 | ax.set_ylabel('Y') 113 | ax.set_zlabel('Z') 114 | plt.show() 115 | 116 | return errors 117 | 118 | 119 | 120 | def print_transformation_differences(tx1, tx2): 121 | """ 122 | Check whether two transformations are "equivalent" in an arbitrary spatial region 123 | either 3D or 2D, [x=(-10,10), y=(-100,100), z=(-1000,1000)]. This is just a sanity check, 124 | as we are just looking at the effect of the transformations on a random set of points in 125 | the region. 126 | """ 127 | if tx1.GetDimension()==2 and tx2.GetDimension()==2: 128 | bounds = [(-10,10),(-100,100)] 129 | elif tx1.GetDimension()==3 and tx2.GetDimension()==3: 130 | bounds = [(-10,10),(-100,100), (-1000,1000)] 131 | else: 132 | raise ValueError('Transformation dimensions mismatch, or unsupported transformation dimensionality') 133 | num_points = 10 134 | point_list = uniform_random_points(bounds, num_points) 135 | tx1_point_list = [ tx1.TransformPoint(p) for p in point_list] 136 | differences = target_registration_errors(tx2, point_list, tx1_point_list) 137 | print('Differences - min: {:.2f}, max: {:.2f}, mean: {:.2f}, std: {:.2f}'.format(np.min(differences), np.max(differences), np.mean(differences), np.std(differences))) 138 | 139 | 140 | def display_displacement_scaling_effect(s, original_x_mat, original_y_mat, tx, original_control_point_displacements): 141 | """ 142 | This function displays the effects of the deformable transformation on a grid of points by scaling the 143 | initial displacements (either of control points for BSpline or the deformation field itself). It does 144 | assume that all points are contained in the range(-2.5,-2.5), (2.5,2.5). 145 | """ 146 | if tx.GetDimension() !=2: 147 | raise ValueError('display_displacement_scaling_effect only works in 2D') 148 | 149 | plt.scatter(original_x_mat, 150 | original_y_mat, 151 | marker='o', 152 | color='blue', label='original points') 153 | pointsX = [] 154 | pointsY = [] 155 | tx.SetParameters(s*original_control_point_displacements) 156 | 157 | for index, value in np.ndenumerate(original_x_mat): 158 | px,py = tx.TransformPoint((value, original_y_mat[index])) 159 | pointsX.append(px) 160 | pointsY.append(py) 161 | 162 | plt.scatter(pointsX, 163 | pointsY, 164 | marker='^', 165 | color='red', label='transformed points') 166 | plt.legend(loc=(0.25,1.01)) 167 | plt.xlim((-2.5,2.5)) 168 | plt.ylim((-2.5,2.5)) 169 | 170 | 171 | def parameter_space_regular_grid_sampling(*transformation_parameters): 172 | ''' 173 | Create a list representing a regular sampling of the parameter space. 
174 | Args: 175 | *transformation_paramters : two or more numpy ndarrays representing parameter values. The order 176 | of the arrays should match the ordering of the SimpleITK transformation 177 | parameterization (e.g. Similarity2DTransform: scaling, rotation, tx, ty) 178 | Return: 179 | List of lists representing the regular grid sampling. 180 | 181 | Examples: 182 | #parameterization for 2D translation transform (tx,ty): [[1.0,1.0], [1.5,1.0], [2.0,1.0]] 183 | >>>> parameter_space_regular_grid_sampling(np.linspace(1.0,2.0,3), np.linspace(1.0,1.0,1)) 184 | ''' 185 | return [[np.asscalar(p) for p in parameter_values] 186 | for parameter_values in np.nditer(np.meshgrid(*transformation_parameters))] 187 | 188 | 189 | def similarity3D_parameter_space_regular_sampling(thetaX, thetaY, thetaZ, tx, ty, tz, scale): 190 | ''' 191 | Create a list representing a regular sampling of the 3D similarity transformation parameter space. As the 192 | SimpleITK rotation parameterization uses the vector portion of a versor we don't have an 193 | intuitive way of specifying rotations. We therefor use the ZYX Euler angle parametrization and convert to 194 | versor. 195 | Args: 196 | thetaX, thetaY, thetaZ: numpy ndarrays with the Euler angle values to use. 197 | tx, ty, tz: numpy ndarrays with the translation values to use. 198 | scale: numpy array with the scale values to use. 199 | Return: 200 | List of lists representing the parameter space sampling (vx,vy,vz,tx,ty,tz,s). 201 | ''' 202 | return [list(eul2quat(parameter_values[0],parameter_values[1], parameter_values[2])) + 203 | [np.asscalar(p) for p in parameter_values[3:]] for parameter_values in np.nditer(np.meshgrid(thetaX, thetaY, thetaZ, tx, ty, tz, scale))] 204 | 205 | 206 | def eul2quat(ax, ay, az, atol=1e-8): 207 | ''' 208 | Translate between Euler angle (ZYX) order and quaternion representation of a rotation. 209 | Args: 210 | ax: X rotation angle in radians. 211 | ay: Y rotation angle in radians. 212 | az: Z rotation angle in radians. 213 | atol: tolerance used for stable quaternion computation (qs==0 within this tolerance). 214 | Return: 215 | Numpy array with three entries representing the vectorial component of the quaternion. 216 | 217 | ''' 218 | # Create rotation matrix using ZYX Euler angles and then compute quaternion using entries. 
219 | cx = np.cos(ax) 220 | cy = np.cos(ay) 221 | cz = np.cos(az) 222 | sx = np.sin(ax) 223 | sy = np.sin(ay) 224 | sz = np.sin(az) 225 | r=np.zeros((3,3)) 226 | r[0,0] = cz*cy 227 | r[0,1] = cz*sy*sx - sz*cx 228 | r[0,2] = cz*sy*cx+sz*sx 229 | 230 | r[1,0] = sz*cy 231 | r[1,1] = sz*sy*sx + cz*cx 232 | r[1,2] = sz*sy*cx - cz*sx 233 | 234 | r[2,0] = -sy 235 | r[2,1] = cy*sx 236 | r[2,2] = cy*cx 237 | 238 | # Compute quaternion: 239 | qs = 0.5*np.sqrt(r[0,0] + r[1,1] + r[2,2] + 1) 240 | qv = np.zeros(3) 241 | # If the scalar component of the quaternion is close to zero, we 242 | # compute the vector part using a numerically stable approach 243 | if np.isclose(qs,0.0,atol): 244 | i= np.argmax([r[0,0], r[1,1], r[2,2]]) 245 | j = (i+1)%3 246 | k = (j+1)%3 247 | w = np.sqrt(r[i,i] - r[j,j] - r[k,k] + 1) 248 | qv[i] = 0.5*w 249 | qv[j] = (r[i,j] + r[j,i])/(2*w) 250 | qv[k] = (r[i,k] + r[k,i])/(2*w) 251 | else: 252 | denom = 4*qs 253 | qv[0] = (r[2,1] - r[1,2])/denom; 254 | qv[1] = (r[0,2] - r[2,0])/denom; 255 | qv[2] = (r[1,0] - r[0,1])/denom; 256 | return qv 257 | -------------------------------------------------------------------------------- /downloaddata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Since we do not want to store large binary data files in our Git repository, 5 | we fetch_data_all from a network resource. 6 | 7 | The data we download is described in a json file. The file format is a dictionary 8 | of dictionaries. The top level key is the file name. The returned dictionary 9 | contains a sha512 checksum and possibly a url and boolean flag indicating 10 | the file is part of an archive. The sha512 checksum is mandatory. 11 | When the optional url is given, we attempt to download from that url, otherwise 12 | we attempt to download from the list of servers returned by the 13 | get_servers() function. Files that are contained in archives are 14 | identified by the archive flag. 15 | 16 | Example json file contents: 17 | 18 | { 19 | "SimpleITK.jpg": { 20 | "sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46" 21 | }, 22 | "POPI/meta/00-P.mhd": { 23 | "url": "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar", 24 | "archive": "true", 25 | "sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc" 26 | }, 27 | "CIRS057A_MR_CT_DICOM/readme.txt": { 28 | "archive": "true", 29 | "sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef" 30 | } 31 | } 32 | 33 | Notes: 34 | 1. The file we download can be inside an archive. In this case, the sha512 35 | checksum is that of the archive. 36 | 37 | """ 38 | 39 | import hashlib 40 | import sys 41 | import os 42 | import json 43 | 44 | import errno 45 | import warnings 46 | 47 | # http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook 48 | 49 | def url_download_report(bytes_so_far, url_download_size, total_size): 50 | percent = float(bytes_so_far) / total_size 51 | percent = round(percent * 100, 2) 52 | if bytes_so_far > url_download_size: 53 | # Note that the carriage return is at the begining of the 54 | # string and not the end. This accomodates usage in 55 | # IPython usage notebooks. Otherwise the string is not 56 | # displayed in the output. 
57 | sys.stdout.write("\rDownloaded %d of %d bytes (%0.2f%%)" % 58 | (bytes_so_far, total_size, percent)) 59 | sys.stdout.flush() 60 | if bytes_so_far >= total_size: 61 | sys.stdout.write("\rDownloaded %d of %d bytes (%0.2f%%)\n" % 62 | (bytes_so_far, total_size, percent)) 63 | sys.stdout.flush() 64 | 65 | 66 | def url_download_read(url, outputfile, url_download_size=8192 * 2, report_hook=None): 67 | # Use the urllib2 to download the data. The Requests package, highly 68 | # recommended for this task, doesn't support the file scheme so we opted 69 | # for urllib2 which does. 70 | 71 | try: 72 | # Python 3 73 | from urllib.request import urlopen, URLError, HTTPError 74 | except ImportError: 75 | from urllib2 import urlopen, URLError, HTTPError 76 | from xml.dom import minidom 77 | 78 | # Open the url 79 | try: 80 | url_response = urlopen(url) 81 | except HTTPError as e: 82 | return "HTTP Error: {0} {1}\n".format(e.code, url) 83 | except URLError as e: 84 | return "URL Error: {0} {1}\n".format(e.reason, url) 85 | 86 | # We download all content types - the assumption is that the sha512 ensures 87 | # that what we received is the expected data. 88 | try: 89 | # Python 3 90 | content_length = url_response.info().get("Content-Length") 91 | except AttributeError: 92 | content_length = url_response.info().getheader("Content-Length") 93 | total_size = content_length.strip() 94 | total_size = int(total_size) 95 | bytes_so_far = 0 96 | with open(outputfile, "wb") as local_file: 97 | while 1: 98 | try: 99 | url_download = url_response.read(url_download_size) 100 | bytes_so_far += len(url_download) 101 | if not url_download: 102 | break 103 | local_file.write(url_download) 104 | # handle errors 105 | except HTTPError as e: 106 | return "HTTP Error: {0} {1}\n".format(e.code, url) 107 | except URLError as e: 108 | return "URL Error: {0} {1}\n".format(e.reason, url) 109 | if report_hook: 110 | report_hook(bytes_so_far, url_download_size, total_size) 111 | return "Downloaded Successfully" 112 | 113 | # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python?rq=1 114 | def mkdir_p(path): 115 | try: 116 | os.makedirs(path) 117 | except OSError as exc: # Python >2.5 118 | if exc.errno == errno.EEXIST and os.path.isdir(path): 119 | pass 120 | else: 121 | raise 122 | 123 | #http://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically 124 | def deprecated(func): 125 | """This is a decorator which can be used to mark functions 126 | as deprecated. 
It will result in a warning being emmitted 127 | when the function is used.""" 128 | 129 | def new_func(*args, **kwargs): 130 | warnings.simplefilter('always', DeprecationWarning) #turn off filter 131 | warnings.warn("Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2) 132 | warnings.simplefilter('default', DeprecationWarning) #reset filter 133 | return func(*args, **kwargs) 134 | 135 | new_func.__name__ = func.__name__ 136 | new_func.__doc__ = func.__doc__ 137 | new_func.__dict__.update(func.__dict__) 138 | return new_func 139 | 140 | def get_servers(): 141 | import os 142 | servers = list() 143 | # NIAID S3 data store 144 | servers.append( "https://s3.amazonaws.com/simpleitk/public/notebooks/SHA512/%(hash)" ) 145 | # Girder server hosted by kitware 146 | servers.append("https://data.kitware.com/api/v1/file/hashsum/sha512/%(hash)/download") 147 | # Local file store 148 | if 'ExternalData_OBJECT_STORES' in os.environ.keys(): 149 | local_object_stores = os.environ['ExternalData_OBJECT_STORES'] 150 | for local_object_store in local_object_stores.split(";"): 151 | servers.append( "file://{0}/SHA512/%(hash)".format(local_object_store) ) 152 | return servers 153 | 154 | 155 | def output_hash_is_valid(known_sha512, output_file): 156 | sha512 = hashlib.sha512() 157 | if not os.path.exists(output_file): 158 | return False 159 | with open(output_file, 'rb') as fp: 160 | for url_download in iter(lambda: fp.read(128 * sha512.block_size), b''): 161 | sha512.update(url_download) 162 | retreived_sha512 = sha512.hexdigest() 163 | return retreived_sha512 == known_sha512 164 | 165 | 166 | def fetch_data_one(onefilename, output_directory, manifest_file, verify=True, force=False): 167 | import tarfile, zipfile 168 | 169 | with open(manifest_file, 'r') as fp: 170 | manifest = json.load(fp) 171 | assert onefilename in manifest, "ERROR: {0} does not exist in {1}".format(onefilename, manifest_file) 172 | 173 | sys.stdout.write("Fetching {0}\n".format(onefilename)) 174 | output_file = os.path.realpath(os.path.join(output_directory, onefilename)) 175 | data_dictionary = manifest[onefilename] 176 | sha512 = data_dictionary['sha512'] 177 | # List of places where the file can be downloaded from 178 | all_urls = [] 179 | for url_base in get_servers(): 180 | all_urls.append(url_base.replace("%(hash)", sha512)) 181 | if "url" in data_dictionary: 182 | all_urls.append(data_dictionary["url"]) 183 | 184 | new_download = False 185 | 186 | for url in all_urls: 187 | # Only download if force is true or the file does not exist. 188 | if force or not os.path.exists(output_file): 189 | mkdir_p(os.path.dirname(output_file)) 190 | url_download_read(url, output_file, report_hook=url_download_report) 191 | # Check if a file was downloaded and has the correct hash 192 | if output_hash_is_valid(sha512, output_file): 193 | new_download = True 194 | # Stop looking once found 195 | break 196 | # If the file exists this means the hash is invalid we have a problem. 197 | elif os.path.exists(output_file): 198 | error_msg = "File " + output_file 199 | error_msg += " has incorrect hash value, " + sha512 + " was expected." 200 | raise Exception(error_msg) 201 | 202 | # Did not find the file anywhere. 
203 | if not os.path.exists(output_file): 204 | error_msg = "File " + "\'" + os.path.basename(output_file) +"\'" 205 | error_msg += " could not be found in any of the following locations:\n" 206 | error_msg += ", ".join(all_urls) 207 | raise Exception(error_msg) 208 | 209 | if not new_download and verify: 210 | # If the file was part of an archive then we don't verify it. These 211 | # files are only verfied on download 212 | if ( not "archive" in data_dictionary) and ( not output_hash_is_valid(sha512, output_file) ): 213 | # Attempt to download if sha512 is incorrect. 214 | fetch_data_one(onefilename, output_directory, manifest_file, verify, 215 | force=True) 216 | # If the file is in an archive, unpack it. 217 | if tarfile.is_tarfile(output_file) or zipfile.is_zipfile(output_file): 218 | tmp_output_file = output_file + ".tmp" 219 | os.rename(output_file, tmp_output_file) 220 | if tarfile.is_tarfile(tmp_output_file): 221 | archive = tarfile.open(tmp_output_file) 222 | if zipfile.is_zipfile(tmp_output_file): 223 | archive = zipfile.ZipFile(tmp_output_file, 'r') 224 | archive.extractall(os.path.dirname(tmp_output_file)) 225 | archive.close() 226 | os.remove(tmp_output_file) 227 | 228 | return output_file 229 | 230 | 231 | def fetch_data_all(output_directory, manifest_file, verify=True): 232 | with open(manifest_file, 'r') as fp: 233 | manifest = json.load(fp) 234 | for filename in manifest: 235 | fetch_data_one(filename, output_directory, manifest_file, verify, 236 | force=False) 237 | 238 | def fetch_data(cache_file_name, verify=False, cache_directory_name="data"): 239 | """ 240 | fetch_data is a simplified interface that requires 241 | relative pathing with a manifest.json file located in the 242 | same cache_directory_name name. 243 | 244 | By default the cache_directory_name is "Data" relative to the current 245 | python script. An absolute path can also be given. 246 | """ 247 | if not os.path.isabs(cache_directory_name): 248 | cache_root_directory_name = os.path.dirname(__file__) 249 | cache_directory_name = os.path.join(cache_root_directory_name, cache_directory_name) 250 | cache_manifest_file = os.path.join(cache_directory_name, 'manifest.json') 251 | assert os.path.exists(cache_manifest_file), "ERROR, {0} does not exist".format(cache_manifest_file) 252 | return fetch_data_one(cache_file_name, cache_directory_name, cache_manifest_file, verify=verify) 253 | 254 | 255 | if __name__ == '__main__': 256 | 257 | 258 | if len(sys.argv) < 3: 259 | print('Usage: ' + sys.argv[0] + ' output_directory manifest.json') 260 | sys.exit(1) 261 | output_directory = sys.argv[1] 262 | if not os.path.exists(output_directory): 263 | os.makedirs(output_directory) 264 | manifest = sys.argv[2] 265 | fetch_data_all(output_directory, manifest) 266 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 
14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /tests/test_notebooks.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import tempfile 4 | import nbformat 5 | import pytest 6 | import markdown 7 | import re 8 | 9 | from enchant.checker import SpellChecker 10 | from enchant.tokenize import Filter, EmailFilter, URLFilter 11 | from enchant import DictWithPWL 12 | 13 | from lxml.html import document_fromstring, etree 14 | try: 15 | # Python 3 16 | from urllib.request import urlopen, URLError 17 | except ImportError: 18 | from urllib2 import urlopen, URLError 19 | 20 | 21 | 22 | """ 23 | run all tests: 24 | pytest -v --tb=short 25 | 26 | run python tests: 27 | pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook 28 | 29 | run specific Python test: 30 | pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook[setup.ipynb] 31 | 32 | -s : disable all capturing of output. 33 | """ 34 | 35 | class Test_notebooks(object): 36 | """ 37 | Testing of SimpleITK Jupyter notebooks: 38 | 1. Static analysis: 39 | Check that notebooks do not contain output (sanity check as these should 40 | not have been pushed to the repository). 41 | Check that all the URLs in the markdown cells are not broken. 42 | 2. Dynamic analysis: 43 | Run the notebook and check for errors. In some notebooks we 44 | intentionally cause errors to illustrate certain features of the toolkit. 45 | All code cells that intentionally generate an error are expected to be 46 | marked using the cell's metadata. In the notebook go to 47 | "View->Cell Toolbar->Edit Metadata" and add the following JSON entry: 48 | 49 | "simpleitk_error_expected": simpleitk_error_message 50 | 51 | with the appropriate "simpleitk_error_message" text. 52 | Cells where an error is allowed, but not necessarily expected, should be 53 | marked with the following JSON: 54 | 55 | "simpleitk_error_allowed": simpleitk_error_message 56 | 57 | The simpleitk_error_message is a substring of the generated error 58 | message, such as 'Exception thrown in SimpleITK Show:' 59 | 60 | To test notebooks that use too much memory (exceed the 4GB allocated for the testing 61 | machine): 62 | 1. Create an environment variable named SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT 63 | 2. Import the setup_for_testing.py at the top of the notebook. This module will 64 | decorate sitk.ReadImage so that after reading the initial image it is 65 | resampled by a factor of 4 in each dimension. 66 | 67 | Adding a test: 68 | Simply add the new notebook file name to the list of files decorating the test_python_notebook 69 | or test_r_notebook functions. DON'T FORGET THE COMMA.
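    (Illustrative aside, not part of the original docstring: in the raw .ipynb
    JSON, a cell marked this way carries metadata roughly like

        "metadata": {"simpleitk_error_expected": "Exception thrown in SimpleITK Show:"}

    reusing the error-message substring convention described above.)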
70 | """ 71 | 72 | _allowed_error_markup = 'simpleitk_error_allowed' 73 | _expected_error_markup = 'simpleitk_error_expected' 74 | 75 | @pytest.mark.parametrize('notebook_file_name', 76 | ['setup.ipynb', 77 | '01_spatial_transformations.ipynb', 78 | '02_images_and_resampling.ipynb', 79 | '03_data_augmentation.ipynb', 80 | '04_basic_registration.ipynb', 81 | '05_advanced_registration.ipynb', 82 | '06_registration_application.ipynb', 83 | pytest.param('07_segmentation_and_shape_analysis.ipynb', marks=pytest.mark.skipif(os.environ.get('CIRCLECI')=='true', \ 84 | reason="runtime too long for CircleCI")), 85 | '08_segmentation_evaluation.ipynb', 86 | '09_results_visualization.ipynb']) 87 | def test_python_notebook(self, notebook_file_name): 88 | self.evaluate_notebook(self.absolute_path_python(notebook_file_name), 'python') 89 | 90 | 91 | def evaluate_notebook(self, path, kernel_name): 92 | """ 93 | Perform static and dynamic analysis of the notebook. 94 | Execute a notebook via nbconvert and print the results of the test (errors etc.) 95 | Args: 96 | path (string): Name of notebook to run. 97 | kernel_name (string): Which jupyter kernel to use to run the test. 98 | Relevant values are: 'python2', 'python3', 'ir'. 99 | """ 100 | 101 | dir_name, file_name = os.path.split(path) 102 | if dir_name: 103 | os.chdir(dir_name) 104 | 105 | print('-------- begin (kernel {0}) {1} --------'.format(kernel_name,file_name)) 106 | no_static_errors = self.static_analysis(path) 107 | no_dynamic_errors = self.dynamic_analysis(path, kernel_name) 108 | print('-------- end (kernel {0}) {1} --------'.format(kernel_name,file_name)) 109 | assert(no_static_errors and no_dynamic_errors) 110 | 111 | 112 | def static_analysis(self, path): 113 | """ 114 | Perform static analysis of the notebook. 115 | Read the notebook and check that there is no output and that the links 116 | in the markdown cells are not broken. 117 | Args: 118 | path (string): Name of notebook. 119 | Return: 120 | boolean: True if static analysis succeeded, otherwise False. 121 | """ 122 | 123 | nb = nbformat.read(path, nbformat.current_nbformat) 124 | 125 | ####################### 126 | # Check that the notebook does not contain output from code cells 127 | # (should not be in the repository, but well...). 128 | ####################### 129 | no_unexpected_output = True 130 | 131 | # Check that the cell dictionary has an 'outputs' key and that it is 132 | # empty, relies on Python using short circuit evaluation so that we 133 | # don't get KeyError when retrieving the 'outputs' entry. 134 | cells_with_output = [c.source for c in nb.cells if 'outputs' in c and c.outputs] 135 | if cells_with_output: 136 | no_unexpected_output = False 137 | print('Cells with unexpected output:\n_____________________________') 138 | for cell in cells_with_output: 139 | print(cell+'\n---') 140 | else: 141 | print('no unexpected output') 142 | 143 | ####################### 144 | # Check that all the links in the markdown cells are valid/accessible. 145 | ####################### 146 | no_broken_links = True 147 | 148 | cells_and_broken_links = [] 149 | for c in nb.cells: 150 | if c.cell_type == 'markdown': 151 | html_tree = document_fromstring(markdown.markdown(c.source)) 152 | broken_links = [] 153 | # iterlinks() returns tuples of the form (element, attribute, link, pos) 154 | for document_link in html_tree.iterlinks(): 155 | try: 156 | if 'http' not in document_link[2]: # Local file. 157 | url = 'file://' + os.path.abspath(document_link[2]) 158 | else: # Remote file.
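# (Aside, a suggestion rather than current behavior: urlopen issues a full
# GET request; for link checking alone, a HEAD request created via
# urllib.request.Request(url, method='HEAD') would avoid downloading the
# response body.)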
159 | url = document_link[2] 160 | urlopen(url) 161 | except URLError: 162 | broken_links.append(url) 163 | if broken_links: 164 | cells_and_broken_links.append((broken_links,c.source)) 165 | if cells_and_broken_links: 166 | no_broken_links = False 167 | print('Cells with broken links:\n________________________') 168 | for links, cell in cells_and_broken_links: 169 | print(cell+'\n') 170 | print('\tBroken links:') 171 | print('\t'+'\n\t'.join(links)+'\n---') 172 | else: 173 | print('no broken links') 174 | 175 | ####################### 176 | # Spell check all markdown cells and comments in code cells using the pyenchant spell checker. 177 | ####################### 178 | no_spelling_mistakes = True 179 | simpleitk_notebooks_dictionary = DictWithPWL('en_US', os.path.join(os.path.dirname(os.path.abspath(__file__)), 180 | 'additional_dictionary.txt')) 181 | spell_checker = SpellChecker(simpleitk_notebooks_dictionary, filters = [EmailFilter, URLFilter]) 182 | cells_and_spelling_mistakes = [] 183 | for c in nb.cells: 184 | spelling_mistakes = [] 185 | if c.cell_type == 'markdown': 186 | # Get the text as a string from the html without the markup which is replaced by space. 187 | spell_checker.set_text(' '.join(etree.XPath('//text()')(document_fromstring(markdown.markdown(c.source))))) 188 | elif c.cell_type == 'code': 189 | # Get all the comments and concatenate them into a single string separated by newlines. 190 | comment_lines = re.findall('#+.*',c.source) 191 | spell_checker.set_text('\n'.join(comment_lines)) 192 | for error in spell_checker: 193 | error_message = 'error: '+ '\'' + error.word +'\', ' + 'suggestions: ' + str(spell_checker.suggest()) 194 | spelling_mistakes.append(error_message) 195 | if spelling_mistakes: 196 | cells_and_spelling_mistakes.append((spelling_mistakes, c.source)) 197 | if cells_and_spelling_mistakes: 198 | no_spelling_mistakes = False 199 | print('Cells with spelling mistakes:\n________________________') 200 | for misspelled_words, cell in cells_and_spelling_mistakes: 201 | print(cell+'\n') 202 | print('\tMisspelled words and suggestions:') 203 | print('\t'+'\n\t'.join(misspelled_words)+'\n---') 204 | else: 205 | print('no spelling mistakes') 206 | 207 | return(no_unexpected_output and no_broken_links and no_spelling_mistakes) 208 | 209 | 210 | def dynamic_analysis(self, path, kernel_name): 211 | """ 212 | Perform dynamic analysis of the notebook. 213 | Execute a notebook via nbconvert and print the results of the test 214 | (errors etc.) 215 | Args: 216 | path (string): Name of notebook to run. 217 | kernel_name (string): Which jupyter kernel to use to run the test. 218 | Relevant values are:'python', 'ir'. 219 | Return: 220 | boolean: True if dynamic analysis succeeded, otherwise False. 221 | """ 222 | 223 | # Execute the notebook and allow errors (run all cells), output is 224 | # written to a temporary file which is automatically deleted. 
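# For reference, an equivalent manual run from a shell looks roughly like the
# following (illustrative; the flags mirror the subprocess arguments below,
# the file names are hypothetical):
#   jupyter nbconvert --to notebook --execute \
#     --ExecutePreprocessor.kernel_name=python \
#     --ExecutePreprocessor.allow_errors=True \
#     --ExecutePreprocessor.timeout=600 \
#     --output executed.ipynb my_notebook.ipynb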
225 | with tempfile.NamedTemporaryFile(suffix='.ipynb') as fout: 226 | args = ['jupyter', 'nbconvert', 227 | '--to', 'notebook', 228 | '--execute', 229 | '--ExecutePreprocessor.kernel_name='+kernel_name, 230 | '--ExecutePreprocessor.allow_errors=True', 231 | '--ExecutePreprocessor.timeout=600', # seconds till timeout 232 | '--output', fout.name, path] 233 | subprocess.check_call(args) 234 | nb = nbformat.read(fout.name, nbformat.current_nbformat) 235 | 236 | # Get all of the unexpected errors (logic: cell has output with an error 237 | # and no error is expected or the allowed/expected error is not the one which 238 | # was generated.) 239 | unexpected_errors = [(output.evalue, c.source) for c in nb.cells \ 240 | if 'outputs' in c for output in c.outputs \ 241 | if (output.output_type=='error') and \ 242 | (((Test_notebooks._allowed_error_markup not in c.metadata) and (Test_notebooks._expected_error_markup not in c.metadata))or \ 243 | ((Test_notebooks._allowed_error_markup in c.metadata) and (c.metadata[Test_notebooks._allowed_error_markup] not in output.evalue)) or \ 244 | ((Test_notebooks._expected_error_markup in c.metadata) and (c.metadata[Test_notebooks._expected_error_markup] not in output.evalue)))] 245 | 246 | no_unexpected_errors = True 247 | if unexpected_errors: 248 | no_unexpected_errors = False 249 | print('Cells with unexpected errors:\n_____________________________') 250 | for e, src in unexpected_errors: 251 | print(src) 252 | print('unexpected error: '+e) 253 | else: 254 | print('no unexpected errors') 255 | 256 | # Get all of the missing expected errors (logic: cell has output 257 | # but expected error was not generated.) 258 | missing_expected_errors = [] 259 | for c in nb.cells: 260 | if Test_notebooks._expected_error_markup in c.metadata: 261 | missing_error = True 262 | if 'outputs' in c: 263 | for output in c.outputs: 264 | if (output.output_type=='error') and (c.metadata[Test_notebooks._expected_error_markup] in output.evalue): 265 | missing_error = False 266 | if missing_error: 267 | missing_expected_errors.append((c.metadata[Test_notebooks._expected_error_markup],c.source)) 268 | 269 | no_missing_expected_errors = True 270 | if missing_expected_errors: 271 | no_missing_expected_errors = False 272 | print('\nCells with missing expected errors:\n___________________________________') 273 | for e, src in missing_expected_errors: 274 | print(src) 275 | print('missing expected error: '+e) 276 | else: 277 | print('no missing expected errors') 278 | 279 | return(no_unexpected_errors and no_missing_expected_errors) 280 | 281 | 282 | def absolute_path_python(self, notebook_file_name): 283 | return os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', notebook_file_name)) 284 | -------------------------------------------------------------------------------- /07_segmentation_and_shape_analysis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

Focused Ion Beam Scanning Electron Microscopy Image Segmentation

\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "1. SimpleITK supports a large number of filters that facilitate classical segmentation algorithms (a variety of thresholding algorithms, watersheds...).\n", 12 | "2. Once your data is segmented, SimpleITK enables you to efficiently post-process the segmentation (e.g. label distinct objects, analyze object shapes).\n", 13 | "\n", 14 | "This notebook will illustrate the use of SimpleITK for segmentation of bacteria from a 3D Focused Ion Beam Scanning Electron Microscopy (FIB-SEM) image. The specific bacterium is *Bacillus subtilis*, a rod-shaped organism naturally found in soil and plants. The bacteria have been subjected to stress to initiate the process of forming an endospore. These endospores can be seen as a generally dark ellipsoid inside the individual bacterium." 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import SimpleITK as sitk\n", 24 | "import pandas as pd\n", 25 | "\n", 26 | "%matplotlib notebook\n", 27 | "\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "import gui\n", 30 | "from math import ceil\n", 31 | "from downloaddata import fetch_data as fdata" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "# Load data\n", 39 | "\n", 40 | "Load the 3D volume and display it." 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "img = sitk.ReadImage(fdata(\"fib_sem_bacillus_subtilis.mha\"))\n", 50 | "gui.MultiImageDisplay(image_list = [img], figure_size=(8,4));" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "# Segmentation\n", 58 | "\n", 59 | "To allow us to analyze the shape of whole bacteria we first need to segment them. We will do this in several steps:\n", 60 | "1. Separate the bacteria from the embedding resin background.\n", 61 | "2. Mark each potential bacterium with a unique label, to evaluate the segmentation.\n", 62 | "3. Remove small components and fill small holes using binary morphology operators (opening and closing).\n", 63 | "4. Use seed-based watersheds to perform the final segmentation.\n", 64 | "5. Remove bacteria that are connected to the image boundary." 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "## Separate the bacteria from the background\n", 72 | "\n", 73 | "Based on the visualization of the data above, it intuitively appears that the background and foreground are separable using a single intensity threshold. Our first step towards validating this observation is to plot the intensity distribution." 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "plt.figure()\n", 83 | "plt.hist(sitk.GetArrayViewFromImage(img).flatten(), bins=100)\n", 84 | "plt.show()" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "The histogram is bi-modal with a clear separation, which we have manually identified as having an intensity value of 120.\n", 92 | "\n", 93 | "We can also use one of several binary threshold selection filters available in SimpleITK. 
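(Aside, a minimal sketch that is not part of the original notebook: each of these threshold-selection filters also has a one-line procedural form, e.g. Otsu with the same inside/outside values used in the next cell:

    thresh_img = sitk.OtsuThreshold(img, 0, 1)

The object-oriented form below is used instead because it lets us query the selected threshold via GetThreshold().)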
" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "threshold_filters = {'Otsu': sitk.OtsuThresholdImageFilter(),\n", 103 | " 'Triangle' : sitk.TriangleThresholdImageFilter(),\n", 104 | " 'Huang' : sitk.HuangThresholdImageFilter(),\n", 105 | " 'MaxEntropy' : sitk.MaximumEntropyThresholdImageFilter()}\n", 106 | "\n", 107 | "filter_selection = 'Manual'\n", 108 | "try:\n", 109 | " thresh_filter = threshold_filters[filter_selection]\n", 110 | " thresh_filter.SetInsideValue(0)\n", 111 | " thresh_filter.SetOutsideValue(1)\n", 112 | " thresh_img = thresh_filter.Execute(img)\n", 113 | " thresh_value = thresh_filter.GetThreshold()\n", 114 | "except KeyError:\n", 115 | " thresh_value = 120\n", 116 | " thresh_img = img>thresh_value\n", 117 | "\n", 118 | "print(\"Threshold used: \" + str(thresh_value)) \n", 119 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, thresh_img)], \n", 120 | " title_list = ['Binary Segmentation'], figure_size=(8,4));" 121 | ] 122 | }, 123 | { 124 | "cell_type": "markdown", 125 | "metadata": {}, 126 | "source": [ 127 | "# Mark each potential bacterium with unique label and evaluate" 128 | ] 129 | }, 130 | { 131 | "cell_type": "code", 132 | "execution_count": null, 133 | "metadata": {}, 134 | "outputs": [], 135 | "source": [ 136 | "stats = sitk.LabelShapeStatisticsImageFilter()\n", 137 | "stats.Execute(sitk.ConnectedComponent(thresh_img))\n", 138 | "\n", 139 | "# Look at the distribution of sizes of connected components (bacteria).\n", 140 | "label_sizes = [ stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1]\n", 141 | "\n", 142 | "plt.figure()\n", 143 | "plt.hist(label_sizes,bins=200)\n", 144 | "plt.title(\"Distribution of Object Sizes\")\n", 145 | "plt.xlabel(\"size in pixels\")\n", 146 | "plt.ylabel(\"number of objects\")\n", 147 | "plt.show()" 148 | ] 149 | }, 150 | { 151 | "cell_type": "markdown", 152 | "metadata": {}, 153 | "source": [ 154 | "The histogram above shows tens of thousands of very small labels which are not visually detected by looking at the segmentation." 155 | ] 156 | }, 157 | { 158 | "cell_type": "markdown", 159 | "metadata": {}, 160 | "source": [ 161 | "## Remove small islands and holes\n", 162 | "\n", 163 | "Using binary morphological operations we remove small objects using the opening operation and fill small holes using the closing operation. The use of opening and closing by reconstruction maintains the boundary of the original objects." 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | "cleaned_thresh_img = sitk.BinaryOpeningByReconstruction(thresh_img, [10, 10, 10])\n", 173 | "cleaned_thresh_img = sitk.BinaryClosingByReconstruction(cleaned_thresh_img, [10, 10, 10])\n", 174 | "\n", 175 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, cleaned_thresh_img)], \n", 176 | " title_list = ['Cleaned Binary Segmentation'], figure_size=(8,4));" 177 | ] 178 | }, 179 | { 180 | "cell_type": "markdown", 181 | "metadata": {}, 182 | "source": [ 183 | "Check that the number of objects defined by the binary image is more reasonable." 
184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": null, 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [ 192 | "stats = sitk.LabelShapeStatisticsImageFilter()\n", 193 | "stats.Execute(sitk.ConnectedComponent(cleaned_thresh_img))\n", 194 | "\n", 195 | "# Look at the distribution of sizes of connected components (bacteria).\n", 196 | "label_sizes = [ stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1]\n", 197 | "\n", 198 | "plt.figure()\n", 199 | "plt.hist(label_sizes,bins=200)\n", 200 | "plt.title(\"Distribution of Object Sizes\")\n", 201 | "plt.xlabel(\"size in pixels\")\n", 202 | "plt.ylabel(\"number of objects\")\n", 203 | "plt.show()" 204 | ] 205 | }, 206 | { 207 | "cell_type": "markdown", 208 | "metadata": {}, 209 | "source": [ 210 | "After the morphological operations, our binary image seems to have a reasonable number of objects, but is this true? We next look at the unique objects defined by this binary segmentation (each object is marked with a unique color)." 211 | ] 212 | }, 213 | { 214 | "cell_type": "code", 215 | "execution_count": null, 216 | "metadata": {}, 217 | "outputs": [], 218 | "source": [ 219 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, sitk.ConnectedComponent(cleaned_thresh_img))], \n", 220 | " title_list = ['Cleaned Binary Segmentation'],figure_size=(8,4));" 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "metadata": {}, 226 | "source": [ 227 | "## Seed based watershed segmentation\n", 228 | "\n", 229 | "The bacteria appear to be segmented correctly from the background but not from each other. Using the visualization and histogram above we see that in 3D many of them are connected, even if on a slice by slice inspection they appear separate. " 230 | ] 231 | }, 232 | { 233 | "cell_type": "code", 234 | "execution_count": null, 235 | "metadata": {}, 236 | "outputs": [], 237 | "source": [ 238 | "dist_img = sitk.SignedMaurerDistanceMap(cleaned_thresh_img != 0, insideIsPositive=False, squaredDistance=False, useImageSpacing=False)\n", 239 | "radius = 10\n", 240 | "# Seeds have a distance of \"radius\" or more to the object boundary, they are uniquely labelled.\n", 241 | "seeds = sitk.ConnectedComponent(dist_img < -radius)\n", 242 | "# Relabel the seed objects using consecutive object labels while removing all objects with less than 15 pixels.\n", 243 | "seeds = sitk.RelabelComponent(seeds, minimumObjectSize=15)\n", 244 | "# Run the watershed segmentation using the distance map and seeds.\n", 245 | "ws = sitk.MorphologicalWatershedFromMarkers(dist_img, seeds, markWatershedLine=True)\n", 246 | "ws = sitk.Mask( ws, sitk.Cast(cleaned_thresh_img, ws.GetPixelID()))" 247 | ] 248 | }, 249 | { 250 | "cell_type": "markdown", 251 | "metadata": {}, 252 | "source": [ 253 | "Visualize the distance map, the unique seeds and final object segmentation." 
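(Aside, an illustrative alternative rather than the notebook's method: the watershed can also be run on the distance map without explicitly constructed seeds, at the cost of a stronger tendency to over-segment:

    ws_alt = sitk.MorphologicalWatershed(dist_img, level=1, markWatershedLine=True)

The marker-based variant used above keeps explicit control over the number of objects via the seeds.)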
254 | ] 255 | }, 256 | { 257 | "cell_type": "code", 258 | "execution_count": null, 259 | "metadata": {}, 260 | "outputs": [], 261 | "source": [ 262 | "gui.MultiImageDisplay(image_list = [dist_img,\n", 263 | " sitk.LabelOverlay(img, seeds),\n", 264 | " sitk.LabelOverlay(img, ws)], \n", 265 | " title_list = ['Segmentation Distance',\n", 266 | " 'Watershed Seeds',\n", 267 | " 'Binary Watershed Labeling'],\n", 268 | " shared_slider=True,\n", 269 | " horizontal=False,\n", 270 | " figure_size=(6,12));" 271 | ] 272 | }, 273 | { 274 | "cell_type": "markdown", 275 | "metadata": {}, 276 | "source": [ 277 | "## Removal of objects touching the image boundary\n", 278 | "\n", 279 | "We are not sure objects touching the image boundary are whole bacteria, so we remove them." 280 | ] 281 | }, 282 | { 283 | "cell_type": "code", 284 | "execution_count": null, 285 | "metadata": {}, 286 | "outputs": [], 287 | "source": [ 288 | "# The image has a small black border which we account for here.\n", 289 | "bgp = sitk.BinaryGrindPeak( (ws!=0)| (img==0))\n", 290 | "non_border_seg = sitk.Mask( ws, bgp==0)\n", 291 | "gui.MultiImageDisplay(image_list = [sitk.LabelOverlay(img, non_border_seg)], \n", 292 | " title_list = ['Final Segmentation'],figure_size=(8,4));" 293 | ] 294 | }, 295 | { 296 | "cell_type": "markdown", 297 | "metadata": {}, 298 | "source": [ 299 | "# Object Analysis\n", 300 | "\n", 301 | "Once we have the segmented objects we look at their shapes and the intensity distributions inside the objects.\n", 302 | "\n", 303 | "Note that sizes are in nanometers. ITK and consequently SimpleITK are agnostic of the actual measurement units. It is up to you as the developer to explicitly use the correct units and more importantly, DO NOT MIX UNITS.\n", 304 | "\n", 305 | "We first compute all of the measurements we are interested in." 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [ 314 | "shape_stats = sitk.LabelShapeStatisticsImageFilter()\n", 315 | "shape_stats.ComputeOrientedBoundingBoxOn()\n", 316 | "shape_stats.Execute(non_border_seg)\n", 317 | "\n", 318 | "intensity_stats = sitk.LabelIntensityStatisticsImageFilter()\n", 319 | "intensity_stats.Execute(non_border_seg,img) " 320 | ] 321 | }, 322 | { 323 | "cell_type": "markdown", 324 | "metadata": {}, 325 | "source": [ 326 | "Insert the values into a pandas dataframe and display some descriptive statistics." 
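(Aside, a small sketch assuming the two filters computed above: every measurement can also be queried per object, with label ids taken from the filter itself:

    lbl = shape_stats.GetLabels()[0]
    print(shape_stats.GetPhysicalSize(lbl), intensity_stats.GetMean(lbl))

)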
327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "metadata": {}, 333 | "outputs": [], 334 | "source": [ 335 | "stats_list = [ (shape_stats.GetPhysicalSize(i),\n", 336 | " shape_stats.GetElongation(i),\n", 337 | " shape_stats.GetFlatness(i),\n", 338 | " shape_stats.GetOrientedBoundingBoxSize(i)[0],\n", 339 | " shape_stats.GetOrientedBoundingBoxSize(i)[2],\n", 340 | " intensity_stats.GetMean(i),\n", 341 | " intensity_stats.GetStandardDeviation(i),\n", 342 | " intensity_stats.GetSkewness(i)) for i in shape_stats.GetLabels()]\n", 343 | "cols=[\"Volume (nm^3)\",\n", 344 | " \"Elongation\",\n", 345 | " \"Flatness\",\n", 346 | " \"Oriented Bounding Box Minimum Size(nm)\",\n", 347 | " \"Oriented Bounding Box Maximum Size(nm)\",\n", 348 | " \"Intensity Mean\",\n", 349 | " \"Intensity Standard Deviation\",\n", 350 | " \"Intensity Skewness\"]\n", 351 | "\n", 352 | "# Create the pandas data frame and display descriptive statistics.\n", 353 | "stats = pd.DataFrame(data=stats_list, index=shape_stats.GetLabels(), columns=cols)\n", 354 | "stats.describe()" 355 | ] 356 | }, 357 | { 358 | "cell_type": "markdown", 359 | "metadata": {}, 360 | "source": [ 361 | "Create a plot to investigate the relationship, possible correlations, between volume and object shape characteristics (elongation, flatness, principal moments). " 362 | ] 363 | }, 364 | { 365 | "cell_type": "code", 366 | "execution_count": null, 367 | "metadata": {}, 368 | "outputs": [], 369 | "source": [ 370 | "fig, axes = plt.subplots(nrows=len(cols), ncols=2, figsize=(6,4*len(cols)))\n", 371 | "axes[0,0].axis('off')\n", 372 | "\n", 373 | "stats.loc[:,cols[0]].plot.hist(ax=axes[0,1], bins=25)\n", 374 | "axes[0,1].set_xlabel(cols[0])\n", 375 | "axes[0,1].xaxis.set_label_position(\"top\")\n", 376 | "\n", 377 | "for i in range(1,len(cols)):\n", 378 | " c = cols[i]\n", 379 | " bar = stats.loc[:,[c]].plot.hist(ax=axes[i,0], bins=20,orientation='horizontal',legend=False)\n", 380 | " bar.set_ylabel(stats.loc[:,[c]].columns.values[0]) \n", 381 | " scatter = stats.plot.scatter(ax=axes[i,1],y=c,x=cols[0])\n", 382 | " scatter.set_ylabel('')\n", 383 | " # Remove axis labels from all plots except the last (they all share the labels)\n", 384 | " if(i

Next »

" 440 | ] 441 | } 442 | ], 443 | "metadata": { 444 | "kernelspec": { 445 | "display_name": "Python 3", 446 | "language": "python", 447 | "name": "python3" 448 | }, 449 | "language_info": { 450 | "codemirror_mode": { 451 | "name": "ipython", 452 | "version": 3 453 | }, 454 | "file_extension": ".py", 455 | "mimetype": "text/x-python", 456 | "name": "python", 457 | "nbconvert_exporter": "python", 458 | "pygments_lexer": "ipython3", 459 | "version": "3.7.3" 460 | } 461 | }, 462 | "nbformat": 4, 463 | "nbformat_minor": 2 464 | } 465 | -------------------------------------------------------------------------------- /08_segmentation_evaluation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

Segmentation Evaluation

\n", 8 | "\n", 9 | "**Summary:**\n", 10 | "\n", 11 | "1. SimpleITK supports two ways of combining expert segmentations to obtain a reference segmentation.\n", 12 | "2. A variety of criteria used for evaluating a segmentation result are readily available or implemented in SimpleITK.\n", 13 | "\n", 14 | "Reference Segmentation\n", 15 | "\n", 16 | "Evaluating segmentation algorithms is most often done using reference data to which you compare your results. In the medical domain reference data is commonly obtained via manual segmentation by an expert (don't forget to thank your clinical colleagues for their hard work). When you are resource limited, the reference data may be defined by a single expert. This is less than ideal. When multiple experts provide you with their input then you can potentially combine them to obtain reference data that is closer to the ever elusive \"ground truth\". In this notebook we show two approaches to combining input from multiple observers, majority vote and the Simultaneous Truth and Performance Level\n", 17 | "Estimation [(STAPLE)](https://www.ncbi.nlm.nih.gov/pubmed/15250643) algorithm.\n", 18 | "\n", 19 | "Segmentation Evaluation\n", 20 | "\n", 21 | "Once we have a reference, we compare the algorithm's performance using multiple criteria, as usually there is no single evaluation measure that conveys all of the relevant information. In this notebook we illustrate the use of the following evaluation criteria:\n", 22 | "* Overlap measures:\n", 23 | " * Jaccard and Dice coefficients \n", 24 | " * false negative and false positive errors\n", 25 | "* Surface distance measures:\n", 26 | " * Hausdorff distance (symmetric)\n", 27 | " * mean, median, max and standard deviation between surfaces\n", 28 | "* Volume measures:\n", 29 | " * volume similarity $ \\frac{2*(v1-v2)}{v1+v2}$\n", 30 | "\n", 31 | "The relevant criteria are task dependent, so you need to ask yourself whether you are interested in detecting spurious errors or not (mean or max surface distance), whether over/under segmentation should be differentiated (volume similarity and Dice or just Dice), and what is the ratio between acceptable errors and the size of the segmented object (Dice coefficient may be too sensitive to small errors when the segmented object is small and not sensitive enough to large errors when the segmented object is large).\n", 32 | "\n", 33 | "In the context of segmentation challenges, algorithm rankings are often based on a weighted combination of these criteria. These ranking schemes are not necessarily robust, as discussed in \"[Why rankings of biomedical image analysis competitions should be interpreted with care](https://www.nature.com/articles/s41467-018-07619-7)\", L. Maier-Hein et al.\n", 34 | "\n", 35 | "The data we use in the notebook is a set of manually segmented liver tumors from a single clinical CT scan. A larger dataset (four scans) is freely available from this [MIDAS repository](http://www.insight-journal.org/midas/collection/view/38). The relevant publication is: T. 
Popa et al., \"Tumor Volume Measurement and Volume Measurement Comparison Plug-ins for VolView Using ITK\", SPIE Medical Imaging: Visualization, Image-Guided Procedures, and Display, 2006.\n" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "import SimpleITK as sitk\n", 45 | "\n", 46 | "import numpy as np\n", 47 | "\n", 48 | "from downloaddata import fetch_data as fdata\n", 49 | "%matplotlib inline\n", 50 | "import matplotlib.pyplot as plt\n", 51 | "import gui\n", 52 | "\n", 53 | "from ipywidgets import interact, fixed" 54 | ] 55 | }, 56 | { 57 | "cell_type": "markdown", 58 | "metadata": {}, 59 | "source": [ 60 | "## Utility method for display" 61 | ] 62 | }, 63 | { 64 | "cell_type": "code", 65 | "execution_count": null, 66 | "metadata": { 67 | "code_folding": [] 68 | }, 69 | "outputs": [], 70 | "source": [ 71 | "def display_with_overlay(segmentation_number, slice_number, image, segs, window_min, window_max):\n", 72 | " \"\"\"\n", 73 | " Display a CT slice with segmented contours overlaid onto it. The contours are the edges of \n", 74 | " the labeled regions.\n", 75 | " \"\"\"\n", 76 | " img = image[:,:,slice_number]\n", 77 | " msk = segs[segmentation_number][:,:,slice_number]\n", 78 | " overlay_img = sitk.LabelMapContourOverlay(sitk.Cast(msk, sitk.sitkLabelUInt8), \n", 79 | " sitk.Cast(sitk.IntensityWindowing(img,\n", 80 | " windowMinimum=window_min, \n", 81 | " windowMaximum=window_max), \n", 82 | " sitk.sitkUInt8), \n", 83 | " opacity = 1, \n", 84 | " contourThickness=[2,2])\n", 85 | " #We assume the original slice is isotropic, otherwise the display would be distorted \n", 86 | " plt.imshow(sitk.GetArrayViewFromImage(overlay_img))\n", 87 | " plt.axis('off')\n", 88 | " plt.show()" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "## Fetch the data\n", 96 | "\n", 97 | "Retrieve a single CT scan and three manual delineations of a liver tumor. Visual inspection of the data highlights the variability between experts. " 98 | ] 99 | }, 100 | { 101 | "cell_type": "code", 102 | "execution_count": null, 103 | "metadata": {}, 104 | "outputs": [], 105 | "source": [ 106 | "image = sitk.ReadImage(fdata(\"liverTumorSegmentations/Patient01Homo.mha\"))\n", 107 | "segmentation_file_names = [\"liverTumorSegmentations/Patient01Homo_Rad01.mha\", \n", 108 | " \"liverTumorSegmentations/Patient01Homo_Rad02.mha\",\n", 109 | " \"liverTumorSegmentations/Patient01Homo_Rad03.mha\"]\n", 110 | " \n", 111 | "segmentations = [sitk.ReadImage(fdata(file_name), sitk.sitkUInt8) for file_name in segmentation_file_names]\n", 112 | " \n", 113 | "interact(display_with_overlay, segmentation_number=(0,len(segmentations)-1), \n", 114 | " slice_number = (0, image.GetSize()[2]-1), image = fixed(image),\n", 115 | " segs = fixed(segmentations), window_min = fixed(-1024), window_max=fixed(976));" 116 | ] 117 | }, 118 | { 119 | "cell_type": "markdown", 120 | "metadata": {}, 121 | "source": [ 122 | "## Derive a reference\n", 123 | "\n", 124 | "There are a variety of ways to derive a reference segmentation from multiple expert inputs (\"[A comparison of ground truth estimation methods](https://www.ncbi.nlm.nih.gov/pubmed/20033494)\", A. M. Biancardi, A. C. Jirapatnakul, A. P. 
Reeves).\n", 125 | "\n", 126 | "Two methods that are available in SimpleITK are [majority vote](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LabelVotingImageFilter.html) and the STAPLE algorithm ([single label](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1STAPLEImageFilter.html) or [multi label](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1MultiLabelSTAPLEImageFilter.html))." 127 | ] 128 | }, 129 | { 130 | "cell_type": "code", 131 | "execution_count": null, 132 | "metadata": {}, 133 | "outputs": [], 134 | "source": [ 135 | "# Use the STAPLE algorithm to obtain the reference segmentation. This implementation of the original algorithm\n", 136 | "# combines a single label from multiple segmentations; the label is user specified. The result of the\n", 137 | "# filter is the voxel's probability of belonging to the foreground. We then have to threshold the result to obtain\n", 138 | "# a reference binary segmentation.\n", 139 | "foregroundValue = 1\n", 140 | "threshold = 0.95\n", 141 | "reference_segmentation_STAPLE_probabilities = sitk.STAPLE(segmentations, foregroundValue) \n", 142 | "# We use the overloaded operator to perform thresholding, another option is to use the BinaryThreshold function.\n", 143 | "reference_segmentation = reference_segmentation_STAPLE_probabilities > threshold\n", 144 | "\n", 145 | "manual_plus_staple = list(segmentations) \n", 146 | "# Append the reference segmentation to the list of manual segmentations\n", 147 | "manual_plus_staple.append(reference_segmentation)\n", 148 | "\n", 149 | "interact(display_with_overlay, segmentation_number=(0,len(manual_plus_staple)-1), \n", 150 | " slice_number = (0, image.GetSize()[2]-1), image = fixed(image),\n", 151 | " segs = fixed(manual_plus_staple), window_min = fixed(-1024), window_max=fixed(976));" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [ 158 | "## Evaluate segmentations using the reference\n", 159 | "\n", 160 | "Once we derive a reference from our experts' input we can compare segmentation results to it.\n", 161 | "\n", 162 | "Note that in this notebook we compare the expert segmentations to the reference derived from them. This is not relevant for algorithm evaluation, but it can potentially be used to rank your experts.\n", 163 | "\n", 164 | "In this specific implementation we take advantage of the fact that we have a binary segmentation with 1 for foreground and 0 for background."
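(Aside, a minimal sketch of the majority-vote alternative mentioned above, not exercised in this notebook's cells; the undecided label value is a hypothetical choice:

    # Voxels on which the experts have no majority receive the 'undecided'
    # label, here 10 to keep it distinct from the 0/1 labels.
    reference_majority_vote = sitk.LabelVoting(segmentations, 10)

)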
165 | ] 166 | }, 167 | { 168 | "cell_type": "code", 169 | "execution_count": null, 170 | "metadata": {}, 171 | "outputs": [], 172 | "source": [ 173 | "from enum import Enum\n", 174 | "\n", 175 | "# Use enumerations to represent the various evaluation measures\n", 176 | "class OverlapMeasures(Enum):\n", 177 | " jaccard, dice, volume_similarity, false_negative, false_positive = range(5)\n", 178 | "\n", 179 | "class SurfaceDistanceMeasures(Enum):\n", 180 | " hausdorff_distance, mean_surface_distance, median_surface_distance, std_surface_distance, max_surface_distance = range(5)\n", 181 | " \n", 182 | "# Empty numpy arrays to hold the results \n", 183 | "overlap_results = np.zeros((len(segmentations),len(OverlapMeasures.__members__.items()))) \n", 184 | "surface_distance_results = np.zeros((len(segmentations),len(SurfaceDistanceMeasures.__members__.items()))) \n", 185 | "\n", 186 | "# Compute the evaluation criteria\n", 187 | "\n", 188 | "# Note that for the overlap measures filter, because we are dealing with a single label we \n", 189 | "# use the combined, all labels, evaluation measures without passing a specific label to the methods.\n", 190 | "overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n", 191 | "\n", 192 | "hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n", 193 | "\n", 194 | "# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside \n", 195 | "# relationship, is irrelevant)\n", 196 | "label = 1\n", 197 | "reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(reference_segmentation, squaredDistance=False, useImageSpacing=True))\n", 198 | "reference_surface = sitk.LabelContour(reference_segmentation)\n", 199 | "\n", 200 | "statistics_image_filter = sitk.StatisticsImageFilter()\n", 201 | "# Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 202 | "statistics_image_filter.Execute(reference_surface)\n", 203 | "num_reference_surface_pixels = int(statistics_image_filter.GetSum()) \n", 204 | "\n", 205 | "for i, seg in enumerate(segmentations):\n", 206 | " # Overlap measures\n", 207 | " overlap_measures_filter.Execute(reference_segmentation, seg)\n", 208 | " overlap_results[i,OverlapMeasures.jaccard.value] = overlap_measures_filter.GetJaccardCoefficient()\n", 209 | " overlap_results[i,OverlapMeasures.dice.value] = overlap_measures_filter.GetDiceCoefficient()\n", 210 | " overlap_results[i,OverlapMeasures.volume_similarity.value] = overlap_measures_filter.GetVolumeSimilarity()\n", 211 | " overlap_results[i,OverlapMeasures.false_negative.value] = overlap_measures_filter.GetFalseNegativeError()\n", 212 | " overlap_results[i,OverlapMeasures.false_positive.value] = overlap_measures_filter.GetFalsePositiveError()\n", 213 | " # Hausdorff distance\n", 214 | " hausdorff_distance_filter.Execute(reference_segmentation, seg)\n", 215 | " \n", 216 | " surface_distance_results[i,SurfaceDistanceMeasures.hausdorff_distance.value] = hausdorff_distance_filter.GetHausdorffDistance()\n", 217 | " # Symmetric surface distance measures\n", 218 | " segmented_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(seg, squaredDistance=False, useImageSpacing=True))\n", 219 | " segmented_surface = sitk.LabelContour(seg)\n", 220 | " \n", 221 | " # Multiply the binary surface segmentations with the distance maps. 
The resulting distance\n", 222 | " # maps contain non-zero values only on the surface (they can also contain zero on the surface)\n", 223 | " seg2ref_distance_map = reference_distance_map*sitk.Cast(segmented_surface, sitk.sitkFloat32)\n", 224 | " ref2seg_distance_map = segmented_distance_map*sitk.Cast(reference_surface, sitk.sitkFloat32)\n", 225 | " \n", 226 | " # Get the number of pixels in the segmented surface by counting all pixels that are 1.\n", 227 | " statistics_image_filter.Execute(segmented_surface)\n", 228 | " num_segmented_surface_pixels = int(statistics_image_filter.GetSum())\n", 229 | " \n", 230 | " # Get all non-zero distances and then add zero distances if required.\n", 231 | " seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n", 232 | " seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr!=0]) \n", 233 | " seg2ref_distances = seg2ref_distances + \\\n", 234 | " list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))\n", 235 | " ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n", 236 | " ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr!=0]) \n", 237 | " ref2seg_distances = ref2seg_distances + \\\n", 238 | " list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))\n", 239 | " \n", 240 | " all_surface_distances = seg2ref_distances + ref2seg_distances\n", 241 | "\n", 242 | " # The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In \n", 243 | " # general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two \n", 244 | " # segmentations, though in our case it is. More on this below.\n", 245 | " surface_distance_results[i,SurfaceDistanceMeasures.mean_surface_distance.value] = np.mean(all_surface_distances)\n", 246 | " surface_distance_results[i,SurfaceDistanceMeasures.median_surface_distance.value] = np.median(all_surface_distances)\n", 247 | " surface_distance_results[i,SurfaceDistanceMeasures.std_surface_distance.value] = np.std(all_surface_distances)\n", 248 | " surface_distance_results[i,SurfaceDistanceMeasures.max_surface_distance.value] = np.max(all_surface_distances)\n", 249 | " \n", 250 | "# Print the matrices\n", 251 | "np.set_printoptions(precision=3)\n", 252 | "print(overlap_results)\n", 253 | "print(surface_distance_results)" 254 | ] 255 | }, 256 | { 257 | "cell_type": "markdown", 258 | "metadata": {}, 259 | "source": [ 260 | "## Improved output\n", 261 | "\n", 262 | "Using the [pandas](http://pandas.pydata.org/) package we can easily produce high-quality output. 
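(Aside: the data frames built in the next cell can also be written to disk for downstream analysis, e.g. a sketch with a hypothetical file name:

    overlap_results_df.to_csv('overlap_results.csv', float_format='%.3f')

)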
" 263 | ] 264 | }, 265 | { 266 | "cell_type": "code", 267 | "execution_count": null, 268 | "metadata": {}, 269 | "outputs": [], 270 | "source": [ 271 | "import pandas as pd\n", 272 | "from IPython.display import display, HTML \n", 273 | "\n", 274 | "# Graft our results matrix into pandas data frames \n", 275 | "overlap_results_df = pd.DataFrame(data=overlap_results, index = list(range(len(segmentations))), \n", 276 | " columns=[name for name, _ in OverlapMeasures.__members__.items()]) \n", 277 | "surface_distance_results_df = pd.DataFrame(data=surface_distance_results, index = list(range(len(segmentations))), \n", 278 | " columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()]) \n", 279 | "\n", 280 | "# Display the data as HTML tables and graphs\n", 281 | "display(HTML(overlap_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 282 | "display(HTML(surface_distance_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 283 | "overlap_results_df.plot(kind='bar').legend(bbox_to_anchor=(1.6,0.9))\n", 284 | "surface_distance_results_df.plot(kind='bar').legend(bbox_to_anchor=(1.6,0.9))" 285 | ] 286 | }, 287 | { 288 | "cell_type": "markdown", 289 | "metadata": {}, 290 | "source": [ 291 | "You can also export the data as a table for your LaTeX manuscript using the [to_latex](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_latex.html) function.\n", 292 | "Note: You will need to add the \\usepackage{booktabs} to your LaTeX document's preamble. \n", 293 | "\n", 294 | "To create the minimal LaTeX document which will allow you to see the difference between the tables below, copy paste:\n", 295 | "\n", 296 | "\\documentclass{article}\n", 297 | "\n", 298 | "\\usepackage{booktabs}\n", 299 | "\n", 300 | "\\begin{document}\n", 301 | "\n", 302 | "paste the tables here\n", 303 | "\n", 304 | "\\end{document}\n", 305 | "\n" 306 | ] 307 | }, 308 | { 309 | "cell_type": "code", 310 | "execution_count": null, 311 | "metadata": {}, 312 | "outputs": [], 313 | "source": [ 314 | "# The formatting of the table using the default settings is less than ideal \n", 315 | "print(overlap_results_df.to_latex())\n", 316 | "\n", 317 | "# We can improve on this by specifying the table's column format and the float format\n", 318 | "print(overlap_results_df.to_latex(column_format='ccccccc', float_format=lambda x: '%.3f' % x))" 319 | ] 320 | }, 321 | { 322 | "cell_type": "markdown", 323 | "metadata": {}, 324 | "source": [ 325 | "## Visual Diff\n", 326 | "\n", 327 | "It is always nice to have a figure with a visual display of the difference between the segmentation and ground truth." 
328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": null, 333 | "metadata": { 334 | "simpleitk_error_allowed": "Exception thrown in SimpleITK Show:" 335 | }, 336 | "outputs": [], 337 | "source": [ 338 | "# Use the first segmentation \n", 339 | "segmentation = segmentations[0]\n", 340 | "\n", 341 | "# Save ink, the differences will be in black and background is white \n", 342 | "segmentation_diff = (segmentation==reference_segmentation)*255\n", 343 | "\n", 344 | "# Flatten for 2D presentation, create a montage from the volume\n", 345 | "num_slices = segmentation_diff.GetDepth()\n", 346 | "tile_w = int(np.sqrt(num_slices))\n", 347 | "tile_h = int(np.ceil(num_slices/tile_w))\n", 348 | "default_background_color = 255\n", 349 | "tile_image = sitk.Tile([segmentation_diff[:,:,i] for i in range(num_slices)], (tile_w, tile_h), default_background_color)\n", 350 | "sitk.Show(tile_image)" 351 | ] 352 | }, 353 | { 354 | "cell_type": "markdown", 355 | "metadata": {}, 356 | "source": [ 357 | "

Next »

" 358 | ] 359 | } 360 | ], 361 | "metadata": { 362 | "kernelspec": { 363 | "display_name": "Python 3", 364 | "language": "python", 365 | "name": "python3" 366 | }, 367 | "language_info": { 368 | "codemirror_mode": { 369 | "name": "ipython", 370 | "version": 3 371 | }, 372 | "file_extension": ".py", 373 | "mimetype": "text/x-python", 374 | "name": "python", 375 | "nbconvert_exporter": "python", 376 | "pygments_lexer": "ipython3", 377 | "version": "3.7.3" 378 | } 379 | }, 380 | "nbformat": 4, 381 | "nbformat_minor": 1 382 | } 383 | -------------------------------------------------------------------------------- /01_spatial_transformations.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
SimpleITK Spatial Transformations
\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "\n", 12 | "1. Points are represented by vector-like data types: Tuple, Numpy array, List.\n", 13 | "2. Matrices are represented by vector-like data types in row major order.\n", 14 | "3. Default transformation initialization as the identity transform.\n", 15 | "4. Angles specified in radians, distances specified in unknown but consistent units (nm,mm,m,km...).\n", 16 | "5. All global transformations **except translation** are of the form:\n", 17 | "$$T(\\mathbf{x}) = A(\\mathbf{x}-\\mathbf{c}) + \\mathbf{t} + \\mathbf{c}$$\n", 18 | "\n", 19 | " Nomenclature (when printing your transformation):\n", 20 | "\n", 21 | " * Matrix: the matrix $A$\n", 22 | " * Center: the point $\\mathbf{c}$\n", 23 | " * Translation: the vector $\\mathbf{t}$\n", 24 | " * Offset: $\\mathbf{t} + \\mathbf{c} - A\\mathbf{c}$\n", 25 | "6. Bounded transformations, BSplineTransform and DisplacementFieldTransform, behave as the identity transform outside the defined bounds.\n", 26 | "7. DisplacementFieldTransform:\n", 27 | " * Initializing the DisplacementFieldTransform using an image requires that the image's pixel type be sitk.sitkVectorFloat64.\n", 28 | " * Initializing the DisplacementFieldTransform using an image will \"clear out\" your image (your alias to the image will point to an empty, zero sized, image).\n", 29 | "8. Composite transformations are applied in stack order (first added, last applied)." 30 | ] 31 | }, 32 | { 33 | "cell_type": "markdown", 34 | "metadata": {}, 35 | "source": [ 36 | "## Transformation Types\n", 37 | "\n", 38 | "SimpleITK supports the following transformation types.\n", 39 | "\n", 40 | "\n", 41 | "\n", 42 | " \n", 43 | " \n", 44 | " \n", 45 | " \n", 46 | " \n", 47 | " \n", 48 | " \n", 49 | " \n", 50 | " \n", 51 | " \n", 52 | " \n", 53 | " \n", 54 | " \n", 55 | " \n", 56 | "
TranslationTransform: 2D or 3D, translation
VersorTransform: 3D, rotation represented by a versor
VersorRigid3DTransform: 3D, rigid transformation with rotation represented by a versor
Euler2DTransform: 2D, rigid transformation with rotation represented by an Euler angle
Euler3DTransform: 3D, rigid transformation with rotation represented by Euler angles
Similarity2DTransform: 2D, composition of isotropic scaling and rigid transformation with rotation represented by an Euler angle
Similarity3DTransform: 3D, composition of isotropic scaling and rigid transformation with rotation represented by a versor
ScaleTransform: 2D or 3D, anisotropic scaling
ScaleVersor3DTransform: 3D, rigid transformation with anisotropic scale added to the rotation matrix part (not composed as one would expect)
ScaleSkewVersor3DTransform: 3D, rigid transformation with anisotropic scale and skew matrices added to the rotation matrix part (not composed as one would expect)
AffineTransform: 2D or 3D, affine transformation
BSplineTransform: 2D or 3D, deformable transformation represented by a sparse regular grid of control points
DisplacementFieldTransform: 2D or 3D, deformable transformation represented as a dense regular grid of vectors
Transform: a generic transformation; can represent any of the SimpleITK transformations, or a composite transformation (stack of transformations concatenated via composition, last added, first applied)
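To make the Matrix/Center/Translation/Offset nomenclature from the summary concrete, here is a minimal sketch (all parameter values below are arbitrary illustrations) that recomputes the Offset, $\mathbf{t} + \mathbf{c} - A\mathbf{c}$, from the components of a rigid 2D transformation:

```python
import numpy as np
import SimpleITK as sitk

tx = sitk.Euler2DTransform()
tx.SetCenter((2.0, 3.0))
tx.SetTranslation((1.0, -1.0))
tx.SetAngle(np.pi / 4.0)

A = np.array(tx.GetMatrix()).reshape(2, 2)  # row major order
c = np.array(tx.GetCenter())
t = np.array(tx.GetTranslation())

# Should match the Offset entry displayed by print(tx).
print('offset from components:', t + c - A.dot(c))
print(tx)
```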
" 57 | ] 58 | }, 59 | { 60 | "cell_type": "code", 61 | "execution_count": null, 62 | "metadata": {}, 63 | "outputs": [], 64 | "source": [ 65 | "import SimpleITK as sitk\n", 66 | "import utilities as util\n", 67 | "\n", 68 | "import numpy as np\n", 69 | "%matplotlib inline \n", 70 | "import matplotlib.pyplot as plt\n", 71 | "from ipywidgets import interact, fixed\n", 72 | "\n", 73 | "OUTPUT_DIR = \"output\"" 74 | ] 75 | }, 76 | { 77 | "cell_type": "markdown", 78 | "metadata": {}, 79 | "source": [ 80 | "We will introduce the transformation types, starting with translation and illustrating how to move from a lower to higher parameter space (e.g. translation to rigid). \n", 81 | "\n", 82 | "We start with the global transformations. All of them except translation are of the form:\n", 83 | "$$T(\\mathbf{x}) = A(\\mathbf{x}-\\mathbf{c}) + \\mathbf{t} + \\mathbf{c}$$\n", 84 | "\n", 85 | "In ITK speak (when printing your transformation):\n", 86 | "" 92 | ] 93 | }, 94 | { 95 | "cell_type": "markdown", 96 | "metadata": {}, 97 | "source": [ 98 | "## TranslationTransform\n", 99 | "\n", 100 | "Create a translation and then transform a point and use the inverse transformation to get the original back." 101 | ] 102 | }, 103 | { 104 | "cell_type": "code", 105 | "execution_count": null, 106 | "metadata": {}, 107 | "outputs": [], 108 | "source": [ 109 | "dimension = 2 \n", 110 | "offset = [2]*dimension # use a Python trick to create the offset list based on the dimension\n", 111 | "translation = sitk.TranslationTransform(dimension, offset)\n", 112 | "print(translation)" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": {}, 119 | "outputs": [], 120 | "source": [ 121 | "point = [10, 11] if dimension==2 else [10, 11, 12] # set point to match dimension\n", 122 | "transformed_point = translation.TransformPoint(point)\n", 123 | "translation_inverse = translation.GetInverse()\n", 124 | "print('original point: ' + util.point2str(point) + '\\n'\n", 125 | " 'transformed point: ' + util.point2str(transformed_point) + '\\n'\n", 126 | " 'back to original: ' + util.point2str(translation_inverse.TransformPoint(transformed_point)))" 127 | ] 128 | }, 129 | { 130 | "cell_type": "markdown", 131 | "metadata": {}, 132 | "source": [ 133 | "## Euler2DTransform\n", 134 | "\n", 135 | "Rigidly transform a 2D point using a Euler angle parameter specification.\n", 136 | "\n", 137 | "Notice that the dimensionality of the Euler angle based rigid transformation is associated with the class, unlike the translation which is set at construction.\n" 138 | ] 139 | }, 140 | { 141 | "cell_type": "code", 142 | "execution_count": null, 143 | "metadata": {}, 144 | "outputs": [], 145 | "source": [ 146 | "point = [10, 11]\n", 147 | "rotation2D = sitk.Euler2DTransform()\n", 148 | "rotation2D.SetTranslation((7.2, 8.4))\n", 149 | "rotation2D.SetAngle(np.pi/2)\n", 150 | "print('original point: ' + util.point2str(point) + '\\n'\n", 151 | " 'transformed point: ' + util.point2str(rotation2D.TransformPoint(point)))" 152 | ] 153 | }, 154 | { 155 | "cell_type": "markdown", 156 | "metadata": {}, 157 | "source": [ 158 | "## VersorTransform (rotation in 3D)\n", 159 | "\n", 160 | "Rotation using a versor, vector part of unit quaternion, parameterization. Quaternion defined by rotation of $\\theta$ radians around axis $n$, is $q = [n*\\sin(\\frac{\\theta}{2}), \\cos(\\frac{\\theta}{2})]$." 
161 | ] 162 | }, 163 | { 164 | "cell_type": "code", 165 | "execution_count": null, 166 | "metadata": {}, 167 | "outputs": [], 168 | "source": [ 169 | "# Use a versor:\n", 170 | "rotation1 = sitk.VersorTransform([0,0,1,0])\n", 171 | "\n", 172 | "# Use axis-angle:\n", 173 | "rotation2 = sitk.VersorTransform((0,0,1), np.pi)\n", 174 | "\n", 175 | "# Use a matrix:\n", 176 | "rotation3 = sitk.VersorTransform()\n", 177 | "rotation3.SetMatrix([-1, 0, 0, 0, -1, 0, 0, 0, 1]);\n", 178 | "\n", 179 | "point = (10, 100, 1000)\n", 180 | "\n", 181 | "p1 = rotation1.TransformPoint(point)\n", 182 | "p2 = rotation2.TransformPoint(point)\n", 183 | "p3 = rotation3.TransformPoint(point)\n", 184 | "\n", 185 | "print('Points after transformation:\np1=' + str(p1) + \n", 186 | "      '\np2='+ str(p2) + '\np3='+ str(p3))" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "## Translation to Rigid [3D]\n", 194 | "\n", 195 | "We only need to copy the translational component." 196 | ] 197 | }, 198 | { 199 | "cell_type": "code", 200 | "execution_count": null, 201 | "metadata": {}, 202 | "outputs": [], 203 | "source": [ 204 | "dimension = 3 \n", 205 | "t =(1,2,3) \n", 206 | "translation = sitk.TranslationTransform(dimension, t)\n", 207 | "\n", 208 | "# Copy the translational component.\n", 209 | "rigid_euler = sitk.Euler3DTransform()\n", 210 | "rigid_euler.SetTranslation(translation.GetOffset())\n", 211 | "\n", 212 | "# Apply the transformations to the same set of random points and compare the results.\n", 213 | "util.print_transformation_differences(translation, rigid_euler)" 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": {}, 219 | "source": [ 220 | "## Rotation to Rigid [3D]\n", 221 | "Copy the matrix or versor and center of rotation." 222 | ] 223 | }, 224 | { 225 | "cell_type": "code", 226 | "execution_count": null, 227 | "metadata": {}, 228 | "outputs": [], 229 | "source": [ 230 | "rotation_center = (10, 10, 10)\n", 231 | "rotation = sitk.VersorTransform([0,0,1,0], rotation_center)\n", 232 | "\n", 233 | "rigid_versor = sitk.VersorRigid3DTransform()\n", 234 | "rigid_versor.SetRotation(rotation.GetVersor())\n", 235 | "#rigid_versor.SetCenter(rotation.GetCenter()) #intentional error, not copying center of rotation\n", 236 | "\n", 237 | "# Apply the transformations to the same set of random points and compare the results.\n", 238 | "util.print_transformation_differences(rotation, rigid_versor)" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": {}, 244 | "source": [ 245 | "In the cell above, when we don't copy the center of rotation we have a constant error vector, $\mathbf{c} - A\mathbf{c}$." 246 | ] 247 | }, 248 | { 249 | "cell_type": "markdown", 250 | "metadata": {}, 251 | "source": [ 252 | "## Similarity [2D]\n", 253 | "\n", 254 | "When the center of the similarity transformation is not at the origin, the effect of the transformation is not what most of us expect. This is readily visible if we limit the transformation to scaling: $T(\mathbf{x}) = s\mathbf{x}-s\mathbf{c} + \mathbf{c}$. Changing the transformation's center results in scale + translation."
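Before the interactive demonstration in the next cell, a short numeric sketch (values chosen arbitrarily) makes the formula concrete: scaling by $s=2$ about center $\mathbf{c}=(1,1)$ maps $(3,3)$ to $2\cdot(3,3) - 2\cdot(1,1) + (1,1) = (5,5)$.

```python
import SimpleITK as sitk

# Scaling about a non-origin center: T(x) = s*x - s*c + c.
tx = sitk.Similarity2DTransform()
tx.SetScale(2.0)
tx.SetCenter((1.0, 1.0))
print(tx.TransformPoint((3.0, 3.0)))  # expect (5.0, 5.0)
```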
255 | ] 256 | }, 257 | { 258 | "cell_type": "code", 259 | "execution_count": null, 260 | "metadata": {}, 261 | "outputs": [], 262 | "source": [ 263 | "def display_center_effect(x, y, tx, point_list, xlim, ylim):\n", 264 | " tx.SetCenter((x,y))\n", 265 | " transformed_point_list = [ tx.TransformPoint(p) for p in point_list]\n", 266 | "\n", 267 | " plt.scatter(list(np.array(transformed_point_list).T)[0],\n", 268 | " list(np.array(transformed_point_list).T)[1],\n", 269 | " marker='^', \n", 270 | " color='red', label='transformed points')\n", 271 | " plt.scatter(list(np.array(point_list).T)[0],\n", 272 | " list(np.array(point_list).T)[1],\n", 273 | " marker='o', \n", 274 | " color='blue', label='original points')\n", 275 | " plt.xlim(xlim)\n", 276 | " plt.ylim(ylim)\n", 277 | " plt.legend(loc=(0.25,1.01))\n", 278 | "\n", 279 | "# 2D square centered on (0,0)\n", 280 | "points = [np.array((-1.0,-1.0)), np.array((-1.0,1.0)), np.array((1.0,1.0)), np.array((1.0,-1.0))]\n", 281 | "\n", 282 | "# Scale by 2 \n", 283 | "similarity = sitk.Similarity2DTransform();\n", 284 | "similarity.SetScale(2)\n", 285 | "\n", 286 | "interact(display_center_effect, x=(-10,10), y=(-10,10),tx = fixed(similarity), point_list = fixed(points), \n", 287 | " xlim = fixed((-10,10)),ylim = fixed((-10,10)));" 288 | ] 289 | }, 290 | { 291 | "cell_type": "markdown", 292 | "metadata": {}, 293 | "source": [ 294 | "## Rigid to Similarity [3D]\n", 295 | "Copy the translation, center, and matrix or versor." 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": null, 301 | "metadata": {}, 302 | "outputs": [], 303 | "source": [ 304 | "rotation_center = (100, 100, 100)\n", 305 | "theta_x = 0.0\n", 306 | "theta_y = 0.0\n", 307 | "theta_z = np.pi/2.0\n", 308 | "translation = (1,2,3)\n", 309 | "\n", 310 | "rigid_euler = sitk.Euler3DTransform(rotation_center, theta_x, theta_y, theta_z, translation)\n", 311 | "\n", 312 | "similarity = sitk.Similarity3DTransform()\n", 313 | "similarity.SetMatrix(rigid_euler.GetMatrix())\n", 314 | "similarity.SetTranslation(rigid_euler.GetTranslation())\n", 315 | "similarity.SetCenter(rigid_euler.GetCenter())\n", 316 | "\n", 317 | "# Apply the transformations to the same set of random points and compare the results.\n", 318 | "util.print_transformation_differences(rigid_euler, similarity)" 319 | ] 320 | }, 321 | { 322 | "cell_type": "markdown", 323 | "metadata": {}, 324 | "source": [ 325 | "## Similarity to Affine [3D]\n", 326 | "Copy the translation, center and matrix." 
327 | ] 328 | }, 329 | { 330 | "cell_type": "code", 331 | "execution_count": null, 332 | "metadata": {}, 333 | "outputs": [], 334 | "source": [ 335 | "rotation_center = (100, 100, 100)\n", 336 | "axis = (0,0,1)\n", 337 | "angle = np.pi/2.0\n", 338 | "translation = (1,2,3)\n", 339 | "scale_factor = 2.0\n", 340 | "similarity = sitk.Similarity3DTransform(scale_factor, axis, angle, translation, rotation_center)\n", 341 | "\n", 342 | "affine = sitk.AffineTransform(3)\n", 343 | "affine.SetMatrix(similarity.GetMatrix())\n", 344 | "affine.SetTranslation(similarity.GetTranslation())\n", 345 | "affine.SetCenter(similarity.GetCenter())\n", 346 | "\n", 347 | "# Apply the transformations to the same set of random points and compare the results.\n", 348 | "util.print_transformation_differences(similarity, affine)" 349 | ] 350 | }, 351 | { 352 | "cell_type": "markdown", 353 | "metadata": {}, 354 | "source": [ 355 | "## Scale Transform\n", 356 | "\n", 357 | "Just as was the case for the similarity transformation above, when the transformation's center is not at the origin, instead of a pure anisotropic scaling we also have translation ($T(\mathbf{x}) = \mathbf{s}^T\mathbf{x}-\mathbf{s}^T\mathbf{c} + \mathbf{c}$)." 358 | ] 359 | }, 360 | { 361 | "cell_type": "code", 362 | "execution_count": null, 363 | "metadata": {}, 364 | "outputs": [], 365 | "source": [ 366 | "# 2D square centered on (0,0).\n", 367 | "points = [np.array((-1.0,-1.0)), np.array((-1.0,1.0)), np.array((1.0,1.0)), np.array((1.0,-1.0))]\n", 368 | "\n", 369 | "# Scale by half in x and 2 in y.\n", 370 | "scale = sitk.ScaleTransform(2, (0.5,2));\n", 371 | "\n", 372 | "# Interactively change the location of the center.\n", 373 | "interact(display_center_effect, x=(-10,10), y=(-10,10),tx = fixed(scale), point_list = fixed(points), \n", 374 | "         xlim = fixed((-10,10)),ylim = fixed((-10,10)));" 375 | ] 376 | }, 377 | { 378 | "cell_type": "markdown", 379 | "metadata": {}, 380 | "source": [ 381 | "## Unintentional Misnomers (originally from ITK)\n", 382 | "\n", 383 | "Two transformation types whose names may mislead you are ScaleVersor and ScaleSkewVersor. Basing your choices on expectations without reading the documentation will surprise you.\n", 384 | "\n", 385 | "ScaleVersor - based on its name one would expect a composition of transformations; in practice it is:\n", 386 | "$$T(x) = (R+S)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S= \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]$$ \n", 387 | "\n", 388 | "ScaleSkewVersor - based on its name one would expect a composition of transformations; in practice it is:\n", 389 | "$$T(x) = (R+S+K)(\mathbf{x}-\mathbf{c}) + \mathbf{t} + \mathbf{c},\;\; \textrm{where } S = \left[\begin{array}{ccc} s_0-1 & 0 & 0 \\ 0 & s_1-1 & 0 \\ 0 & 0 & s_2-1 \end{array}\right]\;\; \textrm{and } K = \left[\begin{array}{ccc} 0 & k_0 & k_1 \\ k_2 & 0 & k_3 \\ k_4 & k_5 & 0 \end{array}\right]$$ \n", 390 | "\n", 391 | "Note that ScaleSkewVersor is an over-parametrized version of the affine transform, 15 parameters (scale, skew, versor, translation) vs. 12 parameters (matrix, translation)."
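A hedged sketch of the additive behavior follows, assuming the SetRotation(axis, angle) and SetScale overloads shown below behave as in the corresponding ITK class (center and translation are left at their zero defaults, and the example values are arbitrary). The transformed point matches $(R+S)\mathbf{x}$ rather than the composition $R\,\textrm{diag}(\mathbf{s})\,\mathbf{x}$ that the name suggests:

```python
import numpy as np
import SimpleITK as sitk

tx = sitk.ScaleVersor3DTransform()
tx.SetRotation((0.0, 0.0, 1.0), np.pi / 2.0)  # 90 degrees around the z axis
tx.SetScale((2.0, 3.0, 4.0))

R = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0],
              [0.0,  0.0, 1.0]])                # rotation by pi/2 about z
S = np.diag([2.0 - 1.0, 3.0 - 1.0, 4.0 - 1.0])  # diag(s_i - 1)

x = np.array([1.0, 2.0, 3.0])
print(tx.TransformPoint((1.0, 2.0, 3.0)))       # what ScaleVersor actually does
print((R + S).dot(x))                           # same values: (R+S)x
print(R.dot(np.diag([2.0, 3.0, 4.0])).dot(x))   # what a true composition would give
```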
392 | ] 393 | }, 394 | { 395 | "cell_type": "markdown", 396 | "metadata": {}, 397 | "source": [ 398 | "## Bounded Transformations\n", 399 | "\n", 400 | "SimpleITK supports two types of bounded non-rigid transformations, BSplineTransform (sparse representation) and DisplacementFieldTransform (dense representation).\n", 401 | "\n", 402 | "Transforming a point that is outside the bounds will return the original point (identity transform)." 403 | ] 404 | }, 405 | { 406 | "cell_type": "markdown", 407 | "metadata": {}, 408 | "source": [ 409 | "## BSpline\n", 410 | "Using a sparse set of control points to control a free form deformation. Experimenting with the cell below makes it clear that the BSplineTransform allows for folding and tearing." 411 | ] 412 | }, 413 | { 414 | "cell_type": "code", 415 | "execution_count": null, 416 | "metadata": {}, 417 | "outputs": [], 418 | "source": [ 419 | "# Create the transformation (when working with images it is easier to use the BSplineTransformInitializer function\n", 420 | "# or its object-oriented counterpart BSplineTransformInitializerFilter).\n", 421 | "dimension = 2\n", 422 | "spline_order = 3\n", 423 | "direction_matrix_row_major = [1.0,0.0,0.0,1.0] # identity, mesh is axis aligned\n", 424 | "origin = [-1.0,-1.0] \n", 425 | "domain_physical_dimensions = [2,2]\n", 426 | "\n", 427 | "bspline = sitk.BSplineTransform(dimension, spline_order)\n", 428 | "bspline.SetTransformDomainOrigin(origin)\n", 429 | "bspline.SetTransformDomainDirection(direction_matrix_row_major)\n", 430 | "bspline.SetTransformDomainPhysicalDimensions(domain_physical_dimensions)\n", 431 | "bspline.SetTransformDomainMeshSize((4,3))\n", 432 | "\n", 433 | "# Random displacement of the control points.\n", 434 | "originalControlPointDisplacements = np.random.random(len(bspline.GetParameters()))\n", 435 | "bspline.SetParameters(originalControlPointDisplacements)\n", 436 | "\n", 437 | "# Apply the BSpline transformation to a grid of points \n", 438 | "# starting the point set exactly at the origin of the BSpline mesh is problematic as\n", 439 | "# these points are considered outside the transformation's domain,\n", 440 | "# remove epsilon below and see what happens.\n", 441 | "numSamplesX = 10\n", 442 | "numSamplesY = 20\n", 443 | " \n", 444 | "coordsX = np.linspace(origin[0]+np.finfo(float).eps, origin[0] + domain_physical_dimensions[0], numSamplesX)\n", 445 | "coordsY = np.linspace(origin[1]+np.finfo(float).eps, origin[1] + domain_physical_dimensions[1], numSamplesY)\n", 446 | "XX, YY = np.meshgrid(coordsX, coordsY)\n", 447 | "\n", 448 | "interact(util.display_displacement_scaling_effect, s= (-1.5,1.5), original_x_mat = fixed(XX), original_y_mat = fixed(YY),\n", 449 | "         tx = fixed(bspline), original_control_point_displacements = fixed(originalControlPointDisplacements)); " 450 | ] 451 | }, 452 | { 453 | "cell_type": "markdown", 454 | "metadata": {}, 455 | "source": [ 456 | "## DisplacementField\n", 457 | "\n", 458 | "A dense set of vectors representing the displacement inside the given domain. The most generic representation of a transformation." 459 | ] 460 | }, 461 | { 462 | "cell_type": "code", 463 | "execution_count": null, 464 | "metadata": {}, 465 | "outputs": [], 466 | "source": [ 467 | "# Create the displacement field.
\n", 468 | " \n", 469 | "# When working with images the safer thing to do is use the image based constructor,\n", 470 | "# sitk.DisplacementFieldTransform(my_image), all the fixed parameters will be set correctly and the displacement\n", 471 | "# field is initialized using the vectors stored in the image. SimpleITK requires that the image's pixel type be \n", 472 | "# sitk.sitkVectorFloat64.\n", 473 | "displacement = sitk.DisplacementFieldTransform(2)\n", 474 | "field_size = [10,20]\n", 475 | "field_origin = [-1.0,-1.0] \n", 476 | "field_spacing = [2.0/9.0,2.0/19.0] \n", 477 | "field_direction = [1,0,0,1] # direction cosine matrix (row major order) \n", 478 | "\n", 479 | "# Concatenate all the information into a single list\n", 480 | "displacement.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 481 | "# Set the interpolator, either sitkLinear which is default or nearest neighbor\n", 482 | "displacement.SetInterpolator(sitk.sitkNearestNeighbor)\n", 483 | "\n", 484 | "originalDisplacements = np.random.random(len(displacement.GetParameters()))\n", 485 | "displacement.SetParameters(originalDisplacements)\n", 486 | "\n", 487 | "coordsX = np.linspace(field_origin[0], field_origin[0]+(field_size[0]-1)*field_spacing[0], field_size[0])\n", 488 | "coordsY = np.linspace(field_origin[1], field_origin[1]+(field_size[1]-1)*field_spacing[1], field_size[1])\n", 489 | "XX, YY = np.meshgrid(coordsX, coordsY)\n", 490 | "\n", 491 | "interact(util.display_displacement_scaling_effect, s= (-1.5,1.5), original_x_mat = fixed(XX), original_y_mat = fixed(YY),\n", 492 | " tx = fixed(displacement), original_control_point_displacements = fixed(originalDisplacements)); " 493 | ] 494 | }, 495 | { 496 | "cell_type": "markdown", 497 | "metadata": {}, 498 | "source": [ 499 | "## Composite transform (Transform)\n", 500 | "\n", 501 | "The generic SimpleITK transform class. This class can represent both a single transformation (global, local), or a composite transformation (multiple transformations applied one after the other). This is the output typed returned by the SimpleITK registration framework. \n", 502 | "\n", 503 | "The choice of whether to use a composite transformation or compose transformations on your own has subtle differences in the registration framework.\n", 504 | "\n", 505 | "Composite transforms enable a combination of a global transformation with multiple local/bounded transformations. This is useful if we want to apply deformations only in regions that deform while other regions are only effected by the global transformation.\n", 506 | "\n", 507 | "The following code illustrates this, where the whole region is translated and subregions have different deformations." 
508 | ] 509 | }, 510 | { 511 | "cell_type": "code", 512 | "execution_count": null, 513 | "metadata": {}, 514 | "outputs": [], 515 | "source": [ 516 | "# Global transformation.\n", 517 | "translation = sitk.TranslationTransform(2,(1.0,0.0))\n", 518 | "\n", 519 | "# Displacement in region 1.\n", 520 | "displacement1 = sitk.DisplacementFieldTransform(2)\n", 521 | "field_size = [10,20]\n", 522 | "field_origin = [-1.0,-1.0] \n", 523 | "field_spacing = [2.0/9.0,2.0/19.0] \n", 524 | "field_direction = [1,0,0,1] # direction cosine matrix (row major order) \n", 525 | "\n", 526 | "# Concatenate all the information into a single list.\n", 527 | "displacement1.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 528 | "displacement1.SetParameters(np.ones(len(displacement1.GetParameters())))\n", 529 | "\n", 530 | "# Displacement in region 2.\n", 531 | "displacement2 = sitk.DisplacementFieldTransform(2)\n", 532 | "field_size = [10,20]\n", 533 | "field_origin = [1.0,-3] \n", 534 | "field_spacing = [2.0/9.0,2.0/19.0] \n", 535 | "field_direction = [1,0,0,1] #direction cosine matrix (row major order) \n", 536 | "\n", 537 | "# Concatenate all the information into a single list.\n", 538 | "displacement2.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 539 | "displacement2.SetParameters(-1.0*np.ones(len(displacement2.GetParameters())))\n", 540 | "\n", 541 | "# Composite transform which applies the global and local transformations.\n", 542 | "composite = sitk.Transform(translation)\n", 543 | "composite.AddTransform(displacement1)\n", 544 | "composite.AddTransform(displacement2)\n", 545 | "\n", 546 | "# Apply the composite transformation to points in ([-1,-3],[3,1]) and \n", 547 | "# display the deformation using a quiver plot.\n", 548 | " \n", 549 | "# Generate points.\n", 550 | "numSamplesX = 10\n", 551 | "numSamplesY = 10 \n", 552 | "coordsX = np.linspace(-1.0, 3.0, numSamplesX)\n", 553 | "coordsY = np.linspace(-3.0, 1.0, numSamplesY)\n", 554 | "XX, YY = np.meshgrid(coordsX, coordsY)\n", 555 | "\n", 556 | "# Transform points and compute deformation vectors.\n", 557 | "pointsX = np.zeros(XX.shape)\n", 558 | "pointsY = np.zeros(XX.shape)\n", 559 | "for index, value in np.ndenumerate(XX):\n", 560 | " px,py = composite.TransformPoint((value, YY[index]))\n", 561 | " pointsX[index]=px - value \n", 562 | " pointsY[index]=py - YY[index]\n", 563 | " \n", 564 | "plt.quiver(XX, YY, pointsX, pointsY); " 565 | ] 566 | }, 567 | { 568 | "cell_type": "markdown", 569 | "metadata": {}, 570 | "source": [ 571 | "## Writing and Reading\n", 572 | "\n", 573 | "The SimpleITK.ReadTransform() returns a SimpleITK.Transform . The content of the file can be any of the SimpleITK transformations or a composite (set of transformations). 
" 574 | ] 575 | }, 576 | { 577 | "cell_type": "code", 578 | "execution_count": null, 579 | "metadata": {}, 580 | "outputs": [], 581 | "source": [ 582 | "import os\n", 583 | "\n", 584 | "# Create a 2D rigid transformation, write it to disk and read it back.\n", 585 | "basic_transform = sitk.Euler2DTransform()\n", 586 | "basic_transform.SetTranslation((1.0,2.0))\n", 587 | "basic_transform.SetAngle(np.pi/2)\n", 588 | "\n", 589 | "full_file_name = os.path.join(OUTPUT_DIR, 'euler2D.tfm')\n", 590 | "\n", 591 | "sitk.WriteTransform(basic_transform, full_file_name)\n", 592 | "\n", 593 | "# The ReadTransform function returns an sitk.Transform no matter the type of the transform \n", 594 | "# found in the file (global, bounded, composite).\n", 595 | "read_result = sitk.ReadTransform(full_file_name)\n", 596 | "\n", 597 | "print('Different types: '+ str(type(read_result) != type(basic_transform)))\n", 598 | "util.print_transformation_differences(basic_transform, read_result)\n", 599 | "\n", 600 | "\n", 601 | "# Create a composite transform then write and read.\n", 602 | "displacement = sitk.DisplacementFieldTransform(2)\n", 603 | "field_size = [10,20]\n", 604 | "field_origin = [-10.0,-100.0] \n", 605 | "field_spacing = [20.0/(field_size[0]-1),200.0/(field_size[1]-1)] \n", 606 | "field_direction = [1,0,0,1] #direction cosine matrix (row major order)\n", 607 | "\n", 608 | "# Concatenate all the information into a single list.\n", 609 | "displacement.SetFixedParameters(field_size+field_origin+field_spacing+field_direction)\n", 610 | "displacement.SetParameters(np.random.random(len(displacement.GetParameters())))\n", 611 | "\n", 612 | "composite_transform = sitk.Transform(basic_transform)\n", 613 | "composite_transform.AddTransform(displacement)\n", 614 | "\n", 615 | "full_file_name = os.path.join(OUTPUT_DIR, 'composite.tfm')\n", 616 | "\n", 617 | "sitk.WriteTransform(composite_transform, full_file_name)\n", 618 | "read_result = sitk.ReadTransform(full_file_name)\n", 619 | "\n", 620 | "util.print_transformation_differences(composite_transform, read_result) " 621 | ] 622 | }, 623 | { 624 | "cell_type": "markdown", 625 | "metadata": {}, 626 | "source": [ 627 | "
Next »
" 628 | ] 629 | } 630 | ], 631 | "metadata": { 632 | "kernelspec": { 633 | "display_name": "Python 3", 634 | "language": "python", 635 | "name": "python3" 636 | }, 637 | "language_info": { 638 | "codemirror_mode": { 639 | "name": "ipython", 640 | "version": 3 641 | }, 642 | "file_extension": ".py", 643 | "mimetype": "text/x-python", 644 | "name": "python", 645 | "nbconvert_exporter": "python", 646 | "pygments_lexer": "ipython3", 647 | "version": "3.6.8" 648 | } 649 | }, 650 | "nbformat": 4, 651 | "nbformat_minor": 2 652 | } 653 | -------------------------------------------------------------------------------- /09_results_visualization.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
Visualization of Segmentation and Registration Results
\n", 8 | "\n", 9 | "In this notebook we illustrate various ways one can display the results of segmentation and registration algorithms so that they can be easily incorporated into a manuscript. For interactive data exploration we recommend using dedicated programs (e.g. 3D slicer). \n", 10 | "\n", 11 | "Two key points to remember when working with bio-medical images:\n", 12 | "\n", 13 | "1. Most often images have a high dynamic range. Thus, to write them to file in a format appropriate for use in a manuscript we will need to map the intensities to a low dynamic range (e.g. [0,255]). In SimpleITK this is readily done with the [IntensityWindowingImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1IntensityWindowingImageFilter.html).\n", 14 | "2. Images may have non-isotropic spacing between pixels. The file formats appropriate for use in a manuscript (e.g. png, jpg) assume isotropic pixel spacing. This requires that we resample the image before writing to disk. The function `make_isotropic` in the code cell bellow resolves this issue. \n", 15 | "\n", 16 | "The following filters and their procedural counterparts are useful for various image creation tasks, as illustrated in this notebook:\n", 17 | " * [CastImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1CastImageFilter.html)\n", 18 | " * [ResampleImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ResampleImageFilter.html), one of the more important filters in your toolbox.\n", 19 | " * [TileImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1TileImageFilter.html)\n", 20 | " * [CheckerBoardImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1CheckerBoardImageFilter.html)\n", 21 | " * [ComposeImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ComposeImageFilter.html)\n", 22 | " * [LabelToRGBImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LabelToRGBImageFilter.html)\n", 23 | " * [ScalarToRGBColormapImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ScalarToRGBColormapImageFilter.html)\n", 24 | " * [LabelOverlayImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LabelOverlayImageFilter.html)\n", 25 | " * [LabelContourImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1LabelContourImageFilter.html)\n", 26 | " * [LabelMapContourOverlayImageFilter](https://itk.org/SimpleITKDoxygen/html/namespaceitk_1_1simple.html#a4f6af69f85171e44bcff90d7860d456e)" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "%matplotlib notebook\n", 36 | "\n", 37 | "import numpy as np\n", 38 | "\n", 39 | "import SimpleITK as sitk\n", 40 | "# Utility method that either downloads data from the Girder repository or\n", 41 | "# if already downloaded returns the file name for reading from disk (cached data).\n", 42 | "from downloaddata import fetch_data as fdata\n", 43 | "\n", 44 | "import utilities\n", 45 | "import gui\n", 46 | "\n", 47 | "# Always write output to a separate directory, we don't want to pollute the source directory. \n", 48 | "import os\n", 49 | "OUTPUT_DIR = 'output'\n", 50 | "\n", 51 | "def make_isotropic(image, interpolator = sitk.sitkLinear):\n", 52 | " '''\n", 53 | " Resample an image to isotropic pixels (using smallest spacing from original) and save to file. Many file formats \n", 54 | " (jpg, png,...) expect the pixels to be isotropic. 
By default the function uses a linear interpolator. For\n", 55 | "    label images one should use the sitkNearestNeighbor interpolator so as not to introduce non-existent labels.\n", 56 | "    '''\n", 57 | "    original_spacing = image.GetSpacing()\n", 58 | "    # Image is already isotropic, just return a copy.\n", 59 | "    if all(spc == original_spacing[0] for spc in original_spacing):\n", 60 | "        return sitk.Image(image)\n", 61 | "    # Make image isotropic via resampling.\n", 62 | "    original_size = image.GetSize()\n", 63 | "    min_spacing = min(original_spacing)\n", 64 | "    new_spacing = [min_spacing]*image.GetDimension()\n", 65 | "    new_size = [int(round(osz*ospc/min_spacing)) for osz,ospc in zip(original_size, original_spacing)]\n", 66 | "    return sitk.Resample(image, new_size, sitk.Transform(), interpolator,\n", 67 | "                         image.GetOrigin(), new_spacing, image.GetDirection(), 0,\n", 68 | "                         image.GetPixelID())" 69 | ] 70 | }, 71 | { 72 | "cell_type": "markdown", 73 | "metadata": {}, 74 | "source": [ 75 | "# Combining two images\n", 76 | "\n", 77 | "There are a variety of ways we can overlay two (partially) overlapping images onto each other. The common approaches include:\n", 78 | "1. Use of alpha blending.\n", 79 | "2. Use of a checkerboard pattern with the pixel values in adjacent squares/boxes taken from each of the images.\n", 80 | "3. When the pixel values are scalars (gray scale images), combine the two images in different channels, resulting in a color image.\n", 81 | "\n", 82 | "We will start by loading two images whose content luckily overlaps in physical space. Before we can combine the two, we need to resample one of them so that they both occupy the same spatial region. In addition we should also rescale the intensities so that they occupy the same range. In our case we will map them to [0,255], based on the desired windowing."
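For reference, the mapping performed by the IntensityWindowingImageFilter is the standard linear window rescaling: intensities at or below windowMinimum map to outputMinimum, those at or above windowMaximum map to outputMaximum, and values in between are mapped linearly:

$$I_{output} = \frac{I_{input}-windowMinimum}{windowMaximum-windowMinimum}\,(outputMaximum-outputMinimum)+outputMinimum$$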
83 | ] 84 | }, 85 | { 86 | "cell_type": "code", 87 | "execution_count": null, 88 | "metadata": {}, 89 | "outputs": [], 90 | "source": [ 91 | "img1 = sitk.ReadImage(fdata(\"training_001_mr_T1.mha\"))\n", 92 | "img2_original = sitk.ReadImage(fdata(\"training_001_ct.mha\"))\n", 93 | "img2 = sitk.Resample(img2_original, img1)\n", 94 | "\n", 95 | "# Obtain foreground masks for the two images using Otsu thresholding, we use these later on.\n", 96 | "msk1 = sitk.OtsuThreshold(img1,0,1)\n", 97 | "msk2 = sitk.OtsuThreshold(img2,0,1)\n", 98 | "\n", 99 | "gui.MultiImageDisplay(image_list = [img1, img2], \n", 100 | " title_list = ['image1', 'image2'],\n", 101 | " figure_size=(9,3));" 102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "# Having identified the desired intensity range for each of the \n", 111 | "# images using the GUI above, we use these values to perform intensity windowing and map the intensity values\n", 112 | "# to [0,255] and cast to 8-bit unsigned int\n", 113 | "img1_255 = sitk.Cast(sitk.IntensityWindowing(img1, windowMinimum=2, windowMaximum=657, \n", 114 | " outputMinimum=0.0, outputMaximum=255.0), sitk.sitkUInt8)\n", 115 | "img2_255 = sitk.Cast(sitk.IntensityWindowing(img2, windowMinimum=-1018, windowMaximum=1126, \n", 116 | " outputMinimum=0.0, outputMaximum=255.0), sitk.sitkUInt8)" 117 | ] 118 | }, 119 | { 120 | "cell_type": "markdown", 121 | "metadata": {}, 122 | "source": [ 123 | "## Alpha blending\n", 124 | "\n", 125 | "Alpha blending combines the pixels from the two images as follows:\n", 126 | "$$\n", 127 | "I_{output} = \\alpha I_1 + (1-\\alpha)I_2,\\;\\;\\; \\alpha \\in[0.0,1.0]\n", 128 | "$$\n", 129 | "\n", 130 | "When our images consist of a foreground and background we can use alpha blending in a manner that takes this into account. Instead of blending all of the pixels using the formula above, we use this formula only in the regions where the foregrounds overlap. In regions where the foreground from one image overlaps with the background of the other we simply copy the foreground. This improves visibility as we are not blending a region that contains information with an empty region.\n", 131 | "\n", 132 | "The code below allows us to experiment with various alpha blending strategies." 
133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "def mask_image_multiply(mask, image):\n", 142 | "    components_per_pixel = image.GetNumberOfComponentsPerPixel()\n", 143 | "    if components_per_pixel == 1:\n", 144 | "        return mask*image\n", 145 | "    else:\n", 146 | "        return sitk.Compose([mask*sitk.VectorIndexSelectionCast(image,channel) for channel in range(components_per_pixel)])\n", 147 | "\n", 148 | "def alpha_blend(image1, image2, alpha = 0.5, mask1=None, mask2=None):\n", 149 | "    '''\n", 150 | "    Alpha blend two images, pixels can be scalars or vectors.\n", 151 | "    The region that is alpha blended is controlled by the given masks.\n", 152 | "    '''\n", 153 | "    \n", 154 | "    if not mask1:\n", 155 | "        mask1 = sitk.Image(image1.GetSize(), sitk.sitkFloat32) + 1.0\n", 156 | "        mask1.CopyInformation(image1)\n", 157 | "    else:\n", 158 | "        mask1 = sitk.Cast(mask1, sitk.sitkFloat32)\n", 159 | "    if not mask2:\n", 160 | "        mask2 = sitk.Image(image2.GetSize(),sitk.sitkFloat32) + 1\n", 161 | "        mask2.CopyInformation(image2)\n", 162 | "    else:        \n", 163 | "        mask2 = sitk.Cast(mask2, sitk.sitkFloat32)\n", 164 | "\n", 165 | "    components_per_pixel = image1.GetNumberOfComponentsPerPixel()\n", 166 | "    if components_per_pixel>1:\n", 167 | "        img1 = sitk.Cast(image1, sitk.sitkVectorFloat32)\n", 168 | "        img2 = sitk.Cast(image2, sitk.sitkVectorFloat32)\n", 169 | "    else:\n", 170 | "        img1 = sitk.Cast(image1, sitk.sitkFloat32)\n", 171 | "        img2 = sitk.Cast(image2, sitk.sitkFloat32)\n", 172 | "        \n", 173 | "    intersection_mask = mask1*mask2\n", 174 | "    \n", 175 | "    intersection_image = mask_image_multiply(alpha*intersection_mask, img1) + \\\n", 176 | "                         mask_image_multiply((1-alpha)*intersection_mask, img2)\n", 177 | "    return intersection_image + mask_image_multiply(mask2-intersection_mask, img2) + \\\n", 178 | "           mask_image_multiply(mask1-intersection_mask, img1)" 179 | ] 180 | }, 181 | { 182 | "cell_type": "markdown", 183 | "metadata": {}, 184 | "source": [ 185 | "We now create 3D images using all four combinations of alpha-blending and masks. As we are working with a 3D image and we want to save it as a figure for use in a manuscript, we will create a 2D montage image using the axial slices from the volumes." 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": null, 191 | "metadata": {}, 192 | "outputs": [], 193 | "source": [ 194 | "# Combine the two volumes\n", 195 | "images_list = [(alpha_blend(img1_255, img2_255), 'alpha_blend_standard'), \n", 196 | "               (alpha_blend(img1_255, img2_255, mask1=msk1), 'alpha_blend_mask1'),\n", 197 | "               (alpha_blend(img1_255, img2_255, mask2=msk2),'alpha_blend_mask2'),\n", 198 | "               (alpha_blend(img1_255, img2_255, mask1=msk1, mask2=msk2),'alpha_blend_mask1_mask2')]\n", 199 | "\n", 200 | "# Tile the volumes using the x-y plane (axial slices)\n", 201 | "all_montages = []\n", 202 | "for img,img_name in images_list:\n", 203 | "    num_slices = img.GetDepth()\n", 204 | "    tile_w = int(np.sqrt(num_slices))\n", 205 | "    tile_h = int(np.ceil(num_slices/tile_w))\n", 206 | "    tile_image = sitk.Tile([img[:,:,i] for i in range(num_slices)], (tile_w, tile_h))\n", 207 | "    sitk.WriteImage(sitk.Cast(tile_image, sitk.sitkUInt8), os.path.join(OUTPUT_DIR,img_name+'.png'))\n", 208 | "    all_montages.append(tile_image)\n", 209 | "\n", 210 | "# Display all montages by combining them into a faux volume.
Notice that scrolling through this\n", 211 | "# volume creates the illusion of motion due to the change in intensities (the interested\n", 212 | "# reader is referred to \"Visual dissociations of movement, position, and stereo depth: Some phenomenal \n", 213 | "# phenomena\", R. L. Gregory, P. F. Heard).\n", 214 | "gui.MultiImageDisplay(image_list = [sitk.JoinSeries(all_montages)],\n", 215 | " title_list = ['Montages With Different Alpha Blending Strategies'],\n", 216 | " figure_size=(8,8));" 217 | ] 218 | }, 219 | { 220 | "cell_type": "markdown", 221 | "metadata": {}, 222 | "source": [ 223 | "## Checkerboard\n", 224 | "\n", 225 | "Combine the original and the intensity windowed images using the checkerboard pattern. This illustrates the need to map both images to the same intensity range. This is particularly visible in the background region, where both images contain air. \n", 226 | "\n", 227 | "You can specify the number of checkerboard tiles per dimension as illustrated below. The actual number of checkerboard tiles depends on the number of pixels per dimension and the specified number of tiles. You may get more tiles than specified, for example try specifying [4,4,7] below." 228 | ] 229 | }, 230 | { 231 | "cell_type": "code", 232 | "execution_count": null, 233 | "metadata": {}, 234 | "outputs": [], 235 | "source": [ 236 | "gui.MultiImageDisplay(image_list = [sitk.CheckerBoard(img1, img2, [4,4,4]), sitk.CheckerBoard(img1_255, img2_255, (10,10,4))],\n", 237 | " title_list = ['original intensities', 'rescaled intensities'],\n", 238 | " figure_size=(9,3));" 239 | ] 240 | }, 241 | { 242 | "cell_type": "markdown", 243 | "metadata": {}, 244 | "source": [ 245 | "## Combine scalar images to create color image\n", 246 | "\n", 247 | "There are a variety of ways we can combine the scalar images to create a color image. Some of these combinations should be avoided as they are not discernible by a significant portion of the population (i.e. red-green channel encoding). For additional details see:\n", 248 | "\n", 249 | "M. Geissbuehler, T. Lasser, \"How to display data by color schemes compatible with red-green color perception deficiencies\", Opt Express., 21(8):9862-74, 2013. \n", 250 | "\n", 251 | "\n" 252 | ] 253 | }, 254 | { 255 | "cell_type": "code", 256 | "execution_count": null, 257 | "metadata": {}, 258 | "outputs": [], 259 | "source": [ 260 | "zeros = sitk.Image(img1_255.GetSize(), img1_255.GetPixelID())\n", 261 | "zeros.CopyInformation(img1_255)\n", 262 | "\n", 263 | "gui.MultiImageDisplay(image_list = [sitk.Cast(sitk.Compose(img1_255, img2_255, zeros), sitk.sitkVectorUInt8),\n", 264 | " sitk.Cast(sitk.Compose(img1_255, img2_255, img1_255), sitk.sitkVectorUInt8),\n", 265 | " sitk.Cast(sitk.Compose(img1_255, 0.5*img1_255+0.5*img2_255, img2_255), sitk.sitkVectorUInt8)],\n", 266 | " title_list= ['avoid red-green', 'use magenta-green', 'use orange-blue'],\n", 267 | " figure_size=(9,3));" 268 | ] 269 | }, 270 | { 271 | "cell_type": "markdown", 272 | "metadata": {}, 273 | "source": [ 274 | "## Overlay scalar image onto another via color map\n", 275 | "\n", 276 | "In some situations we have an underlying image (spatial structures) and we want to overlay a scalar based pseudo-color image on top of it.\n", 277 | "\n", 278 | "This is relevant for presenting co-registered PET/CT data, with the PET providing functional information and the CT providing the underlying spatial structures. 
A similar use case in the context of deep learning is to display activation maps illustrating the regions in an image on which the network is focusing its attention for the particular class.\n", 279 | "\n", 280 | "The two main decisions we make are:\n", 281 | "1. Selection of pseudo-color scheme using the [ScalarToRGBColormapImageFilter](https://itk.org/SimpleITKDoxygen/html/classitk_1_1simple_1_1ScalarToRGBColormapImageFilter.html) which supports a variety of color maps.\n", 282 | "2. Alpha blending approach (alpha value and usage of masks as done above).\n" 283 | ] 284 | }, 285 | { 286 | "cell_type": "code", 287 | "execution_count": null, 288 | "metadata": {}, 289 | "outputs": [], 290 | "source": [ 291 | "# Create a signed distance map which we will overlay onto the original image using \n", 292 | "# pseudo-coloring. We are only interested in locations that are at a distance of [0,512] from the object \n", 293 | "distance_map = sitk.SignedMaurerDistanceMap(msk1)\n", 294 | "# Get binary region of interest mask.\n", 295 | "roi = sitk.Cast(distance_map>0.0, sitk.sitkFloat32)*sitk.Cast(distance_map<512.0, sitk.sitkFloat32)\n", 296 | "roi_distance_map = roi*distance_map\n", 297 | "overlay_color_img = sitk.ScalarToRGBColormap(roi_distance_map, \n", 298 | " sitk.ScalarToRGBColormapImageFilter.Jet)\n", 299 | "\n", 300 | "# Combine the color overlay volume with the spatial structure volume using alpha blending\n", 301 | "# and cast to a three component vector 8 bit unsigned int. We can readily incorporate a\n", 302 | "# mask into the blend (pun intended). By adding mask2=roi we can limit the overlay to\n", 303 | "# the region of interest.\n", 304 | "combined_volume = sitk.Cast(alpha_blend(sitk.Compose(img1_255, img1_255, img1_255), \n", 305 | " overlay_color_img), \n", 306 | " sitk.sitkVectorUInt8)\n", 307 | "\n", 308 | "# Given a volume we can either create a montage as above or we can take a representative\n", 309 | "# slice (axial/sagittal/coronal). As image formats used in manuscripts assume isotropic \n", 310 | "# pixels we need to ensure this before we write to disk.\n", 311 | "all_central_slices = [combined_volume[:,:,int(combined_volume.GetDepth()/2.0 + 0.5)],\n", 312 | " combined_volume[:,int(combined_volume.GetHeight()/2.0 + 0.5),:],\n", 313 | " combined_volume[int(combined_volume.GetWidth()/2.0 + 0.5),:,:]]\n", 314 | "\n", 315 | "# Resample to isotropic pixels and write to file.\n", 316 | "for i, img in enumerate(all_central_slices):\n", 317 | " all_central_slices[i] = make_isotropic(img)\n", 318 | " sitk.WriteImage(all_central_slices[i], \n", 319 | " os.path.join(OUTPUT_DIR,'color_overlay{0}.png'.format(i)))\n", 320 | " \n", 321 | "gui.multi_image_display2D([sitk.Tile(all_central_slices,(1,3))], \n", 322 | " figure_size=(4,4),horizontal=False);" 323 | ] 324 | }, 325 | { 326 | "cell_type": "markdown", 327 | "metadata": {}, 328 | "source": [ 329 | "# Combining an image and segmentation\n", 330 | "\n", 331 | "To display the results of segmentation in context, we need to overlay them onto the original image. There are two common options for doing this:\n", 332 | " 1. Map the segmentation labels to a color image and alpha blend onto the original image.\n", 333 | " 2. Overlay the segmentation boundaries onto the original image.\n", 334 | "\n", 335 | "We illustrate both approaches below.\n", 336 | "\n", 337 | "For this example we use the Point-validated Pixel-based Breathing Thorax Model (POPI) model. The model is provided by the Léon Bérard Cancer Center & CREATIS Laboratory, Lyon, France. 
The relevant publication is:\n", 338 | "\n", 339 | "J. Vandemeulebroucke, D. Sarrut, P. Clarysse, \"The POPI-model, a point-validated pixel-based breathing thorax model\", Proc. XVth International Conference on the Use of Computers in Radiation Therapy (ICCR), Toronto, Canada, 2007." 340 | ] 341 | }, 342 | { 343 | "cell_type": "code", 344 | "execution_count": null, 345 | "metadata": {}, 346 | "outputs": [], 347 | "source": [ 348 | "img = sitk.ReadImage(fdata('POPI/meta/00-P.mhd'))\n", 349 | "segmentation = sitk.ReadImage(fdata('POPI/masks/00-air-body-lungs.mhd'))\n", 350 | "\n", 351 | "gui.MultiImageDisplay(image_list = [img, segmentation, sitk.LabelToRGB(segmentation)], \n", 352 | "                      title_list = ['image', 'raw segmentation labels', 'segmentation labels in color'],\n", 353 | "                      figure_size=(9,3), shared_slider=True);" 354 | ] 355 | }, 356 | { 357 | "cell_type": "markdown", 358 | "metadata": {}, 359 | "source": [ 360 | "We will work with the central coronal slice from this dataset." 361 | ] 362 | }, 363 | { 364 | "cell_type": "code", 365 | "execution_count": null, 366 | "metadata": {}, 367 | "outputs": [], 368 | "source": [ 369 | "# Identify the desired intensity range for our image using the GUI above, mapping the high dynamic range\n", 370 | "# image to a low dynamic range, [0,255], extract the central coronal slice and we flip it for display purposes.\n", 371 | "coronal_255 = sitk.Cast(sitk.IntensityWindowing(img[:,int(img.GetHeight()/2),:][:,::-1], \n", 372 | "                                                windowMinimum=-1000, windowMaximum=170, \n", 373 | "                                                outputMinimum=0.0, outputMaximum=255.0), sitk.sitkUInt8)\n", 374 | "coronal_255_isotropic = make_isotropic(coronal_255)\n", 375 | "\n", 376 | "coronal_segmentation = segmentation[:,int(segmentation.GetHeight()/2),:][:,::-1]\n", 377 | "# Use nearest neighbor interpolation for a label image.\n", 378 | "coronal_segmentation_isotropic = make_isotropic(coronal_segmentation, sitk.sitkNearestNeighbor)" 379 | ] 380 | }, 381 | { 382 | "cell_type": "markdown", 383 | "metadata": {}, 384 | "source": [ 385 | "In many cases the values in a label image are not appropriate for direct display. For instance the values in our\n", 386 | "label image are 0,1,2. You can see the values if you hover your cursor over the raw segmentation label image above (figure's bottom right corner). \n", 387 | "\n", 388 | "In theory we could map these intensities to [0,255] and save the image. In practice we may have more than 256 labels and therefore it is preferable to map the labels to colors and save the color image." 389 | ] 390 | }, 391 | { 392 | "cell_type": "code", 393 | "execution_count": null, 394 | "metadata": {}, 395 | "outputs": [], 396 | "source": [ 397 | "# Use the default color map when mapping labels to colors and write the image.\n", 398 | "sitk.WriteImage(sitk.LabelToRGB(coronal_segmentation_isotropic),os.path.join(OUTPUT_DIR, 'coronal_segmentation.png'))" 399 | ] 400 | }, 401 | { 402 | "cell_type": "markdown", 403 | "metadata": {}, 404 | "source": [ 405 | "## Overlay segmentation labels onto original image\n", 406 | "\n", 407 | "SimpleITK allows you to overlay the segmentation labels onto the original image using a color map and alpha blending. You can specify the value for alpha blending, the color map (there is a default color map), and the background label value which will not be overlaid with a label.\n", 408 | "\n", 409 | "The color map in SimpleITK is a set of values in the RGB color space strung together. For example [255, 0, 0, 0, 255, 0] is a two-entry color map with red and green.
To create a human-readable color map, use lists to represent the colors and string them together, as done below." 410 | ] 411 | }, 412 | { 413 | "cell_type": "code", 414 | "execution_count": null, 415 | "metadata": {}, 416 | "outputs": [], 417 | "source": [ 418 | "# Overlay the segmentation using default color map and an alpha value of 0.5\n", 419 | "coronal_combined1 = sitk.LabelOverlay(image=coronal_255_isotropic, \n", 420 | "                                      labelImage=coronal_segmentation_isotropic,\n", 421 | "                                      opacity=0.5, backgroundValue=utilities.popi_air_label)\n", 422 | "\n", 423 | "# Create an \"interesting\" color map and specify backgroundValue to a non existent label\n", 424 | "# so that the background label is also overlaid.\n", 425 | "pink= [255,105,180]\n", 426 | "green = [0,255,0]\n", 427 | "gold = [255,215,0]\n", 428 | "coronal_combined2 = sitk.LabelOverlay(image=coronal_255_isotropic, \n", 429 | "                                      labelImage=coronal_segmentation_isotropic,\n", 430 | "                                      opacity=0.5, backgroundValue = -1.0,\n", 431 | "                                      colormap=pink+green+gold)\n", 432 | "\n", 433 | "# Display the two images as a faux volume, JoinSeries, approach.\n", 434 | "gui.MultiImageDisplay(image_list = [sitk.JoinSeries([coronal_combined1, coronal_combined2])], \n", 435 | "                      title_list = ['overlaid labels'],\n", 436 | "                      figure_size=(9,3), shared_slider=True);" 437 | ] 438 | }, 439 | { 440 | "cell_type": "markdown", 441 | "metadata": {}, 442 | "source": [ 443 | "## Overlay segmentation boundaries onto original image\n", 444 | "\n", 445 | "We can readily obtain the segmentation boundaries from the raw segmentation. We then either just save the contours as an image or we can directly overlay them onto the image. \n", 446 | "\n", 447 | "Some points to note:\n", 448 | " 1. When working with 3D images and segmentations, our boundaries are surfaces. When these surfaces are intersected with 2D planes they may define a region and not a contour, which is what we usually expect (e.g. slice 24 in the results displayed by the following code cell).\n", 449 | " 2. When the labels are next to each other, they share a boundary. As a consequence, drawing the boundaries may result in contours overwriting each other or in contour crossings." 450 | ] 451 | }, 452 | { 453 | "cell_type": "code", 454 | "execution_count": null, 455 | "metadata": {}, 456 | "outputs": [], 457 | "source": [ 458 | "red = [255,0,0]\n", 459 | "green = [0,255,0]\n", 460 | "blue = [0,0,255]\n", 461 | "\n", 462 | "# red goes to the first label, green to second, blue to third\n", 463 | "# utilities.popi_body_label=0, utilities.popi_air_label=1, utilities.popi_lung_label=2 \n", 464 | "contour_image = sitk.LabelToRGB(sitk.LabelContour(segmentation, fullyConnected=True, backgroundValue=255), \n", 465 | "                                colormap=red+green+blue , backgroundValue=255)\n", 466 | "gui.MultiImageDisplay(image_list = [contour_image], \n", 467 | "                      figure_size=(9,3));" 468 | ] 469 | }, 470 | { 471 | "cell_type": "markdown", 472 | "metadata": {}, 473 | "source": [ 474 | "In the following code cell we overlay the contours onto the original image. We use a filter that is intended for usage with a label-map image as opposed to a label image. The former is a more efficient data structure for representing segmentations (run-length encoded). We therefore need to cast the label image to a label-map image.\n", 475 | "We can also set several visualization-related parameters such as overlay opacity, contour thickness, priority of overlay (which label overwrites which if they overlap) etc."
476 | ] 477 | }, 478 | { 479 | "cell_type": "code", 480 | "execution_count": null, 481 | "metadata": {}, 482 | "outputs": [], 483 | "source": [ 484 | "contour_overlaid_image = sitk.LabelMapContourOverlay(sitk.Cast(coronal_segmentation_isotropic, sitk.sitkLabelUInt8), \n", 485 | " coronal_255_isotropic, \n", 486 | " opacity = 1, \n", 487 | " contourThickness=[4,4],\n", 488 | " dilationRadius= [3,3],\n", 489 | " colormap=red+green+blue)\n", 490 | "gui.multi_image_display2D([contour_overlaid_image], figure_size=(6,3));" 491 | ] 492 | }, 493 | { 494 | "cell_type": "markdown", 495 | "metadata": {}, 496 | "source": [ 497 | "# Comparing two segmentations\n", 498 | "\n", 499 | "In this section we show how to create a binary image illustrating all the locations where two segmentations differ. This is a trivial one liner in SimpleITK.\n", 500 | "\n", 501 | "The following cell modifies our original coronal segmentation by dilating the body region in the top half of the image and dilating the lung region in the bottom half of the image." 502 | ] 503 | }, 504 | { 505 | "cell_type": "code", 506 | "execution_count": null, 507 | "metadata": {}, 508 | "outputs": [], 509 | "source": [ 510 | "binary_dilate_filter = sitk.BinaryDilateImageFilter()\n", 511 | "binary_dilate_filter.SetKernelRadius(2)\n", 512 | "mid_size = int(coronal_segmentation_isotropic.GetHeight()/2)\n", 513 | "\n", 514 | "# Over-segment the body region on the upper image region.\n", 515 | "binary_dilate_filter.SetForegroundValue(utilities.popi_body_label)\n", 516 | "top_segmentation = binary_dilate_filter.Execute(coronal_segmentation_isotropic[:,0:mid_size])\n", 517 | "# Over-segment the lung region on the lower image region.\n", 518 | "binary_dilate_filter.SetForegroundValue(utilities.popi_lung_label)\n", 519 | "bottom_segmentation = binary_dilate_filter.Execute(coronal_segmentation_isotropic[:,mid_size:])\n", 520 | "\n", 521 | "modified_segmentation = sitk.Tile(top_segmentation,bottom_segmentation, (1,2))\n", 522 | "modified_segmentation.CopyInformation(coronal_segmentation_isotropic)\n", 523 | "# Faux volume which allows us to visually compare the two segmentations by switching back and\n", 524 | "# forth between them.\n", 525 | "gui.MultiImageDisplay(image_list = [sitk.JoinSeries(coronal_segmentation_isotropic, modified_segmentation)], \n", 526 | " figure_size=(6,3));" 527 | ] 528 | }, 529 | { 530 | "cell_type": "markdown", 531 | "metadata": {}, 532 | "source": [ 533 | "To see where the two segmentations differ, we directly compare them. If we don't want to waste ink, we can invert the result so that black pixels are the foreground and white the background. 
" 534 | ] 535 | }, 536 | { 537 | "cell_type": "code", 538 | "execution_count": null, 539 | "metadata": {}, 540 | "outputs": [], 541 | "source": [ 542 | "diff_image = (coronal_segmentation_isotropic!=modified_segmentation)\n", 543 | "sitk.WriteImage(diff_image*255, os.path.join(OUTPUT_DIR,'segmentation_differences.jpg'))\n", 544 | "sitk.WriteImage((diff_image!=1)*255, os.path.join(OUTPUT_DIR,'segmentation_differences_inverted.jpg'))" 545 | ] 546 | } 547 | ], 548 | "metadata": { 549 | "kernelspec": { 550 | "display_name": "Python 3", 551 | "language": "python", 552 | "name": "python3" 553 | }, 554 | "language_info": { 555 | "codemirror_mode": { 556 | "name": "ipython", 557 | "version": 3 558 | }, 559 | "file_extension": ".py", 560 | "mimetype": "text/x-python", 561 | "name": "python", 562 | "nbconvert_exporter": "python", 563 | "pygments_lexer": "ipython3", 564 | "version": "3.7.4" 565 | } 566 | }, 567 | "nbformat": 4, 568 | "nbformat_minor": 2 569 | } 570 | -------------------------------------------------------------------------------- /05_advanced_registration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
Advanced Registration
\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "1. SimpleITK provides two flavors of non-rigid registration:\n", 12 | " * Free Form Deformation, BSpline based, and Demons using the ITKv4 registration framework.\n", 13 | " * A set of Demons filters that are independent of the registration framework (`DemonsRegistrationFilter, DiffeomorphicDemonsRegistrationFilter, FastSymmetricForcesDemonsRegistrationFilter, SymmetricForcesDemonsRegistrationFilter`).\n", 14 | "2. Registration evaluation:\n", 15 | " * Registration accuracy, the quantity of interest is the Target Registration Error (TRE).\n", 16 | " * TRE is spatially variant.\n", 17 | " * Surrogate metrics for evaluating registration accuracy such as segmentation overlaps are relevant, but are potentially deficient.\n", 18 | " * Registration time.\n", 19 | " * Acceptable values for TRE and runtime are context dependent." 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "import SimpleITK as sitk\n", 29 | "import registration_gui as rgui\n", 30 | "import utilities \n", 31 | "\n", 32 | "from downloaddata import fetch_data as fdata\n", 33 | "\n", 34 | "from ipywidgets import interact, fixed\n", 35 | "\n", 36 | "%matplotlib inline\n", 37 | "import matplotlib.pyplot as plt\n", 38 | "\n", 39 | "import numpy as np" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "## Data and Registration Task\n", 47 | "\n", 48 | "In this notebook we will use the Point-validated Pixel-based Breathing Thorax Model (POPI). This is a 4D (3D+time) thoracic-abdominal CT (10 CTs representing the respiratory cycle) with masks segmenting each of the CTs to air/body/lung, and a set of corresponding landmarks localized in each of the CT volumes.\n", 49 | "\n", 50 | "The registration problem we deal with is non-rigid alignment of the lungs throughout the respiratory cycle. This information is relevant for radiation therapy planning and execution.\n", 51 | "\n", 52 | "\n", 53 | "The POPI model is provided by the Léon Bérard Cancer Center & CREATIS Laboratory, Lyon, France. The relevant publication is:\n", 54 | "\n", 55 | "J. Vandemeulebroucke, D. Sarrut, P. Clarysse, \"The POPI-model, a point-validated pixel-based breathing thorax model\",\n", 56 | "Proc. XVth International Conference on the Use of Computers in Radiation Therapy (ICCR), Toronto, Canada, 2007.\n", 57 | "\n", 58 | "Additional 4D CT data sets with reference points are available from the CREATIS Laboratory here. 
" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "images = []\n", 68 | "masks = []\n", 69 | "points = []\n", 70 | "image_indexes = [0,7]\n", 71 | "for i in image_indexes:\n", 72 | " image_file_name = 'POPI/meta/{0}0-P.mhd'.format(i)\n", 73 | " mask_file_name = 'POPI/masks/{0}0-air-body-lungs.mhd'.format(i)\n", 74 | " points_file_name = 'POPI/landmarks/{0}0-Landmarks.pts'.format(i)\n", 75 | " images.append(sitk.ReadImage(fdata(image_file_name), sitk.sitkFloat32)) \n", 76 | " masks.append(sitk.ReadImage(fdata(mask_file_name)))\n", 77 | " points.append(utilities.read_POPI_points(fdata(points_file_name)))\n", 78 | " \n", 79 | "interact(rgui.display_coronal_with_overlay, temporal_slice=(0,len(images)-1), \n", 80 | " coronal_slice = (0, images[0].GetSize()[1]-1), \n", 81 | " images = fixed(images), masks = fixed(masks), \n", 82 | " label=fixed(utilities.popi_lung_label), window_min = fixed(-1024), window_max=fixed(976));" 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Free Form Deformation\n", 90 | "\n", 91 | "Define a BSplineTransform using a sparse set of grid points overlaid onto the fixed image's domain to deform it.\n", 92 | "\n", 93 | "For the current registration task we are fortunate in that we have a unique setting. The images are of the same patient during respiration so we can initialize the registration using the identity transform. Additionally, we have masks demarcating the lungs.\n", 94 | "\n", 95 | "We use the registration framework taking advantage of its ability to use masks that limit the similarity metric estimation to points lying inside our region of interest, the lungs." 96 | ] 97 | }, 98 | { 99 | "cell_type": "code", 100 | "execution_count": null, 101 | "metadata": {}, 102 | "outputs": [], 103 | "source": [ 104 | "fixed_index = 0\n", 105 | "moving_index = 1\n", 106 | "\n", 107 | "fixed_image = images[fixed_index]\n", 108 | "fixed_image_mask = masks[fixed_index] == utilities.popi_lung_label\n", 109 | "fixed_points = points[fixed_index]\n", 110 | "\n", 111 | "moving_image = images[moving_index]\n", 112 | "moving_image_mask = masks[moving_index] == utilities.popi_lung_label\n", 113 | "moving_points = points[moving_index]" 114 | ] 115 | }, 116 | { 117 | "cell_type": "code", 118 | "execution_count": null, 119 | "metadata": {}, 120 | "outputs": [], 121 | "source": [ 122 | "# Define a simple callback which allows us to monitor registration progress.\n", 123 | "def iteration_callback(filter):\n", 124 | " print('\\r{0:.2f}'.format(filter.GetMetricValue()), end='')\n", 125 | "\n", 126 | "registration_method = sitk.ImageRegistrationMethod()\n", 127 | " \n", 128 | "# Determine the number of BSpline control points using the physical \n", 129 | "# spacing we want for the finest resolution control grid. 
\n", 130 | "grid_physical_spacing = [50.0, 50.0, 50.0] # A control point every 50mm\n", 131 | "image_physical_size = [size*spacing for size,spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing())]\n", 132 | "mesh_size = [int(image_size/grid_spacing + 0.5) \\\n", 133 | " for image_size,grid_spacing in zip(image_physical_size,grid_physical_spacing)]\n", 134 | "# The starting mesh size will be 1/4 of the original, it will be refined by \n", 135 | "# the multi-resolution framework.\n", 136 | "mesh_size = [int(sz/4 + 0.5) for sz in mesh_size]\n", 137 | "\n", 138 | "initial_transform = sitk.BSplineTransformInitializer(image1 = fixed_image, \n", 139 | " transformDomainMeshSize = mesh_size, order=3) \n", 140 | "# Instead of the standard SetInitialTransform we use the BSpline specific method which also\n", 141 | "# accepts the scaleFactors parameter to refine the BSpline mesh. In this case we start with \n", 142 | "# the given mesh_size at the highest pyramid level then we double it in the next lower level and\n", 143 | "# in the full resolution image we use a mesh that is four times the original size.\n", 144 | "registration_method.SetInitialTransformAsBSpline(initial_transform,\n", 145 | " inPlace=False,\n", 146 | " scaleFactors=[1,2,4])\n", 147 | "\n", 148 | "registration_method.SetMetricAsMeanSquares()\n", 149 | "registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 150 | "registration_method.SetMetricSamplingPercentage(0.01)\n", 151 | "registration_method.SetMetricFixedMask(fixed_image_mask)\n", 152 | " \n", 153 | "registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])\n", 154 | "registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])\n", 155 | "registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n", 156 | "\n", 157 | "registration_method.SetInterpolator(sitk.sitkLinear)\n", 158 | "registration_method.SetOptimizerAsLBFGS2(solutionAccuracy=1e-2, numberOfIterations=100, deltaConvergenceTolerance=0.01)\n", 159 | "\n", 160 | "registration_method.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(registration_method))\n", 161 | "\n", 162 | "final_transformation = registration_method.Execute(fixed_image, moving_image)\n", 163 | "print('\\nOptimizer\\'s stopping condition, {0}'.format(registration_method.GetOptimizerStopConditionDescription()))" 164 | ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "## Qualitative evaluation via segmentation transfer\n", 171 | "\n", 172 | "Transfer the segmentation from the moving image to the fixed image before and after registration and visually evaluate overlap." 
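,
    "\n",
    "Besides the interactive overlay below, it can be handy to save a static difference image of the two masks for offline review, mirroring the `diff_image` approach used in the preceding notebook. A minimal sketch, assuming `OUTPUT_DIR` is defined as in the other notebooks:\n",
    "\n",
    "```python\n",
    "import os\n",
    "\n",
    "# Resample the moving mask onto the fixed image grid (as in the next cell),\n",
    "# then write the voxel-wise disagreement between the masks to disk.\n",
    "# OUTPUT_DIR is assumed to be defined as in the setup of the other notebooks.\n",
    "resampled_mask = sitk.Resample(moving_image_mask, fixed_image, final_transformation,\n",
    "                               sitk.sitkNearestNeighbor, 0.0, moving_image_mask.GetPixelID())\n",
    "diff_image = fixed_image_mask != resampled_mask\n",
    "sitk.WriteImage(diff_image * 255, os.path.join(OUTPUT_DIR, 'mask_differences.mha'))\n",
    "```"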
173 | ] 174 | }, 175 | { 176 | "cell_type": "code", 177 | "execution_count": null, 178 | "metadata": {}, 179 | "outputs": [], 180 | "source": [ 181 | "transformed_segmentation = sitk.Resample(moving_image_mask,\n", 182 | " fixed_image,\n", 183 | " final_transformation, \n", 184 | " sitk.sitkNearestNeighbor,\n", 185 | " 0.0, \n", 186 | " moving_image_mask.GetPixelID())\n", 187 | "\n", 188 | "interact(rgui.display_coronal_with_overlay, temporal_slice=(0,1), \n", 189 | " coronal_slice = (0, fixed_image.GetSize()[1]-1), \n", 190 | " images = fixed([fixed_image,fixed_image]), masks = fixed([moving_image_mask, transformed_segmentation]), \n", 191 | " label=fixed(1), window_min = fixed(-1024), window_max=fixed(976));" 192 | ] 193 | }, 194 | { 195 | "cell_type": "markdown", 196 | "metadata": {}, 197 | "source": [ 198 | "### Quantitative evaluation\n", 199 | "\n", 200 | "The most appropriate evaluation is based on analysis of Target Registration Errors (TRE), defined as follows:\n", 201 | "\n", 202 | "Given the transformation $T_f^m$ and corresponding points in the two coordinate systems, $^fp,^mp$, which were not used in the registration process, TRE is defined as $\\|T_f^m(^fp) - ^mp\\|$.\n", 203 | "\n", 204 | "We start by looking at some descriptive statistics of TRE:" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": null, 210 | "metadata": {}, 211 | "outputs": [], 212 | "source": [ 213 | "initial_TRE = utilities.target_registration_errors(sitk.Transform(), fixed_points, moving_points)\n", 214 | "final_TRE = utilities.target_registration_errors(final_transformation, fixed_points, moving_points)\n", 215 | "\n", 216 | "print('Initial alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(np.mean(initial_TRE), \n", 217 | " np.std(initial_TRE), \n", 218 | " np.max(initial_TRE)))\n", 219 | "print('Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(np.mean(final_TRE), \n", 220 | " np.std(final_TRE), \n", 221 | " np.max(final_TRE)))" 222 | ] 223 | }, 224 | { 225 | "cell_type": "markdown", 226 | "metadata": {}, 227 | "source": [ 228 | "The above descriptive statistics do not convey the whole picture; we should also look at the TRE distributions before and after registration." 229 | ] 230 | }, 231 | { 232 | "cell_type": "code", 233 | "execution_count": null, 234 | "metadata": {}, 235 | "outputs": [], 236 | "source": [ 237 | "plt.hist(initial_TRE, bins=20, alpha=0.5, label='before registration', color='blue')\n", 238 | "plt.hist(final_TRE, bins=20, alpha=0.5, label='after registration', color='green')\n", 239 | "plt.legend()\n", 240 | "plt.title('TRE histogram');" 241 | ] 242 | }, 243 | { 244 | "cell_type": "markdown", 245 | "metadata": {}, 246 | "source": [ 247 | "Finally, we should take into account the fact that TRE is spatially variant (think center of rotation). We should therefore also explore the distribution of errors as a function of the point location." 
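,
    "\n",
    "For reference, the per-point TRE computation is short. A minimal sketch of the quantity defined above (a hypothetical reimplementation, not necessarily the actual code of `utilities.target_registration_errors`):\n",
    "\n",
    "```python\n",
    "# Hypothetical helper mirroring what utilities.target_registration_errors computes.\n",
    "def tre(transform, fixed_points, moving_points):\n",
    "    # ||T(p_fixed) - p_moving|| for each corresponding point pair, in mm.\n",
    "    return [np.linalg.norm(np.array(transform.TransformPoint(p_f)) - np.array(p_m))\n",
    "            for p_f, p_m in zip(fixed_points, moving_points)]\n",
    "```\n",
    "\n",
    "Passing `sitk.Transform()`, the identity, yields the initial alignment errors used as the pre-registration baseline above."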
248 | ] 249 | }, 250 | { 251 | "cell_type": "code", 252 | "execution_count": null, 253 | "metadata": {}, 254 | "outputs": [], 255 | "source": [ 256 | "initial_errors = utilities.target_registration_errors(sitk.Transform(), fixed_points, moving_points, display_errors = True)\n", 257 | "utilities.target_registration_errors(final_transformation, fixed_points, moving_points, \n", 258 | " min_err=min(initial_errors), max_err=max(initial_errors), display_errors = True);" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "metadata": {}, 264 | "source": [ 265 | "Deciding whether a registration algorithm is appropriate for a specific problem is context dependent and is defined by the clinical/research needs both in terms of accuracy and computational complexity." 266 | ] 267 | }, 268 | { 269 | "cell_type": "markdown", 270 | "metadata": {}, 271 | "source": [ 272 | "## Demons Based Registration\n", 273 | "\n", 274 | "SimpleITK includes a number of filters from the Demons registration family (originally introduced by J. P. Thirion):\n", 275 | "1. DemonsRegistrationFilter.\n", 276 | "2. DiffeomorphicDemonsRegistrationFilter.\n", 277 | "3. FastSymmetricForcesDemonsRegistrationFilter.\n", 278 | "4. SymmetricForcesDemonsRegistrationFilter.\n", 279 | "\n", 280 | "These are appropriate for mono-modal registration. As these filters are independent of the ImageRegistrationMethod we do not have access to the multiscale framework. Luckily it is easy to implement our own multiscale framework in SimpleITK, which is what we do in the next cell." 281 | ] 282 | }, 283 | { 284 | "cell_type": "code", 285 | "execution_count": null, 286 | "metadata": {}, 287 | "outputs": [], 288 | "source": [ 289 | "def smooth_and_resample(image, shrink_factor, smoothing_sigma):\n", 290 | " \"\"\"\n", 291 | " Args:\n", 292 | " image: The image we want to resample.\n", 293 | " shrink_factor: A number greater than one, such that the new image's size is original_size/shrink_factor.\n", 294 | " smoothing_sigma: Sigma for Gaussian smoothing, this is in physical (image spacing) units, not pixels.\n", 295 | " Return:\n", 296 | " Image which is a result of smoothing the input and then resampling it using the given sigma and shrink factor.\n", 297 | " \"\"\"\n", 298 | " smoothed_image = sitk.SmoothingRecursiveGaussian(image, smoothing_sigma)\n", 299 | " \n", 300 | " original_spacing = image.GetSpacing()\n", 301 | " original_size = image.GetSize()\n", 302 | " new_size = [int(sz/float(shrink_factor) + 0.5) for sz in original_size]\n", 303 | " new_spacing = [((original_sz-1)*original_spc)/(new_sz-1) \n", 304 | " for original_sz, original_spc, new_sz in zip(original_size, original_spacing, new_size)]\n", 305 | " return sitk.Resample(smoothed_image, new_size, sitk.Transform(), \n", 306 | " sitk.sitkLinear, image.GetOrigin(),\n", 307 | " new_spacing, image.GetDirection(), 0.0, \n", 308 | " image.GetPixelID())\n", 309 | "\n", 310 | "\n", 311 | " \n", 312 | "def multiscale_demons(registration_algorithm,\n", 313 | " fixed_image, moving_image, initial_transform = None, \n", 314 | " shrink_factors=None, smoothing_sigmas=None):\n", 315 | " \"\"\"\n", 316 | " Run the given registration algorithm in a multiscale fashion. 
The original scale should not be given as input, as the\n", 317 | " original images are implicitly incorporated as the base of the pyramid.\n", 318 | " Args:\n", 319 | " registration_algorithm: Any registration algorithm that has an Execute(fixed_image, moving_image, displacement_field_image)\n", 320 | " method.\n", 321 | " fixed_image: Resulting transformation maps points from this image's spatial domain to the moving image spatial domain.\n", 322 | " moving_image: Resulting transformation maps points from the fixed_image's spatial domain to this image's spatial domain.\n", 323 | " initial_transform: Any SimpleITK transform, used to initialize the displacement field.\n", 324 | " shrink_factors: Shrink factors relative to the original image's size.\n", 325 | " smoothing_sigmas: Amount of smoothing which is done prior to resampling the image using the given shrink factor. These\n", 326 | " are in physical (image spacing) units.\n", 327 | " Returns:\n", 328 | " SimpleITK.DisplacementFieldTransform\n", 329 | " \"\"\"\n", 330 | " # Create image pyramid.\n", 331 | " fixed_images = [fixed_image]\n", 332 | " moving_images = [moving_image]\n", 333 | " if shrink_factors:\n", 334 | " for shrink_factor, smoothing_sigma in reversed(list(zip(shrink_factors, smoothing_sigmas))):\n", 335 | " fixed_images.append(smooth_and_resample(fixed_images[0], shrink_factor, smoothing_sigma))\n", 336 | " moving_images.append(smooth_and_resample(moving_images[0], shrink_factor, smoothing_sigma))\n", 337 | " \n", 338 | " # Create initial displacement field at lowest resolution. \n", 339 | " # Currently, the pixel type is required to be sitkVectorFloat64 because of a constraint imposed by the Demons filters.\n", 340 | " if initial_transform:\n", 341 | " initial_displacement_field = sitk.TransformToDisplacementField(initial_transform, \n", 342 | " sitk.sitkVectorFloat64,\n", 343 | " fixed_images[-1].GetSize(),\n", 344 | " fixed_images[-1].GetOrigin(),\n", 345 | " fixed_images[-1].GetSpacing(),\n", 346 | " fixed_images[-1].GetDirection())\n", 347 | " else:\n", 348 | " initial_displacement_field = sitk.Image(fixed_images[-1].GetWidth(), \n", 349 | " fixed_images[-1].GetHeight(),\n", 350 | " fixed_images[-1].GetDepth(),\n", 351 | " sitk.sitkVectorFloat64)\n", 352 | " initial_displacement_field.CopyInformation(fixed_images[-1])\n", 353 | " \n", 354 | " # Run the registration at the coarsest resolution.\n", 355 | " initial_displacement_field = registration_algorithm.Execute(fixed_images[-1], \n", 356 | " moving_images[-1], \n", 357 | " initial_displacement_field)\n", 358 | " # Start at the top of the pyramid (coarsest level) and work our way down.\n", 359 | " for f_image, m_image in reversed(list(zip(fixed_images[0:-1], moving_images[0:-1]))):\n", 360 | " initial_displacement_field = sitk.Resample(initial_displacement_field, f_image)\n", 361 | " initial_displacement_field = registration_algorithm.Execute(f_image, m_image, initial_displacement_field)\n", 362 | " return sitk.DisplacementFieldTransform(initial_displacement_field)" 363 | ] 364 | }, 365 | { 366 | "cell_type": "markdown", 367 | "metadata": {}, 368 | "source": [ 369 | "Now we will use our newly minted multiscale framework to perform registration with the Demons filters. Some things you can easily try out by editing the code below:\n", 370 | "1. Is there really a need for multiscale? Just call the multiscale_demons method without the shrink_factors and smoothing_sigmas parameters.\n", 371 | "2. 
Which Demons filter should you use? Configure the other filters and see if our selection is the best choice (accuracy/time)." 372 | ] 373 | }, 374 | { 375 | "cell_type": "code", 376 | "execution_count": null, 377 | "metadata": {}, 378 | "outputs": [], 379 | "source": [ 380 | "# Define a simple callback which allows us to monitor registration progress.\n", 381 | "def iteration_callback(filter):\n", 382 | " print('\\r{0}: {1:.2f}'.format(filter.GetElapsedIterations(), filter.GetMetric()), end='')\n", 383 | " \n", 384 | "# Select a Demons filter and configure it.\n", 385 | "demons_filter = sitk.FastSymmetricForcesDemonsRegistrationFilter()\n", 386 | "demons_filter.SetNumberOfIterations(20)\n", 387 | "# Regularization (update field - viscous, total field - elastic).\n", 388 | "demons_filter.SetSmoothDisplacementField(True)\n", 389 | "demons_filter.SetStandardDeviations(2.0)\n", 390 | "\n", 391 | "# Add our simple callback to the registration filter.\n", 392 | "demons_filter.AddCommand(sitk.sitkIterationEvent, lambda: iteration_callback(demons_filter))\n", 393 | "\n", 394 | "# Run the registration.\n", 395 | "tx = multiscale_demons(registration_algorithm=demons_filter, \n", 396 | " fixed_image = fixed_image, \n", 397 | " moving_image = moving_image,\n", 398 | " shrink_factors = [4,2],\n", 399 | " smoothing_sigmas = [8,4])\n", 400 | "\n", 401 | "# Look at the final TREs.\n", 402 | "final_TRE = utilities.target_registration_errors(tx, fixed_points, moving_points, display_errors = True)\n", 403 | "\n", 404 | "print('Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}'.format(np.mean(final_TRE), \n", 405 | " np.std(final_TRE), \n", 406 | " np.max(final_TRE)))" 407 | ] 408 | }, 409 | { 410 | "cell_type": "markdown", 411 | "metadata": {}, 412 | "source": [ 413 | "## Quantitative Evaluation II (Segmentation)\n", 414 | "\n", 415 | "While the use of corresponding points to evaluate registration is the desired approach, it is often not applicable. In many cases there are only a few distinct points which can be localized in the two images, possibly too few to serve as a metric for evaluating the registration result across the whole region of interest. \n", 416 | "\n", 417 | "An alternative approach is to use segmentation. In this approach, we independently segment the structures of interest in the two images. After registration we transfer the segmentation from one image to the other and compare the original and registration-induced segmentations.\n" 418 | ] 419 | }, 420 | { 421 | "cell_type": "code", 422 | "execution_count": null, 423 | "metadata": {}, 424 | "outputs": [], 425 | "source": [ 426 | "# Transfer the segmentation via the estimated transformation. \n", 427 | "# Nearest Neighbor interpolation so we don't introduce new labels.\n", 428 | "transformed_labels = sitk.Resample(masks[moving_index],\n", 429 | " fixed_image,\n", 430 | " tx, \n", 431 | " sitk.sitkNearestNeighbor,\n", 432 | " 0.0, \n", 433 | " masks[moving_index].GetPixelID())" 434 | ] 435 | }, 436 | { 437 | "cell_type": "markdown", 438 | "metadata": {}, 439 | "source": [ 440 | "We have now replaced the task of evaluating registration with that of evaluating segmentation." 
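,
    "\n",
    "Before computing the full set of measures below, a single overlap score already gives a rough sense of the improvement. A minimal sketch using the same filter employed in the evaluation code, relying only on variables defined in the cells above:\n",
    "\n",
    "```python\n",
    "# Quick sanity check: Dice coefficient between the fixed lung mask and the\n",
    "# transferred moving lung mask (fixed_image_mask and transformed_labels are\n",
    "# defined in the cells above).\n",
    "overlap_filter = sitk.LabelOverlapMeasuresImageFilter()\n",
    "overlap_filter.Execute(fixed_image_mask, transformed_labels == utilities.popi_lung_label)\n",
    "print('Dice after registration: {:.3f}'.format(overlap_filter.GetDiceCoefficient()))\n",
    "```"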
441 | ] 442 | }, 443 | { 444 | "cell_type": "code", 445 | "execution_count": null, 446 | "metadata": {}, 447 | "outputs": [], 448 | "source": [ 449 | "# Often referred to as ground truth, but we prefer reference as the truth is never known.\n", 450 | "reference_segmentation = fixed_image_mask\n", 451 | "# Segmentations before and after registration\n", 452 | "segmentations = [moving_image_mask, transformed_labels == utilities.popi_lung_label]" 453 | ] 454 | }, 455 | { 456 | "cell_type": "code", 457 | "execution_count": null, 458 | "metadata": {}, 459 | "outputs": [], 460 | "source": [ 461 | "from enum import Enum\n", 462 | "\n", 463 | "# Use enumerations to represent the various evaluation measures\n", 464 | "class OverlapMeasures(Enum):\n", 465 | " jaccard, dice, volume_similarity, false_negative, false_positive = range(5)\n", 466 | "\n", 467 | "class SurfaceDistanceMeasures(Enum):\n", 468 | " hausdorff_distance, mean_surface_distance, median_surface_distance, std_surface_distance, max_surface_distance = range(5)\n", 469 | " \n", 470 | "# Empty numpy arrays to hold the results \n", 471 | "overlap_results = np.zeros((len(segmentations),len(OverlapMeasures.__members__.items()))) \n", 472 | "surface_distance_results = np.zeros((len(segmentations),len(SurfaceDistanceMeasures.__members__.items()))) \n", 473 | "\n", 474 | "# Compute the evaluation criteria\n", 475 | "\n", 476 | "# Note that for the overlap measures filter, because we are dealing with a single label we \n", 477 | "# use the combined, all labels, evaluation measures without passing a specific label to the methods.\n", 478 | "overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n", 479 | "\n", 480 | "hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n", 481 | "\n", 482 | "# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside \n", 483 | "# relationship, is irrelevant)\n", 484 | "label = 1\n", 485 | "reference_distance_map = sitk.Abs(sitk.SignedMaurerDistanceMap(reference_segmentation, squaredDistance=False))\n", 486 | "reference_surface = sitk.LabelContour(reference_segmentation)\n", 487 | "\n", 488 | "statistics_image_filter = sitk.StatisticsImageFilter()\n", 489 | "# Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 490 | "statistics_image_filter.Execute(reference_surface)\n", 491 | "num_reference_surface_pixels = int(statistics_image_filter.GetSum()) \n", 492 | "\n", 493 | "for i, seg in enumerate(segmentations):\n", 494 | " # Overlap measures\n", 495 | " overlap_measures_filter.Execute(reference_segmentation, seg)\n", 496 | " overlap_results[i,OverlapMeasures.jaccard.value] = overlap_measures_filter.GetJaccardCoefficient()\n", 497 | " overlap_results[i,OverlapMeasures.dice.value] = overlap_measures_filter.GetDiceCoefficient()\n", 498 | " overlap_results[i,OverlapMeasures.volume_similarity.value] = overlap_measures_filter.GetVolumeSimilarity()\n", 499 | " overlap_results[i,OverlapMeasures.false_negative.value] = overlap_measures_filter.GetFalseNegativeError()\n", 500 | " overlap_results[i,OverlapMeasures.false_positive.value] = overlap_measures_filter.GetFalsePositiveError()\n", 501 | " # Hausdorff distance\n", 502 | " hausdorff_distance_filter.Execute(reference_segmentation, seg)\n", 503 | " surface_distance_results[i,SurfaceDistanceMeasures.hausdorff_distance.value] = hausdorff_distance_filter.GetHausdorffDistance()\n", 504 | " # Symmetric surface distance measures\n", 505 | " segmented_distance_map = 
sitk.Abs(sitk.SignedMaurerDistanceMap(seg, squaredDistance=False))\n", 506 | " segmented_surface = sitk.LabelContour(seg)\n", 507 | " \n", 508 | " # Multiply the binary surface segmentations with the distance maps. The resulting distance\n", 509 | " # maps contain non-zero values only on the surface (they can also contain zero on the surface)\n", 510 | " seg2ref_distance_map = reference_distance_map*sitk.Cast(segmented_surface, sitk.sitkFloat32)\n", 511 | " ref2seg_distance_map = segmented_distance_map*sitk.Cast(reference_surface, sitk.sitkFloat32)\n", 512 | " \n", 513 | " # Get the number of pixels in the segmented surface by counting all pixels that are 1.\n", 514 | " statistics_image_filter.Execute(segmented_surface)\n", 515 | " num_segmented_surface_pixels = int(statistics_image_filter.GetSum())\n", 516 | " \n", 517 | " # Get all non-zero distances and then add zero distances if required.\n", 518 | " seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n", 519 | " seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr!=0]) \n", 520 | " seg2ref_distances = seg2ref_distances + \\\n", 521 | " list(np.zeros(num_segmented_surface_pixels - len(seg2ref_distances)))\n", 522 | " ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n", 523 | " ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr!=0]) \n", 524 | " ref2seg_distances = ref2seg_distances + \\\n", 525 | " list(np.zeros(num_reference_surface_pixels - len(ref2seg_distances)))\n", 526 | " \n", 527 | " all_surface_distances = seg2ref_distances + ref2seg_distances\n", 528 | " \n", 529 | " surface_distance_results[i,SurfaceDistanceMeasures.mean_surface_distance.value] = np.mean(all_surface_distances)\n", 530 | " surface_distance_results[i,SurfaceDistanceMeasures.median_surface_distance.value] = np.median(all_surface_distances)\n", 531 | " surface_distance_results[i,SurfaceDistanceMeasures.std_surface_distance.value] = np.std(all_surface_distances)\n", 532 | " surface_distance_results[i,SurfaceDistanceMeasures.max_surface_distance.value] = np.max(all_surface_distances)\n", 533 | "\n", 534 | "import pandas as pd\n", 535 | "from IPython.display import display, HTML \n", 536 | "\n", 537 | "# Graft our results matrix into pandas data frames \n", 538 | "overlap_results_df = pd.DataFrame(data=overlap_results, index=[\"before registration\", \"after registration\"], \n", 539 | " columns=[name for name, _ in OverlapMeasures.__members__.items()]) \n", 540 | "surface_distance_results_df = pd.DataFrame(data=surface_distance_results, index=[\"before registration\", \"after registration\"], \n", 541 | " columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()]) \n", 542 | "\n", 543 | "# Display the data as HTML tables and graphs\n", 544 | "display(HTML(overlap_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 545 | "display(HTML(surface_distance_results_df.to_html(float_format=lambda x: '%.3f' % x)))\n", 546 | "overlap_results_df.plot(kind='bar', rot=1).legend(bbox_to_anchor=(1.6,0.9))\n", 547 | "surface_distance_results_df.plot(kind='bar', rot=1).legend(bbox_to_anchor=(1.6,0.9)); " 548 | ] 549 | }, 550 | { 551 | "cell_type": "markdown", 552 | "metadata": { 553 | "collapsed": true 554 | }, 555 | "source": [ 556 | "
Next »
" 557 | ] 558 | } 559 | ], 560 | "metadata": { 561 | "kernelspec": { 562 | "display_name": "Python 3", 563 | "language": "python", 564 | "name": "python3" 565 | }, 566 | "language_info": { 567 | "codemirror_mode": { 568 | "name": "ipython", 569 | "version": 3 570 | }, 571 | "file_extension": ".py", 572 | "mimetype": "text/x-python", 573 | "name": "python", 574 | "nbconvert_exporter": "python", 575 | "pygments_lexer": "ipython3", 576 | "version": "3.6.8" 577 | } 578 | }, 579 | "nbformat": 4, 580 | "nbformat_minor": 2 581 | } 582 | --------------------------------------------------------------------------------