├── .github
│   ├── dependabot.yml
│   └── workflows
│       └── main.yml
├── .gitignore
├── 00_setup.ipynb
├── 01_spatial_transformations.ipynb
├── 02_images_and_resampling.ipynb
├── 03_trust_but_verify.ipynb
├── 04_data_augmentation.ipynb
├── 05_basic_registration.ipynb
├── 06_advanced_registration.ipynb
├── 07_registration_application.ipynb
├── 08_segmentation_and_shape_analysis.ipynb
├── 09_segmentation_evaluation.ipynb
├── 10_results_visualization.ipynb
├── LICENSE
├── README.md
├── binder
│   ├── requirements.txt
│   └── runtime.txt
├── characterize_data.py
├── data
│   └── manifest.json
├── docs
│   ├── index.html
│   ├── mec2020.png
│   ├── simpleitk.bib
│   ├── simpleitkFundamentalConcepts.pptx
│   ├── simpleitkHistoricalOverview.pptx
│   └── simpleitkLogo.jpg
├── downloaddata.py
├── environment.yml
├── environment_dev.yml
├── figures
│   ├── ITKv4RegistrationComponentsDiagram.svg
│   ├── ImageOriginAndSpacing.png
│   ├── hkaAngle.png
│   ├── registrationFrameworkTransformations.svg
│   └── resampling.svg
├── gui.py
├── output
│   └── .gitignore
├── registration_gui.py
├── tests
│   ├── additional_dictionary.txt
│   ├── requirements_testing.txt
│   └── test_notebooks.py
└── utilities.py

/.github/dependabot.yml:
--------------------------------------------------------------------------------
 1 | version: 2
 2 | updates:
 3 |   - package-ecosystem: "github-actions"
 4 |     directory: "/"
 5 |     schedule:
 6 |       interval: "weekly"
 7 |       day: "sunday"
--------------------------------------------------------------------------------
/.github/workflows/main.yml:
--------------------------------------------------------------------------------
 1 | name: Notebook Testing
 2 | 
 3 | on:
 4 |   push:
 5 |     branches:
 6 |       - main
 7 |     paths-ignore:
 8 |       - 'docs/**'
 9 |       - 'binder/**'
10 |       - 'figures/**'
11 |   pull_request:
12 |     branches:
13 |       - main
14 |     paths-ignore:
15 |       - 'docs/**'
16 |       - 'binder/**'
17 |       - 'figures/**'
18 |   schedule:
19 |     # run testing on the first of each month 5am ET / 9am UTC
20 |     - cron: '0 9 1 * *'
21 | 
22 |   # Enable manual running of workflow, so we can force execution
23 |   workflow_dispatch:
24 | 
25 | jobs:
26 |   lint:
27 |     runs-on: ubuntu-latest
28 |     steps:
29 |       - uses: actions/checkout@v4
30 |       - name: Set up Python 3.9
31 |         uses: actions/setup-python@v5
32 |         with:
33 |           python-version: 3.9
34 |       - name: Install and run black for notebooks
35 |         run: |
36 |           python -m pip install --upgrade pip
37 |           python -m pip install black[jupyter]
38 |           black --check --diff --verbose .
39 |   test:
40 |     needs: lint
41 |     strategy:
42 |       matrix:
43 |         # not using macos-latest because pyenchant doesn't work with the new macos-14 arm64
44 |         os: [ubuntu-latest, macos-13, windows-latest]
45 |         inputs: ["00_ or 01_ or 02_ or 03_ or 04_ or 05_", "06_ or 07_ or 08_ or 09_ or 10_"]
46 |     runs-on: ${{ matrix.os }}
47 |     steps:
48 |       - uses: actions/checkout@v4
49 |       - uses: actions/cache@v4
50 |         id: cache
51 |         with:
52 |           path: |
53 |             data
54 |           key: notebook-data-${{ hashFiles('data/manifest.json') }}
55 |           restore-keys: |
56 |             notebook-data-${{ hashFiles('data/manifest.json') }}
57 |       - name: Set up Python 3.9
58 |         uses: actions/setup-python@v5
59 |         with:
60 |           python-version: 3.9
61 |       - name: Install enchant on non windows systems
62 |         shell: bash
63 |         run: |
64 |           if [ "$RUNNER_OS" == "Linux" ]; then
65 |             sudo apt-get update
66 |             sudo apt-get install enchant-2
67 |           elif [ "$RUNNER_OS" == "macOS" ]; then
68 |             brew update
69 |             brew install enchant
70 |           fi
71 |       # on windows, the pyenchant package includes enchant
72 |       - name: Install dependencies
73 |         run: |
74 |           python -m pip install --upgrade pip
75 |           python -m pip install -r tests/requirements_testing.txt
76 |           jupyter nbextension enable --py --sys-prefix widgetsnbextension
77 |       - name: Download data
78 |         if: steps.cache.outputs.cache-hit != 'true'
79 |         run: python downloaddata.py data/ data/manifest.json
80 |       - name: run the test
81 |         env:
82 |           SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT: 1
83 |         run: |
84 |           pytest -v --tb=short -k "${{matrix.inputs}}" tests/test_notebooks.py::Test_notebooks::test_python_notebook
85 | 
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
 1 | .DS_Store
 2 | Data
 3 | build
 4 | *.o
 5 | \#*\#
 6 | *\#
 7 | *~
 8 | *.pyc
 9 | *.orig
10 | .ipynb_checkpoints/
11 | .*.swp
12 | .mha
13 | .jpg
--------------------------------------------------------------------------------
/00_setup.ipynb:
--------------------------------------------------------------------------------
1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
<h1 align=\"center\">SimpleITK: image analysis for all levels of programming expertise</h1>
\n",
 8 | "\n",
 9 | "## Newcomers to Jupyter notebooks:\n",
10 | "1. We use two types of cells, code and markdown.\n",
11 | "2. To run a code cell, select it (mouse or arrow key so that it is highlighted) and then press shift+enter, which also moves focus to the next cell, or ctrl+enter, which doesn't.\n",
12 | "3. Closing the browser window does not close the Jupyter server. To close the server, go to the terminal where you ran it and press ctrl+c twice.\n",
13 | "\n",
14 | "For additional details see the [Jupyter project documentation](https://jupyter.org/documentation) on Jupyter Notebook or JupyterLab.\n",
15 | "\n",
16 | "## Convenience\n",
17 | "By default the contents of the Jupyter notebooks do not occupy the full browser window width. To take advantage of the full window width you can either configure each notebook independently by adding the following into a code cell:\n",
18 | "```\n",
19 | "from IPython.core.display import display, HTML\n",
20 | "display(HTML(\"<style>.container { width:100% !important; }</style>\"))\n",
21 | "```\n",
22 | "Or apply this configuration to all notebooks by adding the following to\n",
23 | "the custom.css jupyter configuration file:\n",
24 | "```\n",
25 | ".container { width:100% !important; }\n",
26 | "```\n",
27 | "On OSX/Linux this file is found in `~/.jupyter/custom/custom.css`; on Windows it is\n",
28 | "found in `C:\\Users\\[your_user_name]\\.jupyter\\custom\\custom.css`.\n",
29 | "\n",
30 | "## Environment Setup for Course\n",
31 | "\n",
32 | "This notebook should be run prior to arriving at the course venue, as it requires network connectivity."
33 | ]
34 | },
35 | {
36 | "cell_type": "markdown",
37 | "metadata": {},
38 | "source": [
39 | "First, let's check that you have the SimpleITK version you expect."
40 | ]
41 | },
42 | {
43 | "cell_type": "code",
44 | "execution_count": null,
45 | "metadata": {},
46 | "outputs": [],
47 | "source": [
48 | "import SimpleITK as sitk\n",
49 | "from downloaddata import fetch_data, fetch_data_all\n",
50 | "\n",
51 | "from ipywidgets import interact\n",
52 | "\n",
53 | "print(sitk.Version())"
54 | ]
55 | },
56 | {
57 | "cell_type": "markdown",
58 | "metadata": {},
59 | "source": [
60 | "Next, we check that the auxiliary program(s) are correctly installed in your environment.\n",
61 | "\n",
62 | "We expect that you have an external image viewer installed. The default viewer is Fiji. If you have another viewer (e.g. ITK-SNAP or 3D Slicer) you will need to point SimpleITK to it. This is done using an environment variable, which can also be set from within a notebook as shown below."
63 | ]
64 | },
65 | {
66 | "cell_type": "code",
67 | "execution_count": null,
68 | "metadata": {
69 | "simpleitk_error_allowed": "Exception thrown in SimpleITK ImageViewer_Execute:"
70 | },
71 | "outputs": [],
72 | "source": [
73 | "# Retrieve an image from the network, read it and display using the external viewer\n",
74 | "image_viewer = sitk.ImageViewer()\n",
75 | "# Uncomment the line below to change the default external viewer to your viewer of choice and test that it works.\n",
76 | "# image_viewer.SetApplication('/Applications/ITK-SNAP.app/Contents/MacOS/ITK-SNAP')\n",
77 | "\n",
78 | "image_viewer.Execute(sitk.ReadImage(fetch_data(\"SimpleITK.jpg\")))"
79 | ]
80 | },
81 | {
82 | "cell_type": "markdown",
83 | "metadata": {},
84 | "source": [
85 | "Now we check that the ipywidgets will display correctly.
When you run the following cell you should see a slider.\n",
 86 | "\n",
 87 | "If you don't see a slider, please shut down the Jupyter server (at the Anaconda command line prompt press ctrl+c twice) and then run the following command:\n",
 88 | "\n",
 89 | "```jupyter nbextension enable --py --sys-prefix widgetsnbextension```"
 90 | ]
 91 | },
 92 | {
 93 | "cell_type": "code",
 94 | "execution_count": null,
 95 | "metadata": {},
 96 | "outputs": [],
 97 | "source": [
 98 | "interact(lambda x: x, x=(0, 10));"
 99 | ]
100 | },
101 | {
102 | "cell_type": "markdown",
103 | "metadata": {},
104 | "source": [
105 | "Finally, we download all of the data used in the notebooks in advance. This step is necessary as we will be running the notebooks without network connectivity.\n",
106 | "\n",
107 | "This may take a couple of minutes depending on your network."
108 | ]
109 | },
110 | {
111 | "cell_type": "code",
112 | "execution_count": null,
113 | "metadata": {},
114 | "outputs": [],
115 | "source": [
116 | "import os\n",
117 | "\n",
118 | "fetch_data_all(\"data\", os.path.join(\"data\", \"manifest.json\"))"
119 | ]
120 | },
121 | {
122 | "cell_type": "markdown",
123 | "metadata": {},
124 | "source": [
125 | "
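A related convenience for the external viewer check above: instead of calling SetApplication, the viewer can also be selected through an environment variable. A minimal sketch, assuming ITK-SNAP is installed at the macOS path below (the path is an assumption, adjust it for your machine):

```python
import os

# Hypothetical viewer path - SimpleITK's image display honors the
# SITK_SHOW_COMMAND environment variable.
os.environ["SITK_SHOW_COMMAND"] = "/Applications/ITK-SNAP.app/Contents/MacOS/ITK-SNAP"
```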
<a href=\"01_spatial_transformations.ipynb\"><h2 align=right>Next &raquo;</h2></a>
" 126 | ] 127 | } 128 | ], 129 | "metadata": { 130 | "kernelspec": { 131 | "display_name": "Python 3 (ipykernel)", 132 | "language": "python", 133 | "name": "python3" 134 | }, 135 | "language_info": { 136 | "codemirror_mode": { 137 | "name": "ipython", 138 | "version": 3 139 | }, 140 | "file_extension": ".py", 141 | "mimetype": "text/x-python", 142 | "name": "python", 143 | "nbconvert_exporter": "python", 144 | "pygments_lexer": "ipython3", 145 | "version": "3.9.13" 146 | } 147 | }, 148 | "nbformat": 4, 149 | "nbformat_minor": 2 150 | } 151 | -------------------------------------------------------------------------------- /05_basic_registration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
<h1 align=\"center\">Basic Registration</h1>
\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "\n", 12 | "1. Creating an instance of the registration framework requires selection of the following components:\n", 13 | " * Optimizer.\n", 14 | " * Similarity metric.\n", 15 | " * Interpolator.\n", 16 | "2. The registration framework only supports images with sitkFloat32 and sitkFloat64 pixel types (use the SimpleITK Cast() function if your image's pixel type is something else).\n", 17 | "\n", 18 | "3. Successful registration is highly dependent on initialization. In general you can:\n", 19 | " * Use auxiliary information or user interaction to obtain an initial transformation (avoid resampling).\n", 20 | " * Center the images using the CenteredTransformInitializer.\n", 21 | " * Coarsely sample the parameter space using the Exhaustive Optimizer to obtain one or more initial transformation estimates.\n", 22 | " * Manually initialize, via direct manipulation of transformation parameters and visualization or localization of corresponding points in the two images and then use the LandmarkBasedTransformInitializer." 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## Registration Components \n", 30 | "\n", 31 | "
<img src=\"figures/ITKv4RegistrationComponentsDiagram.svg\" style=\"width:700px\"/>
\n", 32 | "\n", 33 | "There are many options for creating an instance of the registration framework, all of which are configured in SimpleITK via methods of the ImageRegistrationMethod class. This class encapsulates many of the components available in ITK for constructing a registration instance.\n", 34 | "\n", 35 | "Currently, the available choices from the following groups of ITK components are:\n", 36 | "\n", 37 | "### Optimizers\n", 38 | "\n", 39 | "The SimpleITK registration framework supports several optimizer types via the SetOptimizerAsX() methods, these include:\n", 40 | "\n", 41 | "\n", 79 | "\n", 80 | " \n", 81 | "### Similarity metrics\n", 82 | "\n", 83 | "The SimpleITK registration framework supports several metric types via the SetMetricAsX() methods, these include:\n", 84 | "\n", 85 | "\n", 105 | "\n", 106 | "\n", 107 | "### Interpolators\n", 108 | "\n", 109 | "The SimpleITK registration framework supports several interpolators via the SetInterpolator() method, which receives one of\n", 110 | "the following enumerations:\n", 111 | "" 122 | ] 123 | }, 124 | { 125 | "cell_type": "code", 126 | "execution_count": null, 127 | "metadata": {}, 128 | "outputs": [], 129 | "source": [ 130 | "import SimpleITK as sitk\n", 131 | "from downloaddata import fetch_data as fdata\n", 132 | "\n", 133 | "%matplotlib notebook\n", 134 | "import gui\n", 135 | "import registration_gui as rgui\n", 136 | "\n", 137 | "import numpy as np\n", 138 | "import os\n", 139 | "\n", 140 | "OUTPUT_DIR = \"output\"" 141 | ] 142 | }, 143 | { 144 | "cell_type": "markdown", 145 | "metadata": {}, 146 | "source": [ 147 | "## Read images\n", 148 | "\n", 149 | "We first read the images, specifying the pixel type that is required for registration (Float32 or Float64) and look at them. In this notebook we use a CT and MR image from the same patient. These are part of the training data from the Retrospective Image Registration Evaluation (RIRE) project." 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "fixed_image = sitk.ReadImage(fdata(\"training_001_ct.mha\"), sitk.sitkFloat32)\n", 159 | "moving_image = sitk.ReadImage(fdata(\"training_001_mr_T1.mha\"), sitk.sitkFloat32)\n", 160 | "\n", 161 | "ct_window_level = [835, 162]\n", 162 | "mr_window_level = [1036, 520]\n", 163 | "\n", 164 | "gui.MultiImageDisplay(\n", 165 | " image_list=[fixed_image, moving_image],\n", 166 | " title_list=[\"fixed\", \"moving\"],\n", 167 | " figure_size=(8, 4),\n", 168 | " window_level_list=[ct_window_level, mr_window_level],\n", 169 | ");" 170 | ] 171 | }, 172 | { 173 | "cell_type": "markdown", 174 | "metadata": {}, 175 | "source": [ 176 | "## Classic Registration\n", 177 | "\n", 178 | "Estimate a 3D rigid transformation between images of different modalities. \n", 179 | "\n", 180 | "We have made the following choices with respect to initialization and registration component settings:\n", 181 | "\n", 182 | "\n", 201 | "\n", 202 | "We initialize registration by aligning the centers of the two volumes. To qualitatively evaluate the result we use a linked cursor approach, click on one image and the corresponding point is added to the other image." 
203 | ] 204 | }, 205 | { 206 | "cell_type": "code", 207 | "execution_count": null, 208 | "metadata": {}, 209 | "outputs": [], 210 | "source": [ 211 | "initial_transform = sitk.CenteredTransformInitializer(\n", 212 | " fixed_image,\n", 213 | " moving_image,\n", 214 | " sitk.Euler3DTransform(),\n", 215 | " sitk.CenteredTransformInitializerFilter.GEOMETRY,\n", 216 | ")\n", 217 | "\n", 218 | "gui.RegistrationPointDataAquisition(\n", 219 | " fixed_image,\n", 220 | " moving_image,\n", 221 | " figure_size=(8, 4),\n", 222 | " known_transformation=initial_transform,\n", 223 | " fixed_window_level=ct_window_level,\n", 224 | " moving_window_level=mr_window_level,\n", 225 | ");" 226 | ] 227 | }, 228 | { 229 | "cell_type": "markdown", 230 | "metadata": {}, 231 | "source": [ 232 | "Run the next cell three times:\n", 233 | "1. As is.\n", 234 | "2. Uncomment the automated optimizer scale setting so that a change in rotation (radians) has a similar effect to a change in translation (mm).\n", 235 | "3. Uncomment the multi-resolution settings." 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": null, 241 | "metadata": {}, 242 | "outputs": [], 243 | "source": [ 244 | "registration_method = sitk.ImageRegistrationMethod()\n", 245 | "\n", 246 | "# Similarity metric settings.\n", 247 | "registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)\n", 248 | "registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 249 | "registration_method.SetMetricSamplingPercentage(0.01)\n", 250 | "\n", 251 | "registration_method.SetInterpolator(sitk.sitkLinear)\n", 252 | "\n", 253 | "# Optimizer settings.\n", 254 | "registration_method.SetOptimizerAsGradientDescent(\n", 255 | " learningRate=1.0,\n", 256 | " numberOfIterations=100,\n", 257 | " convergenceMinimumValue=1e-6,\n", 258 | " convergenceWindowSize=10,\n", 259 | ")\n", 260 | "# registration_method.SetOptimizerScalesFromPhysicalShift()\n", 261 | "\n", 262 | "# Setup for the multi-resolution framework.\n", 263 | "# registration_method.SetShrinkFactorsPerLevel(shrinkFactors = [4,2,1])\n", 264 | "# registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2,1,0])\n", 265 | "# registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n", 266 | "\n", 267 | "# Don't optimize in-place, we would possibly like to run this cell multiple times.\n", 268 | "registration_method.SetInitialTransform(initial_transform, inPlace=False)\n", 269 | "\n", 270 | "# Connect all of the observers so that we can perform plotting during registration.\n", 271 | "registration_method.AddCommand(sitk.sitkStartEvent, rgui.start_plot)\n", 272 | "registration_method.AddCommand(sitk.sitkEndEvent, rgui.end_plot)\n", 273 | "registration_method.AddCommand(\n", 274 | " sitk.sitkMultiResolutionIterationEvent, rgui.update_multires_iterations\n", 275 | ")\n", 276 | "registration_method.AddCommand(\n", 277 | " sitk.sitkIterationEvent, lambda: rgui.plot_values(registration_method)\n", 278 | ")\n", 279 | "\n", 280 | "final_transform = registration_method.Execute(fixed_image, moving_image)\n", 281 | "\n", 282 | "# Always check the reason optimization terminated.\n", 283 | "print(\"Final metric value: {0}\".format(registration_method.GetMetricValue()))\n", 284 | "print(\n", 285 | " \"Optimizer's stopping condition, {0}\".format(\n", 286 | " registration_method.GetOptimizerStopConditionDescription()\n", 287 | " )\n", 288 | ")" 289 | ] 290 | }, 291 | { 292 | "cell_type": "markdown", 293 | "metadata": {}, 294 | "source": [ 295 | 
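A minimal programmatic complement to reading the stopping condition printed above (a sketch, assuming the 100-iteration budget set in the cell):

```python
# If the optimizer consumed its entire iteration budget it most likely stopped
# without converging, so treat the resulting transformation with suspicion.
if registration_method.GetOptimizerIteration() >= 100:
    print("Warning: iteration limit reached before convergence.")
```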
"Qualitatively evaluate the result using a linked cursor approach (visual evaluation):" 296 | ] 297 | }, 298 | { 299 | "cell_type": "code", 300 | "execution_count": null, 301 | "metadata": {}, 302 | "outputs": [], 303 | "source": [ 304 | "gui.RegistrationPointDataAquisition(\n", 305 | " fixed_image,\n", 306 | " moving_image,\n", 307 | " figure_size=(8, 4),\n", 308 | " known_transformation=final_transform,\n", 309 | " fixed_window_level=ct_window_level,\n", 310 | " moving_window_level=mr_window_level,\n", 311 | ");" 312 | ] 313 | }, 314 | { 315 | "cell_type": "markdown", 316 | "metadata": {}, 317 | "source": [ 318 | "If we are satisfied with the results, save them to file." 319 | ] 320 | }, 321 | { 322 | "cell_type": "code", 323 | "execution_count": null, 324 | "metadata": {}, 325 | "outputs": [], 326 | "source": [ 327 | "moving_resampled = sitk.Resample(\n", 328 | " moving_image,\n", 329 | " fixed_image,\n", 330 | " final_transform,\n", 331 | " sitk.sitkLinear,\n", 332 | " 0.0,\n", 333 | " moving_image.GetPixelID(),\n", 334 | ")\n", 335 | "sitk.WriteImage(\n", 336 | " moving_resampled, os.path.join(OUTPUT_DIR, \"RIRE_training_001_mr_T1_resampled.mha\")\n", 337 | ")\n", 338 | "sitk.WriteTransform(\n", 339 | " final_transform, os.path.join(OUTPUT_DIR, \"RIRE_training_001_CT_2_mr_T1.tfm\")\n", 340 | ")" 341 | ] 342 | }, 343 | { 344 | "cell_type": "markdown", 345 | "metadata": {}, 346 | "source": [ 347 | "## ITKv4 Coordinate Systems\n", 348 | "\n", 349 | "Unlike the classical registration approach where the fixed and moving images are treated differently, the ITKv4 registration framework allows you to treat both images in the same manner. This is achieved by introducing a third coordinate system, the virtual image domain.\n", 350 | "\n", 351 | "
<img src=\"figures/registrationFrameworkTransformations.svg\" style=\"width:700px\"/>
\n", 352 | "\n", 353 | "Thus, the ITK v4 registration framework deals with three transformations:\n", 354 | "\n", 365 | "\n", 366 | "The transformation that maps points from the fixed to moving image domains is thus: $^M\\mathbf{p} = T_m(T_{opt}(T_f^{-1}(^F\\mathbf{p})))$\n", 367 | "\n", 368 | "We now modify the previous example to use $T_{opt}$ and $T_m$." 369 | ] 370 | }, 371 | { 372 | "cell_type": "code", 373 | "execution_count": null, 374 | "metadata": {}, 375 | "outputs": [], 376 | "source": [ 377 | "registration_method = sitk.ImageRegistrationMethod()\n", 378 | "\n", 379 | "# Similarity metric settings.\n", 380 | "registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)\n", 381 | "registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 382 | "registration_method.SetMetricSamplingPercentage(0.01)\n", 383 | "\n", 384 | "registration_method.SetInterpolator(sitk.sitkLinear)\n", 385 | "\n", 386 | "# Optimizer settings.\n", 387 | "registration_method.SetOptimizerAsGradientDescent(\n", 388 | " learningRate=1.0,\n", 389 | " numberOfIterations=100,\n", 390 | " convergenceMinimumValue=1e-6,\n", 391 | " convergenceWindowSize=10,\n", 392 | ")\n", 393 | "registration_method.SetOptimizerScalesFromPhysicalShift()\n", 394 | "\n", 395 | "# Setup for the multi-resolution framework.\n", 396 | "registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])\n", 397 | "registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])\n", 398 | "registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n", 399 | "\n", 400 | "# Set the initial moving and optimized transforms.\n", 401 | "optimized_transform = sitk.Euler3DTransform()\n", 402 | "registration_method.SetMovingInitialTransform(initial_transform)\n", 403 | "registration_method.SetInitialTransform(optimized_transform, inPlace=False)\n", 404 | "\n", 405 | "# Connect all of the observers so that we can perform plotting during registration.\n", 406 | "registration_method.AddCommand(sitk.sitkStartEvent, rgui.start_plot)\n", 407 | "registration_method.AddCommand(sitk.sitkEndEvent, rgui.end_plot)\n", 408 | "registration_method.AddCommand(\n", 409 | " sitk.sitkMultiResolutionIterationEvent, rgui.update_multires_iterations\n", 410 | ")\n", 411 | "registration_method.AddCommand(\n", 412 | " sitk.sitkIterationEvent, lambda: rgui.plot_values(registration_method)\n", 413 | ")\n", 414 | "\n", 415 | "# Need to compose the transformations after registration.\n", 416 | "final_transform_v4 = sitk.CompositeTransform(\n", 417 | " [registration_method.Execute(fixed_image, moving_image), initial_transform]\n", 418 | ")\n", 419 | "\n", 420 | "# Always check the reason optimization terminated.\n", 421 | "print(\"Final metric value: {0}\".format(registration_method.GetMetricValue()))\n", 422 | "print(\n", 423 | " \"Optimizer's stopping condition, {0}\".format(\n", 424 | " registration_method.GetOptimizerStopConditionDescription()\n", 425 | " )\n", 426 | ")" 427 | ] 428 | }, 429 | { 430 | "cell_type": "code", 431 | "execution_count": null, 432 | "metadata": {}, 433 | "outputs": [], 434 | "source": [ 435 | "gui.RegistrationPointDataAquisition(\n", 436 | " fixed_image,\n", 437 | " moving_image,\n", 438 | " figure_size=(8, 4),\n", 439 | " known_transformation=final_transform_v4,\n", 440 | " fixed_window_level=ct_window_level,\n", 441 | " moving_window_level=mr_window_level,\n", 442 | ");" 443 | ] 444 | }, 445 | { 446 | "cell_type": "markdown", 447 | "metadata": { 448 | "collapsed": true 449 | }, 450 | 
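As a quick, optional sanity check (a sketch we add here; the voxel index below is an arbitrary choice): the composite transformation, like the classic final_transform, maps physical points from the fixed image domain to the moving image domain.

```python
# Map the physical location of an arbitrary fixed (CT) image voxel into the
# moving (MR) image domain using the composed transformation.
p_fixed = fixed_image.TransformIndexToPhysicalPoint([128, 128, 14])
p_moving = final_transform_v4.TransformPoint(p_fixed)
print("fixed domain point {0} -> moving domain point {1}".format(p_fixed, p_moving))
```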
"source": [ 451 | "## Initialization\n", 452 | "\n", 453 | "Initialization effects both the runtime and convergence to the correct minimum. Ideally our transformation is initialized close to the correct solution ensuring convergence in a timely manner. Problem specific initialization will often yield better results than the more generic solutions we show below. As a rule of thumb, use as much prior information (external to the image content) as you can to initialize your registration task.\n", 454 | "\n", 455 | "Common initializations in the generic setting:\n", 456 | "1. Do nothing (a.k.a. hope/unique setting) - initialize using the identity transformation.\n", 457 | "2. CenteredTransformInitializer (GEOMETRY or MOMENTS) - translation based initialization, align the centers of the images or their centers of mass (intensity based).\n", 458 | "3. Use the exhaustive optimizer as a first step - never underestimate brute force.\n", 459 | "4. Manual initialization - allow an operator to control parameter settings using a GUI with visual feedback or identify multiple corresponding points in the two images. \n", 460 | "\n", 461 | "\n", 462 | "We start by loading our data, CT and MR scans of the CIRS (Norfolk, VA, USA) abdominal phantom." 463 | ] 464 | }, 465 | { 466 | "cell_type": "code", 467 | "execution_count": null, 468 | "metadata": {}, 469 | "outputs": [], 470 | "source": [ 471 | "data_directory = os.path.dirname(fdata(\"CIRS057A_MR_CT_DICOM/readme.txt\"))\n", 472 | "\n", 473 | "ct_window_level = [1727, -320]\n", 474 | "mr_window_level = [355, 178]\n", 475 | "\n", 476 | "fixed_series_ID = \"1.2.840.113619.2.290.3.3233817346.783.1399004564.515\"\n", 477 | "moving_series_ID = \"1.3.12.2.1107.5.2.18.41548.30000014030519285935000000933\"\n", 478 | "\n", 479 | "fixed_series_filenames = sitk.ImageSeriesReader_GetGDCMSeriesFileNames(\n", 480 | " data_directory, fixed_series_ID\n", 481 | ")\n", 482 | "moving_series_filenames = sitk.ImageSeriesReader_GetGDCMSeriesFileNames(\n", 483 | " data_directory, moving_series_ID\n", 484 | ")\n", 485 | "\n", 486 | "fixed_image = sitk.ReadImage(fixed_series_filenames, sitk.sitkFloat32)\n", 487 | "moving_image = sitk.ReadImage(moving_series_filenames, sitk.sitkFloat32)" 488 | ] 489 | }, 490 | { 491 | "cell_type": "markdown", 492 | "metadata": {}, 493 | "source": [ 494 | "### Identity transform initialization" 495 | ] 496 | }, 497 | { 498 | "cell_type": "code", 499 | "execution_count": null, 500 | "metadata": {}, 501 | "outputs": [], 502 | "source": [ 503 | "initial_transform = sitk.Transform()\n", 504 | "gui.RegistrationPointDataAquisition(\n", 505 | " fixed_image,\n", 506 | " moving_image,\n", 507 | " figure_size=(8, 4),\n", 508 | " known_transformation=initial_transform,\n", 509 | " fixed_window_level=ct_window_level,\n", 510 | " moving_window_level=mr_window_level,\n", 511 | ");" 512 | ] 513 | }, 514 | { 515 | "cell_type": "markdown", 516 | "metadata": {}, 517 | "source": [ 518 | "When working with clinical images, the DICOM tags define the orientation and position of the anatomy in the volume. The tags of interest are:\n", 519 | "\n", 535 | "\n", 536 | "SimpleITK/ITK takes this information into account when loading DICOM images. \n", 537 | "\n", 538 | "But we are working with DICOM images, so why aren't the images oriented correctly using the identity transformation?\n", 539 | "\n", 540 | "Well, the patient position in the scanner is manually entered by the technician meaning that errors may occur, though rarely. 
For our data, a phantom, it is unclear which side is the \"head\" and which is the \"feet\" so the technicians entered reasonable values for each scan. " 541 | ] 542 | }, 543 | { 544 | "cell_type": "code", 545 | "execution_count": null, 546 | "metadata": {}, 547 | "outputs": [], 548 | "source": [ 549 | "reader = sitk.ImageFileReader()\n", 550 | "reader.SetFileName(fixed_series_filenames[0])\n", 551 | "reader.ReadImageInformation()\n", 552 | "print(\n", 553 | " \"Patient name: \"\n", 554 | " + reader.GetMetaData(\"0010|0010\")\n", 555 | " + \", Patient position:\"\n", 556 | " + reader.GetMetaData(\"0018|5100\")\n", 557 | ")\n", 558 | "reader.SetFileName(moving_series_filenames[0])\n", 559 | "reader.ReadImageInformation()\n", 560 | "print(\n", 561 | " \"Patient name: \"\n", 562 | " + reader.GetMetaData(\"0010|0010\")\n", 563 | " + \", Patient position:\"\n", 564 | " + reader.GetMetaData(\"0018|5100\")\n", 565 | ")" 566 | ] 567 | }, 568 | { 569 | "cell_type": "markdown", 570 | "metadata": {}, 571 | "source": [ 572 | "### CenteredTransformInitializer initialization \n", 573 | "Compare GEOMETRY and MOMENTS based approaches:" 574 | ] 575 | }, 576 | { 577 | "cell_type": "code", 578 | "execution_count": null, 579 | "metadata": {}, 580 | "outputs": [], 581 | "source": [ 582 | "initial_transform = sitk.CenteredTransformInitializer(\n", 583 | " fixed_image,\n", 584 | " moving_image,\n", 585 | " sitk.Euler3DTransform(),\n", 586 | " sitk.CenteredTransformInitializerFilter.GEOMETRY,\n", 587 | ")\n", 588 | "gui.RegistrationPointDataAquisition(\n", 589 | " fixed_image,\n", 590 | " moving_image,\n", 591 | " figure_size=(8, 4),\n", 592 | " known_transformation=initial_transform,\n", 593 | " fixed_window_level=ct_window_level,\n", 594 | " moving_window_level=mr_window_level,\n", 595 | ");" 596 | ] 597 | }, 598 | { 599 | "cell_type": "markdown", 600 | "metadata": {}, 601 | "source": [ 602 | "### Exhaustive optimizer initialization\n", 603 | "\n", 604 | "The following initialization approach is a combination of using prior knowledge and the exhaustive optimizer. We know that the scans are acquired with the \"patient\" either supine (on their back) or prone (on their stomach) and that the scan direction (head-to-feet or feet-to-head) is along the images' z axis. \n", 605 | "We use the CenteredTransformInitializer to initialize the translation and the exhaustive optimizer to obtain an initial rigid transformation.\n", 606 | "\n", 607 | "The exhaustive optimizer evaluates the similarity metric on a grid in parameter space centered on the parameters of the initial transform. This grid is defined using three elements:\n", 608 | "1. numberOfSteps.\n", 609 | "2. stepLength.\n", 610 | "3. optimizer scales.\n", 611 | "\n", 612 | "The similarity metric is evaluated on the resulting parameter grid:\n", 613 | "initial_parameters ± numberOfSteps × stepLength × optimizerScales\n", 614 | "\n", 615 | "***Example***:\n", 616 | "1. numberOfSteps=[1,0,2,0,0,0]\n", 617 | "2. stepLength = np.pi\n", 618 | "3. optimizer scales = [1,1,0.5,1,1,1]\n", 619 | "\n", 620 | "Will perform 15 metric evaluations ($\\displaystyle\\prod_i (2*numberOfSteps[i] + 1)$).\n", 621 | "\n", 622 | "The parameter values for the second parameter and the last three parameters are the initial parameter values. 
The parameter values for the first parameter are $v_{init}-\\pi, v_{init}, v_{init}+\\pi$ and the parameter values for the third parameter are $v_{init}-\\pi, v_{init}-\\pi/2, v_{init}, v_{init}+\\pi/2, v_{init}+\\pi$.\n", 623 | "\n", 624 | "The transformation corresponding to the lowest similarity metric is returned." 625 | ] 626 | }, 627 | { 628 | "cell_type": "code", 629 | "execution_count": null, 630 | "metadata": {}, 631 | "outputs": [], 632 | "source": [ 633 | "initial_transform = sitk.CenteredTransformInitializer(\n", 634 | " fixed_image,\n", 635 | " moving_image,\n", 636 | " sitk.Euler3DTransform(),\n", 637 | " sitk.CenteredTransformInitializerFilter.MOMENTS,\n", 638 | ")\n", 639 | "registration_method = sitk.ImageRegistrationMethod()\n", 640 | "registration_method.SetMetricAsMattesMutualInformation(numberOfHistogramBins=50)\n", 641 | "registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 642 | "registration_method.SetMetricSamplingPercentage(0.01)\n", 643 | "registration_method.SetInterpolator(sitk.sitkLinear)\n", 644 | "# The order of parameters for the Euler3DTransform is [angle_x, angle_y, angle_z, t_x, t_y, t_z]. The parameter\n", 645 | "# sampling grid is centered on the initial_transform parameter values, that are all zero for the rotations. Given\n", 646 | "# the number of steps, their length and optimizer scales we have:\n", 647 | "# angle_x = 0\n", 648 | "# angle_y = -pi, 0, pi\n", 649 | "# angle_z = -pi, 0, pi\n", 650 | "registration_method.SetOptimizerAsExhaustive(\n", 651 | " numberOfSteps=[0, 1, 1, 0, 0, 0], stepLength=np.pi\n", 652 | ")\n", 653 | "registration_method.SetOptimizerScales([1, 1, 1, 1, 1, 1])\n", 654 | "\n", 655 | "# Perform the registration in-place so that the initial_transform is modified.\n", 656 | "registration_method.SetInitialTransform(initial_transform, inPlace=True)\n", 657 | "registration_method.Execute(fixed_image, moving_image)\n", 658 | "\n", 659 | "gui.RegistrationPointDataAquisition(\n", 660 | " fixed_image,\n", 661 | " moving_image,\n", 662 | " figure_size=(8, 4),\n", 663 | " known_transformation=initial_transform,\n", 664 | " fixed_window_level=ct_window_level,\n", 665 | " moving_window_level=mr_window_level,\n", 666 | ");" 667 | ] 668 | }, 669 | { 670 | "cell_type": "markdown", 671 | "metadata": {}, 672 | "source": [ 673 | "#### Manual initialization\n", 674 | "\n", 675 | "When all else fails, a human in the loop will almost always be able to robustly initialize the registration.\n", 676 | "\n", 677 | "In the example below we identify corresponding points to compute an initial rigid transformation. \n", 678 | "\n", 679 | "**Note**: There is no correspondence between the fiducial markers on the phantom." 
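Once the transformation is estimated in the cells below, a quick numeric sanity check is the residual error over the fiducials themselves, the Fiducial Registration Error (FRE). A minimal sketch, using the variables defined in the next cells:

```python
import numpy as np

# Residual distances, in mm, between the mapped fixed-image fiducials and their
# manually localized counterparts in the moving image.
fre = [
    np.linalg.norm(np.array(initial_transformation.TransformPoint(p_f)) - np.array(p_m))
    for p_f, p_m in zip(fixed_image_points, moving_image_points)
]
print("FRE in millimeters, mean: {0:.2f}, max: {1:.2f}".format(np.mean(fre), np.max(fre)))
```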
680 | ]
681 | },
682 | {
683 | "cell_type": "code",
684 | "execution_count": null,
685 | "metadata": {},
686 | "outputs": [],
687 | "source": [
688 | "point_acquisition_interface = gui.RegistrationPointDataAquisition(\n",
689 | "    fixed_image,\n",
690 | "    moving_image,\n",
691 | "    figure_size=(8, 4),\n",
692 | "    fixed_window_level=ct_window_level,\n",
693 | "    moving_window_level=mr_window_level,\n",
694 | ");"
695 | ]
696 | },
697 | {
698 | "cell_type": "code",
699 | "execution_count": null,
700 | "metadata": {},
701 | "outputs": [],
702 | "source": [
703 | "# Get the manually specified points and compute the transformation.\n",
704 | "\n",
705 | "fixed_image_points, moving_image_points = point_acquisition_interface.get_points()\n",
706 | "\n",
707 | "# Previously localized points (here so that the testing passes):\n",
708 | "fixed_image_points = [\n",
709 | "    (24.062587103074605, 14.594981536981521, -58.75),\n",
710 | "    (6.178716135332678, 53.93949766601378, -58.75),\n",
711 | "    (74.14383149714774, -69.04462737237648, -76.25),\n",
712 | "    (109.74899278747029, -14.905272533666817, -76.25),\n",
713 | "]\n",
714 | "moving_image_points = [\n",
715 | "    (4.358707846364581, 60.46357110706131, -71.53120422363281),\n",
716 | "    (24.09010295252645, 98.21840981673873, -71.53120422363281),\n",
717 | "    (-52.11888008581127, -26.57984635768439, -58.53120422363281),\n",
718 | "    (-87.46150681392184, 28.73904765153219, -58.53120422363281),\n",
719 | "]\n",
720 | "\n",
721 | "fixed_image_points_flat = [c for p in fixed_image_points for c in p]\n",
722 | "moving_image_points_flat = [c for p in moving_image_points for c in p]\n",
723 | "initial_transformation = sitk.LandmarkBasedTransformInitializer(\n",
724 | "    sitk.VersorRigid3DTransform(), fixed_image_points_flat, moving_image_points_flat\n",
725 | ")\n",
726 | "gui.RegistrationPointDataAquisition(\n",
727 | "    fixed_image,\n",
728 | "    moving_image,\n",
729 | "    figure_size=(8, 4),\n",
730 | "    known_transformation=initial_transformation,\n",
731 | "    fixed_window_level=ct_window_level,\n",
732 | "    moving_window_level=mr_window_level,\n",
733 | ");"
734 | ]
735 | },
736 | {
737 | "cell_type": "markdown",
738 | "metadata": {},
739 | "source": [
740 | "
<a href=\"06_advanced_registration.ipynb\"><h2 align=right>Next &raquo;</h2></a>
" 741 | ] 742 | } 743 | ], 744 | "metadata": { 745 | "kernelspec": { 746 | "display_name": "Python 3 (ipykernel)", 747 | "language": "python", 748 | "name": "python3" 749 | }, 750 | "language_info": { 751 | "codemirror_mode": { 752 | "name": "ipython", 753 | "version": 3 754 | }, 755 | "file_extension": ".py", 756 | "mimetype": "text/x-python", 757 | "name": "python", 758 | "nbconvert_exporter": "python", 759 | "pygments_lexer": "ipython3", 760 | "version": "3.9.13" 761 | } 762 | }, 763 | "nbformat": 4, 764 | "nbformat_minor": 2 765 | } 766 | -------------------------------------------------------------------------------- /06_advanced_registration.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "
<h1 align=\"center\">Advanced Registration</h1>
\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "1. SimpleITK provides two flavors of non-rigid registration:\n", 12 | " * Free Form Deformation, BSpline based, and Demons using the ITKv4 registration framework.\n", 13 | " * A set of Demons filters that are independent of the registration framework (`DemonsRegistrationFilter, DiffeomorphicDemonsRegistrationFilter, FastSymmetricForcesDemonsRegistrationFilter, SymmetricForcesDemonsRegistrationFilter`).\n", 14 | "2. Registration evaluation:\n", 15 | " * Registration accuracy, the quantity of interest is the Target Registration Error (TRE).\n", 16 | " * TRE is spatially variant.\n", 17 | " * Surrogate metrics for evaluating registration accuracy such as segmentation overlaps are relevant, but are potentially deficient.\n", 18 | " * Registration time.\n", 19 | " * Acceptable values for TRE and runtime are context dependent." 20 | ] 21 | }, 22 | { 23 | "cell_type": "code", 24 | "execution_count": null, 25 | "metadata": {}, 26 | "outputs": [], 27 | "source": [ 28 | "import SimpleITK as sitk\n", 29 | "import registration_gui as rgui\n", 30 | "import utilities\n", 31 | "\n", 32 | "from downloaddata import fetch_data as fdata\n", 33 | "\n", 34 | "from ipywidgets import interact, fixed\n", 35 | "\n", 36 | "%matplotlib inline\n", 37 | "import matplotlib.pyplot as plt\n", 38 | "\n", 39 | "import numpy as np" 40 | ] 41 | }, 42 | { 43 | "cell_type": "markdown", 44 | "metadata": {}, 45 | "source": [ 46 | "## Data and Registration Task\n", 47 | "\n", 48 | "In this notebook we will use the Point-validated Pixel-based Breathing Thorax Model (POPI). This is a 4D (3D+time) thoracic-abdominal CT (10 CTs representing the respiratory cycle) with masks segmenting each of the CTs to air/body/lung, and a set of corresponding landmarks localized in each of the CT volumes.\n", 49 | "\n", 50 | "The registration problem we deal with is non-rigid alignment of the lungs throughout the respiratory cycle. This information is relevant for radiation therapy planning and execution.\n", 51 | "\n", 52 | "\n", 53 | "The POPI model is provided by the Léon Bérard Cancer Center & CREATIS Laboratory, Lyon, France. The relevant publication is:\n", 54 | "\n", 55 | "J. Vandemeulebroucke, D. Sarrut, P. Clarysse, \"The POPI-model, a point-validated pixel-based breathing thorax model\",\n", 56 | "Proc. XVth International Conference on the Use of Computers in Radiation Therapy (ICCR), Toronto, Canada, 2007.\n", 57 | "\n", 58 | "Additional 4D CT data sets with reference points are available from the CREATIS Laboratory here. 
" 59 | ] 60 | }, 61 | { 62 | "cell_type": "code", 63 | "execution_count": null, 64 | "metadata": {}, 65 | "outputs": [], 66 | "source": [ 67 | "images = []\n", 68 | "masks = []\n", 69 | "points = []\n", 70 | "image_indexes = [0, 7]\n", 71 | "for i in image_indexes:\n", 72 | " image_file_name = \"POPI/meta/{0}0-P.mhd\".format(i)\n", 73 | " mask_file_name = \"POPI/masks/{0}0-air-body-lungs.mhd\".format(i)\n", 74 | " points_file_name = \"POPI/landmarks/{0}0-Landmarks.pts\".format(i)\n", 75 | " images.append(sitk.ReadImage(fdata(image_file_name), sitk.sitkFloat32))\n", 76 | " masks.append(sitk.ReadImage(fdata(mask_file_name)))\n", 77 | " points.append(utilities.read_POPI_points(fdata(points_file_name)))\n", 78 | "\n", 79 | "interact(\n", 80 | " rgui.display_coronal_with_overlay,\n", 81 | " temporal_slice=(0, len(images) - 1),\n", 82 | " coronal_slice=(0, images[0].GetSize()[1] - 1),\n", 83 | " images=fixed(images),\n", 84 | " masks=fixed(masks),\n", 85 | " label=fixed(utilities.popi_lung_label),\n", 86 | " window_min=fixed(-1024),\n", 87 | " window_max=fixed(976),\n", 88 | ");" 89 | ] 90 | }, 91 | { 92 | "cell_type": "markdown", 93 | "metadata": {}, 94 | "source": [ 95 | "## Free Form Deformation\n", 96 | "\n", 97 | "Define a BSplineTransform using a sparse set of grid points overlaid onto the fixed image's domain to deform it.\n", 98 | "\n", 99 | "For the current registration task we are fortunate in that we have a unique setting. The images are of the same patient during respiration so we can initialize the registration using the identity transform. Additionally, we have masks demarcating the lungs.\n", 100 | "\n", 101 | "We use the registration framework taking advantage of its ability to use masks that limit the similarity metric estimation to points lying inside our region of interest, the lungs." 
102 | ] 103 | }, 104 | { 105 | "cell_type": "code", 106 | "execution_count": null, 107 | "metadata": {}, 108 | "outputs": [], 109 | "source": [ 110 | "fixed_index = 0\n", 111 | "moving_index = 1\n", 112 | "\n", 113 | "fixed_image = images[fixed_index]\n", 114 | "fixed_image_mask = masks[fixed_index] == utilities.popi_lung_label\n", 115 | "fixed_points = points[fixed_index]\n", 116 | "\n", 117 | "moving_image = images[moving_index]\n", 118 | "moving_image_mask = masks[moving_index] == utilities.popi_lung_label\n", 119 | "moving_points = points[moving_index]" 120 | ] 121 | }, 122 | { 123 | "cell_type": "code", 124 | "execution_count": null, 125 | "metadata": {}, 126 | "outputs": [], 127 | "source": [ 128 | "# Define a simple callback which allows us to monitor registration progress.\n", 129 | "def iteration_callback(filter):\n", 130 | " print(\"\\r{0:.2f}\".format(filter.GetMetricValue()), end=\"\")\n", 131 | "\n", 132 | "\n", 133 | "registration_method = sitk.ImageRegistrationMethod()\n", 134 | "\n", 135 | "# Determine the number of BSpline control points using the physical\n", 136 | "# spacing we want for the finest resolution control grid.\n", 137 | "grid_physical_spacing = [50.0, 50.0, 50.0] # A control point every 50mm\n", 138 | "image_physical_size = [\n", 139 | " size * spacing\n", 140 | " for size, spacing in zip(fixed_image.GetSize(), fixed_image.GetSpacing())\n", 141 | "]\n", 142 | "mesh_size = [\n", 143 | " int(image_size / grid_spacing + 0.5)\n", 144 | " for image_size, grid_spacing in zip(image_physical_size, grid_physical_spacing)\n", 145 | "]\n", 146 | "# The starting mesh size will be 1/4 of the original, it will be refined by\n", 147 | "# the multi-resolution framework.\n", 148 | "mesh_size = [int(sz / 4 + 0.5) for sz in mesh_size]\n", 149 | "\n", 150 | "initial_transform = sitk.BSplineTransformInitializer(\n", 151 | " image1=fixed_image, transformDomainMeshSize=mesh_size, order=3\n", 152 | ")\n", 153 | "# Instead of the standard SetInitialTransform we use the BSpline specific method which also\n", 154 | "# accepts the scaleFactors parameter to refine the BSpline mesh. 
In this case we start with\n", 155 | "# the given mesh_size at the highest pyramid level then we double it in the next lower level and\n", 156 | "# in the full resolution image we use a mesh that is four times the original size.\n", 157 | "registration_method.SetInitialTransformAsBSpline(\n", 158 | " initial_transform, inPlace=False, scaleFactors=[1, 2, 4]\n", 159 | ")\n", 160 | "\n", 161 | "registration_method.SetMetricAsMeanSquares()\n", 162 | "registration_method.SetMetricSamplingStrategy(registration_method.RANDOM)\n", 163 | "registration_method.SetMetricSamplingPercentage(0.01)\n", 164 | "registration_method.SetMetricFixedMask(fixed_image_mask)\n", 165 | "\n", 166 | "registration_method.SetShrinkFactorsPerLevel(shrinkFactors=[4, 2, 1])\n", 167 | "registration_method.SetSmoothingSigmasPerLevel(smoothingSigmas=[2, 1, 0])\n", 168 | "registration_method.SmoothingSigmasAreSpecifiedInPhysicalUnitsOn()\n", 169 | "\n", 170 | "registration_method.SetInterpolator(sitk.sitkLinear)\n", 171 | "registration_method.SetOptimizerAsLBFGS2(\n", 172 | " solutionAccuracy=1e-2, numberOfIterations=100, deltaConvergenceTolerance=0.01\n", 173 | ")\n", 174 | "\n", 175 | "registration_method.AddCommand(\n", 176 | " sitk.sitkIterationEvent, lambda: iteration_callback(registration_method)\n", 177 | ")\n", 178 | "\n", 179 | "final_transformation = registration_method.Execute(fixed_image, moving_image)\n", 180 | "print(\n", 181 | " \"\\nOptimizer's stopping condition, {0}\".format(\n", 182 | " registration_method.GetOptimizerStopConditionDescription()\n", 183 | " )\n", 184 | ")" 185 | ] 186 | }, 187 | { 188 | "cell_type": "markdown", 189 | "metadata": {}, 190 | "source": [ 191 | "## Qualitative evaluation via segmentation transfer\n", 192 | "\n", 193 | "Transfer the segmentation from the moving image to the fixed image before and after registration and visually evaluate overlap." 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": null, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "transformed_segmentation = sitk.Resample(\n", 203 | " moving_image_mask,\n", 204 | " fixed_image,\n", 205 | " final_transformation,\n", 206 | " sitk.sitkNearestNeighbor,\n", 207 | " 0.0,\n", 208 | " moving_image_mask.GetPixelID(),\n", 209 | ")\n", 210 | "\n", 211 | "interact(\n", 212 | " rgui.display_coronal_with_overlay,\n", 213 | " temporal_slice=(0, 1),\n", 214 | " coronal_slice=(0, fixed_image.GetSize()[1] - 1),\n", 215 | " images=fixed([fixed_image, fixed_image]),\n", 216 | " masks=fixed([moving_image_mask, transformed_segmentation]),\n", 217 | " label=fixed(1),\n", 218 | " window_min=fixed(-1024),\n", 219 | " window_max=fixed(976),\n", 220 | ");" 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "metadata": {}, 226 | "source": [ 227 | "### Quantitative evaluation \n", 228 | "\n", 229 | "The most appropriate evaluation is based on analysis of Target Registration Errors(TRE), which is defined as follows:\n", 230 | "\n", 231 | "Given the transformation $T_f^m$ and corresponding points in the two coordinate systems, $^fp,^mp$, points which were not used in the registration process, TRE is defined as $\\|T_f^m(^fp) - ^mp\\|$. 
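Transcribed directly into code, the definition reads as follows (a sketch; the utilities.target_registration_errors helper used below performs the equivalent computation and adds display options):

```python
import numpy as np

def target_registration_error(transform, fixed_points, moving_points):
    # Per-point TRE: distance between the mapped fixed point and its moving counterpart.
    return [
        np.linalg.norm(np.array(transform.TransformPoint(p_f)) - np.array(p_m))
        for p_f, p_m in zip(fixed_points, moving_points)
    ]
```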
\n", 232 | "\n", 233 | "We start by looking at some descriptive statistics of TRE:" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": null, 239 | "metadata": {}, 240 | "outputs": [], 241 | "source": [ 242 | "initial_TRE = utilities.target_registration_errors(\n", 243 | " sitk.Transform(), fixed_points, moving_points\n", 244 | ")\n", 245 | "final_TRE = utilities.target_registration_errors(\n", 246 | " final_transformation, fixed_points, moving_points\n", 247 | ")\n", 248 | "\n", 249 | "print(\n", 250 | " \"Initial alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}\".format(\n", 251 | " np.mean(initial_TRE), np.std(initial_TRE), np.max(initial_TRE)\n", 252 | " )\n", 253 | ")\n", 254 | "print(\n", 255 | " \"Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}\".format(\n", 256 | " np.mean(final_TRE), np.std(final_TRE), np.max(final_TRE)\n", 257 | " )\n", 258 | ")" 259 | ] 260 | }, 261 | { 262 | "cell_type": "markdown", 263 | "metadata": {}, 264 | "source": [ 265 | "The above descriptive statistics do not convey the whole picture, we should also look at the TRE distributions before and after registration." 266 | ] 267 | }, 268 | { 269 | "cell_type": "code", 270 | "execution_count": null, 271 | "metadata": {}, 272 | "outputs": [], 273 | "source": [ 274 | "plt.hist(initial_TRE, bins=20, alpha=0.5, label=\"before registration\", color=\"blue\")\n", 275 | "plt.hist(final_TRE, bins=20, alpha=0.5, label=\"after registration\", color=\"green\")\n", 276 | "plt.legend()\n", 277 | "plt.title(\"TRE histogram\");" 278 | ] 279 | }, 280 | { 281 | "cell_type": "markdown", 282 | "metadata": {}, 283 | "source": [ 284 | "Finally, we should also take into account the fact that TRE is spatially variant (think center of rotation). We therefore should also explore the distribution of errors as a function of the point location." 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": null, 290 | "metadata": {}, 291 | "outputs": [], 292 | "source": [ 293 | "initial_errors = utilities.target_registration_errors(\n", 294 | " sitk.Transform(), fixed_points, moving_points, display_errors=True\n", 295 | ")\n", 296 | "utilities.target_registration_errors(\n", 297 | " final_transformation,\n", 298 | " fixed_points,\n", 299 | " moving_points,\n", 300 | " min_err=min(initial_errors),\n", 301 | " max_err=max(initial_errors),\n", 302 | " display_errors=True,\n", 303 | ");" 304 | ] 305 | }, 306 | { 307 | "cell_type": "markdown", 308 | "metadata": {}, 309 | "source": [ 310 | "Deciding whether a registration algorithm is appropriate for a specific problem is context dependent and is defined by the clinical/research needs both in terms of accuracy and computational complexity." 311 | ] 312 | }, 313 | { 314 | "cell_type": "markdown", 315 | "metadata": {}, 316 | "source": [ 317 | "## Demons Based Registration\n", 318 | "\n", 319 | "SimpleITK includes a number of filters from the Demons registration family (originally introduced by J. P. Thirion):\n", 320 | "1. DemonsRegistrationFilter.\n", 321 | "2. DiffeomorphicDemonsRegistrationFilter.\n", 322 | "3. FastSymmetricForcesDemonsRegistrationFilter.\n", 323 | "4. SymmetricForcesDemonsRegistrationFilter.\n", 324 | "\n", 325 | "These are appropriate for mono-modal registration. As these filters are independent of the ImageRegistrationMethod we do not have access to the multiscale framework. 
Luckily, it is easy to implement our own multiscale framework in SimpleITK, which is what we do in the next cell."
326 | ]
327 | },
328 | {
329 | "cell_type": "code",
330 | "execution_count": null,
331 | "metadata": {},
332 | "outputs": [],
333 | "source": [
334 | "def smooth_and_resample(image, shrink_factor, smoothing_sigma):\n",
335 | "    \"\"\"\n",
336 | "    Args:\n",
337 | "        image: The image we want to resample.\n",
338 | "        shrink_factor: A number greater than one, such that the new image's size is original_size/shrink_factor.\n",
339 | "        smoothing_sigma: Sigma for Gaussian smoothing; this is in physical (image spacing) units, not pixels.\n",
340 | "    Returns:\n",
341 | "        Image which is a result of smoothing the input and then resampling it using the given sigma and shrink factor.\n",
342 | "    \"\"\"\n",
343 | "    smoothed_image = sitk.SmoothingRecursiveGaussian(image, smoothing_sigma)\n",
344 | "\n",
345 | "    original_spacing = image.GetSpacing()\n",
346 | "    original_size = image.GetSize()\n",
347 | "    new_size = [int(sz / float(shrink_factor) + 0.5) for sz in original_size]\n",
348 | "    new_spacing = [\n",
349 | "        ((original_sz - 1) * original_spc) / (new_sz - 1)\n",
350 | "        for original_sz, original_spc, new_sz in zip(\n",
351 | "            original_size, original_spacing, new_size\n",
352 | "        )\n",
353 | "    ]\n",
354 | "    return sitk.Resample(\n",
355 | "        smoothed_image,\n",
356 | "        new_size,\n",
357 | "        sitk.Transform(),\n",
358 | "        sitk.sitkLinear,\n",
359 | "        image.GetOrigin(),\n",
360 | "        new_spacing,\n",
361 | "        image.GetDirection(),\n",
362 | "        0.0,\n",
363 | "        image.GetPixelID(),\n",
364 | "    )\n",
365 | "\n",
366 | "\n",
367 | "def multiscale_demons(\n",
368 | "    registration_algorithm,\n",
369 | "    fixed_image,\n",
370 | "    moving_image,\n",
371 | "    initial_transform=None,\n",
372 | "    shrink_factors=None,\n",
373 | "    smoothing_sigmas=None,\n",
374 | "):\n",
375 | "    \"\"\"\n",
376 | "    Run the given registration algorithm in a multiscale fashion. The original scale should not be given as input as the\n",
377 | "    original images are implicitly incorporated as the base of the pyramid.\n",
378 | "    Args:\n",
379 | "        registration_algorithm: Any registration algorithm that has an Execute(fixed_image, moving_image, displacement_field_image)\n",
380 | "                                method.\n",
381 | "        fixed_image: Resulting transformation maps points from this image's spatial domain to the moving image spatial domain.\n",
382 | "        moving_image: Resulting transformation maps points from the fixed_image's spatial domain to this image's spatial domain.\n",
383 | "        initial_transform: Any SimpleITK transform, used to initialize the displacement field.\n",
384 | "        shrink_factors: Shrink factors relative to the original image's size.\n",
385 | "        smoothing_sigmas: Amount of smoothing which is done prior to resampling the image using the given shrink factor.
These\n", 386 | " are in physical (image spacing) units.\n", 387 | " Returns:\n", 388 | " SimpleITK.DisplacementFieldTransform\n", 389 | " \"\"\"\n", 390 | " # Create image pyramid.\n", 391 | " fixed_images = [fixed_image]\n", 392 | " moving_images = [moving_image]\n", 393 | " if shrink_factors:\n", 394 | " for shrink_factor, smoothing_sigma in reversed(\n", 395 | " list(zip(shrink_factors, smoothing_sigmas))\n", 396 | " ):\n", 397 | " fixed_images.append(\n", 398 | " smooth_and_resample(fixed_images[0], shrink_factor, smoothing_sigma)\n", 399 | " )\n", 400 | " moving_images.append(\n", 401 | " smooth_and_resample(moving_images[0], shrink_factor, smoothing_sigma)\n", 402 | " )\n", 403 | "\n", 404 | " # Create initial displacement field at lowest resolution.\n", 405 | " # Currently, the pixel type is required to be sitkVectorFloat64 because of a constraint imposed by the Demons filters.\n", 406 | " if initial_transform:\n", 407 | " initial_displacement_field = sitk.TransformToDisplacementField(\n", 408 | " initial_transform,\n", 409 | " sitk.sitkVectorFloat64,\n", 410 | " fixed_images[-1].GetSize(),\n", 411 | " fixed_images[-1].GetOrigin(),\n", 412 | " fixed_images[-1].GetSpacing(),\n", 413 | " fixed_images[-1].GetDirection(),\n", 414 | " )\n", 415 | " else:\n", 416 | " initial_displacement_field = sitk.Image(\n", 417 | " fixed_images[-1].GetWidth(),\n", 418 | " fixed_images[-1].GetHeight(),\n", 419 | " fixed_images[-1].GetDepth(),\n", 420 | " sitk.sitkVectorFloat64,\n", 421 | " )\n", 422 | " initial_displacement_field.CopyInformation(fixed_images[-1])\n", 423 | "\n", 424 | " # Run the registration.\n", 425 | " initial_displacement_field = registration_algorithm.Execute(\n", 426 | " fixed_images[-1], moving_images[-1], initial_displacement_field\n", 427 | " )\n", 428 | " # Start at the top of the pyramid and work our way down.\n", 429 | " for f_image, m_image in reversed(\n", 430 | " list(zip(fixed_images[0:-1], moving_images[0:-1]))\n", 431 | " ):\n", 432 | " initial_displacement_field = sitk.Resample(initial_displacement_field, f_image)\n", 433 | " initial_displacement_field = registration_algorithm.Execute(\n", 434 | " f_image, m_image, initial_displacement_field\n", 435 | " )\n", 436 | " return sitk.DisplacementFieldTransform(initial_displacement_field)" 437 | ] 438 | }, 439 | { 440 | "cell_type": "markdown", 441 | "metadata": {}, 442 | "source": [ 443 | "Now we will use our newly minted multiscale framework to perform registration with the Demons filters. Some things you can easily try out by editing the code below:\n", 444 | "1. Is there really a need for multiscale - just call the multiscale_demons method without the shrink_factors and smoothing_sigmas parameters.\n", 445 | "2. Which Demons filter should you use - configure the other filters and see if our selection is the best choice (accuracy/time)." 
446 | ] 447 | }, 448 | { 449 | "cell_type": "code", 450 | "execution_count": null, 451 | "metadata": {}, 452 | "outputs": [], 453 | "source": [ 454 | "# Define a simple callback which allows us to monitor registration progress.\n", 455 | "def iteration_callback(filter):\n", 456 | " print(\n", 457 | " \"\\r{0}: {1:.2f}\".format(filter.GetElapsedIterations(), filter.GetMetric()),\n", 458 | " end=\"\",\n", 459 | " )\n", 460 | "\n", 461 | "\n", 462 | "# Select a Demons filter and configure it.\n", 463 | "demons_filter = sitk.FastSymmetricForcesDemonsRegistrationFilter()\n", 464 | "demons_filter.SetNumberOfIterations(20)\n", 465 | "# Regularization (update field - viscous, total field - elastic).\n", 466 | "demons_filter.SetSmoothDisplacementField(True)\n", 467 | "demons_filter.SetStandardDeviations(2.0)\n", 468 | "\n", 469 | "# Add our simple callback to the registration filter.\n", 470 | "demons_filter.AddCommand(\n", 471 | " sitk.sitkIterationEvent, lambda: iteration_callback(demons_filter)\n", 472 | ")\n", 473 | "\n", 474 | "# Run the registration.\n", 475 | "tx = multiscale_demons(\n", 476 | " registration_algorithm=demons_filter,\n", 477 | " fixed_image=fixed_image,\n", 478 | " moving_image=moving_image,\n", 479 | " shrink_factors=[4, 2],\n", 480 | " smoothing_sigmas=[8, 4],\n", 481 | ")\n", 482 | "\n", 483 | "# look at the final TREs.\n", 484 | "final_TRE = utilities.target_registration_errors(\n", 485 | " tx, fixed_points, moving_points, display_errors=True\n", 486 | ")\n", 487 | "\n", 488 | "print(\n", 489 | " \"Final alignment errors in millimeters, mean(std): {:.2f}({:.2f}), max: {:.2f}\".format(\n", 490 | " np.mean(final_TRE), np.std(final_TRE), np.max(final_TRE)\n", 491 | " )\n", 492 | ")" 493 | ] 494 | }, 495 | { 496 | "cell_type": "markdown", 497 | "metadata": {}, 498 | "source": [ 499 | "## Quantitative Evaluation II (Segmentation)\n", 500 | "\n", 501 | "While the use of corresponding points to evaluate registration is the desired approach, it is often not applicable. In many cases there are only a few distinct points which can be localized in the two images, possibly too few to serve as a metric for evaluating the registration result across the whole region of interest. \n", 502 | "\n", 503 | "An alternative approach is to use segmentation. In this approach, we independently segment the structures of interest in the two images. After registration we transfer the segmentation from one image to the other and compare the original and registration induced segmentations.\n" 504 | ] 505 | }, 506 | { 507 | "cell_type": "code", 508 | "execution_count": null, 509 | "metadata": {}, 510 | "outputs": [], 511 | "source": [ 512 | "# Transfer the segmentation via the estimated transformation.\n", 513 | "# Nearest Neighbor interpolation so we don't introduce new labels.\n", 514 | "transformed_labels = sitk.Resample(\n", 515 | " masks[moving_index],\n", 516 | " fixed_image,\n", 517 | " tx,\n", 518 | " sitk.sitkNearestNeighbor,\n", 519 | " 0.0,\n", 520 | " masks[moving_index].GetPixelID(),\n", 521 | ")" 522 | ] 523 | }, 524 | { 525 | "cell_type": "markdown", 526 | "metadata": {}, 527 | "source": [ 528 | "We have now replaced the task of evaluating registration with that of evaluating segmentation." 
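Before the full set of measures computed below, the idea in a single number (a sketch using one overlap measure):

```python
# Dice coefficient between the reference lung mask and the registration-induced one.
overlap_filter = sitk.LabelOverlapMeasuresImageFilter()
overlap_filter.Execute(fixed_image_mask, transformed_labels == utilities.popi_lung_label)
print("Dice after registration: {0:.3f}".format(overlap_filter.GetDiceCoefficient()))
```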
529 | ] 530 | }, 531 | { 532 | "cell_type": "code", 533 | "execution_count": null, 534 | "metadata": {}, 535 | "outputs": [], 536 | "source": [ 537 | "# Often referred to as ground truth, but we prefer reference as the truth is never known.\n", 538 | "reference_segmentation = fixed_image_mask\n", 539 | "# Segmentations before and after registration\n", 540 | "segmentations = [moving_image_mask, transformed_labels == utilities.popi_lung_label]" 541 | ] 542 | }, 543 | { 544 | "cell_type": "code", 545 | "execution_count": null, 546 | "metadata": {}, 547 | "outputs": [], 548 | "source": [ 549 | "from enum import Enum\n", 550 | "\n", 551 | "\n", 552 | "# Use enumerations to represent the various evaluation measures\n", 553 | "class OverlapMeasures(Enum):\n", 554 | " jaccard, dice, volume_similarity, false_negative, false_positive = range(5)\n", 555 | "\n", 556 | "\n", 557 | "class SurfaceDistanceMeasures(Enum):\n", 558 | " (\n", 559 | " hausdorff_distance,\n", 560 | " mean_surface_distance,\n", 561 | " median_surface_distance,\n", 562 | " std_surface_distance,\n", 563 | " max_surface_distance,\n", 564 | " ) = range(5)\n", 565 | "\n", 566 | "\n", 567 | "# Empty numpy arrays to hold the results\n", 568 | "overlap_results = np.zeros(\n", 569 | " (len(segmentations), len(OverlapMeasures.__members__.items()))\n", 570 | ")\n", 571 | "surface_distance_results = np.zeros(\n", 572 | " (len(segmentations), len(SurfaceDistanceMeasures.__members__.items()))\n", 573 | ")\n", 574 | "\n", 575 | "# Compute the evaluation criteria\n", 576 | "\n", 577 | "# Note that for the overlap measures filter, because we are dealing with a single label we\n", 578 | "# use the combined, all labels, evaluation measures without passing a specific label to the methods.\n", 579 | "overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n", 580 | "\n", 581 | "hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n", 582 | "\n", 583 | "# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside\n", 584 | "# relationship, is irrelevant)\n", 585 | "label = 1\n", 586 | "reference_distance_map = sitk.Abs(\n", 587 | " sitk.SignedMaurerDistanceMap(reference_segmentation, squaredDistance=False)\n", 588 | ")\n", 589 | "reference_surface = sitk.LabelContour(reference_segmentation)\n", 590 | "\n", 591 | "statistics_image_filter = sitk.StatisticsImageFilter()\n", 592 | "# Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 593 | "statistics_image_filter.Execute(reference_surface)\n", 594 | "num_reference_surface_pixels = int(statistics_image_filter.GetSum())\n", 595 | "\n", 596 | "for i, seg in enumerate(segmentations):\n", 597 | " # Overlap measures\n", 598 | " overlap_measures_filter.Execute(reference_segmentation, seg)\n", 599 | " overlap_results[i, OverlapMeasures.jaccard.value] = (\n", 600 | " overlap_measures_filter.GetJaccardCoefficient()\n", 601 | " )\n", 602 | " overlap_results[i, OverlapMeasures.dice.value] = (\n", 603 | " overlap_measures_filter.GetDiceCoefficient()\n", 604 | " )\n", 605 | " overlap_results[i, OverlapMeasures.volume_similarity.value] = (\n", 606 | " overlap_measures_filter.GetVolumeSimilarity()\n", 607 | " )\n", 608 | " overlap_results[i, OverlapMeasures.false_negative.value] = (\n", 609 | " overlap_measures_filter.GetFalseNegativeError()\n", 610 | " )\n", 611 | " overlap_results[i, OverlapMeasures.false_positive.value] = (\n", 612 | " overlap_measures_filter.GetFalsePositiveError()\n", 613 | " )\n", 
614 | " # Hausdorff distance\n", 615 | " hausdorff_distance_filter.Execute(reference_segmentation, seg)\n", 616 | " surface_distance_results[i, SurfaceDistanceMeasures.hausdorff_distance.value] = (\n", 617 | " hausdorff_distance_filter.GetHausdorffDistance()\n", 618 | " )\n", 619 | " # Symmetric surface distance measures\n", 620 | " segmented_distance_map = sitk.Abs(\n", 621 | " sitk.SignedMaurerDistanceMap(seg, squaredDistance=False)\n", 622 | " )\n", 623 | " segmented_surface = sitk.LabelContour(seg)\n", 624 | "\n", 625 | " # Multiply the binary surface segmentations with the distance maps. The resulting distance\n", 626 | " # maps contain non-zero values only on the surface (they can also contain zero on the surface)\n", 627 | " seg2ref_distance_map = reference_distance_map * sitk.Cast(\n", 628 | " segmented_surface, sitk.sitkFloat32\n", 629 | " )\n", 630 | " ref2seg_distance_map = segmented_distance_map * sitk.Cast(\n", 631 | " reference_surface, sitk.sitkFloat32\n", 632 | " )\n", 633 | "\n", 634 | " # Get the number of pixels in the segmented surface by counting all pixels that are 1.\n", 635 | " statistics_image_filter.Execute(segmented_surface)\n", 636 | " num_segmented_surface_pixels = int(statistics_image_filter.GetSum())\n", 637 | "\n", 638 | " # Get all non-zero distances and then add zero distances if required.\n", 639 | " seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n", 640 | " seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])\n", 641 | " seg2ref_distances = seg2ref_distances + list(\n", 642 | " np.zeros(num_segmented_surface_pixels - len(seg2ref_distances))\n", 643 | " )\n", 644 | " ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n", 645 | " ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])\n", 646 | " ref2seg_distances = ref2seg_distances + list(\n", 647 | " np.zeros(num_reference_surface_pixels - len(ref2seg_distances))\n", 648 | " )\n", 649 | "\n", 650 | " all_surface_distances = seg2ref_distances + ref2seg_distances\n", 651 | "\n", 652 | " surface_distance_results[i, SurfaceDistanceMeasures.mean_surface_distance.value] = (\n", 653 | " np.mean(all_surface_distances)\n", 654 | " )\n", 655 | " surface_distance_results[\n", 656 | " i, SurfaceDistanceMeasures.median_surface_distance.value\n", 657 | " ] = np.median(all_surface_distances)\n", 658 | " surface_distance_results[i, SurfaceDistanceMeasures.std_surface_distance.value] = (\n", 659 | " np.std(all_surface_distances)\n", 660 | " )\n", 661 | " surface_distance_results[i, SurfaceDistanceMeasures.max_surface_distance.value] = (\n", 662 | " np.max(all_surface_distances)\n", 663 | " )\n", 664 | "\n", 665 | "import pandas as pd\n", 666 | "from IPython.display import display, HTML\n", 667 | "\n", 668 | "# Graft our results matrix into pandas data frames\n", 669 | "overlap_results_df = pd.DataFrame(\n", 670 | " data=overlap_results,\n", 671 | " index=[\"before registration\", \"after registration\"],\n", 672 | " columns=[name for name, _ in OverlapMeasures.__members__.items()],\n", 673 | ")\n", 674 | "surface_distance_results_df = pd.DataFrame(\n", 675 | " data=surface_distance_results,\n", 676 | " index=[\"before registration\", \"after registration\"],\n", 677 | " columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()],\n", 678 | ")\n", 679 | "\n", 680 | "# Display the data as HTML tables and graphs\n", 681 | "display(HTML(overlap_results_df.to_html(float_format=lambda x: \"%.3f\" % 
x)))\n", 682 | "display(HTML(surface_distance_results_df.to_html(float_format=lambda x: \"%.3f\" % x)))\n", 683 | "overlap_results_df.plot(kind=\"bar\", rot=1).legend(bbox_to_anchor=(1.6, 0.9))\n", 684 | "surface_distance_results_df.plot(kind=\"bar\", rot=1).legend(bbox_to_anchor=(1.6, 0.9));" 685 | ] 686 | }, 687 | { 688 | "cell_type": "markdown", 689 | "metadata": { 690 | "collapsed": true 691 | }, 692 | "source": [ 693 | "

Next »

" 694 | ] 695 | } 696 | ], 697 | "metadata": { 698 | "kernelspec": { 699 | "display_name": "Python 3 (ipykernel)", 700 | "language": "python", 701 | "name": "python3" 702 | }, 703 | "language_info": { 704 | "codemirror_mode": { 705 | "name": "ipython", 706 | "version": 3 707 | }, 708 | "file_extension": ".py", 709 | "mimetype": "text/x-python", 710 | "name": "python", 711 | "nbconvert_exporter": "python", 712 | "pygments_lexer": "ipython3", 713 | "version": "3.9.13" 714 | } 715 | }, 716 | "nbformat": 4, 717 | "nbformat_minor": 2 718 | } 719 | -------------------------------------------------------------------------------- /08_segmentation_and_shape_analysis.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

Focused Ion Beam Scanning Electron Microscopy Image Segmentation

\n", 8 | "\n", 9 | "\n", 10 | "**Summary:**\n", 11 | "1. SimpleITK supports a large number of filters that facilitate classical segmentation algorithms (variety of thresholding algorithms, watersheds...).\n", 12 | "2. Once your data is segmented SimpleITK enables you to efficiently post-process the segmentation (e.g. label distinct objects, analyze object shapes).\n", 13 | "\n", 14 | "This notebook will illustrate the use of SimpleITK for segmentation of bacteria from a 3D Focused Ion Beam Scanning Electron Microscopy (FIB-SEM) image. The specific bacterium is Bacillus subtilis, a rod-shaped organism naturally found in soil and plants. The bacteria have been subjected to stress to initiate the process of forming an endospore. These endospores can be seen as a generally dark ellipsoid inside the individual bacterium." 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import SimpleITK as sitk\n", 24 | "import pandas as pd\n", 25 | "\n", 26 | "%matplotlib notebook\n", 27 | "\n", 28 | "import matplotlib.pyplot as plt\n", 29 | "import gui\n", 30 | "from math import ceil\n", 31 | "from downloaddata import fetch_data as fdata" 32 | ] 33 | }, 34 | { 35 | "cell_type": "markdown", 36 | "metadata": {}, 37 | "source": [ 38 | "# Load data\n", 39 | "\n", 40 | "Load the 3D volume and display it." 41 | ] 42 | }, 43 | { 44 | "cell_type": "code", 45 | "execution_count": null, 46 | "metadata": {}, 47 | "outputs": [], 48 | "source": [ 49 | "img = sitk.ReadImage(fdata(\"fib_sem_bacillus_subtilis.mha\"))\n", 50 | "gui.MultiImageDisplay(image_list=[img], figure_size=(8, 4));" 51 | ] 52 | }, 53 | { 54 | "cell_type": "markdown", 55 | "metadata": {}, 56 | "source": [ 57 | "# Segmentation\n", 58 | "\n", 59 | "To allow us to analyze the shape of whole bacteria we first need to segment them. We will do this in several steps:\n", 60 | "1. Separate the bacteria from the embedding resin background.\n", 61 | "2. Mark each potential bacterium with a unique label, to evaluate the segmentation.\n", 62 | "3. Remove small components and fill small holes using binary morphology operators (opening and closing).\n", 63 | "4. Use seed-based watersheds to perform the final segmentation.\n", 64 | "5. Remove bacteria that are connected to the image boundary." 65 | ] 66 | }, 67 | { 68 | "cell_type": "markdown", 69 | "metadata": {}, 70 | "source": [ 71 | "## Separate the bacteria from the background\n", 72 | "\n", 73 | "Based on the visualization of the data above, it intuitively appears that the background and foreground are separable using a single intensity threshold. Our first step towards validating this observation is to plot the intensity distribution." 74 | ] 75 | }, 76 | { 77 | "cell_type": "code", 78 | "execution_count": null, 79 | "metadata": {}, 80 | "outputs": [], 81 | "source": [ 82 | "plt.figure()\n", 83 | "plt.hist(sitk.GetArrayViewFromImage(img).flatten(), bins=100)\n", 84 | "plt.show()" 85 | ] 86 | }, 87 | { 88 | "cell_type": "markdown", 89 | "metadata": {}, 90 | "source": [ 91 | "The histogram is bi-modal with a clear separation, which we have manually identified at an intensity value of 120.\n", 92 | "\n", 93 | "We can also use one of several binary threshold selection filters available in SimpleITK. 
" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": null, 99 | "metadata": {}, 100 | "outputs": [], 101 | "source": [ 102 | "threshold_filters = {\n", 103 | " \"Otsu\": sitk.OtsuThresholdImageFilter(),\n", 104 | " \"Triangle\": sitk.TriangleThresholdImageFilter(),\n", 105 | " \"Huang\": sitk.HuangThresholdImageFilter(),\n", 106 | " \"MaxEntropy\": sitk.MaximumEntropyThresholdImageFilter(),\n", 107 | "}\n", 108 | "\n", 109 | "filter_selection = \"Manual\"\n", 110 | "try:\n", 111 | " thresh_filter = threshold_filters[filter_selection]\n", 112 | " thresh_filter.SetInsideValue(0)\n", 113 | " thresh_filter.SetOutsideValue(1)\n", 114 | " thresh_img = thresh_filter.Execute(img)\n", 115 | " thresh_value = thresh_filter.GetThreshold()\n", 116 | "except KeyError:\n", 117 | " thresh_value = 120\n", 118 | " thresh_img = img > thresh_value\n", 119 | "\n", 120 | "print(\"Threshold used: \" + str(thresh_value))\n", 121 | "gui.MultiImageDisplay(\n", 122 | " image_list=[sitk.LabelOverlay(img, thresh_img)],\n", 123 | " title_list=[\"Binary Segmentation\"],\n", 124 | " figure_size=(8, 4),\n", 125 | ");" 126 | ] 127 | }, 128 | { 129 | "cell_type": "markdown", 130 | "metadata": {}, 131 | "source": [ 132 | "# Mark each potential bacterium with unique label and evaluate" 133 | ] 134 | }, 135 | { 136 | "cell_type": "code", 137 | "execution_count": null, 138 | "metadata": {}, 139 | "outputs": [], 140 | "source": [ 141 | "stats = sitk.LabelShapeStatisticsImageFilter()\n", 142 | "stats.Execute(sitk.ConnectedComponent(thresh_img))\n", 143 | "\n", 144 | "# Look at the distribution of sizes of connected components (bacteria).\n", 145 | "label_sizes = [stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1]\n", 146 | "\n", 147 | "plt.figure()\n", 148 | "plt.hist(label_sizes, bins=200)\n", 149 | "plt.title(\"Distribution of Object Sizes\")\n", 150 | "plt.xlabel(\"size in pixels\")\n", 151 | "plt.ylabel(\"number of objects\")\n", 152 | "plt.show()" 153 | ] 154 | }, 155 | { 156 | "cell_type": "markdown", 157 | "metadata": {}, 158 | "source": [ 159 | "The histogram above shows tens of thousands of very small labels which are not visually detected by looking at the segmentation." 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "## Remove small islands and holes\n", 167 | "\n", 168 | "Using binary morphological operations we remove small objects using the opening operation and fill small holes using the closing operation. The use of opening and closing by reconstruction maintains the boundary of the original objects." 169 | ] 170 | }, 171 | { 172 | "cell_type": "code", 173 | "execution_count": null, 174 | "metadata": {}, 175 | "outputs": [], 176 | "source": [ 177 | "cleaned_thresh_img = sitk.BinaryOpeningByReconstruction(thresh_img, [10, 10, 10])\n", 178 | "cleaned_thresh_img = sitk.BinaryClosingByReconstruction(\n", 179 | " cleaned_thresh_img, [10, 10, 10]\n", 180 | ")\n", 181 | "\n", 182 | "gui.MultiImageDisplay(\n", 183 | " image_list=[sitk.LabelOverlay(img, cleaned_thresh_img)],\n", 184 | " title_list=[\"Cleaned Binary Segmentation\"],\n", 185 | " figure_size=(8, 4),\n", 186 | ");" 187 | ] 188 | }, 189 | { 190 | "cell_type": "markdown", 191 | "metadata": {}, 192 | "source": [ 193 | "Check that the number of objects defined by the binary image is more reasonable." 
194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": null, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "stats = sitk.LabelShapeStatisticsImageFilter()\n", 203 | "stats.Execute(sitk.ConnectedComponent(cleaned_thresh_img))\n", 204 | "\n", 205 | "# Look at the distribution of sizes of connected components (bacteria).\n", 206 | "label_sizes = [stats.GetNumberOfPixels(l) for l in stats.GetLabels() if l != 1]\n", 207 | "\n", 208 | "plt.figure()\n", 209 | "plt.hist(label_sizes, bins=200)\n", 210 | "plt.title(\"Distribution of Object Sizes\")\n", 211 | "plt.xlabel(\"size in pixels\")\n", 212 | "plt.ylabel(\"number of objects\")\n", 213 | "plt.show()" 214 | ] 215 | }, 216 | { 217 | "cell_type": "markdown", 218 | "metadata": {}, 219 | "source": [ 220 | "After the morphological operations, our binary image seems to have a reasonable number of objects, but is this true? We next look at the unique objects defined by this binary segmentation (each object is marked with a unique color)." 221 | ] 222 | }, 223 | { 224 | "cell_type": "code", 225 | "execution_count": null, 226 | "metadata": {}, 227 | "outputs": [], 228 | "source": [ 229 | "gui.MultiImageDisplay(\n", 230 | " image_list=[sitk.LabelOverlay(img, sitk.ConnectedComponent(cleaned_thresh_img))],\n", 231 | " title_list=[\"Cleaned Binary Segmentation\"],\n", 232 | " figure_size=(8, 4),\n", 233 | ");" 234 | ] 235 | }, 236 | { 237 | "cell_type": "markdown", 238 | "metadata": {}, 239 | "source": [ 240 | "## Seed based watershed segmentation\n", 241 | "\n", 242 | "The bacteria appear to be segmented correctly from the background but not from each other. Using the visualization and histogram above we see that in 3D many of them are connected, even if on a slice by slice inspection they appear separate. " 243 | ] 244 | }, 245 | { 246 | "cell_type": "code", 247 | "execution_count": null, 248 | "metadata": {}, 249 | "outputs": [], 250 | "source": [ 251 | "dist_img = sitk.SignedMaurerDistanceMap(\n", 252 | " cleaned_thresh_img != 0,\n", 253 | " insideIsPositive=False,\n", 254 | " squaredDistance=False,\n", 255 | " useImageSpacing=False,\n", 256 | ")\n", 257 | "radius = 10\n", 258 | "# Seeds have a distance of \"radius\" or more to the object boundary, they are uniquely labelled.\n", 259 | "seeds = sitk.ConnectedComponent(dist_img < -radius)\n", 260 | "# Relabel the seed objects using consecutive object labels while removing all objects with less than 15 pixels.\n", 261 | "seeds = sitk.RelabelComponent(seeds, minimumObjectSize=15)\n", 262 | "# Run the watershed segmentation using the distance map and seeds.\n", 263 | "ws = sitk.MorphologicalWatershedFromMarkers(dist_img, seeds, markWatershedLine=True)\n", 264 | "ws = sitk.Mask(ws, sitk.Cast(cleaned_thresh_img, sitk.sitkUInt8))" 265 | ] 266 | }, 267 | { 268 | "cell_type": "markdown", 269 | "metadata": {}, 270 | "source": [ 271 | "Visualize the distance map, the unique seeds and final object segmentation." 
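The radius value above is pivotal: too small and noise spawns spurious seeds that split bacteria, too large and touching bacteria share a single seed and merge. A quick probe of this trade-off, sketched under the assumption that dist_img from the watershed cell above is in memory (the radii values are illustrative):

# Probe how the seed radius affects the number of watershed seeds.
# Reuses dist_img from the watershed cell above; radii are illustrative.
seed_stats = sitk.LabelShapeStatisticsImageFilter()
for r in [5, 10, 15]:
    candidate_seeds = sitk.RelabelComponent(
        sitk.ConnectedComponent(dist_img < -r), minimumObjectSize=15
    )
    seed_stats.Execute(candidate_seeds)
    print(f"radius={r}: {seed_stats.GetNumberOfLabels()} seeds")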
272 | ] 273 | }, 274 | { 275 | "cell_type": "code", 276 | "execution_count": null, 277 | "metadata": {}, 278 | "outputs": [], 279 | "source": [ 280 | "gui.MultiImageDisplay(\n", 281 | " image_list=[dist_img, sitk.LabelOverlay(img, seeds), sitk.LabelOverlay(img, ws)],\n", 282 | " title_list=[\n", 283 | " \"Segmentation Distance\",\n", 284 | " \"Watershed Seeds\",\n", 285 | " \"Binary Watershed Labeling\",\n", 286 | " ],\n", 287 | " shared_slider=True,\n", 288 | " horizontal=False,\n", 289 | " figure_size=(6, 12),\n", 290 | ");" 291 | ] 292 | }, 293 | { 294 | "cell_type": "markdown", 295 | "metadata": {}, 296 | "source": [ 297 | "## Removal of objects touching the image boundary\n", 298 | "\n", 299 | "We are not sure objects touching the image boundary are whole bacteria, so we remove them." 300 | ] 301 | }, 302 | { 303 | "cell_type": "code", 304 | "execution_count": null, 305 | "metadata": {}, 306 | "outputs": [], 307 | "source": [ 308 | "# The image has a small black border which we account for here.\n", 309 | "bgp = sitk.BinaryGrindPeak((ws != 0) | (img == 0))\n", 310 | "non_border_seg = sitk.Mask(ws, bgp == 0)\n", 311 | "gui.MultiImageDisplay(\n", 312 | " image_list=[sitk.LabelOverlay(img, non_border_seg)],\n", 313 | " title_list=[\"Final Segmentation\"],\n", 314 | " figure_size=(8, 4),\n", 315 | ");" 316 | ] 317 | }, 318 | { 319 | "cell_type": "markdown", 320 | "metadata": {}, 321 | "source": [ 322 | "# Object Analysis\n", 323 | "\n", 324 | "Once we have the segmented objects we look at their shapes and the intensity distributions inside the objects.\n", 325 | "\n", 326 | "Note that sizes are in nanometers. ITK and consequently SimpleITK are agnostic of the actual measurement units. It is up to you as the developer to explicitly use the correct units and more importantly, DO NOT MIX UNITS.\n", 327 | "\n", 328 | "We first compute all of the measurements we are interested in." 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": null, 334 | "metadata": {}, 335 | "outputs": [], 336 | "source": [ 337 | "shape_stats = sitk.LabelShapeStatisticsImageFilter()\n", 338 | "shape_stats.ComputeOrientedBoundingBoxOn()\n", 339 | "shape_stats.Execute(non_border_seg)\n", 340 | "\n", 341 | "intensity_stats = sitk.LabelIntensityStatisticsImageFilter()\n", 342 | "intensity_stats.Execute(non_border_seg, img)" 343 | ] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "metadata": {}, 348 | "source": [ 349 | "Insert the values into a pandas dataframe and display some descriptive statistics." 
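Before building the dataframe, a cheap guard against the unit pitfall just mentioned is to verify the spacing metadata; a sketch, noting that the nanometer interpretation is this dataset's convention and not something SimpleITK can check for you:

# Sanity check the voxel spacing before interpreting physical sizes.
# For this dataset the spacing values are taken to be in nanometers.
spacing = img.GetSpacing()
print("voxel spacing (nm):", spacing)
# 1 um^3 == 1e9 nm^3, convenient when reporting volumes in familiar units.
first_label = shape_stats.GetLabels()[0]
print(
    "first object volume: {:.3f} um^3".format(
        shape_stats.GetPhysicalSize(first_label) / 1.0e9
    )
)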
350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": null, 355 | "metadata": {}, 356 | "outputs": [], 357 | "source": [ 358 | "stats_list = [\n", 359 | " (\n", 360 | " shape_stats.GetPhysicalSize(i),\n", 361 | " shape_stats.GetElongation(i),\n", 362 | " shape_stats.GetFlatness(i),\n", 363 | " shape_stats.GetOrientedBoundingBoxSize(i)[0],\n", 364 | " shape_stats.GetOrientedBoundingBoxSize(i)[2],\n", 365 | " intensity_stats.GetMean(i),\n", 366 | " intensity_stats.GetStandardDeviation(i),\n", 367 | " intensity_stats.GetSkewness(i),\n", 368 | " )\n", 369 | " for i in shape_stats.GetLabels()\n", 370 | "]\n", 371 | "cols = [\n", 372 | " \"Volume (nm^3)\",\n", 373 | " \"Elongation\",\n", 374 | " \"Flatness\",\n", 375 | " \"Oriented Bounding Box Minimum Size(nm)\",\n", 376 | " \"Oriented Bounding Box Maximum Size(nm)\",\n", 377 | " \"Intensity Mean\",\n", 378 | " \"Intensity Standard Deviation\",\n", 379 | " \"Intensity Skewness\",\n", 380 | "]\n", 381 | "\n", 382 | "# Create the pandas data frame and display descriptive statistics.\n", 383 | "stats = pd.DataFrame(data=stats_list, index=shape_stats.GetLabels(), columns=cols)\n", 384 | "stats.describe()" 385 | ] 386 | }, 387 | { 388 | "cell_type": "markdown", 389 | "metadata": {}, 390 | "source": [ 391 | "Create a plot to investigate the relationship, possible correlations, between volume and object shape characteristics (elongation, flatness, principal moments). " 392 | ] 393 | }, 394 | { 395 | "cell_type": "code", 396 | "execution_count": null, 397 | "metadata": {}, 398 | "outputs": [], 399 | "source": [ 400 | "fig, axes = plt.subplots(nrows=len(cols), ncols=2, figsize=(6, 4 * len(cols)))\n", 401 | "axes[0, 0].axis(\"off\")\n", 402 | "\n", 403 | "stats.loc[:, cols[0]].plot.hist(ax=axes[0, 1], bins=25)\n", 404 | "axes[0, 1].set_xlabel(cols[0])\n", 405 | "axes[0, 1].xaxis.set_label_position(\"top\")\n", 406 | "\n", 407 | "for i in range(1, len(cols)):\n", 408 | " c = cols[i]\n", 409 | " bar = stats.loc[:, [c]].plot.hist(\n", 410 | " ax=axes[i, 0], bins=20, orientation=\"horizontal\", legend=False\n", 411 | " )\n", 412 | " bar.set_ylabel(stats.loc[:, [c]].columns.values[0])\n", 413 | " scatter = stats.plot.scatter(ax=axes[i, 1], y=c, x=cols[0])\n", 414 | " scatter.set_ylabel(\"\")\n", 415 | " # Remove axis labels from all plots except the last (they all share the labels)\n", 416 | " if i < len(cols) - 1:\n", 417 | " bar.set_xlabel(\"\")\n", 418 | " scatter.set_xlabel(\"\")\n", 419 | "# Adjust the spacing between plot columns and set the plots to have a tight\n", 420 | "# layout inside the figure.\n", 421 | "plt.subplots_adjust(wspace=0.4)\n", 422 | "plt.tight_layout()" 423 | ] 424 | }, 425 | { 426 | "cell_type": "markdown", 427 | "metadata": {}, 428 | "source": [ 429 | "Finally, we visualize a lineup of the bacteria using a coordinate system that is defined by the oriented bounding box enclosing each of them. 
" 430 | ] 431 | }, 432 | { 433 | "cell_type": "code", 434 | "execution_count": null, 435 | "metadata": { 436 | "scrolled": false 437 | }, 438 | "outputs": [], 439 | "source": [ 440 | "bacteria_labels = shape_stats.GetLabels()\n", 441 | "bacteria_volumes = [shape_stats.GetPhysicalSize(label) for label in bacteria_labels]\n", 442 | "num_images = 5 # number of bacteria images we want to display\n", 443 | "\n", 444 | "bacteria_labels_volume_sorted = [\n", 445 | " label for _, label in sorted(zip(bacteria_volumes, bacteria_labels))\n", 446 | "]\n", 447 | "\n", 448 | "resampler = sitk.ResampleImageFilter()\n", 449 | "aligned_image_spacing = [10, 10, 10] # in nanometers\n", 450 | "\n", 451 | "for label in bacteria_labels_volume_sorted[0:num_images]:\n", 452 | " aligned_image_size = [\n", 453 | " int(\n", 454 | " ceil(\n", 455 | " shape_stats.GetOrientedBoundingBoxSize(label)[i]\n", 456 | " / aligned_image_spacing[i]\n", 457 | " )\n", 458 | " )\n", 459 | " for i in range(3)\n", 460 | " ]\n", 461 | " direction_mat = shape_stats.GetOrientedBoundingBoxDirection(label)\n", 462 | " aligned_image_direction = [\n", 463 | " direction_mat[0],\n", 464 | " direction_mat[3],\n", 465 | " direction_mat[6],\n", 466 | " direction_mat[1],\n", 467 | " direction_mat[4],\n", 468 | " direction_mat[7],\n", 469 | " direction_mat[2],\n", 470 | " direction_mat[5],\n", 471 | " direction_mat[8],\n", 472 | " ]\n", 473 | " resampler.SetOutputDirection(aligned_image_direction)\n", 474 | " resampler.SetOutputOrigin(shape_stats.GetOrientedBoundingBoxOrigin(label))\n", 475 | " resampler.SetOutputSpacing(aligned_image_spacing)\n", 476 | " resampler.SetSize(aligned_image_size)\n", 477 | "\n", 478 | " obb_img = resampler.Execute(img)\n", 479 | " # Change the image axes order so that we have a nice display.\n", 480 | " obb_img = sitk.PermuteAxes(obb_img, [2, 1, 0])\n", 481 | " gui.MultiImageDisplay(image_list=[obb_img], title_list=[\"OBB_{0}\".format(label)])" 482 | ] 483 | }, 484 | { 485 | "cell_type": "markdown", 486 | "metadata": {}, 487 | "source": [ 488 | "

Next »

" 489 | ] 490 | } 491 | ], 492 | "metadata": { 493 | "kernelspec": { 494 | "display_name": "Python 3 (ipykernel)", 495 | "language": "python", 496 | "name": "python3" 497 | }, 498 | "language_info": { 499 | "codemirror_mode": { 500 | "name": "ipython", 501 | "version": 3 502 | }, 503 | "file_extension": ".py", 504 | "mimetype": "text/x-python", 505 | "name": "python", 506 | "nbconvert_exporter": "python", 507 | "pygments_lexer": "ipython3", 508 | "version": "3.9.13" 509 | } 510 | }, 511 | "nbformat": 4, 512 | "nbformat_minor": 2 513 | } 514 | -------------------------------------------------------------------------------- /09_segmentation_evaluation.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

Segmentation Evaluation

\n", 8 | "\n", 9 | "**Summary:**\n", 10 | "\n", 11 | "1. SimpleITK supports two ways of combining expert segmentations to obtain a reference segmentation.\n", 12 | "2. A variety of criteria used for evaluating a segmentation result are readily available or implemented in SimpleITK.\n", 13 | "\n", 14 | "Reference Segmentation\n", 15 | "\n", 16 | "Evaluating segmentation algorithms is most often done using reference data to which you compare your results. In the medical domain reference data is commonly obtained via manual segmentation by an expert (don't forget to thank your clinical colleagues for their hard work). When you are resource limited, the reference data may be defined by a single expert. This is less than ideal. When multiple experts provide you with their input then you can potentially combine them to obtain reference data that is closer to the ever elusive \"ground truth\". In this notebook we show two approaches to combining input from multiple observers, majority vote and the Simultaneous Truth and Performance Level\n", 17 | "Estimation [(STAPLE)](https://www.ncbi.nlm.nih.gov/pubmed/15250643) algorithm.\n", 18 | "\n", 19 | "Segmentation Evaluation\n", 20 | "\n", 21 | "Once we have a reference, we compare the algorithm's performance using multiple criteria, as usually there is no single evaluation measure that conveys all of the relevant information. In this notebook we illustrate the use of the following evaluation criteria:\n", 22 | "* Overlap measures:\n", 23 | " * Jaccard and Dice coefficients \n", 24 | " * false negative and false positive errors\n", 25 | "* Surface distance measures:\n", 26 | " * Hausdorff distance (symmetric)\n", 27 | " * mean, median, max and standard deviation between surfaces\n", 28 | "* Volume measures:\n", 29 | " * volume similarity $ \\frac{2*(v1-v2)}{v1+v2}$\n", 30 | "\n", 31 | "The relevant criteria are task dependent, so you need to ask yourself whether you are interested in detecting spurious errors or not (mean or max surface distance), whether over/under segmentation should be differentiated (volume similarity and Dice or just Dice), and what is the ratio between acceptable errors and the size of the segmented object (Dice coefficient may be too sensitive to small errors when the segmented object is small and not sensitive enough to large errors when the segmented object is large).\n", 32 | "\n", 33 | "In the context of segmentation challenges, algorithm rankings are often based on a weighted combination of these criteria. These ranking schemes are not necessarily robust, as discussed in \"[Why rankings of biomedical image analysis competitions should be interpreted with care](https://www.nature.com/articles/s41467-018-07619-7)\", L. Maier-Hein et al.\n", 34 | "\n", 35 | "The data we use in the notebook is a set of manually segmented liver tumors from a single clinical CT scan. The relevant publication is: T. 
Popa et al., \"Tumor Volume Measurement and Volume Measurement Comparison Plug-ins for VolView Using ITK\", SPIE Medical Imaging: Visualization, Image-Guided Procedures, and Display, 2006.\n" 36 | ] 37 | }, 38 | { 39 | "cell_type": "code", 40 | "execution_count": null, 41 | "metadata": {}, 42 | "outputs": [], 43 | "source": [ 44 | "import SimpleITK as sitk\n", 45 | "\n", 46 | "import numpy as np\n", 47 | "\n", 48 | "from downloaddata import fetch_data as fdata\n", 49 | "\n", 50 | "%matplotlib inline\n", 51 | "import matplotlib.pyplot as plt\n", 52 | "import gui\n", 53 | "\n", 54 | "from ipywidgets import interact, fixed" 55 | ] 56 | }, 57 | { 58 | "cell_type": "markdown", 59 | "metadata": {}, 60 | "source": [ 61 | "## Utility method for display" 62 | ] 63 | }, 64 | { 65 | "cell_type": "code", 66 | "execution_count": null, 67 | "metadata": { 68 | "code_folding": [] 69 | }, 70 | "outputs": [], 71 | "source": [ 72 | "def display_with_overlay(\n", 73 | " segmentation_number, slice_number, image, segs, window_min, window_max\n", 74 | "):\n", 75 | " \"\"\"\n", 76 | " Display a CT slice with segmented contours overlaid onto it. The contours are the edges of\n", 77 | " the labeled regions.\n", 78 | " \"\"\"\n", 79 | " img = image[:, :, slice_number]\n", 80 | " msk = segs[segmentation_number][:, :, slice_number]\n", 81 | " overlay_img = sitk.LabelMapContourOverlay(\n", 82 | " sitk.Cast(msk, sitk.sitkLabelUInt8),\n", 83 | " sitk.Cast(\n", 84 | " sitk.IntensityWindowing(\n", 85 | " img, windowMinimum=window_min, windowMaximum=window_max\n", 86 | " ),\n", 87 | " sitk.sitkUInt8,\n", 88 | " ),\n", 89 | " opacity=1,\n", 90 | " contourThickness=[2, 2],\n", 91 | " )\n", 92 | " # We assume the original slice is isotropic, otherwise the display would be distorted\n", 93 | " plt.imshow(sitk.GetArrayViewFromImage(overlay_img))\n", 94 | " plt.axis(\"off\")\n", 95 | " plt.show()" 96 | ] 97 | }, 98 | { 99 | "cell_type": "markdown", 100 | "metadata": {}, 101 | "source": [ 102 | "## Fetch the data\n", 103 | "\n", 104 | "Retrieve a single CT scan and three manual delineations of a liver tumor. Visual inspection of the data highlights the variability between experts. " 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": null, 110 | "metadata": {}, 111 | "outputs": [], 112 | "source": [ 113 | "image = sitk.ReadImage(fdata(\"liverTumorSegmentations/Patient01Homo.mha\"))\n", 114 | "segmentation_file_names = [\n", 115 | " \"liverTumorSegmentations/Patient01Homo_Rad01.mha\",\n", 116 | " \"liverTumorSegmentations/Patient01Homo_Rad02.mha\",\n", 117 | " \"liverTumorSegmentations/Patient01Homo_Rad03.mha\",\n", 118 | "]\n", 119 | "\n", 120 | "segmentations = [\n", 121 | " sitk.ReadImage(fdata(file_name), sitk.sitkUInt8)\n", 122 | " for file_name in segmentation_file_names\n", 123 | "]\n", 124 | "\n", 125 | "interact(\n", 126 | " display_with_overlay,\n", 127 | " segmentation_number=(0, len(segmentations) - 1),\n", 128 | " slice_number=(0, image.GetSize()[2] - 1),\n", 129 | " image=fixed(image),\n", 130 | " segs=fixed(segmentations),\n", 131 | " window_min=fixed(-1024),\n", 132 | " window_max=fixed(976),\n", 133 | ");" 134 | ] 135 | }, 136 | { 137 | "cell_type": "markdown", 138 | "metadata": {}, 139 | "source": [ 140 | "## Derive a reference\n", 141 | "\n", 142 | "There are a variety of ways to derive a reference segmentation from multiple expert inputs (\"[A comparison of ground truth estimation methods](https://www.ncbi.nlm.nih.gov/pubmed/20033494)\", A. M. Biancardi, A. C. Jirapatnakul, A. P. 
Reeves).\n", 143 | "\n", 144 | "Two methods that are available in SimpleITK are [majority vote](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1LabelVotingImageFilter.html) and the STAPLE algorithm ([single label](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1STAPLEImageFilter.html) or [multi label](https://simpleitk.org/doxygen/latest/html/classitk_1_1simple_1_1MultiLabelSTAPLEImageFilter.html))." 145 | ] 146 | }, 147 | { 148 | "cell_type": "code", 149 | "execution_count": null, 150 | "metadata": {}, 151 | "outputs": [], 152 | "source": [ 153 | "# Use the STAPLE algorithm to obtain the reference segmentation. This implementation of the original algorithm\n", 154 | "# combines a single label from multiple segmentations, the label is user specified. The result of the\n", 155 | "# filter is the voxel's probability of belonging to the foreground. We then have to threshold the result to obtain\n", 156 | "# a reference binary segmentation.\n", 157 | "foregroundValue = 1\n", 158 | "threshold = 0.95\n", 159 | "reference_segmentation_STAPLE_probabilities = sitk.STAPLE(\n", 160 | " segmentations, foregroundValue\n", 161 | ")\n", 162 | "# We use the overloaded operator to perform thresholding, another option is to use the BinaryThreshold function.\n", 163 | "reference_segmentation = reference_segmentation_STAPLE_probabilities > threshold\n", 164 | "\n", 165 | "manual_plus_staple = list(segmentations)\n", 166 | "# Append the reference segmentation to the list of manual segmentations\n", 167 | "manual_plus_staple.append(reference_segmentation)\n", 168 | "\n", 169 | "interact(\n", 170 | " display_with_overlay,\n", 171 | " segmentation_number=(0, len(manual_plus_staple) - 1),\n", 172 | " slice_number=(0, image.GetSize()[2] - 1),\n", 173 | " image=fixed(image),\n", 174 | " segs=fixed(manual_plus_staple),\n", 175 | " window_min=fixed(-1024),\n", 176 | " window_max=fixed(976),\n", 177 | ");" 178 | ] 179 | }, 180 | { 181 | "cell_type": "markdown", 182 | "metadata": {}, 183 | "source": [ 184 | "## Evaluate segmentations using the reference\n", 185 | "\n", 186 | "Once we derive a reference from our experts input we can compare segmentation results to it.\n", 187 | "\n", 188 | "Note that in this notebook we compare the expert segmentations to the reference derived from them. This is not relevant for algorithm evaluation, but it can potentially be used to rank your experts.\n", 189 | "\n", 190 | "In this specific implementation we take advantage of the fact that we have a binary segmentation with 1 for foreground and 0 for background." 
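As an aside, the majority vote approach mentioned above is equally terse; a sketch, assuming the segmentations list loaded earlier (the value assigned to voxels without a majority is an arbitrary choice here):

# Majority vote reference: each voxel receives the label most experts agree on;
# voxels without a majority are assigned labelForUndecidedPixels.
labelForUndecidedPixels = 10
reference_segmentation_majority_vote = sitk.LabelVoting(
    segmentations, labelForUndecidedPixels
)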
191 | ] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": null, 196 | "metadata": {}, 197 | "outputs": [], 198 | "source": [ 199 | "from enum import Enum\n", 200 | "\n", 201 | "\n", 202 | "# Use enumerations to represent the various evaluation measures\n", 203 | "class OverlapMeasures(Enum):\n", 204 | " jaccard, dice, volume_similarity, false_negative, false_positive = range(5)\n", 205 | "\n", 206 | "\n", 207 | "class SurfaceDistanceMeasures(Enum):\n", 208 | " (\n", 209 | " hausdorff_distance,\n", 210 | " mean_surface_distance,\n", 211 | " median_surface_distance,\n", 212 | " std_surface_distance,\n", 213 | " max_surface_distance,\n", 214 | " ) = range(5)\n", 215 | "\n", 216 | "\n", 217 | "# Empty numpy arrays to hold the results\n", 218 | "overlap_results = np.zeros(\n", 219 | " (len(segmentations), len(OverlapMeasures.__members__.items()))\n", 220 | ")\n", 221 | "surface_distance_results = np.zeros(\n", 222 | " (len(segmentations), len(SurfaceDistanceMeasures.__members__.items()))\n", 223 | ")\n", 224 | "\n", 225 | "# Compute the evaluation criteria\n", 226 | "\n", 227 | "# Note that for the overlap measures filter, because we are dealing with a single label we\n", 228 | "# use the combined, all labels, evaluation measures without passing a specific label to the methods.\n", 229 | "overlap_measures_filter = sitk.LabelOverlapMeasuresImageFilter()\n", 230 | "\n", 231 | "hausdorff_distance_filter = sitk.HausdorffDistanceImageFilter()\n", 232 | "\n", 233 | "# Use the absolute values of the distance map to compute the surface distances (distance map sign, outside or inside\n", 234 | "# relationship, is irrelevant)\n", 235 | "label = 1\n", 236 | "reference_distance_map = sitk.Abs(\n", 237 | " sitk.SignedMaurerDistanceMap(\n", 238 | " reference_segmentation, squaredDistance=False, useImageSpacing=True\n", 239 | " )\n", 240 | ")\n", 241 | "reference_surface = sitk.LabelContour(reference_segmentation)\n", 242 | "\n", 243 | "statistics_image_filter = sitk.StatisticsImageFilter()\n", 244 | "# Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 245 | "statistics_image_filter.Execute(reference_surface)\n", 246 | "num_reference_surface_pixels = int(statistics_image_filter.GetSum())\n", 247 | "\n", 248 | "for i, seg in enumerate(segmentations):\n", 249 | " # Overlap measures\n", 250 | " overlap_measures_filter.Execute(reference_segmentation, seg)\n", 251 | " overlap_results[i, OverlapMeasures.jaccard.value] = (\n", 252 | " overlap_measures_filter.GetJaccardCoefficient()\n", 253 | " )\n", 254 | " overlap_results[i, OverlapMeasures.dice.value] = (\n", 255 | " overlap_measures_filter.GetDiceCoefficient()\n", 256 | " )\n", 257 | " overlap_results[i, OverlapMeasures.volume_similarity.value] = (\n", 258 | " overlap_measures_filter.GetVolumeSimilarity()\n", 259 | " )\n", 260 | " overlap_results[i, OverlapMeasures.false_negative.value] = (\n", 261 | " overlap_measures_filter.GetFalseNegativeError()\n", 262 | " )\n", 263 | " overlap_results[i, OverlapMeasures.false_positive.value] = (\n", 264 | " overlap_measures_filter.GetFalsePositiveError()\n", 265 | " )\n", 266 | " # Hausdorff distance\n", 267 | " hausdorff_distance_filter.Execute(reference_segmentation, seg)\n", 268 | "\n", 269 | " surface_distance_results[i, SurfaceDistanceMeasures.hausdorff_distance.value] = (\n", 270 | " hausdorff_distance_filter.GetHausdorffDistance()\n", 271 | " )\n", 272 | " # Symmetric surface distance measures\n", 273 | " segmented_distance_map = sitk.Abs(\n", 274 | " 
sitk.SignedMaurerDistanceMap(seg, squaredDistance=False, useImageSpacing=True)\n", 275 | " )\n", 276 | " segmented_surface = sitk.LabelContour(seg)\n", 277 | "\n", 278 | " # Multiply the binary surface segmentations with the distance maps. The resulting distance\n", 279 | " # maps contain non-zero values only on the surface (they can also contain zero on the surface)\n", 280 | " seg2ref_distance_map = reference_distance_map * sitk.Cast(\n", 281 | " segmented_surface, sitk.sitkFloat32\n", 282 | " )\n", 283 | " ref2seg_distance_map = segmented_distance_map * sitk.Cast(\n", 284 | " reference_surface, sitk.sitkFloat32\n", 285 | " )\n", 286 | "\n", 287 | " # Get the number of pixels in the reference surface by counting all pixels that are 1.\n", 288 | " statistics_image_filter.Execute(segmented_surface)\n", 289 | " num_segmented_surface_pixels = int(statistics_image_filter.GetSum())\n", 290 | "\n", 291 | " # Get all non-zero distances and then add zero distances if required.\n", 292 | " seg2ref_distance_map_arr = sitk.GetArrayViewFromImage(seg2ref_distance_map)\n", 293 | " seg2ref_distances = list(seg2ref_distance_map_arr[seg2ref_distance_map_arr != 0])\n", 294 | " seg2ref_distances = seg2ref_distances + list(\n", 295 | " np.zeros(num_segmented_surface_pixels - len(seg2ref_distances))\n", 296 | " )\n", 297 | " ref2seg_distance_map_arr = sitk.GetArrayViewFromImage(ref2seg_distance_map)\n", 298 | " ref2seg_distances = list(ref2seg_distance_map_arr[ref2seg_distance_map_arr != 0])\n", 299 | " ref2seg_distances = ref2seg_distances + list(\n", 300 | " np.zeros(num_reference_surface_pixels - len(ref2seg_distances))\n", 301 | " )\n", 302 | "\n", 303 | " all_surface_distances = seg2ref_distances + ref2seg_distances\n", 304 | "\n", 305 | " # The maximum of the symmetric surface distances is the Hausdorff distance between the surfaces. In\n", 306 | " # general, it is not equal to the Hausdorff distance between all voxel/pixel points of the two\n", 307 | " # segmentations, though in our case it is. More on this below.\n", 308 | " surface_distance_results[i, SurfaceDistanceMeasures.mean_surface_distance.value] = (\n", 309 | " np.mean(all_surface_distances)\n", 310 | " )\n", 311 | " surface_distance_results[\n", 312 | " i, SurfaceDistanceMeasures.median_surface_distance.value\n", 313 | " ] = np.median(all_surface_distances)\n", 314 | " surface_distance_results[i, SurfaceDistanceMeasures.std_surface_distance.value] = (\n", 315 | " np.std(all_surface_distances)\n", 316 | " )\n", 317 | " surface_distance_results[i, SurfaceDistanceMeasures.max_surface_distance.value] = (\n", 318 | " np.max(all_surface_distances)\n", 319 | " )\n", 320 | "\n", 321 | "# Print the matrices\n", 322 | "np.set_printoptions(precision=3)\n", 323 | "print(overlap_results)\n", 324 | "print(surface_distance_results)" 325 | ] 326 | }, 327 | { 328 | "cell_type": "markdown", 329 | "metadata": {}, 330 | "source": [ 331 | "## Improved output\n", 332 | "\n", 333 | "Using the [pandas](http://pandas.pydata.org/) package we can easily produce high quality output. 
" 334 | ] 335 | }, 336 | { 337 | "cell_type": "code", 338 | "execution_count": null, 339 | "metadata": {}, 340 | "outputs": [], 341 | "source": [ 342 | "import pandas as pd\n", 343 | "from IPython.display import display, HTML\n", 344 | "\n", 345 | "# Graft our results matrix into pandas data frames\n", 346 | "overlap_results_df = pd.DataFrame(\n", 347 | " data=overlap_results,\n", 348 | " index=list(range(len(segmentations))),\n", 349 | " columns=[name for name, _ in OverlapMeasures.__members__.items()],\n", 350 | ")\n", 351 | "surface_distance_results_df = pd.DataFrame(\n", 352 | " data=surface_distance_results,\n", 353 | " index=list(range(len(segmentations))),\n", 354 | " columns=[name for name, _ in SurfaceDistanceMeasures.__members__.items()],\n", 355 | ")\n", 356 | "\n", 357 | "# Display the data as HTML tables and graphs\n", 358 | "display(HTML(overlap_results_df.to_html(float_format=lambda x: \"%.3f\" % x)))\n", 359 | "display(HTML(surface_distance_results_df.to_html(float_format=lambda x: \"%.3f\" % x)))\n", 360 | "overlap_results_df.plot(kind=\"bar\").legend(bbox_to_anchor=(1.6, 0.9))\n", 361 | "surface_distance_results_df.plot(kind=\"bar\").legend(bbox_to_anchor=(1.6, 0.9))" 362 | ] 363 | }, 364 | { 365 | "cell_type": "markdown", 366 | "metadata": {}, 367 | "source": [ 368 | "You can also export the data as a table for your LaTeX manuscript using the [to_latex](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.to_latex.html) function.\n", 369 | "Note: You will need to add the \\usepackage{booktabs} to your LaTeX document's preamble. \n", 370 | "\n", 371 | "To create the minimal LaTeX document which will allow you to see the difference between the tables below, copy paste:\n", 372 | "\n", 373 | "\\documentclass{article}\n", 374 | "\n", 375 | "\\usepackage{booktabs}\n", 376 | "\n", 377 | "\\begin{document}\n", 378 | "\n", 379 | "paste the tables here\n", 380 | "\n", 381 | "\\end{document}\n", 382 | "\n" 383 | ] 384 | }, 385 | { 386 | "cell_type": "code", 387 | "execution_count": null, 388 | "metadata": {}, 389 | "outputs": [], 390 | "source": [ 391 | "# The formatting of the table using the default settings is less than ideal\n", 392 | "print(overlap_results_df.to_latex())\n", 393 | "\n", 394 | "# We can improve on this by specifying the table's column format and the float format\n", 395 | "print(\n", 396 | " overlap_results_df.to_latex(\n", 397 | " column_format=\"ccccccc\", float_format=lambda x: \"%.3f\" % x\n", 398 | " )\n", 399 | ")" 400 | ] 401 | }, 402 | { 403 | "cell_type": "markdown", 404 | "metadata": {}, 405 | "source": [ 406 | "## Visual Diff\n", 407 | "\n", 408 | "It is always nice to have a figure with a visual display of the difference between the segmentation and ground truth." 
409 | ] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "metadata": { 415 | "simpleitk_error_allowed": "Exception thrown in SimpleITK Show:" 416 | }, 417 | "outputs": [], 418 | "source": [ 419 | "# Use the first segmentation\n", 420 | "segmentation = segmentations[0]\n", 421 | "\n", 422 | "# Save ink, the differences will be in black and background is white\n", 423 | "segmentation_diff = (segmentation == reference_segmentation) * 255\n", 424 | "\n", 425 | "# Flatten for 2D presentation, create a montage from the volume\n", 426 | "num_slices = segmentation_diff.GetDepth()\n", 427 | "tile_w = int(np.sqrt(num_slices))\n", 428 | "tile_h = int(np.ceil(num_slices / tile_w))\n", 429 | "default_background_color = 255\n", 430 | "tile_image = sitk.Tile(\n", 431 | " [segmentation_diff[:, :, i] for i in range(num_slices)],\n", 432 | " (tile_w, tile_h),\n", 433 | " default_background_color,\n", 434 | ")\n", 435 | "sitk.Show(tile_image)" 436 | ] 437 | }, 438 | { 439 | "cell_type": "markdown", 440 | "metadata": {}, 441 | "source": [ 442 | "

Next »

" 443 | ] 444 | } 445 | ], 446 | "metadata": { 447 | "kernelspec": { 448 | "display_name": "Python 3 (ipykernel)", 449 | "language": "python", 450 | "name": "python3" 451 | }, 452 | "language_info": { 453 | "codemirror_mode": { 454 | "name": "ipython", 455 | "version": 3 456 | }, 457 | "file_extension": ".py", 458 | "mimetype": "text/x-python", 459 | "name": "python", 460 | "nbconvert_exporter": "python", 461 | "pygments_lexer": "ipython3", 462 | "version": "3.9.13" 463 | } 464 | }, 465 | "nbformat": 4, 466 | "nbformat_minor": 2 467 | } 468 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Notebook Testing](https://github.com/SimpleITK/TUTORIAL/actions/workflows/main.yml/badge.svg?branch=main)](https://github.com/SimpleITK/TUTORIAL/actions/workflows/main.yml)           [![Tutorial Website](https://img.shields.io/badge/tutorial-website-brightgreen)](https://simpleitk.org/TUTORIAL/)          [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/SimpleITK/TUTORIAL/main?filepath=01_spatial_transformations.ipynb)          [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) 2 | 3 | # Welcome to SimpleITK! 4 | 5 | 6 | This repository contains the code used in the SimpleITK tutorial. 7 | 8 | If you are looking for the quickest way to become a proficient user of SimpleITK we highly recommend that you complete this tutorial. It includes several hours of instructional material, and is well worth the time investment. So, [go to the website](https://simpleitk.org/TUTORIAL/) and start your journey to Insight. 9 | 10 |       Enjoy
11 | The SimpleITK Team
12 |
--------------------------------------------------------------------------------
/binder/requirements.txt:
--------------------------------------------------------------------------------
1 | jupyter
2 | matplotlib
3 | ipywidgets
4 | itkwidgets
5 | numpy
6 | scipy
7 | pandas
8 | multiprocess
9 | SimpleITK>=2.2.0
10 |
11 |
12 |
--------------------------------------------------------------------------------
/binder/runtime.txt:
--------------------------------------------------------------------------------
1 | python-3.9
2 |
--------------------------------------------------------------------------------
/characterize_data.py:
--------------------------------------------------------------------------------
1 | import SimpleITK as sitk
2 | import pandas as pd
3 | import numpy as np
4 | import os
5 | import sys
6 | import shutil
7 | import subprocess
8 | import platform
9 |
10 | # We use the multiprocess package instead of the official
11 | # multiprocessing as it currently has several issues as discussed
12 | # on the software carpentry page: https://hpc-carpentry.github.io/hpc-python/06-parallel/
13 | import multiprocess as mp
14 | from functools import partial
15 | import argparse
16 |
17 | import hashlib
18 | import tempfile
19 |
20 | # Maximal number of parallel processes we run.
21 | MAX_PROCESSES = 15
22 |
23 | """
24 | This script inspects/characterizes images in a given directory structure. It
25 | recursively traverses the directories and either inspects the files one by one
26 | or, if in DICOM series inspection mode, inspects the data on a per series basis
27 | (all 2D series files combined into a single 3D image).
28 |
29 | To run the script one needs to specify:
30 |     1. Root of the data directory.
31 |     2. Output file name.
32 |     3. The analysis type to perform, per_file or per_series. The latter indicates
33 |        we are only interested in DICOM files. When run using per_file, empty lines
34 |        in the results file are due to:
35 |             a. The file is not an image or is a corrupt image file.
36 |             b. SimpleITK was unable to read the image file (contact us with an example).
37 |     4. Optional SimpleITK imageIO to use. The default value is
38 |        the empty string, indicating that all file types should be read.
39 |        To see the set of ImageIO types supported by your version of SimpleITK,
40 |        call ImageFileReader::GetRegisteredImageIOs() or simply print an
41 |        ImageFileReader object.
42 |     5. Optional external applications to run. Their return value (zero or
43 |        non zero) is used to log success or failure. A nice example is the
44 |        dciodvfy program from David Clunie (https://www.dclunie.com/dicom3tools.html)
45 |        which validates compliance with the DICOM standard.
46 |     6. When the external applications are provided, corresponding column headings
47 |        are also required. These are used in the output csv file.
48 |     7. Optional metadata keys. These are image specific keys such as DICOM tags
49 |        or other metadata tags that may be found in the image. The content of the
50 |        tags is written to the result file.
51 |     8. When the metadata tags are provided, corresponding column headings
52 |        are also required. These are used in the output csv file.
53 |
54 | Examples:
55 | Run a generic file analysis:
56 | python characterize_data.py data/ output/generic_image_data_report.csv per_file \
57 | --imageIO "" --external_applications ./dciodvfy --external_applications_headings "DICOM Compliant" \
58 | --metadata_keys "0008|0060" "0018|5101" --metadata_keys_headings "modality" "radiographic view"
59 |
60 |
61 | Run a DICOM series based analysis:
62 | python characterize_data.py data/ output/DICOM_image_data_report.csv per_series \
63 | --metadata_keys "0008|0060" "0018|5101" --metadata_keys_headings "modality" "radiographic view"
64 | """
65 |
66 |
67 | def inspect_image(sitk_image, image_info, current_index, meta_data_keys=[]):
68 |     """
69 |     Inspect a SimpleITK image, return a list of parameters characterizing the image.
70 |
71 |     Parameters
72 |     ----------
73 |     sitk_image (SimpleITK.Image): Input image for inspection.
74 |     image_info (list): Image information is written to this list, starting at current_index.
75 |                        [,,,MD5 intensity hash,
76 |                        image size, image spacing, image origin, axis direction,
77 |                        pixel type, min intensity, max intensity,
78 |                        meta data_1...meta_data_n,,,]
79 |     current_index (int): Starting index into the image_info list.
80 |     meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
81 |                                inspect.
82 |     Returns
83 |     -------
84 |     index to the next empty entry in the image_info list.
85 |     The image_info list is filled with the following values:
86 |     MD5 intensity hash - Enable identification of duplicate images in terms of intensity.
87 |                          This is different from SimpleITK image equality where the
88 |                          same intensities with different image spacing/origin/direction cosine
89 |                          are considered different images as they occupy a different spatial
90 |                          region.
91 |     image size - number of pixels in each dimension.
92 |     pixel type - type of pixels (scalar - gray, vector - gray or color).
93 |     min/max intensity - if a scalar image, min and max values.
94 |     meta data_i - value of image's metadata dictionary for given key (e.g. "0008|0060", the DICOM modality tag).
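
    Example (illustrative sketch, not part of the original docstring; the function
    is normally invoked via inspect_single_file/inspect_single_series rather than
    directly):
        info = [None] * 9
        next_index = inspect_image(sitk.Image(64, 64, sitk.sitkUInt8), info, 0)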
95 |     """
96 |     image_info[current_index] = hashlib.md5(
97 |         sitk.GetArrayViewFromImage(sitk_image)
98 |     ).hexdigest()
99 |     current_index = current_index + 1
100 |     image_info[current_index] = sitk_image.GetSize()
101 |     current_index = current_index + 1
102 |     image_info[current_index] = sitk_image.GetSpacing()
103 |     current_index = current_index + 1
104 |     image_info[current_index] = sitk_image.GetOrigin()
105 |     current_index = current_index + 1
106 |     image_info[current_index] = sitk_image.GetDirection()
107 |     current_index = current_index + 1
108 |     if (
109 |         sitk_image.GetNumberOfComponentsPerPixel() == 1
110 |     ):  # greyscale image, get the min/max pixel values
111 |         image_info[current_index] = sitk_image.GetPixelIDTypeAsString() + " gray"
112 |         current_index = current_index + 1
113 |         mmfilter = sitk.MinimumMaximumImageFilter()
114 |         mmfilter.Execute(sitk_image)
115 |         image_info[current_index] = mmfilter.GetMinimum()
116 |         current_index = current_index + 1
117 |         image_info[current_index] = mmfilter.GetMaximum()
118 |         current_index = current_index + 1
119 |     else:  # either a color image or a greyscale image masquerading as a color one
120 |         pixel_type = sitk_image.GetPixelIDTypeAsString()
121 |         channels = [
122 |             sitk.GetArrayFromImage(sitk.VectorIndexSelectionCast(sitk_image, i))
123 |             for i in range(sitk_image.GetNumberOfComponentsPerPixel())
124 |         ]
125 |         if all(  # compare all channels to the first, works for any channel count
126 |             np.array_equal(channels[0], c) for c in channels[1:]
127 |         ):
128 |             pixel_type = pixel_type + " {0} channels gray".format(
129 |                 sitk_image.GetNumberOfComponentsPerPixel()
130 |             )
131 |         else:
132 |             pixel_type = pixel_type + " {0} channels color".format(
133 |                 sitk_image.GetNumberOfComponentsPerPixel()
134 |             )
135 |         image_info[current_index] = pixel_type
136 |         current_index = current_index + 3  # skip the min/max intensity entries, which remain None for vector images
137 |     img_keys = sitk_image.GetMetaDataKeys()
138 |     for k in meta_data_keys:
139 |         if k in img_keys:
140 |             image_info[current_index] = sitk_image.GetMetaData(k)
141 |         current_index = current_index + 1
142 |     return current_index
143 |
144 |
145 | def inspect_single_file(file_name, imageIO="", meta_data_keys=[], external_programs=[]):
146 |     """
147 |     Inspect a file using the specified imageIO, returning a list with the relevant information.
148 |
149 |     Parameters
150 |     ----------
151 |     file_name (str): Image file name.
152 |     imageIO (str): Name of image IO to use. To see the list of registered image IOs use the
153 |                    ImageFileReader::GetRegisteredImageIOs() or print an ImageFileReader.
154 |                    The empty string indicates to read all file formats supported by SimpleITK.
155 |     meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
156 |                                inspect.
157 |     external_programs(list(str)): A list of programs we will run with the file_name as input;
158 |                                   the return value 'succeeded' or 'failed' is recorded. This is useful,
159 |                                   for example, if you need to validate conformance to a standard
160 |                                   such as DICOM.
161 |
162 |     Returns
163 |     -------
164 |     list with the following entries: [file name, MD5 intensity hash,
165 |                                       image size, image spacing, image origin, axis direction,
166 |                                       pixel type, min intensity, max intensity,
167 |                                       meta data_1...meta_data_n,
168 |                                       external_program_res_1...external_program_res_m]
169 |     If the given file is not readable by SimpleITK, the only meaningful entry in the list
170 |     will be the file name (all other values will be either None or NaN).
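
    Example (a short sketch using a file from the tutorial's data directory; the
    DICOM modality entry is simply left None for non-DICOM files):
        file_info = inspect_single_file("data/SimpleITK.jpg", meta_data_keys=["0008|0060"])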
171 |     """
172 |     file_info = [None] * (9 + len(meta_data_keys) + len(external_programs))
173 |     file_info[0] = file_name
174 |     current_index = 1
175 |     try:
176 |         reader = sitk.ImageFileReader()
177 |         reader.SetImageIO(imageIO)
178 |         reader.SetFileName(file_name)
179 |         img = reader.Execute()
180 |         current_index = inspect_image(img, file_info, current_index, meta_data_keys)
181 |         for p in external_programs:
182 |             try:
183 |                 # run the external programs, check the return value, and capture all output so it
184 |                 # doesn't appear on screen. The CalledProcessError exception is raised if the
185 |                 # external program fails (returns non zero value).
186 |                 subprocess.run([p, file_name], check=True, capture_output=True)
187 |                 file_info[current_index] = "succeeded"
188 |             except Exception:
189 |                 file_info[current_index] = "failed"
190 |             current_index = current_index + 1
191 |     except Exception:
192 |         pass
193 |     return file_info
194 |
195 |
196 | def inspect_files(
197 |     root_dir,
198 |     imageIO="",
199 |     meta_data_keys=[],
200 |     external_programs=[],
201 |     additional_column_names=[],
202 | ):
203 |     """
204 |     Iterate over a directory structure and return a pandas dataframe with the relevant information for the
205 |     image files. This also includes non image files. The resulting dataframe will only include the file name
206 |     if that file wasn't successfully read by SimpleITK. The two reasons for failure are: (1) the user specified
207 |     imageIO isn't compatible with the file format (user is only interested in reading jpg and the file
208 |     format is mha) or (2) the file could not be read by the SimpleITK IO (corrupt file or unexpected limitation of
209 |     SimpleITK).
210 |
211 |     Parameters
212 |     ----------
213 |     root_dir (str): Path to the root of the data directory. Traverse the directory structure
214 |                     and inspect every file (also report non image files, in which
215 |                     case the only valid entry will be the file name).
216 |     imageIO (str): Name of image IO to use. To see the list of registered image IOs use the
217 |                    ImageFileReader::GetRegisteredImageIOs() or print an ImageFileReader.
218 |                    The empty string indicates to read all file formats supported by SimpleITK.
219 |     meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
220 |                                inspect.
221 |     external_programs(list(str)): A list of programs we will run with the file_name as input;
222 |                                   the return value 'succeeded' or 'failed' is recorded. This
223 |                                   is useful, for example, if you need to validate conformance
224 |                                   to a standard such as DICOM.
225 |     additional_column_names (list(str)): Column names corresponding to the contents of the
226 |                                          meta_data_keys and external_programs lists.
227 |     Returns
228 |     -------
229 |     pandas DataFrame: Each row in the data frame corresponds to a single file.
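
    Example (a sketch mirroring the per_file command line usage shown in the
    module docstring above):
        df = inspect_files("data/", meta_data_keys=["0008|0060"],
                           additional_column_names=["modality"])
        df.to_csv("output/generic_image_data_report.csv", index=False)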
230 |
231 |     """
232 |     if len(meta_data_keys) + len(external_programs) != len(additional_column_names):
233 |         raise ValueError("Number of additional column names does not match expected.")
234 |     column_names = [
235 |         "file name",
236 |         "MD5 intensity hash",
237 |         "image size",
238 |         "image spacing",
239 |         "image origin",
240 |         "axis direction",
241 |         "pixel type",
242 |         "min intensity",
243 |         "max intensity",
244 |     ] + additional_column_names
245 |     all_file_names = []
246 |     for dir_name, subdir_names, file_names in os.walk(root_dir):
247 |         all_file_names += [
248 |             os.path.join(os.path.abspath(dir_name), fname) for fname in file_names
249 |         ]
250 |     # Get list of lists describing the results and then combine into a dataframe, faster
251 |     # than appending to the dataframe one by one. Use parallel processing to speed things up.
252 |     if platform.system() == "Windows":
253 |         res = map(
254 |             partial(
255 |                 inspect_single_file,
256 |                 imageIO=imageIO,
257 |                 meta_data_keys=meta_data_keys,
258 |                 external_programs=external_programs,
259 |             ),
260 |             all_file_names,
261 |         )
262 |     else:
263 |         with mp.Pool(processes=MAX_PROCESSES) as pool:
264 |             res = pool.map(
265 |                 partial(
266 |                     inspect_single_file,
267 |                     imageIO=imageIO,
268 |                     meta_data_keys=meta_data_keys,
269 |                     external_programs=external_programs,
270 |                 ),
271 |                 all_file_names,
272 |             )
273 |     return pd.DataFrame(res, columns=column_names)
274 |
275 |
276 | def inspect_single_series(series_data, meta_data_keys=[]):
277 |     """
278 |     Inspect a single DICOM series (DICOM hierarchy of patient-study-series-image).
279 |     This can be a single file, or multiple files such as a CT or
280 |     MR volume.
281 |
282 |     Parameters
283 |     ----------
284 |     series_data (two entry tuple): First entry is study:series, second entry is the list of
285 |                                    files comprising this series.
286 |     meta_data_keys(list(str)): The image's meta-data dictionary keys whose value we want to
287 |                                inspect.
288 |     Returns
289 |     -------
290 |     list with the following entries: [series files, MD5 intensity hash,
291 |                                       image size, image spacing, image origin, axis direction,
292 |                                       pixel type, min intensity, max intensity,
293 |                                       meta data_1...meta_data_n]
294 |     """
295 |     series_info = [None] * (9 + len(meta_data_keys))
296 |     series_info[0] = series_data[1]
297 |     current_index = 1
298 |     try:
299 |         reader = sitk.ImageSeriesReader()
300 |         reader.MetaDataDictionaryArrayUpdateOn()
301 |         reader.LoadPrivateTagsOn()
302 |         _, sid = series_data[0].split(":")
303 |         file_names = series_data[1]
304 |         # As the files comprising a series with multiple files can reside in
305 |         # separate directories and SimpleITK expects them to be in a single directory
306 |         # we use a tempdir and symbolic links to enable SimpleITK to read the series as
307 |         # a single image. Additionally the files are renamed as they may have resided in
308 |         # separate directories with the same file name. Finally, unfortunately on Windows
309 |         # we copy the files to the tempdir as the os.symlink documentation says that
310 |         # "On newer versions of Windows 10, unprivileged accounts can create symlinks
311 |         # if Developer Mode is enabled. When Developer Mode is not available/enabled,
312 |         # the SeCreateSymbolicLinkPrivilege privilege is required, or the process must be
313 |         # run as an administrator."
314 |         with tempfile.TemporaryDirectory() as tmpdirname:
315 |             if platform.system() == "Windows":
316 |                 for i, fname in enumerate(file_names):
317 |                     shutil.copy(
318 |                         os.path.abspath(fname), os.path.join(tmpdirname, str(i))
319 |                     )
320 |             else:
321 |                 for i, fname in enumerate(file_names):
322 |                     os.symlink(os.path.abspath(fname), os.path.join(tmpdirname, str(i)))
323 |             reader.SetFileNames(
324 |                 sitk.ImageSeriesReader_GetGDCMSeriesFileNames(tmpdirname, sid)
325 |             )
326 |             img = reader.Execute()
327 |             for k in meta_data_keys:
328 |                 if reader.HasMetaDataKey(0, k):
329 |                     img.SetMetaData(k, reader.GetMetaData(0, k))
330 |             inspect_image(img, series_info, current_index, meta_data_keys)
331 |     except Exception:
332 |         pass
333 |     return series_info
334 |
335 |
336 | def inspect_series(root_dir, meta_data_keys=[], additional_column_names=[]):
337 |     """
338 |     Inspect all series found in the directory structure. A series does not have to
339 |     be in a single directory (the files are located in the subtree and combined
340 |     into a single image).
341 |
342 |     Parameters
343 |     ----------
344 |     root_dir (str): Path to the root of the data directory. Traverse the directory structure
345 |                     and inspect every series. If the series is comprised of multiple image files
346 |                     they do not have to be in the same directory. The only expectation is that all
347 |                     images from the series are under the root_dir.
348 |     meta_data_keys(list(str)): The series meta-data dictionary keys whose value we want to
349 |                                inspect.
350 |     additional_column_names (list(str)): Column names corresponding to the contents of the
351 |                                          meta_data_keys list.
352 |     Returns
353 |     -------
354 |     pandas DataFrame: Each row in the data frame corresponds to a single series.
355 |     """
356 |     if len(meta_data_keys) != len(additional_column_names):
357 |         raise ValueError("Number of additional column names does not match expected.")
358 |     column_names = [
359 |         "files",
360 |         "MD5 intensity hash",
361 |         "image size",
362 |         "image spacing",
363 |         "image origin",
364 |         "axis direction",
365 |         "pixel type",
366 |         "min intensity",
367 |         "max intensity",
368 |     ] + additional_column_names
369 |     all_series_files = {}
370 |     reader = sitk.ImageFileReader()
371 |     # collect the file names of all series into a dictionary with the key being
372 |     # study:series.
373 |     for dir_name, subdir_names, _ in os.walk(root_dir):
374 |         sids = sitk.ImageSeriesReader_GetGDCMSeriesIDs(dir_name)
375 |         for sid in sids:
376 |             file_names = sitk.ImageSeriesReader_GetGDCMSeriesFileNames(dir_name, sid)
377 |             reader.SetFileName(file_names[0])
378 |             reader.ReadImageInformation()
379 |             study = reader.GetMetaData("0020|000d")
380 |             key = "{0}:{1}".format(study, sid)
381 |             if key in all_series_files:
382 |                 all_series_files[key].extend(file_names)
383 |             else:
384 |                 all_series_files[key] = list(file_names)
385 |     # Get list of lists describing the results and then combine into a dataframe, faster
386 |     # than appending to the dataframe one by one.
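    # (Sketch, not in the original: the per-series inspection below runs
    # sequentially; if needed, it could reuse the same multiprocess pattern as
    # inspect_files, e.g.
    #     with mp.Pool(processes=MAX_PROCESSES) as pool:
    #         res = pool.map(
    #             partial(inspect_single_series, meta_data_keys=meta_data_keys),
    #             all_series_files.items(),
    #         )
    # )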
387 | res = [ 388 | inspect_single_series(series_data, meta_data_keys) 389 | for series_data in all_series_files.items() 390 | ] 391 | return pd.DataFrame(res, columns=column_names) 392 | 393 | 394 | def main(argv=None): 395 | parser = argparse.ArgumentParser() 396 | parser.add_argument( 397 | "root_of_data_directory", help="path to the topmost directory containing data" 398 | ) 399 | parser.add_argument("output_file", help="output csv file path") 400 | parser.add_argument( 401 | "analysis_type", 402 | default="per_file", 403 | help='type of analysis, "per_file" or "per_series"', 404 | ) 405 | parser.add_argument( 406 | "--imageIO", 407 | default="", 408 | help="SimpleITK imageIO to use for reading (e.g. BMPImageIO)", 409 | ) 410 | parser.add_argument( 411 | "--external_applications", 412 | default=[], 413 | nargs="*", 414 | help="paths to external applications", 415 | ) 416 | parser.add_argument( 417 | "--external_applications_headings", 418 | default=[], 419 | nargs="*", 420 | help="titles of the results columns for external applications", 421 | ) 422 | parser.add_argument( 423 | "--metadata_keys", 424 | nargs="*", 425 | default=[], 426 | help="inspect values of these metadata keys (DICOM tags or other keys stored in the file)", 427 | ) 428 | parser.add_argument( 429 | "--metadata_keys_headings", 430 | default=[], 431 | nargs="*", 432 | help="titles of the results columns for the metadata_keys", 433 | ) 434 | 435 | args = parser.parse_args(argv) 436 | if len(args.external_applications) != len(args.external_applications_headings): 437 | print("Number of external applications and their headings do not match.") 438 | sys.exit(1) 439 | if len(args.metadata_keys) != len(args.metadata_keys_headings): 440 | print("Number of metadata keys and their headings do not match.") 441 | sys.exit(1) 442 | if args.analysis_type not in ["per_file", "per_series"]: 443 | print("Unexpected analysis type.") 444 | sys.exit(1) 445 | 446 | if args.analysis_type == "per_file": 447 | df = inspect_files( 448 | args.root_of_data_directory, 449 | imageIO=args.imageIO, 450 | meta_data_keys=args.metadata_keys, 451 | external_programs=args.external_applications, 452 | additional_column_names=args.metadata_keys_headings 453 | + args.external_applications_headings, 454 | ) 455 | df.to_csv(args.output_file, index=False) 456 | sys.exit(0) 457 | if args.analysis_type == "per_series": 458 | df = inspect_series( 459 | args.root_of_data_directory, 460 | meta_data_keys=args.metadata_keys, 461 | additional_column_names=args.metadata_keys_headings, 462 | ) 463 | df.to_csv(args.output_file, index=False) 464 | sys.exit(0) 465 | 466 | 467 | if __name__ == "__main__": 468 | sys.exit(main()) 469 | -------------------------------------------------------------------------------- /data/manifest.json: -------------------------------------------------------------------------------- 1 | { 2 | "SimpleITK.jpg" : { 3 | "sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46" 4 | }, 5 | "CIRS057A_MR_CT_DICOM/readme.txt" : { 6 | "sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef", 7 | "archive" : "true" 8 | }, 9 | "training_001_ct.mha" : { 10 | "sha512": "1b950bc42fddfcefc76b9203d5dd6c45960c4fa8dcb69b839d3d083270d3d4c9a9d378de3d3f914e432dc18fb44c9b9770d4db5580a70265f3e24e6cdb83015d" 11 | }, 12 | "training_001_mr_T1.mha" : { 13 | "sha512": 
"3d15477962fef5851207964c381ffe77c586a6f70f2a373ecd3b6b4dc50d51dc6cd893eb1bedabcd382a96f0dafac893ae9e5a7c2b7333f9ff3f0c6b7016c7bc" 14 | }, 15 | "POPI/meta/00-P.mhd" : { 16 | "sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc", 17 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar", 18 | "archive" : "true" 19 | }, 20 | "POPI/meta/70-P.mhd" : { 21 | "sha512": "87c256ff441429babceab5f9886397f7c4b4f85525dfb5a786ed64b97f4779d3b313b3faf1449dddb7ba5ed49719ff0eea296a3367cdc98e753f597028a6f0e0", 22 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/70-MetaImage.tar", 23 | "archive" : "true" 24 | }, 25 | "POPI/landmarks/00-Landmarks.pts" : { 26 | "sha512": "7c2120b1f6d4b855aa11bf05dd987d677c219ca4bdfbd39234e7835285c45082c229fb5cc09e00e6bd91b339eeb1f700c597f4166633421a133c21ce773b25ad", 27 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Landmarks/00-Landmarks.pts" 28 | }, 29 | "POPI/landmarks/70-Landmarks.pts" : { 30 | "sha512": "5bbcb192a275b30510fb1badcd12c9110ed7909d4353c76567ebb0aae61fb944a9c4f3d8cd8ffa0519d8621626d06db333c456eda9c68c3a03991e291760f44c", 31 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Landmarks/70-Landmarks.pts" 32 | }, 33 | "POPI/masks/00-air-body-lungs.mhd" : { 34 | "sha512": "e20e93b316390ea53c59427a8ab770bb3ebda1f2e4c911382b753ec024d812de8a6c13d1919b77a1687c4f611acdb62ea92c05b2cc3ed065046fbdbe139538c8", 35 | "url" : "http://tux.creatis.insa-lyon.fr/~srit/POPI/Masks/00Mask-MetaImage.tar", 36 | "archive" : "true" 37 | }, 38 | "POPI/masks/70-air-body-lungs.mhd" : { 39 | "sha512": "cbbd4b71b9771b36dc71fe6c564c96fde363878713680a624b9b307c4d9959835731c841be6b9304457d212350cc0ffac44385994b9bc9b2d8523e2463c664f8", 40 | "url": "http://tux.creatis.insa-lyon.fr/~srit/POPI/Masks/70Mask-MetaImage.tar", 41 | "archive" : "true" 42 | }, 43 | "fib_sem_bacillus_subtilis.mha": { 44 | "sha512": "5f7c34428362434c4ff3353307f8401ea38a18a68e9fc1705138232b4c70da2fcf3e2e8560ba917620578093edb392b418702edca3be0eafa23d6f52ced73314" 45 | }, 46 | "leg_panorama/readme.txt": { 47 | "archive": "true", 48 | "sha512":"0771b63d7f8ed19d16ca36de144d6570cc3f8d604be56626ceb932f6bbf60857282f52aad4158f85e8a01bb1c84222da5b23fd3df91ec46ebe625341f56d6bf9" 49 | }, 50 | "liverTumorSegmentations/Patient01Homo.mha": { 51 | "sha512": "c57e6c51bdd9dd46034df3c01e3187d884526dbcfcf8e056221205bac1a09098142692a1bc76f3834a78a809570e64544dbec9b9d264747383ee71e20b21d054" 52 | }, 53 | "liverTumorSegmentations/Patient01Homo_Rad01.mha": { 54 | "sha512": "e94fb4d96e5cc5dca3c68fc67f63e895b8a71011f5343b4399e122b8f6a43ec5d5055f939299e3d9955e59cd841ebeb2d2395568c10ce29a597c518db784a337" 55 | }, 56 | "liverTumorSegmentations/Patient01Homo_Rad02.mha": { 57 | "sha512": "e055aff99a1c05ab90b84af048dd94d32236dcb4e4b8ce0a99ba3658fe85cc7c8505b806a92611bcf5ecf4cd0dbe6cafc336efdb9fe49753d1fc4aed174ed8ba" 58 | }, 59 | "liverTumorSegmentations/Patient01Homo_Rad03.mha": { 60 | "sha512": "89e4040e17aba2fae50e0b59b2043203ab33ce3ae6ef90af8ebc8c032a6aaee35084bf1e34ce1a390d157b8fadae2fa7694203a0886f54cc9da5293dbaa5d0e7" 61 | } 62 | } 63 | -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | SimpleITK Tutorial 4 | 5 | 6 | 7 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 56 | 60 | 79 | 80 | 81 | 82 | 83 | 291 | 292 |
84 | 85 |

86 |

87 | 88 | 89 | 90 |

91 | If you encounter problems or have tutorial specific questions, please post on 92 | the tutorial's GitHub issue 93 | reporting system (requires a GitHub user account). For general SimpleITK questions, please 94 | use the ITK discourse forum. 95 |

96 |

Overview

97 | 98 | 99 |

100 | SimpleITK is a simplified programming 101 | interface to the algorithms and data 102 | structures of the Insight Toolkit (ITK) for 104 | segmentation, registration and 105 | advanced image analysis. It supports bindings for multiple programming languages 106 | including C++, Python, R, Java, C#, Lua, Ruby and TCL. Combining SimpleITK’s 107 | Python bindings with the Jupyter 108 | notebook web application creates an environment 109 | which facilitates collaborative development of biomedical image analysis 110 | workflows. 111 |

112 | 113 |

114 | In this tutorial, we use a hands-on approach utilizing Python and Jupyter notebooks to 115 | explore and experiment with various SimpleITK features. You can browse the Jupyter notebooks on 116 | your own, watch the videos associated with these notebooks or work your way through the notebooks 117 | following along with the videos. 118 |

119 | 120 |

121 | Additional details and notebooks can be found on the main SimpleITK 123 | notebooks repository. 124 |

125 | 126 | 127 |

Setup

128 | 129 | 130 | 131 |

132 | In this tutorial we will use the Anaconda Python distribution. Please follow the
133 | instructions below to set up the environment. All
134 | commands below are issued on the command line (Linux/Mac - terminal,
135 | Windows - Anaconda Prompt).
136 |

137 | 138 |
  1. Download and install the Fiji image viewer. This is the default image viewer used by SimpleITK:
     • On Windows: Install into your user directory (e.g. C:\Users\[your_user_name]\).
     • On Linux: Install into ~/bin/ .
     • On Mac: Install into /Applications/ or ~/Applications/ .
  2. Download and install the most recent version of Anaconda for your operating system. We assume it is installed in a directory named anaconda3. Regardless of the installer, we will be working with Python 3.9.
  3. Activate the base anaconda environment:
     • On Windows: open the Anaconda Prompt (found under the Anaconda3 start menu).
     • On Linux/Mac: on the command line: source path_to_anaconda3/bin/activate base
  4. Update the base anaconda environment and install the git version control system into it:
     conda update conda
     conda update anaconda
     conda install git
  5. Clone this repository:
     git clone https://github.com/SimpleITK/TUTORIAL.git
  6. Create the virtual environment containing all packages required for the course:
     conda env create -f TUTORIAL/environment.yml
  7. Activate the virtual environment:
     • On Windows: open the Anaconda Prompt (found under the Anaconda3 start menu), then: conda activate sitkpy
     • On Linux/Mac: on the command line: source path_to_anaconda3/bin/activate sitkpy
200 | 201 | 202 | 203 |

Tutorial - v2.0.0

204 |

205 | Click the launch binder button to try things out without installing anything. Note that some display functions that use an external viewer will not work.

211 |

212 | The videos may differ slightly from the current notebooks as they were created for the initial tutorial version, v1.0.0. 213 |

214 |
  • Start the tutorial by running the setup notebook. This notebook checks the environment setup and
    downloads all of the required data. At the bottom of each notebook you will find a button that will
    open the next notebook.
      cd TUTORIAL
      jupyter notebook 00_setup.ipynb
  • Introduction [video]:
    • History [ppt].
    • Overview and fundamental concepts [ppt].
  • Foundations [video]:
    • Spatial transformations [notebook].
    • Images and resampling [notebook].
  • Big Data [video]:
    • Efficient inspection of large image collections [notebook].
    • Data augmentation for deep learning [notebook].
  • Registration [video]:
    • The basic framework and initialization [notebook].
    • Nonrigid local domain transformations (FFD, Demons), and results evaluation [notebook].
    • Medical application example [notebook].
  • Segmentation [video]:
    • Microscopy application and shape analysis [notebook].
    • Results evaluation [notebook].
  • Results visualization [video, notebook].
263 | 264 |

Support the Toolkit

265 | 266 |

267 | Star us on GitHub (requires GitHub account): 268 |

273 |

274 |

275 | If you find that SimpleITK has been useful in your research, cite the appropriate paper (citations.bib):

277 | 289 | 290 |
293 | 294 | 295 | -------------------------------------------------------------------------------- /docs/mec2020.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/TUTORIAL/1d9ea028ae9a066f93d21a58ae2ad126707f6d89/docs/mec2020.png -------------------------------------------------------------------------------- /docs/simpleitk.bib: -------------------------------------------------------------------------------- 1 | 2 | @article{beare2018, 3 | author = {Richard Beare and Bradley Lowekamp and Ziv Yaniv}, 4 | title = {Image Segmentation, Registration and Characterization in {R} with {SimpleITK}}, 5 | journal = {Journal of Statistical Software}, 6 | volume = {86}, 7 | number = {8}, 8 | year = {2018}, 9 | pages = {1--35}, 10 | doi = {10.18637/jss.v086.i08}, 11 | url = {https://www.jstatsoft.org/v086/i08} 12 | } 13 | 14 | @article{yaniv2018, 15 | author = {Yaniv, Ziv and Lowekamp, Bradley C. and Johnson, Hans J. and Beare, Richard}, 16 | title = {{SimpleITK} Image-Analysis Notebooks: a Collaborative Environment for Education and 17 | Reproducible Research}, 18 | journal = {Journal of Digital Imaging}, 19 | year = {2018}, 20 | volume = {31}, 21 | number = {3}, 22 | pages = {290--303}, 23 | doi = {10.1007/s10278-017-0037-8}, 24 | url = {https://doi.org/10.1007/s10278-017-0037-8} 25 | } 26 | 27 | @article{lowekamp2013, 28 | author = {Lowekamp, Bradley and Chen, David and Ib{\'{a}}{\~{n}}ez, Luis and Blezek, Daniel}, 29 | title = {The Design of {SimpleITK}}, 30 | journal = {Frontiers in Neuroinformatics}, 31 | volume = {7}, 32 | pages = {45}, 33 | year = {2013}, 34 | url = {https://www.frontiersin.org/article/10.3389/fninf.2013.00045}, 35 | doi = {10.3389/fninf.2013.00045}, 36 | } 37 | -------------------------------------------------------------------------------- /docs/simpleitkFundamentalConcepts.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/TUTORIAL/1d9ea028ae9a066f93d21a58ae2ad126707f6d89/docs/simpleitkFundamentalConcepts.pptx -------------------------------------------------------------------------------- /docs/simpleitkHistoricalOverview.pptx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/TUTORIAL/1d9ea028ae9a066f93d21a58ae2ad126707f6d89/docs/simpleitkHistoricalOverview.pptx -------------------------------------------------------------------------------- /docs/simpleitkLogo.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/TUTORIAL/1d9ea028ae9a066f93d21a58ae2ad126707f6d89/docs/simpleitkLogo.jpg -------------------------------------------------------------------------------- /downloaddata.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Since we do not want to store large binary data files in our Git repository, 5 | we fetch_data_all from a network resource. 6 | 7 | The data we download is described in a json file. The file format is a dictionary 8 | of dictionaries. The top level key is the file name. The returned dictionary 9 | contains a sha512 checksum and possibly a url and boolean flag indicating 10 | the file is part of an archive. The sha512 checksum is mandatory. 
11 | When the optional url is given, we attempt to download from that url, otherwise 12 | we attempt to download from the list of servers returned by the 13 | get_servers() function. Files that are contained in archives are 14 | identified by the archive flag. 15 | 16 | Example json file contents: 17 | 18 | { 19 | "SimpleITK.jpg": { 20 | "sha512": "f1b5ce1bf9d7ebc0bd66f1c3bc0f90d9e9798efc7d0c5ea7bebe0435f173355b27df632971d1771dc1fc3743c76753e6a28f6ed43c5531866bfa2b38f1b8fd46" 21 | }, 22 | "POPI/meta/00-P.mhd": { 23 | "url": "http://tux.creatis.insa-lyon.fr/~srit/POPI/Images/MetaImage/00-MetaImage.tar", 24 | "archive": "true", 25 | "sha512": "09fcb39c787eee3822040fcbf30d9c83fced4246c518a24ab14537af4b06ebd438e2f36be91e6e26f42a56250925cf1bfa0d2f2f2192ed2b98e6a9fb5f5069fc" 26 | }, 27 | "CIRS057A_MR_CT_DICOM/readme.txt": { 28 | "archive": "true", 29 | "sha512": "d5130cfca8467c4efe1c6b4057684651d7b74a8e7028d9402aff8e3d62287761b215bc871ad200d4f177b462f7c9358f1518e6e48cece2b51c6d8e3bb89d3eef" 30 | } 31 | } 32 | 33 | Notes: 34 | 1. The file we download can be inside an archive. In this case, the sha512 35 | checksum is that of the archive. 36 | 37 | """ 38 | 39 | import hashlib 40 | import sys 41 | import os 42 | import json 43 | 44 | import errno 45 | import warnings 46 | 47 | # http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook 48 | 49 | 50 | def url_download_report(bytes_so_far, url_download_size, total_size): 51 | percent = float(bytes_so_far) / total_size 52 | percent = round(percent * 100, 2) 53 | if bytes_so_far > url_download_size: 54 | # Note that the carriage return is at the beginning of the 55 | # string and not the end. This accommodates usage in 56 | # IPython usage notebooks. Otherwise the string is not 57 | # displayed in the output. 58 | sys.stdout.write( 59 | "\rDownloaded %d of %d bytes (%0.2f%%)" 60 | % (bytes_so_far, total_size, percent) 61 | ) 62 | sys.stdout.flush() 63 | if bytes_so_far >= total_size: 64 | sys.stdout.write( 65 | "\rDownloaded %d of %d bytes (%0.2f%%)\n" 66 | % (bytes_so_far, total_size, percent) 67 | ) 68 | sys.stdout.flush() 69 | 70 | 71 | def url_download_read(url, outputfile, url_download_size=8192 * 2, report_hook=None): 72 | # Use the urllib2 to download the data. The Requests package, highly 73 | # recommended for this task, doesn't support the file scheme so we opted 74 | # for urllib2 which does. 75 | 76 | try: 77 | # Python 3 78 | from urllib.request import urlopen, URLError, HTTPError 79 | except ImportError: 80 | from urllib2 import urlopen, URLError, HTTPError 81 | from xml.dom import minidom 82 | 83 | # Open the url 84 | try: 85 | url_response = urlopen(url) 86 | except HTTPError as e: 87 | return "HTTP Error: {0} {1}\n".format(e.code, url) 88 | except URLError as e: 89 | return "URL Error: {0} {1}\n".format(e.reason, url) 90 | 91 | # We download all content types - the assumption is that the sha512 ensures 92 | # that what we received is the expected data. 
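    # (Illustrative note, not in the original: a hypothetical direct call would
    # look like
    #     url_download_read("https://example.com/file.bin", "file.bin",
    #                       report_hook=url_download_report)
    # with the sha512 integrity check performed afterwards by the caller via
    # output_hash_is_valid.)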
93 |     try:
94 |         # Python 3
95 |         content_length = url_response.info().get("Content-Length")
96 |     except AttributeError:
97 |         content_length = url_response.info().getheader("Content-Length")
98 |     total_size = content_length.strip()
99 |     total_size = int(total_size)
100 |     bytes_so_far = 0
101 |     with open(outputfile, "wb") as local_file:
102 |         while True:
103 |             try:
104 |                 url_download = url_response.read(url_download_size)
105 |                 bytes_so_far += len(url_download)
106 |                 if not url_download:
107 |                     break
108 |                 local_file.write(url_download)
109 |             # handle errors
110 |             except HTTPError as e:
111 |                 return "HTTP Error: {0} {1}\n".format(e.code, url)
112 |             except URLError as e:
113 |                 return "URL Error: {0} {1}\n".format(e.reason, url)
114 |             if report_hook:
115 |                 report_hook(bytes_so_far, url_download_size, total_size)
116 |     return "Downloaded Successfully"
117 |
118 |
119 | # http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python?rq=1
120 | def mkdir_p(path):
121 |     try:
122 |         os.makedirs(path)
123 |     except OSError as exc:  # Python >2.5
124 |         if exc.errno == errno.EEXIST and os.path.isdir(path):
125 |             pass
126 |         else:
127 |             raise
128 |
129 |
130 | # http://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
131 | def deprecated(func):
132 |     """This is a decorator which can be used to mark functions
133 |     as deprecated. It will result in a warning being emitted
134 |     when the function is used."""
135 |
136 |     def new_func(*args, **kwargs):
137 |         warnings.simplefilter("always", DeprecationWarning)  # turn off filter
138 |         warnings.warn(
139 |             "Call to deprecated function {}.".format(func.__name__),
140 |             category=DeprecationWarning,
141 |             stacklevel=2,
142 |         )
143 |         warnings.simplefilter("default", DeprecationWarning)  # reset filter
144 |         return func(*args, **kwargs)
145 |
146 |     new_func.__name__ = func.__name__
147 |     new_func.__doc__ = func.__doc__
148 |     new_func.__dict__.update(func.__dict__)
149 |     return new_func
150 |
151 |
152 | def get_servers():
153 |     import os
154 |
155 |     servers = list()
156 |     # NIAID S3 data store
157 |     servers.append("https://s3.amazonaws.com/simpleitk/public/notebooks/SHA512/%(hash)")
158 |     # Girder server hosted by kitware
159 |     servers.append(
160 |         "https://data.kitware.com/api/v1/file/hashsum/sha512/%(hash)/download"
161 |     )
162 |     # Local file store
163 |     if "ExternalData_OBJECT_STORES" in os.environ.keys():
164 |         local_object_stores = os.environ["ExternalData_OBJECT_STORES"]
165 |         for local_object_store in local_object_stores.split(";"):
166 |             servers.append("file://{0}/SHA512/%(hash)".format(local_object_store))
167 |     return servers
168 |
169 |
170 | def output_hash_is_valid(known_sha512, output_file):
171 |     sha512 = hashlib.sha512()
172 |     if not os.path.exists(output_file):
173 |         return False
174 |     with open(output_file, "rb") as fp:
175 |         for url_download in iter(lambda: fp.read(128 * sha512.block_size), b""):
176 |             sha512.update(url_download)
177 |     retrieved_sha512 = sha512.hexdigest()
178 |     return retrieved_sha512 == known_sha512
179 |
180 |
181 | def fetch_data_one(
182 |     onefilename, output_directory, manifest_file, verify=True, force=False
183 | ):
184 |     import tarfile, zipfile
185 |
186 |     with open(manifest_file, "r") as fp:
187 |         manifest = json.load(fp)
188 |     assert onefilename in manifest, "ERROR: {0} does not exist in {1}".format(
189 |         onefilename, manifest_file
190 |     )
191 |
192 |     sys.stdout.write("Fetching {0}\n".format(onefilename))
193 |     output_file = os.path.realpath(os.path.join(output_directory, onefilename))
194 |     data_dictionary = manifest[onefilename]
195 |     sha512 = data_dictionary["sha512"]
196 |     # List of places where the file can be downloaded from
197 |     all_urls = []
198 |     for url_base in get_servers():
199 |         all_urls.append(url_base.replace("%(hash)", sha512))
200 |     if "url" in data_dictionary:
201 |         all_urls.append(data_dictionary["url"])
202 |
203 |     new_download = False
204 |
205 |     for url in all_urls:
206 |         # Only download if force is true or the file does not exist.
207 |         if force or not os.path.exists(output_file):
208 |             mkdir_p(os.path.dirname(output_file))
209 |             url_download_read(url, output_file, report_hook=url_download_report)
210 |         # Check if a file was downloaded and has the correct hash
211 |         if output_hash_is_valid(sha512, output_file):
212 |             new_download = True
213 |             # Stop looking once found
214 |             break
215 |         # If the file exists, this means the hash is invalid and we have a problem.
216 |         elif os.path.exists(output_file):
217 |             error_msg = "File " + output_file
218 |             error_msg += " has incorrect hash value, " + sha512 + " was expected."
219 |             raise Exception(error_msg)
220 |
221 |     # Did not find the file anywhere.
222 |     if not os.path.exists(output_file):
223 |         error_msg = "File " + "'" + os.path.basename(output_file) + "'"
224 |         error_msg += " could not be found in any of the following locations:\n"
225 |         error_msg += ", ".join(all_urls)
226 |         raise Exception(error_msg)
227 |
228 |     if not new_download and verify:
229 |         # If the file was part of an archive then we don't verify it. These
230 |         # files are only verified on download
231 |         if ("archive" not in data_dictionary) and (
232 |             not output_hash_is_valid(sha512, output_file)
233 |         ):
234 |             # Attempt to download if sha512 is incorrect.
235 |             fetch_data_one(
236 |                 onefilename, output_directory, manifest_file, verify, force=True
237 |             )
238 |     # If the file is in an archive, unpack it.
239 |     if tarfile.is_tarfile(output_file) or zipfile.is_zipfile(output_file):
240 |         tmp_output_file = output_file + ".tmp"
241 |         os.rename(output_file, tmp_output_file)
242 |         if tarfile.is_tarfile(tmp_output_file):
243 |             archive = tarfile.open(tmp_output_file)
244 |         if zipfile.is_zipfile(tmp_output_file):
245 |             archive = zipfile.ZipFile(tmp_output_file, "r")
246 |         archive.extractall(os.path.dirname(tmp_output_file))
247 |         archive.close()
248 |         os.remove(tmp_output_file)
249 |
250 |     return output_file
251 |
252 |
253 | def fetch_data_all(output_directory, manifest_file, verify=True):
254 |     with open(manifest_file, "r") as fp:
255 |         manifest = json.load(fp)
256 |     for filename in manifest:
257 |         fetch_data_one(filename, output_directory, manifest_file, verify, force=False)
258 |
259 |
260 | def fetch_data(cache_file_name, verify=False, cache_directory_name="data"):
261 |     """
262 |     fetch_data is a simplified interface that requires
263 |     relative pathing with a manifest.json file located in the
264 |     same cache_directory_name name.
265 |
266 |     By default the cache_directory_name is "data" relative to the current
267 |     python script. An absolute path can also be given.
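
    Example (a minimal sketch, fetching one of the files listed in
    data/manifest.json):
        jpg_path = fetch_data("SimpleITK.jpg")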
268 | """ 269 | if not os.path.isabs(cache_directory_name): 270 | cache_root_directory_name = os.path.dirname(__file__) 271 | cache_directory_name = os.path.join( 272 | cache_root_directory_name, cache_directory_name 273 | ) 274 | cache_manifest_file = os.path.join(cache_directory_name, "manifest.json") 275 | assert os.path.exists(cache_manifest_file), "ERROR, {0} does not exist".format( 276 | cache_manifest_file 277 | ) 278 | return fetch_data_one( 279 | cache_file_name, cache_directory_name, cache_manifest_file, verify=verify 280 | ) 281 | 282 | 283 | if __name__ == "__main__": 284 | if len(sys.argv) < 3: 285 | print("Usage: " + sys.argv[0] + " output_directory manifest.json") 286 | sys.exit(1) 287 | output_directory = sys.argv[1] 288 | if not os.path.exists(output_directory): 289 | os.makedirs(output_directory) 290 | manifest = sys.argv[2] 291 | fetch_data_all(output_directory, manifest) 292 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: sitkpy 2 | 3 | channels: 4 | - defaults 5 | - conda-forge 6 | 7 | dependencies: 8 | - python=3.9 9 | - jupyter 10 | - matplotlib 11 | - ipywidgets 12 | - numpy 13 | - scipy 14 | - pandas 15 | - multiprocess 16 | - SimpleITK>=2.2.0 17 | - pip 18 | - pip: 19 | - itkwidgets 20 | 21 | -------------------------------------------------------------------------------- /environment_dev.yml: -------------------------------------------------------------------------------- 1 | name: sitkpy_dev 2 | 3 | channels: 4 | - defaults 5 | - conda-forge 6 | 7 | dependencies: 8 | - python=3.9 9 | - jupyter 10 | - matplotlib 11 | - ipywidgets 12 | - numpy 13 | - scipy 14 | - pandas 15 | - multiprocess 16 | - SimpleITK>=2.2.0 17 | - pytest 18 | - markdown 19 | - lxml 20 | - pip 21 | - pip: 22 | - black[jupyter] 23 | - pyenchant 24 | - itkwidgets[notebook] 25 | 26 | -------------------------------------------------------------------------------- /figures/ImageOriginAndSpacing.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/TUTORIAL/1d9ea028ae9a066f93d21a58ae2ad126707f6d89/figures/ImageOriginAndSpacing.png -------------------------------------------------------------------------------- /figures/hkaAngle.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/SimpleITK/TUTORIAL/1d9ea028ae9a066f93d21a58ae2ad126707f6d89/figures/hkaAngle.png -------------------------------------------------------------------------------- /output/.gitignore: -------------------------------------------------------------------------------- 1 | # 2 | #Maintain an empty directory in the git repository, where all files in this 3 | #directory will always be ignored by git: 4 | #http://stackoverflow.com/questions/115983/how-can-i-add-an-empty-directory-to-a-git-repository 5 | # 6 | * 7 | # Except this file 8 | !.gitignore 9 | -------------------------------------------------------------------------------- /registration_gui.py: -------------------------------------------------------------------------------- 1 | import SimpleITK as sitk 2 | import matplotlib.pyplot as plt 3 | import numpy as np 4 | 5 | # 6 | # Set of methods used for displaying the registration metric during the optimization. 7 | # 8 | 9 | 10 | # Callback invoked when the StartEvent happens, sets up our new data. 
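# (Usage sketch, not part of the original file: these callbacks are meant to be
# attached to a configured sitk.ImageRegistrationMethod via its AddCommand
# method, e.g.
#     registration_method.AddCommand(sitk.sitkStartEvent, start_plot)
#     registration_method.AddCommand(sitk.sitkEndEvent, end_plot)
#     registration_method.AddCommand(
#         sitk.sitkMultiResolutionIterationEvent, update_multires_iterations
#     )
#     registration_method.AddCommand(
#         sitk.sitkIterationEvent, lambda: plot_values(registration_method)
#     )
# )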
11 | def start_plot(): 12 | global metric_values, multires_iterations, ax, fig 13 | fig, ax = plt.subplots(1, 1, figsize=(8, 4)) 14 | 15 | metric_values = [] 16 | multires_iterations = [] 17 | plt.show() 18 | 19 | 20 | # Callback invoked when the EndEvent happens, do cleanup of data and figure. 21 | def end_plot(): 22 | global metric_values, multires_iterations, ax, fig 23 | 24 | del metric_values 25 | del multires_iterations 26 | del ax 27 | del fig 28 | 29 | 30 | # Callback invoked when the IterationEvent happens, update our data and display new figure. 31 | def plot_values(registration_method): 32 | global metric_values, multires_iterations, ax, fig 33 | 34 | metric_values.append(registration_method.GetMetricValue()) 35 | # Plot the similarity metric values 36 | ax.plot(metric_values, "r") 37 | ax.plot( 38 | multires_iterations, 39 | [metric_values[index] for index in multires_iterations], 40 | "b*", 41 | ) 42 | ax.set_xlabel("Iteration Number", fontsize=12) 43 | ax.set_ylabel("Metric Value", fontsize=12) 44 | fig.canvas.draw() 45 | 46 | 47 | # Callback invoked when the sitkMultiResolutionIterationEvent happens, update the index into the 48 | # metric_values list. 49 | def update_multires_iterations(): 50 | global metric_values, multires_iterations 51 | multires_iterations.append(len(metric_values)) 52 | 53 | 54 | def overlay_binary_segmentation_contours(image, mask, window_min, window_max): 55 | """ 56 | Given a 2D image and mask: 57 | a. resample the image and mask into isotropic grid (required for display). 58 | b. rescale the image intensities using the given window information. 59 | c. overlay the contours computed from the mask onto the image. 60 | """ 61 | # Resample the image (linear interpolation) and mask (nearest neighbor interpolation) into an isotropic grid, 62 | # required for display. 63 | original_spacing = image.GetSpacing() 64 | original_size = image.GetSize() 65 | min_spacing = min(original_spacing) 66 | new_spacing = [min_spacing, min_spacing] 67 | new_size = [ 68 | int(round(original_size[0] * (original_spacing[0] / min_spacing))), 69 | int(round(original_size[1] * (original_spacing[1] / min_spacing))), 70 | ] 71 | resampled_img = sitk.Resample( 72 | image, 73 | new_size, 74 | sitk.Transform(), 75 | sitk.sitkLinear, 76 | image.GetOrigin(), 77 | new_spacing, 78 | image.GetDirection(), 79 | 0.0, 80 | image.GetPixelID(), 81 | ) 82 | resampled_msk = sitk.Resample( 83 | mask, 84 | new_size, 85 | sitk.Transform(), 86 | sitk.sitkNearestNeighbor, 87 | mask.GetOrigin(), 88 | new_spacing, 89 | mask.GetDirection(), 90 | 0.0, 91 | mask.GetPixelID(), 92 | ) 93 | 94 | # Create the overlay: cast the mask to expected label pixel type, and do the same for the image after 95 | # window-level, accounting for the high dynamic range of the CT. 96 | return sitk.LabelMapContourOverlay( 97 | sitk.Cast(resampled_msk, sitk.sitkLabelUInt8), 98 | sitk.Cast( 99 | sitk.IntensityWindowing( 100 | resampled_img, windowMinimum=window_min, windowMaximum=window_max 101 | ), 102 | sitk.sitkUInt8, 103 | ), 104 | opacity=1, 105 | contourThickness=[2, 2], 106 | ) 107 | 108 | 109 | def display_coronal_with_overlay( 110 | temporal_slice, coronal_slice, images, masks, label, window_min, window_max 111 | ): 112 | """ 113 | Display a coronal slice from the 4D (3D+time) CT with a contour overlaid onto it. The contour is the edge of 114 | the specific label. 
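
    Example (hypothetical values; assumes images and masks are lists of 3D
    volumes loaded elsewhere, e.g. the POPI 4D CT data used in the tutorial):
        display_coronal_with_overlay(
            temporal_slice=0, coronal_slice=120, images=images, masks=masks,
            label=1, window_min=-1024, window_max=976
        )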
115 | """ 116 | img = images[temporal_slice][:, coronal_slice, :] 117 | msk = masks[temporal_slice][:, coronal_slice, :] == label 118 | 119 | overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) 120 | # Flip the image so that corresponds to correct radiological view. 121 | plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img))) 122 | plt.axis("off") 123 | plt.show() 124 | 125 | 126 | def display_coronal_with_label_maps_overlay( 127 | coronal_slice, mask_index, image, masks, label, window_min, window_max 128 | ): 129 | """ 130 | Display a coronal slice from a 3D CT with a contour overlaid onto it. The contour is the edge of 131 | the specific label from the specific mask. Function is used to display results of transforming a segmentation 132 | using registration. 133 | """ 134 | img = image[:, coronal_slice, :] 135 | msk = masks[mask_index][:, coronal_slice, :] == label 136 | 137 | overlay_img = overlay_binary_segmentation_contours(img, msk, window_min, window_max) 138 | # Flip the image so that corresponds to correct radiological view. 139 | plt.imshow(np.flipud(sitk.GetArrayFromImage(overlay_img))) 140 | plt.axis("off") 141 | plt.show() 142 | -------------------------------------------------------------------------------- /tests/additional_dictionary.txt: -------------------------------------------------------------------------------- 1 | ANTSNeighborhoodCorrelation 2 | API 3 | Acknowledgements 4 | AddTransform 5 | Affine 6 | AffineTransform 7 | Another's 8 | Args 9 | Azulay 10 | BFGS 11 | BMP 12 | BMPImageIO 13 | BSpline 14 | BSplineTransform 15 | BSplineTransformInitializer 16 | BSplineTransformInitializerFilter 17 | Baum 18 | Biancardi 19 | BinaryMorphologicalClosing 20 | BinaryMorphologicalOpening 21 | BinaryThreshold 22 | BioRadImageIO 23 | Biomechanics 24 | Bitwise 25 | Broyden 26 | Bruker 27 | Bérard 28 | CBCT 29 | CIRS 30 | CREATIS 31 | CTs 32 | CXRs 33 | CancerCare 34 | CastImageFilter 35 | CenteredTransformInitializer 36 | CenteredTransformInitializerFilter 37 | Centre 38 | CheckerBoardImageFilter 39 | Clarysse 40 | Clin 41 | CoRR 42 | Colocalization 43 | ComposeImageFilter 44 | ComposeScaleSkewVersor 45 | CompositeTransform 46 | ConfidenceConnected 47 | ConjugateGradientLineSearch 48 | ConnectedComponentImageFilter 49 | ConnectedThreshold 50 | Cy 51 | DAPI 52 | DICOM 53 | DTransform 54 | Decubitus 55 | Demner 56 | DemonsMetric 57 | DemonsRegistrationFilter 58 | Diff 59 | DiffeomorphicDemonsRegistrationFilter 60 | DisplacementField 61 | DisplacementFieldTransform 62 | Docstring 63 | Docstrings 64 | Doxygen 65 | EachIteration 66 | EndEvent 67 | Etienne 68 | ExhaustiveOptimizer 69 | ExpandImageFilter 70 | FFD 71 | FFDL 72 | FFDR 73 | FFF 74 | FFP 75 | FFS 76 | FITC 77 | FLE 78 | FRE 79 | FREs 80 | FastMarchingImageFilter 81 | FastSymmetricForcesDemonsRegistrationFilter 82 | FilterName 83 | FixedParameters 84 | Flickr 85 | FlipImageFilter 86 | Fushman 87 | GDCMImageIO 88 | GIF 89 | GaborSource 90 | Geissbuehler 91 | Genomic 92 | GeodesicActiveContour 93 | GetArrayFromImage 94 | GetArrayViewFromImage 95 | GetCenter 96 | GetHeight 97 | GetImageFromArray 98 | GetInverse 99 | GetMetaData 100 | GetMetaDataKeys 101 | GetPixel 102 | GetRegisteredImageIOs 103 | GiplImageIO 104 | Goldfarb 105 | GradientDescent 106 | GradientDescentLineSearch 107 | HDF 108 | HDF5ImageIO 109 | HFDL 110 | HFDR 111 | HFP 112 | HFS 113 | HKA 114 | HU 115 | HasMetaDataKey 116 | HausdorffDistanceImageFilter 117 | Hein 118 | Hounsfield 119 | ICCR 120 | ID's 121 | IPython 122 | IQR 
123 | ITK 124 | ITK's 125 | ITKv 126 | ImageFileReader 127 | ImageFileReader's 128 | ImageIO 129 | ImageIOs 130 | ImageJ 131 | ImageRegistrationMethod 132 | ImageSelection 133 | ImageSeriesReader 134 | IntensityWindowingImageFilter 135 | InterpolatorEnum 136 | Interspeech 137 | InverseDisplacementFieldImageFilter 138 | InvertDisplacementFieldImageFilter 139 | IterationEvent 140 | IterativeInverseDisplacementFieldImageFilter 141 | JPEG 142 | JPEGImageIO 143 | JPEGs 144 | Jaccard 145 | Jacobian 146 | Javascript 147 | Jirapatnakul 148 | JoinSeries 149 | JointHistogram 150 | JointHistogramMutualInformation 151 | Joskowicz 152 | Jupyter 153 | JupyterLab 154 | Kamath 155 | LBFGS 156 | LSMImageIO 157 | LaTeX 158 | LabelContourImageFilter 159 | LabelMapContourOverlayImageFilter 160 | LabelOverlayImageFilter 161 | LabelShapeStatisticsImageFilter 162 | LabelToRGBImageFilter 163 | LandmarkBasedTransformInitializer 164 | LaplacianSegmentation 165 | Lasser 166 | Leygue 167 | Lim 168 | Lingala 169 | Linte 170 | LoadPrivateTagsOn 171 | Lobb 172 | Léon 173 | MATLAB 174 | MATLAB's 175 | MINCImageIO 176 | MRCImageIO 177 | MacCallum 178 | MacOS 179 | Mahalanobis 180 | Maier 181 | Malpica 182 | Marschner 183 | MattesMutualInformation 184 | Maurer 185 | MaximumEntropy 186 | MeanSquares 187 | MetaDataDictionaryArrayUpdateOn 188 | MetaImageIO 189 | MetricEvaluate 190 | Narayanan 191 | Nayak 192 | NeighborhoodConnected 193 | Nelder 194 | NiftiImageIO 195 | NrrdImageIO 196 | Nyquist 197 | OSX 198 | OpenI 199 | Optimizers 200 | Orthop 201 | Otsu's 202 | PNG 203 | PNGImageIO 204 | POPI 205 | PairedPointDataManipulation 206 | Photogrammetric 207 | Photometric 208 | PixelIDValueEnum 209 | Popa 210 | Proc 211 | Pythonic 212 | RGB 213 | RIRE 214 | RLE 215 | ROIs 216 | RSK 217 | Radiographic 218 | ReadImage 219 | ReadImageInformation 220 | ReadTransform 221 | RegularStepGradientDescent 222 | Relat 223 | ResampleImageFilter 224 | Rheumatol 225 | Rueda 226 | SEM 227 | SPIE 228 | Sarrut 229 | ScalarChanAndVese 230 | ScalarToRGBColormapImageFilter 231 | ScaleSkewVersor 232 | ScaleTransform 233 | ScaleVersor 234 | Segmentations 235 | SetAngle 236 | SetApplication 237 | SetCenter 238 | SetDirection 239 | SetFixedInitialTransform 240 | SetInitialTransform 241 | SetInitialTransformAsBSpline 242 | SetInterpolator 243 | SetMean 244 | SetMetricAsDemons 245 | SetMetricAsX 246 | SetMovingInitialTransform 247 | SetOptimizerAsConjugateGradientLineSearch 248 | SetOptimizerAsX 249 | SetOptimizerScalesFromIndexShift 250 | SetOptimizerScalesFromJacobian 251 | SetOptimizerScalesFromPhysicalShift 252 | SetOrigin 253 | SetParameters 254 | SetPixel 255 | SetProbability 256 | SetScale 257 | SetShrinkFactorsPerLevel 258 | SetSmoothingSigmasPerLevel 259 | SetSpacing 260 | SetStandardDeviation 261 | ShapeDetection 262 | Sigmoid 263 | SimpleITK 264 | SimpleITK's 265 | SimpleITKv 266 | SmoothingSigmasAreSpecifiedInPhysicalUnitsOn 267 | Sorensen 268 | StartEvent 269 | StatisticsImageFilter 270 | StimulateImageIO 271 | Subsampling 272 | SymmetricForcesDemonsRegistrationFilter 273 | TIFFImageIO 274 | TRE 275 | TREs 276 | Thirion 277 | ThresholdSegmentation 278 | Thresholding 279 | TileImageFilter 280 | Toger 281 | Toutios 282 | TransformContinuousIndexToPhysicalPoint 283 | TransformEnum 284 | TransformPoint 285 | TransformToDisplacementFieldFilter 286 | TranslationTransform 287 | UInt 288 | Uncomment 289 | VGG 290 | VTK 291 | VTKImageIO 292 | Valgus 293 | Vandemeulebroucke 294 | Varus 295 | Vaz 296 | VectorConfidenceConnected 297 | VersorRigid 298 | 
VersorTransform 299 | VolView 300 | WriteImage 301 | XC 302 | XVth 303 | XX 304 | YCbCr 305 | YY 306 | Yaniv 307 | ZYX 308 | Zhu 309 | Zikri 310 | accessors 311 | affine 312 | al 313 | algorithmically 314 | anisotropic 315 | app 316 | argmin 317 | atol 318 | aug 319 | ay 320 | az 321 | backgroundValue 322 | behaviour 323 | bio 324 | booktabs 325 | boolean 326 | bspline 327 | ccc 328 | centroid 329 | characterisation 330 | circ 331 | colocalization 332 | colocalizations 333 | colocalized 334 | colour 335 | colourmap 336 | condylar 337 | const 338 | convergenceMinimumValue 339 | convergenceWindowSize 340 | convolutional 341 | cryosectioning 342 | css 343 | csv 344 | cthead 345 | ctrl 346 | customizable 347 | dapi 348 | dataframe 349 | dataset 350 | datasets 351 | dciodvfy 352 | debugOn 353 | defaultPixelValue 354 | deformable 355 | dev 356 | dimensionality 357 | disp 358 | displaystyle 359 | dissociations 360 | documentclass 361 | doi 362 | dropdown 363 | dseqImageIO 364 | dy 365 | eikonal 366 | endospore 367 | endospores 368 | env 369 | estimateLearningRate 370 | et 371 | euler 372 | faux 373 | fdata 374 | fiducial 375 | fiducial's 376 | fiducials 377 | floordiv 378 | fp 379 | frac 380 | fronto 381 | func 382 | geq 383 | ggplot 384 | glyphs 385 | greyscale 386 | gui 387 | hausdorff 388 | homography 389 | honours 390 | iff 391 | imageIO 392 | img 393 | init 394 | initialNeighborhoodRadius 395 | initializations 396 | initializer 397 | inline 398 | inlined 399 | interp 400 | interpolator 401 | interpolators 402 | intra 403 | ipywidgets 404 | iso 405 | itkwidgets 406 | jn 407 | jpg 408 | jupyter 409 | labelForUndecidedPixels 410 | labelled 411 | labelling 412 | lapply 413 | ldots 414 | learningRate 415 | leq 416 | linspace 417 | localizations 418 | luminance 419 | mathbb 420 | mathbf 421 | matplotlib 422 | meshgrid 423 | meshlab 424 | metadata 425 | metric's 426 | metricvalue 427 | mha 428 | midas 429 | minima 430 | multiprocess 431 | multiscale 432 | myshow 433 | nD 434 | nanometers 435 | natively 436 | nbagg 437 | nbextension 438 | nm 439 | nms 440 | np 441 | num 442 | numberOfIterations 443 | numberOfSteps 444 | numpy 445 | offline 446 | operatively 447 | optimizer's 448 | optimizerScales 449 | optimizers 450 | originalControlPointDisplacements 451 | originalDisplacements 452 | otsu 453 | outlier 454 | outputDirection 455 | outputOrigin 456 | outputPixelType 457 | outputSpacing 458 | outputfile 459 | overcomplete 460 | overfit 461 | overfitting 462 | parallelization 463 | param 464 | parametrization 465 | parametrized 466 | pixelated 467 | plafond 468 | pn 469 | png 470 | popi 471 | pre 472 | prefixi 473 | preprocessing 474 | preselected 475 | pretrained 476 | py 477 | pyplot 478 | qs 479 | quaternion 480 | radiographic 481 | radiological 482 | recognised 483 | redisplay 484 | referenceImage 485 | resample 486 | resampled 487 | resamples 488 | resampling 489 | rescale 490 | rgb 491 | roi 492 | runtime 493 | sRGB 494 | sagittal 495 | scaleFactors 496 | scipy 497 | segBlobs 498 | segChannel 499 | segmentations 500 | shrinkFactors 501 | sigmoid 502 | sitk 503 | sitkAnnulus 504 | sitkBSpline 505 | sitkBall 506 | sitkBlackmanWindowedSinc 507 | sitkBox 508 | sitkComplexFloat 509 | sitkCosineWindowedSinc 510 | sitkCross 511 | sitkFloat 512 | sitkGaussian 513 | sitkHammingWindowedSinc 514 | sitkInt 515 | sitkLabelUInt 516 | sitkLanczosWindowedSinc 517 | sitkLinear 518 | sitkMultiResolutionIterationEvent 519 | sitkNearestNeighbor 520 | sitkUInt 521 | sitkUnknown 522 | sitkVectorFloat 523 | 
sitkVectorInt 524 | sitkVectorUInt 525 | sitkWelchWindowedSinc 526 | smoothingSigmas 527 | spacings 528 | spatio 529 | spc 530 | sqrt 531 | stepLength 532 | subregions 533 | subsampling 534 | subtilis 535 | supersampling 536 | sys 537 | sz 538 | tempdir 539 | textrm 540 | tfm 541 | tgz 542 | th 543 | thetaX 544 | thetaY 545 | thetaZ 546 | thresholded 547 | thresholding 548 | ticklabels 549 | tidyr 550 | tif 551 | timeit 552 | toolbar 553 | tranforms 554 | transform's 555 | translational 556 | truediv 557 | ttest 558 | tx 559 | txt 560 | ty 561 | tz 562 | uint 563 | uncomment 564 | undistorted 565 | usepackage 566 | vdots 567 | versor 568 | vertices 569 | vm 570 | voxel 571 | voxel's 572 | voxels 573 | vx 574 | vy 575 | vz 576 | widgetsnbextension 577 | wikimedia 578 | workflow 579 | xn 580 | xpixels 581 | xtable 582 | xx 583 | xxx 584 | xy 585 | xyz 586 | xz 587 | yn 588 | ypixels 589 | yy 590 | yyy 591 | yz 592 | zyx 593 | zz 594 | zzz 595 | -------------------------------------------------------------------------------- /tests/requirements_testing.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | markdown 3 | lxml 4 | pyenchant 5 | jupyter 6 | matplotlib 7 | ipywidgets 8 | itkwidgets 9 | numpy 10 | scipy 11 | pandas 12 | multiprocess 13 | SimpleITK>=2.2.0 14 | -------------------------------------------------------------------------------- /tests/test_notebooks.py: -------------------------------------------------------------------------------- 1 | import os 2 | import subprocess 3 | import tempfile 4 | import nbformat 5 | import pytest 6 | import markdown 7 | import re 8 | 9 | from enchant.checker import SpellChecker 10 | from enchant.tokenize import Filter, EmailFilter, URLFilter 11 | from enchant import DictWithPWL 12 | 13 | from lxml.html import document_fromstring, etree 14 | from urllib.request import urlopen, URLError 15 | 16 | 17 | """ 18 | run all tests: 19 | pytest -v --tb=short 20 | 21 | run python tests: 22 | pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook 23 | 24 | run specific Python test: 25 | pytest -v --tb=short tests/test_notebooks.py::Test_notebooks::test_python_notebook[00_setup.ipynb] 26 | 27 | -s : disable all capturing of output. 28 | """ 29 | 30 | 31 | class Test_notebooks(object): 32 | """ 33 | Testing of SimpleITK Jupyter notebooks: 34 | 1. Static analysis: 35 | Check that notebooks do not contain output (sanity check as these should 36 | not have been pushed to the repository). 37 | Check that all the URLs in the markdown cells are not broken. 38 | 2. Dynamic analysis: 39 | Run the notebook and check for errors. In some notebooks we 40 | intentionally cause errors to illustrate certain features of the toolkit. 41 | All code cells that intentionally generate an error are expected to be 42 | marked using the cell's metadata. In the notebook go to 43 | "View->Cell Toolbar->Edit Metadata" and add the following JSON entry: 44 | 45 | "simpleitk_error_expected": simpleitk_error_message 46 | 47 | with the appropriate "simpleitk_error_message" text. 48 | Cells where an error is allowed, but not necessarily expected, should be 49 | marked with the following JSON: 50 | 51 | "simpleitk_error_allowed": simpleitk_error_message 52 | 53 | The simpleitk_error_message is a substring of the generated error 54 | message, such as 'Exception thrown in SimpleITK Show:' 55 | 56 | To test notebooks that use too much memory (exceed the 4GB allocated for the testing 57 | machine): 58 | 1.
Create an environment variable named SIMPLE_ITK_MEMORY_CONSTRAINED_ENVIRONMENT 59 | 2. Import the setup_for_testing.py at the top of the notebook. This module will 60 | decorate the sitk.ReadImage so that after reading the initial image it is 61 | resampled by a factor of 4 in each dimension. 62 | 63 | Adding a test: 64 | Simply add the new notebook file name to the list of files decorating the test_python_notebook 65 | or test_r_notebook functions. DON'T FORGET THE COMMA. 66 | """ 67 | 68 | _allowed_error_markup = "simpleitk_error_allowed" 69 | _expected_error_markup = "simpleitk_error_expected" 70 | 71 | @pytest.mark.parametrize( 72 | "notebook_file_name", 73 | [ 74 | "00_setup.ipynb", 75 | "01_spatial_transformations.ipynb", 76 | "02_images_and_resampling.ipynb", 77 | "03_trust_but_verify.ipynb", 78 | "04_data_augmentation.ipynb", 79 | "05_basic_registration.ipynb", 80 | "06_advanced_registration.ipynb", 81 | "07_registration_application.ipynb", 82 | "08_segmentation_and_shape_analysis.ipynb", 83 | "09_segmentation_evaluation.ipynb", 84 | "10_results_visualization.ipynb", 85 | ], 86 | ) 87 | def test_python_notebook(self, notebook_file_name): 88 | self.evaluate_notebook(self.absolute_path_python(notebook_file_name), "python") 89 | 90 | def evaluate_notebook(self, path, kernel_name): 91 | """ 92 | Perform static and dynamic analysis of the notebook. 93 | Execute a notebook via nbconvert and print the results of the test (errors etc.) 94 | Args: 95 | path (string): Name of notebook to run. 96 | kernel_name (string): Which jupyter kernel to use to run the test. 97 | Relevant values are: 'python2', 'python3', 'ir'. 98 | """ 99 | 100 | dir_name, file_name = os.path.split(path) 101 | if dir_name: 102 | os.chdir(dir_name) 103 | 104 | print("-------- begin (kernel {0}) {1} --------".format(kernel_name, file_name)) 105 | no_static_errors = self.static_analysis(path) 106 | no_dynamic_errors = self.dynamic_analysis(path, kernel_name) 107 | print("-------- end (kernel {0}) {1} --------".format(kernel_name, file_name)) 108 | assert no_static_errors and no_dynamic_errors 109 | 110 | def static_analysis(self, path): 111 | """ 112 | Perform static analysis of the notebook. 113 | Read the notebook and check that there is no output and that the links 114 | in the markdown cells are not broken. 115 | Args: 116 | path (string): Name of notebook. 117 | Return: 118 | boolean: True if static analysis succeeded, otherwise False. 119 | """ 120 | 121 | nb = nbformat.read(path, nbformat.current_nbformat) 122 | 123 | ####################### 124 | # Check that the notebook does not contain output from code cells 125 | # (should not be in the repository, but well...). 126 | ####################### 127 | no_unexpected_output = True 128 | 129 | # Collect cells whose dictionary has a non-empty 'outputs' entry; 130 | # relies on Python's short circuit evaluation so that we 131 | # don't get a KeyError when the 'outputs' entry is missing. 132 | cells_with_output = [c.source for c in nb.cells if "outputs" in c and c.outputs] 133 | if cells_with_output: 134 | no_unexpected_output = False 135 | print("Cells with unexpected output:\n_____________________________") 136 | for cell in cells_with_output: 137 | print(cell + "\n---") 138 | else: 139 | print("no unexpected output") 140 | 141 | ####################### 142 | # Check that all the links in the markdown cells are valid/accessible.
143 | ####################### 144 | no_broken_links = True 145 | 146 | cells_and_broken_links = [] 147 | for c in nb.cells: 148 | if c.cell_type == "markdown": 149 | html_tree = document_fromstring(markdown.markdown(c.source)) 150 | broken_links = [] 151 | # iterlinks() returns tuples of the form (element, attribute, link, pos) 152 | for document_link in html_tree.iterlinks(): 153 | try: 154 | if ( 155 | "http" not in document_link[2] 156 | ): # Local file (url uses forward slashes, windows backwards). 157 | url = "file:///" + os.path.abspath( 158 | document_link[2] 159 | ).replace("\\", "/") 160 | else: # Remote file. 161 | url = document_link[2] 162 | urlopen(url) 163 | except URLError: 164 | broken_links.append(url) 165 | if broken_links: 166 | cells_and_broken_links.append((broken_links, c.source)) 167 | if cells_and_broken_links: 168 | no_broken_links = False 169 | print("Cells with broken links:\n________________________") 170 | for links, cell in cells_and_broken_links: 171 | print(cell + "\n") 172 | print("\tBroken links:") 173 | print("\t" + "\n\t".join(links) + "\n---") 174 | else: 175 | print("no broken links") 176 | 177 | ####################### 178 | # Spell check all markdown cells and comments in code cells using the pyenchant spell checker. 179 | ####################### 180 | no_spelling_mistakes = True 181 | simpleitk_notebooks_dictionary = DictWithPWL( 182 | "en_US", 183 | os.path.join( 184 | os.path.dirname(os.path.abspath(__file__)), "additional_dictionary.txt" 185 | ), 186 | ) 187 | spell_checker = SpellChecker( 188 | simpleitk_notebooks_dictionary, filters=[EmailFilter, URLFilter] 189 | ) 190 | cells_and_spelling_mistakes = [] 191 | for c in nb.cells: 192 | spelling_mistakes = [] 193 | if c.cell_type == "markdown": 194 | # Get the text from the HTML as a single string, with the markup replaced by spaces. 195 | spell_checker.set_text( 196 | " ".join( 197 | etree.XPath("//text()")( 198 | document_fromstring(markdown.markdown(c.source)) 199 | ) 200 | ) 201 | ) 202 | elif c.cell_type == "code": 203 | # Get all the comments and concatenate them into a single string separated by newlines. 204 | comment_lines = re.findall("#+.*", c.source) 205 | spell_checker.set_text("\n".join(comment_lines)) 206 | for error in spell_checker: 207 | error_message = ( 208 | "error: " 209 | + "'" 210 | + error.word 211 | + "', " 212 | + "suggestions: " 213 | + str(spell_checker.suggest()) 214 | ) 215 | spelling_mistakes.append(error_message) 216 | if spelling_mistakes: 217 | cells_and_spelling_mistakes.append((spelling_mistakes, c.source)) 218 | if cells_and_spelling_mistakes: 219 | no_spelling_mistakes = False 220 | print("Cells with spelling mistakes:\n________________________") 221 | for misspelled_words, cell in cells_and_spelling_mistakes: 222 | print(cell + "\n") 223 | print("\tMisspelled words and suggestions:") 224 | print("\t" + "\n\t".join(misspelled_words) + "\n---") 225 | else: 226 | print("no spelling mistakes") 227 | 228 | return no_unexpected_output and no_broken_links and no_spelling_mistakes 229 | 230 | def dynamic_analysis(self, path, kernel_name): 231 | """ 232 | Perform dynamic analysis of the notebook. 233 | Execute a notebook via nbconvert and print the results of the test 234 | (errors etc.) 235 | Args: 236 | path (string): Name of notebook to run. 237 | kernel_name (string): Which jupyter kernel to use to run the test. 238 | Relevant values are: 'python', 'ir'. 239 | Return: 240 | boolean: True if dynamic analysis succeeded, otherwise False.
241 | """ 242 | 243 | # Execute the notebook and allow errors (run all cells), output is 244 | # written to a temporary file which should be automatically deleted. 245 | # Windows has a bug with temporary files. On windows, if delete=True 246 | # the file is kept open and cannot be read from 247 | # (see https://github.com/python/cpython/issues/58451). 248 | # We set the delete flag to False on windows and True on all 249 | # other operating systems, circumventing the issue. 250 | with tempfile.NamedTemporaryFile( 251 | suffix=".ipynb", delete=os.name != "nt" 252 | ) as fout: 253 | output_dir, output_fname = os.path.split(fout.name) 254 | args = [ 255 | "jupyter", 256 | "nbconvert", 257 | "--to", 258 | "notebook", 259 | "--execute", 260 | "--ExecutePreprocessor.kernel_name=" + kernel_name, 261 | "--ExecutePreprocessor.allow_errors=True", 262 | "--ExecutePreprocessor.timeout=600", # seconds till timeout 263 | "--output-dir", 264 | output_dir, 265 | "--output", 266 | output_fname, 267 | path, 268 | ] 269 | subprocess.check_call(args) 270 | nb = nbformat.read(fout.name, nbformat.current_nbformat) 271 | 272 | # Get all of the unexpected errors (logic: cell has output with an error 273 | # and no error is expected or the allowed/expected error is not the one which 274 | # was generated.) 275 | unexpected_errors = [ 276 | (output.evalue, c.source) 277 | for c in nb.cells 278 | if "outputs" in c 279 | for output in c.outputs 280 | if (output.output_type == "error") 281 | and ( 282 | ( 283 | (Test_notebooks._allowed_error_markup not in c.metadata) 284 | and (Test_notebooks._expected_error_markup not in c.metadata) 285 | ) 286 | or ( 287 | (Test_notebooks._allowed_error_markup in c.metadata) 288 | and ( 289 | c.metadata[Test_notebooks._allowed_error_markup] 290 | not in output.evalue 291 | ) 292 | ) 293 | or ( 294 | (Test_notebooks._expected_error_markup in c.metadata) 295 | and ( 296 | c.metadata[Test_notebooks._expected_error_markup] 297 | not in output.evalue 298 | ) 299 | ) 300 | ) 301 | ] 302 | 303 | no_unexpected_errors = True 304 | if unexpected_errors: 305 | no_unexpected_errors = False 306 | print("Cells with unexpected errors:\n_____________________________") 307 | for e, src in unexpected_errors: 308 | print(src) 309 | print("unexpected error: " + e) 310 | else: 311 | print("no unexpected errors") 312 | 313 | # Get all of the missing expected errors (logic: cell has output 314 | # but expected error was not generated.) 
315 | missing_expected_errors = [] 316 | for c in nb.cells: 317 | if Test_notebooks._expected_error_markup in c.metadata: 318 | missing_error = True 319 | if "outputs" in c: 320 | for output in c.outputs: 321 | if (output.output_type == "error") and ( 322 | c.metadata[Test_notebooks._expected_error_markup] 323 | in output.evalue 324 | ): 325 | missing_error = False 326 | if missing_error: 327 | missing_expected_errors.append( 328 | (c.metadata[Test_notebooks._expected_error_markup], c.source) 329 | ) 330 | 331 | no_missing_expected_errors = True 332 | if missing_expected_errors: 333 | no_missing_expected_errors = False 334 | print( 335 | "\nCells with missing expected errors:\n___________________________________" 336 | ) 337 | for e, src in missing_expected_errors: 338 | print(src) 339 | print("missing expected error: " + e) 340 | else: 341 | print("no missing expected errors") 342 | 343 | return no_unexpected_errors and no_missing_expected_errors 344 | 345 | def absolute_path_python(self, notebook_file_name): 346 | return os.path.abspath( 347 | os.path.join( 348 | os.path.dirname(os.path.abspath(__file__)), "..", notebook_file_name 349 | ) 350 | ) 351 | -------------------------------------------------------------------------------- /utilities.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import matplotlib.pyplot as plt 3 | 4 | popi_body_label = 0 5 | popi_air_label = 1 6 | popi_lung_label = 2 7 | 8 | 9 | def read_POPI_points(file_name): 10 | """ 11 | Read the Point-validated Pixel-based Breathing Thorax Model (POPI) landmark points file. 12 | The file is an ASCII file with X Y Z coordinates on each line; the first line is a header. 13 | 14 | Args: 15 | file_name: full path to the file. 16 | Returns: 17 | (list(tuple)): List of points as tuples. 18 | """ 19 | with open(file_name, "r") as fp: 20 | lines = fp.readlines() 21 | points = [] 22 | # The first line in the file is #X Y Z, which we ignore. 23 | for line in lines[1:]: 24 | coordinates = line.split() 25 | if coordinates: 26 | points.append( 27 | ( 28 | float(coordinates[0]), 29 | float(coordinates[1]), 30 | float(coordinates[2]), 31 | ) 32 | ) 33 | return points 34 | 35 | 36 | def point2str(point, precision=1): 37 | """ 38 | Format a point for printing, based on specified precision with trailing zeros. Uniform printing for vector-like data 39 | (tuple, numpy array, list). 40 | 41 | Args: 42 | point (vector-like): nD point with floating point coordinates. 43 | precision (int): Number of digits after the decimal point. 44 | Return: 45 | String representation of the given point "xx.xxx yy.yyy zz.zzz...". 46 | """ 47 | return " ".join(format(c, ".{0}f".format(precision)) for c in point) 48 | 49 | 50 | def uniform_random_points(bounds, num_points): 51 | """ 52 | Generate a random (uniform within bounds) nD point cloud. Dimension is based on the number of pairs in the bounds input. 53 | 54 | Args: 55 | bounds (list(tuple-like)): list where each tuple defines the coordinate bounds. 56 | num_points (int): number of points to generate. 57 | 58 | Returns: 59 | list containing num_points numpy arrays whose coordinates are within the given bounds. 60 | """ 61 | internal_bounds = [sorted(b) for b in bounds] 62 | # Generate rows for each of the coordinates according to the given bounds, stack into an array, 63 | # and split into a list of points.
64 | mat = np.vstack( 65 | [np.random.uniform(b[0], b[1], num_points) for b in internal_bounds] 66 | ) 67 | return list(mat[: len(bounds)].T) 68 | 69 | 70 | def target_registration_errors( 71 | tx, 72 | point_list, 73 | reference_point_list, 74 | display_errors=False, 75 | min_err=None, 76 | max_err=None, 77 | figure_size=(8, 6), 78 | ): 79 | """ 80 | Distances between points transformed by the given transformation and their 81 | location in another coordinate system. When the points are only used to 82 | evaluate registration accuracy (not used in the registration) this is the 83 | Target Registration Error (TRE). 84 | 85 | Args: 86 | tx (SimpleITK.Transform): The transform we want to evaluate. 87 | point_list (list(tuple-like)): Points in fixed image 88 | coordinate system. 89 | reference_point_list (list(tuple-like)): Points in moving image 90 | coordinate system. 91 | display_errors (boolean): Display a 3D figure with the points from 92 | point_list, with color corresponding to the error. 93 | min_err, max_err (float): color range is linearly stretched between min_err 94 | and max_err. If these values are not given then 95 | the range of errors computed from the data is used. 96 | figure_size (tuple): Figure size in inches. 97 | 98 | Returns: 99 | (errors) [float]: list of TRE values. 100 | """ 101 | transformed_point_list = [tx.TransformPoint(p) for p in point_list] 102 | 103 | errors = [ 104 | np.linalg.norm(np.array(p_fixed) - np.array(p_moving)) 105 | for p_fixed, p_moving in zip(transformed_point_list, reference_point_list) 106 | ] 107 | if display_errors: 108 | from mpl_toolkits.mplot3d import Axes3D 109 | import matplotlib.pyplot as plt 110 | import matplotlib 111 | 112 | fig = plt.figure(figsize=figure_size) 113 | ax = fig.add_subplot(111, projection="3d") 114 | if not min_err: 115 | min_err = np.min(errors) 116 | if not max_err: 117 | max_err = np.max(errors) 118 | 119 | collection = ax.scatter( 120 | list(np.array(point_list).T)[0], 121 | list(np.array(point_list).T)[1], 122 | list(np.array(point_list).T)[2], 123 | marker="o", 124 | c=errors, 125 | vmin=min_err, 126 | vmax=max_err, 127 | cmap=matplotlib.cm.hot, 128 | label="original points", 129 | ) 130 | plt.colorbar(collection, shrink=0.8) 131 | plt.title("registration errors in mm", x=0.7, y=1.05) 132 | ax.set_xlabel("X") 133 | ax.set_ylabel("Y") 134 | ax.set_zlabel("Z") 135 | plt.show() 136 | 137 | return errors 138 | 139 | 140 | def print_transformation_differences(tx1, tx2): 141 | """ 142 | Check whether two transformations are "equivalent" in an arbitrary spatial region 143 | either 3D or 2D, [x=(-10,10), y=(-100,100), z=(-1000,1000)]. This is just a sanity check, 144 | as we are just looking at the effect of the transformations on a random set of points in 145 | the region.
146 | """ 147 | if tx1.GetDimension() == 2 and tx2.GetDimension() == 2: 148 | bounds = [(-10, 10), (-100, 100)] 149 | elif tx1.GetDimension() == 3 and tx2.GetDimension() == 3: 150 | bounds = [(-10, 10), (-100, 100), (-1000, 1000)] 151 | else: 152 | raise ValueError( 153 | "Transformation dimensions mismatch, or unsupported transformation dimensionality" 154 | ) 155 | num_points = 10 156 | point_list = uniform_random_points(bounds, num_points) 157 | tx1_point_list = [tx1.TransformPoint(p) for p in point_list] 158 | differences = target_registration_errors(tx2, point_list, tx1_point_list) 159 | print( 160 | "Differences - min: {:.2f}, max: {:.2f}, mean: {:.2f}, std: {:.2f}".format( 161 | np.min(differences), 162 | np.max(differences), 163 | np.mean(differences), 164 | np.std(differences), 165 | ) 166 | ) 167 | 168 | 169 | def display_displacement_scaling_effect( 170 | s, original_x_mat, original_y_mat, tx, original_control_point_displacements 171 | ): 172 | """ 173 | This function displays the effects of the deformable transformation on a grid of points by scaling the 174 | initial displacements (either of control points for BSpline or the deformation field itself). It does 175 | assume that all points are contained in the range(-2.5,-2.5), (2.5,2.5). 176 | """ 177 | if tx.GetDimension() != 2: 178 | raise ValueError("display_displacement_scaling_effect only works in 2D") 179 | 180 | plt.scatter( 181 | original_x_mat, 182 | original_y_mat, 183 | marker="o", 184 | color="blue", 185 | label="original points", 186 | ) 187 | pointsX = [] 188 | pointsY = [] 189 | tx.SetParameters(s * original_control_point_displacements) 190 | 191 | for index, value in np.ndenumerate(original_x_mat): 192 | px, py = tx.TransformPoint((value, original_y_mat[index])) 193 | pointsX.append(px) 194 | pointsY.append(py) 195 | 196 | plt.scatter(pointsX, pointsY, marker="^", color="red", label="transformed points") 197 | plt.legend(loc=(0.25, 1.01)) 198 | plt.xlim((-2.5, 2.5)) 199 | plt.ylim((-2.5, 2.5)) 200 | 201 | 202 | def parameter_space_regular_grid_sampling(*transformation_parameters): 203 | """ 204 | Create a list representing a regular sampling of the parameter space. 205 | Args: 206 | *transformation_paramters : two or more numpy ndarrays representing parameter values. The order 207 | of the arrays should match the ordering of the SimpleITK transformation 208 | parameterization (e.g. Similarity2DTransform: scaling, rotation, tx, ty) 209 | Return: 210 | List of lists representing the regular grid sampling. 211 | 212 | Examples: 213 | #parameterization for 2D translation transform (tx,ty): [[1.0,1.0], [1.5,1.0], [2.0,1.0]] 214 | >>>> parameter_space_regular_grid_sampling(np.linspace(1.0,2.0,3), np.linspace(1.0,1.0,1)) 215 | """ 216 | return [ 217 | [p.item() for p in parameter_values] 218 | for parameter_values in np.nditer(np.meshgrid(*transformation_parameters)) 219 | ] 220 | 221 | 222 | def similarity3D_parameter_space_regular_sampling( 223 | thetaX, thetaY, thetaZ, tx, ty, tz, scale 224 | ): 225 | """ 226 | Create a list representing a regular sampling of the 3D similarity transformation parameter space. As the 227 | SimpleITK rotation parameterization uses the vector portion of a versor we don't have an 228 | intuitive way of specifying rotations. We therefor use the ZYX Euler angle parametrization and convert to 229 | versor. 230 | Args: 231 | thetaX, thetaY, thetaZ: numpy ndarrays with the Euler angle values to use. 232 | tx, ty, tz: numpy ndarrays with the translation values to use. 
233 | scale: numpy array with the scale values to use. 234 | Return: 235 | List of lists representing the parameter space sampling (vx,vy,vz,tx,ty,tz,s). 236 | """ 237 | return [ 238 | list(eul2quat(parameter_values[0], parameter_values[1], parameter_values[2])) 239 | + [p.item() for p in parameter_values[3:]] 240 | for parameter_values in np.nditer( 241 | np.meshgrid(thetaX, thetaY, thetaZ, tx, ty, tz, scale) 242 | ) 243 | ] 244 | 245 | 246 | def eul2quat(ax, ay, az, atol=1e-8): 247 | """ 248 | Translate between Euler angle (ZYX) order and quaternion representation of a rotation. 249 | Args: 250 | ax: X rotation angle in radians. 251 | ay: Y rotation angle in radians. 252 | az: Z rotation angle in radians. 253 | atol: tolerance used for stable quaternion computation (qs==0 within this tolerance). 254 | Return: 255 | Numpy array with three entries representing the vectorial component of the quaternion. 256 | 257 | """ 258 | # Create rotation matrix using ZYX Euler angles and then compute quaternion using entries. 259 | cx = np.cos(ax) 260 | cy = np.cos(ay) 261 | cz = np.cos(az) 262 | sx = np.sin(ax) 263 | sy = np.sin(ay) 264 | sz = np.sin(az) 265 | r = np.zeros((3, 3)) 266 | r[0, 0] = cz * cy 267 | r[0, 1] = cz * sy * sx - sz * cx 268 | r[0, 2] = cz * sy * cx + sz * sx 269 | 270 | r[1, 0] = sz * cy 271 | r[1, 1] = sz * sy * sx + cz * cx 272 | r[1, 2] = sz * sy * cx - cz * sx 273 | 274 | r[2, 0] = -sy 275 | r[2, 1] = cy * sx 276 | r[2, 2] = cy * cx 277 | 278 | # Compute quaternion: 279 | qs = 0.5 * np.sqrt(r[0, 0] + r[1, 1] + r[2, 2] + 1) 280 | qv = np.zeros(3) 281 | # If the scalar component of the quaternion is close to zero, we 282 | # compute the vector part using a numerically stable approach 283 | if np.isclose(qs, 0.0, atol=atol): 284 | i = np.argmax([r[0, 0], r[1, 1], r[2, 2]]) 285 | j = (i + 1) % 3 286 | k = (j + 1) % 3 287 | w = np.sqrt(r[i, i] - r[j, j] - r[k, k] + 1) 288 | qv[i] = 0.5 * w 289 | qv[j] = (r[i, j] + r[j, i]) / (2 * w) 290 | qv[k] = (r[i, k] + r[k, i]) / (2 * w) 291 | else: 292 | denom = 4 * qs 293 | qv[0] = (r[2, 1] - r[1, 2]) / denom 294 | qv[1] = (r[0, 2] - r[2, 0]) / denom 295 | qv[2] = (r[1, 0] - r[0, 1]) / denom 296 | return qv 297 | --------------------------------------------------------------------------------
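A note on the error markup that test_notebooks.py checks for: the following is a minimal sketch of what a marked-up code cell looks like in the raw notebook JSON. The cell source is hypothetical; only the metadata key and the sample message come from the test's docstring.

# Hypothetical nbformat v4 code cell whose raised error is accepted by
# dynamic_analysis, because the metadata value is a substring of the error message.
expected_error_cell = {
    "cell_type": "code",
    "execution_count": None,
    "metadata": {"simpleitk_error_expected": "Exception thrown in SimpleITK Show:"},
    "outputs": [],
    "source": "sitk.Show(image)  # raises when no external viewer is available",
}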
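The spell-check half of static_analysis augments the standard en_US dictionary with tests/additional_dictionary.txt, so the domain terms listed in that file are not flagged. A minimal sketch of the same setup, assuming it is run from the repository root; the sample text is made up.

from enchant import DictWithPWL
from enchant.checker import SpellChecker
from enchant.tokenize import EmailFilter, URLFilter

# en_US plus the project's personal word list; the relative path is an assumption.
project_dictionary = DictWithPWL("en_US", "tests/additional_dictionary.txt")
checker = SpellChecker(project_dictionary, filters=[EmailFilter, URLFilter])

# "SimpleITK" and "resampling" are in the word list and pass;
# the deliberately misspelled word is reported.
checker.set_text("SimpleITK resampling avoids alising artifacts")
for error in checker:
    print(error.word, checker.suggest())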
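As read_POPI_points documents, a POPI landmark file is plain ASCII with one X Y Z triplet per line and a single header line that is skipped. A runnable sketch with made-up coordinates, assuming utilities.py is importable from the repository root:

import tempfile
from utilities import read_POPI_points

# Write a tiny made-up landmark file and read it back; the header line is skipped.
with tempfile.NamedTemporaryFile("w", suffix=".pts", delete=False) as f:
    f.write("#X Y Z\n12.3 45.6 78.9\n10.0 40.2 75.1\n")
print(read_POPI_points(f.name))  # [(12.3, 45.6, 78.9), (10.0, 40.2, 75.1)]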
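target_registration_errors computes, for each point, the distance between its transformed location and the corresponding reference point. A quick sketch, again assuming utilities.py is importable: with a pure translation and the untransformed points used as the reference, every error equals the translation magnitude.

import numpy as np
import SimpleITK as sitk
from utilities import uniform_random_points, target_registration_errors

# Hypothetical evaluation region and a translation of magnitude ||(1,2,2)|| = 3.
points = [tuple(p) for p in uniform_random_points([(-10, 10), (-10, 10), (-10, 10)], 5)]
tx = sitk.TranslationTransform(3, (1.0, 2.0, 2.0))

errors = target_registration_errors(tx, points, points)
print(np.allclose(errors, 3.0))  # True
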
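eul2quat returns only the vector part of the unit quaternion, which is the rotation parameterization the versor-based SimpleITK transforms use. A small sanity check (same import assumption as above): a 90 degree rotation about Z has unit quaternion (cos 45°; 0, 0, sin 45°), so the vector part is approximately (0, 0, 0.7071).

import numpy as np
from utilities import eul2quat, similarity3D_parameter_space_regular_sampling

print(eul2quat(0.0, 0.0, np.pi / 2))  # ~[0. 0. 0.70710678]

# A two-sample grid over the similarity parameter space (vx,vy,vz,tx,ty,tz,s):
# only thetaX varies, translations are zero, scale is one.
samples = similarity3D_parameter_space_regular_sampling(
    np.linspace(0.0, np.pi / 4, 2),         # thetaX
    np.zeros(1), np.zeros(1),               # thetaY, thetaZ
    np.zeros(1), np.zeros(1), np.zeros(1),  # tx, ty, tz
    np.ones(1),                             # scale
)
print(len(samples), len(samples[0]))  # 2 7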