├── .github
└── ISSUE_TEMPLATE
│ ├── bug_report.md
│ ├── feature_request.md
│ └── questions_help_support.md
├── .gitignore
├── CMakeLists.txt
├── LICENSE
├── README.md
├── Screenshots
├── Icon
│ ├── bias.png
│ ├── ghosting.png
│ ├── motion.png
│ ├── script.py
│ └── spike.png
├── TorchIO.png
├── usage_1.png
├── usage_2.png
├── usage_3.png
└── usage_4.png
├── TorchIO.png
├── TorchIOModule
├── CMakeLists.txt
├── Resources
│ └── Icons
│ │ └── TorchIOModule.png
├── Testing
│ ├── CMakeLists.txt
│ └── Python
│ │ └── CMakeLists.txt
└── TorchIOModule.py
└── TorchIOTransforms
├── CMakeLists.txt
├── Resources
└── Icons
│ ├── TorchIOTransforms.png
│ └── TorchIOTransforms_old.png
├── Testing
├── CMakeLists.txt
└── Python
│ └── CMakeLists.txt
├── TorchIOTransforms.py
└── TorchIOTransformsLib
├── CoordinatesWidget.py
├── HistogramStandardization.py
├── RandomAffine.py
├── RandomAnisotropy.py
├── RandomBiasField.py
├── RandomBlur.py
├── RandomElasticDeformation.py
├── RandomGamma.py
├── RandomGhosting.py
├── RandomMotion.py
├── RandomSpike.py
├── Transform.py
└── __init__.py
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F41B Bug Report"
3 | about: Create a report to help us improve this extension
4 | title: ''
5 | labels: bug
6 | assignees: ''
7 |
8 | ---
9 |
10 |
11 | **🐛 Bug**
12 |
13 |
14 |
15 | **To reproduce**
16 |
17 |
18 |
19 |
20 |
21 | **Expected behavior**
22 |
23 |
24 |
25 |
26 | **Actual behavior**
27 |
28 |
29 |
30 |
31 |
32 | ```python-traceback
33 | # Paste the whole error stack trace from the Python console here
34 | ```
35 |
36 | **Slicer version**
37 |
38 |
39 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "\U0001F680 Feature Request"
3 | about: Submit a proposal/request for a new feature
4 | title: ''
5 | labels: 'enhancement'
6 | assignees: ''
7 |
8 | ---
9 |
10 | **🚀 Feature**
11 |
12 |
13 | **Motivation**
14 |
15 |
16 |
17 | **Pitch**
18 |
19 |
20 |
21 | **Alternatives**
22 |
23 |
24 |
25 | **Additional context**
26 |
27 |
28 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/questions_help_support.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: "❓Questions/Help/Support"
3 | about: Do you need support? Use the Discussions tab instead of opening an issue.
4 | title: ''
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | For questions relating to usage, please do not create an issue.
11 |
12 | You can post on the [Discussions tab](https://github.com/fepegar/SlicerTorchIO/discussions) instead.
13 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | pip-wheel-metadata/
24 | share/python-wheels/
25 | *.egg-info/
26 | .installed.cfg
27 | *.egg
28 | MANIFEST
29 |
30 | # PyInstaller
31 | # Usually these files are written by a python script from a template
32 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
33 | *.manifest
34 | *.spec
35 |
36 | # Installer logs
37 | pip-log.txt
38 | pip-delete-this-directory.txt
39 |
40 | # Unit test / coverage reports
41 | htmlcov/
42 | .tox/
43 | .nox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | *.py,cover
51 | .hypothesis/
52 | .pytest_cache/
53 |
54 | # Translations
55 | *.mo
56 | *.pot
57 |
58 | # Django stuff:
59 | *.log
60 | local_settings.py
61 | db.sqlite3
62 | db.sqlite3-journal
63 |
64 | # Flask stuff:
65 | instance/
66 | .webassets-cache
67 |
68 | # Scrapy stuff:
69 | .scrapy
70 |
71 | # Sphinx documentation
72 | docs/_build/
73 |
74 | # PyBuilder
75 | target/
76 |
77 | # Jupyter Notebook
78 | .ipynb_checkpoints
79 |
80 | # IPython
81 | profile_default/
82 | ipython_config.py
83 |
84 | # pyenv
85 | .python-version
86 |
87 | # pipenv
88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies
90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not
91 | # install all needed dependencies.
92 | #Pipfile.lock
93 |
94 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow
95 | __pypackages__/
96 |
97 | # Celery stuff
98 | celerybeat-schedule
99 | celerybeat.pid
100 |
101 | # SageMath parsed files
102 | *.sage.py
103 |
104 | # Environments
105 | .env
106 | .venv
107 | env/
108 | venv/
109 | ENV/
110 | env.bak/
111 | venv.bak/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 | .dmypy.json
126 | dmypy.json
127 |
128 | # Pyre type checker
129 | .pyre/
130 |
131 | .vscode
132 |
133 | .DS_Store
134 |
135 |
--------------------------------------------------------------------------------
/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | cmake_minimum_required(VERSION 3.13.4)
2 |
3 | project(TorchIO)
4 |
5 | #-----------------------------------------------------------------------------
6 | # Extension meta-information
7 | set(EXTENSION_HOMEPAGE "https://torchio.readthedocs.io/interfaces/slicer.html")
8 | set(EXTENSION_CATEGORY "Utilities")
9 | set(EXTENSION_CONTRIBUTORS "Fernando Pérez-García (University College London and King's College London)")
10 | set(EXTENSION_DESCRIPTION "TorchIO is a Python package containing a set of tools to efficiently read, preprocess, sample, augment, and write 3D medical images in deep learning applications written in PyTorch, including intensity and spatial transforms for data augmentation and preprocessing. Transforms include typical computer vision operations such as random affine transformations and also domain-specific ones such as simulation of intensity artifacts due to MRI magnetic field inhomogeneity or k-space motion artifacts.")
11 | set(EXTENSION_ICONURL "https://raw.githubusercontent.com/fepegar/SlicerTorchIO/master/TorchIOTransforms/Resources/Icons/TorchIOTransforms.png")
12 | set(EXTENSION_SCREENSHOTURLS "https://raw.githubusercontent.com/fepegar/SlicerTorchIO/master/Screenshots/TorchIO.png")
13 | set(EXTENSION_DEPENDS "PyTorch") # Specified as a list or "NA" if no dependencies
14 |
15 | #-----------------------------------------------------------------------------
16 | # Extension dependencies
17 | find_package(Slicer REQUIRED)
18 | include(${Slicer_USE_FILE})
19 |
20 | #-----------------------------------------------------------------------------
21 | # Extension modules
22 | add_subdirectory(TorchIOTransforms)
23 | add_subdirectory(TorchIOModule)
24 | ## NEXT_MODULE
25 |
26 | #-----------------------------------------------------------------------------
27 | include(${Slicer_EXTENSION_GENERATE_CONFIG})
28 | include(${Slicer_EXTENSION_CPACK})
29 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | For more information, please see:
3 |
4 | http://www.slicer.org
5 |
6 | The 3D Slicer license below is a BSD style license, with extensions
7 | to cover contributions and other issues specific to 3D Slicer.
8 |
9 |
10 | 3D Slicer Contribution and Software License Agreement ("Agreement")
11 | Version 1.0 (December 20, 2005)
12 |
13 | This Agreement covers contributions to and downloads from the 3D
14 | Slicer project ("Slicer") maintained by The Brigham and Women's
15 | Hospital, Inc. ("Brigham"). Part A of this Agreement applies to
16 | contributions of software and/or data to Slicer (including making
17 | revisions of or additions to code and/or data already in Slicer). Part
18 | B of this Agreement applies to downloads of software and/or data from
19 | Slicer. Part C of this Agreement applies to all transactions with
20 | Slicer. If you distribute Software (as defined below) downloaded from
21 | Slicer, all of the paragraphs of Part B of this Agreement must be
22 | included with and apply to such Software.
23 |
24 | Your contribution of software and/or data to Slicer (including prior
25 | to the date of the first publication of this Agreement, each a
26 | "Contribution") and/or downloading, copying, modifying, displaying,
27 | distributing or use of any software and/or data from Slicer
28 | (collectively, the "Software") constitutes acceptance of all of the
29 | terms and conditions of this Agreement. If you do not agree to such
30 | terms and conditions, you have no right to contribute your
31 | Contribution, or to download, copy, modify, display, distribute or use
32 | the Software.
33 |
34 | PART A. CONTRIBUTION AGREEMENT - License to Brigham with Right to
35 | Sublicense ("Contribution Agreement").
36 |
37 | 1. As used in this Contribution Agreement, "you" means the individual
38 | contributing the Contribution to Slicer and the institution or
39 | entity which employs or is otherwise affiliated with such
40 | individual in connection with such Contribution.
41 |
42 | 2. This Contribution Agreement applies to all Contributions made to
43 | Slicer, including without limitation Contributions made prior to
44 | the date of first publication of this Agreement. If at any time you
45 | make a Contribution to Slicer, you represent that (i) you are
46 | legally authorized and entitled to make such Contribution and to
47 | grant all licenses granted in this Contribution Agreement with
48 | respect to such Contribution; (ii) if your Contribution includes
49 | any patient data, all such data is de-identified in accordance with
50 | U.S. confidentiality and security laws and requirements, including
51 | but not limited to the Health Insurance Portability and
52 | Accountability Act (HIPAA) and its regulations, and your disclosure
53 | of such data for the purposes contemplated by this Agreement is
54 | properly authorized and in compliance with all applicable laws and
55 | regulations; and (iii) you have preserved in the Contribution all
56 | applicable attributions, copyright notices and licenses for any
57 | third party software or data included in the Contribution.
58 |
59 | 3. Except for the licenses granted in this Agreement, you reserve all
60 | right, title and interest in your Contribution.
61 |
62 | 4. You hereby grant to Brigham, with the right to sublicense, a
63 | perpetual, worldwide, non-exclusive, no charge, royalty-free,
64 | irrevocable license to use, reproduce, make derivative works of,
65 | display and distribute the Contribution. If your Contribution is
66 | protected by patent, you hereby grant to Brigham, with the right to
67 | sublicense, a perpetual, worldwide, non-exclusive, no-charge,
68 | royalty-free, irrevocable license under your interest in patent
69 | rights covering the Contribution, to make, have made, use, sell and
70 | otherwise transfer your Contribution, alone or in combination with
71 | any other code.
72 |
73 | 5. You acknowledge and agree that Brigham may incorporate your
74 | Contribution into Slicer and may make Slicer available to members
75 | of the public on an open source basis under terms substantially in
76 | accordance with the Software License set forth in Part B of this
77 | Agreement. You further acknowledge and agree that Brigham shall
78 | have no liability arising in connection with claims resulting from
79 | your breach of any of the terms of this Agreement.
80 |
81 | 6. YOU WARRANT THAT TO THE BEST OF YOUR KNOWLEDGE YOUR CONTRIBUTION
82 | DOES NOT CONTAIN ANY CODE THAT REQUIRES OR PRESCRIBES AN "OPEN
83 | SOURCE LICENSE" FOR DERIVATIVE WORKS (by way of non-limiting
84 | example, the GNU General Public License or other so-called
85 | "reciprocal" license that requires any derived work to be licensed
86 | under the GNU General Public License or other "open source
87 | license").
88 |
89 | PART B. DOWNLOADING AGREEMENT - License from Brigham with Right to
90 | Sublicense ("Software License").
91 |
92 | 1. As used in this Software License, "you" means the individual
93 | downloading and/or using, reproducing, modifying, displaying and/or
94 | distributing the Software and the institution or entity which
95 | employs or is otherwise affiliated with such individual in
96 | connection therewith. The Brigham and Women's Hospital,
97 | Inc. ("Brigham") hereby grants you, with right to sublicense, with
98 | respect to Brigham's rights in the software, and data, if any,
99 | which is the subject of this Software License (collectively, the
100 | "Software"), a royalty-free, non-exclusive license to use,
101 | reproduce, make derivative works of, display and distribute the
102 | Software, provided that:
103 |
104 | (a) you accept and adhere to all of the terms and conditions of this
105 | Software License;
106 |
107 | (b) in connection with any copy of or sublicense of all or any portion
108 | of the Software, all of the terms and conditions in this Software
109 | License shall appear in and shall apply to such copy and such
110 | sublicense, including without limitation all source and executable
111 | forms and on any user documentation, prefaced with the following
112 | words: "All or portions of this licensed product (such portions are
113 | the "Software") have been obtained under license from The Brigham and
114 | Women's Hospital, Inc. and are subject to the following terms and
115 | conditions:"
116 |
117 | (c) you preserve and maintain all applicable attributions, copyright
118 | notices and licenses included in or applicable to the Software;
119 |
120 | (d) modified versions of the Software must be clearly identified and
121 | marked as such, and must not be misrepresented as being the original
122 | Software; and
123 |
124 | (e) you consider making, but are under no obligation to make, the
125 | source code of any of your modifications to the Software freely
126 | available to others on an open source basis.
127 |
128 | 2. The license granted in this Software License includes without
129 | limitation the right to (i) incorporate the Software into
130 | proprietary programs (subject to any restrictions applicable to
131 | such programs), (ii) add your own copyright statement to your
132 | modifications of the Software, and (iii) provide additional or
133 | different license terms and conditions in your sublicenses of
134 | modifications of the Software; provided that in each case your use,
135 | reproduction or distribution of such modifications otherwise
136 | complies with the conditions stated in this Software License.
137 |
138 | 3. This Software License does not grant any rights with respect to
139 | third party software, except those rights that Brigham has been
140 | authorized by a third party to grant to you, and accordingly you
141 | are solely responsible for (i) obtaining any permissions from third
142 | parties that you need to use, reproduce, make derivative works of,
143 | display and distribute the Software, and (ii) informing your
144 | sublicensees, including without limitation your end-users, of their
145 | obligations to secure any such required permissions.
146 |
147 | 4. The Software has been designed for research purposes only and has
148 | not been reviewed or approved by the Food and Drug Administration
149 | or by any other agency. YOU ACKNOWLEDGE AND AGREE THAT CLINICAL
150 | APPLICATIONS ARE NEITHER RECOMMENDED NOR ADVISED. Any
151 | commercialization of the Software is at the sole risk of the party
152 | or parties engaged in such commercialization. You further agree to
153 | use, reproduce, make derivative works of, display and distribute
154 | the Software in compliance with all applicable governmental laws,
155 | regulations and orders, including without limitation those relating
156 | to export and import control.
157 |
158 | 5. The Software is provided "AS IS" and neither Brigham nor any
159 | contributor to the software (each a "Contributor") shall have any
160 | obligation to provide maintenance, support, updates, enhancements
161 | or modifications thereto. BRIGHAM AND ALL CONTRIBUTORS SPECIFICALLY
162 | DISCLAIM ALL EXPRESS AND IMPLIED WARRANTIES OF ANY KIND INCLUDING,
163 | BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR
164 | A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
165 | BRIGHAM OR ANY CONTRIBUTOR BE LIABLE TO ANY PARTY FOR DIRECT,
166 | INDIRECT, SPECIAL, INCIDENTAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES
167 | HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY ARISING IN ANY WAY
168 | RELATED TO THE SOFTWARE, EVEN IF BRIGHAM OR ANY CONTRIBUTOR HAS
169 | BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. TO THE MAXIMUM
170 | EXTENT NOT PROHIBITED BY LAW OR REGULATION, YOU FURTHER ASSUME ALL
171 | LIABILITY FOR YOUR USE, REPRODUCTION, MAKING OF DERIVATIVE WORKS,
172 | DISPLAY, LICENSE OR DISTRIBUTION OF THE SOFTWARE AND AGREE TO
173 | INDEMNIFY AND HOLD HARMLESS BRIGHAM AND ALL CONTRIBUTORS FROM AND
174 | AGAINST ANY AND ALL CLAIMS, SUITS, ACTIONS, DEMANDS AND JUDGMENTS
175 | ARISING THEREFROM.
176 |
177 | 6. None of the names, logos or trademarks of Brigham or any of
178 | Brigham's affiliates or any of the Contributors, or any funding
179 | agency, may be used to endorse or promote products produced in
180 | whole or in part by operation of the Software or derived from or
181 | based on the Software without specific prior written permission
182 | from the applicable party.
183 |
184 | 7. Any use, reproduction or distribution of the Software which is not
185 | in accordance with this Software License shall automatically revoke
186 | all rights granted to you under this Software License and render
187 | Paragraphs 1 and 2 of this Software License null and void.
188 |
189 | 8. This Software License does not grant any rights in or to any
190 | intellectual property owned by Brigham or any Contributor except
191 | those rights expressly granted hereunder.
192 |
193 | PART C. MISCELLANEOUS
194 |
195 | This Agreement shall be governed by and construed in accordance with
196 | the laws of The Commonwealth of Massachusetts without regard to
197 | principles of conflicts of law. This Agreement shall supercede and
198 | replace any license terms that you may have agreed to previously with
199 | respect to Slicer.
200 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 | > *Tools like TorchIO are a symptom of the maturation of medical AI research using deep learning techniques*.
10 |
11 | Jack Clark, Policy Director
12 | at [OpenAI](https://openai.com/) ([link](https://jack-clark.net/2020/03/17/)).
13 |
14 | ---
15 |
16 | This repository contains the code for a [3D Slicer](https://www.slicer.org/)
17 | extension that can be used to experiment with the
18 | [TorchIO](https://torchio.readthedocs.io/) Python package without any coding.
19 |
20 | More information on the extension can be found on the [TorchIO documentation](https://torchio.readthedocs.io/interfaces/index.html#d-slicer-gui).
21 |
22 | Please use the **Preview Release** of 3D Slicer.
23 |
24 |
25 |
121 |
122 |
123 | ## Installation
124 |
125 | The extension can be installed using
126 | [Extensions Manager](https://www.slicer.org/wiki/Documentation/4.10/SlicerApplication/ExtensionsManager).
127 |
128 | ## Continuous integration
129 |
130 | The build status can be checked on [CDash](https://slicer.cdash.org/index.php?project=SlicerPreview&filtercount=1&showfilters=1&field1=buildname&compare1=63&value1=torchio).
131 |
132 | ## Credits
133 |
134 | If you like this repository, please click on Star!
135 |
136 | If you use this tool for your research, please cite our paper:
137 |
138 | [F. Pérez-García, R. Sparks, and S. Ourselin. *TorchIO: a Python library for efficient loading, preprocessing, augmentation and patch-based sampling of medical images in deep learning*. Computer Methods and Programs in Biomedicine (June 2021), p. 106236. ISSN: 0169-2607. doi: 10.1016/j.cmpb.2021.106236.](https://doi.org/10.1016/j.cmpb.2021.106236)
139 |
140 | BibTeX entry:
141 |
142 | ```bibtex
143 | @article{perez-garcia_torchio_2021,
144 | title = {TorchIO: a Python library for efficient loading, preprocessing, augmentation and patch-based sampling of medical images in deep learning},
145 | journal = {Computer Methods and Programs in Biomedicine},
146 | pages = {106236},
147 | year = {2021},
148 | issn = {0169-2607},
149 | doi = {https://doi.org/10.1016/j.cmpb.2021.106236},
150 | url = {https://www.sciencedirect.com/science/article/pii/S0169260721003102},
151 | author = {P{\'e}rez-Garc{\'i}a, Fernando and Sparks, Rachel and Ourselin, S{\'e}bastien},
152 | }
153 | ```
154 |
155 | This project is supported by the following institutions:
156 |
157 | - [Engineering and Physical Sciences Research Council (EPSRC) & UK Research and Innovation (UKRI)](https://epsrc.ukri.org/)
158 | - [EPSRC Centre for Doctoral Training in Intelligent, Integrated Imaging In Healthcare (i4health)](https://www.ucl.ac.uk/intelligent-imaging-healthcare/) (University College London)
159 | - [Wellcome / EPSRC Centre for Interventional and Surgical Sciences (WEISS), University College London (UCL)](https://www.ucl.ac.uk/interventional-surgical-sciences/) (University College London)
160 | - [School of Biomedical Engineering & Imaging Sciences (BMEIS), King's College London](https://www.kcl.ac.uk/bmeis) (King's College London)
161 |
--------------------------------------------------------------------------------
/Screenshots/Icon/bias.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/Icon/bias.png
--------------------------------------------------------------------------------
/Screenshots/Icon/ghosting.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/Icon/ghosting.png
--------------------------------------------------------------------------------
/Screenshots/Icon/motion.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/Icon/motion.png
--------------------------------------------------------------------------------
/Screenshots/Icon/script.py:
--------------------------------------------------------------------------------
"""Compose four transform screenshots into a single square mosaic icon.

Each quadrant shows a different TorchIO transform preview:
NW = motion, NE = ghosting, SW = spike, SE = bias.
"""
import numpy as np
from PIL import Image

nw = np.array(Image.open('motion.png'))
ne = np.array(Image.open('ghosting.png'))
sw = np.array(Image.open('spike.png'))
se = np.array(Image.open('bias.png'))

result = nw.copy()
si, sj, _ = result.shape
sih = si // 2       # row where the top/bottom halves meet
sjh = sj // 2 + 28  # column where left/right halves meet (offset chosen empirically)

result[sih:] = sw[sih:]              # bottom half from spike
result[:, sjh:] = ne[:, sjh:]        # right half from ghosting (was assigned twice in the original)
result[sih:, sjh:] = se[sih:, sjh:]  # bottom-right quadrant from bias

result = result[:, 80:-74]  # crop columns to make the image square
image = Image.fromarray(result)
image = image.resize((128, 128), Image.LANCZOS)
image.save('mosaic.png')
--------------------------------------------------------------------------------
/Screenshots/Icon/spike.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/Icon/spike.png
--------------------------------------------------------------------------------
/Screenshots/TorchIO.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/TorchIO.png
--------------------------------------------------------------------------------
/Screenshots/usage_1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/usage_1.png
--------------------------------------------------------------------------------
/Screenshots/usage_2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/usage_2.png
--------------------------------------------------------------------------------
/Screenshots/usage_3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/usage_3.png
--------------------------------------------------------------------------------
/Screenshots/usage_4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/Screenshots/usage_4.png
--------------------------------------------------------------------------------
/TorchIO.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/TorchIO.png
--------------------------------------------------------------------------------
/TorchIOModule/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | #-----------------------------------------------------------------------------
2 | set(MODULE_NAME TorchIOModule)
3 |
4 | #-----------------------------------------------------------------------------
5 | set(MODULE_PYTHON_SCRIPTS
6 | ${MODULE_NAME}.py
7 | )
8 |
9 | set(MODULE_PYTHON_RESOURCES
10 | Resources/Icons/${MODULE_NAME}.png
11 | )
12 |
13 | #-----------------------------------------------------------------------------
14 | slicerMacroBuildScriptedModule(
15 | NAME ${MODULE_NAME}
16 | SCRIPTS ${MODULE_PYTHON_SCRIPTS}
17 | RESOURCES ${MODULE_PYTHON_RESOURCES}
18 | WITH_GENERIC_TESTS
19 | )
20 |
21 | #-----------------------------------------------------------------------------
22 | if(BUILD_TESTING)
23 |
24 | # Register the unittest subclass in the main script as a ctest.
25 | # Note that the test will also be available at runtime.
26 | slicer_add_python_unittest(SCRIPT ${MODULE_NAME}.py)
27 |
28 | # Additional build-time testing
29 | add_subdirectory(Testing)
30 | endif()
31 |
--------------------------------------------------------------------------------
/TorchIOModule/Resources/Icons/TorchIOModule.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/TorchIOModule/Resources/Icons/TorchIOModule.png
--------------------------------------------------------------------------------
/TorchIOModule/Testing/CMakeLists.txt:
--------------------------------------------------------------------------------
1 | add_subdirectory(Python)
2 |
--------------------------------------------------------------------------------
/TorchIOModule/Testing/Python/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | #slicer_add_python_unittest(SCRIPT ${MODULE_NAME}ModuleTest.py)
3 |
--------------------------------------------------------------------------------
/TorchIOModule/TorchIOModule.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from contextlib import contextmanager
3 |
4 | import numpy as np
5 | import SimpleITK as sitk
6 |
7 | import qt, slicer
8 | import sitkUtils as su
9 | from slicer.ScriptedLoadableModule import (
10 | ScriptedLoadableModule,
11 | ScriptedLoadableModuleLogic,
12 | )
13 |
14 | import PyTorchUtils
15 |
16 |
17 | MRML_LABEL = 'vtkMRMLLabelMapVolumeNode'
18 | MRML_SCALAR = 'vtkMRMLScalarVolumeNode'
19 |
20 |
class TorchIOModule(ScriptedLoadableModule):
    """Abstract parent module holding the metadata shared by TorchIO modules."""

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = 'TorchIO Abstract Module'
        self.parent.categories = []
        self.parent.dependencies = []
        self.parent.contributors = [
            "Fernando Perez-Garcia (University College London and King's College London)"
        ]
        self.parent.helpText = (
            'This module can be used to quickly visualize the effect of each'
            ' transform parameter. That way, users can have an intuitive feeling of'
            ' what the output of a transform looks like without any coding at all.\n\n'
        )
        self.parent.helpText += self.getDefaultModuleDocumentationLink()
        self.parent.acknowledgementText = (
            'This work was was funded by the Engineering and Physical Sciences'
            ' Research Council (EPSRC) and supported by the UCL Centre for Doctoral'
            ' Training in Intelligent, Integrated Imaging in Healthcare, the UCL'
            ' Wellcome / EPSRC Centre for Interventional and Surgical Sciences (WEISS),'
            ' and the School of Biomedical Engineering & Imaging Sciences (BMEIS)'
            " of King's College London."
        )

    def getDefaultModuleDocumentationLink(self):
        """Return an HTML link to the TorchIO Slicer documentation.

        The original defined ``docsUrl`` but never used it and formatted
        ``linkText`` as an f-string with no placeholder, so the help text
        contained no actual link; embed the URL as intended.
        """
        docsUrl = 'https://torchio.readthedocs.io/slicer.html'
        linkText = f'See <a href="{docsUrl}">the documentation</a> for more information.'
        return linkText
50 |
51 |
class TorchIOModuleLogic(ScriptedLoadableModuleLogic):
    """Shared logic for TorchIO modules.

    Handles lazy import/installation of the ``torchio`` package and
    conversion between MRML volume nodes and TorchIO images.
    """

    def __init__(self):
        # Initialize the base logic class (the original skipped this call)
        ScriptedLoadableModuleLogic.__init__(self)
        self._torchio = None  # cached torchio module; imported lazily on first use
        self.torchLogic = PyTorchUtils.PyTorchUtilsLogic()

    @property
    def torchio(self):
        """The torchio module, imported (and installed if needed) on first access."""
        if self._torchio is None:
            logging.info('Importing torchio...')
            self._torchio = self.importTorchIO()
        return self._torchio

    def importTorchIO(self):
        """Import torchio, installing PyTorch and TorchIO first if necessary.

        Returns the imported module, or None if the user declined the
        PyTorch installation.
        """
        if not self.torchLogic.torchInstalled():
            logging.info('PyTorch module not found')
            torch = self.torchLogic.installTorch(askConfirmation=True)
            if torch is None:
                slicer.util.errorDisplay(
                    'PyTorch needs to be installed to use the TorchIO extension.'
                    ' Please reload this module to install PyTorch.'
                )
                return
        try:
            import torchio
        except ModuleNotFoundError:
            # Show the console so the user can watch pip's progress
            with self.showWaitCursor(), self.peakPythonConsole():
                torchio = self.installTorchIO()
        logging.info(f'TorchIO {torchio.__version__} imported correctly')
        return torchio

    @staticmethod
    def installTorchIO(confirm=True):
        """pip-install torchio, asking for confirmation unless testing.

        Returns the imported module, or None if the user aborted.
        """
        if confirm and not slicer.app.commandOptions().testingEnabled:
            install = slicer.util.confirmOkCancelDisplay(
                'TorchIO will be downloaded and installed now. The process might take some minutes.'
            )
            if not install:
                logging.info('Installation of TorchIO aborted by user')
                return None
        slicer.util.pip_install('torchio')
        import torchio
        logging.info(f'TorchIO {torchio.__version__} installed correctly')
        return torchio

    def getTorchIOImageFromVolumeNode(self, volumeNode):
        """Convert an MRML volume node into a tio.ScalarImage or tio.LabelMap.

        Raises ValueError for unsupported node types (the original left
        ``class_`` undefined, producing a confusing NameError).
        """
        image = su.PullVolumeFromSlicer(volumeNode)
        tio = self.torchio
        # Check the label-map type first: vtkMRMLLabelMapVolumeNode derives
        # from vtkMRMLScalarVolumeNode, so the original scalar-first check
        # misclassified label maps as scalar images (and cast them to float).
        if volumeNode.IsA('vtkMRMLLabelMapVolumeNode'):
            class_ = tio.LabelMap
        elif volumeNode.IsA('vtkMRMLScalarVolumeNode'):
            # Cast so intensity transforms also work on integer volumes
            image = sitk.Cast(image, sitk.sitkFloat32)
            class_ = tio.ScalarImage
        else:
            raise ValueError(f'Unsupported volume node type: {volumeNode.GetClassName()}')
        tensor, affine = tio.io.sitk_to_nib(image)
        return class_(tensor=tensor, affine=affine)

    def getVolumeNodeFromTorchIOImage(self, image, outputVolumeNode=None):
        """Push a TorchIO image to Slicer, creating a node if none is given.

        Returns the node the image was pushed to. The original discarded
        PushVolumeToSlicer's return value and therefore returned None
        whenever no output node was supplied.
        """
        tio = self.torchio
        if outputVolumeNode is None:
            className = MRML_LABEL if isinstance(image, tio.LabelMap) else MRML_SCALAR
            kwargs = {'className': className}
        else:
            kwargs = {'targetNode': outputVolumeNode}
        return su.PushVolumeToSlicer(image.as_sitk(), **kwargs)

    def getPythonConsoleWidget(self):
        """Return the widget wrapping Slicer's Python console."""
        return slicer.util.mainWindow().pythonConsole().parent()

    @contextmanager
    def peakPythonConsole(self, show=True):
        """Temporarily show the Python console, restoring its visibility after."""
        if slicer.app.testingEnabled():
            show = False
        if show:
            console = self.getPythonConsoleWidget()
            pythonVisible = console.visible
            console.setVisible(True)
        try:
            yield
        finally:
            # Restore visibility even if the wrapped code raises
            if show:
                console.setVisible(pythonVisible)

    @contextmanager
    def showWaitCursor(self, show=True):
        """Show a wait cursor while the wrapped code runs."""
        if show:
            qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)
        try:
            yield
        finally:
            # Always restore the cursor, even on error
            if show:
                qt.QApplication.restoreOverrideCursor()

    def getNodesFromSubject(self, subject):
        """Push every image of a tio.Subject to Slicer; return a name -> node dict."""
        nodes = {}
        for name, image in subject.get_images_dict(intensity_only=False).items():
            # The original passed an unsupported `name` keyword argument
            # (TypeError); push first, then name the resulting node.
            node = self.getVolumeNodeFromTorchIOImage(image)
            node.SetName(name)
            nodes[name] = node
        return nodes

    def getColin(self, version=1998):
        """Load the Colin 27 atlas (1998 or 2008 version) and set up slice views."""
        colin = self.torchio.datasets.Colin27(version=version)
        nodes = self.getNodesFromSubject(colin)
        if version == 1998:
            slicer.util.setSliceViewerLayers(
                background=nodes['t1'],
                label=nodes['brain'],
            )
        elif version == 2008:
            slicer.util.setSliceViewerLayers(
                background=nodes['t1'],
                foreground=nodes['t2'],
                label=nodes['cls'],
            )
--------------------------------------------------------------------------------
/TorchIOTransforms/CMakeLists.txt:
--------------------------------------------------------------------------------
#-----------------------------------------------------------------------------
set(MODULE_NAME TorchIOTransforms)

#-----------------------------------------------------------------------------
# Python sources installed with the module: the scripted-module entry point
# plus the TorchIOTransformsLib package (one file per transform GUI wrapper).
set(MODULE_PYTHON_SCRIPTS
  ${MODULE_NAME}.py
  ${MODULE_NAME}Lib/__init__
  ${MODULE_NAME}Lib/CoordinatesWidget
  ${MODULE_NAME}Lib/HistogramStandardization
  ${MODULE_NAME}Lib/RandomAffine
  ${MODULE_NAME}Lib/RandomGamma
  ${MODULE_NAME}Lib/RandomBlur
  ${MODULE_NAME}Lib/RandomBiasField
  ${MODULE_NAME}Lib/RandomAnisotropy
  ${MODULE_NAME}Lib/RandomElasticDeformation
  ${MODULE_NAME}Lib/RandomGhosting
  ${MODULE_NAME}Lib/RandomMotion
  ${MODULE_NAME}Lib/RandomSpike
  ${MODULE_NAME}Lib/Transform
  )

set(MODULE_PYTHON_RESOURCES
  Resources/Icons/${MODULE_NAME}.png
  )

#-----------------------------------------------------------------------------
slicerMacroBuildScriptedModule(
  NAME ${MODULE_NAME}
  SCRIPTS ${MODULE_PYTHON_SCRIPTS}
  RESOURCES ${MODULE_PYTHON_RESOURCES}
  WITH_GENERIC_TESTS
  )

#-----------------------------------------------------------------------------
if(BUILD_TESTING)

  # Register the unittest subclass in the main script as a ctest.
  # Note that the test will also be available at runtime.
  slicer_add_python_unittest(SCRIPT ${MODULE_NAME}.py)

  # Additional build-time testing
  add_subdirectory(Testing)
endif()
44 |
--------------------------------------------------------------------------------
/TorchIOTransforms/Resources/Icons/TorchIOTransforms.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/TorchIOTransforms/Resources/Icons/TorchIOTransforms.png
--------------------------------------------------------------------------------
/TorchIOTransforms/Resources/Icons/TorchIOTransforms_old.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/fepegar/SlicerTorchIO/ddd293b48f026be0fed8558c01cb5a9159c3a26c/TorchIOTransforms/Resources/Icons/TorchIOTransforms_old.png
--------------------------------------------------------------------------------
/TorchIOTransforms/Testing/CMakeLists.txt:
--------------------------------------------------------------------------------
# Build-time Python tests for this module live in the Python subdirectory.
add_subdirectory(Python)
2 |
--------------------------------------------------------------------------------
/TorchIOTransforms/Testing/Python/CMakeLists.txt:
--------------------------------------------------------------------------------
1 |
2 | #slicer_add_python_unittest(SCRIPT ${MODULE_NAME}ModuleTest.py)
3 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransforms.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import traceback
3 | from pathlib import Path
4 | from contextlib import contextmanager
5 |
6 | import numpy as np
7 |
8 | import qt
9 | import ctk
10 | import slicer
11 | from slicer.ScriptedLoadableModule import (
12 | ScriptedLoadableModule,
13 | ScriptedLoadableModuleWidget,
14 | ScriptedLoadableModuleTest,
15 | )
16 |
17 | import TorchIOTransformsLib
18 | from TorchIOModule import TorchIOModuleLogic
19 |
20 |
# Transform names exposed in the GUI, in alphabetical order.
# ``sorted`` already returns a list; the previous ``list(sorted(...))``
# wrapper was redundant.
TRANSFORMS = sorted(TorchIOTransformsLib.__all__)
22 |
23 |
class TorchIOTransforms(ScriptedLoadableModule):
    """Module metadata for the TorchIO Transforms module."""

    def __init__(self, parent):
        ScriptedLoadableModule.__init__(self, parent)
        self.parent.title = 'TorchIO Transforms'
        self.parent.categories = ['Utilities']
        self.parent.dependencies = []
        self.parent.contributors = [
            'Fernando Perez-Garcia'
            ' (University College London and King\'s College London)'
        ]
        self.parent.helpText = (
            'This module can be used to quickly visualize the effect of each'
            ' transform parameter. That way, users can have an intuitive feeling of'
            ' what the output of a transform looks like without any coding at all.\n\n'
        )
        self.parent.helpText += self.getDefaultModuleDocumentationLink()
        self.parent.acknowledgementText = (
            'This work is supported by the EPSRC-funded UCL Centre for Doctoral'
            ' Training in Medical Imaging (EP/L016478/1). This publication represents'
            ' in part independent research commissioned by the Wellcome Trust Health'
            ' Innovation Challenge Fund (WT106882). The views expressed in this'
            ' publication are those of the authors and not necessarily those of the'
            ' Wellcome Trust.'
        )

    def getDefaultModuleDocumentationLink(self):
        """Return an HTML link to the module documentation.

        Previously ``docsUrl`` was computed but never used: the f-string
        had no placeholder, so the help text did not contain the link.
        """
        docsUrl = 'https://torchio.readthedocs.io/interfaces/index.html#d-slicer-gui'
        linkText = f'See <a href="{docsUrl}">the documentation</a> for more information.'
        return linkText
54 |
55 |
class TorchIOTransformsWidget(ScriptedLoadableModuleWidget):
    """GUI of the TorchIO Transforms module.

    Lets the user pick input/output volumes and a transform, tweak its
    parameters, apply it, and toggle between the two volumes.
    """

    def setup(self):
        ScriptedLoadableModuleWidget.setup(self)
        self.logic = TorchIOTransformsLogic()
        if self.logic.torchio is None:  # make sure PyTorch and TorchIO are installed
            return
        self.transforms = []
        self.currentTransform = None
        self.makeGUI()
        self.onVolumeSelectorModified()
        # Convenience handle for interactive debugging in the Python console
        slicer.torchio = self
        self.backgroundNode = None

    def makeGUI(self):
        """Assemble all sections of the module panel."""
        self.addNodesButton()
        self.addTransformButton()
        self.addTransforms()
        self.addToggleApplyButtons()
        # Add vertical spacer
        self.layout.addStretch(1)

    def addNodesButton(self):
        """Collapsible section with the input and output volume selectors."""
        self.nodesButton = ctk.ctkCollapsibleButton()
        self.nodesButton.text = 'Volumes'
        self.layout.addWidget(self.nodesButton)
        nodesLayout = qt.QFormLayout(self.nodesButton)

        goToSampleDataButton = qt.QPushButton('Go to Sample Data module')
        goToSampleDataButton.clicked.connect(lambda: slicer.util.selectModule('SampleData'))
        nodesLayout.addWidget(goToSampleDataButton)

        self.inputSelector = slicer.qMRMLNodeComboBox()
        self.inputSelector.nodeTypes = ['vtkMRMLVolumeNode']
        self.inputSelector.addEnabled = False
        self.inputSelector.removeEnabled = True
        self.inputSelector.noneEnabled = False
        self.inputSelector.setMRMLScene(slicer.mrmlScene)
        self.inputSelector.currentNodeChanged.connect(self.onVolumeSelectorModified)
        nodesLayout.addRow('Input volume: ', self.inputSelector)

        self.outputSelector = slicer.qMRMLNodeComboBox()
        self.outputSelector.nodeTypes = ['vtkMRMLScalarVolumeNode']
        self.outputSelector.selectNodeUponCreation = False
        self.outputSelector.addEnabled = False
        self.outputSelector.removeEnabled = True
        self.outputSelector.noneEnabled = True
        self.outputSelector.setMRMLScene(slicer.mrmlScene)
        self.outputSelector.noneDisplay = 'Create new volume'
        self.outputSelector.currentNodeChanged.connect(self.onVolumeSelectorModified)
        nodesLayout.addRow('Output volume: ', self.outputSelector)

    def addTransformButton(self):
        """Collapsible section holding the transforms combo box."""
        self.transformsButton = ctk.ctkCollapsibleButton()
        self.transformsButton.text = 'Transforms'
        self.layout.addWidget(self.transformsButton)
        self.transformsLayout = qt.QFormLayout(self.transformsButton)

        self.transformsComboBox = qt.QComboBox()
        self.transformsLayout.addWidget(self.transformsComboBox)

    def addTransforms(self):
        """Instantiate every transform wrapper and add its (hidden) form."""
        self.transformsComboBox.addItems(TRANSFORMS)
        for transformName in TRANSFORMS:
            transform = self.logic.getTransform(transformName)
            self.transforms.append(transform)
            transform.hide()
            self.transformsLayout.addWidget(transform.groupBox)
        # No transform is selected initially
        self.transformsComboBox.currentIndex = -1
        self.transformsComboBox.currentIndexChanged.connect(self.onTransformsComboBox)

    def addToggleApplyButtons(self):
        """Row with the 'Toggle volumes' and 'Apply transform' buttons."""
        toggleApplyFrame = qt.QFrame()
        toggleApplyLayout = qt.QHBoxLayout(toggleApplyFrame)

        self.toggleButton = qt.QPushButton('Toggle volumes')
        self.toggleButton.clicked.connect(self.onToggleButton)
        self.toggleButton.setDisabled(True)
        toggleApplyLayout.addWidget(self.toggleButton)

        self.applyButton = qt.QPushButton('Apply transform')
        self.applyButton.clicked.connect(self.onApplyButton)
        self.applyButton.setDisabled(True)
        toggleApplyLayout.addWidget(self.applyButton)

        self.layout.addWidget(toggleApplyFrame)

    def onTransformsComboBox(self):
        """Show only the parameters form of the selected transform."""
        transformName = self.transformsComboBox.currentText
        for transform in self.transforms:
            if transform.name == transformName:
                self.currentTransform = transform
                transform.show()
            else:
                transform.hide()
        self.onVolumeSelectorModified()

    def onVolumeSelectorModified(self):
        """Enable the buttons only when the required nodes are selected."""
        self.applyButton.setDisabled(
            self.inputSelector.currentNode() is None
            or self.currentTransform is None
        )
        self.toggleButton.setEnabled(
            self.inputSelector.currentNode() is not None
            and self.outputSelector.currentNode() is not None
        )

    def onToggleButton(self):
        """Swap the background between the input and the output volume."""
        inputNode = self.inputSelector.currentNode()
        outputNode = self.outputSelector.currentNode()
        if self.backgroundNode is None:
            self.backgroundNode = inputNode
        self.backgroundNode = outputNode if self.backgroundNode is inputNode else inputNode
        foregroundNode = outputNode if self.backgroundNode is inputNode else inputNode
        slicer.util.setSliceViewerLayers(
            background=self.backgroundNode,
            foreground=foregroundNode,
            foregroundOpacity=0,
        )

    def onApplyButton(self):
        """Apply the selected transform to the input volume.

        Creates the output node on demand, reports errors in a dialog and
        copies the input display settings to the output.
        """
        inputVolumeNode = self.inputSelector.currentNode()
        outputVolumeNode = self.outputSelector.currentNode()

        if outputVolumeNode is None:
            name = f'{inputVolumeNode.GetName()} {self.currentTransform.name}'
            outputVolumeNode = slicer.mrmlScene.AddNewNodeByClass(
                inputVolumeNode.GetClassName(),
                name,
            )
            outputVolumeNode.CreateDefaultDisplayNodes()
            self.outputSelector.currentNodeID = outputVolumeNode.GetID()
        # Define kwargs before the try block: if getKwargs() itself raises,
        # the error dialog below can still be built (previously a NameError).
        kwargs = None
        try:
            kwargs = self.currentTransform.getKwargs()
            logging.info(f'Transform args: {kwargs}')
            self.currentTransform(inputVolumeNode, outputVolumeNode)
        except Exception:  # narrowed from a bare except, which also caught SystemExit
            message = 'Error applying the transform.'
            detailedText = (
                f'Transform kwargs:\n{kwargs}\n\n'
                f'Error details:\n{traceback.format_exc()}'
            )
            slicer.util.errorDisplay(message, detailedText=detailedText)
            return
        # Propagate the input display settings (color map and window/level)
        inputDisplayNode = inputVolumeNode.GetDisplayNode()
        inputColorNodeID = inputDisplayNode.GetColorNodeID()
        outputDisplayNode = outputVolumeNode.GetDisplayNode()
        outputDisplayNode.SetAndObserveColorNodeID(inputColorNodeID)
        if outputVolumeNode.IsA('vtkMRMLLabelMapVolumeNode'):
            slicer.util.setSliceViewerLayers(label=outputVolumeNode)
        else:
            outputDisplayNode.SetAutoWindowLevel(False)
            wmin, wmax = inputDisplayNode.GetWindowLevelMin(), inputDisplayNode.GetWindowLevelMax()
            outputDisplayNode.SetWindowLevelMinMax(wmin, wmax)
            slicer.util.setSliceViewerLayers(background=outputVolumeNode)
211 |
212 |
class TorchIOTransformsLogic(TorchIOModuleLogic):
    """Logic for the Transforms module: instantiate and run transform wrappers."""

    def getTransform(self, transformName):
        """Return a new instance of the GUI wrapper for ``transformName``."""
        import TorchIOTransformsLib
        transformClass = getattr(TorchIOTransformsLib, transformName)
        return transformClass()

    def applyTransform(self, inputNode, outputNode, transformName):
        """Apply the named transform, creating an output node when needed."""
        if outputNode is None:
            className = inputNode.GetClassName()
            outputNode = slicer.mrmlScene.AddNewNodeByClass(className)
        transform = self.getTransform(transformName)
        # Long-running operation: show a wait cursor meanwhile
        with self.showWaitCursor():
            transform(inputNode, outputNode)
224 |
225 |
class TorchIOTransformsTest(ScriptedLoadableModuleTest):
    # Smoke test: applies every available transform (except
    # HistogramStandardization) to the MRHead sample volume, in place.

    def setUp(self):
        """ Do whatever is needed to reset the state - typically a scene clear will be enough.
        """
        slicer.mrmlScene.Clear(0)
        # Write a landmarks file to a temporary directory; it would be needed
        # by HistogramStandardization, which is currently skipped below.
        self.landmarksPath = Path(slicer.util.tempDirectory()) / 'landmarks.npy'
        landmarks = np.array(
            [3.55271368e-15, 7.04965436e-02, 5.11962268e-01, 8.81293798e-01,
            1.08523250e+00, 1.51833266e+00, 3.08140233e+00, 1.15454687e+01,
            2.78108498e+01, 3.42262691e+01, 3.99556984e+01, 5.26837071e+01,
            1.00000000e+02]
        )
        np.save(self.landmarksPath, landmarks)
        # Landmarks unused for now

    def tearDown(self):
        # Remove the temporary landmarks file created in setUp()
        self.landmarksPath.unlink()

    def runTest(self):
        """Run as few or as many tests as needed here.
        """
        self.setUp()
        self.test_TorchIOTransforms()
        self.tearDown()

    def _delayDisplay(self, message):
        # Show progress dialogs only in interactive sessions; they would
        # slow down automated test runs.
        if not slicer.app.testingEnabled():
            self.delayDisplay(message)

    def test_TorchIOTransforms(self):
        # Download MRHead and run each transform on it, writing the result
        # back into the same node.
        self._delayDisplay("Starting the test")
        import SampleData
        volumeNode = SampleData.downloadSample('MRHead')
        self._delayDisplay('Finished with download and loading')
        logic = TorchIOTransformsLogic()
        for transformName in TRANSFORMS:
            if transformName == 'HistogramStandardization':
                # This transform can't be run with default input parameters
                continue
            self._delayDisplay(f'Applying {transformName}...')
            logic.applyTransform(
                volumeNode,
                volumeNode,
                transformName,
            )
            self._delayDisplay(f'{transformName} passed!')
        self._delayDisplay('Test passed!')
273 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/CoordinatesWidget.py:
--------------------------------------------------------------------------------
1 | import ctk
2 |
class CoordinatesWidget:
    """Small wrapper around ``ctk.ctkCoordinatesWidget`` with tuple-based access.

    Args:
        decimals: number of decimals shown; 0 makes ``getCoordinates``
            return ints instead of floats.
        coordinates: initial coordinates; a scalar is broadcast to all
            three axes.
        step: spin-box single step.
    """

    def __init__(self, decimals=0, coordinates=(0, 0, 0), step=1):
        self.widget = ctk.ctkCoordinatesWidget()
        self.widget.decimals = decimals
        self.widget.singleStep = step
        self.decimals = decimals
        self.setCoordinates(coordinates)

    def getCoordinates(self):
        """Return the current coordinates as a tuple of ints or floats."""
        return self._stringToCoords(self.widget.coordinates)

    def setCoordinates(self, coordinates):
        """Set the coordinates from a scalar or a sequence of numbers."""
        # Broadcast a scalar to the three axes. Lists are now accepted too:
        # previously only tuples were recognized, so a list was wrapped as
        # if it were a scalar, producing an invalid coordinates string.
        if not isinstance(coordinates, (tuple, list)):
            coordinates = 3 * (coordinates,)
        self.widget.coordinates = self._coordsToString(coordinates)

    def _coordsToString(self, coordinates):
        # ctkCoordinatesWidget expects a comma-separated string
        return ','.join(str(n) for n in coordinates)

    def _stringToCoords(self, string):
        cast = int if self.decimals == 0 else float
        return tuple(cast(n) for n in string.split(','))
25 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/HistogramStandardization.py:
--------------------------------------------------------------------------------
1 | import qt
2 |
3 | from .Transform import Transform
4 |
5 |
class HistogramStandardization(Transform):
    """GUI wrapper for ``torchio.HistogramStandardization``."""

    def setup(self):
        """Build the parameters form: a single line edit for the landmarks path."""
        self.landmarksLineEdit = qt.QLineEdit()
        self.layout.addRow('Path to landmarks: ', self.landmarksLineEdit)

    def getArgs(self):
        """Return the positional arguments for the transform constructor."""
        path = self.landmarksLineEdit.text
        if path.endswith('.npy'):  # I should modify the transform to accept this
            import numpy as np
            return ({'img': np.load(path)},)
        return (path,)
17 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomAffine.py:
--------------------------------------------------------------------------------
1 | import qt
2 |
3 | from .Transform import Transform
4 |
5 |
class RandomAffine(Transform):
    """GUI wrapper for ``torchio.RandomAffine``."""

    def setup(self):
        """Build the widgets for the affine parameters."""
        scale = self.getDefaultValue('scales')
        minScale, maxScale = 1 - scale, 1 + scale
        self.scalesSlider = self.makeRangeWidget(
            0.5, minScale, maxScale, 2, 0.01, 'scales')
        self.layout.addRow('Scales: ', self.scalesSlider)

        maxDegrees = self.getDefaultValue('degrees')
        self.degreesSlider = self.makeRangeWidget(
            -180, -maxDegrees, maxDegrees, 180, 1, 'degrees')
        self.layout.addRow('Degrees: ', self.degreesSlider)

        maxTranslation = self.getDefaultValue('translation')
        self.translationSlider = self.makeRangeWidget(
            -50, -maxTranslation, maxTranslation, 50, 0.5, 'translation')
        self.layout.addRow('Translation: ', self.translationSlider)

        self.interpolationComboBox = self.makeInterpolationComboBox()
        self.layout.addRow('Interpolation: ', self.interpolationComboBox)

        self.isotropicCheckBox = qt.QCheckBox('Isotropic')
        self.layout.addWidget(self.isotropicCheckBox)

        padArg = 'default_pad_value'
        self.padLineEdit = qt.QLineEdit(self.getDefaultValue(padArg))
        self.padLineEdit.setToolTip(self.getArgDocstring(padArg))
        self.layout.addRow('Padding: ', self.padLineEdit)

    def getPadArg(self):
        """Return the padding value as a float when possible, else the raw text."""
        text = self.padLineEdit.text
        try:
            return float(text)
        except ValueError:
            return text

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        return {
            'scales': self.getSliderRange(self.scalesSlider),
            'degrees': self.getSliderRange(self.degreesSlider),
            'translation': self.getSliderRange(self.translationSlider),
            'isotropic': self.isotropicCheckBox.isChecked(),
            'image_interpolation': self.getInterpolation(),
            'default_pad_value': self.getPadArg(),
        }
52 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomAnisotropy.py:
--------------------------------------------------------------------------------
1 | from .Transform import Transform
2 |
3 |
class RandomAnisotropy(Transform):
    """GUI wrapper for ``torchio.RandomAnisotropy``."""

    def setup(self):
        """Build the axes checkboxes and the downsampling range slider."""
        self.axesLayout, self.axesDict = self.makeAxesLayout()
        self.layout.addRow('Axes: ', self.axesLayout)

        low, high = self.getDefaultValue('downsampling')
        self.downsamplingSlider = self.makeRangeWidget(
            1, low, high, 10, 0.01, 'downsampling')
        self.layout.addRow('Downsampling factor: ', self.downsamplingSlider)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        checkedAxes = tuple(n for n in range(3) if self.axesDict[n].isChecked())
        return {
            'axes': checkedAxes,
            'downsampling': self.getSliderRange(self.downsamplingSlider),
        }
20 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomBiasField.py:
--------------------------------------------------------------------------------
1 | import qt
2 | import slicer
3 |
4 | from .Transform import Transform
5 |
6 |
class RandomBiasField(Transform):
    """GUI wrapper for ``torchio.RandomBiasField``."""

    def setup(self):
        """Build the coefficients slider and the polynomial order spin box."""
        self.coefficientsSlider = slicer.qMRMLSliderWidget()
        self.coefficientsSlider.singleStep = 0.01
        self.coefficientsSlider.maximum = 2
        self.coefficientsSlider.value = self.getDefaultValue('coefficients')
        self.layout.addRow('Coefficients: ', self.coefficientsSlider)

        self.orderSpinBox = qt.QSpinBox()
        self.orderSpinBox.maximum = 6
        self.orderSpinBox.value = self.getDefaultValue('order')
        self.layout.addRow('Polynomial order: ', self.orderSpinBox)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        return {
            'coefficients': self.coefficientsSlider.value,
            'order': self.orderSpinBox.value,
        }
26 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomBlur.py:
--------------------------------------------------------------------------------
1 | from .Transform import Transform
2 |
3 |
class RandomBlur(Transform):
    """GUI wrapper for ``torchio.RandomBlur``."""

    def setup(self):
        """Build the standard-deviation range slider."""
        low, high = self.getDefaultValue('std')
        self.stdSlider = self.makeRangeWidget(0, low, high, 5, 0.01, 'std')
        self.layout.addRow('Standard deviation: ', self.stdSlider)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        return {'std': self.getSliderRange(self.stdSlider)}
15 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomElasticDeformation.py:
--------------------------------------------------------------------------------
1 | import qt
2 |
3 | from .Transform import Transform
4 | from .CoordinatesWidget import CoordinatesWidget
5 |
6 |
class RandomElasticDeformation(Transform):
    """GUI wrapper for ``torchio.RandomElasticDeformation``."""

    def setup(self):
        """Build the parameters form for the elastic deformation transform."""
        # Integer coordinates widget for the number of control points
        self.controlPointsWidget = CoordinatesWidget(
            decimals=0,
            coordinates=self.getDefaultValue('num_control_points'),
        )
        self.layout.addRow('Control points: ', self.controlPointsWidget.widget)

        # Float coordinates widget for the maximum displacement
        self.maxDisplacementWidget = CoordinatesWidget(
            decimals=2,
            coordinates=self.getDefaultValue('max_displacement'),
            step=0.1,
        )
        self.layout.addRow('Maximum displacement: ', self.maxDisplacementWidget.widget)

        self.lockedBordersSpinBox = qt.QSpinBox()
        self.lockedBordersSpinBox.maximum = 2
        self.lockedBordersSpinBox.value = self.getDefaultValue('locked_borders')
        self.layout.addRow('Locked borders: ', self.lockedBordersSpinBox)

        self.interpolationComboBox = self.makeInterpolationComboBox()
        self.layout.addRow('Interpolation: ', self.interpolationComboBox)

        # NOTE(review): padding widget kept commented out, mirroring RandomAffine
        # arg = 'default_pad_value'
        # self.padLineEdit = qt.QLineEdit(self.getDefaultValue(arg))
        # self.padLineEdit.setToolTip(self.getArgDocstring(arg))
        # self.layout.addRow('Padding: ', self.padLineEdit)

    # def getPadArg(self):
    #     text = self.padLineEdit.text
    #     try:
    #         value = float(text)
    #     except ValueError:
    #         value = text
    #     return value

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        kwargs = dict(
            num_control_points=self.controlPointsWidget.getCoordinates(),
            max_displacement=self.maxDisplacementWidget.getCoordinates(),
            locked_borders=self.lockedBordersSpinBox.value,
            image_interpolation=self.getInterpolation(),
            # default_pad_value=self.getPadArg(),
        )
        return kwargs
52 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomGamma.py:
--------------------------------------------------------------------------------
1 | from .Transform import Transform
2 |
3 |
class RandomGamma(Transform):
    """GUI wrapper for ``torchio.RandomGamma``."""

    def setup(self):
        """Build the log-gamma range slider."""
        low, high = self.getDefaultValue('log_gamma')
        self.logGammaSlider = self.makeRangeWidget(-2, low, high, 2, 0.01, 'log_gamma')
        self.layout.addRow('Log of gamma: ', self.logGammaSlider)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        return {'log_gamma': self.getSliderRange(self.logGammaSlider)}
15 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomGhosting.py:
--------------------------------------------------------------------------------
1 | import qt
2 | import slicer
3 | import numpy as np
4 |
5 | from .Transform import Transform
6 |
7 |
class RandomGhosting(Transform):
    """GUI wrapper for ``torchio.RandomGhosting``."""

    def setup(self):
        """Build the ghosts spin box, axes checkboxes and intensity slider."""
        self.numGhostsSpinBox = qt.QSpinBox()
        self.numGhostsSpinBox.maximum = 50
        # The transform default is a range; show its midpoint in the spin box
        defaultNumGhosts = int(np.mean(self.getDefaultValue('num_ghosts')))
        self.numGhostsSpinBox.value = defaultNumGhosts
        self.layout.addRow('Ghosts: ', self.numGhostsSpinBox)

        self.axesLayout, self.axesDict = self.makeAxesLayout()
        self.layout.addRow('Axes: ', self.axesLayout)

        self.intensitySlider = slicer.qMRMLSliderWidget()
        self.intensitySlider.singleStep = 0.01
        self.intensitySlider.maximum = 1
        self.intensitySlider.value = np.mean(self.getDefaultValue('intensity'))
        self.layout.addRow('Intensity: ', self.intensitySlider)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        checkedAxes = tuple(n for n in range(3) if self.axesDict[n].isChecked())
        return {
            'num_ghosts': self.numGhostsSpinBox.value,
            'axes': checkedAxes,
            # Duplicate the slider value to pass a degenerate (min, max) range
            'intensity': 2 * (self.intensitySlider.value,),
        }
32 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomMotion.py:
--------------------------------------------------------------------------------
1 | import qt
2 |
3 | from .Transform import Transform
4 |
5 |
class RandomMotion(Transform):
    """GUI wrapper for ``torchio.RandomMotion``."""

    def setup(self):
        """Build the widgets for the motion parameters."""
        maxDegrees = self.getDefaultValue('degrees')
        self.degreesSlider = self.makeRangeWidget(
            -180, -maxDegrees, maxDegrees, 180, 1, 'degrees')
        self.layout.addRow('Degrees: ', self.degreesSlider)

        maxTranslation = self.getDefaultValue('translation')
        self.translationSlider = self.makeRangeWidget(
            -50, -maxTranslation, maxTranslation, 50, 1, 'translation')
        self.layout.addRow('Translation: ', self.translationSlider)

        self.numTransformsSpinBox = qt.QSpinBox()
        self.numTransformsSpinBox.maximum = 10
        self.numTransformsSpinBox.value = self.getDefaultValue('num_transforms')
        self.layout.addRow('Number of motions: ', self.numTransformsSpinBox)

        self.interpolationComboBox = self.makeInterpolationComboBox()
        self.layout.addRow('Interpolation: ', self.interpolationComboBox)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        return {
            'degrees': self.getSliderRange(self.degreesSlider),
            'translation': self.getSliderRange(self.translationSlider),
            'num_transforms': self.numTransformsSpinBox.value,
            'image_interpolation': self.getInterpolation(),
        }
34 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/RandomSpike.py:
--------------------------------------------------------------------------------
1 | import qt
2 | import slicer
3 | import numpy as np
4 |
5 | from .Transform import Transform
6 |
7 |
class RandomSpike(Transform):
    """GUI wrapper for ``torchio.RandomSpike``."""

    def setup(self):
        """Build the spikes spin box and the intensity slider."""
        self.numSpikesSpinBox = qt.QSpinBox()
        self.numSpikesSpinBox.maximum = 10
        self.numSpikesSpinBox.value = self.getDefaultValue('num_spikes')
        self.layout.addRow('Spikes: ', self.numSpikesSpinBox)

        self.intensitySlider = slicer.qMRMLSliderWidget()
        self.intensitySlider.singleStep = 0.01
        self.intensitySlider.maximum = 5
        # The transform default is a range; show its midpoint
        self.intensitySlider.value = np.mean(self.getDefaultValue('intensity'))
        self.layout.addRow('Intensity: ', self.intensitySlider)

    def getKwargs(self):
        """Collect the transform keyword arguments from the widgets."""
        return {
            'num_spikes': self.numSpikesSpinBox.value,
            'intensity': self.intensitySlider.value,
        }
27 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/Transform.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import inspect
3 | import importlib
4 |
5 | import qt
6 | import slicer
7 | import sitkUtils as su
8 |
9 |
class Transform:
    """Base class for the GUI wrappers around TorchIO transforms.

    Subclasses implement ``setup`` to build the parameters form, and
    ``getArgs``/``getKwargs`` to collect the constructor arguments from
    the widgets. Calling the instance applies the transform to a volume node.
    """

    def __init__(self):
        self.groupBox = qt.QGroupBox('Parameters')
        self.layout = qt.QFormLayout(self.groupBox)
        self.setup()

    def getHelpLink(self):
        """Return the URL of this transform's documentation page."""
        docs = 'https://torchio.readthedocs.io'
        type_ = self.transformType
        return f'{docs}/transforms/{type_}.html#torchio.transforms.{self.name}'

    def setup(self):
        """Build the parameters form. Must be overridden by subclasses."""
        raise NotImplementedError

    @property
    def name(self):
        # The wrapper class is named after the TorchIO transform it wraps
        return self.__class__.__name__

    @property
    def transformType(self):
        # TorchIO groups its docs into augmentation vs preprocessing pages
        return 'augmentation' if self.name.startswith('Random') else 'preprocessing'

    def show(self):
        self.groupBox.show()

    def hide(self):
        self.groupBox.hide()

    def getTransformClass(self):
        """Return the ``torchio`` class matching this wrapper's name."""
        klass = getattr(importlib.import_module('torchio'), self.name)
        return klass

    def getArgs(self):
        """Positional arguments for the transform constructor (none by default)."""
        return ()

    def getKwargs(self):
        """Keyword arguments for the transform constructor (none by default)."""
        return {}

    def getTransform(self):
        """Instantiate the TorchIO transform with the current GUI values."""
        klass = self.getTransformClass()
        args = self.getArgs()
        kwargs = self.getKwargs()
        return klass(*args, **kwargs)

    def makeInterpolationComboBox(self):
        """Create a combo box listing the TorchIO interpolation modes."""
        from torchio.transforms.interpolation import Interpolation
        values = [key.name.lower().capitalize() for key in Interpolation]
        comboBox = qt.QComboBox()
        comboBox.addItems(values)
        comboBox.setCurrentIndex(1)  # linear
        comboBox.setToolTip(self.getArgDocstring('image_interpolation'))
        return comboBox

    def getInterpolation(self):
        """Return the selected interpolation name in lowercase."""
        return self.interpolationComboBox.currentText.lower()

    def makeRangeWidget(self, minimum, minimumValue, maximumValue, maximum, step, name):
        """Create a two-handle range slider, with the arg docstring as tooltip."""
        slider = slicer.qMRMLRangeWidget()
        slider.minimum = minimum
        slider.maximum = maximum
        slider.minimumValue = minimumValue
        slider.maximumValue = maximumValue
        slider.singleStep = step
        slider.setToolTip(self.getArgDocstring(name))
        slider.symmetricMoves = True
        return slider

    def makeAxesLayout(self):
        """Create three axis checkboxes (0, 1, 2), all checked by default."""
        layout = qt.QHBoxLayout()
        axesDict = {n: qt.QCheckBox(str(n)) for n in range(3)}
        for widget in axesDict.values():
            widget.setChecked(True)
            layout.addWidget(widget)
        return layout, axesDict

    def getSignature(self):
        """Return the ``inspect`` signature of the transform constructor."""
        klass = self.getTransformClass()
        return inspect.signature(klass)

    def getDefaultValue(self, kwarg):
        """Return the default value of a constructor keyword argument."""
        signature = self.getSignature()
        return signature.parameters[kwarg].default

    def getDocstring(self):
        return inspect.getdoc(self.getTransformClass())

    def getArgDocstring(self, arg):
        """Heuristically extract the docstring lines documenting ``arg``.

        # TODO: learn regex
        """
        lines = self.getDocstring().splitlines()
        matches = [line for line in lines if line.startswith(f'    {arg}:')]
        if not matches:
            # Previously this raised a bare IndexError with no context
            raise ValueError(f'Argument {arg!r} not documented in {self.name}')
        line = matches[0]
        index = lines.index(line)
        line = line.replace(f'{arg}: ', '')
        argLines = [line]
        for line in lines[index + 1:]:
            if line.startswith(8 * ' '):
                break
            argLines.append(line)
        return '\n'.join(argLines)

    def getSliderRange(self, slider):
        """Return the (min, max) values of a range slider as a tuple."""
        return slider.minimumValue, slider.maximumValue

    def __call__(self, inputVolumeNode, outputVolumeNode):
        """Apply the transform to a volume node, writing into ``outputVolumeNode``.

        Raises:
            ValueError: if the input node is neither a scalar volume nor a label map.
        """
        import torchio
        image = su.PullVolumeFromSlicer(inputVolumeNode)
        tensor, affine = torchio.io.sitk_to_nib(image)
        # Check the label map class first: vtkMRMLLabelMapVolumeNode derives
        # from vtkMRMLScalarVolumeNode, so the previous ordering classified
        # label maps as scalar images.
        if inputVolumeNode.IsA('vtkMRMLLabelMapVolumeNode'):
            image = torchio.LabelMap(tensor=tensor, affine=affine)
        elif inputVolumeNode.IsA('vtkMRMLScalarVolumeNode'):
            image = torchio.ScalarImage(tensor=tensor, affine=affine)
        else:
            raise ValueError(f'Unsupported volume node type: {inputVolumeNode.GetClassName()}')
        subject = torchio.Subject(image=image)  # to get transform history
        transformed = self.getTransform()(subject)
        deterministicApplied = transformed.get_applied_transforms()[0]
        logging.info(f'Applied transform: {deterministicApplied}')
        transformedImage = transformed.image
        image = torchio.io.nib_to_sitk(transformedImage.data, transformedImage.affine)
        su.PushVolumeToSlicer(image, targetNode=outputVolumeNode)
        return outputVolumeNode
128 |
--------------------------------------------------------------------------------
/TorchIOTransforms/TorchIOTransformsLib/__init__.py:
--------------------------------------------------------------------------------
1 | from .RandomBlur import RandomBlur
2 | from .RandomGamma import RandomGamma
3 | from .RandomSpike import RandomSpike
4 | from .RandomAffine import RandomAffine
5 | from .RandomMotion import RandomMotion
6 | from .RandomGhosting import RandomGhosting
7 | from .RandomBiasField import RandomBiasField
8 | from .RandomAnisotropy import RandomAnisotropy
9 | from .RandomElasticDeformation import RandomElasticDeformation
10 | from .HistogramStandardization import HistogramStandardization
11 |
12 |
# Names exported by ``from TorchIOTransformsLib import *``; the Transforms
# module builds its GUI list by sorting this sequence.
__all__ = (
    'RandomBlur',
    'RandomGamma',
    'RandomSpike',
    'RandomAffine',
    'RandomMotion',
    'RandomGhosting',
    'RandomBiasField',
    'RandomAnisotropy',
    'RandomElasticDeformation',
    'HistogramStandardization',
)
25 |
--------------------------------------------------------------------------------