├── .gitignore
├── .pypirc
├── .travis.yml
├── LICENSE
├── MANIFEST.in
├── Makefile
├── README.md
├── README.rst
├── SECURITY.md
├── conf.py
├── coverage_wrapper.py
├── deepneuro
├── __init__.py
├── augmentation
│ ├── __init__.py
│ ├── augment.py
│ └── subsample.py
├── container
│ ├── Dockerfile.deepneuro
│ ├── Dockerfile.deepneuro_development
│ ├── Dockerfile.deepneuro_development_nopackage
│ ├── __init__.py
│ ├── container_cli.py
│ └── deploy.py
├── core.py
├── data
│ ├── __init__.py
│ ├── data_collection.py
│ ├── data_group.py
│ ├── data_load.py
│ ├── data_split.py
│ ├── data_utilities.py
│ └── sampling.py
├── external
│ └── __init__.py
├── interface
│ ├── __init__.py
│ ├── master_cli.py
│ └── web_wrapper.py
├── load
│ ├── __init__.py
│ └── load.py
├── models
│ ├── __init__.py
│ ├── blocks.py
│ ├── callbacks.py
│ ├── cost_functions.py
│ ├── dn_ops.py
│ ├── gan.py
│ ├── keras_model.py
│ ├── keras_pretrained.py
│ ├── minimal.py
│ ├── model.py
│ ├── ops.py
│ ├── progressive_growing_gan.py
│ ├── tensorflow_model.py
│ └── unet.py
├── outputs
│ ├── __init__.py
│ ├── classification.py
│ ├── gan.py
│ ├── inference.py
│ ├── interpretability.py
│ ├── measure.py
│ ├── output.py
│ ├── radiomics.py
│ ├── segmentation.py
│ ├── statistics.py
│ ├── toy.py
│ └── visualization.py
├── package_test
│ ├── __init__.py
│ ├── ci_test.py
│ ├── entrypoint.sh
│ ├── package_test.py
│ ├── pipeline_test.py
│ └── utilities_test.py
├── pipelines
│ ├── Dockerfile_base
│ ├── Ischemic_Stroke
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── Singularity.deepneuro_segment_ischemic_stroke
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── predict.py
│ │ ├── resources
│ │ │ └── icon.png
│ │ └── train.py
│ ├── Segment_Brain_Mets
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── Singularity.deepneuro_segment_mets
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── predict.py
│ │ ├── resources
│ │ │ └── icon.png
│ │ └── train.py
│ ├── Segment_GBM
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── Singularity.deepneuro_segment_gbm
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── predict.py
│ │ ├── resources
│ │ │ └── icon.png
│ │ └── template.py
│ ├── Skull_Stripping
│ │ ├── Dockerfile
│ │ ├── README.md
│ │ ├── Singularity.deepneuro_skullstripping
│ │ ├── __init__.py
│ │ ├── cli.py
│ │ ├── predict.py
│ │ └── resources
│ │ │ └── icon.png
│ ├── __init__.py
│ ├── cli
│ │ └── core.py
│ ├── echo_count.txt
│ ├── sample_template.yml
│ ├── shared.py
│ ├── template.py
│ └── templates
│ │ ├── Dockerfile_template
│ │ ├── README_template.md
│ │ └── cli_template.py
├── postprocessing
│ ├── __init__.py
│ ├── classification.py
│ ├── label.py
│ ├── postprocessor.py
│ ├── signal.py
│ ├── statistics.py
│ └── transform.py
├── preprocessing
│ ├── __init__.py
│ ├── append_output.py
│ ├── classes.py
│ ├── models.py
│ ├── preprocessor.py
│ ├── signal.py
│ ├── skullstrip.py
│ └── transform.py
└── utilities
│ ├── __init__.py
│ ├── conversion.py
│ ├── util.py
│ └── visualize.py
├── entrypoint.sh
├── index.rst
├── make.bat
├── misc
└── DeepInfer
│ └── Segment_GBM
│ ├── DeepNeuro_Glioblastoma.json
│ ├── Dockerfile
│ ├── entrypoint.py
│ └── entrypoint.sh
├── notebooks
├── Call_External_Packages.ipynb
├── Medical_Decathlon_Task02_Heart.ipynb
├── Preprocess_and_Augment.ipynb
├── Publication_Figures.ipynb
├── Run_Inference.ipynb
├── Train_Model.ipynb
├── Use_DeepNeuro_Pretrained_Models_Docker.ipynb
└── resources
│ ├── 3D_Slicer_Logo.png
│ ├── docker_module_icon.png
│ ├── external_package_icon.png
│ ├── model_inference_icon.png
│ ├── train_model_icon.png
│ └── train_preprocess_icon.png
├── package_resources
└── logos
│ ├── DeepNeuro.PNG
│ └── DeepNeuro_alt.PNG
├── setup.cfg
├── setup.py
└── tox.ini
/.gitignore:
--------------------------------------------------------------------------------
1 | # DeepNeuro ignores
2 | *.h5
3 | *.nii.gz
4 | *.nii
5 | *.dcm
6 | *.IMA
7 | *.lprof
8 | *.png
9 | *.model.*
10 | checkpoint
11 | *.csv
12 | *.DS_Store
13 | *.gz
14 | *.bak
15 | *.
16 | deepneuro/local
17 | *.zip
18 | *.tar.gz
19 | /_build
20 | *.hdf5
21 | *.gif
22 | *.ipynb
23 |
24 | # Byte-compiled / optimized / DLL files
25 | __pycache__/
26 | *.py[cod]
27 | *$py.class
28 |
29 | # C extensions
30 | *.so
31 |
32 | # Distribution / packaging
33 | .Python
34 | env/
35 | build/
36 | develop-eggs/
37 | dist/
38 | downloads/
39 | eggs/
40 | .eggs/
41 | lib/
42 | lib64/
43 | parts/
44 | sdist/
45 | var/
46 | wheels/
47 | *.egg-info/
48 | .installed.cfg
49 | *.egg
50 |
51 | # PyInstaller
52 | # Usually these files are written by a python script from a template
53 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
54 | *.manifest
55 | *.spec
56 |
57 | # Installer logs
58 | pip-log.txt
59 | pip-delete-this-directory.txt
60 |
61 | # Unit test / coverage reports
62 | htmlcov/
63 | .tox/
64 | .coverage
65 | .coverage.*
66 | .cache
67 | nosetests.xml
68 | coverage.xml
69 | *.cover
70 | .hypothesis/
71 |
72 | # Translations
73 | *.mo
74 | *.pot
75 |
76 | # Django stuff:
77 | *.log
78 | local_settings.py
79 |
80 | # Flask stuff:
81 | instance/
82 | .webassets-cache
83 |
84 | # Scrapy stuff:
85 | .scrapy
86 |
87 | # Sphinx documentation
88 | docs/_build/
89 |
90 | # PyBuilder
91 | target/
92 |
93 | # Jupyter Notebook
94 | .ipynb_checkpoints
95 |
96 | # pyenv
97 | .python-version
98 |
99 | # celery beat schedule file
100 | celerybeat-schedule
101 |
102 | # SageMath parsed files
103 | *.sage.py
104 |
105 | # dotenv
106 | .env
107 |
108 | # virtualenv
109 | .venv
110 | venv/
111 | ENV/
112 |
113 | # Spyder project settings
114 | .spyderproject
115 | .spyproject
116 |
117 | # Rope project settings
118 | .ropeproject
119 |
120 | # mkdocs documentation
121 | /site
122 |
123 | # mypy
124 | .mypy_cache/
125 |
--------------------------------------------------------------------------------
/.pypirc:
--------------------------------------------------------------------------------
1 | [distutils]
2 | index-servers=
3 | testpypi
4 | pypi
5 |
6 | [testpypi]
7 | repository = https://testpypi.python.org/pypi
8 | username = QTIM
9 |
10 | [pypi]
11 | repository = https://pypi.python.org/pypi
12 | username = QTIM
--------------------------------------------------------------------------------
/.travis.yml:
--------------------------------------------------------------------------------
1 | language: python
2 |
3 | python:
4 | - "3.5"
5 |
6 | env:
7 | - TOX_ENV=flake8
8 | - TOX_ENV=py35
9 |
10 | before_install:
11 | - pip install --upgrade pip setuptools wheel
12 | - pip install --only-binary=numpy,scipy numpy scipy
13 |
14 | # command to install dependencies
15 |
16 | install:
17 | # - git clone https://github.com/sphinx-doc/sphinx
18 | # - cd sphinx
19 | # - python setup.py install
20 | # - cd ..
21 | # - pip install travis-sphinx
22 | # - pip install numpydoc
23 | # - pip install sphinx_rtd_theme
24 | - pip install tox
25 |
26 | notifications:
27 | slack: qtim:jp1UTPVP5wscM2R6eyFHJIb4
28 |
29 | script:
30 | - tox -e $TOX_ENV
31 | # - travis-sphinx build --nowarn
32 |
33 | # after_success:
34 | # - travis-sphinx deploy
35 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 QTIM Lab
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include LICENSE.txt
2 | include README.rst
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | SOURCEDIR = .
8 | BUILDDIR = _build
9 |
10 | # Put it first so that "make" without argument is like "make help".
11 | help:
12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
13 |
14 | .PHONY: help Makefile
15 |
16 | # Catch-all target: route all unknown targets to Sphinx using the new
17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
18 | %: Makefile
19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
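# For example, "make html" runs: sphinx-build -M html . _build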
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ![DeepNeuro](./package_resources/logos/DeepNeuro_alt.PNG?raw=true)
2 |
3 | [![Build Status](https://travis-ci.org/QTIM-Lab/DeepNeuro.svg?branch=master)](https://travis-ci.org/QTIM-Lab/DeepNeuro)
4 |
5 | # DeepNeuro
6 |
7 | A deep learning Python package for neuroimaging data. Focused on validated command-line tools you can use today. Created by the Quantitative Tumor Imaging Lab at the Martinos Center (Harvard-MIT Program in Health Sciences and Technology / Massachusetts General Hospital).
8 |
9 | ## Table of Contents
10 | - [:question: About](#about)
11 | - [:floppy_disk: Installation](#installation)
12 | - [:mortar_board: Tutorials](#tutorials)
13 | - [:gift: Modules](#modules)
14 | - [:speech_balloon: Contact](#contact)
15 | - [:mega: Citation](#citation)
16 | - [:yellow_heart: Acknowledgements](#acknowledgements)
17 |
18 | ## About
19 | DeepNeuro is an open-source toolset of deep learning applications for neuroimaging. We have several goals for this package:
20 |
21 | * Provide easy-to-use command line tools for neuroimaging using deep learning.
22 | * Create Docker containers for each tool and all out-of-package pre-processing steps, so each tool can be run without having to install prerequisite libraries.
23 | * Provide freely available deep learning models trained on a wealth of neuroimaging data.
24 | * Provide training scripts and links to publicly available data to replicate the results of DeepNeuro's models.
25 | * Provide implementations of popular models for medical imaging data, and pre-processed datasets for educational purposes.
26 |
27 | This package is under active development, but we encourage users both to try the modules with the pre-trained models highlighted below, and to try their hand at making their own DeepNeuro modules using the tutorials below.
28 |
29 | ## Installation
30 |
31 | 1. Install Docker from Docker's website here: https://www.docker.com/get-started. Follow instructions on that link to get Docker set up properly on your workstation.
32 |
33 | 2. Install the Docker Engine Utility for NVIDIA GPUs, AKA nvidia-docker. You can find installation instructions at their Github page, here: https://github.com/NVIDIA/nvidia-docker
34 |
35 | 3. Pull the DeepNeuro Docker container from https://hub.docker.com/r/qtimlab/deepneuro_segment_gbm/. Use the command ```docker pull qtimlab/deepneuro```.
36 |
37 | 4. If you want to run DeepNeuro outside of a Docker container, you can install the DeepNeuro Python package locally using the pip package manager. On the command line, run ```pip install deepneuro```. A short usage sketch follows below.
38 |
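Once installed locally, pretrained models can be fetched from within Python. A minimal sketch (not part of the original documentation), using the `load` function and the `'skullstrip_mri'` model key defined in `deepneuro/load/load.py`:

```python
from deepneuro.load import load

# Downloads the pretrained skull-stripping model into the package's
# load directory (if it is not already present) and returns its path.
model_path = load('skullstrip_mri')
print(model_path)
```
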
39 | ## Tutorials
40 |
41 |
42 |
43 |
44 |
45 |
46 |
47 |
48 |
49 |
50 |
51 |
52 |
53 |
54 |
55 |
56 |
57 |
58 |
59 | ## Modules
60 |
61 |
62 |
63 |
64 |
65 |
66 |
67 |
68 |
69 |
70 |
71 |
72 |
73 |
74 |
75 |
76 |
77 |
78 |
79 |
80 |
81 |
82 |
83 |
84 |
85 | ## Citation
86 |
87 | If you use DeepNeuro in your published work, please cite:
88 |
89 | Beers, A., Brown, J., Chang, K., Hoebel, K., Patel, J., Ly, K. Ina, Tolaney, S.M., Brastianos, P., Rosen, B., Gerstner, E., and Kalpathy-Cramer, J. (2020). DeepNeuro: an open-source deep learning toolbox for neuroimaging. Neuroinformatics. DOI: 10.1007/s12021-020-09477-5. PMID: 32578020
90 |
91 | If you use the MRI skull-stripping or glioblastoma segmentation modules, please cite:
92 |
93 | Chang, K., Beers, A.L., Bai, H.X., Brown, J.M., Ly, K.I., Li, X., Senders, J.T., Kavouridis, V.K., Boaro, A., Su, C., Bi, W.L., Rapalino, O., Liao, W., Shen, Q., Zhou, H., Xiao, B., Wang, Y., Zhang, P.J., Pinho, M.C., Wen, P.Y., Batchelor, T.T., Boxerman, J.L., Arnaout, O., Rosen, B.R., Gerstner, E.R., Yang, L., Huang, R.Y., and Kalpathy-Cramer, J., 2019. Automatic assessment of glioma burden: A deep learning algorithm for fully automated volumetric and bi-dimensional measurement. Neuro-Oncology. DOI: 10.1093/neuonc/noz106. PMID: 31190077
94 |
95 | ## Contact
96 |
97 | DeepNeuro is under active development, and you may run into errors or want additional features. Send any questions or requests for methods to qtimlab@gmail.com. You can also submit a Github issue if you run into a bug.
98 |
99 | ## Acknowledgements
100 |
101 | The Center for Clinical Data Science at Massachusetts General Hospital and the Brigham and Women's Hospital provided technical and hardware support for the development of DeepNeuro, including access to graphics processing units. The DeepNeuro project is also indebted to the 3D UNet Github repository by user ellisdg, which formed the original kernel for much of its code in early stages. Long live open source deep learning :)
102 |
103 | ## Disclaimer
104 |
105 | This software package and the deep learning models within are intended for research purposes only and have not yet been validated for clinical use.
106 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | .. figure:: ./package_resources/logos/DeepNeuro_alt.PNG?raw=true
2 | :alt: DeepNeuro
3 |
4 | Alt text
5 | |Build Status|
6 |
7 | DeepNeuro
8 | =========
9 |
10 | A deep learning Python package for neuroimaging data. Focused on validated command-line tools you
11 | can use today. Created by the Quantitative Tumor Imaging Lab at the Martinos Center (Harvard-MIT
12 | Program in Health Sciences and Technology / Massachusetts General Hospital).
13 |
14 | Table of Contents
15 | -----------------
16 |
17 | - `:question: About <#about>`__
18 | - `:floppy\_disk: Installation <#installation>`__
19 | - `:mortar\_board: Tutorials <#tutorials>`__
20 | - `:gift: Modules <#modules>`__
21 | - `:speech\_balloon: Contact <#contact>`__
22 | - `:mega: Citation <#citation>`__
23 | - `:yellow\_heart: Acknowledgements <#acknowledgements>`__
24 |
25 | About
26 | -----
27 |
28 | DeepNeuro is an open-source toolset of deep learning applications for neuroimaging. We have several
29 | goals for this package:
30 |
31 | - Provide easy-to-use command line tools for neuroimaging using deep learning.
32 | - Create Docker containers for each tool and all out-of-package pre-processing steps, so each
33 | tool can be run without having to install prerequisite libraries.
34 | - Provide freely available deep learning models trained on a wealth of neuroimaging data.
35 | - Provide training scripts and links to publicly available data to replicate the results of
36 | DeepNeuro's models.
37 | - Provide implementations of popular models for medical imaging data, and pre-processed datasets
38 | for educational purposes.
39 |
40 | This package is under active development, but we encourage users both to try the modules with
41 | the pre-trained models highlighted below, and to try their hand at making their own DeepNeuro
42 | modules using the tutorials below.
43 |
44 | Installation
45 | ------------
46 |
47 | 1. Install Docker from Docker's website here: https://www.docker.com/get-started. Follow
48 | instructions on that link to get Docker set up properly on your workstation.
49 |
50 | 2. Install the Docker Engine Utility for NVIDIA GPUs, AKA nvidia-docker. You can find installation
51 | instructions at their Github page, here: https://github.com/NVIDIA/nvidia-docker
52 |
53 | 3. Pull the DeepNeuro Docker container from
54 | https://hub.docker.com/r/qtimlab/deepneuro\_segment\_gbm/. Use the command "docker pull
55 | qtimlab/deepneuro"
56 |
57 | 4. If you want to run DeepNeuro outside of a Docker container, you can install the DeepNeuro Python
58 | package locally using the pip package manager. On the command line, run ``pip install deepneuro``
59 |
60 | Tutorials
61 | ---------
62 |
63 | .. raw:: html
64 |
65 |
66 |
67 | .. raw:: html
68 |
69 |
70 |
71 |
72 |
73 | .. raw:: html
74 |
75 |
76 |
77 |
78 |
79 | .. raw:: html
80 |
81 |
82 |
83 | Modules
84 | -------
85 |
86 | .. raw:: html
87 |
88 |
89 |
90 | .. raw:: html
91 |
92 |
93 |
94 |
95 |
96 | .. raw:: html
97 |
98 |
99 |
100 |
101 |
102 | .. raw:: html
103 |
104 |
105 |
106 |
107 |
108 | .. raw:: html
109 |
110 |
111 |
112 | Contact
113 | -------
114 |
115 | DeepNeuro is under active development, and you may run into errors or want additional features. Send
116 | any questions or requests for methods to abeers@mgh.harvard.edu. You can also submit a Github issue
117 | if you run into a bug.
118 |
119 | Citation
120 | --------
121 |
122 | If you use DeepNeuro in your published work, please cite:
123 |
124 | Beers, A., Brown, J., Chang, K., Hoebel, K., Gerstner, E., Rosen, B., & Kalpathy-Cramer, J. (2018).
125 | DeepNeuro: an open-source deep learning toolbox for neuroimaging. arXiv preprint arXiv:1808.04589.
126 |
127 | @article{beers2018deepneuro, title={DeepNeuro: an open-source deep learning toolbox for
128 | neuroimaging}, author={Beers, Andrew and Brown, James and Chang, Ken and Hoebel, Katharina and
129 | Gerstner, Elizabeth and Rosen, Bruce and Kalpathy-Cramer, Jayashree}, journal={arXiv preprint
130 | arXiv:1808.04589}, year={2018} }
131 |
132 | Acknowledgements
133 | ----------------
134 |
135 | The Center for Clinical Data Science at Massachusetts General Hospital and the Brigham and Women's
136 | Hospital provided technical and hardware support for the development of DeepNeuro, including access
137 | to graphics processing units. The DeepNeuro project is also indebted to the 3D UNet Github
138 | repository by user ellisdg, which formed the original kernel for much of its code in
139 | early stages. Long live open source deep learning :)
140 |
141 | .. |Build Status| image:: https://travis-ci.org/QTIM-Lab/DeepNeuro.svg?branch=master
142 | :target: https://travis-ci.org/QTIM-Lab/DeepNeuro
143 |
--------------------------------------------------------------------------------
/SECURITY.md:
--------------------------------------------------------------------------------
1 | # Security Policy
2 |
3 | ## Reporting a Vulnerability
4 |
5 | If you have found a security vulnerability with DeepNeuro, please report it to qtim.lab@gmail.com. We will address your concerns as promptly as possible.
6 |
--------------------------------------------------------------------------------
/conf.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #
3 | # Configuration file for the Sphinx documentation builder.
4 | #
5 | # This file does only contain a selection of the most common options. For a
6 | # full list see the documentation:
7 | # http://www.sphinx-doc.org/en/master/config
8 |
9 | # -- Path setup --------------------------------------------------------------
10 |
11 | # If extensions (or modules to document with autodoc) are in another directory,
12 | # add these directories to sys.path here. If the directory is relative to the
13 | # documentation root, use os.path.abspath to make it absolute, like shown here.
14 | #
15 | # import os
16 | # import sys
17 | # sys.path.insert(0, os.path.abspath('.'))
18 |
19 |
20 | # -- Project information -----------------------------------------------------
21 |
22 | project = 'DeepNeuro'
23 | copyright = '2018, Andrew Beers, the Quantitative Tumor Imaging Lab @ The Martinos Center'
24 | author = 'Andrew Beers, the Quantitative Tumor Imaging Lab @ The Martinos Center'
25 |
26 | # The short X.Y version
27 | version = ''
28 | # The full version, including alpha/beta/rc tags
29 | release = '0.1.2'
30 |
31 |
32 | # -- General configuration ---------------------------------------------------
33 |
34 | # If your documentation needs a minimal Sphinx version, state it here.
35 | #
36 | # needs_sphinx = '1.0'
37 |
38 | # Add any Sphinx extension module names here, as strings. They can be
39 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 | # ones.
41 | extensions = [
42 | 'sphinx.ext.autodoc',
43 | 'sphinx.ext.doctest',
44 | 'sphinx.ext.mathjax',
45 | 'sphinx.ext.viewcode',
46 | 'sphinx.ext.githubpages',
47 | 'numpydoc'
48 | ]
49 |
50 | # Add any paths that contain templates here, relative to this directory.
51 | # templates_path = ['_templates']
52 |
53 | # The suffix(es) of source filenames.
54 | # You can specify multiple suffix as a list of string:
55 | #
56 | # source_suffix = ['.rst', '.md']
57 | source_suffix = '.rst'
58 |
59 | # The master toctree document.
60 | master_doc = 'index'
61 |
62 | # The language for content autogenerated by Sphinx. Refer to documentation
63 | # for a list of supported languages.
64 | #
65 | # This is also used if you do content translation via gettext catalogs.
66 | # Usually you set "language" from the command line for these cases.
67 | language = None
68 |
69 | # List of patterns, relative to source directory, that match files and
70 | # directories to ignore when looking for source files.
71 | # This pattern also affects html_static_path and html_extra_path.
72 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
73 |
74 | # The name of the Pygments (syntax highlighting) style to use.
75 | pygments_style = None
76 |
77 |
78 | # -- Options for HTML output -------------------------------------------------
79 |
80 | # The theme to use for HTML and HTML Help pages. See the documentation for
81 | # a list of builtin themes.
82 | #
83 | html_theme = 'sphinx_rtd_theme'
84 |
85 | # Theme options are theme-specific and customize the look and feel of a theme
86 | # further. For a list of options available for each theme, see the
87 | # documentation.
88 | #
89 | # html_theme_options = {}
90 |
91 | # Add any paths that contain custom static files (such as style sheets) here,
92 | # relative to this directory. They are copied after the builtin static files,
93 | # so a file named "default.css" will overwrite the builtin "default.css".
94 | html_static_path = ['_static']
95 |
96 | # Custom sidebar templates, must be a dictionary that maps document names
97 | # to template names.
98 | #
99 | # The default sidebars (for documents that don't match any pattern) are
100 | # defined by theme itself. Builtin themes are using these templates by
101 | # default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
102 | # 'searchbox.html']``.
103 | #
104 | # html_sidebars = {}
105 |
106 |
107 | # -- Options for HTMLHelp output ---------------------------------------------
108 |
109 | # Output file base name for HTML help builder.
110 | htmlhelp_basename = 'DeepNeurodoc'
111 |
112 |
113 | # -- Options for LaTeX output ------------------------------------------------
114 |
115 | latex_elements = {
116 | # The paper size ('letterpaper' or 'a4paper').
117 | #
118 | # 'papersize': 'letterpaper',
119 |
120 | # The font size ('10pt', '11pt' or '12pt').
121 | #
122 | # 'pointsize': '10pt',
123 |
124 | # Additional stuff for the LaTeX preamble.
125 | #
126 | # 'preamble': '',
127 |
128 | # Latex figure (float) alignment
129 | #
130 | # 'figure_align': 'htbp',
131 | }
132 |
133 | # Grouping the document tree into LaTeX files. List of tuples
134 | # (source start file, target name, title,
135 | # author, documentclass [howto, manual, or own class]).
136 | latex_documents = [
137 | (master_doc, 'DeepNeuro.tex', 'DeepNeuro Documentation',
138 | 'Andrew Beers, the Quantitative Tumor Imaging Lab @ The Martinos Center', 'manual'),
139 | ]
140 |
141 |
142 | # -- Options for manual page output ------------------------------------------
143 |
144 | # One entry per manual page. List of tuples
145 | # (source start file, name, description, authors, manual section).
146 | man_pages = [
147 | (master_doc, 'deepneuro', 'DeepNeuro Documentation',
148 | [author], 1)
149 | ]
150 |
151 |
152 | # -- Options for Texinfo output ----------------------------------------------
153 |
154 | # Grouping the document tree into Texinfo files. List of tuples
155 | # (source start file, target name, title, author,
156 | # dir menu entry, description, category)
157 | texinfo_documents = [
158 | (master_doc, 'DeepNeuro', 'DeepNeuro Documentation',
159 | author, 'DeepNeuro', 'One line description of project.',
160 | 'Miscellaneous'),
161 | ]
162 |
163 |
164 | # -- Options for Epub output -------------------------------------------------
165 |
166 | # Bibliographic Dublin Core info.
167 | epub_title = project
168 |
169 | # The unique identifier of the text. This can be a ISBN number
170 | # or the project homepage.
171 | #
172 | # epub_identifier = ''
173 |
174 | # A unique identification for the text.
175 | #
176 | # epub_uid = ''
177 |
178 | # A list of files that should not be packed into the epub file.
179 | epub_exclude_files = ['search.html']
180 |
181 |
182 | # -- Extension configuration -------------------------------------------------
--------------------------------------------------------------------------------
/coverage_wrapper.py:
--------------------------------------------------------------------------------
1 | # A wrapper script to monitor coverage across all tests
2 |
3 | from coverage import coverage
4 | import pytest
5 |
6 | cov = coverage(omit='.tox*')
7 | cov.start()
8 |
9 | # Tests to run
10 | # Pytest will crawl through the project directory for test files.
11 | pytest.main()
12 |
13 | cov.stop()
14 | cov.save()
--------------------------------------------------------------------------------
/deepneuro/__init__.py:
--------------------------------------------------------------------------------
1 | import warnings
2 |
3 | warnings.filterwarnings("ignore", message=".*dtype size changed.*")
4 | warnings.filterwarnings("ignore", message=".*keepdims.*")
5 | warnings.filterwarnings("ignore", message=".*pydicom.*")
6 | warnings.filterwarnings("ignore", message=".*maximum recommended rowsize.*")
--------------------------------------------------------------------------------
/deepneuro/augmentation/__init__.py:
--------------------------------------------------------------------------------
1 | from .subsample import ExtractPatches
2 | from .augment import Flip_Rotate_2D, Shift_Squeeze_Intensities, Flip_Rotate_3D, MaskData
--------------------------------------------------------------------------------
/deepneuro/container/Dockerfile.deepneuro:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
2 | LABEL maintainer "Andrew Beers "
3 |
4 | ARG TENSORFLOW_VERSION=1.11.0
5 | ARG TENSORFLOW_ARCH=gpu
6 | ARG KERAS_VERSION=2.2.4
7 |
8 | # Install some dependencies
9 | # Install basic packages and miscellaneous dependencies
10 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
11 | liblapack-dev \
12 | libopenblas-dev \
13 | libzmq3-dev \
14 | python3 \
15 | python3-dev \
16 | python3-pip \
17 | python3-setuptools \
18 | python3-tk
19 |
20 | # Install Pillow (PIL) dependencies
21 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
22 | libfreetype6-dev \
23 | libjpeg-dev \
24 | liblcms2-dev \
25 | libopenjpeg-dev \
26 | libpng12-dev \
27 | libtiff5-dev \
28 | libwebp-dev \
29 | zlib1g-dev
30 |
31 | # Install support functions
32 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
33 | curl \
34 | git \
35 | wget \
36 | cmake
37 |
38 | # Cleanup Installs
39 | RUN apt-get clean && \
40 | apt-get autoremove && \
41 | rm -rf /var/lib/apt/lists/* && \
42 | # Link BLAS library to use OpenBLAS using the alternatives mechanism (https://www.scipy.org/scipylib/building/linux.html#debian-ubuntu)
43 | update-alternatives --set libblas.so.3 /usr/lib/openblas-base/libblas.so.3
44 |
45 | # Install pip
46 | RUN pip3 install --upgrade \
47 | setuptools \
48 | pip
49 |
50 | # Add SNI support to Python
51 | RUN pip3 --no-cache-dir install \
52 | pyopenssl \
53 | ndg-httpsclient \
54 | pyasn1
55 |
56 | # Install other useful Python packages using pip
57 | RUN pip3 --no-cache-dir install --upgrade ipython && \
58 | pip3 --no-cache-dir install \
59 | Cython \
60 | ipykernel \
61 | jupyter \
62 | path.py \
63 | Pillow \
64 | pygments \
65 | six \
66 | sphinx \
67 | wheel \
68 | zmq \
69 | && \
70 | python3 -m ipykernel.kernelspec
71 |
72 | # Install TensorFlow
73 | # For specific installations -- TODO, peg a version of Tensorflow.
74 | # RUN pip --no-cache-dir install \
75 | # https://storage.googleapis.com/tensorflow/linux/${TENSORFLOW_ARCH}/tensorflow_${TENSORFLOW_ARCH}-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
76 | # Generic.
77 | RUN pip3 --no-cache-dir install tensorflow-gpu==${TENSORFLOW_VERSION}
78 |
79 | # Install Keras
80 | RUN pip --no-cache-dir install git+git://github.com/fchollet/keras.git@${KERAS_VERSION}
81 |
82 | # Install Additional Packages for DeepNeuro
83 | RUN apt-get update -y
84 | RUN apt-get install graphviz -y
85 | RUN pip3 --no-cache-dir install pydot
86 | RUN pip3 --no-cache-dir install pandas --upgrade
87 | RUN pip3 --no-cache-dir install numexpr --upgrade
88 | RUN pip3 --no-cache-dir install nibabel pydicom lycon tqdm pynrrd tables imageio matplotlib
89 |
90 | # Install Slicer
91 | RUN SLICER_URL="http://download.slicer.org/bitstream/561384" && \
92 | curl -v -s -L $SLICER_URL | tar xz -C /tmp && \
93 | mv /tmp/Slicer* /opt/slicer
94 |
95 | # Install ANTS
96 | # WORKDIR /home
97 | # RUN wget "https://github.com/stnava/ANTs/releases/download/v2.1.0/Linux_Ubuntu14.04.tar.bz2" && \
98 | # DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes bzip2 && \
99 | # tar -C /usr/local -xjf Linux_Ubuntu14.04.tar.bz2 && \
100 | # rm Linux_Ubuntu14.04.tar.bz2
101 |
102 | # Python 2.7
103 | WORKDIR /usr/src
104 | ENV PYTHON_VERSION 2.7.10
105 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
106 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
107 | cd Python-${PYTHON_VERSION} && \
108 | ./configure && \
109 | make -j$(grep -c processor /proc/cpuinfo) && \
110 | make install && \
111 | cd .. && rm -rf Python-${PYTHON_VERSION}*
112 |
113 | # Build and install dcmqi (note: the Python 2.7 build block below is a duplicate of the one above)
114 |
115 | WORKDIR /usr/src
116 | ENV PYTHON_VERSION 2.7.10
117 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
118 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
119 | cd Python-${PYTHON_VERSION} && \
120 | ./configure && \
121 | make -j$(grep -c processor /proc/cpuinfo) && \
122 | make install && \
123 | cd .. && rm -rf Python-${PYTHON_VERSION}*
124 |
125 | WORKDIR /usr/src
126 | RUN git clone https://github.com/QIICR/dcmqi.git && \
127 | mkdir dcmqi-superbuild && \
128 | cd dcmqi-superbuild && \
129 | cmake -DCMAKE_INSTALL_PREFIX=/usr ../dcmqi && \
130 | make -j$(grep -c processor /proc/cpuinfo)
131 |
132 | # Environmental Variables
133 | ENV PATH "$PATH:/opt/slicer"
134 | ENV PATH "$PATH:/usr/local/ANTs.2.1.0.Debian-Ubuntu_X64"
135 |
136 | # Install DeepNeuro. Scikit-image has installation problems with EasyInstall and setup.py
137 | RUN git clone https://github.com/QTIM-Lab/DeepNeuro /home/DeepNeuro
138 | WORKDIR /home/DeepNeuro
139 |
140 | # TODO: Check if Docker has solved this checkpointing problem.
141 | RUN echo 60
142 | RUN git pull
143 | RUN python3 /home/DeepNeuro/setup.py develop
144 |
145 | # Commands at startup.
146 | WORKDIR "/"
147 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh
148 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]
149 |
--------------------------------------------------------------------------------
/deepneuro/container/Dockerfile.deepneuro_development:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
2 | LABEL maintainer "Andrew Beers "
3 |
4 | ARG TENSORFLOW_VERSION=1.11.0
5 | ARG TENSORFLOW_ARCH=gpu
6 | ARG KERAS_VERSION=2.2.4
7 |
8 | # Install some dependencies
9 | # Install basic packages and miscellaneous dependencies
10 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
11 | liblapack-dev \
12 | libopenblas-dev \
13 | libzmq3-dev \
14 | python3 \
15 | python3-dev \
16 | python3-pip \
17 | python3-setuptools \
18 | python3-tk
19 |
20 | # Install Pillow (PIL) dependencies
21 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
22 | libfreetype6-dev \
23 | libjpeg-dev \
24 | liblcms2-dev \
25 | libopenjpeg-dev \
26 | libpng12-dev \
27 | libtiff5-dev \
28 | libwebp-dev \
29 | zlib1g-dev
30 |
31 | # Install support functions
32 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
33 | curl \
34 | git \
35 | wget \
36 | cmake
37 |
38 | # Cleanup Installs
39 | RUN apt-get clean && \
40 | apt-get autoremove && \
41 | rm -rf /var/lib/apt/lists/* && \
42 | # Link BLAS library to use OpenBLAS using the alternatives mechanism (https://www.scipy.org/scipylib/building/linux.html#debian-ubuntu)
43 | update-alternatives --set libblas.so.3 /usr/lib/openblas-base/libblas.so.3
44 |
45 | # Install pip
46 | RUN pip3 install --upgrade \
47 | setuptools \
48 | pip
49 |
50 | # Add SNI support to Python
51 | RUN pip3 --no-cache-dir install \
52 | pyopenssl \
53 | ndg-httpsclient \
54 | pyasn1
55 |
56 | # Install other useful Python packages using pip
57 | RUN pip3 --no-cache-dir install --upgrade ipython && \
58 | pip3 --no-cache-dir install \
59 | Cython \
60 | ipykernel \
61 | jupyter \
62 | path.py \
63 | Pillow \
64 | pygments \
65 | six \
66 | sphinx \
67 | wheel \
68 | zmq \
69 | && \
70 | python3 -m ipykernel.kernelspec
71 |
72 | # Install TensorFlow
73 | # For specific installations -- TODO, peg a version of Tensorflow.
74 | # RUN pip --no-cache-dir install \
75 | # https://storage.googleapis.com/tensorflow/linux/${TENSORFLOW_ARCH}/tensorflow_${TENSORFLOW_ARCH}-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
76 | # Generic.
77 | RUN pip3 --no-cache-dir install tensorflow-gpu
78 |
79 | # Install Keras
80 | RUN pip --no-cache-dir install git+git://github.com/fchollet/keras.git@${KERAS_VERSION}
81 |
82 | # Install Additional Packages for DeepNeuro
83 | RUN apt-get update -y
84 | RUN apt-get install graphviz -y
85 | RUN pip3 --no-cache-dir install pydot
86 | RUN pip3 --no-cache-dir install pandas --upgrade
87 | RUN pip3 --no-cache-dir install numexpr --upgrade
88 | RUN pip3 --no-cache-dir install nibabel pydicom lycon tqdm pynrrd tables imageio matplotlib
89 |
90 | # Install Slicer
91 | RUN SLICER_URL="http://download.slicer.org/bitstream/561384" && \
92 | curl -v -s -L $SLICER_URL | tar xz -C /tmp && \
93 | mv /tmp/Slicer* /opt/slicer
94 |
95 | # Install ANTS
96 | # WORKDIR /home
97 | # RUN wget "https://github.com/stnava/ANTs/releases/download/v2.1.0/Linux_Ubuntu14.04.tar.bz2" && \
98 | # DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes bzip2 && \
99 | # tar -C /usr/local -xjf Linux_Ubuntu14.04.tar.bz2 && \
100 | # rm Linux_Ubuntu14.04.tar.bz2
101 |
102 | # Python 2.7
103 | WORKDIR /usr/src
104 | ENV PYTHON_VERSION 2.7.10
105 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
106 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
107 | cd Python-${PYTHON_VERSION} && \
108 | ./configure && \
109 | make -j$(grep -c processor /proc/cpuinfo) && \
110 | make install && \
111 | cd .. && rm -rf Python-${PYTHON_VERSION}*
112 |
113 | # Build and install dcmqi (note: the Python 2.7 build block below is a duplicate of the one above)
114 |
115 | WORKDIR /usr/src
116 | ENV PYTHON_VERSION 2.7.10
117 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
118 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
119 | cd Python-${PYTHON_VERSION} && \
120 | ./configure && \
121 | make -j$(grep -c processor /proc/cpuinfo) && \
122 | make install && \
123 | cd .. && rm -rf Python-${PYTHON_VERSION}*
124 |
125 | WORKDIR /usr/src
126 | RUN git clone https://github.com/QIICR/dcmqi.git && \
127 | mkdir dcmqi-superbuild && \
128 | cd dcmqi-superbuild && \
129 | cmake -DCMAKE_INSTALL_PREFIX=/usr ../dcmqi && \
130 | make -j$(grep -c processor /proc/cpuinfo)
131 |
132 | # Environmental Variables
133 | ENV PATH "$PATH:/opt/slicer"
134 | ENV PATH "$PATH:/usr/local/ANTs.2.1.0.Debian-Ubuntu_X64"
135 |
136 | # Install DeepNeuro. Scikit-image has installation problems with EasyInstall and setup.py
137 | RUN git clone https://github.com/QTIM-Lab/DeepNeuro /home/DeepNeuro
138 | WORKDIR /home/DeepNeuro
139 |
140 | # Copy in models -- do this with Python module in future.
141 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_GBM
142 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_GBM/Segment_GBM_Wholetumor_Model.h5 "https://www.dropbox.com/s/bnbdi1yogq2yye3/GBM_Wholetumor_Public.h5?dl=1"
143 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_GBM/Segment_GBM_Enhancing_Model.h5 "https://www.dropbox.com/s/hgsqi0vj7cfuk1g/GBM_Enhancing_Public.h5?dl=1"
144 |
145 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/SkullStripping
146 | RUN wget -O /home/DeepNeuro/deepneuro/load/SkullStripping/Skullstrip_MRI_Model.h5 "https://www.dropbox.com/s/cucffmytzhp5byn/Skullstrip_MRI_Model.h5?dl=1"
147 |
148 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_Mets
149 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_Mets/Segment_Mets_Model.h5 "https://www.dropbox.com/s/j11t9jtjhzcp3ny/Brain_Mets_Segmentation_Model.h5?dl=1"
150 |
151 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_Ischemic_Stroke
152 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_Ischemic_Stroke/Ischemic_Stroke_Model.h5 "https://www.dropbox.com/s/4qpxvfac204xzhf/Ischemic_Stroke_Segmentation_Model.h5?dl=1"
153 |
154 | # TODO: Check if Docker has solved this checkpointing problem.
155 | RUN echo 61
156 | RUN git pull
157 |
158 | RUN git branch -f winter_development_2018 origin/winter_development_2018
159 | RUN git checkout winter_development_2018
160 | RUN python3 /home/DeepNeuro/setup.py develop
161 |
162 | # Commands at startup.
163 | WORKDIR "/"
164 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh
165 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]
166 |
--------------------------------------------------------------------------------
/deepneuro/container/Dockerfile.deepneuro_development_nopackage:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
2 | LABEL maintainer "Andrew Beers "
3 |
4 | ARG TENSORFLOW_VERSION=1.11.0
5 | ARG TENSORFLOW_ARCH=gpu
6 | ARG KERAS_VERSION=2.2.4
7 |
8 | # Install some dependencies
9 | # Install basic packages and miscellaneous dependencies
10 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
11 | liblapack-dev \
12 | libopenblas-dev \
13 | libzmq3-dev \
14 | python3 \
15 | python3-dev \
16 | python3-pip \
17 | python3-setuptools \
18 | python3-tk
19 |
20 | # Install Pillow (PIL) dependencies
21 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
22 | libfreetype6-dev \
23 | libjpeg-dev \
24 | liblcms2-dev \
25 | libopenjpeg-dev \
26 | libpng12-dev \
27 | libtiff5-dev \
28 | libwebp-dev \
29 | zlib1g-dev
30 |
31 | # Install support functions
32 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
33 | curl \
34 | git \
35 | wget \
36 | cmake
37 |
38 | # Cleanup Installs
39 | RUN apt-get clean && \
40 | apt-get autoremove && \
41 | rm -rf /var/lib/apt/lists/* && \
42 | # Link BLAS library to use OpenBLAS using the alternatives mechanism (https://www.scipy.org/scipylib/building/linux.html#debian-ubuntu)
43 | update-alternatives --set libblas.so.3 /usr/lib/openblas-base/libblas.so.3
44 |
45 | # Install pip
46 | RUN pip3 install --upgrade \
47 | setuptools \
48 | pip
49 |
50 | # Add SNI support to Python
51 | RUN pip3 --no-cache-dir install \
52 | pyopenssl \
53 | ndg-httpsclient \
54 | pyasn1
55 |
56 | # Install other useful Python packages using pip
57 | RUN pip3 --no-cache-dir install --upgrade ipython && \
58 | pip3 --no-cache-dir install \
59 | Cython \
60 | ipykernel \
61 | jupyter \
62 | path.py \
63 | Pillow \
64 | pygments \
65 | six \
66 | sphinx \
67 | wheel \
68 | zmq \
69 | && \
70 | python3 -m ipykernel.kernelspec
71 |
72 | # Install TensorFlow
73 | # For specific installations -- TODO, peg a version of Tensorflow.
74 | # RUN pip --no-cache-dir install \
75 | # https://storage.googleapis.com/tensorflow/linux/${TENSORFLOW_ARCH}/tensorflow_${TENSORFLOW_ARCH}-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
76 | # Generic.
77 | RUN pip3 --no-cache-dir install tensorflow-gpu
78 |
79 | # Install Keras
80 | RUN pip --no-cache-dir install git+git://github.com/fchollet/keras.git@${KERAS_VERSION}
81 |
82 | # Install Additional Packages for DeepNeuro
83 | RUN apt-get update -y
84 | RUN apt-get install graphviz -y
85 | RUN pip3 --no-cache-dir install pydot
86 | RUN pip3 --no-cache-dir install pandas --upgrade
87 | RUN pip3 --no-cache-dir install numexpr --upgrade
88 | RUN pip3 --no-cache-dir install nibabel pydicom lycon tqdm pynrrd tables imageio matplotlib
89 |
90 | # Install Slicer
91 | RUN SLICER_URL="http://download.slicer.org/bitstream/561384" && \
92 | curl -v -s -L $SLICER_URL | tar xz -C /tmp && \
93 | mv /tmp/Slicer* /opt/slicer
94 |
95 | # Install ANTS
96 | # WORKDIR /home
97 | # RUN wget "https://github.com/stnava/ANTs/releases/download/v2.1.0/Linux_Ubuntu14.04.tar.bz2" && \
98 | # DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes bzip2 && \
99 | # tar -C /usr/local -xjf Linux_Ubuntu14.04.tar.bz2 && \
100 | # rm Linux_Ubuntu14.04.tar.bz2
101 |
102 | # Python 2.7
103 | WORKDIR /usr/src
104 | ENV PYTHON_VERSION 2.7.10
105 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
106 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
107 | cd Python-${PYTHON_VERSION} && \
108 | ./configure && \
109 | make -j$(grep -c processor /proc/cpuinfo) && \
110 | make install && \
111 | cd .. && rm -rf Python-${PYTHON_VERSION}*
112 |
113 | # Build and install dcmqi (note: the Python 2.7 build block below is a duplicate of the one above)
114 |
115 | WORKDIR /usr/src
116 | ENV PYTHON_VERSION 2.7.10
117 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
118 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
119 | cd Python-${PYTHON_VERSION} && \
120 | ./configure && \
121 | make -j$(grep -c processor /proc/cpuinfo) && \
122 | make install && \
123 | cd .. && rm -rf Python-${PYTHON_VERSION}*
124 |
125 | WORKDIR /usr/src
126 | RUN git clone https://github.com/QIICR/dcmqi.git && \
127 | mkdir dcmqi-superbuild && \
128 | cd dcmqi-superbuild && \
129 | cmake -DCMAKE_INSTALL_PREFIX=/usr ../dcmqi && \
130 | make -j$(grep -c processor /proc/cpuinfo)
131 |
132 | # Environmental Variables
133 | ENV PATH "$PATH:/opt/slicer"
134 | ENV PATH "$PATH:/usr/local/ANTs.2.1.0.Debian-Ubuntu_X64"
135 |
136 | # Commands at startup.
137 | WORKDIR "/"
138 |
--------------------------------------------------------------------------------
/deepneuro/container/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/container/__init__.py
--------------------------------------------------------------------------------
/deepneuro/container/container_cli.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | from subprocess import call
4 |
5 | from deepneuro.utilities.util import quotes
6 |
7 |
8 | def docker_wrapper(**kwargs):
9 |
10 | return
11 |
12 |
13 | def nvidia_docker_wrapper(command, cli_args=None, filename_args=None, interactive=False, docker_container='deepneuro'):
14 |
15 | if filename_args is not None:
16 | filename_args = [arg for arg in filename_args if cli_args[arg] is not None]
17 | mounted_dir = os.path.abspath(os.path.dirname(os.path.commonprefix([cli_args[arg] for arg in filename_args])))
18 | for arg in filename_args:
19 | after_mounted = os.path.abspath(cli_args[arg]).split(mounted_dir, 1)[1].lstrip(os.sep)
20 | cli_args[arg] = quotes(os.path.join('/INPUT_DATA', after_mounted))
21 | else:
22 | pass # TODO: Default behavior when mounted directory not needed.
23 |
24 | if interactive:
25 | docker_command = ['nvidia-docker', 'run', '-it', '-v', mounted_dir + ':/INPUT_DATA', docker_container, 'bash']
26 |
27 | else:
28 | docker_command = ['nvidia-docker', 'run', '--rm', '-v', mounted_dir + ':/INPUT_DATA', docker_container] + command
29 |
30 | # This presumes everything is an optional arg, which is wrong.
31 | # Rewrite this to accept command line strings, instead of Python variables.
32 | # Would solve problem..
33 | for arg in cli_args:
34 | if cli_args[arg] is True:
35 | docker_command += ['-' + str(arg)]
36 | elif not cli_args[arg] or cli_args[arg] is None:
37 | continue
38 | else:
39 | docker_command += ['-' + str(arg) + ' ' + cli_args[arg]]
40 |
41 | call(' '.join(docker_command), shell=True)
42 |
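# A hedged invocation sketch (the argument values below are hypothetical): the
# common parent directory of the filename_args paths is mounted at /INPUT_DATA,
# and the remaining cli_args are forwarded to the container as single-dash flags:
#
#     nvidia_docker_wrapper(['segment_gbm', 'pipeline'],
#                           cli_args={'T1POST': '/data/T1post.nii.gz', 'output_folder': '/data/out'},
#                           filename_args=['T1POST', 'output_folder'])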
43 |
44 | def singularity_wrapper(command, cli_args=None, filename_args=None, interactive=False, container='deepneuro'):
45 |
46 | raise NotImplementedError  # Everything below is an unreached copy of nvidia_docker_wrapper and still invokes nvidia-docker
47 |
48 | if filename_args is not None:
49 | filename_args = [arg for arg in filename_args if cli_args[arg] is not None]
50 | mounted_dir = os.path.abspath(os.path.dirname(os.path.commonprefix([cli_args[arg] for arg in filename_args])))
51 | for arg in filename_args:
52 | after_mounted = os.path.abspath(cli_args[arg]).split(mounted_dir, 1)[1].lstrip(os.sep)
53 | cli_args[arg] = quotes(os.path.join('/INPUT_DATA', after_mounted))
54 | else:
55 | pass # TODO: Default behavior when mounted directory not needed.
56 |
57 | if interactive:
58 | docker_command = ['nvidia-docker', 'run', '-it', '-v', mounted_dir + ':/INPUT_DATA', container, 'bash']
59 |
60 | else:
61 | docker_command = ['nvidia-docker', 'run', '--rm', '-v', mounted_dir + ':/INPUT_DATA', container] + command
62 |
63 | # This presumes everything is an optional arg, which is wrong.
64 | # Rewrite this to accept command line strings, instead of Python variables.
65 | # Would solve problem..
66 | for arg in cli_args:
67 | if cli_args[arg] is True:
68 | docker_command += ['-' + str(arg)]
69 | elif not cli_args[arg] or cli_args[arg] is None:
70 | continue
71 | else:
72 | docker_command += ['-' + str(arg) + ' ' + cli_args[arg]]
73 |
74 | call(' '.join(docker_command), shell=True)
--------------------------------------------------------------------------------
/deepneuro/container/deploy.py:
--------------------------------------------------------------------------------
1 |
2 | def build_all_pipeline_dockers():
3 |
4 | return
5 |
6 |
7 | if __name__ == '__main__':
8 |
9 | pass
--------------------------------------------------------------------------------
/deepneuro/core.py:
--------------------------------------------------------------------------------
1 | from .preprocessing import *
2 | from .postprocessing import *
3 | from .utilities import *
4 | from .outputs import *
5 | from .models import *
6 | from .load import *
7 | from .data import *
8 | from .augmentation import *
--------------------------------------------------------------------------------
/deepneuro/data/__init__.py:
--------------------------------------------------------------------------------
1 | from .data_collection import DataCollection
--------------------------------------------------------------------------------
/deepneuro/data/data_group.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from deepneuro.utilities.conversion import read_image_files
4 |
5 |
6 | class DataGroup(object):
7 |
8 | def __init__(self, label):
9 |
10 | """ Inconsistent behavior when reading from HDF5 or filepaths in DataGroups.
11 | """
12 |
13 | self.label = label
14 | self.augmentations = []
15 | self.data = {}
16 | self.cases = []
17 | self.case_num = 0
18 |
19 | # HDF5 variables
20 | self.source = None
21 | self.data_casenames = None
22 | self.data_affines = None
23 |
24 | # TODO: More distinctive naming for "base" and "current" cases.
25 | self.preprocessed_case = None
26 | self.preprocessed_affine = None
27 | self.base_case = None
28 | self.base_affine = None
29 |
30 | self.augmentation_cases = [None]
31 | self.augmentation_strings = ['']
32 | self.preprocessing_data = []
33 |
34 | self.data_storage = None
35 | self.casename_storage = None
36 | self.affine_storage = None
37 |
38 | self.output_shape = None
39 | self.base_shape = None
40 |
41 | def add_case(self, case_name, item):
42 | self.data[case_name] = item
43 | self.cases.append(case_name)
44 |
45 | def get_shape(self):
46 |
47 | # TODO: Add support for non-nifti files.
48 | # Also this is not good. Perhaps specify shape in input?
49 |
50 | if self.output_shape is None:
51 | if self.data == {}:
52 | print('No Data!')
53 | return (0,)
54 | elif self.base_shape is None:
55 | if self.source == 'hdf5':
56 | self.base_shape = self.data[0].shape
57 | else:
58 | self.base_shape = read_image_files(list(self.data.values())[0]).shape
59 | self.output_shape = self.base_shape
60 | else:
61 | return None
62 |
63 | return self.output_shape
64 |
65 | # @profile
66 | def get_data(self, index=None, return_affine=False):
67 |
68 | """ Wonky behavior reading from hdf5 here.
69 | """
70 |
71 | if self.source == 'hdf5':
72 | preprocessed_case = self.data[index][:][np.newaxis][0]
73 |
74 | # Storing affines needs work. How not to duplicate affines in case
75 | # of augmentation, for example?
76 | if self.data_affines is not None:
77 | if self.data_affines.shape[0] == 0:
78 | preprocessed_affine = None
79 | else:
80 | preprocessed_affine = self.data_affines[index]
81 | else:
82 | preprocessed_affine = None
83 | else:
84 | if type(self.preprocessed_case) is np.ndarray:
85 | preprocessed_case, preprocessed_affine = self.preprocessed_case, self.preprocessed_affine
86 | else:
87 | preprocessed_case, preprocessed_affine = read_image_files(self.preprocessed_case, return_affine=True)
88 |
89 | if return_affine:
90 | return preprocessed_case, preprocessed_affine
91 | else:
92 | return preprocessed_case
93 |
94 | return None
95 |
96 | def convert_to_array_data(self):
97 |
98 | self.preprocessed_case, affine = read_image_files(self.preprocessed_case, return_affine=True)
99 |
100 | if affine is not None:
101 | self.preprocessed_affine = affine
102 |
103 | # @profile
104 | def write_to_storage(self):
105 |
106 | # if self.base_case.shape != self.data_storage.shape:
107 | # print(self.base_case.shape, self.data_storage.shape)
108 | # print("""Attempting to write data to HDF5 with incorrect shape. Are you attempting to save different size data inputs to HDF5? DeepNeuro currently only saves equal size inputs/""")
109 | # raise ValueError
110 |
111 | if len(self.augmentation_cases) == 1:
112 | self.data_storage.append(self.base_case)
113 | else:
114 | self.data_storage.append(self.augmentation_cases[-1])
115 |
116 | self.casename_storage.append(np.array(bytes(self.base_casename, 'utf-8'))[np.newaxis][np.newaxis])
117 |
118 | if self.base_affine is not None:
119 | self.affine_storage.append(self.base_affine[:][np.newaxis])
120 |
--------------------------------------------------------------------------------
/deepneuro/data/data_split.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | def split_data(input_data_collection):
4 |
5 | return
6 |
7 |
8 | if __name__ == '__main__':
9 |
10 | pass
--------------------------------------------------------------------------------
/deepneuro/data/data_utilities.py:
--------------------------------------------------------------------------------
1 | import tables
2 |
3 |
4 | def hdf5_transpose(hdf5, output_hdf5, axes):
5 |
6 | open_hdf5 = tables.open_file(hdf5, "r")
7 |
8 | for data_group in open_hdf5.root._f_iter_nodes():
9 | if '_affines' not in data_group.name and '_casenames' not in data_group.name:
10 |
11 | print((data_group.shape))
12 |
13 | return
14 |
15 |
16 | if __name__ == '__main__':
17 | pass  # hdf5_transpose is an unfinished stub; calling it requires (hdf5, output_hdf5, axes) arguments
--------------------------------------------------------------------------------
/deepneuro/data/sampling.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/data/sampling.py
--------------------------------------------------------------------------------
/deepneuro/external/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/external/__init__.py
--------------------------------------------------------------------------------
/deepneuro/interface/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/interface/__init__.py
--------------------------------------------------------------------------------
/deepneuro/interface/master_cli.py:
--------------------------------------------------------------------------------
1 |
2 |
3 | if __name__ == '__main__':
4 | pass
--------------------------------------------------------------------------------
/deepneuro/interface/web_wrapper.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/interface/web_wrapper.py
--------------------------------------------------------------------------------
/deepneuro/load/__init__.py:
--------------------------------------------------------------------------------
1 | from .load import load
--------------------------------------------------------------------------------
/deepneuro/load/load.py:
--------------------------------------------------------------------------------
1 | """ This library loads trained models, sample datasets, and other
2 | assets for use in DeepNeuro. By default, data is loaded into
3 | local directories in the [package_location]/deepneuro/load package
4 | directory.
5 | """
6 |
7 | import zipfile
8 | import os
9 |
10 | from six.moves.urllib.request import urlretrieve
11 |
12 | current_dir = os.path.realpath(os.path.dirname(__file__))
13 |
14 | # Perhaps one day replace this with config files distributed at the level of modules.
15 | data_dict = {'skullstrip_mri': [os.path.join(current_dir, 'SkullStripping', 'Skullstrip_MRI_Model.h5'),
16 | 'https://www.dropbox.com/s/cyddw4tt380u7j9/Skullstrip_MRI_Model.h5?dl=1'],
17 |
18 | 'gbm_wholetumor_mri': [os.path.join(current_dir, 'Segment_GBM', 'Segment_GBM_Wholetumor_Model.h5'),
19 | 'https://www.dropbox.com/s/tmnnowda0vl6rmk/Segment_GBM_Wholetumor_Model.h5?dl=1'],
20 |
21 | 'gbm_enhancingtumor_mri': [os.path.join(current_dir, 'Segment_GBM', 'Segment_GBM_Enhancing_Model.h5'),
22 | 'https://www.dropbox.com/s/gv2hjgrt5ufooew/Segment_GBM_Enhancing_Model.h5?dl=1'],
23 |
24 | 'mets_enhancing': [os.path.join(current_dir, 'Segment_Mets', 'Segment_Mets_Model.h5'),
25 | 'https://www.dropbox.com/s/p4xzes20g0fviye/Brain_Mets_Segmentation_Model.h5?dl=1'],
26 |
27 | 'ischemic_stroke': [os.path.join(current_dir, 'Segment_Ischemic_Stroke', 'Ischemic_Stroke_Model.h5'),
28 | 'https://www.dropbox.com/s/bnydjydg76xu22g/Ischemic_Stroke_Model.h5?dl=1'],
29 |
30 | 'sample_gbm_nifti': [os.path.join(current_dir, 'Sample_Data', 'TCGA_GBM_NIFTI', 'TCGA_GBM_NIFTI.zip'),
31 | 'https://www.dropbox.com/s/bqclpqzwfsreolb/GBM_NIFTI.zip?dl=1'],
32 |
33 | 'sample_gbm_dicom': [os.path.join(current_dir, 'Sample_Data', 'TCGA_GBM_DICOM', 'TCGA_GBM_DICOM.zip'),
34 | 'https://www.dropbox.com/s/mbdq7m0vxutuwcs/GBM_DICOM.zip?dl=1']}
35 |
36 |
37 | def load(dataset, output_datapath=None):
38 |
39 | """ This function loads trained models, sample datasets, and other
40 | assets for use in DeepNeuro. By default, data is loaded into
41 | local directories in the [package_location]/deepneuro/load package
42 | directory.
43 |
44 | Parameters
45 | ----------
46 | dataset : str
47 | Key for dataset to be returned. E.g., "mets_enhancing"
48 | output_datapath: str
49 | Folder to output loaded data into. If None, will be placed in deepneuro directory.
50 |
51 | Returns
52 | -------
53 | str
54 | Output folder
55 | """
56 |
57 | # TODO: Loading progress indicator.
58 |
59 | if output_datapath is None:
60 | dataset_path = data_dict[dataset][0]
61 | else:
62 | dataset_path = os.path.join(output_datapath, os.path.basename(data_dict[dataset][0]))
63 |
64 | if not os.path.exists(os.path.dirname(dataset_path)):
65 | os.makedirs(os.path.dirname(dataset_path))
66 |
67 | if not os.path.exists(dataset_path):
68 |         try:
69 |             urlretrieve(data_dict[dataset][1], dataset_path)
70 |             if dataset_path.endswith('.zip'):
71 |                 zip_ref = zipfile.ZipFile(dataset_path, 'r')
72 |                 zip_ref.extractall(os.path.dirname(dataset_path))
73 |                 zip_ref.close()
74 |                 os.remove(dataset_path)
75 |             if dataset_path.endswith('.tar.gz'):
76 |                 raise NotImplementedError
77 |         except Exception:
78 |             # Remove partially-downloaded files before re-raising.
79 |             if os.path.exists(dataset_path):
80 |                 os.remove(dataset_path)
81 |             raise
82 |
83 | return dataset_path
84 |
85 |
86 | if __name__ == '__main__':
87 | pass
88 |
--------------------------------------------------------------------------------
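A minimal usage sketch for the `load()` function above; the dataset key comes from `data_dict`, and the output folder is illustrative:

```python
from deepneuro.load import load

# Download the brain metastases segmentation model into a scratch folder.
model_path = load('mets_enhancing', output_datapath='./downloads')
print(model_path)  # ./downloads/Segment_Mets_Model.h5
```

Note that for zipped assets, the archive is extracted next to the returned path and the archive itself is deleted.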
/deepneuro/models/__init__.py:
--------------------------------------------------------------------------------
1 | from .unet import UNet
2 | from .model import load_old_model
--------------------------------------------------------------------------------
/deepneuro/models/dn_ops.py:
--------------------------------------------------------------------------------
1 |
2 | import tensorflow as tf
3 |
4 | from keras.layers import UpSampling2D, UpSampling3D, Conv3D, MaxPooling3D, Conv2D, MaxPooling2D, Activation, BatchNormalization, Dropout, ZeroPadding2D, ZeroPadding3D
5 |
6 | from deepneuro.models.ops import pixel_norm_2d, pixel_norm_3d, conv2d, conv3d, deconv2d, deconv3d, upscale2d
7 |
8 |
9 | class DnOp(object):
10 |
11 | def __init__(self):
12 |
13 | return
14 |
15 |
16 | def DnDropout(input_tensor, ratio=.5, backend='tensorflow'):
17 |
18 | if backend == 'keras':
19 |
20 | return Dropout(ratio)(input_tensor)
21 |
22 | if backend == 'tensorflow':
23 |
24 |         return tf.nn.dropout(input_tensor, keep_prob=1 - ratio)  # TF 1.x dropout takes a keep probability, not a drop ratio.
25 |
26 | return
27 |
28 |
29 | def DnBatchNormalization(input_tensor, backend='tensorflow'):
30 |
31 |     if backend == 'keras' or True:  # NOTE: the TensorFlow branch below is currently disabled.
32 |
33 | return BatchNormalization()(input_tensor)
34 |
35 | if backend == 'tensorflow':
36 |
37 | return tf.contrib.layers.batch_norm(input_tensor)
38 |
39 | return
40 |
41 |
42 | def DnMaxPooling(input_tensor, pool_size=(2, 2), dim=2, padding='SAME', backend='tensorflow'):
43 |
44 | op = None
45 |
46 | if backend == 'keras':
47 |
48 | if dim == 2:
49 | return MaxPooling2D(pool_size=pool_size, padding=padding)(input_tensor)
50 | if dim == 3:
51 | return MaxPooling3D(pool_size=pool_size, padding=padding)(input_tensor)
52 |
53 | if backend == 'tensorflow':
54 |
55 | if dim == 2:
56 | op = tf.nn.max_pool(input_tensor, ksize=[1] + list(pool_size) + [1], strides=[1] + list(pool_size) + [1], padding='SAME')
57 | if dim == 3:
58 | op = tf.nn.max_pool3d(input_tensor, ksize=[1] + list(pool_size) + [1], strides=[1] + list(pool_size) + [1], padding='SAME')
59 |
60 | if op is None:
61 | raise NotImplementedError
62 |
63 | return op
64 |
65 |
66 | def DnAveragePooling(input_tensor, pool_size=(2, 2), dim=2, backend='tensorflow'):
67 |
68 | if backend == 'keras':
69 |
70 | if dim == 2:
71 | raise NotImplementedError
72 | if dim == 3:
73 | raise NotImplementedError
74 |
75 | if backend == 'tensorflow':
76 |
77 | if dim == 2:
78 | op = tf.nn.avg_pool(input_tensor, ksize=[1] + list(pool_size) + [1], strides=[1] + list(pool_size) + [1], padding='SAME')
79 | if dim == 3:
80 | op = tf.nn.avg_pool3d(input_tensor, ksize=[1] + list(pool_size) + [1], strides=[1] + list(pool_size) + [1], padding='SAME')
81 |
82 | return op
83 |
84 |
85 | def DnConv(input_tensor, output_dim, kernel_size=(5, 5, 5), stride_size=(2, 2, 2), dim=3, padding='SAME', initializer_std=0.02, activation=None, name=None, dropout=0, batch_norm=False, backend='tensorflow'):
86 |
87 |     """ TODO: Provide different options for initializers, and resolve inconsistencies between 2D and 3D.
88 | """
89 |
90 | if name is None:
91 | name = 'conv' + str(dim) + 'd'
92 |
93 | if backend == 'tensorflow':
94 |
95 | padding = padding.upper()
96 |
97 | if dim == 2:
98 | conv = conv2d(input_tensor, output_dim, kernel_size=kernel_size, stride_size=stride_size, padding=padding, initializer_std=initializer_std, name=name)
99 | elif dim == 3:
100 | conv = conv3d(input_tensor, output_dim, kernel_size=kernel_size, stride_size=stride_size, padding=padding, initializer_std=initializer_std, name=name)
101 |
102 | elif backend == 'keras':
103 | if dim == 2:
104 | conv = Conv2D(output_dim, kernel_size=kernel_size, strides=stride_size, padding=padding, name=name)(input_tensor)
105 | elif dim == 3:
106 | conv = Conv3D(output_dim, kernel_size=kernel_size, strides=stride_size, padding=padding, name=name)(input_tensor)
107 |
108 | if activation is not None:
109 |         conv = Activation(activation)(conv)
110 |
111 | if batch_norm:
112 | conv = DnBatchNormalization(conv, backend=backend)
113 |
114 | if dropout > 0:
115 | conv = DnDropout(conv, dropout, backend=backend)
116 |
117 | return conv
118 |
119 |
120 | def DnDeConv(input_tensor, output_dim, kernel_size=(5, 5, 5), stride_size=(2, 2, 2), dim=3, padding='SAME', initializer_std=0.02, activation=None, name=None, backend='tensorflow'):
121 |
122 |     """ TODO: Provide different options for initializers, and resolve inconsistencies between 2D and 3D.
123 | """
124 |
125 | if name is None:
126 | name = 'conv' + str(dim) + 'd'
127 |
128 | if backend == 'tensorflow':
129 |
130 | padding = padding.upper()
131 |
132 | if dim == 2:
133 | conv = deconv2d(input_tensor, output_dim, kernel_size=kernel_size, stride_size=stride_size, padding=padding, initializer_std=initializer_std, name=name, backend=backend)
134 | elif dim == 3:
135 | conv = deconv3d(input_tensor, output_dim, kernel_size=kernel_size, stride_size=stride_size, padding=padding, initializer_std=initializer_std, name=name, backend=backend)
136 |
137 | elif backend == 'keras':
138 |
139 |         raise NotImplementedError
140 |
141 | if activation is not None:
142 |         pass  # TODO: activations are not yet applied in DnDeConv.
143 |
144 | return conv
145 |
146 |
147 | def DnUpsampling(input_tensor, pool_size=(2, 2, 2), dim=3, backend='tensorflow'):
148 |
149 | if backend == 'keras':
150 | if dim == 2:
151 | return UpSampling2D(size=pool_size)(input_tensor)
152 | elif dim == 3:
153 | return UpSampling3D(size=pool_size)(input_tensor)
154 |
155 | if backend == 'tensorflow':
156 | if dim == 2:
157 | return upscale2d(input_tensor, pool_size[0])
158 | elif dim == 3:
159 | raise NotImplementedError
160 |
161 |
162 | def DnPixelNorm(input_tensor, dim=3, backend='tensorflow'):
163 |
164 | if backend == 'tensorflow':
165 |
166 | if dim == 2:
167 | return pixel_norm_2d(input_tensor)
168 | elif dim == 3:
169 | return pixel_norm_3d(input_tensor)
170 |
171 | if backend == 'keras':
172 | raise NotImplementedError
173 |
174 |
175 | def DnZeroPadding(input_tensor, padding=(1, 1), dim=3, backend='keras'):
176 |
177 | if backend == 'keras':
178 |
179 | if dim == 2:
180 | return ZeroPadding2D(padding=padding)(input_tensor)
181 | elif dim == 3:
182 |
183 | return ZeroPadding3D(padding=padding)(input_tensor)
184 |
185 | if backend == 'tensorflow':
186 | raise NotImplementedError
187 |
188 |
--------------------------------------------------------------------------------
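A short sketch of the backend dispatch above, using the Keras branch; the input shape is arbitrary:

```python
from keras.layers import Input

from deepneuro.models.dn_ops import DnConv, DnMaxPooling

# A 2D Keras tensor run through DnConv and then DnMaxPooling.
inputs = Input(shape=(64, 64, 1))
conv = DnConv(inputs, 8, kernel_size=(3, 3), stride_size=(1, 1), dim=2, padding='same', backend='keras')
pool = DnMaxPooling(conv, pool_size=(2, 2), dim=2, padding='same', backend='keras')
```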
/deepneuro/models/gan.py:
--------------------------------------------------------------------------------
1 | """ This is a vanilla implementation of a generative adversarial network. It includes
2 | the Wasserstein Gradient-Penalty by default.
3 | """
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 | import os
8 |
9 | from deepneuro.models.tensorflow_model import TensorFlowModel
10 | from deepneuro.utilities.util import add_parameter
11 | from deepneuro.models.blocks import generator, discriminator
12 | from deepneuro.models.cost_functions import wasserstein_loss
13 |
14 |
15 | class GAN(TensorFlowModel):
16 |
17 | def load(self, kwargs):
18 |
19 | """ Parameters
20 | ----------
21 | latent_size : int, optional
22 | Size of the latent vector for image synthesis. Default is 128.
23 | depth : int, optional
24 |             Specifies the number of layers in the generator. Default is 4.
25 |         generator_max_filter : int, optional
26 |             Specifies the maximum number of filters in the generator. Default is 128.
27 |
28 | """
29 |
30 | super(GAN, self).load(kwargs)
31 |
32 | # Generator Parameters
33 | add_parameter(self, kwargs, 'latent_size', 128)
34 | add_parameter(self, kwargs, 'depth', 4)
35 | add_parameter(self, kwargs, 'generator_updates', 1)
36 |
37 | # Model Parameters
38 | add_parameter(self, kwargs, 'filter_cap', 128)
39 | add_parameter(self, kwargs, 'filter_floor', 16)
40 |
41 | add_parameter(self, kwargs, 'generator_max_filter', 128)
42 |
43 | # Discriminator Parameters
44 | add_parameter(self, kwargs, 'discriminator_depth', 4)
45 | add_parameter(self, kwargs, 'discriminator_max_filter', 128)
46 | add_parameter(self, kwargs, 'discriminator_updates', 1)
47 |
48 | # Loss Parameters
49 | add_parameter(self, kwargs, 'gradient_penalty_weight', 10) # For WP
50 |
51 | self.sess = None
52 | self.init = None
53 |
54 | def get_filter_num(self, depth):
55 |
56 | # This will need to be a bit more complicated; see PGGAN paper.
57 | if self.max_filter / (2 ** (depth)) <= self.filter_floor:
58 | return self.filter_floor
59 | else:
60 | return min(self.max_filter / (2 ** (depth)), self.filter_cap)
61 |
62 | def process_step(self, step_counter):
63 |
64 | # Replace with GPU function?
65 | sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size])
66 | reference_data = next(self.training_data_generator)[self.input_data]
67 |
68 | # Optimize!
69 |
70 | _, g_loss = self.sess.run([self.opti_G, self.G_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent})
71 |         _, d_loss, d_origin_loss = self.sess.run([self.opti_D, self.D_loss, self.D_origin_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent})
72 |
73 | # This is a little weird -- it only records loss on discriminator steps.
74 | self.log([g_loss, d_loss, d_origin_loss], headers=['Generator Loss', 'WP Discriminator Loss', 'Discriminator Loss'], verbose=self.hyperverbose)
75 | step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss) + " Discriminator Loss: {0:.5f}".format(d_loss))
76 |
77 | return
78 |
79 | def build_tensorflow_model(self, batch_size):
80 |
81 | """ Break it out into functions?
82 | """
83 |
84 | # Set input/output shapes for reference during inference.
85 | self.model_input_shape = tuple([batch_size] + list(self.input_shape))
86 | self.model_output_shape = tuple([batch_size] + list(self.input_shape))
87 |
88 | self.latent = tf.placeholder(tf.float32, [None, self.latent_size])
89 | self.reference_images = tf.placeholder(tf.float32, [None] + list(self.model_input_shape)[1:])
90 | self.synthetic_images = generator(self, self.latent, depth=self.depth, name='generator')
91 |
92 | self.discriminator_real, self.discriminator_real_logits = discriminator(self, self.reference_images, depth=self.depth + 1, name='discriminator')
93 | self.discriminator_fake, self.discriminator_fake_logits = discriminator(self, self.synthetic_images, depth=self.depth + 1, name='discriminator', reuse=True)
94 |
95 | t_vars = tf.trainable_variables()
96 | self.d_vars = [var for var in t_vars if 'discriminator' in var.name]
97 | self.g_vars = [var for var in t_vars if 'generator' in var.name]
98 | self.saver = tf.train.Saver(self.g_vars + self.d_vars)
99 |
100 | self.calculate_losses()
101 |
102 | if self.hyperverbose:
103 | self.model_summary()
104 |
105 | def calculate_losses(self):
106 |
107 |         self.D_loss, self.G_loss, self.D_origin_loss = wasserstein_loss(self, discriminator, self.discriminator_fake_logits, self.discriminator_real_logits, self.synthetic_images, self.reference_images, gradient_penalty_weight=self.gradient_penalty_weight, name='discriminator', dim=self.dim)
108 |
109 | # A little sketchy. Attempting to make variable loss functions extensible later.
110 | self.D_loss = self.D_loss[0]
111 | self.G_loss = self.G_loss[0]
112 | self.D_origin_loss = self.D_origin_loss[0]
113 |
114 | # Create Optimizers
115 | self.opti_D = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(
116 | self.D_loss, var_list=self.d_vars)
117 | self.opti_G = self.tensorflow_optimizer_dict[self.optimizer](learning_rate=self.initial_learning_rate, beta1=0.0, beta2=0.99).minimize(self.G_loss, var_list=self.g_vars)
118 |
119 | def load_model(self, input_model_path, batch_size=1):
120 |
121 | self.build_tensorflow_model(batch_size)
122 | self.init_sess()
123 | self.saver.restore(self.sess, os.path.join(input_model_path, 'model.ckpt'))
124 |
125 | def predict(self, sample_latent=None, batch_size=1):
126 |
127 | self.init_sess()
128 |
129 | if sample_latent is None:
130 | sample_latent = np.random.normal(size=[batch_size, self.latent_size])
131 |
132 | return self.sess.run(self.synthetic_images, feed_dict={self.latent: sample_latent})
133 |
134 | def log_variables(self):
135 |
136 | super(GAN, self).log_variables()
137 |
138 | return
--------------------------------------------------------------------------------
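A hypothetical inference sketch for the GAN above; the constructor keywords and checkpoint folder are assumptions, and `load_model` expects the directory written by `save_model`:

```python
from deepneuro.models.gan import GAN

gan = GAN(input_shape=(64, 64, 1), latent_size=128, depth=4)
gan.load_model('gan_checkpoint')     # restores gan_checkpoint/model.ckpt
samples = gan.predict(batch_size=4)  # four images synthesized from random latents
print(samples.shape)
```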
/deepneuro/models/keras_pretrained.py:
--------------------------------------------------------------------------------
1 | """ Pretrained neural networks implemented by Keras.
2 | Implementation borrows heavily from:
3 | https://www.pyimagesearch.com/2017/03/20/imagenet-vggnet-resnet-inception-xception-keras/
4 | https://flyyufelix.github.io/2016/10/08/fine-tuning-in-keras-part2.html
5 | written by Adrian Rosebrock and Felix Yu, respectively
6 | """
7 |
8 | from keras.applications import ResNet50
9 | from keras.applications import InceptionV3
10 | from keras.applications import Xception
11 | from keras.applications import VGG16
12 | from keras.applications import VGG19
13 | from keras.layers import Dense, GlobalAveragePooling2D
14 | from keras.engine import Model
15 |
16 | from deepneuro.models.keras_model import KerasModel
17 | from deepneuro.utilities.util import add_parameter
18 |
19 |
20 | class KerasPreTrainedModel(KerasModel):
21 |
22 | def load(self, kwargs):
23 |
24 | """ Parameters
25 | ----------
26 |         model_type : str, optional
27 |             Architecture to load: one of 'vgg16', 'vgg19', 'inception', 'xception', 'resnet50'. Default is 'inception'.
28 |         pretrained_weights : str, optional. Default is 'imagenet'.
29 |         """
30 |
31 | super(KerasPreTrainedModel, self).load(kwargs)
32 |
33 | # Model Choice Parameters
34 | add_parameter(self, kwargs, 'model_type', 'inception')
35 | add_parameter(self, kwargs, 'pretrained_weights', 'imagenet')
36 |
37 | # Finetuning Parameters
38 | add_parameter(self, kwargs, 'input_shape', None)
39 | add_parameter(self, kwargs, 'output_classes', None)
40 | add_parameter(self, kwargs, 'bottleneck_layers_num', None)
41 | add_parameter(self, kwargs, 'finetuning_dense_features', 128)
42 |
43 | self.models = {
44 | "vgg16": VGG16,
45 | "vgg19": VGG19,
46 | "inception": InceptionV3,
47 | "xception": Xception,
48 | "resnet50": ResNet50
49 | }
50 |
51 | self.output_activation = False
52 |
53 | if self.input_shape is None:
54 | self.include_top = False
55 | else:
56 | self.include_top = True
57 |
58 | def build_model(self):
59 |
60 |         self.model = self.models[self.model_type](weights=self.pretrained_weights, include_top=False)  # NOTE: include_top is currently hard-coded to False; self.include_top is computed in load() but not yet applied here.
61 |
62 | if self.output_classes is not None:
63 |
64 | if self.model_type in ['vgg19', 'vgg16']:
65 | self.model.layers.pop()
66 | self.model.outputs = [self.model.layers[-1].output]
67 | self.model.layers[-1].outbound_nodes = []
68 | self.model.add(Dense(self.output_classes, activation='softmax'))
69 |
70 | elif self.model_type in ['inception']:
71 | model_output = self.model.output
72 | model_output = GlobalAveragePooling2D()(model_output)
73 | model_output = Dense(self.finetuning_dense_features, activation='relu')(model_output)
74 | predictions = Dense(self.output_classes, activation='softmax')(model_output)
75 | self.model = Model(self.model.input, predictions)
76 | elif self.model_type in ['resnet50']:
77 | # self.model.layers.pop()
78 | model_output = self.model.output
79 | model_output = GlobalAveragePooling2D()(model_output)
80 | predictions = Dense(self.output_classes, activation='softmax')(model_output)
81 | self.model = Model(self.model.input, predictions)
82 |
83 | if self.bottleneck_layers_num is not None:
84 | for layer in self.model.layers[:self.bottleneck_layers_num]:
85 | layer.trainable = False
86 |
87 | self.inputs = self.model.input
88 |
89 | super(KerasPreTrainedModel, self).build_model(compute_output=False)
90 |
--------------------------------------------------------------------------------
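A hedged fine-tuning sketch for the class above; the keyword names mirror the `add_parameter` calls in `load()`, and the number of frozen layers is an arbitrary choice:

```python
from deepneuro.models.keras_pretrained import KerasPreTrainedModel

# InceptionV3 with a new global-average-pooled softmax head for two classes,
# freezing the first 249 layers during fine-tuning.
model = KerasPreTrainedModel(model_type='inception',
                             pretrained_weights='imagenet',
                             output_classes=2,
                             finetuning_dense_features=128,
                             bottleneck_layers_num=249)
```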
/deepneuro/models/minimal.py:
--------------------------------------------------------------------------------
1 | """ minimal.py includes a minimal convolutional model, useful for
2 | testing and debugging DeepNeuro pipelines.
3 | """
4 |
5 | from deepneuro.models.keras_model import KerasModel
6 | from deepneuro.models.dn_ops import DnConv
7 |
8 |
9 | class MinimalKerasCNN(KerasModel):
10 |
11 | def load(self, kwargs):
12 |
13 | """ Parameters
14 | ----------
15 |         kernel_size : tuple, optional
16 |             Kernel size of the model's single convolutional layer.
17 |             Inherited from KerasModel.
18 |         dim : int, optional
19 |             Dimensionality of the convolution (2 or 3). Inherited
20 |             from KerasModel.
21 |
22 |         """
23 |
24 | super(MinimalKerasCNN, self).load(kwargs)
25 |
26 | def build_model(self):
27 |
28 | self.output_layer = DnConv(self.inputs, 1, self.kernel_size, stride_size=(1,) * self.dim, dim=self.dim, name='minimal_conv', backend='keras')
29 |
30 | if self.input_tensor is None:
31 |
32 | super(MinimalKerasCNN, self).build()
33 | return self.model
34 |
35 | else:
36 | return self.output_layer
--------------------------------------------------------------------------------
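A smoke-test sketch for the minimal model above; passing `input_shape` and `kernel_size` as constructor keywords is an assumption carried over from the base `KerasModel`:

```python
from deepneuro.models.minimal import MinimalKerasCNN

# One convolutional layer with a single output channel; handy for
# verifying that a pipeline wires up end to end.
model = MinimalKerasCNN(input_shape=(32, 32, 32, 1), kernel_size=(3, 3, 3))
```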
/deepneuro/models/tensorflow_model.py:
--------------------------------------------------------------------------------
1 | import os
2 | import glob
3 | import tensorflow as tf
4 | import numpy as np
5 |
6 | from shutil import rmtree
7 | from tqdm import tqdm
8 |
9 | from deepneuro.models.model import DeepNeuroModel
10 | from deepneuro.utilities.util import add_parameter
11 |
12 |
13 | class TensorFlowModel(DeepNeuroModel):
14 |
15 | def load(self, kwargs):
16 |
17 | """ Parameters
18 | ----------
19 |         sess : tf.Session, optional
20 |             An existing TensorFlow session to reuse. If None, a new
21 |             session is created on demand by init_sess().
22 |         saver : tf.train.Saver, optional
23 |             An existing Saver for checkpointing. If None, one is
24 |             created on the first call to save_model().
25 | """
26 |
27 | add_parameter(self, kwargs, 'sess', None)
28 | add_parameter(self, kwargs, 'saver', None)
29 |
30 | self.tensorflow_optimizer_dict = {'Adam': tf.train.AdamOptimizer}
31 |
32 | def init_training(self, training_data_collection, kwargs):
33 |
34 | # Outputs
35 | add_parameter(self, kwargs, 'output_model_filepath')
36 |
37 | # Training Parameters
38 | add_parameter(self, kwargs, 'num_epochs', 100)
39 | add_parameter(self, kwargs, 'training_steps_per_epoch', 10)
40 | add_parameter(self, kwargs, 'training_batch_size', 16)
41 |
42 | self.init_sess()
43 | self.build_tensorflow_model(self.training_batch_size)
44 | self.create_data_generators(training_data_collection, training_batch_size=self.training_batch_size, training_steps_per_epoch=self.training_steps_per_epoch)
45 |
46 | return
47 |
48 | def train(self, training_data_collection, validation_data_collection=None, **kwargs):
49 |
50 | self.init_training(training_data_collection, kwargs)
51 |
52 | self.init = tf.global_variables_initializer()
53 | self.sess.run(self.init)
54 |
55 | self.callback_process('on_train_begin')
56 |
57 | try:
58 |
59 | for epoch in range(self.num_epochs):
60 |
61 |                 print('Epoch {}/{}'.format(epoch, self.num_epochs))
62 | self.callback_process('on_epoch_begin', epoch)
63 |
64 | step_counter = tqdm(list(range(self.training_steps_per_epoch)), total=self.training_steps_per_epoch, unit="step", desc="Generator Loss:", miniters=1)
65 |
66 | for step in step_counter:
67 |
68 | self.callback_process('on_batch_begin', step)
69 |
70 | self.process_step(step_counter)
71 |
72 | self.callback_process('on_batch_end', step)
73 |
74 | self.callback_process('on_epoch_end', epoch)
75 |
76 | self.callback_process('on_train_end')
77 |
78 | except KeyboardInterrupt:
79 |
80 | self.callback_process('on_train_end')
81 |         except Exception:
82 |             raise
83 |
84 |     def process_step(self, step_counter):
85 |         # Default placeholder step; subclasses such as GAN override this method.
86 | for epoch in range(self.num_epochs):
87 |
88 | step_counter = tqdm(list(range(self.training_steps_per_epoch)), total=self.training_steps_per_epoch, unit="step", desc="Generator Loss:", miniters=1)
89 |
90 | for step in step_counter:
91 |
92 | # Replace with GPU function?
93 | sample_latent = np.random.normal(size=[self.training_batch_size, self.latent_size])
94 | reference_data = next(self.training_data_generator)[self.input_data]
95 |
96 | # Optimize!
97 |
98 | _, g_loss = self.sess.run([self.basic_optimizer, self.basic_loss], feed_dict={self.reference_images: reference_data, self.latent: sample_latent})
99 |
100 | self.log([g_loss], headers=['Basic Loss'], verbose=self.hyperverbose)
101 | step_counter.set_description("Generator Loss: {0:.5f}".format(g_loss))
102 |
103 | self.save_model(self.output_model_filepath)
104 |
105 | return
106 |
107 | def init_sess(self):
108 |
109 | if self.sess is None:
110 | self.graph = tf.Graph()
111 |
112 | config = tf.ConfigProto()
113 | config.gpu_options.allow_growth = True
114 | self.sess = tf.InteractiveSession(config=config, graph=self.graph)
115 |
116 | elif self.sess._closed:
117 | self.graph = tf.Graph()
118 |
119 | config = tf.ConfigProto()
120 | config.gpu_options.allow_growth = True
121 | self.sess = tf.InteractiveSession(config=config, graph=self.graph)
122 |
123 | def save_model(self, output_model_filepath, overwrite=True):
124 |
125 | self.init_sess()
126 |
127 | if output_model_filepath.endswith(('.h5', '.hdf5')):
128 | output_model_filepath = '.'.join(str.split(output_model_filepath, '.')[0:-1])
129 |
130 | if os.path.exists(output_model_filepath) and overwrite:
131 | rmtree(output_model_filepath)
132 |
133 | if self.saver is None:
134 | self.saver = tf.train.Saver()
135 |
136 | save_path = self.saver.save(self.sess, os.path.join(output_model_filepath, "model.ckpt"))
137 |
138 | return save_path
139 |
140 | def log_variables(self):
141 | self.summary_op = tf.summary.merge_all()
142 | if self.tensorboard_directory is not None:
143 | if self.tensorboard_run_directory is None:
144 | previous_runs = glob.glob(os.path.join(self.tensorboard_directory, 'tensorboard_run*'))
145 | if len(previous_runs) == 0:
146 | run_number = 0
147 | else:
148 | run_number = max([int(s.split('tensorboard_run_')[1]) for s in previous_runs]) + 1
149 | self.tensorboard_run_directory = os.path.join(self.tensorboard_directory, 'tensorboard_run_%02d' % run_number)
150 | self.summary_writer = tf.summary.FileWriter(self.tensorboard_run_directory, self.sess.graph)
151 |
152 | def model_summary(self):
153 |
154 | for layer in tf.trainable_variables():
155 | print(layer)
156 |
157 | def callback_process(self, command='', idx=None):
158 |
159 | for callback in self.callbacks:
160 | if type(callback) is str:
161 | continue
162 | method = getattr(callback, command)
163 | method(idx)
164 |
165 | return
166 |
167 | def grab_tensor(self, layer):
168 | return self.graph.get_tensor_by_name(layer + ':0')
169 |
170 | def find_layers(self, contains=['discriminator/']):
171 |
172 | for layer in self.graph.get_operations():
173 | if any(op_type in layer.name for op_type in contains):
174 | try:
175 | if self.graph.get_tensor_by_name(layer.name + ':0').get_shape() != ():
176 | print((layer.name, self.graph.get_tensor_by_name(layer.name + ':0').get_shape()))
177 |                 except Exception:
178 | continue
179 |
180 | def load_model(self, input_model_path, batch_size=1):
181 |
182 | self.build_tensorflow_model(batch_size)
183 | self.init_sess()
184 | self.saver.restore(self.sess, os.path.join(input_model_path, 'model.ckpt'))
--------------------------------------------------------------------------------
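`callback_process` above simply looks up the named method on each callback and calls it with an index, so any object exposing the six hooks will work; a minimal sketch:

```python
class EpochPrinter(object):

    """ Bare-bones callback compatible with TensorFlowModel.callback_process.
    """

    def on_train_begin(self, idx=None):
        print('Training started.')

    def on_epoch_begin(self, idx=None):
        print('Beginning epoch {}.'.format(idx))

    def on_batch_begin(self, idx=None):
        pass

    def on_batch_end(self, idx=None):
        pass

    def on_epoch_end(self, idx=None):
        print('Finished epoch {}.'.format(idx))

    def on_train_end(self, idx=None):
        print('Training complete.')
```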
/deepneuro/outputs/__init__.py:
--------------------------------------------------------------------------------
1 | from .toy import PatchDiagram
2 | from .segmentation import PatchesInference
3 | from .inference import ModelInference
4 | from .classification import ClassInference
--------------------------------------------------------------------------------
/deepneuro/outputs/classification.py:
--------------------------------------------------------------------------------
1 | from deepneuro.outputs.inference import ModelInference
2 |
3 |
4 | class ClassInference(ModelInference):
5 |
6 | def load(self, kwargs):
7 |
8 | """ Parameters
9 | ----------
10 |         All parameters are inherited from ModelInference.
11 | """
12 |
13 | super(ClassInference, self).load(kwargs)
14 |
15 |
--------------------------------------------------------------------------------
/deepneuro/outputs/gan.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from deepneuro.outputs.inference import ModelInference
4 | from deepneuro.utilities.util import add_parameter
5 |
6 |
7 | class GANInference(ModelInference):
8 |
9 | def load(self, kwargs):
10 |
11 | """ Parameters
12 | ----------
13 | output_num : int, optional
14 |             The total number of samples generated by the GAN, processed in minibatches of size batch_size.
15 |
16 | """
17 |
18 | super(GANInference, self).load(kwargs)
19 |
20 | # Patching Parameters
21 | add_parameter(self, kwargs, 'output_num', 10)
22 | add_parameter(self, kwargs, 'sampling_std', 1)
23 | add_parameter(self, kwargs, 'sampling_mean', 0)
24 |
25 | def generate(self, model=None):
26 |
27 | self.generate_output_directory()
28 |
29 | if model is not None:
30 | self.model = model
31 |
32 | self.latent_size = self.model.latent_size
33 |
34 | for batch_idx in range(0, self.output_num, self.batch_size):
35 |
36 | minibatch_length = min(self.batch_size, self.output_num - batch_idx)
37 |
38 | input_data = {'input_data': np.random.normal(self.sampling_mean, self.sampling_std, [minibatch_length, self.latent_size]),
39 | 'casename': [''] * minibatch_length,
40 | 'input_data_augmentation_string': [str(i) for i in range(batch_idx, batch_idx + minibatch_length)],
41 | 'input_data_affine': [None] * minibatch_length}
42 |
43 | self.process_case(input_data)
44 | self.postprocess(input_data)
45 |
46 | self.close_output()
47 |
48 | return_dict = {'data': self.return_objects, 'filenames': self.return_filenames}
49 | return return_dict
50 |
51 | def process_case(self, input_data):
52 |
53 | input_data = input_data['input_data']
54 | output_data = self.model.predict(input_data)
55 |
56 | self.return_objects.append(output_data)
57 |
58 | return output_data
--------------------------------------------------------------------------------
/deepneuro/outputs/inference.py:
--------------------------------------------------------------------------------
1 | """ This is a base class for running inference on models.
2 | """
3 |
4 | import numpy as np
5 |
6 | from deepneuro.outputs.output import Output
7 | from deepneuro.utilities.util import add_parameter
8 |
9 |
10 | class ModelInference(Output):
11 |
12 | def load(self, kwargs):
13 |
14 | # Evaluation Params
15 | add_parameter(self, kwargs, 'ground_truth', None)
16 |
17 | # Saving Params
18 | add_parameter(self, kwargs, 'postprocessor_string', '_inference')
19 |
20 | # Model Parameters
21 | add_parameter(self, kwargs, 'input_channels', None)
22 | add_parameter(self, kwargs, 'output_channels', None)
23 |
24 | add_parameter(self, kwargs, 'channels_dim', None)
25 |
26 | if self.channels_dim is None:
27 | if self.channels_first:
28 | self.channels_dim = 1
29 | else:
30 | self.channels_dim = -1
31 |
32 | def process_case(self, input_data, model=None):
33 |
34 |         """Run model inference on a single case.
35 |
36 |         Parameters
37 |         ----------
38 |         input_data : dict
39 |             Dictionary of input arrays for one case, keyed by data group.
40 |         model : DeepNeuroModel, optional
41 |             If provided, replaces the currently loaded model.
42 |
43 |         Returns
44 |         -------
45 |         ndarray
46 |             Model output for this case.
47 |         """
48 |
49 | input_data = input_data[self.lead_key]
50 |
51 | if model is not None:
52 | self.model = model
53 |
54 | if self.channels_first:
55 | input_data = np.swapaxes(input_data, 1, -1)
56 |
57 | if self.input_channels is not None:
58 | input_data = np.take(input_data, self.input_channels, self.channels_dim)
59 |
60 | self.output_shape = [1] + list(self.model.model_output_shape)[1:] # Weird
61 |
62 | output_data = self.predict(input_data)
63 |
64 | if self.output_channels is not None:
65 | output_data = np.take(output_data, self.output_channels, self.channels_dim)
66 |
67 | # Will fail for time-data.
68 | if self.channels_first:
69 | output_data = np.swapaxes(output_data, 1, -1)
70 |
71 | self.return_objects.append(output_data)
72 |
73 | return output_data
74 |
75 | def predict(self, input_data):
76 |         """Run the loaded model's predict method on input data.
77 |
78 |         Parameters
79 |         ----------
80 |         input_data : ndarray
81 |             Input array for a single case, already channel-selected.
82 |
83 |         Returns
84 |         -------
85 |         ndarray
86 |             Model prediction.
87 |         """
88 |         # Vanilla prediction case is obviously not fleshed out.
89 |
90 | prediction = self.model.predict(input_data)
91 |
92 | return prediction
--------------------------------------------------------------------------------
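The channel handling in `process_case` above is plain NumPy; a standalone illustration of the `np.take` and `np.swapaxes` calls, with an arbitrary channels-last shape:

```python
import numpy as np

# A channels-last case volume: (batch, x, y, z, channels).
input_data = np.zeros((1, 32, 32, 32, 4))

# Keep only channels 0 and 2, as with input_channels=[0, 2].
selected = np.take(input_data, [0, 2], axis=-1)
print(selected.shape)  # (1, 32, 32, 32, 2)

# Channels-first models swap the channel axis into position 1.
swapped = np.swapaxes(selected, 1, -1)
print(swapped.shape)  # (1, 2, 32, 32, 32)
```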
/deepneuro/outputs/radiomics.py:
--------------------------------------------------------------------------------
1 |
2 | if __name__ == '__main__':
3 |
4 | pass
--------------------------------------------------------------------------------
/deepneuro/outputs/statistics.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 | import matplotlib.pyplot as plt
4 |
5 | from skimage.morphology import label
6 | from skimage.measure import regionprops
7 |
8 | from deepneuro.utilities.conversion import convert_input_2_numpy
9 |
10 |
11 | def calc_voxel_count(input_data, mask_value=0):
12 |
13 | input_data = convert_input_2_numpy(input_data)
14 |
15 | return input_data[input_data != mask_value].size
16 |
17 |
18 | def calc_volume(image_numpy, pixdims, mask_value=0):
19 | return pixdims[0] * pixdims[1] * pixdims[2] * calc_voxel_count(image_numpy, mask_value)
20 |
21 |
22 | def calc_RANO(input_label, affine=None, resolution_x=1, resolution_y=1, resolution_z=1, output_csv=None, image_data=None, output_image_filepath=None, display_image=False):
23 |
24 | """ Calculate RANO criteria. Assumes data is oriented [x, y, z]. TODO: Make dimension agnostic.
25 | Code modified from original written by Ken Chang.
26 | My editing of the code is not great; TODO: Refactor.
27 | """
28 |
29 | image_data, image_affine = convert_input_2_numpy(image_data, return_affine=True)
30 | input_label = convert_input_2_numpy(input_label)
31 |
32 | # if affine and image_affine is None:
33 | # pass
34 | # else:
35 | # resolution_x, resolution_y, resolution_z = 1, 1, 1 # Placeholder.
36 |
37 | rano_measures, rano_slices, rano_props = [], [], []
38 |
39 | connected_components = label(input_label, connectivity=2)
40 | component_labels = np.unique(connected_components)
41 |
42 | for lesion_idx in component_labels:
43 |
44 | lesion = connected_components.astype(int)
45 | lesion[connected_components != lesion_idx] = 0
46 |
47 | major_diameter, minor_diameter, rano_slice, region_props = [None] * 4
48 |
49 | volume_threshold = 2 * resolution_z
50 | if volume_threshold < 10:
51 | volume_threshold = 10
52 |
53 | for z_slice in range(lesion.shape[2]):
54 |
55 | lesion_slice = lesion[..., z_slice]
56 |
57 | if np.sum(lesion_slice) == 0:
58 | continue
59 |
60 | lesion_properties = regionprops(lesion_slice)
61 | current_major = lesion_properties[0].major_axis_length * resolution_x
62 | current_minor = lesion_properties[0].minor_axis_length * resolution_y
63 |
64 | if current_major < volume_threshold:
65 | continue
66 | if major_diameter is None:
67 | major_diameter, minor_diameter, rano_slice, region_props = current_major, current_minor, z_slice, lesion_properties
68 | elif current_major > major_diameter:
69 | major_diameter, minor_diameter, rano_slice, region_props = current_major, current_minor, z_slice, lesion_properties
70 |
71 | if major_diameter is not None:
72 | rano_measures += [major_diameter * minor_diameter]
73 | rano_slices += [rano_slice]
74 | rano_props += [region_props]
75 |
76 | if len(rano_measures) < 5:
77 | sum_rano = np.sum(rano_measures)
78 | else:
79 |         sum_rano = np.sum(sorted(rano_measures)[-5:])  # list.sort() returns None; use sorted() to take the five largest.
80 |
81 | if output_csv is not None:
82 | if not os.path.exists(output_csv):
83 |             pass  # TODO: CSV output is not yet implemented.
84 |
85 | if output_image_filepath is not None or display_image:
86 |
87 | for idx, z_slice in enumerate(rano_slices):
88 |
89 | lesion_props = rano_props[idx][0]
90 |
91 | if image_data is None:
92 | display_data = input_label[..., z_slice]
93 | else:
94 | display_data = image_data[..., z_slice]
95 |
96 | center_y, center_x = lesion_props.centroid
97 | major_angle = lesion_props.orientation
98 |
99 | minor_angle = major_angle + np.pi / 2
100 |
101 | half_major, half_minor = lesion_props.major_axis_length / 2, lesion_props.minor_axis_length / 2
102 |
103 | major_x_1 = center_x + np.cos(major_angle) * half_major
104 | major_y_1 = center_y - np.sin(major_angle) * half_major
105 | major_x_2 = center_x - np.cos(major_angle) * half_major
106 | major_y_2 = center_y + np.sin(major_angle) * half_major
107 |
108 | minor_x_1 = center_x + np.cos(minor_angle) * half_minor
109 | minor_y_1 = center_y - np.sin(minor_angle) * half_minor
110 | minor_x_2 = center_x - np.cos(minor_angle) * half_minor
111 | minor_y_2 = center_y + np.sin(minor_angle) * half_minor
112 |
113 | plt.imshow(display_data, interpolation='none', origin='lower', cmap='gray')
114 | plt.plot(center_x, center_y, 'ro')
115 | plt.plot(major_x_1, major_y_1, 'ro')
116 | plt.plot(major_x_2, major_y_2, 'ro')
117 | plt.plot(minor_x_1, minor_y_1, 'ro')
118 | plt.plot(minor_x_2, minor_y_2, 'ro')
119 |             if output_image_filepath is not None: plt.savefig(output_image_filepath)
120 |             if display_image: plt.show()
121 | return sum_rano
122 |
123 |
124 | RANO = calc_RANO
125 |
126 |
127 | def calculate_prediction_dice(label_volume_1, label_volume_2):
128 |
129 | im1 = np.asarray(label_volume_1).astype(np.bool)
130 | im2 = np.asarray(label_volume_2).astype(np.bool)
131 |
132 | if im1.shape != im2.shape:
133 | raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
134 |
135 | im_sum = im1.sum() + im2.sum()
136 | if im_sum == 0:
137 | return 0
138 |
139 | # Compute Dice coefficient
140 | intersection = np.logical_and(im1, im2)
141 |
142 | return 2. * intersection.sum() / im_sum
143 |
144 |
145 | if __name__ == '__main__':
146 | pass
--------------------------------------------------------------------------------
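A quick worked example of the Dice function above on two overlapping 4x4 squares, where Dice = 2|A∩B| / (|A| + |B|) = 2 * 9 / (16 + 16) = 0.5625:

```python
import numpy as np

from deepneuro.outputs.statistics import calculate_prediction_dice

label_1 = np.zeros((10, 10))
label_1[2:6, 2:6] = 1  # 16 voxels
label_2 = np.zeros((10, 10))
label_2[3:7, 3:7] = 1  # 16 voxels, 9 of them overlapping label_1

print(calculate_prediction_dice(label_1, label_2))  # 0.5625
```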
/deepneuro/outputs/toy.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from deepneuro.utilities.util import add_parameter
4 | from deepneuro.outputs.segmentation import PatchesInference
5 |
6 |
7 | class PatchDiagram(PatchesInference):
8 |
9 | def load(self, kwargs):
10 |
11 | """ Parameters
12 | ----------
13 |         border_width : int, optional
14 |             Width, in voxels, of the border drawn around each
15 |             output patch. Patch interiors are set to 0 and borders
16 |             to 1. Default is 1.
17 |
18 |         All other parameters are inherited from PatchesInference.
19 |
20 |         """
21 |
22 | super(PatchDiagram, self).load(kwargs)
23 |
24 | add_parameter(self, kwargs, 'border_width', 1)
25 |
26 | def aggregate_predictions(self, output_data, repatched_image, rep_idx):
27 |
28 | output_data = np.logical_or(output_data, repatched_image).astype(float)
29 | return output_data
30 |
31 | # output_data += repatched_image * (rep_idx + 1)
32 | # output_data[output_data == (rep_idx + rep_idx + 1)] = (rep_idx + 1)
33 | # return output_data
34 |
35 | def run_inference(self, input_patches):
36 |
37 | output_patches = np.ones_like(input_patches)
38 |
39 | front_border_slice = [slice(None)] + [slice(self.border_width, -self.border_width, None) for dim in self.patch_dimensions] + [slice(None)]
40 |
41 | output_patches[tuple(front_border_slice)] = 0
42 |
43 | return output_patches
44 |
--------------------------------------------------------------------------------
/deepneuro/package_test/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/package_test/__init__.py
--------------------------------------------------------------------------------
/deepneuro/package_test/ci_test.py:
--------------------------------------------------------------------------------
1 | def test_func_fast():
2 |
3 | assert 1
4 |
5 | pass
6 |
--------------------------------------------------------------------------------
/deepneuro/package_test/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | cd /home/DeepNeuro
3 | python3 -m pytest -s
--------------------------------------------------------------------------------
/deepneuro/package_test/package_test.py:
--------------------------------------------------------------------------------
1 | """A test module for automatic documentation purposes
2 | """
3 |
4 |
5 | def test_function():
6 |
7 | import deepneuro
8 |
9 | return
10 |
11 |
12 | if __name__ == '__main__':
13 | test_function()
--------------------------------------------------------------------------------
/deepneuro/package_test/pipeline_test.py:
--------------------------------------------------------------------------------
1 | """ Testing functions for DeepNeuro's packaged pipelines.
2 | """
3 |
4 |
5 | def test_glioblastoma_module(testing_directory="/home/DeepNeuro/tmp", gpu_num='0'):
6 |
7 | import numpy as np
8 | import os
9 | from shutil import rmtree
10 |
11 | FLAIR, T1POST, T1PRE = np.random.normal(loc=1000, scale=200, size=(240, 240, 40)), \
12 | np.random.normal(loc=1500, scale=200, size=(240, 240, 180)), \
13 | np.random.normal(loc=1300, scale=200, size=(120, 120, 60))
14 |
15 | from deepneuro.utilities.conversion import save_data
16 |
17 | try:
18 | os.mkdir(testing_directory)
19 | FLAIR_file = save_data(FLAIR, os.path.join(testing_directory, 'FLAIR.nii.gz'))
20 | T1PRE_file = save_data(T1PRE, os.path.join(testing_directory, 'T1PRE.nii.gz'))
21 | T1POST_file = save_data(T1POST, os.path.join(testing_directory, 'T1POST.nii.gz'))
22 |
23 | from deepneuro.pipelines.Segment_GBM.predict import predict_GBM
24 |
25 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
26 | os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_num)
27 | predict_GBM(testing_directory,
28 | T1POST=T1POST_file,
29 | FLAIR=FLAIR_file,
30 | T1PRE=T1PRE_file)
31 |
32 | rmtree(testing_directory)
33 |
34 | except:
35 | rmtree(testing_directory)
36 | raise
37 |
38 | return
39 |
40 |
41 | if __name__ == '__main__':
42 |
43 | pass
--------------------------------------------------------------------------------
/deepneuro/package_test/utilities_test.py:
--------------------------------------------------------------------------------
1 |
2 | def nifti_load_test():
3 |
4 | return
--------------------------------------------------------------------------------
/deepneuro/pipelines/Dockerfile_base:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:9.0-cudnn7-devel-ubuntu16.04
2 | LABEL maintainer "Andrew Beers "
3 |
4 | ARG TENSORFLOW_VERSION=1.11.0
5 | ARG TENSORFLOW_ARCH=gpu
6 | ARG KERAS_VERSION=2.2.2
7 |
8 | # Install some dependencies
9 | # Install basic packages and miscellaneous dependencies
10 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
11 | liblapack-dev \
12 | libopenblas-dev \
13 | libzmq3-dev \
14 | python3 \
15 | python3-dev \
16 | python3-pip \
17 | python3-setuptools \
18 | python3-tk
19 |
20 | # Install Pillow (PIL) dependencies
21 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
22 | libfreetype6-dev \
23 | libjpeg-dev \
24 | liblcms2-dev \
25 | libopenjpeg-dev \
26 | libpng12-dev \
27 | libtiff5-dev \
28 | libwebp-dev \
29 | zlib1g-dev
30 |
31 | # Install support functions
32 | RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y \
33 | curl \
34 | git \
35 | wget \
36 | cmake
37 |
38 | # Cleanup Installs
39 | RUN apt-get clean && \
40 | apt-get autoremove && \
41 | rm -rf /var/lib/apt/lists/* && \
42 | # Link BLAS library to use OpenBLAS using the alternatives mechanism (https://www.scipy.org/scipylib/building/linux.html#debian-ubuntu)
43 | update-alternatives --set libblas.so.3 /usr/lib/openblas-base/libblas.so.3
44 |
45 | # Install pip
46 | RUN pip3 install --upgrade \
47 | setuptools \
48 | pip
49 |
50 | # Add SNI support to Python
51 | RUN pip3 --no-cache-dir install \
52 | pyopenssl \
53 | ndg-httpsclient \
54 | pyasn1
55 |
56 | # Install other useful Python packages using pip
57 | RUN pip3 --no-cache-dir install --upgrade ipython && \
58 | pip3 --no-cache-dir install \
59 | Cython \
60 | ipykernel \
61 | jupyter \
62 | path.py \
63 | Pillow \
64 | pygments \
65 | six \
66 | sphinx \
67 | wheel \
68 | zmq \
69 | && \
70 | python3 -m ipykernel.kernelspec
71 |
72 | # Install TensorFlow
73 | # For specific installations -- TODO, peg a version of Tensorflow.
74 | # RUN pip --no-cache-dir install \
75 | # https://storage.googleapis.com/tensorflow/linux/${TENSORFLOW_ARCH}/tensorflow_${TENSORFLOW_ARCH}-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
76 | # Generic.
77 | RUN pip3 --no-cache-dir install tensorflow-gpu
78 |
79 | # Install Keras
80 | RUN pip3 --no-cache-dir install git+git://github.com/fchollet/keras.git@${KERAS_VERSION}
81 |
82 | # Install Additional Packages for DeepNeuro
83 | RUN apt-get update -y
84 | RUN apt-get install graphviz -y
85 | RUN pip3 --no-cache-dir install pydot
86 | RUN pip3 --no-cache-dir install pandas --upgrade
87 | RUN pip3 --no-cache-dir install numexpr --upgrade
88 | RUN pip3 --no-cache-dir install nibabel pydicom lycon tqdm pynrrd tables imageio matplotlib
89 |
90 | # Install Slicer
91 | RUN SLICER_URL="http://download.slicer.org/bitstream/561384" && \
92 | curl -v -s -L $SLICER_URL | tar xz -C /tmp && \
93 | mv /tmp/Slicer* /opt/slicer
94 |
95 | # Install ANTS
96 | WORKDIR /home
97 | RUN wget "https://github.com/stnava/ANTs/releases/download/v2.1.0/Linux_Ubuntu14.04.tar.bz2" && \
98 | DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes bzip2 && \
99 | tar -C /usr/local -xjf Linux_Ubuntu14.04.tar.bz2 && \
100 | rm Linux_Ubuntu14.04.tar.bz2
101 |
102 | # Python 2.7
103 | WORKDIR /usr/src
104 | ENV PYTHON_VERSION 2.7.10
105 | RUN wget https://www.python.org/ftp/python/${PYTHON_VERSION}/Python-${PYTHON_VERSION}.tgz && \
106 | tar xvzf Python-${PYTHON_VERSION}.tgz && \
107 | cd Python-${PYTHON_VERSION} && \
108 | ./configure && \
109 | make -j$(grep -c processor /proc/cpuinfo) && \
110 | make install && \
111 | cd .. && rm -rf Python-${PYTHON_VERSION}*
112 |
113 | # Build and install dcmqi
114 |
125 | WORKDIR /usr/src
126 | RUN git clone https://github.com/QIICR/dcmqi.git && \
127 | mkdir dcmqi-superbuild && \
128 | cd dcmqi-superbuild && \
129 | cmake -DCMAKE_INSTALL_PREFIX=/usr ../dcmqi && \
130 | make -j$(grep -c processor /proc/cpuinfo)
131 |
132 | # Environmental Variables
133 | ENV PATH "$PATH:/opt/slicer"
134 | ENV PATH "$PATH:/usr/local/ANTs.2.1.0.Debian-Ubuntu_X64"
135 |
136 | # Install DeepNeuro. Scikit-image has installation problems with EasyInstall and setup.py
137 | RUN git clone https://github.com/QTIM-Lab/DeepNeuro /home/DeepNeuro
138 | WORKDIR /home/DeepNeuro
139 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM qtimlab/deepneuro:latest
2 | LABEL maintainer "Andrew Beers "
3 |
4 | # Copy in models -- do this with Python module in future.
5 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_Ischemic_Stroke
6 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_Ischemic_Stroke/Ischemic_Stroke_Model.h5 "https://www.dropbox.com/s/bnydjydg76xu22g/Ischemic_Stroke_Model.h5?dl=1"
7 |
8 | # Commands at startup.
9 | WORKDIR "/"
10 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh
11 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]
12 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/README.md:
--------------------------------------------------------------------------------
1 | # Ischemic_Stroke
2 |
3 | This module creates segmentations of ischemic stroke lesions given DWI-derived diffusion maps and B0 maps as input volumes. These segmentations are created by deep neural networks trained on 991 patients from Massachusetts General Hospital as part of the NIH-funded Heart-Brain Interactions in Human Acute Ischemic Stroke Study. The following pre-processing steps are included in this module: image registration and zero-mean normalization. This module was developed at the Quantitative Tumor Imaging Lab at the Martinos Center (MGH, MIT/Harvard HST).
4 |
5 | ## Table of Contents
6 | - [Docker Usage](#docker-usage)
7 | - [Python Docker Wrapper Usage](#python-docker-wrapper-usage)
8 | - [Docker Example](#docker-example)
9 | - [Citation](#citation)
10 |
11 | ## Docker Usage
12 |
13 | The best way to use this module is with a Docker container. If you are not familiar with Docker, you can download it [here](https://docs.docker.com/engine/installation/) and read a tutorial on proper usage [here](https://docker-curriculum.com/).
14 |
15 | Pull the Ischemic_Stroke Docker container from https://hub.docker.com/r/qtimlab/deepneuro_segment_ischemic_stroke/. Use the command "docker pull qtimlab/deepneuro_segment_ischemic_stroke".
16 |
17 | You can then enter a command using the following template to generate a segmentation:
18 |
19 | ```
20 | nvidia-docker run --rm -v [MOUNTED_DIRECTORY]:/INPUT_DATA qtimlab/deepneuro_segment_ischemic_stroke segment_ischemic_stroke pipeline -B0 [B0_FILE] -DWI [DWI_FILE] -output_folder [OUTPUT_FOLDER] [-gpu_num [GPU_NUM] -registered -preprocessed -save_all_steps -save_preprocessed -segmentation_output [FILENAME]]
21 | ```
22 |
23 | In order to use Docker, you must mount the directory containing all of your data and your output. All inputted filepaths must be relative to this mounted directory. For example, if you mounted the directory /home/my_users/data/, and wanted to input the file /home/my_users/data/patient_1/B0.nii.gz as a parameter, you should input /INPUT_DATA/patient_1/B0.nii.gz. Note that the Python wrapper for Docker in this module will adjust paths for you.
24 |
25 | A brief explanation of this function's parameters follows:
26 |
27 | | Parameter | Documentation |
28 | | ------------- |-------------|
29 | | -output_folder | A filepath to your output folder. Output segmentations will be placed here with filenames specified in -segmentation_output |
30 | | -B0, -DWI | Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each. |
31 | | -segmentation_output | Optional. Name of output for stroke lesion labels. Should not be a filepath, like '/home/user/segmentation.nii.gz', but just a name, like "segmentation.nii.gz" |
32 | | -gpu_num | Optional. Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu. |
33 | | -registered | If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step. |
34 | | -preprocessed | If flagged, data is assumed to already have been entirely preprocessed by DeepNeuro, including intensity normalization. Only use if data has been passed through DeepNeuro already to ensure proper performance. |
35 | | -save_all_steps | If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder. |
36 | | -save_preprocessed | If flagged, the final volumes after registration and zero-mean normalization will be saved in output_folder. |
37 |
38 | ## Python Docker Wrapper Usage
39 |
40 | To avoid adjusting your filepaths to the mounted directory, you may want to avoid using nvidia-docker directly. I've also created a python utility that wraps around the nvidia-docker command above, and is slightly easier to use. In order to use this utility, you will need to clone this repository ("git clone https://github.com/QTIM-Lab/DeepNeuro") and install it ("python setup.py install", in the directory where you cloned the repository).
41 |
42 | Once you have installed the repository, you can use the following command on the command-line:
43 |
44 | ```
45 | segment_ischemic_stroke docker_pipeline -B0 [B0_FILE] -DWI [DWI_FILE] -output_folder [OUTPUT_FOLDER] [-gpu_num [GPU_NUM] -registered -preprocessed -save_all_steps -save_preprocessed]
46 | ```
47 |
48 | Parameters should be exactly the same as in the Docker use-case, except now you will not have to modify filepaths to be relative to the mounted folder.
49 |
50 | ## Docker Example
51 |
52 | Let's say you stored some DICOM data on your computer at the path /home/my_user/Data/, and wanted to segment data located at /home/my_user/Data/Patient_1. The nvidia-docker command would look like this:
53 |
54 | ```
55 | nvidia-docker run --rm -v /home/my_user/Data:/INPUT_DATA qtimlab/deepneuro_segment_ischemic_stroke segment_ischemic_stroke pipeline -B0 /INPUT_DATA/Patient_1/B0 -DWI /INPUT_DATA/Patient_1/DWI -output_folder /INPUT_DATA/Patient_1/Output_Folder
56 | ```
57 |
58 | First, note that the "/INPUT_DATA" designation on the right-hand side of the "-v" option will never change. "INPUT_DATA" is a folder within the Docker container that will not change between runs.
59 |
60 | Second, note that you will need to make sure that the left-hand side of the "-v" option is an absolute, rather than relative, path. For example "../Data/" and "~/Data/" will not work (relative path), but "/home/my_user/Data/" will work (absolute path, starting from the root directory).
61 |
62 | Third, note that the folders you provide as arguments to the "segment_ischemic_stroke pipeline" command should be relative paths. This is because you are mounting, and thus renaming, a folder on your system to the "/INPUT_DATA" folder inside the Docker system. For example, if you were mounting the directory "/home/my_user/Data/" to "/INPUT_DATA", you should not provide the path "/home/my_user/Data/Patient_1/B0" as a parameter. Rather, you should provide the path "/INPUT_DATA/Patient_1/B0", as those parts of the path are within the scope of your mounted directory.
63 |
64 | ## Citation
65 |
66 | Publication in preparation.
67 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/Singularity.deepneuro_segment_ischemic_stroke:
--------------------------------------------------------------------------------
1 | Bootstrap: docker
2 | From: qtimlab/deepneuro_segment_ischemic_stroke:latest
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Ischemic_Stroke/__init__.py
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/cli.py:
--------------------------------------------------------------------------------
1 | """ Command line interface for the Ischemic Stroke Segmentation
2 | module.
3 | """
4 |
5 | import argparse
6 | import sys
7 | import os
8 |
9 | from deepneuro.pipelines.shared import DeepNeuroCLI
10 |
11 |
12 | class Segment_Ischemic_Stroke_cli(DeepNeuroCLI):
13 |
14 | def load(self):
15 |
16 | self.command_name = 'segment_ischemic_stroke'
17 |         self.docker_container = 'qtimlab/deepneuro_segment_ischemic_stroke:latest'
18 | self.filepath_arguments = ['output_folder', 'DWI', 'B0', 'input_directory']
19 |
20 | super(Segment_Ischemic_Stroke_cli, self).load()
21 |
22 | def parse_args(self):
23 |
24 | parser = argparse.ArgumentParser(
25 | description='''segment_ischemic_stroke pipeline
26 |
27 | Create a binary ischemic-stroke mask.
28 |
29 |         -output_folder: A filepath to your output folder, where segmentations and precursor files will be output.
30 | -DWI, B0: Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each.
31 |         -segmentation_output: Name of output for stroke lesion segmentations. Should not be a filepath, like '/home/user/segmentation.nii.gz', but just a name, like "segmentation.nii.gz"
32 | -gpu_num: Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu.
33 | -registered: If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step.
34 |         -preprocessed: If flagged, data is assumed to have been normalized to zero mean and unit variance with respect to a brain mask, and skips that preprocessing step.
35 | -save_all_steps: If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder.
36 | -save_preprocessed: If flagged, the final volume after all preprocessing steps will be saved in output_folder
37 | ''')
38 |
39 | parser.add_argument('-output_folder', type=str)
40 | parser.add_argument('-DWI', type=str)
41 | parser.add_argument('-B0', type=str)
42 | parser.add_argument('-input_directory', type=str)
43 | parser.add_argument('-segmentation_output', nargs='?', type=str, const='segmentation.nii.gz', default='segmentation.nii.gz')
44 |
45 | parser.add_argument('-registered', action='store_true')
46 | parser.add_argument('-preprocessed', action='store_true')
47 |
48 | parser.add_argument('-gpu_num', nargs='?', const='0', default='0', type=str)
49 | parser.add_argument('-save_only_segmentations', action='store_true')
50 | parser.add_argument('-save_all_steps', action='store_true')
51 | parser.add_argument('-quiet', action='store_true')
52 | parser.add_argument('-output_probabilities', action='store_true')
53 | args = parser.parse_args(sys.argv[2:])
54 |
55 | return args
56 |
57 | def pipeline(self):
58 |
59 | args = self.parse_args()
60 |
61 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
62 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
63 |
64 | from deepneuro.pipelines.Ischemic_Stroke.predict import predict_ischemic_stroke
65 |
66 | predict_ischemic_stroke(args.output_folder, DWI=args.DWI, B0=args.B0, ground_truth=None, input_directory=args.input_directory, registered=args.registered, preprocessed=args.preprocessed, save_all_steps=args.save_all_steps, output_segmentation_filename=args.segmentation_output, quiet=args.quiet, save_only_segmentations=args.save_only_segmentations)
67 |
68 |
69 | def main():
70 | Segment_Ischemic_Stroke_cli()
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/predict.py:
--------------------------------------------------------------------------------
1 | from deepneuro.outputs import PatchesInference
2 | from deepneuro.preprocessing.preprocessor import DICOMConverter
3 | from deepneuro.preprocessing.signal import ZeroMeanNormalization
4 | from deepneuro.preprocessing.transform import Coregister
5 | from deepneuro.postprocessing.label import BinarizeLabel
6 | from deepneuro.pipelines.shared import load_data
7 | from deepneuro.models.model import load_model_with_output
8 | from deepneuro.utilities.util import docker_print
9 |
10 |
11 | def predict_ischemic_stroke(output_folder,
12 | B0,
13 | DWI,
14 | ground_truth=None,
15 | input_directory=None,
16 | registered=False,
17 | preprocessed=False,
18 | save_only_segmentations=False,
19 | save_all_steps=False,
20 | output_segmentation_filename='segmentation.nii.gz',
21 | input_data=None,
22 | registration_reference='FLAIR',
23 | quiet=False):
24 |
25 | verbose = not quiet
26 | save_preprocessed = not save_only_segmentations
27 | registration_reference_channel = 1
28 |
29 | #--------------------------------------------------------------------#
30 | # Step 1, Load Data
31 | #--------------------------------------------------------------------#
32 |
33 | data_collection = load_data(inputs=[B0, DWI], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)
34 |
35 | #--------------------------------------------------------------------#
36 | # Step 2, Load Models
37 | #--------------------------------------------------------------------#
38 |
39 | stroke_prediction_parameters = {'inputs': ['input_data'],
40 | 'output_directory': output_folder,
41 | 'output_filename': output_segmentation_filename,
42 | 'batch_size': 50,
43 | 'patch_overlaps': 6,
44 | 'output_patch_shape': (62, 62, 6, 1),
45 | 'case_in_filename': False,
46 | 'verbose': verbose}
47 |
48 | stroke_model = load_model_with_output(model_name='ischemic_stroke', outputs=[PatchesInference(**stroke_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='label')])
49 |
50 | #--------------------------------------------------------------------#
51 | # Step 3, Add Data Preprocessors
52 | #--------------------------------------------------------------------#
53 |
54 | if not preprocessed:
55 |
56 | preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
57 |
58 | if not registered:
59 | preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=registration_reference_channel)]
60 |
61 | if not preprocessed:
62 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]
63 |
64 | data_collection.append_preprocessor(preprocessing_steps)
65 |
66 | #--------------------------------------------------------------------#
67 | # Step 4, Run Inference
68 | #--------------------------------------------------------------------#
69 |
70 | if verbose:
71 | docker_print('Starting New Case...')
72 |
73 | docker_print('Ischemic Stroke Prediction')
74 | docker_print('======================')
75 |
76 | stroke_model.generate_outputs(data_collection, output_folder)
77 |
78 | data_collection.clear_preprocessor_outputs()
79 |
80 |
81 | if __name__ == '__main__':
82 |
83 | pass
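
Since `predict_ischemic_stroke` is a plain function, the pipeline can also be scripted directly, without the CLI or Docker. A minimal sketch, assuming hypothetical input paths on disk:

```
from deepneuro.pipelines.Ischemic_Stroke.predict import predict_ischemic_stroke

# Hypothetical inputs: nifti files or single-volume DICOM folders.
predict_ischemic_stroke(output_folder='/data/stroke_case/output',
                        DWI='/data/stroke_case/DWI.nii.gz',
                        B0='/data/stroke_case/B0.nii.gz',
                        registered=False,     # run coregistration
                        preprocessed=False,   # run zero-mean normalization
                        save_all_steps=False,
                        quiet=False)
```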
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/resources/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Ischemic_Stroke/resources/icon.png
--------------------------------------------------------------------------------
/deepneuro/pipelines/Ischemic_Stroke/train.py:
--------------------------------------------------------------------------------
1 |
2 | import os
3 |
4 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
5 | os.environ["CUDA_VISIBLE_DEVICES"] = '3'
6 |
7 | from deepneuro.data.data_collection import DataCollection
8 | from deepneuro.augmentation.augment import Flip_Rotate_2D, ExtractPatches
9 | from deepneuro.models.unet import UNet
10 | # from deepneuro.models.timenet import TimeNet  # unused import; no timenet module exists in this package
11 | from deepneuro.outputs.inference import ModelPatchesInference
12 | from deepneuro.models.model import load_old_model
13 |
14 | # Temporary
15 | from keras.utils import plot_model
16 | import glob
17 |
18 | def train_Segment_GBM(data_directory, val_data_directory):
19 |
20 | # Define input modalities to load.
21 |     if True:  # toggle: switch to the multi-input modality dict below
22 | training_modality_dict = {'input_modalities':
23 | ['*FLAIR_pp.*', '*T2_pp.*', '*T1_pp.*', '*T1post_pp.*'],
24 | 'ground_truth': ['*full_edemamask_pp.*']}
25 | else:
26 | training_modality_dict = {'input_modalities':
27 | [['*FLAIR_pp.*', 'FLAIR_norm2*'], ['*T1post_pp.*', 'T1post_norm2*']],
28 | 'ground_truth': ['*full_edemamask_pp.*', 'FLAIRmask-label.nii.gz']}
29 |
30 | load_data = True
31 | train_model = True
32 | load_test_data = True
33 | predict = True
34 |
35 | training_data = './wholetumor_predict_patches_test3.h5'
36 | model_file = 'wholetumor_segnet-58-0.38.h5'
37 | testing_data = './brats_test_case.h5'
38 |
39 | # Write the data to hdf5
40 | if (not os.path.exists(training_data) and train_model) or load_data:
41 |
42 | # Create a Data Collection
43 | training_data_collection = DataCollection(data_directory, modality_dict=training_modality_dict, verbose=True)
44 | training_data_collection.fill_data_groups()
45 |
46 | # Define patch sampling regions
47 | def brain_region(data):
48 | return (data['ground_truth'] != 1) & (data['input_modalities'] != 0)
49 | def roi_region(data):
50 | return data['ground_truth'] == 1
51 |
52 | # Add patch augmentation
53 | patch_augmentation = ExtractPatches(patch_shape=(32,32,32), patch_region_conditions=[[brain_region, 1]], data_groups=['input_modalities', 'ground_truth'])
54 | training_data_collection.append_augmentation(patch_augmentation, multiplier=200)
55 |
56 | # Add left-right flips
57 | flip_augmentation = Flip_Rotate_2D(flip=True, rotate=False, data_groups=['input_modalities', 'ground_truth'])
58 | training_data_collection.append_augmentation(flip_augmentation, multiplier=2)
59 |
60 | # Write data to hdf5
61 | training_data_collection.write_data_to_file(training_data)
62 |
63 | # Or load pre-loaded data.
64 | training_data_collection = DataCollection(data_storage=training_data, verbose=True)
65 | training_data_collection.fill_data_groups()
66 |
67 | # Define model parameters
68 | model_parameters = {'input_shape': (32, 32, 32, 4),
69 | 'downsize_filters_factor': 1,
70 | 'pool_size': (2, 2, 2),
71 | 'filter_shape': (3, 3, 3),
72 | 'dropout': 0,
73 | 'batch_norm': True,
74 | 'initial_learning_rate': 0.000001,
75 | 'output_type': 'binary_label',
76 | 'num_outputs': 1,
77 | 'activation': 'relu',
78 | 'padding': 'same',
79 | 'implementation': 'keras',
80 | 'depth': 4,
81 | 'max_filter': 512}
82 |
83 | # Create U-Net
84 | if train_model:
85 | unet_model = UNet(**model_parameters)
86 | plot_model(unet_model.model, to_file='model_image_dn.png', show_shapes=True)
87 | training_parameters = {'input_groups': ['input_modalities', 'ground_truth'],
88 | 'output_model_filepath': 'wholetumor_segnet-{epoch:02d}-{loss:.2f}.h5',
89 | 'training_batch_size': 2,
90 | 'num_epochs': 100,
91 | 'training_steps_per_epoch': 200,
92 | 'save_best_only': False}
93 | unet_model.train(training_data_collection, **training_parameters)
94 | else:
95 | unet_model = load_old_model(model_file)
96 |
97 | # Load testing data..
98 | if not os.path.exists(testing_data) or load_test_data:
99 | # Create a Data Collection
100 | testing_data_collection = DataCollection(val_data_directory, modality_dict=training_modality_dict, verbose=True)
101 | testing_data_collection.fill_data_groups()
102 | # Write data to hdf5
103 | testing_data_collection.write_data_to_file(testing_data)
104 |
105 | if predict:
106 | testing_data_collection = DataCollection(data_storage=testing_data, verbose=True)
107 | testing_data_collection.fill_data_groups()
108 |
109 | testing_parameters = {'inputs': ['input_modalities'],
110 | 'output_filename': 'deepneuro.nii.gz',
111 | 'batch_size': 200,
112 | 'patch_overlaps': 1}
113 |
114 | prediction = ModelPatchesInference(testing_data_collection, **testing_parameters)
115 |
116 | unet_model.append_output([prediction])
117 | unet_model.generate_outputs()
118 |
119 |
120 | if __name__ == '__main__':
121 |
122 | data_directory = ['~/BRATS2017/Train', '~/BRATS2017/Train']
123 | val_data_directory = ''
124 |
125 | train_Segment_GBM(data_directory, val_data_directory)
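
The `patch_region_conditions` mechanism above pairs a boolean mask function with a sampling weight. A self-contained numpy sketch of the underlying sampling idea (illustrative only; the real `ExtractPatches` implementation differs):

```
import numpy as np

def roi_region(data):
    # Same style of condition as used above: a boolean mask over the volume.
    return data['ground_truth'] == 1

rng = np.random.default_rng(0)
data = {'ground_truth': rng.integers(0, 2, size=(64, 64, 64))}

mask = roi_region(data)
candidates = np.argwhere(mask)                     # voxels satisfying the condition
center = candidates[rng.integers(len(candidates))]

# Extract a 32^3 patch around the sampled voxel, clipped to the volume bounds.
lo = np.clip(center - 16, 0, np.array(data['ground_truth'].shape) - 32)
patch = data['ground_truth'][lo[0]:lo[0] + 32, lo[1]:lo[1] + 32, lo[2]:lo[2] + 32]
print(patch.shape)  # (32, 32, 32)
```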
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM qtimlab/deepneuro:latest
2 | LABEL maintainer "Andrew Beers "
3 |
4 | # Copy in models -- do this with Python module in future.
5 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_Mets
6 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_Mets/Segment_Mets_Model.h5 "https://www.dropbox.com/s/p4xzes20g0fviye/Brain_Mets_Segmentation_Model.h5?dl=1"
7 |
8 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/SkullStripping
9 | RUN wget -O /home/DeepNeuro/deepneuro/load/SkullStripping/Skullstrip_MRI_Model.h5 "https://www.dropbox.com/s/cyddw4tt380u7j9/Skullstrip_MRI_Model.h5?dl=1"
10 |
11 | # Commands at startup.
12 | WORKDIR "/"
13 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh
14 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]
15 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/Singularity.deepneuro_segment_mets:
--------------------------------------------------------------------------------
1 | Bootstrap: docker
2 | From: qtimlab/deepneuro_segment_mets:latest
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Segment_Brain_Mets/__init__.py
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/cli.py:
--------------------------------------------------------------------------------
1 | """ Command line interface for the Brain Metastases Segmentation
2 | module.
3 | """
4 |
5 | import argparse
6 | import sys
7 | import os
8 |
9 | from deepneuro.pipelines.shared import DeepNeuroCLI
10 |
11 |
12 | class Segment_Mets_cli(DeepNeuroCLI):
13 |
14 | def load(self):
15 |
16 | self.command_name = 'segment_mets'
17 | self.docker_container = 'qtimlab/deepneuro_segment_mets:latest'
18 | self.filepath_arguments = ['output_folder', 'T1', 'T2', 'T1POST', 'FLAIR', 'input_directory']
19 |
20 | super(Segment_Mets_cli, self).load()
21 |
22 | def parse_args(self):
23 |
24 | parser = argparse.ArgumentParser(
25 | description='''segment_mets pipeline
26 |
27 | Segment an image from DICOMs with all preprocessing steps included.
28 |
29 | -output_folder: A filepath to your output folder. The output segmentation will be generated here.
30 | -T2, -T1, -T1POST, -FLAIR: Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each.
31 | -segmentation_output: Name of the output metastasis segmentation. Should not be a filepath, like '/home/user/segmentation.nii.gz', but just a name, like "segmentation.nii.gz"
32 | -gpu_num: Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu.
33 | -debiased: If flagged, data is assumed to already have been N4 bias-corrected, and skips that preprocessing step.
34 | -resampled: If flagged, data is assumed to already have been isotropically resampled, and skips that preprocessing step.
35 | -registered: If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step.
36 | -skullstripped: If flagged, data is assumed to have been already skull-stripped.
37 | -preprocessed: If flagged, data is assumed to have been preprocessed with zero mean and unit variance with respect to a brain mask.
38 | -save_all_steps: If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder.
39 | -save_preprocessed: If flagged, the final volume after all preprocessing steps will be saved in output_folder.
40 | ''')
41 |
42 | parser.add_argument('-output_folder', type=str)
43 | parser.add_argument('-T2', type=str)
44 | parser.add_argument('-T1', type=str)
45 | parser.add_argument('-T1POST', type=str)
46 | parser.add_argument('-FLAIR', type=str)
47 | parser.add_argument('-input_directory', type=str)
48 | parser.add_argument('-segmentation_output', nargs='?', type=str, const='segmentation.nii.gz', default='segmentation.nii.gz')
49 |
50 | parser.add_argument('-debiased', action='store_true')
51 | parser.add_argument('-registered', action='store_true')
52 | parser.add_argument('-skullstripped', action='store_true')
53 | parser.add_argument('-preprocessed', action='store_true')
54 |
55 | parser.add_argument('-gpu_num', nargs='?', const='0', default='0', type=str)
56 | parser.add_argument('-save_only_segmentations', action='store_true')
57 | parser.add_argument('-save_all_steps', action='store_true')
58 | parser.add_argument('-quiet', action='store_true')
59 | parser.add_argument('-output_probabilities', action='store_true')
60 | args = parser.parse_args(sys.argv[2:])
61 |
62 | return args
63 |
64 | def pipeline(self):
65 |
66 | args = self.parse_args()
67 |
68 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
69 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
70 |
71 | from deepneuro.pipelines.Segment_Brain_Mets.predict import predict_brain_mets
72 |
73 |         predict_brain_mets(args.output_folder,
74 |                            T2=args.T2,
75 |                            T1POST=args.T1POST,
76 |                            T1PRE=args.T1,
77 |                            FLAIR=args.FLAIR,
78 |                            input_directory=args.input_directory,
79 |                            ground_truth=None,
80 |                            bias_corrected=args.debiased,
81 |                            registered=args.registered,
82 |                            skullstripped=args.skullstripped,
83 |                            preprocessed=args.preprocessed,
84 |                            save_only_segmentations=args.save_only_segmentations,
85 |                            output_probabilities=args.output_probabilities,
86 |                            save_all_steps=args.save_all_steps,
87 |                            output_segmentation_filename=args.segmentation_output,
88 |                            quiet=args.quiet)
89 | 
90 | 
91 | def main():
92 |     Segment_Mets_cli()
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/predict.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | #--------------------------------------------------------------------#
4 | # Step 0, Import DeepNeuro Commands
5 | #--------------------------------------------------------------------#
6 |
7 | from deepneuro.outputs import PatchesInference
8 | from deepneuro.preprocessing import DICOMConverter, N4BiasCorrection, ZeroMeanNormalization, Coregister, SkullStrip_Model
9 | from deepneuro.postprocessing import BinarizeLabel, LargestComponents, FillHoles
10 | from deepneuro.pipelines.shared import load_data
11 | from deepneuro.models.model import load_model_with_output
12 | from deepneuro.utilities import docker_print
13 |
14 |
15 | def predict_brain_mets(output_folder,
16 | T2=None,
17 | T1POST=None,
18 | T1PRE=None,
19 | FLAIR=None,
20 | ground_truth=None,
21 | input_directory=None,
22 | bias_corrected=True,
23 | registered=False,
24 | skullstripped=False,
25 | preprocessed=False,
26 | output_segmentation_filename='segmentation.nii.gz',
27 | output_probabilities=False,
28 | quiet=False,
29 | input_data=None,
30 | save_only_segmentations=False,
31 | save_all_steps=False):
32 |
33 | verbose = not quiet
34 | save_preprocessed = not save_only_segmentations
35 |
36 | #--------------------------------------------------------------------#
37 | # Step 1, Load Data
38 | #--------------------------------------------------------------------#
39 |
40 | data_collection = load_data(inputs=[T1PRE, T1POST, T2, FLAIR], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)
41 |
42 | #--------------------------------------------------------------------#
43 | # Step 2, Load Models
44 | #--------------------------------------------------------------------#
45 |
46 | mets_prediction_parameters = {'inputs': ['input_data'],
47 | 'output_directory': output_folder,
48 | 'output_filename': output_segmentation_filename,
49 | 'batch_size': 50,
50 | 'patch_overlaps': 8,
51 | 'output_patch_shape': (28, 28, 28, 1),
52 | 'output_channels': [1],
53 | 'case_in_filename': False,
54 | 'verbose': verbose}
55 |
56 | mets_model = load_model_with_output(model_name='mets_enhancing', outputs=[PatchesInference(**mets_prediction_parameters)], postprocessors=[BinarizeLabel(postprocessor_string='label')], wcc_weights={0: 0.1, 1: 3.0})
57 |
58 | #--------------------------------------------------------------------#
59 | # Step 3, Add Data Preprocessors
60 | #--------------------------------------------------------------------#
61 |
62 | if not preprocessed:
63 |
64 | # Random hack to save DICOMs to niftis for further processing.
65 | preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
66 |
67 | if not skullstripped:
68 | skullstripping_prediction_parameters = {'inputs': ['input_data'],
69 | 'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
70 | 'batch_size': 50,
71 | 'patch_overlaps': 3,
72 | 'output_patch_shape': (56, 56, 6, 1),
73 | 'save_to_file': False,
74 | 'data_collection': data_collection}
75 |
76 | skullstripping_model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])
77 |
78 | if not bias_corrected:
79 | preprocessing_steps += [N4BiasCorrection(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
80 |
81 | if not registered:
82 | preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=1)]
83 |
84 | if not skullstripped:
85 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
86 |
87 | preprocessing_steps += [SkullStrip_Model(data_groups=['input_data'], model=skullstripping_model, save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=[3, 1])]
88 |
89 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_preprocessor=preprocessing_steps[-1], preprocessor_string='_preprocessed')]
90 |
91 | else:
92 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]
93 |
94 | data_collection.append_preprocessor(preprocessing_steps)
95 |
96 | #--------------------------------------------------------------------#
97 | # Step 4, Run Inference
98 | #--------------------------------------------------------------------#
99 |
100 | if verbose:
101 | docker_print('Starting New Case...')
102 |
103 | docker_print('Enhancing Mets Prediction')
104 | docker_print('======================')
105 |
106 | mets_model.generate_outputs(data_collection, output_folder)
107 |
108 | data_collection.clear_preprocessor_outputs()
109 |
110 |
111 | if __name__ == '__main__':
112 |
113 | pass
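
Note how the final `ZeroMeanNormalization` step uses `mask_preprocessor=preprocessing_steps[-1]`, so intensities are normalized only inside the brain mask produced by `SkullStrip_Model`. A minimal numpy sketch of masked normalization, with illustrative names:

```
import numpy as np

def zero_mean_unit_variance(volume, mask):
    """Normalize voxels inside the mask; leave background untouched."""
    normalized = volume.astype(np.float32).copy()
    voxels = normalized[mask > 0]
    normalized[mask > 0] = (voxels - voxels.mean()) / (voxels.std() + 1e-8)
    return normalized

volume = np.random.rand(24, 24, 24) * 1000.0  # fake MR intensities
mask = np.zeros_like(volume)
mask[4:20, 4:20, 4:20] = 1                    # fake brain mask

normalized = zero_mean_unit_variance(volume, mask)
print(normalized[mask > 0].mean(), normalized[mask > 0].std())  # ~0.0, ~1.0
```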
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/resources/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Segment_Brain_Mets/resources/icon.png
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_Brain_Mets/train.py:
--------------------------------------------------------------------------------
1 | import os
2 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
3 | os.environ["CUDA_VISIBLE_DEVICES"] = '3'
4 |
5 | from deepneuro.data.data_collection import DataCollection
6 | from deepneuro.augmentation.augment import ExtractPatches
7 | from deepneuro.postprocessing.label import BinarizeLabel
8 | from deepneuro.postprocessing.statistics import ErrorCalculation  # assumed location, per the package tree
9 | from deepneuro.preprocessing.signal import ZeroMeanNormalization
10 | from deepneuro.models.unet import UNet
11 | from deepneuro.outputs.inference import ModelPatchesInference
12 | # from deepneuro.models.weighted_cat_cross_entropy import WeightedCategoricalCrossEntropy  # unused; no such module in this tree
13 | 
14 | TrainingDataCollection = DataCollection(data_sources={'csv': 'Metastases_Data_Train.csv'})
15 | TestingDataCollection = DataCollection(data_sources={'csv': 'Metastases_Data_Test.csv'})
16 | 
17 | Normalization = ZeroMeanNormalization(data_groups=['input_data'])
18 | TrainingDataCollection.append_preprocessor(Normalization)
19 | 
20 | def BrainRegion(data):
21 |     return data['input_data'] != 0
22 | def TumorRegion(data):
23 |     return data['ground_truth'] == 1
24 | 
25 | PatchAugmentation = ExtractPatches(patch_shape=(32, 32, 32),
26 |     patch_region_conditions=[[BrainRegion, 0.70], [TumorRegion, 0.30]])
27 | TrainingDataCollection.append_augmentation(PatchAugmentation, multiplier=20)
28 | TrainingDataCollection.write_data_to_file('training_data.hdf5')
29 | 
30 | ModelParameters = {'input_shape': (32, 32, 32, 1),
31 |     'cost_function': 'weighted_categorical_label'}
32 | UNETModel = UNet(**ModelParameters)
33 | 
34 | TrainingParameters = {'output_model_filepath': 'unet_metastases.h5',  # model checkpoint, not a .py file
35 |     'training_batch_size': 16,
36 |     'num_epochs': 50}
37 | UNETModel.train(TrainingDataCollection, **TrainingParameters)
38 | 
39 | TestingParameters = {'inputs': ['input_data'],
40 |     'output_filename': '_segmentation.nii.gz',
41 |     'batch_size': 20,
42 |     'output_patch_shape': (30, 30, 30, 1),
43 |     'patch_overlaps': 4,
44 |     'output_directory': './Prediction_Outputs'}
45 | Prediction = ModelPatchesInference(**TestingParameters)
46 | 
47 | LabelBinarization = BinarizeLabel(binarization_threshold=.5)
48 | ErrorStatistics = ErrorCalculation(output_log='inference_statistics.csv',
49 |     cost_functions=['dice', 'cluster_accuracy'])
50 | Prediction.append_postprocessor([LabelBinarization, ErrorStatistics])
51 | 
52 | UNETModel.append_output([Prediction])
53 | UNETModel.generate_outputs(TestingDataCollection)
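
In the script above, `multiplier=20` draws twenty patches per case, split by the region proportions (0.70 brain, 0.30 tumor). A quick back-of-the-envelope in Python, assuming a hypothetical 100-case training CSV:

```
cases = 100        # hypothetical row count for Metastases_Data_Train.csv
multiplier = 20    # patches drawn per case, as configured above

total_patches = cases * multiplier            # 2000
brain_patches = int(total_patches * 0.70)     # 1400 sampled from BrainRegion
tumor_patches = int(total_patches * 0.30)     # 600 sampled from TumorRegion

batch_size = 16
steps_per_epoch = total_patches // batch_size  # 125 full batches per pass
print(total_patches, brain_patches, tumor_patches, steps_per_epoch)
```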
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM qtimlab/deepneuro:latest
2 | LABEL maintainer "Andrew Beers "
3 |
4 | # Copy in models -- do this with Python module in future.
5 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_GBM
6 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_GBM/Segment_GBM_Wholetumor_Model.h5 "https://www.dropbox.com/s/0ovo3q9neky2zhy/Segment_GBM_Wholetumor_Model.h5?dl=1"
7 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_GBM/Segment_GBM_Enhancing_Model.h5 "https://www.dropbox.com/s/yz1gyld4ie6apjh/Segment_GBM_Enhancing_Model.h5?dl=1"
8 |
9 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/SkullStripping
10 | RUN wget -O /home/DeepNeuro/deepneuro/load/SkullStripping/Skullstrip_MRI_Model.h5 "https://www.dropbox.com/s/cyddw4tt380u7j9/Skullstrip_MRI_Model.h5?dl=1"
11 |
12 | # Commands at startup.
13 | WORKDIR "/"
14 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh
15 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]
16 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/Singularity.deepneuro_segment_gbm:
--------------------------------------------------------------------------------
1 | Bootstrap: docker
2 | From: qtimlab/deepneuro_segment_gbm:latest
3 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Segment_GBM/__init__.py
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 | import os
4 |
5 | from deepneuro.pipelines.shared import DeepNeuroCLI
6 |
7 |
8 | class Segment_GBM_cli(DeepNeuroCLI):
9 |
10 | def load(self):
11 |
12 | self.command_name = 'segment_gbm'
13 | self.docker_container = 'qtimlab/deepneuro_segment_gbm:latest'
14 | self.filepath_arguments = ['output_folder', 'T1', 'T1POST', 'FLAIR', 'input_directory']
15 |
16 | super(Segment_GBM_cli, self).load()
17 |
18 | def parse_args(self):
19 |
20 | parser = argparse.ArgumentParser(
21 | description='''segment_gbm pipeline
22 |
23 | Segment an image from DICOMs with all preprocessing steps included.
24 |
25 | -output_folder: A filepath to your output folder. Two nifti files will be generated "enhancingtumor.nii.gz" and "wholetumor.nii.gz"
26 | -T1, -T1POST, -FLAIR: Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each.
27 | -wholetumor_output, -enhancing_output: Name of output for wholetumor and enhancing labels, respectively. Should not be a filepath, like '/home/user/enhancing.nii.gz', but just a name, like "enhancing.nii.gz"
28 | -gpu_num: Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu.
29 | -debiased: If flagged, data is assumed to already have been N4 bias-corrected, and skips that preprocessing step.
30 | -resampled: If flagged, data is assumed to already have been isotropically resampled, and skips that preprocessing step.
31 | -registered: If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step.
32 | -skullstripped: If flagged, data is assumed to have been already skull-stripped.
33 | -preprocessed: If flagged, data is assumed to have been preprocessed with zero mean and unit variance with respect to a brain mask.
34 | -save_all_steps: If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder.
35 | -save_preprocessed: If flagged, the final volume after all preprocessing steps will be saved in output_folder.
36 | ''')
37 |
38 | parser.add_argument('-output_folder', type=str)
39 | parser.add_argument('-T1', type=str)
40 | parser.add_argument('-T1POST', type=str)
41 |         parser.add_argument('-FLAIR', type=str)
42 |         parser.add_argument('-input_directory', type=str)
43 |         parser.add_argument('-wholetumor_output', nargs='?', type=str, const='wholetumor.nii.gz', default='wholetumor.nii.gz')
44 |         parser.add_argument('-enhancing_output', nargs='?', type=str, const='enhancing.nii.gz', default='enhancing.nii.gz')
45 | 
46 |         parser.add_argument('-debiased', action='store_true')
47 |         parser.add_argument('-registered', action='store_true')
48 |         parser.add_argument('-skullstripped', action='store_true')
49 |         parser.add_argument('-preprocessed', action='store_true')
50 | 
51 |         parser.add_argument('-gpu_num', nargs='?', const='0', default='0', type=str)
52 |         parser.add_argument('-save_only_segmentations', action='store_true')
53 |         parser.add_argument('-save_all_steps', action='store_true')
54 |         parser.add_argument('-quiet', action='store_true')
55 |         parser.add_argument('-output_probabilities', action='store_true')
56 |         args = parser.parse_args(sys.argv[2:])
57 | 
58 |         return args
59 | 
60 |     def pipeline(self):
61 | 
62 |         args = self.parse_args()
63 | 
64 |         os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
65 |         os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
66 | 
67 |         from deepneuro.pipelines.Segment_GBM.predict import predict_GBM
68 | 
69 |         predict_GBM(args.output_folder,
70 |                     FLAIR=args.FLAIR,
71 |                     T1POST=args.T1POST,
72 |                     T1PRE=args.T1,
73 |                     input_directory=args.input_directory,
74 |                     ground_truth=None,
75 |                     bias_corrected=args.debiased,
76 |                     registered=args.registered,
77 |                     skullstripped=args.skullstripped,
78 |                     preprocessed=args.preprocessed,
79 |                     save_only_segmentations=args.save_only_segmentations,
80 |                     output_probabilities=args.output_probabilities,
81 |                     save_all_steps=args.save_all_steps,
82 |                     output_wholetumor_filename=args.wholetumor_output,
83 |                     output_enhancing_filename=args.enhancing_output,
84 |                     quiet=args.quiet)
85 | 
86 | 
87 | def main():
88 |     Segment_GBM_cli()
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/predict.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | #--------------------------------------------------------------------#
4 | # Step 0, Import DeepNeuro Commands
5 | #--------------------------------------------------------------------#
6 |
7 | from deepneuro.outputs.segmentation import PatchesInference
8 | from deepneuro.preprocessing.preprocessor import DICOMConverter
9 | from deepneuro.preprocessing.signal import N4BiasCorrection, ZeroMeanNormalization
10 | from deepneuro.preprocessing.transform import Coregister
11 | from deepneuro.preprocessing.skullstrip import SkullStrip_Model
12 | from deepneuro.postprocessing.label import BinarizeLabel, LargestComponents, FillHoles
13 | from deepneuro.pipelines.shared import load_data
14 | from deepneuro.models.model import load_model_with_output
15 | from deepneuro.utilities.util import docker_print
16 |
17 |
18 | def predict_GBM(output_folder,
19 | T1POST=None,
20 | FLAIR=None,
21 | T1PRE=None,
22 | ground_truth=None,
23 | input_directory=None,
24 | bias_corrected=True,
25 | resampled=False,
26 | registered=False,
27 | skullstripped=False,
28 | preprocessed=False,
29 | save_only_segmentations=False,
30 | save_all_steps=False,
31 | output_wholetumor_filename='wholetumor_segmentation.nii.gz',
32 | output_enhancing_filename='enhancing_segmentation.nii.gz',
33 | output_probabilities=False,
34 | quiet=False,
35 | input_data=None,
36 | registration_reference='FLAIR'):
37 |
38 | verbose = not quiet
39 | save_preprocessed = not save_only_segmentations
40 |
41 | #--------------------------------------------------------------------#
42 | # Step 1, Load Data
43 | #--------------------------------------------------------------------#
44 |
45 | data_collection = load_data(inputs=[FLAIR, T1POST, T1PRE], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)
46 |
47 | #--------------------------------------------------------------------#
48 | # Step 2, Load Models and Postprocessors
49 | #--------------------------------------------------------------------#
50 |
51 | wholetumor_prediction_parameters = {'output_directory': output_folder,
52 | 'output_filename': output_wholetumor_filename,
53 | 'batch_size': 50,
54 | 'patch_overlaps': 6,
55 | 'output_patch_shape': (56, 56, 6, 1),
56 | 'case_in_filename': False,
57 | 'verbose': verbose}
58 |
59 | enhancing_prediction_parameters = {'output_directory': output_folder,
60 | 'output_filename': output_enhancing_filename,
61 | 'batch_size': 50,
62 | 'patch_overlaps': 6,
63 | 'output_patch_shape': (56, 56, 6, 1),
64 | 'case_in_filename': False,
65 | 'verbose': verbose}
66 |
67 | wholetumor_model = load_model_with_output(model_name='gbm_wholetumor_mri',
68 | outputs=[PatchesInference(**wholetumor_prediction_parameters)],
69 | postprocessors=[BinarizeLabel(postprocessor_string='label')])
70 |
71 | enhancing_model = load_model_with_output(model_name='gbm_enhancingtumor_mri',
72 | outputs=[PatchesInference(**enhancing_prediction_parameters)],
73 | postprocessors=[BinarizeLabel(postprocessor_string='label')])
74 |
75 | #--------------------------------------------------------------------#
76 | # Step 3, Add Data Preprocessors
77 | #--------------------------------------------------------------------#
78 |
79 | if not preprocessed:
80 |
81 | preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
82 |
83 | if not bias_corrected:
84 | preprocessing_steps += [N4BiasCorrection(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
85 |
86 | if not registered:
87 | preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=0)]
88 |
89 | if not skullstripped:
90 |
91 | skullstripping_prediction_parameters = {'inputs': ['input_data'],
92 | 'output_filename': os.path.join(output_folder, 'skullstrip_mask.nii.gz'),
93 | 'batch_size': 50,
94 | 'patch_overlaps': 3,
95 | 'output_patch_shape': (56, 56, 6, 1),
96 | 'save_to_file': False,
97 | 'data_collection': data_collection}
98 |
99 | skullstripping_model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents()])
100 |
101 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
102 |
103 | preprocessing_steps += [SkullStrip_Model(data_groups=['input_data'], model=skullstripping_model, save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=[0, 1])]
104 |
105 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_preprocessor=preprocessing_steps[-1], preprocessor_string='_preprocessed')]
106 |
107 | else:
108 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder, mask_zeros=True, preprocessor_string='_preprocessed')]
109 |
110 | data_collection.append_preprocessor(preprocessing_steps)
111 |
112 | #--------------------------------------------------------------------#
113 | # Step 4, Run Inference
114 | #--------------------------------------------------------------------#
115 |
116 | if verbose:
117 | docker_print('Starting New Case...')
118 |
119 | docker_print('Whole Tumor Prediction')
120 | docker_print('======================')
121 |
122 | wholetumor_file = wholetumor_model.generate_outputs(data_collection, output_folder)[0]['filenames'][-1]
123 | data_collection.add_channel(output_folder, wholetumor_file)
124 |
125 | if verbose:
126 | docker_print('Enhancing Tumor Prediction')
127 | docker_print('======================')
128 |
129 | enhancing_model.generate_outputs(data_collection, output_folder)
130 |
131 | data_collection.clear_preprocessor_outputs()
132 |
133 |
134 | if __name__ == '__main__':
135 |
136 | pass
137 |
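
The cascade is the heart of this pipeline: the whole-tumor output is written to disk, appended to the case as an extra channel via `add_channel`, and the enhancing model then sees the original modalities plus the stage-one mask. A schematic numpy sketch of that channel-stacking step (shapes and values are illustrative):

```
import numpy as np

# Hypothetical case: three registered modalities, shape (X, Y, Z, channels).
modalities = np.random.rand(64, 64, 64, 3)            # FLAIR, T1POST, T1PRE
wholetumor_mask = (np.random.rand(64, 64, 64) > 0.9)  # stand-in for stage-1 output

# Stage 2 input: original channels plus the stage-1 segmentation.
enhancing_input = np.concatenate(
    [modalities, wholetumor_mask[..., np.newaxis].astype(np.float32)], axis=-1)
print(enhancing_input.shape)  # (64, 64, 64, 4)
```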
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/resources/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Segment_GBM/resources/icon.png
--------------------------------------------------------------------------------
/deepneuro/pipelines/Segment_GBM/template.py:
--------------------------------------------------------------------------------
1 | Template_Dictionary = {
2 |
3 | "Module Name": "Segment_GBM",
4 | "Docker Name": "deepneuro_segment_gbm",
5 | "Command Line Name": "segment_gbm"
6 | 'Module Description': """
7 | """
8 |
9 |
10 |
11 | }
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM qtimlab/deepneuro:latest
2 | LABEL maintainer "Andrew Beers "
3 |
4 | # Copy in models -- do this with Python module in future.
5 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/SkullStripping
6 | RUN wget -O /home/DeepNeuro/deepneuro/load/SkullStripping/Skullstrip_MRI_Model.h5 "https://www.dropbox.com/s/cyddw4tt380u7j9/Skullstrip_MRI_Model.h5?dl=1"
7 |
8 | # Commands at startup.
9 | WORKDIR "/"
10 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh
11 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]
12 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/README.md:
--------------------------------------------------------------------------------
1 | # Skull_Stripping
2 |
3 | This module creates brain masks (skull-stripping segmentations) given post-contrast T1 and FLAIR input volumes. These masks are produced by deep neural networks trained on hundreds of public and private datasets of pre-operative high- and low-grade gliomas. The following pre-processing steps are included in this module: N4 bias correction, isotropic resampling (1x1x1), image registration, and zero-mean normalization. This module was developed at the Quantitative Tumor Imaging Lab at the Martinos Center (MGH, MIT/Harvard HST).
4 |
5 | ## Table of Contents
6 | - [Docker Usage](#docker-usage)
7 | - [Python Docker Wrapper Usage](#python-docker-wrapper-usage)
8 | - [Docker Example](#docker-example)
9 | - [Citation](#citation)
10 |
11 | ## Docker Usage
12 |
13 | The best way to use this module is with a Docker container. If you are not familiar with Docker, you can download it [here](https://docs.docker.com/engine/installation/) and read a tutorial on proper usage [here](https://docker-curriculum.com/).
14 |
15 | Pull the Skull_Stripping Docker container from https://hub.docker.com/r/qtimlab/deepneuro_skullstripping/. Use the command "docker pull qtimlab/deepneuro_skullstripping".
16 |
17 | You can then create a command using the following template to create a skull-stripping mask:
18 |
19 | ```
20 | nvidia-docker run --rm -v [MOUNTED_DIRECTORY]:/INPUT_DATA qtimlab/deepneuro_skullstripping skull_stripping pipeline -T1POST <T1POST> -FLAIR <FLAIR> -output_folder <OUTPUT_FOLDER> [-gpu_num <GPU_NUM> -debiased -resampled -registered -save_all_steps -save_preprocessed]
21 | ```
22 |
23 | In order to use Docker, you must mount the directory containing all of your data and your output. All inputted filepaths must be relative to this mounted directory. For example, if you mounted the directory /home/my_users/data/, and wanted to input the file /home/my_users/data/patient_1/FLAIR.nii.gz as a parameter, you should input /INPUT_DATA/patient_1/FLAIR.nii.gz. Note that the Python wrapper for Docker in this module will adjust paths for you.
24 |
25 | A brief explanation of this command's parameters follows:
26 |
27 | | Parameter | Documentation |
28 | | ------------- |-------------|
29 | | -output_folder | A filepath to your output folder. A binary brain mask will be generated here. |
30 | | -T1POST, -FLAIR | Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each. |
31 | | -mask_output | Optional. Name of output for skullstripping mask. Should not be a filepath, like '/home/user/mask.nii.gz', but just a name, like "mask.nii.gz" |
32 | | -gpu_num | Optional. Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu. |
33 | | -debiased | If flagged, data is assumed to already have been N4 bias-corrected, and skips that preprocessing step. |
34 | | -resampled | If flagged, data is assumed to already have been isotropically resampled, and skips that preprocessing step. |
35 | | -registered | If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step. |
36 | | -preprocessed | If flagged, data is assumed to already have been entirely preprocessed by DeepNeuro, including intensity normalization. Only use if data has been passed through DeepNeuro already to ensure proper performance. |
37 | | -save_all_steps | If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder. |
38 | | -save_preprocessed | If flagged, the final volumes after bias correction, resampling, and registration will also be saved in output_folder. |
39 |
40 | ## Python Docker Wrapper Usage
41 |
42 | To avoid adjusting your filepaths manually, you may want to avoid using nvidia-docker directly. I've also created a python utility that wraps around the nvidia-docker command above, and is slightly easier to use. In order to use this utility, you will need to clone this repository ("git clone https://github.com/QTIM-Lab/DeepNeuro") and install it ("python setup.py install", in the directory where you cloned the repository).
43 |
44 | Once you have installed the repository, you can use the following command on the command-line:
45 |
46 | ```
47 | skull_stripping docker_pipeline -T1POST <T1POST> -FLAIR <FLAIR> -output_folder <OUTPUT_FOLDER> [-gpu_num <GPU_NUM> -debiased -resampled -registered -save_all_steps -save_preprocessed]
48 | ```
49 |
50 | Parameters should be exactly the same as in the Docker use-case, except now you will not have to modify filepaths to be relative to the mounted folder.
51 |
52 | ## Docker Example
53 |
54 | Let's say you stored some DICOM data on your computer at the path /home/my_user/Data/, and wanted to segment data located at /home/my_user/Data/Patient_1. The nvidia-docker command would look like this:
55 |
56 | ```
57 | nvidia-docker run --rm -v /home/my_user/Data:/INPUT_DATA qtimlab/deepneuro_skullstripping skull_stripping pipeline -T1POST /INPUT_DATA/Patient_1/T1post -FLAIR /INPUT_DATA/Patient_1/FLAIR -output_folder /INPUT_DATA/Patient_1/Output_Folder
58 | ```
59 |
60 | First, note that the "/INPUT_DATA" designation on the right-hand side of the "-v" option will never change. "INPUT_DATA" is a folder within the Docker container that will not change between runs.
61 |
62 | Second, note that you will need to make sure that the left-hand side of the "-v" option is an absolute, rather than relative, path. For example "../Data/" and "~/Data/" will not work (relative path), but "/home/my_user/Data/" will work (absolute path, starting from the root directory).
63 |
64 | Third, note that the folders you provide as arguments to the "skull_stripping pipeline" command should be relative paths. This is because you are mounting, and thus renaming, a folder on your system to the "/INPUT_DATA" folder inside the Docker system. For example, if you were mounting the directory "/home/my_user/Data/" to "/INPUT_DATA", you should not provide the path "/home/my_user/Data/Patient_1/FLAIR" as a parameter. Rather, you should provide the path "/INPUT_DATA/Patient_1/FLAIR", as those parts of the path are within the scope of your mounted directory.
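
In other words, the host-to-container mapping is a plain prefix swap. Below is a small, hypothetical Python helper (not part of DeepNeuro) that rewrites a host path under the mounted directory into its /INPUT_DATA equivalent:

```
import os

def to_container_path(host_path, mounted_dir, mount_point='/INPUT_DATA'):
    """Rewrite a host path under mounted_dir into the container's mount point."""
    host_path = os.path.abspath(host_path)
    mounted_dir = os.path.abspath(mounted_dir)
    if not host_path.startswith(mounted_dir + os.sep):
        raise ValueError('%s is not inside the mounted directory %s' % (host_path, mounted_dir))
    return host_path.replace(mounted_dir, mount_point, 1)

print(to_container_path('/home/my_user/Data/Patient_1/FLAIR', '/home/my_user/Data'))
# /INPUT_DATA/Patient_1/FLAIR
```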
65 |
66 | ## Citation
67 |
68 | Publication in preparation.
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/Singularity.deepneuro_skullstripping:
--------------------------------------------------------------------------------
1 | Bootstrap: docker
2 | From: qtimlab/deepneuro_skullstripping:latest
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Skull_Stripping/__init__.py
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/cli.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 | import os
4 |
5 | from deepneuro.pipelines.shared import DeepNeuroCLI
6 |
7 |
8 | class Skull_Stripping_cli(DeepNeuroCLI):
9 |
10 | def load(self):
11 |
12 | self.command_name = 'skull_stripping'
13 |         self.docker_container = 'qtimlab/deepneuro_skullstripping:latest'
14 | self.filepath_arguments = ['output_folder', 'T1POST', 'FLAIR', 'input_directory']
15 |
16 | super(Skull_Stripping_cli, self).load()
17 |
18 | def parse_args(self):
19 |
20 | parser = argparse.ArgumentParser(
21 |             description='''skull_stripping pipeline
22 | 
23 | Skull-strip an image from niftis or DICOMs with all preprocessing steps included.
24 | 
25 | -output_folder: A filepath to your output folder. A binary brain mask will be generated here.
26 | -T1POST, -FLAIR: Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each.
27 | -segmentation_output: Name of the output binary brain mask. Should not be a filepath, like '/home/user/mask.nii.gz', but just a name, like "segmentation.nii.gz"
28 | -gpu_num: Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu.
29 | -debiased: If flagged, data is assumed to already have been N4 bias-corrected, and skips that preprocessing step.
30 | -resampled: If flagged, data is assumed to already have been isotropically resampled, and skips that preprocessing step.
31 | -registered: If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step.
32 | -save_all_steps: If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder.
33 | -save_preprocessed: If flagged, the final volume after all preprocessing steps will be saved in output_folder
34 | ''')
35 |
36 | parser.add_argument('-output_folder', type=str)
37 | parser.add_argument('-T1POST', type=str)
38 | parser.add_argument('-FLAIR', type=str)
39 | parser.add_argument('-input_directory', type=str)
40 | parser.add_argument('-segmentation_output', nargs='?', type=str, const='segmentation.nii.gz', default='segmentation.nii.gz')
41 |
42 | parser.add_argument('-debiased', action='store_true')
43 | parser.add_argument('-registered', action='store_true')
44 | parser.add_argument('-preprocessed', action='store_true')
45 |
46 | parser.add_argument('-gpu_num', nargs='?', const='0', default='0', type=str)
47 | parser.add_argument('-save_only_segmentations', action='store_true')
48 | parser.add_argument('-save_all_steps', action='store_true')
49 | parser.add_argument('-quiet', action='store_true')
50 | parser.add_argument('-output_probabilities', action='store_true')
51 | args = parser.parse_args(sys.argv[2:])
52 |
53 | return args
54 |
55 | def pipeline(self):
56 |
57 | args = self.parse_args()
58 |
59 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
60 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
61 |
62 | from deepneuro.pipelines.Skull_Stripping.predict import skull_strip
63 |
64 | skull_strip(output_folder=args.output_folder, T1POST=args.T1POST, FLAIR=args.FLAIR, ground_truth=None, input_directory=args.input_directory, bias_corrected=args.debiased, registered=args.registered, preprocessed=args.preprocessed, save_only_segmentations=args.save_only_segmentations, save_all_steps=args.save_all_steps, output_segmentation_filename=args.segmentation_output, quiet=args.quiet)
65 |
66 |
67 | def main():
68 | Skull_Stripping_cli()
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/predict.py:
--------------------------------------------------------------------------------
1 | #--------------------------------------------------------------------#
2 | # Step 0, Import DeepNeuro Commands
3 | #--------------------------------------------------------------------#
4 |
5 | from deepneuro.outputs import PatchesInference
6 | from deepneuro.preprocessing import DICOMConverter, N4BiasCorrection, ZeroMeanNormalization, Coregister
7 | from deepneuro.postprocessing import BinarizeLabel, LargestComponents, FillHoles
8 | from deepneuro.pipelines.shared import load_data
9 | from deepneuro.models.model import load_model_with_output
10 | from deepneuro.utilities import docker_print
11 |
12 |
13 | def skull_strip(output_folder,
14 | T1POST=None,
15 | FLAIR=None,
16 | ground_truth=None,
17 | input_directory=None,
18 | bias_corrected=True,
19 | registered=False,
20 | preprocessed=False,
21 | output_segmentation_filename='segmentation.nii.gz',
22 | output_probabilities=False,
23 | quiet=False,
24 | input_data=None,
25 | save_only_segmentations=False,
26 | save_all_steps=False):
27 |
28 | verbose = not quiet
29 | save_preprocessed = not save_only_segmentations
30 |
31 | #--------------------------------------------------------------------#
32 | # Step 1, Load Data
33 | #--------------------------------------------------------------------#
34 |
35 | data_collection = load_data(inputs=[FLAIR, T1POST], output_folder=output_folder, input_directory=input_directory, ground_truth=ground_truth, input_data=input_data, verbose=verbose)
36 |
37 | #--------------------------------------------------------------------#
38 | # Step 2, Load Models
39 | #--------------------------------------------------------------------#
40 |
41 | skullstripping_prediction_parameters = {'inputs': ['input_data'],
42 | 'output_directory': output_folder,
43 | 'output_filename': output_segmentation_filename,
44 | 'batch_size': 50,
45 | 'patch_overlaps': 6,
46 | 'output_patch_shape': (56, 56, 6, 1),
47 | 'case_in_filename': False,
48 | 'verbose': verbose}
49 |
50 | skullstripping_model = load_model_with_output(model_name='skullstrip_mri', outputs=[PatchesInference(**skullstripping_prediction_parameters)], postprocessors=[BinarizeLabel(), FillHoles(), LargestComponents(postprocessor_string='label')])
51 |
52 | #--------------------------------------------------------------------#
53 | # Step 3, Add Data Preprocessors
54 | #--------------------------------------------------------------------#
55 |
56 | if not preprocessed:
57 |
58 | # Random hack to save DICOMs to niftis for further processing.
59 | preprocessing_steps = [DICOMConverter(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
60 |
61 | if not bias_corrected:
62 | preprocessing_steps += [N4BiasCorrection(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder)]
63 |
64 | if not registered:
65 | preprocessing_steps += [Coregister(data_groups=['input_data'], save_output=save_all_steps, verbose=verbose, output_folder=output_folder, reference_channel=0)]
66 |
67 | preprocessing_steps += [ZeroMeanNormalization(data_groups=['input_data'], save_output=save_preprocessed, verbose=verbose, output_folder=output_folder)]
68 |
69 | data_collection.append_preprocessor(preprocessing_steps)
70 |
71 | #--------------------------------------------------------------------#
72 | # Step 4, Run Inference
73 | #--------------------------------------------------------------------#
74 |
75 | if verbose:
76 | docker_print('Starting New Case...')
77 |
78 | docker_print('Skullstripping Prediction')
79 | docker_print('======================')
80 |
81 | skullstripping_model.generate_outputs(data_collection, output_folder)
82 |
83 | data_collection.clear_preprocessor_outputs()
84 |
85 |
86 | if __name__ == '__main__':
87 |
88 | pass
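
The postprocessor chain above (`BinarizeLabel`, `FillHoles`, `LargestComponents`) is a standard mask cleanup. A self-contained numpy/scipy sketch of those three steps on a toy probability map (the DeepNeuro postprocessors themselves have a different interface):

```
import numpy as np
from scipy import ndimage

probabilities = np.random.rand(32, 32, 32)  # stand-in for model output

# 1. Binarize at a fixed threshold.
mask = probabilities > 0.5

# 2. Fill interior holes.
mask = ndimage.binary_fill_holes(mask)

# 3. Keep only the largest connected component.
labeled, num_components = ndimage.label(mask)
if num_components > 0:
    sizes = ndimage.sum(mask, labeled, range(1, num_components + 1))
    mask = labeled == (np.argmax(sizes) + 1)

print(mask.sum(), 'voxels in final mask')
```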
--------------------------------------------------------------------------------
/deepneuro/pipelines/Skull_Stripping/resources/icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/Skull_Stripping/resources/icon.png
--------------------------------------------------------------------------------
/deepneuro/pipelines/__init__.py:
--------------------------------------------------------------------------------
1 | from .Ischemic_Stroke.predict import *
2 | from .Skull_Stripping.predict import *
3 | from .Segment_GBM.predict import *
4 | from .Segment_Brain_Mets.predict import *
--------------------------------------------------------------------------------
/deepneuro/pipelines/cli/core.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/cli/core.py
--------------------------------------------------------------------------------
/deepneuro/pipelines/echo_count.txt:
--------------------------------------------------------------------------------
1 | 100
--------------------------------------------------------------------------------
/deepneuro/pipelines/sample_template.yml:
--------------------------------------------------------------------------------
1 | ### NOT YET IMPLEMENTED ###
2 |
3 | ### Input Preferences ###
4 |
5 | Inputs:
6 |
7 | input_data:
8 | 0:
9 | FLAIR
10 | 1:
11 | T1POST
12 | 2:
13 | T1PRE
14 |
15 | ground_truth:
16 | 0:
17 | ground_truth
18 |
19 | ### Model Preferences ###
20 |
21 | Models:
22 |
23 | wholetumor_seg:
24 |
25 | model_name: filler
26 | output_parameters:
27 | example: filler
28 |
29 | enhancing_seg:
30 |
31 | model_name: filler
32 | output_parameters:
33 | example: filler
34 |
35 | skullstripping:
36 |
37 | model_name: filler
38 | output_parameters:
39 | example: filler
40 |
41 | ### Preprocessor Preferences ###
42 |
43 | ### PostProcessor Preferences ###
44 |
45 | ### Output Preferences ###
46 |
47 | ### Documentation Preferences ###
48 |
49 | Icon_Path: ~/filler
50 |
51 |
52 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/shared.py:
--------------------------------------------------------------------------------
1 | """
2 | """
3 |
4 | import os
5 | import sys
6 | import argparse
7 |
8 | from shutil import copy
9 |
10 | from deepneuro.data.data_collection import DataCollection
11 | from deepneuro.container.container_cli import nvidia_docker_wrapper
12 |
13 |
14 | class DeepNeuroCLI(object):
15 |
16 | def __init__(self):
17 |
18 | self.command_name = 'deepneuro_module'
19 | self.docker_container = 'qtimlab/deepneuro:latest'
20 | self.filepath_arguments = []
21 |
22 | self.load()
23 |
24 | def load(self):
25 |
26 | parser = argparse.ArgumentParser(
27 |             description='A number of pre-packaged commands used by the Quantitative Tumor Imaging Lab at the Martinos Center',
28 |             usage='''{} <command> [<args>]
29 |
30 | The following commands are available:
31 | pipeline Run the entire model pipeline, with options to leave certain pre-processing steps out.
32 | docker_pipeline Run the previous command via a Docker container via nvidia-docker.
33 |
34 | |Not Implemented|
35 | server Creates a DeepNeuro server that can process DeepNeuro jobs remotely.
36 | explorer Creates a graphical user interface for this DeepNeuro module.
37 | '''.format(self.command_name))
38 |
39 | parser.add_argument('command', help='Subcommand to run')
40 | args = parser.parse_args(sys.argv[1:2])
41 |
42 | if not hasattr(self, args.command):
43 | print('Sorry, that\'s not one of the commands.')
44 | parser.print_help()
45 | exit(1)
46 |
47 | # use dispatch pattern to invoke method with same name
48 | getattr(self, args.command)()
49 |
50 | def docker_pipeline(self):
51 |
52 | args = self.parse_args()
53 |
54 | nvidia_docker_wrapper([self.command_name, 'pipeline'], vars(args), self.filepath_arguments, docker_container=self.docker_container)
55 |
56 |
57 | def load_data(inputs, output_folder, input_directory=None, ground_truth=None, input_data=None, verbose=True):
58 |
59 | """ A convenience function when building single-input pipelines. This function
60 | quickly builds DataCollections
61 | """
62 |
63 | if any(data is None for data in inputs):
64 | raise ValueError("Cannot run pipeline; required inputs are missing. Please consult this module's documentation, and make sure all required parameters are input.")
65 |
66 | inputs = [os.path.abspath(input_filename) for input_filename in inputs]
67 | output_folder = os.path.abspath(output_folder)
68 |
69 | input_data = {'input_data': inputs}
70 |
71 | if ground_truth is not None:
72 | input_data['ground_truth'] = [ground_truth]
73 |
74 | if input_directory is None:
75 |
76 | if any(data is None for data in input_data):
77 | raise ValueError("Cannot run pipeline; required inputs are missing. Please consult this module's documentation, and make sure all required parameters are input.")
78 |
79 | data_collection = DataCollection(verbose=verbose)
80 | data_collection.add_case(input_data, case_name=output_folder)
81 |
82 | else:
83 | data_collection = DataCollection(input_directory, data_group_dict=input_data, verbose=verbose)
84 |
85 | if not os.path.exists(output_folder):
86 | os.makedirs(output_folder)
87 |
88 | if verbose:
89 | print('File loading completed.')
90 |
91 | return data_collection
92 |
93 |
94 | def create_Dockerfile(output_directory, models_included=None, module_name=None, deepneuro_branch='master'):
95 |
96 | current_dir = os.path.realpath(os.path.dirname(__file__))
97 | base_Dockerfile = os.path.join(current_dir, 'Dockerfile_base')
98 | new_Dockerfile = os.path.join(output_directory, 'Dockerfile')
99 |
100 | copy(base_Dockerfile, new_Dockerfile)
101 |
102 | echo_count = os.path.join(current_dir, 'echo_count.txt')
103 | with open(echo_count, 'r') as myfile:
104 | echo_count = myfile.read()
105 |
106 | with open(new_Dockerfile, "a") as writefile:
107 |
108 | if models_included is not None:
109 |
110 | if module_name is None:
111 | raise ValueError("If you are including models in your container, please include the module_name parameter.")
112 |
113 | writefile.write("RUN mkdir -p /home/DeepNeuro/deepneuro/load/{}\n".format(module_name))
114 |
115 | for key, value in models_included.items():
116 | writefile.write("""RUN wget -O /home/DeepNeuro/deepneuro/load/{}/{}.h5 {}""".format(module_name, key, value))
117 |
118 | writefile.write("""
119 | RUN echo {} \n
120 | RUN git pull \n
121 | RUN python3 /home/DeepNeuro/setup.py develop \n
122 | \n
123 | # Commands at startup. \n
124 | WORKDIR "/" \n
125 | RUN chmod 777 /home/DeepNeuro/entrypoint.sh \n
126 | ENTRYPOINT ["/home/DeepNeuro/entrypoint.sh"]""".format(echo_count))
127 |
128 | return new_Dockerfile
129 |
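
A hedged sketch of calling create_Dockerfile; the module name and model URL below are placeholders:

dockerfile_path = create_Dockerfile(
    output_directory='./Example_Module',
    models_included={'example_model': 'https://example.com/example_model.h5?dl=1'},
    module_name='Example_Module')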
130 |
131 | def create_Singularity(output_directory, docker_name):
132 |
133 | output_singularity = os.path.join(output_directory, 'Singularity.' + docker_name)
134 |
135 | with open("output_singularity", "w") as writefile:
136 | writefile.write("Bootstrap: docker\n")
137 | writefile.write("From: qtimlab/{}:latest".format(docker_name))
138 |
139 | return output_singularity
140 |
141 |
142 | def upload_icon(output_directory, icon_filepath):
143 |
144 | resources_directory = os.path.join(output_directory, 'resources')
145 |
146 | if not os.path.exists(resources_directory):
147 | os.mkdir(os.path.join(output_directory, 'resources'))
148 |
149 | copy(icon_filepath, os.path.join(resources_directory, 'icon.png'))
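
Taken together, a hedged sketch of packaging a module with these helpers (all names and paths below are placeholders):

output_directory = './Example_Module'
create_Dockerfile(output_directory)
create_Singularity(output_directory, docker_name='deepneuro_example_module')
upload_icon(output_directory, icon_filepath='./icon.png')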
--------------------------------------------------------------------------------
/deepneuro/pipelines/template.py:
--------------------------------------------------------------------------------
1 | """ Not implemented. Reads YAML files to create DeepNeuro pipelines.
2 | """
3 |
4 | import yaml
5 |
6 |
7 | def parse_template(input_file):
8 |
9 | template = yaml.safe_load(open(input_file))
10 |
11 | for key, label in list(template.items()):
12 | print(key, label)
13 |
14 | ### Process Inputs ###
15 | print(template['Inputs'])
16 |
17 | return
18 |
19 |
20 | def create_cli_from_template():
21 |
22 | return
23 |
24 |
25 | def write_template_to_script():
26 |
27 | return
28 |
29 |
30 | if __name__ == '__main__':
31 |
32 | parse_template('sample_template.yml')
33 |
--------------------------------------------------------------------------------
/deepneuro/pipelines/templates/Dockerfile_template:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/templates/Dockerfile_template
--------------------------------------------------------------------------------
/deepneuro/pipelines/templates/README_template.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/pipelines/templates/README_template.md
--------------------------------------------------------------------------------
/deepneuro/pipelines/templates/cli_template.py:
--------------------------------------------------------------------------------
1 | import argparse
2 | import sys
3 | import os
4 |
5 | from deepneuro.container.container_cli import nvidia_docker_wrapper
6 |
7 |
8 | class {{cli_class_name}}(object):
9 |
10 | def __init__(self):
11 |
12 | parser = argparse.ArgumentParser(
13 | description='A number of pre-packaged commands used by the Quantitative Tumor Imaging Lab at the Martinos Center',
14 | usage='''{{cli_command_name}} <command> [<args>]
15 |
16 | The following commands are available:
17 | pipeline Run the entire segmentation pipeline, with options to leave certain pre-processing steps out.
18 | docker_pipeline Run the pipeline command above inside a Docker container, via nvidia-docker.
19 | ''')
20 |
21 | parser.add_argument('command', help='Subcommand to run')
22 | args = parser.parse_args(sys.argv[1:2])
23 |
24 | if not hasattr(self, args.command):
25 | print('Sorry, that\'s not one of the commands.')
26 | parser.print_help()
27 | exit(1)
28 |
29 | getattr(self, args.command)()
30 |
31 | def parse_args(self):
32 |
33 | parser = argparse.ArgumentParser(
34 | description=\
35 | {{method_description}}
36 | )
37 |
38 | {{arguments}}
39 |
40 | args = parser.parse_args(sys.argv[2:])
41 |
42 | return args
43 |
44 | def pipeline(self):
45 |
46 | args = self.parse_args()
47 |
48 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
49 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
50 |
51 | {{import_command}}
52 |
53 | {{function_call}}
54 |
55 | def docker_pipeline(self):
56 |
57 | args = self.parse_args()
58 |
59 | {{docker_call}}
60 |
61 |
62 | def main():
63 | {{cli_class_name}}()
--------------------------------------------------------------------------------
/deepneuro/postprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | from .label import BinarizeLabel, Rescale, LargestComponents, FillHoles
2 | from .classification import MaximumClassifier
3 | from .statistics import ErrorCalculation
--------------------------------------------------------------------------------
/deepneuro/postprocessing/classification.py:
--------------------------------------------------------------------------------
1 | """ A set of postprocessors constructed specifically for classification tasks.
2 | """
3 |
4 | import numpy as np
5 |
6 | from deepneuro.postprocessing.postprocessor import Postprocessor
7 | from deepneuro.utilities.util import add_parameter
8 |
9 |
10 | class MaximumClassifier(Postprocessor):
11 |
12 | def load(self, kwargs):
13 |
14 | # Naming parameter
15 | add_parameter(self, kwargs, 'name', 'MaxClass')
16 | add_parameter(self, kwargs, 'postprocessor_string', 'max_class')
17 |
18 | def postprocess(self, input_data, raw_data=None, casename=None):
19 |
20 | return np.argmax(input_data, axis=1)[..., None]
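
A minimal sketch of the operation above, assuming a batch of class scores with the class axis at position 1:

import numpy as np

scores = np.array([[0.1, 0.7, 0.2],
                   [0.8, 0.1, 0.1]])

# argmax over the class axis, restoring a trailing singleton axis.
print(np.argmax(scores, axis=1)[..., None])  # [[1], [0]]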
--------------------------------------------------------------------------------
/deepneuro/postprocessing/label.py:
--------------------------------------------------------------------------------
1 |
2 | import numpy as np
3 |
4 | from skimage.measure import label
5 | from scipy.ndimage.morphology import binary_fill_holes
6 |
7 | from deepneuro.postprocessing.postprocessor import Postprocessor
8 | from deepneuro.utilities.util import add_parameter
9 |
10 |
11 | class BinarizeLabel(Postprocessor):
12 |
13 | def load(self, kwargs):
14 |
15 | # Naming parameter
16 | add_parameter(self, kwargs, 'name', 'Binarization')
17 | add_parameter(self, kwargs, 'postprocessor_string', 'binarized')
18 |
19 | add_parameter(self, kwargs, 'binarization_threshold', 0.5)
20 |
21 | def postprocess(self, input_data, raw_data=None, casename=None):
22 |
23 | return (input_data > self.binarization_threshold).astype(float)
24 |
25 |
26 | class Rescale(Postprocessor):
27 |
28 | """ TODO: Merge with Normalization Preprocessor
29 | """
30 |
31 | def load(self, kwargs):
32 |
33 | # Naming parameter
34 | add_parameter(self, kwargs, 'name', 'Rescale')
35 | add_parameter(self, kwargs, 'postprocessor_string', 'rescaled')
36 |
37 | add_parameter(self, kwargs, 'input_intensity_range', None)
38 | add_parameter(self, kwargs, 'output_intensity_range', [0, 255])
39 |
40 | def postprocess(self, input_data, raw_data=None, casename=None):
41 |
42 | normalize_numpy = input_data.astype(float)
43 |
44 | if self.input_intensity_range is None:
45 | input_intensity_range = [np.min(normalize_numpy), np.max(normalize_numpy)]
46 | else:
47 | input_intensity_range = self.input_intensity_range
48 |
49 | if input_intensity_range[0] == input_intensity_range[1]:
50 | normalize_numpy[:] = self.output_intensity_range[0]
51 | print('Warning: normalization edge case. All array values are equal. Normalizing to minimum value.')
52 | else:
53 | normalize_numpy = ((self.output_intensity_range[1] - self.output_intensity_range[0]) * (normalize_numpy - input_intensity_range[0])) / (input_intensity_range[1] - input_intensity_range[0]) + self.output_intensity_range[0]
54 |
55 | if self.input_intensity_range is not None:
56 | normalize_numpy[normalize_numpy < self.output_intensity_range[0]] = self.output_intensity_range[0]
57 | normalize_numpy[normalize_numpy > self.output_intensity_range[1]] = self.output_intensity_range[1]
58 |
59 | return normalize_numpy
60 |
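
A small numeric sketch of the linear mapping Rescale applies, using assumed toy values:

import numpy as np

data = np.array([2., 4., 6.])
in_range, out_range = [2., 6.], [0., 255.]

# Same min-max mapping as Rescale.postprocess above.
rescaled = ((out_range[1] - out_range[0]) * (data - in_range[0])) / (in_range[1] - in_range[0]) + out_range[0]
print(rescaled)  # [0. 127.5 255.]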
61 |
62 | class LargestComponents(Postprocessor):
63 |
64 | def load(self, kwargs):
65 |
66 | # Naming parameter
67 | add_parameter(self, kwargs, 'name', 'LargestComponents')
68 | add_parameter(self, kwargs, 'postprocessor_string', 'largest_components')
69 |
70 | add_parameter(self, kwargs, 'component_number', 1)
71 | add_parameter(self, kwargs, 'connectivity', 2)
72 |
73 | def postprocess(self, input_data, raw_data=None, casename=None):
74 |
75 | """ I rewrote Ken's script, but I think I made it worse... TODO: Rewrite again? For clarity.
76 | """
77 |
78 | for batch in range(input_data.shape[0]):
79 | for channel in range(input_data.shape[-1]):
80 | input_data[batch, ..., channel] = largest_components(input_data[batch, ..., channel], component_number=self.component_number, connectivity=self.connectivity)
81 |
82 | return input_data
83 |
84 |
85 | def largest_components(input_data, component_number=1, connectivity=2):
86 |
87 | connected_components = label(input_data, connectivity=connectivity)
88 | total_components = np.max(connected_components)
89 |
90 | component_sizes = []
91 | for i in range(1, total_components + 1):
92 | component_sizes += [np.sum(connected_components == i)]
93 |
94 | component_rankings = np.argsort(np.array(component_sizes))
95 | component_rankings = component_rankings[:-component_number]
96 |
97 | # TODO: Zeroing out components one at a time may be slow; consider
98 | # masking all non-retained components in a single vectorized operation.
99 | for i in component_rankings:
100 | input_data[connected_components == i + 1] = 0
101 |
102 | return input_data
103 |
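
A toy demonstration of largest_components as fixed above; the mask values are assumptions:

import numpy as np

mask = np.array([[1, 1, 0, 0],
                 [1, 1, 0, 1],
                 [0, 0, 0, 0]], dtype=float)

# Only the 2x2 block survives; the lone voxel is zeroed out.
print(largest_components(np.copy(mask), component_number=1))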
104 |
105 | class FillHoles(Postprocessor):
106 |
107 | def load(self, kwargs):
108 |
109 | # Naming parameter
110 | add_parameter(self, kwargs, 'name', 'FillHoles')
111 | add_parameter(self, kwargs, 'postprocessor_string', 'holes_filled')
112 |
113 | # Hole-Filling Parameters
114 | add_parameter(self, kwargs, 'slice_dimension', -2) # Currently not operational
115 |
116 | def postprocess(self, input_data, raw_data=None, casename=None):
117 |
118 | """ Although I don't know, this seems a bit ineffecient. See if there's a better 3D hole-filler out there
119 | Or better yet, arbitrary dimension hole_filler.
120 | """
121 |
122 | for batch in range(input_data.shape[0]):
123 | for channel in range(input_data.shape[-1]):
124 | for slice_idx in range(input_data.shape[self.slice_dimension]):
125 | input_data[batch, ..., slice_idx, channel] = binary_fill_holes(input_data[batch, ..., slice_idx, channel]).astype(float)
126 |
127 | return input_data
128 |
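A minimal sketch of the per-slice filling step, on an assumed 2D slice with a single enclosed hole:

import numpy as np
from scipy.ndimage.morphology import binary_fill_holes

slice_2d = np.ones((3, 3))
slice_2d[1, 1] = 0  # one enclosed hole

print(binary_fill_holes(slice_2d).astype(float))  # all ones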
--------------------------------------------------------------------------------
/deepneuro/postprocessing/postprocessor.py:
--------------------------------------------------------------------------------
1 | from deepneuro.utilities.util import add_parameter
2 |
3 |
4 | class Postprocessor(object):
5 |
6 | def __init__(self, **kwargs):
7 |
8 | # Default Variables
9 | add_parameter(self, kwargs, 'verbose', False)
10 | add_parameter(self, kwargs, 'raw_data', None)
11 | add_parameter(self, kwargs, 'ground_truth', 'ground_truth')
12 |
13 | # Naming Variables
14 | add_parameter(self, kwargs, 'name', 'Postprocessor')
15 | add_parameter(self, kwargs, 'postprocessor_string', 'postprocess')
16 |
17 | self.transform_output = True
18 |
19 | self.load(kwargs)
20 |
21 | def load(self, kwargs):
22 |
23 | return
24 |
25 | def execute(self, output, raw_data):
26 |
27 | postprocessed_objects = []
28 |
29 | # TODO: Return object syntax is broken / not implemented
30 | for return_object in output.return_objects:
31 |
32 | if self.verbose:
33 | print(('Postprocessing with...', self.name))
34 |
35 | # This piece of code has not yet been refactored.
36 | if 'casename' in raw_data:
37 | casename = raw_data['casename'][0]
38 | else:
39 | casename = None
40 |
41 | postprocessed_objects += [self.postprocess(return_object, raw_data=raw_data, casename=casename)]
42 |
43 | output.return_objects = postprocessed_objects
44 |
45 | def postprocess(self, input_data, raw_data=None, casename=None):
46 |
47 | return input_data
48 |
49 | def clear_outputs(self):
50 |
51 | return
52 |
53 | def close(self):
54 |
55 | return
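
To illustrate the contract the base class expects (`load` registers parameters, `postprocess` transforms arrays), here is a hedged sketch of a custom postprocessor; the class and its behavior are illustrative, not part of DeepNeuro:

import numpy as np

from deepneuro.postprocessing.postprocessor import Postprocessor
from deepneuro.utilities.util import add_parameter


class ClipValues(Postprocessor):

    def load(self, kwargs):

        # Naming parameters, following the convention of the built-in postprocessors.
        add_parameter(self, kwargs, 'name', 'ClipValues')
        add_parameter(self, kwargs, 'postprocessor_string', 'clipped')

        add_parameter(self, kwargs, 'clip_range', [0, 1])

    def postprocess(self, input_data, raw_data=None, casename=None):

        return np.clip(input_data, self.clip_range[0], self.clip_range[1])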
--------------------------------------------------------------------------------
/deepneuro/postprocessing/signal.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/postprocessing/signal.py
--------------------------------------------------------------------------------
/deepneuro/postprocessing/statistics.py:
--------------------------------------------------------------------------------
1 | import csv
2 | import numpy as np
3 |
4 | from skimage.measure import label
5 |
6 | from deepneuro.postprocessing.postprocessor import Postprocessor
7 | from deepneuro.utilities.util import add_parameter
8 |
9 |
10 | class ErrorCalculation(Postprocessor):
11 |
12 | def load(self, kwargs):
13 |
14 | # Naming parameter
15 | add_parameter(self, kwargs, 'name', 'ErrorCalculation')
16 | add_parameter(self, kwargs, 'postprocessor_string', '')
17 |
18 | # Logging Parameters
19 | add_parameter(self, kwargs, 'output_log', 'outputs.csv')
20 | add_parameter(self, kwargs, 'cost_functions', ['dice'])
21 | add_parameter(self, kwargs, 'write_mode', 'w')
22 | add_parameter(self, kwargs, 'print_output', True)
23 |
24 | self.cost_function_dict = {
25 | 'dice': dice_cost_function,
26 | 'accuracy': accuracy_cost_function,
27 | 'cluster_accuracy': cluster_accuracy_cost_function
28 | }
29 |
30 | self.cost_function_label_dict = {
31 | 'dice': 'Dice Coefficient',
32 | 'accuracy': 'Accuracy',
33 | 'cluster_accuracy': 'Cluster Accuracy'
34 | }
35 |
36 | self.transform_output = False
37 | self.csv_file = None
38 | # Not sure of the best method to close this file
39 |
40 | def postprocess(self, input_data, raw_data=None, casename=None):
41 |
42 | if self.csv_file is None:
43 | self.csv_file = open(self.output_log, self.write_mode)
44 | self.csv_writer = csv.writer(self.csv_file)
45 | self.csv_writer.writerow(['casename'] + [self.cost_function_label_dict[cost_function] for cost_function in self.cost_functions])
46 |
47 | ground_truth = raw_data[self.ground_truth]
48 |
49 | if casename is None:
50 | casename = ''
51 | output_row = [casename]
52 |
53 | for cost_function in self.cost_functions:
54 |
55 | if cost_function not in self.cost_function_dict:
56 | print(('Error, cost function', cost_function, 'not implemented'))
57 | continue
58 | cost = self.cost_function_dict[cost_function](input_data, ground_truth)
59 |
60 | if self.print_output:
61 | print((self.cost_function_label_dict[cost_function] + ':', cost))
62 |
63 | output_row += [str(cost)]
64 |
65 | self.csv_writer.writerow(output_row)
66 | self.csv_file.flush()
67 |
68 | return input_data
69 |
70 | def close(self):
71 | self.csv_file.close()
72 |
73 |
74 | def dice_cost_function(input_data, ground_truth):
75 |
76 | """ Calculate the dice coefficient.
77 |
78 | Parameters
79 | ----------
80 | input_data, ground_truth : NumPy
81 | Arrays to be compared.
82 |
83 | Returns
84 | -------
85 | float
86 | Dice coefficient.
87 |
88 | """
89 |
90 | im1 = np.asarray(input_data).astype(bool)
91 | im2 = np.asarray(ground_truth).astype(bool)
92 |
93 | if im1.shape != im2.shape:
94 | raise ValueError("Shape mismatch: Predicted data and ground truth must have the same shape.")
95 |
96 | im_sum = im1.sum() + im2.sum()
97 | if im_sum == 0:
98 | return 0
99 |
100 | # Compute Dice coefficient
101 | intersection = np.logical_and(im1, im2)
102 |
103 | return 2. * intersection.sum() / im_sum
104 |
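
A quick numeric check of dice_cost_function with assumed toy masks:

import numpy as np

prediction = np.array([1, 1, 0, 0])
truth = np.array([1, 0, 0, 0])

# Intersection = 1, total foreground = 3, so Dice = 2 * 1 / 3.
print(dice_cost_function(prediction, truth))  # 0.666...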
105 |
106 | def accuracy_cost_function(input_data, ground_truth):
107 |
108 | """Summary
109 |
110 | Parameters
111 | ----------
112 | input_data, ground_truth : NumPy
113 | Arrays to be compared.
114 |
115 | Returns
116 | -------
117 | int
118 | Number of matching elements.
119 | """
120 |
121 | return np.sum(input_data == ground_truth)
122 |
123 |
124 | def cluster_accuracy_cost_function(input_data, ground_truth, connectivity=2):
125 |
126 | """Computes a function to see how many clusters that exist in the ground truth
127 | data have overlapping segments in the input data. Note that this does not account
128 | for extraneous segmentations in the input data that do not correspond to any
129 | clusters in the ground truth data.
130 |
131 | Parameters
132 | ----------
133 | input_data, ground_truth : NumPy
134 | Arrays to be compared.
135 | connectivity : int, optional
136 | Connectivity used by skimage.measure.label to define connected components.
137 |
138 | Returns
139 | -------
140 | float
141 | Cluster accuracy metric.
142 | """
143 |
144 | if input_data.shape[-1] != 1:
145 | raise NotImplementedError('Cluster accuracy not implemented for data with multiple channels.')
146 |
147 | input_data = input_data[0, ..., 0]
148 | ground_truth = ground_truth[0, ..., 0]
149 |
150 | overlapping_map = np.logical_and(input_data, ground_truth)
151 | connected_components = label(ground_truth, connectivity=connectivity)
152 | total_components = np.max(connected_components)
153 |
154 | overlapping_components = 0
155 |
156 | for i in range(1, total_components + 1):
157 | individual_component = np.copy(connected_components)
158 | individual_component[individual_component != i] = 0
159 | if np.sum(np.logical_and(overlapping_map, individual_component.astype(bool))) != 0:
160 | overlapping_components += 1
161 |
162 | return overlapping_components / total_components if total_components else 0.
163 |
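A toy check of cluster_accuracy_cost_function (with the assignment fix above), using assumed batch-shaped arrays:

import numpy as np

truth = np.zeros((1, 5, 5, 1))
truth[0, 0:2, 0:2, 0] = 1  # first cluster
truth[0, 4, 4, 0] = 1      # second cluster

prediction = np.zeros((1, 5, 5, 1))
prediction[0, 0, 0, 0] = 1  # overlaps only the first cluster

print(cluster_accuracy_cost_function(prediction, truth))  # 0.5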
--------------------------------------------------------------------------------
/deepneuro/postprocessing/transform.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | from deepneuro.postprocessing.postprocessor import Postprocessor
4 | from deepneuro.utilities.util import add_parameter
5 |
6 |
7 | class UniqueClasses(Postprocessor):
8 |
9 | """This class reverses the effect of one-hot encoding data.
10 | """
11 |
12 | def load(self, kwargs):
13 |
14 | # Naming parameter
15 | add_parameter(self, kwargs, 'name', 'UniqueClasses')
16 | add_parameter(self, kwargs, 'postprocessor_string', '_unique_classes')
17 |
18 | def postprocess(self, input_data, raw_data=None, casename=None):
19 |
20 | output_data = np.argmax(input_data, axis=1)
21 |
22 | return output_data
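
A minimal sketch of reversing one-hot encoding along axis 1, with assumed toy data:

import numpy as np

one_hot = np.array([[0, 1, 0],
                    [1, 0, 0]])

print(np.argmax(one_hot, axis=1))  # [1 0]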
--------------------------------------------------------------------------------
/deepneuro/preprocessing/__init__.py:
--------------------------------------------------------------------------------
1 | from .transform import ReorderAxes, SqueezeAxes, MergeChannels, OneHotEncode, SelectChannels, SplitData, Coregister, CropValues, CopyChannels
2 | from .preprocessor import DICOMConverter
3 | from .signal import N4BiasCorrection, MaskValues, RangeNormalization, BinaryNormalization, ZeroMeanNormalization
4 | from .skullstrip import SkullStrip, SkullStrip_Model
--------------------------------------------------------------------------------
/deepneuro/preprocessing/append_output.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/preprocessing/append_output.py
--------------------------------------------------------------------------------
/deepneuro/preprocessing/classes.py:
--------------------------------------------------------------------------------
1 | import os
2 | import numpy as np
3 |
4 | from deepneuro.preprocessing.preprocessor import Preprocessor
5 | from deepneuro.utilities.util import add_parameter
6 |
7 | FNULL = open(os.devnull, 'w')
8 |
9 |
10 | class GetCentroid(Preprocessor):
11 |
12 | def load(self, kwargs):
13 |
14 | # Naming Parameters
15 | add_parameter(self, kwargs, 'name', 'OneHotClasses')
16 |
17 | # Dropping Parameters
18 | add_parameter(self, kwargs, 'channels', None)
19 | add_parameter(self, kwargs, 'output_data_group', None)
20 | add_parameter(self, kwargs, 'max_centroid', 1)
21 | add_parameter(self, kwargs, 'aggregate_centroid', False)
22 |
23 | self.output_shape = {}
24 | self.array_input = True
25 |
26 | def initialize(self, data_collection):
27 |
28 | super(GetCentroid, self).initialize(data_collection)
29 |
30 | for label, data_group in list(self.data_groups.items()):
31 |
32 | data_shape = list(data_group.get_shape())
33 |
34 | if self.channels is None:
35 | self.output_shape[label] = (len(data_shape) - 1, self.max_centroid, 1)
36 | else:
37 | if not isinstance(self.channels, list):
38 | self.channels = [self.channels]
39 | self.output_shape[label] = (len(data_shape) - 1, self.max_centroid, len(self.channels))
40 |
41 | def preprocess(self, data_group):
42 |
43 | raise NotImplementedError
44 |
45 | if self.channels is None:
46 | pass
47 |
48 | input_data = data_group.preprocessed_case
49 | output_data = np.take(input_data, self.channels, axis=-1)
50 |
51 | data_group.preprocessed_case = output_data
52 | self.output_data = output_data
53 |
--------------------------------------------------------------------------------
/deepneuro/preprocessing/models.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/deepneuro/preprocessing/models.py
--------------------------------------------------------------------------------
/deepneuro/utilities/__init__.py:
--------------------------------------------------------------------------------
1 | from .conversion import *
2 | from .util import *
3 | from .visualize import check_data
--------------------------------------------------------------------------------
/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | exec "$@"
--------------------------------------------------------------------------------
/index.rst:
--------------------------------------------------------------------------------
1 | .. DeepNeuro documentation master file, created by
2 | sphinx-quickstart on Tue Oct 30 17:50:18 2018.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | Welcome to DeepNeuro's documentation!
7 | =====================================
8 |
9 | .. toctree::
10 | :maxdepth: 2
11 | :caption: Contents:
12 |
13 |
14 |
15 | Indices and tables
16 | ==================
17 |
18 | * :ref:`genindex`
19 | * :ref:`modindex`
20 | * :ref:`search`
21 |
--------------------------------------------------------------------------------
/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=.
11 | set BUILDDIR=_build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/misc/DeepInfer/Segment_GBM/DeepNeuro_Glioblastoma.json:
--------------------------------------------------------------------------------
1 | {
2 | "name": "DeepNeuro_Glioblastoma",
3 | "number_of_inputs": 4,
4 | "task": "Segmentation",
5 | "organ": "Brain",
6 | "modality": "MRI",
7 | "train_test_data_details": "The model is trained on pre-processed brain MRIs from the BRATS2017 competition, as well as private datasets from multiple instituions.",
8 | "briefdescription": "Segmentation of glioblastoma into whole tumor (peritumoral edema and necrosis) and enhancing tumor.",
9 | "detaileddescription": "",
10 | "website": "https://github.com/QTIM-Lab/DeepNeuro",
11 | "citation": "",
12 | "version": "0.1",
13 | "docker": {
14 | "dockerhub_repository": "qtimlab/deepneuro_segment_gbm:deepinfer",
15 | "digest": "sha256:acaecad262a6dcabf0cf2480d5b25face8ded1e28a3836e8485fd39d1ff6ef4a",
16 | "size": "5 GB"
17 | },
18 | "model_name": "deepneuro_glioblastoma",
19 | "data_path": "/INPUT_DATA",
20 | "members": [
21 | {
22 | "name": "T2",
23 | "type": "volume",
24 | "iotype": "input",
25 | "voltype": "ScalarVolume",
26 | "detaileddescriptionSet": "T2-weighted MRI volume.\n",
27 | "default": "std::vector(3, 64)",
28 | "itk_type": "typename FilterType::SizeType"
29 | },
30 | {
31 | "name": "T1",
32 | "type": "volume",
33 | "iotype": "input",
34 | "voltype": "ScalarVolume",
35 | "detaileddescriptionSet": "T1-weighted MRI volume.\n",
36 | "default": "std::vector(3, 64)",
37 | "itk_type": "typename FilterType::SizeType"
38 | },
39 | {
40 | "name": "T1POST",
41 | "type": "volume",
42 | "iotype": "input",
43 | "voltype": "ScalarVolume",
44 | "detaileddescriptionSet": "T1-weighted MRI post-contrast volume.\n",
45 | "default": "std::vector(3, 64)",
46 | "itk_type": "typename FilterType::SizeType"
47 | },
48 | {
49 | "name": "FLAIR",
50 | "type": "volume",
51 | "iotype": "input",
52 | "voltype": "ScalarVolume",
53 | "detaileddescriptionSet": "FLAIR MRI volume.\n",
54 | "default": "std::vector(3, 64)",
55 | "itk_type": "typename FilterType::SizeType"
56 | },
57 | {
58 | "name": "debiased",
59 | "type": "bool",
60 | "default": "true",
61 | "detaileddescriptionSet": "If data is already debiased, check this box.\n",
62 | "iotype": "parameter"
63 | },
64 | {
65 | "name": "resampled",
66 | "type": "bool",
67 | "default": "true",
68 | "detaileddescriptionSet": "If data is already isotropically resampled, check this box.\n",
69 | "iotype": "parameter"
70 | },
71 | {
72 | "name": "registered",
73 | "type": "bool",
74 | "default": "true",
75 | "detaileddescriptionSet": "If data is already registered, check this box.\n",
76 | "iotype": "parameter"
77 | },
78 | {
79 | "name": "skullstripped",
80 | "type": "bool",
81 | "default": "true",
82 | "detaileddescriptionSet": "If data is already skullstripped, check this box.\n",
83 | "iotype": "parameter"
84 | },
85 | {
86 | "name": "GPU Number",
87 | "type": "uint16_t",
88 | "iotype": "parameter",
89 | "param_name": "gpu_num",
90 | "default": 0
91 | },
92 | {
93 | "name": "Output_WholeTumor",
94 | "type": "volume",
95 | "iotype": "output",
96 | "voltype": "LabelMap",
97 | "detaileddescriptionSet": "Output labelmap for the segmentation results.\n",
98 | "default": "std::vector(3, 64)",
99 | "itk_type": "typename FilterType::SizeType"
100 | },
101 | {
102 | "name": "Output_EnhancingTumor",
103 | "type": "volume",
104 | "iotype": "output",
105 | "voltype": "LabelMap",
106 | "detaileddescriptionSet": "Output labelmap for the segmentation results.\n",
107 | "default": "std::vector(3, 64)",
108 | "itk_type": "typename FilterType::SizeType"
109 | }
110 | ]
111 | }
--------------------------------------------------------------------------------
/misc/DeepInfer/Segment_GBM/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM nvidia/cuda:8.0-cudnn5-devel-ubuntu14.04
2 | LABEL maintainer "Andrew Beers "
3 |
4 | ARG TENSORFLOW_VERSION=1.2.1
5 | ARG TENSORFLOW_ARCH=gpu
6 | ARG KERAS_VERSION=2.0.6
7 |
8 | #RUN echo -e "\n**********************\nNVIDIA Driver Version\n**********************\n" && \
9 | # cat /proc/driver/nvidia/version && \
10 | # echo -e "\n**********************\nCUDA Version\n**********************\n" && \
11 | # nvcc -V && \
12 | # echo -e "\n\nBuilding your Deep Learning Docker Image...\n"
13 |
14 | # Install some dependencies
15 | RUN apt-get update && apt-get install -y \
16 | bc \
17 | build-essential \
18 | cmake \
19 | curl \
20 | g++ \
21 | gfortran \
22 | git \
23 | libffi-dev \
24 | libfreetype6-dev \
25 | libhdf5-dev \
26 | libjpeg-dev \
27 | liblcms2-dev \
28 | libopenblas-dev \
29 | liblapack-dev \
30 | libopenjpeg2 \
31 | libpng12-dev \
32 | libssl-dev \
33 | libtiff5-dev \
34 | libwebp-dev \
35 | libzmq3-dev \
36 | nano \
37 | pkg-config \
38 | python-dev \
39 | software-properties-common \
40 | unzip \
41 | vim \
42 | wget \
43 | zlib1g-dev \
44 | qt5-default \
45 | libvtk6-dev \
46 | zlib1g-dev \
47 | libjpeg-dev \
48 | libwebp-dev \
49 | libpng-dev \
50 | libtiff5-dev \
51 | libjasper-dev \
52 | libopenexr-dev \
53 | libgdal-dev \
54 | libdc1394-22-dev \
55 | libavcodec-dev \
56 | libavformat-dev \
57 | libswscale-dev \
58 | libtheora-dev \
59 | libvorbis-dev \
60 | libxvidcore-dev \
61 | libx264-dev \
62 | yasm \
63 | libopencore-amrnb-dev \
64 | libopencore-amrwb-dev \
65 | libv4l-dev \
66 | libxine2-dev \
67 | libtbb-dev \
68 | libeigen3-dev \
69 | python-dev \
70 | python-tk \
71 | python-numpy \
72 | python3-dev \
73 | python3-tk \
74 | python3-numpy \
75 | ant \
76 | default-jdk \
77 | doxygen \
78 | && \
79 | apt-get clean && \
80 | apt-get autoremove && \
81 | rm -rf /var/lib/apt/lists/* && \
82 | # Link BLAS library to use OpenBLAS using the alternatives mechanism (https://www.scipy.org/scipylib/building/linux.html#debian-ubuntu)
83 | update-alternatives --set libblas.so.3 /usr/lib/openblas-base/libblas.so.3
84 |
85 | # Install pip
86 | RUN curl -O https://bootstrap.pypa.io/get-pip.py && \
87 | python get-pip.py && \
88 | rm get-pip.py
89 |
90 | # Add SNI support to Python
91 | RUN pip --no-cache-dir install \
92 | pyopenssl \
93 | ndg-httpsclient \
94 | pyasn1
95 |
96 | # Install useful Python packages using apt-get to avoid version incompatibilities with Tensorflow binary
97 | # especially numpy, scipy, skimage and sklearn (see https://github.com/tensorflow/tensorflow/issues/2034)
98 | RUN apt-get update && apt-get install -y \
99 | python-numpy \
100 | python-scipy \
101 | python-nose \
102 | python-h5py \
103 | python-skimage \
104 | python-matplotlib \
105 | python-pandas \
106 | python-sklearn \
107 | python-sympy \
108 | && \
109 | apt-get clean && \
110 | apt-get autoremove && \
111 | rm -rf /var/lib/apt/lists/*
112 |
113 | # Install other useful Python packages using pip
114 | RUN pip --no-cache-dir install --upgrade ipython && \
115 | pip --no-cache-dir install \
116 | Cython \
117 | ipykernel \
118 | jupyter \
119 | path.py \
120 | Pillow \
121 | pygments \
122 | six \
123 | sphinx \
124 | wheel \
125 | zmq \
126 | && \
127 | python -m ipykernel.kernelspec
128 |
129 | # Install TensorFlow
130 | RUN pip --no-cache-dir install \
131 | https://storage.googleapis.com/tensorflow/linux/${TENSORFLOW_ARCH}/tensorflow_${TENSORFLOW_ARCH}-${TENSORFLOW_VERSION}-cp27-none-linux_x86_64.whl
132 |
133 | # Install Keras
134 | RUN pip --no-cache-dir install git+git://github.com/fchollet/keras.git@${KERAS_VERSION}
135 |
136 | # Install Additional Packages for DeepNeuro
137 | RUN apt-get update -y
138 | RUN apt-get install graphviz -y
139 | RUN pip install pydot==1.1.0
140 | RUN pip install pandas --upgrade
141 | RUN pip install numexpr --upgrade
142 | RUN pip install nibabel pydicom
143 |
144 | # Install Slicer
145 | RUN SLICER_URL="http://download.slicer.org/bitstream/561384" && \
146 | curl -v -s -L $SLICER_URL | tar xz -C /tmp && \
147 | mv /tmp/Slicer* /opt/slicer
148 |
149 | # Install ANTS
150 | WORKDIR /home
151 | RUN wget "https://github.com/stnava/ANTs/releases/download/v2.1.0/Linux_Ubuntu14.04.tar.bz2" && \
152 | sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes bzip2 && \
153 | tar -C /usr/local -xjf Linux_Ubuntu14.04.tar.bz2 && \
154 | rm Linux_Ubuntu14.04.tar.bz2
155 |
156 | # Install NeuroDebian
157 | RUN wget -O- http://neuro.debian.net/lists/trusty.us-nh.full | sudo tee /etc/apt/sources.list.d/neurodebian.sources.list
158 | RUN sudo apt-key adv --recv-keys --keyserver hkp://pool.sks-keyservers.net:80 0xA5D32F012649A5A9
159 | RUN apt-get update
160 |
161 | # Install FSL with NeuroDebian
162 | RUN sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes fsl-5.0-complete
163 |
164 | # Environmental Variables
165 | ENV PATH "$PATH:/opt/slicer"
166 | ENV PATH "$PATH:/usr/local/ANTs.2.1.0.Debian-Ubuntu_X64"
167 |
168 | # Setup Scripts
169 | RUN echo "source /usr/share/fsl/5.0/etc/fslconf/fsl.sh" >> ~/.bashrc
170 |
171 | RUN git clone https://github.com/QTIM-Lab/DeepNeuro /home/DeepNeuro
172 | WORKDIR /home/DeepNeuro
173 | RUN python /home/DeepNeuro/setup.py develop
174 |
175 | # Copy in models
176 | RUN mkdir -p /home/DeepNeuro/deepneuro/load/Segment_GBM
177 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_GBM/wholetumor.h5 "https://www.dropbox.com/s/74tjx14ue11rc0q/wholetumor.h5?dl=1"
178 | RUN wget -O /home/DeepNeuro/deepneuro/load/Segment_GBM/enhancing.h5 "https://www.dropbox.com/s/usdal6cbkw3bceu/enhancingtumor_BRATS_submission.h5?dl=1"
179 |
180 | # TODO: Check if Docker has solved this checkpointing problem.
181 | RUN echo 22
182 | RUN git pull
183 |
184 | # Commands at startup.
185 | WORKDIR "/home/DeepNeuro/misc/DeepInfer/Segment_GBM/"
186 | RUN chmod 777 /home/DeepNeuro/misc/DeepInfer/Segment_GBM/entrypoint.sh
187 | ENTRYPOINT ["/home/DeepNeuro/misc/DeepInfer/Segment_GBM/entrypoint.sh"]
188 |
--------------------------------------------------------------------------------
/misc/DeepInfer/Segment_GBM/entrypoint.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 |
5 | if __name__ == '__main__':
6 |
7 | parser = argparse.ArgumentParser(
8 | description='''segment pipeline [-gpu_num -debiased -resampled -registered -save_all_steps -save_preprocessed]
9 |
10 | Segment an image from DICOMs with all preprocessing steps included.
11 |
12 | -output_folder: A filepath to your output folder. Two nifti files will be generated "enhancingtumor.nii.gz" and "wholetumor.nii.gz"
13 | -T2, -T1, -T1POST, -FLAIR: Filepaths to input MR modalities. Inputs can be either nifti files or DICOM folders. Note that DICOM folders should only contain one volume each.
14 | -gpu_num: Which CUDA GPU ID # to use. Defaults to 0, i.e. the first gpu.
15 | -debiased: If flagged, data is assumed to already have been N4 bias-corrected, and skips that preprocessing step.
16 | -resampled: If flagged, data is assumed to already have been isotropically resampled, and skips that preprocessing step.
17 | -registered: If flagged, data is assumed to already have been registered into the same space, and skips that preprocessing step.
18 | -save_all_steps: If flagged, intermediate volumes in between preprocessing steps will be saved in output_folder.
19 | -save_preprocessed: If flagged, the final volume after all preprocessing steps will be saved in output_folder.
20 | ''')
21 |
22 | parser.add_argument('--output_folder', type=str)
23 | parser.add_argument('--T2', type=str)
24 | parser.add_argument('--T1', type=str)
25 | parser.add_argument('--T1POST', type=str)
26 | parser.add_argument('--FLAIR', type=str)
27 | parser.add_argument('--gpu_num', nargs='?', const='0', default='0', type=str)
28 | parser.add_argument('--debiased', action='store_true')
29 | parser.add_argument('--resampled', action='store_true')
30 | parser.add_argument('--registered', action='store_true')
31 | parser.add_argument('--skullstripped', action='store_true') # Currently non-functional
32 | parser.add_argument('--normalized', action='store_true')
33 | parser.add_argument('--save_preprocess', action='store_true')
34 | parser.add_argument('--save_all_steps', action='store_true')
35 | parser.add_argument('--ModelName', type=str)
36 | parser.add_argument('--Output_WholeTumor', type=str)
37 | parser.add_argument('--Output_EnhancingTumor', type=str)
38 | args = parser.parse_args(sys.argv[2:])
39 | #
40 |
41 | os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
42 | os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_num)
43 |
44 | from deepneuro.pipelines.Segment_GBM.predict import predict_GBM
45 |
46 | predict_GBM(args.output_folder, T2=args.T2, T1=args.T1, T1POST=args.T1POST, FLAIR=args.FLAIR, ground_truth=None, input_directory='/INPUT_DATA', bias_corrected=args.debiased, resampled=args.resampled, registered=args.registered, skullstripped=args.skullstripped, normalized=False, save_preprocess=False, save_all_steps=False, output_wholetumor_filename=args.Output_WholeTumor, output_enhancing_filename=args.Output_EnhancingTumor)
--------------------------------------------------------------------------------
/misc/DeepInfer/Segment_GBM/entrypoint.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | source /usr/share/fsl/5.0/etc/fslconf/fsl.sh
3 | exec "$@"
4 | python entrypoint.py
--------------------------------------------------------------------------------
/notebooks/Call_External_Packages.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "markdown",
5 | "metadata": {
6 | "collapsed": true
7 | },
8 | "source": [
9 | ""
10 | ]
11 | },
12 | {
13 | "cell_type": "markdown",
14 | "metadata": {},
15 | "source": [
16 | "# Wrapping Around External Packages with DeepNeuro\n",
17 | "\n",
18 | "Conducting machine learning research in the field of medical imaging poses unique challenges. One of the most pernicious challenges is the wide variety of supporting softwares needed to preprocess and analyze medical imaging data. These packages are often open-source and maintained by academic labs, but may have complicated installation instructions or only work on certain platforms. Furthermore, different scanning modalities and even sequences can require very different preprocessing steps, requiring users to install many different software packages in multi-modality workflows.\n",
19 | "\n",
20 | "DeepNeuro seeks to simplify some of these problems by creating Docker containers for common software packages used in medical imaging, and writing easy-to-use Python wrappers around these Docker containers. This let's you quickly use well-known, state-of-the-art methods without having to spend days with a technician installing their requirements.\n",
21 | "\n",
22 | "In this tutorial, we will use some of these wrappers to preprocess MR data, create segmentations using a trained neural network, and then save these segmentations to DICOM Segmentation Object (DSO) format. We will use wrappers around the open-source medical imaging package 3DSlicer, and the open-source DICOM conversion package _dcmqi_. We will also use the DeepNeuro docker container to use trained deep learning models for skull-stripping as part of our pipeline."
23 | ]
24 | },
25 | {
26 | "cell_type": "markdown",
27 | "metadata": {},
28 | "source": [
29 | "\n",
30 | "Learn more about 3D Slicer here!
"
31 | ]
32 | },
33 | {
34 | "cell_type": "markdown",
35 | "metadata": {},
36 | "source": [
37 | "### Tutorial Requirements\n",
38 | "\n",
39 | "You will need to have Docker already installed for this tutorial. You can find some instructions on how to do that here: https://docs.docker.com/install/. Some tutorials on how to use Docker can be found here: https://docker-curriculum.com/.\n",
40 | "\n",
41 | "In order to run these Docker containers on the GPU, you will also have to install nvidia-docker. nvidia-docker is an extension to Docker that lets you seamlessly hook up your docker containers to your NVIDIA GPU drivers and supporting software. You can find instructions on how to install nvidia-docker here: https://github.com/NVIDIA/nvidia-docker."
42 | ]
43 | },
44 | {
45 | "cell_type": "markdown",
46 | "metadata": {},
47 | "source": [
48 | "## Downloading Sample Data\n",
49 | "\n",
50 | "Our first step is to download some sample data. We will download some DICOM data from TCGA-GBM dataset on the Cancer Imaging Archive (TCIA)."
51 | ]
52 | },
53 | {
54 | "cell_type": "code",
55 | "execution_count": null,
56 | "metadata": {
57 | "collapsed": true
58 | },
59 | "outputs": [],
60 | "source": [
61 | "from deepneuro.load.load import load\n",
62 | "\n",
63 | "load('sample_gbm_dicom', output_datapath='./Sample_Data')"
64 | ]
65 | },
66 | {
67 | "cell_type": "markdown",
68 | "metadata": {},
69 | "source": [
70 | "## Pulling the DeepNeuro Docker\n",
71 | "\n",
72 | "This dataset is comprised of four MR sequences from one visit of a patient with a high-grade glioma. These sequences are not guaranteed to be oriented in the same patient space, or even be in the same resolution. We also do not have much information about the specific scanner or choices made during sequence development for these images, meaning that voxel intensities may not be in the range we typically expect them to be. If we want\n",
73 | "\n",
74 | "Ordinarily, these preprocessing steps are coordinated for you in DeepNeuro modules. However, for this tutorial, we will start from scratch and code the preprocessing steps ourselves. We will do this using the base DeepNeuro Docker container. This container has all off the medical imaging software that DeepNeuro uses pre-installed, and all of the pre-trained models DeepNeuro has to offer pre-downloaded.\n",
75 | "\n",
76 | "Our first step will be to pull the Docker container for DeepNeuro from DockerHub."
77 | ]
78 | },
79 | {
80 | "cell_type": "code",
81 | "execution_count": null,
82 | "metadata": {
83 | "collapsed": true
84 | },
85 | "outputs": [],
86 | "source": [
87 | "!docker pull qtimlab/deepneuro"
88 | ]
89 | },
90 | {
91 | "cell_type": "markdown",
92 | "metadata": {},
93 | "source": [
94 | "Easy enough! Let's get on to coding."
95 | ]
96 | },
97 | {
98 | "cell_type": "markdown",
99 | "metadata": {},
100 | "source": [
101 | "## Developing with Docker"
102 | ]
103 | },
104 | {
105 | "cell_type": "markdown",
106 | "metadata": {},
107 | "source": [
108 | "## Loading Input DICOM Data"
109 | ]
110 | },
111 | {
112 | "cell_type": "markdown",
113 | "metadata": {},
114 | "source": [
115 | "## Applying Preprocessing Steps with 3DSlicer and Pretrained Models"
116 | ]
117 | },
118 | {
119 | "cell_type": "markdown",
120 | "metadata": {},
121 | "source": [
122 | "## Saving to DSO Format Using _dcmqi_"
123 | ]
124 | },
125 | {
126 | "cell_type": "code",
127 | "execution_count": null,
128 | "metadata": {
129 | "collapsed": true
130 | },
131 | "outputs": [],
132 | "source": []
133 | }
134 | ],
135 | "metadata": {
136 | "anaconda-cloud": {},
137 | "kernelspec": {
138 | "display_name": "Python 3",
139 | "language": "python",
140 | "name": "python3"
141 | },
142 | "language_info": {
143 | "codemirror_mode": {
144 | "name": "ipython",
145 | "version": 3
146 | },
147 | "file_extension": ".py",
148 | "mimetype": "text/x-python",
149 | "name": "python",
150 | "nbconvert_exporter": "python",
151 | "pygments_lexer": "ipython3",
152 | "version": "3.4.3"
153 | }
154 | },
155 | "nbformat": 4,
156 | "nbformat_minor": 2
157 | }
158 |
--------------------------------------------------------------------------------
/notebooks/Medical_Decathlon_Task02_Heart.ipynb:
--------------------------------------------------------------------------------
1 | {
2 | "cells": [
3 | {
4 | "cell_type": "code",
5 | "execution_count": null,
6 | "metadata": {
7 | "collapsed": true
8 | },
9 | "outputs": [],
10 | "source": []
11 | }
12 | ],
13 | "metadata": {
14 | "kernelspec": {
15 | "display_name": "Python 3",
16 | "language": "python",
17 | "name": "python3"
18 | },
19 | "language_info": {
20 | "codemirror_mode": {
21 | "name": "ipython",
22 | "version": 3
23 | },
24 | "file_extension": ".py",
25 | "mimetype": "text/x-python",
26 | "name": "python",
27 | "nbconvert_exporter": "python",
28 | "pygments_lexer": "ipython3",
29 | "version": "3.4.3"
30 | }
31 | },
32 | "nbformat": 4,
33 | "nbformat_minor": 2
34 | }
35 |
--------------------------------------------------------------------------------
/notebooks/resources/3D_Slicer_Logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/notebooks/resources/3D_Slicer_Logo.png
--------------------------------------------------------------------------------
/notebooks/resources/docker_module_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/notebooks/resources/docker_module_icon.png
--------------------------------------------------------------------------------
/notebooks/resources/external_package_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/notebooks/resources/external_package_icon.png
--------------------------------------------------------------------------------
/notebooks/resources/model_inference_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/notebooks/resources/model_inference_icon.png
--------------------------------------------------------------------------------
/notebooks/resources/train_model_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/notebooks/resources/train_model_icon.png
--------------------------------------------------------------------------------
/notebooks/resources/train_preprocess_icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/notebooks/resources/train_preprocess_icon.png
--------------------------------------------------------------------------------
/package_resources/logos/DeepNeuro.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/package_resources/logos/DeepNeuro.PNG
--------------------------------------------------------------------------------
/package_resources/logos/DeepNeuro_alt.PNG:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/QTIM-Lab/DeepNeuro/8a55a958660227859439df003ac39b98ce3da1b0/package_resources/logos/DeepNeuro_alt.PNG
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 |
4 | [flake8]
5 | max-line-length = 600
6 | ignore = W291,W292,W293,E722,E115,W391,E127,E128,E116,E265,E402
7 | exclude =
8 | deepneuro/pipelines
9 | deepneuro/models
10 | deepneuro/package_test
11 | docs
12 | misc
13 | package_resources
14 | .git
15 | deepneuro/core.py
16 | __init__.py
17 |
18 | [tool:pytest]
19 | addopts = --verbose
20 | norecursedirs = .tox .git env docs package_resources
21 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | """DeepNeuro: A deep learning python package for neuroimaging data.
2 |
3 | Created by the Quantitative Tumor Imaging Lab at the Martinos Center
4 | (Harvard-MIT Program in Health Sciences and Technology / Massachusetts General Hospital).
5 |
6 | """
7 |
8 | DOCLINES = __doc__.split("\n")
9 |
10 | import sys
11 |
12 | from setuptools import setup, find_packages
13 | from codecs import open
14 | from os import path
15 | import os
16 |
17 | os.environ["MPLCONFIGDIR"] = "."
18 |
19 | if sys.version_info[:2] < (3, 5):
20 | raise RuntimeError("Python version 3.5 or greater required.")
21 |
22 | setup(
23 | name='deepneuro',
24 | version='0.2.3',
25 | description=DOCLINES[0],
26 | packages=find_packages(),
27 | entry_points= {
28 | "console_scripts": ['segment_gbm = deepneuro.pipelines.Segment_GBM.cli:main',
29 | 'skull_stripping = deepneuro.pipelines.Skull_Stripping.cli:main',
30 | 'segment_mets = deepneuro.pipelines.Segment_Brain_Mets.cli:main',
31 | 'segment_ischemic_stroke = deepneuro.pipelines.Ischemic_Stroke.cli:main'],
32 | },
33 | author='Andrew Beers',
34 | author_email='abeers@mgh.harvard.edu',
35 | url='https://github.com/QTIM-Lab/DeepNeuro', # use the URL to the github repo
36 | download_url='https://github.com/QTIM-Lab/DeepNeuro/tarball/0.2.3',
37 | keywords=['neuroimaging', 'neuroncology', 'neural networks', 'neuroscience', 'neurology', 'deep learning', 'fmri', 'pet', 'mri', 'dce', 'dsc', 'dti', 'machine learning', 'computer vision', 'learning', 'keras', 'theano', 'tensorflow', 'nifti', 'nrrd', 'dicom'],
38 | install_requires=['tables', 'pydicom', 'pynrrd', 'nibabel', 'pyyaml', 'six', 'imageio', 'matplotlib', 'pydot', 'scipy', 'numpy', 'scikit-image', 'imageio', 'tqdm'],
39 | classifiers=[],
40 | )
41 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | # Tox (http://tox.testrun.org/) is a tool for running tests
2 | # in multiple virtualenvs. This configuration file will run the
3 | # test suite on all supported python versions. To use it, "pip install tox"
4 | # and then run "tox" from this directory.
5 |
6 | # `envlist` chooses the build environments to test with the main `testenv`.
7 | # The pyXX environments are built in, but custom environments (testenv:XX)
8 | # must be added manually to the test suite (envlist)
9 |
10 |
11 | [tox]
12 | envlist=
13 | py35,flake8
14 |
15 |
16 |
17 | ###########################
18 | # DeepNeuro Tests
19 | ###########################
20 |
21 | [testenv]
22 | deps =
23 | pytest
24 | coverage
25 | commands =
26 | python coverage_wrapper.py
27 | coverage report
28 |
29 |
30 | ###########################
31 | # Run flake8 linter
32 | ###########################
33 |
34 | [testenv:flake8]
35 | deps =
36 | flake8
37 | commands =
38 | flake8 deepneuro
39 |
--------------------------------------------------------------------------------