├── .coveragerc
├── .flake8
├── .github
├── ISSUE_TEMPLATE
│ ├── bug_report.md
│ └── feature_request.md
└── workflows
│ └── ci.yml
├── .gitignore
├── .readthedocs.yml
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── LICENSE
├── Makefile
├── README.md
├── bin
├── dosma
└── setup
├── codecov.yml
├── docs
├── .nojekyll
├── Makefile
├── index.html
├── make.bat
├── requirements.txt
└── source
│ ├── conf.py
│ ├── core_api.rst
│ ├── documentation.rst
│ ├── faq.rst
│ ├── figures
│ ├── unrolled_fc.png
│ └── workflow.png
│ ├── guide_basic.rst
│ ├── guide_fitting.rst
│ ├── guide_registration.rst
│ ├── guide_tutorials.rst
│ ├── index.rst
│ ├── installation.rst
│ ├── introduction.rst
│ ├── models.rst
│ ├── references.bib
│ ├── scans.rst
│ ├── tissues.rst
│ ├── usage.rst
│ ├── user_guide.rst
│ ├── utils_api.rst
│ └── zreferences.rst
├── dosma
├── __init__.py
├── app.py
├── cli.py
├── core
│ ├── __init__.py
│ ├── device.py
│ ├── fitting.py
│ ├── io
│ │ ├── __init__.py
│ │ ├── dicom_io.py
│ │ ├── format_io.py
│ │ ├── format_io_utils.py
│ │ └── nifti_io.py
│ ├── med_volume.py
│ ├── numpy_routines.py
│ ├── orientation.py
│ ├── quant_vals.py
│ └── registration.py
├── defaults.py
├── file_constants.py
├── gui
│ ├── __init__.py
│ ├── defaults
│ │ └── skel-rotate.gif
│ ├── dosma_gui.py
│ ├── gui_errors.py
│ ├── gui_utils
│ │ ├── __init__.py
│ │ ├── console_output.py
│ │ ├── filedialog_reader.py
│ │ └── gui_utils.py
│ ├── im_viewer.py
│ ├── ims.py
│ └── preferences_viewer.py
├── models
│ ├── __init__.py
│ ├── oaiunet2d.py
│ ├── seg_model.py
│ ├── stanford_qdess.py
│ └── util.py
├── msk
│ ├── __init__.py
│ └── knee.py
├── resources
│ ├── elastix
│ │ └── params
│ │ │ ├── parameters-affine-interregister.txt
│ │ │ ├── parameters-affine.txt
│ │ │ ├── parameters-bspline.txt
│ │ │ ├── parameters-rigid-interregister.txt
│ │ │ └── parameters-rigid.txt
│ └── templates
│ │ ├── .preferences.yml
│ │ └── .preferences_cmd_line_schema.yml
├── scan_sequences
│ ├── __init__.py
│ ├── mri
│ │ ├── __init__.py
│ │ ├── cones.py
│ │ ├── cube_quant.py
│ │ ├── mapss.py
│ │ └── qdess.py
│ ├── scan_io.py
│ └── scans.py
├── tissues
│ ├── __init__.py
│ ├── femoral_cartilage.py
│ ├── meniscus.py
│ ├── patellar_cartilage.py
│ ├── tibial_cartilage.py
│ └── tissue.py
└── utils
│ ├── __init__.py
│ ├── cmd_line_utils.py
│ ├── collect_env.py
│ ├── env.py
│ ├── geometry_utils.py
│ ├── img_utils.py
│ ├── io_utils.py
│ └── logger.py
├── pyproject.toml
├── readme_ims
└── dess_protocol.png
├── requirements.txt
├── scripts
├── bilateral-knee-dess
├── msk-qdess
├── multi-scan-script
└── runtime-script
├── setup.cfg
├── setup.py
└── tests
├── __init__.py
├── core
├── __init__.py
├── io
│ ├── __init__.py
│ ├── test_dicom_io.py
│ ├── test_format_io.py
│ ├── test_format_io_utils.py
│ ├── test_inter_io.py
│ └── test_nifti_io.py
├── test_device.py
├── test_fitting.py
├── test_med_volume.py
├── test_numpy_routines.py
├── test_orientation.py
├── test_quant_vals.py
└── test_registration.py
├── models
├── __init__.py
├── test_oaiunet2d.py
├── test_stanford_qdess.py
└── test_util.py
├── resources
└── preferences.yml
├── scan_sequences
├── __init__.py
├── mri
│ ├── __init__.py
│ ├── test_cones.py
│ ├── test_cubequant.py
│ ├── test_mapss.py
│ └── test_qdess.py
└── test_scan_io.py
├── test_preferences.py
├── tissues
└── test_tissue.py
├── util.py
└── utils
├── __init__.py
├── test_collect_env.py
├── test_env.py
├── test_io_utils.py
└── test_logger.py
/.coveragerc:
--------------------------------------------------------------------------------
1 | [run]
2 | omit =
3 | dosma/gui/*
4 | dosma/resources/*
5 | *test*
6 |
7 | [report]
8 | include =
9 | dosma/*
--------------------------------------------------------------------------------
/.flake8:
--------------------------------------------------------------------------------
1 | # Flake8 configuration for this project.
2 | # Keep in sync with setup.cfg and pyproject.toml which is used for source packages.
3 |
4 | [flake8]
5 | ignore = W503, E203, B950, B011, B904
6 | max-line-length = 100
7 | max-complexity = 18
8 | select = B,C,E,F,W,T4,B9
9 | docstring-convention = google
10 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Bug report
3 | about: Create a report to help us improve
4 | title: '[BUG]'
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Describe the bug**
11 | A clear and concise description of what the bug is.
12 |
13 | **To Reproduce**
14 | Steps to reproduce the behavior:
15 | 1. Go to '...'
16 | 2. Click on '....'
17 | 3. Scroll down to '....'
18 | 4. See error
19 |
20 | **Expected behavior**
21 | A clear and concise description of what you expected to happen.
22 |
23 | **Screenshots**
24 | If applicable, add screenshots to help explain your problem.
25 |
26 | **Desktop (please complete the following information):**
27 | - OS: [e.g. iOS]
28 | - Browser [e.g. chrome, safari]
29 | - Version [e.g. 22]
30 |
31 | Please also provide the output of the following code:
32 |
33 | ```python
34 | >>> from dosma.utils.collect_env import collect_env_info
35 | >>> print(collect_env_info())
36 | ```
37 |
38 | **Additional context**
39 | Add any other context about the problem here.
40 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.md:
--------------------------------------------------------------------------------
1 | ---
2 | name: Feature request
3 | about: Suggest an idea for this project
4 | title: '[FEATURE]'
5 | labels: ''
6 | assignees: ''
7 |
8 | ---
9 |
10 | **Is your feature request related to a problem? Please describe.**
11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
12 |
13 | **Describe the solution you'd like**
14 | A clear and concise description of what you want to happen.
15 |
16 | **Describe alternatives you've considered**
17 | A clear and concise description of any alternative solutions or features you've considered.
18 |
19 | **Additional context**
20 | Add any other context or screenshots about the feature request here.
21 |
--------------------------------------------------------------------------------
/.github/workflows/ci.yml:
--------------------------------------------------------------------------------
1 | name: CI
2 |
3 | on:
4 | push:
5 | branches: [ master ]
6 | pull_request:
7 | branches: [ master ]
8 |
9 | # Allows you to run this workflow manually from the Actions tab
10 | workflow_dispatch:
11 |
12 | jobs:
13 |
14 | Linting:
15 | runs-on: ubuntu-latest
16 | strategy:
17 | matrix:
18 | python-version: ["3.6", "3.7", "3.8"]
19 |
20 | steps:
21 | - uses: actions/checkout@v2
22 | - name: Set up Python ${{ matrix.python-version }}
23 | uses: actions/setup-python@v2
24 | with:
25 | python-version: ${{ matrix.python-version }}
26 |
27 | - uses: actions/cache@v2
28 | with:
29 | path: ~/.cache/pip
30 | key: ${{ runner.os }}-pip
31 |
32 | - name: Install Dependencies
33 | run: |
34 | python -m pip install --upgrade pip
35 | make dev
36 | pip install --upgrade click==8.0.2
37 | - name: Lint with isort, black, docformatter, flake8
38 | run: |
39 | make lint
40 |
41 | Documentation:
42 | needs: Linting
43 | runs-on: ubuntu-latest
44 | strategy:
45 | matrix:
46 | python-version: ["3.6", "3.7", "3.8"]
47 |
48 | steps:
49 | - uses: actions/checkout@v2
50 | - name: Set up Python ${{ matrix.python-version }}
51 | uses: actions/setup-python@v2
52 | with:
53 | python-version: ${{ matrix.python-version }}
54 |
55 | - uses: actions/cache@v2
56 | with:
57 | path: ~/.cache/pip
58 | key: ${{ runner.os }}-pip
59 |
60 | - name: Install Dependencies
61 | run: |
62 | python -m pip install --upgrade pip
63 | make dev
64 | pip install -e '.[dev]'
65 |
66 | - name: Generate Docs
67 | run: |
68 | make build-docs
69 | Build:
70 | needs: Documentation
71 | runs-on: ${{ matrix.os }}
72 | strategy:
73 | matrix:
74 | os: [ubuntu-latest]
75 | python-version: ["3.6", "3.7", "3.8"] # there are some issues with numpy multiarray in 3.7
76 |
77 | steps:
78 | - uses: actions/checkout@v2
79 |
80 | - name: Set up Python ${{ matrix.python-version }}
81 | uses: actions/setup-python@v2
82 | with:
83 | python-version: ${{ matrix.python-version }}
84 |
85 | - name: Install Dependencies
86 | # there are some issues with numpy multiarray in 3.7, affecting numba 0.54 installation
87 | # SimpleITK 2.1.0 does not support non-orthonormal directions
88 | run: |
89 | python -m pip install --upgrade pip
90 | pip install numba==0.53.1
91 | pip install tensorflow==2.4.1 keras
92 | pip install torch
93 | pip install sigpy
94 | pip install --upgrade simpleitk==2.0.2
95 | make dev
96 | pip install -e '.[dev]'
97 |
98 | - name: Install Elastix
99 | run: |
100 | wget https://github.com/SuperElastix/elastix/releases/download/4.9.0/elastix-4.9.0-linux.tar.bz2
101 | ELASTIX_FOLDER="elastix"
102 | mkdir -p $ELASTIX_FOLDER
103 | tar -xvf elastix-4.9.0-linux.tar.bz2 -C $ELASTIX_FOLDER
104 | ELASTIX_DIR="$(realpath $ELASTIX_FOLDER)"
105 | echo "PATH=${ELASTIX_DIR}/bin:${PATH}" >> $GITHUB_ENV
106 | echo "LD_LIBRARY_PATH=${ELASTIX_DIR}/lib:${LD_LIBRARY_PATH}" >> $GITHUB_ENV
107 |
108 | - name: Test with pytest
109 | run: |
110 | pip install pytest
111 | pip install pytest-cov
112 | make test-cov
113 |
114 | - name: Upload to codecov.io
115 | uses: codecov/codecov-action@v1
116 | with:
117 | file: ./coverage.xml
118 | flags: unittests
119 | name: codecov-umbrella
120 | fail_ci_if_error: true
121 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # project files
2 | **/.idea
3 | **/.vscode
4 |
5 | # cache files
6 | **/__pycache__
7 | **/.pytest_cache
8 | *.pyc
9 |
10 | # build files
11 | *.egg-info
12 |
13 | # weights files
14 | **/weights
15 |
16 | # data
17 | **/dicoms
18 | **/unittest-data
19 | **/.dosma
20 |
21 | # test data files
22 | **/temp
23 | **/ex_patient
24 |
25 | # ignore local files
26 | libANNlib.dylib
27 | debug_runs.py
28 |
29 | # ignore ismrm
30 | **/ismrm_2019_scripts
31 |
32 | # ignore runtime generated package scripts
33 | **/._*
34 |
35 | # pyscripts
36 | **/py_scripts
37 |
38 | # playgrounds
39 | /gui/gui_playground.py
40 |
41 | # build files
42 | build/
43 | **/dist
44 | *.spec
45 |
46 | # Generated docs
47 | docs/source/generated
48 |
49 | # ignore preferences file
50 | **/preferences.*
51 |
52 | # ignore all resources
53 | **/resources
54 |
55 | # coverage files
56 | .coverage
57 | coverage.xml
--------------------------------------------------------------------------------
/.readthedocs.yml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | python:
4 | version: 3.7
5 | install:
6 | - method: pip
7 | path: .
8 | extra_requirements:
9 | - docs
10 |
11 | sphinx:
12 | builder: html
13 | fail_on_warning: false
--------------------------------------------------------------------------------
/CODE_OF_CONDUCT.md:
--------------------------------------------------------------------------------
1 | # Contributor Covenant Code of Conduct
2 |
3 | ## Our Pledge
4 |
5 | In the interest of fostering an open and welcoming environment, we as
6 | contributors and maintainers pledge to making participation in our project and
7 | our community a harassment-free experience for everyone, regardless of age, body
8 | size, disability, ethnicity, sex characteristics, gender identity and expression,
9 | level of experience, education, socio-economic status, nationality, personal
10 | appearance, race, religion, or sexual identity and orientation.
11 |
12 | ## Our Standards
13 |
14 | Examples of behavior that contributes to creating a positive environment
15 | include:
16 |
17 | * Using welcoming and inclusive language
18 | * Being respectful of differing viewpoints and experiences
19 | * Gracefully accepting constructive criticism
20 | * Focusing on what is best for the community
21 | * Showing empathy towards other community members
22 |
23 | Examples of unacceptable behavior by participants include:
24 |
25 | * The use of sexualized language or imagery and unwelcome sexual attention or
26 | advances
27 | * Trolling, insulting/derogatory comments, and personal or political attacks
28 | * Public or private harassment
29 | * Publishing others' private information, such as a physical or electronic
30 | address, without explicit permission
31 | * Other conduct which could reasonably be considered inappropriate in a
32 | professional setting
33 |
34 | ## Our Responsibilities
35 |
36 | Project maintainers are responsible for clarifying the standards of acceptable
37 | behavior and are expected to take appropriate and fair corrective action in
38 | response to any instances of unacceptable behavior.
39 |
40 | Project maintainers have the right and responsibility to remove, edit, or
41 | reject comments, commits, code, wiki edits, issues, and other contributions
42 | that are not aligned to this Code of Conduct, or to ban temporarily or
43 | permanently any contributor for other behaviors that they deem inappropriate,
44 | threatening, offensive, or harmful.
45 |
46 | ## Scope
47 |
48 | This Code of Conduct applies both within project spaces and in public spaces
49 | when an individual is representing the project or its community. Examples of
50 | representing a project or community include using an official project e-mail
51 | address, posting via an official social media account, or acting as an appointed
52 | representative at an online or offline event. Representation of a project may be
53 | further defined and clarified by project maintainers.
54 |
55 | ## Enforcement
56 |
57 | Instances of abusive, harassing, or otherwise unacceptable behavior may be
58 | reported by contacting the project team at arjun.desai.research@gmail.com. All
59 | complaints will be reviewed and investigated and will result in a response that
60 | is deemed necessary and appropriate to the circumstances. The project team is
61 | obligated to maintain confidentiality with regard to the reporter of an incident.
62 | Further details of specific enforcement policies may be posted separately.
63 |
64 | Project maintainers who do not follow or enforce the Code of Conduct in good
65 | faith may face temporary or permanent repercussions as determined by other
66 | members of the project's leadership.
67 |
68 | ## Attribution
69 |
70 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
71 | available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
72 |
73 | [homepage]: https://www.contributor-covenant.org
74 |
75 | For answers to common questions about this code of conduct, see
76 | https://www.contributor-covenant.org/faq
77 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | autoformat:
2 | set -e
3 | isort .
4 | black --config pyproject.toml .
5 | flake8
6 |
7 | lint:
8 | set -e
9 | isort -c .
10 | black --check --config pyproject.toml .
11 | flake8
12 |
13 | test:
14 | set -e
15 | coverage run -m pytest tests/
16 |
17 | test-cov:
18 | set -e
19 | pytest tests/ --cov=./ --cov-report=xml
20 |
21 | test-like-ga:
22 | set -e
23 | DOSMA_UNITTEST_DISABLE_DATA=true pytest tests/
24 |
25 | build-docs:
26 | set -e
27 | mkdir -p docs/source/_static
28 | rm -rf docs/build
29 | rm -rf docs/source/generated
30 | cd docs && make html
31 |
32 | dev:
33 | pip install black==21.4b2 click==8.0.2 coverage isort flake8 flake8-bugbear flake8-comprehensions
34 | pip install --upgrade mistune==0.8.4 sphinx sphinx-rtd-theme recommonmark m2r2
35 | pip install -r docs/requirements.txt
36 |
37 | all: autoformat test build-docs
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # DOSMA: Deep Open-Source Medical Image Analysis
2 | [](https://www.gnu.org/licenses/gpl-3.0)
3 | 
4 | [](https://codecov.io/gh/ad12/DOSMA)
5 | [](https://dosma.readthedocs.io/en/latest/?badge=latest)
6 |
7 | [Documentation](http://dosma.readthedocs.io/) | [Questionnaire](https://forms.gle/sprthTC2swyt8dDb6) | [DOSMA Basics Tutorial](https://colab.research.google.com/drive/1zY5-3ZyTBrn7hoGE5lH0IoQqBzumzP1i?usp=sharing)
8 |
9 | DOSMA is an AI-powered Python library for medical image analysis. This includes, but is not limited to:
10 | - image processing (denoising, super-resolution, registration, segmentation, etc.)
11 | - quantitative fitting and image analysis
12 | - anatomical visualization and analysis (patellar tilt, femoral cartilage thickness, etc.)
13 |
14 | We hope that this open-source pipeline will be useful for quick anatomy/pathology analysis and will serve as a hub for adding support for analyzing different anatomies and scan sequences.
15 |
16 | ## Installation
17 | DOSMA requires Python 3.6+. The core module depends on numpy, nibabel, nipype,
18 | pandas, pydicom, scikit-image, scipy, PyYAML, and tqdm.
19 |
20 | Additional AI features can be unlocked by installing tensorflow and keras. To
21 | enable built-in registration functionality, download [elastix](https://elastix.lumc.nl/download.php).
22 | Details can be found in the [setup documentation](https://dosma.readthedocs.io/en/latest/general/installation.html#setup).
23 |
24 | To install DOSMA, run:
25 |
26 | ```bash
27 | pip install dosma
28 |
29 | # To install with AI support
30 | pip install dosma[ai]
31 | ```
32 |
33 | If you would like to contribute to DOSMA, we recommend you clone the repository and
34 | install DOSMA with `pip` in editable mode.
35 |
36 | ```bash
37 | git clone git@github.com:ad12/DOSMA.git
38 | cd DOSMA
39 | pip install -e '.[dev,docs]'
40 | make dev
41 | ```
42 |
43 | To run tests, build documentation and contribute, run
44 | ```bash
45 | make autoformat test build-docs
46 | ```
47 |
48 | ## Features
49 | ### Simplified, Efficient I/O
50 | DOSMA provides efficient readers for DICOM and NIfTI formats built on nibabel and pydicom. Multi-slice DICOM data can be loaded in
51 | parallel with multiple workers and structured into the appropriate 3D volume(s). For example, multi-echo and dynamic contrast-enhanced (DCE) MRI scans have multiple volumes acquired at different echo times and trigger times, respectively. These can be loaded into multiple volumes with ease:
52 |
53 | ```python
54 | import dosma as dm
55 |
56 | multi_echo_scan = dm.load("/path/to/multi-echo/scan", group_by="EchoNumbers", num_workers=8, verbose=True)
57 | dce_scan = dm.load("/path/to/dce/scan", group_by="TriggerTime")
58 | ```
59 |
60 | ### Data-Embedded Medical Images
61 | DOSMA's [MedicalVolume](https://dosma.readthedocs.io/en/latest/generated/dosma.MedicalVolume.html#dosma.MedicalVolume) data structure supports array-like operations (arithmetic, slicing, etc.) on medical images while preserving spatial attributes and accompanying metadata. This structure supports NumPy interoperability, intelligent reformatting, fast low-level computations, and native GPU support. For example, given MedicalVolumes `mvA` and `mvB` we can do the following:
62 |
63 | ```python
64 | # Reformat image into Superior->Inferior, Anterior->Posterior, Left->Right directions.
65 | mvA = mvA.reformat(("SI", "AP", "LR"))
66 |
67 | # Get and set metadata
68 | study_description = mvA.get_metadata("StudyDescription")
69 | mvA.set_metadata("StudyDescription", "A sample study")
70 |
71 | # Perform NumPy operations like you would on image data.
72 | rss = np.sqrt(mvA**2 + mvB**2)
73 |
74 | # Move to GPU 0 for CuPy operations
75 | mv_gpu = mvA.to(dosma.Device(0))
76 |
77 | # Take slices. Metadata will be sliced appropriately.
78 | mv_subvolume = mvA[10:20, 10:20, 4:6]
79 | ```
80 |
81 | ### Built-in AI Models
82 | DOSMA is built to be a hub for machine/deep learning models. A complete list of models and corresponding publications can be found [here](https://dosma.readthedocs.io/en/latest/models.html).
83 | We can use one of the knee segmentation models to segment a MedicalVolume `mv` and model
84 | `weights` [downloaded locally](https://dosma.readthedocs.io/en/latest/installation.html#segmentation):
85 |
86 | ```python
87 | from dosma.models import IWOAIOAIUnet2DNormalized
88 |
89 | # Reformat such that sagittal plane is last dimension.
90 | mv = mv.reformat(("SI", "AP", "LR"))
91 |
92 | # Do segmentation
93 | model = IWOAIOAIUnet2DNormalized(input_shape=mv.shape[:2] + (1,), weights_path=weights)
94 | masks = model.generate_mask(mv)
95 | ```
96 |
97 | ### Parallelizable Operations
98 | DOSMA supports parallelization for compute-heavy operations, like curve fitting and image registration.
99 | Image registration is supported through the [elastix/transformix](https://elastix.lumc.nl/download.php) libraries. For example we can use multiple workers to register volumes to a target, and use the registered outputs for per-voxel monoexponential fitting:
100 |
101 | ```python
102 | # Register images mvA, mvB, mvC to target image mv_tgt in parallel
103 | _, (mvA_reg, mvB_reg, mvC_reg) = dosma.register(
104 | mv_tgt,
105 | moving=[mvA, mvB, mvC],
106 | parameters="/path/to/elastix/registration/file",
107 | num_workers=3,
108 | return_volumes=True,
109 | show_pbar=True,
110 | )
111 |
112 | # Perform monoexponential fitting.
113 | def monoexponential(x, a, b):
114 | return a * np.exp(b*x)
115 |
116 | fitter = dosma.CurveFitter(
117 | monoexponential,
118 | num_workers=4,
119 | p0={"a": 1.0, "b": -1/30},
120 | )
121 | popt, r2 = fitter.fit([1, 2, 3, 4], [mv_tgt, mvA_reg, mvB_reg, mvC_reg])
122 | a_fit, b_fit = popt[..., 0], popt[..., 1]
123 | ```
124 |
125 | ## Citation
126 | ```
127 | @inproceedings{desai2019dosma,
128 | title={DOSMA: A deep-learning, open-source framework for musculoskeletal MRI analysis},
129 | author={Desai, Arjun D and Barbieri, Marco and Mazzoli, Valentina and Rubin, Elka and Black, Marianne S and Watkins, Lauren E and Gold, Garry E and Hargreaves, Brian A and Chaudhari, Akshay S},
130 | booktitle={Proc 27th Annual Meeting ISMRM, Montreal},
131 | pages={1135},
132 | year={2019}
133 | }
134 | ```
135 |
136 | In addition to DOSMA, please also consider citing the work that introduced the method used for analysis.
137 |
--------------------------------------------------------------------------------
/bin/dosma:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Bash interface for DOSMA
4 | #
5 | # @usage (from terminal/command line):
6 | # Command line interface: ./dosma command-line-args
7 | # User Interface: ./dosma --app
8 | #
9 | # @initialization protocol:
10 | # 1. Navigate to this folder
11 | # 2. Run "chmod +x dosma" from command-line (Linux) or Terminal (MacOS)
12 | #
13 | # @author: Arjun Desai, Stanford University
14 | # (c) Stanford University, 2019
15 |
16 | BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
17 | DOSMA_DIR="$( cd "$( dirname "${BIN_DIR}" )" >/dev/null 2>&1 && pwd )"
18 | DOSMA_ENV_NAME="dosma_env"
19 |
20 |
21 | # Check if environment is active
22 | if [[ -z `conda env list | grep \* | grep $DOSMA_ENV_NAME` ]]; then
23 | echo "Activate $DOSMA_ENV_NAME before running this script."
24 | exit 1
25 | fi
26 |
27 | cd $DOSMA_DIR
28 | if [[ $1 == "--app" ]]; then
29 | python -m dosma.app
30 | else
31 | python -m dosma.cli $*
32 | fi
--------------------------------------------------------------------------------
/bin/setup:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Initialize DOSMA
4 | #
5 | # @usage (from terminal/command line):
6 | # ./setup
7 | #
8 | # @initialization protocol:
9 | # 1. Navigate to this folder in terminal/command line
10 | # 2. Run "chmod +x setup" from command-line (Linux) or Terminal (MacOS)
11 | #
12 | # @author: Arjun Desai, Stanford University
13 | # (c) Stanford University, 2018
14 |
15 | openURL() {
16 | if [[ "$OSTYPE" == "linux-gnu" ]]; then
17 | xdg-open $1
18 | elif [[ "$OSTYPE" == "darwin"* ]]; then
19 | # Mac OSX
20 | open $1
21 | else
22 | echo "Only Linux and MacOS are supported"
23 | exit 125
24 | fi
25 | }
26 |
27 | setBashFile() {
28 | if [[ "$OSTYPE" == "linux-gnu" ]]; then
29 | bash_file="$HOME/.bashrc"
30 | elif [[ "$OSTYPE" == "darwin"* ]]; then
31 | bash_file="$HOME/.bash_profile"
32 | else
33 | echo "Only Linux and MacOS are supported"
34 | exit 125
35 | fi
36 | }
37 |
38 | BIN_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
39 | DOSMA_DIR="$( cd "$( dirname "${BIN_DIR}" )" >/dev/null 2>&1 && pwd )"
40 |
41 | ANACONDA_KEYWORD="anaconda"
42 | ANACONDA_DOWNLOAD_URL="https://www.anaconda.com/distribution/"
43 | MINICONDA_KEYWORD="miniconda"
44 |
45 | DOSMA_ENV_NAME="dosma_env"
46 | ENV_FILE=$DOSMA_DIR/environment.yml
47 |
48 | GOOGLE_FORM_URL="https://forms.gle/sprthTC2swyt8dDb6"
49 |
50 | DOSMA_SETUP_PATTERN="dosma setup|export PATH.*dosma\/bin"
51 |
52 | hasAnaconda=0
53 | updateEnv=1
54 | updatePath=1
55 | setBashFile
56 |
57 | while getopts ":h" opt; do
58 | case ${opt} in
59 | h )
60 | echo "DOSMA setup"
61 | echo " Create Anaconda virtual environment and add dosma executable to PATH variable"
62 | echo "Usage:"
63 | echo " -h Display this help message"
64 | exit 0
65 | ;;
66 | \? )
67 | echo "Usage: cmd [-h]"
68 | exit 0
69 | ;;
70 | esac
71 | done
72 |
73 | # Check if conda exists
74 | if echo $PATH | grep -q $ANACONDA_KEYWORD; then
75 | hasAnaconda=1
76 | echo "Conda found in path"
77 | fi
78 |
79 | if echo $PATH | grep -q $MINICONDA_KEYWORD
80 | then
81 | hasAnaconda=1
82 | echo "Miniconda found in path"
83 | fi
84 |
85 | if [[ $hasAnaconda -eq 0 ]]; then
86 | echo "Anaconda/Miniconda not installed - install from $ANACONDA_DOWNLOAD_URL"
87 | openURL $ANACONDA_DOWNLOAD_URL
88 | exit 125
89 | fi
90 |
91 | # Check if OS is supported
92 | if [[ "$OSTYPE" != "linux-gnu" && "$OSTYPE" != "darwin"* ]]; then
93 | echo "Only Linux and MacOS are supported"
94 | exit 125
95 | fi
96 |
97 | # Create Anaconda environment (dosma_env)
98 | if [[ `conda env list | grep $DOSMA_ENV_NAME` ]]; then
99 | if [[ ${updateEnv} -eq 0 ]]; then
100 | echo "Environment 'dosma_env' found. Run 'conda activate dosma_env' to get started."
101 | else
102 | conda env remove -n $DOSMA_ENV_NAME
103 | conda env create -f $ENV_FILE
104 | fi
105 | else
106 | conda env create -f $ENV_FILE
107 | fi
108 |
109 | # If DOSMA executable already exists as a shortcut and updating DOSMA, then remove the path lines
110 | if [[ ! -z `cat ${bash_file} | egrep -i "${DOSMA_SETUP_PATTERN}"` && ${updatePath} -ne 0 ]]; then
111 | echo ""
112 | echo "Overwriting DOSMA executable path"
113 | echo ""
114 |
115 | cp $bash_file $bash_file"-dosma.bak"
116 | tempFile="$HOME/.bash_profile_temp"
117 | egrep -iv "${DOSMA_SETUP_PATTERN}" $bash_file >> $tempFile
118 | cp $tempFile $bash_file
119 | fi
120 |
121 | # Add dosma to path
122 | if [[ -z `cat ${bash_file} | egrep -i "${DOSMA_SETUP_PATTERN}"` ]]; then
123 | cp $bash_file $bash_file"-dosma.bak"
124 |
125 | echo "Adding dosma to path - changes made in $bash_file"
126 | echo "" >> $bash_file
127 | echo "# Added by DOSMA setup" >> $bash_file
128 | echo 'export PATH='$BIN_DIR':$PATH' >> $bash_file
129 | fi
130 |
131 | echo ""
132 | echo ""
133 | echo "DOSMA Usage"
134 | echo "------------"
135 | echo "For command line help menu: 'dosma -h'"
136 | echo "For GUI (user interface): 'dosma --app'"
137 | echo ""
138 | echo "For help with DOSMA, see documentation:"
139 | echo " https://ad12.github.io/DOSMA"
140 | echo ""
141 | echo "Please also complete the DOSMA questionnaire if not previously completed"
142 | echo " ${GOOGLE_FORM_URL}"
143 | echo ""
144 | echo ""
145 | # Launch google form
146 | openURL $GOOGLE_FORM_URL
147 |
--------------------------------------------------------------------------------
/codecov.yml:
--------------------------------------------------------------------------------
1 | comment: false
2 |
3 | ignore:
4 | - dosma/gui/*
5 | - dosma/resources/*
6 |
7 | coverage:
8 | status:
9 | # Enable coverage measurement for diff introduced in the pull-request,
10 | # but do not mark "X" on commit status for now.
11 | patch:
12 | default:
13 | target: '0%'
--------------------------------------------------------------------------------
/docs/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/docs/.nojekyll
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Minimal makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line, and also
5 | # from the environment for the first two.
6 | SPHINXOPTS ?=
7 | SPHINXBUILD ?= sphinx-build
8 | SPHINXPROJ = dosma
9 | SOURCEDIR = source
10 | BUILDDIR = build
11 |
12 | # Put it first so that "make" without argument is like "make help".
13 | help:
14 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
15 |
16 | .PHONY: help Makefile
17 |
18 | # Catch-all target: route all unknown targets to Sphinx using the new
19 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
20 | %: Makefile
21 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
22 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/docs/make.bat:
--------------------------------------------------------------------------------
1 | @ECHO OFF
2 |
3 | pushd %~dp0
4 |
5 | REM Command file for Sphinx documentation
6 |
7 | if "%SPHINXBUILD%" == "" (
8 | set SPHINXBUILD=sphinx-build
9 | )
10 | set SOURCEDIR=source
11 | set BUILDDIR=build
12 |
13 | if "%1" == "" goto help
14 |
15 | %SPHINXBUILD% >NUL 2>NUL
16 | if errorlevel 9009 (
17 | echo.
18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
19 | echo.installed, then set the SPHINXBUILD environment variable to point
20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you
21 | echo.may add the Sphinx directory to PATH.
22 | echo.
23 | echo.If you don't have Sphinx installed, grab it from
24 | echo.http://sphinx-doc.org/
25 | exit /b 1
26 | )
27 |
28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
29 | goto end
30 |
31 | :help
32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
33 |
34 | :end
35 | popd
36 |
--------------------------------------------------------------------------------
/docs/requirements.txt:
--------------------------------------------------------------------------------
1 | h5py<3.0.0
2 | numpy
3 | natsort
4 | nested-lookup
5 | nibabel
6 | nipype
7 | pandas
8 | pydicom>=1.6.0
9 | scikit-image
10 | scipy
11 | seaborn
12 | openpyxl
13 | Pmw
14 | PyYAML
15 | tqdm>=4.42.0
16 | sphinx
17 | sphinx-rtd-theme
18 | recommonmark
19 | sphinx_bootstrap_theme
20 | sphinxcontrib-bibtex>=2.0.0
21 | m2r2
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 |
16 | sys.path.insert(0, os.path.abspath("../.."))
17 |
18 |
def get_version() -> str:
    """Return the dosma package version parsed from ``dosma/__init__.py``.

    Reads the ``__version__ = "..."`` assignment line from the package's
    ``__init__.py`` (two directories above this conf.py) so the docs never
    import the package just to learn its version.

    Returns:
        str: The version string with surrounding quotes stripped.

    Raises:
        IndexError: If no line starting with ``__version__`` is found.
    """
    init_py_path = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), "../../dosma", "__init__.py"
    )
    # Use a context manager so the file handle is always closed.
    with open(init_py_path, "r") as f:
        version_line = [line.strip() for line in f if line.startswith("__version__")][0]
    # Take the right-hand side of the assignment and drop quotes.
    return version_line.split("=")[-1].strip().strip("'\"")
27 |
28 |
# -- Project information -----------------------------------------------------

project = "dosma"
copyright = "2019-2021, Arjun Desai"
author = "Arjun Desai"

# The full version, including alpha/beta/rc tags, parsed from dosma/__init__.py.
release = get_version()

# -- General configuration ---------------------------------------------------

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. Each extension is listed exactly once
# (a duplicate "sphinx.ext.githubpages" entry was removed).
extensions = [
    "sphinx.ext.autodoc",
    "sphinx.ext.autosummary",
    "sphinx.ext.intersphinx",
    "sphinx.ext.todo",
    "sphinx.ext.coverage",
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.viewcode",
    "sphinx.ext.githubpages",
    "sphinx.ext.napoleon",
    "sphinxcontrib.bibtex",
    "sphinx_rtd_theme",
    "m2r2",
]
autosummary_generate = True
autosummary_imported_members = True

# Bibtex files used by sphinxcontrib-bibtex.
bibtex_bibfiles = ["references.bib"]

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "DOSMAdoc"

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# Intersphinx mappings for cross-project references (e.g. numpy types).
intersphinx_mapping = {"numpy": ("https://numpy.org/doc/stable/", None)}

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {"navigation_depth": 2}

# Source file suffixes to parse (markdown is handled via m2r2).
source_suffix = [".rst", ".md"]

# Documentation to include
todo_include_todos = True
napoleon_use_ivar = True
napoleon_google_docstring = True
html_show_sourcelink = False
114 |
--------------------------------------------------------------------------------
/docs/source/core_api.rst:
--------------------------------------------------------------------------------
1 | .. _core_api:
2 |
3 | Core API (dosma.core)
4 | ================================================================================
5 |
6 | MedicalVolume
7 | ---------------------------
8 | .. _core_api_medicalvolume:
9 |
10 | .. autosummary::
11 | :toctree: generated
12 | :nosignatures:
13 |
14 | dosma.MedicalVolume
15 |
16 |
17 | Numpy Routines
18 | ---------------------------
19 | .. _core_api_numpy_routines:
20 |
21 | Numpy operations that are supported on MedicalVolumes.
22 |
23 | .. autosummary::
24 | :toctree: generated
25 | :nosignatures:
26 |
27 | dosma.core.numpy_routines.all_np
28 | dosma.core.numpy_routines.amax
29 | dosma.core.numpy_routines.amin
30 | dosma.core.numpy_routines.any_np
31 | dosma.core.numpy_routines.argmax
32 | dosma.core.numpy_routines.argmin
33 | dosma.core.numpy_routines.around
34 | dosma.core.numpy_routines.clip
35 | dosma.core.numpy_routines.concatenate
36 | dosma.core.numpy_routines.expand_dims
37 | dosma.core.numpy_routines.may_share_memory
38 | dosma.core.numpy_routines.mean_np
39 | dosma.core.numpy_routines.nan_to_num
40 | dosma.core.numpy_routines.nanargmax
41 | dosma.core.numpy_routines.nanargmin
42 | dosma.core.numpy_routines.nanmax
43 | dosma.core.numpy_routines.nanmean
44 | dosma.core.numpy_routines.nanmin
45 | dosma.core.numpy_routines.nanstd
46 | dosma.core.numpy_routines.nansum
47 | dosma.core.numpy_routines.ones_like
48 | dosma.core.numpy_routines.pad
49 | dosma.core.numpy_routines.shares_memory
50 | dosma.core.numpy_routines.squeeze
51 | dosma.core.numpy_routines.stack
52 | dosma.core.numpy_routines.std
53 | dosma.core.numpy_routines.sum_np
54 | dosma.core.numpy_routines.where
55 | dosma.core.numpy_routines.zeros_like
56 |
57 | Standard universal functions that act element-wise on the array are also supported.
58 | A (incomplete) list is shown below:
59 |
60 | .. list-table::
61 | :widths: 20 20 20 20 20
62 | :header-rows: 0
63 |
64 | * - numpy.power
65 | - numpy.sign
66 | - numpy.remainder
67 | - numpy.mod
68 | - numpy.abs
69 | * - numpy.log
70 | - numpy.exp
71 | - numpy.sqrt
72 | - numpy.square
73 | - numpy.reciprocal
74 | * - numpy.sin
75 | - numpy.cos
76 | - numpy.tan
77 | - numpy.bitwise_and
78 | - numpy.bitwise_or
79 | * - numpy.isfinite
80 | - numpy.isinf
81 | - numpy.isnan
82 | - numpy.floor
83 | - numpy.ceil
84 |
85 |
86 | Image I/O
87 | ---------------------------
88 | .. autosummary::
89 | :toctree: generated
90 | :nosignatures:
91 |
92 | dosma.read
93 | dosma.write
94 | dosma.NiftiReader
95 | dosma.NiftiWriter
96 | dosma.DicomReader
97 | dosma.DicomWriter
98 |
99 |
100 | Image Orientation
101 | ---------------------------
102 | .. automodule::
103 | dosma.core.orientation
104 |
105 | .. autosummary::
106 | :toctree: generated
107 | :nosignatures:
108 |
109 | dosma.core.orientation.to_affine
110 | dosma.core.orientation.get_transpose_inds
111 | dosma.core.orientation.get_flip_inds
112 | dosma.core.orientation.orientation_nib_to_standard
113 | dosma.core.orientation.orientation_standard_to_nib
114 |
115 |
116 | Image Registration
117 | ---------------------------
For details on using registration, see the :ref:`Registration Guide <guide_registration>`.
119 |
120 | .. automodule::
121 | dosma.core.registration
122 |
123 | .. autosummary::
124 | :toctree: generated
125 | :nosignatures:
126 |
127 | dosma.register
128 | dosma.apply_warp
129 | dosma.symlink_elastix
130 | dosma.unlink_elastix
131 |
132 |
133 | Fitting
134 | ---------------------------
For details on using fitting functions, see the :ref:`Fitting Guide <guide_fitting>`.
136 |
137 | .. automodule::
138 | dosma.core.fitting
139 |
140 | General fitting functions:
141 |
142 | .. autosummary::
143 | :toctree: generated
144 | :nosignatures:
145 |
146 | dosma.curve_fit
147 | dosma.polyfit
148 | dosma.core.fitting.monoexponential
149 | dosma.core.fitting.biexponential
150 |
151 | Fitter classes:
152 |
153 | .. autosummary::
154 | :toctree: generated
155 | :nosignatures:
156 |
157 | dosma.CurveFitter
158 | dosma.PolyFitter
159 | dosma.MonoExponentialFit
160 |
161 |
162 | Device
163 | ----------
164 | .. autosummary::
165 | :toctree: generated
166 | :nosignatures:
167 |
168 | dosma.Device
169 | dosma.get_device
170 | dosma.to_device
171 |
172 |
173 | Preferences
174 | -------------
175 | .. autosummary::
176 | :toctree: generated
177 | :nosignatures:
178 |
179 | dosma.defaults._Preferences
180 | dosma.preferences
181 |
182 |
183 | (BETA) Quantitative Values
184 | ---------------------------
185 | Utilities for different quantitative parameters.
186 | Note, this feature is in beta and will likely change in future releases.
187 |
188 | .. autosummary::
189 | :toctree: generated
190 | :nosignatures:
191 |
192 | dosma.core.quant_vals.QuantitativeValue
193 | dosma.core.quant_vals.T1Rho
194 | dosma.core.quant_vals.T2
195 | dosma.core.quant_vals.T2Star
196 |
--------------------------------------------------------------------------------
/docs/source/documentation.rst:
--------------------------------------------------------------------------------
1 | .. _documentation:
2 |
3 | Documentation
4 | ================================================================================
5 | This page discusses the core API and built-in extensions for DOSMA.
6 |
7 | To learn more about what scans and tissues are supported, see the pages below:
8 |
9 | .. toctree::
10 | :maxdepth: 1
11 | :titlesonly:
12 |
13 | core_api
14 | scans
15 | tissues
16 | models
17 | utils_api
18 |
For a brief overview of DOSMA command-line practices and examples, see :ref:`Command Line Usage <usage>`.
20 |
21 | To inspect the source code and corresponding documentation, use the following:
22 |
23 | * :ref:`genindex`
24 | * :ref:`modindex`
25 | * :ref:`search`
26 |
--------------------------------------------------------------------------------
/docs/source/faq.rst:
--------------------------------------------------------------------------------
1 | .. _faq:
2 |
3 | FAQs
4 | ================================================================================
5 |
:ref:`How do I change my preferences? <faq-preferences>`
7 |
:ref:`How do I cite DOSMA? <faq-citation>`
9 |
10 | |
11 |
12 | .. _faq-preferences:
13 |
14 | **How do I change my preferences?**
15 |
16 | Different users have different preferences for visualizations, data formats, etc.
17 | You can set your own preferences by launching the GUI (``dosma --app``) and pressing the ``Preferences`` button on the home
18 | page and change your settings.
19 |
20 | To save settings for all future computation sessions, click ``Save Settings``.
21 | To apply settings just to this session, click ``Apply Settings``.
22 |
23 | .. _faq-citation:
24 |
25 | **How do I cite DOSMA?**
26 |
27 | ::
28 |
29 | @inproceedings{desai2019dosma,
30 | Title={DOSMA: A deep-learning, open-source framework for musculoskeletal MRI analysis.},
31 | Author = {Desai, Arjun D and Barbieri, Marco and Mazzoli, Valentina and Rubin, Elka and Black, Marianne S and Watkins, Lauren E and Gold, Garry E and Hargreaves, Brian A and Chaudhari, Akshay S},
32 | Booktitle={Proc. Intl. Soc. Mag. Reson. Med},
33 | Volume={27},
34 | Number={1106},
35 | Year={2019}
36 | }
37 |
38 | .. Additional citation details can be found [here](https://zenodo.org/record/2559549#.XFyRrs9KjyJ).
39 |
--------------------------------------------------------------------------------
/docs/source/figures/unrolled_fc.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/docs/source/figures/unrolled_fc.png
--------------------------------------------------------------------------------
/docs/source/figures/workflow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/docs/source/figures/workflow.png
--------------------------------------------------------------------------------
/docs/source/guide_fitting.rst:
--------------------------------------------------------------------------------
1 | .. _guide_fitting:
2 |
3 | **This guide is still under construction**
4 |
5 | Fitting
6 | -----------
7 |
8 | Dosma supports cpu-parallelizable quantitative fitting based on
9 | `scipy.optimize.curve_fit `_.
10 |
11 | To perform generic fitting to any array-like object using an arbitrary model function, we can use
12 | :func:`dosma.curve_fit`. For example, we can use this function to fit an array to a
13 | monoexponential model ``y = a * exp(b * x)`` using a maximum of 4 workers:
14 |
15 | >>> from dosma import curve_fit, monoexponential
16 | >>> curve_fit(monoexponential, x, y, num_workers=4)
17 |
18 | Quantitative fitting is quite common in medical image analysis. For example,
19 | quantitative MRI (qMRI) has enabled computing voxel-wise relaxation parameter maps
(e.g. |T2|, |T1rho|, etc.). We can fit a monoexponential model for each voxel across these images,
21 | where ``tc0`` is the initial guess for parameter :math:`-\frac{1}{b}` in the monoexponential model:
22 |
23 | >>> from dosma import MonoExponentialFit
24 | >>> tc0 = 30.0
25 | >>> echo_times = np.asarray([10.0, 20.0, 50.0])
26 | >>> fitter = MonoExponentialFit(tc0=tc0, num_workers=4)
27 | >>> tc, r2_map = fitter.fit(echo_times, images)
28 |
29 | If you don't have a good initial guess for ``tc0`` or expect the initial guess to be dependent on the voxel being fit
30 | (which is often the case), you can specify that the initial guess should be determined based on results from a
31 | polynomial fit over the log-linearized form of the monoexponential equation ``log(y) = log(a) - x/tc``:
32 |
33 | >>> from dosma import MonoExponentialFit
34 | >>> tc0 = "polyfit"
35 | >>> echo_times = np.asarray([10.0, 20.0, 50.0])
36 | >>> fitter = MonoExponentialFit(tc0=tc0, num_workers=4)
37 | >>> tc, r2_map = fitter.fit(echo_times, images)
38 |
39 | Custom model functions can also be provided and used with :class:`dosma.curve_fit` and :class:`dosma.CurveFitter` (recommended),
40 | a class wrapper around :class:`dosma.curve_fit` that handles :class:`MedicalVolume` data and supports additional post-processing
41 | on the fitted parameters. The commands below using :class:`dosma.CurveFitter` and :class:`dosma.curve_fit` are equivalent to the
42 | ``fitter`` above:
43 |
44 | >>> from dosma import CurveFitter
45 | >>> cfitter = CurveFitter(
46 | ... monoexponential, p0=(1.0, -1/tc0), num_workers=4, nan_to_num=0,
47 | ... out_ufuncs=[None, lambda x: -1/x], out_bounds=(0, 100))
48 | >>> popt, r2_map = cfitter.fit(echo_times, images)
49 | >>> tc = popt[..., 1]
50 |
51 | >>> from dosma import curve_fit
52 | >>> curve_fit(monoexponential, echo_times, [x.volume for x in images], p0=(1.0, -1/tc0), num_workers=4)
53 |
54 | Non-linear curve fitting often requires carefully selected parameter initialization. In cases where
55 | non-linear curve fitting fails, polynomial fitting may be more effective. Polynomials can be fit to
56 | the data using :func:`dosma.polyfit` or :class:`dosma.PolyFitter` (recommended),
57 | which is the polynomial fitting equivalent of ``CurveFitter``. Because polynomial fitting can also be
58 | done as a single least squares problem, it may also often be faster than standard curve fitting.
59 | The commands below use ``PolyFitter`` to fit to the log-linearized monoexponential fit
(i.e. ``log(y) = log(a) + b*x``) to some image data:
61 |
62 | >>> from dosma import PolyFitter
63 | >>> echo_times = np.asarray([10.0, 20.0, 50.0])
64 | >>> pfitter = PolyFitter(deg=1, nan_to_num=0, out_ufuncs=[None, lambda x: -1/x], out_bounds=(0, 100))
65 | >>> log_images = [np.log(img) for img in images]
66 | >>> popt, r2_map = pfitter.fit(echo_times, log_images)
67 | >>> tc = popt[..., 0] # note ordering of parameters - see numpy.polyfit for more details.
68 |
69 | We can also use the polyfit estimates to initialize the non-linear curve fit. For monoexponential
70 | fitting, we can do the following:
71 |
72 | >>> from dosma import CurveFitter, PolyFitter
73 | >>> echo_times = np.asarray([10.0, 20.0, 50.0])
74 | >>> pfitter = PolyFitter(deg=1, r2_threshold=0, num_workers=0)
75 | >>> log_images = [np.log(img) for img in images]
76 | >>> popt_pf, _ = pfitter.fit(echo_times, log_images)
77 | >>> cfitter = CurveFitter(monoexponential, r2_threshold=0.9, nan_to_num=0, out_ufuncs=[None, lambda x: -1/x], out_bounds=(0, 100))
78 | >>> popt, r2 = cfitter.fit(echo_times, images, p0={"a": popt_pf[..., 1], "b": popt_pf[..., 0]})
79 | >>> tc = popt[..., 1]
80 |
81 | .. Substitutions
82 | .. |T2| replace:: T\ :sub:`2`
83 | .. |T1| replace:: T\ :sub:`1`
84 | .. |T1rho| replace:: T\ :sub:`1`:math:`{\rho}`
85 | .. |T2star| replace:: T\ :sub:`2`:sup:`*`
86 |
--------------------------------------------------------------------------------
/docs/source/guide_registration.rst:
--------------------------------------------------------------------------------
1 | .. _guide_registration:
2 |
3 | **This guide is still under construction**
4 |
5 | Image Registration
6 | ------------------
7 |
8 | Dosma supports image registration using the Elastix and Transformix by creating a
9 | wrapper around the standard command-line usage. In addition to multi-threading, Dosma
10 | supports true parallel execution when registering multiple volumes to a target.
11 |
Elastix/Transformix must be installed and configured on your machine. See
:ref:`the setup guide <install-setup-registration>` for more information.
15 | To register moving image(s) to a target image, we can use :class:`dosma.register`:
16 |
17 | >>> from dosma import register
18 | >>> out = register(target, moving, "/path/to/elastix/file", "/path/to/save", return_volumes=True)
19 | >>> registered_images = out["volumes"]
20 |
21 | To use multiple workers, we can pass the ``num_workers`` argument. Note that ``num_workers``
22 | parallelizes registration when there are multiple moving images. The true number of parallel
23 | processes are equivalent to ``min(num_workers, len(moving))``. To increase the number of threads
used per registration, use ``num_threads``.
25 |
26 | To transform moving image(s) using a transformation file, we can use :class:`dosma.apply_warp`:
27 |
28 | >>> from dosma import apply_warp
29 | >>> transformed_image = apply_warp(image, transform="/path/to/transformation/file")
30 |
31 | Often we may want to copy the final transformation file produced during registration to transform
32 | other volumes:
33 |
34 | >>> reg_out = register(target, moving, "/path/to/elastix/file", "/path/to/save", return_volumes=True)
>>> warp_out = apply_warp(other_moving, transform=reg_out["outputs"].transform)
36 |
--------------------------------------------------------------------------------
/docs/source/guide_tutorials.rst:
--------------------------------------------------------------------------------
.. _guide_tutorials:
2 |
3 | **This guide is still under construction**
4 |
5 | Tutorials
6 | ------------------
7 |
8 | Below is a running list of tutorials and reproducible workflows built using DOSMA.
9 |
10 | - `DOSMA Basics `_
11 | - `Quantitative Knee MRI Workflows `_
12 |
--------------------------------------------------------------------------------
/docs/source/index.rst:
--------------------------------------------------------------------------------
1 | .. DOSMA documentation master file, created by
2 | sphinx-quickstart on Sat Dec 28 14:56:23 2019.
3 | You can adapt this file completely to your liking, but it should at least
4 | contain the root `toctree` directive.
5 |
6 | .. mdinclude:: ../../README.md
7 |
8 | .. toctree::
9 | :maxdepth: 2
10 | :hidden:
11 |
12 | introduction
13 | installation
14 | user_guide
15 | documentation
16 | usage
17 | faq
--------------------------------------------------------------------------------
/docs/source/installation.rst:
--------------------------------------------------------------------------------
1 | .. _installation:
2 |
3 | Installation
4 | ================================================================================
5 |
6 | This page provides a step-by-step overview of creating a virtual environment,
7 | installing DOSMA via `pip`, and verifying the install.
8 |
9 |
10 | Anaconda
11 | --------------------------------------------------------------------------------
12 | Please install the `Anaconda `_ virtual environment manager.
13 |
14 |
15 | .. _install-setup:
16 |
17 | Setup
18 | --------------------------------------------------------------------------------
19 | The following steps will create an Anaconda environment (``dosma_env``).
20 |
21 | 1. Open a Terminal/Shell window
22 | 2. Create the `dosma_env` environment::
23 |
24 | $ conda create -n dosma_env python=3.7
25 |
26 | 3. Install dosma via pip::
27 |
28 | $ pip install dosma
29 |
30 | 4. Complete the `DOSMA questionnaire `_.
31 |
32 | If you want to update your dosma version, run ``pip install --upgrade dosma``.
33 |
34 |
35 | Segmentation
36 | ############
37 | DOSMA currently supports automatic deep learning segmentation methods. These methods use pre-trained weights for
38 | segmenting tissues in specific scans. Currently, segmentation for quantitative double echo in steady state (qDESS) scans
39 | is supported for knee articular cartilage and meniscus.
40 |
41 | If you will be using this functionality, please follow the instructions below.
42 |
43 | 1. Request access using this `Google form `_
44 | *and* email arjundd (at)
45 |
46 | 2. Save these weights in an accessible location. **Do not rename these files**.
47 |
48 | We understand this process may be involved and are actively working on more effective methods to distribute these
49 | weights.
50 |
51 | .. _install-setup-registration:
52 |
53 | Registration
54 | ############
55 | Registration between scans in DOSMA is supported through Elastix and Transformix. If you plan on using the registration,
56 | follow the instructions below:
57 |
58 | 1. Download `elastix `_
59 | 2. Follow instructions on adding elastix/transformix to your system path
60 |
61 | On Ubuntu 18.04 Elastix version 5.0.1 does not work properly. Elastix 4.9.0 is recommended.
62 |
63 | If you are using a MacOS system, you may run into path issues with elastix (see
64 | `this discussion `_). To fix
65 | this, we can use the `dosma.symlink_elastix` to create
66 | appropriate symbolic links to files causing issues:
67 |
68 | $ conda activate dosma_env; python
69 | >>> from dosma import symlink_elastix
70 | >>> symlink_elastix()
71 |
72 | Note you will need to run this every time you update elastix/transformix paths
73 | on your machine.
74 |
75 | .. _install-verification:
76 |
77 | Verification
78 | --------------------------------------------------------------------------------
79 | 1. Open new Terminal window.
80 | 2. Activate DOSMA Anaconda environment::
81 |
82 | $ conda activate dosma_env
83 |
84 | 3. Run DOSMA from the command-line (cli). You should see a help menu output::
85 |
86 | $ python -m dosma.cli --help
87 |
88 | 4. Run DOSMA as an UI application (app). You should see a UI window pop-up::
89 |
90 | $ python -m dosma.app
91 |
92 |
93 | Updating DOSMA
94 | --------------------------------------------------------------------------------
95 | If you have used an earlier stand-alone of DOSMA (v0.0.11 or earlier), you may
96 | already have a ``dosma_env`` virtual environment. Please delete this environment
and reinstall following the steps in :ref:`Setup <install-setup>`.
98 |
99 | For those (v0.0.12 or later) having installed dosma via ``pip``, you can update
100 | dosma using::
101 |
102 | $ pip install --upgrade dosma
103 |
--------------------------------------------------------------------------------
/docs/source/introduction.rst:
--------------------------------------------------------------------------------
1 | .. _introduction:
2 |
3 | **This guide is still under construction**
4 |
5 | Introduction
6 | ================================================================================
7 | DOSMA is an open-source Python library and application for medical image analysis.
8 |
9 | DOSMA is designed to streamline medical image analysis by standardizing medical image
10 | I/O, simplifying array-like operations on medical images, and deploying state-of-the-art
11 | image analysis algorithms. Because DOSMA is a framework, it is built to be flexible enough
12 | to write analysis protocols that can be run for different imaging modalities and scan sequences.
13 |
For example, building the analysis workflow for a combination
of quantitative DESS, CubeQuant (3D fast spin echo), and ultra-short echo time Cones scans for multiple patients
(shown below) can be done in 7 lines of code:
17 |
18 | .. figure:: figures/workflow.png
19 | :align: center
20 | :alt: Example workflow for analyzing multiple scans per patient
21 | :figclass: align-center
22 |
23 | Example quantitative knee MRI workflow for analyzing 1. quantitative DESS (qDESS),
24 | a |T2|-weighted sequence, 2. CubeQuant, a |T1rho|-weighted sequence, and 3. ultra-short echo
25 | time (UTE) Cones, a |T2star| weighted sequence.
26 |
27 | Workflow
28 | --------------------------------------------------------------------------------
29 | DOSMA uses various modules to handle MSK analysis for multiple scan types and tissues:
30 |
31 | - **Scan** modules declare scan-specific actions (fitting, segmentation, registration, etc).
32 | - **Tissue** modules handle visualization and analysis optimized for different tissues.
33 | - **Analysis** modules abstract different methods for performing different actions (different segmentation methods, fitting methods, etc.)
34 |
35 | **Note**: DOSMA is still in beta, and APIs are subject to change.
36 |
37 | Features
38 | --------------------------------------------------------------------------------
39 |
40 | Dynamic Input/Output (I/O)
41 | ^^^^^^^^^^^^^^^^^^^^^^^^^^
42 | Reading and writing medical images relies on standardized data formats.
43 | The Digital Imaging and Communications in Medicine (DICOM) format has been the international
44 | standard for medical image I/O. However, header information is memory intensive and
45 | and may not be useful in cases where only volume information is desired.
46 |
47 | The Neuroimaging Informatics Technology Initiative (NIfTI) format is useful in these cases.
48 | It stores only volume-specific header information (rotation, position, resolution, etc.) with
49 | the volume.
50 |
51 | DOSMA supports the use of both formats. However, because NIfTI headers do not contain relevant scan
52 | information, it is not possible to perform quantitative analysis that require this information.
53 | Therefore, we recommend using DICOM inputs, which is the standard output of acquisition systems,
54 | when starting processing with DOSMA.
55 |
56 | By default, volumes (segmentations, quantitative maps, etc.) are written in the NIfTI format.
The default output file format can be changed in the :ref:`preferences <faq-preferences>`.
58 |
59 | Array-Like Medical Images
60 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
61 | Medical images are spatially-aware pixel arrays with metadata. DOSMA supports array-like
62 | operations (arithmetic, slicing, etc.) on medical images while preserving spatial attributes and
63 | accompanying metadata with the :class:`MedicalVolume` data structure. It also supports intelligent
64 | reformatting, fast low-level computations, and native GPU support.
65 |
66 |
67 | Disclaimers
68 | --------------------------------------------------------------------------------
69 |
70 | Using Deep Learning
71 | ^^^^^^^^^^^^^^^^^^^
72 | All weights/parameters trained for any task are likely to be most closely correlated to data used for training.
73 | If scans from a particular sequence were used for training, the performance of those weights are likely optimized
74 | for that specific scan prescription (resolution, TR/TE, etc.). As a result, they may not perform as well on segmenting images
75 | acquired using different scan types.
76 |
77 | If you do train weights for any deep learning task that you would want to include as part of this repo, please provide
78 | a link to those weights and detail the scanning parameters/sequence used to acquire those images.
79 |
80 | .. Substitutions
81 | .. |T2| replace:: T\ :sub:`2`
82 | .. |T1| replace:: T\ :sub:`1`
83 | .. |T1rho| replace:: T\ :sub:`1`:math:`{\rho}`
84 | .. |T2star| replace:: T\ :sub:`2`:sup:`*`
85 |
--------------------------------------------------------------------------------
/docs/source/models.rst:
--------------------------------------------------------------------------------
1 | .. _seg_models:
2 |
3 | Models (dosma.models)
4 | ================================================================================
5 | DOSMA currently supports pre-trained deep learning models for segmenting, each described in detail below.
6 | Model aliases are string fields used to distinguish/specify particular models in DOSMA (command-line
7 | argument :code:`--model`).
8 |
9 | All models are open-sourced under the GNU General Public License v3.0 license.
10 | If you use these models, please reference both DOSMA and the original work.
11 |
12 | .. automodule::
13 | dosma.models
14 |
15 | .. autosummary::
16 | :toctree: generated
17 | :nosignatures:
18 |
19 | dosma.models.OAIUnet2D
20 | dosma.models.IWOAIOAIUnet2D
21 | dosma.models.IWOAIOAIUnet2DNormalized
22 | dosma.models.StanfordQDessUNet2D
23 |
24 |
25 | OAI 2D U-Net
26 | --------------------------------------------------------------------------------
27 | A 2D U-Net trained on a downsampled rendition of the OAI iMorphics DESS dataset :cite:`chaudhari2018open`.
28 | Inputs are zero-mean, unit standard deviation normalized before segmentation.
29 |
30 | Aliases: :code:`oai-unet2d`, :code:`oai_unet2d`
31 |
32 |
33 | IWOAI Segmentation Challenge - Team 6 2D U-Net
34 | --------------------------------------------------------------------------------
35 | This model was submitted by Team 6 to the 2019 International Workshop on Osteoarthritis Segmentation
36 | :cite:`desai2020international`.
37 | It consists of a 2D U-Net trained on the standardized OAI training dataset.
38 |
39 | Note, inputs are not normalized before segmentation and therefore may be difficult to generalize to
40 | DESS scans with different parameters than the OAI.
41 |
42 | Aliases: :code:`iwoai-2019-t6`
43 |
44 |
45 | IWOAI Segmentation Challenge - Team 6 2D U-Net (Normalized)
46 | --------------------------------------------------------------------------------
47 | This model is a duplicate of the `iwoai-2019-t6` network (above), but differs in that it uses
48 | zero-mean, unit standard deviation normalized inputs. This may make the network more robust to
49 | different DESS scan parameters and/or scanner vendors.
50 |
51 | While this model was not submitted to the IWOAI challenge, the architecture, training parameters, and dataset are
52 | identical to the Team 6 submission. Performance on the standardized OAI test set was similar to the original network
53 | submitted by Team 6 (see table below).
54 |
55 | Aliases: :code:`iwoai-2019-t6-normalized`
56 |
57 | .. table:: Average (standard deviation) performance summary on OAI test set.
58 | Coefficient of variation is calculated as root-mean-square value.
59 |
60 | ========= =================== ================== ==================== ===============
61 | .. Femoral Cartilage Tibial Cartilage Patellar Cartilage Meniscus
62 | ========= =================== ================== ==================== ===============
63 | Dice 0.906 +/- 0.014 0.881 +/- 0.033 0.857 +/- 0.080 0.870 +/- 0.032
64 | VOE 0.171 +/- 0.023 0.211 +/- 0.052 0.242 +/- 0.108 0.229 +/- 0.049
65 | RMS-CV 0.019 +/- 0.011 0.048 +/- 0.029 0.076 +/- 0.061 0.045 +/- 0.025
66 | ASSD (mm) 0.174 +/- 0.020 0.270 +/- 0.166 0.243 +/- 0.106 0.344 +/- 0.111
67 | ========= =================== ================== ==================== ===============
68 |
69 |
70 | SKM-TEA qDESS Knee Segmentation - 2D U-net
71 | --------------------------------------------------------------------------------
72 | This collection of models are trained on the `SKM-TEA dataset `_
73 | (previously known as the *2021 Stanford qDESS Knee Dataset*).
74 | Details of the different models that are trained are shown in the training configurations
75 | distributed with the weights.
76 |
77 |
78 | * ``qDESS_2021_v1-rms-unet2d-pc_fc_tc_men_weights.h5``: This is the baseline
79 | RSS model trained on the SKM-TEA v1 dataset.
80 | Though the same hyperparameters were used, this model (trained with Tensorflow/Keras)
81 | performs better than the PyTorch implementation specified in the main paper.
82 | Results are shown in the table below.
83 | * ``qDESS_2021_v0_0_1-rms-pc_fc_tc_men_weights.h5``: This model is trained on the
84 | 2021 Stanford qDESS knee dataset (v0.0.1).
85 | * ``qDESS_2021_v0_0_1-traintest-rms-pc_fc_tc_men_weights.h5``: This model
86 | is trained on both the train and test set of the 2021 Stanford qDESS knee
87 | dataset (v0.0.1).
88 |
89 | Aliases: :code:`stanford-qdess-2021-unet2d`, :code:`skm-tea-unet2d`
90 |
91 |
92 | .. table:: Mean +/- standard deviation performance summary on SKM-TEA v1 dataset.
93 |
94 | ========= =================== ================== ==================== ===============
95 | .. Femoral Cartilage Tibial Cartilage Patellar Cartilage Meniscus
96 | ========= =================== ================== ==================== ===============
97 | Dice 0.882 +/- 0.033 0.865 +/- 0.035 0.879 +/- 0.103 0.847 +/- 0.068
98 | VOE 0.210 +/- 0.052 0.237 +/- 0.053 0.205 +/- 0.121 0.261 +/- 0.092
99 | CV 0.051 +/- 0.033 0.053 +/- 0.037 0.049 +/- 0.077 0.052 +/- 0.052
100 | ASSD (mm) 0.265 +/- 0.114 0.354 +/- 0.250 0.477 +/- 0.720 0.485 +/- 0.307
101 | ========= =================== ================== ==================== ===============
--------------------------------------------------------------------------------
/docs/source/references.bib:
--------------------------------------------------------------------------------
1 | @article{jordan2014variability,
2 | title={Variability of CubeQuant T1$\rho$, quantitative DESS T2, and cones sodium MRI in knee cartilage},
3 | author={Jordan, Caroline D and McWalter, EJ and Monu, UD and Watkins, Ronald D and Chen, Weitian and Bangerter, Neal K and Hargreaves, Brian A and Gold, Garry E},
4 | journal={Osteoarthritis and cartilage},
5 | volume={22},
6 | number={10},
7 | pages={1559--1567},
8 | year={2014},
9 | publisher={Elsevier}
10 | }
11 |
12 | @article{qian2010multicomponent,
13 | title={Multicomponent T2* mapping of knee cartilage: technical feasibility ex vivo},
14 | author={Qian, Yongxian and Williams, Ashley A and Chu, Constance R and Boada, Fernando E},
15 | journal={Magnetic resonance in medicine},
16 | volume={64},
17 | number={5},
18 | pages={1426--1431},
19 | year={2010},
20 | publisher={Wiley Online Library}
21 | }
22 |
23 | @article{li2008vivo,
24 | title={In vivo T1$\rho$ mapping in cartilage using 3D magnetization-prepared angle-modulated partitioned k-space spoiled gradient echo snapshots (3D MAPSS)},
25 | author={Li, Xiaojuan and Han, Eric T and Busse, Reed F and Majumdar, Sharmila},
26 | journal={Magnetic Resonance in Medicine: An Official Journal of the International Society for Magnetic Resonance in Medicine},
27 | volume={59},
28 | number={2},
29 | pages={298--307},
30 | year={2008},
31 | publisher={Wiley Online Library}
32 | }
33 |
34 | @article{sveinsson2017simple,
35 | title={A simple analytic method for estimating T2 in the knee from DESS},
36 | author={Sveinsson, B and Chaudhari, AS and Gold, GE and Hargreaves, BA},
37 | journal={Magnetic resonance imaging},
38 | volume={38},
39 | pages={63--70},
40 | year={2017},
41 | publisher={Elsevier}
42 | }
43 |
44 | @inproceedings{desai2019dosma,
45 | Title={DOSMA: A deep-learning, open-source framework for musculoskeletal MRI analysis.},
46 | Author = {Desai, Arjun D and Barbieri, Marco and Mazzoli, Valentina and Rubin, Elka and Black, Marianne S and Watkins, Lauren E and Gold, Garry E and Hargreaves, Brian A and Chaudhari, Akshay S},
47 | Booktitle={Proc. Intl. Soc. Mag. Reson. Med},
48 | Volume={27},
49 | Number={1106},
50 | Year={2019}
51 | }
52 |
53 | @article{monu2017cluster,
54 | title={Cluster analysis of quantitative MRI T2 and T1$\rho$ relaxation times of cartilage identifies differences between healthy and ACL-injured individuals at 3T},
55 | author={Monu, Uchechukwuka D and Jordan, Caroline D and Samuelson, Bonnie L and Hargreaves, Brian A and Gold, Garry E and McWalter, Emily J},
56 | journal={Osteoarthritis and cartilage},
57 | volume={25},
58 | number={4},
59 | pages={513--520},
60 | year={2017},
61 | publisher={Elsevier}
62 | }
63 |
64 | @inproceedings{chaudhari2018open,
65 | Title={Open-Sourced Deep-Learning for Cartilage and Meniscus Segmentation},
66 | Author = {Chaudhari, Akshay and Fang, and Lee, and Hargreaves, Brian},
67 | Booktitle={Proc. Intl. Work. Ost. Art. Img},
68 | Year={2018}
69 | }
70 |
71 | @article{desai2020international,
72 | title={The International Workshop on Osteoarthritis Imaging Knee MRI Segmentation Challenge: A Multi-Institute Evaluation and Analysis Framework on a Standardized Dataset},
73 | author={Desai, Arjun D and Caliva, Francesco and Iriondo, Claudia and Khosravan, Naji and Mortazi, Aliasghar and
74 | Jambawalikar, Sachin and Torigian, Drew and Ellerman, Jutta and Akcakaya, Mehmet and Bagci, Ulas and
75 | Tibrewala, Radhika and Flament, Io and O'Brien, Matthew and Majumdar, Sharmila and Perslev, Mathias and Pai, Akshay
76 | and Igel, Christian and Dam, Erik B and Gaj, Sibaji and Yang, Mingrui and Nakamura, Kunio and Li, Xiaojuan and
77 | Deniz, Cem M and Juras, Vladimir and Regatte, Ravinder and Gold, Garry E and Hargreaves, Brian A and
78 | Pedoia, Valentina and Chaudhari, Akshay S},
79 | journal={arXiv preprint arXiv:2004.14003},
80 | year={2020}
81 | }
--------------------------------------------------------------------------------
/docs/source/scans.rst:
--------------------------------------------------------------------------------
1 | .. _scans:
2 |
3 | Scans (dosma.scan_sequences)
4 | ================================================================================
5 | DOSMA currently supports 4 MRI scan types:
6 |
7 | 1. Quantitative DESS (qDESS)
8 | 2. CubeQuant
9 | 3. MAPSS
10 | 4. UTE Cones
11 |
12 | Each scan implements a subset of the following actions:
13 |
14 | 1. **Segment** tissues from scan
15 | 2. **Interregister** to register between lower resolution (moving) and higher resolution (target) scans
16 | 3. **Quantitative fitting** for voxel-wise parameter maps.
17 |
18 | .. automodule::
19 | dosma.scan_sequences
20 |
21 | .. autosummary::
22 | :toctree: generated
23 |
24 | dosma.scan_sequences.ScanSequence
25 | dosma.scan_sequences.QDess
26 | dosma.scan_sequences.CubeQuant
27 | dosma.scan_sequences.Mapss
28 | dosma.scan_sequences.Cones
29 |
30 | Below we briefly discuss the different scan types and associated actions.
31 |
32 | qDESS
33 | --------------------------------------------------------------------------------
34 | Quantitative double echo in steady state (qDESS) is a high-resolution scan that has shown high efficacy for analytic
35 | |T2| mapping :cite:`sveinsson2017simple`. Because of its high resolution, qDESS scans have been shown to be good candidates for automatic
36 | segmentation.
37 |
38 | DOSMA supports both automatic segmentation and analytical |T2| solving for qDESS scans. Automated segmentation uses
39 | pre-trained convolutional neural networks (CNNs).
40 |
41 |
42 | CubeQuant (3D FSE)
43 | --------------------------------------------------------------------------------
44 | Cubequant is a 3D fast-spin-echo (FSE) |T1rho|-weighted sequence. Acquisitions between spin-locks are
45 | susceptible to motion, and as a result, volumes within the scan have to be registered to each other
46 | (i.e. *intra*-registered).
47 |
48 | Moreover, CubeQuant scans often have lower resolution to increase SNR in practice. Because of the
49 | low-resolution, these scans are often registered to higher resolution target scans :cite:`jordan2014variability`.
50 |
51 | By default, DOSMA intraregisters volumes acquired at different spin-locks to one another. This framework also supports
52 | both registration between scan types (interregistration) and |T1rho| fitting.
53 |
54 | Because registration is sensitive to the target scan type, different registration approaches may work better with
55 | different scan types. By default, the registration approaches are optimized to register CubeQuant scans to qDESS scans.
56 |
57 |
58 | 3D MAPSS (SPGR)
59 | --------------------------------------------------------------------------------
60 | Magnetization‐prepared angle‐modulated partitioned k‐space spoiled gradient echo snapshots (3D MAPSS) is a spoiled
61 | gradient (SPGR) sequence that reduces specific absorption rate (SAR), increases SNR, and reduces the extent of
62 | retrospective correction of contaminating |T1| effects :cite:`li2008vivo`.
63 |
64 | The MAPSS sequence can be used to estimate both |T1rho| and |T2| quantitative values. Like CubeQuant scans, MAPSS scans
65 | must also be intraregistered to ensure alignment between all volumes acquired at different echos and spin-lock times.
66 |
67 | DOSMA automatically performs intraregistration among volumes within the MAPSS scan. |T2| and |T1rho| fitting is also
68 | supported.
69 |
70 |
71 | UTE Cones
72 | --------------------------------------------------------------------------------
73 | Ultra-short echo time (UTE) Cones (or Cones) is a |T2star|-weighted sequence. In practice, many of these scans are low
74 | resolution.
75 |
76 | DOSMA supports interregistration between Cones and other scan sequences; however, registration files are optimized for
77 | registration to qDESS. |T2star| fitting is also supported.
78 |
79 |
80 | .. Substitutions
81 | .. |T2| replace:: T\ :sub:`2`
82 | .. |T1| replace:: T\ :sub:`1`
83 | .. |T1rho| replace:: T\ :sub:`1`:math:`{\rho}`
84 | .. |T2star| replace:: T\ :sub:`2`:sup:`*`
--------------------------------------------------------------------------------
/docs/source/tissues.rst:
--------------------------------------------------------------------------------
1 | .. _tissues:
2 |
3 | Tissues (dosma.tissues)
4 | ================================================================================
5 | This page details the different tissues that are supported by DOSMA and briefly
6 | explains the methods used for analysis and visualization.
7 |
8 | Tissues are loosely defined as structures of interest in anatomical regions.
 9 | For example, both the femur and femoral cartilage are considered tissues
10 | of the knee.
11 |
12 | DOSMA currently supports 4 tissues:
13 |
14 | 1. Femoral cartilage (fc)
15 | 2. Tibial cartilage (tc)
16 | 3. Patellar cartilage (pc)
17 | 4. Meniscus (men)
18 |
19 | Different tissues have different quantitative profiles, visualization methods, and
20 | analysis techniques. The modules for the supported tissues implement and use these
21 | differences for analysis of each tissue.
22 |
23 | .. automodule::
24 | dosma.tissues
25 |
26 | .. autosummary::
27 | :toctree: generated
28 | :nosignatures:
29 |
30 | dosma.tissues.FemoralCartilage
31 | dosma.tissues.TibialCartilage
32 | dosma.tissues.PatellarCartilage
33 | dosma.tissues.Meniscus
34 |
35 | Femoral Cartilage
36 | ---------------------------------------------------------------------------------
37 | Femoral cartilage has been closely studied for evaluating knee health. The structure
38 | is often divided into sub-regions to evaluate the sensitivity of different regions of
39 | the knee to chronic diseases such as osteoarthritis. These regions are identified by
40 | three planes (12 regions):
41 |
42 | 1. **Sagittal**: Medial, Lateral
43 | 2. **Coronal**: Anterior, Central, Posterior
44 | 3. **Depth**: Deep, Superficial
45 |
46 | For example, the deep-anterior-medial femoral cartilage tissue is one region.
47 | To analyze differences in these regions, |T2| maps can be unrolled onto a 2D plane :cite:`monu2017cluster`.
48 |
49 | DOSMA supports automatic division of femoral cartilage into these regions and unrolling of these regions.
50 | Unrolled maps are produced for deep, superficial, and total (combined deep and superficial) layers as seen below.
51 |
52 | .. figure:: figures/unrolled_fc.png
53 | :align: center
54 | :alt: Unrolled femoral cartilage |T2| maps
55 | :figclass: align-center
56 |
57 |
58 | Tibial Cartilage
59 | --------------------------------------------------------------------------------
60 | Tibial cartilage is a flatter surface and is often divided across the three common
61 | planes:
62 |
63 | 1. **Sagittal**: Medial, Lateral
64 | 2. **Coronal**: Anterior, Central, Posterior
65 | 3. **Axial**: Deep, Superficial
66 |
67 | DOSMA automatically divides the tissue into these regions and produces corresponding
68 | visualizations.
69 |
70 |
71 | Patellar Cartilage
72 | --------------------------------------------------------------------------------
73 | Patellar cartilage is a thin, flat tissue. Because of this structure, it is not often
74 | divided. However, there has been work that may suggest that deep/superficial differences
75 | in the patellar cartilage may be insightful.
76 |
77 | DOSMA divides patellar cartilage into deep/superficial layers across the coronal plane.
78 |
79 |
80 | Meniscus
81 | --------------------------------------------------------------------------------
82 | DOSMA supports quantitative processing and segmentation of the meniscus.
83 | However, visualization for the meniscus is not yet supported.
84 |
85 | .. Substitutions
86 | .. |T2| replace:: T\ :sub:`2`
87 | .. |T1| replace:: T\ :sub:`1`
88 | .. |T1rho| replace:: T\ :sub:`1`:math:`{\rho}`
89 | .. |T2star| replace:: T\ :sub:`2`:sup:`*`
90 |
--------------------------------------------------------------------------------
/docs/source/user_guide.rst:
--------------------------------------------------------------------------------
1 | .. _usage:
2 |
3 | User Guide
4 | ================================================================================
5 | .. toctree::
6 | :maxdepth: 2
7 | :titlesonly:
8 |
9 | guide_basic
10 | guide_registration
11 | guide_fitting
12 | guide_tutorials
13 |
14 |
--------------------------------------------------------------------------------
/docs/source/utils_api.rst:
--------------------------------------------------------------------------------
1 | .. _utils_api:
2 |
3 | Utilities
4 | ================================================================================
5 |
6 | Collect Env
7 | ---------------------------
8 | .. _utils_api_collect_env:
9 |
10 | .. autosummary::
11 | :toctree: generated
12 | :nosignatures:
13 |
14 | dosma.utils.collect_env.collect_env_info
15 |
16 |
17 | Env
18 | ---------------------------
19 | .. _utils_api_env:
20 |
21 | .. autosummary::
22 | :toctree: generated
23 | :nosignatures:
24 |
25 | dosma.debug
26 | dosma.utils.env.package_available
27 | dosma.utils.env.get_version
28 |
29 |
30 | Logger
31 | ---------------------------
32 | .. _utils_api_logger:
33 |
34 | .. autosummary::
35 | :toctree: generated
36 | :nosignatures:
37 |
38 | dosma.setup_logger
39 |
40 | If you do not want logging messages to display on your console (terminal, Jupyter Notebook, etc.),
41 | the code below will only log messages at the ERROR level or higher:
42 |
43 | >>> import logging
44 | >>> dm.setup_logger(stream_lvl=logging.ERROR)
45 |
--------------------------------------------------------------------------------
/docs/source/zreferences.rst:
--------------------------------------------------------------------------------
1 | :orphan:
2 |
3 | .. _references:
4 |
5 | References
6 | ================================================================================
7 | .. bibliography:: references.bib
8 | :all:
9 |
--------------------------------------------------------------------------------
/dosma/__init__.py:
--------------------------------------------------------------------------------
1 | """The core module contains functions and classes for medical image analysis.
2 | """
3 | from dosma.utils.logger import setup_logger # noqa
4 |
5 | from dosma import core as _core # noqa: E402
6 |
7 | from dosma.core import * # noqa
8 | from dosma.defaults import preferences # noqa
9 | from dosma.utils.collect_env import collect_env_info # noqa
10 | from dosma.utils.env import debug # noqa
11 |
12 | from dosma.core.med_volume import MedicalVolume # noqa: F401
13 | from dosma.core.io.format_io_utils import read, write # noqa: F401
14 | from dosma.core.io.format_io import ImageDataFormat # noqa: F401
15 | from dosma.core.io.dicom_io import DicomReader, DicomWriter # noqa: F401
16 | from dosma.core.io.nifti_io import NiftiReader, NiftiWriter # noqa: F401
17 | from dosma.core.device import Device, get_device, to_device # noqa: F401
18 | from dosma.core.orientation import to_affine # noqa: F401
19 | from dosma.core.registration import ( # noqa: F401
20 | register,
21 | apply_warp,
22 | symlink_elastix,
23 | unlink_elastix,
24 | )
25 | from dosma.core.fitting import ( # noqa: F401
26 | CurveFitter,
27 | PolyFitter,
28 | MonoExponentialFit,
29 | curve_fit,
30 | polyfit,
31 | )
32 |
33 | import dosma.core.numpy_routines as numpy_routines # noqa: F401
34 |
35 |
36 | __all__ = []
37 | __all__.extend(_core.__all__)
38 |
39 | setup_logger()
40 |
41 | # This line will be programatically read/write by setup.py.
42 | # Leave them at the bottom of this file and don't touch them.
43 | __version__ = "0.1.2"
44 |
--------------------------------------------------------------------------------
/dosma/app.py:
--------------------------------------------------------------------------------
"""Entry-point script that launches the DOSMA graphical user interface."""
import matplotlib

# Select the Tk backend before importing GUI modules that use matplotlib
# (hence the noqa: E402 on the imports below).
matplotlib.use("TkAgg")

from dosma.gui.ims import DosmaViewer  # noqa: E402
from dosma.utils.logger import setup_logger  # noqa: E402

# Initialize logger for the GUI.
setup_logger()

# Build the viewer and block on the Tk event loop.
app = DosmaViewer()
app.mainloop()
13 |
--------------------------------------------------------------------------------
/dosma/core/__init__.py:
--------------------------------------------------------------------------------
1 | from dosma.core import io
2 |
3 | from dosma.core import (
4 | device,
5 | fitting,
6 | med_volume,
7 | numpy_routines,
8 | orientation,
9 | quant_vals,
10 | registration,
11 | )
12 |
13 | from dosma.core.device import * # noqa
14 | from dosma.core.fitting import * # noqa
15 | from dosma.core.io import * # noqa
16 | from dosma.core.med_volume import * # noqa
17 | from dosma.core.orientation import * # noqa
18 | from dosma.core.registration import * # noqa
19 |
20 | __all__ = ["numpy_routines", "quant_vals"]
21 | __all__.extend(device.__all__)
22 | __all__.extend(fitting.__all__)
23 | __all__.extend(io.__all__)
24 | __all__.extend(med_volume.__all__)
25 | __all__.extend(orientation.__all__)
26 | __all__.extend(registration.__all__)
27 |
--------------------------------------------------------------------------------
/dosma/core/io/__init__.py:
--------------------------------------------------------------------------------
1 | from dosma.core.io import dicom_io, format_io_utils, nifti_io # noqa: F401
2 |
3 | from dosma.core.io.dicom_io import * # noqa
4 | from dosma.core.io.format_io import ImageDataFormat # noqa
5 | from dosma.core.io.format_io_utils import * # noqa
6 | from dosma.core.io.nifti_io import * # noqa
7 |
8 | __all__ = []
9 | __all__.extend(dicom_io.__all__)
10 | __all__.extend(["ImageDataFormat"])
11 | __all__.extend(format_io_utils.__all__)
12 | __all__.extend(nifti_io.__all__)
13 |
--------------------------------------------------------------------------------
/dosma/core/io/nifti_io.py:
--------------------------------------------------------------------------------
1 | """NIfTI I/O.
2 |
3 | This module contains NIfTI input/output helpers.
4 |
5 |
6 | """
7 |
8 | import os
9 | from typing import Collection
10 |
11 | import nibabel as nib
12 |
13 | from dosma.core.io.format_io import DataReader, DataWriter, ImageDataFormat
14 | from dosma.core.med_volume import MedicalVolume
15 | from dosma.defaults import AFFINE_DECIMAL_PRECISION, SCANNER_ORIGIN_DECIMAL_PRECISION
16 | from dosma.utils import io_utils
17 |
18 | __all__ = ["NiftiReader", "NiftiWriter"]
19 |
20 |
class NiftiReader(DataReader):
    """Reader for NIfTI (``.nii`` / ``.nii.gz``) image files.

    Attributes:
        data_format_code (ImageDataFormat): The supported image data format.
    """

    data_format_code = ImageDataFormat.nifti

    def load(self, file_path, mmap: bool = False) -> MedicalVolume:
        """Read a single volume from a NIfTI file.

        Each NIfTI file is expected to correspond to exactly one volume.

        Args:
            file_path (str): Path to the NIfTI file.
            mmap (bool): Whether to memory-map the data instead of loading
                it eagerly.

        Returns:
            MedicalVolume: The loaded volume.

        Raises:
            FileNotFoundError: If no file exists at ``file_path``.
            ValueError: If ``file_path`` lacks a supported NIfTI extension.
        """
        # Validate existence first, then extension, so callers get the most
        # specific error.
        if not os.path.isfile(file_path):
            raise FileNotFoundError("{} not found".format(file_path))
        if not self.data_format_code.is_filetype(file_path):
            raise ValueError(
                "{} must be a file with extension '.nii' or '.nii.gz'".format(file_path)
            )

        return MedicalVolume.from_nib(
            nib.load(file_path),
            affine_precision=AFFINE_DECIMAL_PRECISION,
            origin_precision=SCANNER_ORIGIN_DECIMAL_PRECISION,
            mmap=mmap,
        )

    def __serializable_variables__(self) -> Collection[str]:
        return self.__dict__.keys()

    read = load  # pragma: no cover
66 |
67 |
class NiftiWriter(DataWriter):
    """Writer that saves :class:`MedicalVolume` objects as NIfTI files.

    Attributes:
        data_format_code (ImageDataFormat): The supported image data format.
    """

    data_format_code = ImageDataFormat.nifti

    def save(self, volume: MedicalVolume, file_path: str):
        """Write ``volume`` to ``file_path`` in NIfTI format.

        Args:
            volume (MedicalVolume): Volume to save.
            file_path (str): Destination path ending in '.nii' or '.nii.gz'.

        Raises:
            ValueError: If ``file_path`` lacks a supported NIfTI extension.
        """
        if not self.data_format_code.is_filetype(file_path):
            raise ValueError(
                "{} must be a file with extension '.nii' or '.nii.gz'".format(file_path)
            )

        # Ensure the destination directory exists before writing.
        io_utils.mkdirs(os.path.dirname(file_path))

        nib.save(volume.to_nib(), file_path)

    def __serializable_variables__(self) -> Collection[str]:
        return self.__dict__.keys()

    write = save  # pragma: no cover
102 |
--------------------------------------------------------------------------------
/dosma/file_constants.py:
--------------------------------------------------------------------------------
1 | """Basic file constants to be shared with program"""
2 |
3 | import os
4 |
5 | # Elastix files
6 |
7 | __DIR__ = os.path.abspath(os.path.dirname(__file__))
8 | __OUT_DIR__ = os.path.abspath(os.path.expanduser("~/.dosma"))
9 | _DOSMA_ELASTIX_FOLDER = os.path.join(__DIR__, "resources/elastix")
10 | __PATH_TO_ELASTIX_FOLDER__ = os.path.join(__DIR__, "resources/elastix/params")
11 |
12 | ELASTIX_AFFINE_PARAMS_FILE = os.path.join(__PATH_TO_ELASTIX_FOLDER__, "parameters-affine.txt")
13 | ELASTIX_BSPLINE_PARAMS_FILE = os.path.join(__PATH_TO_ELASTIX_FOLDER__, "parameters-bspline.txt")
14 | ELASTIX_RIGID_PARAMS_FILE = os.path.join(__PATH_TO_ELASTIX_FOLDER__, "parameters-rigid.txt")
15 |
16 | ELASTIX_AFFINE_INTERREGISTER_PARAMS_FILE = os.path.join(
17 | __PATH_TO_ELASTIX_FOLDER__, "parameters-affine-interregister.txt"
18 | )
19 | ELASTIX_RIGID_INTERREGISTER_PARAMS_FILE = os.path.join(
20 | __PATH_TO_ELASTIX_FOLDER__, "parameters-rigid-interregister.txt"
21 | )
22 |
23 | MAPSS_ELASTIX_AFFINE_INTERREGISTER_PARAMS_FILE = os.path.join(
24 | __PATH_TO_ELASTIX_FOLDER__, "parameters-affine-interregister.txt"
25 | )
26 | MAPSS_ELASTIX_RIGID_INTERREGISTER_PARAMS_FILE = os.path.join(
27 | __PATH_TO_ELASTIX_FOLDER__, "parameters-rigid-interregister.txt"
28 | )
29 |
--------------------------------------------------------------------------------
/dosma/gui/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/dosma/gui/__init__.py
--------------------------------------------------------------------------------
/dosma/gui/defaults/skel-rotate.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/dosma/gui/defaults/skel-rotate.gif
--------------------------------------------------------------------------------
/dosma/gui/dosma_gui.py:
--------------------------------------------------------------------------------
1 | import inspect
2 | import tkinter as tk
3 | from tkinter import IntVar
4 | from typing import Dict
5 |
6 | import Pmw
7 |
8 | from dosma.cli import SEGMENTATION_MODEL_KEY, SEGMENTATION_WEIGHTS_DIR_KEY
9 | from dosma.gui.gui_utils import gui_utils as gutils
10 | from dosma.models import SUPPORTED_MODELS
11 | from dosma.models.seg_model import SegModel
12 | from dosma.tissues.tissue import Tissue
13 |
14 |
class ScanReader:
    """Tkinter form builder for a scan's command-line actions.

    Renders one radio button per action exposed by a scan class (via its
    ``cmd_line_actions()``) and, once an action is selected, a row of input
    widgets for that action's parameters. The user's selections are
    serialized into a command-line string with :meth:`get_cmd_line_str`.
    """

    def __init__(self, parent):
        """
        Args:
            parent: Parent tkinter container the generated widgets are packed into.
        """
        self.parent = parent
        self.hbox = None  # frame holding the action radio buttons
        self.action_box = None  # frame holding the selected action's parameter widgets
        self.params: Dict = {}  # parameter name -> (tk variable, is_required)
        self.action_var = None  # name of the currently selected action (str or None)
        self._action_bool = IntVar()  # radio-button selection state
        self.balloon = None  # Pmw.Balloon tooltip manager

    def load_scan(self, scan_class):
        """Build radio buttons for each command-line action of ``scan_class``.

        Any widgets left over from a previously loaded scan are destroyed
        first.

        Args:
            scan_class: Scan class exposing ``cmd_line_actions()``.
        """
        self._action_bool = IntVar()
        # Tear down widgets from a previously loaded scan, if any.
        if self.hbox:
            self.hbox.destroy()
        if self.action_box:
            self.action_box.destroy()
        self.action_var = None
        self.balloon = Pmw.Balloon()

        cmd_line_actions = scan_class.cmd_line_actions()
        hbox = tk.Frame(self.parent)
        hbox.pack(anchor="nw", side="top")

        buttons = []
        count = 0
        for a_method, a_description in cmd_line_actions:
            b = tk.Radiobutton(
                hbox,
                text=a_description.name,
                value=count,
                # Bind (method, description) as a default argument so each
                # button keeps its own pair (avoids the late-binding closure
                # pitfall in loops).
                command=lambda v=(a_method, a_description): self.show_action_params(v[0], v[1]),
                variable=self._action_bool,
            )
            self.balloon.bind(b, a_description.help)
            buttons.append(b)
            count += 1

        # -1 matches no button's value, so no action starts selected.
        self._action_bool.set(-1)

        for b in buttons:
            b.pack(anchor="nw", side="left", padx=5)

        self.hbox = hbox

    def show_action_params(self, action, action_wrapper):
        """Render input widgets for the parameters of the selected ``action``.

        Parameters are introspected from the action's signature. ``self`` and
        ``Tissue``-typed parameters are skipped; types registered in
        ``CUSTOM_TYPE_TO_GUI`` are delegated to their custom widget builder;
        everything else is rendered as a basic typed input.

        Args:
            action: Callable whose signature defines the parameters.
            action_wrapper: Wrapper providing the action name and per-parameter help.
        """
        self.action_var = action_wrapper.name
        if self.action_box:
            self.action_box.destroy()

        # Drop parameter bindings from any previously selected action.
        if self.params:
            self.params = {}

        hbox = tk.Frame(self.parent)
        hbox.pack(anchor="nw", side="top")
        self.action_box = hbox

        func_signature = inspect.signature(action)
        parameters = func_signature.parameters

        for param_name in parameters.keys():
            param = parameters[param_name]
            param_type = param.annotation
            param_default = param.default

            if param_name == "self" or param_type is Tissue:
                continue

            # # see if the type is a custom type, if not handle it as a basic type
            is_custom_arg = param_type in CUSTOM_TYPE_TO_GUI
            if is_custom_arg:
                CUSTOM_TYPE_TO_GUI[param_type](self.params, hbox, self.balloon)
                continue

            param_var = gutils.convert_base_type_to_gui(
                param_name,
                param_type,
                param_default,
                hbox,
                balloon=self.balloon,
                param_help=action_wrapper.get_param_help(param_name),
            )

            # map parameter name --> variable, is_required
            # if you have a non zero default value, it must be specified.
            is_required = (param_type is not bool and param_default == inspect._empty) or (
                param_type in [float, int] and bool(param_default)
            )
            self.params[param_name] = (param_var, is_required)

    def get_cmd_line_str(self):
        """Serialize the selected action and its parameters to a command-line string.

        Returns:
            str: String of the form ``"<action> --param value --flag"``.

        Raises:
            ValueError: If no action is selected, or a required parameter is empty.
        """
        if not self.action_var:
            raise ValueError("No action selected. Select an action to continue.")
        cmd_line_str = "%s" % self.action_var
        for param_name in self.params:
            param_var, add_arg = self.params[param_name]

            if add_arg and not param_var.get():
                raise ValueError('"%s" must have a value' % param_name)

            if param_var.get():
                cmd_line_str += " --%s" % param_name
                # Boolean flags are emitted without a value.
                if add_arg:
                    cmd_line_str += " %s" % param_var.get()

        return cmd_line_str
120 |
121 |
def add_segmentation_gui_parser(params, hbox, balloon):
    """Add GUI input widgets for segmentation-specific arguments.

    Registers two entries in ``params``: the segmentation model name
    (dropdown of supported models) and the path to the weights directory.
    Both are recorded as required string arguments.

    Args:
        params (dict): Maps parameter name -> (tk variable, is_required).
        hbox: Parent tkinter container for the generated widgets.
        balloon: Pmw balloon used to attach tooltips.
    """
    # Dropdown for selecting one of the supported segmentation models.
    model_var = gutils.convert_base_type_to_gui(
        SEGMENTATION_MODEL_KEY,
        str,
        None,
        hbox,
        balloon=balloon,
        param_help="segmentation models",
        options=SUPPORTED_MODELS,
    )
    # String parameters are required (only booleans act as optional flags).
    params[SEGMENTATION_MODEL_KEY] = (model_var, True)

    # Free-text entry for the path to the model weights directory.
    weights_var = gutils.convert_base_type_to_gui(
        SEGMENTATION_WEIGHTS_DIR_KEY,
        str,
        None,
        hbox,
        balloon=balloon,
        param_help="path to weights directory",
    )
    params[SEGMENTATION_WEIGHTS_DIR_KEY] = (weights_var, True)


# Dispatch table: parameter annotation type -> custom widget builder.
CUSTOM_TYPE_TO_GUI = {SegModel: add_segmentation_gui_parser}
150 |
--------------------------------------------------------------------------------
/dosma/gui/gui_errors.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/dosma/gui/gui_errors.py
--------------------------------------------------------------------------------
/dosma/gui/gui_utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/dosma/gui/gui_utils/__init__.py
--------------------------------------------------------------------------------
/dosma/gui/gui_utils/console_output.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import sys
3 | import tkinter as tk
4 |
5 |
class WidgetLogger(logging.StreamHandler):
    """Logging handler that mirrors log records into a tkinter text widget.

    The widget is kept in the "disabled" state so users cannot edit the log;
    it is temporarily re-enabled only while a record is appended.

    Args:
        widget: A tkinter text-like widget supporting ``config``, ``insert``,
            and ``see``.
    """

    def __init__(self, widget):
        # Bug fix: the parent class is StreamHandler, whose __init__ accepts
        # a stream. The previous call - logging.Handler.__init__(self,
        # sys.stdout) - passed the stream where Handler expects a log level,
        # which raises TypeError on instantiation.
        super().__init__(sys.stdout)
        self.setLevel(logging.INFO)
        self.widget = widget
        # Keep the widget read-only between writes.
        self.widget.config(state="disabled")

    def emit(self, record):
        """Append the formatted ``record`` to the widget and scroll to it."""
        self.widget.config(state="normal")
        # Append message (record) to the widget
        self.widget.insert(tk.END, self.format(record) + "\n")
        self.widget.see(tk.END)  # Scroll to the bottom
        self.widget.config(state="disabled")
19 |
--------------------------------------------------------------------------------
/dosma/gui/gui_utils/filedialog_reader.py:
--------------------------------------------------------------------------------
1 | import os
2 | from tkinter import StringVar, filedialog
3 |
4 | from dosma.core.io import format_io_utils as fio_utils
5 | from dosma.gui.preferences_viewer import PreferencesManager
6 |
7 |
class FileDialogReader:
    """Wrapper around tkinter file dialogs for selecting DOSMA input paths.

    Remembers the directory of the last selection so consecutive dialogs
    open where the user left off. If a ``StringVar`` is supplied, every
    selected path is also mirrored into it (useful for binding form fields).
    """

    # NOTE(review): "*.nii\.gz" contains a regex-style escape, but tkinter
    # filetypes are glob patterns - confirm the intended pattern
    # (likely "*.nii.gz").
    SUPPORTED_FORMATS = (("nifti files", "*.nii\.gz"), ("dicom files", "*.dcm"))  # noqa: W605
    # Starting directory for the first dialog; updated after each selection.
    __base_filepath = "../"

    def __init__(self, string_var: StringVar = None):
        """
        Args:
            string_var (StringVar, optional): Variable to mirror selected paths into.
        """
        self.preferences = PreferencesManager()
        self.string_var = string_var

    def load_volume(self, title="Select volume file(s)"):
        """Prompt for a volume path and load it.

        Args:
            title (str): Dialog window title.

        Returns:
            The volume loaded via ``fio_utils.generic_load``.
        """
        filepath = self.get_volume_filepath(title)

        im = fio_utils.generic_load(filepath, 1)

        return im

    def get_volume_filepath(self, title="Select path", im_type: fio_utils.ImageDataFormat = None):
        """Prompt for a volume file path.

        For DICOM selections the containing directory is returned instead of
        the file itself.

        Args:
            title (str): Dialog window title.
            im_type (ImageDataFormat, optional): If dicom, restrict the
                dialog to ``*.dcm`` files.

        Returns:
            str: Selected path, or None if the dialog was cancelled.
        """
        filetypes = None
        if im_type is fio_utils.ImageDataFormat.dicom:
            filetypes = ((im_type.name, "*.dcm"),)

        files = filedialog.askopenfilenames(
            initialdir=self.__base_filepath, title=title, filetypes=filetypes
        )
        if len(files) == 0:
            return

        # Only the first selected file is used.
        filepath = files[0]
        self.__base_filepath = os.path.dirname(filepath)

        # For a .dcm file, return its parent directory (the series folder).
        if filepath.endswith(".dcm"):
            filepath = os.path.dirname(filepath)

        if self.string_var:
            self.string_var.set(filepath)

        return filepath

    def get_filepath(self, title="Select file"):
        """Prompt for a single file.

        Args:
            title (str): Dialog window title.

        Returns:
            str: Selected file path, or None if cancelled.
        """
        file_str = filedialog.askopenfilename(initialdir=self.__base_filepath, title=title)
        if not file_str:
            return

        if self.string_var:
            self.string_var.set(file_str)

        return file_str

    def get_dirpath(self, title="Select directory"):
        """Prompt for an existing directory.

        Args:
            title (str): Dialog window title.

        Returns:
            str: Selected directory path, or None if cancelled.
        """
        file_str = filedialog.askdirectory(initialdir=self.__base_filepath, title=title)
        if not file_str:
            return

        if self.string_var:
            self.string_var.set(file_str)

        return file_str

    def get_save_dirpath(self):
        """Prompt for a save directory (which need not already exist).

        Returns:
            str: Selected directory path, or None if cancelled.
        """
        file_str = filedialog.askdirectory(initialdir=self.__base_filepath, mustexist=False)
        if not file_str:
            return

        if self.string_var:
            self.string_var.set(file_str)

        return file_str
--------------------------------------------------------------------------------
/dosma/gui/im_viewer.py:
--------------------------------------------------------------------------------
1 | from __future__ import print_function
2 |
3 | import numpy as np
4 |
5 | import matplotlib
6 |
7 | matplotlib.use("TkAgg")
8 |
9 |
class IndexTracker:
    """Scroll-wheel slice browser for a 3D/4D volume on a matplotlib axis."""

    def __init__(self, ax, x):
        self.ax = ax
        self.im = None  # image artist, created lazily on first draw
        self.ind = 0  # index of the currently displayed slice
        self.x = x  # property setter validates input and triggers first draw

    def onscroll(self, event):
        """Advance ("down") or rewind ("up") one slice, clamped to bounds."""
        step = {"down": 1, "up": -1}.get(event.button, 0)
        self.ind = max(0, min(self.ind + step, self.num_slices - 1))
        self.update()

    def update(self):
        """Render the current slice on the axis."""
        frame = np.squeeze(self._x_normalized[:, :, self.ind, :])
        if self.im is None:
            # First draw: create the image artist and hide the tick marks.
            self.im = self.ax.imshow(frame, cmap="gray")
            self.ax.get_xaxis().set_ticks([])
            self.ax.get_yaxis().set_ticks([])
        self.im.set_data(frame)
        self.ax.set_ylabel("slice %s" % (self.ind + 1))
        self.im.axes.figure.canvas.draw()

    @property
    def x(self):
        """The displayed volume as a 4D array (height, width, slices, channels)."""
        return self._x

    @x.setter
    def x(self, value):
        assert type(value) is np.ndarray and (
            value.ndim == 3 or value.ndim == 4
        ), "image must be 3d (grayscale) or 4d (rgb) ndarray"
        # Promote grayscale volumes to 4D with a singleton channel axis.
        volume = value[..., np.newaxis] if value.ndim == 3 else value
        self._x = volume
        self.num_slices = volume.shape[2]
        self._x_normalized = volume
        self.update()
54 |
--------------------------------------------------------------------------------
/dosma/models/__init__.py:
--------------------------------------------------------------------------------
1 | from dosma.models import oaiunet2d, stanford_qdess, util
2 | from dosma.models.oaiunet2d import * # noqa
3 | from dosma.models.stanford_qdess import * # noqa: F401, F403
4 | from dosma.models.util import * # noqa
5 |
6 | __all__ = []
7 | __all__.extend(util.__all__)
8 | __all__.extend(oaiunet2d.__all__)
9 | __all__.extend(stanford_qdess.__all__)
10 |
--------------------------------------------------------------------------------
/dosma/models/seg_model.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | import numpy as np
4 |
5 | from dosma.core.med_volume import MedicalVolume
6 | from dosma.defaults import preferences
7 |
8 | try:
9 | import keras.backend as K
10 | except ImportError: # pragma: no cover
11 | pass
12 |
13 |
class SegModel(ABC):
    """Abstract base class for tissue segmentation models.

    Args:
        input_shape (Tuple[int] or List[Tuple[int]]): Shape(s) for initializing input(s)
            into model in format (height, width, channels).
        weights_path (str): Filepath to weights used to initialize the model.
        force_weights (`bool`, optional): If `True`, load weights without any checking.
            Keras/Tensorflow only.
            NOTE(review): this base ``__init__`` accepts ``force_weights`` but does
            not forward it to ``build_model`` — presumably concrete models that
            need it override ``__init__``; confirm against subclasses.
    """

    # Command-line identifiers for the model. Each concrete segmentation model
    # must provide at least one alias; the first is treated as primary.
    ALIASES = [""]  # each segmentation model must have an alias

    def __init__(self, input_shape, weights_path, force_weights=False):
        # Inference batch size, read from user preferences at construction time.
        self.batch_size = preferences.segmentation_batch_size
        # Concrete architecture built (and weights loaded) by the subclass.
        self.seg_model = self.build_model(input_shape, weights_path)

    @abstractmethod
    def build_model(self, input_shape, weights_path):
        """Build a segmentation model architecture and load weights.

        Args:
            input_shape: Input shape of volume.
            weights_path: Filepath to weights used to initialize the model.

        Returns:
            A segmentation model that can be used for segmenting tissues
            (a Keras/TF/PyTorch model).
        """
        pass

    @abstractmethod
    def generate_mask(self, volume: MedicalVolume):
        """Segment the MRI volumes.

        Args:
            volume (MedicalVolume): Volume to segment in proper orientation.

        Returns:
            :class:`MedicalVolume` or List[MedicalVolume]: Volumes are binarized (0,1)
            uint8 3D ndarray of shape ``volume.shape``.

        Raises:
            ValueError: If volumes is not 3D ndarray
                or if tissue is not a string or not in list permitted tissues.

        """
        pass

    def __call__(self, *args, **kwargs):
        # Convenience alias: calling the model delegates to generate_mask().
        return self.generate_mask(*args, **kwargs)

    def __preprocess_volume__(self, volume: np.ndarray) -> np.ndarray:
        """Preprocess volume prior to feeding it into the segmentation network.

        Default implementation is the identity; subclasses may override.

        Args:
            volume (np.ndarray): The volume to preprocess.

        Returns:
            np.ndarray: The preprocessed volume.
        """
        return volume

    def __postprocess_volume__(self, volume: np.ndarray) -> np.ndarray:
        """Post-process logits (probabilities) or a binarized mask.

        Default implementation is the identity; subclasses may override.

        Args:
            volume (np.ndarray): The volume to postprocess.

        Returns:
            np.ndarray: The postprocessed volume.
        """
        return volume
80 |
81 |
class KerasSegModel(SegModel):
    """Abstract wrapper for Keras models used for semantic segmentation."""

    def build_model(self, input_shape, weights_path=None):
        """Build the Keras architecture and optionally load weights.

        Args:
            input_shape: Tuple or list of tuples for initializing input(s)
                into the Keras model.
            weights_path (str, optional): Filepath to weights. If falsy, the
                architecture keeps its initial (random) weights.

        Returns:
            A Keras model.
        """
        keras_model = self.__load_keras_model__(input_shape)
        if weights_path:
            keras_model.load_weights(weights_path)

        return keras_model

    @abstractmethod
    def __load_keras_model__(self, input_shape):
        """Build the Keras architecture (weights are loaded by ``build_model``).

        Args:
            input_shape: Tuple or list of tuples for initializing input(s)
                into the Keras model.

        Returns:
            A Keras model.
        """
        pass

    def __del__(self):
        # Release TF/Keras graph memory tied to this model.
        # BUG FIX: ``K`` is undefined when the optional keras import failed,
        # and may already be torn down (set to None) during interpreter
        # shutdown — either case previously raised from __del__.
        try:
            K.clear_session()
        except (NameError, AttributeError):
            pass
107 |
108 |
# ============================ Preprocessing utils ============================
__VOLUME_DIMENSIONS__ = 3  # expected rank of input MRI volumes
__EPSILON__ = 1e-8  # small constant to avoid division by zero


def whiten_volume(x: np.ndarray, eps: float = 0.0) -> np.ndarray:
    """Whiten volume by the mean and standard deviation of all pixels.

    Args:
        x (ndarray): 3D numpy array (MRI volume).
        eps (float, optional): Epsilon to avoid division by 0.

    Returns:
        ndarray: A numpy array with mean ~ 0 and standard deviation ~ 1.

    Raises:
        ValueError: If ``x`` is not 3D.
    """
    # BUG FIX: the error message used ``x.ndims``, which is not an ndarray
    # attribute (the attribute is ``ndim``), so the error path itself raised
    # AttributeError instead of the intended ValueError.
    if x.ndim != __VOLUME_DIMENSIONS__:
        raise ValueError(f"Input has {x.ndim} dimensions. Expected {__VOLUME_DIMENSIONS__}")

    return (x - np.mean(x)) / (np.std(x) + eps)
128 |
--------------------------------------------------------------------------------
/dosma/models/util.py:
--------------------------------------------------------------------------------
1 | """
2 | Functions for loading Keras models
3 |
4 | @author: Arjun Desai
5 | (C) Stanford University, 2019
6 | """
7 | import os
8 | import yaml
9 | from functools import partial
10 | from typing import Sequence
11 |
12 | from dosma.models.oaiunet2d import IWOAIOAIUnet2D, IWOAIOAIUnet2DNormalized, OAIUnet2D
13 | from dosma.models.seg_model import SegModel
14 |
# Public API of this module.
__all__ = ["get_model", "SUPPORTED_MODELS"]

# Network architectures currently supported
__SUPPORTED_MODELS__ = [OAIUnet2D, IWOAIOAIUnet2D, IWOAIOAIUnet2DNormalized]

# Initialize supported models for the command line.
# The first alias of each model serves as its command-line identifier.
SUPPORTED_MODELS = [x.ALIASES[0] for x in __SUPPORTED_MODELS__]
22 |
23 |
def get_model(model_str, input_shape, weights_path, **kwargs):
    """Look up and instantiate a supported segmentation model.

    Args:
        model_str: Model identifier — one of the model's aliases or its class name.
        input_shape: Tuple or list of tuples for initializing input(s) into the model.
        weights_path: Filepath to weights used to initialize the model.
        **kwargs: Additional keyword arguments for the model constructor.

    Returns:
        An instantiated segmentation model.

    Raises:
        LookupError: If ``model_str`` does not match any supported model.
    """
    model_cls = next(
        (m for m in __SUPPORTED_MODELS__ if model_str in m.ALIASES or model_str == m.__name__),
        None,
    )
    if model_cls is None:
        raise LookupError("%s model type not supported" % model_str)
    return model_cls(input_shape, weights_path, **kwargs)
36 |
37 |
def model_from_config(cfg_file_or_dict, weights_dir=None, **kwargs) -> SegModel:
    """Builds a new model from a config file.

    This function is useful for building models that have similar structure/architecture
    to existing models supported in DOSMA, but have different weights and categories.
    The config defines what dosma model should be used as a base, what weights should be loaded,
    and what are the categories.

    The config file should be a yaml file that has the following keys:
    * "DOSMA_MODEL": The base model that exists in DOSMA off of which data should be built.
    * "CATEGORIES": The categories that are supposed to be loaded.
    * "WEIGHTS_FILE": The basename of (or full path to) weights that should be loaded.

    Args:
        cfg_file_or_dict (str or dict): The yaml file or dictionary corresponding to the config.
        weights_dir (str): The directory where weights are stored. If not specified, assumes
            "WEIGHTS_FILE" field in the config is the full path to the weights.
        **kwargs: Keyword arguments for base model `__init__`

    Returns:
        SegModel: A segmentation model with appropriate changes to `generate_mask` to produce
        the right masks.

    Raises:
        LookupError: If the "DOSMA_MODEL" field does not name a supported model.
        ValueError: If the model's output count does not match the category count
            (raised when the wrapped ``generate_mask`` is called).
    """

    def _gen_mask(func, *_args, **_kwargs):
        out = func(*_args, **_kwargs)
        if isinstance(out, dict):
            # Assumes that the dict is ordered, which it is for python>=3.6
            out = out.values()
        elif not isinstance(out, Sequence):
            out = [out]
        if len(categories) != len(out):
            raise ValueError("Got {} outputs, but {} categories".format(len(out), len(categories)))
        # Use a distinct loop variable to avoid shadowing `out`.
        return {cat: mask for cat, mask in zip(categories, out)}

    if isinstance(cfg_file_or_dict, str):
        with open(cfg_file_or_dict, "r") as f:
            # BUG FIX: `yaml.load(f)` without a Loader is unsafe on untrusted
            # files and raises TypeError on PyYAML>=6. Configs only contain
            # plain scalars/lists, so safe_load is sufficient.
            cfg = yaml.safe_load(f)
    else:
        cfg = cfg_file_or_dict

    base_model = cfg["DOSMA_MODEL"]
    categories = cfg["CATEGORIES"]
    weights = cfg["WEIGHTS_FILE"]
    if not os.path.isfile(weights):
        assert weights_dir, "`weights_dir` must be specified"
        weights = os.path.join(weights_dir, cfg["WEIGHTS_FILE"])

    try:
        model: SegModel = get_model(base_model, weights_path=weights, force_weights=True, **kwargs)
    except LookupError as e:
        # Chain the cause so the original lookup failure is preserved.
        raise LookupError("BASE_MODEL '{}' not supported \n{}".format(base_model, e)) from e

    # Override generate mask function
    model.generate_mask = partial(_gen_mask, model.generate_mask)

    return model
95 |
--------------------------------------------------------------------------------
/dosma/msk/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/dosma/msk/__init__.py
--------------------------------------------------------------------------------
/dosma/msk/knee.py:
--------------------------------------------------------------------------------
1 | """
2 | Command line interface declaration for knee-related analyses
3 |
4 | @author: Arjun Desai
5 | (C) Stanford University, 2019
6 | """
7 |
8 | import logging
9 | import uuid
10 |
11 | from dosma.core.quant_vals import QuantitativeValueType as QV
12 | from dosma.defaults import preferences
13 | from dosma.tissues import FemoralCartilage, Meniscus, PatellarCartilage, TibialCartilage
14 |
# Command-line argument keys for the knee subcommand.
KNEE_KEY = "knee"
MEDIAL_TO_LATERAL_KEY = "ml"  # flag: sagittal slices go medial -> lateral
TISSUES_KEY = "tissues"
LOAD_KEY = "load"
SAVE_KEY = "save"
PID_KEY = "pid"

# Tissues and quantitative values the knee pipeline can analyze.
SUPPORTED_TISSUES = [FemoralCartilage, Meniscus, TibialCartilage, PatellarCartilage]
SUPPORTED_QUANTITATIVE_VALUES = [QV.T2, QV.T1_RHO, QV.T2_STAR]

_logger = logging.getLogger(__name__)
26 |
27 |
def knee_parser(base_parser):
    """Register the ``knee`` subcommand on the command-line parser.

    Args:
        base_parser: The argparse subparsers object to add the knee
            subcommand to.
    """
    parser_tissue = base_parser.add_parser(
        KNEE_KEY, help="calculate/analyze quantitative data for knee"
    )

    # `store_true` is the idiomatic equivalent of
    # `store_const` with const=True, default=False.
    parser_tissue.add_argument(
        "--%s" % MEDIAL_TO_LATERAL_KEY,
        action="store_true",
        help="defines slices in sagittal direction going from medial -> lateral",
    )

    # NOTE: the default pid is generated once, when the parser is built.
    parser_tissue.add_argument(
        "--%s" % PID_KEY, nargs="?", default=str(uuid.uuid4()), help="specify pid"
    )

    # One boolean flag per supported tissue.
    for tissue in SUPPORTED_TISSUES:
        parser_tissue.add_argument(
            "--%s" % tissue.STR_ID,
            action="store_true",
            help="analyze %s" % tissue.FULL_NAME,
        )

    # One boolean flag per supported quantitative value.
    # (The previous `qvs_dict` accumulator was never used and has been removed.)
    for qv in SUPPORTED_QUANTITATIVE_VALUES:
        qv_name = qv.name.lower()
        parser_tissue.add_argument(
            "--%s" % qv_name,
            action="store_true",
            help="quantify %s" % qv_name,
        )

    parser_tissue.set_defaults(func=handle_knee)
71 |
72 |
def handle_knee(vargin):
    """Run knee analyses based on parsed command-line arguments.

    Args:
        vargin (dict): Parsed command-line arguments keyed by option name.

    Returns:
        The list of tissue objects that were analyzed and saved.
    """
    load_path = vargin[LOAD_KEY]
    medial_to_lateral = vargin[MEDIAL_TO_LATERAL_KEY]
    pid = vargin[PID_KEY]

    # Default to every supported tissue when none were selected.
    tissues = vargin[TISSUES_KEY]
    if not tissues:
        _logger.info("Computing for all supported knee tissues...")
        tissues = [tissue_cls() for tissue_cls in SUPPORTED_TISSUES]

    # Collect the quantitative values the user flagged; default to all.
    qvs = [qv for qv in SUPPORTED_QUANTITATIVE_VALUES if vargin[qv.name.lower()]]
    if not qvs:
        _logger.info("Computing for all supported quantitative values...")
        qvs = SUPPORTED_QUANTITATIVE_VALUES

    for tissue in tissues:
        tissue.pid = pid
        tissue.medial_to_lateral = medial_to_lateral
        tissue.load_data(load_path)

        _logger.info("")
        _logger.info("==" * 40)
        _logger.info(tissue.FULL_NAME)
        _logger.info("==" * 40)

        for qv in qvs:
            # load file
            _logger.info("Analyzing %s" % qv.name.lower())
            tissue.calc_quant_vals()

    for tissue in tissues:
        tissue.save_data(vargin[SAVE_KEY], data_format=preferences.image_data_format)

    return tissues
118 |
--------------------------------------------------------------------------------
/dosma/resources/elastix/params/parameters-affine-interregister.txt:
--------------------------------------------------------------------------------
1 | // *********************
2 | // * rigid
3 | // *********************
4 |
5 | // *********************
6 | // * ImageTypes
7 | // *********************
8 | (FixedInternalImagePixelType "float")
9 | (MovingInternalImagePixelType "float")
10 | (UseDirectionCosines "true")
11 |
12 | // *********************
13 | // * Components
14 | // *********************
15 | (FixedImagePyramid "FixedSmoothingImagePyramid")
16 | (MovingImagePyramid "MovingSmoothingImagePyramid")
17 | (Registration "MultiResolutionRegistration")
18 | (Interpolator "BSplineInterpolator")
19 | (ResampleInterpolator "FinalBSplineInterpolator")
20 | (Metric "AdvancedMattesMutualInformation")
21 | (BSplineInterpolationOrder 3)
22 | (Resampler "DefaultResampler")
23 | (Optimizer "AdaptiveStochasticGradientDescent")
24 | (Transform "EulerTransform")
25 |
26 | // *********************
27 | // * Mask settings
28 | // *********************
29 | (ErodeMask "false")
30 | (ErodeFixedMask "false")
31 |
32 | // *********************
33 | // * Optimizer settings
34 | // *********************
35 | (NumberOfResolutions 3)
36 | (MaximumNumberOfIterations 400)
37 | (ASGDParameterEstimationMethod "Original")
38 | (AutomaticParameterEstimation "true")
39 | (AutomaticTransformInitialization "true")
40 | (AutomaticScalesEstimation "true")
41 |
42 | // *********************
43 | // * Transform settings
44 | // *********************
45 | (HowToCombineTransforms "Compose")
46 |
47 | // *********************
48 | // * Pyramid settings
49 | // *********************
50 | (NumberOfHistogramBins 32)
51 |
52 |
53 | // *********************
54 | // * Sampler parameters
55 | // *********************
56 | (NumberOfSpatialSamples 2000)
57 | //(ImageSampler "RandomSparseMask")
58 | (ImageSampler "RandomCoordinate")
59 | (CheckNumberOfSamples "true")
60 | (NewSamplesEveryIteration "true")
61 | (FinalBSplineInterpolationOrder 3)
62 |
63 | // *********************
64 | // * Output settings
65 | // *********************
66 | (DefaultPixelValue 0)
67 | (WriteTransformParametersEachIteration "false")
68 | (WriteResultImage "true")
69 | (ResultImageFormat "nii.gz")
70 | (ResultImagePixelType "float")
71 |
--------------------------------------------------------------------------------
/dosma/resources/elastix/params/parameters-affine.txt:
--------------------------------------------------------------------------------
1 | // *********************
2 | // * affine
3 | // *********************
4 |
5 | // *********************
6 | // * ImageTypes
7 | // *********************
8 | (FixedInternalImagePixelType "float")
9 | (MovingInternalImagePixelType "float")
10 | (UseDirectionCosines "true")
11 |
12 | // *********************
13 | // * Components
14 | // *********************
15 | (FixedImagePyramid "FixedSmoothingImagePyramid")
16 | (MovingImagePyramid "MovingSmoothingImagePyramid")
17 | (Registration "MultiResolutionRegistration")
18 | (Interpolator "BSplineInterpolator")
19 | (ResampleInterpolator "FinalBSplineInterpolator")
20 | (Metric "AdvancedMattesMutualInformation")
21 | (BSplineInterpolationOrder 1)
22 | (Resampler "DefaultResampler")
23 | (Optimizer "AdaptiveStochasticGradientDescent")
24 | (Transform "AffineTransform")
25 |
26 | // *********************
27 | // * Mask settings
28 | // *********************
29 | (ErodeMask "false")
30 | (ErodeFixedMask "false")
31 |
32 | // *********************
33 | // * Optimizer settings
34 | // *********************
35 | (NumberOfResolutions 1)
36 | (MaximumNumberOfIterations 1000)
37 | (ASGDParameterEstimationMethod "Original")
38 | (AutomaticParameterEstimation "true")
39 | (AutomaticTransformInitialization "true")
40 | (AutomaticScalesEstimation "true")
41 |
42 | // *********************
43 | // * Transform settings
44 | // *********************
45 | (HowToCombineTransforms "Compose")
46 |
47 | // *********************
48 | // * Pyramid settings
49 | // *********************
50 | (NumberOfHistogramBins 32)
51 |
52 |
53 | // *********************
54 | // * Sampler parameters
55 | // *********************
56 | (NumberOfSpatialSamples 2000)
57 | (ImageSampler "RandomCoordinate")
58 | (CheckNumberOfSamples "true")
59 | (NewSamplesEveryIteration "true")
60 | (FinalBSplineInterpolationOrder 3)
61 |
62 | // *********************
63 | // * Output settings
64 | // *********************
65 | (DefaultPixelValue 0)
66 | (WriteTransformParametersEachIteration "false")
67 | (WriteResultImage "true")
68 | (ResultImageFormat "nii.gz")
69 | (ResultImagePixelType "float")
70 |
--------------------------------------------------------------------------------
/dosma/resources/elastix/params/parameters-bspline.txt:
--------------------------------------------------------------------------------
1 | // *********************
2 | // * bspline
3 | // *********************
4 |
5 | // *********************
6 | // * ImageTypes
7 | // *********************
8 | (FixedInternalImagePixelType "float")
9 | (MovingInternalImagePixelType "float")
10 | (UseDirectionCosines "true")
11 |
12 | // *********************
13 | // * Components
14 | // *********************
15 | (FixedImagePyramid "FixedSmoothingImagePyramid")
16 | (MovingImagePyramid "MovingSmoothingImagePyramid")
17 | (Registration "MultiResolutionRegistration")
18 | (Interpolator "BSplineInterpolator")
19 | (ResampleInterpolator "FinalBSplineInterpolator")
20 | (Metric "AdvancedMattesMutualInformation")
21 | (BSplineInterpolationOrder 1)
22 | (Resampler "DefaultResampler")
23 | (Optimizer "AdaptiveStochasticGradientDescent")
24 | (Transform "BSplineTransform")
25 | (FinalGridSpacingInPhysicalUnits 30 30 30)
26 | (MovingImageDerivativeScales 1 1 1)
27 |
28 | // *********************
29 | // * Mask settings
30 | // *********************
31 | (ErodeMask "false")
32 | (ErodeFixedMask "false")
33 |
34 | // *********************
35 | // * Optimizer settings
36 | // *********************
37 | (NumberOfResolutions 3)
38 | (MaximumNumberOfIterations 1000)
39 | (ASGDParameterEstimationMethod "Original")
40 | (AutomaticParameterEstimation "true")
41 | (AutomaticTransformInitialization "true")
42 | (AutomaticScalesEstimation "true")
43 |
44 | // *********************
45 | // * Transform settings
46 | // *********************
47 | (HowToCombineTransforms "Compose")
48 |
49 | // *********************
50 | // * Pyramid settings
51 | // *********************
52 | (NumberOfHistogramBins 32)
53 |
54 |
55 | // *********************
56 | // * Sampler parameters
57 | // *********************
58 | (NumberOfSpatialSamples 2000)
59 | (ImageSampler "RandomCoordinate")
60 | (CheckNumberOfSamples "true")
61 | (NewSamplesEveryIteration "true")
62 | (FinalBSplineInterpolationOrder 3)
63 |
64 | // *********************
65 | // * Output settings
66 | // *********************
67 | (DefaultPixelValue 0)
68 | (WriteTransformParametersEachIteration "false")
69 | (WriteResultImage "true")
70 | (ResultImageFormat "nii.gz")
71 | (ResultImagePixelType "float")
72 |
--------------------------------------------------------------------------------
/dosma/resources/elastix/params/parameters-rigid-interregister.txt:
--------------------------------------------------------------------------------
1 | // *********************
2 | // * rigid
3 | // *********************
4 |
5 | // *********************
6 | // * ImageTypes
7 | // *********************
8 | (FixedInternalImagePixelType "float")
9 | (MovingInternalImagePixelType "float")
10 | (UseDirectionCosines "true")
11 |
12 | // *********************
13 | // * Components
14 | // *********************
15 | (FixedImagePyramid "FixedSmoothingImagePyramid")
16 | (MovingImagePyramid "MovingSmoothingImagePyramid")
17 | (Registration "MultiResolutionRegistration")
18 | (Interpolator "BSplineInterpolator")
19 | (ResampleInterpolator "FinalBSplineInterpolator")
20 | (Metric "AdvancedMattesMutualInformation")
21 | (BSplineInterpolationOrder 3)
22 | (Resampler "DefaultResampler")
23 | (Optimizer "AdaptiveStochasticGradientDescent")
24 | (Transform "EulerTransform")
25 |
26 | // *********************
27 | // * Mask settings
28 | // *********************
29 | (ErodeMask "false")
30 | (ErodeFixedMask "false")
31 |
32 | // *********************
33 | // * Optimizer settings
34 | // *********************
35 | (NumberOfResolutions 3)
36 | (MaximumNumberOfIterations 400)
37 | (ASGDParameterEstimationMethod "Original")
38 | (AutomaticParameterEstimation "true")
39 | (AutomaticTransformInitialization "true")
40 | (AutomaticScalesEstimation "true")
41 |
42 | // *********************
43 | // * Transform settings
44 | // *********************
45 | (HowToCombineTransforms "Compose")
46 |
47 | // *********************
48 | // * Pyramid settings
49 | // *********************
50 | (NumberOfHistogramBins 32)
51 |
52 |
53 | // *********************
54 | // * Sampler parameters
55 | // *********************
56 | (NumberOfSpatialSamples 2000)
57 | //(ImageSampler "RandomSparseMask")
58 | (ImageSampler "RandomCoordinate")
59 | (CheckNumberOfSamples "true")
60 | (NewSamplesEveryIteration "true")
61 | (FinalBSplineInterpolationOrder 3)
62 |
63 | // *********************
64 | // * Output settings
65 | // *********************
66 | (DefaultPixelValue 0)
67 | (WriteTransformParametersEachIteration "false")
68 | (WriteResultImage "true")
69 | (ResultImageFormat "nii.gz")
70 | (ResultImagePixelType "float")
71 |
--------------------------------------------------------------------------------
/dosma/resources/elastix/params/parameters-rigid.txt:
--------------------------------------------------------------------------------
1 | // *********************
2 | // * rigid
3 | // *********************
4 |
5 | // *********************
6 | // * ImageTypes
7 | // *********************
8 | (FixedInternalImagePixelType "float")
9 | (MovingInternalImagePixelType "float")
10 | (UseDirectionCosines "true")
11 |
12 | // *********************
13 | // * Components
14 | // *********************
15 | (FixedImagePyramid "FixedSmoothingImagePyramid")
16 | (MovingImagePyramid "MovingSmoothingImagePyramid")
17 | (Registration "MultiResolutionRegistration")
18 | (Interpolator "BSplineInterpolator")
19 | (ResampleInterpolator "FinalBSplineInterpolator")
20 | (Metric "AdvancedMattesMutualInformation")
21 | (BSplineInterpolationOrder 1)
22 | (Resampler "DefaultResampler")
23 | (Optimizer "AdaptiveStochasticGradientDescent")
24 | (Transform "EulerTransform")
25 |
26 | // *********************
27 | // * Mask settings
28 | // *********************
29 | (ErodeMask "false")
30 | (ErodeFixedMask "false")
31 |
32 | // *********************
33 | // * Optimizer settings
34 | // *********************
35 | (NumberOfResolutions 3)
36 | (MaximumNumberOfIterations 1000)
37 | (ASGDParameterEstimationMethod "Original")
38 | (AutomaticParameterEstimation "true")
39 | (AutomaticTransformInitialization "true")
40 | (AutomaticScalesEstimation "true")
41 |
42 | // *********************
43 | // * Transform settings
44 | // *********************
45 | (HowToCombineTransforms "Compose")
46 |
47 | // *********************
48 | // * Pyramid settings
49 | // *********************
50 | (NumberOfHistogramBins 32)
51 |
52 |
53 | // *********************
54 | // * Sampler parameters
55 | // *********************
56 | (NumberOfSpatialSamples 2000)
57 | (ImageSampler "RandomCoordinate")
58 | (CheckNumberOfSamples "true")
59 | (NewSamplesEveryIteration "true")
60 | (FinalBSplineInterpolationOrder 3)
61 |
62 | // *********************
63 | // * Output settings
64 | // *********************
65 | (DefaultPixelValue 0)
66 | (WriteTransformParametersEachIteration "false")
67 | (WriteResultImage "true")
68 | (ResultImageFormat "nii.gz")
69 | (ResultImagePixelType "float")
70 |
--------------------------------------------------------------------------------
/dosma/resources/templates/.preferences.yml:
--------------------------------------------------------------------------------
1 | data:
2 | format: 'nifti'
3 | fitting:
4 | r2.threshold: 0.9
5 | logging:
6 | nipype: "file_stdout"
7 | registration:
8 | mask:
9 | dilation.rate: 9.0
10 | dilation.threshold: 0.0001
11 | segmentation:
12 | batch.size: 16
13 | visualization:
14 | matplotlib:
15 | rcParams:
16 | font.size: 16
17 | savefig.dpi: 200
18 | savefig.format: 'png'
19 | use.vmax: False
20 |
--------------------------------------------------------------------------------
/dosma/resources/templates/.preferences_cmd_line_schema.yml:
--------------------------------------------------------------------------------
1 | # """Schema for command line information for preferences.
2 | # All preferences options that can be accessed from the command-line/GUI should be listed here.
3 | # If new argument is added, please follow the Argparse schema with at minimum the following arguments:
4 | # -name: A readable name
5 | # -aliases: The names given to the command-line argument.
6 | # -type: The expected type.
7 | # -nargs: Number of arguments expected. This should typically be '?', as none of these flags are required
8 | # -help: A short help description.
9 | #
10 | # Fields should first start with the fields above, then follow in alphabetical order
11 | # """
12 | data:
13 | format:
14 | name: 'Data Format'
15 | aliases: ['--df', '--data_format']
16 | type: 'str'
17 | nargs: '?'
18 | choices: ['dicom', 'nifti']
19 | help: 'format to save medical data'
20 | fitting:
21 | r2.threshold:
22 | name: 'r2 Threshold'
23 | aliases: ['--r2', '--r2_threshold']
24 | metavar: 'T'
25 | type: 'float'
26 | nargs: '?'
27 | help: 'r^2 threshold for goodness of fit. Range [0-1).'
28 | logging:
29 | nipype:
30 | name: 'nipype logging'
31 | aliases: ['--nipype_logging']
    type: 'str'
33 | nargs: '?'
34 | choices: ['file', 'file_split', 'file_stdout', 'file_stderr', 'stream', 'allatonce', 'none']
35 | help: 'nipype library logging'
36 | visualization:
37 | matplotlib:
38 | rcParams:
39 | savefig.dpi:
40 | name: 'Figure dpi'
41 | aliases: ['--dpi']
42 | metavar: 'DPI'
43 | type: 'float'
44 | nargs: '?'
45 | help: 'figure resolution in dots per inch (dpi)'
46 | savefig.format:
47 | name: 'Figure format'
48 | aliases: ['--vf', '--visualization_format']
49 | type: 'str'
50 | nargs: '?'
51 | choices: ['png', 'eps', 'pdf', 'jpeg', 'pgf', 'ps', 'raw', 'rgba', 'svg', 'svgz', 'tiff']
52 | help: 'format to save figures'
--------------------------------------------------------------------------------
/dosma/scan_sequences/__init__.py:
--------------------------------------------------------------------------------
1 | from dosma.scan_sequences import mri
2 |
3 | from dosma.scan_sequences import scans
4 |
5 | from dosma.scan_sequences.mri import * # noqa
6 | from dosma.scan_sequences.scans import * # noqa
7 |
8 | __all__ = []
9 | __all__.extend(mri.__all__)
10 | __all__.extend(scans.__all__)
11 |
--------------------------------------------------------------------------------
/dosma/scan_sequences/mri/__init__.py:
--------------------------------------------------------------------------------
1 | from dosma.scan_sequences.mri import cones, cube_quant, mapss, qdess # noqa: F401
2 |
3 | from dosma.scan_sequences.mri.cones import * # noqa
4 | from dosma.scan_sequences.mri.cube_quant import * # noqa
5 | from dosma.scan_sequences.mri.mapss import * # noqa
6 | from dosma.scan_sequences.mri.qdess import * # noqa
7 |
8 | __all__ = []
9 | __all__.extend(cones.__all__)
10 | __all__.extend(cube_quant.__all__)
11 | __all__.extend(mapss.__all__)
12 | __all__.extend(qdess.__all__)
13 |
--------------------------------------------------------------------------------
/dosma/tissues/__init__.py:
--------------------------------------------------------------------------------
1 | from dosma.tissues import femoral_cartilage, meniscus, patellar_cartilage, tibial_cartilage
2 | from dosma.tissues.femoral_cartilage import * # noqa
3 | from dosma.tissues.meniscus import * # noqa
4 | from dosma.tissues.patellar_cartilage import * # noqa
5 | from dosma.tissues.tibial_cartilage import * # noqa
6 | from dosma.tissues.tissue import * # noqa
7 |
8 | __all__ = []
9 | __all__.extend(femoral_cartilage.__all__)
10 | __all__.extend(meniscus.__all__)
11 | __all__.extend(patellar_cartilage.__all__)
12 | __all__.extend(tibial_cartilage.__all__)
13 |
--------------------------------------------------------------------------------
/dosma/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/dosma/utils/__init__.py
--------------------------------------------------------------------------------
/dosma/utils/cmd_line_utils.py:
--------------------------------------------------------------------------------
1 | __all__ = ["ActionWrapper"]
2 |
3 |
class ActionWrapper(object):
    """Wrapper for actions (methods) that can be executed via command-line.

    Examples include `segment` scans, `interregister` scans, etc.

    Actions are instance methods of classes that can be executed via the command line.
    They are typically associated with different scans.

    To expose these methods to the command-line interface, we wrap these actions as subparsers.
    Parameters for the method are arguments of the subparser.

    Keyword Args:
        help (str): Help text for the action. Defaults to ``""``.
        aliases (list[str]): Alternate names for the action.
        param_help (dict[str, str]): Help text keyed by parameter name.
        alternative_param_names (dict[str, list | tuple]): Alternate names
            (a list or tuple of strings) keyed by parameter name.
    """

    def __init__(self, name, **kwargs):
        self._name = name
        self._help = ""
        self._param_help = None
        self._alternative_param_names = None
        self._aliases = []

        if "help" in kwargs:
            self._help = kwargs.get("help")

        if "aliases" in kwargs:
            aliases = kwargs.get("aliases")
            assert type(aliases) is list, "aliases must be a list"
            self._aliases = aliases

        if "param_help" in kwargs:
            param_help_in = kwargs.get("param_help")
            assert type(param_help_in) is dict, "`param_help` must be a dictionary of str->str"
            for param_name in param_help_in:
                assert type(param_name) is str, "Keys must be of string type"
                assert type(param_help_in[param_name]) is str, "Values must be of string type"
            # Copy so later mutation of the caller's dict does not leak in.
            self._param_help = dict(param_help_in)

        if "alternative_param_names" in kwargs:
            alternative_param_names_in = kwargs.get("alternative_param_names")
            assert (
                type(alternative_param_names_in) is dict
            ), "`alternative_param_names` must be a dictionary of str->str"
            for param_name in alternative_param_names_in:
                assert type(param_name) is str, "Keys must be of string type"
                # Fixed message: values are collections of names, not strings.
                assert type(alternative_param_names_in[param_name]) in [
                    list,
                    tuple,
                ], "Values must be a list or tuple of strings"
            self._alternative_param_names = alternative_param_names_in

    def get_alternative_param_names(self, param: str):
        """Get aliases (alternate names) for a parameter.

        Args:
            param (str): Action parameter name.

        Returns:
            Optional[list[str]]: If aliases exist for parameter. `None`, otherwise.
        """
        if not self._alternative_param_names or param not in self._alternative_param_names:
            return None

        return self._alternative_param_names[param]

    def get_param_help(self, param: str):
        """Get help menu for a parameter.

        Args:
            param (str): Action parameter name.

        Returns:
            str: Help menu for parameter, if initialized. `""`, otherwise.
        """
        if not self._param_help or param not in self._param_help:
            return ""

        return self._param_help[param]

    @property
    def aliases(self):
        """list[str]: Aliases (other names) for this action."""
        return self._aliases

    @property
    def help(self):
        """str: Help menu for this action."""
        return self._help

    @property
    def name(self):
        """str: Action name."""
        return self._name
--------------------------------------------------------------------------------
/dosma/utils/collect_env.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 |
4 | import h5py
5 | import nibabel
6 | import nipype
7 | import numpy as np
8 | import pandas as pd
9 | import pydicom
10 | import scipy as sp
11 | import skimage
12 | from tabulate import tabulate
13 |
14 | __all__ = ["collect_env_info"]
15 |
16 |
def collect_env_info():
    """Collect environment information for reporting issues.

    Run this function when reporting issues on Github.
    """
    data = [
        ("sys.platform", sys.platform),
        ("Python", sys.version.replace("\n", "")),
        ("numpy", np.__version__),
    ]

    try:
        import dosma  # noqa

        data.append(("dosma", dosma.__version__ + " @" + os.path.dirname(dosma.__file__)))
    except ImportError:
        data.append(("dosma", "failed to import"))

    # Required packages (already imported at module level).
    data.extend(
        [
            ("h5py", h5py.__version__),
            ("nibabel", nibabel.__version__),
            ("nipype", nipype.__version__),
            ("pandas", pd.__version__),
            ("pydicom", pydicom.__version__),
            ("scipy", sp.__version__),
            ("skimage", skimage.__version__),
        ]
    )

    # Optional packages: record a version only when the import succeeds.
    try:
        import cupy

        data.append(("cupy", cupy.__version__))
    except ImportError:
        pass

    try:
        import tensorflow

        data.append(("tensorflow", tensorflow.__version__))
    except ImportError:
        pass

    try:
        import keras

        data.append(("keras", keras.__version__))
    except ImportError:
        pass

    try:
        import SimpleITK as sitk

        data.append(("SimpleITK", sitk.__version__))
    except ImportError:
        pass

    try:
        import sigpy

        data.append(("sigpy", sigpy.__version__))
    except ImportError:
        pass

    return tabulate(data)
81 |
--------------------------------------------------------------------------------
/dosma/utils/env.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from importlib import util
4 |
5 | _SUPPORTED_PACKAGES = {}
6 |
7 | _FILE_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
8 |
9 | __all__ = ["debug", "get_version", "package_available"]
10 |
11 |
def package_available(name: str):
    """Returns if package is available.

    Results are cached in the module-level ``_SUPPORTED_PACKAGES`` dict so
    repeated queries skip the ``find_spec`` lookup.

    Args:
        name (str): Name of the package.

    Returns:
        bool: Whether module exists in environment.
    """
    global _SUPPORTED_PACKAGES
    available = _SUPPORTED_PACKAGES.get(name)
    if available is None:
        available = util.find_spec(name) is not None
        _SUPPORTED_PACKAGES[name] = available
    return available
25 |
26 |
def get_version(package_or_name) -> str:
    """Returns package version.

    Args:
        package_or_name (``module`` or ``str``): Module or name of module.
            This package must have the version accessible through ``.__version__``.

    Returns:
        str: The package version.

    Raises:
        ValueError: If the named package is not available in the environment.

    Examples:
        >>> get_version("numpy")
        "1.20.0"
    """
    import importlib

    if isinstance(package_or_name, str):
        if not package_available(package_or_name):
            raise ValueError(f"Package {package_or_name} not available")
        # Use the regular import machinery (with sys.modules caching) instead
        # of exec'ing a fresh module from its spec, which re-ran module-level
        # side effects on every call and bypassed sys.modules registration.
        package_or_name = importlib.import_module(package_or_name)
    return package_or_name.__version__
49 |
50 |
def debug(value: bool = None) -> bool:
    """Return (and optionally set) debug mode.

    Args:
        value (bool, optional): If specified, sets the debug status.
            If not specified, debug mode is not set, only returned.

    Returns:
        bool: If ``True``, debug mode is active.

    Raises:
        ValueError: If ``value`` is not a supported value.

    Note:
        Changing the debug state changes the stream handler logging level
        for the default dosma logger. If debug state is turned off, logging
        level is set to ``logging.INFO``. If debug state is turned on,
        logging level is set to ``logging.DEBUG``.

    Examples:
        >>> debug()  # get debug status, defaults to False
        False
        >>> debug(True)  # turn on debug mode
        True
        >>> debug()  # get debug status
        True
    """

    def _is_debug():
        # Debug state lives in an environment variable so it survives module
        # reloads and is visible to subprocesses. Only the exact strings
        # "True"/"true" count as enabled; anything else ("TRUE", "false", "",
        # unset) reads as disabled.
        return os.environ.get("DOSMA_DEBUG", "") in ["True", "true"]

    def _toggle_debug(_old_value, _new_value):
        # Imported lazily to avoid a circular import at module load time.
        from dosma.defaults import preferences

        # TODO: Toggle dosma logging to debug mode.
        if _old_value == _new_value:
            return

        _dm_logger = logging.getLogger("dosma")
        if _new_value:
            # Switch the nipype logging preference and let all handlers emit
            # DEBUG-level records.
            preferences.set("nipype", value="stream", prefix="logging")
            _dm_logger.setLevel(logging.DEBUG)
            for h in _dm_logger.handlers:
                h.setLevel(logging.DEBUG)
        else:
            preferences.set("nipype", value="file_stderr", prefix="logging")
            _dm_logger.setLevel(logging.DEBUG)  # the root logger should always log at DEBUG level
            for h in _dm_logger.handlers:
                # NOTE(review): file handlers created via
                # ``logging.StreamHandler(open(...))`` also match this
                # isinstance check and would be quieted to INFO — confirm
                # that only console handlers are intended here.
                if isinstance(h, logging.StreamHandler):
                    h.setLevel(logging.INFO)

    if value is not None:
        old_value = _is_debug()
        if isinstance(value, bool):
            os.environ["DOSMA_DEBUG"] = str(value)
        elif isinstance(value, str) and value.lower() in ("true", "false", ""):
            # The string is stored verbatim, so e.g. "TRUE" is accepted here
            # but reads back as disabled via _is_debug().
            os.environ["DOSMA_DEBUG"] = value
        else:
            raise ValueError(f"Unknown value for debug: '{value}'")

        _toggle_debug(old_value, _is_debug())

    return _is_debug()
114 |
115 |
def sitk_available():
    """bool: Whether ``SimpleITK`` is available in the environment."""
    return package_available("SimpleITK")


def cupy_available():
    """bool: Whether ``cupy`` is available in the environment.

    The first call performs a real import — presumably because ``find_spec``
    can succeed for cupy installs that fail at import time (TODO confirm) —
    and caches ``False`` on ImportError.
    """
    if "cupy" in _SUPPORTED_PACKAGES:
        return package_available("cupy")
    try:
        import cupy  # noqa
    except ImportError:
        _SUPPORTED_PACKAGES["cupy"] = False
    return package_available("cupy")


def sigpy_available():
    """bool: Whether ``sigpy`` is available in the environment."""
    return package_available("sigpy")


def torch_available():
    """bool: Whether ``torch`` is available in the environment."""
    return package_available("torch")
135 |
136 |
def resources_dir() -> str:
    """str: Absolute path to the package ``resources`` directory."""
    return os.path.abspath(os.path.join(_FILE_DIRECTORY, "..", "resources"))


def output_dir() -> str:
    """str: Absolute path to the default ``.dosma`` output directory."""
    return os.path.abspath(os.path.join(_FILE_DIRECTORY, "..", "..", ".dosma"))


def temp_dir() -> str:
    """str: Path of the temporary directory inside :func:`output_dir`."""
    return os.path.join(output_dir(), "temp")


def log_file_path() -> str:
    """str: Default log file path inside :func:`output_dir`."""
    return os.path.join(output_dir(), "dosma.log")
151 |
--------------------------------------------------------------------------------
/dosma/utils/geometry_utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import scipy.ndimage as sni
3 | from scipy import optimize
4 |
5 | from dosma.core.device import get_array_module
6 | from dosma.utils import env
7 |
8 | __all__ = ["circle_fit", "cart2pol"]
9 |
10 |
def circle_fit(x: np.ndarray, y: np.ndarray):
    """Fit a circle given (x, y) scatter points in a plane.

    The center is found by least-squares minimization of the spread of
    point-to-center distances; the radius is the mean of those distances.

    Args:
        x (np.ndarray): x-coordinates of scatter points, shape ``(n,)``.
        y (np.ndarray): y-coordinates of scatter points, shape ``(n,)``.

    Returns:
        tuple[float]: Coordinates and radius of circle (center x, center y, radius).
    """

    def _distances(xc, yc):
        # Euclidean distance of every point from a candidate center (xc, yc).
        return np.sqrt((x - xc) ** 2 + (y - yc) ** 2)

    def _residuals(center):
        # Deviation of each point's distance from the mean distance; all
        # residuals are zero for points on a perfect circle about `center`.
        dists = _distances(*center)
        return dists - dists.mean()

    # Start the optimization at the centroid of the scatter points.
    initial_center = (np.mean(x), np.mean(y))
    fitted_center, _ier = optimize.leastsq(_residuals, initial_center)

    center_x, center_y = fitted_center
    radius = _distances(center_x, center_y).mean()
    return center_x, center_y, radius
71 |
72 |
def cart2pol(x, y):
    """Convert cartesian coordinates to polar coordinates.

    Args:
        x: x-coordinate(s).
        y: y-coordinate(s).

    Returns:
        tuple[float, float]: radius (rho) and angle (phi) in degrees, with
        ``+180`` remapped to ``-180``.

    Note:
        NOTE(review): the in-place remap below indexes ``phi``, so inputs are
        expected to be numpy arrays (scalars would fail) — confirm callers.
    """
    rho = np.sqrt(x ** 2 + y ** 2)
    phi = np.arctan2(y, x) * (180 / np.pi)  # radians -> degrees
    # Fold the +180 degree edge onto -180 so the angle range is half-open.
    phi[phi == 180] = -180
    return rho, phi
90 |
91 |
def center_of_mass(input, labels=None, index=None):
    """Calculate the center of mass of the values of an array at labels.

    Adapted from ``scipy.ndimage`` to also support cupy arrays: when cupy is
    installed and ``input`` lives on a cupy device, the cupyx implementation
    is used instead of scipy's.

    Args:
        input (ndarray): Data from which to calculate center-of-mass. The
            masses can either be positive or negative.
        labels (ndarray, optional): Labels for objects in `input`, as
            generated by `ndimage.label`. Only used with `index`.
            Dimensions must be the same as `input`.
        index (int or sequence of ints, optional): Labels for which to
            calculate centers-of-mass. If not specified, all labels greater
            than zero are used. Only used with `labels`.

    Returns:
        tuple | list[tuple]: Coordinates of centers-of-mass.
    """
    ndimage_module = sni
    if env.cupy_available():
        import cupy as cp

        if get_array_module(input) == cp:
            import cupyx.scipy.ndimage as csni

            ndimage_module = csni

    return ndimage_module.center_of_mass(input, labels=labels, index=index)
125 |
--------------------------------------------------------------------------------
/dosma/utils/img_utils.py:
--------------------------------------------------------------------------------
1 | import itertools
2 |
3 | import numpy as np
4 | import seaborn as sns
5 |
6 | from dosma import defaults
7 |
8 | import matplotlib.pyplot as plt
9 | from matplotlib.lines import Line2D
10 |
11 | __all__ = ["downsample_slice", "write_regions"]
12 |
13 |
def downsample_slice(img_array, ds_factor, is_mask=False):
    """
    Takes in a 3D array and then downsamples in the z-direction by a
    user-specified downsampling factor.

    Each output slice is the sum of ``ds_factor`` consecutive input slices;
    a final partial group is zero-padded.

    Args:
        img_array (np.ndarray): 3D numpy array for now (xres x yres x zres)
        ds_factor (int): Downsampling factor
        is_mask (:obj:`bool`, optional): If truthy, ``img_array`` is a mask and will be
            binarized after downsampling. Defaults to `False`.

    Returns:
        np.ndarray: 3D numpy array of dimensions (xres x yres x zres//ds_factor)

    Examples:
        >>> input_image = numpy.random.rand(4,4,4)
        >>> input_mask = (input_image > 0.5) * 1.0
        >>> output_image = downsample_slice(input_image, ds_factor = 2, is_mask = False)
        >>> output_mask = downsample_slice(input_mask, ds_factor = 2, is_mask = True)
    """
    # Make z the leading axis so the array iterates as a sequence of slices.
    img_array = np.transpose(img_array, (2, 0, 1))
    slices = list(img_array)

    def _grouper(iterable, n):
        # Chunk into groups of n; the last partial group is zero-padded so
        # that `sum` still yields a full-size slice.
        args = [iter(iterable)] * n
        return itertools.zip_longest(fillvalue=0, *args)

    final = np.array([sum(group) for group in _grouper(slices, ds_factor)])
    final = np.transpose(final, (1, 2, 0))

    # Binarize if it is a mask. Previously this tested `is_mask is True`,
    # which silently skipped binarization for truthy non-bool values (e.g. 1).
    if is_mask:
        final = (final >= 1) * 1

    return final
50 |
51 |
def write_regions(file_path, arr, plt_dict=None):
    """Write 2D array to region image where colors correspond to the region.

    All finite values should be >= 1.
    nan/inf value are ignored - written as white.

    Args:
        file_path (str): File path to save image.
        arr (np.ndarray): The 2D numpy array to convert to region image.
            Unique non-zero values correspond to different regions.
            Values that are `0` or `np.nan` will be written as white pixels.
        plt_dict (:obj:`dict`, optional): Dictionary of values to use when plotting with
            ``matplotlib.pyplot``. Keys are strings like `xlabel`, `ylabel`, etc.
            Use Key `labels` to specify a mapping from unique non-zero values in the array
            to names for the legend.

    Raises:
        ValueError: If ``arr`` is not 2D, if any value is ``0``, or if the
            number of ``labels`` does not match the number of regions.
    """

    if len(arr.shape) != 2:
        raise ValueError("`arr` must be a 2D numpy array")

    unique_vals = np.unique(arr.flatten())
    if 0 in unique_vals:
        raise ValueError("All finite values in `arr` must be >=1")

    # Regions are the finite unique values; nan/inf pixels stay background.
    unique_vals = unique_vals[np.isfinite(unique_vals)]
    num_unique_vals = len(unique_vals)

    # Overlay caller-provided plotting options onto the defaults.
    plt_dict_int = {"xlabel": "", "ylabel": "", "title": "", "labels": None}
    if plt_dict:
        plt_dict_int.update(plt_dict)
    plt_dict = plt_dict_int

    labels = plt_dict["labels"]
    if labels is None:
        # Default legend labels are the region values themselves.
        labels = list(unique_vals)

    if len(labels) != num_unique_vals:
        raise ValueError(
            "len(labels) != num_unique_vals - %d != %d" % (len(labels), num_unique_vals)
        )

    # One pastel color per region.
    cpal = sns.color_palette("pastel", num_unique_vals)

    arr_c = np.array(arr)
    # nan -> 0; +inf -> a large finite value that exceeds the largest region
    # value and is zeroed on the next line; -inf -> a large negative value
    # that matches no region and therefore also stays background.
    arr_c = np.nan_to_num(arr_c)
    arr_c[arr_c > np.max(unique_vals)] = 0
    # Start from an all-white RGB canvas; only region pixels get colored.
    arr_rgb = np.ones([arr_c.shape[0], arr_c.shape[1], 3])

    plt.figure()
    plt.clf()
    custom_lines = []
    for i in range(num_unique_vals):
        unique_val = unique_vals[i]
        i0, i1 = np.where(arr_c == unique_val)
        arr_rgb[i0, i1, ...] = np.asarray(cpal[i])

        # Proxy artist so the legend shows one colored marker per region.
        custom_lines.append(
            Line2D([], [], color=cpal[i], marker="o", linestyle="None", markersize=5)
        )

    plt.xlabel(plt_dict["xlabel"])
    plt.ylabel(plt_dict["ylabel"])
    plt.title(plt_dict["title"])

    # Legend placed below the axes; spacing comes from project defaults.
    lgd = plt.legend(
        custom_lines,
        labels,
        loc="upper center",
        bbox_to_anchor=(0.5, -defaults.DEFAULT_TEXT_SPACING),
        fancybox=True,
        shadow=True,
        ncol=3,
    )
    plt.imshow(arr_rgb)

    # Pass the legend as an extra artist so it is not clipped on save.
    plt.savefig(file_path, bbox_extra_artists=(lgd,), bbox_inches="tight")
128 |
--------------------------------------------------------------------------------
/dosma/utils/io_utils.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import pickle
4 | import warnings
5 | from typing import Sequence
6 |
7 | import h5py
8 | import pandas as pd
9 |
10 | from dosma.utils.logger import setup_logger
11 |
12 | __all__ = ["mkdirs", "save_pik", "load_pik", "save_h5", "load_h5", "save_tables"]
13 |
14 |
def mkdirs(dir_path: str):
    """Make directory if directory does not exist.

    Args:
        dir_path (str): Directory path to make.

    Returns:
        str: Path to the directory.
    """
    # exist_ok avoids the race between an existence check and creation
    # (the directory could be created by another process in between).
    os.makedirs(dir_path, exist_ok=True)
    return dir_path
28 |
29 |
def save_pik(file_path: str, data):
    """Save data using `pickle`.

    Pickle may not be stable across Python 2/3.

    Args:
        file_path (str): File path to save to. Parent directories are created
            if they do not exist.
        data (Any): Data to serialize.
    """
    dir_path = os.path.dirname(file_path)
    # Guard against bare filenames: os.makedirs("") raises FileNotFoundError.
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)
    with open(file_path, "wb") as f:
        pickle.dump(data, f)
42 |
43 |
def load_pik(file_path: str):
    """Deserialize data written with :any:`save_pik` via `pickle`.

    Pickle may not be stable across Python 2/3.

    Args:
        file_path (str): File path to load from.

    Returns:
        Any: Loaded data.

    Raises:
        FileNotFoundError: If `file_path` does not exist.
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError(f"{file_path} does not exist")

    with open(file_path, "rb") as handle:
        return pickle.load(handle)
65 |
66 |
def save_h5(file_path: str, data_dict: dict):
    """Save data in HDF5 format.

    Args:
        file_path (str): File path to save to. Parent directories are created
            if they do not exist.
        data_dict (dict): Dictionary of data to store. Dictionary can only have depth of 1.
    """
    dir_path = os.path.dirname(file_path)
    # Guard against bare filenames: os.makedirs("") raises FileNotFoundError.
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)
    with h5py.File(file_path, "w") as f:
        for key, value in data_dict.items():
            f.create_dataset(key, data=value)
78 |
79 |
def load_h5(file_path):
    """Load data in HDF5 format.

    Args:
        file_path (str): File path to load from.

    Returns:
        dict: Loaded data, one entry per top-level dataset.

    Raises:
        FileNotFoundError: If `file_path` does not exist.
    """
    if not os.path.isfile(file_path):
        raise FileNotFoundError("{} does not exist".format(file_path))

    data = {}
    with h5py.File(file_path, "r") as f:
        for key in f.keys():
            # ``Dataset.value`` was removed in h5py 3.0; ``[()]`` is the
            # supported way to read a full dataset into memory.
            data[key] = f[key][()]

    return data
101 |
102 |
def save_tables(
    file_path: str, data_frames: Sequence[pd.DataFrame], sheet_names: Sequence[str] = None
):
    """Save data in excel tables.

    Args:
        file_path (str): File path to excel file. Parent directories are
            created if they do not exist.
        data_frames (Sequence[pd.DataFrame]): Tables to store to excel file.
            One table stored per sheet.
        sheet_names (:obj:`Sequence[str]`, optional): Sheet names for each data frame.
            Defaults to ``"Sheet1"``, ``"Sheet2"``, etc.

    Raises:
        ValueError: If the number of ``data_frames`` and ``sheet_names`` differ.
    """
    dir_path = os.path.dirname(file_path)
    # Guard against bare filenames: os.makedirs("") raises FileNotFoundError.
    if dir_path:
        os.makedirs(dir_path, exist_ok=True)

    if sheet_names is None:
        sheet_names = ["Sheet%d" % (i + 1) for i in range(len(data_frames))]

    # Validate before opening the writer so an error does not leak a handle.
    if len(data_frames) != len(sheet_names):
        raise ValueError("Number of data_frames and sheet_frames should be the same")

    # ``ExcelWriter.save()`` was removed in pandas 2.0; the context manager
    # writes and closes the file on exit.
    with pd.ExcelWriter(file_path) as writer:
        for df, sheet_name in zip(data_frames, sheet_names):
            df.to_excel(writer, sheet_name=sheet_name, index=False)
130 |
131 |
def init_logger(log_file: str, debug: bool = False):  # pragma: no cover
    """Initialize logger (deprecated shim around :func:`dosma.setup_logger`).

    Args:
        log_file (str): File path to log file.
        debug (:obj:`bool`, optional): If ``True``, debug statements are also
            written to the log file. Defaults to ``False``.
    """
    warnings.warn(
        "init_logger is deprecated since v0.0.14 and will no longer be "
        "supported in v0.13. Use `dosma.setup_logger` instead.",
        DeprecationWarning,
    )

    setup_logger(log_file, level=logging.DEBUG if debug else logging.INFO)
149 |
--------------------------------------------------------------------------------
/dosma/utils/logger.py:
--------------------------------------------------------------------------------
1 | """Logging utility.
2 | """
3 |
4 | import atexit
5 | import functools
6 | import logging
7 | import os
8 | import sys
9 | from time import localtime, strftime
10 | from typing import Union
11 |
12 | from termcolor import colored
13 |
14 | from dosma.utils import env
15 |
16 | __all__ = ["setup_logger"]
17 |
18 |
class _ColorfulFormatter(logging.Formatter):
    """
    This class is adapted from Facebook's detectron2:
    https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/logger.py
    """

    def __init__(self, *args, **kwargs):
        # Pop our custom kwargs before delegating to logging.Formatter.
        self._root_name = kwargs.pop("root_name") + "."
        abbrev = kwargs.pop("abbrev_name", "")
        self._abbrev_name = abbrev + "." if len(abbrev) else abbrev
        super().__init__(*args, **kwargs)

    def formatMessage(self, record):
        # Abbreviate the root module in the logger name (e.g. "dosma." -> "dm.").
        record.name = record.name.replace(self._root_name, self._abbrev_name)
        formatted = super().formatMessage(record)
        if record.levelno == logging.WARNING:
            return colored("WARNING", "red", attrs=["blink"]) + " " + formatted
        if record.levelno in (logging.ERROR, logging.CRITICAL):
            return colored("ERROR", "red", attrs=["blink", "underline"]) + " " + formatted
        return formatted
42 |
43 |
@functools.lru_cache()  # so that calling setup_logger multiple times won't add many handlers
def setup_logger(
    output: Union[str, bool] = True,
    color=True,
    name="dosma",
    abbrev_name=None,
    stream_lvl=None,
    overwrite_handlers: bool = False,
):
    """Initialize the dosma logger.

    Args:
        output (str | bool): A file name or a directory to save log or a boolean.
            If ``True``, logs will save to the default dosma log location
            (:func:`dosma.utils.env.log_file_path`).
            If ``None`` or ``False``, logs will not be written to a file. This is not recommended.
            If a string and ends with ".txt" or ".log", assumed to be a file name.
            Otherwise, logs will be saved to `output/log.txt`.
        color (bool): If ``True``, logs printed to terminal (stdout) will be in color.
        name (str): The root module name of this logger.
        abbrev_name (str): An abbreviation of the module, to avoid long names in logs.
            Set to "" to not log the root module in logs.
            By default, will abbreviate "dosma" to "dm" and leave other
            modules unchanged.
        stream_lvl (int): The level for logging to console. Defaults to ``logging.DEBUG``
            if :func:`dosma.utils.env.debug()` is ``True``, else defaults to ``logging.INFO``.
        overwrite_handlers (bool): If ``True`` and logger with name ``name`` has logging handlers,
            these handlers will be removed before adding the new handlers. This is useful
            when to avoid having too many handlers for a logger.

    Returns:
        logging.Logger: A logger.

    Note:
        This method removes existing handlers from the logger.

    Examples:
        >>> setup_logger()  # how initializing logger is done most of the time
        >>> setup_logger("/path/to/save/dosma.log")  # save log to particular file
        >>> setup_logger(
        ...     stream_lvl=logging.WARNING,
        ...     overwrite_handlers=True)  # only prints warnings to console
    """
    if stream_lvl is None:
        stream_lvl = logging.DEBUG if env.debug() else logging.INFO

    logger = logging.getLogger(name)
    # The logger itself passes everything; handlers apply their own levels.
    logger.setLevel(logging.DEBUG)
    logger.propagate = False

    # Clear handlers if they exist.
    is_new_logger = not logger.hasHandlers()
    if not is_new_logger and overwrite_handlers:
        logger.handlers.clear()

    if abbrev_name is None:
        abbrev_name = "dm" if name == "dosma" else name

    plain_formatter = logging.Formatter(
        "[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S"
    )

    ch = logging.StreamHandler(stream=sys.stdout)
    ch.setLevel(stream_lvl)
    if color:
        formatter = _ColorfulFormatter(
            colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
            datefmt="%m/%d %H:%M:%S",
            root_name=name,
            abbrev_name=str(abbrev_name),
        )
    else:
        formatter = plain_formatter
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    if output is not None and output is not False:
        if output is True:
            output = env.log_file_path()

        if output.endswith(".txt") or output.endswith(".log"):
            filename = output
        else:
            filename = os.path.join(output, "dosma.log")
        # Create the parent directory for BOTH branches. Previously makedirs
        # was only called in the directory branch, so file-style outputs
        # (including the default env.log_file_path(), which ends in ".log")
        # failed to open when the parent directory did not exist.
        dirname = os.path.dirname(filename)
        if dirname:
            os.makedirs(dirname, exist_ok=True)

        fh = logging.StreamHandler(_cached_log_stream(filename))
        fh.setLevel(logging.DEBUG)
        fh.setFormatter(plain_formatter)
        logger.addHandler(fh)

    if is_new_logger and name == "dosma":
        logger.debug("\n" * 4)
        logger.debug("==" * 40)
        logger.debug("Timestamp: %s" % strftime("%Y-%m-%d %H:%M:%S", localtime()))
        logger.debug("\n\n")

    return logger
142 |
143 |
144 | # cache the opened file object, so that different calls to `setup_logger`
145 | # with the same file name can safely write to the same file.
146 | @functools.lru_cache(maxsize=None)
147 | def _cached_log_stream(filename):
148 | # use 1K buffer if writing to cloud storage
149 | io = open(filename, "a", buffering=1024 if "://" in filename else -1)
150 | atexit.register(io.close)
151 | return io
152 |
--------------------------------------------------------------------------------
/pyproject.toml:
--------------------------------------------------------------------------------
1 | [tool.black]
2 | line-length = 100
3 | exclude = '(\.eggs|\.git|\.github)'
4 |
--------------------------------------------------------------------------------
/readme_ims/dess_protocol.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/readme_ims/dess_protocol.png
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | h5py
2 | matplotlib
3 | numpy
4 | natsort
5 | nested-lookup
6 | nibabel
7 | nipype
8 | packaging
9 | pandas
10 | pydicom>=1.6.0
11 | scikit-image
12 | scipy
13 | seaborn
14 | openpyxl
15 | Pmw
16 | PyYAML
17 | tabulate
18 | termcolor
19 | tqdm>=4.42.0
--------------------------------------------------------------------------------
/scripts/bilateral-knee-dess:
--------------------------------------------------------------------------------
#!/bin/bash

# 1. Separate dicom files from bilateral dess knee scan into left dicoms and right dicoms
# 2. Run qDESS analysis on left and right knees
#
# @usage (from terminal/command line):
#   ./bilateral-knee-dess PATH_TO_DICOM_FOLDER PATIENT_ID
#   eg: "./bilateral-knee-dess /Users/data/Patient07/005 07"
#
# @initialization protocol:
#   1. run "chmod +x bilateral-knee-dess" from the command line
#   2. Update `WEIGHTS_DIRECTORY` field below to point to the appropriate weights
#   3. Update 'TISSUES' field if additional tissues desired
#      e.g. '--fc --tc' for femoral cartilage and tibial cartilage
#
# @assumptions:
#   - Scan volumes are acquired in sagittal direction from patient left to patient right
#     (i.e. left knee, then right knee)
#   - Volume slices (1...N) - Left knee slices (1...N/2), right knee slices (N/2 + 1, ... N)
#   - Left knee - lateral --> medial
#   - Right knee - medial --> lateral
#
# @author: Arjun Desai, Stanford University
#          (c) Stanford University, 2018


SCRIPTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
DOSMA_DIR="$( cd "$( dirname "${SCRIPTS_DIR}" )" >/dev/null 2>&1 && pwd )"

WEIGHTS_DIRECTORY="$DOSMA_DIR/../weights"
TISSUES='--fc'

if [[ -z "$WEIGHTS_DIRECTORY" ]]; then
    echo "Please define WEIGHTS_DIRECTORY in script. Use the absolute path"
    exit 125
fi

if [[ $# -lt 1 ]]; then
    echo "Please provide path to dicom folder and patient id"
    exit 125
fi

if [[ $# -lt 2 ]]; then
    echo "Please provide patient id"
    exit 125
fi

DICOM_PATH=$1
PID=$2
echo "dicom path: $DICOM_PATH"
echo "patient id: $PID"

# Get list of dicoms in this folder.
# NOTE: the global option -maxdepth must precede tests like -type/-name,
# otherwise GNU find warns about the option ordering.
dicom_list_str=$(find "$DICOM_PATH" -maxdepth 1 -type f -name "*.dcm" | sort)
dicom_array=()
for filepath in $dicom_list_str
do
    dicom_array+=("$filepath")
done

echo "Number of dicoms: ${#dicom_array[@]}"

# halfpoint in dicom list to split
half_point=$((${#dicom_array[@]} / 2))

# Assume directories exist, if they don't set this to false
DIRS_EXIST=1

LEFT_DIR="$DICOM_PATH/LEFT/"
if [[ ! -d "$LEFT_DIR" ]]; then
    mkdir "$LEFT_DIR"
    DIRS_EXIST=0
fi

RIGHT_DIR="$DICOM_PATH/RIGHT/"
if [[ ! -d "$RIGHT_DIR" ]]; then
    mkdir "$RIGHT_DIR"
    DIRS_EXIST=0
fi

# if the directories already exist, assume the data has already been separated into different folders
if [[ $DIRS_EXIST -eq 0 ]]; then
    counter=1
    for filepath in "${dicom_array[@]}"
    do
        if [[ $counter -gt $half_point ]]; then
            # store in right directory
            cp "$filepath" "$RIGHT_DIR"
        else
            cp "$filepath" "$LEFT_DIR"
        fi

        counter=$((counter + 1))
    done
fi

cd ..

base_dicom_path=$(dirname "$DICOM_PATH")
base_filename=$(basename "$DICOM_PATH")
DATA_DIR="$base_dicom_path/data/$base_filename"

echo "Save path: $DATA_DIR"

# 2. run analysis on the qdess files
#    if data directory already exist, skip analysis

DATA_DIR_LEFT="$DATA_DIR/LEFT"

# $TISSUES is intentionally left unquoted so multiple flags split into words.
dosma --d "$LEFT_DIR" --s "$DATA_DIR_LEFT" qdess $TISSUES segment --rms --weights_dir "$WEIGHTS_DIRECTORY"
dosma --l "$DATA_DIR_LEFT" qdess $TISSUES t2
dosma --l "$DATA_DIR_LEFT" knee --pid "$PID" $TISSUES


DATA_DIR_RIGHT="$DATA_DIR/RIGHT"

dosma --d "$RIGHT_DIR" --s "$DATA_DIR_RIGHT" qdess $TISSUES segment --rms --weights_dir "$WEIGHTS_DIRECTORY"
dosma --l "$DATA_DIR_RIGHT" qdess $TISSUES t2
dosma --l "$DATA_DIR_RIGHT" knee --ml --pid "$PID" $TISSUES
123 |
124 |
--------------------------------------------------------------------------------
/scripts/msk-qdess:
--------------------------------------------------------------------------------
#!/bin/bash

# msk-qdess
#
# @brief: Run DESS analysis for femoral cartilage on the patient folder given
#         as an argument:
#           1. Automatic segmentation
#           2. T2 map calculation
#           3. Femoral cartilage knee analysis
#
# @usage (from terminal/command line):
#   ./msk-qdess PATH_TO_PATIENT_FOLDER qDESS_DICOM_FOLDER_NAME
#   eg: "./msk-qdess subject_scans/patient01 001"
#
# @initialization protocol:
#   1. run "chmod +x msk-qdess" from the command line
#   2. Update `WEIGHTS_DIRECTORY` field below
#
# @author: Arjun Desai, Stanford University
# (c) Stanford University, 2018

# Absolute path to the segmentation model weights; must be set by the user.
WEIGHTS_DIRECTORY=""
if [ -z "$WEIGHTS_DIRECTORY" ]; then
    echo "Please define WEIGHTS_DIRECTORY in script. Use the absolute path"
    exit 125
fi

# Default series (dicom folder name) is "qdess" unless given as 2nd argument.
if [ $# -eq 1 ]; then
    SERIES="qdess"
else
    SERIES=$2
fi

# Find all dicom series folders matching $SERIES under the patient folder.
# BUG FIX: expansions are now quoted so paths containing spaces do not break
# argument parsing. NOTE: the loop below still word-splits $FILES, so folder
# paths themselves must not contain whitespace.
FILES=$(find "$1" -type d -name "$SERIES")

cd ..

# For each matched series: segment, compute the T2 map, analyze the knee.
for i in $FILES; do
    DIRNAME=$(dirname "$i")
    DIRNAME="$DIRNAME/data"
    dosma --d "$i" --s "$DIRNAME" qdess --fc segment --rms --weights_dir "$WEIGHTS_DIRECTORY"
    dosma --l "$DIRNAME" qdess --fc t2
    dosma --l "$DIRNAME" knee --fc --t2
done
--------------------------------------------------------------------------------
/scripts/multi-scan-script:
--------------------------------------------------------------------------------
#!/bin/bash

# multi-scan-script
#
# @brief: Run qDESS, Cubequant, and Cones analysis on super folder (absolute
#         path) specified as an argument.
#
# @folder structure:
#   |subject_scans
#     |patient01
#       |qdess
#       |cubequant
#       |cones
#     |patient02
#       |qdess
#       |cubequant
#       |cones
#     ...
#
# @usage (from terminal/command line):
#   ./multi-scan-script PATH_TO_SUPER_FOLDER
#   eg: "./multi-scan-script Users/john/Documents/subject_scans/"
#
# @initialization protocol:
#   1. run "chmod +x multi-scan-script" from the command line
#   2. Update `WEIGHTS_DIRECTORY` field below
#
# @notes:
#   - If data already exists for a subject, the script does not recalculate -
#     delete the `data` folder and rerun the script to redo the analysis.
#
# @author: Arjun Desai, Stanford University
# (c) Stanford University, 2018

# This script is deprecated and intentionally exits before doing any work.
echo "This file is currently deprecated - if needed, please make an issue on the corresponding Github repository"
exit 125

WEIGHTS_DIRECTORY="/Users/arjundesai/Documents/stanford/research/msk_pipeline_raw/weights"
if [ -z "$WEIGHTS_DIRECTORY" ]; then
    echo "Please define WEIGHTS_DIRECTORY in script. Use the absolute path"
    exit 125
fi

# Default series (dicom folder name) is "qdess" unless given as 2nd argument.
if [ $# -eq 1 ]; then
    SERIES="qdess"
else
    SERIES=$2
fi

# Find all series folders matching $SERIES under the super folder.
FILES=$(find "$1" -type d -name "$SERIES")

cd ..

for i in $FILES; do
    DIRNAME=$(dirname "$i")
    SAVE_DIRNAME="$DIRNAME/data"
    CUBEQUANT_DIRNAME="$DIRNAME/cubequant"
    CONES_DIRNAME="$DIRNAME/cones"
    TARGET_SCAN="$SAVE_DIRNAME/qdess_data/echo1.nii.gz"
    MASK="$SAVE_DIRNAME/fc/fc.nii.gz"

    CUBEQUANT_INTERREGISTERED_DATA="$SAVE_DIRNAME/cubequant_data/interregistered"
    # BUG FIX: variable was previously misspelled "CUEQUANT_T1RHO".
    CUBEQUANT_T1RHO="$SAVE_DIRNAME/cubequant_data/t1_rho.nii.gz"

    CONES_INTERREGISTERED_DATA="$SAVE_DIRNAME/cones_data/interregistered"
    CONES_T2STAR="$SAVE_DIRNAME/cones_data/t2_star.nii.gz"

    echo ""
    echo "----------Analyzing $DIRNAME---------"

    # Use qDESS to get femoral cartilage segmentation + 3D t2 maps.
    if [ ! -e "$TARGET_SCAN" ]; then
        dosma --d "$i" --s "$SAVE_DIRNAME" qdess --fc segment --rms --weights_dir "$WEIGHTS_DIRECTORY"
        dosma --l "$SAVE_DIRNAME" qdess --fc t2
    fi

    # Interregister cubequant with qdess.
    if [ ! -e "$CUBEQUANT_INTERREGISTERED_DATA" ]; then
        echo "$CUBEQUANT_INTERREGISTERED_DATA"
        dosma --d "$CUBEQUANT_DIRNAME" --s "$SAVE_DIRNAME" cubequant interregister --ts "$TARGET_SCAN" --tm "$MASK"
    fi

    # Cubequant 3D t1rho map.
    if [ ! -e "$CUBEQUANT_T1RHO" ]; then
        dosma --l "$SAVE_DIRNAME" cubequant t1_rho
    fi

    # Interregister cones with qdess.
    if [ ! -e "$CONES_INTERREGISTERED_DATA" ]; then
        dosma --d "$CONES_DIRNAME" --s "$SAVE_DIRNAME" cones interregister --ts "$TARGET_SCAN" --tm "$MASK"
    fi

    # Cones 3D t2_star map.
    if [ ! -e "$CONES_T2STAR" ]; then
        dosma --l "$SAVE_DIRNAME" cones t2_star
    fi

    # Analyze femoral cartilage.
    python -m pipeline --l "$SAVE_DIRNAME" knee --fc --t2 --t2_star --t1_rho
done
--------------------------------------------------------------------------------
/scripts/runtime-script:
--------------------------------------------------------------------------------
#!/bin/bash

# runtime-script
#
# @brief: Time each stage of the qDESS / Cubequant / Cones pipeline for every
#         series folder found under the super folder given as an argument.

# This script is deprecated and intentionally exits before doing any work.
echo "This file is currently deprecated - if needed, please make an issue on the corresponding Github repository"
exit 125

WEIGHTS_DIRECTORY="/Users/arjundesai/Documents/stanford/research/msk_pipeline_raw/weights"
if [ -z "$WEIGHTS_DIRECTORY" ]; then
    echo "Please define WEIGHTS_DIRECTORY in script. Use the absolute path"
    exit 125
fi

# Default series (dicom folder name) is "qdess" unless given as 2nd argument.
if [ $# -eq 1 ]; then
    SERIES="qdess"
else
    SERIES=$2
fi

# Find relevant dicom series folders.
FILES=$(find "$1" -type d -name "$SERIES")

cd ..

for i in $FILES; do
    DIRNAME=$(dirname "$i")
    SAVE_DIRNAME="$DIRNAME/data"
    CUBEQUANT_DIRNAME="$DIRNAME/cubequant"
    CONES_DIRNAME="$DIRNAME/cones"
    TARGET_SCAN="$SAVE_DIRNAME/qdess_data/echo1.nii.gz"
    MASK="$SAVE_DIRNAME/fc/fc.nii.gz"

    # NOTE: unused path variables from the original script (t1rho/t2star
    # output files, cones interregistered dir) were removed.
    CUBEQUANT_INTERREGISTERED_DATA="$SAVE_DIRNAME/cubequant_data/interregistered"

    echo ""
    echo "----------Analyzing $DIRNAME---------"

    # Use qDESS to get femoral cartilage segmentation + 3D t2 maps.
    echo "qDESS T2map time:"
    python -m pipeline -d "$i" -s "$SAVE_DIRNAME" qdess -fc -t2

    echo "segmentation time:"
    python -m pipeline -l "$SAVE_DIRNAME" -s "$SAVE_DIRNAME" qdess -fc segment -rms --weights_dir "$WEIGHTS_DIRECTORY"

    echo "cubequant registration time:"
    echo "$CUBEQUANT_INTERREGISTERED_DATA"
    python -m pipeline -d "$CUBEQUANT_DIRNAME" -s "$SAVE_DIRNAME" cq interregister -ts "$TARGET_SCAN"

    echo "cubequant T2map time:"
    python -m pipeline -l "$SAVE_DIRNAME" cq -t1rho -fm "$MASK"

    echo "cones registration time:"
    python -m pipeline -d "$CONES_DIRNAME" -s "$SAVE_DIRNAME" cones interregister -ts "$TARGET_SCAN"

    echo "cones t2star time:"
    python -m pipeline -l "$SAVE_DIRNAME" cones -t2star -fm "$MASK"

    echo "analysis time"
    python -m pipeline -l "$SAVE_DIRNAME" knee -fc -t2 -t1_rho -t2_star
done
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [isort]
2 | profile = black
3 | line_length=100
4 | multi_line_output=3
5 | include_trailing_comma=True
6 | force_grid_wrap = 0
7 | use_parentheses = True
8 | ensure_newline_before_comments = True
9 | extra_standard_library=setuptools,mock,yaml
10 | skip=docs,setup.py
11 | skip_glob=*/__init__.py
12 | known_myself=dosma
13 | known_third_party=h5py,numpy,natsort,nested-lookup,nibabel,nipype,pandas,pydicom,skimage,scipy,seaborn,SimpleITK,packaging,Pmw,tabulate,termcolor,tqdm
14 | no_lines_before=STDLIB
15 | sections=FUTURE,STDLIB,THIRDPARTY,MYSELF,FIRSTPARTY,LOCALFOLDER
16 | default_section=FIRSTPARTY
17 |
18 | [mypy]
19 | python_version=3.6
20 | ignore_missing_imports = True
21 | warn_unused_configs = True
22 | disallow_untyped_defs = True
23 | check_untyped_defs = True
24 | warn_unused_ignores = True
25 | warn_redundant_casts = True
26 | show_column_numbers = True
27 | follow_imports = silent
28 | allow_redefinition = True
29 | ; Require all functions to be annotated
30 | disallow_incomplete_defs = True
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | import os
4 | import sys
5 | from shutil import rmtree
6 | from setuptools import Command, find_packages, setup
7 |
8 | here = os.path.abspath(os.path.dirname(__file__))
9 |
10 |
def get_version():
    """Read the package version from ``dosma/__init__.py``.

    Side effect: rewrites the ``__version__`` line in ``dosma/__init__.py`` so
    that any release suffix (``DOSMA_VERSION_SUFFIX``) or nightly date tag is
    baked into the installed package.

    Returns:
        str: The (possibly suffixed) version string.
    """
    init_py_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), "dosma", "__init__.py")
    # BUG FIX: the file handle was previously opened without being closed;
    # use context managers for both the read and the write.
    with open(init_py_path, "r") as f:
        init_py = f.readlines()
    version_line = [line.strip() for line in init_py if line.startswith("__version__")][0]
    version = version_line.split("=")[-1].strip().strip("'\"")

    # The following is used to build release packages.
    # Users should never use it.
    suffix = os.getenv("DOSMA_VERSION_SUFFIX", "")
    version = version + suffix
    if os.getenv("BUILD_NIGHTLY", "0") == "1":
        from datetime import datetime

        date_str = datetime.today().strftime("%y%m%d")
        version = version + ".dev" + date_str

    # Rewrite __init__.py with the resolved version string.
    new_init_py = [line for line in init_py if not line.startswith("__version__")]
    new_init_py.append('__version__ = "{}"\n'.format(version))
    with open(init_py_path, "w") as f:
        f.write("".join(new_init_py))
    return version
32 |
33 |
def get_resources():
    """Get the resources files for dosma. To be used with `package_data`.

    All files under 'dosma/resources/{elastix,templates}'.

    Returns:
        List[str]: File paths relative to the ``dosma`` package directory.
    """
    import pathlib

    files = []
    # Elastix parameter files and preference templates.
    for subdir in ("elastix/params", "templates"):
        for path in pathlib.Path("dosma/resources").joinpath(subdir).rglob("*.*"):
            # BUG FIX: use POSIX separators so the leading "dosma/" component
            # can be stripped reliably on all platforms (str(path) yields
            # backslash-separated paths on Windows).
            files.append(path.as_posix())
    return [x.split("/", 1)[1] for x in files]
48 |
49 |
class UploadCommand(Command):
    """Support setup.py upload.

    Adapted from https://github.com/robustness-gym/meerkat.

    Builds sdist/wheel distributions, uploads them to PyPI via twine, and
    pushes a git tag for the released version.
    """

    description = "Build and publish the package."
    user_options = []

    @staticmethod
    def status(s):
        """Prints things in bold."""
        print("\033[1m{0}\033[0m".format(s))

    def initialize_options(self):
        # Required by the setuptools Command interface; no options to set up.
        pass

    def finalize_options(self):
        # Required by the setuptools Command interface; no options to finalize.
        pass

    def run(self):
        try:
            self.status("Removing previous builds…")
            rmtree(os.path.join(here, "dist"))
        except OSError:
            # "dist" does not exist yet -- nothing to clean.
            pass

        self.status("Building Source and Wheel (universal) distribution…")
        os.system("{0} setup.py sdist bdist_wheel --universal".format(sys.executable))

        self.status("Uploading the package to PyPI via Twine…")
        os.system("twine upload dist/*")

        self.status("Pushing git tags…")
        # NOTE(review): get_version() rewrites dosma/__init__.py as a side effect.
        os.system("git tag v{0}".format(get_version()))
        os.system("git push --tags")

        sys.exit()
88 |
89 |
# ---------------------------------------------------
# Setup Information
# ---------------------------------------------------

# Required packages.
REQUIRED = [
    "matplotlib",
    "numpy",
    "h5py",
    "natsort",
    "nested-lookup",
    "nibabel",
    "nipype",
    "packaging",
    "pandas",
    # TODO Issue #57: Remove pydicom upper bound (https://github.com/ad12/DOSMA/issues/57)
    "pydicom>=1.6.0",
    "scikit-image",
    "scipy",
    "seaborn",
    "openpyxl",
    "Pmw",
    "PyYAML",
    "tabulate",
    "termcolor",
    "tqdm>=4.42.0",
]

# Optional packages, keyed by extra name (``pip install dosma[<key>]``).
# TODO Issue #106: Fix to only import tensorflow version with fixed version
# once keras import statements are properly handled.
EXTRAS = {
    "dev": [
        "coverage",
        "flake8",
        "flake8-bugbear",
        "flake8-comprehensions",
        "isort",
        "black==21.4b2",
        "click==8.0.2",
        "simpleitk",
        "sphinx",
        "sphinxcontrib.bibtex",
        "m2r2",
        "tensorflow<=2.4.1",
        "keras<=2.4.3",
        "sigpy",
    ],
    "ai": ["tensorflow<=2.4.1", "keras<=2.4.3"],
    "docs": ["mistune>=0.8.1,<2.0.0", "sphinx", "sphinxcontrib.bibtex", "m2r2"],
}

# Use the README as the PyPI long description.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()


setup(
    name="dosma",
    version=get_version(),
    author="Arjun Desai",
    url="https://github.com/ad12/DOSMA",
    project_urls={"Documentation": "https://dosma.readthedocs.io/"},
    description="An AI-powered open-source medical image analysis toolbox",
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(exclude=("configs", "tests", "tests.*")),
    package_data={"dosma": get_resources()},
    python_requires=">=3.6",
    install_requires=REQUIRED,
    license="GNU",
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: GNU General Public License v3 (GPLv3)",
        "Operating System :: OS Independent",
    ],
    extras_require=EXTRAS,
    # $ setup.py publish support.
    cmdclass={
        "upload": UploadCommand,
    },
)
171 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/__init__.py
--------------------------------------------------------------------------------
/tests/core/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/core/__init__.py
--------------------------------------------------------------------------------
/tests/core/io/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/core/io/__init__.py
--------------------------------------------------------------------------------
/tests/core/io/test_format_io.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from dosma.core.io.format_io import ImageDataFormat
4 |
5 |
class TestImageDataFormat(unittest.TestCase):
    """Tests for :class:`ImageDataFormat` filetype detection."""

    def test_isfiletype(self):
        dcm_fname = "data.dcm"
        nifti_fname = "data.nii.gz"

        assert ImageDataFormat.dicom.is_filetype(dcm_fname)
        assert not ImageDataFormat.dicom.is_filetype(nifti_fname)

        assert ImageDataFormat.nifti.is_filetype(nifti_fname)
        assert not ImageDataFormat.nifti.is_filetype(dcm_fname)

    def test_get_image_data_format(self):
        # BUG FIX: this method was named ``get_image_data_format`` (missing the
        # ``test_`` prefix) so unittest never ran it, and its final assertion
        # compared against the filename string instead of the expected format.
        dcm_fname = "data.dcm"
        dcm_dir = "/path/to/dir"
        nifti_fname = "data.nii.gz"

        assert ImageDataFormat.get_image_data_format(dcm_fname) == ImageDataFormat.dicom
        assert ImageDataFormat.get_image_data_format(dcm_dir) == ImageDataFormat.dicom
        assert ImageDataFormat.get_image_data_format(nifti_fname) == ImageDataFormat.nifti
25 |
--------------------------------------------------------------------------------
/tests/core/io/test_format_io_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 |
4 | import nibabel.testing as nib_testing
5 | import pydicom.data as pydd
6 |
7 | import dosma as dm
8 | import dosma.core.io.format_io_utils as fio_utils
9 | from dosma.core.io.dicom_io import DicomReader, DicomWriter
10 | from dosma.core.io.format_io import ImageDataFormat
11 | from dosma.core.io.nifti_io import NiftiReader, NiftiWriter
12 |
13 |
class TestFormatIOUtils(unittest.TestCase):
    """Tests for :mod:`dosma.core.io.format_io_utils`."""

    def test_get_reader_writer(self):
        # Each format maps to its dedicated reader/writer implementation.
        assert isinstance(fio_utils.get_reader(ImageDataFormat.nifti), NiftiReader)
        assert isinstance(fio_utils.get_reader(ImageDataFormat.dicom), DicomReader)

        assert isinstance(fio_utils.get_writer(ImageDataFormat.nifti), NiftiWriter)
        assert isinstance(fio_utils.get_writer(ImageDataFormat.dicom), DicomWriter)

    def test_convert_image_data_format(self):
        dcm_dir = "/path/to/dcm/data"
        dcm_fname = "data.dcm"
        nifti_fname = "data.nii.gz"

        # Converting to the same format is a no-op; dicom dir -> nifti file.
        assert fio_utils.convert_image_data_format(dcm_dir, ImageDataFormat.dicom) == dcm_dir
        assert (
            fio_utils.convert_image_data_format(dcm_dir, ImageDataFormat.nifti)
            == f"{dcm_dir}.nii.gz"
        )

        assert fio_utils.convert_image_data_format(dcm_fname, ImageDataFormat.dicom) == dcm_fname
        # TODO: Activate when support for single dicom files is added
        # assert fio_utils.convert_image_data_format(dcm_fname, ImageDataFormat.nifti) == nifti_fname  # noqa: E501

        assert (
            fio_utils.convert_image_data_format(nifti_fname, ImageDataFormat.nifti) == nifti_fname
        )
        # Nifti file -> dicom directory (extension is stripped).
        assert fio_utils.convert_image_data_format(nifti_fname, ImageDataFormat.dicom) == "data"

    def test_get_filepath_variations(self):
        dcm_dir = "/path/to/dcm/data"
        nifti_fname = "data.nii.gz"

        fp_variations = fio_utils.get_filepath_variations(dcm_dir)
        assert set(fp_variations) == {dcm_dir, f"{dcm_dir}.nii.gz"}

        fp_variations = fio_utils.get_filepath_variations(nifti_fname)
        assert set(fp_variations) == {nifti_fname, "data"}

    def test_generic_load(self):
        nib_data = os.path.join(nib_testing.data_path, "example4d.nii.gz")
        vol = fio_utils.generic_load(nib_data)
        expected = NiftiReader().load(nib_data)
        assert vol.is_identical(expected)

        # BUG FIX: ``os.path.join`` with a single argument was a no-op wrapper.
        dcm_data = pydd.get_testdata_file("MR_small.dcm")
        vol = fio_utils.generic_load(dcm_data)[0]
        expected = DicomReader().load(dcm_data)[0]
        assert vol.is_identical(expected)
62 |
63 |
def test_read():
    """``dm.read`` must match the corresponding reader's ``load`` output."""
    nib_data = os.path.join(nib_testing.data_path, "example4d.nii.gz")
    expected = NiftiReader().load(nib_data)

    # Format inferred, given by string name, and given by enum -- all equal.
    assert dm.read(nib_data).is_identical(expected)
    assert dm.read(nib_data, "nifti").is_identical(expected)
    assert dm.read(nib_data, ImageDataFormat.nifti).is_identical(expected)

    # BUG FIX: ``os.path.join`` with a single argument was a no-op wrapper;
    # redundant repeated reassignments of the same paths were also removed.
    dcm_data = pydd.get_testdata_file("MR_small.dcm")
    expected = DicomReader().load(dcm_data)[0]

    assert dm.read(dcm_data)[0].is_identical(expected)
    assert dm.read(dcm_data, unpack=True).is_identical(expected)
    assert dm.read(dcm_data, group_by="EchoNumbers")[0].is_identical(expected)
94 |
95 |
def test_write(tmpdir):
    """Round-trip ``dm.write`` for dicom and nifti outputs."""
    source_file = pydd.get_testdata_file("MR_small.dcm")
    reader = DicomReader(group_by=None)
    original = reader.load(source_file)[0]

    # Write as dicom with explicit sorting, reload, and compare.
    dicom_out_dir = tmpdir / "test_save_sort_by"
    dm.write(original, dicom_out_dir, sort_by="InstanceNumber")
    assert reader.load(dicom_out_dir)[0].is_identical(original)

    # Write as nifti (format inferred from the extension).
    nifti_reader = NiftiReader()
    nifti_out_file = tmpdir / "test_save_sort_by.nii.gz"
    dm.write(original, nifti_out_file)
    assert nifti_reader.load(nifti_out_file).is_identical(original)

    # Write as nifti with the format given explicitly.
    nifti_reader = NiftiReader()
    nifti_out_file = tmpdir / "test_save_sort_by-nifti.nii.gz"
    dm.write(original, nifti_out_file, "nifti")
    assert nifti_reader.load(nifti_out_file).is_identical(original)
117 |
--------------------------------------------------------------------------------
/tests/core/io/test_inter_io.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 |
4 | from dosma.core.io.dicom_io import DicomReader, DicomWriter
5 | from dosma.core.io.format_io import ImageDataFormat
6 | from dosma.core.io.nifti_io import NiftiReader, NiftiWriter
7 |
8 | from ... import util as ututils
9 |
10 |
class TestInterIO(unittest.TestCase):
    """Cross-format I/O tests: dicom- and nifti-loaded volumes must agree."""

    nr = NiftiReader()
    nw = NiftiWriter()

    dr = DicomReader()
    dw = DicomWriter()

    @staticmethod
    def compare_vols(vol1, vol2):
        # Volumes must match in both geometry and voxel values.
        assert vol1.is_same_dimensions(vol2)
        assert (vol1.volume == vol2.volume).all()

    @unittest.skipIf(not ututils.is_data_available(), "unittest data is not available")
    def test_dcm_nifti_load(self):
        """Verify that volumes loaded from nifti or dicom are identical"""
        for dp_ind, dp in enumerate(ututils.SCAN_DIRPATHS):
            curr_scan = ututils.SCANS[dp_ind]
            curr_scan_info = ututils.SCANS_INFO[curr_scan]  # noqa

            # Assumes the nifti and dicom path lists are index-aligned
            # (entry i in both lists corresponds to echo i).
            nifti_filepaths = ututils.get_read_paths(dp, ImageDataFormat.nifti)
            dicom_filepaths = ututils.get_read_paths(dp, ImageDataFormat.dicom)

            for i in range(len(nifti_filepaths)):
                nfp = nifti_filepaths[i]
                dfp = dicom_filepaths[i]

                nifti_vol = self.nr.load(nfp)
                dicom_vol = self.dr.load(dfp)[0]
                # Reorient the dicom volume so arrays are directly comparable.
                dicom_vol.reformat(nifti_vol.orientation, inplace=True)

                # assert nifti_vol.is_same_dimensions(dicom_vol)
                assert (nifti_vol.volume == dicom_vol.volume).all()

    @unittest.skipIf(not ututils.is_data_available(), "unittest data is not available")
    def test_dcm_to_nifti(self):
        """Dicom volumes round-tripped through NiftiWriter must match ground truth."""
        for dp_ind, dp in enumerate(ututils.SCAN_DIRPATHS):
            curr_scan = ututils.SCANS[dp_ind]
            curr_scan_info = ututils.SCANS_INFO[curr_scan]  # noqa

            dicom_path = ututils.get_dicoms_path(dp)
            nifti_read_paths = ututils.get_read_paths(dp, ImageDataFormat.nifti)
            nifti_write_path = ututils.get_write_path(dp, ImageDataFormat.nifti)

            # Load ground truth (nifti format)
            gt_nifti_vols = []
            for rfp in nifti_read_paths:
                gt_nifti_vols.append(self.nr.load(rfp))

            # DicomReader to read multiple echo volumes from scan sequence.
            dicom_loaded_vols = self.dr.load(dicom_path)

            # Get dicom and itksnap in same orientation
            o = dicom_loaded_vols[0].orientation
            for v in dicom_loaded_vols[1:]:
                assert o == v.orientation, (
                    "Orientations of multiple dicom volumes loaded from single folder "
                    "should be identical"
                )

            for v in gt_nifti_vols:
                v.reformat(o, inplace=True)

            # Compare each dicom echo against its nifti ground truth.
            for i in range(len(dicom_loaded_vols)):
                dcm_vol = dicom_loaded_vols[i]
                nifti_vol = gt_nifti_vols[i]
                echo_num = i + 1

                assert (dcm_vol.volume == nifti_vol.volume).all(), (
                    "e%d volumes (dcm, nifti-ground truth) should be identical" % echo_num
                )

            # Use NiftiWriter to save volumes (read in as dicoms)
            for i in range(len(dicom_loaded_vols)):
                dcm_vol = dicom_loaded_vols[i]
                echo_num = i + 1

                nifti_write_filepath = os.path.join(nifti_write_path, "e%d.nii.gz" % echo_num)
                self.nw.save(dcm_vol, nifti_write_filepath)

                # check if saved versions of volumes load correctly
                e_loaded = self.nr.load(nifti_write_filepath)
                e_dcm = dcm_vol
                e_gt_nifti = gt_nifti_vols[i]

                # assert e_loaded.is_same_dimensions(e_gt_nifti), (
                #     "Mismatched dimensions: %s echo-%d" % (curr_scan, i+1)
                # )

                assert (e_dcm.volume == e_gt_nifti.volume).all()
                assert (e_loaded.volume == e_gt_nifti.volume).all()
101 |
102 |
103 | if __name__ == "__main__":
104 | unittest.main()
105 |
--------------------------------------------------------------------------------
/tests/core/io/test_nifti_io.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 |
4 | import nibabel as nib
5 | import nibabel.testing as nib_testing
6 | import numpy as np
7 |
8 | from dosma.core.io.format_io import ImageDataFormat
9 | from dosma.core.io.nifti_io import NiftiReader, NiftiWriter
10 |
11 | from ... import util as ututils
12 |
13 |
class TestNiftiIO(unittest.TestCase):
    """Tests for :class:`NiftiReader` / :class:`NiftiWriter`."""

    nr = NiftiReader()
    nw = NiftiWriter()

    # Format under test for path resolution helpers.
    data_format = ImageDataFormat.nifti

    @unittest.skipIf(not ututils.is_data_available(), "unittest data is not available")
    def test_nifti_read(self):
        for dp in ututils.SCAN_DIRPATHS:
            dicoms_path = ututils.get_dicoms_path(dp)
            read_filepaths = ututils.get_read_paths(dp, self.data_format)

            # All known nifti files should load without error.
            for read_filepath in read_filepaths:
                _ = self.nr.load(read_filepath)

            # Nonexistent file path -> FileNotFoundError.
            with self.assertRaises(FileNotFoundError):
                _ = self.nr.load(os.path.join(dp, "bleh"))

            # Directory path (not a file) -> FileNotFoundError.
            with self.assertRaises(FileNotFoundError):
                _ = self.nr.load(dp)

            # Non-nifti extension -> ValueError.
            with self.assertRaises(ValueError):
                _ = self.nr.load(os.path.join(dicoms_path, "I0002.dcm"))

    @unittest.skipIf(not ututils.is_data_available(), "unittest data is not available")
    def test_nifti_write(self):
        for dp in ututils.SCAN_DIRPATHS:
            read_filepaths = ututils.get_read_paths(dp, self.data_format)
            save_dirpath = ututils.get_write_path(dp, self.data_format)

            # Saving each loaded volume should succeed.
            for rfp in read_filepaths:
                save_filepath = os.path.join(save_dirpath, os.path.basename(rfp))
                mv = self.nr.load(rfp)
                self.nw.save(mv, save_filepath)

            # cannot save with extensions other than nii or nii.gz
            with self.assertRaises(ValueError):
                self.nw.save(mv, os.path.join(ututils.TEMP_PATH, "eg.dcm"))

    def test_nifti_nib(self):
        """Test with nibabel sample data."""
        filepath = os.path.join(nib_testing.data_path, "example4d.nii.gz")
        mv_nib = nib.load(filepath)

        nr = NiftiReader()
        mv = nr(filepath)

        # Data and geometry must match nibabel's (affine up to small tolerance).
        assert mv.shape == mv_nib.shape
        assert np.all(mv.A == mv_nib.get_fdata())
        assert np.allclose(mv.affine, mv_nib.affine, atol=1e-4)

        out_path = os.path.join(ututils.TEMP_PATH, "nifti_nib_example.nii.gz")
        nw = NiftiWriter()
        nw(mv, out_path)

        # Round-trip through NiftiWriter must preserve voxel data exactly.
        mv_nib2 = nib.load(out_path)
        assert np.all(mv_nib2.get_fdata() == mv_nib.get_fdata())

    def test_state(self):
        # state_dict() / load_state_dict() round-trip for reader and writer.
        nr1 = NiftiReader()
        state_dict = nr1.state_dict()
        state_dict = {k: "foo" for k in state_dict}

        nr2 = NiftiReader()
        nr2.load_state_dict(state_dict)
        for k in state_dict:
            assert getattr(nr2, k) == "foo"

        nw1 = NiftiWriter()
        state_dict = nw1.state_dict()
        state_dict = {k: "bar" for k in state_dict}

        nw2 = NiftiWriter()
        nw2.load_state_dict(state_dict)
        for k in state_dict:
            assert getattr(nw2, k) == "bar"

        # Unknown keys are rejected.
        with self.assertRaises(AttributeError):
            nw2.load_state_dict({"foobar": "delta"})
93 |
94 |
95 | if __name__ == "__main__":
96 | unittest.main()
97 |
--------------------------------------------------------------------------------
/tests/core/test_device.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import numpy as np
4 |
5 | from dosma.core.device import Device, cpu_device, get_device, to_device
6 | from dosma.core.med_volume import MedicalVolume
7 |
8 | from ..util import requires_packages
9 |
10 |
class TestDevice(unittest.TestCase):
    """Behavioral checks for :class:`Device` and device utilities."""

    def test_basic(self):
        # The CPU device is reachable via -1 or "cpu" and backs numpy.
        assert Device(-1) == cpu_device
        assert Device("cpu") == cpu_device
        assert cpu_device.xp == np

        dev = Device(-1)
        assert int(dev) == -1
        assert dev.index == -1
        assert dev.id == -1
        assert dev == -1
        assert dev.cpdevice is None

        # Two independently constructed CPU devices compare equal.
        assert Device(-1) == dev

    @requires_packages("cupy")
    def test_cupy(self):
        import cupy as cp

        dev = Device(0)
        assert dev.type == "cuda"
        assert dev.index == 0
        assert int(dev) == 0
        assert dev.xp == cp
        assert dev.cpdevice == cp.cuda.Device(0)

        # Construction directly from a cupy device is equivalent.
        dev = Device(cp.cuda.Device(0))
        assert dev.type == "cuda"
        assert dev.index == 0
        assert dev.cpdevice == cp.cuda.Device(0)

    @requires_packages("sigpy")
    def test_sigpy(self):
        import sigpy as sp

        assert Device(-1) == sp.cpu_device
        assert Device(sp.cpu_device) == sp.cpu_device

        dev = Device(-1)
        assert dev == sp.cpu_device
        assert dev.spdevice == sp.cpu_device

    @requires_packages("sigpy", "cupy")
    def test_sigpy_cupy(self):
        import sigpy as sp

        # GPU 0 round-trips through sigpy's device abstraction.
        assert Device(0) == sp.Device(0)
        assert Device(0).spdevice == sp.Device(0)

    @requires_packages("torch")
    def test_torch(self):
        import torch

        cpu_torch = torch.device("cpu")
        assert Device(cpu_torch) == cpu_device

        dev = Device(-1)
        assert dev == cpu_torch
        assert dev.ptdevice == cpu_torch

    def test_to_device(self):
        arr = np.ones((3, 3, 3))
        mv = MedicalVolume(arr, affine=np.eye(4))

        # Raw arrays and MedicalVolumes both move to the CPU device.
        assert get_device(to_device(arr, -1)) == cpu_device
        assert get_device(to_device(mv, -1)) == cpu_device
84 |
85 |
86 | if __name__ == "__main__":
87 | unittest.main()
88 |
--------------------------------------------------------------------------------
/tests/core/test_registration.py:
--------------------------------------------------------------------------------
1 | import multiprocessing as mp
2 | import os
3 | import shutil
4 | import unittest
5 | from functools import partial
6 |
7 | import numpy as np
8 |
9 | import dosma.file_constants as fc
10 | from dosma.core.med_volume import MedicalVolume
11 | from dosma.core.orientation import to_affine
12 | from dosma.core.registration import apply_warp, register
13 |
14 | from .. import util
15 |
16 |
def _generate_translated_vols(n=3):
    """Generate mock data that is translated diagonally by 1 pixel.

    Args:
        n (int): Number of volumes to generate.

    Returns:
        list: ``MedicalVolume`` objects sharing one affine, each with a
            20x20 square of ones shifted diagonally by its index.
    """
    affine = to_affine(("SI", "AP"), (0.3, 0.3, 0.5))
    volumes = []
    for shift in range(n):
        data = np.zeros((250, 250, 10))
        lo, hi = 15 + shift, 35 + shift
        data[lo:hi, lo:hi] = 1
        volumes.append(MedicalVolume(data, affine))
    return volumes
26 |
27 |
class TestRegister(util.TempPathMixin):
    """Tests for :func:`register` (elastix-based registration)."""

    @unittest.skipIf(not util.is_elastix_available(), "elastix is not available")
    def test_multiprocessing(self):
        """Multiprocess registration must reproduce the single-process result."""
        mvs = _generate_translated_vols()
        data_dir = os.path.join(self.data_dirpath, "test-register-mp")

        # Reference run: single process (num_workers=0).
        out_path = os.path.join(data_dir, "expected")
        _, expected = register(
            mvs[0],
            mvs[1:],
            fc.ELASTIX_AFFINE_PARAMS_FILE,
            out_path,
            num_workers=0,
            num_threads=2,
            return_volumes=True,
            rtype=tuple,
            show_pbar=True,
        )

        # Same registration with worker processes.
        out_path = os.path.join(data_dir, "out")
        _, out = register(
            mvs[0],
            mvs[1:],
            fc.ELASTIX_AFFINE_PARAMS_FILE,
            out_path,
            num_workers=util.num_workers(),
            num_threads=2,
            return_volumes=True,
            rtype=tuple,
            show_pbar=True,
        )

        # Registered volumes must match the single-process reference.
        for vol, exp in zip(out, expected):
            assert np.allclose(vol.volume, exp.volume)

        shutil.rmtree(data_dir)

    def test_complex(self):
        """Smoke test: sequential registration with masks at every step."""
        mvs = _generate_translated_vols()
        # All-ones mask covering the full volume.
        mask = mvs[0]._partial_clone(volume=np.ones(mvs[0].shape))
        data_dir = os.path.join(self.data_dirpath, "test-register-complex-sequential-moving-masks")

        out_path = os.path.join(data_dir, "expected")
        _ = register(
            mvs[0],
            mvs[1:],
            [fc.ELASTIX_AFFINE_PARAMS_FILE, fc.ELASTIX_AFFINE_PARAMS_FILE],
            out_path,
            target_mask=mask,
            use_mask=[True, True],
            sequential=True,
            num_workers=0,
            num_threads=2,
            return_volumes=True,
            rtype=tuple,
            show_pbar=True,
        )

        shutil.rmtree(data_dir)
87 |
88 |
class TestApplyWarp(util.TempPathMixin):
    @unittest.skipIf(not util.is_elastix_available(), "elastix is not available")
    def test_multiprocessing(self):
        """Verify that multiprocessing is compatible with ``apply_warp``."""
        # Produce a usable transform file by registering one volume to another.
        volumes = _generate_translated_vols(n=4)
        warp_dir = os.path.join(self.data_dirpath, "test-apply-warp")
        registrations, _ = register(
            volumes[0],
            volumes[1],
            fc.ELASTIX_AFFINE_PARAMS_FILE,
            warp_dir,
            num_workers=util.num_workers(),
            num_threads=2,
            return_volumes=False,
            rtype=tuple,
        )
        registration = registrations[0]
        moving = volumes[2:]

        # Reference result: warp each volume on the main thread.
        expected = [apply_warp(v, out_registration=registration) for v in moving]

        # Workers spawned inside apply_warp itself.
        n_workers = min(len(moving), util.num_workers())
        results = apply_warp(moving, out_registration=registration, num_workers=n_workers)
        for result, reference in zip(results, expected):
            assert np.allclose(result.volume, reference.volume)

        # Workers spawned externally via a process pool.
        warp_fn = partial(apply_warp, out_registration=registration)
        with mp.Pool(n_workers) as pool:
            results = pool.map(warp_fn, moving)

        for result, reference in zip(results, expected):
            assert np.allclose(result.volume, reference.volume)

        shutil.rmtree(warp_dir)
128 |
129 |
# Allow running this test module directly (e.g. ``python <file>``).
if __name__ == "__main__":
    unittest.main()
132 |
--------------------------------------------------------------------------------
/tests/models/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/models/__init__.py
--------------------------------------------------------------------------------
/tests/models/test_stanford_qdess.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import numpy as np
4 |
5 | from dosma.core.med_volume import MedicalVolume
6 | from dosma.core.orientation import to_affine
7 | from dosma.models.stanford_qdess import StanfordQDessUNet2D
8 |
9 | from .. import util as ututils
10 |
11 |
class TestStanfordQDessUNet2D(unittest.TestCase):
    def test_input_shape(self):
        """Test support for both 3D and 4D inputs."""
        # 3D input: (rows, cols, slices).
        arr = np.ones((256, 256, 2))
        volume = MedicalVolume(
            arr,
            to_affine(("SI", "AP", "LR")),
            headers=ututils.build_dummy_headers(arr.shape[2], {"EchoTime": 2}),
        )
        model = StanfordQDessUNet2D(volume.shape[:2] + (1,), weights_path=None)
        masks = model.generate_mask(volume)
        for category in ("pc", "fc", "tc", "men"):
            assert category in masks
        assert masks["pc"].headers().shape == (1, 1, 2)
        del model

        # 4D input: (rows, cols, slices, echoes), one header stack per echo.
        arr = np.stack([np.ones((256, 256, 2)), 2 * np.ones((256, 256, 2))], axis=-1)
        headers = np.stack(
            [
                ututils.build_dummy_headers(arr.shape[2], {"EchoTime": 2}),
                ututils.build_dummy_headers(arr.shape[2], {"EchoTime": 10}),
            ],
            axis=-1,
        )
        volume = MedicalVolume(arr, to_affine(("SI", "AP", "LR")), headers=headers)
        model = StanfordQDessUNet2D(volume.shape[:2] + (1,), weights_path=None)
        masks = model.generate_mask(volume)
        for category in ("pc", "fc", "tc", "men"):
            assert masks[category].ndim == 3
        assert masks["pc"].headers().shape == (1, 1, 2)

    def test_call(self):
        """Calling the model should be equivalent to ``generate_mask``."""
        arr = np.ones((256, 256, 2))
        volume = MedicalVolume(
            arr,
            to_affine(("SI", "AP", "LR")),
            headers=ututils.build_dummy_headers(arr.shape[2], {"EchoTime": 2}),
        )
        model = StanfordQDessUNet2D(volume.shape[:2] + (1,), weights_path=None)
        out_call = model(volume)
        out_mask = model.generate_mask(volume)
        for key in out_call:
            assert np.all(out_call[key].volume == out_mask[key].volume)
        del model
54 |
--------------------------------------------------------------------------------
/tests/models/test_util.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from dosma.models import util as m_util
4 |
5 |
class TestUtil(unittest.TestCase):
    def test_aliases_exist(self):
        """Every supported segmentation model has at least one non-empty alias."""
        # FIX: removed unused ``alias_to_model`` (and its noqa) and corrected the
        # copy-pasted comment — overlap checking lives in test_overlapping_aliases.
        models = m_util.__SUPPORTED_MODELS__

        for m in models:
            aliases = m.ALIASES

            # All supported models must have at least 1 alias that is not "".
            valid_alias = len(aliases) >= 1 and all(x != "" for x in aliases)

            assert valid_alias, "%s does not have valid aliases" % m

    def test_overlapping_aliases(self):
        """No two supported segmentation models share an alias."""
        models = m_util.__SUPPORTED_MODELS__
        alias_to_model = {}

        for m in models:
            curr_aliases = set(alias_to_model.keys())
            aliases = set(m.ALIASES)

            assert aliases.isdisjoint(curr_aliases), "%s alias(es) already in use" % str(
                aliases.intersection(curr_aliases)
            )

            for a in aliases:
                alias_to_model[a] = m
35 |
--------------------------------------------------------------------------------
/tests/resources/preferences.yml:
--------------------------------------------------------------------------------
1 | data:
2 | format: 'nifti'
3 | fitting:
4 | r2.threshold: 0.9 # range [0, 1)
5 | registration:
6 | mask:
7 | dilation.rate: 9.0
8 | dilation.threshold: 0.0001
9 | segmentation:
10 | batch.size: 16
11 | visualization:
12 | matplotlib:
13 | rcParams:
14 | savefig.dpi: 200
15 | font.size: 16
16 | savefig.format: 'png'
17 | backend: 'Agg'
18 | use.vmax: False
19 |
20 | testing1:
21 | foo: 10
22 | testing2:
23 | foo: 12
24 |
--------------------------------------------------------------------------------
/tests/scan_sequences/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/scan_sequences/__init__.py
--------------------------------------------------------------------------------
/tests/scan_sequences/mri/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/scan_sequences/mri/__init__.py
--------------------------------------------------------------------------------
/tests/scan_sequences/mri/test_mapss.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | import warnings
4 |
5 | import numpy as np
6 |
7 | from dosma.core.io.nifti_io import NiftiWriter
8 | from dosma.core.med_volume import MedicalVolume
9 | from dosma.scan_sequences.mri import Mapss
10 | from dosma.tissues.femoral_cartilage import FemoralCartilage
11 |
12 | from ... import util
13 |
14 | SEGMENTATION_WEIGHTS_FOLDER = os.path.join(
15 | os.path.dirname(__file__), "../../weights/iwoai-2019-t6-normalized"
16 | )
17 | SEGMENTATION_MODEL = "iwoai-2019-t6-normalized"
18 |
19 | # Path to manual segmentation mask
20 | MANUAL_SEGMENTATION_MASK_PATH = os.path.join(
21 | util.get_scan_dirpath(Mapss.NAME), "misc/fc_manual.nii.gz"
22 | )
23 |
24 |
class MapssTest(util.ScanTest):
    # Scan class under test; used by the util.ScanTest helpers.
    SCAN_TYPE = Mapss

    def _generate_mock_data(self, shape=None, ts=None, metadata=True):
        """Generate mock monoexponential data for the MAPSS sequence.

        The mock data is overly simplified in that the t1rho and t2 maps
        are identical. This is to simplify the data generation process.

        Args:
            shape (tuple, optional): Spatial shape of each echo volume.
                Defaults to ``(10, 10, 10)``.
            ts (Sequence[float], optional): The 7 echo times. Defaults to the
                standard MAPSS echo times.
            metadata (bool, optional): If ``True``, set ``EchoTime`` and
                ``EchoNumber`` metadata on each generated volume.

        Returns:
            tuple: ``(ys, ts, a, t1rho, t2)`` — the echo volumes, echo times,
            multiplicative constant, and the (identical) t1rho/t2 maps.
        """
        if shape is None:
            shape = (10, 10, 10)
        if ts is None:
            ts = [0, 10, 12.847, 25.695, 40, 51.39, 80]
        else:
            assert len(ts) == 7

        a = 1.0
        # t2 and t1rho deliberately reference the same array (see docstring).
        t2 = t1rho = np.random.rand(*shape) * 80 + 0.1
        _, ys, _, _ = util.generate_monoexp_data(shape=shape, x=ts, a=a, b=-1 / t1rho)

        if metadata:
            with warnings.catch_warnings():
                # Suppress warnings raised when force-setting metadata on
                # volumes without full headers.
                warnings.simplefilter("ignore")
                for idx, (y, t) in enumerate(zip(ys, ts)):
                    y.set_metadata("EchoTime", t, force=True)
                    y.set_metadata("EchoNumber", idx + 1, force=True)

        return ys, ts, a, t1rho, t2

    def test_basic(self):
        """Construction stores echo times/volumes; stacked arrays are rejected."""
        ys, ts, _, _, _ = self._generate_mock_data()
        scan = Mapss(ys)
        assert scan.echo_times == ts
        assert scan.volumes == ys

        with self.assertRaises(ValueError):
            _ = Mapss(np.stack(ys, axis=-1))

    def test_generate_t1_rho_map(self):
        """T1-rho fitting is identical with/without workers and with a mask."""
        ys, _, _, _, _ = self._generate_mock_data()
        scan = Mapss(ys)

        mask = MedicalVolume(np.ones(ys[0].shape), np.eye(4))
        mask_path = os.path.join(self.data_dirpath, "test_t1rho_mask.nii.gz")
        NiftiWriter().save(mask, mask_path)

        tissue = FemoralCartilage()
        map1 = scan.generate_t1_rho_map(tissue)
        assert map1 is not None

        tissue.set_mask(mask)
        map2 = scan.generate_t1_rho_map(tissue, num_workers=util.num_workers())
        assert map2 is not None
        assert map1.volumetric_map.is_identical(map2.volumetric_map)

        # NOTE(review): a MedicalVolume (not the saved file path) is passed as
        # ``mask_path`` — presumably the API accepts both; confirm.
        map2 = scan.generate_t1_rho_map(tissue, mask_path=mask, num_workers=util.num_workers())
        assert map2 is not None
        assert map1.volumetric_map.is_identical(map2.volumetric_map)

    def test_generate_t2_map(self):
        """T2 fitting is identical with/without workers and with a mask."""
        ys, _, _, _, _ = self._generate_mock_data()
        scan = Mapss(ys)

        mask = MedicalVolume(np.ones(ys[0].shape), np.eye(4))
        mask_path = os.path.join(self.data_dirpath, "test_t2_mask.nii.gz")
        NiftiWriter().save(mask, mask_path)

        tissue = FemoralCartilage()
        map1 = scan.generate_t2_map(tissue)
        assert map1 is not None

        tissue.set_mask(mask)
        map2 = scan.generate_t2_map(tissue, num_workers=util.num_workers())
        assert map2 is not None
        assert map1.volumetric_map.is_identical(map2.volumetric_map)

        # NOTE(review): as above, a MedicalVolume is passed as ``mask_path``.
        map2 = scan.generate_t2_map(tissue, mask_path=mask, num_workers=util.num_workers())
        assert map2 is not None
        assert map1.volumetric_map.is_identical(map2.volumetric_map)

    def test_intraregister(self):
        """Intra-registration replaces the stored volumes."""
        ys, _, _, _, _ = self._generate_mock_data()
        scan = Mapss(ys)
        scan.intraregister()
        assert scan.volumes is not ys

    def test_save_load(self):
        """Saving writes one NIfTI per echo; loading round-trips the scan."""
        ys, _, _, _, _ = self._generate_mock_data()
        scan = Mapss(ys)

        save_dir = os.path.join(self.data_dirpath, "test_save_load")
        pik_file = scan.save(save_dir, save_custom=True)
        assert os.path.isfile(pik_file)
        assert all(
            os.path.isfile(os.path.join(save_dir, "volumes", f"echo-{idx:03d}.nii.gz"))
            for idx in range(7)
        )

        scan2 = Mapss.load(pik_file)
        for v1, v2 in zip(scan.volumes, scan2.volumes):
            assert v1.is_identical(v2)
        assert scan2.echo_times == scan.echo_times

    @unittest.skipIf(
        not util.is_data_available() or not util.is_elastix_available(),
        "unittest data or elastix is not available",
    )
    def test_cmd_line(self):
        """Exercise the MAPSS command-line interface end-to-end."""
        # Intraregister
        cmdline_str = "--d %s --s %s mapss intraregister" % (self.dicom_dirpath, self.data_dirpath)
        self.__cmd_line_helper__(cmdline_str)

        # Estimate T1-rho for femoral cartilage.
        cmdline_str = "--l %s mapss --fc t1_rho --mask %s" % (
            self.data_dirpath,
            MANUAL_SEGMENTATION_MASK_PATH,
        )
        self.__cmd_line_helper__(cmdline_str)

        # Generate T2 map for femoral cartilage, tibial cartilage, and meniscus via command line
        cmdline_str = "--l %s mapss --fc t2 --mask %s" % (
            self.data_dirpath,
            MANUAL_SEGMENTATION_MASK_PATH,
        )
        self.__cmd_line_helper__(cmdline_str)
156 |
--------------------------------------------------------------------------------
/tests/scan_sequences/mri/test_qdess.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 | import warnings
4 |
5 | import numpy as np
6 | from pydicom.tag import Tag
7 |
8 | from dosma.core.med_volume import MedicalVolume
9 | from dosma.core.quant_vals import QuantitativeValue
10 | from dosma.models.util import get_model
11 | from dosma.scan_sequences.mri.qdess import QDess
12 | from dosma.tissues.femoral_cartilage import FemoralCartilage
13 |
14 | import keras.backend as K
15 |
16 | from ... import util
17 |
18 | SEGMENTATION_WEIGHTS_FOLDER = os.path.join(
19 | os.path.dirname(__file__), "../../../weights/iwoai-2019-t6-normalized"
20 | )
21 | SEGMENTATION_MODEL = "iwoai-2019-t6-normalized"
22 |
23 |
class QDessTest(util.ScanTest):
    # Scan class under test; used by the util.ScanTest helpers.
    SCAN_TYPE = QDess

    def _generate_mock_data(self, shape=None, metadata=True):
        """Generates arbitrary mock data for QDess sequence.

        Metadata values were extracted from a real qDESS sequence.

        Args:
            shape (tuple, optional): Spatial shape of each echo volume.
                Defaults to ``(10, 10, 10)``.
            metadata (bool, optional): If ``True``, populate DICOM metadata
                (echo times, TR, flip angle, gradient fields) on each echo.

        Returns:
            tuple: ``(ys, ts, None)`` — the two echo volumes and echo times.
        """
        if shape is None:
            shape = (10, 10, 10)
        e1 = MedicalVolume(np.random.rand(*shape) * 80 + 0.1, affine=np.eye(4))
        e2 = MedicalVolume(np.random.rand(*shape) * 40 + 0.1, affine=np.eye(4))
        ys = [e1, e2]
        ts = [8, 42]
        if metadata:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                for idx, (y, t) in enumerate(zip(ys, ts)):
                    y.set_metadata("EchoTime", t, force=True)
                    y.set_metadata("EchoNumber", idx + 1, force=True)
                    y.set_metadata("RepetitionTime", 25.0, force=True)
                    y.set_metadata("FlipAngle", 30.0, force=True)
                    y.set_metadata(Tag(0x001910B6), 3132.0, force=True)  # gradient time
                    y.set_metadata(Tag(0x001910B7), 1560.0, force=True)  # gradient area

        return ys, ts, None

    def test_basic(self):
        """Construction keeps a reference DICOM; wrong echo count is rejected."""
        ys, _, _ = self._generate_mock_data()
        scan = QDess(ys)
        assert scan.ref_dicom == ys[0].headers(flatten=True)[0]

        with self.assertRaises(ValueError):
            _ = QDess(ys + ys)

    def test_calc_rss(self):
        """Root-sum-of-squares equals sqrt(e1**2 + e2**2)."""
        ys, _, _ = self._generate_mock_data()
        scan = QDess(ys)
        rss = scan.calc_rss()

        assert np.allclose(rss.A, np.sqrt(ys[0] ** 2 + ys[1] ** 2).A)

    def test_generate_t2_map(self):
        """T2 fitting returns a QuantitativeValue."""
        ys, _, _ = self._generate_mock_data()
        scan = QDess(ys)

        tissue = FemoralCartilage()
        t2 = scan.generate_t2_map(tissue)
        assert isinstance(t2, QuantitativeValue)

    def test_save_load(self):
        """Saving writes one NIfTI per echo; loading round-trips the scan."""
        ys, _, _ = self._generate_mock_data()
        scan = QDess(ys)

        save_dir = os.path.join(self.data_dirpath, "test_save_load")
        pik_file = scan.save(save_dir, save_custom=True)
        assert os.path.isfile(pik_file)
        assert all(
            os.path.isfile(os.path.join(save_dir, "volumes", f"echo-{idx:03d}.nii.gz"))
            for idx in range(2)
        )

        scan2 = QDess.load(pik_file)
        for v1, v2 in zip(scan.volumes, scan2.volumes):
            assert v1.is_identical(v2)

    @unittest.skipIf(not util.is_data_available(), "unittest data is not available")
    def test_segmentation_multiclass(self):
        """Test support for multiclass segmentation."""
        scan = self.SCAN_TYPE.from_dicom(self.dicom_dirpath, num_workers=util.num_workers())
        tissue = FemoralCartilage()
        # BUG FIX: a stray trailing comma previously made this statement a
        # 1-tuple expression; the call still executed, but the comma was a typo.
        tissue.find_weights(SEGMENTATION_WEIGHTS_FOLDER)
        dims = scan.get_dimensions()
        input_shape = (dims[0], dims[1], 1)
        model = get_model(
            SEGMENTATION_MODEL, input_shape=input_shape, weights_path=tissue.weights_file_path
        )
        scan.segment(model, tissue, use_rss=True)

        # This should call __del__ in KerasSegModel
        model = None
        K.clear_session()

    @unittest.skipIf(not util.is_data_available(), "unittest data is not available")
    def test_cmd_line(self):
        """Exercise the qDESS command-line interface end-to-end."""
        # Generate segmentation mask for femoral cartilage via command line.
        cmdline_str = (
            f"--d {self.dicom_dirpath} --s {self.data_dirpath} qdess --fc "
            f"segment --weights_dir {SEGMENTATION_WEIGHTS_FOLDER} "
            f"--model {SEGMENTATION_MODEL} --use_rss"
        )
        self.__cmd_line_helper__(cmdline_str)

        # Generate T2 map for femoral cartilage, tibial cartilage, and meniscus.
        cmdline_str = (
            f"--l {self.data_dirpath} qdess --fc t2 --suppress_fat " f"--suppress_fluid --beta 1.1"
        )
        self.__cmd_line_helper__(cmdline_str)
122 |
123 |
# Allow running this test module directly (e.g. ``python <file>``).
if __name__ == "__main__":
    unittest.main()
126 |
--------------------------------------------------------------------------------
/tests/scan_sequences/test_scan_io.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import numpy as np
4 | import pydicom
5 | from pydicom.data import get_testdata_file
6 |
7 | from dosma.scan_sequences.scan_io import ScanIOMixin
8 |
9 | from .. import util as ututils
10 |
11 |
class MockScanIOMixin(ScanIOMixin):
    """Mock class for the :cls:`ScanIOMixin`.

    This mixin will be used to load an MR slice hosted on pydicom: ``MR_small.dcm``.
    """

    # Scan identifier used by ScanIOMixin when saving/loading.
    NAME = "mock-scan-io"
    # No default DICOM split field for this mock scan.
    __DEFAULT_SPLIT_BY__ = None

    def __init__(self, volumes, foo="foo", bar="bar") -> None:
        # Loaded volumes and the record of how they were loaded; both are
        # expected to be serialized (see test_save_load).
        self.volumes = volumes
        self._from_file_args = {}

        # Public and single-underscore attributes: expected to be serialized.
        self.foo = foo
        self._bar = bar

        # Attributes that should not be serialized
        self._temp_path = "some/path"
        self.__some_attr__ = 1234
        self.__pydicom_header__ = pydicom.Dataset()

    @property
    def some_property(self):
        # Properties should not appear in __serializable_variables__.
        return "new/path"
36 |
37 |
class TestScanIOMixin(ututils.TempPathMixin):
    """Tests for :cls:`ScanIOMixin` via :cls:`MockScanIOMixin`."""

    def test_from_dicom(self):
        """``from_dicom`` accepts a path or list of paths and records load args."""
        mr_dcm = get_testdata_file("MR_small.dcm")
        # BUG FIX: ``pydicom.read_file`` is deprecated (removed in pydicom 3.0);
        # ``dcmread`` is the supported equivalent.
        fs = pydicom.dcmread(mr_dcm)
        arr = fs.pixel_array

        scan = MockScanIOMixin.from_dicom(mr_dcm, foo="foofoo", bar="barbar")
        assert len(scan.volumes) == 1
        assert np.all(scan.volumes[0] == arr[..., np.newaxis])
        assert scan.foo == "foofoo"
        assert scan._bar == "barbar"
        assert scan._from_file_args == {
            "dir_or_files": mr_dcm,
            "ignore_ext": False,
            "group_by": None,
            "_type": "dicom",
        }

        scan = MockScanIOMixin.from_dicom([mr_dcm], foo="foofoo", bar="barbar")
        assert len(scan.volumes) == 1
        assert np.all(scan.volumes[0] == arr[..., np.newaxis])
        assert scan.foo == "foofoo"
        assert scan._bar == "barbar"
        assert scan._from_file_args == {
            "dir_or_files": [mr_dcm],
            "ignore_ext": False,
            "group_by": None,
            "_type": "dicom",
        }

    def test_from_dict(self):
        """``from_dict`` round-trips state; unknown keys warn unless forced."""
        mr_dcm = get_testdata_file("MR_small.dcm")
        scan1 = MockScanIOMixin.from_dicom(mr_dcm)

        scan2 = MockScanIOMixin.from_dict(scan1.__dict__)
        assert scan1.__dict__.keys() == scan2.__dict__.keys()
        for k in scan1.__dict__.keys():
            assert scan1.__dict__[k] == scan2.__dict__[k]

        # Unknown keys are dropped with a warning by default...
        new_dict = dict(scan1.__dict__)
        new_dict["extra_bool_field"] = True
        with self.assertWarns(UserWarning):
            scan2 = MockScanIOMixin.from_dict(new_dict)
        assert not hasattr(scan2, "extra_bool_field")

        # ...but kept when ``force=True``.
        scan2 = MockScanIOMixin.from_dict(new_dict, force=True)
        assert hasattr(scan2, "extra_bool_field")

    def test_save_load(self):
        """Saved scans reload with identical volumes and attributes."""
        mr_dcm = get_testdata_file("MR_small.dcm")
        scan = MockScanIOMixin.from_dicom(mr_dcm)
        scan.foo = "foofoo"
        scan._bar = "barbar"

        vars = scan.__serializable_variables__()
        serializable = ("foo", "_bar", "volumes", "_from_file_args")
        not_serializable = ("_temp_path", "__some_attr__", "__pydicom_header__", "some_property")
        assert all(x in vars for x in serializable)
        assert all(x not in vars for x in not_serializable)

        save_dir = os.path.join(self.data_dirpath, "test_save_load")
        save_path = scan.save(save_dir, save_custom=True)
        assert os.path.isfile(save_path)

        # BUG FIX: these blocks previously asserted ``scan._bar`` — trivially
        # true since it was set above — instead of the loaded scan's attribute.
        scan_loaded = MockScanIOMixin.load(save_path)
        assert scan_loaded.volumes[0].is_identical(scan.volumes[0])
        assert scan_loaded.foo == "foofoo"
        assert scan_loaded._bar == "barbar"

        scan_loaded = MockScanIOMixin.load(save_dir)
        assert scan_loaded.volumes[0].is_identical(scan.volumes[0])
        assert scan_loaded.foo == "foofoo"
        assert scan_loaded._bar == "barbar"

        with self.assertRaises(FileNotFoundError):
            _ = MockScanIOMixin.load(os.path.join(save_dir, "some-path.pik"))

        # Missing volumes are re-loaded from _from_file_args with a warning.
        new_dict = dict(scan.__dict__)
        new_dict.pop("volumes")
        with self.assertWarns(UserWarning):
            scan_loaded = MockScanIOMixin.load(new_dict)
        assert scan_loaded.volumes[0].is_identical(scan.volumes[0])
        assert scan_loaded.foo == "foofoo"
        assert scan_loaded._bar == "barbar"

        # Backwards compatibility with how DOSMA wrote files versions<0.0.12
        new_dict = dict(scan.__dict__)
        new_dict.pop("volumes")
        new_dict.pop("_from_file_args")
        new_dict.update({"dicom_path": mr_dcm, "ignore_ext": False, "series_number": 7})
        with self.assertWarns(UserWarning):
            scan_loaded = MockScanIOMixin.load(new_dict)
        assert scan_loaded.volumes[0].is_identical(scan.volumes[0])
        assert scan_loaded.foo == "foofoo"
        assert scan_loaded._bar == "barbar"

        # No volumes and no way to reload them -> error.
        new_dict = dict(scan.__dict__)
        new_dict.pop("volumes")
        new_dict.pop("_from_file_args")
        with self.assertRaises(ValueError):
            _ = MockScanIOMixin.load(new_dict)

        save_dir = os.path.join(self.data_dirpath, "test_save_data")
        with self.assertWarns(DeprecationWarning):
            scan.save_data(save_dir)
143 |
--------------------------------------------------------------------------------
/tests/test_preferences.py:
--------------------------------------------------------------------------------
1 | """Test defaults and preferences
2 | Files tested: defaults.py
3 | """
4 |
5 | import collections
6 | import os
7 | import unittest
8 | from shutil import copyfile
9 |
10 | from dosma.defaults import _Preferences
11 |
12 | import nested_lookup
13 |
14 | # Duplicate the resources file
15 | _test_preferences_sample_filepath = os.path.join(
16 | os.path.dirname(os.path.abspath(__file__)), "resources/preferences.yml"
17 | )
18 | _test_preferences_duplicate_filepath = os.path.join(
19 | os.path.dirname(os.path.abspath(__file__)), "resources/.test.preferences.yml"
20 | )
21 |
22 |
class PreferencesMock(_Preferences):
    # Point the preferences singleton at the duplicated test file so tests
    # never touch the user's real preferences.
    _preferences_filepath = _test_preferences_duplicate_filepath
25 |
26 |
class TestPreferences(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Work on a throwaway copy of the sample preferences file.
        copyfile(_test_preferences_sample_filepath, _test_preferences_duplicate_filepath)

    @classmethod
    def tearDownClass(cls):
        if os.path.isfile(_test_preferences_duplicate_filepath):
            os.remove(_test_preferences_duplicate_filepath)

    def test_duplicate(self):
        """Test duplicate instances share same config."""
        first, second = PreferencesMock(), PreferencesMock()
        assert first.config == second.config, "Configs must be the same dictionary."

        # Repeat with the base _Preferences class to be certain.
        first, second = _Preferences(), _Preferences()
        assert first.config == second.config, "Configs must be the same dictionary."

    def test_get(self):
        prefs = PreferencesMock()

        # Unknown keys raise.
        with self.assertRaises(KeyError):
            prefs.get("sample-key")

        # Ambiguous keys (occurring more than once) raise.
        assert nested_lookup.get_occurrence_of_key(prefs.config, "foo") > 1
        with self.assertRaises(KeyError):
            prefs.get("foo")

        # Fully-qualified lookups succeed.
        prefs.get("testing1/foo")

    def test_set(self):
        prefs = PreferencesMock()

        # Unknown keys raise.
        with self.assertRaises(KeyError):
            prefs.set("sample-key", "bar")

        # Ambiguous keys (occurring more than once) raise.
        assert nested_lookup.get_occurrence_of_key(prefs.config, "foo") > 1, "%s." % prefs.config
        with self.assertRaises(KeyError):
            prefs.set("foo", 100)

        # Setting a value of a different type raises.
        with self.assertRaises(TypeError):
            prefs.set("testing1/foo", "bar")

        # Success when using the full path...
        prefs.set("testing1/foo", 50)
        assert prefs.get("testing1/foo") == 50, "Value mismatch: got %s, expected %s" % (
            prefs.get("testing1/foo"),
            50,
        )

        # ...or the prefix keyword argument.
        prefs.set("foo", 100, "testing1")
        assert prefs.get("testing1/foo") == 100, "Value mismatch: got %s, expected %s" % (
            prefs.get("testing1/foo"),
            100,
        )

    def test_write(self):
        prefs = PreferencesMock()

        prefs.set("testing1/foo", 250)
        prefs.save()

        # A fresh instance reads the value persisted above.
        reloaded = PreferencesMock()

        assert prefs.config == reloaded.config, "Configs must be the same dictionary."
        assert prefs.get("testing1/foo") == reloaded.get("testing1/foo")
104 |
105 |
class TestPreferencesSchema(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        copyfile(_test_preferences_sample_filepath, _test_preferences_duplicate_filepath)

    @classmethod
    def tearDownClass(cls):
        if os.path.isfile(_test_preferences_duplicate_filepath):
            os.remove(_test_preferences_duplicate_filepath)

    def test_cmd_line_schema(self):
        """Test that the command line schema for preferences is valid.

        Checks:
            - No overlapping aliases
            - Fields ['name', 'aliases', 'type', 'nargs', 'help'] present
            - All aliases are lists and begin with '--'
        """
        a = PreferencesMock()
        config_dict = a.cmd_line_flags()

        # Check that no alias is used by more than one flag.
        aliases = []
        for k in config_dict.keys():
            aliases.extend(config_dict[k]["aliases"])

        alias_duplicates = [
            item for item, count in collections.Counter(aliases).items() if count > 1
        ]
        assert len(alias_duplicates) == 0, "Duplicate aliases: %s" % alias_duplicates

        for k in config_dict.keys():
            arg = config_dict[k]
            # Each flag must define all primary fields.
            for field in ["name", "aliases", "type", "nargs", "help"]:
                assert field in arg.keys(), "`%s` missing from %s" % (field, k)

            # BUG FIX: the message was ``"Aliases must be list - k" % k`` — the
            # missing %s placeholder made a failing assert raise TypeError
            # ("not all arguments converted") instead of a useful message.
            assert type(arg["aliases"]) is list, "Aliases must be list - %s" % k

            # Every alias must be a long-form ('--') flag.
            for alias in arg["aliases"]:
                assert alias.startswith("--"), "Alias '%s' in %s must start with '--'" % (alias, k)
149 |
--------------------------------------------------------------------------------
/tests/tissues/test_tissue.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | import numpy as np
4 |
5 | from dosma.tissues.tissue import largest_cc
6 |
7 |
class TestLargestCC(unittest.TestCase):
    def test_largest_cc(self):
        """``largest_cc`` keeps only the ``num`` biggest connected components."""
        # Smallest component: 10x10 square.
        small = np.zeros((100, 100)).astype(np.uint8)
        small[:10, :10] = 1

        # Medium component: 15x15 square.
        medium = np.zeros((100, 100)).astype(np.uint8)
        medium[85:, 85:] = 1

        # Largest component: 50x50 square.
        large = np.zeros((100, 100)).astype(np.uint8)
        large[25:75, 25:75] = 1

        mask = small | medium | large

        assert np.all(largest_cc(mask) == large)  # only largest cc returned
        assert np.all(largest_cc(mask, num=2) == (medium | large))  # largest 2 cc
        assert np.all(largest_cc(mask, num=3) == (small | medium | large))  # largest 3 cc
        # Requesting more components than exist returns everything.
        assert np.all(largest_cc(mask, num=4) == (small | medium | large))
28 |
29 |
# Allow running this test module directly (e.g. ``python <file>``).
if __name__ == "__main__":
    unittest.main()
32 |
--------------------------------------------------------------------------------
/tests/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/ad12/DOSMA/bd5efecbb944263c9a5d7853f154d9071c72ba62/tests/utils/__init__.py
--------------------------------------------------------------------------------
/tests/utils/test_collect_env.py:
--------------------------------------------------------------------------------
1 | import unittest
2 |
3 | from dosma.utils.collect_env import collect_env_info
4 |
5 |
class TestCollectEnvInfo(unittest.TestCase):
    def test_collect_env_info(self):
        # Smoke test: the environment summary builds and is a string.
        env_info = collect_env_info()
        assert isinstance(env_info, str)
10 |
--------------------------------------------------------------------------------
/tests/utils/test_env.py:
--------------------------------------------------------------------------------
1 | import os
2 | import unittest
3 |
4 | import dosma
5 | from dosma.defaults import preferences
6 | from dosma.utils import env
7 |
8 |
class TestEnv(unittest.TestCase):
    def test_package_available(self):
        """``package_available`` reports installed vs. missing packages."""
        assert env.package_available("dosma")
        assert not env.package_available("blah")

    def test_get_version(self):
        """``get_version`` matches the package's __version__."""
        assert env.get_version("dosma") == dosma.__version__

    def test_debug(self):
        """``env.debug`` toggles nipype logging preference; env is restored."""
        os_env = os.environ.copy()

        env.debug(True)
        assert preferences.nipype_logging == "stream"

        env.debug(False)
        assert preferences.nipype_logging == "file_stderr"

        # BUG FIX: restore the environment in place. Rebinding ``os.environ``
        # to a plain dict (the result of ``.copy()``) silently dropped the
        # os._Environ putenv/unsetenv syncing for the rest of the process.
        os.environ.clear()
        os.environ.update(os_env)
27 |
--------------------------------------------------------------------------------
/tests/utils/test_io_utils.py:
--------------------------------------------------------------------------------
1 | import os
2 | import shutil
3 | import unittest
4 |
5 | import numpy as np
6 | import pandas as pd
7 |
8 | from dosma.utils import io_utils
9 |
10 | from .. import util
11 |
12 | IO_UTILS_DATA = os.path.join(util.UNITTEST_DATA_PATH, "io_utils")
13 |
14 |
class UtilsTest(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # Scratch directory for files written by these tests.
        io_utils.mkdirs(IO_UTILS_DATA)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(IO_UTILS_DATA)

    def test_h5(self):
        """Dictionaries of arrays round-trip through HDF5."""
        filepath = os.path.join(IO_UTILS_DATA, "sample.h5")
        samples = [{"type": np.random.rand(10, 45, 2), "type2": np.random.rand(13, 95, 4)}]

        for sample in samples:
            io_utils.save_h5(filepath, sample)
            loaded = io_utils.load_h5(filepath)

            assert len(list(sample.keys())) == len(list(loaded.keys()))

            for key, value in sample.items():
                assert (value == loaded[key]).all()

    def test_pik(self):
        """A dictionary of arrays round-trips through pickle."""
        filepath = os.path.join(IO_UTILS_DATA, "sample.pik")
        sample = {"type": np.random.rand(10, 45, 2), "type2": np.random.rand(13, 95, 4)}

        io_utils.save_pik(filepath, sample)
        loaded = io_utils.load_pik(filepath)

        for key in sample:
            assert (sample[key] == loaded[key]).all()

    def test_save_tables(self):
        """Tables written via ``save_tables`` are readable with pandas."""
        df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [5, 6, 7, 8]})
        path = os.path.join(IO_UTILS_DATA, "table.xlsx")
        io_utils.save_tables(path, [df])

        reloaded = pd.read_excel(path, engine="openpyxl")
        assert np.all(df == reloaded)
54 |
--------------------------------------------------------------------------------
/tests/utils/test_logger.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | import unittest
4 |
5 | from dosma.utils import env
6 | from dosma.utils.logger import setup_logger
7 |
8 | from .. import util
9 |
10 |
class TestSetupLogger(unittest.TestCase):
    def test_log_info(self):
        """Logger level follows the DOSMA debug state."""
        previous_debug = env.debug()

        # Non-debug mode: INFO-level records are emitted.
        env.debug(False)
        setup_logger(None)
        with self.assertLogs("dosma", level="INFO"):
            logging.getLogger("dosma").info("Sample log at INFO level")

        # Debug mode: DEBUG-level records are emitted.
        env.debug(True)
        setup_logger(None)
        with self.assertLogs("dosma", level="DEBUG"):
            logging.getLogger("dosma").debug("Sample log at DEBUG level")

        env.debug(previous_debug)

    def test_makes_file(self):
        """``setup_logger`` writes ``dosma.log`` into the given directory."""
        setup_logger(util.TEMP_PATH)
        assert os.path.isfile(os.path.join(util.TEMP_PATH, "dosma.log"))

    def test_overwrite_handlers(self):
        """Handlers accumulate unless ``overwrite_handlers`` is set."""
        logger = setup_logger(name="foobar")
        assert len(logger.handlers) == 2

        logger = setup_logger(name="foobar", abbrev_name="foo")
        assert len(logger.handlers) == 4

        logger = setup_logger(name="foobar", overwrite_handlers=True)
        assert len(logger.handlers) == 2
40 |
--------------------------------------------------------------------------------