├── .github └── workflows │ └── pypi_publish.yml ├── .gitignore ├── .readthedocs.yaml ├── CHANGELOG.md ├── Cargo.lock ├── Cargo.toml ├── EULA.txt ├── LICENSE ├── README.md ├── documentation ├── _config.yml ├── _toc.yml ├── api.md ├── api │ ├── phantom.md │ ├── reco.md │ ├── sequence.md │ ├── simulation.md │ ├── simulation │ │ ├── isochromat_sim.md │ │ └── pdg_sim.md │ └── util.md ├── intro.md ├── logo.png ├── phantom_generation.md ├── playground │ ├── generated │ │ └── seqs │ │ │ └── FLASH.ipynb │ ├── quantified_brain.npz │ └── templates │ │ ├── binary_masks.ipynb │ │ ├── generate.py │ │ ├── seqs.ipynb │ │ └── template_A.ipynb ├── playground_mr0 │ ├── AdjDataUser2gB0_transversal_0.08moving_average.mat │ ├── Pulseq_zero_FLASH_FAopt_PSFtask.ipynb │ ├── Pulseq_zero_TSE_FAopt_SARtask.ipynb │ ├── flash.ipynb │ ├── legacy_seqs │ │ ├── flash_DWI.ipynb │ │ ├── mr0_CS_cartesian_seq.ipynb │ │ ├── mr0_CS_radial_seq.ipynb │ │ ├── mr0_DWI_GRE_2D_seq.ipynb │ │ └── mr0_spiral_GRE.ipynb │ ├── mr00_FLASH_2D_ernstAngle_opt.ipynb │ ├── mr0_DREAM_STE_seq.ipynb │ ├── mr0_DREAM_STID_seq.ipynb │ ├── mr0_DWI_SE_EPI.ipynb │ ├── mr0_EPI_2D_seq.ipynb │ ├── mr0_FID_seq.ipynb │ ├── mr0_FLASH_2D_seq.ipynb │ ├── mr0_GRE_to_FLASH.ipynb │ ├── mr0_RARE_2D_seq.ipynb │ ├── mr0_RARE_2D_seq_multi_shot.ipynb │ ├── mr0_SE_CPMG_seq.ipynb │ ├── mr0_STE_3pulses_5echoes_seq.ipynb │ ├── mr0_TSE_2D_multi_shot_seq.ipynb │ ├── mr0_bSSFP_2D_seq.ipynb │ ├── mr0_diffusion_prep_STEAM_2D_seq.ipynb │ ├── mr0_opt_FLASH_2D_IR_Fit_T1.ipynb │ ├── mr0_opt_FLASH_2D_IR_voxelNN_T1.ipynb │ ├── mr0_pypulseq_exmpls_seq.ipynb │ ├── mr0_upload_seq.ipynb │ ├── numerical_brain_cropped.mat │ ├── overview.md │ ├── ptx_phantom.p │ ├── pulseq_flash.ipynb │ ├── pulseq_rf_shim.ipynb │ ├── pulseq_sim_pTx.ipynb │ ├── reference_bssfp_data_feb2025.npz │ ├── seqs │ │ ├── flash pTx CP.seq │ │ ├── flash pTx EP.seq │ │ └── flash pTx QM.seq │ ├── subject05.npz │ └── unsorted │ │ ├── Pulseq_2D_density_adapted_radial.ipynb │ │ ├── gpt4mr.ipynb │ │ ├── improved_mr0_RARE_2D_seq.ipynb │ │ ├── improved_mr0_RARE_2D_seq_nonsel.ipynb │ │ ├── mr0_EPI_2D_with_ORC_seq.ipynb │ │ ├── mr0_burst_TSE.ipynb │ │ └── numerical_brain_cropped.mat └── requirements.txt ├── pyproject.toml ├── python └── MRzeroCore │ ├── __init__.py │ ├── phantom │ ├── brainweb │ │ ├── __init__.py │ │ ├── brainweb_data.json │ │ ├── brainweb_data_sources.txt │ │ └── output │ │ │ └── .gitkeep │ ├── custom_voxel_phantom.py │ ├── sim_data.py │ └── voxel_grid_phantom.py │ ├── pulseq │ ├── exporter.py │ ├── exporter_v2.py │ ├── helpers.py │ ├── pulseq_exporter.py │ └── pulseq_loader │ │ ├── __init__.py │ │ ├── adc.py │ │ ├── helpers.py │ │ ├── pulse.py │ │ ├── pulseq_file │ │ ├── __init__.py │ │ ├── adc.py │ │ ├── block.py │ │ ├── definitons.py │ │ ├── gradient.py │ │ ├── helpers.py │ │ ├── rf.py │ │ └── trap.py │ │ └── spoiler.py │ ├── reconstruction.py │ ├── sequence.py │ ├── simulation │ ├── isochromat_sim.py │ ├── main_pass.py │ └── pre_pass.py │ └── util.py └── src ├── lib.rs └── pre_pass.rs /.github/workflows/pypi_publish.yml: -------------------------------------------------------------------------------- 1 | # This file is autogenerated by maturin v0.14.17 2 | # To update, run 3 | # 4 | # maturin generate-ci github -o .github/workflows/pypi_publish.yml 5 | # 6 | name: CI 7 | 8 | on: 9 | push: 10 | branches: 11 | - main 12 | - master 13 | tags: 14 | - '*' 15 | pull_request: 16 | workflow_dispatch: 17 | 18 | permissions: 19 | contents: read 20 | 21 | jobs: 22 | linux: 23 | runs-on: ubuntu-latest 24 | strategy: 25 
| matrix: 26 | target: [x86_64, x86, aarch64, armv7, s390x, ppc64le] 27 | steps: 28 | - uses: actions/checkout@v3 29 | - uses: actions/setup-python@v4 30 | with: 31 | python-version: '3.10' 32 | - name: Build wheels 33 | uses: PyO3/maturin-action@v1 34 | with: 35 | target: ${{ matrix.target }} 36 | args: --release --out dist 37 | sccache: 'true' 38 | manylinux: auto 39 | - name: Upload wheels 40 | uses: actions/upload-artifact@v4 41 | with: 42 | name: ${{ format('wheels_linux_{0}', matrix.target) }} 43 | path: dist 44 | 45 | windows: 46 | runs-on: windows-latest 47 | strategy: 48 | matrix: 49 | target: [x64, x86] 50 | steps: 51 | - uses: actions/checkout@v3 52 | - uses: actions/setup-python@v4 53 | with: 54 | python-version: '3.10' 55 | architecture: ${{ matrix.target }} 56 | - name: Build wheels 57 | uses: PyO3/maturin-action@v1 58 | with: 59 | target: ${{ matrix.target }} 60 | args: --release --out dist 61 | sccache: 'true' 62 | - name: Upload wheels 63 | uses: actions/upload-artifact@v4 64 | with: 65 | name: ${{ format('wheels_windows_{0}', matrix.target) }} 66 | path: dist 67 | 68 | macos: 69 | runs-on: macos-latest 70 | strategy: 71 | matrix: 72 | target: [x86_64, aarch64] 73 | steps: 74 | - uses: actions/checkout@v3 75 | - uses: actions/setup-python@v4 76 | with: 77 | python-version: '3.10' 78 | - name: Build wheels 79 | uses: PyO3/maturin-action@v1 80 | with: 81 | target: ${{ matrix.target }} 82 | args: --release --out dist 83 | sccache: 'true' 84 | - name: Upload wheels 85 | uses: actions/upload-artifact@v4 86 | with: 87 | name: ${{ format('wheels_macos_{0}', matrix.target) }} 88 | path: dist 89 | 90 | sdist: 91 | runs-on: ubuntu-latest 92 | steps: 93 | - uses: actions/checkout@v3 94 | - name: Build sdist 95 | uses: PyO3/maturin-action@v1 96 | with: 97 | command: sdist 98 | args: --out dist 99 | - name: Upload sdist 100 | uses: actions/upload-artifact@v4 101 | with: 102 | name: wheels_src 103 | path: dist 104 | 105 | pypi-publish: 106 | runs-on: ubuntu-latest 107 | environment: 108 | name: pypi 109 | url: https://pypi.org/p/mrzerocore 110 | permissions: 111 | id-token: write 112 | 113 | needs: [linux, windows, macos, sdist] 114 | steps: 115 | - uses: actions/download-artifact@v4 116 | with: 117 | path: dist 118 | merge-multiple: true 119 | - name: Publish package 120 | uses: pypa/gh-action-pypi-publish@27b31702a0e7fc50959f5ad993c78deac1bdfc29 121 | with: 122 | user: __token__ 123 | password: ${{ secrets.PYPI_API_TOKEN }} 124 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /target 2 | /documentation/_autosummary 3 | /documentation/_build 4 | out/ 5 | *.seq 6 | *.env 7 | *.ipynb_checkpoints 8 | 9 | *.pdf 10 | 11 | # Test files 12 | external.seq 13 | *.blend1 14 | 15 | # Brainweb files 16 | *.i8.gz 17 | *.npz 18 | 19 | # Byte-compiled / optimized / DLL files 20 | __pycache__/ 21 | .pytest_cache/ 22 | *.py[cod] 23 | 24 | # C extensions 25 | *.so 26 | 27 | # Distribution / packaging 28 | .Python 29 | .venv/ 30 | env/ 31 | bin/ 32 | build/ 33 | develop-eggs/ 34 | dist/ 35 | eggs/ 36 | lib/ 37 | lib64/ 38 | parts/ 39 | sdist/ 40 | var/ 41 | include/ 42 | man/ 43 | venv/ 44 | *.egg-info/ 45 | .installed.cfg 46 | *.egg 47 | 48 | # Installer logs 49 | pip-log.txt 50 | pip-delete-this-directory.txt 51 | pip-selfcheck.json 52 | 53 | # Unit test / coverage reports 54 | htmlcov/ 55 | .tox/ 56 | .coverage 57 | .cache 58 | nosetests.xml 59 | coverage.xml 60 | 61 | # 
Translations 62 | *.mo 63 | 64 | # Mr Developer 65 | .mr.developer.cfg 66 | .project 67 | .pydevproject 68 | 69 | # Rope 70 | .ropeproject 71 | 72 | # Django stuff: 73 | *.log 74 | *.pot 75 | 76 | .DS_Store 77 | 78 | # Sphinx documentation 79 | docs/_build/ 80 | 81 | # PyCharm 82 | .idea/ 83 | 84 | # VSCode 85 | .vscode/ 86 | 87 | # Pyenv 88 | .python-version -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # Read the Docs configuration file 2 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 3 | version: 2 4 | 5 | build: 6 | os: ubuntu-22.04 7 | tools: 8 | python: "3.11" 9 | jobs: 10 | pre_build: 11 | # Generate the Sphinx configuration for this Jupyter Book so it builds. 12 | - "jupyter-book config sphinx documentation/" 13 | 14 | python: 15 | install: 16 | - requirements: documentation/requirements.txt 17 | 18 | sphinx: 19 | builder: html 20 | configuration: documentation/conf.py 21 | # fail_on_warning: true 22 | -------------------------------------------------------------------------------- /CHANGELOG.md: -------------------------------------------------------------------------------- 1 | - 0.3.13 2 | - bugfix: util.load_phantom - B0_polynomial computation used wrong number of dims 3 | - default `clear_state_mag` to `True` to avoid memory problems 4 | - include optional initial magnetization in execute_graph 5 | - fixed numpy version in playground 6 | - 0.3.12 7 | - Changed how to return magnetization from the simulation, see pull request #97 8 | - 0.3.11 9 | - new util functions for phantom loading and simulating 10 | - 0.3.10 11 | - phantom_motion and voxel_motion now also exists in VoxelGridPhantom 12 | - new simple simulate function in util, that directly accepts a VoxelGridPhantom 13 | - 0.3.9 14 | - New simple load_default_phantom function in util 15 | - 0.3.8 16 | - Bugfix missing tensor dtype (#82) 17 | - Support for 0 shim array shape_id 18 | - Default shim in importer for pulses that do not specify any 19 | - Deprecated old importer 20 | - 0.3.7 21 | - Support for loading .seq files with pTx rf_shim extension for pulseq 22 | - Support for loading .dsv files 23 | - 0.3.6 24 | - Support for complex B1 maps (#80) 25 | - Isochromat bugfix (#74) 26 | - 0.3.5 27 | - centered signal on adc samples in `insert_signal_plot()` 28 | - ensured `tissue_masks` is never zero to fix remaining bugs 29 | - 0.3.4 30 | - Bugfix: `insert_signal_plot()` now respects adc delay 31 | - 0.3.3 32 | - Bugfix: `VoxelGridPhantom.interpolate()` and `.scale()` now work again 33 | - 0.3.2 34 | - Added tissue maps to phantom and plotting (#68) 35 | - Fixed `.plot()`ting of 3D VoxelGridPhantom, added slice parameter 36 | - Added `simulate_2D` to util for easy simulation of pypulseq sequences 37 | - 0.3.1 38 | - WIP .dsv support with pydisseqt 0.1.5 39 | - changed type annotation in util.imshow for python 3.9 compatibility 40 | - 0.3.0 41 | - Bugfixes (seq.get_contrast(), pTx shim phase, pytorch deprecation warnings, > 360° pulses, pulseq plot, missing 2pi in diffusion, phantom plot titles) 42 | - WIP rigid phantom motion simulation - not yet documented and subject to change 43 | - Added option to return transversal magnetization from simulation 44 | - New imshow() function in util.py for consistent plotting 45 | - Some fixes to playground sequences - bigger overhaul coming soon 46 | - Allow loading B0 / B1 in new VoxelGridPhantom.load() function 47 
| - Introduced quantified brain 48 | - Performance improvement - only calculate signal for measured ADC samples 49 | - Added latent_signal_unormalized to prepass graph for visualization 50 | - 0.2.12 51 | - Fixed Brainweb phantom loading and generation 52 | - Switched pre-pass kt precision from f32 to f64 53 | - Made pre-pass state merging relative to sequence gradient moment and event time sizes 54 | - 0.2.11 55 | - (re-)introduced return_mag_z parameter in pdg simulation 56 | - 0.2.10 57 | - change default FOV in all notebooks to phantom size (200 mm) 58 | - Use Open in Colab badges in the playground instead of links 59 | - Documented how to run on GPU 60 | - Added another pure mr0 flash DWI sequence to the playground 61 | - Bugfix: normalized_grads setting was not passed in Sequence.cuda() and .cpu() 62 | - Bugfix: removed wrong 2*pi factor from Diffusion b-value calculations 63 | - 0.2.9 64 | - Change sign in gradient simulation - flips orientation of old reconstructions 65 | - Included util file in mr0 66 | - Improved plot function to support pypulseq 1.4.0-dev 67 | - Add util to documentation 68 | - 0.2.8 69 | - Updated playground notebook to use new FOV definitions 70 | - Fixed SimData.recover() 71 | - 0.2.7 72 | - Removed perlin-numpy from dependencies (git dependency not allowed by pypi) 73 | - 0.2.6 74 | - New pulseq importer (correctly implements 1.2 - 1.4 spec) 75 | - FOV / phantom size now in SI units by default 76 | - Normalized gradients (when defining sequences in mr0) is now an explicit sequence flag 77 | -------------------------------------------------------------------------------- /Cargo.lock: -------------------------------------------------------------------------------- 1 | # This file is automatically @generated by Cargo. 2 | # It is not intended for manual editing. 
3 | version = 3 4 | 5 | [[package]] 6 | name = "autocfg" 7 | version = "1.3.0" 8 | source = "registry+https://github.com/rust-lang/crates.io-index" 9 | checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" 10 | 11 | [[package]] 12 | name = "bitflags" 13 | version = "2.5.0" 14 | source = "registry+https://github.com/rust-lang/crates.io-index" 15 | checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" 16 | 17 | [[package]] 18 | name = "cfg-if" 19 | version = "1.0.0" 20 | source = "registry+https://github.com/rust-lang/crates.io-index" 21 | checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" 22 | 23 | [[package]] 24 | name = "heck" 25 | version = "0.4.1" 26 | source = "registry+https://github.com/rust-lang/crates.io-index" 27 | checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" 28 | 29 | [[package]] 30 | name = "indoc" 31 | version = "2.0.5" 32 | source = "registry+https://github.com/rust-lang/crates.io-index" 33 | checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" 34 | 35 | [[package]] 36 | name = "libc" 37 | version = "0.2.155" 38 | source = "registry+https://github.com/rust-lang/crates.io-index" 39 | checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" 40 | 41 | [[package]] 42 | name = "lock_api" 43 | version = "0.4.12" 44 | source = "registry+https://github.com/rust-lang/crates.io-index" 45 | checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" 46 | dependencies = [ 47 | "autocfg", 48 | "scopeguard", 49 | ] 50 | 51 | [[package]] 52 | name = "memoffset" 53 | version = "0.9.1" 54 | source = "registry+https://github.com/rust-lang/crates.io-index" 55 | checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" 56 | dependencies = [ 57 | "autocfg", 58 | ] 59 | 60 | [[package]] 61 | name = "mrzero_core" 62 | version = "0.3.13" 63 | dependencies = [ 64 | "num-complex", 65 | "pyo3", 66 | ] 67 | 68 | [[package]] 69 | name = "num-complex" 70 | version = "0.4.6" 71 | source = "registry+https://github.com/rust-lang/crates.io-index" 72 | checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" 73 | dependencies = [ 74 | "num-traits", 75 | ] 76 | 77 | [[package]] 78 | name = "num-traits" 79 | version = "0.2.19" 80 | source = "registry+https://github.com/rust-lang/crates.io-index" 81 | checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" 82 | dependencies = [ 83 | "autocfg", 84 | ] 85 | 86 | [[package]] 87 | name = "once_cell" 88 | version = "1.19.0" 89 | source = "registry+https://github.com/rust-lang/crates.io-index" 90 | checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" 91 | 92 | [[package]] 93 | name = "parking_lot" 94 | version = "0.12.2" 95 | source = "registry+https://github.com/rust-lang/crates.io-index" 96 | checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" 97 | dependencies = [ 98 | "lock_api", 99 | "parking_lot_core", 100 | ] 101 | 102 | [[package]] 103 | name = "parking_lot_core" 104 | version = "0.9.10" 105 | source = "registry+https://github.com/rust-lang/crates.io-index" 106 | checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" 107 | dependencies = [ 108 | "cfg-if", 109 | "libc", 110 | "redox_syscall", 111 | "smallvec", 112 | "windows-targets", 113 | ] 114 | 115 | [[package]] 116 | name = "portable-atomic" 117 | version = "1.6.0" 118 | source = 
"registry+https://github.com/rust-lang/crates.io-index" 119 | checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" 120 | 121 | [[package]] 122 | name = "proc-macro2" 123 | version = "1.0.83" 124 | source = "registry+https://github.com/rust-lang/crates.io-index" 125 | checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43" 126 | dependencies = [ 127 | "unicode-ident", 128 | ] 129 | 130 | [[package]] 131 | name = "pyo3" 132 | version = "0.21.2" 133 | source = "registry+https://github.com/rust-lang/crates.io-index" 134 | checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" 135 | dependencies = [ 136 | "cfg-if", 137 | "indoc", 138 | "libc", 139 | "memoffset", 140 | "num-complex", 141 | "parking_lot", 142 | "portable-atomic", 143 | "pyo3-build-config", 144 | "pyo3-ffi", 145 | "pyo3-macros", 146 | "unindent", 147 | ] 148 | 149 | [[package]] 150 | name = "pyo3-build-config" 151 | version = "0.21.2" 152 | source = "registry+https://github.com/rust-lang/crates.io-index" 153 | checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50" 154 | dependencies = [ 155 | "once_cell", 156 | "target-lexicon", 157 | ] 158 | 159 | [[package]] 160 | name = "pyo3-ffi" 161 | version = "0.21.2" 162 | source = "registry+https://github.com/rust-lang/crates.io-index" 163 | checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" 164 | dependencies = [ 165 | "libc", 166 | "pyo3-build-config", 167 | ] 168 | 169 | [[package]] 170 | name = "pyo3-macros" 171 | version = "0.21.2" 172 | source = "registry+https://github.com/rust-lang/crates.io-index" 173 | checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" 174 | dependencies = [ 175 | "proc-macro2", 176 | "pyo3-macros-backend", 177 | "quote", 178 | "syn", 179 | ] 180 | 181 | [[package]] 182 | name = "pyo3-macros-backend" 183 | version = "0.21.2" 184 | source = "registry+https://github.com/rust-lang/crates.io-index" 185 | checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" 186 | dependencies = [ 187 | "heck", 188 | "proc-macro2", 189 | "pyo3-build-config", 190 | "quote", 191 | "syn", 192 | ] 193 | 194 | [[package]] 195 | name = "quote" 196 | version = "1.0.36" 197 | source = "registry+https://github.com/rust-lang/crates.io-index" 198 | checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" 199 | dependencies = [ 200 | "proc-macro2", 201 | ] 202 | 203 | [[package]] 204 | name = "redox_syscall" 205 | version = "0.5.1" 206 | source = "registry+https://github.com/rust-lang/crates.io-index" 207 | checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" 208 | dependencies = [ 209 | "bitflags", 210 | ] 211 | 212 | [[package]] 213 | name = "scopeguard" 214 | version = "1.2.0" 215 | source = "registry+https://github.com/rust-lang/crates.io-index" 216 | checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" 217 | 218 | [[package]] 219 | name = "smallvec" 220 | version = "1.13.2" 221 | source = "registry+https://github.com/rust-lang/crates.io-index" 222 | checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" 223 | 224 | [[package]] 225 | name = "syn" 226 | version = "2.0.65" 227 | source = "registry+https://github.com/rust-lang/crates.io-index" 228 | checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" 229 | dependencies = [ 230 | "proc-macro2", 231 | "quote", 232 | "unicode-ident", 233 | ] 234 | 235 | 
[[package]] 236 | name = "target-lexicon" 237 | version = "0.12.14" 238 | source = "registry+https://github.com/rust-lang/crates.io-index" 239 | checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" 240 | 241 | [[package]] 242 | name = "unicode-ident" 243 | version = "1.0.12" 244 | source = "registry+https://github.com/rust-lang/crates.io-index" 245 | checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" 246 | 247 | [[package]] 248 | name = "unindent" 249 | version = "0.2.3" 250 | source = "registry+https://github.com/rust-lang/crates.io-index" 251 | checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" 252 | 253 | [[package]] 254 | name = "windows-targets" 255 | version = "0.52.5" 256 | source = "registry+https://github.com/rust-lang/crates.io-index" 257 | checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" 258 | dependencies = [ 259 | "windows_aarch64_gnullvm", 260 | "windows_aarch64_msvc", 261 | "windows_i686_gnu", 262 | "windows_i686_gnullvm", 263 | "windows_i686_msvc", 264 | "windows_x86_64_gnu", 265 | "windows_x86_64_gnullvm", 266 | "windows_x86_64_msvc", 267 | ] 268 | 269 | [[package]] 270 | name = "windows_aarch64_gnullvm" 271 | version = "0.52.5" 272 | source = "registry+https://github.com/rust-lang/crates.io-index" 273 | checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" 274 | 275 | [[package]] 276 | name = "windows_aarch64_msvc" 277 | version = "0.52.5" 278 | source = "registry+https://github.com/rust-lang/crates.io-index" 279 | checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" 280 | 281 | [[package]] 282 | name = "windows_i686_gnu" 283 | version = "0.52.5" 284 | source = "registry+https://github.com/rust-lang/crates.io-index" 285 | checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" 286 | 287 | [[package]] 288 | name = "windows_i686_gnullvm" 289 | version = "0.52.5" 290 | source = "registry+https://github.com/rust-lang/crates.io-index" 291 | checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" 292 | 293 | [[package]] 294 | name = "windows_i686_msvc" 295 | version = "0.52.5" 296 | source = "registry+https://github.com/rust-lang/crates.io-index" 297 | checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" 298 | 299 | [[package]] 300 | name = "windows_x86_64_gnu" 301 | version = "0.52.5" 302 | source = "registry+https://github.com/rust-lang/crates.io-index" 303 | checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" 304 | 305 | [[package]] 306 | name = "windows_x86_64_gnullvm" 307 | version = "0.52.5" 308 | source = "registry+https://github.com/rust-lang/crates.io-index" 309 | checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" 310 | 311 | [[package]] 312 | name = "windows_x86_64_msvc" 313 | version = "0.52.5" 314 | source = "registry+https://github.com/rust-lang/crates.io-index" 315 | checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" 316 | -------------------------------------------------------------------------------- /Cargo.toml: -------------------------------------------------------------------------------- 1 | [package] 2 | name = "mrzero_core" 3 | version = "0.3.13" 4 | edition = "2021" 5 | 6 | # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html 7 | [lib] 8 | crate-type = ["cdylib"] 9 | 10 | [dependencies] 11 | pyo3 = { version = 
"0.21.2", features = ["abi3-py37", "extension-module", "num-complex"] } 12 | num-complex = "0.4.6" 13 | -------------------------------------------------------------------------------- /EULA.txt: -------------------------------------------------------------------------------- 1 | This End User License Agreement (“EULA”) is a legal agreement between you (“you” or “Licensee”) 2 | and the copyright holder of the MRtwin code (“Copyright Holder”) made available on GitHub (“Code”). 3 | By accessing or using the Code, you agree to be bound by the terms and conditions of this EULA. 4 | 5 | License Grant 6 | The Copyright Holder grants you a non-exclusive, non-transferable license to use the Code for non-commercial purposes only. 7 | 8 | Restrictions 9 | You shall not use the Code or derivative works of it for any commercial purpose, including but not limited to, 10 | selling, renting, leasing, sublicensing, or otherwise distributing the Code for any fee or other consideration. 11 | 12 | Ownership 13 | The Code is proprietary to the Copyright Holder and is protected by copyright laws and international copyright treaties, 14 | as well as other intellectual property laws and treaties. The Code is licensed, not sold. 15 | 16 | Termination 17 | This EULA will terminate automatically if you fail to comply with any of the terms and conditions of this EULA. 18 | Upon termination, you shall immediately cease all use of the Code and destroy all copies of the Code in your possession. 19 | 20 | Disclaimer of Warranties 21 | The Code is provided “AS IS” without warranty of any kind, either express or implied, 22 | including but not limited to, the implied warranties of merchantability and fitness for a particular purpose. 23 | The Copyright Holder does not warrant that the Code will meet your requirements or that the operation of the Code will be uninterrupted or error-free. 24 | 25 | Limitation of Liability 26 | In no event shall the Copyright Holder be liable for any damages whatsoever (including but not limited to, 27 | direct, indirect, special, incidental, or consequential damages, loss of profits, loss of data, or business interruption) 28 | arising out of the use or inability to use the Code, even if the Copyright Holder has been advised of the possibility of such damages. 29 | 30 | Governing Law 31 | This EULA shall be governed by and construed in accordance with the laws of the country in which the Code was created, 32 | without giving effect to any principles of conflicts of law. 33 | 34 | Entire Agreement 35 | This EULA constitutes the entire agreement between you and the Copyright Holder with respect to the use of the Code 36 | and supersedes all prior or contemporaneous communications and proposals, whether oral or written, between you and the Copyright Holder. 37 | 38 | Acknowledgement 39 | By using the Code, you acknowledge that you have read this EULA, understand it, and agree to be bound by its terms and conditions. 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Documentation Status](https://readthedocs.org/projects/mrzero-core/badge/?version=latest)](https://mrzero-core.readthedocs.io/en/latest/?badge=latest) 2 | ![PyPI - Version](https://img.shields.io/pypi/v/MRzeroCore) 3 | 4 | # MRzero Core 5 | 6 | The MRzero Core contains the core functionality of [MRzero](https://arxiv.org/abs/2002.04265) like MRI sequence building, simulation and reconstruction. 
MRzero Core does not force you to take any particular approach to, e.g., reconstruction, as it targets easy integration in existing projects. Nevertheless, more tools can be added in the future if they are helpful for the general application space. 7 | 8 | ## Usage 9 | 10 | MRzero Core is written in [Python](https://www.python.org/), heavily relying on [PyTorch](https://pytorch.org/) for fast (GPU) tensor calculations. 11 | To improve performance, parts of the simulation are written in [Rust](https://www.rust-lang.org/) and compiled for x86 Windows and Linux; other platforms are currently not supported. 12 | 13 | Install with pip: 14 | ``` 15 | pip install MRzeroCore 16 | ``` 17 | 18 | The typical way of using it is as follows: 19 | ```python 20 | import MRzeroCore as mr0 21 | ``` 22 | 23 | Examples of how to use it can be found in the [Playground](https://mrzero-core.readthedocs.io/en/latest/playground_mr0/overview.html). 24 | 25 | ## Links 26 | 27 | - Documentation: https://mrzero-core.readthedocs.io/ 28 | - Examples: [Playground](https://mrzero-core.readthedocs.io/en/latest/playground_mr0/overview.html) 29 | - PyPI: https://pypi.org/project/mrzerocore/ 30 | - Original MRzero Paper: https://arxiv.org/abs/2002.04265 31 | 32 | ## Building from source 33 | 34 | This assumes Windows as the host operating system. For building the Python wheel, you need: 35 | - the Rust toolchain: [rustup](https://rustup.rs/) 36 | - the Rust-Python build tool: [pip install maturin](https://github.com/PyO3/maturin) 37 | - for Linux cross-compilation: [docker](https://www.docker.com/) 38 | - to build the documentation: [pip install jupyter-book](https://jupyterbook.org/en/stable/intro.html) 39 | 40 | **Building for Windows** 41 | ``` 42 | maturin build --interpreter python 43 | ``` 44 | **Building for Linux** 45 | ``` 46 | docker run --rm -v /MRzero-Core:/io ghcr.io/pyo3/maturin build 47 | ``` 48 | 49 | To **build the documentation**, run 50 | ``` 51 | jupyter-book build documentation/ 52 | ``` 53 | in the root folder of this project. This requires jupyter-book as well as MRzeroCore itself to be installed. 54 | 55 | 56 | ## Official builds 57 | 58 | The [Python wheels](https://pypi.org/project/mrzerocore/) hosted on [PyPI](https://pypi.org/) are built as described above and uploaded as follows: 59 | 60 | ``` 61 | maturin upload target/wheels/MRzeroCore-{ version }-cp37-abi3-win_amd64.whl target/wheels/MRzeroCore-{ version }-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl -u -p 62 | ``` 63 | 64 | The [documentation](https://mrzero-core.readthedocs.io/en/latest/intro.html) is built using [readthedocs](https://readthedocs.org/), which works the same as described above. 65 | -------------------------------------------------------------------------------- /documentation/_config.yml: -------------------------------------------------------------------------------- 1 | # Book settings 2 | # Learn more at https://jupyterbook.org/customize/config.html 3 | 4 | title: MRzero Documentation 5 | author: Jonathan Endres 6 | logo: logo.png 7 | 8 | # Force re-execution of notebooks on each build. 
9 | # See https://jupyterbook.org/content/execute.html 10 | execute: 11 | execute_notebooks: cache 12 | exclude_patterns: 13 | - 'legacy_seqs/*' 14 | 15 | # Define the name of the latex output file for PDF builds 16 | latex: 17 | latex_documents: 18 | targetname: book.tex 19 | 20 | # Information about where the book exists on the web 21 | repository: 22 | url: https://github.com/MRsources/MRzero-Core # Online location of your book 23 | path_to_book: documentation # Optional path to your book, relative to the repository root 24 | branch: main # Which branch of the repository should be used when creating links (optional) 25 | 26 | # Add GitHub buttons to your book 27 | # See https://jupyterbook.org/customize/config.html#add-a-link-to-your-repository 28 | html: 29 | use_issues_button: true 30 | use_repository_button: true 31 | 32 | # Launch notebooks in Google Colab 33 | # See https://jupyterbook.org/en/stable/interactive/launchbuttons.html 34 | launch_buttons: 35 | colab_url: "https://colab.research.google.com" 36 | 37 | sphinx: 38 | extra_extensions: 39 | - 'sphinx.ext.autodoc' 40 | - 'sphinx.ext.napoleon' 41 | - 'sphinx.ext.viewcode' 42 | - 'enum_tools.autoenum' 43 | config: 44 | add_module_names: False 45 | -------------------------------------------------------------------------------- /documentation/_toc.yml: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | # Learn more at https://jupyterbook.org/customize/toc.html 3 | 4 | format: jb-book 5 | root: intro 6 | chapters: 7 | - file: phantom_generation 8 | - file: api 9 | sections: 10 | - file: api/sequence 11 | - file: api/reco 12 | - file: api/util 13 | - file: api/phantom 14 | - file: api/simulation 15 | sections: 16 | - file: api/simulation/pdg_sim 17 | - file: api/simulation/isochromat_sim 18 | - file: playground_mr0/overview 19 | sections: 20 | - file: playground_mr0/mr0_FID_seq.ipynb 21 | - file: playground_mr0/mr0_SE_CPMG_seq.ipynb 22 | - file: playground_mr0/mr0_STE_3pulses_5echoes_seq.ipynb 23 | - file: playground_mr0/mr0_FLASH_2D_seq.ipynb 24 | - file: playground_mr0/mr0_EPI_2D_seq.ipynb 25 | - file: playground_mr0/mr0_RARE_2D_seq.ipynb 26 | - file: playground_mr0/mr0_bSSFP_2D_seq.ipynb 27 | - file: playground_mr0/mr0_diffusion_prep_STEAM_2D_seq.ipynb 28 | - file: playground_mr0/mr0_GRE_to_FLASH.ipynb 29 | - file: playground_mr0/mr0_DREAM_STE_seq.ipynb 30 | - file: playground_mr0/mr0_DREAM_STID_seq.ipynb 31 | - file: playground_mr0/mr0_burst_TSE.ipynb 32 | - file: playground_mr0/mr0_CS_cartesian_seq.ipynb 33 | - file: playground_mr0/mr0_CS_radial_seq.ipynb 34 | - file: playground_mr0/pulseq_rf_shim.ipynb 35 | 36 | - file: playground_mr0/mr0_pypulseq_exmpls_seq.ipynb 37 | - file: playground_mr0/mr0_upload_seq.ipynb 38 | 39 | - file: playground_mr0/mr0_opt_FLASH_2D_IR_Fit_T1.ipynb 40 | - file: playground_mr0/mr0_opt_FLASH_2D_IR_voxelNN_T1.ipynb 41 | 42 | - file: playground_mr0/mr00_FLASH_2D_ernstAngle_opt.ipynb 43 | 44 | - file: playground_mr0/flash 45 | - file: playground_mr0/flash_DWI 46 | - file: playground_mr0/pulseq_flash 47 | - file: playground_mr0/pulseq_sim_pTx 48 | -------------------------------------------------------------------------------- /documentation/api.md: -------------------------------------------------------------------------------- 1 | # API Reference 2 | 3 | All functionality provided by MRzeroCore is re-exported at the top level. 
It is recommended to import MRzeroCore as follows: 4 | 5 | ```python 6 | import MRzeroCore as mr0 7 | 8 | # Example: build a sequence 9 | seq = mr0.Sequence() 10 | rep = seq.new_rep(65) 11 | rep.pulse.usage = mr0.PulseUsage.EXCIT 12 | ... 13 | ``` 14 | 15 | To run simulations on the GPU, the approach is the same as when using PyTorch: 16 | 17 | ```python 18 | graph = mr0.compute_graph(seq, obj) 19 | # Calculate signal on the GPU, move returned tensor back to the CPU: 20 | signal = mr0.execute_graph(graph, seq.cuda(), obj.cuda()).cpu() 21 | ``` 22 | 23 | The following pages list all functionality provided by `MRzeroCore`. 24 | 25 | ::::{grid} 26 | :gutter: 2 27 | 28 | :::{grid-item-card} [Sequence](sequence) 29 | Create MRI sequences 30 | ::: 31 | 32 | :::{grid-item-card} [Reconstruction](reco) 33 | Reconstruct images 34 | ::: 35 | 36 | :::{grid-item-card} [Util](util) 37 | Utility functions 38 | ::: 39 | 40 | :::{grid-item-card} [Phantom](phantom) 41 | Various virtual subjects 42 | ::: 43 | 44 | :::{grid-item-card} [Simulation](simulation) 45 | Calculate ADC signals 46 | ::: 47 | :::: 48 | 49 | -------------------------------------------------------------------------------- /documentation/api/phantom.md: -------------------------------------------------------------------------------- 1 | (phantom)= 2 | ```{eval-rst} 3 | .. currentmodule:: MRzeroCore 4 | ``` 5 | 6 | # Phantom 7 | 8 | Phantoms based on BrainWeb data can be downloaded and generated easily, as explained [here](generating_phantoms). 9 | The data required by the simulation is stored in the {class}`SimData` class. It holds all the necessary maps as sparse tensors of voxels; the voxel positions are stored in one of those tensors. {class}`SimData` should not be created directly, but rather by using one of the phantoms. Currently, there are two phantom classes available: 10 | 11 | ::::{grid} 12 | :gutter: 3 13 | 14 | :::{grid-item-card} [VoxelGridPhantom](voxel_grid_phantom) 15 | A phantom described by a uniform Cartesian grid of voxels. 16 | ::: 17 | 18 | :::{grid-item-card} [CustomVoxelPhantom](custom_voxel_phantom) 19 | A user-specified list of voxels with custom, but uniform, size and shape. 20 | ::: 21 | 22 | :::{grid-item-card} [SimData](sim_data) 23 | Simulation data used by the simulation. 24 | ::: 25 | :::: 26 | 27 | (load_brainweb)= 28 | Example for loading a BrainWeb phantom, scaling it and using it as {class}`SimData` on the GPU: 29 | 30 | ```python 31 | phantom = mr0.VoxelGridPhantom.brainweb("subject05.npz") 32 | phantom = phantom.interpolate(128, 128, 32).slices([16]) 33 | data = phantom.build().cuda() 34 | ``` 35 | 36 | When building {class}`SimData`, a voxel shape can be selected. The following options are available: 37 | 38 | | Shape | Description | 39 | | -------------- |-------------| 40 | | `"exact_sinc"` | A sinc-shaped voxel with a rect function as k-space drop-off. | 41 | | `"sinc"` | The hard edge of the `"exact_sinc"` in k-space can be problematic for optimization. The default `"sinc"` shape uses a sigmoid to smoothly drop to zero between the Nyquist frequency and the next k-space sample. | 42 | | `"box"` | A box-shaped voxel with a sinc-shaped k-space response. When used for simulation, this response will blur the image and at the same time introduce higher frequencies. | 43 | | `"gauss"` | Normal-distribution-shaped voxels. The voxel size describes the variance. Only available for {class}`CustomVoxelPhantom`. 
| 44 | 45 | 46 | (voxel_grid_phantom)= 47 | ## Voxel Grid Phantom 48 | 49 | When converting to {class}`SimData`, voxels are sinc-shaped by default in order to correctly describe a bandwidth-limited signal: this phantom will only emit signal up to the Nyquist frequency. Above the Nyquist frequency, no signal is emitted, as these frequencies are not contained in the original input data either. 50 | 51 | 52 | ```{eval-rst} 53 | .. autoclass:: VoxelGridPhantom 54 | :members: 55 | ``` 56 | 57 | (custom_voxel_phantom)= 58 | ## Custom Voxel Phantom 59 | 60 | An analytical phantom for experimentation. There is no resolution or grid; voxels can be placed anywhere and have any size or shape. Useful for testing reconstruction, sub-voxel positioning, etc. 61 | 62 | The `size` of this phantom is computed from the extents of the voxel positions. 63 | 64 | :::{note} 65 | Because of how the voxel k-space response is currently implemented, all voxels are limited to the same shape and size. This limitation could be lifted in the future, if required. 66 | ::: 67 | 68 | ```{eval-rst} 69 | .. autoclass:: CustomVoxelPhantom 70 | :members: 71 | ``` 72 | 73 | (sim_data)= 74 | ## Simulation Data 75 | 76 | Simulation data is stored on the **CPU** by default. If the simulation should run on the **GPU**, transfer the data using something like: 77 | 78 | ```python 79 | data = data.cuda() 80 | ``` 81 | 82 | ```{eval-rst} 83 | .. autoclass:: SimData 84 | :members: 85 | ``` 86 | -------------------------------------------------------------------------------- /documentation/api/reco.md: -------------------------------------------------------------------------------- 1 | (reco)= 2 | ```{eval-rst} 3 | .. currentmodule:: MRzeroCore 4 | ``` 5 | 6 | # Reconstruction 7 | 8 | 9 | ## NUFFT Reconstruction 10 | 11 | NUFFT reconstruction is not integrated into `mr0` directly, but can be realized with `torchkbnufft` and code similar to the following: 12 | 13 | ```python 14 | import torchkbnufft as tkbn 15 | 16 | # Construct a sequence "seq" and simulate it, resulting in "signal" 17 | 18 | kdata = signal.view(1, 1, -1) 19 | ktraj = seq.get_kspace()[:, :2].T / (2 * np.pi) 20 | im_size = (64, 64) 21 | 22 | adjnufft_ob = tkbn.KbNufftAdjoint(im_size) 23 | dcomp = tkbn.calc_density_compensation_function(ktraj, im_size=im_size) 24 | reco = adjnufft_ob(kdata * dcomp, ktraj).view(im_size) 25 | ``` 26 | 27 | 28 | ## Adjoint Reconstruction 29 | 30 | Adjoint reconstruction builds a very simple backwards version of the encoding / measurement operation. Essentially, it resembles a DFT and is thus slower and consumes more memory than an FFT, but can handle any readout trajectory. 31 | 32 | ```{eval-rst} 33 | .. autofunction:: reco_adjoint 34 | ``` 35 | -------------------------------------------------------------------------------- /documentation/api/sequence.md: -------------------------------------------------------------------------------- 1 | (sequence)= 2 | ```{eval-rst} 3 | .. currentmodule:: MRzeroCore 4 | ``` 5 | 6 | # Sequence 7 | 8 | An MRI {class}`Sequence` is a Python list of {class}`Repetition` s, each of which starts with an instantaneous {class}`Pulse` . 9 | The {class}`PulseUsage` of these pulses is only used for automatically building the k-space trajectory for easier reconstruction. 
10 | Typical use is similar to the following: 11 | 12 | ```python 13 | import MRzeroCore as mr0 14 | from numpy import pi 15 | 16 | seq = mr0.Sequence() 17 | 18 | # Iterate over the repetitions 19 | for i in range(64): 20 | rep = seq.new_rep(2 + 64 + 2) 21 | 22 | # Set the pulse 23 | rep.pulse.usage = mr0.PulseUsage.EXCIT 24 | rep.pulse.angle = 5 * pi/180 25 | rep.pulse.phase = i**2 * 117*pi/180 % (2*pi) 26 | 27 | # Set encoding 28 | rep.gradm[:, :] = ... 29 | 30 | # Set timing 31 | rep.event_time[:] = ... 32 | 33 | # Set ADC 34 | rep.adc_usage[2:-2] = 1 35 | rep.adc_phase[2:-2] = pi - rep.pulse.phase 36 | ``` 37 | 38 | ## Pulseq 39 | 40 | Sequences can also be imported from [Pulseq](https://pulseq.github.io/) `.seq` files using the {meth}`Sequence.from_seq_file` method. 41 | 42 | :::{note} 43 | The importer tries to minimize the number of events for imported sequences. This can be undesirable for diffusion-weighted sequences that rely on spoiler gradients, which might be removed in this process. For that reason, this behaviour might be removed in the future. Furthermore, MRzero currently uses instantaneous pulses and will ignore slice selection, off-resonance, etc. 44 | ::: 45 | 46 | Sometimes it might be desirable to measure multiple contrasts in a single sequence. This can be realized by combining sequences with {func}`chain`, followed by masking the simulated signal using {meth}`Sequence.get_contrasts()`. Alternatively, {attr}`Repetition.adc_usage` allows manually assigning ADC samples to different contrasts. 47 | 48 | To export MRzero sequences as Pulseq .seq files, {func}`pulseq_write_cartesian` can be used. This exporter only supports sequences with k-space trajectories that lie on a Cartesian grid. More flexible exporters will be added in the future, as well as better documentation for them. 49 | 50 | ```{eval-rst} 51 | .. autofunction:: pulseq_write_cartesian 52 | ``` 53 | 54 | 55 | ## Sequence 56 | 57 | ```{eval-rst} 58 | .. autoclass:: Sequence 59 | :members: 60 | 61 | .. autofunction:: chain 62 | ``` 63 | 64 | 65 | ## Repetition 66 | 67 | In MRzero, a repetition describes a section of the sequence starting with an RF pulse and ending just before the next. This is intuitive for sequences that consist of many similar sections (usually identical apart from phase encoding), but is used more loosely here as a general term, even for sequences where those "repetitions" are completely different. 68 | 69 | ```{eval-rst} 70 | .. autoclass:: Repetition 71 | :members: 72 | ``` 73 | 74 | 75 | ## Pulse 76 | 77 | ```{eval-rst} 78 | .. autoclass:: Pulse 79 | :members: 80 | ``` 81 | 82 | 83 | ## PulseUsage 84 | 85 | ```{eval-rst} 86 | .. autoclass:: PulseUsage 87 | :members: 88 | ``` 89 | -------------------------------------------------------------------------------- /documentation/api/simulation.md: -------------------------------------------------------------------------------- 1 | (simulation)= 2 | ```{eval-rst} 3 | .. currentmodule:: MRzeroCore 4 | ``` 5 | 6 | # Simulation 7 | 8 | The main simulation of MRzero is based on Phase Distribution Graphs ([PDG simulation](pdg_sim)). It is split into two parts: the [pre-pass](pre_pass) and the [main-pass](main_pass). The pre-pass is an approximate calculation of the signal to determine which parts of the full signal equations (which states, in PDG terms) are important to the signal. This information is stored in a graph, which is then "executed" by the main-pass at full resolution. 
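As a minimal sketch (assuming a {class}`Sequence` `seq` and {class}`SimData` `data` have already been created, as shown on the [Sequence](sequence) and [Phantom](phantom) pages), a full simulation boils down to these two calls:

```python
import MRzeroCore as mr0

# Pre-pass: build the Phase Distribution Graph (rough estimate of which states matter)
graph = mr0.compute_graph(seq, data, 200, 1e-4)
# Main-pass: "execute" the graph to compute the ADC signal at full precision
signal = mr0.execute_graph(graph, seq, data, 0.01, 0.01)
```

The meaning of the precision parameters and how to run both passes on the GPU are explained in the [PDG simulation](pdg_sim) section.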
9 | 10 | There is also an [isochromat simulation](isochromat_sim_doc) available. It is meant as an intentionally simple ground truth and is not written for speed (it generally performs well but has a large overhead, noticeable at low resolutions). 11 | 12 | ::::{grid} 13 | :gutter: 3 14 | 15 | :::{grid-item-card} [PDG simulation](pdg_sim) 16 | Two-pass simulation based on Phase Distribution Graphs. Fast, analytical (noise-free), precise and differentiable signal calculation. 17 | ::: 18 | 19 | :::{grid-item-card} [Isochromat simulation](isochromat_sim_doc) 20 | Very basic isochromat-based Bloch simulation. Meant as a ground truth and an intentionally simple implementation. At least 10x slower than PDG. 21 | ::: 22 | :::: 23 | -------------------------------------------------------------------------------- /documentation/api/simulation/isochromat_sim.md: -------------------------------------------------------------------------------- 1 | (isochromat_sim_doc)= 2 | ```{eval-rst} 3 | .. currentmodule:: MRzeroCore 4 | ``` 5 | 6 | # Isochromat Bloch Simulation 7 | 8 | This simulation is meant as a ground truth and is purposely written to be as simple as possible. Spins (used interchangeably with isochromats) are distributed in a voxel via the [R2 Sequence](http://extremelearning.com.au/unreasonable-effectiveness-of-quasirandom-sequences/). 9 | 10 | :::{note} 11 | The isochromat simulation still uses cubic voxels, ignoring the voxel shape specified in {class}`SimData`. **Cubic voxels are nearly always the wrong choice**. The `"box"` voxel shape is only supported by PDG so it can be compared to the isochromat simulation. 12 | 13 | Additionally, the isochromat simulation does not support diffusion. 14 | ::: 15 | 16 | ```{eval-rst} 17 | .. autofunction:: isochromat_sim 18 | ``` 19 | 20 | ## Helper functions 21 | 22 | The simulation is split into multiple functions, each of them only executing a single, easily understandable step of the overall simulation. These are not exposed by `MRzeroCore` as they are not meant to be used directly. 23 | 24 | ```{eval-rst} 25 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.measure 26 | 27 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.relax 28 | 29 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.dephase 30 | 31 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.flip 32 | 33 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.grad_precess 34 | 35 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.B0_precess 36 | 37 | .. autofunction:: MRzeroCore.simulation.isochromat_sim.intravoxel_precess 38 | ``` 39 | -------------------------------------------------------------------------------- /documentation/api/simulation/pdg_sim.md: -------------------------------------------------------------------------------- 1 | (pdg_sim)= 2 | ```{eval-rst} 3 | .. currentmodule:: MRzeroCore 4 | ``` 5 | 6 | # PDG Simulation 7 | 8 | Phase Distribution Graphs split the magnetization into multiple states, forming a graph over the duration of the sequence. More details can be found in the [PDG paper](https://doi.org/10.1002/mrm.30055). 9 | 10 | The simulation runs in two passes: 11 | - The pre-pass simulates a large number of states, but only roughly estimates the signal. This information is used to assess which states are important for the signal, and which can be skipped because they don't contribute anything. 12 | - The main-pass uses the exact signal equation and takes all voxels into account, but simulates as few states as possible. 
It does so by using the information generated by the pre-pass. 13 | 14 | The precision / speed tradeoff for both passes can be chosen as follows: 15 | 16 | ```python 17 | # Default parameters: max 200 states per repetition 18 | # with at least 0.0001 of maximum possible magnetization 19 | graph = mr0.compute_graph(seq, data, 200, 1e-4) 20 | # Minimum emitted and latent signal of 0.01 (compared per repetition to strongest state) 21 | signal = mr0.execute_graph(graph, seq, data, 0.01, 0.01) 22 | ``` 23 | 24 | The pre-pass limits the state count by both the maximum number of states (higher = more states / slower but more precise simulation) and the minimum magnetization (lower = more states). In practice, it turns out that the second parameter is less surprising in how it influences the quality of the simulation, which means that the first parameter should be set to a high number and the second parameter should be increased if the pre-pass is too slow (usually the main-pass is more likely to be the bottleneck). 25 | 26 | The main-pass has two parameters which are used as thresholds to determine which states are used in the signal equation and which states are simulated. Usually they are both set to the same number. Lower numbers mean that more states are above the threshold and will be simulated. It is not useful to set `min_emitted_signal` to a value lower than `min_latent_signal`, as this would try to include states in the signal equation that are not simulated. 27 | 28 | Examples: 29 | ```python 30 | # Very precise simulation 31 | graph = mr0.compute_graph(seq, data, 10000, 1e-7) 32 | signal = mr0.execute_graph(graph, seq, data, 1e-7, 1e-7) 33 | 34 | # Average precision 35 | graph = mr0.compute_graph(seq, data, 10000, 1e-4) 36 | signal = mr0.execute_graph(graph, seq, data, 1e-3, 1e-4) 37 | 38 | # FID only 39 | graph = mr0.compute_graph(seq, data, 10000, 1e-3) 40 | signal = mr0.execute_graph(graph, seq, data, 1, 0.1) 41 | ``` 42 | 43 | Optimal settings always depend on the sequence itself. Some sequences are more demanding and require a very precise simulation; others are already decently described by only a few states. 44 | 45 | (pre_pass)= 46 | ## Pre-Pass 47 | 48 | Two functions are provided: {func}`compute_graph` and {func}`compute_graph_ext`. They are both wrappers around the actual pre-pass, which is written in Rust. {func}`compute_graph` computes average values for $T_1$, $T_2$, $T_2'$ and $D$ and then calls {func}`compute_graph_ext`. 49 | 50 | ```{eval-rst} 51 | .. autofunction:: compute_graph 52 | 53 | .. autofunction:: compute_graph_ext 54 | ``` 55 | 56 | ## Phase Distribution Graph 57 | 58 | ```{eval-rst} 59 | .. autoclass:: MRzeroCore.simulation.pre_pass.Graph 60 | :members: 61 | ``` 62 | 63 | (main_pass)= 64 | ## Main-Pass 65 | 66 | Takes the {class}`Sequence`, the {class}`SimData` and the {class}`Graph` produced from both by the [pre-pass](pre_pass) in order to calculate the measured ADC signal. Because of the work done by the [pre-pass](pre_pass), only the minimal work needed to achieve the desired precision is executed. This precision can be tuned by the {attr}`min_emitted_signal` and {attr}`min_latent_signal` thresholds. Higher values lead to fewer states being simulated, which improves speed and reduces accuracy. A value of 1 means that only 2 states will be simulated (z0 and one +), resulting in the FID signal. A value of 0 means that everything that somehow contributes to the signal will be simulated. 67 | 68 | ```{eval-rst} 69 | .. 
autofunction:: execute_graph 70 | ``` 71 | -------------------------------------------------------------------------------- /documentation/api/util.md: -------------------------------------------------------------------------------- 1 | (util)= 2 | 3 | # Utility functions 4 | 5 | ```{eval-rst} 6 | .. autofunction:: MRzeroCore.util.get_signal_from_real_system 7 | ``` 8 | 9 | ```{eval-rst} 10 | .. autofunction:: MRzeroCore.util.insert_signal_plot 11 | ``` 12 | 13 | ```{eval-rst} 14 | .. autofunction:: MRzeroCore.util.pulseq_plot 15 | ``` 16 | -------------------------------------------------------------------------------- /documentation/intro.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | 3 | 4 | ## MRzero Overview 5 | 6 | [MRzero](https://onlinelibrary.wiley.com/doi/abs/10.1002/mrm.28727) is a framework that replicates the whole MRI pipeline, consisting of sequence and phantom definition, signal simulation, and image reconstruction. It uses a state-of-the-art PDG Bloch simulation, capable of calculating an accurate ADC signal comparable to that of an in vivo measurement, in less time and without the noise of isochromat-based Monte-Carlo simulations. 7 | 8 | The MRzero Framework is built using [PyTorch](https://pytorch.org/), enabling it to run on CUDA-capable GPUs and providing automatic differentiation via backpropagation of the whole pipeline. This means that sequence parameters or phantom values can be optimized based on loss functions that consider the image reconstructed from the simulated signal. 9 | ## Playground MR0 10 | 11 | MRzero Core can be used in Jupyter Notebooks, including in online services like Google Colab. 12 | A constantly increasing selection of example scripts can be found in the [Playground MR0](playground_mr0). 13 | 14 | ## Getting Started 15 | 16 | To see a simple sequence in action, have a look at the [FLASH](flash) example! 17 | 18 | All examples are provided as [Jupyter Notebooks](https://jupyter.org/) and can be explored on sites like [Binder](https://mybinder.org/) or [Google Colab](https://colab.research.google.com/). Options are listed in the header of the corresponding documentation pages. 19 | 20 | To run the scripts locally on your computer, you need to install 21 | 22 | - PyTorch: https://pytorch.org/get-started/locally/ 23 | - MRzeroCore: ```pip install MRzeroCore``` 24 | 25 | MRzeroCore also contains a Pulseq .seq file parser and sequence exporter. It is fully self-contained, so [pypulseq](https://github.com/imr-framework/pypulseq) or alternatives are only needed if you want to program sequences in them directly. 26 | 27 | ```{note} 28 | This documentation builds on Jupyter Notebooks to represent text, code and outputs in an easy and reproducible way. 29 | For the best user experience, it is recommended to install MRzeroCore locally and to use Python scripts for development. Editors like [PyCharm](https://www.jetbrains.com/de-de/pycharm/), [Spyder](https://www.spyder-ide.org/) or [VSCode](https://code.visualstudio.com/) provide autocompletion, an interactive console and direct access to the extensive documentation of MRzero. 30 | ``` 31 | 32 | ## Literature 33 | 34 | ### Papers 35 | 36 | _Endres J, Weinmüller S, Dang HN, Zaiss M._ 37 | **Phase distribution graphs for fast, differentiable, and spatially encoded Bloch simulations of arbitrary MRI sequences.** 38 | Magn Reson Med. 2024; 92(3): 1189-1204. 
https://doi.org/10.1002/mrm.30055 39 | 40 | _Weinmüller S, Endres J, Dang HN, Stollberger R, Zaiss M._ 41 | **MR-zero meets FLASH – Controlling the transient signal decay in gradient- and rf-spoiled gradient echo sequences.** 42 | Magn Reson Med. 2024; https://doi.org/10.1002/mrm.30318 43 | 44 | _Loktyushin A, Herz K, Dang HN._ 45 | **MRzero - Automated discovery of MRI sequences using supervised learning.** 46 | Magn Reson Med. 2021; 86: 709–724. https://doi.org/10.1002/mrm.28727 47 | 48 | _Dang, H, Endres, J, Weinmüller S et al._ 49 | **MR-zero meets RARE MRI: Joint optimization of refocusing flip angles and neural networks to minimize T2-induced blurring in spin echo sequences.** 50 | Magn Reson Med. 2023; 90(4): 1345-1362. https://doi.org/10.1002/mrm.29710 51 | 52 | 53 | ### Conference abstracts 54 | 55 | _Glang F, Loktyushin A, Herz K, Dang HN, Deshmane A, Weinmüller S, Zaiss M et al._ 56 | **Advances in MRzero: supervised learning of parallel imaging sequences including joint non-Cartesian trajectory and flip angle optimization.** 57 | Proc. Intl. Soc. Mag. Reson. Med. 29, 29:4200, 2021. https://archive.ismrm.org/2021/4200.html. 58 | 59 | _Weinmüller S, Baum T, Dang HN, Endres J, Zaiss M._ 60 | **DREAM-zero – Optimized variable flip angles for decreased image blurring in magnetization-prepared DREAM sequences.** 61 | 2023, Magnetic Resonance Materials in Physics, Biology and Medicine, Book of Abstracts ESMRMB 2023 Online 39th Annual Scientific Meeting 4–7 October 2023 36 (1): 240–41. https://doi.org/10.1007/s10334-023-01108-9 62 | 63 | _Duarte M, Endres J, Rajput J, Weinmüller S, Zaiss M._ 64 | **Joint reconstruction of EPI and FLASH using Phase Distribution Graphs as a general MRI reconstruction model.** 65 | ESMRMB 2024 Online 40th Annual Scientific Meeting 2–5 October 20241. 66 | 67 | _Weinmüller S, Endres J, Dang N, Glang F, Zaiss M._ 68 | **Quantalizer: a quantitative digital twin to create and run tailored sequences.** 69 | In: Submitted to Magnetic Resonance Materials in Physics, Biology and Medicine, ESMRMB 2024 40th Annual Scientific Meeting 2–5 October 2024. Vol Magn Reson Mater Phy 37 (Suppl 1). ESMRMB 2024 Online 40th Annual Scientific Meeting 2–5 October 2024. doi:https://doi.org/10.1007/s10334-024-01191-6 70 | 71 | _Freudensprung M, Weinmüller S, Endres J, Nagel AM, Zaiss M._ 72 | **A simple pTx Pulseq extension for pulse-specific B1 shimming.** 73 | In Proc ESMRMB 2023. Basel; 2023, #LT53. https://doi.org/10.1007/s10334-023-01108-9 74 | 75 | _Freudensprung M, Weinmüller S, Endres J, Nagel AM, Zaiss M._ 76 | **kT-points optimization using Pulseq and MR-zero at 7T.** 77 | In Proc ESMRMB 2024. Barcelona; 2024, #442. https://doi.org/10.1007/s10334-024-01191-6 78 | 79 | _Malich J, Weinmüller S, Endres J, Dawood P, Zaiss M._ 80 | **MR-zero-tailored dummy refocusing pulses in TSE sequences reduce artifacts.** 81 | In: Submitted to Magnetic Resonance Materials in Physics, Biology and Medicine, ESMRMB 2024 40th Annual Scientific Meeting 2–5 October 2024. Vol Magn Reson Mater Phy 37 (Suppl 1). ESMRMB 2024 Online 40th Annual Scientific Meeting 2–5 October 2024; :189-190. 82 | 83 | _Weinmüller S, Dang HN, Endres J, Glang F, Loktyushin A, Zaiss M._ 84 | **A blurring-free 3D snapshot readout for fast CEST- or relaxation-prepared MRI.** 85 | In: 24. Jahrestagung Der Deutschen Sektion Der ISMRM. ; 2022:6-7. 
86 | 
87 | _Weinmüller S, Dang HN, Loktyushin A, Glang F, Doerfler A, Maier A, Schölkopf B, Scheffler K, Zaiss M._
88 | **MRzero sequence generation using analytic signal equations as forward model and neural network reconstruction for efficient auto-encoding.**
89 | In: Proc. Intl. Soc. Mag. Reson. Med. 29; 2021:1761.
90 | 
91 | _Baum T, Weinmüller S, Endres J, Liebig P, Zaiss M._
92 | **A magnetization-prepared DREAM sequence for CEST imaging with an intrinsic dynamic B0 and B1 reference (CEST-MP-DREAM).**
93 | Magnetic Resonance Materials in Physics, Biology and Medicine, Book of Abstracts ESMRMB 2023 Online 39th Annual Scientific Meeting 4–7 October 2023. 2023;36(1):88-89. doi:10.1007/s10334-023-01108-9.
94 | 
95 | _Baum T, Weinmüller S, Nagel AM, Vossiek M, Zaiss M._
96 | **Dynamic DREAM MRI: B0, B1 and Tx/Rx-phase mapping for assisting motion tracking systems.**
97 | Magnetic Resonance Materials in Physics, Biology and Medicine, Book of Abstracts ESMRMB 2023 Online 39th Annual Scientific Meeting 4–7 October 2023. 2023;36(1):66-68. doi:10.1007/s10334-023-01108-9
98 | 
99 | _Glang F, Loktyushin A, Herz K, Dang HN, Deshmane A, Weinmüller S, Doerfler A, Maier A, Schölkopf B, Scheffler K, Zaiss M._
100 | **Advances in MRzero – Supervised Learning of Parallel Imaging Sequences Including Joint Non-Cartesian Trajectory and Flip Angle Optimization.**
101 | Poster at the ISMRM & ISMRT Annual Meeting & Exhibition 2021; Virtual (2021); #4200.
102 | 
103 | _West D, Glang F, Endres J, Leitão D, McElroy S, Zaiss M, Hajnal J, Malik S._
104 | **Non-idealized system (NIS) optimization of EPI sequences at ultra-high field.**
105 | ISMRM & ISMRT Annual Meeting & Exhibition 2024, Singapore (2024); #0528.
106 | 
107 | _West D, Glang F, Endres J, Zaiss M, Hajnal J, Malik S._
108 | **Overcoming System Imperfections Using End-to-End MR Sequence Design.**
109 | ISMRM & ISMRT Annual Meeting & Exhibition 2023, Toronto (ON, Canada) (2023); #0061.
110 | 
--------------------------------------------------------------------------------
/documentation/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/logo.png
--------------------------------------------------------------------------------
/documentation/phantom_generation.md:
--------------------------------------------------------------------------------
1 | (generating_phantoms)=
2 | ```{eval-rst}
3 | .. currentmodule:: MRzeroCore
4 | ```
5 | 
6 | # Generating Phantoms
7 | 
8 | Phantoms are built from [BrainWeb](https://brainweb.bic.mni.mcgill.ca/) data. This data is not included directly.
9 | Instead, `mr0` includes a BrainWeb downloader that can be run once to download all segmentation data provided by BrainWeb. The segmentations are then filled with tissue values to produce files that can be loaded as described [here](load_brainweb).
10 | 
11 | Phantoms can be generated with different configurations depending on the use case, such as 3T or 7T data, high resolution, or whether to include fat. A fixed set of configurations facilitates reproducibility. To execute generation, just run the following code:
12 | 
13 | ```python
14 | import MRzeroCore as mr0
15 | 
16 | mr0.generate_brainweb_phantoms("output/brainweb", "3T")
17 | ```
18 | 
19 | ```{eval-rst}
20 | .. 
autofunction:: MRzeroCore.phantom.brainweb.generate_brainweb_phantoms 21 | ``` 22 | -------------------------------------------------------------------------------- /documentation/playground/quantified_brain.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground/quantified_brain.npz -------------------------------------------------------------------------------- /documentation/playground/templates/binary_masks.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": {}, 7 | "outputs": [], 8 | "source": [ 9 | "import MRzeroCore as mr0\n", 10 | "import matplotlib.pyplot as plt" 11 | ] 12 | }, 13 | { 14 | "cell_type": "markdown", 15 | "metadata": {}, 16 | "source": [ 17 | "# Generate phantoms with tissue masks" 18 | ] 19 | }, 20 | { 21 | "cell_type": "code", 22 | "execution_count": null, 23 | "metadata": {}, 24 | "outputs": [], 25 | "source": [ 26 | "\n", 27 | "mr0.generate_brainweb_phantoms(\"bw_phantoms\" , \"3T\")" 28 | ] 29 | }, 30 | { 31 | "cell_type": "code", 32 | "execution_count": null, 33 | "metadata": {}, 34 | "outputs": [], 35 | "source": [ 36 | "\n", 37 | "phantom = mr0.VoxelGridPhantom.brainweb(\"bw_phantoms/subject04_3T.npz\")\n", 38 | "\n", 39 | "obj = phantom.interpolate(256, 256, 32).slices([16])\n", 40 | "obj.plot(plot_masks=True)\n", 41 | "phantom = obj.build()" 42 | ] 43 | }, 44 | { 45 | "cell_type": "code", 46 | "execution_count": null, 47 | "metadata": {}, 48 | "outputs": [], 49 | "source": [ 50 | "\n", 51 | "# get tissue masks from phantom\n", 52 | "masks = phantom.tissue_masks\n", 53 | "\n", 54 | "#plot tissue masks\n", 55 | "plt.figure()\n", 56 | "for key, value in masks.items():\n", 57 | " plt.imshow(value[:, :, 0])\n", 58 | " plt.colorbar()\n", 59 | " plt.title(key)\n", 60 | " plt.show()" 61 | ] 62 | } 63 | ], 64 | "metadata": { 65 | "kernelspec": { 66 | "display_name": "mrzero_source", 67 | "language": "python", 68 | "name": "python3" 69 | }, 70 | "language_info": { 71 | "codemirror_mode": { 72 | "name": "ipython", 73 | "version": 3 74 | }, 75 | "file_extension": ".py", 76 | "mimetype": "text/x-python", 77 | "name": "python", 78 | "nbconvert_exporter": "python", 79 | "pygments_lexer": "ipython3", 80 | "version": "3.10.14" 81 | } 82 | }, 83 | "nbformat": 4, 84 | "nbformat_minor": 2 85 | } 86 | -------------------------------------------------------------------------------- /documentation/playground/templates/generate.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from copy import deepcopy 4 | 5 | 6 | def create_nb(source, template, output_path: str): 7 | snippets = {} 8 | snip = [] 9 | for line in source: 10 | if line.startswith("# !!!"): 11 | name = line[5:].strip() 12 | if name in snippets: 13 | print(f"[!] 
Snippet {name} defined more than once") 14 | snip = [] 15 | snippets[name] = snip 16 | else: 17 | snip.append(line) 18 | for snippet in snippets.values(): 19 | while len(snippet) > 0 and len(snippet[-1].strip()) == 0: 20 | snippet.pop() 21 | 22 | used_snippets = set() 23 | output = deepcopy(template) 24 | 25 | for cell in output["cells"]: 26 | if cell["cell_type"] == "code": 27 | template_source = cell["source"] 28 | cell["source"] = [] 29 | 30 | for line in template_source: 31 | if line.startswith("# !!!"): 32 | snippet_name = line[5:].strip() 33 | if snippet_name not in snippets: 34 | print(f"[!] Snippet {snippet_name} requested but not provided") 35 | else: 36 | if snippet_name in used_snippets: 37 | print(f"[!] Snippet {snippet_name} used more than once") 38 | else: 39 | print(f"Used snippet {snippet_name}") 40 | 41 | cell["source"] += snippets[snippet_name] 42 | used_snippets.add(snippet_name) 43 | else: 44 | cell["source"].append(line) 45 | 46 | for snippet_name in snippets: 47 | if snippet_name not in used_snippets: 48 | print(f"[!] Snippet {snippet_name} provided but not requested") 49 | 50 | with open(output_path, "w") as output_file: 51 | json.dump(output, output_file) 52 | 53 | 54 | cwd = os.path.dirname(os.path.realpath(__file__)) 55 | files = os.listdir(cwd) 56 | templates = [f for f in files if f.startswith("template")] 57 | instatiators = [f for f in files if f not in templates and f.endswith(".ipynb")] 58 | 59 | print(f"Generating notebooks based on {instatiators}") 60 | print(f"using the templates {templates}") 61 | 62 | template_src = {} 63 | for nb_name in templates: 64 | with open(os.path.join(cwd, nb_name)) as nb: 65 | template_src[nb_name] = json.load(nb) 66 | 67 | for nb_name in instatiators: 68 | with open(os.path.join(cwd, nb_name)) as nb: 69 | cells = json.load(nb)["cells"] 70 | for cell in cells: 71 | if cell["cell_type"] == "code": 72 | source = cell["source"] 73 | if source[0].startswith("# ***"): 74 | template, _, output = source[0][6:].partition("->") 75 | output = output.strip() 76 | outdir = os.path.join(os.path.dirname(cwd), "generated", nb_name.partition(".")[0]) 77 | os.makedirs(outdir, exist_ok=True) 78 | output = os.path.join(outdir, output) 79 | 80 | print(f"> Creating notebook: {nb_name} {template.strip()} -> {output.strip()}") 81 | template = template_src[template.strip()] 82 | create_nb(source, template, output) 83 | -------------------------------------------------------------------------------- /documentation/playground/templates/seqs.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 2, 6 | "metadata": {}, 7 | "outputs": [ 8 | { 9 | "name": "stderr", 10 | "output_type": "stream", 11 | "text": [ 12 | "c:\\Users\\endresjn\\Documents\\MRzero\\MRzero-Core\\.env\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. 
See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", 13 | " from .autonotebook import tqdm as notebook_tqdm\n" 14 | ] 15 | } 16 | ], 17 | "source": [ 18 | "import numpy as np\n", 19 | "np.int = int\n", 20 | "np.float = float\n", 21 | "np.complex = complex\n", 22 | "\n", 23 | "import pypulseq as pp\n", 24 | "\n", 25 | "# dummy system\n", 26 | "system = pp.Opts(\n", 27 | " max_grad=28, grad_unit='mT/m',\n", 28 | " max_slew=150, slew_unit='T/m/s',\n", 29 | " rf_ringdown_time=20e-6, rf_dead_time=100e-6,\n", 30 | " adc_dead_time=20e-6,\n", 31 | " grad_raster_time=50e-6\n", 32 | ")" 33 | ] 34 | }, 35 | { 36 | "cell_type": "code", 37 | "execution_count": 4, 38 | "metadata": {}, 39 | "outputs": [], 40 | "source": [ 41 | "# *** template_A.ipynb -> FLASH.ipynb\n", 42 | "\n", 43 | "# !!! CONFIG\n", 44 | "# Field of view, imaging volume and resolution\n", 45 | "fov = 200e-3\n", 46 | "slice_thickness = 8e-3\n", 47 | "Nread = 64\n", 48 | "Nphase = 64\n", 49 | "\n", 50 | "# Sequence's name\n", 51 | "experiment_id = 'flash'\n", 52 | "\n", 53 | "# !!! SEQUENCE DEFINITION\n", 54 | "def flash_2D(fov=200e-3, slice_thickness=8e-3,\n", 55 | " n_read=64, n_phase=64,\n", 56 | " system=system,\n", 57 | " flip_angle=10, phase_cycling=84):\n", 58 | " \"\"\"Linear, cartesian 2D FLASH with TR = 26 ms + 50 us * n_phase\"\"\"\n", 59 | " rf, gz, gzr = pp.make_sinc_pulse(\n", 60 | " flip_angle=flip_angle * np.pi / 180, duration=1e-3,\n", 61 | " slice_thickness=slice_thickness, apodization=0.5, time_bw_product=4,\n", 62 | " return_gz=True, system=system\n", 63 | " )\n", 64 | "\n", 65 | " adc_dur = n_phase * 50e-6\n", 66 | " gx = pp.make_trapezoid(channel='x', flat_area=n_read / fov, flat_time=adc_dur, system=system)\n", 67 | " adc = pp.make_adc(num_samples=n_read, duration=adc_dur, delay=gx.rise_time, system=system)\n", 68 | " gx_pre = pp.make_trapezoid(channel='x', area=-0.5 * gx.area, duration=5e-3, system=system)\n", 69 | " gx_spoil = pp.make_trapezoid(channel='x', area=1.5 * gx.area, duration=2e-3, system=system)\n", 70 | "\n", 71 | " seq = pp.Sequence(system)\n", 72 | " for i in range(n_phase):\n", 73 | " phase = 0.5 * phase_cycling * (2 + i + i**2)\n", 74 | " rf.phase_offset = (phase % 360) * np.pi / 180\n", 75 | " adc.phase_offset = rf.phase_offset\n", 76 | "\n", 77 | " phenc = (i - n_phase // 2) / fov\n", 78 | "\n", 79 | " seq.add_block(rf, gz)\n", 80 | " seq.add_block(gzr, pp.make_delay(5e-3))\n", 81 | " gp = pp.make_trapezoid(channel='y', area=phenc, duration=5e-3, system=system)\n", 82 | " seq.add_block(gx_pre, gp)\n", 83 | " seq.add_block(adc, gx)\n", 84 | " gp = pp.make_trapezoid(channel='y', area=-phenc, duration=5e-3, system=system)\n", 85 | " seq.add_block(gx_spoil, gp)\n", 86 | " seq.add_block(pp.make_delay(10e-3))\n", 87 | "\n", 88 | " seq.set_definition('FOV', [fov, fov, slice_thickness])\n", 89 | " return seq\n", 90 | "\n", 91 | "# !!! 
SEQUENCE CREATION\n", 92 | "seq = flash_2D(fov, slice_thickness, Nread, Nphase, system)\n", 93 | "seq.set_definition('Name', experiment_id)\n", 94 | "seq.write(experiment_id + '.seq')\n" 95 | ] 96 | } 97 | ], 98 | "metadata": { 99 | "kernelspec": { 100 | "display_name": ".env", 101 | "language": "python", 102 | "name": "python3" 103 | }, 104 | "language_info": { 105 | "codemirror_mode": { 106 | "name": "ipython", 107 | "version": 3 108 | }, 109 | "file_extension": ".py", 110 | "mimetype": "text/x-python", 111 | "name": "python", 112 | "nbconvert_exporter": "python", 113 | "pygments_lexer": "ipython3", 114 | "version": "3.11.4" 115 | } 116 | }, 117 | "nbformat": 4, 118 | "nbformat_minor": 2 119 | } 120 | -------------------------------------------------------------------------------- /documentation/playground_mr0/AdjDataUser2gB0_transversal_0.08moving_average.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground_mr0/AdjDataUser2gB0_transversal_0.08moving_average.mat -------------------------------------------------------------------------------- /documentation/playground_mr0/legacy_seqs/flash_DWI.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "tags": [ 8 | "hide-cell" 9 | ] 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "!pip install MRzeroCore &> /dev/null\n", 14 | "!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/subject05.npz &> /dev/null" 15 | ] 16 | }, 17 | { 18 | "cell_type": "code", 19 | "execution_count": null, 20 | "metadata": {}, 21 | "outputs": [], 22 | "source": [ 23 | "import MRzeroCore as mr0\n", 24 | "import matplotlib.pyplot as plt\n", 25 | "import numpy as np\n", 26 | "from numpy import pi\n", 27 | "import torch" 28 | ] 29 | }, 30 | { 31 | "attachments": {}, 32 | "cell_type": "markdown", 33 | "metadata": {}, 34 | "source": [ 35 | "(flash_dwi)=\n", 36 | "# DWI preparation with FLASH readout\n", 37 | "\n", 38 | "Change the b-value in build_seq() to the desired value" 39 | ] 40 | }, 41 | { 42 | "cell_type": "code", 43 | "execution_count": null, 44 | "metadata": {}, 45 | "outputs": [], 46 | "source": [ 47 | "def build_seq(fov=0.2) -> mr0.Sequence:\n", 48 | " seq = mr0.Sequence()\n", 49 | "\n", 50 | " # Needed, otherwise prep produces stronger first readout\n", 51 | " dummies = 1\n", 52 | "\n", 53 | " # Add DWI with a 90-180-90 prep sequence\n", 54 | " b = 1000 # s / mm^2\n", 55 | " t_grad = 10e-3\n", 56 | " t_rf = 2e-3\n", 57 | " # We manually scale xy grads for FOV and leave z alone.\n", 58 | " # Convert mm to m -> 1e3\n", 59 | " k = 1e3 * np.sqrt(b / (2/3 * t_grad + t_rf))\n", 60 | " print(f\"b-value: {b} -> gradient moment: {k}\")\n", 61 | "\n", 62 | " rep = seq.new_rep(2)\n", 63 | " rep.pulse.angle = pi/2\n", 64 | " rep.pulse.usage = mr0.PulseUsage.EXCIT\n", 65 | " rep.event_time[0] = t_rf\n", 66 | " rep.event_time[1] = t_grad\n", 67 | " rep.gradm[1, 2] = k\n", 68 | "\n", 69 | " rep = seq.new_rep(2)\n", 70 | " rep.pulse.angle = pi\n", 71 | " rep.pulse.usage = mr0.PulseUsage.REFOC\n", 72 | " rep.event_time[0] = t_rf\n", 73 | " rep.event_time[1] = t_grad\n", 74 | " rep.gradm[1, 2] = k\n", 75 | " rep.gradm[1, 0] = -96\n", 76 | "\n", 77 | " rep = seq.new_rep(2)\n", 78 | " rep.pulse.angle = pi/2\n", 79 | " rep.pulse.usage = mr0.PulseUsage.STORE\n", 
80 | " rep.event_time[0] = t_rf\n", 81 | " rep.event_time[1] = 10e-3 # Could increase this for more T1 weighting\n", 82 | "\n", 83 | " # FLASH readout\n", 84 | " for i in range(64 + dummies):\n", 85 | " rep = seq.new_rep(2 + 64 + 1)\n", 86 | " rep.pulse.usage = mr0.PulseUsage.EXCIT\n", 87 | " rep.pulse.angle = 7 * pi/180\n", 88 | " rep.pulse.phase = 0.5 * 117 * (i**2+i+2) * pi / 180\n", 89 | "\n", 90 | " rep.event_time[0] = 2e-3 # Pulse\n", 91 | " rep.event_time[1] = 2e-3 # Rewinder\n", 92 | " rep.event_time[2:-1] = 0.08e-3 # Readout\n", 93 | " rep.event_time[-1] = 2e-3 # Spoiler\n", 94 | "\n", 95 | " rep.gradm[1, 0] = 96 - 33\n", 96 | " rep.gradm[2:-1, 0] = 1\n", 97 | " rep.gradm[-1, 0] = 96 - 31\n", 98 | "\n", 99 | " if i >= dummies:\n", 100 | " # Linear reordered phase encoding\n", 101 | " rep.gradm[1, 1] = i - 32\n", 102 | " rep.gradm[-1, 1] = -rep.gradm[1, 1]\n", 103 | "\n", 104 | " rep.adc_usage[2:-1] = 1\n", 105 | " rep.adc_phase[2:-1] = pi - rep.pulse.phase\n", 106 | "\n", 107 | " seq.normalized_grads = False\n", 108 | " for rep in seq:\n", 109 | " # Don't scale z-gradients used for diffusion\n", 110 | " rep.gradm[:, :2] /= fov\n", 111 | "\n", 112 | " return seq" 113 | ] 114 | }, 115 | { 116 | "cell_type": "code", 117 | "execution_count": null, 118 | "metadata": { 119 | "tags": [ 120 | "hide-output" 121 | ] 122 | }, 123 | "outputs": [], 124 | "source": [ 125 | "# Simulate\n", 126 | "phantom = mr0.VoxelGridPhantom.brainweb(\"subject05.npz\")\n", 127 | "phantom = phantom.interpolate(64, 64, 32).slices([16])\n", 128 | "data = phantom.build()\n", 129 | "\n", 130 | "seq = build_seq(fov=data.size[0])\n", 131 | "seq.plot_kspace_trajectory()\n", 132 | "\n", 133 | "graph = mr0.compute_graph(seq, data)\n", 134 | "signal = mr0.execute_graph(graph, seq, data, print_progress=False)" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "# Plot the result\n", 144 | "kspace = signal.view(64, 64)\n", 145 | "reco = torch.fft.fftshift(\n", 146 | " torch.fft.fft2(torch.fft.fftshift(kspace), norm=\"forward\")\n", 147 | ")\n", 148 | "\n", 149 | "plt.figure(figsize=(8, 7))\n", 150 | "plt.subplot(221)\n", 151 | "plt.title(\"abs(reco)\")\n", 152 | "plt.imshow(reco.abs().cpu().flip(0), vmin=0)\n", 153 | "plt.colorbar()\n", 154 | "plt.axis(\"off\")\n", 155 | "plt.subplot(222)\n", 156 | "plt.title(\"angle(reco)\")\n", 157 | "plt.imshow(reco.angle().cpu().flip(0), vmin=-np.pi, vmax=np.pi, cmap=\"twilight\")\n", 158 | "plt.colorbar()\n", 159 | "plt.axis(\"off\")\n", 160 | "plt.subplot(223)\n", 161 | "plt.title(\"log(abs(kspace))\")\n", 162 | "plt.imshow(kspace.abs().log().cpu().flip(0))\n", 163 | "plt.colorbar()\n", 164 | "plt.axis(\"off\")\n", 165 | "plt.subplot(224)\n", 166 | "plt.title(\"Proton Density\")\n", 167 | "plt.imshow(phantom.PD.cpu()[:, :, 0].T, origin='lower', vmin=0, vmax=1)\n", 168 | "plt.colorbar()\n", 169 | "plt.axis(\"off\")\n", 170 | "plt.show()" 171 | ] 172 | } 173 | ], 174 | "metadata": { 175 | "kernelspec": { 176 | "display_name": "Python 3", 177 | "language": "python", 178 | "name": "python3" 179 | }, 180 | "language_info": { 181 | "codemirror_mode": { 182 | "name": "ipython", 183 | "version": 3 184 | }, 185 | "file_extension": ".py", 186 | "mimetype": "text/x-python", 187 | "name": "python", 188 | "nbconvert_exporter": "python", 189 | "pygments_lexer": "ipython3", 190 | "version": "3.11.4" 191 | }, 192 | "orig_nbformat": 4 193 | }, 194 | "nbformat": 4, 195 | "nbformat_minor": 2 196 | } 197 | 
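A short note on the diffusion weighting computed in `build_seq()` of the notebook above (a sketch of the standard rectangular-lobe relation; the symbols δ, Δ and q below are not defined in the notebook itself): with lobe duration δ = `t_grad` and lobe onsets separated by the refocusing pulse, Δ = `t_grad + t_rf`, the b-value and the lobe gradient moment q are related by

$$ b = q^2\left(\Delta - \frac{\delta}{3}\right) = q^2\left(\tfrac{2}{3}\,t_\mathrm{grad} + t_\mathrm{rf}\right), $$

which is why the code solves $q = \sqrt{b / (\tfrac{2}{3}\,t_\mathrm{grad} + t_\mathrm{rf})}$; the factor of 1e3 only converts the resulting moment from 1/mm (b given in s/mm²) to the 1/m used for `gradm`, as the code comment states.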
-------------------------------------------------------------------------------- /documentation/playground_mr0/legacy_seqs/mr0_CS_cartesian_seq.ipynb: -------------------------------------------------------------------------------- 1 | {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"elapsed":13863,"status":"ok","timestamp":1678740065139,"user":{"displayName":"Zhengguo Tan","userId":"10291917877743041231"},"user_tz":-60},"id":"vTjDmgyofjbF","outputId":"c702ebb6-274c-4e2e-fec0-c67810ceb02c","tags":["hide-cell"]},"outputs":[],"source":["!pip install pypulseq==1.3.1.post1 &> /dev/null\n","!pip install MRzeroCore &> /dev/null\n","!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/numerical_brain_cropped.mat &> /dev/null"]},{"cell_type":"markdown","metadata":{},"source":["(mr0_CS_cartesian_seq)=\n","# Compressed Sensing - cartesian"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/","height":1000},"executionInfo":{"elapsed":20513,"status":"ok","timestamp":1678741422978,"user":{"displayName":"Zhengguo Tan","userId":"10291917877743041231"},"user_tz":-60},"id":"O-_sr6lZjR_n","outputId":"d9cd1a67-4a63-44e6-df28-96198556f5d4"},"outputs":[],"source":["#@title generate\n","# %% S0. SETUP env\n","from skimage.restoration import denoise_tv_chambolle\n","import pywt\n","import MRzeroCore as mr0\n","import numpy as np\n","from matplotlib import pyplot as plt\n","plt.rcParams['figure.figsize'] = [10, 5]\n","plt.rcParams['figure.dpi'] = 100 # 200 e.g. is really fine, but slower\n","import pypulseq as pp\n","import torch\n","\n","experiment_id = 'exD01_bSSFP_2D'\n","\n","# %% S1. SETUP sys\n","\n","# choose the scanner limits\n","system = pp.Opts(\n"," max_grad=28,\n"," grad_unit='mT/m',\n"," max_slew=150,\n"," slew_unit='T/m/s',\n"," rf_ringdown_time=20e-6,\n"," rf_dead_time=100e-6,\n"," adc_dead_time=20e-6,\n"," grad_raster_time=50*10e-6\n",")\n","\n","# %% S2. 
DEFINE the sequence \n","seq = pp.Sequence()\n","\n","# Define FOV and resolution\n","fov = 200e-3 \n","slice_thickness=8e-3\n","sz=[128,128] # spin system size / resolution\n","Nread = sz[0] # frequency encoding steps/samples\n","Nphase = sz[1] # phase encoding steps/samples\n","\n","# Define rf events\n","rf1 = pp.make_sinc_pulse(flip_angle=5 * np.pi / 180, duration=1e-3,slice_thickness=slice_thickness, apodization=0.5, time_bw_product=4, system=system)\n","# rf1, _= pp.make_block_pulse(flip_angle=90 * np.pi / 180, duration=1e-3, system=system)\n","\n","# Define other gradients and ADC events\n","gx = pp.make_trapezoid(channel='x', flat_area=Nread / fov, flat_time=10e-3, system=system)\n","adc = pp.make_adc(num_samples=Nread, duration=10e-3, phase_offset=0*np.pi/180,delay=gx.rise_time, system=system)\n","gx_pre = pp.make_trapezoid(channel='x', area=-gx.area / 2, duration=5e-3, system=system)\n","gx_spoil = pp.make_trapezoid(channel='x', area=1.5*gx.area, duration=2e-3, system=system)\n","\n","rf_phase = 0\n","rf_inc = 0\n","rf_spoiling_inc=117\n","\n","phase_enc__gradmoms = (torch.arange(0,Nphase,1)-Nphase//2) / fov\n","\n","# ======\n","# CONSTRUCT SEQUENCE\n","# ======\n","\n","idx = np.random.normal(loc=Nphase//2, scale=Nphase//4, size=(Nphase//2,)).astype(int)\n","idx[idx>=Nphase] = Nphase - 1\n","idx[idx<0] = 0\n","print('idx: ', idx)\n","\n","# idx = [1,5,15,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,45,50,55]\n","#idx = np.linspace(0,63,64)\n","#idx = [5,15,20,27,28,29,30,31,32,33,34,35,36,37,38,39,40,45,50,55]\n","# idx = np.random.poisson(Nread/2,Nread*8)\n","idx = np.unique(idx)\n","for ii in range(0, len(idx)): # e.g. -64:63\n","\n"," rf1.phase_offset = rf_phase / 180 * np.pi # set current rf phase\n"," \n"," adc.phase_offset = rf_phase / 180 * np.pi # follow with ADC\n"," rf_inc = divmod(rf_inc + rf_spoiling_inc, 360.0)[1] # increase increment\n"," rf_phase = divmod(rf_phase + rf_inc, 360.0)[1] # increment additional pahse\n","\n"," seq.add_block(rf1)\n"," gp= pp.make_trapezoid(channel='y', area=phase_enc__gradmoms[int(idx[ii])], duration=5e-3, system=system)\n"," seq.add_block(gx_pre,gp)\n"," seq.add_block(adc,gx)\n"," gp= pp.make_trapezoid(channel='y', area=-phase_enc__gradmoms[int(idx[ii])], duration=5e-3, system=system)\n"," seq.add_block(gx_spoil,gp)\n"," if ii= epsilon\n"," low_values = coeff <= -epsilon\n"," coeff[shrink_values] = 0\n"," coeff[high_values] -= epsilon\n"," coeff[low_values] += epsilon\n","\n","\n","# help?\n","# https://www2.isye.gatech.edu/~brani/wp/kidsA.pdf\n","#for family in pywt.families():\n","# print(\"%s family: \" % family + ', '.join(pywt.wavelist(family)))\n","\n","\n","def waveletShrinkage(current, epsilon):\n"," # Compute Wavelet decomposition\n"," cA, (cH, cV, cD) = pywt.dwt2(current, 'haar')\n"," # Shrink\n"," shrink(cA, epsilon)\n"," shrink(cH, epsilon)\n"," shrink(cV, epsilon)\n"," shrink(cD, epsilon)\n"," wavelet = cA, (cH, cV, cD)\n"," # return inverse WT\n"," return pywt.idwt2(wavelet, 'haar')\n","\n","\n","def updateData(k_space, pattern, current, step, i):\n"," # go to k-space\n"," update = np.fft.ifft2(np.fft.fftshift(current))\n"," # compute difference\n"," update = k_space - (update * pattern)\n"," #print(\"i: {}, consistency RMSEpc: {:3.6f}\".format(i, np.abs(update[:]).sum() * 100))\n"," # return to image space\n"," update = np.fft.fftshift(np.fft.fft2(update))\n"," # improve current estimation by consitency\n"," update = current + (step * update)\n"," return update\n","\n","# %% S6.3 undersampling and 
undersampled reconstruction\n","# space= space/ np.linalg.norm(space[:]) # normalization of the data somethimes helps\n","\n","# parameters of iterative reconstructio using total variation denoising\n","denoising_strength = 5e-5\n","number_of_iterations = 4000\n","stepsz = 0.1\n","\n","# actual iterative reconstruction algorithm\n","current = np.zeros(kspace.shape)\n","first = updateData(kspace, pattern, current, 1, 0)\n","current_shrink = first\n","all_iter = np.zeros((kspace.shape[0], kspace.shape[1], number_of_iterations))\n","\n","for i in range(number_of_iterations):\n"," current = updateData(kspace, pattern, current_shrink,stepsz, i)\n","\n"," current_shrink = denoise_tv_chambolle(abs(current), denoising_strength)\n"," # current_shrink = waveletShrinkage(abs(current), denoising_strength)\n","\n"," all_iter[:, :, i] = current\n","\n","\n","plt.subplot(323)\n","plt.set_cmap(plt.gray())\n","plt.imshow(abs(first))\n","plt.ylabel('first iter (=NUFFT)')\n","plt.subplot(325)\n","plt.set_cmap(plt.gray())\n","plt.imshow(abs(current_shrink))\n","plt.ylabel('final recon')\n","\n","\n","# %% Plot all iter\n","# make 25 example iterations\n","idx = np.linspace(1, all_iter.shape[2], 25) - 1\n","# choose them from all iters\n","red_iter = all_iter[:, :, tuple(idx.astype(int))]\n","Tot = red_iter.shape[2]\n","Rows = Tot // 5\n","if Tot % 5 != 0:\n"," Rows += 1\n","Position = range(1, Tot + 1) # Position index\n","\n","fig = plt.figure()\n","for k in range(Tot):\n"," ax = fig.add_subplot(Rows, 5, Position[k])\n"," ax.imshow((abs((red_iter[:, :, k]))))\n"," plt.title('iter {}'.format(idx[k].astype(int)))\n"," print(k)\n","plt.show()"]}],"metadata":{"colab":{"provenance":[{"file_id":"1uTk3lc-O3xZS-rLDSkOLEYVV1rM0UTDJ","timestamp":1676904915190},{"file_id":"1lnFKubthQBxkz19cY7ScS-S0Hj9vHjEj","timestamp":1676708491940}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"codemirror_mode":{"name":"ipython","version":3},"file_extension":".py","mimetype":"text/x-python","name":"python","nbconvert_exporter":"python","pygments_lexer":"ipython3","version":"3.11.3"}},"nbformat":4,"nbformat_minor":0} 2 | -------------------------------------------------------------------------------- /documentation/playground_mr0/mr0_RARE_2D_seq_multi_shot.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": { 6 | "colab_type": "text", 7 | "id": "view-in-github" 8 | }, 9 | "source": [ 10 | "\"Open" 11 | ] 12 | }, 13 | { 14 | "cell_type": "code", 15 | "execution_count": null, 16 | "metadata": { 17 | "id": "vTjDmgyofjbF", 18 | "tags": [ 19 | "hide-cell" 20 | ] 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "!pip install pypulseq &> /dev/null\n", 25 | "!pip install MRzeroCore &> /dev/null\n", 26 | "!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/numerical_brain_cropped.mat &> /dev/null" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "# @title On Google Colab, you need to restart the runtime after executing this cell\n", 36 | "!pip install numpy==1.24" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": { 42 | "id": "Vp6yK0GH3BXF" 43 | }, 44 | "source": [ 45 | "(RARE_2D_multishot_seq)=\n", 46 | "# 2D RARE" 47 | ] 48 | }, 49 | { 50 | "cell_type": "code", 51 | "execution_count": null, 52 | "metadata": { 53 | "cellView": "form", 54 | "colab": { 55 | 
"base_uri": "https://localhost:8080/", 56 | "height": 1000 57 | }, 58 | "id": "O-_sr6lZjR_n", 59 | "outputId": "f8e745c2-8777-4901-c883-d218afb694d3" 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "#@title RARE\n", 64 | "import numpy as np\n", 65 | "import MRzeroCore as mr0\n", 66 | "import pypulseq as pp\n", 67 | "import torch\n", 68 | "import matplotlib.pyplot as plt\n", 69 | "\n", 70 | "plt.rcParams['figure.figsize'] = [10, 5]\n", 71 | "plt.rcParams['figure.dpi'] = 100 # 200 e.g. is really fine, but slower\n", 72 | "\n", 73 | "experiment_id = 'RARE_2D'\n", 74 | "\n", 75 | "# %% S1. SETUP sys\n", 76 | "system = pp.Opts(\n", 77 | " max_grad=28, grad_unit='mT/m', max_slew=150, slew_unit='T/m/s',\n", 78 | " rf_ringdown_time=20e-6, rf_dead_time=100e-6,\n", 79 | " adc_dead_time=20e-6, grad_raster_time=10e-6)\n", 80 | "\n", 81 | "seq = pp.Sequence(system)\n", 82 | "# Define FOV and resolution\n", 83 | "fov = 200e-3\n", 84 | "slice_thickness = 8e-3\n", 85 | "\n", 86 | "\n", 87 | "base_resolution= 42 # @param {type: \"slider\", min: 2, max: 112,step:2}\n", 88 | "\n", 89 | "Nread = base_resolution # frequency encoding steps/samples\n", 90 | "Nphase = base_resolution # phase encoding steps/samples\n", 91 | "\n", 92 | "TE_ms=5 # @param {type: \"slider\", min: 0.0, max: 200.0}\n", 93 | "TE=TE_ms*1e-3\n", 94 | "TR=5 # @param {type: \"slider\", min: 0.0, max: 20}\n", 95 | "TI_s=0 # @param {type: \"slider\", min: 0.0, max: 10.0, step: 0.1}\n", 96 | "Excitation_FA=90 # @param {type: \"slider\", min: 10, max: 270}\n", 97 | "Refocusing_FA=120 # @param {type: \"slider\", min: 10, max: 270}\n", 98 | "PEtype = 'linear' # @param ['centric', 'linear']\n", 99 | "r_spoil =2 # @param {type: \"slider\", min: 0, max: 3}\n", 100 | "PE_grad_on=True # @param {type: \"boolean\"}\n", 101 | "RO_grad_on=True # @param {type: \"boolean\"}\n", 102 | "shots=1 # @param {type: \"slider\", min: 1, max: 8}\n", 103 | "dumshots=0 # @param {type: \"slider\", min: 0, max: 3}\n", 104 | "dumref=1 # @param {type: \"slider\", min: 0, max: 10}\n", 105 | "\n", 106 | "\n", 107 | "# Define rf events\n", 108 | "rf1, gz1, gzr1 = pp.make_sinc_pulse(\n", 109 | " flip_angle=Excitation_FA * np.pi / 180, phase_offset=90 * np.pi / 180, duration=1e-3,\n", 110 | " slice_thickness=slice_thickness, apodization=0.5, time_bw_product=4,\n", 111 | " system=system, return_gz=True)\n", 112 | "\n", 113 | "rf2, gz2, _ = pp.make_sinc_pulse(\n", 114 | " flip_angle=Refocusing_FA* np.pi / 180, duration=1e-3,\n", 115 | " slice_thickness=slice_thickness, apodization=0.5, time_bw_product=4,\n", 116 | " system=system, return_gz=True)\n", 117 | "\n", 118 | "dwell=50e-6\n", 119 | "\n", 120 | "G_flag=(int(RO_grad_on),int(PE_grad_on)) # gradient flag (read,PE), if (0,0) all gradients are 0, for (1,0) PE is off\n", 121 | "\n", 122 | "# Define other gradients and ADC events\n", 123 | "gx = pp.make_trapezoid(channel='x', flat_area=Nread / fov*G_flag[0], flat_time=Nread*dwell, system=system)\n", 124 | "adc = pp.make_adc(num_samples=Nread, duration=Nread*dwell, phase_offset=90 * np.pi / 180, delay=gx.rise_time, system=system)\n", 125 | "gx_pre0 = pp.make_trapezoid(channel='x', area=+((1.0 + r_spoil) * gx.area / 2) , duration=1.5e-3, system=system)\n", 126 | "gx_prewinder = pp.make_trapezoid(channel='x', area=+(r_spoil * gx.area / 2 ), duration=1e-3, system=system)\n", 127 | "gp = pp.make_trapezoid(channel='y', area=0 / fov, duration=1e-3, system=system)\n", 128 | "rf_prep = pp.make_block_pulse(flip_angle=180 * np.pi / 180, duration=1e-3, system=system)\n", 129 | "\n", 130 | 
"\n", 131 | "if PE_grad_on:\n", 132 | " if PEtype == 'centric':\n", 133 | " phenc = np.asarray([i // 2 if i % 2 == 0 else -(i + 1) // 2 for i in range(Nphase)]) / fov\n", 134 | " else:\n", 135 | " phenc = np.arange(-Nphase // 2, Nphase // 2) / fov\n", 136 | "else:\n", 137 | " phenc = np.zeros((Nphase, ))\n", 138 | "\n", 139 | "# the minimal TE is given by one full period form ref pulse to ref pulse, thus gz2+gx+2*gp\n", 140 | "minTE2=(pp.calc_duration(gz2) +pp.calc_duration(gx) + 2*pp.calc_duration(gp))/2\n", 141 | "\n", 142 | "minTE2=round(minTE2/10e-5)*10e-5\n", 143 | "\n", 144 | "\n", 145 | "# to realize longer TE, we introduce a TEdelay that is added before and afetr the encoding period\n", 146 | "TEd=round(max(0, (TE/2-minTE2))/10e-5)*10e-5 # round to raster time\n", 147 | "\n", 148 | "if TEd==0:\n", 149 | " print('echo time set to minTE [ms]', 2*(minTE2 +TEd)*1000)\n", 150 | "else:\n", 151 | " print(' TE [ms]', 2*(minTE2 +TEd)*1000)\n", 152 | "\n", 153 | "TRd=0\n", 154 | "if dumshots+shots>1:\n", 155 | " TRd=TR - Nphase//shots*TE\n", 156 | "\n", 157 | "# seq loop starts\n", 158 | "for shot in range(-dumshots, shots):\n", 159 | " # FLAIR\n", 160 | " if TI_s>0:\n", 161 | " seq.add_block(rf_prep)\n", 162 | " seq.add_block(pp.make_delay(TI_s))\n", 163 | " seq.add_block(gx_pre0)\n", 164 | "\n", 165 | " seq.add_block(rf1,gz1)\n", 166 | " seq.add_block(gx_pre0,gzr1)\n", 167 | "\n", 168 | " # last timing step is to add TE/2 also between excitation and first ref pulse\n", 169 | " # from pulse top to pulse top we have already played out one full rf and gx_pre0, thus we substract these from TE/2\n", 170 | " seq.add_block(pp.make_delay((minTE2 +TEd ) - pp.calc_duration(gz1)-pp.calc_duration(gx_pre0)))\n", 171 | "\n", 172 | " if shot<0:\n", 173 | " phenc_dum=np.zeros(Nphase//shots+dumref) # add refocusing dummies\n", 174 | " else:\n", 175 | " phenc_dum=np.concatenate([np.repeat(np.nan, dumref), phenc[shot::shots]]) # add refocusing dummies\n", 176 | "\n", 177 | " for ii, encoding in enumerate(phenc_dum): # e.g. -64:63\n", 178 | " dum_ref_flag=0\n", 179 | " if np.isnan(encoding): encoding = 1e-8; dum_ref_flag=1 # no pe gradient if refdummy\n", 180 | "\n", 181 | " gp = pp.make_trapezoid(channel='y', area=+encoding , duration=1e-3, system=system)\n", 182 | " gp_ = pp.make_trapezoid(channel='y', area=-encoding, duration=1e-3, system=system)\n", 183 | "\n", 184 | " seq.add_block(rf2,gz2)\n", 185 | " seq.add_block(pp.make_delay(TEd)) # TE delay\n", 186 | " seq.add_block(gx_prewinder, gp)\n", 187 | "\n", 188 | " if shot<0 or dum_ref_flag: #added dummy shots\n", 189 | " seq.add_block(gx)\n", 190 | " else:\n", 191 | " seq.add_block(adc, gx)\n", 192 | " seq.add_block(gx_prewinder, gp_)\n", 193 | " seq.add_block(pp.make_delay(TEd)) # TE delay\n", 194 | " seq.add_block(pp.make_delay(TRd))\n", 195 | "\n", 196 | "# %% S2. CHECK, PLOT and WRITE the sequence as .seq\n", 197 | "# Check whether the timing of the sequence is correct\n", 198 | "ok, error_report = seq.check_timing()\n", 199 | "if ok:\n", 200 | " print('Timing check passed successfully')\n", 201 | "else:\n", 202 | " print('Timing check failed. 
Error listing follows:')\n", 203 | " [print(e) for e in error_report]\n", 204 | "\n", 205 | "\n", 206 | "# %% S3 quick 2D brain phantom sim and plot\n", 207 | "signal = mr0.util.simulate_2d(seq)\n", 208 | "#seq.plot(plot_now=False,time_range=(0,2.5*TE))\n", 209 | "seq.plot(plot_now=False)\n", 210 | "mr0.util.insert_signal_plot(seq=seq, signal =signal.numpy())\n", 211 | "# Get figure handles\n", 212 | "fig_handles = plt.get_fignums()\n", 213 | "\n", 214 | "# Iterate and save each figure\n", 215 | "for fig_num in fig_handles:\n", 216 | " plt.figure(fig_num)\n", 217 | " plt.savefig(f'TSE_{fig_num}.png', format='png')\n", 218 | "\n", 219 | "plt.show()\n", 220 | "seq.write('external.seq')\n", 221 | "seq0 = mr0.Sequence.import_file('external.seq')\n", 222 | "\n", 223 | "reco = mr0.reco_adjoint(signal, seq0.get_kspace(), (Nread, Nphase, 1), (0.2, 0.2, 1))\n", 224 | "mr0.util.imshow(reco.abs(),cmap='gray'); plt.colorbar()" 225 | ] 226 | } 227 | ], 228 | "metadata": { 229 | "colab": { 230 | "include_colab_link": true, 231 | "provenance": [] 232 | }, 233 | "kernelspec": { 234 | "display_name": ".venv", 235 | "language": "python", 236 | "name": "python3" 237 | }, 238 | "language_info": { 239 | "codemirror_mode": { 240 | "name": "ipython", 241 | "version": 3 242 | }, 243 | "file_extension": ".py", 244 | "mimetype": "text/x-python", 245 | "name": "python", 246 | "nbconvert_exporter": "python", 247 | "pygments_lexer": "ipython3", 248 | "version": "3.11.3" 249 | } 250 | }, 251 | "nbformat": 4, 252 | "nbformat_minor": 0 253 | } 254 | -------------------------------------------------------------------------------- /documentation/playground_mr0/mr0_pypulseq_exmpls_seq.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "executionInfo": { 8 | "elapsed": 38992, 9 | "status": "ok", 10 | "timestamp": 1696783114404, 11 | "user": { 12 | "displayName": "Jonathan Endres", 13 | "userId": "15767859839481375594" 14 | }, 15 | "user_tz": -120 16 | }, 17 | "id": "vTjDmgyofjbF", 18 | "tags": [ 19 | "hide-cell" 20 | ] 21 | }, 22 | "outputs": [], 23 | "source": [ 24 | "!pip install pypulseq==1.3.1.post1 &> /dev/null\n", 25 | "!pip install MRzeroCore &> /dev/null\n", 26 | "!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/numerical_brain_cropped.mat &> /dev/null" 27 | ] 28 | }, 29 | { 30 | "cell_type": "code", 31 | "execution_count": null, 32 | "metadata": {}, 33 | "outputs": [], 34 | "source": [ 35 | "# @title On Google Colab, you need to restart the runtime after executing this cell\n", 36 | "!pip install numpy==1.24" 37 | ] 38 | }, 39 | { 40 | "cell_type": "markdown", 41 | "metadata": { 42 | "id": "kW_eOCdVUzNa" 43 | }, 44 | "source": [ 45 | "(mr0_pypulseq_example)=\n", 46 | "# Simulate pypulseq example files\n", 47 | "here the pypulseq example files from pypulseq/seq_examples.scripts" 48 | ] 49 | }, 50 | { 51 | "cell_type": "code", 52 | "execution_count": null, 53 | "metadata": { 54 | "colab": { 55 | "base_uri": "https://localhost:8080/", 56 | "height": 1000 57 | }, 58 | "executionInfo": { 59 | "elapsed": 107037, 60 | "status": "ok", 61 | "timestamp": 1696783221437, 62 | "user": { 63 | "displayName": "Jonathan Endres", 64 | "userId": "15767859839481375594" 65 | }, 66 | "user_tz": -120 67 | }, 68 | "id": "O-_sr6lZjR_n", 69 | "outputId": "5c4cd1fa-718f-46d6-f0fc-f52cfd74385f", 70 | "tags": [ 71 | "hide-output" 72 | ] 73 | }, 74 | "outputs": [], 75 | 
"source": [ 76 | "#@title 1.a choose pypulseq example seq files (first call generates them)\n", 77 | "import numpy as np\n", 78 | "# newer numpy versions don't contain this, but pypulseq still relies on it\n", 79 | "np.int = int\n", 80 | "np.float = float\n", 81 | "np.complex = complex\n", 82 | "\n", 83 | "# Only build some for faster doc build, you can uncomment all\n", 84 | "# from pypulseq.seq_examples.scripts import write_epi\n", 85 | "# from pypulseq.seq_examples.scripts import write_epi_se\n", 86 | "# from pypulseq.seq_examples.scripts import write_epi_se_rs\n", 87 | "from pypulseq.seq_examples.scripts import write_gre\n", 88 | "# from pypulseq.seq_examples.scripts import write_gre_label\n", 89 | "# from pypulseq.seq_examples.scripts import write_haste\n", 90 | "# from pypulseq.seq_examples.scripts import write_tse\n", 91 | "# from pypulseq.seq_examples.scripts import write_ute\n", 92 | "\n", 93 | "#@title choose pypulseq example\n", 94 | "seq_file = \"gre_pypulseq.seq\" #@param [\"epi_pypulseq.seq\", \"epi_se_pypulseq.seq\", \"epi_se_rs_pypulseq.seq\", \"gre_pypulseq.seq\", \"gre_label_pypulseq.seq\", \"haste_pypulseq.seq\", \"tse_pypulseq.seq\", \"ute_pypulseq.seq\"] {allow-input: true}" 95 | ] 96 | }, 97 | { 98 | "cell_type": "code", 99 | "execution_count": null, 100 | "metadata": { 101 | "colab": { 102 | "base_uri": "https://localhost:8080/", 103 | "height": 1000 104 | }, 105 | "executionInfo": { 106 | "elapsed": 57193, 107 | "status": "ok", 108 | "timestamp": 1696783278618, 109 | "user": { 110 | "displayName": "Jonathan Endres", 111 | "userId": "15767859839481375594" 112 | }, 113 | "user_tz": -120 114 | }, 115 | "id": "HTYTghFLjeCY", 116 | "outputId": "dcad0089-5a06-4034-bff4-12799c7cf422" 117 | }, 118 | "outputs": [], 119 | "source": [ 120 | "#@title 2. simulate for brain phantom\n", 121 | "import MRzeroCore as mr0\n", 122 | "import pypulseq as pp\n", 123 | "import torch\n", 124 | "import matplotlib.pyplot as plt\n", 125 | "\n", 126 | "plt.rcParams['figure.figsize'] = [10, 5]\n", 127 | "plt.rcParams['figure.dpi'] = 100 # 200 e.g. is really fine, but slower\n", 128 | "\n", 129 | "seq = pp.Sequence()\n", 130 | "seq.read(seq_file)\n", 131 | "\n", 132 | "print('load phantom')\n", 133 | "# %% S4: SETUP SPIN SYSTEM/object on which we can run the MR sequence external.seq from above\n", 134 | "sz = [64, 64]\n", 135 | "obj_p = mr0.VoxelGridPhantom.load_mat('numerical_brain_cropped.mat')\n", 136 | "brain_phantom_res = 64 #@param {type:\"slider\", min:16, max:128, step:16}\n", 137 | "obj_p = obj_p.interpolate(brain_phantom_res, brain_phantom_res, 1)\n", 138 | "obj_p.B0[:] = 0\n", 139 | "plot_phantom = True #@param {type:\"boolean\"}\n", 140 | "if plot_phantom: obj_p.plot()\n", 141 | "\n", 142 | "obj_p = obj_p.build()\n", 143 | "print('simulate (2D) \\n' + seq_file)\n", 144 | "seq0 = mr0.Sequence.import_file(seq_file)\n", 145 | "# seq0.plot_kspace_trajectory()" 146 | ] 147 | }, 148 | { 149 | "cell_type": "code", 150 | "execution_count": null, 151 | "metadata": { 152 | "tags": [] 153 | }, 154 | "outputs": [], 155 | "source": [ 156 | "# Simulate the sequence\n", 157 | "\n", 158 | "graph = mr0.compute_graph(seq0, obj_p, 200, 1e-3)\n", 159 | "signal = mr0.execute_graph(graph, seq0, obj_p, print_progress=False)\n", 160 | "#@title 3. 
Plot sequence and signal\n", 161 | "sp_adc, t_adc = mr0.util.pulseq_plot(seq=seq,signal=signal.numpy())\n", 162 | "\n", 163 | "# Unfortunately, we need to limit the resolution as reco_adjoint is very RAM-hungy\n", 164 | "print('reconstruct and plot')\n", 165 | "seq0.plot_kspace_trajectory()\n", 166 | "\n", 167 | "reco = mr0.reco_adjoint(signal, seq0.get_kspace(), resolution=(64, 64, 1), FOV=(0.22, 0.22, 1))\n", 168 | "plt.figure()\n", 169 | "plt.subplot(121)\n", 170 | "plt.title(\"Magnitude\")\n", 171 | "plt.imshow(reco[:, :, 0].T.abs(), origin=\"lower\")\n", 172 | "plt.colorbar()\n", 173 | "plt.subplot(122)\n", 174 | "plt.title(\"Phase\")\n", 175 | "plt.imshow(reco[:, :, 0].T.angle(), origin=\"lower\", vmin=-np.pi, vmax=np.pi)\n", 176 | "plt.colorbar()\n", 177 | "plt.show()" 178 | ] 179 | } 180 | ], 181 | "metadata": { 182 | "colab": { 183 | "provenance": [ 184 | { 185 | "file_id": "1uTk3lc-O3xZS-rLDSkOLEYVV1rM0UTDJ", 186 | "timestamp": 1676904915190 187 | }, 188 | { 189 | "file_id": "1lnFKubthQBxkz19cY7ScS-S0Hj9vHjEj", 190 | "timestamp": 1676708491940 191 | } 192 | ] 193 | }, 194 | "kernelspec": { 195 | "display_name": "Python 3", 196 | "name": "python3" 197 | }, 198 | "language_info": { 199 | "codemirror_mode": { 200 | "name": "ipython", 201 | "version": 3 202 | }, 203 | "file_extension": ".py", 204 | "mimetype": "text/x-python", 205 | "name": "python", 206 | "nbconvert_exporter": "python", 207 | "pygments_lexer": "ipython3", 208 | "version": "3.11.4" 209 | } 210 | }, 211 | "nbformat": 4, 212 | "nbformat_minor": 0 213 | } 214 | -------------------------------------------------------------------------------- /documentation/playground_mr0/numerical_brain_cropped.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground_mr0/numerical_brain_cropped.mat -------------------------------------------------------------------------------- /documentation/playground_mr0/overview.md: -------------------------------------------------------------------------------- 1 | (playground_mr0)= 2 | # Playground MR0 3 | 4 | Welcome to Playground MR0, a playground to share, vary and simulate MR sequences. 5 | MR sequences are written in the Pulseq standard using the pypulseq library. 6 | Pulseq files are simulated with the efficient Phase Distribution Graph Bloch simulation. 7 | Here we share links to example colabs that contain various MR sequences or let you upload your own seq file for simulation. 8 | 9 | Many of the examples are build using [PyPulseq](https://github.com/imr-framework/pypulseq) and simulate the resulting .seq files with `MR0`. 10 | These .seq files could also be measured on any MRI scanner using a Pulseq interpreter. 
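As a minimal sketch of the workflow shared by most of these notebooks (the file names and resolutions below are placeholders taken from the examples in this documentation, not required values), simulating a `.seq` file with `MR0` typically looks like this:

```python
import MRzeroCore as mr0

# Load a numerical phantom and build the simulation data
# ("numerical_brain_cropped.mat" is the phantom used by many notebooks here)
obj_p = mr0.VoxelGridPhantom.load_mat("numerical_brain_cropped.mat")
obj_p = obj_p.interpolate(64, 64, 1).build()

# Import the Pulseq file and run the PDG simulation
seq0 = mr0.Sequence.import_file("external.seq")
graph = mr0.compute_graph(seq0, obj_p, 200, 1e-3)
signal = mr0.execute_graph(graph, seq0, obj_p)

# Adjoint reconstruction of the simulated signal
reco = mr0.reco_adjoint(signal, seq0.get_kspace(),
                        resolution=(64, 64, 1), FOV=(0.22, 0.22, 1))
```

The individual notebooks linked below show complete, runnable versions of this pattern, including sequence definition with PyPulseq and plotting.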
11 | 
12 | 
13 | ## Code and simulate PyPulseq
14 | 
15 | | Sequence | Google Colab |
16 | | -------- | ------------ |
17 | | [Free Induction Decay](FID_seq) | Open In Colab |
18 | | [Spin Echo CPMG](SE_CPMG_seq) | Open In Colab |
19 | | [Stimulated Echo 3 pulses - 5 echoes](STE_3pulses_5echoes_seq) | Open In Colab |
20 | | [FLASH 2D sequence](FLASH_2D_seq) | Open In Colab |
21 | | [GRE EPI 2D sequence](EPI_2D_seq) | Open In Colab |
22 | | [DWI SE EPI 2D sequence](DWI_SE_EPI_seq) | Open In Colab |
23 | | [Diffusion prepared STEAM](diff_prep_STEAM_seq) | Open In Colab |
24 | | [RARE 2D sequence](RARE_2D_seq) | Open In Colab |
25 | | [TSE 2D sequence](TSE_2D_seq) | Open In Colab |
26 | | [Interactive GRE to FLASH](GRE2FLASH_seq) | Open In Colab |
27 | | [balanced SSFP sequence](bSSFP_seq) | Open In Colab |
28 | | [DREAM STE for B0, B1, TxRx mapping](DREAM_STE_seq) | Open In Colab |
29 | | [DREAM STID for B0, B1, TxRx mapping](DREAM_STID_seq) | Open In Colab |
30 | | [Pulseq with RF shimming](pulseq_ptx) | Open In Colab |
31 | 
32 | ## Plot and simulate predefined .seq files
33 | 
34 | | Sequence | Google Colab |
35 | | -------- | ------------ |
36 | | [Simulate pypulseq example sequences](mr0_pypulseq_example) | Open In Colab |
37 | | [Simulate own uploaded seq files](mr0_upload_seq) | Open In Colab |
38 | 
39 | 
40 | ## MR-zero optimization
41 | 
42 | Gradient descent optimizations using automatic differentiation by backpropagation.
43 | Some notebooks use [pulseq-zero](https://github.com/pulseq-frame/pulseq-zero) for optimizable sequence definitions with PyPulseq.
44 | 
45 | | Sequence | Google Colab |
46 | | -------- | ------------ |
47 | | [IR FLASH 2D sequence for T1 mapping using a fit](IR_FLASH_fit) | Open In Colab |
48 | | [IR FLASH 2D sequence for T1 mapping using a NN](IR_FLASH_NN) | Open In Colab |
49 | | [FLASH flip angle opt. for PSF (with pulseq-zero)](FLASH_FAopt_PSF) | Open In Colab |
50 | | [TSE flip angle opt. for SAR (with pulseq-zero)](TSE_FAopt_SAR) | Open In Colab |
51 | 
52 | 
53 | ## MR-double-zero optimization
54 | 
55 | Gradient-free optimization with [nevergrad](https://github.com/facebookresearch/nevergrad).
56 | 
57 | | Sequence | Google Colab |
58 | | -------- | ------------ |
59 | | [Ernst angle optimization](mr00_FLASH_2D_ernstAngle_opt) | Open In Colab |
60 | 
61 | 
62 | ## MR plot wall of fame
63 | 
64 | Famous historic plots, recreated.
65 | 
66 | 
67 | ## MR0 example notebooks
68 | 
69 | The following sequences are examples of how to realize various tasks in MR-zero rather than demonstrations of specific MRI sequences. 
70 | 71 | | Sequence | | 72 | | -------- | - | 73 | | [Pure `MR0` FLASH](flash) | Open In Colab | 74 | | [pulseq FLASH](pulseq_flash) | Open In Colab | 75 | | [pulseq pTx FLASH](pulseq_pTx_sim) | Open In Colab | 76 | 77 | 78 | ## Notebook execution results 79 | 80 | ::::{toggle} 81 | 82 | :::{nb-exec-table} 83 | ::: 84 | 85 | :::: 86 | -------------------------------------------------------------------------------- /documentation/playground_mr0/ptx_phantom.p: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground_mr0/ptx_phantom.p -------------------------------------------------------------------------------- /documentation/playground_mr0/pulseq_flash.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "tags": [ 8 | "hide-cell" 9 | ] 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "!pip install pypulseq==1.3.1.post1 &> /dev/null\n", 14 | "!pip install MRzeroCore &> /dev/null\n", 15 | "!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/subject05.npz &> /dev/null" 16 | ] 17 | }, 18 | { 19 | "cell_type": "code", 20 | "execution_count": null, 21 | "metadata": {}, 22 | "outputs": [], 23 | "source": [ 24 | "# @title On Google Colab, you need to restart the runtime after executing this cell\n", 25 | "!pip install numpy==1.24" 26 | ] 27 | }, 28 | { 29 | "cell_type": "code", 30 | "execution_count": null, 31 | "metadata": {}, 32 | "outputs": [], 33 | "source": [ 34 | "import numpy as np\n", 35 | "# newer numpy versions don't contain this, but pypulseq still relies on it\n", 36 | "np.int = int\n", 37 | "np.float = float\n", 38 | "np.complex = complex\n", 39 | "\n", 40 | "import pypulseq as pp\n", 41 | "import MRzeroCore as mr0\n", 42 | "import torch\n", 43 | "import matplotlib.pyplot as plt\n", 44 | "\n", 45 | "experiment_id = \"flash\"" 46 | ] 47 | }, 48 | { 49 | "attachments": {}, 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "(pulseq_flash)=\n", 54 | "# Pulseq and MRzeroCore\n", 55 | "\n", 56 | "MRzero Core has functionality to parse and simulate pulseq .seq files.\n", 57 | "We build the same pulseq sequence as before, but this time with pulseq." 
58 | ] 59 | }, 60 | { 61 | "cell_type": "code", 62 | "execution_count": null, 63 | "metadata": {}, 64 | "outputs": [], 65 | "source": [ 66 | "sys = pp.Opts(\n", 67 | " max_grad=28, grad_unit='mT/m', max_slew=150, slew_unit='T/m/s',\n", 68 | " rf_ringdown_time=20e-6, rf_dead_time=100e-6, adc_dead_time=20e-6,\n", 69 | " grad_raster_time=50e-6\n", 70 | ")" 71 | ] 72 | }, 73 | { 74 | "cell_type": "code", 75 | "execution_count": null, 76 | "metadata": {}, 77 | "outputs": [], 78 | "source": [ 79 | "n_read = 64\n", 80 | "n_phase = 64\n", 81 | "fov = 192e-3\n", 82 | "slice_thickness = 8e-3\n", 83 | "\n", 84 | "rf = pp.make_sinc_pulse(\n", 85 | " flip_angle=5 * np.pi/180, duration=1e-3,\n", 86 | " slice_thickness=slice_thickness, apodization=0.5, time_bw_product=4,\n", 87 | " system=sys, return_gz=False\n", 88 | ")\n", 89 | "# Readout gradient\n", 90 | "gx = pp.make_trapezoid('x', flat_area=n_read / fov, flat_time=n_read*50e-6, system=sys)\n", 91 | "adc = pp.make_adc(\n", 92 | " num_samples=n_read, dwell=50e-6, delay=gx.rise_time,\n", 93 | " system=sys\n", 94 | ")\n", 95 | "# Rewinder before gx and spoiler afterwards\n", 96 | "gx_pre = pp.make_trapezoid('x', area=-0.5*gx.area, duration=5e-3, system=sys)\n", 97 | "gx_spoil = pp.make_trapezoid('x', area=1.5*gx.area, duration=2e-3, system=sys)\n", 98 | "\n", 99 | "# Construct the sequence\n", 100 | "seq = pp.Sequence()\n", 101 | "for i in range(-n_phase//2, n_phase//2):\n", 102 | " # RF phase spoiling\n", 103 | " rf.phase_offset = (0.5 * (i**2+i+2) * 117) % 360 * np.pi / 180\n", 104 | " adc.phase_offset = rf.phase_offset\n", 105 | " seq.add_block(rf)\n", 106 | " # Phase encoding\n", 107 | " gy = pp.make_trapezoid('y', area=i / fov, duration=5e-3, system=sys)\n", 108 | " seq.add_block(gx_pre, gy)\n", 109 | " seq.add_block(adc, gx)\n", 110 | " # Rewind phase and spoil\n", 111 | " gy = pp.make_trapezoid('y', area=-i / fov, duration=5e-3, system=sys)\n", 112 | " seq.add_block(gx_spoil, gy)\n", 113 | "\n", 114 | " seq.add_block(pp.make_delay(1e-3))" 115 | ] 116 | }, 117 | { 118 | "cell_type": "code", 119 | "execution_count": null, 120 | "metadata": {}, 121 | "outputs": [], 122 | "source": [ 123 | "ok, error_report = seq.check_timing()\n", 124 | "if ok:\n", 125 | " print(\"Timing check passed successfully\")\n", 126 | "else:\n", 127 | " print(\"Timing check failed:\")\n", 128 | " [print(e, end=\"\") for e in error_report]\n", 129 | "\n", 130 | "seq.plot()\n", 131 | "\n", 132 | "seq.set_definition(\"FOV\", [fov, fov, slice_thickness])\n", 133 | "seq.set_definition(\"Name\", experiment_id)\n", 134 | "seq.write(experiment_id + \".seq\")" 135 | ] 136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": null, 140 | "metadata": {}, 141 | "outputs": [], 142 | "source": [ 143 | "phantom = mr0.VoxelGridPhantom.brainweb(\"subject05.npz\")\n", 144 | "phantom = phantom.interpolate(64, 64, 32).slices([16])\n", 145 | "phantom.plot()\n", 146 | "data = phantom.build()" 147 | ] 148 | }, 149 | { 150 | "cell_type": "code", 151 | "execution_count": null, 152 | "metadata": { 153 | "tags": [ 154 | "hide-output" 155 | ] 156 | }, 157 | "outputs": [], 158 | "source": [ 159 | "seq = mr0.Sequence.import_file(experiment_id + \".seq\")\n", 160 | "seq.plot_kspace_trajectory()\n", 161 | "\n", 162 | "graph = mr0.compute_graph(seq, data, 200, 1e-3)\n", 163 | "signal = mr0.execute_graph(graph, seq, data, print_progress=False)" 164 | ] 165 | }, 166 | { 167 | "cell_type": "code", 168 | "execution_count": null, 169 | "metadata": {}, 170 | "outputs": [], 171 | "source": [ 172 | 
"kspace = signal.view(n_phase, n_read)\n", 173 | "reco = torch.fft.fftshift(torch.fft.fft2(torch.fft.fftshift(kspace)))\n", 174 | "\n", 175 | "plt.figure()\n", 176 | "plt.imshow(reco.abs(), origin=\"lower\")\n", 177 | "plt.show()" 178 | ] 179 | } 180 | ], 181 | "metadata": { 182 | "kernelspec": { 183 | "display_name": "base", 184 | "language": "python", 185 | "name": "python3" 186 | }, 187 | "language_info": { 188 | "codemirror_mode": { 189 | "name": "ipython", 190 | "version": 3 191 | }, 192 | "file_extension": ".py", 193 | "mimetype": "text/x-python", 194 | "name": "python", 195 | "nbconvert_exporter": "python", 196 | "pygments_lexer": "ipython3", 197 | "version": "3.11.3" 198 | }, 199 | "orig_nbformat": 4, 200 | "vscode": { 201 | "interpreter": { 202 | "hash": "88279d2366fe020547cde40dd65aa0e3aa662a6ec1f3ca12d88834876c85e1a6" 203 | } 204 | } 205 | }, 206 | "nbformat": 4, 207 | "nbformat_minor": 2 208 | } 209 | -------------------------------------------------------------------------------- /documentation/playground_mr0/pulseq_sim_pTx.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "\"Open" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": null, 13 | "metadata": { 14 | "colab": { 15 | "base_uri": "https://localhost:8080/", 16 | "height": 257 17 | }, 18 | "id": "O-_sr6lZjR_n", 19 | "outputId": "52bb3cab-4dbf-45d5-aaed-0e05f5d14128", 20 | "tags": [ 21 | "hide-cell" 22 | ] 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "!pip install pypulseq==1.3.1.post1 &> /dev/null\n", 27 | "!pip install MRzeroCore &> /dev/null\n", 28 | "!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/subject05.npz &> /dev/null\n", 29 | "!wget https://github.com/MRsources/MRzero-Core/raw/main/documentation/playground_mr0/AdjDataUser2gB0_transversal_0.08moving_average.mat &> /dev/null" 30 | ] 31 | }, 32 | { 33 | "cell_type": "code", 34 | "execution_count": null, 35 | "metadata": {}, 36 | "outputs": [], 37 | "source": [ 38 | "# @title On Google Colab, you need to restart the runtime after executing this cell\n", 39 | "!pip install numpy==1.24" 40 | ] 41 | }, 42 | { 43 | "cell_type": "code", 44 | "execution_count": null, 45 | "metadata": {}, 46 | "outputs": [], 47 | "source": [ 48 | "import MRzeroCore as mr0\n", 49 | "import matplotlib.pyplot as plt\n", 50 | "import numpy as np\n", 51 | "import scipy.io\n", 52 | "import torch\n", 53 | "from torch.nn.functional import interpolate" 54 | ] 55 | }, 56 | { 57 | "attachments": {}, 58 | "cell_type": "markdown", 59 | "metadata": {}, 60 | "source": [ 61 | "(pulseq_pTx_sim)=\n", 62 | "\n", 63 | "# Simulating a pTx .seq with MR0" 64 | ] 65 | }, 66 | { 67 | "cell_type": "code", 68 | "execution_count": null, 69 | "metadata": {}, 70 | "outputs": [], 71 | "source": [ 72 | "# Create a phantom by merging BrainWeb and adj data\n", 73 | "phantom = mr0.VoxelGridPhantom.brainweb(\"subject05.npz\")\n", 74 | "phantom = phantom.interpolate(64, 64, 32).slices([15])\n", 75 | "mask = phantom.PD[:, :, 0] > 1e-3\n", 76 | "\n", 77 | "# Extract necessary data from the MATLAB .mat file\n", 78 | "file = scipy.io.loadmat(\"AdjDataUser2gB0_transversal_0.08moving_average.mat\")[\"Adj\"]\n", 79 | "size = (file[\"image_m\"][0, 0][0, 0], file[\"image_n\"][0, 0][0, 0], file[\"slices\"][0, 0][0, 0])\n", 80 | "coil_count = file[\"coils\"][0, 0][0, 0]\n", 81 | "voxel_pos = np.stack(np.meshgrid(file[\"values_m\"][0, 0], 
file[\"values_n\"][0, 0], file[\"values_s\"][0, 0]), -1)\n", 82 | "\n", 83 | "# Crop and interpolate B0 data to roughly fit BrainWeb\n", 84 | "B0 = torch.tensor(file[\"B0\"][0, 0][:, :, size[2] // 2], dtype=torch.float)\n", 85 | "B0 = B0[27:61, 20:54].flip(0).T\n", 86 | "B0 = interpolate(\n", 87 | " B0[None, None, :, :], (64, 64), mode=\"area\"\n", 88 | ")[0, 0, :, :, None]\n", 89 | "\n", 90 | "# Crop and interpolate B1 data to roughly fit BrainWeb\n", 91 | "B1 = torch.tensor(file[\"S\"][0, 0][:, :, size[2] // 2], dtype=torch.cfloat).T.reshape(-1, size[0], size[1])\n", 92 | "B1 = B1[:, 27:61, 20:54].flip(2)\n", 93 | "B1 = (\n", 94 | " interpolate(B1.real[:, None, :, :], (64, 64), mode=\"area\")[:, 0, :, :, None]\n", 95 | " + 1j * interpolate(B1.imag[:, None, :, :], (64, 64), mode=\"area\")[:, 0, :, :, None]\n", 96 | ")\n", 97 | "\n", 98 | "B0[~mask] = 0\n", 99 | "B1[:, ~mask] = 0\n", 100 | "B0 -= B0[mask].mean()\n", 101 | "\n", 102 | "normalize = False\n", 103 | "if normalize:\n", 104 | " # Remove phase from B1\n", 105 | " for i in range(coil_count):\n", 106 | " B1[i, ...] *= torch.exp(-1j * B1[i, mask].mean().angle())\n", 107 | " # Normalize B1 so that quadrature mode has a mean of abs = 1 and phase = 0\n", 108 | " B1 *= np.exp(-1j * B1.sum(0)[mask].mean().angle()) / B1.sum(0)[mask].mean().abs()\n", 109 | "\n", 110 | "phantom.B0 = B0\n", 111 | "phantom.B1 = B1" 112 | ] 113 | }, 114 | { 115 | "cell_type": "code", 116 | "execution_count": null, 117 | "metadata": { 118 | "id": "HTYTghFLjeCY", 119 | "tags": [ 120 | "hide-output" 121 | ] 122 | }, 123 | "outputs": [], 124 | "source": [ 125 | "data = phantom.build()\n", 126 | "\n", 127 | "# NOTE: this currently doesn't work in Colab\n", 128 | "\n", 129 | "# New importer does not have the pTx extension yet, so use the old one and\n", 130 | "# revert the FOV scaling of gradients\n", 131 | "seq = mr0.Sequence.from_seq_file(\"seqs/flash pTx CP.seq\")\n", 132 | "seq.plot_kspace_trajectory()\n", 133 | "for rep in seq:\n", 134 | " rep.gradm[:, 0] /= 0.2\n", 135 | " rep.gradm[:, 1] /= 0.2\n", 136 | "\n", 137 | "# Simulate the sequence\n", 138 | "graph = mr0.compute_graph(seq, data, 200, 1e-3)\n", 139 | "signal = mr0.execute_graph(graph, seq, data, print_progress=False)" 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": null, 145 | "metadata": { 146 | "id": "uLlcta8qwU5L" 147 | }, 148 | "outputs": [], 149 | "source": [ 150 | "reco = mr0.reco_adjoint(signal, seq.get_kspace(), resolution=(64, 64, 1), FOV=(0.2, 0.2, 1))\n", 151 | "\n", 152 | "plt.figure(figsize=(7, 3), dpi=200)\n", 153 | "plt.subplot(121)\n", 154 | "plt.title(\"Magnitude\")\n", 155 | "plt.imshow(reco[:, :, 0].T.abs(), origin=\"lower\", vmin=0, cmap=\"gray\")\n", 156 | "plt.colorbar()\n", 157 | "plt.subplot(122)\n", 158 | "plt.title(\"Phase\")\n", 159 | "plt.imshow(reco[:, :, 0].T.angle(), origin=\"lower\", vmin=-np.pi, vmax=np.pi, cmap=\"twilight\")\n", 160 | "plt.colorbar()\n", 161 | "plt.show()" 162 | ] 163 | } 164 | ], 165 | "metadata": { 166 | "colab": { 167 | "provenance": [] 168 | }, 169 | "kernelspec": { 170 | "display_name": "Python 3", 171 | "name": "python3" 172 | }, 173 | "language_info": { 174 | "codemirror_mode": { 175 | "name": "ipython", 176 | "version": 3 177 | }, 178 | "file_extension": ".py", 179 | "mimetype": "text/x-python", 180 | "name": "python", 181 | "nbconvert_exporter": "python", 182 | "pygments_lexer": "ipython3", 183 | "version": "3.11.3" 184 | } 185 | }, 186 | "nbformat": 4, 187 | "nbformat_minor": 0 188 | } 189 | 
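Note on the phantom-building cell of this notebook: the phantom is given per-coil, complex B1 maps before simulation, and a quick visual check of the cropped and interpolated maps helps catch orientation or masking mistakes before running the slow simulation step. The snippet below is a minimal sketch, not part of the notebook itself; it assumes the variables B1 (shape [coil_count, 64, 64, 1]), coil_count and mask from that cell are still in scope and only relies on matplotlib/numpy as imported there.

# Sketch: per-coil B1 magnitude and phase (assumes B1, coil_count, mask from the cell above)
plt.figure(figsize=(2 * coil_count, 4), dpi=120)
for c in range(coil_count):
    plt.subplot(2, coil_count, c + 1)
    plt.title(f"|B1| coil {c}")
    plt.imshow(B1[c, :, :, 0].abs().T, origin="lower", cmap="gray")
    plt.axis("off")
    plt.subplot(2, coil_count, coil_count + c + 1)
    plt.title(f"arg(B1) coil {c}")
    # Mask the background so only brain voxels show a phase
    plt.imshow((B1[c, :, :, 0].angle() * mask).T, origin="lower", cmap="twilight", vmin=-np.pi, vmax=np.pi)
    plt.axis("off")
plt.subplots_adjust(wspace=0.05, hspace=0.15)
plt.show()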
-------------------------------------------------------------------------------- /documentation/playground_mr0/reference_bssfp_data_feb2025.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground_mr0/reference_bssfp_data_feb2025.npz -------------------------------------------------------------------------------- /documentation/playground_mr0/subject05.npz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground_mr0/subject05.npz -------------------------------------------------------------------------------- /documentation/playground_mr0/unsorted/mr0_burst_TSE.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": null, 6 | "metadata": { 7 | "id": "GmqCxc9MnLv_", 8 | "tags": [ 9 | "hide-cell" 10 | ] 11 | }, 12 | "outputs": [], 13 | "source": [ 14 | "!pip install pypulseq &> /dev/null\n", 15 | "!pip install MRzeroCore &> /dev/null" 16 | ] 17 | }, 18 | { 19 | "cell_type": "markdown", 20 | "metadata": {}, 21 | "source": [ 22 | "(burst_TSE_seq)=\n", 23 | "# BURST TSE\n", 24 | "\n", 25 | "https://link.springer.com/article/10.1007/BF02660372\n", 26 | "\n", 27 | "https://pubmed.ncbi.nlm.nih.gov/7984078/\n", 28 | "\n", 29 | "This Burst-TSE follows the main idea of encoding multiple z-states that are then\n", 30 | "read in a single readout (with multiple refocusings). It currently does not\n", 31 | "minimize gradient switching (phase encoding is applied as a short gradient after\n", 32 | "every k-space line instead of continuously). Excitation pulses use constant\n", 33 | "flip angles, which results in varying signal intensity." 34 | ] 35 | }, 36 | { 37 | "cell_type": "code", 38 | "execution_count": null, 39 | "metadata": { 40 | "id": "_4Kbv7OrneZB" 41 | }, 42 | "outputs": [], 43 | "source": [ 44 | "#@title 1. Imports\n", 45 | "import numpy as np\n", 46 | "import torch\n", 47 | "import matplotlib.pyplot as plt\n", 48 | "import MRzeroCore as mr0\n", 49 | "\n", 50 | "res = (64, 64)\n", 51 | "\n", 52 | "data = mr0.util.load_phantom(size=res)" 53 | ] 54 | }, 55 | { 56 | "cell_type": "code", 57 | "execution_count": null, 58 | "metadata": { 59 | "id": "Z3EbohACoDvy" 60 | }, 61 | "outputs": [], 62 | "source": [ 63 | "#@title 2. 
Burst sequence definition\n", 64 | "def build_seq(burst_flip=5, refoc_flip=120, spoiler=10, refocs=8, CPMG=True):\n", 65 | " burst_len = res[1] // refocs\n", 66 | " assert res[1] / refocs % 1 == 0\n", 67 | "\n", 68 | " t_pulse = 0.5e-3\n", 69 | " t_refoc_pulse = 1e-3\n", 70 | " t_adc = 30e-6\n", 71 | "\n", 72 | " seq = mr0.Sequence()\n", 73 | "\n", 74 | " # BURST pulse\n", 75 | " for i in range(burst_len):\n", 76 | " rep = seq.new_rep(2)\n", 77 | "\n", 78 | " rep.pulse.angle = burst_flip * torch.pi/180\n", 79 | " rep.event_time[0] = t_pulse\n", 80 | "\n", 81 | " rep.gradm[1, 0] = res[0] + spoiler\n", 82 | " rep.event_time[1] = (res[0] + spoiler) * t_adc\n", 83 | "\n", 84 | "\n", 85 | " # TSE readout\n", 86 | " for i in range(refocs):\n", 87 | " rep = seq.new_rep(2 + (res[0] + 1) * burst_len + 1)\n", 88 | " rep.pulse.angle = refoc_flip * torch.pi / 180\n", 89 | " if CPMG == True:\n", 90 | " rep.pulse.phase = (0.5 - (i % 2)) * torch.pi\n", 91 | " elif CPMG == \"wrong\":\n", 92 | " rep.pulse.phase = torch.pi / 2\n", 93 | " rep.event_time[0] = t_refoc_pulse\n", 94 | "\n", 95 | " rep.gradm[1, 0] = spoiler + res[0] // 2 - 1\n", 96 | " rep.gradm[1, 1] = -res[0] // 2 + i * burst_len\n", 97 | " rep.event_time[1] = rep.gradm[1, 0] * t_adc\n", 98 | "\n", 99 | " # Readout\n", 100 | " for j in range(burst_len):\n", 101 | " start = 2 + j * (res[0] + 1)\n", 102 | " stop = start + res[0]\n", 103 | "\n", 104 | " rep.gradm[start:stop, 0] = 1\n", 105 | " rep.adc_usage[start:stop] = 1\n", 106 | " rep.event_time[start:stop] = t_adc\n", 107 | "\n", 108 | " rep.gradm[stop, 0] = spoiler\n", 109 | " rep.gradm[stop, 1] = 1\n", 110 | " rep.event_time[stop] = spoiler * t_adc\n", 111 | "\n", 112 | " rep.gradm[-1, :] = -rep.gradm[:-1, :].sum(0)\n", 113 | " rep.event_time[-1] = rep.gradm[-1, 0] * t_adc\n", 114 | "\n", 115 | " return seq" 116 | ] 117 | }, 118 | { 119 | "cell_type": "code", 120 | "execution_count": null, 121 | "metadata": { 122 | "colab": { 123 | "base_uri": "https://localhost:8080/" 124 | }, 125 | "id": "wCrohnxlpZ2b", 126 | "outputId": "6f51e3ee-f3af-4e75-bcba-7f0f85ba5b23", 127 | "tags": [ 128 | "hide-output" 129 | ] 130 | }, 131 | "outputs": [], 132 | "source": [ 133 | "#@title Simulate\n", 134 | "# Use less states / isochromats here because free google Colab is slow.\n", 135 | "# This means that isochromats will not produce a usable image!\n", 136 | "\n", 137 | "spin_recos = {}\n", 138 | "pdg_recos = {}\n", 139 | "\n", 140 | "for i in range(3):\n", 141 | " cpmg = [False, \"wrong\", True][i]\n", 142 | " seq = build_seq(CPMG=cpmg)\n", 143 | "\n", 144 | " # Very high thresholds for inaccurate but fast doc build\n", 145 | " signal, _ = mr0.util.simulate(seq, data)\n", 146 | " pdg_recos[cpmg] = torch.fft.fftshift(torch.fft.fft2(signal.view(res)))\n", 147 | "\n", 148 | " # NOTE: Commented for documentation build out because slow\n", 149 | " # signal = mr0.isochromat_sim(seq, data, 100).cpu().flatten()\n", 150 | " # spin_recos[cpmg] = torch.fft.fftshift(torch.fft.fft2(signal.view(res)))\n", 151 | " spin_recos[cpmg] = torch.zeros_like(pdg_recos[cpmg])" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": null, 157 | "metadata": { 158 | "colab": { 159 | "base_uri": "https://localhost:8080/", 160 | "height": 499 161 | }, 162 | "id": "ZlACudf4p37-", 163 | "outputId": "7ab36d05-20af-4e20-f300-7b2593d2c6f1" 164 | }, 165 | "outputs": [], 166 | "source": [ 167 | "#@title Plot the figure\n", 168 | "text_args = {\"c\": \"w\", \"fontsize\": 14, \"ha\": \"center\", \"va\": \"center\", \"bbox\": 
{\"fill\": True}}\n", 169 | "\n", 170 | "plt.figure(figsize=(9, 6), dpi=100)\n", 171 | "for i in range(3):\n", 172 | " cpmg = [False, \"wrong\", True][i]\n", 173 | " name = [\"CPMG violated\", \"CPMG\", \"CPMG ($\\\\pm 180°$)\"][i]\n", 174 | "\n", 175 | " plt.subplot(231 + i)\n", 176 | " plt.text(32, 58, name, text_args)\n", 177 | " if i == 0:\n", 178 | " plt.text(5, 32, \"PDG\", text_args, rotation=\"vertical\")\n", 179 | " plt.imshow(pdg_recos[cpmg].abs(), origin=\"lower\", vmin=0, vmax=300)\n", 180 | " plt.axis(\"off\")\n", 181 | " plt.subplot(234 + i)\n", 182 | " if i == 0:\n", 183 | " plt.text(5, 32, \"Isochromats\", text_args, rotation=\"vertical\")\n", 184 | " plt.imshow(spin_recos[cpmg].abs(), origin=\"lower\", vmin=0, vmax=300)\n", 185 | " plt.axis(\"off\")\n", 186 | "plt.subplots_adjust(hspace=0.05, wspace=0)\n", 187 | "plt.show()" 188 | ] 189 | }, 190 | { 191 | "cell_type": "code", 192 | "execution_count": null, 193 | "metadata": { 194 | "colab": { 195 | "base_uri": "https://localhost:8080/", 196 | "height": 472 197 | }, 198 | "id": "sSyvc3LoQtgb", 199 | "outputId": "825b3c3f-9423-46a5-c4d4-db5676a24f74" 200 | }, 201 | "outputs": [], 202 | "source": [ 203 | "#@title Tau-view of the PDG\n", 204 | "\n", 205 | "logarithmic = True # @param {type: \"boolean\"}\n", 206 | "what = \"emitted signal\" #@param [\"emitted signal\", \"latent signal\", \"magnetization\"]\n", 207 | "sim_only = False # @param {type: \"boolean\"}\n", 208 | "eps = 1e-7\n", 209 | "\n", 210 | "values = [] # (rep, dephasing, weight or signal)\n", 211 | "\n", 212 | "for i, rep in enumerate(graph):\n", 213 | " for state in rep:\n", 214 | " if sim_only and state.kt_vec is None:\n", 215 | " continue\n", 216 | " if what == \"emitted signal\" and state.dist_type == '+':\n", 217 | " values.append((\n", 218 | " i,\n", 219 | " state.prepass_kt_vec[3],\n", 220 | " state.emitted_signal + eps,\n", 221 | " ))\n", 222 | " elif what == \"latent signal\":\n", 223 | " values.append((\n", 224 | " i,\n", 225 | " state.prepass_kt_vec[3],\n", 226 | " state.latent_signal + eps,\n", 227 | " ))\n", 228 | " elif what == \"magnetization\":\n", 229 | " values.append((\n", 230 | " i,\n", 231 | " state.prepass_kt_vec[3],\n", 232 | " np.abs(state.prepass_mag) + eps,\n", 233 | " ))\n", 234 | "\n", 235 | "values = sorted(values, key=lambda v: v[2])\n", 236 | "r = [v[0] for v in values]\n", 237 | "t = [v[1] for v in values]\n", 238 | "if logarithmic:\n", 239 | " v = [np.log10(v[2]) for v in values]\n", 240 | "else:\n", 241 | " v = [v[2] for v in values]\n", 242 | "\n", 243 | "plt.figure()\n", 244 | "if logarithmic:\n", 245 | " plt.scatter(r, t, c=v, s=20, zorder=10, vmin=np.log10(eps), vmax=0)\n", 246 | "else:\n", 247 | " plt.scatter(r, t, c=v, s=20, zorder=10, vmin=0, vmax=1)\n", 248 | "\n", 249 | "plt.grid()\n", 250 | "plt.colorbar()\n", 251 | "\n", 252 | "plt.xlabel(\"Repetition\")\n", 253 | "plt.ylabel(\"$\\\\tau$ dephasing [s]\")\n", 254 | "plt.show()" 255 | ] 256 | } 257 | ], 258 | "metadata": { 259 | "colab": { 260 | "provenance": [] 261 | }, 262 | "kernelspec": { 263 | "display_name": ".venv", 264 | "language": "python", 265 | "name": "python3" 266 | }, 267 | "language_info": { 268 | "codemirror_mode": { 269 | "name": "ipython", 270 | "version": 3 271 | }, 272 | "file_extension": ".py", 273 | "mimetype": "text/x-python", 274 | "name": "python", 275 | "nbconvert_exporter": "python", 276 | "pygments_lexer": "ipython3", 277 | "version": "3.11.3" 278 | } 279 | }, 280 | "nbformat": 4, 281 | "nbformat_minor": 0 282 | } 283 | 
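A note on the tau-view cell of this notebook: it iterates over a variable named graph, which none of the cells shown here define, since the simulation cell only calls mr0.util.simulate(seq, data). The snippet below is a minimal sketch of how such a graph could be obtained explicitly, reusing the mr0.compute_graph / mr0.execute_graph calls that the other notebooks in this documentation use; the threshold arguments (200, 1e-3) are copied from those examples rather than tuned for BURST TSE, and the name reco is purely illustrative.

# Sketch: build the PDG graph explicitly so the tau-view cell has a `graph` to inspect
# (assumes seq and data from the cells above are in scope)
graph = mr0.compute_graph(seq, data, 200, 1e-3)
signal = mr0.execute_graph(graph, seq, data, print_progress=False)
reco = torch.fft.fftshift(torch.fft.fft2(signal.view(res)))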
-------------------------------------------------------------------------------- /documentation/playground_mr0/unsorted/numerical_brain_cropped.mat: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/documentation/playground_mr0/unsorted/numerical_brain_cropped.mat -------------------------------------------------------------------------------- /documentation/requirements.txt: -------------------------------------------------------------------------------- 1 | jupyter-book 2 | sphinx-toolbox 3 | enum-tools[sphinx] 4 | MRzeroCore 5 | pulseqzero 6 | jupyterplot 7 | nevergrad 8 | scikit-image 9 | PyWavelets 10 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["maturin>=1.5,<2.0"] 3 | build-backend = "maturin" 4 | 5 | [project] 6 | name = "MRzeroCore" 7 | description = "Core functionality of MRzero" 8 | authors = [ 9 | {name = "Jonathan Endres", email = "jonathan.endres@uk-erlangen.de"}, 10 | ] 11 | readme = "README.md" 12 | classifiers = [ 13 | "Programming Language :: Rust", 14 | "Programming Language :: Python :: Implementation :: CPython", 15 | "Programming Language :: Python :: Implementation :: PyPy", 16 | "License :: OSI Approved :: GNU Affero General Public License v3", 17 | ] 18 | requires-python = ">=3.9" 19 | dependencies = [ 20 | "torch>=1.12", 21 | "pypulseq", 22 | "matplotlib>=3.5", 23 | "scipy>=1.7", 24 | "requests>=2.20", 25 | "scikit-image", 26 | "torchkbnufft", 27 | "pydisseqt>=0.1.13" 28 | ] 29 | 30 | [project.urls] 31 | Repository = "https://github.com/MRsources/MRzero-Core" 32 | Documentation = "https://mrzero-core.readthedocs.io/" 33 | MRzero-Paper = "https://arxiv.org/abs/2002.04265" 34 | 35 | [tool.maturin] 36 | profile = "release" 37 | strip = true 38 | module-name = "MRzeroCore._prepass" 39 | python-source = "python" 40 | -------------------------------------------------------------------------------- /python/MRzeroCore/__init__.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | if not hasattr(numpy, "int"): 3 | numpy.int = int 4 | if not hasattr(numpy, "float"): 5 | numpy.float = float 6 | if not hasattr(numpy, "complex"): 7 | numpy.complex = complex 8 | 9 | from .sequence import PulseUsage, Pulse, Repetition, Sequence, chain 10 | from .phantom.voxel_grid_phantom import VoxelGridPhantom 11 | from .phantom.custom_voxel_phantom import CustomVoxelPhantom 12 | from .phantom.sim_data import SimData 13 | from .phantom.brainweb import generate_brainweb_phantoms 14 | from .simulation.isochromat_sim import isochromat_sim 15 | from .simulation.pre_pass import compute_graph, compute_graph_ext, Graph 16 | from .simulation.main_pass import execute_graph 17 | from .reconstruction import reco_adjoint 18 | from .pulseq.exporter import pulseq_write_cartesian 19 | from . import util 20 | -------------------------------------------------------------------------------- /python/MRzeroCore/phantom/brainweb/__init__.py: -------------------------------------------------------------------------------- 1 | from typing import Literal 2 | import json 3 | import gzip 4 | import requests 5 | import os 6 | import numpy as np 7 | 8 | 9 | # Load the brainweb data file that contains info about tissues, subjects, ... 
10 | brainweb_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 11 | "brainweb_data.json") 12 | brainweb_data = json.load(open(brainweb_data_path)) 13 | 14 | 15 | def load_tissue(subject: int, alias: str, cache_dir: str) -> np.ndarray: 16 | download_alias = f"subject{subject:02d}_{alias}" 17 | file_name = download_alias + ".i8.gz" # 8 bit signed int, gnuzip 18 | file_path = os.path.join(cache_dir, file_name) 19 | 20 | # Download and cache file if it doesn't exist yet 21 | if not os.path.exists(file_path): 22 | print(f"Downloading '{download_alias}'", end="", flush=True) 23 | response = requests.post( 24 | "https://brainweb.bic.mni.mcgill.ca/cgi/brainweb1", 25 | data={ 26 | "do_download_alias": download_alias, 27 | "format_value": "raw_byte", 28 | "zip_value": "gnuzip" 29 | } 30 | ) 31 | with open(file_path, "wb") as f: 32 | f.write(response.content) 33 | print(" - ", end="") 34 | 35 | # Load the raw BrainWeb data and add it to the return array 36 | with gzip.open(file_path) as f: 37 | print(f"Loading {os.path.basename(file_path)}", end="", flush=True) 38 | # BrainWeb says this data is unsigned, which is a lie 39 | tmp = np.frombuffer(f.read(), np.uint8) + 128 40 | 41 | # Vessel bugfix: most of background is 1 instead of zero 42 | if alias == "ves": 43 | tmp[tmp == 1] = 0 44 | data = tmp.reshape(362, 434, 362).swapaxes(0, 2).astype(np.float32) 45 | 46 | print(" - done") 47 | return data / 255.0 48 | 49 | 50 | def gen_noise(range: float, res: np.ndarray) -> np.ndarray: 51 | if range == 0: 52 | return 1 53 | else: 54 | freq = 20 55 | padded_res = (res + freq - 1) // freq * freq 56 | try: 57 | from perlin_numpy import generate_perlin_noise_3d 58 | noise = generate_perlin_noise_3d(padded_res, (freq, freq, freq)) 59 | except: 60 | print("perlin_numpy@git+https://github.com/pvigier/perlin-numpy") 61 | print("is not installed, falling back to numpy.random.random()") 62 | noise = np.random.random(padded_res) 63 | return 1 + range * noise[:res[0], :res[1], :res[2]] 64 | 65 | 66 | def downsample(array: np.ndarray, factor: int) -> np.ndarray: 67 | # crop array to multiple of factor 68 | shape = (np.array(array.shape) // factor) * factor 69 | array = array[:shape[0], :shape[1], :shape[2]] 70 | 71 | tmp = np.zeros(shape // factor) 72 | for x in range(factor): 73 | for y in range(factor): 74 | for z in range(factor): 75 | tmp += array[x::factor, y::factor, z::factor] 76 | 77 | return tmp / factor**3 78 | 79 | 80 | def generate_brainweb_phantoms( 81 | output_dir: str, 82 | config: Literal["3T", "7T-noise", "3T-highres-fat"] = "3T"): 83 | """Generate BrainWeb phantom maps for the selected configuration. 84 | 85 | Raw tissue segmentation data is provided by the BrainWeb Database: 86 | http://www.bic.mni.mcgill.ca/brainweb/ 87 | 88 | All tissue data etc. are stored in [brainweb_data.json](https://github.com/MRsources/MRzero-Core/blob/main/python/MRzeroCore/phantom/brainweb/brainweb_data.json). 89 | To ensure consistent configurations and reproducible results, available 90 | configs are stored in this file as well. They specify which field strength 91 | to use, which tissues to include, and the downsampling and noise levels. 92 | 93 | The emitted files are compressed numpy files, which can be loaded with 94 | ``np.load(file_name)``. They contain the following arrays: 95 | 96 | - `PD_map`: Proton Density [a.u.] 
97 | - `T1_map`: T1 relaxation time [s] 98 | - `T2_map`: T2 relaxation time [s] 99 | - `T2dash_map`: T2' relaxation time [s] 100 | - `D_map`: Isotropic Diffusion coefficient [10^-3 mm² / s] 101 | - `tissue_XY`: Tissue segmentation for all included tissues 102 | 103 | Parameters 104 | ---------- 105 | output_dir: str 106 | The directory where the generated phantoms will be stored to. In 107 | addition, a `cache` folder will be generated there too, which contains 108 | all the data downloaded from BrainWeb to avoid repeating the download 109 | for all configurations or when generating phantoms again. 110 | config: ["3T", "7T-noise", "3T-highres-fat"] 111 | The configuration for which the maps are generated. 112 | """ 113 | config_data = brainweb_data["configs"][config] 114 | cache_dir = os.path.join(output_dir, "cache") 115 | 116 | try: 117 | os.makedirs(cache_dir) 118 | except FileExistsError: 119 | pass 120 | 121 | # Map resolution: 122 | res = np.array([362, 434, 362]) // config_data["downsample"] 123 | 124 | def noise() -> np.ndarray: 125 | return gen_noise(config_data["noise"], res) 126 | 127 | for subject in brainweb_data["subjects"]: 128 | print(f"Generating '{config}', subject {subject}") 129 | maps = { 130 | "FOV": np.array([0.181, 0.217, 0.181]), 131 | "PD_map": np.zeros(res, dtype=np.float32), 132 | "T1_map": np.zeros(res, dtype=np.float32), 133 | "T2_map": np.zeros(res, dtype=np.float32), 134 | "T2dash_map": np.zeros(res, dtype=np.float32), 135 | "D_map": np.zeros(res, dtype=np.float32), 136 | } 137 | 138 | for tissue in config_data["tissues"]: 139 | tissue_map = sum([ 140 | load_tissue(subject, alias, cache_dir) 141 | for alias in brainweb_data["download-aliases"][tissue] 142 | ]) 143 | tissue_map = downsample(tissue_map, config_data["downsample"]) 144 | maps["tissue_" + tissue] = tissue_map 145 | 146 | field_strength = config_data["field-strength"] 147 | tissue_data = brainweb_data["tissues"][field_strength][tissue] 148 | 149 | # Separate noise maps is slower but uncorrelated. 
150 | # Might be better for training or worse - could be configurable 151 | print("Adding tissue to phantom", end="", flush=True) 152 | maps["PD_map"] += tissue_data["PD"] * tissue_map * noise() 153 | maps["T1_map"] += tissue_data["T1"] * tissue_map * noise() 154 | maps["T2_map"] += tissue_data["T2"] * tissue_map * noise() 155 | maps["T2dash_map"] += tissue_data["T2'"] * tissue_map * noise() 156 | maps["D_map"] += tissue_data["D"] * tissue_map * noise() 157 | print(" - done") 158 | 159 | file = os.path.join(output_dir, f"subject{subject:02d}_{config}.npz") 160 | print(f"Saving to '{os.path.basename(file)}'", end="", flush=True) 161 | np.savez_compressed(file, **maps) 162 | print(" - done\n") 163 | 164 | 165 | if __name__ == "__main__": 166 | print("This is for testing only, use generate_brainweb_phantoms directly!") 167 | file_dir = os.path.dirname(os.path.realpath(__file__)) 168 | output_dir = os.path.join(file_dir, "output") 169 | 170 | for config in brainweb_data["configs"].keys(): 171 | generate_brainweb_phantoms(output_dir, config) 172 | -------------------------------------------------------------------------------- /python/MRzeroCore/phantom/brainweb/brainweb_data.json: -------------------------------------------------------------------------------- 1 | { 2 | "configs": { 3 | "3T": { 4 | "field-strength": "3T", 5 | "tissues": ["gm", "wm", "csf"], 6 | "downsample": 3, 7 | "noise": 0 8 | }, 9 | "7T-noise": { 10 | "field-strength": "7T", 11 | "tissues": ["gm", "wm", "csf"], 12 | "downsample": 3, 13 | "noise": 0.2 14 | }, 15 | "3T-highres-fat": { 16 | "field-strength": "3T", 17 | "tissues": ["gm", "wm", "csf", "fat"], 18 | "downsample": 1, 19 | "noise": 0 20 | } 21 | }, 22 | "subjects": [ 23 | 4, 5, 6, 18, 20, 38, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54 24 | ], 25 | "download-aliases": { 26 | "gm": ["gry"], 27 | "wm": ["wht"], 28 | "csf": ["csf", "ves"], 29 | "fat": ["fat", "mus", "m-s", "dura", "fat2"] 30 | }, 31 | "tissues": { 32 | "3T": { 33 | "gm": { 34 | "PD": 0.8, 35 | "T1": 1.56, 36 | "T2": 0.083, 37 | "T2'": 0.32, 38 | "D": 0.83 39 | }, 40 | "wm": { 41 | "PD": 0.7, 42 | "T1": 0.83, 43 | "T2": 0.075, 44 | "T2'": 0.18, 45 | "D": 0.65 46 | }, 47 | "csf": { 48 | "PD": 1, 49 | "T1": 4.16, 50 | "T2": 1.65, 51 | "T2'": 0.059, 52 | "D": 3.19 53 | }, 54 | "fat": { 55 | "PD": 1, 56 | "T1": 0.37, 57 | "T2": 0.125, 58 | "T2'": 0.012, 59 | "D": 0.1 60 | } 61 | }, 62 | "7T": { 63 | "gm": { 64 | "PD": 0.8, 65 | "T1": 1.67, 66 | "T2": 0.043, 67 | "T2'": 0.82, 68 | "D": 0.83 69 | }, 70 | "wm": { 71 | "PD": 0.7, 72 | "T1": 1.22, 73 | "T2": 0.037, 74 | "T2'": 0.65, 75 | "D": 0.65 76 | }, 77 | "csf": { 78 | "PD": 1, 79 | "T1": 4.0, 80 | "T2": 0.8, 81 | "T2'": 0.204, 82 | "D": 3.19 83 | }, 84 | "fat": { 85 | "PD": 1, 86 | "T1": 0.374, 87 | "T2": 0.125, 88 | "T2'": 0.0117, 89 | "D": 0.1 90 | } 91 | } 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /python/MRzeroCore/phantom/brainweb/brainweb_data_sources.txt: -------------------------------------------------------------------------------- 1 | ! NOTE 2 | 7T maps are not checked as thrououghly. 
Only source so far: 3 | https://cds.ismrm.org/protected/14MProceedings/PDFfiles/3208.pdf 4 | 5 | 6 | # T1 and T2 times, taken from: 7 | # https://mri-q.com/uploads/3/4/5/7/34572113/normal_relaxation_times_at_3t.pdf 8 | # Value taken from paper with most participants (draw: closest to mean of all) 9 | # Studies that are outliers are ignored (WM T2 time) 10 | 11 | # T2' calculated from T2 and T2*, taken from: 12 | # https://www.sciencedirect.com/science/article/pii/S0730725X07001701?via%3Dihub 13 | 14 | # Water / Fat T2': https://link.springer.com/article/10.1007/s00723-015-0737-5 (4.7 T) 15 | 16 | # Errors are uncertenties of studies, not data on variation in one measurement 17 | 18 | Brainweb Tissues: 19 | - CSF 20 | T1: 4163 ± 263 21 | T2: 1650 (approx, taken from https://www.ncbi.nlm.nih.gov/pmc/articles/PMC5973950/) 22 | T2*: 57.1 -> T2': 59.1 23 | 24 | - Gray Matter 25 | T1: 1558 ± 88 26 | T2: 83 ± 4 27 | T2*: 66.0 ± 1.4 -> T2': 322 28 | 29 | - White Matter 30 | T1: 830 ± 0 31 | T2: 75 ± 3 32 | T2*: 53.2 ± 1.2 -> T2': 183 33 | T2': 56 ± 1 34 | 35 | - Fat 36 | T1: 374 ± 45 37 | T2: 125 38 | T2* = 10.7 -> T2': 11.7 39 | 40 | # The following values are not used for phantom generation 41 | 42 | - Muscle 43 | T1: 1100 ± 59 44 | T2: 40 45 | 46 | - Muscle/Skin 47 | 48 | - Skull 49 | Probably similar to Bone marrow, but they don't overlap in the images 50 | 51 | - Blood vessels 52 | Probably similar to CSF (mostly water) 53 | 54 | - Connective (region around fat) 55 | Overlaps nearly fully with Fat 56 | 57 | - Dura matter 58 | Very little volume, probably not noticable at lower resolutions 59 | 60 | - Bone marrow 61 | T1: 586 ± 73 62 | T2: 127 63 | 64 | 65 | Diffusion: 66 | Values taken from https://onlinelibrary.wiley.com/doi/10.1002/jmri.1076 67 | 10^-3 mm² / s 68 | CSF: 3.19 ± 0.10 69 | WM: 0.65 ± 0.03 70 | GM: 0.83 ± 0.05 71 | 72 | No great source for fat, https://onlinelibrary.wiley.com/doi/10.1002/mrm.24535 73 | says it barely diffuses 74 | FAT: ~0.1 75 | -------------------------------------------------------------------------------- /python/MRzeroCore/phantom/brainweb/output/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MRsources/MRzero-Core/f55f20d87dcec0f2c636ae354a0c9f5f8daf263c/python/MRzeroCore/phantom/brainweb/output/.gitkeep -------------------------------------------------------------------------------- /python/MRzeroCore/phantom/sim_data.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from typing import Callable, Any, Optional, Dict 3 | import torch 4 | from numpy import pi 5 | 6 | 7 | class SimData: 8 | """This class contains the physical data for simulating a MRI sequence. 9 | 10 | It is not intended to create this class directly, but rather to use one of 11 | the :class:`SimData` builders / loaders. Those are made fore specific 12 | tasks and can be converted into :class:`SimData`, but also attach 13 | metadata to the output so it can be converted back. The attributes of this 14 | class are nothing but the data needed for simulation, so it can describe 15 | a single voxel, randomly distributed voxels, a BrainWeb phantom, ... 
16 | 17 | Attributes 18 | ---------- 19 | PD : torch.Tensor 20 | Per voxel proton density 21 | T1 : torch.Tensor 22 | Per voxel T1 relaxation time (seconds) 23 | T2 : torch.Tensor 24 | Per voxel T2 relaxation time (seconds) 25 | T2dash : torch.Tensor 26 | Per voxel T2' dephasing time (seconds) 27 | D: torch.Tensor 28 | Isometric diffusion coefficients [10^-3 mm^2/s] 29 | B0 : torch.Tensor 30 | Per voxel B0 inhomogentity (Hertz) 31 | B1 : torch.Tensor 32 | (coil_count, voxel_count) Per coil and per voxel B1 inhomogenity 33 | coil_sens : torch.Tensor 34 | (coil_count, voxel_count) Per coil sensitivity (arbitrary units) 35 | size : torch.Tensor 36 | Physical size of the phantom. If a sequence with normalized gradients 37 | is simulated, size is used to scale them to match the phantom. 38 | avg_B1_trig : torch.Tensor 39 | (361, 3) values containing the PD-weighted avg of sin/cos/sin²(B1*flip) 40 | voxel_pos : torch.Tensor 41 | (voxel_count, 3) Voxel positions. These can be anywhere, but for easy 42 | sequence programming they should be in the range [-0.5, 0.5[ 43 | nyquist : torch.Tensor 44 | (3, ) tensor: Maximum frequency encoded by the data 45 | dephasing_func : torch.Tensor -> torch.Tensor 46 | A function describing the intra-voxel dephasing. Maps a k-space 47 | trajectory (events, 3) to the measured attenuation (events). 48 | recover_func : SimData -> Any 49 | A function that can recover the original data that was used to create 50 | this instance. Usually a lambda that captures meta data like a mask. 51 | """ 52 | 53 | def __init__( 54 | self, 55 | PD: torch.Tensor, 56 | T1: torch.Tensor, 57 | T2: torch.Tensor, 58 | T2dash: torch.Tensor, 59 | D: torch.Tensor, 60 | B0: torch.Tensor, 61 | B1: torch.Tensor, 62 | coil_sens: torch.Tensor, 63 | size: torch.Tensor, 64 | voxel_pos: torch.Tensor, 65 | nyquist: torch.Tensor, 66 | dephasing_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor], 67 | recover_func: Callable[[SimData], Any] | None = None, 68 | phantom_motion=None, 69 | voxel_motion=None, 70 | tissue_masks: Optional[Dict[str,torch.Tensor]] = None, 71 | ) -> None: 72 | """Create a :class:`SimData` instance based on the given tensors. 73 | 74 | All parameters must be of shape ``(voxel_count, )``, only B1 and 75 | coil_sens have an additional first dimension for multiple coils. 
76 | 77 | Parameters 78 | ---------- 79 | normalize : bool 80 | If true, applies B0 -= B0.mean(), B1 /= B1.mean(), PD /= PD.sum() 81 | """ 82 | if not (PD.shape == T1.shape == T2.shape == T2dash.shape == B0.shape): 83 | raise Exception("Mismatch of voxel-data shapes") 84 | if not PD.ndim == 1: 85 | raise Exception("Data must be 1D (flattened)") 86 | if B1.ndim < 2 or B1.shape[1] != PD.numel(): 87 | raise Exception("B1 must have shape [coils, voxel_count]") 88 | if coil_sens.ndim < 2 or coil_sens.shape[1] != PD.numel(): 89 | raise Exception("coil_sens must have shape [coils, voxel_count]") 90 | 91 | self.PD = PD.clamp(min=0) 92 | self.T1 = T1.clamp(min=1e-6) 93 | self.T2 = T2.clamp(min=1e-6) 94 | self.T2dash = T2dash.clamp(min=1e-6) 95 | self.D = D.clamp(min=1e-6) 96 | self.B0 = B0.clone() 97 | self.B1 = B1.clone() 98 | self.tissue_masks = tissue_masks 99 | if self.tissue_masks is None: 100 | self.tissue_masks = {} 101 | self.coil_sens = coil_sens.clone() 102 | self.size = size.clone() 103 | self.voxel_pos = voxel_pos.clone() 104 | self.avg_B1_trig = calc_avg_B1_trig(B1, PD) 105 | self.nyquist = nyquist.clone() 106 | self.dephasing_func = dephasing_func 107 | self.recover_func = recover_func 108 | 109 | self.phantom_motion = phantom_motion 110 | self.voxel_motion = voxel_motion 111 | 112 | def cuda(self) -> SimData: 113 | """Move the simulation data to the default CUDA device. 114 | 115 | The returned :class:`SimData` is equivalent to :attr:`self` if the data 116 | already was on the GPU. 117 | """ 118 | return SimData( 119 | self.PD.cuda(), 120 | self.T1.cuda(), 121 | self.T2.cuda(), 122 | self.T2dash.cuda(), 123 | self.D.cuda(), 124 | self.B0.cuda(), 125 | self.B1.cuda(), 126 | self.coil_sens.cuda(), 127 | self.size.cuda(), 128 | self.voxel_pos.cuda(), 129 | self.nyquist.cuda(), 130 | self.dephasing_func, 131 | self.recover_func, 132 | self.phantom_motion, 133 | self.voxel_motion, 134 | tissue_masks={ 135 | k: v.cuda() for k, v in self.tissue_masks.items() 136 | }, 137 | ) 138 | 139 | def cpu(self) -> SimData: 140 | """Move the simulation data to the CPU. 141 | 142 | The returned :class:`SimData` is equivalent to :attr:`self` if the data 143 | already was on the CPU. 144 | """ 145 | return SimData( 146 | self.PD.cpu(), 147 | self.T1.cpu(), 148 | self.T2.cpu(), 149 | self.T2dash.cpu(), 150 | self.D.cpu(), 151 | self.B0.cpu(), 152 | self.B1.cpu(), 153 | self.coil_sens.cpu(), 154 | self.size.cpu(), 155 | self.voxel_pos.cpu(), 156 | self.nyquist.cpu(), 157 | self.dephasing_func, 158 | self.recover_func, 159 | self.phantom_motion, 160 | self.voxel_motion, 161 | tissue_masks={ 162 | k: v.cpu() for k, v in self.tissue_masks.items() 163 | }, 164 | ) 165 | 166 | @property 167 | def device(self) -> torch.device: 168 | """The device (either CPU or a CUDA device) the data is stored on.""" 169 | return self.PD.device 170 | 171 | def recover(self) -> Any: 172 | """Recover the data that was used to build this instance.""" 173 | if self.recover_func is None: 174 | raise Exception("No recover function was provided") 175 | else: 176 | return self.recover_func(self) 177 | 178 | 179 | def calc_avg_B1_trig(B1: torch.Tensor, PD: torch.Tensor) -> torch.Tensor: 180 | """Return a (361, 3) tensor for B1 specific sin, cos and sin² values. 181 | 182 | This function calculates values for sin, cos and sin² for (0, 2pi) * B1 and 183 | then averages the results, weighted by PD. 
These 3 functions are the non 184 | linear parts of a rotation matrix, the resulting look up table can be used 185 | to calcualte averaged rotations for the whole phantom. This is useful for 186 | the pre-pass, to get better magnetization estmates even if the pre-pass is 187 | not spatially resolved. 188 | """ 189 | # With pTx, there are now potentially multiple B1 maps with phase. 190 | # NOTE: This is a (probably suboptimal) workaround 191 | B1 = B1.sum(0).abs() 192 | 193 | B1 = B1.flatten()[:, None] # voxels, 1 194 | PD = (PD.flatten() / PD.sum())[:, None] # voxels, 1 195 | angle = torch.linspace(0, 2*pi, 361, device=PD.device)[None, :] # 1, angle 196 | return torch.stack([ 197 | (torch.sin(B1 * angle) * PD).sum(0), 198 | (torch.cos(B1 * angle) * PD).sum(0), 199 | (torch.sin(B1 * angle/2)**2 * PD).sum(0) 200 | ], dim=1).type(torch.float32) 201 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/helpers.py: -------------------------------------------------------------------------------- 1 | import time 2 | import os 3 | import io 4 | import torch 5 | import numpy as np 6 | import base64 7 | from ..sequence import Sequence 8 | 9 | print("""---------------------------------------------------------- 10 | WARNING! 11 | Included helpers.py, which is not yet appropriately 12 | documented for MRzero-Core, neither are its dependencies 13 | included in the MRzero-Core requirements, 14 | and it is not exposed yet to the global mr0 import! 15 | ----------------------------------------------------------""") 16 | 17 | 18 | # TODO: This is specific to GRE-like sequences, make it more general! 19 | def get_signal_from_real_system(path, seq, NRep: float | None = None): 20 | if NRep is None: 21 | NRep = len(seq) 22 | NCol = torch.count_nonzero(seq[2].adc_usage).item() 23 | 24 | print('waiting for TWIX file from the scanner... ' + path) 25 | done_flag = False 26 | while not done_flag: 27 | if os.path.isfile(path): 28 | # read twix file 29 | print("TWIX file arrived. Reading....") 30 | 31 | ncoils = 20 32 | time.sleep(0.2) 33 | raw = np.loadtxt(path) 34 | 35 | heuristic_shift = 4 36 | print("raw size: {} ".format(raw.size) + "expected size: {} ".format( 37 | "raw size: {} ".format(NRep*ncoils*(NCol+heuristic_shift)*2))) 38 | 39 | if raw.size != NRep*ncoils*(NCol+heuristic_shift)*2: 40 | print( 41 | "get_signal_from_real_system: SERIOUS ERROR, TWIX dimensions corrupt, returning zero array..") 42 | raw = np.zeros((NRep, ncoils, NCol+heuristic_shift, 2)) 43 | raw = raw[:, :, :NCol, 0] + 1j*raw[:, :, :NCol, 1] 44 | else: 45 | raw = raw.reshape([NRep, ncoils, NCol+heuristic_shift, 2]) 46 | raw = raw[:, :, :NCol, 0] + 1j*raw[:, :, :NCol, 1] 47 | 48 | # raw = raw.transpose([1,2,0]) #ncoils,NRep,NCol 49 | raw = raw.transpose([0, 2, 1]) # NRep,NCol,NCoils 50 | raw = raw.reshape([NRep*NCol, ncoils]) 51 | raw = np.copy(raw) 52 | done_flag = True 53 | 54 | return torch.tensor(raw, dtype=torch.complex64) 55 | 56 | 57 | def write_data_to_seq_file(seq: Sequence, file_name: str): 58 | """Write all sequence data needed for reconstruction into a .seq file. 59 | 60 | The data is compressed, base64 encoded and inserted as a comment into the 61 | pulseq .seq file, which means it is ignored by all interpreters and only 62 | slightly increases the file size. 
63 | 64 | Parameters 65 | ---------- 66 | seq : Sequence 67 | Should be the sequence that was used to produce the .seq file 68 | file_name : str 69 | The file name to append the data to, it is not checked if this 70 | actually is a pulseq .seq file. 71 | """ 72 | kspace = seq.get_kspace() 73 | adc_usage = torch.cat([rep.adc_usage[rep.adc_usage > 0] for rep in seq]) 74 | 75 | # Transpose for more efficient compression (contiguous components) 76 | kspace_enc = np.ascontiguousarray(kspace.T.cpu().numpy()) 77 | # Delta encoding (works very well for cartesian trajectories) 78 | kspace_enc[:, 1:] -= kspace_enc[:, :-1] 79 | # Reduce precision, don't need 32bit for a kspace 80 | kspace_enc = kspace_enc.astype(np.float16) 81 | 82 | # Compressing adc_usage 83 | assert -128 <= adc_usage.min() <= 127, "8 bit are not enough" 84 | adc_usage_enc = adc_usage.cpu().numpy().astype(np.int8) 85 | 86 | # Compress and encode with base64 to write as legal ASCII text 87 | buffer = io.BytesIO() 88 | np.savez_compressed(buffer, kspace=kspace_enc, adc_usage=adc_usage_enc) 89 | encoded = base64.b64encode(buffer.getvalue()).decode('ascii') 90 | 91 | # The pulseq Siemens interpreter has a bug in the comment code leading to 92 | # errors if comments are longer than MAX_LINE_WIDTH = 256. We split the 93 | # data into chunks of 250 bytes to be on the safe side. 94 | with open(file_name, "a") as file: 95 | for i in range(0, len(encoded), 250): 96 | file.write(f"\n# {encoded[i:i+250]}") 97 | file.write("\n") 98 | 99 | 100 | def extract_data_from_seq_file( 101 | file_name: str 102 | ) -> tuple[torch.Tensor, torch.Tensor]: 103 | """Extracts kspace and adc_usage written with ``write_data_to_seq_file``. 104 | 105 | Parameters 106 | ---------- 107 | file_name : str 108 | The name of the file the kspace was previously written to. 109 | 110 | Returns 111 | ------- 112 | The original kspace and the adc_usage. There might be a loss of precision 113 | because the kspace is written as 16 bit (half precision) floats and the 114 | usage as 8 bit integer (-128 to 127), this could be changed. 115 | """ 116 | try: 117 | with open(file_name, "r") as file: 118 | # Find the last n lines that start with a '#' 119 | lines = file.readlines() 120 | 121 | if lines[-1][-1:] != '\n': 122 | lines[-1] = lines[-1] + '\n' 123 | 124 | n = len(lines) 125 | while n > 0 and lines[n-1][0] == '#': 126 | n -= 1 127 | if n == len(lines): 128 | raise ValueError( 129 | "No data comment found at the end of the file") 130 | 131 | # Join the parts of the comment while removing "# " and "\n" 132 | encoded = "".join(line[2:-1] for line in lines[n:]) 133 | # print(encoded) 134 | decoded = base64.b64decode(encoded, validate=True) 135 | 136 | data = np.load(io.BytesIO(decoded)) 137 | kspace = np.cumsum(data["kspace"].astype(np.float32), 1).T 138 | adc_usage = data["adc_usage"].astype(np.int32) 139 | 140 | return torch.tensor(kspace), torch.tensor(adc_usage) 141 | except Exception as e: 142 | raise ValueError("Could not extract data from .seq") from e 143 | 144 | 145 | def load_measurement( 146 | seq_file: str, 147 | seq_dat_file: str, 148 | wait_for_dat: bool = False, 149 | twix: bool = False 150 | ) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]: 151 | """Loads the seq data from a .seq file and the signal from a .seq.dat file. 152 | 153 | This function waits for the .seq.dat file if it doesn't exist yet and 154 | ``wait_for_dat = True``. 
155 | 156 | Parameters 157 | ---------- 158 | seq_file : str 159 | Name of the (path to the) .seq file 160 | seq_dat_file : str 161 | Name of the (path to the) .seq.dat file 162 | wait_for_dat : bool 163 | Specifies if this function should wait for the .seq.dat file or throw 164 | an error if it doesn't exist 165 | 166 | Returns 167 | ------- 168 | (Samples, 4) tensor containing the kspace stored in the .seq file and a 169 | (Samples, Coils) tensor containing the signal (for all coils) 170 | """ 171 | 172 | kspace, adc_usage = extract_data_from_seq_file(seq_file) 173 | 174 | if wait_for_dat: 175 | print("Waiting for TWIX file...", end="") 176 | while not os.path.isfile(seq_dat_file): 177 | time.sleep(0.2) 178 | print(" arrived!") 179 | 180 | if twix: 181 | # Clone https://github.com/pehses/twixtools 182 | import twixtools 183 | twix = twixtools.read_twix(seq_dat_file) 184 | image_mdbs = [mdb for mdb in twix[-1]['mdb'] if mdb.is_image_scan()] 185 | 186 | n_line = 1 + max([mdb.cLin for mdb in image_mdbs]) 187 | 188 | # assume that all data were acquired with same number of channels & columns: 189 | n_channel, n_column = image_mdbs[0].data.shape 190 | 191 | kspace_data = np.zeros( 192 | [n_line, n_channel, n_column], dtype=np.complex64) 193 | for mdb in image_mdbs: 194 | kspace_data[mdb.cLin] = mdb.data 195 | # For 32 Coils! 196 | signal = kspace_data.transpose(0, 2, 1).reshape(-1, 32) 197 | else: 198 | data = np.loadtxt(seq_dat_file) 199 | 200 | data = data[:, 0] + 1j*data[:, 1] 201 | 202 | # .dat files contain additional samples we need to remove. This is probably 203 | # a bug in the TWIX to text file converter. 204 | # 205 | # These additional samples might be at the and of every shot or ADC block, 206 | # in which case a possible solution would be to store the subdivision in 207 | # the .seq file. 208 | # 209 | # Or maybe we can just fix it when exporting .seq files :D 210 | # 211 | # For now, we detect the number of samples in a single ADC readout and 212 | # assume 20 coils. Might not work for irregular readouts. 213 | 214 | # We assume that there are no exact zeros in the actual signal 215 | adc_length = np.where(np.abs(data) == 0)[0][0] 216 | data = data.reshape([-1, 20, adc_length + 4]) 217 | 218 | # Remove additional samples and reshape into samples x coils 219 | signal = data.transpose([0, 2, 1])[:, :adc_length, :].reshape([-1, 20]) 220 | 221 | if kspace.shape[0] != signal.shape[0]: 222 | print( 223 | f"WARNING: the kspace contains {kspace.shape[0]} samples but the " 224 | f"loaded signal has {signal.shape[0]}. They are either not for the" 225 | " same measurement, or something went wrong loading the data." 226 | ) 227 | 228 | return kspace, adc_usage, torch.tensor(signal, dtype=torch.complex64) 229 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from .pulseq_file import PulseqFile # noqa 3 | from .pulse import Pulse 4 | from .spoiler import Spoiler 5 | from .adc import Adc 6 | 7 | 8 | # ----- PULSES ----- 9 | # Simulated pulses are instantaneous. Pulseq blocks containing pulse events are 10 | # split in the center of the pulse shape and converted into a gradient event 11 | # before the pulse and a pulse + gradient event afterwards. This should result 12 | # in a simulation that has no e.g. 
slice selection, but where the gradeint 13 | # moments seen by refocussed, excited and unaffected magnetisation are correct. 14 | # ----- ADC ----- 15 | # As stated in the specification, all samples are placed at (n + 0.5) * raster, 16 | # which means when there is a gradient and an adc sample at the same time, 17 | # the adc measurement sees only the first half of the gradient. TODO: check 18 | # pulseq exporters and the Siemens interpreter to see if they respect the spec 19 | # or if the adc samples should rather be placed at (n + 0 or 1) * raster. 20 | # ----- GRADIENTS ----- 21 | # Only TRAP gradients are currently supported, arbitrary gradients would 22 | # require to resample the gradient onto the ADC time grid. Note that the spec 23 | # does not specify what should happen in between gradient samples, but there 24 | # are two sensible options: 25 | # - Mimic the scanner, where interpolation is given by the electronics, but a 26 | # simple linear or bezier or similar interpolation should be a good choice 27 | # - Adhere to the implementation of the official pulseq exporter, which 28 | # probably assumes a piecewise constant gradient 29 | # NOTE: As long as we don't simulate diffusion, we don't care about the shape 30 | # of a gradient if it is not measured simultaneously. 31 | # ----- Additional ----- 32 | # Simultaneous RF and ADC events are not supported but probably don't exist in 33 | # pracitce anyways. 34 | 35 | 36 | def intermediate( 37 | file: PulseqFile 38 | ) -> list[tuple[int, Pulse, list[Spoiler | Adc]]]: 39 | seq = [] 40 | # Convert to intermediate representation 41 | for block in file.blocks.values(): 42 | assert block.rf_id == 0 or block.adc_id == 0 43 | 44 | if block.rf_id != 0: 45 | seq += Pulse.parse(block, file) 46 | elif block.adc_id != 0: 47 | seq += Adc.parse(block, file) 48 | else: 49 | seq.append(Spoiler.parse(block, file)) 50 | 51 | reps = [] 52 | # [event_count, pulse, list of events] 53 | current = [0, None, []] # Dummy for events before first pulse 54 | # Split into repetitions 55 | for block in seq: 56 | if isinstance(block, Pulse): 57 | current = [0, block, []] 58 | reps.append(current) 59 | else: 60 | if isinstance(block, Adc): 61 | current[0] += len(block.event_time) 62 | else: # Spoiler 63 | current[0] += 1 64 | current[2].append(block) 65 | 66 | return reps 67 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/adc.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import numpy as np 3 | from .pulseq_file import PulseqFile, Block 4 | from .helpers import integrate 5 | from .spoiler import Spoiler 6 | 7 | 8 | class Adc: 9 | def __init__(self, event_time: np.ndarray, gradm: np.ndarray, phase: float 10 | ) -> None: 11 | self.event_time = event_time 12 | self.gradm = gradm 13 | self.phase = phase 14 | 15 | @classmethod 16 | def parse(cls, block: Block, pulseq: PulseqFile) -> tuple[Adc, Spoiler]: 17 | adc = pulseq.adcs[block.adc_id] 18 | time = np.concatenate([ 19 | [0.0], 20 | adc.delay + np.arange(adc.num) * adc.dwell, 21 | [block.duration] 22 | ]) 23 | 24 | gradm = np.zeros((adc.num + 1, 3)) 25 | if block.gx_id != 0: 26 | grad = pulseq.grads[block.gx_id] 27 | gradm[:, 0] = np.diff([integrate(grad, pulseq, t) for t in time]) 28 | if block.gy_id != 0: 29 | grad = pulseq.grads[block.gy_id] 30 | gradm[:, 1] = np.diff([integrate(grad, pulseq, t) for t in time]) 31 | if block.gz_id != 0: 32 | grad = 
pulseq.grads[block.gz_id] 33 | gradm[:, 2] = np.diff([integrate(grad, pulseq, t) for t in time]) 34 | 35 | event_time = np.diff(time) 36 | 37 | fov = pulseq.definitions.fov 38 | gradm[:, 0] *= fov[0] 39 | gradm[:, 1] *= fov[1] 40 | gradm[:, 2] *= fov[2] 41 | 42 | return ( 43 | cls(event_time[:-1], gradm[:-1, :], adc.phase), 44 | Spoiler(event_time[-1], gradm[-1, :]) 45 | ) 46 | 47 | def __repr__(self) -> str: 48 | return f"ADC(event_time={self.event_time}, gradm={self.gradm})" 49 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/helpers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import numpy as np 3 | from .pulseq_file import PulseqFile, Gradient, Trap 4 | 5 | 6 | def split_gradm(grad: Gradient | Trap, pulseq: PulseqFile, t: float 7 | ) -> tuple[float, float]: 8 | before = integrate(grad, pulseq, t) 9 | total = integrate(grad, pulseq, float("inf")) 10 | return (before, total - before) 11 | 12 | 13 | def integrate(grad: Gradient | Trap, pulseq: PulseqFile, t: float) -> float: 14 | if isinstance(grad, Trap): 15 | t -= grad.delay 16 | total = grad.rise/2 + grad.flat + grad.fall/2 17 | 18 | if t <= 0.0: 19 | integral = 0.0 20 | elif t < grad.rise: 21 | integral = 0.5*t**2 / grad.rise 22 | elif t < grad.rise + grad.flat: 23 | t -= grad.rise 24 | integral = grad.rise/2 + t 25 | elif t < grad.rise + grad.flat + grad.fall: 26 | t = grad.rise + grad.flat + grad.fall - t 27 | integral = total - 0.5*t**2 / grad.fall 28 | else: 29 | integral = total 30 | return grad.amp * integral 31 | else: 32 | assert isinstance(grad, Gradient) 33 | # How many gradient samples are there before t? 34 | # We assume that adc aligns with the gradient raster, otherwise we 35 | # would have to integrate over an interpolatet version of the gradient 36 | shape = pulseq.shapes[grad.shape_id] 37 | raster_time = pulseq.definitions.grad_raster_time 38 | t -= grad.delay 39 | 40 | if grad.time_id != 0: 41 | time = pulseq.shapes[grad.time_id] * raster_time 42 | else: 43 | time = np.arange(len(shape)) * raster_time 44 | event_time = np.concatenate([np.diff(time), [raster_time]]) 45 | t = min(t, time[-1]) 46 | 47 | # Cut off all events after t 48 | mask = time < t 49 | time = time[mask] 50 | event_time = event_time[mask] 51 | shape = shape[mask] 52 | 53 | if len(time) == 0: 54 | return 0.0 55 | 56 | # Sum over all samples that end before t 57 | integral = np.sum(shape[:-1] * event_time[:-1]) 58 | # Add the last sample that might only be partially before t 59 | integral += shape[-1] * (t - time[-1]) 60 | 61 | return grad.amp * integral 62 | 63 | 64 | def total_gradm(grad: Gradient | Trap, pulseq: PulseqFile) -> float: 65 | if isinstance(grad, Trap): 66 | return grad.amp * (grad.rise/2 + grad.flat + grad.fall/2) 67 | else: 68 | assert isinstance(grad, Gradient) 69 | shape = pulseq.shapes[grad.shape_id] 70 | raster_time = pulseq.definitions.grad_raster_time 71 | if grad.time_id != 0: 72 | event_time = np.concatenate([np.diff(shape), [1]]) * raster_time 73 | else: 74 | event_time = np.full((len(shape), ), raster_time) 75 | return np.sum(grad.amp * shape * event_time) 76 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulse.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import numpy as np 3 | from .pulseq_file import PulseqFile, 
Block 4 | from .helpers import split_gradm 5 | from .spoiler import Spoiler 6 | 7 | 8 | class Pulse: 9 | def __init__( 10 | self, 11 | angle: float, 12 | phase: float, 13 | shim_array: np.ndarray 14 | ) -> None: 15 | self.angle = angle 16 | self.phase = phase 17 | self.shim_array = shim_array 18 | 19 | @classmethod 20 | def parse( 21 | cls, block: Block, pulseq: PulseqFile 22 | ) -> tuple[Spoiler, Pulse, Spoiler]: 23 | rf = pulseq.rfs[block.rf_id] 24 | raster_time = pulseq.definitions.rf_raster_time 25 | 26 | if rf.time_id != 0: 27 | time = pulseq.shapes[rf.time_id] 28 | else: 29 | time = np.arange(len(pulseq.shapes[rf.mag_id])) 30 | event_time = np.concatenate([time[1:] - time[:-1], [1]]) * raster_time 31 | 32 | mag = 2*np.pi * rf.amp * pulseq.shapes[rf.mag_id] 33 | phase = pulseq.shapes[rf.phase_id] 34 | pulse = mag * event_time * np.exp(2j*np.pi * phase) 35 | 36 | # Pulses with complex phases are not supported 37 | assert np.sum(np.abs(pulse.imag)) < np.pi/180 # Trigger at 1° 38 | pulse = pulse.real 39 | 40 | angle = np.sum(pulse) 41 | phase = rf.phase 42 | 43 | center = np.argmax(np.cumsum(pulse) > angle / 2) 44 | t = float(rf.delay + center * raster_time) 45 | 46 | gradm = np.zeros((2, 3)) 47 | if block.gx_id != 0: 48 | gradm[:, 0] = split_gradm(pulseq.grads[block.gx_id], pulseq, t) 49 | if block.gy_id != 0: 50 | gradm[:, 1] = split_gradm(pulseq.grads[block.gy_id], pulseq, t) 51 | if block.gz_id != 0: 52 | gradm[:, 2] = split_gradm(pulseq.grads[block.gz_id], pulseq, t) 53 | 54 | fov = pulseq.definitions.fov 55 | gradm[:, 0] *= fov[0] 56 | gradm[:, 1] *= fov[1] 57 | gradm[:, 2] *= fov[2] 58 | 59 | # If there is pTx, replace angle and phase with per-channel dat 60 | if rf.shim_mag_id != 0: 61 | assert rf.shim_phase_id != 0 62 | shim_array = np.stack([ 63 | pulseq.shapes[rf.shim_mag_id], 64 | pulseq.shapes[rf.shim_phase_id] 65 | ], 1) 66 | else: 67 | shim_array = np.ones((1, 2)) 68 | 69 | return ( 70 | Spoiler(t, gradm[0, :]), 71 | cls(angle, phase, shim_array), 72 | Spoiler(block.duration - t, gradm[1, :]) 73 | ) 74 | 75 | def __repr__(self) -> str: 76 | return ( 77 | f"Pulse(angle={self.angle*180/np.pi:.1f}°, " 78 | f"phase={self.phase*180/np.pi:.1f}°, " 79 | f"shim_array={self.shim_array})" 80 | ) 81 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/__init__.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from . 
import helpers 3 | from .definitons import Definitions 4 | from .block import parse_blocks, write_blocks, Block 5 | from .rf import parse_rfs, write_rfs, Rf # noqa 6 | from .trap import parse_traps, write_traps, Trap 7 | from .gradient import parse_gradients, write_grads, Gradient 8 | from .adc import parse_adcs, write_adcs, Adc # noqa 9 | 10 | import matplotlib.pyplot as plt 11 | import numpy as np 12 | 13 | # Supports version 1.2.0 to 1.4.0, python representation is modeled after 1.4.0 with pTx 14 | # Supports Martin Freudensprung's pTx extension as pulseq version 1.3.9 and 1.4.5 15 | 16 | 17 | class PulseqFile: 18 | def __init__(self, file_name: str) -> None: 19 | sections = helpers.file_to_sections(file_name) 20 | 21 | assert "VERSION" in sections 22 | self.version = helpers.parse_version(sections.pop("VERSION")) 23 | assert 120 <= self.version <= 149 24 | 25 | # mandatory sections 26 | assert "BLOCKS" in sections 27 | assert self.version < 140 or "DEFINITIONS" in sections 28 | assert not (self.version >= 140 and "DELAYS" in sections) 29 | 30 | if "DEFINITIONS" in sections: 31 | self.definitions = Definitions.parse( 32 | sections.pop("DEFINITIONS"), self.version) 33 | else: 34 | self.definitions = Definitions({}, self.version) 35 | 36 | # Parse [RF], [GRADIENTS], [TRAP], [ADC], [SHAPES] 37 | # They are dicts of (ID, event) so return an empty dict if not present 38 | def maybe_parse(sec_name, parser): 39 | if sec_name not in sections: 40 | return {} 41 | else: 42 | return parser(sections.pop(sec_name), self.version) 43 | 44 | self.rfs = maybe_parse("RF", parse_rfs) 45 | self.grads = helpers.merge_dicts( 46 | maybe_parse("GRADIENTS", parse_gradients), 47 | maybe_parse("TRAP", parse_traps), 48 | ) 49 | self.adcs = maybe_parse("ADC", parse_adcs) 50 | self.shapes = maybe_parse("SHAPES", helpers.parse_shapes) 51 | 52 | # Finally parse the blocks, some additional logic is needed to convert 53 | # 1.3.x sequences with delay events into the 1.4.0 format 54 | if self.version >= 140: 55 | self.blocks = parse_blocks( 56 | sections.pop("BLOCKS"), self.version, 57 | None, self.definitions.block_raster_time 58 | ) 59 | else: 60 | delays = maybe_parse("DELAYS", helpers.parse_delays) 61 | self.blocks = parse_blocks( 62 | sections.pop("BLOCKS"), self.version, 63 | delays, None 64 | ) 65 | 66 | # Inform if there are sections that were not parsed 67 | if len(sections) > 0: 68 | print(f"Some sections were ignored: {list(sections.keys())}") 69 | 70 | # Calculate block durations for 1.3.x sequences 71 | def calc_duration(block: Block) -> float: 72 | durs = [block.duration] # delay event for 1.3.x 73 | 74 | if block.adc_id != 0: 75 | durs.append(self.adcs[block.adc_id].get_duration()) 76 | 77 | if block.rf_id != 0: 78 | durs.append(self.rfs[block.rf_id].get_duration( 79 | self.definitions.rf_raster_time, self.shapes 80 | )) 81 | 82 | grads = [ 83 | self.grads.get(block.gx_id, None), 84 | self.grads.get(block.gy_id, None), 85 | self.grads.get(block.gz_id, None) 86 | ] 87 | 88 | for grad in grads: 89 | if isinstance(grad, Gradient): 90 | durs.append(grad.get_duration( 91 | self.definitions.grad_raster_time, self.shapes 92 | )) 93 | if isinstance(grad, Trap): 94 | durs.append(grad.get_duration()) 95 | 96 | return max(durs) 97 | 98 | for block in self.blocks.keys(): 99 | # We could check if 1.4.0 has set correct durations 100 | self.blocks[block].duration = calc_duration(self.blocks[block]) 101 | 102 | def save(self, file_name: str): 103 | with open(file_name, "w") as out: 104 | out.write( 105 | "# Pulseq 
sequence definition file\n" 106 | "# Re-Exported by the MRzero pulseq interpreter\n" 107 | ) 108 | helpers.write_version(out, 140) 109 | self.definitions.write(out) 110 | write_blocks(out, self.blocks, self.definitions.block_raster_time) 111 | write_rfs(out, self.rfs) 112 | write_traps( 113 | out, 114 | {k: v for k, v in self.grads.items() if isinstance(v, Trap)} 115 | ) 116 | write_grads( 117 | out, 118 | {k: v for k, v in self.grads.items() 119 | if isinstance(v, Gradient)} 120 | ) 121 | write_adcs(out, self.adcs) 122 | helpers.write_shapes(out, self.shapes) 123 | 124 | def __repr__(self) -> str: 125 | return ( 126 | f"PulseqFile(version={self.version}, " 127 | f"definitions={self.definitions}, " 128 | f"blocks={self.blocks}, " 129 | f"rfs={self.rfs}, " 130 | f"adcs={self.adcs}, " 131 | f"grads={self.grads}, " 132 | f"shapes={self.shapes})" 133 | ) 134 | 135 | def plot(self, figsize: tuple[float, float] | None = None): 136 | # Convert the sequence into a plottable format 137 | rf_plot = [] 138 | adc_plot = [] 139 | gx_plot = [] 140 | gy_plot = [] 141 | gz_plot = [] 142 | t0 = [0.0] 143 | 144 | for block in self.blocks.values(): 145 | if block.rf_id != 0: 146 | rf_plot.append(get_rf(self.rfs[block.rf_id], self, t0[-1])) 147 | if block.adc_id != 0: 148 | adc_plot.append(get_adc(self.adcs[block.adc_id], self, t0[-1])) 149 | if block.gx_id != 0: 150 | gx_plot.append(get_grad(self.grads[block.gx_id], self, t0[-1])) 151 | if block.gy_id != 0: 152 | gy_plot.append(get_grad(self.grads[block.gy_id], self, t0[-1])) 153 | if block.gz_id != 0: 154 | gz_plot.append(get_grad(self.grads[block.gz_id], self, t0[-1])) 155 | t0.append(t0[-1] + block.duration) 156 | 157 | # Plot the aquired data 158 | plt.figure(figsize=figsize) 159 | 160 | ax1 = plt.subplot(311) 161 | plt.title("RF") 162 | for rf in rf_plot: 163 | ax1.plot(rf[0], rf[1].real, c="tab:blue") 164 | ax1.plot(rf[0], rf[1].imag, c="tab:orange") 165 | plt.grid() 166 | plt.ylabel("Hz") 167 | 168 | ax2 = plt.subplot(312, sharex=ax1) 169 | plt.title("ADC") 170 | for adc in adc_plot: 171 | ax2.plot(adc[0], adc[1], '.') 172 | for t in t0: 173 | plt.axvline(t, c="#0004") 174 | plt.grid() 175 | plt.ylabel("rad") 176 | 177 | ax3 = plt.subplot(313, sharex=ax1) 178 | plt.title("Gradients") 179 | for grad in gx_plot: 180 | ax3.plot(grad[0], grad[1], c="tab:blue") 181 | for grad in gy_plot: 182 | ax3.plot(grad[0], grad[1], c="tab:orange") 183 | for grad in gz_plot: 184 | ax3.plot(grad[0], grad[1], c="tab:green") 185 | plt.grid() 186 | plt.xlabel("time [s]") 187 | plt.ylabel("Hz/m") 188 | 189 | plt.setp(ax1.get_xticklabels(), visible=False) 190 | plt.setp(ax2.get_xticklabels(), visible=False) 191 | plt.show() 192 | 193 | 194 | # Helper functions for plotting 195 | 196 | def get_rf(rf: Rf, seq: PulseqFile, t0: float 197 | ) -> tuple[np.ndarray, np.ndarray]: 198 | if rf.time_id != 0: 199 | time = seq.shapes[rf.time_id] 200 | else: 201 | time = np.arange(len(seq.shapes[rf.mag_id])) 202 | 203 | time = t0 + rf.delay + (time + 0.5) * seq.definitions.rf_raster_time 204 | mag = rf.amp * seq.shapes[rf.mag_id] 205 | phase = rf.phase + 2*np.pi * seq.shapes[rf.phase_id] 206 | 207 | return time, mag * np.exp(1j * phase) 208 | 209 | 210 | def get_adc(adc: Adc, seq: PulseqFile, t0: float 211 | ) -> tuple[np.ndarray, np.ndarray]: 212 | time = t0 + adc.delay + (np.arange(adc.num) + 0.5) * adc.dwell 213 | return time, adc.phase * np.ones(adc.num) 214 | 215 | 216 | def get_grad(grad: Gradient | Trap, seq: PulseqFile, t0: float 217 | ) -> tuple[np.ndarray, np.ndarray]: 218 | if 
isinstance(grad, Gradient): 219 | if grad.time_id != 0: 220 | time = seq.shapes[grad.time_id] 221 | else: 222 | time = np.arange(len(seq.shapes[grad.shape_id])) 223 | time = grad.delay + (time + 0.5) * seq.definitions.grad_raster_time 224 | shape = grad.amp * seq.shapes[grad.shape_id] 225 | else: 226 | assert isinstance(grad, Trap) 227 | time = grad.delay + np.array([ 228 | 0.0, 229 | grad.rise, 230 | grad.rise + grad.flat, 231 | grad.rise + grad.flat + grad.fall 232 | ]) 233 | shape = np.array([0, grad.amp, grad.amp, 0]) 234 | 235 | return t0 + time, shape 236 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/adc.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | class Adc: 5 | def __init__( 6 | self, 7 | num: int, 8 | dwell: float, # s (spec: ns) 9 | delay: float, # s (spec: us) 10 | freq: float, # Hz 11 | phase: float, # rad 12 | ) -> None: 13 | self.num = num 14 | self.dwell = dwell 15 | self.delay = delay 16 | self.freq = freq 17 | self.phase = phase 18 | 19 | @classmethod 20 | def parse(cls, line: str, version: int) -> tuple[int, Adc]: 21 | assert 120 <= version <= 149 22 | vals = line.split() 23 | assert len(vals) == 6 24 | 25 | adc_id = int(vals[0]) 26 | return adc_id, cls( 27 | int(vals[1]), 28 | float(vals[2]) * 1e-9, 29 | int(vals[3]) * 1e-6, 30 | float(vals[4]), 31 | float(vals[5]), 32 | ) 33 | 34 | def write(self, file, adc_id: int): 35 | file.write( 36 | f"{adc_id:4d} {self.num:7d} {round(self.dwell*1e9):7d} " 37 | f"{round(self.delay*1e6):7d} {self.freq:.6f} {self.phase:.6f}\n" 38 | ) 39 | 40 | def get_duration(self) -> float: 41 | return self.num * self.dwell + self.delay 42 | 43 | def __repr__(self) -> str: 44 | return ( 45 | f"ADC(num={self.num}, " 46 | f"dwell={self.dwell}, " 47 | f"delay={self.delay}, " 48 | f"freq={self.freq}, " 49 | f"phase={self.phase})" 50 | ) 51 | 52 | 53 | def parse_adcs(lines: list[str], version: int) -> dict[int, Adc]: 54 | tmp = {} 55 | for line in lines: 56 | key, value = Adc.parse(line, version) 57 | assert key > 0 and key not in tmp 58 | tmp[key] = value 59 | return tmp 60 | 61 | 62 | def write_adcs(file, adcs: dict[int, Adc]): 63 | file.write( 64 | "\n[ADC]\n" 65 | "# ID num dwell delay freq phase\n" 66 | ) 67 | for adc_id, adc in adcs.items(): 68 | adc.write(file, adc_id) 69 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/block.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | class Block: 5 | def __init__( 6 | self, 7 | duration: float, # s (spec: 1.4.0: BlockRaster units, 1.3.x: computed) 8 | rf_id: int, 9 | gx_id: int, 10 | gy_id: int, 11 | gz_id: int, 12 | adc_id: int, 13 | ext_id: int, 14 | ) -> None: 15 | self.duration = duration 16 | self.rf_id = rf_id 17 | self.gx_id = gx_id 18 | self.gy_id = gy_id 19 | self.gz_id = gz_id 20 | self.adc_id = adc_id 21 | self.ext_id = ext_id 22 | 23 | @classmethod 24 | def parse(cls, line: str, version: int, 25 | delays: dict[int, float] | None, # 1.3.x to comp duration 26 | block_duration_raster: float | None # 1.4.0 to comp duration 27 | ) -> tuple[int, Block]: 28 | """ 29 | If this function is used for 1.3.x, duration is only respects the delay 30 | event (if there is any). 
Calculate duration after finishing parsing 31 | (set to the duration of the longest event) 32 | """ 33 | assert ( 34 | (120 <= version < 140 and delays is not None) or 35 | (version >= 140 and block_duration_raster is not None) 36 | ) 37 | 38 | vals = line.split() 39 | assert len(vals) == (7 if version < 130 else 8) 40 | 41 | if version >= 140: 42 | assert block_duration_raster is not None 43 | duration = int(vals[1]) * block_duration_raster 44 | else: 45 | assert delays is not None 46 | duration = delays.get(int(vals[1]), 0.0) 47 | 48 | block_id = int(vals[0]) 49 | return block_id, cls( 50 | duration, 51 | int(vals[2]), 52 | int(vals[3]), 53 | int(vals[4]), 54 | int(vals[5]), 55 | int(vals[6]), 56 | 0 if version < 130 else int(vals[7]), 57 | ) 58 | 59 | def write(self, file, block_id: int, block_raster_time: float): 60 | file.write( 61 | f"{block_id:4d} {round(self.duration / block_raster_time):7d} " 62 | f"{self.rf_id:4d} {self.gx_id:4d} {self.gy_id:4d} " 63 | f"{self.gz_id:4d} {self.adc_id:4d} {self.ext_id:4d}\n" 64 | ) 65 | 66 | def __repr__(self) -> str: 67 | return ( 68 | f"Block(duration={self.duration}, " 69 | f"rf_id={self.rf_id}, " 70 | f"gx_id={self.gx_id}, " 71 | f"gy_id={self.gy_id}, " 72 | f"gz_id={self.gz_id}, " 73 | f"adc_id={self.adc_id}, " 74 | f"ext_id={self.ext_id})" 75 | ) 76 | 77 | 78 | def parse_blocks( 79 | lines: list[str], 80 | version: int, 81 | delays: dict[int, float] | None, 82 | block_duration_raster: float | None 83 | ) -> dict[int, Block]: 84 | tmp = {} 85 | for line in lines: 86 | key, value = Block.parse(line, version, delays, block_duration_raster) 87 | assert key > 0 and key not in tmp 88 | tmp[key] = value 89 | return tmp 90 | 91 | 92 | def write_blocks(file, blocks: dict[int, Block], block_raster_time: float): 93 | file.write( 94 | "\n[BLOCKS]\n" 95 | "# ID DUR RF GX GY GZ ADC EXT\n" 96 | ) 97 | for block_id, block in blocks.items(): 98 | block.write(file, block_id, block_raster_time) 99 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/definitons.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | class Definitions: 5 | def __init__(self, defs: dict, version: int) -> None: 6 | # Use Siemens defaults if nothing is provided by the .seq file 7 | self.grad_raster_time = float(defs.pop("GradientRasterTime", 10e-6)) 8 | self.rf_raster_time = float(defs.pop("RadiofrequencyRasterTime", 1e-6)) 9 | self.adc_raster_time = float(defs.pop("AdcRasterTime", 0.1e-6)) 10 | self.block_raster_time = float(defs.pop("BlockDurationRaster", 10e-6)) 11 | 12 | if "FOV" in defs: 13 | fov_str = defs.pop("FOV").split() 14 | fov = (float(fov_str[0]), float(fov_str[1]), float(fov_str[2])) 15 | # The pulseq spec says nothing about FOV units before 1.4 and 16 | # mandates [m] since 1.4. In reality, you can use arbitrary units 17 | # when building sequences, so we assume [m] for values < 1 and [mm] 18 | # for larger values. Should be safe because FOVs > 1 m are unrealistic.
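# For example (hypothetical values): a 1.3.x file stating "FOV 192 192 8" is rescaled below to (0.192, 0.192, 0.008), while a 1.4.0 file must already state "FOV 0.192 0.192 0.008" in meters.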
19 | if fov[0] > 1 or fov[1] > 1 or fov[2] > 1: 20 | assert version < 140, "Version 1.4 mandates FOV to be in [m]" 21 | fov = (fov[0] / 1000, fov[1] / 1000, fov[2] / 1000) 22 | self.fov = fov 23 | else: 24 | self.fov = (0.2, 0.2, 0.2) 25 | 26 | self.defs = defs 27 | 28 | @classmethod 29 | def parse(cls, lines: list[str], version: int): 30 | assert 120 <= version <= 149 31 | defs = {} 32 | 33 | for line in lines: 34 | item = line.split(maxsplit=1) 35 | assert len(item) == 2 # No support for defs without a value 36 | defs[item[0]] = item[1] 37 | 38 | if version >= 140: 39 | # Required in 1.4.0, could also use defaults if this violation of 40 | # the spec is common (but could give suprises if defaults change) 41 | assert "GradientRasterTime" in defs 42 | assert "RadiofrequencyRasterTime" in defs 43 | assert "AdcRasterTime" in defs 44 | assert "BlockDurationRaster" in defs 45 | 46 | return cls(defs, version) 47 | 48 | def write(self, file): 49 | file.write( 50 | f"\n[DEFINITIONS]\n" 51 | f"FOV {self.fov[0]} {self.fov[1]} {self.fov[2]}\n" 52 | f"GradientRasterTime {self.grad_raster_time}\n" 53 | f"RadiofrequencyRasterTime {self.rf_raster_time}\n" 54 | f"AdcRasterTime {self.adc_raster_time}\n" 55 | f"BlockDurationRaster {self.block_raster_time}\n" 56 | ) 57 | for key, value in self.defs.items(): 58 | file.write(f"{key} {value}\n") 59 | 60 | def __repr__(self) -> str: 61 | return ( 62 | f"Definitions(fov={self.fov}, " 63 | f"grad_raster_time={self.grad_raster_time}, " 64 | f"rf_raster_time={self.rf_raster_time}, " 65 | f"adc_raster_time={self.adc_raster_time}, " 66 | f"block_raster_time={self.block_raster_time}, " 67 | f"defs={self.defs})" 68 | ) 69 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/gradient.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from numpy import ndarray 3 | 4 | 5 | class Gradient: 6 | def __init__( 7 | self, 8 | amp: float, # Hz / m 9 | shape_id: int, 10 | time_id: int, 11 | delay: float # s (spec: us) 12 | ) -> None: 13 | self.amp = amp 14 | self.shape_id = shape_id 15 | self.time_id = time_id 16 | self.delay = delay 17 | 18 | @classmethod 19 | def parse(cls, line: str, version: int) -> tuple[int, Gradient]: 20 | assert 120 <= version <= 149 21 | vals = line.split() 22 | 23 | gradient_id = int(vals.pop(0)) 24 | gradient = cls( 25 | float(vals.pop(0)), 26 | int(vals.pop(0)), 27 | int(vals.pop(0)) if version >= 140 else 0, # default raster 28 | int(vals.pop(0)) * 1e-6, 29 | ) 30 | assert len(vals) == 0 31 | return gradient_id, gradient 32 | 33 | def write(self, file, grad_id: int): 34 | file.write( 35 | f"{grad_id:4d} {self.amp:12g} {self.shape_id:4d} " 36 | f"{self.time_id:4d} {round(self.delay*1e6):7d}\n" 37 | ) 38 | 39 | def get_duration(self, grad_raster_t: float, shapes: dict[int, ndarray]): 40 | if self.time_id != 0: 41 | last_sample = shapes[self.time_id][-1] 42 | else: 43 | last_sample = len(shapes[self.shape_id]) 44 | return self.delay + last_sample * grad_raster_t 45 | 46 | def __repr__(self) -> str: 47 | return ( 48 | f"Gradient(amp={self.amp}, " 49 | f"shape_id={self.shape_id}, " 50 | f"time_id={self.time_id}, " 51 | f"delay={self.delay})" 52 | ) 53 | 54 | 55 | def parse_gradients(lines: list[str], version: int) -> dict[int, Gradient]: 56 | tmp = {} 57 | for line in lines: 58 | key, value = Gradient.parse(line, version) 59 | assert key > 0 and key not in tmp 60 | tmp[key] = value 61 | return tmp 
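# For example (hypothetical values): in a 1.4.0 file the [GRADIENTS] line "3 1200.5 4 0 100" is parsed by Gradient.parse into Gradient(amp=1200.5, shape_id=4, time_id=0, delay=0.0001), i.e. the delay is converted from us to seconds.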
62 | 63 | 64 | def write_grads(file, grads: dict[int, Gradient]): 65 | file.write( 66 | "\n[GRADIENTS]\n" 67 | "# ID amp mag time delay\n" 68 | ) 69 | for grad_id, trap in grads.items(): 70 | trap.write(file, grad_id) 71 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/helpers.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import numpy as np 3 | 4 | 5 | def merge_dicts(a: dict, b: dict): 6 | common = set(a.keys()).intersection(b.keys()) 7 | if len(common) > 0: 8 | raise Exception(f"Can't merge dicts with common keys: {common}") 9 | 10 | return {**a, **b} 11 | 12 | 13 | def file_to_sections(file_name: str) -> dict[str, list[str]]: 14 | sections = {} 15 | current_section: None | list = None 16 | 17 | def new_section(name): 18 | nonlocal current_section 19 | assert not ("[" in name or "]" in name) # section name filtering error 20 | 21 | if name not in sections: 22 | sections[name] = [] 23 | current_section = sections[name] 24 | 25 | for line in open(file_name): 26 | line = line.strip() 27 | if len(line) > 0 and line[0] != "#": 28 | if line[0] == "[": 29 | new_section(line[1:-1]) 30 | else: 31 | assert current_section is not None 32 | current_section.append(line) 33 | 34 | return sections 35 | 36 | 37 | def parse_version(lines: list[str]) -> int: 38 | assert len(lines) == 3 39 | major = None 40 | minor = None 41 | revision = None 42 | 43 | # This strips suffixes like "1.3.1post1" 44 | def to_int(s: str) -> int: 45 | s = s.split()[1] 46 | pos = 0 47 | while pos < len(s) and s[pos].isdigit(): 48 | pos += 1 49 | return int(s[:pos]) 50 | 51 | for line in lines: 52 | if line.startswith("major"): 53 | major = to_int(line) 54 | elif line.startswith("minor"): 55 | minor = to_int(line) 56 | elif line.startswith("revision"): 57 | revision = to_int(line) 58 | 59 | # We checked that there are 3 lines so each has to be set exactly once 60 | assert major is not None and minor is not None and revision is not None 61 | # Version is compressed to a 3 digit int 62 | assert 0 <= major <= 9 and 0 <= minor <= 9 and 0 <= revision <= 9 63 | return major * 100 + minor * 10 + revision 64 | 65 | 66 | def write_version(file, version: int): 67 | file.write( 68 | f"\n[VERSION]\n" 69 | f"major {version // 100}\n" 70 | f"minor {version // 10 % 10}\n" 71 | f"revision {version % 10}\n" 72 | ) 73 | 74 | 75 | def parse_delays(lines: list[str], version: int) -> dict[int, float]: 76 | assert 120 <= version <= 149 77 | delays = {} 78 | 79 | for line in lines: 80 | vals = line.split() 81 | assert len(vals) == 2 82 | delay_id = int(vals[0]) 83 | duration = int(vals[1]) * 1e-6 84 | 85 | assert delay_id > 0 and delay_id not in delays 86 | delays[delay_id] = duration 87 | 88 | return delays 89 | 90 | 91 | def parse_shape(lines: list[str], version: int) -> tuple[int, np.ndarray]: 92 | assert 120 <= version <= 149 93 | assert len(lines) >= 3 # at least id, num and one sample 94 | 95 | shape_id = int(lines[0].split()[1]) 96 | count = int(lines[1].split()[1]) 97 | compressed = [float(line) for line in lines[2:]] 98 | 99 | # Uncompressed shapes are introduced with 1.4.0 but also used 100 | # for pTx pulses by Martin's pTx extension 101 | if len(compressed) == count and version >= 140: 102 | # No compression 103 | return shape_id, np.array(compressed) 104 | 105 | i = 0 106 | deriviate = [] 107 | while i < len(compressed): 108 | if i < len(compressed) - 2 and compressed[i] == 
compressed[i+1]: 109 | RLE_count = compressed[i + 2] # +2 for the samples marking RLE 110 | assert RLE_count == int(RLE_count) and RLE_count >= 0 111 | deriviate += [compressed[i]] * (int(RLE_count) + 2) 112 | i += 3 113 | else: 114 | deriviate.append(compressed[i]) 115 | i += 1 116 | 117 | assert len(deriviate) == count, ( 118 | f"Decompressed shape has len: {len(deriviate)}, expected: {count}" 119 | ) 120 | 121 | return shape_id, np.array(deriviate).cumsum() 122 | 123 | 124 | def parse_shapes(lines: list[str], version: int) -> dict[int, np.ndarray]: 125 | def is_new_shape(line: str) -> bool: 126 | return line.split()[0].lower() == "shape_id" 127 | 128 | shape_lines = [] 129 | shapes = {} 130 | 131 | def new_shape(lines): 132 | key, value = parse_shape(lines, version) 133 | assert key > 0 and key not in shapes 134 | shapes[key] = value 135 | 136 | # Read in lines and parse old shape as soon as new one begins 137 | for line in lines: 138 | if is_new_shape(line) and len(shape_lines) > 0: 139 | new_shape(shape_lines) 140 | shape_lines = [] 141 | 142 | shape_lines.append(line) 143 | 144 | if len(shape_lines) > 0: 145 | new_shape(shape_lines) 146 | 147 | return shapes 148 | 149 | 150 | def write_shapes(file, shapes: dict[int, np.ndarray]): 151 | # TODO: compression 152 | file.write("\n[SHAPES]\n") 153 | for shape_id, shape in shapes.items(): 154 | file.write(f"\nShape_ID {shape_id}\nNum_Uncompressed {len(shape)}\n") 155 | for sample in shape: 156 | file.write(f"{sample}\n") 157 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/rf.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from numpy import ndarray 3 | 4 | 5 | class Rf: 6 | def __init__( 7 | self, 8 | amp: float, 9 | mag_id: int, 10 | phase_id: int, 11 | time_id: int, 12 | delay: float, # s (spec: us) 13 | freq: float, 14 | phase: float, 15 | shim_mag_id: int, 16 | shim_phase_id: int, 17 | ) -> None: 18 | self.amp = amp 19 | self.mag_id = mag_id 20 | self.phase_id = phase_id 21 | self.time_id = time_id 22 | self.delay = delay 23 | self.freq = freq 24 | self.phase = phase 25 | self.shim_mag_id = shim_mag_id 26 | self.shim_phase_id = shim_phase_id 27 | 28 | @classmethod 29 | def parse(cls, line: str, version: int) -> tuple[int, Rf]: 30 | assert 120 <= version <= 149 31 | 32 | vals = line.split() 33 | rf_id = int(vals.pop(0)) 34 | rf = cls( 35 | float(vals.pop(0)), 36 | int(vals.pop(0)), 37 | int(vals.pop(0)), 38 | 0 if version < 140 else int(vals.pop(0)), 39 | int(vals.pop(0)) * 1e-6, 40 | float(vals.pop(0)), 41 | float(vals.pop(0)), 42 | int(vals.pop(0)) if version in [139, 145] else 0, 43 | int(vals.pop(0)) if version in [139, 145] else 0, 44 | ) 45 | assert len(vals) == 0 46 | return rf_id, rf 47 | 48 | def write(self, file, rf_id: int): 49 | file.write( 50 | f"{rf_id:4d} {self.amp:12g} {self.mag_id:4d} " 51 | f"{self.phase_id:4d} {self.time_id:4d} " 52 | f"{round(self.delay*1e6):7d} {self.freq:.6f} {self.phase:.6f}\n" 53 | ) 54 | 55 | def get_duration(self, rf_raster_time: float, shapes: dict[int, ndarray]): 56 | if self.time_id != 0: 57 | last_sample = shapes[self.time_id][-1] 58 | else: 59 | last_sample = len(shapes[self.mag_id]) 60 | return self.delay + last_sample * rf_raster_time 61 | 62 | def __repr__(self) -> str: 63 | return ( 64 | f"RF(amp={self.amp}, " 65 | f"mag_id={self.mag_id}, " 66 | f"phase_id={self.phase_id}, " 67 | f"time_id={self.time_id}, " 68 | 
f"delay={self.delay}, " 69 | f"freq={self.freq}, " 70 | f"phase={self.phase}, " 71 | f"shim_mag_id={self.shim_mag_id}, " 72 | f"shim_phase_id={self.shim_phase_id})" 73 | ) 74 | 75 | 76 | def parse_rfs(lines: list[str], version: int) -> dict[int, Rf]: 77 | tmp = {} 78 | for line in lines: 79 | key, value = Rf.parse(line, version) 80 | assert key > 0 and key not in tmp 81 | tmp[key] = value 82 | return tmp 83 | 84 | 85 | def write_rfs(file, rfs: dict[int, Rf]): 86 | file.write( 87 | "\n[RF]\n" 88 | "# ID amp mag phase time delay freq phase\n" 89 | ) 90 | for rf_id, rf in rfs.items(): 91 | rf.write(file, rf_id) 92 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/pulseq_file/trap.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | 3 | 4 | class Trap: 5 | def __init__( 6 | self, 7 | amp: float, # Hz / m 8 | rise: float, # s (spec: us) 9 | flat: float, # s (spec: us) 10 | fall: float, # s (spec: us) 11 | delay: float, # s (spec: us) 12 | ) -> None: 13 | self.amp = amp 14 | self.rise = rise 15 | self.flat = flat 16 | self.fall = fall 17 | self.delay = delay 18 | 19 | @classmethod 20 | def parse(cls, line: str, version: int) -> tuple[int, Trap]: 21 | assert 120 <= version <= 149 22 | vals = line.split() 23 | assert len(vals) == 6 24 | grad_id = int(vals[0]) 25 | 26 | return grad_id, cls( 27 | float(vals[1]), 28 | int(vals[2]) * 1e-6, 29 | int(vals[3]) * 1e-6, 30 | int(vals[4]) * 1e-6, 31 | int(vals[5]) * 1e-6, 32 | ) 33 | 34 | def write(self, file, grad_id: int): 35 | file.write( 36 | f"{grad_id:4d} {self.amp:12g} {round(self.rise*1e6):7d} " 37 | f"{round(self.flat*1e6):7d} {round(self.fall*1e6):7d} " 38 | f"{round(self.delay*1e6):7d}\n" 39 | ) 40 | 41 | def get_duration(self): 42 | return self.delay + self.rise + self.flat + self.fall 43 | 44 | def __repr__(self) -> str: 45 | return ( 46 | f"TRAP(amp={self.amp}, " 47 | f"rise={self.rise}, " 48 | f"flat={self.flat}, " 49 | f"fall={self.fall}, " 50 | f"delay={self.delay})" 51 | ) 52 | 53 | 54 | def parse_traps(lines: list[str], version: int) -> dict[int, Trap]: 55 | tmp = {} 56 | for line in lines: 57 | key, value = Trap.parse(line, version) 58 | assert key > 0 and key not in tmp 59 | tmp[key] = value 60 | return tmp 61 | 62 | 63 | def write_traps(file, traps: dict[int, Trap]): 64 | file.write( 65 | "\n[TRAP]\n" 66 | "# ID amp rise flat fall delay\n" 67 | ) 68 | for grad_id, trap in traps.items(): 69 | trap.write(file, grad_id) 70 | -------------------------------------------------------------------------------- /python/MRzeroCore/pulseq/pulseq_loader/spoiler.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | import numpy as np 3 | from .pulseq_file import PulseqFile, Block 4 | from .helpers import total_gradm 5 | 6 | 7 | class Spoiler: 8 | def __init__( 9 | self, 10 | duration: float, 11 | gradm: np.ndarray 12 | ) -> None: 13 | self.duration = duration 14 | self.gradm = gradm 15 | 16 | @classmethod 17 | def parse(cls, block: Block, pulseq: PulseqFile) -> Spoiler: 18 | fov = pulseq.definitions.fov 19 | gradm = np.zeros(3) 20 | if block.gx_id != 0: 21 | gradm[0] = total_gradm(pulseq.grads[block.gx_id], pulseq) * fov[0] 22 | if block.gy_id != 0: 23 | gradm[1] = total_gradm(pulseq.grads[block.gy_id], pulseq) * fov[1] 24 | if block.gz_id != 0: 25 | gradm[2] = total_gradm(pulseq.grads[block.gz_id], pulseq) * fov[2] 26 | 27 | return 
cls(block.duration, gradm) 28 | 29 | def __repr__(self) -> str: 30 | return ( 31 | f"Spoiler(duration={self.duration*1e3:.1f}ms, " 32 | f"gradm={self.gradm})" 33 | ) 34 | -------------------------------------------------------------------------------- /python/MRzeroCore/reconstruction.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from numpy import pi 3 | import torch 4 | 5 | 6 | def reco_adjoint(signal: torch.Tensor, 7 | kspace: torch.Tensor, 8 | resolution: tuple[int, int, int] | float | None = None, 9 | FOV: tuple[float, float, float] | float | None = None, 10 | return_multicoil: bool = False, 11 | ) -> torch.Tensor: 12 | """Adjoint reconstruction of the signal, based on a provided kspace. 13 | 14 | Parameters 15 | ---------- 16 | signal : torch.Tensor 17 | A complex tensor containing the signal, 18 | shape (sample_count, coil_count) 19 | kspace : torch.Tensor 20 | A real tensor of shape (sample_count, 4) for the kspace trajectory 21 | resolution : (int, int, int) | float | None 22 | The resolution of the reconstruction. Can be either provided directly 23 | as tuple or set to None, in which case the resolution will be derived 24 | from the k-space (currently only for cartesian trajectories). A single 25 | float value will be used as factor for a derived resolution. 26 | FOV : (float, float, float) | float | None 27 | Because the adjoint reconstruction adapts to the k-space used 28 | for measurement, scaling gradients will not directly change the FOV of 29 | the reconstruction. All SimData phantoms have a normalized size of 30 | (1, 1, 1). Similar to the resolution, a value of None will 31 | automatically derive the FOV of the sequence based on the kspace. A 32 | float value can be used to scale this derived FOV. 33 | return_multicoil : bool 34 | Specifies if coils should be combined or returned separately. 35 | 36 | Returns 37 | ------- 38 | torch.Tensor 39 | A complex tensor with the reconstructed image, the shape is given by 40 | the resolution. 
41 | """ 42 | res_scale = 1.0 43 | fov_scale = 1.0 44 | if isinstance(resolution, float): 45 | res_scale = resolution 46 | resolution = None 47 | if isinstance(FOV, float): 48 | fov_scale = FOV 49 | FOV = None 50 | 51 | # Automatic detection of FOV - NOTE: only works for cartesian k-spaces 52 | # We assume that there is a sample at 0, 0 and calculate the FOV 53 | # based on the distance to the nearest samples in x, y and z direction 54 | if FOV is None: 55 | def fov(t: torch.Tensor) -> float: 56 | t = t[t > 1e-3] 57 | return 1.0 if t.numel() == 0 else float(t.min()) 58 | tmp = kspace[:, :3].abs() 59 | fov_x = fov_scale / fov(tmp[:, 0]) 60 | fov_y = fov_scale / fov(tmp[:, 1]) 61 | fov_z = fov_scale / fov(tmp[:, 2]) 62 | FOV = (fov_x, fov_y, fov_z) 63 | print(f"Detected FOV: {FOV}") 64 | 65 | # Automatic detection of resolution 66 | if resolution is None: 67 | def res(scale: float, fov: float, t: torch.Tensor) -> int: 68 | tmp = (scale * (fov * (t.max() - t.min()) + 1)).round() 69 | return max(int(tmp), 1) 70 | res_x = res(res_scale, FOV[0], kspace[:, 0]) 71 | res_y = res(res_scale, FOV[1], kspace[:, 1]) 72 | res_z = res(res_scale, FOV[2], kspace[:, 2]) 73 | resolution = (res_x, res_y, res_z) 74 | print(f"Detected resolution: {resolution}") 75 | 76 | # Same grid as defined in SimData 77 | pos_x, pos_y, pos_z = torch.meshgrid( 78 | FOV[0] * torch.fft.fftshift(torch.fft.fftfreq(resolution[0], device=kspace.device)), 79 | FOV[1] * torch.fft.fftshift(torch.fft.fftfreq(resolution[1], device=kspace.device)), 80 | FOV[2] * torch.fft.fftshift(torch.fft.fftfreq(resolution[2], device=kspace.device)), 81 | ) 82 | 83 | voxel_pos = torch.stack([ 84 | pos_x.flatten(), 85 | pos_y.flatten(), 86 | pos_z.flatten() 87 | ], dim=1).t() 88 | 89 | NCoils = signal.shape[1] 90 | # assert NCoils == 1, "reconstruct currently does not support multicoil" 91 | 92 | # (Samples, 3) x (3, Voxels) 93 | phase = kspace[:, :3] @ voxel_pos 94 | # (Samples, Voxels): Rotation of all voxels at every event 95 | rot = torch.exp(-2j*pi * phase) # Matches definition of forward DFT 96 | 97 | NCoils = signal.shape[1] 98 | 99 | if return_multicoil: 100 | return (signal.t() @ rot).view((NCoils, *resolution)) 101 | elif NCoils == 1: 102 | return (signal.t() @ rot).view(resolution) 103 | else: 104 | return torch.sqrt(((torch.abs(signal.t() @ rot))**2).sum(0)).view(resolution) 105 | -------------------------------------------------------------------------------- /python/MRzeroCore/simulation/pre_pass.py: -------------------------------------------------------------------------------- 1 | from __future__ import annotations 2 | from warnings import warn 3 | import torch 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | from ..sequence import Sequence 7 | from ..phantom.sim_data import SimData 8 | from MRzeroCore import _prepass 9 | 10 | 11 | def compute_graph( 12 | seq: Sequence, 13 | data: SimData, 14 | max_state_count: int = 200, 15 | min_state_mag: float = 1e-4 16 | ) -> Graph: 17 | """Like :func:`pre_pass.compute_graph_ext`, but computes some args from :attr:`data`.""" 18 | return compute_graph_ext( 19 | seq, 20 | float(torch.mean(data.T1)), 21 | float(torch.mean(data.T2)), 22 | float(torch.mean(data.T2dash)), 23 | float(torch.mean(data.D)), 24 | max_state_count, 25 | min_state_mag, 26 | data.nyquist.tolist(), 27 | data.size.tolist(), 28 | data.avg_B1_trig 29 | ) 30 | 31 | 32 | def compute_graph_ext( 33 | seq: Sequence, 34 | T1: float, 35 | T2: float, 36 | T2dash: float, 37 | D: float, 38 | max_state_count: int = 200, 39 | 
min_state_mag: float = 1e-4, 40 | nyquist: tuple[float, float, float] = (float('inf'), float('inf'), float('inf')), 41 | size: tuple[float, float, float] = (1.0, 1.0, 1.0), 42 | avg_b1_trig: torch.Tensor | None = None, 43 | ) -> Graph: 44 | """Compute the PDG from the sequence and phantom data provided. 45 | 46 | Parameters 47 | ---------- 48 | seq : Sequence 49 | The sequence that produces the returned PDG 50 | T1 : float 51 | Simulated T1 relaxation time [s] 52 | T2 : float 53 | Simulated T2 relaxation time [s] 54 | T2' : float 55 | Simulated T2' relaxation time [s] 56 | D : float 57 | Simulated diffusion coefficient [$10^{-3} mm^2 / s$] 58 | max_state_count : int 59 | Maximum state count. If more states are produced, the weakest are omitted. 60 | min_state_mag : float 61 | Minimum magnetization of a state to be simulated. 62 | nyquist : (float, float, float) 63 | Nyquist frequency of simulated data. Signal is cut off for higher frequencies. 64 | size : (float, float, float) 65 | Size of the simulated phantom. Used for scaling grads for normalized seqs. 66 | avg_b1_trig : torch.Tensor | None 67 | Tensor containing the B1-averaged trigonometry used in the rotation matrix. 68 | Default values are used if `None` is passed. 69 | """ 70 | if min_state_mag < 0: 71 | min_state_mag = 0 72 | 73 | if avg_b1_trig is None: 74 | angle = torch.linspace(0, 2*np.pi, 361) 75 | avg_b1_trig = torch.stack([ 76 | torch.sin(angle), 77 | torch.cos(angle), 78 | torch.sin(angle/2)**2 79 | ], dim=1).type(torch.float32) 80 | 81 | if any(rep.pulse.angle > 2*np.pi for rep in seq): 82 | warn("Some flip angles are > 360°, inhomogeneities produced by extra rotations are ignored by the pre-pass B1 estimation") 83 | 84 | return Graph(_prepass.compute_graph( 85 | seq, 86 | T1, T2, T2dash, D, 87 | max_state_count, min_state_mag, 88 | nyquist, size, seq.normalized_grads, 89 | avg_b1_trig 90 | )) 91 | 92 | 93 | class Graph(list): 94 | """:class:`Graph` is a wrapper around the list of states returned by the prepass.""" 95 | def __init__(self, graph: list[list[_prepass.PyDistribution]]) -> None: 96 | super().__init__(graph) 97 | 98 | def plot(self, 99 | transversal_mag: bool = True, 100 | dephasing: str = "tau", 101 | color: str = "latent signal", 102 | log_color: bool = True): 103 | """Visualize the graph. 
104 | 105 | Parameters 106 | ---------- 107 | transversal_mag : bool 108 | If true, show only + states, otherwise z(0) 109 | dephasing : str 110 | Use one of ``['k_x', 'k_y', 'k_z', 'tau']`` dephasing as the 111 | y-position of a state in the scatter plot 112 | color : str 113 | Use one of ``['abs(mag)', 'phase(mag)', 'latent signal', 'signal', 114 | 'latent signal unormalized', 'emitted signal']`` 115 | as the color of a state in the scatter plot 116 | log_color : bool 117 | If true, use the logarithm of the chosen property for coloring 118 | """ 119 | data = [] 120 | kt_idx = {"k_x": 0, "k_y": 1, "k_z": 2, "tau": 3}[dephasing] 121 | 122 | def extract(state: _prepass.PyDistribution): 123 | if color == "abs(mag)": 124 | value = np.abs(state.prepass_mag) 125 | elif color == "phase(mag)": 126 | value = np.angle(state.prepass_mag) 127 | elif color == "latent signal": 128 | value = state.latent_signal 129 | elif color == "signal": 130 | value = state.signal 131 | elif color == "latent signal unormalized": 132 | value = state.latent_signal_unormalized 133 | elif color == "emitted signal": 134 | value = state.emitted_signal 135 | else: 136 | raise AttributeError(f"Unknown property color={color}") 137 | if log_color: 138 | value = np.log10(np.abs(value) + 1e-7) 139 | return value 140 | 141 | for r, rep in enumerate(self): 142 | for state in rep: 143 | if transversal_mag == (state.dist_type == "+"): 144 | data.append(( 145 | r, 146 | state.prepass_kt_vec[kt_idx], 147 | extract(state), 148 | )) 149 | 150 | data.sort(key=lambda d: d[2]) 151 | data = np.asarray(data) 152 | 153 | plt.scatter(data[:, 0], data[:, 1], c=data[:, 2], s=20) 154 | plt.xlabel("Repetition") 155 | plt.ylabel(f"${dephasing}$ - Dephasing") 156 | if log_color: 157 | plt.colorbar(label="log. " + color) 158 | else: 159 | plt.colorbar(label=color) 160 | --------------------------------------------------------------------------------
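The files above are easiest to understand from their entry points. A minimal usage sketch of the .seq loader; "flash.seq" is a placeholder file name, and the deep import path is only one way to reach the class (it may also be re-exported higher up in the package):

    from MRzeroCore.pulseq.pulseq_loader.pulseq_file import PulseqFile

    seq_file = PulseqFile("flash.seq")       # parses [VERSION], [DEFINITIONS], [BLOCKS], RF / gradient / ADC events and shapes
    print(seq_file.definitions)              # raster times and FOV, with Siemens defaults filled in where missing
    seq_file.plot(figsize=(10, 6))           # RF, ADC and gradient overview of all blocks
    seq_file.save("flash_reexported.seq")    # always re-exported in the 1.4.0 format

For simulation and reconstruction, compute_graph builds the phase distribution graph (PDG) from a sequence and phantom, and reco_adjoint turns a simulated signal back into an image. A sketch, assuming that seq (a Sequence), phantom (a SimData), the simulated signal and its (samples, 4) trajectory kspace already exist, and that both helpers are re-exported at the package level (otherwise import them from MRzeroCore.simulation.pre_pass and MRzeroCore.reconstruction):

    import MRzeroCore as mr0

    graph = mr0.compute_graph(seq, phantom, max_state_count=200, min_state_mag=1e-4)
    graph.plot(dephasing="tau", color="latent signal")   # scatter plot of the PDG states
    image = mr0.reco_adjoint(signal, kspace)             # FOV and resolution are auto-detected for cartesian trajectories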