├── .devcontainer └── devcontainer.json ├── .gitattributes ├── .github └── workflows │ └── docker-image.yml ├── .gitignore ├── .vscode ├── launch.json └── tasks.json ├── LICENSE ├── RunClientServerRecon.ipynb ├── analyzeflow.py ├── bartfire.py ├── client.py ├── connection.py ├── constants.py ├── custom ├── custom.dockerfile ├── filter.json └── filter.py ├── dicom2mrd.py ├── doc ├── MRD Streaming Format.md ├── devcontainers.md └── docker.md ├── docker ├── Dockerfile ├── alpine │ └── Dockerfile ├── bart │ └── Dockerfile ├── docker_tar_to_chroot.sh ├── docker_to_chroot.bat ├── docker_to_chroot.sh └── pytorch │ ├── Dockerfile │ ├── Dockerfile_standalone │ └── requirements.txt ├── environment.yml ├── environment_windows.yml ├── generate_cartesian_shepp_logan_dataset.py ├── invertcontrast.json ├── invertcontrast.py ├── main.py ├── mrd2dicom.py ├── mrd2gif.py ├── mrdhelper.py ├── readme.md ├── report.py ├── server.py ├── simplefft.py ├── start-fire-python-server-with-data-storage.sh ├── start-fire-python-server.sh └── sync-code-and-start-fire-python-server.sh /.devcontainer/devcontainer.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "python-ismrmrd-server-devcon", 3 | 4 | "build": { 5 | "dockerfile": "../docker/Dockerfile", 6 | "target": "python-mrd-devcontainer", 7 | "context": "../", 8 | "cacheFrom": [ 9 | "docker.io/kspacekelvin/fire-python-devcon", 10 | "docker.io/kspacekelvin/fire-python:buildcache"] 11 | }, 12 | 13 | // To enable your local GPUs in container if they are on enabled by default 14 | // "runArgs": [ "--gpus=all" ], 15 | 16 | // Use 'forwardPorts' to make a list of ports inside the container available locally. 17 | "forwardPorts": [ 18 | 9002 19 | ], 20 | 21 | // Default setting for Windows changes files to CRLF, so match this to avoid all files 22 | // being marked as changed. 
Could be disabled for non-Windows hosts 23 | "postCreateCommand": "git config --global core.autocrlf true && pip install ipykernel", 24 | 25 | "customizations": { 26 | "vscode": { 27 | // Add the IDs of extensions you want installed when the container is created. 28 | "extensions": [ 29 | "ms-python.debugpy", 30 | "ms-python.python", 31 | "ms-python.vscode-pylance", 32 | "ms-toolsai.jupyter" 33 | ], 34 | 35 | "settings": { 36 | "terminal.integrated.defaultProfile.linux": "bash" 37 | } 38 | } 39 | } 40 | } -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | *.h5 filter=lfs diff=lfs merge=lfs -text 2 | -------------------------------------------------------------------------------- /.github/workflows/docker-image.yml: -------------------------------------------------------------------------------- 1 | name: ci 2 | 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | - 'feature/*' 9 | tags: 10 | - 'v*.*.*' 11 | 12 | jobs: 13 | build_devcontainer: 14 | runs-on: ubuntu-latest 15 | steps: 16 | - 17 | name: Checkout code 18 | uses: actions/checkout@v4 19 | - 20 | name: Set up QEMU 21 | uses: docker/setup-qemu-action@v3 22 | - 23 | name: Set up Docker Buildx 24 | uses: docker/setup-buildx-action@v3 25 | - 26 | name: Login to Docker Hub 27 | uses: docker/login-action@v3 28 | with: 29 | username: ${{ secrets.DOCKERHUB_USERNAME }} 30 | password: ${{ secrets.DOCKERHUB_TOKEN }} 31 | - 32 | name: Pre-build dev container image 33 | uses: devcontainers/ci@v0.3 34 | with: 35 | imageName: docker.io/kspacekelvin/fire-python-devcon 36 | cacheFrom: docker.io/kspacekelvin/fire-python-devcon 37 | push: always 38 | docker: 39 | runs-on: ubuntu-latest 40 | steps: 41 | - 42 | name: Set up QEMU 43 | uses: docker/setup-qemu-action@v3 44 | - 45 | name: Set up Docker Buildx 46 | uses: docker/setup-buildx-action@v3 47 | - 48 | name: Login to Docker Hub 49 | 
uses: docker/login-action@v3 50 | with: 51 | username: ${{ secrets.DOCKERHUB_USERNAME }} 52 | password: ${{ secrets.DOCKERHUB_TOKEN }} 53 | - 54 | name: Make GitHub branch name compatible with Docker tags 55 | id: extract_branch 56 | run: | 57 | FULL_BRANCH=${GITHUB_REF#refs/heads/} 58 | SAFE_BRANCH=${FULL_BRANCH//\//-} 59 | echo "SAFE_BRANCH=${SAFE_BRANCH}" >> $GITHUB_ENV 60 | - 61 | name: Build and push 62 | uses: docker/build-push-action@v6 63 | with: 64 | file: docker/Dockerfile 65 | cache-from: type=registry,ref=kspacekelvin/fire-python:buildcache 66 | cache-to: type=registry,ref=kspacekelvin/fire-python:buildcache,mode=max 67 | push: true 68 | tags: | 69 | kspacekelvin/fire-python:latest 70 | kspacekelvin/fire-python:${{ env.SAFE_BRANCH }} -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *~ 2 | *.pyc 3 | *.log 4 | .idea 5 | .virtualenv 6 | *.tar 7 | *.zip 8 | data/ -------------------------------------------------------------------------------- /.vscode/launch.json: -------------------------------------------------------------------------------- 1 | { 2 | // Use IntelliSense to learn about possible attributes. 3 | // Hover to view descriptions of existing attributes. 
4 | // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 5 | "version": "0.2.0", 6 | "configurations": [ 7 | // Start server 8 | { 9 | "name": "Start server", 10 | "type": "debugpy", 11 | "request": "launch", 12 | "program": "${workspaceFolder}/main.py", 13 | "args": [ 14 | "-p", "9020", 15 | // "-v", 16 | // "-s" 17 | ], 18 | "console": "integratedTerminal", 19 | "consoleTitle": "Server", 20 | "justMyCode": true, 21 | }, 22 | 23 | // Run client 24 | { 25 | "name": "Run client", 26 | "type": "debugpy", 27 | "request": "launch", 28 | "program": "${workspaceFolder}/client.py", 29 | "args": [ 30 | "${workspaceFolder}/data/shepplogan_raw.mrd", 31 | "-p", "9020", 32 | "-c", "invertcontrast", 33 | "-o", "${workspaceFolder}/data/shepplogan_recon.mrd" 34 | ], 35 | "console": "integratedTerminal", 36 | "consoleTitle": "Client", 37 | "justMyCode": true, 38 | }, 39 | ], 40 | "compounds": 41 | [ 42 | { 43 | "name": "Server + client", 44 | "configurations": ["Start server", "Run client"] 45 | }, 46 | ] 47 | 48 | } 49 | -------------------------------------------------------------------------------- /.vscode/tasks.json: -------------------------------------------------------------------------------- 1 | { 2 | // See https://go.microsoft.com/fwlink/?LinkId=733558 3 | // for the documentation about the tasks.json format 4 | "version": "2.0.0", 5 | "tasks": [ 6 | // Generate raw data using generate_cartesian_shepp_logan_dataset.py 7 | { 8 | "label": "Generate raw data", 9 | "type": "shell", 10 | "command": "python3", 11 | "args": [ 12 | "${workspaceRoot}/generate_cartesian_shepp_logan_dataset.py", 13 | "-o", "${workspaceRoot}/data/shepplogan_raw.mrd", 14 | ], 15 | "presentation": { 16 | "showReuseMessage": false, 17 | "focus": false, 18 | "group": "client", 19 | }, 20 | "problemMatcher": [] 21 | }, 22 | ] 23 | } -------------------------------------------------------------------------------- /LICENSE: 
-------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2024 Kelvin Chow 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /analyzeflow.py: -------------------------------------------------------------------------------- 1 | import ismrmrd 2 | import os 3 | import itertools 4 | import logging 5 | import traceback 6 | import numpy as np 7 | import numpy.fft as fft 8 | import base64 9 | import re 10 | import mrdhelper 11 | import constants 12 | from time import perf_counter 13 | 14 | # Folder for debug output files 15 | debugFolder = "/tmp/share/debug" 16 | 17 | def process(connection, config, mrdHeader): 18 | logging.info("Config: \n%s", config) 19 | 20 | # mrdHeader should be xml formatted MRD header, but may be a string 21 | # if it failed conversion earlier 22 | try: 23 | # Disabled due to incompatibility between PyXB and Python 3.8: 24 | # https://github.com/pabigot/pyxb/issues/123 25 | # # logging.info("MRD header: \n%s", mrdHeader.toxml('utf-8')) 26 | 27 | logging.info("Incoming dataset contains %d encodings", len(mrdHeader.encoding)) 28 | logging.info("First encoding is of type '%s', with a matrix size of (%s x %s x %s) and a field of view of (%s x %s x %s)mm^3", 29 | mrdHeader.encoding[0].trajectory, 30 | mrdHeader.encoding[0].encodedSpace.matrixSize.x, 31 | mrdHeader.encoding[0].encodedSpace.matrixSize.y, 32 | mrdHeader.encoding[0].encodedSpace.matrixSize.z, 33 | mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.x, 34 | mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.y, 35 | mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.z) 36 | 37 | except: 38 | logging.info("Improperly formatted MRD header: \n%s", mrdHeader) 39 | 40 | # Continuously parse incoming data parsed from MRD messages 41 | imgGroup = [] 42 | waveformGroup = [] 43 | try: 44 | for item in connection: 45 | # ---------------------------------------------------------- 46 | # Image data messages 47 | # ---------------------------------------------------------- 48 | if isinstance(item, ismrmrd.Image): 49 | # Only process phase images 
50 | if item.image_type is ismrmrd.IMTYPE_PHASE: 51 | imgGroup.append(item) 52 | else: 53 | connection.send_image(item) 54 | continue 55 | 56 | # ---------------------------------------------------------- 57 | # Waveform data messages 58 | # ---------------------------------------------------------- 59 | elif isinstance(item, ismrmrd.Waveform): 60 | waveformGroup.append(item) 61 | 62 | # ---------------------------------------------------------- 63 | # Ignore raw k-space data 64 | # ---------------------------------------------------------- 65 | elif isinstance(item, ismrmrd.Acquisition): 66 | strWarn = "Received an ismrmrd.Acquisition which is ignored by this analysis" 67 | logging.warning(strWarn) 68 | connection.send_logging(constants.MRD_LOGGING_INFO, strWarn) 69 | 70 | elif item is None: 71 | break 72 | 73 | else: 74 | logging.error("Unsupported data type %s", type(item).__name__) 75 | 76 | # Extract raw ECG waveform data. Basic sorting to make sure that data 77 | # is time-ordered, but no additional checking for missing data. 78 | # ecgData has shape (5 x timepoints) 79 | if len(waveformGroup) > 0: 80 | waveformGroup.sort(key = lambda item: item.time_stamp) 81 | ecgData = [item.data for item in waveformGroup if item.waveform_id == 0] 82 | if len(ecgData) > 0: 83 | ecgData = np.concatenate(ecgData,1) 84 | 85 | # Process any remaining groups of raw or image data. This can 86 | # happen if the trigger condition for these groups are not met. 87 | # This is also a fallback for handling image data, as the last 88 | # image in a series is typically not separately flagged. 
89 | if len(imgGroup) > 0: 90 | logging.info("Processing a group of images (untriggered)") 91 | image = process_image(imgGroup, connection, config, mrdHeader) 92 | logging.debug("Sending images to client") 93 | connection.send_image(image) 94 | imgGroup = [] 95 | 96 | except Exception as e: 97 | logging.error(traceback.format_exc()) 98 | connection.send_logging(constants.MRD_LOGGING_ERROR, traceback.format_exc()) 99 | 100 | finally: 101 | connection.send_close() 102 | 103 | def process_image(imgGroup, connection, config, mrdHeader): 104 | if len(imgGroup) == 0: 105 | return [] 106 | 107 | logging.info(f'-----------------------------------------------') 108 | logging.info(f' process_image called with {len(imgGroup)} images') 109 | logging.info(f'-----------------------------------------------') 110 | 111 | # Start timer 112 | tic = perf_counter() 113 | 114 | # Create folder, if necessary 115 | if not os.path.exists(debugFolder): 116 | os.makedirs(debugFolder) 117 | logging.debug("Created folder " + debugFolder + " for debug output files") 118 | 119 | logging.debug("Processing data with %d images of type %s", len(imgGroup), ismrmrd.get_dtype_from_data_type(imgGroup[0].data_type)) 120 | 121 | # Display MetaAttributes for first image 122 | tmpMeta = ismrmrd.Meta.deserialize(imgGroup[0].attribute_string) 123 | logging.debug("MetaAttributes[0]: %s", ismrmrd.Meta.serialize(tmpMeta)) 124 | 125 | # Optional serialization of ICE MiniHeader 126 | if 'IceMiniHead' in tmpMeta: 127 | logging.debug("IceMiniHead[0]: %s", base64.b64decode(tmpMeta['IceMiniHead']).decode('utf-8')) 128 | 129 | # Extract some indices for the images 130 | slice = [img.slice for img in imgGroup] 131 | phase = [img.phase for img in imgGroup] 132 | 133 | # Process each group of venc directions separately 134 | unique_venc_dir = np.unique([ismrmrd.Meta.deserialize(img.attribute_string)['FlowDirDisplay'] for img in imgGroup]) 135 | 136 | # Measure processing time 137 | toc = perf_counter() 138 | 
strProcessTime = "Total processing time: %.2f ms" % ((toc-tic)*1000.0) 139 | logging.info(strProcessTime) 140 | 141 | # Send this as a text message back to the client 142 | connection.send_logging(constants.MRD_LOGGING_INFO, strProcessTime) 143 | 144 | # Start the phase images at series 10. When interpreted by FIRE, images 145 | # with the same image_series_index are kept in the same series, but the 146 | # absolute series number isn't used and can be arbitrary 147 | last_series = 10 148 | imagesOut = [] 149 | for venc_dir in unique_venc_dir: 150 | # data array has dimensions [row col sli phs], i.e. [y x sli phs] 151 | # info lists has dimensions [sli phs] 152 | data = np.zeros((imgGroup[0].data.shape[2], imgGroup[0].data.shape[3], max(slice)+1, max(phase)+1), imgGroup[0].data.dtype) 153 | head = [[None]*(max(phase)+1) for _ in range(max(slice)+1)] 154 | meta = [[None]*(max(phase)+1) for _ in range(max(slice)+1)] 155 | 156 | for img, sli, phs in zip(imgGroup, slice, phase): 157 | if ismrmrd.Meta.deserialize(img.attribute_string)['FlowDirDisplay'] == venc_dir: 158 | # print("sli phs", sli, phs) 159 | data[:,:,sli,phs] = img.data 160 | head[sli][phs] = img.getHead() 161 | meta[sli][phs] = ismrmrd.Meta.deserialize(img.attribute_string) 162 | 163 | logging.debug("Phase data with venc encoding %s is size %s" % (venc_dir, data.shape,)) 164 | np.save(debugFolder + "/" + "data_" + venc_dir + ".npy", data) 165 | 166 | # Mask out data with high mean temporal diff 167 | threshold = 250 168 | data_meandiff = np.mean(np.abs(np.diff(data,3)),3) 169 | data_masked = data 170 | data_masked[(data_meandiff > threshold)] = 2048 171 | np.save(debugFolder + "/" + "data_masked_" + venc_dir + ".npy", data_masked) 172 | 173 | # Determine max value (12 or 16 bit) 174 | BitsStored = 12 175 | if (mrdhelper.get_userParameterLong_value(mrdHeader, "BitsStored") is not None): 176 | BitsStored = mrdhelper.get_userParameterLong_value(mrdHeader, "BitsStored") 177 | maxVal = 2**BitsStored - 1 178 | 
179 | # Normalize and convert to int16 180 | data_masked = (data_masked.astype(np.float64) - 2048)*maxVal/2048 181 | data_masked = np.around(data_masked).astype(np.int16) 182 | 183 | # Re-slice back into 2D images 184 | for sli in range(data_masked.shape[2]): 185 | for phs in range(data_masked.shape[3]): 186 | # Create new MRD instance for the processed image 187 | # data has shape [y x sli phs] 188 | # from_array() should be called with 'transpose=False' to avoid warnings, and when called 189 | # with this option, can take input as: [cha z y x], [z y x], or [y x] 190 | tmpImg = ismrmrd.Image.from_array(data_masked[...,sli,phs], transpose=False) 191 | 192 | # Set the header information 193 | tmpHead = head[sli][phs] 194 | tmpHead.data_type = tmpImg.getHead().data_type 195 | tmpHead.image_index = phs + sli*data_masked.shape[3] 196 | tmpHead.image_series_index = last_series 197 | tmpImg.setHead(tmpHead) 198 | 199 | # Set ISMRMRD Meta Attributes 200 | tmpMeta = meta[sli][phs] 201 | tmpMeta['DataRole'] = 'Image' 202 | tmpMeta['ImageProcessingHistory'] = ['FIRE', 'PYTHON'] 203 | tmpMeta['WindowCenter'] = str((maxVal+1)/2) 204 | tmpMeta['WindowWidth'] = str((maxVal+1)) 205 | tmpMeta['Keep_image_geometry'] = 1 206 | 207 | # Add image orientation directions to MetaAttributes if not already present 208 | if tmpMeta.get('ImageRowDir') is None: 209 | tmpMeta['ImageRowDir'] = ["{:.18f}".format(tmpHead.read_dir[0]), "{:.18f}".format(tmpHead.read_dir[1]), "{:.18f}".format(tmpHead.read_dir[2])] 210 | 211 | if tmpMeta.get('ImageColumnDir') is None: 212 | tmpMeta['ImageColumnDir'] = ["{:.18f}".format(tmpHead.phase_dir[0]), "{:.18f}".format(tmpHead.phase_dir[1]), "{:.18f}".format(tmpHead.phase_dir[2])] 213 | 214 | xml = tmpMeta.serialize() 215 | logging.debug("Image MetaAttributes: %s", xml) 216 | tmpImg.attribute_string = xml 217 | imagesOut.append(tmpImg) 218 | 219 | last_series += 1 220 | return imagesOut 221 | 
-------------------------------------------------------------------------------- /bartfire.py: -------------------------------------------------------------------------------- 1 | import ismrmrd 2 | import os 3 | import logging 4 | import traceback 5 | import numpy as np 6 | import ctypes 7 | import constants 8 | import mrdhelper 9 | import tempfile 10 | from bart import bart 11 | 12 | # Folder for debug output files 13 | debugFolder = "/tmp/share/debug" 14 | 15 | def process(connection, config, metadata): 16 | logging.info("Config: \n%s", config) 17 | 18 | # Metadata should be MRD formatted header, but may be a string 19 | # if it failed conversion earlier 20 | try: 21 | # Disabled due to incompatibility between PyXB and Python 3.8: 22 | # https://github.com/pabigot/pyxb/issues/123 23 | # # logging.info("Metadata: \n%s", metadata.toxml('utf-8')) 24 | 25 | logging.info("Incoming dataset contains %d encodings", len(metadata.encoding)) 26 | logging.info("First encoding is of type '%s', with a matrix size of (%s x %s x %s) and a field of view of (%s x %s x %s)mm^3", 27 | metadata.encoding[0].trajectory, 28 | metadata.encoding[0].encodedSpace.matrixSize.x, 29 | metadata.encoding[0].encodedSpace.matrixSize.y, 30 | metadata.encoding[0].encodedSpace.matrixSize.z, 31 | metadata.encoding[0].encodedSpace.fieldOfView_mm.x, 32 | metadata.encoding[0].encodedSpace.fieldOfView_mm.y, 33 | metadata.encoding[0].encodedSpace.fieldOfView_mm.z) 34 | 35 | except: 36 | logging.info("Improperly formatted metadata: \n%s", metadata) 37 | 38 | # Continuously parse incoming data parsed from MRD messages 39 | acqGroup = [] 40 | try: 41 | for item in connection: 42 | # ---------------------------------------------------------- 43 | # Raw k-space data messages 44 | # ---------------------------------------------------------- 45 | if isinstance(item, ismrmrd.Acquisition): 46 | # Accumulate all imaging readouts in a group 47 | if (not item.is_flag_set(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) and 48 | not 
item.is_flag_set(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION) and 49 | not item.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA)): 50 | acqGroup.append(item) 51 | 52 | # When this criteria is met, run process_raw() on the accumulated 53 | # data, which returns images that are sent back to the client. 54 | if item.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE): 55 | logging.info("Processing a group of k-space data") 56 | image = process_raw(acqGroup, config, metadata) 57 | connection.send_image(image) 58 | acqGroup = [] 59 | 60 | # ---------------------------------------------------------- 61 | # Image and waveform data messages are not supported 62 | # ---------------------------------------------------------- 63 | elif isinstance(item, ismrmrd.Image): 64 | logging.info("Received an image, but this is not supported -- discarding") 65 | continue 66 | 67 | elif isinstance(item, ismrmrd.Waveform): 68 | logging.info("Received a waveform, but this is not supported -- discarding") 69 | continue 70 | 71 | elif item is None: 72 | break 73 | 74 | else: 75 | logging.error("Unsupported data type %s", type(item).__name__) 76 | 77 | # Process any remaining groups of raw or image data. This can 78 | # happen if the trigger condition for these groups are not met. 79 | # This is also a fallback for handling image data, as the last 80 | # image in a series is typically not separately flagged. 
81 | if len(acqGroup) > 0: 82 | logging.info("Processing a group of k-space data (untriggered)") 83 | image = process_raw(acqGroup, config, metadata) 84 | connection.send_image(image) 85 | acqGroup = [] 86 | 87 | except Exception as e: 88 | logging.error(traceback.format_exc()) 89 | connection.send_logging(constants.MRD_LOGGING_ERROR, traceback.format_exc()) 90 | 91 | finally: 92 | connection.send_close() 93 | 94 | def process_raw(group, config, metadata): 95 | if len(group) == 0: 96 | return [] 97 | 98 | # Create folder, if necessary 99 | if not os.path.exists(debugFolder): 100 | os.makedirs(debugFolder) 101 | logging.debug("Created folder " + debugFolder + " for debug output files") 102 | 103 | # Format data into single [cha PE RO phs] array 104 | lin = [acquisition.idx.kspace_encode_step_1 for acquisition in group] 105 | phs = [acquisition.idx.phase for acquisition in group] 106 | 107 | # Use the zero-padded matrix size 108 | data = np.zeros((group[0].data.shape[0], 109 | metadata.encoding[0].encodedSpace.matrixSize.y, 110 | metadata.encoding[0].encodedSpace.matrixSize.x, 111 | max(phs)+1), 112 | group[0].data.dtype) 113 | 114 | rawHead = [None]*(max(phs)+1) 115 | 116 | for acq, lin, phs in zip(group, lin, phs): 117 | if (lin < data.shape[1]) and (phs < data.shape[3]): 118 | # TODO: Account for asymmetric echo in a better way 119 | data[:,lin,-acq.data.shape[1]:,phs] = acq.data 120 | 121 | # center line of k-space is encoded in user[5] 122 | if (rawHead[phs] is None) or (np.abs(acq.getHead().idx.kspace_encode_step_1 - acq.getHead().idx.user[5]) < np.abs(rawHead[phs].idx.kspace_encode_step_1 - rawHead[phs].idx.user[5])): 123 | rawHead[phs] = acq.getHead() 124 | 125 | # Flip matrix in RO/PE to be consistent with ICE 126 | data = np.flip(data, (1, 2)) 127 | 128 | # Format as [row col phs cha] for BART 129 | data = data.transpose((1, 2, 3, 0)) 130 | 131 | logging.debug("Raw data is size %s" % (data.shape,)) 132 | np.save(debugFolder + "/" + "raw.npy", data) 133 | 
134 | # Fourier Transform with BART 135 | logging.info("Calling BART FFT") 136 | data = bart(1, 'fft -u -i 3', data) 137 | 138 | # Re-format as [cha row col phs] 139 | data = data.transpose((3, 0, 1, 2)) 140 | 141 | # Sum of squares coil combination 142 | # Data will be [PE RO phs] 143 | data = np.abs(data) 144 | data = np.square(data) 145 | data = np.sum(data, axis=0) 146 | data = np.sqrt(data) 147 | 148 | logging.debug("Image data is size %s" % (data.shape,)) 149 | np.save(debugFolder + "/" + "img.npy", data) 150 | 151 | # Determine max value (12 or 16 bit) 152 | BitsStored = 12 153 | if (mrdhelper.get_userParameterLong_value(metadata, "BitsStored") is not None): 154 | BitsStored = mrdhelper.get_userParameterLong_value(metadata, "BitsStored") 155 | maxVal = 2**BitsStored - 1 156 | 157 | # Normalize and convert to int16 158 | data *= maxVal/data.max() 159 | data = np.around(data) 160 | data = data.astype(np.int16) 161 | 162 | # Remove readout oversampling 163 | if metadata.encoding[0].reconSpace.matrixSize.x != 0: 164 | offset = int((data.shape[1] - metadata.encoding[0].reconSpace.matrixSize.x)/2) 165 | data = data[:,offset:offset+metadata.encoding[0].reconSpace.matrixSize.x] 166 | 167 | # Remove phase oversampling 168 | if metadata.encoding[0].reconSpace.matrixSize.y != 0: 169 | offset = int((data.shape[0] - metadata.encoding[0].reconSpace.matrixSize.y)/2) 170 | data = data[offset:offset+metadata.encoding[0].reconSpace.matrixSize.y,:] 171 | 172 | logging.debug("Image without oversampling is size %s" % (data.shape,)) 173 | np.save(debugFolder + "/" + "imgCrop.npy", data) 174 | 175 | # Format as ISMRMRD image data 176 | imagesOut = [] 177 | for phs in range(data.shape[2]): 178 | # Create new MRD instance for the processed image 179 | # data has shape [PE RO phs], i.e. [y x]. 
180 | # from_array() should be called with 'transpose=False' to avoid warnings, and when called 181 | # with this option, can take input as: [cha z y x], [z y x], or [y x] 182 | tmpImg = ismrmrd.Image.from_array(data[...,phs], transpose=False) 183 | 184 | # Set the header information 185 | tmpImg.setHead(mrdhelper.update_img_header_from_raw(tmpImg.getHead(), rawHead[phs])) 186 | tmpImg.field_of_view = (ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.x), 187 | ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.y), 188 | ctypes.c_float(metadata.encoding[0].reconSpace.fieldOfView_mm.z)) 189 | tmpImg.image_index = phs 190 | 191 | # Set ISMRMRD Meta Attributes 192 | tmpMeta = ismrmrd.Meta() 193 | tmpMeta['DataRole'] = 'Image' 194 | tmpMeta['ImageProcessingHistory'] = ['PYTHON', 'BART'] 195 | tmpMeta['WindowCenter'] = str((maxVal+1)/2) 196 | tmpMeta['WindowWidth'] = str((maxVal+1)) 197 | tmpMeta['Keep_image_geometry'] = 1 198 | 199 | # Add image orientation directions to MetaAttributes if not already present 200 | if tmpMeta.get('ImageRowDir') is None: 201 | tmpMeta['ImageRowDir'] = ["{:.18f}".format(tmpImg.getHead().read_dir[0]), "{:.18f}".format(tmpImg.getHead().read_dir[1]), "{:.18f}".format(tmpImg.getHead().read_dir[2])] 202 | 203 | if tmpMeta.get('ImageColumnDir') is None: 204 | tmpMeta['ImageColumnDir'] = ["{:.18f}".format(tmpImg.getHead().phase_dir[0]), "{:.18f}".format(tmpImg.getHead().phase_dir[1]), "{:.18f}".format(tmpImg.getHead().phase_dir[2])] 205 | 206 | xml = tmpMeta.serialize() 207 | logging.debug("Image MetaAttributes: %s", xml) 208 | tmpImg.attribute_string = xml 209 | imagesOut.append(tmpImg) 210 | 211 | return imagesOut 212 | -------------------------------------------------------------------------------- /client.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | # from server import Server 4 | 5 | import argparse 6 | import logging 7 | import datetime 8 | import 
h5py 9 | import socket 10 | import sys 11 | import ismrmrd 12 | import multiprocessing 13 | from connection import Connection 14 | import time 15 | import os 16 | import json 17 | 18 | defaults = { 19 | 'filename': '', 20 | 'in_group': '', 21 | 'address': 'localhost', 22 | 'port': 9002, 23 | 'outfile': 'out.h5', 24 | 'out_group': str(datetime.datetime.now()), 25 | 'config': 'invertcontrast', 26 | 'config_local': '', 27 | 'ignore_json_config': False, 28 | 'send_waveforms': False, 29 | 'verbose': False, 30 | 'logfile': '' 31 | } 32 | 33 | def connection_receive_loop(sock, outfile, outgroup, verbose, logfile, recvAcqs, recvImages, recvWaveforms): 34 | """Start a Connection instance to receive data, generally run in a separate thread""" 35 | 36 | if verbose: 37 | verbosity = logging.DEBUG 38 | else: 39 | verbosity = logging.INFO 40 | 41 | if logfile: 42 | logging.basicConfig(filename=logfile, format='%(asctime)s - %(message)s', level=verbosity) 43 | logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) 44 | else: 45 | logging.basicConfig(format='%(asctime)s - %(message)s', level=verbosity) 46 | 47 | incoming_connection = Connection(sock, True, outfile, "", outgroup) 48 | 49 | try: 50 | for msg in incoming_connection: 51 | if msg is None: 52 | break 53 | finally: 54 | try: 55 | sock.shutdown(socket.SHUT_RDWR) 56 | except: 57 | pass 58 | sock.close() 59 | logging.debug("Socket closed (reader)") 60 | 61 | # Dataset may not be closed properly if a close message is not received 62 | try: 63 | incoming_connection.dset.close() 64 | except: 65 | pass 66 | 67 | recvAcqs.value = incoming_connection.recvAcqs 68 | recvImages.value = incoming_connection.recvImages 69 | recvWaveforms.value = incoming_connection.recvWaveforms 70 | 71 | def main(args): 72 | # ----- Load and validate file --------------------------------------------- 73 | if (args.config_local): 74 | if not os.path.exists(args.config_local): 75 | logging.error("Could not find local config file %s", 
args.config_local) 76 | return 77 | 78 | localConfigAdditionalText = None 79 | if (args.config): 80 | configAdditionalFile = args.config + '.json' 81 | if os.path.exists(configAdditionalFile): 82 | logging.info("Found additional config file %s", configAdditionalFile) 83 | 84 | fid = open(configAdditionalFile, 'r') 85 | localConfigAdditionalText = fid.read() 86 | fid.close() 87 | 88 | dset = h5py.File(args.filename, 'r') 89 | if not dset: 90 | logging.error("Not a valid dataset: %s" % args.filename) 91 | return 92 | 93 | dsetNames = dset.keys() 94 | logging.info("File %s contains %d groups:", args.filename, len(dset.keys())) 95 | print(" ", "\n ".join(dsetNames)) 96 | 97 | if not args.in_group: 98 | if len(dset.keys()) == 1: 99 | args.in_group = list(dset.keys())[0] 100 | else: 101 | logging.error("Input group not specified and multiple groups are present") 102 | return 103 | 104 | 105 | if args.in_group not in dset: 106 | logging.error("Could not find group %s", args.in_group) 107 | return 108 | 109 | group = dset.get(args.in_group) 110 | 111 | logging.info("Reading data from group '%s' in file '%s'", args.in_group, args.filename) 112 | 113 | # ----- Determine type of data stored -------------------------------------- 114 | # Raw data is stored as: 115 | # /group/config text of recon config parameters (optional) 116 | # /group/xml text of ISMRMRD flexible data header 117 | # /group/data array of IsmsmrdAcquisition data + header 118 | # /group/waveforms array of waveform (e.g. 
PMU) data 119 | 120 | # Image data is stored as: 121 | # /group/config text of recon config parameters (optional) 122 | # /group/xml text of ISMRMRD flexible data header (optional) 123 | # /group/image_0/data array of IsmrmrdImage data 124 | # /group/image_0/header array of ImageHeader 125 | # /group/image_0/attributes text of image MetaAttributes 126 | hasRaw = False 127 | hasImage = False 128 | hasWaveforms = False 129 | 130 | if ('data' in group): 131 | hasRaw = True 132 | 133 | if len([key for key in group.keys() if (key.startswith('image_') or key.startswith('images_'))]) > 0: 134 | hasImage = True 135 | 136 | if ('waveforms' in group): 137 | hasWaveforms = True 138 | 139 | dset.close() 140 | 141 | if ((hasRaw is False) and (hasImage is False)): 142 | logging.error("File does not contain properly formatted MRD raw or image data") 143 | return 144 | 145 | # ----- Open connection to server ------------------------------------------ 146 | # Spawn a thread to connect and handle incoming data 147 | logging.info("Connecting to MRD server at %s:%d" % (args.address, args.port)) 148 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 149 | 150 | attempt = 0 151 | maxAttempts = 5 152 | success = False 153 | while attempt < maxAttempts: 154 | try: 155 | sock.connect((args.address, args.port)) 156 | except socket.error as error: 157 | logging.warning("Failed to connect (%d/%d): %s" % (attempt+1, maxAttempts, error)) 158 | time.sleep(1) 159 | attempt += 1 160 | else: 161 | success = True 162 | attempt = maxAttempts 163 | 164 | if not success: 165 | sock.close() 166 | logging.error("... 
Aborting") 167 | return 168 | 169 | recvAcqs = multiprocessing.Value('i', 0) 170 | recvImages = multiprocessing.Value('i', 0) 171 | recvWaveforms = multiprocessing.Value('i', 0) 172 | process = multiprocessing.Process(target=connection_receive_loop, args=(sock, args.outfile, args.out_group, args.verbose, args.logfile, recvAcqs, recvImages, recvWaveforms)) 173 | process.daemon = True 174 | process.start() 175 | 176 | # This connection is only used for outgoing data. It should not be used for 177 | # writing to the HDF5 file as multi-threading issues can occur 178 | connection = Connection(sock, False) 179 | 180 | # --------------- Send config ----------------------------- 181 | if (args.config_local): 182 | fid = open(args.config_local, "r") 183 | config_text = fid.read() 184 | fid.close() 185 | logging.info("Sending local config file '%s' with text:", args.config_local) 186 | logging.info(config_text) 187 | connection.send_config_text(config_text) 188 | else: 189 | logging.info("Sending remote config file name '%s'", args.config) 190 | connection.send_config_file(args.config) 191 | 192 | dset = ismrmrd.Dataset(args.filename, args.in_group, False) 193 | 194 | # --------------- Send MRD metadata ----------------------- 195 | groups = dset.list() 196 | if ('xml' in groups): 197 | xml_header = dset.read_xml_header() 198 | xml_header = xml_header.decode("utf-8") 199 | else: 200 | logging.warning("Could not find MRD metadata xml in file") 201 | xml_header = "Dummy XML header" 202 | connection.send_metadata(xml_header) 203 | 204 | # --------------- Send additional config ----------------------- 205 | groups = dset.list() 206 | if localConfigAdditionalText is None: 207 | if ('configAdditional' in groups): 208 | configAdditionalText = dset._dataset['configAdditional'][0] 209 | configAdditionalText = configAdditionalText.decode("utf-8") 210 | 211 | if args.ignore_json_config: 212 | # Remove the config specified in the JSON, allowing the config passed via command line to the 
client to be used 213 | configAdditional = json.loads(configAdditionalText) 214 | if ('parameters' in configAdditional): 215 | if ('config' in configAdditional['parameters']): 216 | logging.warning(f"Input file contains JSON configAdditional that specifies config '{configAdditional['parameters']['config']}', but will be ignored because '--ignore-json-config' was specified!") 217 | del configAdditional['parameters']['config'] 218 | 219 | if ('customconfig' in configAdditional['parameters']): 220 | if configAdditional['parameters']['customconfig'] != '': 221 | logging.warning(f"Input file contains JSON configAdditional that specifies customconfig '{configAdditional['parameters']['customconfig']}', but will be ignored because '--ignore-json-config' was specified!") 222 | del configAdditional['parameters']['customconfig'] 223 | 224 | configAdditionalText = json.dumps(configAdditional, indent=2) 225 | 226 | logging.info("Sending configAdditional found in file %s:\n%s", args.filename, configAdditionalText) 227 | connection.send_text(configAdditionalText) 228 | else: 229 | # Do nothing -- no additional config in local .json file or in MRD file 230 | pass 231 | else: 232 | if ('configAdditional' in groups): 233 | logging.warning("configAdditional found in file %s, but is overriden by local file %s!", args.filename, configAdditionalFile) 234 | 235 | if args.ignore_json_config: 236 | # Remove the config specified in the JSON, allowing the config passed via command line to the client to be used 237 | localConfigAdditional = json.loads(localConfigAdditionalText) 238 | if ('parameters' in localConfigAdditional): 239 | if ('config' in localConfigAdditional['parameters']): 240 | logging.warning(f"configAdditional file '{configAdditionalFile}' specifies config '{localConfigAdditional['parameters']['config']}', but will be ignored because '--ignore-json-config' was specified!") 241 | del localConfigAdditional['parameters']['config'] 242 | 243 | if ('customconfig' in 
localConfigAdditional['parameters']): 244 | if localConfigAdditional['parameters']['customconfig'] != '': 245 | logging.warning(f"configAdditional file '{configAdditionalFile}' specifies customconfig '{localConfigAdditional['parameters']['customconfig']}', but will be ignored because '--ignore-json-config' was specified!") 246 | del localConfigAdditional['parameters']['customconfig'] 247 | 248 | localConfigAdditionalText = json.dumps(localConfigAdditional, indent=2) 249 | 250 | logging.info("Sending configAdditional found in file %s:\n%s", configAdditionalFile, localConfigAdditionalText) 251 | connection.send_text(localConfigAdditionalText) 252 | 253 | # --------------- Send waveform data ---------------------- 254 | # TODO: Interleave waveform and other data so they arrive chronologically 255 | if hasWaveforms: 256 | if args.send_waveforms: 257 | logging.info("Sending waveform data") 258 | logging.info("Found %d waveforms", dset.number_of_waveforms()) 259 | 260 | for idx in range(0, dset.number_of_waveforms()): 261 | wav = dset.read_waveform(idx) 262 | try: 263 | connection.send_waveform(wav) 264 | except: 265 | logging.error('Failed to send waveform %d -- aborting!' % idx) 266 | break 267 | else: 268 | logging.info("Waveform data present, but send-waveforms option turned off") 269 | 270 | # --------------- Send raw data ---------------------- 271 | if hasRaw: 272 | logging.info("Starting raw data session") 273 | logging.info("Found %d raw data readouts", dset.number_of_acquisitions()) 274 | 275 | for idx in range(dset.number_of_acquisitions()): 276 | acq = dset.read_acquisition(idx) 277 | try: 278 | connection.send_acquisition(acq) 279 | except: 280 | logging.error('Failed to send acquisition %d -- aborting!' 
% idx) 281 | break 282 | 283 | # --------------- Send image data ---------------------- 284 | if hasImage: 285 | logging.info("Starting image data session") 286 | for group in [key for key in groups if (key.startswith('image_') or key.startswith('images_'))]: 287 | logging.info("Reading images from '/" + args.in_group + "/" + group + "'") 288 | 289 | for imgNum in range(0, dset.number_of_images(group)): 290 | image = dset.read_image(group, imgNum) 291 | 292 | if not isinstance(image.attribute_string, str): 293 | image.attribute_string = image.attribute_string.decode('utf-8') 294 | 295 | logging.debug("Sending image %d of %d", imgNum, dset.number_of_images(group)-1) 296 | try: 297 | connection.send_image(image) 298 | except: 299 | logging.error('Failed to send image %d -- aborting!' % imgNum) 300 | break 301 | 302 | dset.close() 303 | try: 304 | connection.send_close() 305 | except: 306 | logging.error('Failed to send close message!') 307 | 308 | # Wait for incoming data and cleanup 309 | logging.debug("Waiting for threads to finish") 310 | process.join() 311 | 312 | sock.close() 313 | logging.info("Socket closed (writer)") 314 | 315 | # Save a copy of the MRD XML header now that the connection thread is finished with the file 316 | logging.debug("Writing MRD metadata to file") 317 | dset = ismrmrd.Dataset(args.outfile, args.out_group) 318 | dset.write_xml_header(bytes(xml_header, 'utf-8')) 319 | dset.close() 320 | 321 | logging.info("---------------------- Summary ----------------------") 322 | logging.info("Sent %5d acquisitions | Received %5d acquisitions", connection.sentAcqs, recvWaveforms.value) 323 | logging.info("Sent %5d images | Received %5d images", connection.sentImages, recvImages.value) 324 | logging.info("Sent %5d waveforms | Received %5d waveforms", connection.sentWaveforms, recvWaveforms.value) 325 | logging.info("Session complete") 326 | 327 | return 328 | 329 | if __name__ == '__main__': 330 | 331 | parser = 
argparse.ArgumentParser(description='Example client for MRD streaming format', 332 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 333 | parser.add_argument('filename', help='Input file') 334 | parser.add_argument('-a', '--address', help='Address (hostname) of MRD server') 335 | parser.add_argument('-p', '--port', type=int, help='Port') 336 | parser.add_argument('-o', '--outfile', help='Output file') 337 | parser.add_argument('-g', '--in-group', help='Input data group') 338 | parser.add_argument('-G', '--out-group', help='Output group name') 339 | parser.add_argument('-c', '--config', help='Remote configuration file') 340 | parser.add_argument('-C', '--config-local', help='Local configuration file') 341 | parser.add_argument('-w', '--send-waveforms', action='store_true', help='Send waveform (physio) data') 342 | parser.add_argument('-v', '--verbose', action='store_true', help='Verbose mode') 343 | parser.add_argument('-l', '--logfile', type=str, help='Path to log file') 344 | parser.add_argument( '--ignore-json-config', action='store_true', help='Ignore config specified in JSON') 345 | 346 | parser.set_defaults(**defaults) 347 | 348 | args = parser.parse_args() 349 | 350 | if args.logfile: 351 | print("Logging to file: ", args.logfile) 352 | logging.basicConfig(filename=args.logfile, format='%(asctime)s - %(message)s', level=logging.WARNING) 353 | logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) 354 | else: 355 | print("No logfile provided") 356 | logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.WARNING) 357 | 358 | if args.verbose: 359 | logging.root.setLevel(logging.DEBUG) 360 | else: 361 | logging.root.setLevel(logging.INFO) 362 | 363 | # If a config is specified via the command line arguments, then set ignore_json_config to True 364 | if ('-c' in sys.argv) or ('--config' in sys.argv): 365 | args.ignore_json_config = True 366 | 367 | main(args) 368 | 
-------------------------------------------------------------------------------- /connection.py: -------------------------------------------------------------------------------- 1 | import constants 2 | import ismrmrd 3 | import ctypes 4 | import os 5 | from datetime import datetime 6 | import h5py 7 | import random 8 | import threading 9 | 10 | import logging 11 | import socket 12 | import numpy as np 13 | 14 | class Connection: 15 | def __init__(self, socket, savedata, savedataFile = "", savedataFolder = "", savedataGroup = "dataset"): 16 | self.savedata = savedata 17 | self.savedataFile = savedataFile 18 | self.savedataFolder = savedataFolder 19 | self.savedataGroup = savedataGroup 20 | self.mrdFilePath = None 21 | self.dset = None 22 | self.socket = socket 23 | self.is_exhausted = False 24 | self.sentAcqs = 0 25 | self.sentImages = 0 26 | self.sentWaveforms = 0 27 | self.recvAcqs = 0 28 | self.recvImages = 0 29 | self.recvWaveforms = 0 30 | self.lock = threading.Lock() 31 | self.handlers = { 32 | constants.MRD_MESSAGE_CONFIG_FILE: self.read_config_file, 33 | constants.MRD_MESSAGE_CONFIG_TEXT: self.read_config_text, 34 | constants.MRD_MESSAGE_METADATA_XML_TEXT: self.read_metadata, 35 | constants.MRD_MESSAGE_CLOSE: self.read_close, 36 | constants.MRD_MESSAGE_TEXT: self.read_text, 37 | constants.MRD_MESSAGE_ISMRMRD_ACQUISITION: self.read_acquisition, 38 | constants.MRD_MESSAGE_ISMRMRD_WAVEFORM: self.read_waveform, 39 | constants.MRD_MESSAGE_ISMRMRD_IMAGE: self.read_image 40 | } 41 | 42 | def create_save_file(self): 43 | if self.savedata is True: 44 | # Create savedata folder, if necessary 45 | if ((self.savedataFolder) and (not os.path.exists(self.savedataFolder))): 46 | os.makedirs(self.savedataFolder) 47 | logging.debug("Created folder " + self.savedataFolder + " to save incoming data") 48 | 49 | if (self.savedataFile): 50 | self.mrdFilePath = self.savedataFile 51 | else: 52 | self.mrdFilePath = os.path.join(self.savedataFolder, "MRD_input_" + 
datetime.now().strftime("%Y-%m-%d-%H%M%S" + "_" + str(random.randint(0,100)) + ".h5")) 53 | 54 | # Create HDF5 file to store incoming MRD data 55 | logging.info("Incoming data will be saved to: '%s' in group '%s'", self.mrdFilePath, self.savedataGroup) 56 | self.dset = ismrmrd.Dataset(self.mrdFilePath, self.savedataGroup) 57 | self.dset._file.require_group(self.savedataGroup) 58 | 59 | def save_additional_config(self, configAdditionalText): 60 | if self.savedata is True: 61 | if self.dset is None: 62 | self.create_save_file() 63 | 64 | self.dset._file.require_group("dataset") 65 | dsetConfigAdditional = self.dset._dataset.require_dataset('configAdditional',shape=(1,), dtype=h5py.special_dtype(vlen=bytes)) 66 | dsetConfigAdditional[0] = bytes(configAdditionalText, 'utf-8') 67 | 68 | def send_logging(self, level, contents): 69 | try: 70 | formatted_contents = "%s %s" % (level, contents) 71 | except: 72 | logging.warning("Unsupported logging level: " + level) 73 | formatted_contents = contents 74 | 75 | self.send_text(formatted_contents) 76 | 77 | def __iter__(self): 78 | while not self.is_exhausted: 79 | yield self.next() 80 | 81 | def __next__(self): 82 | return self.next() 83 | 84 | def read(self, nbytes): 85 | return self.socket.recv(nbytes, socket.MSG_WAITALL) 86 | 87 | def peek(self, nbytes): 88 | return self.socket.recv(nbytes, socket.MSG_PEEK) 89 | 90 | def next(self): 91 | with self.lock: 92 | id = self.read_mrd_message_identifier() 93 | 94 | if (self.is_exhausted == True): 95 | return 96 | 97 | handler = self.handlers.get(id, lambda: Connection.unknown_message_identifier(id)) 98 | return handler() 99 | 100 | def shutdown_close(self): 101 | # Encapsulate shutdown in a try block because the socket may have 102 | # already been closed on the other side 103 | try: 104 | self.socket.shutdown(socket.SHUT_RDWR) 105 | except: 106 | pass 107 | self.socket.close() 108 | logging.info("Socket closed") 109 | 110 | @staticmethod 111 | def 
unknown_message_identifier(identifier): 112 | logging.error("Received unknown message type: %d", identifier) 113 | raise StopIteration 114 | 115 | def read_mrd_message_identifier(self): 116 | try: 117 | identifier_bytes = self.read(constants.SIZEOF_MRD_MESSAGE_IDENTIFIER) 118 | except ConnectionResetError: 119 | logging.error("Connection closed unexpectedly") 120 | self.is_exhausted = True 121 | return 122 | 123 | if (len(identifier_bytes) == 0): 124 | self.is_exhausted = True 125 | return 126 | 127 | return constants.MrdMessageIdentifier.unpack(identifier_bytes)[0] 128 | 129 | def peek_mrd_message_identifier(self): 130 | try: 131 | identifier_bytes = self.peek(constants.SIZEOF_MRD_MESSAGE_IDENTIFIER) 132 | except ConnectionResetError: 133 | logging.error("Connection closed unexpectedly") 134 | self.is_exhausted = True 135 | return 136 | 137 | if (len(identifier_bytes) == 0): 138 | self.is_exhausted = True 139 | return 140 | 141 | return constants.MrdMessageIdentifier.unpack(identifier_bytes)[0] 142 | 143 | def read_mrd_message_length(self): 144 | length_bytes = self.read(constants.SIZEOF_MRD_MESSAGE_LENGTH) 145 | return constants.MrdMessageLength.unpack(length_bytes)[0] 146 | 147 | # ----- MRD_MESSAGE_CONFIG_FILE (1) ---------------------------------------- 148 | # This message contains the file name of a configuration file used for 149 | # image reconstruction/post-processing. The file must exist on the server. 
150 | # Message consists of: 151 | # ID ( 2 bytes, unsigned short) 152 | # Config file name (1024 bytes, char ) 153 | def send_config_file(self, filename): 154 | with self.lock: 155 | logging.info("--> Sending MRD_MESSAGE_CONFIG_FILE (1)") 156 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_CONFIG_FILE)) 157 | self.socket.send(constants.MrdMessageConfigurationFile.pack(filename.encode())) 158 | 159 | def read_config_file(self): 160 | logging.info("<-- Received MRD_MESSAGE_CONFIG_FILE (1)") 161 | config_file_bytes = self.read(constants.SIZEOF_MRD_MESSAGE_CONFIGURATION_FILE) 162 | config_file = constants.MrdMessageConfigurationFile.unpack(config_file_bytes)[0] 163 | config_file = config_file.split(b'\x00',1)[0].decode('utf-8') # Strip off null terminators in fixed 1024 size 164 | 165 | logging.debug(" " + config_file) 166 | if (config_file == "savedataonly"): 167 | logging.info("Save data, but no processing based on config") 168 | if self.savedata is True: 169 | logging.debug("Saving data is already enabled") 170 | else: 171 | self.savedata = True 172 | self.create_save_file() 173 | 174 | if self.savedata is True: 175 | if self.dset is None: 176 | self.create_save_file() 177 | 178 | self.dset._file.require_group("dataset") 179 | dsetConfigFile = self.dset._dataset.require_dataset('config_file',shape=(1,), dtype=h5py.special_dtype(vlen=bytes)) 180 | dsetConfigFile[0] = bytes(config_file, 'utf-8') 181 | 182 | return config_file 183 | 184 | # ----- MRD_MESSAGE_CONFIG_TEXT (2) -------------------------------------- 185 | # This message contains the configuration information (text contents) used 186 | # for image reconstruction/post-processing. Text is null-terminated. 
187 | # Message consists of: 188 | # ID ( 2 bytes, unsigned short) 189 | # Length ( 4 bytes, uint32_t ) 190 | # Config text data ( variable, char ) 191 | def send_config_text(self, contents): 192 | with self.lock: 193 | logging.info("--> Sending MRD_MESSAGE_CONFIG_TEXT (2)") 194 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_CONFIG_TEXT)) 195 | contents_with_nul = '%s\0' % contents # Add null terminator 196 | self.socket.send(constants.MrdMessageLength.pack(len(contents_with_nul.encode()))) 197 | self.socket.send(contents_with_nul.encode()) 198 | 199 | def read_config_text(self): 200 | logging.info("<-- Received MRD_MESSAGE_CONFIG_TEXT (2)") 201 | length = self.read_mrd_message_length() 202 | config = self.read(length) 203 | config = config.split(b'\x00',1)[0].decode('utf-8') # Strip off null teminator 204 | 205 | if self.savedata is True: 206 | if self.dset is None: 207 | self.create_save_file() 208 | 209 | self.dset._file.require_group("dataset") 210 | dsetConfig = self.dset._dataset.require_dataset('config',shape=(1,), dtype=h5py.special_dtype(vlen=bytes)) 211 | dsetConfig[0] = bytes(config, 'utf-8') 212 | 213 | return config 214 | 215 | # ----- MRD_MESSAGE_METADATA_XML_TEXT (3) ----------------------------------- 216 | # This message contains the metadata for the entire dataset, formatted as 217 | # MRD XML flexible data header text. Text is null-terminated. 
218 | # Message consists of: 219 | # ID ( 2 bytes, unsigned short) 220 | # Length ( 4 bytes, uint32_t ) 221 | # Text xml data ( variable, char ) 222 | def send_metadata(self, contents): 223 | with self.lock: 224 | logging.info("--> Sending MRD_MESSAGE_METADATA_XML_TEXT (3)") 225 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_METADATA_XML_TEXT)) 226 | contents_with_nul = '%s\0' % contents # Add null terminator 227 | self.socket.send(constants.MrdMessageLength.pack(len(contents_with_nul.encode()))) 228 | self.socket.send(contents_with_nul.encode()) 229 | 230 | def read_metadata(self): 231 | logging.info("<-- Received MRD_MESSAGE_METADATA_XML_TEXT (3)") 232 | length = self.read_mrd_message_length() 233 | metadata = self.read(length) 234 | metadata = metadata.split(b'\x00',1)[0].decode('utf-8') # Strip off null teminator 235 | 236 | if self.savedata is True: 237 | if self.dset is None: 238 | self.create_save_file() 239 | 240 | logging.debug(" Saving XML header to file") 241 | self.dset.write_xml_header(bytes(metadata, 'utf-8')) 242 | 243 | return metadata 244 | 245 | # ----- MRD_MESSAGE_CLOSE (4) ---------------------------------------------- 246 | # This message signals that all data has been sent (either from server or client). 
247 | def send_close(self): 248 | with self.lock: 249 | logging.info("--> Sending MRD_MESSAGE_CLOSE (4)") 250 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_CLOSE)) 251 | 252 | def read_close(self): 253 | logging.info("<-- Received MRD_MESSAGE_CLOSE (4)") 254 | logging.info(" Total received acquisitions: %5d", self.recvAcqs) 255 | logging.info(" Total received images: %5d", self.recvImages) 256 | logging.info(" Total received waveforms: %5d", self.recvWaveforms) 257 | logging.info("------------------------------------------") 258 | 259 | if self.savedata is True: 260 | if self.dset is None: 261 | self.create_save_file() 262 | 263 | logging.debug("Closing file %s", self.dset._file.filename) 264 | self.dset.close() 265 | self.dset = None 266 | 267 | self.is_exhausted = True 268 | return 269 | 270 | # ----- MRD_MESSAGE_TEXT (5) ----------------------------------- 271 | # This message contains arbitrary text data. 272 | # Message consists of: 273 | # ID ( 2 bytes, unsigned short) 274 | # Length ( 4 bytes, uint32_t ) 275 | # Text data ( variable, char ) 276 | def send_text(self, contents): 277 | with self.lock: 278 | logging.info("--> Sending MRD_MESSAGE_TEXT (5)") 279 | logging.info(" %s", contents) 280 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_TEXT)) 281 | contents_with_nul = '%s\0' % contents # Add null terminator 282 | self.socket.send(constants.MrdMessageLength.pack(len(contents_with_nul.encode()))) 283 | self.socket.send(contents_with_nul.encode()) 284 | 285 | def read_text(self): 286 | logging.info("<-- Received MRD_MESSAGE_TEXT (5)") 287 | length = self.read_mrd_message_length() 288 | text = self.read(length) 289 | text = text.split(b'\x00',1)[0].decode('utf-8') # Strip off null teminator 290 | logging.info(" %s", text) 291 | return text 292 | 293 | # ----- MRD_MESSAGE_ISMRMRD_ACQUISITION (1008) ----------------------------- 294 | # This message contains raw k-space data from a single readout. 
295 | # Message consists of: 296 | # ID ( 2 bytes, unsigned short) 297 | # Fixed header ( 340 bytes, mixed ) 298 | # Trajectory ( variable, float ) 299 | # Raw k-space data ( variable, float ) 300 | def send_acquisition(self, acquisition): 301 | with self.lock: 302 | self.sentAcqs += 1 303 | if (self.sentAcqs == 1) or (self.sentAcqs % 100 == 0): 304 | logging.info("--> Sending MRD_MESSAGE_ISMRMRD_ACQUISITION (1008) (total: %d)", self.sentAcqs) 305 | 306 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_ISMRMRD_ACQUISITION)) 307 | acquisition.serialize_into(self.socket.send) 308 | 309 | def read_acquisition(self): 310 | self.recvAcqs += 1 311 | if (self.recvAcqs == 1) or (self.recvAcqs % 100 == 0): 312 | logging.info("<-- Received MRD_MESSAGE_ISMRMRD_ACQUISITION (1008) (total: %d)", self.recvAcqs) 313 | 314 | acq = ismrmrd.Acquisition.deserialize_from(self.read) 315 | 316 | if self.savedata is True: 317 | if self.dset is None: 318 | self.create_save_file() 319 | 320 | self.dset.append_acquisition(acq) 321 | 322 | return acq 323 | 324 | # ----- MRD_MESSAGE_ISMRMRD_IMAGE (1022) ----------------------------------- 325 | # This message contains a single [x y z cha] image. 
326 | # Message consists of: 327 | # ID ( 2 bytes, unsigned short) 328 | # Fixed header ( 198 bytes, mixed ) 329 | # Attribute length ( 8 bytes, uint64_t ) 330 | # Attribute data ( variable, char ) 331 | # Image data ( variable, variable ) 332 | def send_image(self, images): 333 | with self.lock: 334 | if not isinstance(images, list): 335 | images = [images] 336 | 337 | logging.info("--> Sending MRD_MESSAGE_ISMRMRD_IMAGE (1022) (%d images)", len(images)) 338 | for image in images: 339 | if image is None: 340 | continue 341 | 342 | self.sentImages += 1 343 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_ISMRMRD_IMAGE)) 344 | image.serialize_into(self.socket.send) 345 | 346 | # Explicit version of serialize_into() for more verbose debugging 347 | # self.socket.send(image.getHead()) 348 | # self.socket.send(constants.MrdMessageAttribLength.pack(len(image.attribute_string))) 349 | # self.socket.send(bytes(image.attribute_string, 'utf-8')) 350 | # self.socket.send(bytes(image.data)) 351 | 352 | def read_image(self): 353 | self.recvImages += 1 354 | logging.info("<-- Received MRD_MESSAGE_ISMRMRD_IMAGE (1022)") 355 | # return ismrmrd.Image.deserialize_from(self.read) 356 | 357 | # Explicit version of deserialize_from() for more verbose debugging 358 | logging.debug(" Reading in %d bytes of image header", ctypes.sizeof(ismrmrd.ImageHeader)) 359 | header_bytes = self.read(ctypes.sizeof(ismrmrd.ImageHeader)) 360 | 361 | attribute_length_bytes = self.read(ctypes.sizeof(ctypes.c_uint64)) 362 | attribute_length = ctypes.c_uint64.from_buffer_copy(attribute_length_bytes) 363 | logging.debug(" Reading in %d bytes of attributes", attribute_length.value) 364 | 365 | attribute_bytes = self.read(attribute_length.value) 366 | if (attribute_length.value > 25000): 367 | logging.debug(" Attributes (truncated): %s", attribute_bytes[0:24999].decode('utf-8')) 368 | else: 369 | logging.debug(" Attributes: %s", attribute_bytes.decode('utf-8')) 370 | 371 | image = 
ismrmrd.Image(header_bytes, attribute_bytes.split(b'\x00',1)[0].decode('utf-8')) # Strip off null teminator 372 | 373 | logging.info(" Image is size %d x %d x %d with %d channels of type %s", image.getHead().matrix_size[0], image.getHead().matrix_size[1], image.getHead().matrix_size[2], image.channels, ismrmrd.get_dtype_from_data_type(image.data_type)) 374 | def calculate_number_of_entries(nchannels, xs, ys, zs): 375 | return nchannels * xs * ys * zs 376 | 377 | nentries = calculate_number_of_entries(image.channels, *image.getHead().matrix_size) 378 | nbytes = nentries * ismrmrd.get_dtype_from_data_type(image.data_type).itemsize 379 | 380 | logging.debug("Reading in %d bytes of image data", nbytes) 381 | data_bytes = self.read(nbytes) 382 | 383 | image.data.ravel()[:] = np.frombuffer(data_bytes, dtype=ismrmrd.get_dtype_from_data_type(image.data_type)) 384 | 385 | if self.savedata is True: 386 | if self.dset is None: 387 | self.create_save_file() 388 | self.dset.append_image("image_%d" % image.image_series_index, image) 389 | 390 | return image 391 | 392 | # ----- MRD_MESSAGE_ISMRMRD_WAVEFORM (1026) ----------------------------- 393 | # This message contains abitrary (e.g. physio) waveform data. 
394 | # Message consists of: 395 | # ID ( 2 bytes, unsigned short) 396 | # Fixed header ( 240 bytes, mixed ) 397 | # Waveform data ( variable, uint32_t ) 398 | def send_waveform(self, waveform): 399 | with self.lock: 400 | self.sentWaveforms += 1 401 | if (self.sentWaveforms == 1) or (self.sentWaveforms % 100 == 0): 402 | logging.info("--> Sending MRD_MESSAGE_ISMRMRD_WAVEFORM (1026) (total: %d)", self.sentWaveforms) 403 | 404 | self.socket.send(constants.MrdMessageIdentifier.pack(constants.MRD_MESSAGE_ISMRMRD_WAVEFORM)) 405 | waveform.serialize_into(self.socket.send) 406 | 407 | def read_waveform(self): 408 | self.recvWaveforms += 1 409 | if (self.recvWaveforms == 1) or (self.recvWaveforms % 100 == 0): 410 | logging.info("<-- Received MRD_MESSAGE_ISMRMRD_WAVEFORM (1026) (total: %d)", self.recvWaveforms) 411 | 412 | waveform = ismrmrd.Waveform.deserialize_from(self.read) 413 | 414 | if self.savedata is True: 415 | if self.dset is None: 416 | self.create_save_file() 417 | 418 | self.dset.append_waveform(waveform) 419 | 420 | return waveform 421 | 422 | -------------------------------------------------------------------------------- /constants.py: -------------------------------------------------------------------------------- 1 | 2 | import struct 3 | 4 | MRD_MESSAGE_INT_ID_MIN = 0 # CONTROL 5 | MRD_MESSAGE_CONFIG_FILE = 1 6 | MRD_MESSAGE_CONFIG_TEXT = 2 7 | MRD_MESSAGE_METADATA_XML_TEXT = 3 8 | MRD_MESSAGE_CLOSE = 4 9 | MRD_MESSAGE_TEXT = 5 10 | MRD_MESSAGE_INT_ID_MAX = 999 # CONTROL 11 | MRD_MESSAGE_EXT_ID_MIN = 1000 # CONTROL 12 | MRD_MESSAGE_ACQUISITION = 1001 # DEPRECATED 13 | MRD_MESSAGE_NEW_MEASUREMENT = 1002 # DEPRECATED 14 | MRD_MESSAGE_END_OF_SCAN = 1003 # DEPRECATED 15 | MRD_MESSAGE_IMAGE_CPLX_FLOAT = 1004 # DEPRECATED 16 | MRD_MESSAGE_IMAGE_REAL_FLOAT = 1005 # DEPRECATED 17 | MRD_MESSAGE_IMAGE_REAL_USHORT = 1006 # DEPRECATED 18 | MRD_MESSAGE_EMPTY = 1007 # DEPRECATED 19 | MRD_MESSAGE_ISMRMRD_ACQUISITION = 1008 20 | MRD_MESSAGE_ISMRMRD_IMAGE_CPLX_FLOAT = 
1009 # DEPRECATED 21 | MRD_MESSAGE_ISMRMRD_IMAGE_REAL_FLOAT = 1010 # DEPRECATED 22 | MRD_MESSAGE_ISMRMRD_IMAGE_REAL_USHORT = 1011 # DEPRECATED 23 | MRD_MESSAGE_DICOM = 1012 # DEPRECATED 24 | MRD_MESSAGE_CLOUD_JOB = 1013 # UNSUPPORTED 25 | MRD_MESSAGE_GADGETCLOUD_JOB = 1014 # UNSUPPORTED 26 | MRD_MESSAGE_ISMRMRD_IMAGEWITHATTRIB_CPLX_FLOAT = 1015 # DEPRECATED 27 | MRD_MESSAGE_ISMRMRD_IMAGEWITHATTRIB_REAL_FLOAT = 1016 # DEPRECATED 28 | MRD_MESSAGE_ISMRMRD_IMAGEWITHATTRIB_REAL_USHORT = 1017 # DEPRECATED 29 | MRD_MESSAGE_DICOM_WITHNAME = 1018 # UNSUPPORTED 30 | MRD_MESSAGE_DEPENDENCY_QUERY = 1019 # UNSUPPORTED 31 | MRD_MESSAGE_ISMRMRD_IMAGE_REAL_SHORT = 1020 # DEPRECATED 32 | MRD_MESSAGE_ISMRMRD_IMAGEWITHATTRIB_REAL_SHORT = 1021 # DEPRECATED 33 | MRD_MESSAGE_ISMRMRD_IMAGE = 1022 34 | MRD_MESSAGE_RECONDATA = 1023 # UNSUPPORTED 35 | MRD_MESSAGE_ISMRMRD_WAVEFORM = 1026 36 | MRD_MESSAGE_EXT_ID_MAX = 4096 # CONTROL 37 | 38 | MrdMessageLength = struct.Struct(' 1000): 112 | for i in range(len(dsetsAll)): 113 | dsetsAll[i].SeriesNumber = int(np.floor(dsetsAll[i].SeriesNumber / 1000)) 114 | uSeriesNum = np.unique([dset.SeriesNumber for dset in dsetsAll]) 115 | 116 | print("Found %d unique series from %d files in folder %s" % (len(uSeriesNum), len(dsetsAll), args.folder)) 117 | 118 | print("Creating MRD XML header from file %s" % dsetsAll[0].filename) 119 | mrdHead = CreateMrdHeader(dsetsAll[0]) 120 | print(mrdHead.toXML()) 121 | 122 | imgAll = [None]*len(uSeriesNum) 123 | 124 | for iSer in range(len(uSeriesNum)): 125 | dsets = [dset for dset in dsetsAll if dset.SeriesNumber == uSeriesNum[iSer]] 126 | 127 | imgAll[iSer] = [None]*len(dsets) 128 | 129 | # Sort images by instance number, as they may be read out of order 130 | def get_instance_number(item): 131 | return item.InstanceNumber 132 | dsets = sorted(dsets, key=get_instance_number) 133 | 134 | # Build a list of unique SliceLocation and TriggerTimes, as the MRD 135 | # slice and phase counters index into these 136 | 
uSliceLoc = np.unique([dset.SliceLocation for dset in dsets]) 137 | if dsets[0].SliceLocation != uSliceLoc[0]: 138 | uSliceLoc = uSliceLoc[::-1] 139 | 140 | try: 141 | # This field may not exist for non-gated sequences 142 | uTrigTime = np.unique([dset.TriggerTime for dset in dsets]) 143 | if dsets[0].TriggerTime != uTrigTime[0]: 144 | uTrigTime = uTrigTime[::-1] 145 | except: 146 | uTrigTime = np.zeros_like(uSliceLoc) 147 | 148 | print("Series %d has %d images with %d slices and %d phases" % (uSeriesNum[iSer], len(dsets), len(uSliceLoc), len(uTrigTime))) 149 | 150 | for iImg in range(len(dsets)): 151 | tmpDset = dsets[iImg] 152 | 153 | # Create new MRD image instance. 154 | # pixel_array data has shape [row col], i.e. [y x]. 155 | # from_array() should be called with 'transpose=False' to avoid warnings, and when called 156 | # with this option, can take input as: [cha z y x], [z y x], or [y x] 157 | tmpMrdImg = ismrmrd.Image.from_array(tmpDset.pixel_array, transpose=False) 158 | tmpMeta = ismrmrd.Meta() 159 | 160 | try: 161 | tmpMrdImg.image_type = imtype_map[tmpDset.ImageType[2]] 162 | except: 163 | print("Unsupported ImageType %s -- defaulting to IMTYPE_MAGNITUDE" % tmpDset.ImageType[2]) 164 | tmpMrdImg.image_type = ismrmrd.IMTYPE_MAGNITUDE 165 | 166 | tmpMrdImg.field_of_view = (tmpDset.PixelSpacing[0]*tmpDset.Rows, tmpDset.PixelSpacing[1]*tmpDset.Columns, tmpDset.SliceThickness) 167 | tmpMrdImg.position = tuple(np.stack(tmpDset.ImagePositionPatient)) 168 | tmpMrdImg.read_dir = tuple(np.stack(tmpDset.ImageOrientationPatient[0:3])) 169 | tmpMrdImg.phase_dir = tuple(np.stack(tmpDset.ImageOrientationPatient[3:7])) 170 | tmpMrdImg.slice_dir = tuple(np.cross(np.stack(tmpDset.ImageOrientationPatient[0:3]), np.stack(tmpDset.ImageOrientationPatient[3:7]))) 171 | tmpMrdImg.acquisition_time_stamp = round((int(tmpDset.AcquisitionTime[0:2])*3600 + int(tmpDset.AcquisitionTime[2:4])*60 + int(tmpDset.AcquisitionTime[4:6]) + float(tmpDset.AcquisitionTime[6:]))*1000/2.5) 172 | 
try: 173 | tmpMrdImg.physiology_time_stamp[0] = round(int(tmpDset.TriggerTime/2.5)) 174 | except: 175 | pass 176 | 177 | try: 178 | ImaAbsTablePosition = tmpDset.get_private_item(0x0019, 0x13, 'SIEMENS MR HEADER').value 179 | tmpMrdImg.patient_table_position = (ctypes.c_float(ImaAbsTablePosition[0]), ctypes.c_float(ImaAbsTablePosition[1]), ctypes.c_float(ImaAbsTablePosition[2])) 180 | except: 181 | pass 182 | 183 | tmpMrdImg.image_series_index = uSeriesNum.tolist().index(tmpDset.SeriesNumber) 184 | tmpMrdImg.image_index = tmpDset.get('InstanceNumber', 0) 185 | tmpMrdImg.slice = uSliceLoc.tolist().index(tmpDset.SliceLocation) 186 | try: 187 | tmpMrdImg.phase = uTrigTime.tolist().index(tmpDset.TriggerTime) 188 | except: 189 | pass 190 | 191 | try: 192 | res = re.search(r'(?<=_v).*$', tmpDset.SequenceName) 193 | venc = re.search(r'^\d+', res.group(0)) 194 | dir = re.search(r'(?<=\d)[^\d]*$', res.group(0)) 195 | 196 | tmpMeta['FlowVelocity'] = float(venc.group(0)) 197 | tmpMeta['FlowDirDisplay'] = venc_dir_map[dir.group(0)] 198 | except: 199 | pass 200 | 201 | try: 202 | tmpMeta['ImageComments'] = tmpDset.ImageComments 203 | except: 204 | pass 205 | 206 | tmpMeta['SequenceDescription'] = tmpDset.SeriesDescription 207 | 208 | # Remove pixel data from pydicom class 209 | del tmpDset['PixelData'] 210 | 211 | # Store the complete base64, json-formatted DICOM header so that non-MRD fields can be 212 | # recapitulated when generating DICOMs from MRD images 213 | tmpMeta['DicomJson'] = base64.b64encode(tmpDset.to_json().encode('utf-8')).decode('utf-8') 214 | 215 | tmpMrdImg.attribute_string = tmpMeta.serialize() 216 | imgAll[iSer][iImg] = tmpMrdImg 217 | 218 | # Create an MRD file 219 | print("Creating MRD file %s with group %s" % (args.outFile, args.outGroup)) 220 | mrdDset = ismrmrd.Dataset(args.outFile, args.outGroup) 221 | mrdDset._file.require_group(args.outGroup) 222 | 223 | # Write MRD Header 224 | mrdDset.write_xml_header(bytes(mrdHead.toXML(), 'utf-8')) 225 | 226 | # 
Write all images 227 | for iSer in range(len(imgAll)): 228 | for iImg in range(len(imgAll[iSer])): 229 | mrdDset.append_image("image_%d" % imgAll[iSer][iImg].image_series_index, imgAll[iSer][iImg]) 230 | 231 | mrdDset.close() 232 | 233 | if __name__ == '__main__': 234 | """Basic conversion of a folder of DICOM files to MRD .h5 format""" 235 | 236 | parser = argparse.ArgumentParser(description='Convert DICOMs to MRD file', 237 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 238 | parser.add_argument('folder', help='Input folder of DICOMs') 239 | parser.add_argument('-o', '--outFile', help='Output MRD file') 240 | parser.add_argument('-g', '--outGroup', help='Group name in output MRD file') 241 | 242 | parser.set_defaults(**defaults) 243 | 244 | args = parser.parse_args() 245 | 246 | if args.outFile is None: 247 | args.outFile = os.path.basename(args.folder) + '.h5' 248 | 249 | main(args) 250 | -------------------------------------------------------------------------------- /doc/MRD Streaming Format.md: -------------------------------------------------------------------------------- 1 | # MR Data Message Format 2 | The streaming ISMRM data format (MRD) consists of a series of messages sent asynchronously through a standard TCP/IP socket. Each message starts with an ID that identifies the message type and how subsequent stream data should be parsed. The term “server” refers to the process doing the image reconstruction or processing, while “client” refers to the process sending raw data or images. 3 | 4 | 15 | 16 | ## ID 1: MRD_MESSAGE_CONFIG_FILE 17 | 18 | -------------------------------------------------------------------------------- /doc/devcontainers.md: -------------------------------------------------------------------------------- 1 | # Getting started with dev containers 2 | Traditional development involves the local installation of libraries, packages, and other dependencies that are required by code being developed. 
This can be complex if the native environment is a different operating system than the production environment, such as when developing in Windows because the Docker images will run in Linux. Managing the development environment can also be complicated by different projects requiring different versions of the same package. If developing in Python, [virtual environments (venvs)](https://docs.python.org/3/library/venv.html) can be created and managed using tools such as [conda](https://docs.conda.io/en/latest/) or [mamba](https://github.com/mamba-org/mamba). 3 | 4 | [Development containers (devcontainers)](https://code.visualstudio.com/docs/devcontainers/containers) are an alternative approach, where the development environment itself is created within a Docker image. This can simplify the process of setting up a working environment and reduce issues when moving between development and deployment environments. During development, the devcontainer is started automatically in Docker and Visual Studio Code executes the code inside the devcontainer, complete with a debugging environment. 5 | 6 | 1. [Install Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) and clone this repository into a local folder: 7 | ``` 8 | git clone https://github.com/kspaceKelvin/python-ismrmrd-server.git 9 | ``` 10 | 11 | 1. [Install Visual Studio Code](https://code.visualstudio.com/) and install the [Dev Containers](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.remote-containers) plugin from its webpage or from the Extensions panel in Visual Studio Code. 12 | 13 | 1. Start Docker. 14 | 15 | 1. Open the `python-ismrmrd-server` folder in VS Code. The dev container should be detected automatically and a prompt will appear at the bottom right to "Reopen in Container". 
This action can also be found in the [Command Palette](https://code.visualstudio.com/docs/getstarted/userinterface#_command-palette), which can be invoked using the `Ctrl+Shift+P` key combination in Windows/Linux or `Command+Shift+P` in MacOS and typing in "Reopen in Container". This step may take a few minutes when run for the first time as the dev container Docker image is built (the devcontainer is cached for future runs). 16 | 17 | 1. When the repository is opened in a dev container, the green "Remote Window" section at the bottom left of VS Code will indicate "Dev Container: python-ismrmrd-server-devcon" 18 | 19 | 1. Select "Run and Debug" from the Activity Bar along the left side and then "Start server" from the top left. This will start the server (i.e. main.py) and breakpoints can be marked by opening a .py file and clicking to the left of the the line number (a red circle will appear). 20 | 21 | 1. The [RunClientServerRecon.ipynb](RunClientServerRecon.ipynb) Python notebook contains code snippets for generating an input raw data set, running the client, and displaying the results. Alternatively, the [client.py](client.py) can be run the Terminal window. Note that files (e.g. datasets) placed in this repo's folder will automatically be mapped inside the dev container and output files generated inside this repo's folder will also be visible on the host file system. 22 | 23 | Note: The Docker devcontainer can also be built manually by opening a terminal in the python-ismrmrd-server folder and running: 24 | ``` 25 | docker build --no-cache -t python-mrd-devcontainer --target python-mrd-devcontainer -f docker/Dockerfile ./ 26 | ``` 27 | 28 | ## GitHub Codespaces 29 | [Codespaces](https://github.com/features/codespaces) is a feature of GitHub that allows for dev containers to be configured and run in the cloud. Codespaces can be accessible via a browser, running a web-based version of Visual Studio Code or Jupyter. 
A codespace can also be opened up in a native VS Code instance running locally for faster performance. Codespaces are free for registered GitHub users for up to 60 hours/month as of December 2023 with billable additional usage. 30 | 31 | To set up a GitHub codespace: 32 | 1. Create a GitHub account and log in. 33 | 34 | 1. Browse to the main directory of this repository: https://github.com/kspaceKelvin/python-ismrmrd-server 35 | 36 | 1. Click on the green "Code" button at the top right, select "Codespaces", and then the "+" symbol (Create a codespace on master). It may take a few minutes for the codespace to be created for the first time. 37 | 38 | 1. The web version of VS Code can be used in the same way as the native program. For example, the server can be started using the "Run Server" configuration in Run and Debug, and the [RunClientServerRecon.ipynb](RunClientServerRecon.ipynb) notebook can also be used. 39 | 40 | The default idle timeout of 30 minutes (configurable in the GitHub user settings) will stop the codespace when inactive. Changed and new files are kept within the codespace even if the codespace is stopped (up to 15 GB of free storage per month). Codespaces can be manually stopped or deleted if needed. -------------------------------------------------------------------------------- /doc/docker.md: -------------------------------------------------------------------------------- 1 | ### Getting Started with Docker 2 | Docker is a virtualization platform that allows software to run in isolated environments called containers. It provides a convenient mechanism to package up a reconstruction program and all its libraries in a manner that can be easily deployed to other computers without manually installing dependencies or other configuration steps. 3 | 4 | Install Docker from https://www.docker.com/products/docker-desktop with the standard settings. 
Windows Subsystem for Linux (WSL) 2 is the preferred backend, although Hyper-V backends are still currently supported. Both Hyper-V and WSL2 backends are supported. The backend can be configured in the settings, as described in https://docs.docker.com/desktop/wsl/. 5 | 6 | 7 | Download the Docker image of this server by opening a command prompt and running: 8 | ``` 9 | docker pull kspacekelvin/fire-python 10 | ``` 11 | 12 | #### Start the Reconstruction Server in a Docker Container 13 | If using Windows, create the folder ``C:\tmp``. In a command prompt, run the command: 14 | ``` 15 | In Windows: 16 | docker run -p=9002:9002 --rm -it -v C:\tmp:/tmp kspacekelvin/fire-python 17 | 18 | In MacOS/Linux: 19 | docker run -p=9002:9002 --rm -it -v /tmp:/tmp kspacekelvin/fire-python 20 | ``` 21 | 22 | The command line options used are: 23 | ``` 24 | -p=9002:9002 Allows access to port 9002 inside the container from port 9002 on the host. Change 25 | the first number to change the host port. 26 | -it Enables “interactive” mode with a pseudo-tty. This is necessary for “ctrl-c” to 27 | stop the program. 28 | --rm Remove the container after it is stopped 29 | -v /tmp:/tmp Maps the /tmp folder on the host to /tmp inside the container. Change the first 30 | path to change the host folder. Log and debug files are stored in this folder. 31 | ``` 32 | 33 | The server can be stopped by pressing ``ctrl-c``. 34 | 35 | #### Start the Reconstruction Client 36 | In a separate command prompt, start another container of the Docker image: 37 | ``` 38 | docker run --rm -it -v /tmp:/tmp kspacekelvin/fire-python /bin/bash 39 | ``` 40 | 41 | In this command, the ``/bin/bash`` argument is used to start the container with a bash shell prompt instead of starting the Python MRD server. 
Within this bash shell, generate a sample raw data set: 42 | ``` 43 | python3 /opt/code/ismrmrd-python-tools/generate_cartesian_shepp_logan_dataset.py -o /tmp/phantom_raw.h5 44 | ``` 45 | 46 | Run the client and send data to the server in the other Docker container: 47 | ``` 48 | python3 /opt/code/python-ismrmrd-server/client.py -a host.docker.internal -p 9002 -G "dataset" -o /tmp/phantom_img.h5 /tmp/phantom_raw.h5 49 | ``` 50 | 51 | The command line options used are: 52 | ``` 53 | -a host.docker.internal Send data to address host.docker.internal, which 54 | resolves to the IP address of the Docker host. 55 | -p 9002 Send data to port 9002. This is sent to the host, which 56 | is then redirected to the server container due to the 57 | "-p" port mapping when starting the server. 58 | -o Specifies the output file name 59 | -G Specifies the group name in the output file 60 | 61 | The last argument specifies the input file name. 62 | ``` 63 | 64 | MRD image data are also stored in HDF files arranged by groups as named by the ``-G`` argument. If this argument is omitted, a group name with the current date/time is used. Images are further grouped by series index, with a sub-group named ``image_x``, where x is ``image_series_index`` in the [ImageHeader](https://ismrmrd.github.io/apidocs/1.4.2/struct_i_s_m_r_m_r_d_1_1_i_s_m_r_m_r_d___image_header.html). For example: 65 | ``` 66 | /dataset/image_0/data Image data 67 | /dataset/image_0/header MRD ImageHeader structure 68 | /dataset/image_0/attributes MRD MetaAttributes text 69 | ``` 70 | 71 | As Docker provides only a command-line virtualization interface, it is not possible to directly view the reconstructed images from within the Docker container. However, the output file is stored in ``C:\tmp`` or ``/tmp`` on the host and can be read using [ismrmrdviewer](https://github.com/ismrmrd/ismrmrdviewer) or [HDFView](https://www.hdfgroup.org/downloads/hdfview/). 
72 | 73 | 74 | ### Building a Docker Image 75 | A [Dockerfile](../docker/Dockerfile) is provided, based on [python:3.12.0-slim](https://hub.docker.com/layers/library/python/3.12.0-slim/images/sha256-8e216a21d8df597118b46f3fff477ed1c5c11be81531b6da87790a17851b7f1c?context=explore), a light Python image optimized for reduced total size. A multi-stage build is also used to include the [ismrmrd](https://github.com/ismrmrd/ismrmrd) and [siemens_to_ismrmrd](https://github.com/ismrmrd/siemens_to_ismrmrd) packages without needing their build dependencies in the final image. 76 | 77 | For some image analysis code, additional packages or libraries may be required. To create a Docker image with these additional packages, start with the ``kspacekelvin/fire-python`` image (created above) and add ``RUN`` commands corresponding to how the packages would be installed via command line. Temporary files created during each ``RUN`` command are kept in the final image, so group installations of multiple packages from the same manager (e.g. apt or pip) whenever possible. An example for installation of PyTorch is provided in [docker/pytorch/Dockerfile](../docker/pytorch/Dockerfile). Alternatively, it is possible to copy the main [Dockerfile](../docker/Dockerfile) and modify it directly. An example for this approach can be found in [docker/pytorch/Dockerfile_standalone](../docker/pytorch/Dockerfile_standalone). 78 | 79 | When determining the required packages, an interactive approach is often useful. Build the existing Dockerfile by opening a command prompt in the `python-ismrmrd-server` repo folder and running: 80 | ``` 81 | docker build --no-cache -t fire-python -f docker/Dockerfile ./ 82 | ``` 83 | 84 | The above command uses the following options: 85 | ``` 86 | --no-cache Run each step of the Docker build process without caching from previous builds 87 | -t fire-python Tag (name) of the Docker image. 
88 | -f docker/Dockerfile Path to the Dockerfile 89 | ./ Build context (the folder that COPY commands use as their root) 90 | ``` 91 | 92 | Start an instance of this image: 93 | ``` 94 | docker run -it --rm fire-python 95 | ``` 96 | 97 | The following options are used: 98 | ``` 99 | -it Use an interactive terminal with command line input 100 | --rm Delete the container after exiting 101 | ``` 102 | 103 | Within this container, attempt to start the Python server by running ``python3 /opt/code/python-ismrmrd-server/main.py``. If any errors are encountered, install the libraries using the appropriate apt, pip, or other commands. Copy these commands into the Dockerfile as each dependency is resolved. It may be necessary to send some data to the server to ensure any run-time dependencies are also validated. To do so, a ``-p 9002:9002`` command is required during the ``docker run`` step in order to share the port. More details about running a client/server pair in Docker are available in the sections above. Once all dependencies are resolved, exit the Docker container by running ``exit`` at the command prompt, rebuild the Docker image, and ensure that you can start the server with ``docker run`` without any additional steps. 104 | 105 | ### Creation of a chroot image 106 | A chroot image contains the complete contents of a Linux operating system and serves as the root folder for the reconstruction program. The chroot image can contain libraries and other files that can be used by the reconstruction program, isolated from the Linux operating system on the MARS computer. Operating systems tested for FIRE chroot compatibility include Ubuntu, Debian, and Alpine. Chroot images can be generated using manual tools such as debootstrap or be created from existing containers such as Docker. 107 | 108 | A set of scripts is provided to automate the creation of chroot images from Docker images. 
To use them, open a command prompt inside the [docker](/docker) folder and run the following command: 109 | ``` 110 | In Windows: 111 | docker_to_chroot.bat kspacekelvin/fire-python fire-python-chroot.img 112 | 113 | In MacOS/Linux: 114 | ./docker_to_chroot.sh kspacekelvin/fire-python fire-python-chroot.img 115 | ``` 116 | 117 | The first argument is the name of the (existing) Docker image and the second argument is the chroot image file to be created. An optional third argument can be used to specify the free space buffer added to the chroot in MB (default 50 MB). Note that both the [docker_to_chroot.bat](/docker/docker_to_chroot.bat) and [docker_to_chroot.sh](/docker/docker_to_chroot.sh) scripts require the [docker_tar_to_chroot.sh](/docker/docker_tar_to_chroot.sh) script that is also in the docker folder. 118 | 119 | #### Manual creation of a chroot image 120 | The following steps can be used to manually create a chroot image from a Docker image. These steps are the same as those automated by the ``docker_to_chroot`` scripts above. Here they are performed within a Linux Docker image, but they can also be run on a Linux system natively. 121 | 122 | 1. Create a Docker container instance from an image. If a different tag was used above, change the last argument accordingly. 123 | ``` 124 | docker create --name tmpimage kspacekelvin/fire-python 125 | ``` 126 | 127 | 1. Export the file system contents to a tar archive. Create the tmp folder if necessary. Note that the [docker export](https://docs.docker.com/engine/reference/commandline/export/) command must be used instead of [docker save](https://docs.docker.com/engine/reference/commandline/save/). 128 | ``` 129 | In Windows: 130 | docker export -o C:\tmp\fire-python-contents.tar tmpimage 131 | 132 | In MacOS/Linux: 133 | docker export -o /tmp/fire-python-contents.tar tmpimage 134 | ``` 135 | 136 | 1. Remove the Docker container instance. 137 | ``` 138 | docker rm tmpimage 139 | ``` 140 | 141 | 1. 
Start a Ubuntu Linux Docker container, sharing the tmp folder. 142 | ``` 143 | In Windows: 144 | docker run -it --rm --privileged=true -v C:\tmp:/tmp ubuntu 145 | 146 | In MacOS/Linux: 147 | docker run -it --rm --privileged=true -v /tmp:/tmp ubuntu 148 | ``` 149 | 150 | The following options are used: 151 | ``` 152 | -it Use an interactive terminal with command line input 153 | --rm Delete the container after exiting 154 | --privileged=true Use extended privileges to allow mount commands 155 | -v /tmp:/tmp Share volume (folder) from host to container 156 | ``` 157 | 158 | 1. Create a blank chroot file with an ext3 file system 450 MB in size. The total file size is the product of the number of blocks (count) and the block size (bs). However, the available space is ~30 MB less than the file size due to file system overhead. The available space must be greater than the size of the tar archive above, with sufficient additional space (~10%) for temporary files that may be created during image reconstruction. 159 | ``` 160 | dd if=/dev/zero of=/tmp/fire-python-chroot.img bs=1M count=450 161 | mke2fs -F -t ext3 /tmp/fire-python-chroot.img 162 | ``` 163 | 164 | 1. Mount the chroot file. If not using Docker, add “sudo” before the mount command. 165 | ``` 166 | mkdir /mnt/chroot 167 | mount -o loop /tmp/fire-python-chroot.img /mnt/chroot 168 | ``` 169 | 170 | 1. Extract the image contents into the mounted chroot image. 171 | ``` 172 | tar -xvf /tmp/fire-python-contents.tar --directory=/mnt/chroot 173 | ``` 174 | 175 | 1. Verify the amount of free space available on the chroot image by running ``df -h`` (52 MB in the below): 176 | ``` 177 | root@0cdce2f7e3cf:/# df -h 178 | Filesystem Size Used Avail Use% Mounted on 179 | /dev/loop0 396M 324M 52M 87% /mnt/chroot 180 | ``` 181 | 182 | 1. Unmount the chroot image. 183 | ``` 184 | umount /mnt/chroot 185 | ``` 186 | 187 | 1. (Optional) The chroot is highly compressible using the zip file format. 
188 | ``` 189 | zip -j /tmp/fire-python-chroot.zip /tmp/fire-python-chroot.img 190 | ``` 191 | 192 | 1. Exit the Docker container instance if started in step 4. 193 | ``` 194 | exit 195 | ``` 196 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # ----- 1. First stage to build ismrmrd and siemens_to_ismrmrd ----- 2 | FROM python:3.12.0-slim AS mrd_converter 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | ENV TZ=America/Chicago 5 | 6 | RUN apt-get update && apt-get install -y git cmake g++ libhdf5-dev libxml2-dev libxslt1-dev libboost-all-dev libfftw3-dev libpugixml-dev 7 | RUN mkdir -p /opt/code 8 | 9 | # ISMRMRD library 10 | RUN cd /opt/code && \ 11 | git clone https://github.com/ismrmrd/ismrmrd.git && \ 12 | cd ismrmrd && \ 13 | git checkout d364e03 && \ 14 | mkdir build && \ 15 | cd build && \ 16 | cmake ../ && \ 17 | make -j $(nproc) && \ 18 | make install 19 | 20 | # siemens_to_ismrmrd converter 21 | RUN cd /opt/code && \ 22 | git clone https://github.com/ismrmrd/siemens_to_ismrmrd.git && \ 23 | cd siemens_to_ismrmrd && \ 24 | git checkout v1.2.11 && \ 25 | mkdir build && \ 26 | cd build && \ 27 | cmake ../ && \ 28 | make -j $(nproc) && \ 29 | make install 30 | 31 | # Create archive of ISMRMRD libraries (including symlinks) for second stage 32 | RUN cd /usr/local/lib && tar -czvf libismrmrd.tar.gz libismrmrd* 33 | 34 | # ----- 2. 
Create a devcontainer without all of the build dependencies of MRD ----- 35 | FROM python:3.12.0-slim AS python-mrd-devcontainer 36 | 37 | LABEL org.opencontainers.image.description="Python MRD Image Reconstruction and Analysis Server" 38 | LABEL org.opencontainers.image.url="https://github.com/kspaceKelvin/python-ismrmrd-server" 39 | LABEL org.opencontainers.image.authors="Kelvin Chow (kelvin.chow@siemens-healthineers.com)" 40 | 41 | # Copy ISMRMRD files from last stage 42 | COPY --from=mrd_converter /usr/local/include/ismrmrd /usr/local/include/ismrmrd/ 43 | COPY --from=mrd_converter /usr/local/share/ismrmrd /usr/local/share/ismrmrd/ 44 | COPY --from=mrd_converter /usr/local/bin/ismrmrd* /usr/local/bin/ 45 | COPY --from=mrd_converter /usr/local/lib/libismrmrd.tar.gz /usr/local/lib/ 46 | RUN cd /usr/local/lib && tar -zxvf libismrmrd.tar.gz && rm libismrmrd.tar.gz && ldconfig 47 | 48 | # Copy siemens_to_ismrmrd from last stage 49 | COPY --from=mrd_converter /usr/local/bin/siemens_to_ismrmrd /usr/local/bin/siemens_to_ismrmrd 50 | 51 | # Add dependencies for siemens_to_ismrmrd 52 | RUN apt-get update && apt-get install --no-install-recommends -y libxslt1.1 libhdf5-103 libboost-program-options1.74.0 libpugixml1v5 git dos2unix nano 53 | RUN mkdir -p /opt/code 54 | 55 | # Tell nano to remember its position from the last time it opened a file 56 | RUN echo "set positionlog" > ~/.nanorc 57 | 58 | # Python MRD library 59 | RUN pip3 install h5py==3.10.0 ismrmrd==1.14.1 60 | 61 | RUN cd /opt/code && \ 62 | git clone https://github.com/ismrmrd/ismrmrd-python-tools.git && \ 63 | cd /opt/code/ismrmrd-python-tools && \ 64 | pip3 install --no-cache-dir . 
65 | 66 | # matplotlib is used by rgb.py and provides various visualization tools including colormaps 67 | # pydicom is used by dicom2mrd.py to parse DICOM data 68 | RUN pip3 install --no-cache-dir matplotlib==3.8.2 pydicom==3.0.1 69 | 70 | # Cleanup files not required after installation 71 | RUN apt-get clean && \ 72 | rm -rf /var/lib/apt/lists/* && \ 73 | rm -rf /root/.cache/pip 74 | 75 | # ----- 3. Copy deployed code into the devcontainer for deployment ----- 76 | FROM python-mrd-devcontainer AS python-mrd-runtime 77 | 78 | # If building from the GitHub repo, uncomment the below section, open a command 79 | # prompt in the folder containing this Dockerfile and run the command: 80 | # docker build --no-cache -t kspacekelvin/fire-python ./ 81 | # RUN cd /opt/code && \ 82 | # git clone https://github.com/kspaceKelvin/python-ismrmrd-server.git 83 | 84 | # If doing local development, use this section to copy local code into Docker 85 | # image. From the python-ismrmrd-server folder, uncomment the following lines 86 | # below and run the command: 87 | # docker build --no-cache -t fire-python-custom -f docker/Dockerfile ./ 88 | RUN mkdir -p /opt/code/python-ismrmrd-server 89 | COPY . /opt/code/python-ismrmrd-server 90 | 91 | # Throw an explicit error if docker build is run from the folder *containing* 92 | # python-ismrmrd-server instead of within it (i.e. 
old method) 93 | RUN if [ -d /opt/code/python-ismrmrd-server/python-ismrmrd-server ]; then echo "docker build should be run inside of python-ismrmrd-server instead of one directory up"; exit 1; fi 94 | 95 | # Ensure startup scripts have Unix (LF) line endings, which may not be true 96 | # if the git repo is cloned in Windows 97 | RUN find /opt/code/python-ismrmrd-server -name "*.sh" | xargs dos2unix 98 | 99 | # Ensure startup scripts are marked as executable, which may be lost if files 100 | # are copied in Windows 101 | RUN find /opt/code/python-ismrmrd-server -name "*.sh" -exec chmod +x {} \; 102 | 103 | # Set the starting directory so that code can use relative paths 104 | WORKDIR /opt/code/python-ismrmrd-server 105 | 106 | CMD [ "python3", "/opt/code/python-ismrmrd-server/main.py", "-v", "-H=0.0.0.0", "-p=9002", "-l=/tmp/python-ismrmrd-server.log", "--defaultConfig=invertcontrast"] 107 | 108 | # Replace the above CMD with this ENTRYPOINT to allow allow "docker stop" 109 | # commands to be passed to the server. 
This is useful for deployments, but 110 | # more annoying for development 111 | # ENTRYPOINT [ "python3", "/opt/code/python-ismrmrd-server/main.py", "-v", "-H=0.0.0.0", "-p=9002", "-l=/tmp/python-ismrmrd-server.log"] -------------------------------------------------------------------------------- /docker/alpine/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.8.2-alpine3.11 AS mrd_converter 2 | 3 | RUN apk --no-cache add hdf5 hdf5-dev --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ \ 4 | && apk --no-cache add cmake build-base boost-dev boost-static libxml2-dev libxslt-dev git fftw-dev 5 | 6 | RUN mkdir -p /opt/code 7 | 8 | # ISMRMRD library 9 | RUN cd /opt/code && \ 10 | git clone https://github.com/ismrmrd/ismrmrd.git && \ 11 | cd ismrmrd && \ 12 | mkdir build && \ 13 | cd build && \ 14 | cmake ../ && \ 15 | make -j $(nproc) && \ 16 | make install 17 | 18 | # siemens_to_ismrmrd converter 19 | RUN cd /opt/code && \ 20 | git clone https://github.com/ismrmrd/siemens_to_ismrmrd.git 21 | 22 | RUN cd /opt/code/siemens_to_ismrmrd && \ 23 | mkdir build && \ 24 | cd build && \ 25 | cmake ../ && \ 26 | make -j $(nproc) && \ 27 | make install 28 | 29 | # ----- Start another clean build without all of the build dependencies of siemens_to_ismrmrd ----- 30 | FROM python:3.8.2-alpine3.11 31 | 32 | LABEL org.opencontainers.image.description="Python MRD Image Reconstruction and Analysis Server (Alpine Linux)" 33 | LABEL org.opencontainers.image.url="https://github.com/kspaceKelvin/python-ismrmrd-server" 34 | LABEL org.opencontainers.image.authors="Kelvin Chow (kelvin.chow@siemens-healthineers.com)" 35 | 36 | # Copy siemens_to_ismrmrd from last stage and re-add necessary dependencies 37 | COPY --from=mrd_converter /usr/local/bin/siemens_to_ismrmrd /usr/local/bin/siemens_to_ismrmrd 38 | COPY --from=mrd_converter /usr/local/lib/libismrmrd* /usr/local/lib/ 39 | RUN apk --no-cache add hdf5 --repository 
http://dl-cdn.alpinelinux.org/alpine/edge/community/ \ 40 | && apk --no-cache add libxslt 41 | 42 | # Dependencies for Python MRD server 43 | RUN apk --no-cache add hdf5 hdf5-dev --repository http://dl-cdn.alpinelinux.org/alpine/edge/community/ \ 44 | && apk --no-cache add bash git openssh gcc libc-dev libxslt-dev libxml2-dev \ 45 | && pip install --no-cache-dir h5py 46 | 47 | RUN mkdir -p /opt/code 48 | 49 | RUN cd /opt/code \ 50 | && git clone https://github.com/ismrmrd/ismrmrd-python.git \ 51 | && cd /opt/code/ismrmrd-python \ 52 | && pip3 install --no-cache-dir . 53 | 54 | RUN cd /opt/code \ 55 | && git clone https://github.com/ismrmrd/ismrmrd-python-tools.git \ 56 | && cd /opt/code/ismrmrd-python-tools \ 57 | && pip3 install --no-cache-dir . 58 | 59 | # If building from the GitHub repo, uncomment the below section, open a command 60 | # prompt in the folder containing this Dockerfile and run the command: 61 | # docker build --no-cache -t kspacekelvin/fire-python ./ 62 | # RUN cd /opt/code && \ 63 | # git clone https://github.com/kspaceKelvin/python-ismrmrd-server.git 64 | 65 | # If doing local development, use this section to copy local code into Docker 66 | # image. From the python-ismrmrd-server folder, uncomment the following lines 67 | # below and run the command: 68 | # docker build --no-cache -t fire-python-custom -f docker/Dockerfile ./ 69 | RUN mkdir -p /opt/code/python-ismrmrd-server 70 | COPY . /opt/code/python-ismrmrd-server 71 | 72 | # Throw an explicit error if docker build is run from the folder *containing* 73 | # python-ismrmrd-server instead of within it (i.e. 
old method) 74 | RUN if [ -d /opt/code/python-ismrmrd-server/python-ismrmrd-server ]; then echo "docker build should be run inside of python-ismrmrd-server instead of one directory up"; exit 1; fi 75 | 76 | # Ensure startup scripts have Unix (LF) line endings, which may not be true 77 | # if the git repo is cloned in Windows 78 | RUN apt-get install -y dos2unix \ 79 | && find /opt/code/python-ismrmrd-server -name *.sh | xargs dos2unix \ 80 | && apt-get remove dos2unix -y 81 | 82 | # Ensure startup scripts are marked as executable, which may be lost if files 83 | # are copied in Windows 84 | RUN find /opt/code/python-ismrmrd-server -name *.sh -exec chmod +x {} \; 85 | 86 | # Cleanup files not required after compiling 87 | RUN apk del gcc git \ 88 | && rm -r /root/.cache/pip 89 | 90 | # Set the starting directory so that code can use relative paths 91 | WORKDIR /opt/code/python-ismrmrd-server 92 | 93 | CMD [ "python3", "/opt/code/python-ismrmrd-server/main.py", "-v", "-H=0.0.0.0", "-p=9002", "-l=/tmp/python-ismrmrd-server.log"] 94 | -------------------------------------------------------------------------------- /docker/bart/Dockerfile: -------------------------------------------------------------------------------- 1 | # ----- First stage to build BART ----- 2 | FROM python:3.9-slim AS bart_build 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | ENV TZ=America/Chicago 5 | 6 | RUN apt-get update && apt-get install -y git cmake g++ libfftw3-dev liblapacke-dev libpng-dev gfortran 7 | RUN mkdir -p /opt/code 8 | 9 | # BART (static linked) 10 | RUN cd /opt/code && \ 11 | git clone https://github.com/mrirecon/bart.git --branch v0.7.00 && \ 12 | cd bart && \ 13 | make SLINK=1 -j $(nproc) && \ 14 | make install 15 | 16 | # ----- Main stage without build dependencies ----- 17 | # Re-use already built Docker image, but the contents of \docker\Dockerfile can also be 18 | # recapitulated here instead to ensure the latest build 19 | FROM kspacekelvin/fire-python 20 | ENV 
PYTHONPATH=/opt/code/bart/python 21 | 22 | # Copy BART from previous stage 23 | COPY --from=bart_build /usr/local/bin/bart /usr/local/bin/ 24 | COPY --from=bart_build /usr/local/lib/bart/commands /usr/local/lib/bart/commands 25 | COPY --from=bart_build /usr/local/share/doc/bart /usr/local/share/doc/bart 26 | COPY --from=bart_build /opt/code/bart /opt/code/bart 27 | -------------------------------------------------------------------------------- /docker/docker_tar_to_chroot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script takes a Docker container export (.tar) creates a chroot image (.img) 3 | # Note that root privileges are required to mount the loopback images 4 | 5 | # Syntax: ./docker_tar_to_chroot.sh docker-export.tar chroot.img optional_buffer_size_in_mb 6 | 7 | EXPORT_FILE=${1} 8 | CHROOT_FILE=${2} 9 | BUFFER_MB=${3:-50} 10 | 11 | # Files have a minimum storage size of 4k due to block size 12 | exportSize=$(tar -tvf "${EXPORT_FILE}" | awk '{s+=int($3/4096+0.99999)*4096} END{printf "%.0f\n", s}') 13 | 14 | # Add a minimum buffer of free space to account for filesystem overhead 15 | chrootMinSize=$(( exportSize/(1024*1024) * 115/100 + ${BUFFER_MB})) 16 | 17 | # Round up to the nearest 100 MB 18 | chrootSize=$(( ((${chrootMinSize%.*})/100+1)*100 )) 19 | 20 | echo ---------------------------------------------------------------------- 21 | echo Total size of files from Docker image is $(( exportSize/(1024*1024) )) MB with ${BUFFER_MB} MB of buffer 22 | echo Creating chroot file ${CHROOT_FILE} of size ${chrootSize} MB 23 | echo ---------------------------------------------------------------------- 24 | 25 | if test -f "${CHROOT_FILE}"; then 26 | echo "Warning -- ${CHROOT_FILE} exists and will be overwritten!" 
27 | rm ${CHROOT_FILE} 28 | fi 29 | 30 | # Create blank ext3 chroot image 31 | dd if=/dev/zero of=${CHROOT_FILE} bs=1M count=${chrootSize} 32 | mke2fs -F -t ext3 ${CHROOT_FILE} 33 | 34 | # Mount image and copy contents from tar export 35 | echo Copying files to chroot image -- please wait... 36 | mkdir /mnt/chroot 37 | mount -o loop ${CHROOT_FILE} /mnt/chroot 38 | tar -xf ${EXPORT_FILE} --directory=/mnt/chroot --totals 39 | 40 | # Show the amount of free space left on the chroot 41 | df -h 42 | 43 | umount /mnt/chroot 44 | 45 | echo Finished! Verify that no errors have occured and that available space on the 46 | echo last row of the above df output is greater than 100 MB 47 | -------------------------------------------------------------------------------- /docker/docker_to_chroot.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | setlocal enabledelayedexpansion 3 | 4 | rem This script takes a Docker image and creates a chroot image (.img) 5 | rem Note that this script also requires docker_tar_to_chroot.sh to be located in the same folder 6 | 7 | rem Syntax: docker_to_chroot.bat kspacekelvin/fire-python fire-python-chroot.img optional_buffer_size_in_mb 8 | 9 | if "%1"=="" GOTO wrongargnum 10 | if "%2"=="" GOTO wrongargnum 11 | 12 | set DOCKER_NAME=%1 13 | set CHROOT_FILE=%2 14 | set EXPORT_FILE=docker-export.tar 15 | 16 | if "%3"=="" ( 17 | set BUFFER_SIZE=50 18 | ) else ( 19 | set BUFFER_SIZE=%3 20 | ) 21 | 22 | if exist %EXPORT_FILE% ( 23 | echo Warning -- %EXPORT_FILE% exists and will be overwritten! 
24 | del %EXPORT_FILE% 25 | ) 26 | 27 | rem Create a Docker container and export to a .tar file 28 | echo ------------------------------------------------------------ 29 | echo Exporting Docker image %DOCKER_NAME% 30 | echo ------------------------------------------------------------ 31 | 32 | docker create --name tmpimage %DOCKER_NAME% 33 | docker export -o %EXPORT_FILE% tmpimage 34 | docker rm tmpimage 35 | 36 | rem Run a privileged Docker to create the chroot file 37 | docker run -it --rm ^ 38 | --privileged=true ^ 39 | -v "%cd%":/share ^ 40 | ubuntu ^ 41 | /bin/bash -c "sed -i -e 's/\r//g' /share/docker_tar_to_chroot.sh && /share/docker_tar_to_chroot.sh /share/%EXPORT_FILE% /share/%CHROOT_FILE% !BUFFER_SIZE!" 42 | 43 | del %EXPORT_FILE% 44 | goto eof 45 | 46 | :wrongargnum 47 | echo Syntax: docker_to_chroot.bat docker_image_name chroot_file_name optional_buffer_size_in_mb 48 | 49 | :eof 50 | -------------------------------------------------------------------------------- /docker/docker_to_chroot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # This script takes a Docker image and creates a chroot image (.img) 3 | 4 | # Syntax: ./docker_to_chroot.sh docker_image_name chroot_file_name optional_buffer_size_in_mb 5 | 6 | if [[ $# -gt 3 ]]; then 7 | echo "Wrong number of arguments" >&2 8 | echo "Syntax: ./docker_to_chroot.sh docker_image_name chroot_file_name optional_buffer_size_in_mb" >&2 9 | exit 2 10 | fi 11 | 12 | DOCKER_NAME=${1} 13 | CHROOT_FILE=${2} 14 | EXPORT_FILE=docker-export.tar 15 | BUFFER_MB=${3:-50} 16 | 17 | # Create a Docker container and export to a .tar file 18 | echo ------------------------------------------------------------ 19 | echo Exporting Docker image ${DOCKER_NAME} 20 | echo ------------------------------------------------------------ 21 | 22 | if test -f "${EXPORT_FILE}"; then 23 | echo "Warning -- ${EXPORT_FILE} exists and will be overwritten!" 
24 | rm ${EXPORT_FILE} 25 | fi 26 | 27 | docker create --name tmpimage ${DOCKER_NAME} 28 | docker export -o ${EXPORT_FILE} tmpimage 29 | docker rm tmpimage 30 | 31 | # Run a privileged Docker to create the chroot file 32 | docker run -it --rm \ 33 | --privileged=true \ 34 | -v $(pwd):/share \ 35 | ubuntu \ 36 | /bin/bash -c "sed -i -e 's/\r//g' /share/docker_tar_to_chroot.sh && /share/docker_tar_to_chroot.sh /share/${EXPORT_FILE} /share/${CHROOT_FILE} ${BUFFER_MB}" 37 | 38 | rm ${EXPORT_FILE} 39 | -------------------------------------------------------------------------------- /docker/pytorch/Dockerfile: -------------------------------------------------------------------------------- 1 | # ----- 1. Create devcontainer image ----- 2 | # Start from the fire-python devcontainer image that has all base dependencies 3 | # Note: This does not contain the actual code for python-ismrmrd-server! 4 | FROM kspacekelvin/fire-python-devcon AS python-mrd-pytorch-devcontainer 5 | 6 | # Install PyTorch and its dependencies 7 | RUN pip3 install --no-cache-dir torch torchvision \ 8 | && rm -rf /root/.cache/pip 9 | 10 | # ----- 2. Copy deployed code into the devcontainer for deployment ----- 11 | FROM python-mrd-pytorch-devcontainer as python-mrd-pytorch-runtime 12 | 13 | # If building from the GitHub repo, uncomment the below section, open a command 14 | # prompt in the folder containing this Dockerfile and run the command: 15 | # docker build --no-cache -t kspacekelvin/fire-python ./ 16 | # RUN cd /opt/code && \ 17 | # git clone https://github.com/kspaceKelvin/python-ismrmrd-server.git 18 | 19 | # If doing local development, use this section to copy local code into Docker 20 | # image. From the python-ismrmrd-server folder, uncomment the following lines 21 | # below and run the command: 22 | # docker build --no-cache -t fire-python-custom -f docker/Dockerfile ./ 23 | RUN mkdir -p /opt/code/python-ismrmrd-server 24 | COPY . 
/opt/code/python-ismrmrd-server 25 | 26 | # Throw an explicit error if docker build is run from the folder *containing* 27 | # python-ismrmrd-server instead of within it (i.e. old method) 28 | RUN if [ -d /opt/code/python-ismrmrd-server/python-ismrmrd-server ]; then echo "docker build should be run inside of python-ismrmrd-server instead of one directory up"; exit 1; fi 29 | 30 | # Ensure startup scripts have Unix (LF) line endings, which may not be true 31 | # if the git repo is cloned in Windows 32 | RUN find /opt/code/python-ismrmrd-server -name "*.sh" | xargs dos2unix 33 | 34 | # Ensure startup scripts are marked as executable, which may be lost if files 35 | # are copied in Windows 36 | RUN find /opt/code/python-ismrmrd-server -name "*.sh" -exec chmod +x {} \; 37 | 38 | # Set the starting directory so that code can use relative paths 39 | WORKDIR /opt/code/python-ismrmrd-server 40 | 41 | CMD [ "python3", "/opt/code/python-ismrmrd-server/main.py", "-v", "-H=0.0.0.0", "-p=9002", "-l=/tmp/python-ismrmrd-server.log", "--defaultConfig=invertcontrast"] 42 | 43 | # Replace the above CMD with this ENTRYPOINT to allow "docker stop"
This is useful for deployments, but 45 | # more annoying for development 46 | # ENTRYPOINT [ "python3", "/opt/code/python-ismrmrd-server/main.py", "-v", "-H=0.0.0.0", "-p=9002", "-l=/tmp/python-ismrmrd-server.log"] -------------------------------------------------------------------------------- /docker/pytorch/Dockerfile_standalone: -------------------------------------------------------------------------------- 1 | # ----- First stage to build ismrmrd and siemens_to_ismrmrd ----- 2 | FROM python:3.10.2-slim AS mrd_converter 3 | ARG DEBIAN_FRONTEND=noninteractive 4 | ENV TZ=America/Chicago 5 | 6 | RUN apt-get update && apt-get install -y git cmake g++ libhdf5-dev libxml2-dev libxslt1-dev libboost-all-dev libfftw3-dev 7 | RUN mkdir -p /opt/code 8 | 9 | # ISMRMRD library 10 | RUN cd /opt/code && \ 11 | git clone https://github.com/ismrmrd/ismrmrd.git && \ 12 | cd ismrmrd && \ 13 | mkdir build && \ 14 | cd build && \ 15 | cmake ../ && \ 16 | make -j $(nproc) && \ 17 | make install 18 | 19 | # siemens_to_ismrmrd converter 20 | RUN cd /opt/code && \ 21 | git clone https://github.com/ismrmrd/siemens_to_ismrmrd.git && \ 22 | cd siemens_to_ismrmrd && \ 23 | mkdir build && \ 24 | cd build && \ 25 | cmake ../ && \ 26 | make -j $(nproc) && \ 27 | make install 28 | 29 | # Create archive of ISMRMRD libraries (including symlinks) for second stage 30 | RUN cd /usr/local/lib && tar -czvf libismrmrd.tar.gz libismrmrd* 31 | 32 | # ----- Start another clean build without all of the build dependencies of siemens_to_ismrmrd ----- 33 | FROM python:3.10.2-slim 34 | 35 | LABEL org.opencontainers.image.description="Python MRD Image Reconstruction and Analysis Server" 36 | LABEL org.opencontainers.image.url="https://github.com/kspaceKelvin/python-ismrmrd-server" 37 | LABEL org.opencontainers.image.authors="Kelvin Chow (kelvin.chow@siemens-healthineers.com)" 38 | 39 | # Copy ISMRMRD files from last stage 40 | COPY --from=mrd_converter /usr/local/include/ismrmrd /usr/local/include/ismrmrd/ 41 
| COPY --from=mrd_converter /usr/local/share/ismrmrd /usr/local/share/ismrmrd/ 42 | COPY --from=mrd_converter /usr/local/bin/ismrmrd* /usr/local/bin/ 43 | COPY --from=mrd_converter /usr/local/lib/libismrmrd.tar.gz /usr/local/lib/ 44 | RUN cd /usr/local/lib && tar -zxvf libismrmrd.tar.gz && rm libismrmrd.tar.gz && ldconfig 45 | 46 | # Copy siemens_to_ismrmrd from last stage 47 | COPY --from=mrd_converter /usr/local/bin/siemens_to_ismrmrd /usr/local/bin/siemens_to_ismrmrd 48 | 49 | # xslt and hdf5 are dependencies for siemens_to_ismrmrd 50 | RUN apt-get update && apt-get install --no-install-recommends -y libxslt1.1 libhdf5-103 git dos2unix nano 51 | 52 | # Tell nano to remember its position from the last time it opened a file 53 | RUN echo "set positionlog" > ~/.nanorc 54 | 55 | # Install PyTorch and its dependencies 56 | COPY python-ismrmrd-server/docker/pytorch/requirements.txt / 57 | RUN apt-get install -y git && \ 58 | apt-get install -y libglib2.0-0 && \ 59 | apt install libgl1-mesa-glx -y && \ 60 | pip3 install --no-cache-dir -r requirements.txt && \ 61 | rm requirements.txt && \ 62 | rm -r /root/.cache/pip 63 | 64 | RUN mkdir -p /opt/code 65 | 66 | RUN cd /opt/code \ 67 | && git clone https://github.com/ismrmrd/ismrmrd-python.git \ 68 | && cd /opt/code/ismrmrd-python \ 69 | && pip3 install --no-cache-dir . 70 | 71 | RUN cd /opt/code \ 72 | && git clone https://github.com/ismrmrd/ismrmrd-python-tools.git \ 73 | && cd /opt/code/ismrmrd-python-tools \ 74 | && pip3 install --no-cache-dir . 75 | 76 | # If building from the GitHub repo, uncomment the below section, open a command 77 | # prompt in the folder containing this Dockerfile and run the command: 78 | # docker build --no-cache -t kspacekelvin/fire-python ./ 79 | # RUN cd /opt/code && \ 80 | # git clone https://github.com/kspaceKelvin/python-ismrmrd-server.git 81 | 82 | # If doing local development, use this section to copy local code into Docker 83 | # image. 
From the python-ismrmrd-server folder, uncomment the following lines 84 | # below and run the command: 85 | # docker build --no-cache -t fire-python-custom -f docker/Dockerfile ./ 86 | RUN mkdir -p /opt/code/python-ismrmrd-server 87 | COPY . /opt/code/python-ismrmrd-server 88 | 89 | # Throw an explicit error if docker build is run from the folder *containing* 90 | # python-ismrmrd-server instead of within it (i.e. old method) 91 | RUN if [ -d /opt/code/python-ismrmrd-server/python-ismrmrd-server ]; then echo "docker build should be run inside of python-ismrmrd-server instead of one directory up"; exit 1; fi 92 | 93 | # Ensure startup scripts have Unix (LF) line endings, which may not be true 94 | # if the git repo is cloned in Windows 95 | RUN find /opt/code/python-ismrmrd-server -name "*.sh" | xargs dos2unix 96 | 97 | # Ensure startup scripts are marked as executable, which may be lost if files 98 | # are copied in Windows 99 | RUN find /opt/code/python-ismrmrd-server -name "*.sh" -exec chmod +x {} \; 100 | 101 | CMD [ "python3", "/opt/code/python-ismrmrd-server/main.py", "-v", "-H=0.0.0.0", "-p=9002", "-l=/tmp/python-ismrmrd-server.log"] -------------------------------------------------------------------------------- /docker/pytorch/requirements.txt: -------------------------------------------------------------------------------- 1 | cython 2 | pyxb 3 | opencv-python 4 | pydicom 5 | scipy 6 | matplotlib 7 | numpy 8 | scikit_image 9 | torchvision 10 | torch 11 | pytorch_lightning 12 | h5py 13 | 14 | 15 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: mrd 2 | channels: 3 | - ismrmrd 4 | - conda-forge 5 | dependencies: 6 | - python=3.10.13 7 | - ismrmrd::ismrmrd-python=1.14.1 8 | - ismrmrd::siemens_to_ismrmrd=1.2.11 9 | - boost=1.76.0 # For ismrmrd 10 | - libxml2=2.9.12 # For ismrmrd 11 | - h5py=3.7.0 # For ismrmrd 12 | - 
matplotlib=3.8.2 # used by rgb.py and provides various visualization tools including colormaps 13 | - pydicom=3.0.1 # used by dicom2mrd.py to parse DICOM data 14 | - numpy=1.26.0 15 | - git=2.30.2 16 | - m2-dos2unix=7.3.3 # For fixing line ending issues from Windows -------------------------------------------------------------------------------- /environment_windows.yml: -------------------------------------------------------------------------------- 1 | name: mrd 2 | channels: 3 | - ismrmrd 4 | - conda-forge 5 | dependencies: 6 | - python=3.10.13 7 | # - ismrmrd::ismrmrd-python=1.14.1 # Package not available on Windows -- use pip install instead 8 | # - ismrmrd::siemens_to_ismrmrd=1.2.11 # Package not available on Windows 9 | - boost=1.76.0 # For ismrmrd 10 | - libxml2=2.9.12 # For ismrmrd 11 | - h5py=3.7.0 # For ismrmrd 12 | - matplotlib=3.8.2 # used by rgb.py and provides various visualization tools including colormaps 13 | - pydicom=3.0.1 # used by dicom2mrd.py to parse DICOM data 14 | - numpy=1.26.0 15 | - git=2.30.2 16 | - m2-dos2unix=7.3.3 # For fixing line ending issues from Windows -------------------------------------------------------------------------------- /generate_cartesian_shepp_logan_dataset.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | import os 3 | import ismrmrd 4 | import ismrmrd.xsd 5 | from ismrmrdtools import simulation, transform 6 | import numpy as np 7 | import argparse 8 | 9 | def create(filename='testdata.h5', matrix_size=256, coils=8, oversampling=2, repetitions=1, acceleration=1, noise_level=0.05): 10 | print("Creating Shepp-Logan phantom raw data:") 11 | print("Matrix %dx%d at R=%d with %d repetitions" % (matrix_size, matrix_size, acceleration, repetitions)) 12 | print("%d coils with %d oversampling and %1.2f noise level" % (coils, oversampling, noise_level)) 13 | 14 | # Generate the phantom and coil sensitivity maps 15 | phan = simulation.phantom(matrix_size) 16 | csm = 
simulation.generate_birdcage_sensitivities(matrix_size, coils) 17 | coil_images = np.tile(phan,(coils, 1, 1)) * csm 18 | 19 | # Oversample if needed 20 | if oversampling>1: 21 | padding = round((oversampling*phan.shape[1] - phan.shape[1])/2) 22 | phan = np.pad(phan,((0,0),(padding,padding)),mode='constant') 23 | csm = np.pad(csm,((0,0),(0,0),(padding,padding)),mode='constant') 24 | coil_images = np.pad(coil_images,((0,0),(0,0),(padding,padding)),mode='constant') 25 | 26 | # The number of points in x,y,kx,ky 27 | nx = matrix_size 28 | ny = matrix_size 29 | nkx = oversampling*nx 30 | nky = ny 31 | 32 | # Open the dataset 33 | dset = ismrmrd.Dataset(filename, "dataset", create_if_needed=True) 34 | 35 | # Create the XML header and write it to the file 36 | header = ismrmrd.xsd.ismrmrdHeader() 37 | 38 | # Experimental Conditions 39 | exp = ismrmrd.xsd.experimentalConditionsType() 40 | exp.H1resonanceFrequency_Hz = 128000000 41 | header.experimentalConditions = exp 42 | 43 | # Acquisition System Information 44 | sys = ismrmrd.xsd.acquisitionSystemInformationType() 45 | sys.receiverChannels = coils 46 | header.acquisitionSystemInformation = sys 47 | 48 | # Encoding 49 | encoding = ismrmrd.xsd.encodingType() 50 | encoding.trajectory = ismrmrd.xsd.trajectoryType('cartesian') 51 | 52 | # encoded and recon spaces 53 | efov = ismrmrd.xsd.fieldOfViewMm() 54 | efov.x = oversampling*256 55 | efov.y = 256 56 | efov.z = 5 57 | rfov = ismrmrd.xsd.fieldOfViewMm() 58 | rfov.x = 256 59 | rfov.y = 256 60 | rfov.z = 5 61 | 62 | ematrix = ismrmrd.xsd.matrixSizeType() 63 | ematrix.x = nkx 64 | ematrix.y = nky 65 | ematrix.z = 1 66 | rmatrix = ismrmrd.xsd.matrixSizeType() 67 | rmatrix.x = nx 68 | rmatrix.y = ny 69 | rmatrix.z = 1 70 | 71 | espace = ismrmrd.xsd.encodingSpaceType() 72 | espace.matrixSize = ematrix 73 | espace.fieldOfView_mm = efov 74 | rspace = ismrmrd.xsd.encodingSpaceType() 75 | rspace.matrixSize = rmatrix 76 | rspace.fieldOfView_mm = rfov 77 | 78 | # Set encoded and recon 
spaces 79 | encoding.encodedSpace = espace 80 | encoding.reconSpace = rspace 81 | 82 | # Encoding limits 83 | limits = ismrmrd.xsd.encodingLimitsType() 84 | 85 | limits1 = ismrmrd.xsd.limitType() 86 | limits1.minimum = 0 87 | limits1.center = round(ny/2) 88 | limits1.maximum = ny - 1 89 | limits.kspace_encoding_step_1 = limits1 90 | 91 | limits_rep = ismrmrd.xsd.limitType() 92 | limits_rep.minimum = 0 93 | limits_rep.center = round(repetitions / 2) 94 | limits_rep.maximum = repetitions - 1 95 | limits.repetition = limits_rep 96 | 97 | limits_rest = ismrmrd.xsd.limitType() 98 | limits_rest.minimum = 0 99 | limits_rest.center = 0 100 | limits_rest.maximum = 0 101 | limits.kspace_encoding_step_0 = limits_rest 102 | limits.slice = limits_rest 103 | limits.average = limits_rest 104 | limits.contrast = limits_rest 105 | limits.kspaceEncodingStep2 = limits_rest 106 | limits.phase = limits_rest 107 | limits.segment = limits_rest 108 | limits.set = limits_rest 109 | 110 | encoding.encodingLimits = limits 111 | header.encoding.append(encoding) 112 | 113 | # User Parameters 114 | user = ismrmrd.xsd.userParametersType() 115 | userParameterLong = ismrmrd.xsd.userParameterLongType() 116 | userParameterLong.name = 'TestLong' 117 | userParameterLong.value = '42' 118 | user.userParameterLong.append(userParameterLong) 119 | userParameterDouble = ismrmrd.xsd.userParameterDoubleType() 120 | userParameterDouble.name = 'TestDouble' 121 | userParameterDouble.value = '3.14159' 122 | user.userParameterDouble.append(userParameterDouble) 123 | userParameterString = ismrmrd.xsd.userParameterStringType() 124 | userParameterString.name = 'TestString' 125 | userParameterString.value = 'This is a test' 126 | user.userParameterString.append(userParameterString) 127 | userParameterBase64 = ismrmrd.xsd.userParameterBase64Type() 128 | userParameterBase64.name = 'TestBase64' 129 | userParameterBase64.value = 'QWxsIHlvdXIgYmFzZSBhcmUgYmVsb25nIHRvIHVz' 130 | 
user.userParameterBase64.append(userParameterBase64) 131 | header.userParameters = user 132 | 133 | dset.write_xml_header(header.toXML('utf-8')) 134 | 135 | # Synthesize the k-space data 136 | Ktrue = transform.transform_image_to_kspace(coil_images,(1,2)) 137 | 138 | # Create an acquistion and reuse it 139 | acq = ismrmrd.Acquisition() 140 | acq.resize(nkx, coils) 141 | acq.version = 1 142 | acq.available_channels = coils 143 | acq.center_sample = round(nkx/2) 144 | acq.read_dir[0] = 1.0 145 | acq.phase_dir[1] = 1.0 146 | acq.slice_dir[2] = 1.0 147 | 148 | # Initialize an acquisition counter 149 | counter = 0 150 | 151 | # Write out a few noise scans 152 | for n in range(32): 153 | noise = noise_level * (np.random.randn(coils, nkx) + 1j * np.random.randn(coils, nkx)) 154 | # here's where we would make the noise correlated 155 | acq.scan_counter = counter 156 | acq.clearAllFlags() 157 | acq.setFlag(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) 158 | acq.data[:] = noise 159 | dset.append_acquisition(acq) 160 | counter += 1 # increment the scan counter 161 | 162 | # Loop over the repetitions, add noise and write to disk 163 | # simulating a T-SENSE type scan 164 | for rep in range(repetitions): 165 | noise = noise_level * (np.random.randn(coils, nky, nkx) + 1j * np.random.randn(coils, nky, nkx)) 166 | # here's where we would make the noise correlated 167 | K = Ktrue + noise 168 | acq.idx.repetition = rep 169 | for acc in range(acceleration): 170 | for line in np.arange(acc,nky,acceleration): 171 | # set some fields in the header 172 | acq.scan_counter = counter 173 | acq.idx.kspace_encode_step_1 = line 174 | acq.clearAllFlags() 175 | if line == 0: 176 | acq.setFlag(ismrmrd.ACQ_FIRST_IN_ENCODE_STEP1) 177 | acq.setFlag(ismrmrd.ACQ_FIRST_IN_SLICE) 178 | acq.setFlag(ismrmrd.ACQ_FIRST_IN_REPETITION) 179 | elif line == nky - 1: 180 | acq.setFlag(ismrmrd.ACQ_LAST_IN_ENCODE_STEP1) 181 | acq.setFlag(ismrmrd.ACQ_LAST_IN_SLICE) 182 | acq.setFlag(ismrmrd.ACQ_LAST_IN_REPETITION) 183 | # set 
the data and append 184 | acq.data[:] = K[:,line,:] 185 | dset.append_acquisition(acq) 186 | counter += 1 187 | 188 | # Clean up 189 | dset.close() 190 | print("Saved to %s" % (filename)) 191 | 192 | def main(): 193 | parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) 194 | parser.add_argument('-o', '--output', help='output filename') 195 | parser.add_argument('-m', '--matrix-size', type=int, dest='matrix_size', help='k-space matrix size') 196 | parser.add_argument('-c', '--coils', type=int, help='number of coils') 197 | parser.add_argument('-s', '--oversampling', type=int, help='oversampling') 198 | parser.add_argument('-r', '--repetitions', type=int, help='number of repetitions') 199 | parser.add_argument('-a', '--acceleration', type=int, help='acceleration') 200 | parser.add_argument('-n', '--noise-level', type=float, dest='noise_level', help='noise level') 201 | 202 | parser.set_defaults(output='testdata.h5', matrix_size=256, coils=8, 203 | oversampling=2, repetitions=1, acceleration=1, noise_level=0.05) 204 | 205 | args = parser.parse_args() 206 | 207 | create(args.output, args.matrix_size, args.coils, args.oversampling, 208 | args.repetitions, args.acceleration, args.noise_level) 209 | 210 | if __name__ == "__main__": 211 | main() 212 | -------------------------------------------------------------------------------- /invertcontrast.json: -------------------------------------------------------------------------------- 1 | { 2 | "version": "1.1.0", 3 | "parameters": { 4 | "options": "", 5 | "sendOriginal": "False" 6 | } 7 | } -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from server import Server 4 | 5 | import argparse 6 | import logging 7 | import sys 8 | import os 9 | import signal 10 | 11 | defaults = { 12 | 'host': '0.0.0.0', 13 | 'port': 9002, 14 | 
'defaultConfig': 'invertcontrast', 15 | 'savedataFolder': '/tmp/share/saved_data' 16 | } 17 | 18 | def main(args): 19 | # Create a multi-threaded dispatcher to handle incoming connections 20 | server = Server(args.host, args.port, args.defaultConfig, args.savedata, args.savedataFolder, args.multiprocessing) 21 | 22 | # Trap signal interrupts (e.g. ctrl+c, SIGTERM) and gracefully stop 23 | def handle_signals(signum, frame): 24 | print("Received signal interrupt -- stopping server") 25 | server.socket.close() 26 | sys.exit(0) 27 | 28 | signal.signal(signal.SIGTERM, handle_signals) 29 | signal.signal(signal.SIGINT, handle_signals) 30 | 31 | # Start server 32 | server.serve() 33 | 34 | if __name__ == '__main__': 35 | 36 | parser = argparse.ArgumentParser(description='Example server for MRD streaming format', 37 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 38 | 39 | parser.add_argument('-p', '--port', type=int, help='Port') 40 | parser.add_argument('-H', '--host', type=str, help='Host') 41 | parser.add_argument('-d', '--defaultConfig', type=str, help='Default (fallback) config module') 42 | parser.add_argument('-v', '--verbose', action='store_true', help='Verbose output.') 43 | parser.add_argument('-l', '--logfile', type=str, help='Path to log file') 44 | parser.add_argument('-s', '--savedata', action='store_true', help='Save incoming data') 45 | parser.add_argument('-S', '--savedataFolder', type=str, help='Folder to save incoming data') 46 | parser.add_argument('-m', '--multiprocessing', action='store_true', help='Use multiprocessing') 47 | parser.add_argument('-r', '--crlf', action='store_true', help='Use Windows (CRLF) line endings') 48 | 49 | parser.set_defaults(**defaults) 50 | 51 | args = parser.parse_args() 52 | 53 | if args.crlf: 54 | fmt='%(asctime)s - %(message)s\r' 55 | else: 56 | fmt='%(asctime)s - %(message)s' 57 | 58 | if args.logfile: 59 | print("Logging to file:", args.logfile) 60 | 61 | if not os.path.exists(os.path.dirname(args.logfile)): 
62 | os.makedirs(os.path.dirname(args.logfile)) 63 | 64 | logging.basicConfig(filename=args.logfile, format=fmt, level=logging.WARNING) 65 | logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) 66 | else: 67 | print("No logfile provided") 68 | logging.basicConfig(format=fmt, level=logging.WARNING) 69 | 70 | if args.verbose: 71 | logging.root.setLevel(logging.DEBUG) 72 | else: 73 | logging.root.setLevel(logging.INFO) 74 | 75 | main(args) 76 | -------------------------------------------------------------------------------- /mrd2dicom.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import re 5 | import argparse 6 | import h5py 7 | import ismrmrd 8 | import numpy as np 9 | import pydicom 10 | import base64 11 | 12 | # Lookup table between DICOM and MRD mrdImg types 13 | imtype_map = {ismrmrd.IMTYPE_MAGNITUDE : 'M', 14 | ismrmrd.IMTYPE_PHASE : 'P', 15 | ismrmrd.IMTYPE_REAL : 'R', 16 | ismrmrd.IMTYPE_IMAG : 'I', 17 | 0 : 'M'} # Fallback for unset value 18 | 19 | # Lookup table between DICOM and Siemens flow directions 20 | venc_dir_map = {'FLOW_DIR_R_TO_L' : 'rl', 21 | 'FLOW_DIR_L_TO_R' : 'lr', 22 | 'FLOW_DIR_A_TO_P' : 'ap', 23 | 'FLOW_DIR_P_TO_A' : 'pa', 24 | 'FLOW_DIR_F_TO_H' : 'fh', 25 | 'FLOW_DIR_H_TO_F' : 'hf', 26 | 'FLOW_DIR_TP_IN' : 'in', 27 | 'FLOW_DIR_TP_OUT' : 'out'} 28 | 29 | def main(args): 30 | dset = h5py.File(args.filename, 'r') 31 | if not dset: 32 | print("Not a valid dataset: %s" % (args.filename)) 33 | return 34 | 35 | dsetNames = dset.keys() 36 | print("File %s contains %d groups:" % (args.filename, len(dset.keys()))) 37 | print(" ", "\n ".join(dsetNames)) 38 | 39 | if not args.in_group: 40 | if len(dset.keys()) > 1: 41 | print("Input group not specified -- selecting most recent") 42 | args.in_group = list(dset.keys())[-1] 43 | 44 | if not args.out_folder: 45 | args.out_folder = re.sub('.h5$', '', args.filename) 46 | print("Output folder not specified -- 
using %s" % args.out_folder) 47 | 48 | if args.in_group not in dset: 49 | print("Could not find group %s" % (args.in_group)) 50 | return 51 | 52 | if not os.path.exists(args.out_folder): 53 | os.makedirs(args.out_folder) 54 | 55 | group = dset.get(args.in_group) 56 | print("Reading data from group '%s' in file '%s'" % (args.in_group, args.filename)) 57 | 58 | # mrdImg data is stored as: 59 | # /group/config text of recon config parameters (optional) 60 | # /group/xml text of ISMRMRD flexible data header (optional) 61 | # /group/image_0/data array of IsmrmrdImage data 62 | # /group/image_0/header array of ImageHeader 63 | # /group/image_0/attributes text of mrdImg MetaAttributes 64 | 65 | isImage = True 66 | imageNames = group.keys() 67 | print("Found %d mrdImg sub-groups: %s" % (len(imageNames), ", ".join(imageNames))) 68 | 69 | for imageName in imageNames: 70 | if ((imageName == 'xml') or (imageName == 'config') or (imageName == 'config_file')): 71 | continue 72 | 73 | mrdImg = group[imageName] 74 | if not (('data' in mrdImg) and ('header' in mrdImg) and ('attributes' in mrdImg)): 75 | isImage = False 76 | 77 | dset.close() 78 | 79 | if (isImage is False): 80 | print("File does not contain properly formatted MRD raw or mrdImg data") 81 | return 82 | 83 | dset = ismrmrd.Dataset(args.filename, args.in_group, False) 84 | 85 | groups = dset.list() 86 | 87 | if ('xml' in groups): 88 | xml_header = dset.read_xml_header() 89 | xml_header = xml_header.decode("utf-8") 90 | mrdHead = ismrmrd.xsd.CreateFromDocument(xml_header) 91 | else: 92 | mrdHead = ismrmrd.xsd.ismrmrdHeader() 93 | 94 | filesWritten = 0 95 | for group in groups: 96 | if ( (group == 'config') or (group == 'config_file') or (group == 'xml') ): 97 | continue 98 | 99 | print("Reading images from '/" + args.in_group + "/" + group + "'") 100 | 101 | for imgNum in range(0, dset.number_of_images(group)): 102 | mrdImg = dset.read_image(group, imgNum) 103 | meta = ismrmrd.Meta.deserialize(mrdImg.attribute_string) 
104 | 105 | if ((mrdImg.data.shape[0] == 3) and (mrdImg.getHead().image_type == 6)): 106 | # RGB images 107 | print("RGB data not yet supported") 108 | continue 109 | else: 110 | if (mrdImg.data.shape[1] != 1): 111 | print("Multi-slice data not yet supported") 112 | continue 113 | 114 | if (mrdImg.data.shape[0] != 1): 115 | print("Multi-channel data not yet supported") 116 | continue 117 | 118 | # Use previously JSON serialized header as a starting point, if available 119 | if meta.get('DicomJson') is not None: 120 | dicomDset = pydicom.dataset.Dataset.from_json(base64.b64decode(meta['DicomJson'])) 121 | else: 122 | dicomDset = pydicom.dataset.Dataset() 123 | dicomDset = pydicom.dataset.Dataset() 124 | 125 | # Enforce explicit little endian for written DICOM files 126 | dicomDset.file_meta = pydicom.dataset.FileMetaDataset() 127 | dicomDset.file_meta.TransferSyntaxUID = pydicom.uid.ExplicitVRLittleEndian 128 | dicomDset.file_meta.MediaStorageSOPClassUID = pydicom.uid.MRImageStorage 129 | dicomDset.file_meta.MediaStorageSOPInstanceUID = pydicom.uid.generate_uid() 130 | pydicom.dataset.validate_file_meta(dicomDset.file_meta) 131 | # FileMetaInformationGroupLength is still missing? 
132 | dicomDset.is_little_endian = True 133 | dicomDset.is_implicit_VR = False 134 | 135 | # ----- Update DICOM header from MRD header ----- 136 | try: 137 | if mrdHead.measurementInformation is None: 138 | pass 139 | # print(" MRD header does not contain measurementInformation section") 140 | else: 141 | # print("---------- Old -------------------------") 142 | # print("SeriesInstanceUID : %s" % dicomDset.SeriesInstanceUID ) 143 | # print("PatientPosition : %s" % dicomDset.PatientPosition ) 144 | # print("SeriesDescription : %s" % dicomDset.SeriesDescription ) 145 | # print("FrameOfReferenceUID: %s" % dicomDset.FrameOfReferenceUID ) 146 | 147 | if mrdHead.measurementInformation.measurementID is not None: dicomDset.SeriesInstanceUID = mrdHead.measurementInformation.measurementID 148 | if mrdHead.measurementInformation.patientPosition is not None: dicomDset.PatientPosition = mrdHead.measurementInformation.patientPosition.name 149 | if mrdHead.measurementInformation.protocolName is not None: dicomDset.SeriesDescription = mrdHead.measurementInformation.protocolName 150 | if mrdHead.measurementInformation.frameOfReferenceUID is not None: dicomDset.FrameOfReferenceUID = mrdHead.measurementInformation.frameOfReferenceUID 151 | 152 | # print("---------- New -------------------------") 153 | # print("SeriesInstanceUID : %s" % dicomDset.SeriesInstanceUID ) 154 | # print("PatientPosition : %s" % dicomDset.PatientPosition ) 155 | # print("SeriesDescription : %s" % dicomDset.SeriesDescription ) 156 | # print("FrameOfReferenceUID: %s" % dicomDset.FrameOfReferenceUID ) 157 | except: 158 | print("Error setting header information from MRD header's measurementInformation section") 159 | 160 | try: 161 | if mrdHead.acquisitionSystemInformation is None: 162 | pass 163 | else: 164 | # print("---------- Old -------------------------") 165 | # print("mrdHead.acquisitionSystemInformation.systemVendor : %s" % mrdHead.acquisitionSystemInformation.systemVendor ) 166 | # 
print("mrdHead.acquisitionSystemInformation.systemModel : %s" % mrdHead.acquisitionSystemInformation.systemModel ) 167 | # print("mrdHead.acquisitionSystemInformation.systemFieldStrength_T: %s" % mrdHead.acquisitionSystemInformation.systemFieldStrength_T ) 168 | # print("mrdHead.acquisitionSystemInformation.institutionName : %s" % mrdHead.acquisitionSystemInformation.institutionName ) 169 | # print("mrdHead.acquisitionSystemInformation.stationName : %s" % mrdHead.acquisitionSystemInformation.stationName ) 170 | 171 | if mrdHead.acquisitionSystemInformation.systemVendor is not None: dicomDset.Manufacturer = mrdHead.acquisitionSystemInformation.systemVendor 172 | if mrdHead.acquisitionSystemInformation.systemModel is not None: dicomDset.ManufacturerModelName = mrdHead.acquisitionSystemInformation.systemModel 173 | if mrdHead.acquisitionSystemInformation.systemFieldStrength_T is not None: dicomDset.MagneticFieldStrength = mrdHead.acquisitionSystemInformation.systemFieldStrength_T 174 | if mrdHead.acquisitionSystemInformation.institutionName is not None: dicomDset.InstitutionName = mrdHead.acquisitionSystemInformation.institutionName 175 | if mrdHead.acquisitionSystemInformation.stationName is not None: dicomDset.StationName = mrdHead.acquisitionSystemInformation.stationName 176 | 177 | # print("---------- New -------------------------") 178 | # print("mrdHead.acquisitionSystemInformation.systemVendor : %s" % mrdHead.acquisitionSystemInformation.systemVendor ) 179 | # print("mrdHead.acquisitionSystemInformation.systemModel : %s" % mrdHead.acquisitionSystemInformation.systemModel ) 180 | # print("mrdHead.acquisitionSystemInformation.systemFieldStrength_T: %s" % mrdHead.acquisitionSystemInformation.systemFieldStrength_T ) 181 | # print("mrdHead.acquisitionSystemInformation.institutionName : %s" % mrdHead.acquisitionSystemInformation.institutionName ) 182 | # print("mrdHead.acquisitionSystemInformation.stationName : %s" % mrdHead.acquisitionSystemInformation.stationName ) 
183 | except: 184 | print("Error setting header information from MRD header's acquisitionSystemInformation section") 185 | 186 | # Set mrdImg pixel data from MRD mrdImg 187 | dicomDset.PixelData = np.squeeze(mrdImg.data).tobytes() # mrdImg.data is [cha z y x] -- squeeze to [y x] for [row col] 188 | dicomDset.Rows = mrdImg.data.shape[2] 189 | dicomDset.Columns = mrdImg.data.shape[3] 190 | 191 | if (mrdImg.data.dtype == 'uint16') or (mrdImg.data.dtype == 'int16'): 192 | dicomDset.BitsAllocated = 16 193 | dicomDset.BitsStored = 16 194 | dicomDset.HighBit = 15 195 | elif (mrdImg.data.dtype == 'uint32') or (mrdImg.data.dtype == 'int') or (mrdImg.data.dtype == 'float32'): 196 | dicomDset.BitsAllocated = 32 197 | dicomDset.BitsStored = 32 198 | dicomDset.HighBit = 31 199 | elif (mrdImg.data.dtype == 'float64'): 200 | dicomDset.BitsAllocated = 64 201 | dicomDset.BitsStored = 64 202 | dicomDset.HighBit = 63 203 | else: 204 | print("Unsupported data type: ", mrdImg.data.dtype) 205 | 206 | dicomDset.SeriesNumber = mrdImg.image_series_index 207 | dicomDset.InstanceNumber = mrdImg.image_index 208 | 209 | # ----- Set some mandatory default values ----- 210 | if not 'SamplesPerPixel' in dicomDset: 211 | dicomDset.SamplesPerPixel = 1 212 | 213 | if not 'PhotometricInterpretation' in dicomDset: 214 | dicomDset.PhotometricInterpretation = 'MONOCHROME2' 215 | 216 | if not 'PixelRepresentation' in dicomDset: 217 | dicomDset.PixelRepresentation = 0 # Unsigned integer 218 | 219 | if not 'ImageType' in dicomDset: 220 | dicomDset.ImageType = ['ORIGINAL', 'PRIMARY', 'M'] 221 | 222 | if not 'SeriesNumber' in dicomDset: 223 | dicomDset.SeriesNumber = 1 224 | 225 | if not 'SeriesDescription' in dicomDset: 226 | dicomDset.SeriesDescription = '' 227 | 228 | if not 'InstanceNumber' in dicomDset: 229 | dicomDset.InstanceNumber = 1 230 | 231 | # ----- Update DICOM header from MRD ImageHeader ----- 232 | dicomDset.ImageType[2] = imtype_map[mrdImg.image_type] 233 | dicomDset.PixelSpacing = 
[float(mrdImg.field_of_view[0]) / mrdImg.data.shape[2], float(mrdImg.field_of_view[1]) / mrdImg.data.shape[3]] 234 | dicomDset.SliceThickness = mrdImg.field_of_view[2] 235 | dicomDset.ImagePositionPatient = [mrdImg.position[0], mrdImg.position[1], mrdImg.position[2]] 236 | dicomDset.ImageOrientationPatient = [mrdImg.read_dir[0], mrdImg.read_dir[1], mrdImg.read_dir[2], mrdImg.phase_dir[0], mrdImg.phase_dir[1], mrdImg.phase_dir[2]] 237 | 238 | time_sec = mrdImg.acquisition_time_stamp/1000/2.5 239 | hour = int(np.floor(time_sec/3600)) 240 | min = int(np.floor((time_sec - hour*3600)/60)) 241 | sec = time_sec - hour*3600 - min*60 242 | dicomDset.AcquisitionTime = "%02.0f%02.0f%09.6f" % (hour, min, sec) 243 | dicomDset.TriggerTime = mrdImg.physiology_time_stamp[0] / 2.5 244 | 245 | # ----- Update DICOM header from MRD Image MetaAttributes ----- 246 | if meta.get('SeriesDescription') is not None: 247 | dicomDset.SeriesDescription = meta['SeriesDescription'] 248 | 249 | if meta.get('SeriesDescriptionAdditional') is not None: 250 | dicomDset.SeriesDescription = dicomDset.SeriesDescription + meta['SeriesDescriptionAdditional'] 251 | 252 | if meta.get('ImageComment') is not None: 253 | dicomDset.ImageComment = "_".join(meta['ImageComment']) 254 | 255 | if meta.get('ImageType') is not None: 256 | dicomDset.ImageType = meta['ImageType'] 257 | 258 | if (meta.get('ImageRowDir') is not None) and (meta.get('ImageColumnDir') is not None): 259 | dicomDset.ImageOrientationPatient = [float(meta['ImageRowDir'][0]), float(meta['ImageRowDir'][1]), float(meta['ImageRowDir'][2]), float(meta['ImageColumnDir'][0]), float(meta['ImageColumnDir'][1]), float(meta['ImageColumnDir'][2])] 260 | 261 | if meta.get('RescaleIntercept') is not None: 262 | dicomDset.RescaleIntercept = meta['RescaleIntercept'] 263 | 264 | if meta.get('RescaleSlope') is not None: 265 | dicomDset.RescaleSlope = meta['RescaleSlope'] 266 | 267 | if meta.get('WindowCenter') is not None: 268 | dicomDset.WindowCenter = 
meta['WindowCenter'] 269 | 270 | if meta.get('WindowWidth') is not None: 271 | dicomDset.WindowWidth = meta['WindowWidth'] 272 | 273 | if meta.get('EchoTime') is not None: 274 | dicomDset.EchoTime = meta['EchoTime'] 275 | 276 | if meta.get('InversionTime') is not None: 277 | dicomDset.InversionTime = meta['InversionTime'] 278 | 279 | # Unhandled fields: 280 | # LUTFileName 281 | # ROI 282 | 283 | # Write DICOM files 284 | fileName = "%02.0f_%s_%03.0f.dcm" % (dicomDset.SeriesNumber, dicomDset.SeriesDescription, dicomDset.InstanceNumber) 285 | print(" Writing file %s" % fileName) 286 | dicomDset.save_as(os.path.join(args.out_folder, fileName), enforce_file_format=True) 287 | filesWritten += 1 288 | 289 | print("Wrote %d DICOM files to %s" % (filesWritten, args.out_folder)) 290 | return 291 | 292 | if __name__ == '__main__': 293 | parser = argparse.ArgumentParser(description='Convert MRD image file to DICOM files', 294 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 295 | parser.add_argument('filename', help='Input file') 296 | parser.add_argument('-g', '--in-group', help='Input data group') 297 | parser.add_argument('-o', '--out-folder', help='Output folder') 298 | 299 | args = parser.parse_args() 300 | 301 | main(args) 302 | -------------------------------------------------------------------------------- /mrd2gif.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | import os 4 | import argparse 5 | import h5py 6 | import ismrmrd 7 | import numpy as np 8 | import mrdhelper 9 | from PIL import Image, ImageDraw 10 | 11 | defaults = { 12 | 'in_group': '', 13 | 'rescale': 1, 14 | 'mosaic_slices': False 15 | } 16 | 17 | def main(args): 18 | dset = h5py.File(args.filename, 'r') 19 | if not dset: 20 | print("Not a valid dataset: %s" % (args.filename)) 21 | return 22 | 23 | dsetNames = dset.keys() 24 | print("File %s contains %d groups:" % (args.filename, len(dset.keys()))) 25 | print(" ", "\n 
".join(dsetNames)) 26 | 27 | if not args.in_group: 28 | if len(dset.keys()) > 1: 29 | print("Input group not specified -- selecting most recent") 30 | args.in_group = list(dset.keys())[-1] 31 | 32 | if args.in_group not in dset: 33 | print("Could not find group %s" % (args.in_group)) 34 | return 35 | 36 | group = dset.get(args.in_group) 37 | print("Reading data from group '%s' in file '%s'" % (args.in_group, args.filename)) 38 | 39 | # Image data is stored as: 40 | # /group/config text of recon config parameters (optional) 41 | # /group/xml text of ISMRMRD flexible data header (optional) 42 | # /group/image_0/data array of IsmrmrdImage data 43 | # /group/image_0/header array of ImageHeader 44 | # /group/image_0/attributes text of image MetaAttributes 45 | isImage = True 46 | imageNames = group.keys() 47 | print("Found %d image sub-groups: %s" % (len(imageNames), ", ".join(imageNames))) 48 | 49 | for imageName in imageNames: 50 | if ((imageName == 'xml') or (imageName == 'config') or (imageName == 'config_file')): 51 | continue 52 | 53 | image = group[imageName] 54 | if not (('data' in image) and ('header' in image) and ('attributes' in image)): 55 | isImage = False 56 | 57 | dset.close() 58 | 59 | if (isImage is False): 60 | print("File does not contain properly formatted MRD raw or image data") 61 | return 62 | 63 | dset = ismrmrd.Dataset(args.filename, args.in_group, False) 64 | 65 | groups = dset.list() 66 | for group in groups: 67 | if ( (group == 'config') or (group == 'config_file') or (group == 'xml') ): 68 | continue 69 | 70 | print("Reading images from '/" + args.in_group + "/" + group + "'") 71 | 72 | images = [] 73 | rois = [] 74 | heads = [] 75 | metas = [] 76 | for imgNum in range(0, dset.number_of_images(group)): 77 | image = dset.read_image(group, imgNum) 78 | 79 | if ((image.data.shape[0] == 3) and (image.getHead().image_type == 6)): 80 | # RGB images 81 | data = np.squeeze(image.data.transpose((2, 3, 0, 1))) # Transpose to [row col rgb] 82 | data = 
data.astype(np.uint8) # Stored as uint16 as per MRD specification, but uint8 required for PIL 83 | images.append(Image.fromarray(data, mode='RGB')) 84 | else: 85 | data = image.data 86 | if np.any(np.iscomplex(data)): 87 | print(" Converting image %d from complex to magnitude for display" % imgNum) 88 | data = np.abs(data) 89 | 90 | for cha in range(data.shape[0]): 91 | for sli in range(data.shape[1]): 92 | images.append(Image.fromarray(np.squeeze(data[cha,sli,...]))) # data is [cha z y x] -- squeeze to [y x] for [row col] 93 | 94 | if image.data.shape[0] > 1: 95 | if image.getHead().image_type == 6: 96 | print(" Image %d is RGB" % imgNum) 97 | else: 98 | print(" Image %d has %d channels" % (imgNum, image.data.shape[0])) 99 | 100 | if image.data.shape[1] > 2: 101 | print(" Image %d is a 3D volume with %d slices" % (imgNum, image.data.shape[1])) 102 | 103 | # Read ROIs 104 | meta = ismrmrd.Meta.deserialize(image.attribute_string) 105 | imgRois = [] 106 | for key in meta.keys(): 107 | if not key.startswith('ROI_') and not key.startswith('GT_ROI_'): 108 | continue 109 | 110 | roi = meta[key] 111 | x, y, rgb, thickness, style, visibility = mrdhelper.parse_roi(roi) 112 | 113 | if visibility == 0: 114 | continue 115 | 116 | imgRois.append((x, y, rgb, thickness)) 117 | 118 | # Same ROIs for each channel and slice (in a single MRD image) 119 | for chasli in range(image.data.shape[0]*image.data.shape[1]): 120 | rois.append(imgRois) 121 | 122 | # MRD ImageHeader 123 | for chasli in range(image.data.shape[0]*image.data.shape[1]): 124 | heads.append(image.getHead()) 125 | 126 | for chasli in range(image.data.shape[0]*image.data.shape[1]): 127 | metas.append(meta) 128 | 129 | print(" Read in %s images of shape %s" % (len(images), images[0].size[::-1])) 130 | 131 | hasRois = any([len(x) > 0 for x in rois]) 132 | 133 | # Window/level for all images in series 134 | seriesMaxVal = np.median([np.percentile(np.array(img), 95) for img in images]) 135 | seriesMinVal = 
np.median([np.percentile(np.array(img), 5) for img in images]) 136 | 137 | # Special case for "sparse" images, usually just text 138 | if seriesMaxVal == seriesMinVal: 139 | seriesMaxVal = np.median([np.max(np.array(img)) for img in images]) 140 | seriesMinVal = np.median([np.min(np.array(img)) for img in images]) 141 | 142 | imagesWL = [] 143 | for img, roi, meta in zip(images, rois, metas): 144 | # Use window/level from MetaAttributes if available 145 | minVal = seriesMinVal 146 | maxVal = seriesMaxVal 147 | 148 | if (('WindowCenter' in meta) and ('WindowWidth' in meta)): 149 | minVal = float(meta['WindowCenter']) - float(meta['WindowWidth'])/2 150 | maxVal = float(meta['WindowCenter']) + float(meta['WindowWidth'])/2 151 | elif (('GADGETRON_WindowCenter' in meta) and ('GADGETRON_WindowWidth' in meta)): 152 | minVal = float(meta['GADGETRON_WindowCenter']) - float(meta['GADGETRON_WindowWidth'])/2 153 | maxVal = float(meta['GADGETRON_WindowCenter']) + float(meta['GADGETRON_WindowWidth'])/2 154 | 155 | if ('LUTFileName' in meta) or ('GADGETRON_ColorMap' in meta): 156 | LUTFileName = meta['LUTFileName'] if 'LUTFileName' in meta else meta['GADGETRON_ColorMap'] 157 | 158 | # Replace extension with '.npy' 159 | LUTFileName = os.path.splitext(LUTFileName)[0] + '.npy' 160 | 161 | # LUT file is a (256,3) numpy array of RGB values between 0 and 255 162 | if os.path.exists(LUTFileName): 163 | palette = np.load(LUTFileName) 164 | palette = palette.flatten().tolist() # As required by PIL 165 | # Look in subdirectory 'colormaps' if not found in current directory 166 | elif os.path.exists(os.path.join('colormaps', LUTFileName)): 167 | palette = np.load(os.path.join('colormaps', LUTFileName)) 168 | palette = palette.flatten().tolist() # As required by PIL 169 | else: 170 | print("LUT file %s specified by MetaAttributes, but not found" % (LUTFileName)) 171 | palette = None 172 | else: 173 | palette = None 174 | 175 | if img.mode != 'RGB': 176 | if hasRois: 177 | # Convert to RGB 
mode to allow colored ROI overlays 178 | data = np.array(img).astype(float) 179 | data -= minVal 180 | if maxVal != minVal: 181 | data *= 255/(maxVal - minVal) 182 | data = np.clip(data, 0, 255) 183 | if palette is not None: 184 | tmpImg = Image.fromarray(data.astype(np.uint8), mode='P') 185 | tmpImg.putpalette(palette) 186 | tmpImg = tmpImg.convert('RGB') # Needed in order to draw ROIs 187 | else: 188 | tmpImg = Image.fromarray(np.repeat(data[:,:,np.newaxis],3,axis=2).astype(np.uint8), mode='RGB') 189 | 190 | if args.rescale != 1: 191 | tmpImg = tmpImg.resize(tuple(args.rescale*x for x in tmpImg.size)) 192 | for i in range(len(roi)): 193 | roi[i] = tuple(([args.rescale*x for x in roi[i][0]], [args.rescale*y for y in roi[i][1]], roi[i][2], roi[i][3])) 194 | 195 | for (x, y, rgb, thickness) in roi: 196 | draw = ImageDraw.Draw(tmpImg) 197 | draw.line(list(zip(x, y)), fill=(int(rgb[0]*255), int(rgb[1]*255), int(rgb[2]*255), 255), width=int(thickness)) 198 | imagesWL.append(tmpImg) 199 | else: 200 | data = np.array(img).astype(float) 201 | data -= minVal 202 | data *= 255/(maxVal - minVal) 203 | data = np.clip(data, 0, 255) 204 | 205 | if palette is not None: 206 | tmpImg = Image.fromarray(data.astype(np.uint8), mode='P') 207 | tmpImg.putpalette(palette) 208 | imagesWL.append(tmpImg) 209 | else: 210 | imagesWL.append(Image.fromarray(data)) 211 | else: 212 | imagesWL.append(img) 213 | 214 | # Combine multiple slices into a mosaic 215 | if args.mosaic_slices: 216 | slices = [head.slice for head in heads] 217 | 218 | if np.unique(slices).size > 1: 219 | # Create a list where each element is all images from a given slice 220 | imagesWLSplit = [] 221 | for slice in np.unique(slices): 222 | imagesWLSplit.append([img for img, sli in zip(imagesWL, slices) if sli == slice]) 223 | 224 | if np.unique([len(imgs) for imgs in imagesWLSplit]).size > 1: 225 | print(' ERROR: Failed to create mosaic because not all slices have the same number of images -- skipping mosaic!') 226 | else: 
227 | print(f' Creating a mosaic of {len(imagesWLSplit[0])} images with {np.unique(slices).size} slices in each') 228 | 229 | # Loop over non-slice dimension 230 | imagesWLMosaic = [] 231 | for idx in range(len(imagesWLSplit[0])): 232 | imgMode = imagesWLSplit[0][idx].mode 233 | tmpImg = Image.fromarray(np.hstack([img[idx] for img in imagesWLSplit]), mode=imgMode) 234 | if imgMode == 'P': 235 | palette = imagesWLSplit[0][0].getpalette() 236 | tmpImg.putpalette(palette) 237 | imagesWLMosaic.append(tmpImg) 238 | 239 | imagesWL = imagesWLMosaic 240 | 241 | # Add SequenceDescriptionAdditional to filename, if present 242 | image = dset.read_image(group, 0) 243 | meta = ismrmrd.Meta.deserialize(image.attribute_string) 244 | if 'SequenceDescriptionAdditional' in meta.keys(): 245 | seqDescription = '_' + meta['SequenceDescriptionAdditional'] 246 | elif 'GADGETRON_SeqDescription' in meta.keys(): 247 | seqDescription = '_'.join(meta['GADGETRON_SeqDescription']) 248 | else: 249 | seqDescription = '' 250 | 251 | # Make valid file name 252 | gifFileName = os.path.splitext(os.path.basename(args.filename))[0] + '_' + args.in_group + '_' + group + seqDescription + '.gif' 253 | gifFileName = "".join(c for c in gifFileName if c.isalnum() or c in (' ','.','_')).rstrip() 254 | gifFileName = gifFileName.replace(" ", "_") 255 | gifFilePath = os.path.join(os.path.dirname(args.filename), gifFileName) 256 | 257 | print(" Writing image: %s " % (gifFilePath)) 258 | if len(images) > 1: 259 | imagesWL[0].save(gifFilePath, save_all=True, append_images=imagesWL[1:], loop=0, duration=40) 260 | else: 261 | imagesWL[0].save(gifFilePath, save_all=True, append_images=imagesWL[1:]) 262 | 263 | return 264 | 265 | if __name__ == '__main__': 266 | parser = argparse.ArgumentParser(description='Convert MRD image file to animated GIF', 267 | formatter_class=argparse.ArgumentDefaultsHelpFormatter) 268 | parser.add_argument('filename', help='Input file') 269 | parser.add_argument('-g', '--in-group', 
help='Input data group') 270 | parser.add_argument('-r', '--rescale', type=int, help='Rescale factor (integer) for output images') 271 | parser.add_argument('-m', '--mosaic-slices', action='store_true', help='Mosaic images along slice dimension') 272 | 273 | parser.set_defaults(**defaults) 274 | 275 | args = parser.parse_args() 276 | 277 | main(args) 278 | -------------------------------------------------------------------------------- /mrdhelper.py: -------------------------------------------------------------------------------- 1 | # MRD Helper functions 2 | import ismrmrd 3 | import re 4 | import base64 5 | 6 | def update_img_header_from_raw(imgHead, rawHead): 7 | """Populate ImageHeader fields from AcquisitionHeader""" 8 | 9 | if rawHead is None: 10 | return imgHead 11 | 12 | # # These fields are not translated from the raw header, but filled in 13 | # # during image creation by from_array 14 | # imgHead.data_type = 15 | # imgHead.matrix_size = 16 | # imgHead.channels = 17 | 18 | # # This is mandatory, but must be filled in from the XML header, 19 | # # not from the acquisition header 20 | # imgHead.field_of_view = 21 | 22 | imgHead.version = rawHead.version 23 | imgHead.flags = rawHead.flags 24 | imgHead.measurement_uid = rawHead.measurement_uid 25 | 26 | imgHead.position = rawHead.position 27 | imgHead.read_dir = rawHead.read_dir 28 | imgHead.phase_dir = rawHead.phase_dir 29 | imgHead.slice_dir = rawHead.slice_dir 30 | imgHead.patient_table_position = rawHead.patient_table_position 31 | 32 | imgHead.average = rawHead.idx.average 33 | imgHead.slice = rawHead.idx.slice 34 | imgHead.contrast = rawHead.idx.contrast 35 | imgHead.phase = rawHead.idx.phase 36 | imgHead.repetition = rawHead.idx.repetition 37 | imgHead.set = rawHead.idx.set 38 | 39 | imgHead.acquisition_time_stamp = rawHead.acquisition_time_stamp 40 | imgHead.physiology_time_stamp = rawHead.physiology_time_stamp 41 | 42 | # Defaults, to be updated by the user 43 | imgHead.image_type = 
def get_userParameterString_value(metadata, name):
    """Get a value from MRD Header userParameterString (returns None if key not found).

    Fixes a copy-paste defect from get_userParameterDouble_value: the original
    searched userParameterDouble and cast the result to float.  String-valued
    user parameters live in userParameterString and are returned as str.

    Parameters:
    - metadata : MRD XML header object (ismrmrd.xsd.ismrmrdHeader)
    - name     : name of the user parameter to look up

    Returns:
    - str value of the parameter, or None when absent or no userParameters section
    """
    if metadata.userParameters is not None:
        for param in metadata.userParameters.userParameterString:
            if param.name == name:
                return str(param.value)
    return None
def extract_minihead_string_param(miniHead, name):
    """Extract a string parameter from the serialized text of the ICE MiniHeader.

    Returns an empty string when the parameter is absent, mirroring the
    default-value behavior of the bool/long/double variants.  (The original
    called val.strip() unconditionally and raised AttributeError when
    extract_minihead_param returned None for a missing parameter.)

    Parameters:
    - miniHead : serialized ICE MiniHeader text
    - name     : parameter name to extract

    Returns:
    - parameter value with surrounding quotes/spaces stripped, or '' if absent
    """
    val = extract_minihead_param(miniHead, name, 'ParamString')

    if val is None:
        # Missing parameter: return a benign default instead of crashing,
        # consistent with extract_minihead_bool/long/double_param
        return ''
    else:
        return val.strip(' "')
def get_json_config_param(config, key, default=None, type='str'):
    """
    Read a single parameter out of a JSON config dict.

    Parameters:
    - config  : dict expected to contain a 'parameters' sub-dict
    - key     : name (key) of the parameter to look up
    - default : fallback value when config is invalid or the key is absent
    - type    : cast applied to the value ('int', 'float'/'double',
                'str'/'string'/'choice', 'bool'/'boolean')

    Returns:
    - the cast parameter value, or default when unavailable

    Raises:
    - Exception when 'type' is not one of the supported names
    """
    if not isinstance(config, dict):
        return default

    if 'parameters' not in config:
        return default

    params = config['parameters']
    if key not in params:
        return default

    value = params[key]

    if type == 'int':
        return int(value)

    if type in ('float', 'double'):
        return float(value)

    if type in ('string', 'str', 'choice'):
        return str(value)

    if type in ('bool', 'boolean'):
        # Already a real bool (e.g. parsed JSON true/false): pass through
        if isinstance(value, bool):
            return value
        # Substring match on the lowercased text, as in the original
        lowered = value.lower()
        if 'true' in lowered:
            return True
        if 'false' in lowered:
            return False
        return default

    raise Exception("'type' must be int, float, string, or bool")
def parse_roi(roi):
    """
    Parse an MRD-formatted ROI
    Input:
      - roi (string list) : MRD-formatted ROI from a MetaAttribute
    Output:
      - x (1D ndarray)      : x coordinates in units of pixels, (0,0) at top left
      - y (1D ndarray)      : y coordinates in units of pixels, same length as x
      - rgb (3 item tuple)  : Colour as an (red, green, blue) tuple normalized to 1
      - thickness (float)   : Line thickness
      - style (int)         : Line style (0 = solid, 1 = dashed)
      - visibility (int)    : Line visibility (0 = false, 1 = true)
    """
    # Valid ROIs carry 6 metadata values followed by at least one (x,y) pair,
    # so the list length must be >= 8 and even
    malformed = (not isinstance(roi, list)) or (len(roi) < 8) or (len(roi) % 2 != 0)
    if malformed:
        raise Exception("ROI must be a list, have 6 metadata values, at least one coordinate, and an even number of values (x,y pairs)")

    values = [float(v) for v in roi]

    rgb        = tuple(values[:3])
    thickness  = values[3]
    style      = int(values[4])
    visibility = int(values[5])

    # Remaining values are interleaved x,y coordinate pairs
    coords = values[6:]
    x = coords[0::2]
    y = coords[1::2]

    return x, y, rgb, thickness, style, visibility
def parse_text(txt):
    """
    Parse an MRD-formatted text object
    Input:
      - txt (string list) : MRD-formatted text from a MetaAttribute
    Output:
      - x (float)          : x coordinate in units of pixels, (0,0) at top left
      - y (float)          : y coordinate in units of pixels
      - rgb (3 item tuple) : Colour as an (red, green, blue) tuple normalized to 1
      - visibility (int)   : Line visibility (0 = false, 1 = true)
      - string (string)    : Text string
    """
    # Exactly 7 fields: r, g, b, x, y, visibility, text
    if not isinstance(txt, list) or len(txt) != 7:
        raise Exception("txt must be a list that has exactly 7 metadata values")

    r, g, b = (float(v) for v in txt[:3])
    rgb = (r, g, b)

    x = float(txt[3])
    y = float(txt[4])

    # Visibility may be serialized as '1' or '1.000000'; parse via float first
    visibility = int(float(txt[5]))

    string = txt[6]

    return x, y, rgb, visibility, string
def process(connection, config, mrdHeader):
    """
    Main streaming loop for the report config: consume MRD messages from the
    client, accumulate raw readouts / images / waveforms into groups, and send
    report images (built by process_data) back to the client.

    Input:
        - connection : Connection object yielding MRD messages (None terminates)
        - config     : config name or additional-parameter object, logged only
        - mrdHeader  : parsed MRD XML header, or raw string if parsing failed upstream
    """
    logging.info("Config: \n%s", config)

    # mrdHeader should be xml formatted MRD header, but may be a string
    # if it failed conversion earlier
    try:
        # Disabled due to incompatibility between PyXB and Python 3.8:
        # https://github.com/pabigot/pyxb/issues/123
        # # logging.info("MRD header: \n%s", mrdHeader.toxml('utf-8'))

        logging.info("Incoming dataset contains %d encodings", len(mrdHeader.encoding))
        logging.info("First encoding is of type '%s', with a matrix size of (%s x %s x %s) and a field of view of (%s x %s x %s)mm^3",
            mrdHeader.encoding[0].trajectory,
            mrdHeader.encoding[0].encodedSpace.matrixSize.x,
            mrdHeader.encoding[0].encodedSpace.matrixSize.y,
            mrdHeader.encoding[0].encodedSpace.matrixSize.z,
            mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.x,
            mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.y,
            mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.z)

    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are
        # not silently swallowed while logging a malformed header
        logging.info("Improperly formatted MRD header: \n%s", mrdHeader)

    # Continuously parse incoming data parsed from MRD messages
    currentSeries = 0
    acqGroup = []
    imgGroup = []
    waveformGroup = []
    try:
        for item in connection:
            # ----------------------------------------------------------
            # Raw k-space data messages
            # ----------------------------------------------------------
            if isinstance(item, ismrmrd.Acquisition):
                # Accumulate all imaging readouts in a group
                if (not item.is_flag_set(ismrmrd.ACQ_IS_NOISE_MEASUREMENT) and
                    not item.is_flag_set(ismrmrd.ACQ_IS_PARALLEL_CALIBRATION) and
                    not item.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA) and
                    not item.is_flag_set(ismrmrd.ACQ_IS_NAVIGATION_DATA)):
                    acqGroup.append(item)

                # When this criteria is met, run process_data() on the accumulated
                # data, which returns images that are sent back to the client.
                if item.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE):
                    logging.info("Processing a group of k-space data")
                    image = process_data(acqGroup, connection, config, mrdHeader)
                    connection.send_image(image)
                    acqGroup = []

            # ----------------------------------------------------------
            # Image data messages
            # ----------------------------------------------------------
            elif isinstance(item, ismrmrd.Image):
                # When this criteria is met, run process_data() on the accumulated
                # data, which returns images that are sent back to the client.
                # e.g. when the series number changes:
                if item.image_series_index != currentSeries:
                    logging.info("Processing a group of images because series index changed to %d", item.image_series_index)
                    currentSeries = item.image_series_index
                    image = process_data(imgGroup, connection, config, mrdHeader)
                    connection.send_image(image)
                    imgGroup = []

                # Only process magnitude images -- send phase images back without modification (fallback for images with unknown type)
                # Fixed: compare image_type with '==' instead of 'is'; identity
                # comparison against an int constant relies on CPython small-int
                # caching and is not a guaranteed equality test
                if (item.image_type == ismrmrd.IMTYPE_MAGNITUDE) or (item.image_type == 0):
                    imgGroup.append(item)
                else:
                    tmpMeta = ismrmrd.Meta.deserialize(item.attribute_string)
                    tmpMeta['Keep_image_geometry'] = 1
                    item.attribute_string = tmpMeta.serialize()

                    connection.send_image(item)
                    continue

            # ----------------------------------------------------------
            # Waveform data messages
            # ----------------------------------------------------------
            elif isinstance(item, ismrmrd.Waveform):
                waveformGroup.append(item)

            elif item is None:
                break

            else:
                logging.error("Unsupported data type %s", type(item).__name__)

        # Extract raw ECG waveform data. Basic sorting to make sure that data
        # is time-ordered, but no additional checking for missing data.
        # ecgData has shape (5 x timepoints)
        if len(waveformGroup) > 0:
            waveformGroup.sort(key = lambda item: item.time_stamp)
            ecgData = [item.data for item in waveformGroup if item.waveform_id == 0]
            if len(ecgData) > 0:
                ecgData = np.concatenate(ecgData, 1)

        # Process any remaining groups of raw or image data.  This can
        # happen if the trigger condition for these groups are not met.
        # This is also a fallback for handling image data, as the last
        # image in a series is typically not separately flagged.
        if len(acqGroup) > 0:
            logging.info("Processing a group of k-space data (untriggered)")
            image = process_data(acqGroup, connection, config, mrdHeader)
            connection.send_image(image)
            acqGroup = []

        if len(imgGroup) > 0:
            logging.info("Processing a group of images (untriggered)")
            image = process_data(imgGroup, connection, config, mrdHeader)
            connection.send_image(image)
            imgGroup = []

    except Exception as e:
        logging.error(traceback.format_exc())
        connection.send_logging(constants.MRD_LOGGING_ERROR, traceback.format_exc())

    finally:
        connection.send_close()
def process_data(group, connection, config, mrdHeader):
    """
    Render a text "report" image summarizing the scan (protocol name, scanner,
    field of view, matrix size, input data count) and return it as a list
    containing a single MRD Image.

    Input:
        - group      : list of ismrmrd.Acquisition or ismrmrd.Image objects
        - connection : Connection (unused here; kept for a uniform interface
                       with other configs' process_data signatures)
        - config     : config value passed through from the client (unused)
        - mrdHeader  : parsed MRD XML header used to populate the report
    Output:
        - list with one ismrmrd.Image (empty list if group is empty)
    """
    if len(group) == 0:
        return []

    # Create folder, if necessary
    if not os.path.exists(debugFolder):
        os.makedirs(debugFolder)
        logging.debug("Created folder " + debugFolder + " for debug output files")

    # Create a dictionary of values to report
    data = {}
    data['protocolName'] = mrdHeader.measurementInformation.protocolName
    data['scanner'] = f'{mrdHeader.acquisitionSystemInformation.systemVendor} {mrdHeader.acquisitionSystemInformation.systemModel} {mrdHeader.acquisitionSystemInformation.systemFieldStrength_T:{".1f" if mrdHeader.acquisitionSystemInformation.systemFieldStrength_T > 1 else ".2f"}}T'
    data['fieldOfView'] = f'{mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.x:.1f} x {mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.y:.1f} x {mrdHeader.encoding[0].encodedSpace.fieldOfView_mm.z:.1f} mm^3'
    data['matrixSize'] = f'{mrdHeader.encoding[0].encodedSpace.matrixSize.x} x {mrdHeader.encoding[0].encodedSpace.matrixSize.y} x {mrdHeader.encoding[0].encodedSpace.matrixSize.z}'

    if isinstance(group[0], ismrmrd.acquisition.Acquisition):
        data['inputData'] = f'{len(group)} readouts'
    elif isinstance(group[0], ismrmrd.image.Image):
        data['inputData'] = f'{len(group)} images'

    # Properties of report image to create
    width       = 512   # pixels
    height      = 512   # pixels
    fontSize    = 12    # points
    lineSpacing = 1.75  # scale relative to fontSize

    # Create a blank image with a dark background (per the style set below)
    plt.style.use('dark_background')
    dpi = 100
    fig, ax = plt.subplots(figsize=(width/dpi, height/dpi), dpi=dpi, frameon=False)

    # Display the blank image to set image size
    ax.imshow(np.zeros((height, width, 3), dtype=np.uint8))
    ax.axis('off')
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0)

    # Starting position (relative to top left corner)
    x0 = width *0.05
    y0 = height*0.05

    # Iterate through dict and print each item, key column padded to align values
    maxKeyLen = max([len(key) for key in data.keys()])
    for index, (key, value) in enumerate(data.items()):
        ax.text(x0, y0+index*fontSize*lineSpacing, f'{key:{maxKeyLen}} {value}', va='center', fontsize=fontSize, color='white', fontfamily='monospace')

    # Invoke a draw to create a buffer we can copy the pixel data from
    fig.canvas.draw()

    imgReport = np.frombuffer(fig.canvas.buffer_rgba(), dtype='uint8')
    w, h = fig.canvas.get_width_height()
    imgReport = imgReport.reshape((int(h), int(w), -1))

    plt.imsave(os.path.join(debugFolder, 'report.png'), imgReport)

    # Fixed: close the figure once its pixels have been copied out; without
    # this, every call leaks an open figure in this long-running server
    plt.close(fig)

    # Conversion as per CCIR 601 (https://en.wikipedia.org/wiki/Luma_(video))
    def rgb2gray(rgb):
        return np.dot(rgb[...,:3], [0.2989, 0.5870, 0.1140])

    # Must convert to grayscale float32 or uint16 (float64s are not supported)
    imgGray = rgb2gray(np.asarray(imgReport))
    imgGray = imgGray.astype(np.float32)

    imagesOut = []

    # Create new MRD instance for the report image
    # data has shape [PE RO phs], i.e. [y x].
    # from_array() should be called with 'transpose=False' to avoid warnings, and when called
    # with this option, can take input as: [cha z y x], [z y x], or [y x]
    mrdImg = ismrmrd.Image.from_array(imgGray, transpose=False)

    # Set the minimum appropriate ImageHeader information without using a reference acquisition/image as a starting point
    # Note mrdImg.getHead().matrix_size should be used instead of the convenience mrdImg.matrix_size because the latter
    # returns a transposed result.  See: https://github.com/ismrmrd/ismrmrd-python/pull/54
    mrdImg.field_of_view = (mrdImg.getHead().matrix_size[0], mrdImg.getHead().matrix_size[1], mrdImg.getHead().matrix_size[2])

    # Set image orientation dimensions.  Note that the default initialized values (0,0,0) are invalid
    # because they are not unit vectors
    mrdImg.read_dir  = (1, 0, 0)
    mrdImg.phase_dir = (0, 1, 0)
    mrdImg.slice_dir = (0, 0, 1)

    # mrdImg.position is optional, but is relative to the patient_table_position
    # Setting patient_table_position is recommended, otherwise the report may be
    # significantly shifted from other images in the series.
    mrdImg.patient_table_position = group[0].patient_table_position

    # Optional, but recommended.  Default value (0) corresponds to midnight
    mrdImg.acquisition_time_stamp = group[0].acquisition_time_stamp

    # Default value of image_type (0) is invalid
    mrdImg.image_type = ismrmrd.IMTYPE_MAGNITUDE

    # Use a different image_series_index to have a separate series than the main
    # images.  Absolute value does not matter, but images with the same
    # image_series_index are grouped together in the same DICOM SeriesNumber
    mrdImg.image_series_index = 0

    # DICOM InstanceNumber.  Should be incremented if multiple images in a series
    mrdImg.image_index = 0

    # Set MRD MetaAttributes
    tmpMeta = ismrmrd.Meta()
    tmpMeta['DataRole']               = 'Image'
    tmpMeta['ImageProcessingHistory'] = ['FIRE', 'PYTHON']
    tmpMeta['Keep_image_geometry']    = 1

    # Add image orientation directions to MetaAttributes
    # Note that DICOM image orientation is in LPS coordinates, so if another set of image directional
    # cosines are chosen, they may be flipped/rotated to bring them into LPS coordinate space
    tmpMeta['ImageRowDir']    = ["{:.18f}".format(mrdImg.read_dir[0]),  "{:.18f}".format(mrdImg.read_dir[1]),  "{:.18f}".format(mrdImg.read_dir[2])]
    tmpMeta['ImageColumnDir'] = ["{:.18f}".format(mrdImg.phase_dir[0]), "{:.18f}".format(mrdImg.phase_dir[1]), "{:.18f}".format(mrdImg.phase_dir[2])]

    # Add all of the report data to the MetaAttributes so they can be parsed from the resulting images
    tmpMeta.update(data)

    xml = tmpMeta.serialize()
    logging.debug("Image MetaAttributes: %s", xml)
    mrdImg.attribute_string = xml
    imagesOut.append(mrdImg)

    return imagesOut
multiprocessing 8 | import ismrmrd.xsd 9 | import importlib 10 | import os 11 | import json 12 | import signal 13 | 14 | import simplefft 15 | import invertcontrast 16 | import analyzeflow 17 | 18 | class Server: 19 | """ 20 | Something something docstring. 21 | """ 22 | 23 | def __init__(self, address, port, defaultConfig, savedata, savedataFolder, multiprocessing): 24 | logging.info("Starting server and listening for data at %s:%d", address, port) 25 | 26 | logging.info("Default config is %s", defaultConfig) 27 | if (savedata is True): 28 | logging.debug("Saving incoming data is enabled.") 29 | 30 | if (multiprocessing is True): 31 | logging.debug("Multiprocessing is enabled.") 32 | 33 | self.defaultConfig = defaultConfig 34 | self.multiprocessing = multiprocessing 35 | self.savedata = savedata 36 | self.savedataFolder = savedataFolder 37 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 38 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) 39 | self.socket.bind((address, port)) 40 | 41 | def serve(self): 42 | logging.debug("Serving... 
") 43 | self.socket.listen(0) 44 | 45 | while True: 46 | try: 47 | signal.siginterrupt(signal.SIGTERM, True) 48 | signal.siginterrupt(signal.SIGINT, True) 49 | except AttributeError: 50 | # signal.siginterrupt is not available in Windows 51 | pass 52 | 53 | sock, (remote_addr, remote_port) = self.socket.accept() 54 | 55 | logging.info("Accepting connection from: %s:%d", remote_addr, remote_port) 56 | 57 | if (self.multiprocessing is True): 58 | process = multiprocessing.Process(target=self.handle, args=[sock]) 59 | process.daemon = True 60 | process.start() 61 | logging.debug("Spawned process %d to handle connection.", process.pid) 62 | else: 63 | self.handle(sock) 64 | 65 | def handle(self, sock): 66 | 67 | try: 68 | connection = Connection(sock, self.savedata, "", self.savedataFolder, "dataset") 69 | 70 | # First message is the config (file or text) 71 | config = next(connection) 72 | 73 | # Break out if a connection was established but no data was received 74 | if ((config is None) & (connection.is_exhausted is True)): 75 | logging.info("Connection closed without any data received") 76 | return 77 | 78 | # Second messages is the metadata (text) 79 | metadata_xml = next(connection) 80 | logging.debug("XML Metadata: %s", metadata_xml) 81 | try: 82 | metadata = ismrmrd.xsd.CreateFromDocument(metadata_xml) 83 | if (metadata.acquisitionSystemInformation.systemFieldStrength_T != None): 84 | logging.info("Data is from a %s %s at %1.1fT", metadata.acquisitionSystemInformation.systemVendor, metadata.acquisitionSystemInformation.systemModel, metadata.acquisitionSystemInformation.systemFieldStrength_T) 85 | except: 86 | logging.warning("Metadata is not a valid MRD XML structure. 
    def handle(self, sock):
        """Process one client connection end-to-end.

        Reads the config message and XML metadata, optionally merges additional
        JSON config parameters, dispatches to the module named by the config
        (with fallback to self.defaultConfig), and finally closes the socket
        and renames any saved data file after the protocol name.
        """
        try:
            connection = Connection(sock, self.savedata, "", self.savedataFolder, "dataset")

            # First message is the config (file or text)
            config = next(connection)

            # Break out if a connection was established but no data was received
            # NOTE(review): '&' works here because both operands are bools, but
            # 'and' would be the conventional operator -- confirm before changing
            if ((config is None) & (connection.is_exhausted is True)):
                logging.info("Connection closed without any data received")
                return

            # Second message is the metadata (text); fall back to passing the raw
            # text downstream if it is not parseable as an MRD XML header
            metadata_xml = next(connection)
            logging.debug("XML Metadata: %s", metadata_xml)
            try:
                metadata = ismrmrd.xsd.CreateFromDocument(metadata_xml)
                if (metadata.acquisitionSystemInformation.systemFieldStrength_T != None):
                    logging.info("Data is from a %s %s at %1.1fT", metadata.acquisitionSystemInformation.systemVendor, metadata.acquisitionSystemInformation.systemModel, metadata.acquisitionSystemInformation.systemFieldStrength_T)
            except:
                # Deliberate best-effort: any parse failure downgrades to text
                logging.warning("Metadata is not a valid MRD XML structure. Passing on metadata as text")
                metadata = metadata_xml

            # Support additional config parameters passed through a JSON text message.
            # A 'parameters.config' or (non-empty) 'parameters.customconfig' entry
            # overrides the config name received in the first message; 'customconfig'
            # wins when both are present because it is evaluated second.
            if connection.peek_mrd_message_identifier() == constants.MRD_MESSAGE_TEXT:
                configAdditionalText = next(connection)
                logging.info("Received additional config text: %s", configAdditionalText)
                connection.save_additional_config(configAdditionalText)
                try:
                    configAdditional = json.loads(configAdditionalText)

                    if ('parameters' in configAdditional):
                        if ('config' in configAdditional['parameters']):
                            logging.info("Changing config to: %s", configAdditional['parameters']['config'])
                            config = configAdditional['parameters']['config']

                        if ('customconfig' in configAdditional['parameters']) and (configAdditional['parameters']['customconfig'] != ""):
                            logging.info("Changing config to: %s", configAdditional['parameters']['customconfig'])
                            config = configAdditional['parameters']['customconfig']
                except:
                    # Best-effort: malformed JSON leaves config unchanged
                    logging.error("Failed to parse as JSON")
            else:
                # No extra text message: pass the original config object downstream
                configAdditional = config

            # Decide what program to use based on config
            # If not one of these explicit cases, try to load file matching name of config
            if (config == "simplefft"):
                logging.info("Starting simplefft processing based on config")
                simplefft.process(connection, configAdditional, metadata)
            elif (config == "invertcontrast"):
                logging.info("Starting invertcontrast processing based on config")
                invertcontrast.process(connection, configAdditional, metadata)
            elif (config == "analyzeflow"):
                logging.info("Starting analyzeflow processing based on config")
                analyzeflow.process(connection, configAdditional, metadata)
            elif (config == "null"):
                # Drain the incoming stream without processing, then close
                logging.info("No processing based on config")
                try:
                    for msg in connection:
                        if msg is None:
                            break
                finally:
                    connection.send_close()
            elif (config == "savedataonly"):
                # Dummy loop with no processing; data is persisted by Connection
                try:
                    for msg in connection:
                        if msg is None:
                            break
                finally:
                    connection.send_close()
            else:
                usedConfig = config
                if importlib.util.find_spec(config) is None:
                    logging.error("Could not find config module '%s' -- falling back to default config: %s", config, self.defaultConfig)
                    usedConfig = self.defaultConfig

                try:
                    # Load module from file having exact name as config
                    module = importlib.import_module(usedConfig)
                    logging.info("Starting config %s", usedConfig)
                    module.process(connection, configAdditional, metadata)
                except ImportError as e:
                    logging.error("Failed to load config '%s' with error:\n %s", usedConfig, e)
                    if usedConfig != self.defaultConfig:
                        logging.info("Falling back to default config: '%s'", self.defaultConfig)
                        try:
                            module = importlib.import_module(self.defaultConfig)
                            logging.info("Starting config %s", self.defaultConfig)
                            module.process(connection, configAdditional, metadata)
                        except ImportError as e:
                            logging.error("Failed to load default config '%s' with error:\n %s", self.defaultConfig, e)

        except Exception as e:
            logging.exception(e)

        finally:
            # Always tear down the socket, even if processing raised
            connection.shutdown_close()

            # Dataset may not be closed properly if a close message is not received
            if connection.savedata is True:
                try:
                    connection.dset.close()
                except:
                    # Best-effort: dataset may already be closed or never opened
                    pass

                if (connection.savedataFile == ""):
                    try:
                        # Rename the saved file to use the protocol name
                        dset = ismrmrd.Dataset(connection.mrdFilePath, connection.savedataGroup, False)
                        groups = dset.list()

                        if ('xml' in groups):
                            xml_header = dset.read_xml_header()
                            xml_header = xml_header.decode("utf-8")
                            mrdHead = ismrmrd.xsd.CreateFromDocument(xml_header)

                            if (mrdHead.measurementInformation.protocolName != ""):
                                newFilePath = connection.mrdFilePath.replace("MRD_input_", mrdHead.measurementInformation.protocolName + "_")
                                os.rename(connection.mrdFilePath, newFilePath)
                                connection.mrdFilePath = newFilePath
                    except:
                        # Best-effort: keep the auto-generated filename on any failure
                        pass

                if connection.mrdFilePath is not None:
                    logging.info("Incoming data was saved at %s", connection.mrdFilePath)
def groups(iterable, predicate):
    """Yield successive batches of elements from iterable; a batch is closed
    (and yielded) by the element for which predicate is True.  A trailing
    partial batch whose last element never satisfied predicate is discarded."""
    batch = []
    for element in iterable:
        batch.append(element)

        if predicate(element):
            yield batch
            batch = []


def conditionalGroups(iterable, predicateAccept, predicateFinish):
    """Like groups(), but only elements passing predicateAccept are collected,
    a None element terminates iteration, and iterable.send_close() is always
    called when the generator finishes (normally or via close/GC)."""
    batch = []
    try:
        for element in iterable:
            # A None message marks the end of the stream
            if element is None:
                break

            if predicateAccept(element):
                batch.append(element)

            if predicateFinish(element):
                yield batch
                batch = []
    finally:
        # Guarantee the close message is sent back even on early exit
        iterable.send_close()


def process(connection, config, mrdHeader):
    """Entry point for the simplefft config: reconstruct each slice as its
    readouts complete and stream the resulting images back to the client."""
    logging.info("Config: \n%s", config)
    logging.info("MRD Header: \n%s", mrdHeader)

    # Discard phase correction lines and accumulate lines until "ACQ_LAST_IN_SLICE" is set
    isImaging  = lambda acq: not acq.is_flag_set(ismrmrd.ACQ_IS_PHASECORR_DATA)
    isSliceEnd = lambda acq: acq.is_flag_set(ismrmrd.ACQ_LAST_IN_SLICE)
    for group in conditionalGroups(connection, isImaging, isSliceEnd):
        image = process_group(group, config, mrdHeader)

        logging.debug("Sending image to client:\n%s", image)
        connection.send_image(image)
def process_group(group, config, mrdHeader):
    """
    Reconstruct one slice from a group of raw readouts.

    Input:
        - group     : list of ismrmrd.Acquisition objects for one slice
        - config    : config value passed through from the client (unused)
        - mrdHeader : parsed MRD XML header (recon matrix size, FOV, BitsStored)
    Output:
        - single ismrmrd.Image (int16 magnitude), or [] if group is empty
    """
    if len(group) == 0:
        return []

    logging.info(f'-------------------------------------------------')
    logging.info(f' process_group called with {len(group)} readouts')
    logging.info(f'-------------------------------------------------')

    # Create folder, if necessary
    if not os.path.exists(debugFolder):
        os.makedirs(debugFolder)
        logging.debug("Created folder " + debugFolder + " for debug output files")

    # Format data into single [cha RO PE] array
    data = [acquisition.data for acquisition in group]
    data = np.stack(data, axis=-1)

    logging.debug("Raw data is size %s" % (data.shape,))
    np.save(debugFolder + "/" + "raw.npy", data)

    # Fourier Transform
    data = fft.fftshift( data, axes=(1, 2))
    data = fft.ifft2(    data, axes=(1, 2))
    data = fft.ifftshift(data, axes=(1, 2))
    data *= np.prod(data.shape) # FFT scaling for consistency with ICE

    # Sum of squares coil combination
    data = np.abs(data)
    data = np.square(data)
    data = np.sum(data, axis=0)
    data = np.sqrt(data)

    logging.debug("Image data is size %s" % (data.shape,))
    np.save(debugFolder + "/" + "img.npy", data)

    # Determine max value (12 or 16 bit)
    # Fixed: query the user parameter once instead of twice
    BitsStored = mrdhelper.get_userParameterLong_value(mrdHeader, "BitsStored")
    if BitsStored is None:
        BitsStored = 12
    maxVal = 2**BitsStored - 1

    # Normalize and convert to int16
    # Fixed: guard against an all-zero image to avoid a divide-by-zero
    peak = data.max()
    if peak > 0:
        data *= maxVal/peak
    data = np.around(data)
    data = data.astype(np.int16)

    # Remove readout oversampling
    if mrdHeader.encoding[0].reconSpace.matrixSize.x != 0:
        offset = int((data.shape[0] - mrdHeader.encoding[0].reconSpace.matrixSize.x)/2)
        data = data[offset:offset+mrdHeader.encoding[0].reconSpace.matrixSize.x,:]

    # Remove phase oversampling
    if mrdHeader.encoding[0].reconSpace.matrixSize.y != 0:
        offset = int((data.shape[1] - mrdHeader.encoding[0].reconSpace.matrixSize.y)/2)
        data = data[:,offset:offset+mrdHeader.encoding[0].reconSpace.matrixSize.y]

    logging.debug("Image without oversampling is size %s" % (data.shape,))
    np.save(debugFolder + "/" + "imgCrop.npy", data)

    # Format as ISMRMRD image data
    # data has shape [RO PE], i.e. [x y].
    # from_array() should be called with 'transpose=False' to avoid warnings, and when called
    # with this option, can take input as: [cha z y x], [z y x], or [y x]
    image = ismrmrd.Image.from_array(data.transpose(), acquisition=group[0], transpose=False)
    image.image_index = 1

    # Set field of view
    image.field_of_view = (ctypes.c_float(mrdHeader.encoding[0].reconSpace.fieldOfView_mm.x),
                           ctypes.c_float(mrdHeader.encoding[0].reconSpace.fieldOfView_mm.y),
                           ctypes.c_float(mrdHeader.encoding[0].reconSpace.fieldOfView_mm.z))

    # Set ISMRMRD Meta Attributes
    meta = ismrmrd.Meta({'DataRole':               'Image',
                         'ImageProcessingHistory': ['FIRE', 'PYTHON'],
                         'WindowCenter':           str((maxVal+1)/2),
                         'WindowWidth':            str((maxVal+1))})

    # Add image orientation directions to MetaAttributes if not already present
    if meta.get('ImageRowDir') is None:
        meta['ImageRowDir'] = ["{:.18f}".format(image.getHead().read_dir[0]), "{:.18f}".format(image.getHead().read_dir[1]), "{:.18f}".format(image.getHead().read_dir[2])]

    if meta.get('ImageColumnDir') is None:
        meta['ImageColumnDir'] = ["{:.18f}".format(image.getHead().phase_dir[0]), "{:.18f}".format(image.getHead().phase_dir[1]), "{:.18f}".format(image.getHead().phase_dir[2])]

    xml = meta.serialize()
    logging.debug("Image MetaAttributes: %s", xml)
    logging.debug("Image data has %d elements", image.data.size)

    image.attribute_string = xml
    return image
#!/bin/bash
# start-fire-python-server-with-data-storage.sh
#
# Bash script to start Python ISMRMRD server and save received data
#
# First argument is path to log file. If no argument is provided,
# logging is done to stdout (and discarded)

# Set Python's default temp folder to one that's shared with the host so that
# it's less likely to accidentally fill up the chroot
export TMPDIR=/tmp/share

# Fixed: quote "${1}"/"${LOG_FILE}" so log paths containing spaces survive
# word splitting
if [ $# -eq 1 ]; then
    LOG_FILE="${1}"
    python3 /opt/code/python-ismrmrd-server/main.py -v -r -H=0.0.0.0 -p=9002 -s -S /tmp/share/saved_data -l="${LOG_FILE}" &
else
    python3 /opt/code/python-ismrmrd-server/main.py -v -r -H=0.0.0.0 -p=9002 -s -S /tmp/share/saved_data &
fi

#!/bin/bash
# start-fire-python-server.sh
#
# Bash script to start Python ISMRMRD server
#
# First argument is path to log file. If no argument is provided,
# logging is done to stdout (and discarded)

# Set Python's default temp folder to one that's shared with the host so that
# it's less likely to accidentally fill up the chroot
export TMPDIR=/tmp/share

# Fixed: quote "${1}"/"${LOG_FILE}" so log paths containing spaces survive
# word splitting
if [ $# -eq 1 ]; then
    LOG_FILE="${1}"
    python3 /opt/code/python-ismrmrd-server/main.py -v -r -H=0.0.0.0 -p=9002 -l="${LOG_FILE}" &
else
    python3 /opt/code/python-ismrmrd-server/main.py -v -r -H=0.0.0.0 -p=9002 &
fi

#!/bin/bash
# sync-code-and-start-fire-python-server.sh
#
# Bash script to sync code and start Python ISMRMRD server
#
# First argument is path to log file. If no argument is provided,
# logging is done to stdout (and discarded)

# Set Python's default temp folder to one that's shared with the host so that
# it's less likely to accidentally fill up the chroot
export TMPDIR=/tmp/share

# Copy updated code from the shared folder into the install location
cp -R -f /tmp/share/code/* "/opt/code/python-ismrmrd-server/"

# Fixed: quote "${1}"/"${LOG_FILE}" so log paths containing spaces survive
# word splitting
if [ $# -eq 1 ]; then
    LOG_FILE="${1}"
    python3 /opt/code/python-ismrmrd-server/main.py -v -r -H=0.0.0.0 -p=9002 -l="${LOG_FILE}" &
else
    python3 /opt/code/python-ismrmrd-server/main.py -v -r -H=0.0.0.0 -p=9002 &
fi