├── mibidata ├── __init__.py ├── tests │ ├── __init__.py │ ├── data │ │ └── v0.1.tiff │ ├── test_combine_tiffs.py │ ├── test_runs.py │ ├── test_util.py │ ├── test_color.py │ ├── test_pseudodepths.py │ ├── test_panels.py │ └── test_segmentation.py ├── runs.py ├── panels.py ├── util.py ├── constants.py ├── pseudodepths.py ├── combine_tiffs.py ├── color.py ├── segmentation.py └── tiff.py ├── tests └── test_imports.py ├── scripts ├── tests │ └── __init__.py ├── tiling │ ├── README.md │ ├── __init__.py │ └── tiling.py └── qupath │ └── export_label_image.groovy ├── mibitracker ├── tests │ ├── __init__.py │ └── test_request_helpers.py ├── __init__.py └── mibitracker_exceptions.py ├── images ├── api_screenshot.PNG ├── postman-token.PNG ├── postman-auth-body.PNG ├── postman-auth-header.PNG ├── postman-auth-token.PNG ├── swagger_tissues_all.png ├── postman-images-retrieve.PNG ├── swagger_images_by_tissue.png ├── swagger_tissues_endpoints.png ├── swagger_tissues_filtered.png └── plot_circular_sectors_schematic.png ├── docs ├── source │ ├── _static │ │ └── favicon.ico │ ├── index.rst │ └── conf.py ├── README.md └── Makefile ├── tox.ini ├── environment.yml ├── setup.py ├── .gitignore ├── check_tag.py ├── README.md ├── .circleci └── config.yml ├── .pylintrc ├── MibiImage_Tutorial.ipynb └── MIBItracker_API_Tutorial.ipynb /mibidata/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /tests/test_imports.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /mibidata/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/tiling/README.md: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /scripts/tiling/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /mibitracker/tests/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /images/api_screenshot.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/api_screenshot.PNG -------------------------------------------------------------------------------- /images/postman-token.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/postman-token.PNG -------------------------------------------------------------------------------- /images/postman-auth-body.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/postman-auth-body.PNG -------------------------------------------------------------------------------- /mibitracker/__init__.py: 
-------------------------------------------------------------------------------- 1 | """Provides classes and methods for interacting with the MIBItracker API 2 | """ 3 | -------------------------------------------------------------------------------- /docs/source/_static/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/docs/source/_static/favicon.ico -------------------------------------------------------------------------------- /images/postman-auth-header.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/postman-auth-header.PNG -------------------------------------------------------------------------------- /images/postman-auth-token.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/postman-auth-token.PNG -------------------------------------------------------------------------------- /images/swagger_tissues_all.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/swagger_tissues_all.png -------------------------------------------------------------------------------- /mibidata/tests/data/v0.1.tiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/mibidata/tests/data/v0.1.tiff -------------------------------------------------------------------------------- /images/postman-images-retrieve.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/postman-images-retrieve.PNG -------------------------------------------------------------------------------- /images/swagger_images_by_tissue.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/swagger_images_by_tissue.png -------------------------------------------------------------------------------- /images/swagger_tissues_endpoints.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/swagger_tissues_endpoints.png -------------------------------------------------------------------------------- /images/swagger_tissues_filtered.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/swagger_tissues_filtered.png -------------------------------------------------------------------------------- /images/plot_circular_sectors_schematic.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ionpath/mibilib/HEAD/images/plot_circular_sectors_schematic.png -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | envlist = py311 3 | 4 | [testenv] 5 | deps = 6 | mock 7 | pytest 8 | commands = 9 | pytest 10 | -------------------------------------------------------------------------------- /mibitracker/mibitracker_exceptions.py: -------------------------------------------------------------------------------- 1 | """Provides a custom 
exception for MIBItracker API requests. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | class MibiTrackerError(Exception): 6 | """Raise for exceptions where the response from the MIBItracker API is 7 | invalid or unexpected.""" 8 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. IONpath's mibilib documentation master file. 2 | 3 | Welcome to IONpath's mibilib documentation! 4 | ====================================================== 5 | 6 | Contents: 7 | 8 | .. toctree:: 9 | :maxdepth: 2 10 | 11 | 12 | 13 | Indices and tables 14 | ================== 15 | 16 | * :ref:`genindex` 17 | * :ref:`modindex` 18 | * :ref:`search` 19 | 20 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: mibilib 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - pip=22.3.1 7 | - python=3.11.0 8 | - pip: 9 | - astroid==2.12.13 10 | - attrs==22.1.0 11 | - coverage==6.5.0 12 | - jupyter==1.0.0 13 | - matplotlib==3.6.2 14 | - mock==4.0.3 15 | - numpy==1.23.5 16 | - pandas==1.2.3 17 | - pillow==9.3.0 18 | - pylint==2.15.6 19 | - pytest==7.2.0 20 | - python-dotenv==0.21.0 21 | - requests==2.28.1 22 | - scikit-image==0.19.3 23 | - scikit-learn==1.1.3 24 | - setuptools==65.5.1 25 | - sphinx-argparse==0.2.2 26 | - sphinx==5.3.0 27 | - tifffile==2022.10.10 28 | - tox==3.27.1 29 | - tqdm==4.64.1 30 | -------------------------------------------------------------------------------- /docs/README.md: -------------------------------------------------------------------------------- 1 | # mibilib-doc 2 | Documentation for the mibilib package is available at 3 | https://ionpath.github.io/mibilib/. 4 | 5 | This directory contains the configuration for sphinx to auto-generate 6 | the documentation. 7 | 8 | It assumes the following directory structure: 9 | 10 | ``` 11 | mibilib/ 12 | |-- README 13 | |-- setup.py 14 | `-- mibitracker/ 15 | |-- __init__.py 16 | |-- submodule1.py 17 | |-- submodule2.py 18 | `-- tests/ 19 | docs/ 20 | |-- Makefile 21 | `-- source/ 22 | ``` 23 | 24 | To generate updated .rst files in the source/ directory, and an out/ 25 | directory containing the .html docs, execute the following commands from the 26 | parent mibilib directory: 27 | ``` 28 | sphinx-apidoc -f -o docs/source ./ */tests ./setup.py 29 | sphinx-build -b html docs/source out 30 | ``` 31 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """Setup file for installing dependencies for mibilib. 2 | 3 | Copyright (C) 2022 Ionpath, Inc.
All rights reserved.""" 4 | 5 | from setuptools import setup 6 | 7 | setup(name='mibilib', 8 | author='IONpath, Inc.', 9 | author_email='support@ionpath.com', 10 | version='1.5.0', 11 | url='https://github.com/ionpath/mibilib', 12 | description='Python utilities for IONpath MIBItracker and MIBItiff data', 13 | license='GNU General Public License v3.0', 14 | python_requires='~=3.11.0', 15 | install_requires=[ 16 | 'matplotlib==3.6.2', 17 | 'numpy==1.23.5', 18 | 'pandas==1.2.3', 19 | 'pillow==9.3.0', 20 | 'requests>=2.28.1', 21 | 'scikit-image==0.19.3', 22 | 'scikit-learn==1.1.3', 23 | 'tifffile==2022.10.10', 24 | 'tqdm==4.64.1', 25 | ], 26 | packages=['mibitracker', 'mibidata'] 27 | ) 28 | -------------------------------------------------------------------------------- /mibidata/tests/test_combine_tiffs.py: -------------------------------------------------------------------------------- 1 | """Tests for mibidata.combine_tiffs 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved. 4 | """ 5 | 6 | import unittest 7 | 8 | from mibidata import combine_tiffs 9 | 10 | 11 | class TestCombineTiffs(unittest.TestCase): 12 | 13 | def test_match_target_filename(self): 14 | filenames = ['CD45.tif', 'CD8.tiff', 'dsDNA.png', 'FoxP3.TIFF'] 15 | self.assertEqual( 16 | combine_tiffs._match_target_filename(filenames, 'CD45'), 17 | 'CD45.tif' 18 | ) 19 | self.assertEqual( 20 | combine_tiffs._match_target_filename(filenames, 'CD8'), 21 | 'CD8.tiff' 22 | ) 23 | self.assertEqual( 24 | combine_tiffs._match_target_filename(filenames, 'FoxP3'), 25 | 'FoxP3.TIFF' 26 | ) 27 | with self.assertRaises(ValueError): 28 | combine_tiffs._match_target_filename(filenames, 'dsDNA') 29 | 30 | 31 | if __name__ == '__main__': 32 | unittest.main() 33 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .DS_Store 3 | *~ 4 | 5 | # Byte-compiled / optimized / DLL files 6 | __pycache__/ 7 | *.py[cod] 8 | *$py.class 9 | 10 | # C extensions 11 | *.so 12 | 13 | # Distribution / packaging 14 | .Python 15 | env/ 16 | build/ 17 | develop-eggs/ 18 | dist/ 19 | downloads/ 20 | eggs/ 21 | .eggs/ 22 | lib/ 23 | lib64/ 24 | parts/ 25 | sdist/ 26 | var/ 27 | *.egg-info/ 28 | .installed.cfg 29 | *.egg 30 | .vs/ 31 | .vscode/ 32 | 33 | # PyInstaller 34 | # Usually these files are written by a python script from a template 35 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
36 | *.manifest 37 | *.spec 38 | 39 | # Installer logs 40 | pip-log.txt 41 | pip-delete-this-directory.txt 42 | 43 | # Unit test / coverage reports 44 | htmlcov/ 45 | .tox/ 46 | .coverage 47 | .coverage.* 48 | .cache 49 | nosetests.xml 50 | coverage.xml 51 | *,cover 52 | .hypothesis/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | 62 | # Flask stuff: 63 | instance/ 64 | .webassets-cache 65 | 66 | # Scrapy stuff: 67 | .scrapy 68 | 69 | # Sphinx documentation 70 | docs/build/ 71 | docs/source/*.rst 72 | out/* 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # IPython Notebook 78 | .ipynb_checkpoints 79 | 80 | # pyenv 81 | .python-version 82 | 83 | # celery beat schedule file 84 | celerybeat-schedule 85 | 86 | # dotenv 87 | .env 88 | 89 | # virtualenv 90 | venv/ 91 | ENV/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | .pytest_cache/ 99 | -------------------------------------------------------------------------------- /scripts/qupath/export_label_image.groovy: -------------------------------------------------------------------------------- 1 | import qupath.lib.regions.* 2 | import ij.* 3 | import java.awt.* 4 | import java.awt.image.BufferedImage 5 | import javax.imageio.ImageIO 6 | 7 | def server = getCurrentImageData().getServer() 8 | int w = server.getWidth() 9 | int h = server.getHeight() 10 | 11 | int[][] labelArray = new int[h][w] 12 | 13 | int index = 1 14 | for (detection in getDetectionObjects()) { 15 | roi = detection.getROI() 16 | 17 | for (int y = (int)roi.getBoundsY(); y <= (int)(roi.getBoundsY() + roi.getBoundsHeight()); y++){ 18 | for (int x = (int)roi.getBoundsX(); x <= (int)(roi.getBoundsX() + roi.getBoundsWidth()); x++) { 19 | if (roi.contains(x, y)) { 20 | labelArray[y][x] = index 21 | } 22 | } 23 | } 24 | index++ 25 | } 26 | 27 | def labelImage = new BufferedImage(w, h, BufferedImage.TYPE_USHORT_GRAY) 28 | 29 | def g2d = labelImage.createGraphics() 30 | 31 | for (int y = 0; y < h; y++) { 32 | for (int x = 0; x < w; x++) { 33 | labelValue = labelArray[y][x] 34 | labelImage.getRaster().setPixel(x, y, labelValue) 35 | } 36 | } 37 | 38 | g2d.dispose() 39 | 40 | new ImagePlus('Labels', labelImage).show() 41 | 42 | def projectDirectory = getProjectBaseDirectory() 43 | def imageName = getProjectEntry().getOriginalImageName() 44 | // QuPath defaults to '[image name].tiff - [image name].tiff SIMS' as image name 45 | // Try to make the output file name a little simpler. 46 | def nameParts = imageName.split('.tiff - ') 47 | def outputName = '' 48 | if (nameParts.size() == 2) { 49 | outputName = nameParts[0] 50 | } 51 | // Otherwise, it was renamed by user. 52 | else { 53 | outputName = imageName 54 | } 55 | def fileoutput = new File(projectDirectory, outputName + '-labels.png') 56 | ImageIO.write(labelImage, 'png', fileoutput) 57 | -------------------------------------------------------------------------------- /check_tag.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file contains functions that check whether the git tag and the 3 | version in setup.py are the same. If they are not, an exception is raised.
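It is intended to be run from the repository root as a CI check; the tag_test job in .circleci/config.yml invokes it as "python check_tag.py".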
4 | """ 5 | 6 | import re 7 | import subprocess 8 | 9 | 10 | def check_tag(): 11 | """ 12 | This function checks if tag in git and in setup.py are same and 13 | returns a boolean 14 | """ 15 | setuppy_version = get_tag_in_setuppy("setup.py") 16 | tag_version = get_latest_tag() 17 | print("The version number in setup.py is : ", setuppy_version) 18 | print("The version number in git tag is : ", tag_version) 19 | return setuppy_version == tag_version 20 | 21 | def get_latest_tag(): 22 | """ 23 | This function gets the tag from git and formats it as a string 24 | for later use. 25 | """ 26 | proc = subprocess.Popen(["git", "describe", "--tag"],\ 27 | stdout=subprocess.PIPE, stderr=subprocess.PIPE) 28 | stdout, _ = proc.communicate() 29 | version = stdout.strip().decode('utf-8') 30 | pattern = r"v(\d+).(\d+).(\d+)(.*?)" 31 | match = re.match(pattern, version) 32 | if match: 33 | major = match.group(1) 34 | minor = match.group(2) 35 | build = match.group(3) 36 | return "v"+".".join((major, minor, build)) 37 | return "" 38 | 39 | def get_tag_in_setuppy(file_path): 40 | """ 41 | This function gets the tag from setup.py and formats it as a 42 | string for later use. 43 | """ 44 | pattern = r"version='(\d+).(\d+).(\d+)'," 45 | with open(file_path) as fo: 46 | for line in fo: 47 | if line.strip().startswith("version"): 48 | match = re.match(pattern, line.strip()) 49 | if match: 50 | major = match.group(1) 51 | minor = match.group(2) 52 | build = match.group(3) 53 | return "v"+".".join((major, minor, build)) 54 | return "" 55 | 56 | if __name__ == '__main__': 57 | if not check_tag(): 58 | raise \ 59 | Exception("The version number in setup.py and git tag do not match") 60 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![CircleCI](https://circleci.com/gh/ionpath/mibilib.svg?style=svg&circle-token=e798611a4abf9f2503a532c8ad5fd02d849d85a0)](https://circleci.com/gh/ionpath/mibilib) 2 | 3 | # mibilib 4 | 5 | Python client for IONpath MIBItracker API, plus utility functions for working 6 | with MIBItiff images. 7 | 8 | https://ionpath.github.io/mibilib/ 9 | 10 | ## Setup 11 | 12 | ### Install Python 3.7 13 | Install the Python 3.7 version of [Miniconda](https://conda.io/miniconda.html). 14 | Even if your system already has a version of Python installed, it is strongly 15 | recommended to use the `conda` environment manager to install and manage this 16 | library's dependencies. 17 | 18 | ### Option A (Development): Clone repository and create environment 19 | This option downloads the source code and creates a development environment. It does not add this library to the path, so if your use case is to import it into 20 | other projects rather than interact with the source, Option B is recommended. 21 | ```bash 22 | cd 23 | git clone https://github.com/ionpath/mibilib 24 | cd mibilib 25 | ``` 26 | 27 | ```bash 28 | conda env create -f environment.yml 29 | conda activate mibilib 30 | ``` 31 | 32 | ### Option B (Usage): Install with pip 33 | This option is useful if you want this library to be installed as part of an 34 | existing environment or as a dependency inside a requirements.txt file. 
You may 35 | install a particular release using its tag (recommended) 36 | ```bash 37 | pip install git+https://github.com/ionpath/mibilib@v1.5.0 38 | ``` 39 | or a branch (that may be under development with frequent changes) 40 | ```bash 41 | pip install git+https://github.com/ionpath/mibilib@master 42 | ``` 43 | 44 | ## Usage 45 | To use the MIBItracker API, you will need to use the backend URL listed in the 46 | About page. This can be accessed after you have logged in from the menu 47 | under your username in the upper right of the window. 48 | ```python 49 | from mibitracker.request_helpers import MibiRequests 50 | 51 | request = MibiRequests( 52 | 'https://your-mibitracker-backend.appspot.com', 53 | 'user@example.com', 54 | 'password1234' 55 | ) 56 | image_id = request.image_id('20180927', 'Point3') 57 | image_details = request.get('/images/{}/'.format(image_id)) 58 | ``` 59 | 60 | More examples can be found in the following notebooks: 61 | 62 | - [MIBItracker_API_Tutorial](MIBItracker_API_Tutorial.ipynb) 63 | 64 | - [MibiImage_Tutorial](MibiImage_Tutorial.ipynb) 65 | 66 | - [SingleCellSpatialExamples](SingleCellSpatialExamples.ipynb) 67 | 68 | Full documentation for this library can be found at 69 | https://ionpath.github.io/mibilib/. 70 | 71 | ## Sample data 72 | Access to sample data to run the tutorials in the notebooks can be 73 | requested by creating an account at the following URL: 74 | https://mibi-share.ionpath.com. 75 | -------------------------------------------------------------------------------- /mibidata/runs.py: -------------------------------------------------------------------------------- 1 | """Utilities for working with MIBI run metadata. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import os 6 | import re 7 | import xml.etree.ElementTree as ElementTree 8 | 9 | MASS_CALIBRATION_PARAMETERS = ('TimeResolution', 'MassGain', 'MassOffset', 10 | 'MassRange', 'XSize', 'YSize') 11 | FOV_PATTERN = re.compile('^(Depth_Profile|Chemical_Image)$') 12 | _MICRONS_PER_MOTOR_STEP = 0.1 # value of a motor step in microns 13 | 14 | def parse_xml(path): 15 | """Read a run XML and return a list of image metadata dicts, plus a 16 | dict of the mass calibration and other msdf metadata. 17 | 18 | Args: 19 | path: A path to a run XML file. 20 | 21 | Returns: 22 | fovs: A list of image metadata dicts for each FOV. 23 | calibration: A dict containing mass calibration parameters. 24 | """ 25 | run = os.path.splitext(os.path.split(path)[1])[0] 26 | fovs = [] 27 | calibration = {} 28 | tree = ElementTree.parse(path) 29 | calibration['RasterStyle'] = tree.getroot().attrib.get('RasterStyle') 30 | runtime = tree.find('./Root').attrib.get('RunTime') 31 | # Hack for when run has crashed midway through and datetime is unavailable.
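# ('0001-01-01T00:00:00' looks like a serialized minimum/default DateTime, so it is treated as "no run time recorded" and replaced with None.)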
32 | if runtime == '0001-01-01T00:00:00': 33 | runtime = None 34 | for j, point in enumerate(tree.findall('./Root/Point'), 1): 35 | number = 'Point{}'.format(j) 36 | counter = {'Depth_Profile': 0, 'Chemical_Image': 0} 37 | name = point.attrib.get('PointName') 38 | for item in point: 39 | match = re.match(FOV_PATTERN, item.tag) 40 | if item.tag.startswith('RowNumber'): 41 | row_num = item.tag 42 | coordinates = ( 43 | float(item.attrib.get('XAttrib')) * _MICRONS_PER_MOTOR_STEP, 44 | float(item.attrib.get('YAttrib')) * _MICRONS_PER_MOTOR_STEP) 45 | continue 46 | elif match: 47 | parent = '{}{}'.format(match.group(1), counter[match.group(1)]) 48 | counter[match.group(1)] += 1 49 | folder = os.path.join(number, row_num, parent) 50 | for param in MASS_CALIBRATION_PARAMETERS: 51 | # Only use the mass calibration values from the first FOV in 52 | # case the run is stopped prematurely and the values are not 53 | # written out for subsequent FOVs. 54 | if param not in calibration: 55 | calibration[param] = float(item.attrib.get(param)) 56 | fovs.append({ 57 | 'run': run, 58 | 'folder': folder, 59 | 'dwell': float(item.attrib.get('AcquisitionTime')), 60 | 'scans': int(item.attrib.get('MaxNumberOfLevels', 1)), 61 | 'coordinates': coordinates, 62 | 'point_name': name, 63 | 'date': runtime, 64 | }) 65 | return fovs, calibration 66 | -------------------------------------------------------------------------------- /mibidata/tests/test_runs.py: -------------------------------------------------------------------------------- 1 | """Tests for mibidata.runs 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved. 4 | """ 5 | 6 | import os 7 | import shutil 8 | import tempfile 9 | import unittest 10 | 11 | from mibidata import runs 12 | 13 | # Run calibration file. 14 | XML = ( 15 | '<?xml version="1.0" encoding="utf-8"?>\n' 16 | '<DocRoot>\n' 17 | '<Root RunTime="2016-03-21T15:03:27">\n' 18 | '<Point PointName="Point">\n' 19 | '<RowNumber0 XAttrib="1000" YAttrib="2000" />\n' 20 | '<Chemical_Image AcquisitionTime="4" TimeResolution="0.5" ' 21 | 'MassGain="1" MassOffset="0" MassRange="200" ' 22 | 'XSize="2" YSize="2">\n' 23 | '</Chemical_Image>\n' 24 | '</Point>\n' 25 | '<Point PointName="Custom">\n' 26 | '<RowNumber0 XAttrib="-1000" YAttrib="-2000" />\n' 27 | '<Depth_Profile AcquisitionTime="0.2" MaxNumberOfLevels="2" ' 28 | 'TimeResolution="0.5" MassGain="1" MassOffset="0" ' 29 | 'MassRange="200" XSize="2" YSize="2">\n' 30 | '</Depth_Profile>\n' 31 | '</Point>\n' 32 | '</Root>\n' 33 | '</DocRoot>\n' 34 | '' 35 | ) 36 | 37 | 38 | class TestRuns(unittest.TestCase): 39 | 40 | @classmethod 41 | def setUpClass(cls): 42 | # Write the run xml file to a temporary directory.
43 | cls.test_dir = tempfile.mkdtemp() 44 | 45 | cls.xml = os.path.join(cls.test_dir, 'run.xml') 46 | with open(cls.xml, 'w') as infile: 47 | infile.write(XML) 48 | 49 | @classmethod 50 | def tearDownClass(cls): 51 | shutil.rmtree(cls.test_dir) 52 | 53 | def test_parse_xml(self): 54 | 55 | expected_fovs = [ 56 | { 57 | 'run': 'run', 58 | 'folder': os.path.join('Point1', 'RowNumber0', 59 | 'Chemical_Image0'), 60 | 'dwell': 4.0, 61 | 'coordinates': (100, 200), 62 | 'point_name': 'Point', 63 | 'date': '2016-03-21T15:03:27', 64 | 'scans': 1, 65 | }, 66 | { 67 | 'run': 'run', 68 | 'folder': os.path.join('Point2', 'RowNumber0', 69 | 'Depth_Profile0'), 70 | 'dwell': 0.2, 71 | 'coordinates': (-100, -200), 72 | 'point_name': 'Custom', 73 | 'date': '2016-03-21T15:03:27', 74 | 'scans': 2, 75 | }, 76 | ] 77 | expected_calibration = { 78 | 'RasterStyle': None, 79 | 'TimeResolution': 0.5, 80 | 'MassGain': 1.0, 81 | 'MassOffset': 0.0, 82 | 'MassRange': 200.0, 83 | 'XSize': 2.0, 84 | 'YSize': 2.0 85 | } 86 | fovs, calibration = runs.parse_xml(self.xml) 87 | 88 | self.assertEqual(fovs, expected_fovs) 89 | self.assertEqual(calibration, expected_calibration) 90 | 91 | 92 | if __name__ == '__main__': 93 | unittest.main() 94 | -------------------------------------------------------------------------------- /mibidata/tests/test_util.py: -------------------------------------------------------------------------------- 1 | """Tests for mibidata.util 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import unittest 6 | import numpy as np 7 | from numpy.testing import assert_array_equal 8 | 9 | from mibidata import util 10 | 11 | 12 | class TestUtil(unittest.TestCase): 13 | 14 | def test_natural_sort(self): 15 | names = ['Image0.msdf', 'Image0.h5', 'Image10.h5', 'Image2.h5'] 16 | util.natural_sort(names) 17 | expected_names = ['Image0.h5', 'Image0.msdf', 'Image2.h5', 'Image10.h5'] 18 | self.assertEqual(names, expected_names) 19 | 20 | 21 | def test_encode_list(self): 22 | 23 | strings = ['Five', 'birds', 'one', 'stone'] 24 | bytes_objects = ['Five'.encode('utf-8'), 'birds'.encode('utf-8'), 25 | 'one'.encode('utf-8'), 'stone'.encode('utf-8')] 26 | 27 | encoded = util.encode_strings(strings) 28 | self.assertListEqual(encoded, bytes_objects) 29 | 30 | 31 | def test_decode_list(self): 32 | 33 | strings = ['Five', 'birds', 'one', 'stone'] 34 | bytes_objects = ['Five'.encode('utf-8'), 'birds'.encode('utf-8'), 35 | 'one'.encode('utf-8'), 'stone'.encode('utf-8')] 36 | 37 | decoded = util.decode_strings(bytes_objects) 38 | self.assertListEqual(decoded, strings) 39 | 40 | 41 | def test_sort_channel_names(self): 42 | expected_list = ["30CD", "35 CD", "CD20", "CD45", "dsDNA", "FOXP3", 43 | "HLA DR", "Keratin", "PD-L1", "Vimentin", 44 | "\u03D0-tubulin", "23", "150", "151", "162", "165", 45 | "175"] 46 | 47 | unsorted_list = ["165", "dsDNA", "23", "Keratin", "CD45", "CD20", "162", 48 | "35 CD", "151", "\u03D0-tubulin", "FOXP3", "PD-L1", 49 | "175", "30CD", "Vimentin", "HLA DR", "150"] 50 | 51 | util.sort_channel_names(unsorted_list) 52 | 53 | self.assertListEqual(unsorted_list, expected_list) 54 | 55 | 56 | def test_format_for_filename(self): 57 | 58 | input_names = ['ChannelWith/Slash', 'ChannelWithoutSlash', 59 | 'ChannelWithDouble\\Slash', 'NF-\u03BAB'] 60 | expected_names = ['ChannelWith-Slash', 'ChannelWithoutSlash', 61 | 'ChannelWithDouble-Slash', 'NF-κB'] 62 | 63 | formatted_names = [util.format_for_filename(n) for n in input_names] 64 | 65 | self.assertListEqual(formatted_names, expected_names) 66 
| 67 | 68 | def test_car2pol(self): 69 | 70 | x_c, y_c = 0, 0 71 | 72 | x = np.array([1, 0, -1, 0]) 73 | y = np.array([0, 1, 0, -1]) 74 | 75 | expected_r = np.array([1., 1., 1., 1.]) 76 | expected_phi = np.arange(4)*np.pi/2. 77 | expected_phi_deg = np.arange(4)*90. 78 | 79 | r, phi = util.car2pol(x, y, x_c, y_c) 80 | 81 | assert_array_equal(r, expected_r) 82 | assert_array_equal(phi, expected_phi) 83 | 84 | phi_deg = util.car2pol(x, y, x_c, y_c, degrees=True)[1] 85 | assert_array_equal(phi_deg, expected_phi_deg) 86 | 87 | 88 | if __name__ == '__main__': 89 | unittest.main() 90 | -------------------------------------------------------------------------------- /mibidata/panels.py: -------------------------------------------------------------------------------- 1 | """Utility for working with panels saved as CSV files. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import numpy as np 6 | import pandas as pd 7 | from pandas.errors import ParserError 8 | from mibidata import util 9 | 10 | 11 | def read_csv(path): 12 | """Reads a panel CSV file into a dataframe. 13 | 14 | Args: 15 | path: The path to the CSV file. 16 | 17 | Returns: 18 | A dataframe containing columns 'Mass' and 'Target'. 19 | """ 20 | try: 21 | # First try if the CSV is simply Mass,Target, 22 | df = pd.read_csv(path, encoding='utf-8') 23 | # CSV may parse successfully but not have proper columns 24 | if not {'Mass', 'Target'}.issubset(set(df.columns)): 25 | raise ParserError 26 | return merge_masses(df) 27 | except ParserError: 28 | # Determine lines that indicate a table header line 29 | header_lines = [] 30 | with open(path, 'rt', encoding='utf-8') as f: 31 | line_pos = 0 32 | for line in f: 33 | if 'Mass' in line and 'Target' in line: 34 | header_lines.append(line_pos) 35 | line_pos += 1 36 | 37 | last_line = line_pos 38 | 39 | # Determine start and end lines for each batch in file 40 | df = [] 41 | for i, start in enumerate(header_lines): 42 | try: 43 | # Two blank lines and 4 lines of info between 44 | # batches in multi-batch CSV 45 | end = header_lines[i + 1] - 5 46 | except IndexError: 47 | end = last_line 48 | 49 | batch = pd.read_csv(path, skiprows=start, 50 | skipfooter=(last_line - end), 51 | engine='python', 52 | encoding='utf-8')[['Mass', 'Target']] 53 | 54 | # Remove empty rows if they exist 55 | df.append(batch.dropna()) 56 | 57 | # Combine and convert 'Mass' column to int 58 | combined = pd.concat(df, ignore_index=True) 59 | combined['Mass'] = combined['Mass'].astype(np.int64) 60 | combined['Target'] = combined['Target'].astype(str) 61 | return merge_masses(combined) 62 | 63 | 64 | def merge_masses(df): 65 | """Merges 'Target' cells of a DataFrame with the same 'Mass' value. 66 | 67 | This function merges multiple targets that are conjugated to the same mass 68 | tag such that the returned DataFrame contains only unique masses. Target 69 | names are combined using the conventions of :func:'util.natural_sort()'. 70 | 71 | Args: 72 | df: A DataFrame of the panel containing columns 'Mass' and 73 | 'Target'. 74 | 75 | Returns: 76 | A DataFrame containing columns 'Mass' and 'Target' with merged targets 77 | of the same mass. 
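For example, rows (20, 'Target2') and (20, 'Target4') are combined into a single row (20, 'Target2, Target4').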
78 | """ 79 | conjugates = {} 80 | target_list = [] 81 | for conj in df.to_dict(orient='records'): 82 | mass = conj['Mass'] 83 | target = conj['Target'] 84 | if conjugates.get(mass): 85 | conjugates[mass].append(target) 86 | else: 87 | conjugates[mass] = [target] 88 | for mass in conjugates: 89 | target_list = conjugates[mass] 90 | util.natural_sort(target_list) 91 | conjugates[mass] = ', '.join(target_list) 92 | return pd.DataFrame(list(conjugates.items()), columns=['Mass', 'Target']) 93 | -------------------------------------------------------------------------------- /scripts/tiling/tiling.py: -------------------------------------------------------------------------------- 1 | """ Script for creating an FOV list json file that contains a grid of FOVs 2 | generated from a single FOV json. 3 | """ 4 | 5 | import os 6 | import sys 7 | import json 8 | import datetime 9 | import argparse 10 | import copy 11 | import numpy as np 12 | 13 | def tile(fov_list_json_file, xn, yn, overlap_x, overlap_y): 14 | ''' Using a template json file, creates another fov json that includes 15 | the tiled version of the original FOV. 16 | Args: 17 | fov_list_json_file: The FOV json containing the FOV to be tiled. The 18 | resulting tiled json is created in the same directory as this file. 19 | xn: The number of tiles in the x direction. 20 | yn: The number of tiles in the y direction. 21 | overlap_x: The degree of overlap between tiles in the x direction. 22 | Must be between -1 and 1. Negative values result in spacing between 23 | the FOVs. 24 | overlap_y: The degree of overlap between tiles in the y direction. 25 | ''' 26 | 27 | with open(fov_list_json_file, 'r') as f: 28 | fov_list_single = json.load(f) 29 | dt = datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S') 30 | fov_list = { 31 | 'exportDateTime': dt, 32 | 'fovFormatVersion': fov_list_single['fovFormatVersion'], 33 | 'fovs': [] 34 | } 35 | 36 | fov_size = fov_list_single['fovs'][0]['fovSizeMicrons'] 37 | x = fov_list_single['fovs'][0]['centerPointMicrons']['x'] 38 | y = fov_list_single['fovs'][0]['centerPointMicrons']['y'] 39 | overlap_x_microns = fov_size * overlap_x 40 | overlap_y_microns = fov_size * overlap_y 41 | for xi in np.arange(xn): 42 | for yi in np.arange(yn): 43 | cur_x = x + xi * (fov_size - overlap_x_microns) 44 | cur_y = y + yi * (fov_size - overlap_y_microns) 45 | fov = copy.deepcopy(fov_list_single['fovs'][0]) 46 | fov['centerPointMicrons']['x'] = cur_x 47 | fov['centerPointMicrons']['y'] = cur_y 48 | fov['name'] = f'row{yi}_col{xi}' 49 | fov_list['fovs'].append(fov) 50 | json_file_dest = os.path.join( 51 | os.path.dirname(fov_list_json_file), f'fov-list-{xn}x{yn}.json') 52 | with open(json_file_dest, 'w') as f: 53 | json.dump(fov_list, f, indent=4) 54 | 55 | 56 | def get_parser(): 57 | ''' Generates the command line argument parser. ''' 58 | parser = argparse.ArgumentParser( 59 | description='Generate an MIBIcontrol-importable FOV list json file ' 60 | 'that contains a grid of FOVs. Example usage: \n' 61 | 'python tiling.py fov-list.json 5 5 0.1') 62 | parser.add_argument( 63 | 'fov_list_json_file', type=str, 64 | help='The path to the fov-list.json file containing a single FOV that ' 65 | 'has been exported from MIBIcontrol. This script converts this ' 66 | 'json into a json file containing multiple fovs. 
The convention is ' 67 | 'that the top left FOV in the grid is at the same location as the ' 68 | 'single FOV in the original fov-list.json file.') 69 | parser.add_argument('xn', type=int, help='Number of FOVs in x-dir.') 70 | parser.add_argument('yn', type=int, help='Number of FOVs in y-dir.') 71 | parser.add_argument( 72 | 'overlap_x', type=float, help='X overlap between -1 and 1.') 73 | parser.add_argument( 74 | 'overlap_y', type=float, help='Y overlap between -1 and 1.') 75 | return parser 76 | 77 | if __name__ == '__main__': 78 | args = get_parser().parse_args(sys.argv[1:]) 79 | tile(**vars(args)) 80 | -------------------------------------------------------------------------------- /mibidata/tests/test_color.py: -------------------------------------------------------------------------------- 1 | """Tests for mibidata.color 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import unittest 6 | 7 | import numpy as np 8 | import numpy.testing as npt 9 | 10 | from mibidata import color 11 | from mibidata.mibi_image import MibiImage 12 | 13 | 14 | RGB = np.array([[ 15 | [1., 1., 1.], 16 | [0.5, 0.5, 0.5], 17 | [0., 0., 0.], 18 | [1., 0., 0.], 19 | [0.75, 0.75, 0.], 20 | [0., 0.5, 0.], 21 | [0.5, 1., 1.], 22 | [0.5, 0.5, 1.], 23 | [0.75, 0.25, 0.75], 24 | [0., 1., 0.], 25 | [0., 0., 1.], 26 | [0.25, 0.25, 0.25] 27 | ]]).reshape(3, 4, 3) 28 | 29 | 30 | HSL = np.array([[ 31 | [0., 0., 1.], 32 | [0., 0., 0.5], 33 | [0., 0., 0.], 34 | [0., 1., 0.5], 35 | [np.pi / 3, 1., 0.375], 36 | [2 * np.pi / 3, 1., 0.25], 37 | [np.pi, 1., 0.75], 38 | [4 * np.pi / 3, 1., 0.75], 39 | [5 * np.pi / 3, 0.5, 0.5], 40 | [2 * np.pi / 3., 1., 0.5], 41 | [4 * np.pi / 3., 1., 0.5], 42 | [0., 0., 0.25], 43 | ]]).reshape(3, 4, 3) 44 | 45 | 46 | CYM = np.array([[ 47 | [1., 1., 1.], 48 | [0.5, 0.5, 0.5], 49 | [0., 0., 0.], 50 | [0., 1., 1.], 51 | [0., 0.75, 0.], 52 | [0.5, 0.5, 0.], 53 | [1., 0.5, 0.5], 54 | [1., 0.5, 1.], 55 | [0.25, 0.25, 0.75], 56 | [1., 1., 0.], 57 | [1., 0., 1.], 58 | [0.25, 0.25, 0.25] 59 | ]]).reshape(3, 4, 3) 60 | 61 | 62 | class TestColor(unittest.TestCase): 63 | 64 | def test_trim(self): 65 | array = np.random.rand(4, 2, 3) 66 | array[3, 1, 1] = -0.01 67 | array[1, 0, 1] = 1.01 68 | expected = array.copy() 69 | expected[3, 1, 1] = 0. 70 | expected[1, 0, 1] = 1.
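# _trim should clip out-of-range values into [0, 1] and leave in-range values untouched.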
71 | npt.assert_array_equal(color._trim(array), expected) 72 | 73 | def test_rgb2hsl_rainbow(self): 74 | npt.assert_array_almost_equal(color.rgb2hsl(RGB), HSL) 75 | 76 | def test_rgb2hsl_out_of_range(self): 77 | with self.assertRaises(ValueError): 78 | color.rgb2hsl(RGB + 0.1) 79 | with self.assertRaises(ValueError): 80 | color.rgb2hsl(RGB - 0.1) 81 | 82 | def test_hsl2rgb_rainbow(self): 83 | npt.assert_array_almost_equal(color.hsl2rgb(HSL), RGB) 84 | 85 | def test_hsl2rgb_out_of_range(self): 86 | hsl = HSL.copy() 87 | hsl[:, :, 0] += np.pi / 2 88 | with self.assertRaises(ValueError): 89 | color.hsl2rgb(hsl) 90 | hsl[:, :, 0] -= np.pi / 2 91 | hsl[:, :, 1:] += 0.1 92 | with self.assertRaises(ValueError): 93 | color.hsl2rgb(hsl) 94 | with self.assertRaises(ValueError): 95 | color.hsl2rgb(HSL - 0.1) 96 | 97 | def test_invert_luminosity(self): 98 | inverted = color.invert_luminosity(RGB) 99 | hsl_inverted = color.rgb2hsl(inverted) 100 | npt.assert_array_almost_equal(hsl_inverted[:, :, :2], HSL[:, :, :2]) 101 | npt.assert_array_almost_equal(hsl_inverted[:, :, 2], 1 - HSL[:, :, 2]) 102 | 103 | def test_rgb2cym(self): 104 | npt.assert_array_almost_equal(color.rgb2cym(RGB), CYM) 105 | npt.assert_array_almost_equal(color.rgb2cym(CYM), RGB) 106 | 107 | def test_composite_to_red_and_cyan(self): 108 | red = np.random.randint(0, 100, (10, 10), np.uint16) 109 | cyan = np.arange(100).reshape((10, 10)).astype(np.uint16) 110 | im = MibiImage(np.stack((red, cyan), axis=2), ['red', 'cyan']) 111 | 112 | expected_red = ((red / np.max(red)) * 255).astype(int) 113 | expected_green_blue = (cyan / np.max(cyan) * 255).astype(int) 114 | 115 | screen_red_only = color.composite(im, {'Red': 'red'}, gamma=1) 116 | # Could be off by one after round-tripping through all of this. 117 | max_diff_red = np.max(np.abs( 118 | (screen_red_only[:, :, 0]).astype(int) - expected_red)) 119 | self.assertTrue(max_diff_red <= 1) 120 | 121 | screen_both = color.composite( 122 | im, {'Red': 'red', 'Cyan': 'cyan'}, gamma=1) 123 | max_diff_red = np.max(np.abs( 124 | (screen_both[:, :, 0]).astype(int) - expected_red)) 125 | self.assertTrue(max_diff_red <= 1) 126 | max_diff_green = np.max(np.abs( 127 | (screen_both[:, :, 1]).astype(int) - expected_green_blue)) 128 | self.assertTrue(max_diff_green <= 1) 129 | max_diff_blue = np.max(np.abs( 130 | (screen_both[:, :, 2]).astype(int) - expected_green_blue)) 131 | self.assertTrue(max_diff_blue <= 1) 132 | 133 | 134 | if __name__ == '__main__': 135 | unittest.main() 136 | -------------------------------------------------------------------------------- /.circleci/config.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | 3 | jobs: 4 | build: 5 | docker: 6 | - image: cimg/python:3.11 7 | working_directory: ~/mibilib 8 | steps: 9 | - checkout: 10 | path: ~/mibilib 11 | - restore_cache: 12 | key: cache2-{{ checksum "environment.yml" }} 13 | - run: 14 | name: Install Python deps in a conda env if not cached 15 | command: | 16 | if [[ !
-d ~/miniconda/envs/mibilib ]]; then 17 | echo "Installing Miniconda ..."; 18 | wget https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O ~/miniconda.sh; 19 | bash ~/miniconda.sh -b -f -p ~/miniconda; 20 | ~/miniconda/bin/conda env create -f environment.yml; 21 | else 22 | echo "Using cached Miniconda install"; 23 | fi 24 | - save_cache: 25 | key: cache2-{{ checksum "environment.yml" }} 26 | paths: 27 | - ~/miniconda/envs/mibilib 28 | - run: 29 | name: Run tests 30 | command: | 31 | sudo apt-get update 32 | sudo apt install default-jre 33 | ~/miniconda/envs/mibilib/bin/coverage run -m py.test mibitracker mibidata scripts 34 | ~/miniconda/envs/mibilib/bin/coverage html -i --omit=*/tests/* 35 | - run: 36 | name: Run pylint on files changed from master 37 | command: | 38 | for i in $(git diff origin/master --name-only | grep \\.py$ | grep -v tests); do 39 | if [[ -f $i ]]; then 40 | echo "linting $i"; 41 | ~/miniconda/envs/mibilib/bin/pylint $i; 42 | else 43 | echo "skipping $i because it was deleted"; 44 | fi 45 | done 46 | for i in $(git diff origin/master --name-only | grep tests/.*\\.py$); do 47 | if [[ -f $i ]]; then 48 | echo "linting $i"; 49 | ~/miniconda/envs/mibilib/bin/pylint \ 50 | --disable=protected-access,no-name-in-module,import-error,no-member,unused-argument \ 51 | $i; 52 | else 53 | echo "skipping $i because it was deleted"; 54 | fi 55 | done 56 | - store_artifacts: 57 | path: htmlcov 58 | - run: 59 | name: Test setup.py with tox if environment has changed. 60 | command: | 61 | if [ $(git diff origin/master --name-only -- environment.yml) ]; then 62 | echo "Running tox to test setup.py" 63 | pip install tox 64 | #tox 65 | else 66 | echo "Environment did not change; skipping tox." 67 | fi 68 | - deploy: 69 | name: Deploy docs 70 | command: | 71 | if [ $CIRCLE_BRANCH == "master" ]; then 72 | git config --global user.email $(git --no-pager show -s --format='%ae' HEAD) 73 | git config --global user.name $(git --no-pager show -s --format='%an' HEAD) 74 | git clone https://github.com/ionpath/mibilib out 75 | cd out 76 | git checkout gh-pages || git checkout --orphan gh-pages 77 | git rm -rf . 78 | cd .. 79 | ~/miniconda/envs/mibilib/bin/sphinx-apidoc -f -o docs/source ./ */tests ./setup.py 80 | ~/miniconda/envs/mibilib/bin/sphinx-build -b html docs/source out 81 | mkdir out/.circleci && cp -a .circleci/. out/.circleci/. 
82 | cd out 83 | touch .nojekyll 84 | git add -A 85 | git commit -m "Automated deployment to GitHub Pages: ${CIRCLE_SHA1}" --allow-empty 86 | git push origin gh-pages 87 | fi 88 | - persist_to_workspace: 89 | root: ~/mibilib 90 | paths: 91 | - docs 92 | tag_test: 93 | working_directory: ~/mibilib 94 | docker: 95 | - image: cimg/python:3.11.0 # container for the build job 96 | 97 | steps: 98 | - checkout: 99 | path: ~/mibilib 100 | - run: 101 | name: Check if tag and setup.py are consistent 102 | command: | 103 | python check_tag.py 104 | workflows: 105 | version: 2 106 | untagged-build: 107 | jobs: 108 | - build: 109 | filters: 110 | branches: 111 | ignore: 112 | - gh-pages 113 | tagged-build: 114 | jobs: 115 | - tag_test: 116 | filters: 117 | # ignore any commit on any branch by default 118 | branches: 119 | ignore: /.*/ 120 | # only act on version tags 121 | tags: 122 | only: /^v.*/ 123 | - build: 124 | requires: 125 | - tag_test 126 | -------------------------------------------------------------------------------- /mibidata/util.py: -------------------------------------------------------------------------------- 1 | """ Common convenience functions used in mibidata module. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import re 6 | import functools 7 | import numpy as np 8 | 9 | DELIMITERS = {'/', '\\'} 10 | 11 | 12 | def encode_strings(string_list, encoding='utf-8'): 13 | """Encodes each string in a list into a bytes object and returns a list 14 | of bytes objects. Useful function when writing and reading string lists 15 | which are not supported in pytables. 16 | 17 | Args: 18 | string_list: A list of strings to encode. 19 | encoding: Encoding to use, defaults to UTF-8. 20 | 21 | Returns: 22 | A list of bytes objects that have been encoded with the given encoding. 23 | """ 24 | return [s.encode(encoding) for s in string_list] 25 | 26 | 27 | def decode_strings(bytes_objects_list, encoding='utf-8'): 28 | """Decodes each bytes object in a list into a string and returns a list 29 | of strings. Useful function when writing and reading string lists which are 30 | not supported in pytables. 31 | 32 | Args: 33 | bytes_objects_list: A list of bytes objects to decode. 34 | encoding: Encoding to use, defaults to UTF-8. 35 | 36 | Returns: 37 | A list of strings that have been decoded with the given encoding. 38 | """ 39 | return [b.decode(encoding) for b in bytes_objects_list] 40 | 41 | 42 | def natural_sort(l): 43 | """Sorts a list of strings in the way that humans expect, in place. 44 | 45 | For example, this would order *'Depth2'* before *'Depth11'*, whereas 46 | ``sort`` would place those in the opposite order. 47 | 48 | Args: 49 | l: A list of strings. 50 | """ 51 | def _strtoint(s): 52 | try: 53 | return int(s) 54 | except ValueError: 55 | return s 56 | 57 | def _alphanum_key(s): 58 | """Converts a string into a list of string and number.""" 59 | return [_strtoint(c) for c in re.split('([0-9]+)', s)] 60 | l.sort(key=_alphanum_key) 61 | 62 | 63 | def sort_channel_names(l): 64 | """Sorts a list of strings in place such that number-only strings come after 65 | all text strings, and text strings are sorted alphabetically, ignoring case. 66 | 67 | ex: ['beta-tubulin', 'CD20', 'CD4', 'CD45', 'CD8', 'dsDNA', 'Keratin', 68 | '23', '97', '144', '150'] 69 | 70 | Args: 71 | l: A list of strings.
72 | """ 73 | 74 | def compare_items(x, y): 75 | """The return value is negative if x < y, 76 | zero if x == y and strictly positive if x > y 77 | """ 78 | try: 79 | fx = float(x) 80 | try: 81 | fy = float(y) 82 | # x and y are both numbers 83 | if fx < fy: 84 | return -1 85 | if fx > fy: 86 | return 1 87 | return 0 88 | except ValueError: 89 | # x is a number, y is a non-number 90 | return 1 91 | except ValueError: 92 | try: 93 | fy = float(y) 94 | # y is a number, x is a non-number 95 | return -1 96 | except ValueError: 97 | # x and y are non-numbers, compare lower case 98 | x_lower = x.lower() 99 | y_lower = y.lower() 100 | if x_lower < y_lower: 101 | return -1 102 | if x_lower > y_lower: 103 | return 1 104 | return 0 105 | 106 | l.sort(key=functools.cmp_to_key(compare_items)) 107 | 108 | 109 | def format_for_filename(label): 110 | """Replaces delimiters and utf-8 encodes targets for use as filenames.""" 111 | for char in set(label).intersection(DELIMITERS): 112 | label = label.replace(char, '-') 113 | return label 114 | 115 | 116 | def car2pol(x, y, x_c=0, y_c=0, degrees=False): 117 | """Convert cartesian to polar coordinates w.r.t. a central point. 118 | Angle phi is returned in the range [0, 2 pi) rad. A flag can be activated 119 | to return phi angles in degrees. 120 | 121 | Args: 122 | x, y: arrays of coordinates to transform. 123 | x_c, y_c: numbers representing the coordinates of the center 124 | (origin of polar coordinates). Optional; default is (0, 0). 125 | degrees: flag to return phi angles in degrees instead of radians. 126 | Optional; default is False (i.e. radians). 127 | 128 | Returns: 129 | r, phi: arrays of transformed coordinates. 130 | """ 131 | r = np.sqrt((x - x_c)**2 + (y - y_c)**2) 132 | phi = np.arctan2(y - y_c, x - x_c) 133 | # convert: (-pi, pi] --> [0, 2 pi) 134 | phi[phi[:] < 0] += 2.*np.pi 135 | if degrees: 136 | phi *= 180./np.pi 137 | 138 | return r, phi 139 | -------------------------------------------------------------------------------- /mibidata/constants.py: -------------------------------------------------------------------------------- 1 | """Constants used in mibidata module. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | COLORS = { 6 | 'Cyan': (0.0, 1.0, 1.0), 7 | 'Yellow': (1.0, 1.0, 0.0), 8 | 'Magenta': (1.0, 0.0, 1.0), 9 | 'Green': (0.0, 1.0, 0.0), 10 | 'Orange': (1.0, 0.6470588235294118, 0.0), 11 | 'Violet': (0.6980392156862745, 0.5058823529411764, 0.9333333333333333), 12 | 'Red': (1.0, 0.0, 0.0), 13 | 'Blue': (0.0, 0.0, 1.0), 14 | 'Gray': (1.0, 1.0, 1.0), 15 | } 16 | 17 | OVERLAY_MIN_BRIGHTNESS = -0.9 18 | 19 | # The kernels below are those used by WegGL convolution in MIBItracker. 20 | # pylint: disable=line-too-long 21 | OVERLAY_SMOOTHING_KERNELS = [ 22 | # A blur of 0 still gets a convolution to mimic browser rendering. 
23 | [ 24 | [0.05, 0.1, 0.05], 25 | [0.1, 0.4, 0.1], 26 | [0.05, 0.1, 0.05] 27 | ], 28 | [ 29 | [0.001631, 0.03713, 0.001631], 30 | [0.03713, 0.845, 0.03713], 31 | [0.001631, 0.03713, 0.001631], 32 | ], 33 | [ 34 | [0.01134, 0.08382, 0.01134], 35 | [0.08382, 0.6193, 0.08382], 36 | [0.01134, 0.08382, 0.01134], 37 | ], 38 | [ 39 | [0.02768, 0.111, 0.02768], 40 | [0.111, 0.4452, 0.111], 41 | [0.02768, 0.111, 0.02768], 42 | ], 43 | [ 44 | [9.255e-05, 0.001976, 0.005483, 0.001976, 9.255e-05], 45 | [0.001976, 0.0422, 0.1171, 0.0422, 0.001976], 46 | [0.005483, 0.1171, 0.3248, 0.1171, 0.005483], 47 | [0.001976, 0.0422, 0.1171, 0.0422, 0.001976], 48 | [9.255e-05, 0.001976, 0.005483, 0.001976, 9.255e-05], 49 | ], 50 | [ 51 | [0.0008714, 0.006948, 0.01388, 0.006948, 0.0008714], 52 | [0.006948, 0.0554, 0.1107, 0.0554, 0.006948], 53 | [0.01388, 0.1107, 0.2211, 0.1107, 0.01388], 54 | [0.006948, 0.0554, 0.1107, 0.0554, 0.006948], 55 | [0.0008714, 0.006948, 0.01388, 0.006948, 0.0008714], 56 | ], 57 | [ 58 | [0.002969, 0.01331, 0.02194, 0.01331, 0.002969], 59 | [0.01331, 0.05963, 0.09832, 0.05963, 0.01331], 60 | [0.02194, 0.09832, 0.1621, 0.09832, 0.02194], 61 | [0.01331, 0.05963, 0.09832, 0.05963, 0.01331], 62 | [0.002969, 0.01331, 0.02194, 0.01331, 0.002969], 63 | ], 64 | [ 65 | [0.0002145, 0.001217, 0.00345, 0.004882, 0.00345, 0.001217, 0.0002145], 66 | [0.001217, 0.006909, 0.01958, 0.02771, 0.01958, 0.006909, 0.001217], 67 | [0.00345, 0.01958, 0.05549, 0.07852, 0.05549, 0.01958, 0.00345], 68 | [0.004882, 0.02771, 0.07852, 0.1111, 0.07852, 0.02771, 0.004882], 69 | [0.00345, 0.01958, 0.05549, 0.07852, 0.05549, 0.01958, 0.00345], 70 | [0.001217, 0.006909, 0.01958, 0.02771, 0.01958, 0.006909, 0.001217], 71 | [0.0002145, 0.001217, 0.00345, 0.004882, 0.00345, 0.001217, 0.0002145], 72 | ], 73 | [ 74 | [0.001342, 0.004077, 0.00794, 0.009916, 0.00794, 0.004077, 0.001342], 75 | [0.004077, 0.01238, 0.02412, 0.03012, 0.02412, 0.01238, 0.004077], 76 | [0.00794, 0.02412, 0.04698, 0.05867, 0.04698, 0.02412, 0.00794], 77 | [0.009916, 0.03012, 0.05867, 0.07327, 0.05867, 0.03012, 0.009916], 78 | [0.00794, 0.02412, 0.04698, 0.05867, 0.04698, 0.02412, 0.00794], 79 | [0.004077, 0.01238, 0.02412, 0.03012, 0.02412, 0.01238, 0.004077], 80 | [0.001342, 0.004077, 0.00794, 0.009916, 0.00794, 0.004077, 0.001342], 81 | ], 82 | [ 83 | [0.0007634, 0.001831, 0.003422, 0.004978, 0.005641, 0.004978, 0.003422, 0.001831, 0.0007634], 84 | [0.001831, 0.004393, 0.008208, 0.01194, 0.01353, 0.01194, 0.008208, 0.004393, 0.001831], 85 | [0.003422, 0.008208, 0.01533, 0.02231, 0.02528, 0.02231, 0.01533, 0.008208, 0.003422], 86 | [0.004978, 0.01194, 0.02231, 0.03246, 0.03678, 0.03246, 0.02231, 0.01194, 0.004978], 87 | [0.005641, 0.01353, 0.02528, 0.03678, 0.04168, 0.03678, 0.02528, 0.01353, 0.005641], 88 | [0.004978, 0.01194, 0.02231, 0.03246, 0.03678, 0.03246, 0.02231, 0.01194, 0.004978], 89 | [0.003422, 0.008208, 0.01533, 0.02231, 0.02528, 0.02231, 0.01533, 0.008208, 0.003422], 90 | [0.001831, 0.004393, 0.008208, 0.01194, 0.01353, 0.01194, 0.008208, 0.004393, 0.001831], 91 | [0.0007634, 0.001831, 0.003422, 0.004978, 0.005641, 0.004978, 0.003422, 0.001831, 0.0007634], 92 | ], 93 | [ 94 | [0.002276, 0.003984, 0.005944, 0.007556, 0.008186, 0.007556, 0.005944, 0.003984, 0.002276], 95 | [0.003984, 0.006975, 0.01041, 0.01323, 0.01433, 0.01323, 0.01041, 0.006975, 0.003984], 96 | [0.005944, 0.01041, 0.01552, 0.01973, 0.02138, 0.01973, 0.01552, 0.01041, 0.005944], 97 | [0.007556, 0.01323, 0.01973, 0.02509, 0.02718, 0.02509, 0.01973, 0.01323, 0.007556], 
98 | [0.008186, 0.01433, 0.02138, 0.02718, 0.02944, 0.02718, 0.02138, 0.01433, 0.008186], 99 | [0.007556, 0.01323, 0.01973, 0.02509, 0.02718, 0.02509, 0.01973, 0.01323, 0.007556], 100 | [0.005944, 0.01041, 0.01552, 0.01973, 0.02138, 0.01973, 0.01552, 0.01041, 0.005944], 101 | [0.003984, 0.006975, 0.01041, 0.01323, 0.01433, 0.01323, 0.01041, 0.006975, 0.003984], 102 | [0.002276, 0.003984, 0.005944, 0.007556, 0.008186, 0.007556, 0.005944, 0.003984, 0.002276], 103 | ], 104 | ] 105 | -------------------------------------------------------------------------------- /mibidata/tests/test_pseudodepths.py: -------------------------------------------------------------------------------- 1 | """Tests for pseudodepths.py 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import os 6 | import struct 7 | import shutil 8 | import tempfile 9 | import unittest 10 | 11 | import numpy as np 12 | 13 | from mibidata import pseudodepths 14 | 15 | # File variant 1, 8 bins per spectrum, 4 pixels. 16 | NUM_PIXELS = 4 17 | HEADER = (1, 8, NUM_PIXELS) 18 | # Assume 10 cycles per pixel, so each data sub-list has 10 zeros and the rest 19 | # of the counts are bin numbers from 1 to 8. The (0, 0) entry that indicates a 20 | # new cycle comes at the _end_ of the data for each cycle. 21 | # 22 | # The following data was generated with: 23 | # 24 | # pixel_lengths = np.random.randint(9, 20, 4) 25 | # DATA = [] 26 | # for pl in pixel_lengths: 27 | # entries = np.zeros(pl, int) 28 | # num_counts = pl - 9 29 | # inds = np.random.choice(np.arange(pl), num_counts, replace=False) 30 | # bins = np.random.randint(1, 8, num_counts) 31 | # entries[inds] = bins 32 | # DATA.append(list(entries) + [0]) 33 | DATA = [ 34 | [0, 3, 0, 1, 1, 0, 3, 0, 0, 0, 3, 0, 3, 7, 3, 4, 0, 0, 5, 0], 35 | [0, 5, 4, 2, 0, 0, 0, 0, 0, 0, 3, 0, 2, 2, 0, 2, 0], 36 | [0, 0, 0, 6, 7, 0, 0, 0, 0, 0, 0, 2, 2, 0], 37 | [0, 1, 1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0], 38 | ] 39 | # How many counts to expect if we manually split into two depths.
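# Each pixel has 10 cycles (delimited by its zero entries), so a two-way split puts the first 5 cycles in Depth0 and the last 5 in Depth1.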
40 | DEPTH0 = [ 41 | [0, 3, 0, 1, 1, 0, 3, 0, 0], 42 | [0, 5, 4, 2, 0, 0, 0, 0], 43 | [0, 0, 0, 6, 7, 0, 0], 44 | [0, 1, 1, 0, 0, 3, 0, 0], 45 | ] 46 | DEPTH1 = [ 47 | [0, 3, 0, 3, 7, 3, 4, 0, 0, 5, 0], 48 | [0, 0, 3, 0, 2, 2, 0, 2, 0], 49 | [0, 0, 0, 0, 2, 2, 0], 50 | [0, 0, 0, 0, 0], 51 | ] 52 | 53 | 54 | class TestMsdf(unittest.TestCase): 55 | 56 | @classmethod 57 | def setUpClass(cls): 58 | fh, fn = tempfile.mkstemp() 59 | cls.msdf = fn 60 | os.close(fh) 61 | sat = np.zeros((NUM_PIXELS, 2), int) 62 | cls.header = struct.pack(pseudodepths.HEADER_FORMAT, 1, 8, NUM_PIXELS) 63 | with open(fn, 'wb') as infile: 64 | infile.write(cls.header) 65 | end_sat = pseudodepths.HEADER_SIZE + \ 66 | NUM_PIXELS * pseudodepths.SAT_ENTRY_SIZE 67 | infile.seek(end_sat) 68 | for i, pixel in enumerate(DATA): 69 | sat[i, 0] = infile.tell() 70 | sat[i, 1] = len(pixel) 71 | for timestamp in pixel: 72 | count = int(timestamp > 0) 73 | infile.write( 74 | struct.pack(pseudodepths.DATA_FORMAT, timestamp, count)) 75 | infile.seek(pseudodepths.HEADER_SIZE) 76 | for offset, length in sat: 77 | infile.write( 78 | struct.pack(pseudodepths.SAT_ENTRY_FORMAT, offset, length)) 79 | cls.data_start = end_sat 80 | 81 | def setUp(self): 82 | self.tempdir = tempfile.mkdtemp() 83 | 84 | @classmethod 85 | def tearDownClass(cls): 86 | os.remove(cls.msdf) 87 | 88 | def tearDown(self): 89 | shutil.rmtree(self.tempdir) 90 | 91 | def _pack_sat(self, depth): 92 | sat = b'' 93 | offset = self.data_start 94 | for d in depth: 95 | sat += struct.pack( 96 | pseudodepths.SAT_ENTRY_FORMAT, offset, len(d)) 97 | offset += pseudodepths.DATA_SIZE * len(d) 98 | return sat 99 | 100 | def _pack_data(self, depth): 101 | data = b'' 102 | for pixel in depth: 103 | for i in pixel: 104 | data += struct.pack( 105 | pseudodepths.DATA_FORMAT, i, int(i > 0)) 106 | return data 107 | 108 | def test_split_into_one(self): 109 | """If we split into one output file, we should get the same file out. 
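The output is compared byte-for-byte against the input msdf.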
110 | """ 111 | cycles_per_pixel, cycles_per_scan = pseudodepths.divide( 112 | self.msdf, 1, self.tempdir) 113 | self.assertEqual(cycles_per_pixel, 10) 114 | self.assertEqual(cycles_per_scan, 10) 115 | new_file = os.path.join(self.tempdir, 'Depth0', 'Image.msdf') 116 | self.assertTrue(os.path.exists(new_file)) 117 | with open(self.msdf, 'rb') as infile: 118 | expected_buffer = infile.read() 119 | with open(new_file, 'rb') as infile: 120 | new_buffer = infile.read() 121 | self.assertEqual(new_buffer, expected_buffer) 122 | 123 | def test_split_into_two(self): 124 | cycles_per_pixel, cycles_per_scan = pseudodepths.divide( 125 | self.msdf, 2, self.tempdir) 126 | self.assertEqual(cycles_per_pixel, 10) 127 | self.assertEqual(cycles_per_scan, 5) 128 | depth0 = os.path.join(self.tempdir, 'Depth0', 'Image.msdf') 129 | depth1 = os.path.join(self.tempdir, 'Depth1', 'Image.msdf') 130 | with open(depth0, 'rb') as infile: 131 | depth0_header = infile.read(pseudodepths.HEADER_SIZE) 132 | depth0_sat = infile.read(NUM_PIXELS * pseudodepths.SAT_ENTRY_SIZE) 133 | depth0_data = infile.read() 134 | with open(depth1, 'rb') as infile: 135 | depth1_header = infile.read(pseudodepths.HEADER_SIZE) 136 | depth1_sat = infile.read(NUM_PIXELS * pseudodepths.SAT_ENTRY_SIZE) 137 | depth1_data = infile.read() 138 | 139 | self.assertEqual(depth0_header, self.header) 140 | self.assertEqual(depth1_header, self.header) 141 | self.assertEqual(depth0_sat, self._pack_sat(DEPTH0)) 142 | self.assertEqual(depth1_sat, self._pack_sat(DEPTH1)) 143 | self.assertEqual(depth0_data, self._pack_data(DEPTH0)) 144 | self.assertEqual(depth1_data, self._pack_data(DEPTH1)) 145 | 146 | def test_split_into_three(self): 147 | """This should raise because the number of cycles is not divisible by 148 | the number of desired pseudo-depths.""" 149 | with self.assertRaises(ValueError): 150 | pseudodepths.divide(self.msdf, 3, self.tempdir) 151 | 152 | if __name__ == '__main__': 153 | unittest.main() 154 | -------------------------------------------------------------------------------- /mibidata/tests/test_panels.py: -------------------------------------------------------------------------------- 1 | """Tests for mibitof.panel""" 2 | 3 | import os 4 | import shutil 5 | import tempfile 6 | import unittest 7 | 8 | import pandas as pd 9 | 10 | from mibidata import panels 11 | 12 | 13 | class TestPanel(unittest.TestCase): 14 | 15 | @classmethod 16 | def setUp(cls): 17 | cls.folder = tempfile.mkdtemp() 18 | cls.filename = os.path.join(cls.folder, 'test.csv') 19 | 20 | cls.simple_csv = 'Mass,Target\n10,Target1\n20,Target2\n30,Target3\n' + \ 21 | '20,Target4' 22 | 23 | cls.tracker_csv = 'Panel ID,0\nPanel Name,The Panel,\nProject ID,0\n' +\ 24 | 'Project Name,The Project\nManufacture Data,2018-04-25\n,' +\ 25 | 'Description,It has a panel\n\n' +\ 26 | 'Batch,0\nTotal Volume (uL),100\nAntibody Volume (uL),5\n' +\ 27 | 'Buffer Volume (uL), 105\n\n' +\ 28 | 'ID (Lot),Target,Clone,Mass,Element\n' +\ 29 | '001,Target1,A,10,B\n' +\ 30 | '003,Target2,B,20,Ne\n' +\ 31 | '002,Target3,C,30,P\n' + \ 32 | '004,Target4,D,20,Ne' 33 | 34 | cls.tracker_multi_csv = 'Panel ID,0\nPanel Name,The Panel\n' +\ 35 | 'Project ID,0\n' +\ 36 | 'Project Name,The Project\nManufacture Data,2018-04-25\n' +\ 37 | 'Description,It has a panel\n\n' +\ 38 | 'Batch,0\nTotal Volume (uL),100\nAntibody Volume (uL),5\n' +\ 39 | 'Buffer Volume (uL),105\n\n' +\ 40 | 'ID (Lot),Target,Clone,Mass,Element\n' +\ 41 | '001,Target1,A,10,B\n\n' +\ 42 | 'Batch,1\nTotal Volume (uL),200\nAntibody Volume (uL),20\n' 
+\ 43 | 'Buffer Volume (uL),220\n\n' +\ 44 | 'ID (Lot),Target,Clone,Mass,Element\n' +\ 45 | '003,Target2,B,20,Ne\n' +\ 46 | '002,Target3,C,30,P\n' + \ 47 | '004,Target4,D,20,Ne' 48 | 49 | cls.tracker_multi_csv_with_empty_cells =\ 50 | 'Panel ID,0,,,\n' +\ 51 | 'Panel Name,The Panel,,,\n' +\ 52 | 'Project ID,0,,,\n' +\ 53 | 'Project Name,The Project,,,\n' +\ 54 | 'Manufacture Data,4/25/2018,,,\n' +\ 55 | 'Description,It has a panel,,,\n' +\ 56 | ',,,,\n' +\ 57 | 'Batch,0,,,\n' +\ 58 | 'Total Volume (uL),100,,,\n' +\ 59 | 'Antibody Volume (uL),5,,,\n' +\ 60 | 'Buffer Volume (uL),105,,,\n' +\ 61 | ',,,,\n' +\ 62 | 'ID (Lot),Target,Clone,Mass,Element\n' +\ 63 | '001,Target1,A,10,B\n' +\ 64 | ',,,,\n' +\ 65 | 'Batch,1,,,\n' +\ 66 | 'Total Volume (uL),200,,,\n' +\ 67 | 'Antibody Volume (uL),20,,,\n' +\ 68 | 'Buffer Volume (uL),220,,,\n' +\ 69 | ',,,\n' +\ 70 | 'ID (Lot),Target,Clone,Mass,Element\n' +\ 71 | '003,Target2,B,20,Ne\n' +\ 72 | '002,Target3,C,30,P\n' +\ 73 | '004,Target4,D,20,Ne' 74 | 75 | cls.expected_df = pd.DataFrame( 76 | {'Mass': [10, 20, 30], 77 | 'Target': ['Target1', 'Target2, Target4', 'Target3']}, 78 | columns=['Mass', 'Target']) 79 | 80 | cls.expected_merge_df = pd.DataFrame( 81 | {'Mass': [10, 20, 30], 82 | 'Target': ['Target1, Target4, Target6', 'Target2', 'Target3,' + \ 83 | ' Target5, Target7']}, 84 | columns=['Mass', 'Target']) 85 | 86 | @classmethod 87 | def tearDown(cls): 88 | shutil.rmtree(cls.folder) 89 | 90 | @classmethod 91 | def write_csv_string(cls, csv_string): 92 | """Writes the specified csv_string to the test file defined in setUp() 93 | 94 | Args: 95 | csv_string: CSV formatted string to write to a temp file. 96 | """ 97 | 98 | with open(cls.filename, 'wt') as f: 99 | f.write(csv_string) 100 | 101 | def test_read_simple_panel(self): 102 | self.write_csv_string(self.simple_csv) 103 | 104 | loaded = panels.read_csv(self.filename) 105 | 106 | pd.testing.assert_frame_equal(loaded, self.expected_df) 107 | 108 | def test_read_tracker_panel(self): 109 | self.write_csv_string(self.tracker_csv) 110 | 111 | loaded = panels.read_csv(self.filename) 112 | 113 | pd.testing.assert_frame_equal(loaded, self.expected_df) 114 | 115 | def test_read_tracker_panel_with_two_batches(self): 116 | self.write_csv_string(self.tracker_multi_csv) 117 | 118 | loaded = panels.read_csv(self.filename) 119 | 120 | pd.testing.assert_frame_equal(loaded, self.expected_df) 121 | 122 | def test_read_tracker_panel_with_empty_cells(self): 123 | self.write_csv_string(self.tracker_multi_csv_with_empty_cells) 124 | 125 | loaded = panels.read_csv(self.filename) 126 | 127 | pd.testing.assert_frame_equal(loaded, self.expected_df) 128 | 129 | def test_merge_panels_with_unique_masses(self): 130 | df_input = pd.DataFrame( 131 | {'Mass': [10, 20, 30, 40], 132 | 'Target': ['Target1', 'Target2', 'Target3', 'Target4']}, 133 | columns=['Mass', 'Target']) 134 | unique_merge_df = panels.merge_masses(df_input) 135 | 136 | expected_df = df_input 137 | pd.testing.assert_frame_equal(unique_merge_df, expected_df) 138 | 139 | def test_merge_panels_with_repeated_masses(self): 140 | df_input = pd.DataFrame( 141 | {'Mass': [10, 20, 30, 10, 30, 10, 30], 142 | 'Target': ['Target1', 'Target2', 'Target3', 'Target4', 143 | 'Target5', 'Target6', 'Target7']}, 144 | columns=['Mass', 'Target']) 145 | forward_merge_df = panels.merge_masses(df_input) 146 | 147 | pd.testing.assert_frame_equal(forward_merge_df, self.expected_merge_df) 148 | 149 | def test_merge_panels_with_repeated_masses_scrambled(self): 150 | df_input = pd.DataFrame( 
151 | {'Mass': [10, 20, 30, 10, 10, 30, 30],
152 | 'Target': ['Target6', 'Target2', 'Target5', 'Target1',
153 | 'Target4', 'Target7', 'Target3']},
154 | columns=['Mass', 'Target'])
155 | scramble_merge_df = panels.merge_masses(df_input)
156 |
157 | pd.testing.assert_frame_equal(scramble_merge_df, self.expected_merge_df)
158 |
159 |
160 | if __name__ == '__main__':
161 | unittest.main()
162 |
--------------------------------------------------------------------------------
/mibidata/pseudodepths.py:
--------------------------------------------------------------------------------
1 | """Splits single-depth MIBI scans into pseudo Depth Profiles.
2 |
3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved."""
4 |
5 | # pylint: disable=too-many-branches
6 |
7 | import argparse
8 | import glob
9 | import os
10 | import shutil
11 | import struct
12 |
13 | import numpy as np
14 | import tqdm
15 |
16 | HEADER_SIZE = 12
17 | HEADER_FORMAT = 'iii'
18 | SAT_ENTRY_SIZE = 10
19 | SAT_ENTRY_FORMAT = 'qH'
20 | DATA_SIZE = 4
21 | DATA_FORMAT = 'HH'
22 |
23 |
24 | def divide(msdf_file, num_scans, path=None):
25 | """Creates a pseudo depth profile from a single Image.msdf file.
26 |
27 | Args:
28 | msdf_file: The string path to a local msdf file.
29 | num_scans: The integer number of pseudo-depths into which to divide
30 | this msdf file. This must be a divisor of the number of ToF
31 | cycles per pixel.
32 | path: The string path to the folder into which to write the output
33 | msdf files, which will follow the convention Depth0/Image.msdf,
34 | Depth1/Image.msdf, etc. If Depth/Image.msdf files already
35 | exist in this location, they will be overwritten. If no path is
36 | specified, this will default to creating a folder named
37 | PseudoDepths in the same directory as the input msdf file.
38 |
39 | Returns:
40 | - cycles_per_pixel: The integer number of ToF cycles per pixel in the
41 | original data.
42 | - cycles_per_scan: The integer number of ToF cycles per each output
43 | pseudo-depth.
44 |
45 | Raises:
46 | ValueError: Raised if the msdf file was not created by MiniSIMS
47 | >=6.3.4.0 with ToF cycle starts encoded, or if the number of scans
48 | given is not a divisor of the number of cycles per pixel.
49 | """
50 | # Explicitly remove existing depths because there could have been a previous
51 | # splitting with more depths than this time, in which case only overwriting
52 | # the new ones will still leave the old extras around.
53 | if path is None:
54 | path = os.path.join(os.path.dirname(msdf_file), 'PseudoDepths')
55 | old_depths = glob.glob(os.path.join(path, 'Depth*'))
56 | for depth in old_depths:
57 | shutil.rmtree(depth)
58 | handles = []
59 | for i in range(num_scans):
60 | folder = os.path.join(path, f'Depth{i}')
61 | os.makedirs(folder)
62 | handles.append(open(os.path.join(folder, 'Image.msdf'), 'wb'))
63 |
64 | try:
65 | with open(msdf_file, 'rb') as infile:
66 | header = infile.read(HEADER_SIZE)
67 | file_variant, _, num_spectra = struct.unpack(HEADER_FORMAT, header)
68 | if file_variant != 1:
69 | raise ValueError('Invalid input Image.msdf file.')
72 | for handle in handles:
73 | handle.write(header)
74 | # Add a dimension to the SAT for each pseudo-depth, since
75 | # splitting them up will require creating a new SAT for each.
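# For orientation, the msdf layout assumed by this module (inferred from the
# struct formats above, not from a published spec) is:
#   header: 12 bytes, 'iii' -> (file_variant, <unused>, num_spectra)
#   SAT:    num_spectra entries of 'qH' -> (byte offset, entry count)
#   data:   per-pixel (bin, count) ushort pairs, where entries with a
#           count of 0 mark ToF cycle boundaries.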
76 | depth_sat = np.zeros((num_spectra, 2, num_scans), int)
77 | buffer = infile.read(SAT_ENTRY_SIZE * num_spectra)
78 | sat = np.array(
79 | list(struct.iter_unpack(SAT_ENTRY_FORMAT, buffer)),
80 | int)
81 | # Skip to just after the SAT in the new files; it will be updated later.
82 | after_sat = infile.tell()
83 | for handle in handles:
84 | handle.seek(after_sat)
85 |
86 | # Get cycles per pixel to calculate cycles per pseudo-depth.
87 | pixel = np.fromfile(
88 | infile, count=2 * sat[0, 1], dtype=np.ushort)[1::2]
89 | cycles_per_pixel = np.count_nonzero(pixel)
90 | if not cycles_per_pixel:
91 | raise ValueError(
92 | 'Cycle start indicators were not found in this file. '
93 | 'Please confirm that this file was created by MiniSIMS '
94 | '>=6.3.4.0 with the "Encode ToF Cycle Start" option '
95 | 'selected.')
96 | cycles_per_scan, remainder = np.divmod(cycles_per_pixel, num_scans)
97 | if remainder:
98 | raise ValueError(
99 | 'Splitting {0} cycles per pixel into {1} depths does not '
100 | 'result in equal division. Please choose a divisor of {0}.'
101 | .format(cycles_per_pixel, num_scans)
102 | )
103 | infile.seek(sat[0, 0])
104 |
105 | # Iterate through pixels while writing counts to each pseudo-depth.
106 | for i in tqdm.tqdm(range(num_spectra)):
107 | depth_sat[i, 0, :] = [handle.tell() for handle in handles]
108 | # Nx2 array of bins and counts for this pixel.
109 | pixel = np.fromfile(infile, count=2*sat[i, 1], dtype=np.ushort
110 | ).reshape((sat[i, 1], 2))
111 | # Split indices of zero-count events (cycle boundaries) into num_scans groups.
112 | idx = np.split(np.where(pixel[:, 1] == 0)[0], num_scans)
113 | # Get the index of the end of each pseudo-depth.
114 | boundaries = [i[-1] + 1 for i in idx[:-1]]
115 | # Split the pixel array into sub-arrays at the boundary indices.
116 | depths = np.split(pixel, boundaries, axis=0)
117 | for j, depth in enumerate(depths):
118 | handles[j].write(depth)
119 | depth_sat[i, 1, j] = len(depth)
120 |
121 | # Go back and write the new SAT for each file now that we know how
122 | # many entries there are for each pixel in each new pseudo-depth.
123 | for h, handle in enumerate(handles):
124 | handle.seek(HEADER_SIZE)
125 | for i in range(num_spectra):
126 | handle.write(struct.pack(
127 | SAT_ENTRY_FORMAT,
128 | depth_sat[i, 0, h], depth_sat[i, 1, h]
129 | ))
130 | finally:
131 | for handle in handles:
132 | handle.close()
133 |
134 | return cycles_per_pixel, cycles_per_scan
135 |
136 |
137 | if __name__ == '__main__':
138 | parser = argparse.ArgumentParser()
139 | parser.add_argument('msdf_file', help='Path to single-depth msdf file.')
140 | parser.add_argument('num_scans', type=int,
141 | help='Integer number of pseudo-depths to divide this file into.')
142 | parser.add_argument('--path', help='Path to output location. If not '
143 | 'provided, defaults to a new folder '
144 | 'named "PseudoDepths" at the same '
145 | 'location as the input msdf file.')
146 | args = parser.parse_args()
147 | divide(args.msdf_file, args.num_scans, args.path)
148 |
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS =
6 | SPHINXBUILD = sphinx-build
7 | PAPER =
8 | BUILDDIR = build
9 |
10 | # User-friendly check for sphinx-build
11 | ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1)
12 | $(error The '$(SPHINXBUILD)' command was not found.
Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/)
13 | endif
14 |
15 | # Internal variables.
16 | PAPEROPT_a4 = -D latex_paper_size=a4
17 | PAPEROPT_letter = -D latex_paper_size=letter
18 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
19 | # the i18n builder cannot share the environment and doctrees with the others
20 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
21 |
22 | .PHONY: help
23 | help:
24 | @echo "Please use \`make <target>' where <target> is one of"
25 | @echo " html to make standalone HTML files"
26 | @echo " dirhtml to make HTML files named index.html in directories"
27 | @echo " singlehtml to make a single large HTML file"
28 | @echo " pickle to make pickle files"
29 | @echo " json to make JSON files"
30 | @echo " htmlhelp to make HTML files and an HTML help project"
31 | @echo " qthelp to make HTML files and a qthelp project"
32 | @echo " applehelp to make an Apple Help Book"
33 | @echo " devhelp to make HTML files and a Devhelp project"
34 | @echo " epub to make an epub"
35 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
36 | @echo " latexpdf to make LaTeX files and run them through pdflatex"
37 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
38 | @echo " text to make text files"
39 | @echo " man to make manual pages"
40 | @echo " texinfo to make Texinfo files"
41 | @echo " info to make Texinfo files and run them through makeinfo"
42 | @echo " gettext to make PO message catalogs"
43 | @echo " changes to make an overview of all changed/added/deprecated items"
44 | @echo " xml to make Docutils-native XML files"
45 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
46 | @echo " linkcheck to check all external links for integrity"
47 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
48 | @echo " coverage to run coverage check of the documentation (if enabled)"
49 |
50 | .PHONY: clean
51 | clean:
52 | rm -rf $(BUILDDIR)/*
53 |
54 | .PHONY: html
55 | html:
56 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
57 | @echo
58 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
59 |
60 | .PHONY: dirhtml
61 | dirhtml:
62 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
63 | @echo
64 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
65 |
66 | .PHONY: singlehtml
67 | singlehtml:
68 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
69 | @echo
70 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
71 |
72 | .PHONY: pickle
73 | pickle:
74 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
75 | @echo
76 | @echo "Build finished; now you can process the pickle files."
77 |
78 | .PHONY: json
79 | json:
80 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
81 | @echo
82 | @echo "Build finished; now you can process the JSON files."
83 |
84 | .PHONY: htmlhelp
85 | htmlhelp:
86 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
87 | @echo
88 | @echo "Build finished; now you can run HTML Help Workshop with the" \
89 | ".hhp project file in $(BUILDDIR)/htmlhelp."
90 | 91 | .PHONY: qthelp 92 | qthelp: 93 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 94 | @echo 95 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 96 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 97 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/mibilib-doc.qhcp" 98 | @echo "To view the help file:" 99 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/mibilib-doc.qhc" 100 | 101 | .PHONY: applehelp 102 | applehelp: 103 | $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp 104 | @echo 105 | @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." 106 | @echo "N.B. You won't be able to view it unless you put it in" \ 107 | "~/Library/Documentation/Help or install it in your application" \ 108 | "bundle." 109 | 110 | .PHONY: devhelp 111 | devhelp: 112 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 113 | @echo 114 | @echo "Build finished." 115 | @echo "To view the help file:" 116 | @echo "# mkdir -p $$HOME/.local/share/devhelp/mibilib-doc" 117 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/mibilib-doc" 118 | @echo "# devhelp" 119 | 120 | .PHONY: epub 121 | epub: 122 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 123 | @echo 124 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 125 | 126 | .PHONY: latex 127 | latex: 128 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 129 | @echo 130 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 131 | @echo "Run \`make' in that directory to run these through (pdf)latex" \ 132 | "(use \`make latexpdf' here to do that automatically)." 133 | 134 | .PHONY: latexpdf 135 | latexpdf: 136 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 137 | @echo "Running LaTeX files through pdflatex..." 138 | $(MAKE) -C $(BUILDDIR)/latex all-pdf 139 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 140 | 141 | .PHONY: latexpdfja 142 | latexpdfja: 143 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 144 | @echo "Running LaTeX files through platex and dvipdfmx..." 145 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja 146 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 147 | 148 | .PHONY: text 149 | text: 150 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 151 | @echo 152 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 153 | 154 | .PHONY: man 155 | man: 156 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 157 | @echo 158 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 159 | 160 | .PHONY: texinfo 161 | texinfo: 162 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 163 | @echo 164 | @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." 165 | @echo "Run \`make' in that directory to run these through makeinfo" \ 166 | "(use \`make info' here to do that automatically)." 167 | 168 | .PHONY: info 169 | info: 170 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo 171 | @echo "Running Texinfo files through makeinfo..." 172 | make -C $(BUILDDIR)/texinfo info 173 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." 174 | 175 | .PHONY: gettext 176 | gettext: 177 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale 178 | @echo 179 | @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." 
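# Example invocations (illustrative; they assume Sphinx is installed in the
# active environment):
#
#   make html
#   make clean html
#   make html SPHINXOPTS="-W"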
180 |
181 | .PHONY: changes
182 | changes:
183 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes
184 | @echo
185 | @echo "The overview file is in $(BUILDDIR)/changes."
186 |
187 | .PHONY: linkcheck
188 | linkcheck:
189 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck
190 | @echo
191 | @echo "Link check complete; look for any errors in the above output " \
192 | "or in $(BUILDDIR)/linkcheck/output.txt."
193 |
194 | .PHONY: doctest
195 | doctest:
196 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest
197 | @echo "Testing of doctests in the sources finished, look at the " \
198 | "results in $(BUILDDIR)/doctest/output.txt."
199 |
200 | .PHONY: coverage
201 | coverage:
202 | $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage
203 | @echo "Testing of coverage in the sources finished, look at the " \
204 | "results in $(BUILDDIR)/coverage/python.txt."
205 |
206 | .PHONY: xml
207 | xml:
208 | $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml
209 | @echo
210 | @echo "Build finished. The XML files are in $(BUILDDIR)/xml."
211 |
212 | .PHONY: pseudoxml
213 | pseudoxml:
214 | $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml
215 | @echo
216 | @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml."
217 |
--------------------------------------------------------------------------------
/mibidata/combine_tiffs.py:
--------------------------------------------------------------------------------
1 | """Combines a folder of single-channel TIFFs into a multiplexed MIBItiff.
2 |
3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved."""
4 |
5 | import argparse
6 | import datetime
7 | import os
8 | import re
9 |
10 | import numpy as np
11 | from skimage import io as skio
12 |
13 | from mibidata import mibi_image as mi, panels, runs, tiff
14 |
15 |
16 | def _load_single_channel(file_name):
17 | array = skio.imread(file_name)
18 | if array.dtype not in (np.uint16, np.uint8):
19 | raise ValueError(
20 | f'Invalid dtype {array.dtype}; must be uint8 or uint16')
21 |
22 | if array.dtype == np.uint8:
23 | array = array.astype(np.uint16)
24 | return array
25 |
26 |
27 | def _match_target_filename(filenames, target):
28 | """Finds the single file whose name matches target.tif or target.tiff (case-insensitive)."""
29 | pattern = re.compile(re.escape(target).lower() + r'\.tiff?$')
30 | matches = [f for f in filenames if re.match(pattern, f.lower())]
31 | if len(matches) != 1:
32 | raise ValueError(
33 | f'Expected exactly one TIFF matching {target}; '
34 | f'found {len(matches)}.')
35 | return matches[0]
36 |
37 |
38 | def merge_mibitiffs(input_folder, out=None):
39 | """Merges a folder of single-channel MIBItiff files into a single MIBItiff.
40 |
41 | Args:
42 | input_folder: Path to a folder containing MIBItiff files. While these
43 | files may be single-channel, they are assumed to have accurate and
44 | consistent MIBI metadata.
45 | out: Optionally, a path to a location for saving the combined TIFF. If
46 | not specified, defaults to 'combined.tiff' inside the input folder.
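
    Example:
        A minimal sketch; the folder path is illustrative::

            merge_mibitiffs('/data/run1/single_channel_tiffs',
                            out='/data/run1/combined.tiff')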
47 | """ 48 | pattern = re.compile(r'.+\.tiff?$') 49 | paths = [ 50 | os.path.join(input_folder, f) for f in os.listdir(input_folder) 51 | if re.match(pattern, f.lower())] 52 | merged = tiff.read(paths[0]) 53 | for path in paths[1:]: 54 | image = tiff.read(path) 55 | merged.append(image) 56 | 57 | if out is None: 58 | out = os.path.join(input_folder, 'combined.tiff') 59 | tiff.write(out, merged, multichannel=True) 60 | 61 | 62 | def create_mibitiffs(input_folder, run_path, point, panel_path, slide, size, 63 | run_label=None, instrument=None, tissue=None, 64 | aperture=None, out=None): 65 | """Combines single-channel TIFFs into a MIBItiff. 66 | 67 | The input TIFFs are not assumed to have any MIBI metadata. If they do, it 68 | is suggested to use the simpler :meth:`~merge_mibitiffs` instead. 69 | 70 | Args: 71 | input_folder: Path to a folder containing single-channel TIFFs. 72 | run_path: Path to a run xml. 73 | point: Point name of the image, e.g. Point1 or Point2. This should match 74 | the name of folder generated for the raw data as it is listed in the 75 | run xml file. 76 | panel_path: Path to a panel CSV. 77 | slide: The slide ID. 78 | size: The size of the FOV in microns, i.e. 500. 79 | run_label: Optional custom run label for the combined TIFF. If uploading 80 | the output to MIBItracker, the run label set here must match the 81 | label of the MIBItracker run. Defaults to the name of the run xml. 82 | instrument: Optionally, the instrument ID. 83 | tissue: Optionally, the name of tissue. 84 | aperture: Optionally, the name of the aperture or imaging preset. 85 | out: Optionally, a path to a location for saving the combined TIFF. If 86 | not specified, defaults to 'combined.tiff' inside the input folder. 87 | run_label: Optionally, a custom run label for the `run` property of the 88 | image. 
89 | """ 90 | panel_df = panels.read_csv(panel_path) 91 | panel_name, _ = os.path.splitext(os.path.basename(panel_path)) 92 | tiff_files = os.listdir(input_folder) 93 | 94 | fovs, calibration = runs.parse_xml(run_path) 95 | point_number = int(point[5:]) 96 | try: 97 | fov = fovs[point_number - 1] # point number is 1-based, not 0-based 98 | except IndexError as ie: 99 | raise IndexError(f'{point} not found in run xml.') from ie 100 | if fov['date']: 101 | run_date = datetime.datetime.strptime( 102 | fov['date'], '%Y-%m-%dT%H:%M:%S').date() 103 | else: 104 | run_date = datetime.datetime.now().date() 105 | 106 | image_data = [] 107 | for i in panel_df.index: 108 | tiff_path = os.path.join( 109 | input_folder, _match_target_filename(tiff_files, 110 | panel_df['Target'][i])) 111 | data = _load_single_channel(tiff_path) 112 | image_data.append(data) 113 | 114 | image_data = np.stack(image_data, axis=2) 115 | 116 | image = mi.MibiImage(image_data, 117 | list(zip(panel_df['Mass'], panel_df['Target']))) 118 | 119 | image.size = int(size) 120 | image.coordinates = (fov['coordinates']) 121 | image.filename = fov['run'] 122 | image.run = run_label if run_label else fov['run'] 123 | image.version = tiff.SOFTWARE_VERSION 124 | image.instrument = instrument 125 | image.slide = slide 126 | image.dwell = fov['dwell'] 127 | image.scans = fov['scans'] 128 | image.aperture = aperture 129 | image.fov_name = fov['fov_name'] 130 | image.folder = fov['folder'] 131 | image.tissue = tissue 132 | image.panel = panel_name 133 | image.date = run_date 134 | image.mass_offset = calibration['MassOffset'] 135 | image.mass_gain = calibration['MassGain'] 136 | image.time_resolution = calibration['TimeResolution'] 137 | 138 | if out is None: 139 | out = os.path.join(input_folder, 'combined.tiff') 140 | 141 | tiff.write(out, image, multichannel=True) 142 | 143 | 144 | if __name__ == '__main__': 145 | 146 | description = ('Generates a single multiplexed MIBItiff from a folder ' 147 | 'containing individual single-channel TIFFs without MIBI ' 148 | 'metadata. Note that if the single-channel TIFFs already ' 149 | 'have the MIBItiff metadata, then a much simpler way to ' 150 | 'merge them is to use combine_tiffs.merge_mibitiffs ' 151 | 'instead.') 152 | 153 | parser = argparse.ArgumentParser(description=description) 154 | parser.add_argument( 155 | 'folder', 156 | help='Folder containing single-channel TIFF files with a .tif or ' 157 | '.tiff extension.', 158 | ) 159 | parser.add_argument( 160 | 'run_xml', help='Path to a run XML file.' 161 | ) 162 | parser.add_argument( 163 | 'point', help='The point number in the run, e.g. Point1 or Point2. ' 164 | 'This should match the name of folder generated for the ' 165 | 'raw data as it is listed in the run xml file.' 166 | ) 167 | parser.add_argument( 168 | 'panel', 169 | help='Path to a CSV file containing panel information. The CSV file ' 170 | 'must contain columns named \'Mass\' and \'Target\', where the ' 171 | 'contents of the \'Target\' column matches the names of the ' 172 | 'single-channel TIFFs, i.e. the column contains \'CD45\' and ' 173 | 'there is a file named \'CD45.tif\' or \'CD45.tiff\'. A panel ' 174 | 'downloaded from the MibiTracker is a valid format assuming the ' 175 | 'single-channel TIFF files are named accordingly.', 176 | ) 177 | parser.add_argument( 178 | 'slide', help='The slide ID.' 179 | ) 180 | parser.add_argument( 181 | 'size', help='The size of the FOV in microns e.g. 500.' 
182 | ) 183 | parser.add_argument( 184 | '--run_label', 185 | help='Optional custom run label for the combined TIFF. If uploading ' 186 | 'the multiplexed MIBItiff file to MIBItracker, the run label set ' 187 | 'here must match the label of the MIBItracker run.' 188 | ) 189 | parser.add_argument( 190 | '--instrument', help='The instrument ID.' 191 | ) 192 | parser.add_argument( 193 | '--tissue', help='The tissue type.' 194 | ) 195 | parser.add_argument( 196 | '--aperture', help='The aperture or imaging preset used.' 197 | ) 198 | parser.add_argument( 199 | '--out', 200 | help='Optional path to a location for the combined TIFF. If not ' 201 | 'specified, defaults to \'combined.tiff\' inside the ' 202 | 'input folder.', 203 | ) 204 | args = parser.parse_args() 205 | create_mibitiffs(args.folder, args.run_xml, args.point, args.panel, 206 | args.slide, args.size, run_label=args.run_label, 207 | instrument=args.instrument, tissue=args.tissue, 208 | aperture=args.aperture, out=args.out) 209 | -------------------------------------------------------------------------------- /mibidata/color.py: -------------------------------------------------------------------------------- 1 | """Color transformation and composites. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import numpy as np 6 | from scipy import ndimage 7 | 8 | from mibidata import constants 9 | 10 | 11 | def _trim(array, lower=0., upper=1.): 12 | """Trims an array to a specified range; used for floating point errors.""" 13 | return np.minimum(np.maximum(array, lower), upper) 14 | 15 | 16 | def rgb2hsl(rgb): 17 | """Converts an RGB array to HSL. 18 | 19 | The hue is scaled to [0, 2*pi]; the saturation and lightness to [0, 1]. 20 | 21 | Args: 22 | rgb: An NxMx3 array of floats in the unit interval. 23 | 24 | Returns: 25 | An array the same shape as rgb converted to HSL coordinates. 26 | 27 | Raises: 28 | ValueError: Raised if the input array has values outside of the unit 29 | interval. 30 | 31 | References: 32 | HSL_and_HSV. Wikipedia: The Free Encyclopedia. Accessed 09/11/2016. 33 | http://en.wikipedia.org/wiki/HSL_and_HSV. 34 | """ 35 | if not (np.all(rgb >= 0.) and np.all(rgb <= 1.)): 36 | raise ValueError('Input array must have values in the unit interval.') 37 | 38 | max_channel = np.max(rgb, axis=2) 39 | min_channel = np.min(rgb, axis=2) 40 | channel_range = max_channel - min_channel 41 | 42 | # Use polar coordinate conversion rather than hexagons. 43 | alpha = (2 * rgb[:, :, 0] - rgb[:, :, 1] - rgb[:, :, 2]) / 2 44 | beta = np.sqrt(3) / 2 * (rgb[:, :, 1] - rgb[:, :, 2]) 45 | hue = np.arctan2(beta, alpha) 46 | # Shift from [-pi, pi] to [0, 2*pi] 47 | hue[hue < 0] += 2 * np.pi 48 | 49 | luminosity = (max_channel + min_channel) / 2 50 | 51 | saturation = np.zeros_like(channel_range) 52 | denom = (1 - np.abs(2 * luminosity - 1)) 53 | # Set the saturation to zero along the grayscale. 54 | idx = np.logical_and(channel_range > 0, ~np.isclose(denom, 0)) # pylint: disable=assignment-from-no-return 55 | saturation[idx] = channel_range[idx] / ( 56 | 1 - np.abs(2 * luminosity[idx] - 1)) 57 | 58 | return np.stack((_trim(hue, upper=2 * np.pi), 59 | _trim(saturation), 60 | _trim(luminosity)), axis=2) 61 | 62 | 63 | def hsl2rgb(hsl): 64 | """Converts an HSL array to RGB. 65 | 66 | Args: 67 | hsl: An NxMx3 array of floats representing an HSL image. The H layer 68 | must have values in the range [0, 2*pi]; the S and L layers must 69 | have values in the unit interval. 
70 | 71 | Returns: 72 | An array the same shape as hsl converted to RGB coordinates. 73 | 74 | Raises: 75 | ValueError: Raised if the input array has values outside of the 76 | expected intervals. 77 | 78 | References: 79 | HSL_and_HSV. Wikipedia: The Free Encyclopedia. Accessed 09/11/2016. 80 | http://en.wikipedia.org/wiki/HSL_and_HSV. 81 | """ 82 | if np.any(hsl < 0.): 83 | raise ValueError('Input array must have values with hue in the ' 84 | 'interval [0, 2*pi] and saturation and luminosity in ' 85 | 'the interval [0, 1], but this array has minimum %s.' 86 | % hsl.min()) 87 | if np.any(hsl[:, :, 1:] > 1.): 88 | raise ValueError('Input array must have values of saturation and ' 89 | 'luminosity in the interval [0, 1], but this array ' 90 | 'has maximum saturation and luminosity of %s.' 91 | % hsl[:, :, 1:].max()) 92 | if np.any(hsl[:, :, 0] > 2 * np.pi): 93 | raise ValueError('Input array must have values with hue in the ' 94 | 'interval [0, 2*pi] and saturation and luminosity in ' 95 | 'the interval [0, 1], but this array has maximum ' 96 | 'hue of %s.' % hsl[:, :, 0].max()) 97 | 98 | chroma = (1 - np.abs(2 * hsl[:, :, 2] - 1)) * hsl[:, :, 1] 99 | # H is in [0, 2*pi], thus Hprime is in [0, 6] 100 | hue_prime = 3 * hsl[:, :, 0] / np.pi 101 | x = chroma * (1 - np.abs(np.mod(hue_prime, 2) - 1)) 102 | # assign bin 1-6 for hue_prime where bin i is [i-1, i] 103 | sector = np.digitize(hue_prime, range(7)) 104 | cxz = np.stack((chroma, x, np.zeros_like(x)), axis=2) 105 | rgb = np.zeros_like(hsl) 106 | 107 | def rgb_sector(ind, order): 108 | idx = sector == ind 109 | ar = cxz[:, :, order] 110 | rgb[idx, :] = ar[idx, :] 111 | 112 | rgb_sector(1, [0, 1, 2]) 113 | rgb_sector(2, [1, 0, 2]) 114 | rgb_sector(3, [2, 0, 1]) 115 | rgb_sector(4, [2, 1, 0]) 116 | rgb_sector(5, [1, 2, 0]) 117 | rgb_sector(6, [0, 2, 1]) 118 | 119 | match_value = hsl[:, :, 2] - chroma / 2 120 | for i in range(3): 121 | rgb[:, :, i] += match_value 122 | np.clip(rgb, 0., 1., out=rgb) 123 | 124 | return rgb 125 | 126 | 127 | def rgb2cym(rgb): 128 | """Converts an RGB array to CYM. 129 | 130 | Args: 131 | rgb: An NxMx3 array of floats in the unit interval. 132 | 133 | Returns: 134 | An array the same shape as rgb converted to CYM colors. 135 | """ 136 | return invert_luminosity(1 - rgb[:, :, [0, 2, 1]]) 137 | 138 | 139 | def invert_luminosity(rgb): 140 | """Inverts the luminosity of an RGB image. 141 | 142 | Args: 143 | rgb: An NxMx3 array of floats in the unit interval. 144 | 145 | Returns: 146 | An array the same shape as rgb that has had its luminosity inverted. 147 | """ 148 | hsl = rgb2hsl(rgb) 149 | hsl[:, :, 2] = 1 - hsl[:, :, 2] 150 | return hsl2rgb(hsl) 151 | 152 | 153 | def _gray2hsl(array, angle): 154 | """Converts NxN grayscale to RGB of a single color. 155 | 156 | The input array is assumed to be scaled in the unit interval [0, 1] 157 | The angle is in the range [0, 2*pi] 158 | """ 159 | hsl = np.zeros((array.shape[0], array.shape[1], 3)) 160 | hsl[:, :, 0] = angle 161 | hsl[array > 0, 1] = 1 162 | hsl[:, :, 2] = array / 2 163 | return hsl 164 | 165 | 166 | def _porter_duff_screen(backdrop, source): 167 | """Reference: https://www.w3.org/TR/compositing-1/#blendingscreen""" 168 | return backdrop + source - (backdrop * source) 169 | 170 | 171 | def composite(image, color_map, gamma=1/3, min_scaling=10): 172 | """Combines multiple image channels by color into a 3-D array. 173 | 174 | Args: 175 | image: A MibiImage. 
176 | color_map: A dictionary keyed by color with values of channel names
177 | corresponding to a subset of those in the MibiImage. The
178 | allowed colors are 'Cyan', 'Yellow', 'Magenta', 'Green',
179 | 'Orange', 'Violet', 'Red', 'Blue' and 'Gray'.
180 | gamma: The value with which to scale the image data. Defaults to 1/3.
181 | If no gamma correction is desired, set to 1.
182 | min_scaling: The minimum number of counts used as the divisor for each
183 | channel before applying gamma. This is intended to prevent images
184 | with only a few counts from being scaled incorrectly to [0, 1].
185 |
186 | Returns:
187 | An NxMx3 uint8 array of an RGB image.
188 | """
189 | overlay = None
190 | for key, val in color_map.items():
191 | array = np.power( # pylint: disable=assignment-from-no-return
192 | image[val] / np.maximum(np.max(image[val]), min_scaling),
193 | gamma)
194 | rgb = (
195 | np.stack((array, array, array), axis=2) *
196 | constants.COLORS[key]
197 | )
198 | if overlay is None:
199 | overlay = rgb
200 | else:
201 | overlay = _porter_duff_screen(overlay, rgb)
202 | return np.uint8(overlay * 255)
203 |
204 |
205 | def compose_overlay(image, overlay_settings):
206 | """Overlays multiple image channels using overlay_settings from MIBItracker.
207 |
208 | The overlay_settings are intended to have the form of a channels.json file
209 | as downloaded from MIBItracker, but they can have any of the following
210 | forms:
211 |
212 | 1. ``{'image_id': {'channels': {'channel_1': {'color': color, ...}, ...}}}``,
213 | 2. ``{'channels': {'channel_1': {'color': color, ...}, ...}}``,
214 | 3. ``{'channel_1': {'color': color, ...}, ...}``.
215 |
216 | Each channel is expected to have the following fields:
217 |
218 | - 'color' (required):
219 | One of the following: 'Cyan', 'Yellow', 'Magenta', 'Green', 'Orange',
220 | 'Violet', 'Red', 'Blue', or 'Gray'.
221 | - 'brightness' (optional):
222 | float between -1 and 1; defaults to 0.
223 | - 'intensity_higher' (optional):
224 | Upper limit of the channel intensity; defaults to maximum counts in the
225 | channel.
226 | - 'intensity_lower' (optional):
227 | Lower limit of the channel intensity; defaults to 0.
228 | - 'blur' (optional):
229 | integer between 0 and 10; defaults to 0. Defines the Gaussian blur of
230 | the channel according to pre-defined convolution kernels.
231 |
232 | Args:
233 | image: A MibiImage.
234 | overlay_settings: Dictionary of MIBItracker visual settings.
235 |
236 | Returns:
237 | An NxMx3 uint8 array of an RGB image.
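
    Example:
        A sketch using the simplest settings layout (form 3 above); the
        channel names and values are illustrative::

            settings = {
                'CD45': {'color': 'Cyan', 'brightness': 0, 'blur': 0},
                'dsDNA': {'color': 'Blue', 'brightness': 0, 'blur': 1},
            }
            rgb = compose_overlay(image, settings)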
238 | """
239 | # Normalize the three accepted layouts described above to form 3.
240 | for v in overlay_settings.values():
241 | if 'color' in v:
242 | break
243 | if 'channels' in overlay_settings:
244 | overlay_settings = overlay_settings['channels']
245 | break
246 | if len(overlay_settings) == 1 and 'channels' in v:
247 | overlay_settings = v['channels']
248 | break
249 | raise ValueError('Unexpected format of overlay_settings dictionary.')
250 |
251 | overlay = None
252 | for channel in overlay_settings:
253 | setting = overlay_settings[channel]
254 | brightness = setting.get('brightness', 0)
255 | # If set to the minimum brightness, skip this channel.
256 | if brightness == constants.OVERLAY_MIN_BRIGHTNESS:
257 | continue
258 | # Copy so that the clamping below does not mutate the image data.
259 | array = image[channel].copy()
260 | # Because we treat the min differently, don't use np.clip here.
261 | range_min = setting.get('intensity_lower', 0)
262 | range_max = setting.get('intensity_higher', array.max())
263 | array[array > range_max] = range_max
264 | array[array < range_min] = 0
265 | array = array / float(range_max)
266 | array = ndimage.convolve(
267 | array, constants.OVERLAY_SMOOTHING_KERNELS[setting.get('blur', 0)])
268 | np.clip(array, 0, 1, out=array)
269 | if brightness > 0:
270 | array /= (1 - brightness)
271 | np.clip(array, 0, 1, out=array)
272 | elif brightness < 0:
273 | array = np.power(array, 1 - 3 * brightness) # pylint: disable=assignment-from-no-return
274 | rgb = (
275 | np.stack((array, array, array), axis=2) *
276 | constants.COLORS[setting['color']]
277 | )
278 | if overlay is None:
279 | overlay = rgb
280 | else:
281 | overlay = _porter_duff_screen(overlay, rgb)
282 | return np.uint8(overlay * 255)
283 |
--------------------------------------------------------------------------------
/docs/source/conf.py:
--------------------------------------------------------------------------------
1 | """Configuration for the Sphinx documentation."""
2 |
3 | # Ionpath's mibilib documentation build configuration file,
4 | # created by sphinx-quickstart on Tue Aug 2 08:57:24 2016.
5 | #
6 | # This file is execfile()d with the current directory set to its
7 | # containing dir.
8 | #
9 | # Note that not all possible configuration values are present in this
10 | # autogenerated file.
11 | #
12 | # All configuration values have a default; values that are commented out
13 | # serve to show the default.
14 |
15 | import sys
16 | import os
17 |
18 | # If extensions (or modules to document with autodoc) are in another directory,
19 | # add these directories to sys.path here. If the directory is relative to the
20 | # documentation root, use os.path.abspath to make it absolute.
21 |
22 | # This works for a docs directory that is parallel to the mibitracker directory:
23 | # mibilib/
24 | # |-- README
25 | # |-- setup.py
26 | # `-- mibitracker/
27 | # |-- __init__.py
28 | # |-- submodule1.py
29 | # |-- submodule2.py
30 | # `-- tests/
31 | # docs/
32 | # |-- Makefile
33 | # |-- build/
34 | # `-- source/
35 |
36 | sys.path.insert(0, os.path.abspath('../..'))
37 |
38 |
39 | # -- General configuration ------------------------------------------------
40 |
41 | # If your documentation needs a minimal Sphinx version, state it here.
42 | #needs_sphinx = '1.0'
43 |
44 | # Add any Sphinx extension module names here, as strings. They can be
45 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
46 | # ones.
47 |
48 | # Added sphinx.ext.napoleon since we are using Google-style docstrings.
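# For example, napoleon converts Google-style docstring sections such as
#
#     Args:
#         name: Description of the parameter.
#
#     Returns:
#         Description of the return value.
#
# into reStructuredText field lists when the docs are built.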
49 | extensions = [ 50 | 'sphinx.ext.autodoc', 51 | 'sphinx.ext.napoleon', 52 | 'sphinxarg.ext', 53 | 'sphinx.ext.mathjax', 54 | ] 55 | 56 | # Add any paths that contain templates here, relative to this directory. 57 | templates_path = ['_templates'] 58 | 59 | # The suffix(es) of source filenames. 60 | # You can specify multiple suffix as a list of string: 61 | # source_suffix = ['.rst', '.md'] 62 | source_suffix = '.rst' 63 | 64 | # The encoding of source files. 65 | #source_encoding = 'utf-8-sig' 66 | 67 | # The master toctree document. 68 | master_doc = 'index' 69 | 70 | # General information about the project. 71 | project = u'Ionpath MIBIlib' 72 | copyright = u'2021, Ionpath Inc.' # pylint: disable=redefined-builtin 73 | author = u'Ionpath Inc.' 74 | 75 | autodoc_member_order = 'bysource' 76 | napoleon_include_special_with_doc = True 77 | 78 | # The version info for the project you're documenting, acts as replacement for 79 | # |version| and |release|, also used in various other places throughout the 80 | # built documents. 81 | # 82 | # The short X.Y version. 83 | version = u'0.0.1' 84 | # The full version, including alpha/beta/rc tags. 85 | release = u'0.0.1' 86 | 87 | # The language for content autogenerated by Sphinx. Refer to documentation 88 | # for a list of supported languages. 89 | # 90 | # This is also used if you do content translation via gettext catalogs. 91 | # Usually you set "language" from the command line for these cases. 92 | language = None 93 | 94 | # There are two options for replacing |today|: either, you set today to some 95 | # non-false value, then it is used: 96 | #today = '' 97 | # Else, today_fmt is used as the format for a strftime call. 98 | #today_fmt = '%B %d, %Y' 99 | 100 | # List of patterns, relative to source directory, that match files and 101 | # directories to ignore when looking for source files. 102 | 103 | # Exclude is empty here but can exclude the tests directory via the last 104 | # argument in the spinx-apidoc command. Here is it issued from the root 105 | # doc directory: 106 | # $ sphinx-apidoc -f -o docs/source ./mibitracker ./mibitracker/{**,}/tests 107 | 108 | exclude_patterns = [] 109 | 110 | # The reST default role (used for this markup: `text`) to use for all 111 | # documents. 112 | #default_role = None 113 | 114 | # If true, '()' will be appended to :func: etc. cross-reference text. 115 | #add_function_parentheses = True 116 | 117 | # If true, the current module name will be prepended to all description 118 | # unit titles (such as .. function::). 119 | #add_module_names = True 120 | 121 | # If true, sectionauthor and moduleauthor directives will be shown in the 122 | # output. They are ignored by default. 123 | #show_authors = False 124 | 125 | # The name of the Pygments (syntax highlighting) style to use. 126 | pygments_style = 'sphinx' 127 | 128 | # A list of ignored prefixes for module index sorting. 129 | #modindex_common_prefix = [] 130 | 131 | # If true, keep warnings as "system message" paragraphs in the built documents. 132 | #keep_warnings = False 133 | 134 | # If true, `todo` and `todoList` produce output, else they produce nothing. 135 | todo_include_todos = False 136 | 137 | 138 | # -- Options for HTML output ---------------------------------------------- 139 | 140 | # The theme to use for HTML and HTML Help pages. See the documentation for 141 | # a list of builtin themes. 142 | html_theme = 'alabaster' 143 | 144 | # Theme options are theme-specific and customize the look and feel of a theme 145 | # further. 
For a list of options available for each theme, see the 146 | # documentation. 147 | #html_theme_options = {} 148 | 149 | # Add any paths that contain custom themes here, relative to this directory. 150 | #html_theme_path = [] 151 | 152 | # The name for this set of Sphinx documents. If None, it defaults to 153 | # " v documentation". 154 | #html_title = None 155 | 156 | # A shorter title for the navigation bar. Default is the same as html_title. 157 | #html_short_title = None 158 | 159 | # The name of an image file (relative to this directory) to place at the top 160 | # of the sidebar. 161 | #html_logo = None 162 | 163 | # The name of an image file (within the static path) to use as favicon of the 164 | # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 165 | # pixels large. 166 | html_favicon = 'favicon.ico' 167 | 168 | # Add any paths that contain custom static files (such as style sheets) here, 169 | # relative to this directory. They are copied after the builtin static files, 170 | # so a file named "default.css" will overwrite the builtin "default.css". 171 | html_static_path = ['_static'] 172 | 173 | # Add any extra paths that contain custom files (such as robots.txt or 174 | # .htaccess) here, relative to this directory. These files are copied 175 | # directly to the root of the documentation. 176 | #html_extra_path = [] 177 | 178 | # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, 179 | # using the given strftime format. 180 | #html_last_updated_fmt = '%b %d, %Y' 181 | 182 | # If true, SmartyPants will be used to convert quotes and dashes to 183 | # typographically correct entities. 184 | #html_use_smartypants = True 185 | 186 | # Custom sidebar templates, maps document names to template names. 187 | #html_sidebars = {} 188 | 189 | # Additional templates that should be rendered to pages, maps page names to 190 | # template names. 191 | #html_additional_pages = {} 192 | 193 | # If false, no module index is generated. 194 | #html_domain_indices = True 195 | 196 | # If false, no index is generated. 197 | #html_use_index = True 198 | 199 | # If true, the index is split into individual pages for each letter. 200 | #html_split_index = False 201 | 202 | # If true, links to the reST sources are added to the pages. 203 | #html_show_sourcelink = True 204 | 205 | # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 206 | #html_show_sphinx = True 207 | 208 | # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 209 | #html_show_copyright = True 210 | 211 | # If true, an OpenSearch description file will be output, and all pages will 212 | # contain a tag referring to it. The value of this option must be the 213 | # base URL from which the finished HTML is served. 214 | #html_use_opensearch = '' 215 | 216 | # This is the file name suffix for HTML files (e.g. ".xhtml"). 217 | #html_file_suffix = None 218 | 219 | # Language to be used for generating the HTML full-text search index. 220 | # Sphinx supports the following languages: 221 | # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' 222 | # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' 223 | #html_search_language = 'en' 224 | 225 | # A dictionary with options for the search language support, empty by default. 226 | # Now only 'ja' uses this config value 227 | #html_search_options = {'type': 'default'} 228 | 229 | # The name of a javascript file (relative to the configuration directory) that 230 | # implements a search results scorer. 
If empty, the default will be used. 231 | #html_search_scorer = 'scorer.js' 232 | 233 | # Output file base name for HTML help builder. 234 | htmlhelp_basename = 'mibilib-doc' 235 | 236 | # -- Options for LaTeX output --------------------------------------------- 237 | 238 | latex_elements = { 239 | # The paper size ('letterpaper' or 'a4paper'). 240 | #'papersize': 'letterpaper', 241 | 242 | # The font size ('10pt', '11pt' or '12pt'). 243 | #'pointsize': '10pt', 244 | 245 | # Additional stuff for the LaTeX preamble. 246 | #'preamble': '', 247 | 248 | # Latex figure (float) alignment 249 | #'figure_align': 'htbp', 250 | } 251 | 252 | # Grouping the document tree into LaTeX files. List of tuples 253 | # (source start file, target name, title, 254 | # author, documentclass [howto, manual, or own class]). 255 | latex_documents = [ 256 | (master_doc, 'mibilib-doc.tex', 257 | u'IONpath MIBItracker Client Documentation', 258 | u'Ionpath Inc.', 'manual'), 259 | ] 260 | 261 | # The name of an image file (relative to this directory) to place at the top of 262 | # the title page. 263 | #latex_logo = None 264 | 265 | # For "manual" documents, if this is true, then toplevel headings are parts, 266 | # not chapters. 267 | #latex_use_parts = False 268 | 269 | # If true, show page references after internal links. 270 | #latex_show_pagerefs = False 271 | 272 | # If true, show URL addresses after external links. 273 | #latex_show_urls = False 274 | 275 | # Documents to append as an appendix to all manuals. 276 | #latex_appendices = [] 277 | 278 | # If false, no module index is generated. 279 | #latex_domain_indices = True 280 | 281 | 282 | # -- Options for manual page output --------------------------------------- 283 | 284 | # One entry per manual page. List of tuples 285 | # (source start file, name, description, authors, manual section). 286 | man_pages = [ 287 | (master_doc, 'mibilib-doc', 288 | u'IONpath MIBItracker Client Documentation', 289 | [author], 1) 290 | ] 291 | 292 | # If true, show URL addresses after external links. 293 | #man_show_urls = False 294 | 295 | 296 | # -- Options for Texinfo output ------------------------------------------- 297 | 298 | # Grouping the document tree into Texinfo files. List of tuples 299 | # (source start file, target name, title, author, 300 | # dir menu entry, description, category) 301 | texinfo_documents = [ 302 | (master_doc, 'mibilib-doc', 303 | u'IONpath MIBItracker Client Documentation', 304 | author, 'mibilib', 'One line description of project.', 305 | 'Miscellaneous'), 306 | ] 307 | 308 | # Documents to append as an appendix to all manuals. 309 | #texinfo_appendices = [] 310 | 311 | # If false, no module index is generated. 312 | #texinfo_domain_indices = True 313 | 314 | # How to display URL addresses: 'footnote', 'no', or 'inline'. 315 | #texinfo_show_urls = 'footnote' 316 | 317 | # If true, do not generate a @detailmenu in the "Top" node's menu. 318 | #texinfo_no_detailmenu = False 319 | -------------------------------------------------------------------------------- /mibidata/segmentation.py: -------------------------------------------------------------------------------- 1 | """Utilities for working with segmentation data. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. 
All rights reserved."""
4 |
5 | import warnings
6 |
7 | import numpy as np
8 | import pandas as pd
9 | from scipy import ndimage as ndi
10 |
11 | from mibidata import mibi_image as mi, util
12 |
13 |
14 | def extract_cell_dataframe(label_image, image=None, mode='total',
15 | num_sectors=8):
16 | """Creates a dataframe of single-cell statistics from a labeled image.
17 |
18 | Args:
19 | label_image: An NxM array where each pixel's nonnegative integer value
20 | corresponds to the label of an image region, such as a cell or
21 | other segment.
22 | image: Optionally, a MibiImage of an NxM field of view. Defaults to
23 | None; if not None, the sum or score (depending on the mode) of each
24 | channel within each labeled region is returned as a column of the
25 | dataframe. Otherwise, only each region's area and centroid are returned.
26 | mode: One of ``'total'``, ``'quadrant'`` or ``'circular_sectors'``,
27 | defaulting to ``'total'``.
28 | If ``'total'``, the ion counts within each labeled region are summed.
29 | If ``'quadrant'``, the geometric mean of each region's four
30 | quadrants is calculated, which favors regions with even spatial
31 | distribution.
32 | If ``'circular_sectors'``, the geometric mean of each region's
33 | ``num_sectors`` circular sectors is calculated. This is a
34 | generalization of the ``quadrant`` mode.
35 | The mode is ignored if an image is not specified.
36 | num_sectors: The number of circular sectors to use in the
37 | ``circular_sectors`` mode. Optional, default is 8.
38 |
39 | Returns:
40 | A dataframe indexed by region label, whose columns
41 | include the area, centroid, and, if an image is given, the total or
42 | scored counts of the image's channels within each region.
43 | """
44 | segment_labels = np.unique(label_image)
45 | segment_labels = segment_labels[segment_labels > 0]
46 |
47 | columns = ['label', 'area', 'x_centroid', 'y_centroid']
48 | if image is not None:
49 | columns += list(image.targets or image.channels)
50 |
51 | rows = []
52 | for segment_label in segment_labels:
53 | region = label_image == segment_label
54 | nonzeros = np.nonzero(region)
55 |
56 | row = [segment_label, len(nonzeros[0]), int(round(nonzeros[1].mean())),
57 | int(round(nonzeros[0].mean()))]
58 | if image is not None:
59 | if mode == 'total':
60 | vals = image.data[nonzeros[0], nonzeros[1], :].sum(axis=0)
61 | elif mode == 'quadrant':
62 | vals = _circular_sectors_mean(nonzeros, image, num_sectors=4)
63 | elif mode == 'circular_sectors':
64 | vals = _circular_sectors_mean(nonzeros, image, num_sectors)
65 | else:
66 | raise ValueError('"mode" must be one of "total", "quadrant", '
67 | 'or "circular_sectors".')
68 | row.extend(vals)
69 | rows.append(row)
70 | return pd.DataFrame(rows, columns=columns).set_index('label')
71 |
72 |
73 | def _circular_sectors_mean(inds, image, num_sectors=8):
74 | """Divides a region into circular sectors and gets the geometric mean
75 | across the sectors.
76 |
77 | Args:
78 | inds: A tuple of 2 arrays of the y- and x- indices of the pixels in a
79 | segmented region of an image.
80 | image: A MibiImage in which the corresponding pixel indices are located.
81 | num_sectors: The number of circular sectors to use. Optional, default is 8.
82 |
83 | Returns:
84 | An array whose length is equal to the number of channels in the image.
85 | Each value in the array is the geometric mean of the image's integrated
86 | channel intensities over the region's num_sectors circular sectors.
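
    Example:
        A toy calculation with 4 sectors and a single channel: if the
        per-sector sums are [2, 8, 4, 4], the returned value is
        (2 * 8 * 4 * 4) ** (1 / 4) = 4.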
87 | """ 88 | # calculate the geometric center of the cells and get the counts 89 | y_center, x_center = np.mean(inds, axis=1) 90 | vals = image.data[inds] # has shape (num_pixels_in_cell, num_channels) 91 | 92 | # convert to polar coordinates: y, x -> phi, r 93 | phi = util.car2pol(inds[1], inds[0], x_center, y_center)[1] 94 | 95 | # create circular sectors 96 | sectors = [] 97 | ang_step = 2.*np.pi/num_sectors 98 | for i in range(num_sectors): 99 | values = vals[(phi >= i*ang_step) & (phi < (i + 1)*ang_step)] 100 | # check if the sector is empty; if so, fill one (neutral element for 101 | # the multiplication in the geometric mean); otherwise the whole cell 102 | # will be set to zero 103 | if not values.size: 104 | values = np.ones((1, len(image.channels))) 105 | new_sector = values.sum(axis=0) 106 | sectors.append(new_sector) 107 | secs = np.stack(sectors, axis=1) 108 | 109 | # calculate the geometric mean among the sectors 110 | return np.power(np.product(secs, axis=1), 1 / num_sectors) 111 | 112 | 113 | def replace_labeled_pixels(label_image, df, columns=None): 114 | """Replaces the pixels within each label with a value from a dataframe. 115 | 116 | Args: 117 | label_image: An NxM array where each pixel's nonnegative integer value 118 | corresponds to the label of an image region, such as a cell or 119 | other segment. 120 | df: A dataframe whose index corresponds to the integers in the 121 | label_array, and whose column values will replace the labels in the 122 | returned image. It is expected that the index will have name 'label' 123 | as is returned from :function:`extract_cell_dataframe`. 124 | columns: An optional sequence of which columns from the dataframe to 125 | include in the returned image. Defaults to None, which uses all 126 | columns in the dataframe. 127 | 128 | Returns: 129 | A :class:`mibidata.mibi_image.MibiImage` instance where each channel 130 | corresponds to a dataframe column, and the data is a copy of the label 131 | image where each pixel has been replaced with the corresponding value 132 | from that label's row in the dataframe. 133 | 134 | Raises: 135 | IndexError: if the index values of the DataFrame do not match the labels 136 | in the image. 137 | """ 138 | if df.index.name != 'label': 139 | warnings.warn( 140 | f'The dataframe index name is expected to be "label", but was ' 141 | f'found to be {df.index.name}. Check to make sure the index is ' 142 | f'formatted correctly as cell labels.' 143 | ) 144 | if 0 in df.index: 145 | warnings.warn( 146 | 'The dataframe index name is expected to be positive integer ' 147 | 'labels, but was found to contain 0. Check to make sure the index ' 148 | 'is formatted correctly as cell labels.' 149 | ) 150 | if columns is None: 151 | columns = df.columns 152 | label_array = np.zeros((label_image.max() + 1, len(columns)), 153 | dtype=label_image.dtype) 154 | try: 155 | label_array[df.index, :] = df[columns] 156 | except IndexError: 157 | raise IndexError('The values in the dataframe index do not match those ' 158 | 'in the label image.') 159 | columns = [str(i) for i in columns] 160 | return mi.MibiImage(label_array[label_image], columns) 161 | 162 | 163 | def expand_objects(label_image, distance): 164 | """Expands labeled objects in an image by a given number of pixels. 165 | 166 | Args: 167 | label_image: An NxM array where each pixel's nonnegative integer value 168 | corresponds to the label of an image region, such as a cell or 169 | other segment. 170 | distance: The distance (in pixels) to expand each object. 
171 |
172 | Returns:
173 | A new label array of the expanded objects.
174 | """
175 | background = label_image == 0
176 | distances, (i, j) = ndi.distance_transform_edt(background,
177 | return_indices=True)
178 | new_labels = label_image.copy()
179 |
180 | # This creates a mask for the pixels we will expand into, and then
181 | # sets them to the label of the closest non-background pixel.
182 | mask = background & (distances <= distance)
183 | new_labels[mask] = label_image[i[mask], j[mask]]
184 | return new_labels
185 |
186 |
187 | def filter_by_size(label_image, min_size, max_size):
188 | """Removes segments outside of a specified size range.
189 |
190 | Args:
191 | label_image: An NxM array where each pixel's nonnegative integer value
192 | corresponds to the label of an image region, such as a cell or
193 | other segment.
194 | min_size: The minimum area in pixels of a segment.
195 | max_size: The maximum area in pixels of a segment.
196 |
197 | Returns:
198 | A new label image, where segments outside of the size range have been
199 | set to zero, and a dataframe of its labels, centroids and area.
200 | """
201 | df = extract_cell_dataframe(label_image)
202 | segment_labels = df.index[(df['area'] >= min_size) &
203 | (df['area'] <= max_size)]
204 | new_labels = list(range(1, len(segment_labels) + 1))
205 | new_image = replace_labeled_pixels(
206 | label_image, pd.DataFrame(new_labels, index=segment_labels))
207 | new_df = pd.DataFrame(df.loc[segment_labels, :]).set_index(
208 | pd.Index(new_labels, name='label'))
209 | return np.squeeze(new_image.data), new_df
210 |
211 |
212 | def get_adjacency_matrix(label_image):
213 | """Calculates the adjacency matrix of the labeled regions.
214 |
215 | Args:
216 | label_image: An NxM array where each pixel's nonnegative integer value
217 | corresponds to the label of an image region, such as a cell or
218 | other segment.
219 |
220 | Returns:
221 | adjacency_matrix: An (N+1)x(N+1) array of floats, where N is the
222 | maximum label in the image. Each (i, j) element corresponds to
223 | the fraction of region i's boundary length that is shared with
224 | region j; row and column 0 correspond to the background.
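
    Example:
        A usage sketch (the label array is illustrative)::

            labels = np.array([[1, 1, 2, 2],
                               [1, 1, 2, 2]])
            adjacency = get_adjacency_matrix(labels)
            # adjacency[1, 2] is the fraction of region 1's boundary
            # that touches region 2.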
212 | def get_adjacency_matrix(label_image): 213 | """Calculates the adjacency matrix of the labeled regions. 214 | 215 | Args: 216 | label_image: An NxM array where each pixel's nonnegative integer value 217 | corresponds to the label of an image region, such as a cell or 218 | other segment. 219 | 220 | Returns: 221 | adjacency_matrix: An (N + 1)x(N + 1) array of floats, where N is the 222 | number of labels and row and column 0 correspond to the background. 223 | Each (i, j) element is the fraction of region i's boundary length 224 | that is shared with region j. 225 | 226 | """ 227 | 228 | # To find the adjacent regions we stack 4-connectivity nearest neighbors 229 | # of label_image and look for pixels with more than 1 label on the stack. 230 | 231 | # To create the stack we first need to pad label_image with zeros. 232 | pad_image = np.pad(label_image, 1, 'constant').astype(int) 233 | 234 | label_stack = np.array([ 235 | pad_image[:-2, 1:-1], 236 | pad_image[1:-1, :-2], pad_image[1:-1, 1:-1], pad_image[1:-1, 2:], 237 | pad_image[2:, 1:-1], 238 | ]) 239 | # next we sort labels along the stack 240 | label_stack = np.sort(label_stack, 0) 241 | # we find duplicate labels 242 | duplicates = label_stack[1:, :, :] == label_stack[:-1, :, :] 243 | # and set duplicate values to -1 244 | label_stack[1:, :, :][duplicates] = -1 245 | 246 | # We can now create an image of labeled region boundaries 247 | labeled_boundaries = ((label_stack > -1).sum(0) > 1) * label_image 248 | 249 | # Finally we create the adjacency_matrix 250 | number_of_labels = label_image.max() 251 | adjacency_matrix = np.zeros([number_of_labels + 1] * 2) 252 | for label_i in range(1, number_of_labels + 1): 253 | boundary = labeled_boundaries == label_i 254 | boundary_labels = label_stack[:, boundary] 255 | label_j, label_count = np.unique( 256 | boundary_labels[boundary_labels != -1], 257 | return_counts=True) 258 | adjacency_matrix[label_i, label_j] = label_count / boundary.sum() 259 | 260 | return adjacency_matrix 261 | 
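# Worked note on the return value, using a hypothetical label image:
#
#     >>> labels = np.array([[1, 1, 2],
#     ...                    [1, 1, 2],
#     ...                    [0, 0, 2]])
#     >>> adjacency = get_adjacency_matrix(labels)
#
# Row 1 of ``adjacency`` holds the fraction of region 1's boundary shared
# with region 2, and row 2 the fraction of region 2's boundary shared with
# region 1. The two regions generally have different boundary lengths, so
# the matrix is not symmetric in general; the diagonal entries are 1 for
# every labeled region with a boundary.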
10 | """ 11 | 12 | import datetime 13 | import io 14 | import json 15 | import os 16 | import shutil 17 | import tempfile 18 | import unittest 19 | 20 | from mock import patch 21 | import requests 22 | from requests.exceptions import HTTPError 23 | 24 | from mibidata import tiff 25 | from mibitracker import request_helpers 26 | 27 | 28 | class TestMibiRequests(unittest.TestCase): 29 | 30 | def setUp(self): 31 | self.mock_auth = patch.object(request_helpers.MibiRequests, '_auth') 32 | self.mock_auth.start() 33 | self.mtu = request_helpers.MibiRequests( 34 | 'https://mibitracker-instance.ionpath.com', 35 | 'user@ionpath.com', 36 | 'password' 37 | ) 38 | 39 | def tearDown(self): 40 | self.mock_auth.stop() 41 | 42 | @patch('requests.Session.options') 43 | def test_initializing_with_token(self, mock_option): 44 | fake_token = """ 45 | eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6IkpvaG4gRG9lIiwiaWF0IjoxNTE2MjM5MDIyfQ.SflKxwRJSMeKKF2QT4fwpMeJf36POk6yJV_adQssw5c 46 | """ 47 | try: 48 | request_helpers.MibiRequests( 49 | 'https://mibitracker-instance.ionpath.com', 50 | None, 51 | None, 52 | fake_token 53 | ) 54 | mock_option.assert_called_once_with( 55 | 'https://mibitracker-instance.ionpath.com', 56 | timeout=request_helpers.SESSION_TIMEOUT 57 | ) 58 | except ValueError as e: 59 | self.fail(e) 60 | 61 | @patch('requests.Session.options') 62 | def test_initializing_with_bad_token(self, mock_option): 63 | bad_token = """ 64 | bad_token 65 | """ 66 | mock_option.side_effect = HTTPError() 67 | 68 | with self.assertRaises(HTTPError): 69 | request_helpers.MibiRequests( 70 | 'https://mibitracker-instance.ionpath.com', 71 | None, 72 | None, 73 | bad_token 74 | ) 75 | 76 | def test_parameter_validation(self): 77 | with self.assertRaises(ValueError): 78 | request_helpers.MibiRequests( 79 | 'https://mibitracker-instance.ionpath.com', 80 | None, 81 | None, 82 | None 83 | ) 84 | 85 | @patch('requests.Session.post') 86 | def test_refresh(self, mock_post): 87 | self.mtu.session.headers.update({'Authorization': 'JWT token'}) 88 | mock_post().json.return_value = {'token': 'refreshed'} 89 | self.mtu.refresh() 90 | mock_post.assert_called_with( 91 | 'https://mibitracker-instance.ionpath.com/api-token-refresh/', 92 | data=json.dumps({'token': 'token'}), 93 | headers={'content-type': 'application/json'}, 94 | timeout=request_helpers.SESSION_TIMEOUT, 95 | ) 96 | self.assertEqual(self.mtu.session.headers['Authorization'], 97 | 'JWT refreshed') 98 | 99 | @patch.object(request_helpers.MibiRequests, 'refresh') 100 | def test_check_refresh(self, mock_refresh): 101 | self.mtu._check_refresh() 102 | mock_refresh.assert_not_called() 103 | self.mtu._last_refresh = self.mtu._last_refresh - datetime.timedelta( 104 | seconds=self.mtu._refresh_seconds + 1) 105 | self.mtu._check_refresh() 106 | mock_refresh.assert_called_once() 107 | 108 | def test_prepare_route(self): 109 | self.assertEqual(self.mtu._prepare_route('/images/'), '/images/') # pylint: disable=protected-access 110 | self.assertEqual(self.mtu._prepare_route('images/'), '/images/') # pylint: disable=protected-access 111 | 112 | @patch.object(request_helpers.MibiRequests, '_check_refresh') 113 | @patch('requests.Session.get') 114 | def test_get(self, mock_get, mock_check_refresh): 115 | self.mtu.get('images', params={'key': 'value'}) 116 | mock_get.assert_called_once_with( 117 | 'https://mibitracker-instance.ionpath.com/images', 118 | params={'key': 'value'}, timeout=request_helpers.SESSION_TIMEOUT 119 | ) 120 | 
mock_check_refresh.assert_called_once() 121 | 122 | @patch.object(request_helpers.MibiRequests, '_check_refresh') 123 | @patch('requests.Session.post') 124 | def test_post(self, mock_post, mock_check_refresh): 125 | self.mtu.post('/images/', data={'key': 'value'}) 126 | mock_post.assert_called_once_with( 127 | 'https://mibitracker-instance.ionpath.com/images/', 128 | data={'key': 'value'}, timeout=request_helpers.SESSION_TIMEOUT 129 | ) 130 | mock_check_refresh.assert_called_once() 131 | 132 | @patch.object(request_helpers.MibiRequests, '_check_refresh') 133 | @patch('requests.Session.put') 134 | def test_put(self, mock_put, mock_check_refresh): 135 | self.mtu.put('/images/1/', data={'key': 'value'}) 136 | mock_put.assert_called_once_with( 137 | 'https://mibitracker-instance.ionpath.com/images/1/', 138 | data={'key': 'value'}, timeout=request_helpers.SESSION_TIMEOUT 139 | ) 140 | mock_check_refresh.assert_called_once() 141 | 142 | @patch.object(request_helpers.MibiRequests, '_check_refresh') 143 | @patch('requests.Session.delete') 144 | def test_delete(self, mock_delete, mock_check_refresh): 145 | self.mtu.delete('/images/1/') 146 | mock_delete.assert_called_once_with( 147 | 'https://mibitracker-instance.ionpath.com/images/1/', 148 | timeout=request_helpers.SESSION_TIMEOUT 149 | ) 150 | mock_check_refresh.assert_called_once() 151 | 152 | def test_init_sets_retries(self): 153 | adapter = self.mtu.session.get_adapter(self.mtu.url) 154 | retries = adapter.max_retries 155 | self.assertTrue(retries.is_retry('GET', 502)) 156 | self.assertTrue(retries.is_retry('GET', 503)) 157 | self.assertTrue(retries.is_retry('POST', 502)) 158 | self.assertTrue(retries.is_retry('POST', 503)) 159 | self.assertFalse(retries.is_retry('GET', 302)) 160 | self.assertFalse(retries.is_retry('POST', 403)) 161 | 162 | @patch('requests.Session.get') 163 | @patch('requests.Response.raise_for_status') 164 | def test_status_checks_no_json(self, mock_raise, mock_get): 165 | mock_raise.side_effect = HTTPError('An HTTP error occurred.') 166 | mock_get.return_value = requests.Response() 167 | with self.assertRaises(HTTPError) as e: 168 | self.mtu.session.get('http://example.com') 169 | self.assertTrue('An HTTP error occurred' in str(e.exception)) 170 | 171 | @patch('requests.Session.get') 172 | @patch('requests.Response.json') 173 | @patch('requests.Response.raise_for_status') 174 | def test_status_checks_with_json(self, mock_raise, mock_json, mock_get): 175 | mock_raise.side_effect = HTTPError('An HTTP error occurred.') 176 | mock_json.return_value = {'Error': 'Helpful error description'} 177 | mock_get.return_value = requests.Response() 178 | with self.assertRaises(HTTPError) as e: 179 | self.mtu.session.get('http://domain.com') 180 | self.assertTrue('An HTTP error occurred' in str(e.exception)) 181 | self.assertTrue('Helpful error description' in str(e.exception)) 182 | 183 | @patch('requests.Response.raise_for_status') 184 | def test_status_checks_other_methods(self, mock_raise): 185 | mock_raise.side_effect = HTTPError('An HTTP error occurred.') 186 | for method in ('post', 'put', 'delete'): 187 | with patch.object(requests.Session, method) as mock_method: 188 | mock_method.return_value = requests.Response() 189 | method_to_call = getattr(self.mtu.session, method) 190 | with self.assertRaises(HTTPError): 191 | method_to_call('http://example.com') 192 | 193 | @patch.object(request_helpers.MibiRequests, '_upload_mibitiff') 194 | def test_upload_mibitiff_with_run_id(self, mock_upload): # pylint: disable=unused-argument 195 | 
buf = io.BytesIO() 196 | response = { 197 | 'location': 'some_path', 198 | 'url': 'http://somewhere' 199 | } 200 | run_id = 1 201 | expected_data = { 202 | 'location': response['location'], 203 | 'run_id': run_id, 204 | } 205 | with patch.object(self.mtu, 'get') as mock_get: 206 | mock_get().json.return_value = response 207 | with patch.object(self.mtu, 'post') as mock_post: 208 | self.mtu.upload_mibitiff(buf, run_id) 209 | mock_post.assert_called_once_with( 210 | '/upload_mibitiff/', 211 | data=json.dumps(expected_data), 212 | headers={'content-type': 'application/json'} 213 | ) 214 | 215 | def test_upload_mibitiff_without_run_id(self): 216 | buf = io.BytesIO() 217 | with self.assertRaises(ValueError): 218 | self.mtu.upload_mibitiff(buf, None) 219 | 220 | @patch.object(request_helpers.MibiRequests, '_upload_channel') 221 | def test_upload_channel_missing_filename(self, mock_upload): # pylint: disable=unused-argument 222 | buf = io.BytesIO() 223 | with self.assertRaises(ValueError): 224 | self.mtu.upload_channel(1, buf) 225 | 226 | @patch.object(request_helpers.MibiRequests, '_upload_channel') 227 | def test_upload_channel_with_filename(self, mock_upload): 228 | buf = io.BytesIO() 229 | self.mtu.upload_channel(1, buf, filename='image.png') 230 | mock_upload.assert_called_once_with(1, buf, 'image.png') 231 | 232 | @patch.object(request_helpers.MibiRequests, '_upload_channel') 233 | def test_upload_channel_use_filename(self, mock_upload): 234 | folder = tempfile.mkdtemp() 235 | path = os.path.join(folder, 'image.tiff') 236 | try: 237 | with open(path, 'w'): 238 | pass 239 | self.mtu.upload_channel(1, path) 240 | finally: 241 | shutil.rmtree(folder) 242 | mock_upload.assert_called_once() 243 | self.assertEqual(mock_upload.call_args[0][2], 'image.tiff') 244 | 245 | @patch.object(request_helpers.MibiRequests, '_upload_channel') 246 | def test_upload_channel_custom_filename(self, mock_upload): 247 | path = 'image.tiff' 248 | with open(path, 'w'): 249 | self.mtu.upload_channel(1, path, filename='image.png') 250 | os.remove(path) 251 | mock_upload.assert_called_once() 252 | self.assertEqual(mock_upload.call_args[0][2], 'image.png') 253 | 254 | @patch.object(request_helpers.MibiRequests, '_upload_channel') 255 | def test_upload_channel_file_object_with_name(self, mock_upload): 256 | path = 'image.tiff' 257 | with open(path, 'w') as fh: 258 | self.mtu.upload_channel(1, fh) 259 | os.remove(path) 260 | mock_upload.assert_called_once_with(1, fh, 'image.tiff') 261 | 262 | @patch('requests.Session.post') 263 | def test_upload_channel(self, mock_post): 264 | buf = io.BytesIO() 265 | self.mtu._upload_channel(1, buf, 'image.tiff') # pylint: disable=protected-access 266 | 267 | expected_files = { 268 | 'attachment': ('image.tiff', buf, 'image/tiff') 269 | } 270 | mock_post.assert_called_once_with( 271 | 'https://mibitracker-instance.ionpath.com/images/1/upload_channel/', 272 | files=expected_files, timeout=request_helpers.SESSION_TIMEOUT 273 | ) 274 | 275 | @patch.object(tiff, 'read') 276 | @patch.object(request_helpers.MibiRequests, 'download_file') 277 | @patch('requests.Session.get') 278 | def test_get_mibi_image(self, mock_get, mock_download, mock_read_tiff): 279 | mock_get.return_value.json.return_value = { 280 | 'run': {'path': 'path/to/run'}, 281 | 'folder': 'path/to/tiff' 282 | } 283 | self.mtu.get_mibi_image(1) 284 | mock_download.assert_called_once_with( 285 | 'path/to/run/path/to/tiff/summed_image.tiff' 286 | ) 287 | mock_read_tiff.assert_called_once_with(mock_download.return_value) 288 | 289 | if 
__name__ == '__main__': 290 | unittest.main() 291 | -------------------------------------------------------------------------------- /mibidata/tests/test_segmentation.py: -------------------------------------------------------------------------------- 1 | """Tests for mibidata.segmentation 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import unittest 6 | 7 | import numpy as np 8 | from numpy.testing import assert_array_equal 9 | import pandas as pd 10 | import pandas.testing as pdt 11 | 12 | from mibidata import mibi_image as mi, segmentation 13 | 14 | 15 | class TestSegmentation(unittest.TestCase): 16 | 17 | def setUp(self): 18 | np.random.seed(seed=20160906) 19 | 20 | 21 | def test_quadrant_mean(self): 22 | data = np.stack(( 23 | np.array([ 24 | [3, 2, 4], 25 | [1, 1, 3], 26 | [0, 0, 1]]), 27 | np.array([ 28 | [0, 0, 1], 29 | [0, 0, 0], 30 | [0, 0, 2]]), 31 | ), axis=2) 32 | # assume labels are for cell ID 1, such as with label image: 33 | # np.array([ 34 | # [0, 1, 1], 35 | # [1, 1, 1], 36 | # [0, 0, 0] 37 | # ]) 38 | inds = ((0, 0, 1, 1, 1), (1, 2, 0, 1, 2)) 39 | quads_channel_0 = np.power(2 * 4 * (1 + 1) * 3, 1 / 4) # pylint: disable=assignment-from-no-return 40 | quads_channel_1 = np.power(0 * 1 * (0 + 0) * 0, 1 / 4) # pylint: disable=assignment-from-no-return 41 | expected = np.array((quads_channel_0, quads_channel_1)) 42 | image = mi.MibiImage(data, ['1', '2']) 43 | assert_array_equal( 44 | segmentation._circular_sectors_mean(inds, 45 | image, 46 | num_sectors=4), 47 | expected 48 | ) 49 | 50 | 51 | def test_circular_sectors(self): 52 | """Test circular sectors method in segmentation. 53 | """ 54 | # create data for image 2 channels 55 | channels = ['ch0', 'ch1'] 56 | data = np.stack(( 57 | # channel 0 58 | np.arange(36, dtype='float').reshape(6, 6), 59 | # this is the matrix: 60 | #np.array([[ 0, 1, 2, 3, 4, 5], 61 | # [ 6, 7, 8, 9, 10, 11], 62 | # [12, 13, 14, 15, 16, 17], 63 | # [18, 19, 20, 21, 22, 23], 64 | # [24, 25, 26, 27, 28, 29], 65 | # [30, 31, 32, 33, 34, 35]], dtype='float') 66 | # channel 1 67 | np.array([ 68 | [0, 0, 0, 0, 0, 0], 69 | [0, 1, 0, 0, 0, 0], 70 | [0, 0, 0, 1, 0, 0], 71 | [0, 0, 0, 0, 0, 1], 72 | [0, 1, 0, 0, 0, 0], 73 | [0, 0, 3, 0, 0, 2]], dtype='float'), 74 | ), axis=2) 75 | # assume labels are for cell ID 1, such as with label image: 76 | # np.array([ 77 | # [1, 1, 1, 1, 1, 0], 78 | # [1, 1, 1, 1, 1, 0], 79 | # [1, 1, 1, 1, 1, 0], 80 | # [1, 1, 1, 1, 1, 0], 81 | # [1, 1, 1, 1, 1, 0], 82 | # [0, 0, 0, 0, 0, 0]]) 83 | # indices of the pixels of the cell 84 | x = np.arange(5) 85 | y = x 86 | x_inds, y_inds = np.meshgrid(x, y, indexing='ij') 87 | inds = (y_inds.flatten(), x_inds.flatten()) 88 | # sum within sectors and calculate geometric mean for each channel 89 | secs = [] 90 | for i in range(len(channels)): 91 | sec1 = data[2][2][i] + data[2][3][i] + data[2][4][i] + data[3][4][i] 92 | sec2 = data[3][3][i] + data[4][3][i] + data[4][4][i] 93 | sec3 = data[3][2][i] + data[4][2][i] + data[4][1][i] 94 | sec4 = data[3][1][i] + data[4][0][i] + data[3][0][i] 95 | sec5 = data[2][1][i] + data[2][0][i] + data[1][0][i] 96 | sec6 = data[1][1][i] + data[0][0][i] + data[0][1][i] 97 | sec7 = data[1][2][i] + data[0][2][i] + data[0][3][i] 98 | sec8 = data[1][3][i] + data[0][4][i] + data[1][4][i] 99 | secs_geom_mean = np.power(sec1 * sec2 * sec3 * sec4 * \ 100 | sec5 * sec6 * sec7 * sec8, 1/8) 101 | secs.append(secs_geom_mean) 102 | expected = np.array(secs) 103 | # test the function 104 | image = mi.MibiImage(data, channels) 105 | circ_secs = 
segmentation._circular_sectors_mean(inds, 106 | image, 107 | num_sectors=8) 108 | assert_array_equal(circ_secs, expected) 109 | 110 | 111 | def test_circular_sectors_small_cell(self): 112 | """Test small cell with empty sectors. 113 | """ 114 | # create data for image 2 channels 115 | channels = ['ch0', 'ch1'] 116 | data = np.stack(( 117 | # channel 0 118 | np.arange(16, dtype='float').reshape(4, 4), 119 | # this is the matrix: 120 | #np.array([[ 0, 1, 2, 3], 121 | # [ 4, 5, 6, 7], 122 | # [ 8, 9, 10, 11], 123 | # [12, 13, 14, 15]], dtype='float') 124 | # channel 1 125 | np.array([ 126 | [0, 0, 0, 0], 127 | [0, 1, 0, 0], 128 | [0, 0, 0, 1], 129 | [0, 0, 0, 0]], dtype='float'), 130 | ), axis=2) 131 | # assume labels are for cell ID 1, such as with label image: 132 | # np.array([ 133 | # [0, 1, 1, 0], 134 | # [0, 1, 1, 0], 135 | # [0, 0, 0, 0], 136 | # [0, 0, 0, 0]]) 137 | # indices of the pixels of the cell 138 | inds = ((0, 0, 1, 1), (1, 2, 1, 2)) 139 | # sum within sectors and calculate geometric mean for each channel 140 | secs = [] 141 | for i in range(len(channels)): 142 | sec1 = 1 # empty sector 143 | sec2 = data[1][2][i] 144 | sec3 = 1 # empty sector 145 | sec4 = data[1][1][i] 146 | sec5 = 1 # empty sector 147 | sec6 = data[0][1][i] 148 | sec7 = 1 # empty sector 149 | sec8 = data[0][2][i] 150 | secs_geom_mean = np.power(sec1 * sec2 * sec3 * sec4 * \ 151 | sec5 * sec6 * sec7 * sec8, 1/8) 152 | secs.append(secs_geom_mean) 153 | expected = np.array(secs) 154 | # test the function 155 | image = mi.MibiImage(data, channels) 156 | circ_secs = segmentation._circular_sectors_mean(inds, 157 | image, 158 | num_sectors=8) 159 | assert_array_equal(circ_secs, expected) 160 | 161 | 162 | def test_extract_cell_dataframe(self): 163 | data = np.stack(( 164 | np.array([ 165 | [3, 2, 4, 0], 166 | [1, 1, 3, 1], 167 | [0, 0, 1, 1], 168 | [5, 0, 3, 1]]), 169 | np.array([ 170 | [0, 0, 1, 0], 171 | [0, 0, 0, 1], 172 | [0, 0, 2, 0], 173 | [5, 0, 0, 0]]), 174 | ), axis=2) 175 | cell_labels = np.array([ 176 | [0, 1, 1, 0], 177 | [1, 1, 3, 3], 178 | [0, 0, 3, 3], 179 | [0, 0, 3, 3] 180 | ]) 181 | image = mi.MibiImage(data, ['1', '2']) 182 | labels = [1, 3] 183 | areas = [4, 6] 184 | x_centroids = [1, 2] 185 | y_centroids = [0, 2] 186 | first_total = [8, 10] 187 | second_total = [1, 3] 188 | # Check coords and areas only 189 | expected_from_labels = pd.DataFrame( 190 | np.array([areas, x_centroids, y_centroids]).T, 191 | columns=['area', 'x_centroid', 'y_centroid'], 192 | index=pd.Index(labels, name='label')) 193 | pdt.assert_frame_equal( 194 | segmentation.extract_cell_dataframe(cell_labels), 195 | expected_from_labels) 196 | # Check mode 'total' 197 | expected_from_total = pd.DataFrame( 198 | np.array([first_total, second_total]).T, 199 | columns=['1', '2'], index=pd.Index(labels, name='label')) 200 | pdt.assert_frame_equal( 201 | segmentation.extract_cell_dataframe(cell_labels, image), 202 | pd.concat((expected_from_labels, expected_from_total), axis=1)) 203 | # Check mode 'quadrant' 204 | quads = [] 205 | for label in labels: 206 | inds = np.nonzero(cell_labels == label) 207 | quads.append(segmentation._circular_sectors_mean(inds, 208 | image, 209 | num_sectors=4)) 210 | expected_from_quadrants = pd.DataFrame( 211 | np.array(quads), 212 | columns=['1', '2'], index=pd.Index(labels, name='label')) 213 | pdt.assert_frame_equal( 214 | segmentation.extract_cell_dataframe( 215 | cell_labels, image, mode='quadrant'), 216 | pd.concat((expected_from_labels, expected_from_quadrants), axis=1)) 217 | # Check mode 
'circular_sectors' 218 | secs = [] 219 | for label in labels: 220 | inds = np.nonzero(cell_labels == label) 221 | num_sectors = 8 222 | secs.append(segmentation._circular_sectors_mean(inds, image, 223 | num_sectors)) 224 | expected_from_circular_sectors = pd.DataFrame( 225 | np.array(secs), 226 | columns=['1', '2'], index=pd.Index(labels, name='label')) 227 | pdt.assert_frame_equal( 228 | segmentation.extract_cell_dataframe( 229 | cell_labels, image, mode='circular_sectors', 230 | num_sectors=num_sectors), 231 | pd.concat((expected_from_labels, expected_from_circular_sectors), 232 | axis=1)) 233 | 234 | 235 | def test_replace_labeled_pixels(self): 236 | cell_labels = np.array([ 237 | [0, 1, 1, 0], 238 | [1, 1, 3, 3], 239 | [0, 0, 3, 3], 240 | [0, 0, 3, 3] 241 | ]) 242 | df = pd.DataFrame([ 243 | [100, 0], 244 | [150, 25], 245 | ], columns=['dsDNA', 'CD45'], index=pd.Index([1, 3], name='label')) 246 | expected_data = np.stack(( 247 | np.array([ 248 | [0, 100, 100, 0], 249 | [100, 100, 150, 150], 250 | [0, 0, 150, 150], 251 | [0, 0, 150, 150] 252 | ]), 253 | np.array([ 254 | [0, 0, 0, 0], 255 | [0, 0, 25, 25], 256 | [0, 0, 25, 25], 257 | [0, 0, 25, 25] 258 | ]), 259 | ), axis=2) 260 | self.assertEqual( 261 | segmentation.replace_labeled_pixels(cell_labels, df), 262 | mi.MibiImage(expected_data, ['dsDNA', 'CD45'])) 263 | self.assertEqual( 264 | segmentation.replace_labeled_pixels(cell_labels, df, 265 | columns=['dsDNA']), 266 | mi.MibiImage(expected_data[:, :, [0]], ['dsDNA'])) 267 | 268 | 269 | def test_filter_by_size(self): 270 | cell_labels = np.array([ 271 | [0, 1, 1, 2], 272 | [1, 1, 3, 3], 273 | [4, 4, 3, 3], 274 | [0, 4, 3, 3] 275 | ]) 276 | expected = np.array([ 277 | [0, 1, 1, 0], 278 | [1, 1, 0, 0], 279 | [2, 2, 0, 0], 280 | [0, 2, 0, 0] 281 | ]) 282 | df = segmentation.extract_cell_dataframe(expected) 283 | filtered_image, filtered_df = segmentation.filter_by_size( 284 | cell_labels, 3, 5) 285 | assert_array_equal(filtered_image, expected) 286 | pdt.assert_frame_equal(filtered_df, df) 287 | 288 | 289 | def test_expand_objects(self): 290 | labels = np.array([ 291 | [0, 0, 1, 1, 0], 292 | [2, 2, 1, 1, 1], 293 | [2, 2, 0, 1, 0], 294 | [0, 2, 0, 0, 0], 295 | [0, 0, 0, 0, 0] 296 | ]) 297 | expected_1 = np.array([ 298 | [2, 2, 1, 1, 1], 299 | [2, 2, 1, 1, 1], 300 | [2, 2, 2, 1, 1], 301 | [2, 2, 2, 1, 0], 302 | [0, 2, 0, 0, 0] 303 | ]) 304 | expected_2 = np.array([ 305 | [2, 2, 1, 1, 1], 306 | [2, 2, 1, 1, 1], 307 | [2, 2, 2, 1, 1], 308 | [2, 2, 2, 1, 1], 309 | [2, 2, 2, 1, 0] 310 | ]) 311 | assert_array_equal(segmentation.expand_objects(labels, 0), labels) 312 | assert_array_equal(segmentation.expand_objects(labels, 1), expected_1) 313 | assert_array_equal(segmentation.expand_objects(labels, 2), expected_2) 314 | 315 | 316 | def test_adjacency_matrix(self): 317 | cell_labels = np.array([ 318 | [0, 1, 2, 2, 0], 319 | [1, 1, 3, 3, 3], 320 | [1, 1, 3, 3, 3], 321 | [0, 0, 3, 3, 3] 322 | ]) 323 | expected = np.array([ 324 | [0, 0, 0, 0], 325 | [4/5, 1, 1/5, 2/5], 326 | [2/2, 1/2, 1, 2/2], 327 | [5/8, 2/8, 2/8, 1]]) 328 | 329 | assert_array_equal(segmentation.get_adjacency_matrix(cell_labels), 330 | expected) 331 | 332 | 333 | if __name__ == '__main__': 334 | unittest.main() 335 | -------------------------------------------------------------------------------- /.pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # A comma-separated list of package or module names from where C extensions may 4 | # be loaded. 
Extensions are loading into the active Python interpreter and may 5 | # run arbitrary code 6 | extension-pkg-whitelist= 7 | 8 | # Add files or directories to the blacklist. They should be base names, not 9 | # paths. 10 | ignore= 11 | 12 | # Add files or directories matching the regex patterns to the blacklist. The 13 | # regex matches against base names, not paths. 14 | ignore-patterns= 15 | 16 | # Python code to execute, usually for sys.path manipulation such as 17 | # pygtk.require(). 18 | init-hook= 19 | 20 | # Use multiple processes to speed up Pylint. 21 | jobs=1 22 | 23 | # List of plugins (as comma separated values of python modules names) to load, 24 | # usually to register additional checkers. 25 | load-plugins= 26 | 27 | # Pickle collected data for later comparisons. 28 | persistent=no 29 | 30 | # Specify a configuration file. 31 | #rcfile= 32 | 33 | # Allow loading of arbitrary C extensions. Extensions are imported into the 34 | # active Python interpreter and may run arbitrary code. 35 | unsafe-load-any-extension=yes 36 | 37 | 38 | [MESSAGES CONTROL] 39 | 40 | # Only show warnings with the listed confidence levels. Leave empty to show 41 | # all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED 42 | confidence= 43 | 44 | # Disable the message, report, category or checker with the given id(s). You 45 | # can either give multiple identifiers separated by comma (,) or put this 46 | # option multiple times (only on the command line, not in the configuration 47 | # file where it should appear only once).You can also use "--disable=all" to 48 | # disable everything first and then reenable specific checks. For example, if 49 | # you want to run only the similarities checker, you can use "--disable=all 50 | # --enable=similarities". If you want to run only the classes checker, but have 51 | # no Warning level messages displayed, use"--disable=all --enable=classes 52 | # --disable=W" 53 | disable=invalid-name,fixme,arguments-differ 54 | 55 | # Enable the message, report, category or checker with the given id(s). You can 56 | # either give multiple identifier separated by comma (,) or put this option 57 | # multiple time (only on the command line, not in the configuration file where 58 | # it should appear only once). See also the "--disable" option for examples. 59 | enable= 60 | 61 | 62 | [REPORTS] 63 | 64 | # Python expression which should return a note less than 10 (10 is the highest 65 | # note). You have access to the variables errors warning, statement which 66 | # respectively contain the number of errors / warnings messages and the total 67 | # number of statements analyzed. This is used by the global evaluation report 68 | # (RP0004). 69 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 70 | 71 | # Template used to display messages. This is a python new-style format string 72 | # used to format the message information. See doc for all details 73 | #msg-template= 74 | 75 | # Set the output format. Available formats are text, parseable, colorized, json 76 | # and msvs (visual studio).You can also give a reporter class, eg 77 | # mypackage.mymodule.MyReporterClass. 78 | output-format=text 79 | 80 | # Tells whether to display a full report or only the messages 81 | reports=no 82 | 83 | # Activate the evaluation score. 
84 | score=no 85 | 86 | 87 | [REFACTORING] 88 | 89 | # Maximum number of nested blocks for function / method body 90 | max-nested-blocks=5 91 | 92 | 93 | [TYPECHECK] 94 | 95 | # List of decorators that produce context managers, such as 96 | # contextlib.contextmanager. Add to this list to register other decorators that 97 | # produce valid context managers. 98 | contextmanager-decorators=contextlib.contextmanager 99 | 100 | # List of members which are set dynamically and missed by pylint inference 101 | # system, and so shouldn't trigger E1101 when accessed. Python regular 102 | # expressions are accepted. 103 | generated-members= 104 | 105 | # Tells whether missing members accessed in mixin class should be ignored. A 106 | # mixin class is detected if its name ends with "mixin" (case insensitive). 107 | ignore-mixin-members=yes 108 | 109 | # This flag controls whether pylint should warn about no-member and similar 110 | # checks whenever an opaque object is returned when inferring. The inference 111 | # can return multiple potential results while evaluating a Python object, but 112 | # some branches might not be evaluated, which results in partial inference. In 113 | # that case, it might be useful to still emit no-member and other checks for 114 | # the rest of the inferred objects. 115 | ignore-on-opaque-inference=yes 116 | 117 | # List of class names for which member attributes should not be checked (useful 118 | # for classes with dynamically set attributes). This supports the use of 119 | # qualified names. 120 | ignored-classes= 121 | 122 | # List of module names for which member attributes should not be checked 123 | # (useful for modules/projects where namespaces are manipulated during runtime 124 | # and thus existing member attributes cannot be deduced by static analysis. It 125 | # supports qualified module names, as well as Unix pattern matching. 126 | 127 | # Pylint has problems with numpy: https://github.com/PyCQA/pylint/issues/779 128 | ignored-modules=numpy,ipywidgets.widgets 129 | 130 | # Show a hint with possible names when a member name was not found. The aspect 131 | # of finding the hint is based on edit distance. 132 | missing-member-hint=yes 133 | 134 | # The minimum edit distance a name should have in order to be considered a 135 | # similar match for a missing member name. 136 | missing-member-hint-distance=1 137 | 138 | # The total number of similar names that should be taken in consideration when 139 | # showing a hint for a missing member. 140 | missing-member-max-choices=1 141 | 142 | 143 | [SIMILARITIES] 144 | 145 | # Ignore comments when computing similarities. 146 | ignore-comments=yes 147 | 148 | # Ignore docstrings when computing similarities. 149 | ignore-docstrings=yes 150 | 151 | # Ignore imports when computing similarities. 152 | ignore-imports=yes 153 | 154 | # Minimum lines number of a similarity. 155 | min-similarity-lines=8 156 | 157 | 158 | [LOGGING] 159 | 160 | # Logging modules to check that the string format arguments are in logging 161 | # function parameter format 162 | logging-modules=logging 163 | 164 | 165 | [SPELLING] 166 | 167 | # Spelling dictionary name. Available dictionaries: none. To make it working 168 | # install python-enchant package. 169 | spelling-dict= 170 | 171 | # List of comma separated words that should not be checked. 172 | spelling-ignore-words= 173 | 174 | # A path to a file that contains private dictionary; one word per line. 
175 | spelling-private-dict-file= 176 | 177 | # Tells whether to store unknown words to indicated private dictionary in 178 | # --spelling-private-dict-file option instead of raising a message. 179 | spelling-store-unknown-words=no 180 | 181 | 182 | [VARIABLES] 183 | 184 | # List of additional names supposed to be defined in builtins. Remember that 185 | # you should avoid to define new builtins when possible. 186 | additional-builtins= 187 | 188 | # Tells whether unused global variables should be treated as a violation. 189 | allow-global-unused-variables=yes 190 | 191 | # List of strings which can identify a callback function by name. A callback 192 | # name must start or end with one of those strings. 193 | callbacks=cb_,_cb 194 | 195 | # A regular expression matching the name of dummy variables (i.e. expectedly 196 | # not used). 197 | dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ 198 | 199 | # Argument names that match this expression will be ignored. Default to name 200 | # with leading underscore 201 | ignored-argument-names=_.*|^ignored_|^unused_ 202 | 203 | # Tells whether we should check for unused import in __init__ files. 204 | init-import=no 205 | 206 | # List of qualified module names which can have objects that can redefine 207 | # builtins. 208 | redefining-builtins-modules=six.moves,future.builtins 209 | 210 | 211 | [MISCELLANEOUS] 212 | 213 | # List of note tags to take in consideration, separated by a comma. 214 | notes=TODO 215 | 216 | 217 | [FORMAT] 218 | 219 | # Expected format of line ending, e.g. empty (any line ending), LF or CRLF. 220 | expected-line-ending-format= 221 | 222 | # Regexp for a line that is allowed to be longer than the limit. 223 | ignore-long-lines=^\s*(# )??$ 224 | 225 | # Number of spaces of indent required inside a hanging or continued line. 226 | indent-after-paren=4 227 | 228 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 229 | # tab). 230 | indent-string=' ' 231 | 232 | # Maximum number of characters on a single line. 233 | max-line-length=80 234 | 235 | # Maximum number of lines in a module 236 | max-module-lines=1000 237 | 238 | # Allow the body of a class to be on the same line as the declaration if body 239 | # contains single statement. 240 | single-line-class-stmt=no 241 | 242 | # Allow the body of an if to be on the same line as the test if there is no 243 | # else. 244 | single-line-if-stmt=no 245 | 246 | 247 | [BASIC] 248 | 249 | # Regular expression matching correct argument names 250 | argument-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 251 | 252 | # Regular expression matching correct attribute names 253 | attr-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 254 | 255 | # Bad variable names which should always be refused, separated by a comma 256 | bad-names=foo,bar,baz,toto,tutu,tata 257 | 258 | # Regular expression matching correct class attribute names 259 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 260 | 261 | # Regular expression matching correct class names 262 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 263 | 264 | # Regular expression matching correct constant names 265 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 266 | 267 | # Minimum line length for functions/classes that require docstrings, shorter 268 | # ones are exempt. 
269 | docstring-min-length=4 270 | 271 | # Regular expression matching correct function names 272 | function-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 273 | 274 | # Good variable names which should always be accepted, separated by a comma 275 | good-names=i,j,k,ex,Run,_ 276 | 277 | # Include a hint for the correct naming format with invalid-name 278 | include-naming-hint=no 279 | 280 | # Regular expression matching correct inline iteration names 281 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 282 | 283 | # Regular expression matching correct method names 284 | method-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 285 | 286 | # Regular expression matching correct module names 287 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 288 | 289 | # Colon-delimited sets of names that determine each other's naming style when 290 | # the name regexes allow several styles. 291 | name-group= 292 | 293 | # Regular expression which should only match function or class names that do 294 | # not require a docstring. 295 | no-docstring-rgx=^_|test|Test 296 | 297 | # List of decorators that produce properties, such as abc.abstractproperty. Add 298 | # to this list to register other decorators that produce valid properties. 299 | property-classes=abc.abstractproperty 300 | 301 | # Regular expression matching correct variable names 302 | variable-rgx=(([a-z][a-z0-9_]{2,30})|(_[a-z0-9_]*))$ 303 | 304 | 305 | [CLASSES] 306 | 307 | # List of method names used to declare (i.e. assign) instance attributes. 308 | defining-attr-methods=__init__,__new__,setUp 309 | 310 | # List of member names, which should be excluded from the protected access 311 | # warning. 312 | exclude-protected=_asdict,_fields,_replace,_source,_make 313 | 314 | # List of valid names for the first argument in a class method. 315 | valid-classmethod-first-arg=cls 316 | 317 | # List of valid names for the first argument in a metaclass class method. 318 | valid-metaclass-classmethod-first-arg=mcs 319 | 320 | 321 | [IMPORTS] 322 | 323 | # Allow wildcard imports from modules that define __all__. 324 | allow-wildcard-with-all=no 325 | 326 | # Analyse import fallback blocks. This can be used to support both Python 2 and 327 | # 3 compatible code, which means that the block might have code that exists 328 | # only in one or another interpreter, leading to false positives when analysed. 329 | analyse-fallback-blocks=no 330 | 331 | # Deprecated modules which should not be used, separated by a comma 332 | deprecated-modules=optparse,tkinter.tix 333 | 334 | # Create a graph of external dependencies in the given file (report RP0402 must 335 | # not be disabled) 336 | ext-import-graph= 337 | 338 | # Create a graph of every (i.e. internal and external) dependencies in the 339 | # given file (report RP0402 must not be disabled) 340 | import-graph= 341 | 342 | # Create a graph of internal dependencies in the given file (report RP0402 must 343 | # not be disabled) 344 | int-import-graph= 345 | 346 | # Force import order to recognize a module as part of the standard 347 | # compatibility libraries. 348 | known-standard-library= 349 | 350 | # Force import order to recognize a module as part of a third party library. 351 | known-third-party= 352 | 353 | 354 | [DESIGN] 355 | 356 | # Maximum number of arguments for function / method 357 | max-args=20 358 | 359 | # Maximum number of attributes for a class (see R0902). 
360 | max-attributes=30 361 | 362 | # Maximum number of boolean expressions in a if statement 363 | max-bool-expr=5 364 | 365 | # Maximum number of branch for function / method body 366 | max-branches=12 367 | 368 | # Maximum number of parents for a class (see R0901). 369 | max-parents=7 370 | 371 | # Maximum number of public methods for a class (see R0904). 372 | max-public-methods=100 373 | 374 | # Maximum number of return / yield for function / method body 375 | max-returns=10 376 | 377 | # Maximum number of statements in function / method body 378 | max-statements=50 379 | 380 | # Minimum number of public methods for a class (see R0903). 381 | min-public-methods=0 382 | 383 | # Maximum number of local variables 384 | max-locals=50 385 | 386 | [EXCEPTIONS] 387 | 388 | # Exceptions that will emit a warning when being caught. Defaults to 389 | # "Exception" 390 | overgeneral-exceptions=Exception -------------------------------------------------------------------------------- /MibiImage_Tutorial.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

# Introduction

\n", 8 | "\n", 9 | "This notebook provides examples for working with MIBI data in the form of a MibiImage instance, which is a class in the `mibidata` module of the [mibilib](https://github.com/ionpath/mibilib/tree/master/mibidata) library. Complete documentation for the module is available [here](https://ionpath.github.io/mibilib/mibidata.html#module-mibidata)." 10 | ] 11 | }, 12 | { 13 | "cell_type": "code", 14 | "execution_count": null, 15 | "metadata": {}, 16 | "outputs": [], 17 | "source": [ 18 | "%matplotlib notebook\n", 19 | "%load_ext autoreload\n", 20 | "%autoreload 2\n", 21 | "\n", 22 | "import os\n", 23 | "\n", 24 | "import matplotlib\n", 25 | "import matplotlib.pyplot as plt\n", 26 | "from mpl_toolkits.axes_grid1 import make_axes_locatable\n", 27 | "import numpy as np\n", 28 | "from skimage import filters\n", 29 | "\n", 30 | "from mibidata.mibi_image import MibiImage\n", 31 | "from mibidata import color, tiff\n", 32 | "from mibitracker import request_helpers\n", 33 | "\n", 34 | "matplotlib.rcParams.update({\n", 35 | " 'font.size': 8,\n", 36 | " 'axes.grid': False,\n", 37 | "})" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "

## Creating a MibiImage Instance

\n", 45 | "\n", 46 | "A MibiImage instance can be created with as little as a numpy array of count data and a list of channel names. Additional args in the constructor allow you to add relevant metadata, including run name and point name. For a full list of metadata args, see the [class documentation](https://ionpath.github.io/mibilib/mibidata.html#module-mibidata.mibi_image). Note that channels can be specified as either a list of integer masses, a list of string channel names, or a list of tuples of (integer mass, string channel name)." 47 | ] 48 | }, 49 | { 50 | "cell_type": "markdown", 51 | "metadata": {}, 52 | "source": [ 53 | "#### From a numpy array" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": null, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "# Create some random channel data. MIBI data is typically 16-bit uint, so\n", 63 | "# limit range to (0, 65535). Image size here is 128 x 128 pixels with 3\n", 64 | "# channels.\n", 65 | "random_channel_data = np.random.randint(0, 65535, (128, 128, 3), dtype=np.uint16)\n", 66 | "channels = ['Channel 1', 'Channel 2', 'Channel 3']\n", 67 | "random_mibi_image = MibiImage(\n", 68 | " random_channel_data, channels,\n", 69 | " run='Random Run', fov_name='Random Point Name')\n", 70 | "print(random_mibi_image)\n", 71 | "for key, val in random_mibi_image.metadata().items():\n", 72 | " print(f'{key}: {val}')" 73 | ] 74 | }, 75 | { 76 | "cell_type": "markdown", 77 | "metadata": {}, 78 | "source": [ 79 | "The most common and easiest way to import data into a MibiImage instance is to use the [mibidata.tiff.read](https://ionpath.github.io/mibilib/mibidata.html#module-mibidata.tiff) method. This method loads a Multiplexed MIBITIFF file that has been saved in the MIBITIFF format, which is the output of the IONpath TIFF Generator. In this case, all of the metadata is already saved with the image, and so the filename is the only parameter necessary." 80 | ] 81 | }, 82 | { 83 | "cell_type": "markdown", 84 | "metadata": {}, 85 | "source": [ 86 | "#### From a local file" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": null, 92 | "metadata": {}, 93 | "outputs": [], 94 | "source": [ 95 | "image = tiff.read('/path/to/mibi_tiff.tiff')\n", 96 | "image" 97 | ] 98 | }, 99 | { 100 | "cell_type": "markdown", 101 | "metadata": {}, 102 | "source": [ 103 | "#### From MIBItracker" 104 | ] 105 | }, 106 | { 107 | "cell_type": "code", 108 | "execution_count": null, 109 | "metadata": { 110 | "scrolled": false 111 | }, 112 | "outputs": [], 113 | "source": [ 114 | "# Load MIBItracker credentials from file.\n", 115 | "# For details about credentials, please review the 'MIBItracker_API_Tutorial.ipynb' notebook.\n", 116 | "from dotenv import load_dotenv\n", 117 | "fname_login = '/path/to/MIBItracker_login.dat'\n", 118 | "load_dotenv(fname_login)\n", 119 | "\n", 120 | "# This assumes your MIBItracker credentials are saved as environment variables.\n", 121 | "mr = request_helpers.MibiRequests(\n", 122 | " os.getenv('MIBITRACKER_PUBLIC_URL'),\n", 123 | " os.getenv('MIBITRACKER_PUBLIC_EMAIL'),\n", 124 | " os.getenv('MIBITRACKER_PUBLIC_PASSWORD')\n", 125 | ")\n", 126 | "image_ids = (17, 26)\n", 127 | "images = []\n", 128 | "for image_id in image_ids:\n", 129 | " im = mr.get_mibi_image(image_id)\n", 130 | " images.append(im)" 131 | ] 132 | }, 133 | { 134 | "cell_type": "markdown", 135 | "metadata": {}, 136 | "source": [ 137 | "

## MibiImage Slicing

\n", 138 | "\n", 139 | "Once loaded or created, the MibiImage class provides convenience methods for slicing, copying, and saving MIBI data. For example, we can select out certain channels of interest from the entire multiplexed image. An image can be sliced using a list of channel names either with the `slice_data` method or indexing." 140 | ] 141 | }, 142 | { 143 | "cell_type": "code", 144 | "execution_count": null, 145 | "metadata": {}, 146 | "outputs": [], 147 | "source": [ 148 | "image = images[0]\n", 149 | "image.channels" 150 | ] 151 | }, 152 | { 153 | "cell_type": "code", 154 | "execution_count": null, 155 | "metadata": {}, 156 | "outputs": [], 157 | "source": [ 158 | "data_of_interest = image['dsDNA', 'beta-tubulin', 'HLA class 1 A, B, and C', 'Na-K-ATPase alpha1']\n", 159 | "print(type(data_of_interest), np.shape(data_of_interest))" 160 | ] 161 | }, 162 | { 163 | "cell_type": "markdown", 164 | "metadata": {}, 165 | "source": [ 166 | "If the channel masses are provided, they can also be used to slice the data." 167 | ] 168 | }, 169 | { 170 | "cell_type": "code", 171 | "execution_count": null, 172 | "metadata": {}, 173 | "outputs": [], 174 | "source": [ 175 | "data_of_interest = image[89, 166, 160, 176]\n", 176 | "print(type(data_of_interest), np.shape(data_of_interest))" 177 | ] 178 | }, 179 | { 180 | "cell_type": "markdown", 181 | "metadata": {}, 182 | "source": [ 183 | "The above commands that slice the data by index are equivalent to `MibiImage.slice_data()`.\n", 184 | "\n", 185 | "To create a new MibiImage instance from a subset of the channel, use `MibiImage.slice_image()`." 186 | ] 187 | }, 188 | { 189 | "cell_type": "code", 190 | "execution_count": null, 191 | "metadata": {}, 192 | "outputs": [], 193 | "source": [ 194 | "sub_image = image.slice_image(['dsDNA', 'beta-tubulin', 'HLA class 1 A, B, and C', 'Na-K-ATPase alpha1'])\n", 195 | "print(type(sub_image), np.shape(sub_image.data))" 196 | ] 197 | }, 198 | { 199 | "cell_type": "markdown", 200 | "metadata": {}, 201 | "source": [ 202 | "

## Image Visualization

\n", 203 | "\n", 204 | "We can\n", 205 | "- slice out three channels for a simple RGB view\n", 206 | "- rotate that to CYM\n", 207 | "- use the `mibidata.color.composite` method to select a hue\n", 208 | "- use the `mibidata.color.composite` with up to 8 different colors" 209 | ] 210 | }, 211 | { 212 | "cell_type": "code", 213 | "execution_count": null, 214 | "metadata": {}, 215 | "outputs": [], 216 | "source": [ 217 | "fig, ax = plt.subplots(2, 2, figsize=(9.5, 8))\n", 218 | "\n", 219 | "rgb = image[['Keratin', 'dsDNA', 'CD45']]\n", 220 | "rgb = np.power(rgb / rgb.max(axis=(0, 1)), 1/2)\n", 221 | "ax[0, 0].imshow(rgb)\n", 222 | "ax[0, 0].set_title('Red: Keratin, Green: dsDNA, Blue: CD45')\n", 223 | "\n", 224 | "cym = color.rgb2cym(rgb)\n", 225 | "ax[0, 1].imshow(cym)\n", 226 | "ax[0, 1].set_title('Cyan: Keratin, Yellow: dsDNA, Magenta: CD45')\n", 227 | "\n", 228 | "ax[1, 0].imshow(color.composite(image, {'Orange': 'Lamin A/C'}))\n", 229 | "ax[1, 0].set_title('Orange: Lamin A/C')\n", 230 | "\n", 231 | "overlay = color.composite(\n", 232 | " image,\n", 233 | " {\n", 234 | " 'Green': 'CD3',\n", 235 | " 'Cyan': 'CD4',\n", 236 | " 'Yellow': 'CD8',\n", 237 | " 'Magenta': 'CD68'\n", 238 | " })\n", 239 | "ax[1, 1].imshow(overlay)\n", 240 | "ax[1, 1].set_title('Cyan: CD4, Yellow: CD8\\n'\n", 241 | " 'Magenta: CD68, Green: CD3')\n", 242 | "\n", 243 | "fig.tight_layout()" 244 | ] 245 | }, 246 | { 247 | "cell_type": "markdown", 248 | "metadata": {}, 249 | "source": [ 250 | "

## Image Manipulation

\n", 251 | "\n", 252 | "A deep copy of MibiImage instances can be made using the `copy` method." 253 | ] 254 | }, 255 | { 256 | "cell_type": "code", 257 | "execution_count": null, 258 | "metadata": {}, 259 | "outputs": [], 260 | "source": [ 261 | "new_image = image.copy()\n", 262 | "new_image.channels" 263 | ] 264 | }, 265 | { 266 | "cell_type": "markdown", 267 | "metadata": {}, 268 | "source": [ 269 | "To alter channel data in the image, you can access the `.data` property with the indices of the channels of interest." 270 | ] 271 | }, 272 | { 273 | "cell_type": "code", 274 | "execution_count": null, 275 | "metadata": {}, 276 | "outputs": [], 277 | "source": [ 278 | "new_image.data[:, :, new_image.channel_inds('CD45')] = filters.median(image['CD45'])\n", 279 | "\n", 280 | "def show_image(data, axis, title):\n", 281 | " cb = axis.imshow(data)\n", 282 | " divider = make_axes_locatable(axis)\n", 283 | " cax = divider.append_axes('right', size='5%', pad=0.05)\n", 284 | " plt.colorbar(cb, cax=cax)\n", 285 | " axis.set_title(title)\n", 286 | "\n", 287 | "fig, ax = plt.subplots(2, 2, figsize=(9.5, 8))\n", 288 | "show_image(image['CD45'], ax[0, 0], 'Raw CD45: gamma = 1')\n", 289 | "show_image(new_image['CD45'], ax[0, 1], 'Median-Filtered CD45: gamma = 1')\n", 290 | "show_image(np.power(image['CD45'], 1/2), ax[1, 0],\n", 291 | " 'Raw CD45: gamma = 1/2')\n", 292 | "show_image(np.power(new_image['CD45'], 1/2), ax[1, 1],\n", 293 | " 'Median-Filtered CD45: gamma = 1/2')\n", 294 | "fig.tight_layout()" 295 | ] 296 | }, 297 | { 298 | "cell_type": "markdown", 299 | "metadata": {}, 300 | "source": [ 301 | "Rather than scaling a single channel, we can also apply a default scaling to all channels, and then display an RGB overlay of a 3-D slice." 302 | ] 303 | }, 304 | { 305 | "cell_type": "code", 306 | "execution_count": null, 307 | "metadata": {}, 308 | "outputs": [], 309 | "source": [ 310 | "def scale(image, gamma=1/2):\n", 311 | " \"\"\"Scales each 2-D slice of an N-D image to a brightened version in the [0, 1] range.\"\"\"\n", 312 | " \n", 313 | " scaled = np.divide(\n", 314 | " image.data.astype(float),\n", 315 | " image.data.max(axis=(0, 1)))\n", 316 | " scaled = np.power(scaled, gamma)\n", 317 | " image.data = scaled" 318 | ] 319 | }, 320 | { 321 | "cell_type": "code", 322 | "execution_count": null, 323 | "metadata": {}, 324 | "outputs": [], 325 | "source": [ 326 | "scaled_image = image.copy()\n", 327 | "scale(scaled_image, 1/3)\n", 328 | "\n", 329 | "fig, ax = plt.subplots(1, 2, figsize=(9.5, 4), sharex=True, sharey=True)\n", 330 | "\n", 331 | "ax[0].imshow(scaled_image[['CD68', 'Lamin A/C', 'dsDNA']])\n", 332 | "ax[0].set_title('CD68 - Lamin A/C - dsDNA')\n", 333 | "ax[1].imshow(scaled_image[['CD31', 'CD8', 'Keratin']])\n", 334 | "ax[1].set_title('CD31 - CD8 - Keratin')\n", 335 | "\n", 336 | "ax[0].set_xlim((0, 512))\n", 337 | "ax[0].set_ylim((1024, 512))" 338 | ] 339 | }, 340 | { 341 | "cell_type": "markdown", 342 | "metadata": {}, 343 | "source": [ 344 | "

## Saving MibiImages

\n", 345 | "\n", 346 | "A MibiImage object can be saved as either a multiplexed MIBITIFF or a series of individual channel PNGs." 347 | ] 348 | }, 349 | { 350 | "cell_type": "markdown", 351 | "metadata": {}, 352 | "source": [ 353 | "When saving a series of channel PNGs, the folder to save to must be specified. The filenames of each PNG will be generated from the channel names of the MibiImage object, either the string label or the target only if they the channels are (mass, target) tuples." 354 | ] 355 | }, 356 | { 357 | "cell_type": "code", 358 | "execution_count": null, 359 | "metadata": {}, 360 | "outputs": [], 361 | "source": [ 362 | "tiff.write('/path/to/file.tiff', new_image)" 363 | ] 364 | }, 365 | { 366 | "cell_type": "code", 367 | "execution_count": null, 368 | "metadata": {}, 369 | "outputs": [], 370 | "source": [ 371 | "new_image.export_pngs('/path/to/folder')\n", 372 | "outputs = [png_name for png_name in os.listdir('/path/to/folder/') if os.path.splitext(png_name)[-1] == '.png']\n", 373 | "outputs" 374 | ] 375 | }, 376 | { 377 | "cell_type": "markdown", 378 | "metadata": {}, 379 | "source": [ 380 | "The exported TIFF or PNGs can then be uploaded to MIBItracker using the `mibitracker` module. See the Python section of the [MIBItracker Tutorial](https://github.com/ionpath/mibilib/blob/master/MIBItracker_API_Tutorial.ipynb) for more information on uploading TIFF or PNG files." 381 | ] 382 | } 383 | ], 384 | "metadata": { 385 | "kernelspec": { 386 | "display_name": "Python 3 (ipykernel)", 387 | "language": "python", 388 | "name": "python3" 389 | }, 390 | "language_info": { 391 | "codemirror_mode": { 392 | "name": "ipython", 393 | "version": 3 394 | }, 395 | "file_extension": ".py", 396 | "mimetype": "text/x-python", 397 | "name": "python", 398 | "nbconvert_exporter": "python", 399 | "pygments_lexer": "ipython3", 400 | "version": "3.11.0" 401 | } 402 | }, 403 | "nbformat": 4, 404 | "nbformat_minor": 2 405 | } 406 | -------------------------------------------------------------------------------- /MIBItracker_API_Tutorial.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "

# MIBItracker REST API Tutorial

\n", 8 | "\n", 9 | "In this tutorial, we show how to obtain an authorization token for the MIBItracker REST API, and some basic requests that can be performed with it.\n", 10 | "\n", 11 | "This tutorial references the BACKEND_URL as the base URL for all requests, which is different from the URL of the frontend application. You can find it listed in the MIBItracker About page, which is available from the menu in the upper right corner under your username.\n", 12 | "\n", 13 | "In order to access some sample data to run the tutorials, you can create an account using the following frontend URL: https://mibi-share.ionpath.com. In which case, the backend URL will be: https://backend-dot-mibitracker-share.appspot.com.\n", 14 | "\n", 15 | "This notebook shows four different ways of interacting with the API, with the Python example containing the most detail, but as with any REST API you should be able to choose the language/method of your choice even if it is not included here.\n", 16 | "\n", 17 | "## Table of Contents\n", 18 | "\n", 19 | "1. [API Reference](#API-Reference)\n", 20 | "2. [Postman](#Postman)\n", 21 | "3. [Python](#Python)\n", 22 | "4. [cURL](#cURL)" 23 | ] 24 | }, 25 | { 26 | "cell_type": "markdown", 27 | "metadata": {}, 28 | "source": [ 29 | "## API Reference\n", 30 | "\n", 31 | "A complete reference of all routes available including required and optional parameters can be found using the [Swagger Specification](https://swagger.io/specification/) at [https://[BACKEND_URL]/docs/](). In order to access that, you will first need to log in specifically to the backend with your MIBItracker username and password at the base [https://[BACKEND_URL]](), and *then* navigate to the [/docs/]() route. Note that only the routes which the currently logged-in user is authorized to access will be displayed. A screenshot of the docs with the available routes for tissues expanded is shown below:\n", 32 | "\n", 33 | "![API Screenshot](./images/swagger_tissues_endpoints.png)\n", 34 | "\n", 35 | "You may try out the GET routes directly in the docs. For example, see below for what the request and response is when listing all tissues. Note that most routes in the API return results in a paginated manner to limit large queries. Executing the GET request for `/images/`, for instance, will return the most recent 10 images:\n", 36 | "\n", 37 | "![Tissue Screenshot](./images/swagger_tissues_all.png)\n", 38 | "\n", 39 | "For more information on paginated results, see the [Python](#Python) section below.\n", 40 | "\n", 41 | "Filtering by `?organ=thymus` reduces the results to a single item:\n", 42 | "\n", 43 | "![Filtered Tissue Screenshot](./images/swagger_tissues_filtered.png)\n", 44 | "\n", 45 | "You may also query for all images that have a specific tissue type:\n", 46 | "\n", 47 | "![Images by Tissue Screenshot](./images/swagger_images_by_tissue.png)\n", 48 | "\n", 49 | "Expand out any of the other routes to see their details. Note that onlly the `GET` requests are available to try out directly from the docs; read on for how to make general requests using [Postman](#Postman), [Python](#Python), [cURL](#cURL), or the language of your choice." 50 | ] 51 | }, 52 | { 53 | "cell_type": "markdown", 54 | "metadata": { 55 | "collapsed": true 56 | }, 57 | "source": [ 58 | "## Postman\n", 59 | "\n", 60 | "Postman is a GUI application for sending requests to a server, and can be useful to help visualize the way the requests are sent and responses handled. It can be downloaded [here](https://www.getpostman.com/). 
To begin, you will need to obtain an authorization token by sending a POST request with your email address and password. First, change the method dropdown next to the URL bar from GET to POST and enter https://[BACKEND_URL]/api-token-auth/ as the request URL. Then, in the Headers tab, add a key: \"Content-Type\" and a value: \"application/json\" as shown in the screenshot below:\n", 61 | "\n", 62 | "![Postman Auth Header](./images/postman-auth-header.PNG)\n", 63 | "\n", 64 | "Then, in the Body tab add a key-value for your email and password, respectively:\n", 65 | "\n", 66 | "![Postman Auth Body](./images/postman-auth-body.PNG)\n", 67 | "\n", 68 | "Hit the blue Send button to send the request. When the response is returned, you should see a single key-value pair in the response body area. The value is a long string of characters; that is your authorization token:\n", 69 | "\n", 70 | "![Postman Auth Token](./images/postman-auth-token.PNG)\n", 71 | "\n", 72 | "To use this token, create a new request and, this time, add a header with the key \"Authorization\" and the value \"JWT token_value\", where token_value is the string from above:\n", 73 | "\n", 74 | "![Postman Token](./images/postman-token.PNG)\n", 75 | "\n", 76 | "As an example, we will then get a specific image from the MIBItracker. The route for retrieving a specific image is `TRACKER_BACKEND_URL/images/[id]`, where 'id' is the primary key of the image. For example, to retrieve the image with id = 429, enter the formatted URL into the URL bar:\n", 77 | "\n", 78 | "![Postman Images Retrieve](./images/postman-images-retrieve.PNG)\n", 79 | "\n", 80 | "The result is a JSON object containing the properties of the image. As you can see from the first key, it has an id of 429, which is the id we used in the example URL above.\n", 81 | "\n", 82 | "For more advanced examples of using the API, such as performing an advanced search of images, see the Python examples below. As shown in the [API Reference](#API-Reference) section above, visit [https://[INSTANCE_NAME].ionpath.com/tracker/about]() and click the \"API Documentation\" link to view a list of available routes for the API." 83 | ] 84 | }, 85 | { 86 | "cell_type": "markdown", 87 | "metadata": {}, 88 | "source": [ 89 | "## Python\n", 90 | "\n", 91 | "Requests to the API can be made in Python using the `MibiRequests` class from [mibilib](https://github.com/ionpath/mibilib), an open source library for interacting with the MIBItracker API. After downloading the library, make sure to add it to your `PYTHONPATH`." 92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "import json\n", 101 | "import os\n", 102 | "from mibitracker.request_helpers import MibiRequests" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "The first step to using `request_helpers` is to create an instance of `MibiRequests` and obtain an authorization token. Note that once a token has been acquired, it will only be valid for 24 hours, and another token must be obtained afterwards.\n", 110 | "\n", 111 | "This code assumes your MIBItracker email and password are stored in environment variables. 
92 | ] 93 | }, 94 | { 95 | "cell_type": "code", 96 | "execution_count": null, 97 | "metadata": {}, 98 | "outputs": [], 99 | "source": [ 100 | "import json\n", 101 | "import os\n", 102 | "from mibitracker.request_helpers import MibiRequests" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "The first step to using `request_helpers` is to create an instance of `MibiRequests` and obtain an authorization token. Note that once a token has been acquired, it is only valid for 24 hours, after which a new token must be obtained.\n", 110 | "\n", 111 | "This code assumes your MIBItracker email and password are stored in environment variables. Please use care in properly securing these credentials.\n", 112 | "\n", 113 | "One way to do this is to store the credentials in a text file called `MIBItracker_login.dat` with the following content:\n", 114 | "```bash\n", 115 | "MIBITRACKER_PUBLIC_URL=https://backend-dot-mibitracker-share.appspot.com\n", 116 | "MIBITRACKER_PUBLIC_EMAIL=your@email.com\n", 117 | "MIBITRACKER_PUBLIC_PASSWORD=YourSecurePassword123!?@\n", 118 | "\n", 119 | "```\n", 120 | "\n", 121 | "Remember to restrict access to the file. In `bash` this can be done with the following command:\n", 122 | "`chmod 600 MIBItracker_login.dat`" 123 | ] 124 | },
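{ "cell_type": "markdown", "metadata": {}, "source": [ "Alternatively, if you would rather not keep credentials on disk at all, you can prompt for them interactively instead of running the file-based cell below. A minimal sketch using only the Python standard library:" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Prompt for credentials instead of loading them from a file.\n", "from getpass import getpass\n", "BACKEND_URL = input('MIBItracker backend URL: ')\n", "email = input('MIBItracker email: ')\n", "password = getpass('MIBItracker password: ')" ] },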
\n", 122 | "`chmod 600 MIBItracker_login.dat`" 123 | ] 124 | }, 125 | { 126 | "cell_type": "code", 127 | "execution_count": null, 128 | "metadata": {}, 129 | "outputs": [], 130 | "source": [ 131 | "# Load MIBItracker credentials from file.\n", 132 | "from dotenv import load_dotenv\n", 133 | "fname_login = '/path/to/MIBItracker_login.dat'\n", 134 | "load_dotenv(fname_login)\n", 135 | "\n", 136 | "# This assumes your MIBItracker credentials are saved as environment variables.\n", 137 | "email = os.getenv('MIBITRACKER_PUBLIC_EMAIL')\n", 138 | "password = os.getenv('MIBITRACKER_PUBLIC_PASSWORD')\n", 139 | "BACKEND_URL = os.getenv('MIBITRACKER_PUBLIC_URL')\n", 140 | "\n", 141 | "mr = MibiRequests(BACKEND_URL, email, password)" 142 | ] 143 | }, 144 | { 145 | "cell_type": "markdown", 146 | "metadata": {}, 147 | "source": [ 148 | "`MibiRequests` contains helper functions for common routes used when accessing the API. These can easily be called using an authorized instance of `MibiRequests`.\n", 149 | "\n", 150 | "For example, to obtain an array of metadata for all images in a particular run:" 151 | ] 152 | }, 153 | { 154 | "cell_type": "code", 155 | "execution_count": null, 156 | "metadata": {}, 157 | "outputs": [], 158 | "source": [ 159 | "run_name = '20180122_1242_1245'\n", 160 | "image_list = mr.run_images(run_name)\n", 161 | "\n", 162 | "print('{} images found from run {}: \\n'.format(len(image_list), run_name))\n", 163 | "for im in image_list:\n", 164 | " print('\\t{}: {}'.format(im['folder'], im['point']))" 165 | ] 166 | }, 167 | { 168 | "cell_type": "markdown", 169 | "metadata": {}, 170 | "source": [ 171 | "To upload an additional channel to an existing image, i.e. a segmentation mask, use the `MibiRequests.upload_channel` route:" 172 | ] 173 | }, 174 | { 175 | "cell_type": "code", 176 | "execution_count": null, 177 | "metadata": {}, 178 | "outputs": [], 179 | "source": [ 180 | "image_id = 164\n", 181 | "png_filename = 'segmentation_labels.png'\n", 182 | "mr.upload_channel(image_id, png_filename)" 183 | ] 184 | }, 185 | { 186 | "cell_type": "markdown", 187 | "metadata": {}, 188 | "source": [ 189 | "Not all routes available in the MIBItracker API have specific helper functions implemented in the `MibiRequests` class. However, an authenticated `MibiRequests` instance can still be used to access the routes using the correct HTTP verb. The HTTP verbs listed on the documentation page correspond to `MibiRequests` methods.\n", 190 | "\n", 191 | "As shown in the [API Reference](#API-Reference) section above, visit [https://[BACKEND_URL/docs/]() to view a list of all available routes for the API. Note that the `mibilib` library is open source, and contributions for helper functions using the API are welcome.\n", 192 | "\n", 193 | "For example, to get a list of all images:" 194 | ] 195 | }, 196 | { 197 | "cell_type": "code", 198 | "execution_count": null, 199 | "metadata": {}, 200 | "outputs": [], 201 | "source": [ 202 | "response_images = mr.get('/images/')\n", 203 | "images_response = response_images.json()\n", 204 | "print('All images: {} images have been returned.'.format(len(images_response['results'])))" 205 | ] 206 | }, 207 | { 208 | "cell_type": "markdown", 209 | "metadata": {}, 210 | "source": [ 211 | "Notice that this doesn't return *all* the items, as this can get quite large. As a result, most API routes default to returning results in a paginated manner. 
For instance, if we inspect the response json from the previous query, we can see that it's a dictionary containing the paginated results:" 212 | ] 213 | }, 214 | { 215 | "cell_type": "code", 216 | "execution_count": null, 217 | "metadata": {}, 218 | "outputs": [], 219 | "source": [ 220 | "images_response" 221 | ] 222 | }, 223 | { 224 | "cell_type": "markdown", 225 | "metadata": {}, 226 | "source": [ 227 | "The paginated dictionary contains four keys:\n", 228 | "- count: The complete number of records matching the query, regardless of pagination.\n", 229 | "- next: A request URL that, if executed, will return the *next* page of results.\n", 230 | "- previous: A request URL that, if executed, will return the *previous* page of results.\n", 231 | "- results: The records matching the query for the current page of results.\n", 232 | "\n", 233 | "To paginate the results, use the `limit` and `offset` parameters to specify how many records to return in a response and the index into the full list at which to begin. If `limit` and `offset` are not specified (as was the case in the `mr.get('/images/')` example above), they default to 10 and 0, respectively. To specify exact values of `limit` and `offset`:" 234 | ] 235 | }, 236 | { 237 | "cell_type": "code", 238 | "execution_count": null, 239 | "metadata": {}, 240 | "outputs": [], 241 | "source": [ 242 | "images_first_page = mr.get(\n", 243 | " '/images/',\n", 244 | " params={'limit': 30, 'offset': 0})\n", 245 | "images_first_page = images_first_page.json()\n", 246 | "print('First page: {}/{} images have been returned in the first page\\'s results.'.format(\n", 247 | " len(images_first_page['results']), images_first_page['count']))\n", 248 | "\n", 249 | "images_second_page = mr.get(\n", 250 | " '/images/',\n", 251 | " params={'limit': 30, 'offset': 30})\n", 252 | "images_second_page = images_second_page.json()\n", 253 | "print('Second page: {}/{} images have been returned in the second page\\'s results.'.format(\n", 254 | " len(images_second_page['results']), images_second_page['count']))" 255 | ] 256 | },
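{ "cell_type": "markdown", "metadata": {}, "source": [ "To retrieve *every* record, you can advance `offset` one page at a time until the `next` key is empty. A minimal sketch using the keys described above (the page size of 100 is arbitrary):" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Collect all image records page by page.\n", "all_images = []\n", "limit = 100\n", "offset = 0\n", "while True:\n", "    page = mr.get('/images/', params={'limit': limit, 'offset': offset}).json()\n", "    all_images.extend(page['results'])\n", "    offset += limit\n", "    if page['next'] is None:\n", "        break\n", "\n", "print('Retrieved {}/{} images.'.format(len(all_images), page['count']))" ] },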
257 | { 258 | "cell_type": "markdown", 259 | "metadata": {}, 260 | "source": [ 261 | "A similar approach could be used to obtain a list of all images from the run `20171207_1060_1130` (note the use of a double underscore in the query parameter here):" 262 | ] 263 | }, 264 | { 265 | "cell_type": "code", 266 | "execution_count": null, 267 | "metadata": {}, 268 | "outputs": [], 269 | "source": [ 270 | "run_name = '20171207_1060_1130'\n", 271 | "response_images = mr.get(\n", 272 | " '/images/?run__name={}'.format(run_name))\n", 273 | "image_list = response_images.json()['results']\n", 274 | "\n", 275 | "print('{} images found from run {}: \\n'.format(len(image_list), run_name))\n", 276 | "for im in image_list:\n", 277 | " print('\\t{}: {}'.format(im['folder'], im['point']))" 278 | ] 279 | }, 280 | { 281 | "cell_type": "markdown", 282 | "metadata": {}, 283 | "source": [ 284 | "A route exists for advanced searching of images, with additional fields available to filter the results, including a range of run dates, antibody targets, and others. To use the advanced search for images containing `lamin` with the status `A` (for available):" 285 | ] 286 | }, 287 | { 288 | "cell_type": "code", 289 | "execution_count": null, 290 | "metadata": {}, 291 | "outputs": [], 292 | "source": [ 293 | "response_advanced = mr.get(\n", 294 | " '/images/search_advanced/?antibodyTarget={target}&status={status}'.format(\n", 295 | " target='Lamin', status='A'))\n", 296 | "advanced_list = response_advanced.json()\n", 297 | "\n", 298 | "print('{} images found containing \"Lamin\" that have status \"Available\"'.format(\n", 299 | " advanced_list['count']))" 300 | ] 301 | }, 302 | { 303 | "cell_type": "markdown", 304 | "metadata": {}, 305 | "source": [ 306 | "Most routes also allow you to select a single item of that type if you know the item's primary key (id) in the database. In this case, the id is part of the route and not specified as a query parameter or request data. For example, to get the properties of slide id 5:" 307 | ] 308 | }, 309 | { 310 | "cell_type": "code", 311 | "execution_count": null, 312 | "metadata": { 313 | "scrolled": false 314 | }, 315 | "outputs": [], 316 | "source": [ 317 | "slide_id = 5\n", 318 | "single_slide = mr.get('/slides/{}/'.format(slide_id))\n", 319 | "\n", 320 | "# Print out result with some nice formatting\n", 321 | "print(json.dumps(single_slide.json(), indent=4))" 322 | ] 323 | }, 324 | { 325 | "cell_type": "markdown", 326 | "metadata": {}, 327 | "source": [ 328 | "There are also routes for details such as `/images/{id}/channelnames/` to return only the list of targets for a given image:" 329 | ] 330 | }, 331 | { 332 | "cell_type": "code", 333 | "execution_count": null, 334 | "metadata": { 335 | "scrolled": false 336 | }, 337 | "outputs": [], 338 | "source": [ 339 | "image_id = 17\n", 340 | "image_channelnames = mr.get('/images/{}/channelnames/'.format(image_id))\n", 341 | "\n", 342 | "print(json.dumps(image_channelnames.json(), indent=4))" 343 | ] 344 | }, 345 | { 346 | "cell_type": "markdown", 347 | "metadata": {}, 348 | "source": [ 349 | "POST and PUT routes generally require data to be sent with the request. Some POST and PUT routes will require multiple pieces of data, such as POSTing a new image set. The data will need to be supplied as a JSON dictionary converted to a string, and the content type of the request will need to be set to JSON. In the example below, the images list is expecting the IDs of the images to add to the new image set." 350 | ] 351 | }, 352 | { 353 | "cell_type": "code", 354 | "execution_count": null, 355 | "metadata": {}, 356 | "outputs": [], 357 | "source": [ 358 | "data = {\n", 359 | " 'name': 'New Image Set',\n", 360 | " 'description': 'This image set was created from the API',\n", 361 | " 'images': [10, 11, 12, 13]\n", 362 | "}\n", 363 | "headers_with_content_type = {\n", 364 | " 'content-type': 'application/json'\n", 365 | "}\n", 366 | "response_imageset = mr.post('/imagesets/',\n", 367 | " data=json.dumps(data),\n", 368 | " headers=headers_with_content_type)\n", 369 | "\n", 370 | "response_imageset" 371 | ] 372 | }, 373 | { 374 | "cell_type": "markdown", 375 | "metadata": { 376 | "collapsed": true 377 | }, 378 | "source": [ 379 | "## cURL\n", 380 | "\n", 381 | "An alternative that works from the command line is to use cURL to access the API.\n", 382 | "\n", 383 | "Before using any of the routes available in the API, an authorization token must be obtained. Note that once a token has been acquired, it is only valid for 24 hours, after which a new token must be obtained.\n",
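"\n", "The request below returns a JSON object containing the token. To capture it directly into a shell variable, you can pipe the response through [jq](https://stedolan.github.io/jq/) (this assumes `jq` is installed and that the response key is named `token`):\n", "```bash\n", "TOKEN=$(curl --request POST --header \"Content-Type: application/json\" \\\n", "    --data \"$data\" $TRACKER_BACKEND_URL/api-token-auth/ | jq -r '.token')\n", "```"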
384 | ] 385 | }, 386 | { 387 | "cell_type": "code", 388 | "execution_count": null, 389 | "metadata": {}, 390 | "outputs": [], 391 | "source": [ 392 | "%%bash\n", 393 | "\n", 394 | "# Assuming $TRACKER_BACKEND_URL, $TRACKER_EMAIL and $TRACKER_PASSWORD\n", 395 | "# have already been set.\n", 396 | "data='{\"email\": \"'\"$TRACKER_EMAIL\"'\", \"password\": \"'\"$TRACKER_PASSWORD\"'\"}'\n", 397 | "\n", 398 | "curl --request POST --header \"Content-Type: application/json\" \\\n", 399 | " --data \"$data\" $TRACKER_BACKEND_URL/api-token-auth/" 400 | ] 401 | }, 402 | { 403 | "cell_type": "markdown", 404 | "metadata": {}, 405 | "source": [ 406 | "To view a list of available routes for the API, visit [https://[INSTANCE_NAME].ionpath.com/tracker/about]() and click the \"API Documentation\" link.\n", 407 | "\n", 408 | "For example, to get a list of all tissues, assuming you've stored the authorization token as `$TOKEN`:" 409 | ] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "metadata": {}, 415 | "outputs": [], 416 | "source": [ 417 | "%%bash\n", 418 | "\n", 419 | "curl --header \"Content-Type: application/json\" \\\n", 420 | " --header \"Authorization: JWT $TOKEN\" $TRACKER_BACKEND_URL/tissues/" 421 | ] 422 | }, 423 | { 424 | "cell_type": "markdown", 425 | "metadata": {}, 426 | "source": [ 427 | "For more advanced examples of using the API, such as to perform an advanced search of images, see the [Python](#Python) examples above." 428 | ] 429 | } 430 | ], 431 | "metadata": { 432 | "kernelspec": { 433 | "display_name": "Python 3 (ipykernel)", 434 | "language": "python", 435 | "name": "python3" 436 | }, 437 | "language_info": { 438 | "codemirror_mode": { 439 | "name": "ipython", 440 | "version": 3 441 | }, 442 | "file_extension": ".py", 443 | "mimetype": "text/x-python", 444 | "name": "python", 445 | "nbconvert_exporter": "python", 446 | "pygments_lexer": "ipython3", 447 | "version": "3.11.0" 448 | } 449 | }, 450 | "nbformat": 4, 451 | "nbformat_minor": 2 452 | } 453 | -------------------------------------------------------------------------------- /mibidata/tiff.py: -------------------------------------------------------------------------------- 1 | """Read and write to and from IONpath MIBItiff files. 2 | 3 | Copyright (C) 2021 Ionpath, Inc. All rights reserved.""" 4 | 5 | import collections 6 | from fractions import Fraction 7 | import datetime 8 | import json 9 | import os 10 | import warnings 11 | 12 | import numpy as np 13 | from tifffile import TiffFile, TiffWriter 14 | 15 | from mibidata import mibi_image as mi, util 16 | 17 | # Increment this when making functional changes. 18 | SOFTWARE_VERSION = 'IonpathMIBIv1.0' 19 | # These are reserved by the tiff writer and cannot be specified by the user. 20 | RESERVED_MIBITIFF_ATTRIBUTES = ('image.type', 'SIMS', 'channel.mass', 21 | 'channel.target', 'shape') 22 | # Coordinates of where the slide labels are within the optical image. 
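# Each is of the form ((row_start, row_stop), (col_start, col_stop)) in pixels, and is used in write() below to crop the slide label out of the optical image.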
23 | _TOP_LABEL_COORDINATES = ((570, 1170), (355, 955)) 24 | _BOTTOM_LABEL_COORDINATES = ((1420, 2020), (355, 955)) 25 | # Datetime format saved by TiffFile 26 | _DATETIME_FORMAT = '%Y:%m:%d %H:%M:%S' 27 | # Conversion factor from micron to cm 28 | _MICRONS_PER_CM = 10000 29 | # Max denominator for rational arguments in tifffile.py 30 | _MAX_DENOMINATOR = 1000000 31 | 32 | REQUIRED_METADATA_ATTRIBUTES = ('fov_id', 'fov_name', 'run', 'folder', 33 | 'dwell', 'scans', 'mass_gain', 'mass_offset', 34 | 'time_resolution', 'coordinates', 'size', 35 | 'masses', 'targets') 36 | 37 | def _micron_to_cm(arg): 38 | """Converts microns (1cm = 1e4 microns) to a fraction tuple in cm.""" 39 | frac = Fraction(float(arg) / _MICRONS_PER_CM).limit_denominator( 40 | _MAX_DENOMINATOR) 41 | return frac.numerator, frac.denominator 42 | 43 | 44 | def _cm_to_micron(arg): 45 | """Converts cm fraction to microns (1cm = 1e4 microns).""" 46 | return float(arg[0]) / float(arg[1]) * _MICRONS_PER_CM 47 | 
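# For example, _micron_to_cm(500) returns (1, 20), i.e. 0.05 cm expressed as a (numerator, denominator) pair, and _cm_to_micron((1, 20)) recovers 500.0 microns.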
99 | """ 100 | if not isinstance(image, mi.MibiImage): 101 | raise ValueError('image must be a mibidata.mibi_image.MibiImage ' 102 | 'instance.') 103 | missing_required_metadata = [m for m in REQUIRED_METADATA_ATTRIBUTES 104 | if not getattr(image, m)] 105 | if missing_required_metadata: 106 | if len(missing_required_metadata) == 1: 107 | missing_metadata_error = (f'{missing_required_metadata[0]} is ' 108 | f'required and may not be None.') 109 | else: 110 | missing_metadata_error = (f'{", ".join(missing_required_metadata)}' 111 | f' are required and may not be None.') 112 | raise ValueError(missing_metadata_error) 113 | 114 | if write_float is not None: 115 | raise ValueError('`write_float` has been deprecated. Please use the ' 116 | '`dtype` argument instead.') 117 | if dtype and not dtype in [np.float32, np.uint16]: 118 | raise ValueError('Invalid dtype specification.') 119 | 120 | if dtype == np.float32: 121 | save_dtype = np.float32 122 | range_dtype = 'd' 123 | elif dtype == np.uint16: 124 | save_dtype = np.uint16 125 | range_dtype = 'I' 126 | elif np.issubdtype(image.data.dtype, np.floating): 127 | save_dtype = np.float32 128 | range_dtype = 'd' 129 | else: 130 | save_dtype = np.uint16 131 | range_dtype = 'I' 132 | 133 | to_save = image.data.astype(save_dtype) 134 | if not np.all(np.equal(to_save, image.data)): 135 | raise ValueError('Cannot convert data from ' 136 | f'{image.data.dtype} to {save_dtype}') 137 | 138 | if ranges is None: 139 | ranges = [(0, m) for m in to_save.max(axis=(0, 1))] 140 | 141 | coordinates = [ 142 | (286, '2i', 1, _micron_to_cm(image.coordinates[0])), # x-position 143 | (287, '2i', 1, _micron_to_cm(image.coordinates[1])), # y-position 144 | ] 145 | resolution = (image.data.shape[0] * 1e4 / float(image.size), 146 | image.data.shape[1] * 1e4 / float(image.size), 147 | 'CENTIMETER') 148 | 149 | # The mibi. prefix is added to attributes defined in the spec. 150 | # Other user-defined attributes are included too but without the prefix. 151 | prefixed_attributes = mi.SPECIFIED_METADATA_ATTRIBUTES[1:] 152 | description = {} 153 | for key, value in image.metadata().items(): 154 | if key in prefixed_attributes: 155 | description[f'mibi.{key}'] = value 156 | elif key in RESERVED_MIBITIFF_ATTRIBUTES: 157 | warnings.warn(f'Skipping writing user-defined {key} to the ' 158 | f'metadata as it is a reserved attribute.') 159 | elif key != 'date': 160 | description[key] = value 161 | # TODO: Decide if should filter out those that are None or convert to empty 162 | # string so that don't get saved as 'None' 163 | 164 | if multichannel: 165 | targets = list(image.targets) 166 | util.sort_channel_names(targets) 167 | indices = image.channel_inds(targets) 168 | with TiffWriter(filename) as infile: 169 | for i in indices: 170 | metadata = description.copy() 171 | metadata.update({ 172 | 'image.type': 'SIMS', 173 | 'channel.mass': int(image.masses[i]), 174 | 'channel.target': image.targets[i], 175 | }) 176 | 177 | page_name_string = _page_name_string( 178 | image.targets[i], image.masses[i]) 179 | page_name = (285, 's', 0, page_name_string) 180 | min_value = (340, range_dtype, 1, ranges[i][0]) 181 | max_value = (341, range_dtype, 1, ranges[i][1]) 182 | page_tags = coordinates + [page_name, min_value, max_value] 183 | 184 | # Adding rowsperstrip parameter to prevent using the 185 | # auto-calculated value. The auto-calculated value results in 186 | # the "STRIP_OFFSETS directory entry is the wrong type" error. 
187 | infile.write( 188 | to_save[:, :, i], compression=8, resolution=resolution, 189 | extratags=page_tags, metadata=metadata, datetime=image.date, 190 | software=SOFTWARE_VERSION, rowsperstrip=to_save.shape[0]) 191 | if sed is not None: 192 | if sed.ndim > 2: 193 | sed = sed[:, :, 0] 194 | 195 | sed_resolution = (sed.shape[0] * 1e4 / float(image.size), 196 | sed.shape[1] * 1e4 / float(image.size), 197 | 'CENTIMETER') 198 | 199 | page_name = (285, 's', 0, 'SED') 200 | page_tags = coordinates + [page_name] 201 | infile.write( 202 | sed, compression=8, resolution=sed_resolution, 203 | extratags=page_tags, metadata={'image.type': 'SED'}, 204 | software=SOFTWARE_VERSION, rowsperstrip=sed.shape[0]) 205 | if optical is not None: 206 | infile.write(optical, compression=8, software=SOFTWARE_VERSION, 207 | metadata={'image.type': 'Optical'}, 208 | rowsperstrip=optical.shape[0]) 209 | label_coordinates = ( 210 | _TOP_LABEL_COORDINATES if image.coordinates[1] > 0 else 211 | _BOTTOM_LABEL_COORDINATES) 212 | slide_label = np.fliplr(np.moveaxis( 213 | optical[label_coordinates[0][0]:label_coordinates[0][1], 214 | label_coordinates[1][0]:label_coordinates[1][1]], 215 | 0, 1)) 216 | infile.write(slide_label, compression=8, 217 | software=SOFTWARE_VERSION, 218 | metadata={'image.type': 'Label'}, 219 | rowsperstrip=slide_label.shape[0]) 220 | 221 | else: 222 | for i in range(image.data.shape[2]): 223 | metadata = description.copy() 224 | metadata.update({ 225 | 'image.type': 'SIMS', 226 | 'channel.mass': int(image.masses[i]), 227 | 'channel.target': image.targets[i], 228 | }) 229 | # Converting to bytes string to support non-ascii characters 230 | page_name_string = _page_name_string( 231 | image.targets[i], image.masses[i]) 232 | page_name = (285, 's', 0, page_name_string) 233 | min_value = (340, range_dtype, 1, ranges[i][0]) 234 | max_value = (341, range_dtype, 1, ranges[i][1]) 235 | page_tags = coordinates + [page_name, min_value, max_value] 236 | 237 | target_filename = os.path.join( 238 | filename, f'{util.format_for_filename(image.targets[i])}.tiff') 239 | 240 | with TiffWriter(target_filename) as infile: 241 | 242 | infile.write( 243 | to_save[:, :, i], compression=8, resolution=resolution, 244 | metadata=metadata, datetime=image.date, 245 | extratags=page_tags, software=SOFTWARE_VERSION, 246 | rowsperstrip=to_save.shape[0]) 247 | 248 | 249 | def read(file, sims=True, sed=False, optical=False, label=False, 250 | masses=None, targets=None): 251 | """Reads MIBI data from an IonpathMIBI TIFF file. 252 | 253 | Args: 254 | file: The string path or an open file object to a MIBItiff file. 255 | sims: Boolean for whether to return the SIMS (MIBI) data. Defaults to 256 | True. 257 | sed: Boolean for whether to return the SED data. Defaults to False. 258 | optical: Boolean for whether to return the optical image. Defaults to 259 | False. 260 | label: Boolean for whether to return the slide label image. Defaults to 261 | False. 262 | masses: A list of integer masses. If specified, only channels 263 | corresponding to these masses will be included in the returned 264 | MibiImage. Either masses or targets can be specified, not both. 265 | targets: A list of string targets. If specified, only channels 266 | corresponding to these targets will be included in the returned 267 | MibiImage. Either masses or targets can be specified, not both. 268 | 269 | Returns: A tuple of the image types set to True in the parameters, in the 270 | order SIMS, SED, Optical, Label (but including only those types 271 | specified). 
The SIMS data will be returned as a 272 | :class:`mibidata.mibi_image.MibiImage` instance; the other image 273 | types will be returned as numpy arrays. If an image type is selected to 274 | be returned but is not present in the file, it will be returned as 275 | None. If returning SIMS data and the masses or targets parameters are 276 | set, only those channels will be included in the MibiImage instance, 277 | otherwise all channels present in the file will be returned. 278 | 279 | Raises: 280 | ValueError: Raised if 281 | 282 | * The input file is not of the IONpath MIBItiff format 283 | * No image type is selected to be returned. 284 | * Both masses and targets are specified. 285 | """ 286 | return_types = collections.OrderedDict([ 287 | ('sims', sims), ('sed', sed), ('optical', optical), ('label', label) 288 | ]) 289 | if not any(return_types.values()): 290 | raise ValueError('At least one image type must be specified to return.') 291 | if masses and targets: 292 | raise ValueError('Either masses or targets can be specified, not both.') 293 | to_return = {} 294 | metadata = {} 295 | sims_data = [] 296 | channels = [] 297 | with TiffFile(file) as tif: 298 | _check_software(tif) 299 | for page in tif.pages: 300 | description, image_type = _page_description(page) 301 | if sims and image_type == 'sims' and _include_page( 302 | description, masses, targets): 303 | _get_page_data(page, description, metadata, channels) 304 | sims_data.append(page.asarray()) 305 | elif return_types.get(image_type): 306 | to_return[image_type] = page.asarray() 307 | if sims: 308 | if (targets or masses) and not sims_data: 309 | raise ValueError('None of the channels specified for inclusion ' 310 | 'are present in file.') 311 | image = mi.MibiImage(np.stack(sims_data, axis=2), channels, **metadata) 312 | if masses: 313 | missing_masses = list(set(masses) - set(image.masses)) 314 | if missing_masses: 315 | warnings.warn(f'Requested masses not found in file: ' 316 | f'{missing_masses}') 317 | if targets: 318 | missing_targets = list(set(targets) - set(image.targets)) 319 | if missing_targets: 320 | warnings.warn(f'Requested targets not found in file: ' 321 | f'{missing_targets}') 322 | to_return['sims'] = image 323 | return_vals = tuple( 324 | to_return.get(key) for key, val in return_types.items() if val) 325 | if len(return_vals) == 1: 326 | return return_vals[0] 327 | return return_vals 328 | 329 | 
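# Example usage (a sketch; the filename and target are hypothetical): #     image = read('fov1.tiff')                  # MibiImage with all channels #     image, sed = read('fov1.tiff', sed=True)   # MibiImage plus the SED array #     cd45 = read('fov1.tiff', targets=['CD45']) # only the CD45 channel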
350 | """ 351 | description = json.loads(page.tags['ImageDescription'].value) 352 | image_type = description['image.type'].lower() 353 | return description, image_type 354 | 355 | def _page_metadata(page, description): 356 | """Parses the page metadata into a dictionary.""" 357 | assert page.tags['ResolutionUnit'].value == 3 358 | x_resolution = page.tags['XResolution'].value[0] / \ 359 | page.tags['XResolution'].value[1] 360 | y_resolution = page.tags['YResolution'].value[0] / \ 361 | page.tags['YResolution'].value[1] 362 | assert x_resolution == y_resolution, \ 363 | 'x-resolution and y-resolution are not equal' 364 | size = page.tags['ImageWidth'].value / x_resolution * 1e4 365 | date = datetime.datetime.strptime( 366 | page.tags['DateTime'].value, _DATETIME_FORMAT) 367 | 368 | # check version for backwards compatibility 369 | _convert_from_previous(description) 370 | 371 | metadata = {} 372 | for key, val in description.items(): 373 | if key.startswith('mibi.'): 374 | metadata[key[5:]] = val 375 | elif key not in RESERVED_MIBITIFF_ATTRIBUTES: 376 | metadata[key] = val 377 | 378 | metadata.update({ 379 | 'coordinates': ( 380 | _cm_to_micron(page.tags['XPosition'].value), 381 | _cm_to_micron(page.tags['YPosition'].value)), 382 | 'date': date, 383 | 'size': size}) 384 | 385 | return metadata 386 | 387 | 388 | def _convert_from_previous(description): 389 | """Convert old metadata format for backwards compatibility. 390 | 391 | Most of these conversions would happen during MibiImage construction, 392 | but we do them here in case reading the info only. 393 | """ 394 | if not description.get('mibi.fov_name') and description.get( 395 | 'mibi.description'): 396 | description['mibi.fov_name'] = description.pop('mibi.description') 397 | if description.get('mibi.folder') and not description.get('mibi.fov_id'): 398 | description['mibi.fov_id'] = description['mibi.folder'].split('/')[0] 399 | desc = description["mibi.fov_id"] 400 | warnings.warn( 401 | 'The "fov_id" attribute is now required if "folder" is ' 402 | f'specified. Setting "fov_id" to {desc}.') 403 | if (not description.get('mibi.folder') and description.get('mibi.fov_id') 404 | and description.get('mibi.fov_id').startswith('FOV')): 405 | description['mibi.folder'] = description['mibi.fov_id'] 406 | desc = description['mibi.folder'] 407 | warnings.warn( 408 | f'The "folder" attribute is required if "fov_id" is specified. ' 409 | f'Setting "folder" to {desc}.') 410 | if description.get('mibi.aperture'): 411 | description['mibi.aperture'] = mi.MibiImage.parse_aperture( 412 | description['mibi.aperture']) 413 | 414 | def _get_page_data(page, description, metadata, channels): 415 | """Adds to metadata and channel info for single TIFF page. 416 | 417 | Args: 418 | page: Single page in TIFF file. 419 | description: Decoded JSON description. 420 | metadata: Dictionary of metadata for entire TIFF file to add to. 421 | channels: List of channels for entire TIFF file to add to. 422 | """ 423 | channels.append((description['channel.mass'], 424 | description['channel.target'])) 425 | # Get metadata on first SIMS page only 426 | if not metadata: 427 | metadata.update(_page_metadata(page, description)) 428 | 429 | def info(filename): 430 | """Gets the metadata from a MIBItiff file. 431 | 432 | Args: 433 | filename: The path to the TIFF. 434 | 435 | Returns: 436 | A dictionary of metadata as could be supplied as kwargs to 437 | :class:`mibidata.mibi_image.MibiImage`, except with a ``channels`` key 438 | whose value is a list of (mass, target) tuples. 
439 | """ 440 | metadata = {} 441 | channels = [] 442 | with TiffFile(filename) as tif: 443 | _check_software(tif) 444 | for page in tif.pages: 445 | description, image_type = _page_description(page) 446 | if image_type == 'sims': 447 | _get_page_data(page, description, metadata, channels) 448 | metadata['conjugates'] = channels 449 | return metadata 450 | --------------------------------------------------------------------------------