├── .github └── ISSUE_TEMPLATE │ ├── bug_report.md │ └── feature_request.md ├── .gitignore ├── Dockerfile.ReconstructOrder ├── DownloadExample.py ├── Fig_Readme.png ├── LICENSE ├── MANIFEST.in ├── README.md ├── ReconstructOrder ├── __init__.py ├── cli_module.py ├── compute │ ├── __init__.py │ ├── reconstruct.py │ ├── reconstruct_phase.py │ └── reconstruct_phase_util.py ├── datastructures │ ├── __init__.py │ ├── create_intensity_data.py │ ├── intensity_data.py │ ├── physical_data.py │ └── stokes_data.py ├── utils │ ├── ConfigReader.py │ ├── __init__.py │ ├── aux_utils.py │ ├── background_estimator.py │ ├── flat_field.py │ ├── imgIO.py │ ├── imgProcessing.py │ ├── mManagerIO.py │ └── plotting.py └── workflow │ ├── __init__.py │ ├── multiDimProcess.py │ └── reconstructBatch.py ├── docs ├── .nojekyll ├── doctrees │ ├── compute.doctree │ ├── environment.pickle │ ├── index.doctree │ ├── installation.doctree │ └── introduction.doctree ├── html │ ├── .buildinfo │ ├── _sources │ │ ├── compute.rst.txt │ │ ├── index.rst.txt │ │ ├── installation.rst.txt │ │ └── introduction.rst.txt │ ├── _static │ │ ├── basic.css │ │ ├── css │ │ │ ├── badge_only.css │ │ │ └── theme.css │ │ ├── doctools.js │ │ ├── documentation_options.js │ │ ├── fonts │ │ │ ├── Inconsolata-Bold.ttf │ │ │ ├── Inconsolata-Regular.ttf │ │ │ ├── Inconsolata.ttf │ │ │ ├── Lato-Bold.ttf │ │ │ ├── Lato-Regular.ttf │ │ │ ├── Lato │ │ │ │ ├── lato-bold.eot │ │ │ │ ├── lato-bold.ttf │ │ │ │ ├── lato-bold.woff │ │ │ │ ├── lato-bold.woff2 │ │ │ │ ├── lato-bolditalic.eot │ │ │ │ ├── lato-bolditalic.ttf │ │ │ │ ├── lato-bolditalic.woff │ │ │ │ ├── lato-bolditalic.woff2 │ │ │ │ ├── lato-italic.eot │ │ │ │ ├── lato-italic.ttf │ │ │ │ ├── lato-italic.woff │ │ │ │ ├── lato-italic.woff2 │ │ │ │ ├── lato-regular.eot │ │ │ │ ├── lato-regular.ttf │ │ │ │ ├── lato-regular.woff │ │ │ │ └── lato-regular.woff2 │ │ │ ├── RobotoSlab-Bold.ttf │ │ │ ├── RobotoSlab-Regular.ttf │ │ │ ├── RobotoSlab │ │ │ │ ├── roboto-slab-v7-bold.eot │ │ │ │ ├── roboto-slab-v7-bold.ttf │ │ │ │ ├── roboto-slab-v7-bold.woff │ │ │ │ ├── roboto-slab-v7-bold.woff2 │ │ │ │ ├── roboto-slab-v7-regular.eot │ │ │ │ ├── roboto-slab-v7-regular.ttf │ │ │ │ ├── roboto-slab-v7-regular.woff │ │ │ │ └── roboto-slab-v7-regular.woff2 │ │ │ ├── fontawesome-webfont.eot │ │ │ ├── fontawesome-webfont.svg │ │ │ ├── fontawesome-webfont.ttf │ │ │ ├── fontawesome-webfont.woff │ │ │ └── fontawesome-webfont.woff2 │ │ ├── jquery-3.2.1.js │ │ ├── jquery.js │ │ ├── js │ │ │ ├── modernizr.min.js │ │ │ └── theme.js │ │ ├── language_data.js │ │ ├── pygments.css │ │ ├── searchtools.js │ │ ├── underscore-1.3.1.js │ │ └── underscore.js │ ├── compute.html │ ├── genindex.html │ ├── index.html │ ├── installation.html │ ├── introduction.html │ ├── objects.inv │ ├── search.html │ └── searchindex.js └── index.html ├── examples ├── ReconstructExample.py └── config_example.yml ├── requirements.txt ├── requirements ├── default.txt └── test.txt ├── runReconstruction.py ├── scripts ├── channel_registration_3D.py ├── flatten_data_structure.py ├── run_image_registration.py └── split_orientaion_components.py ├── setup.py ├── simulations ├── BackgroundCorrection.ipynb ├── BackgroundCorrectionDatastructures.ipynb ├── StokesModelMicroscope.ipynb ├── StokesModelMicroscopeDatastrutures.ipynb ├── datastructures.py ├── mueller_matrices.py ├── plotting.py └── test_plotEllipse.ipynb ├── sphinx-docs ├── Makefile ├── __init__.py ├── _config.yml ├── make.bat └── source │ ├── compute.rst │ ├── conf.py │ ├── index.rst │ ├── installation.rst │ ├── introduction.rst │ ├── usage.rst │ ├── utils.rst │ └── workflow.rst └── tests ├── TestData.txt ├── __init__.py ├── datastructures_tests ├── __init__.py ├── conftest.py ├── intensity_data_test.py ├── physical_data_tests.py └── stokes_data_tests.py ├── integration_tests ├── __init__.py ├── conftest.py ├── multidim_complete_pipeline_tests.py └── singledim_reconstruct_tests.py ├── testMetrics.py └── test_plotVectorField.ipynb /.github/ISSUE_TEMPLATE/bug_report.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Bug report 3 | about: Create a report to help us improve 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Bug** 11 | A clear and concise description of what the bug is. 12 | 13 | **To Reproduce** 14 | 1. Provide link to the data 15 | 2. Provide the config file or source code leading to the bug 16 | 3. Error messages 17 | 18 | **Expected behavior** 19 | A clear and concise description of what you expected to happen. 20 | 21 | **Screenshots** 22 | If applicable, add screenshots to help explain your problem. 23 | 24 | **Desktop (please complete the following information):** 25 | - OS: [e.g. iOS] 26 | - Browser [e.g. chrome, safari] 27 | - Version [e.g. 22] 28 | 29 | **Additional context** 30 | Add any other context about the problem here. 31 | -------------------------------------------------------------------------------- /.github/ISSUE_TEMPLATE/feature_request.md: -------------------------------------------------------------------------------- 1 | --- 2 | name: Feature request 3 | about: Suggest an idea for this project 4 | title: '' 5 | labels: '' 6 | assignees: '' 7 | 8 | --- 9 | 10 | **Are you asking for this feature to solve a problem? Please describe.** 11 | A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]. 12 | Link to the dataset that you are trying to analyze. 13 | 14 | **Describe the solution you'd like** 15 | A clear and concise description of what you want to happen. 16 | 17 | **Describe alternatives you've considered** 18 | A clear and concise description of any alternative solutions or features you've considered. 19 | 20 | **Additional context** 21 | Add any other context or screenshots about the feature request here. 22 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .idea 2 | .DS_Store 3 | __pycache__/ 4 | .ipynb_checkpoints/ 5 | .pyc 6 | *.iml 7 | *.xml 8 | .idea/ReconstructOrder.iml 9 | .idea/workspace.xml 10 | *idea 11 | 12 | ReconstructOrder.egg-info/ 13 | dist/ 14 | build/ 15 | *data_downloaded* 16 | -------------------------------------------------------------------------------- /Dockerfile.ReconstructOrder: -------------------------------------------------------------------------------- 1 | FROM python:3.7 2 | 3 | # Install from requirements 4 | COPY requirements/default.txt /tmp/requirements.txt 5 | RUN pip install --no-cache-dir -r /tmp/requirements.txt 6 | 7 | # Install Jupyter 8 | RUN pip install --no-cache-dir jupyter 9 | 10 | RUN apt-get update && \ 11 | apt-get install -y \ 12 | build-essential \ 13 | cmake \ 14 | git \ 15 | vim \ 16 | wget \ 17 | pkg-config \ 18 | libswscale-dev \ 19 | libtbb2 \ 20 | libtbb-dev \ 21 | libjpeg-dev \ 22 | libpng-dev \ 23 | libtiff-dev \ 24 | tmux 25 | 26 | WORKDIR /ReconstructOrder 27 | 28 | ENV PYTHONPATH /ReconstructOrder 29 | 30 | # Expose Jupyter port 31 | EXPOSE 8888 -------------------------------------------------------------------------------- /DownloadExample.py: -------------------------------------------------------------------------------- 1 | import os 2 | from google_drive_downloader import GoogleDriveDownloader as gdd 3 | 4 | 5 | 6 | 7 | ### Please specify data to download and process! ### 8 | 9 | process_data = ['mouse_brain', 'mouse_kidney'] 10 | # (list) List specifying dataset to download and process 11 | ## ['mouse_brain', 'mouse_kidney'] 12 | 13 | data_path_parameter = {'mouse_brain' : {'gdd_id': '1pB25UcE2nL5ZOuOaoAxTFHf1D3rbnH3f', 14 | 'zip_path': '/mouse_brain_downloaded.zip'}, 15 | 16 | 'mouse_kidney': {'gdd_id': '1N7TxmohOJRi5kTkvf02RaEoCoAuaQ-X7', 17 | 'zip_path': '/mouse_kidney_downloaded.zip'}} 18 | 19 | 20 | 21 | 22 | if __name__ == '__main__': 23 | """ 24 | Reconstruct data shared on the google drive. 25 | 26 | Parameters 27 | ---------- 28 | process_data : list 29 | List specifying dataset to download and process 30 | 31 | Returns 32 | -------- 33 | Outputs data to disk. 34 | """ 35 | configfiles = [] 36 | 37 | working_folder = os.getcwd() + '/data_downloaded' 38 | recon_folder = working_folder + '/recon_result' 39 | 40 | if not os.path.isdir(working_folder): 41 | os.mkdir(working_folder) 42 | print("\nsetting up data folder "+working_folder) 43 | 44 | if not os.path.isdir(recon_folder): 45 | os.mkdir(recon_folder) 46 | print("\nsetting up recon folder "+recon_folder) 47 | 48 | 49 | for item in process_data: 50 | 51 | item_gdd_id = data_path_parameter[item]['gdd_id'] 52 | zipdir = working_folder + data_path_parameter[item]['zip_path'] 53 | 54 | gdd.download_file_from_google_drive(file_id=item_gdd_id, 55 | dest_path=zipdir, 56 | unzip=True, 57 | showsize=True, 58 | overwrite=True) 59 | 60 | -------------------------------------------------------------------------------- /Fig_Readme.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/Fig_Readme.png -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD 3-Clause License 2 | 3 | Copyright (c) 2019, Chan Zuckerberg Biohub 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this 10 | list of conditions and the following disclaimer. 11 | 12 | 2. Redistributions in binary form must reproduce the above copyright notice, 13 | this list of conditions and the following disclaimer in the documentation 14 | and/or other materials provided with the distribution. 15 | 16 | 3. Neither the name of the copyright holder nor the names of its 17 | contributors may be used to endorse or promote products derived from 18 | this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 21 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 23 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 24 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 26 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 27 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 28 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 29 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 30 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/MANIFEST.in -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ReconstructOrder 2 | 3 | Analyze density (bright-field, phase), anisotropy (birefringence, slow axis), and degree of polarization of specimens from polarization-resolved and depth-resolved images. The acquisition, calibration, background correction, and reconstruction algorithms are described in the following preprint and paper: 4 | 5 | ``` 6 | Guo, S.-M., Yeh, L.-H., Folkesson, J.,..., Mehta, S. B. (2019). [Revealing architectural order with quantitative label-free imaging and deep learning.](https://doi.org/10.1101/631101) BioRxiv 631101. 7 | ``` 8 | 9 | ``` 10 | Syuan-Ming Guo, Li-Hao Yeh, Jenny Folkesson, Ivan E Ivanov, Anitha P Krishnan, Matthew G Keefe, Ezzat Hashemi, David Shin, Bryant B Chhun, Nathan H Cho, Manuel D Leonetti, May H Han, Tomasz J Nowakowski, Shalin B Mehta, "Revealing architectural order with quantitative label-free imaging and deep learning," eLife 2020;9:e55502 DOI: 10.7554/eLife.55502 (2020). 11 | ``` 12 | 13 | 14 | ## Updated software 15 | 16 | We have now added support for Micro-Manager 2.0 and developed a [napari plugin](https://www.napari-hub.org/plugins/recOrder-napari) for QLIPP. The corresponding GitHub repository is [recOrder](https://github.com/mehta-lab/recOrder). `reconstruct-order` is no longer maintained. 17 | 18 | ## Introduction 19 | 20 | As an illustration, following figure shows inputs and outputs of the ReconstructOrder for polarization-resolved data acquired at 21 consecutive focal planes with 2D phase reconstruction algorithm. 21 | 22 | ![Data_flow](Fig_Readme.png) 23 | 24 | ReconstructOrder currently supports data format acquired using Micro-Manager 1.4.22 multi-dimension acquisition ([link](https://micro-manager.org/)) and OpenPolScope acquisition plugin ([link](https://openpolscope.org/)). We will add support for Micro-Manager 2.0 format in the next release. 25 | 26 | 27 | 28 | ## Installation 29 | 30 | ### Create a new conda environment (optional, but recommended) 31 | >Install conda package management system by installing anaconda or miniconda ([link](https://conda.io/)). 32 | >Creating a conda environment dedicated to ReconstructOrder will avoid version conflicts among packages required by ReconstructOrder and packages required by other python software. 33 | > 34 | >```buildoutcfg 35 | >conda create -n python=3.7 36 | >conda activate 37 | >``` 38 | 39 | #### All code blocks below assume you are in the above environment 40 | 41 | ### Option 1: install released version via pip 42 | >ReconstructOrder is available on pip. Running pip install will also install dependencies. 43 | >From your environment created above, type: 44 | >```buildoutcfg 45 | >pip install ReconstructOrder 46 | >``` 47 | 48 | ### Option 2: install developer version via git 49 | >Install the git version control system git : [link](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) 50 | > 51 | >Use git to clone this repository to your current directory: 52 | >```buildoutcfg 53 | >git clone https://github.com/mehta-lab/reconstruct-order.git 54 | >``` 55 | You cab install dependencies via pip (python index package) or run ReconstructOrder inside a docker container with the dependencies pre-installed 56 | > * #### install dependencies via pip 57 | > 58 | >> If you are running ReconstructOrder on your own machine,
59 | >> 60 | >> a) navigate to the cloned repository: 61 | >> 62 | >> ```buildoutcfg 63 | >> cd reconstruct-order 64 | >> ``` 65 | >>
66 | >> b) install python library dependencies: 67 | >> 68 | >> ```buildoutcfg 69 | >> pip install -r requirements.txt 70 | >> ``` 71 | >>
72 | >> c) Create a symbolic library link with setup.py: 73 | >> 74 | >> ```buildoutcfg 75 | >> python setup.py develop 76 | >> ``` 77 | 78 | > * #### Running inside a docker container 79 | > 80 | > If you are running ReconstructOrder on a compute node (e.g., fry2@czbiohub), it is recommended to run it in 81 | > a Docker container. 82 | > Docker is the virtual environment with all the required libraries pre-installed so you can run your copy of 83 | > ReconstructOrder without recreating the environment. 84 | > The docker image for ReconstructOrder has been built on fry2@czbiohub. 85 | > If you are running ReconstructOrder on other servers, you can build the docker image after cloning the repository 86 | > by doing : 87 | 88 | >> ```buildoutcfg 89 | >> docker build -t reconstruct_order:py37 -f Dockerfile.ReconstructOrder . 90 | >> ``` 91 | 92 | >> Now, to start a docker container, do 93 | >> ```buildoutcfg 94 | >> docker run -it -v /data//:/ -v ~/ReconstructOrder:/ReconstructOrder reconstruct_order:py37 bash 95 | >> ``` 96 | 97 | 98 | *ReconstructOrder supports NVIDIA GPU computation through cupy package, please follow [here](https://github.com/cupy/cupy) for installation (check cupy is properly installed by ```import cupy```). To enable gpu processing, set ```processing: use_gpu: True``` in the configuration file.* 99 | 100 | ## Usage 101 | >The reconstruction parameters are specified in the configuration file. 102 | > Configuration file template(```config_example.yml```) can be found [here](https://github.com/mehta-lab/reconstruct-order) under ```examples``` folder, which incluides detailed explanation of parameters for running ReconstructOrder in different modes 103 | > 104 | > To use the configuration file template for your data, you need to at least modify ```dataset: data_dir``` and ```dataset: processed_dir``` to point to source data path and output path. See the template docstrings for the usage of other parameters. 105 | > 106 | > #### There are two ways to run reconstruction: 107 | >>* #### from command line 108 | >> If you pip installed the library, from any folder, simply type: 109 | >> ```buildoutcfg 110 | >> runReconstruction --config path-and-name-to-your-config.yml 111 | >> ``` 112 | >> 113 | >> or 114 | >> ```buildoutcfg 115 | >> python runReconstruction.py --config path-and-name-to-your-config.yml 116 | >> ``` 117 | >> if you the symlink wasn't set up. 118 | > 119 | >>* #### Inside Python scripts 120 | >> To call ReconstructOrder as a library in your own script to run reconstruction: 121 | >> ```buildoutcfg 122 | >> import ReconstructOrder.workflow as wf 123 | >> wf.reconstruct_batch('path-and-name-to-your-config.yml') 124 | >> ``` 125 | 126 | ## Example 127 | In the following, we demonstrate how to download our example dataset (hosted [here](https://drive.google.com/drive/u/3/folders/1axmPgQVNi22ZqGLXzHGHIuP9kA93K9zH)) and run ReconstructOrder on it to get birefringence and phase images. This instruction should work for installation from both Option 1 and 2.
128 | 129 | a) In the terminal, switch to the environment with ReconstructOrder installed 130 | > ```buildoutcfg 131 | > conda activate 132 | > ``` 133 | 134 | b) Navigate to the repository folder: 135 | > ```buildoutcfg 136 | > cd reconstruct-order 137 | > ``` 138 | 139 | c) Download example dataset: 140 | > ```buildoutcfg 141 | > python DownloadExample.py 142 | 143 | The example datasets will be downloaded and upzipped in the ```data_downloaded``` folder, together with the configuration files.
144 | 145 | d) Run ReconstructOrder on the downloaded dataset, e.g. MouseBrain dataset: 146 | > ```buildoutcfg 147 | > python runReconstruction.py --config ./data_downloaded/MouseBrain/config.yml 148 | 149 | e) The reconstructed images will be saved the ```data_downloaded``` folder. You can reconstruct other downloaded datasets following the above steps, or change the parameters in the configuration file and observe the changes in the output images. 150 | 151 | ## License 152 | Chan Zuckerberg Biohub Software License 153 | 154 | This software license is the 2-clause BSD license plus clause a third clause 155 | that prohibits redistribution and use for commercial purposes without further 156 | permission. 157 | 158 | Copyright © 2019. Chan Zuckerberg Biohub. 159 | All rights reserved. 160 | 161 | Redistribution and use in source and binary forms, with or without 162 | modification, are permitted provided that the following conditions are met: 163 | 164 | 1. Redistributions of source code must retain the above copyright notice, 165 | this list of conditions and the following disclaimer. 166 | 167 | 2. Redistributions in binary form must reproduce the above copyright notice, 168 | this list of conditions and the following disclaimer in the documentation 169 | and/or other materials provided with the distribution. 170 | 171 | 3. Redistributions and use for commercial purposes are not permitted without 172 | the Chan Zuckerberg Biohub's written permission. For purposes of this license, 173 | commercial purposes are the incorporation of the Chan Zuckerberg Biohub's 174 | software into anything for which you will charge fees or other compensation or 175 | use of the software to perform a commercial service for a third party. 176 | Contact ip@czbiohub.org for commercial licensing opportunities. 177 | 178 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 179 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 180 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE 181 | DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE 182 | FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 183 | DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR 184 | SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER 185 | CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 186 | OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 187 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 188 | -------------------------------------------------------------------------------- /ReconstructOrder/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {4/23/19} 2 | 3 | # from .workflow.multiDimProcess import * 4 | 5 | __all__ = ["workflow", 6 | "utils", 7 | "compute", 8 | "datastructures" 9 | ] 10 | -------------------------------------------------------------------------------- /ReconstructOrder/cli_module.py: -------------------------------------------------------------------------------- 1 | # bchhun, {5/1/19} 2 | 3 | 4 | import argparse 5 | 6 | from ReconstructOrder.workflow import reconstruct_batch 7 | 8 | 9 | def parse_args(): 10 | """Parse command line arguments 11 | 12 | In python namespaces are implemented as dictionaries 13 | :return: namespace containing the arguments passed. 14 | """ 15 | 16 | parser = argparse.ArgumentParser() 17 | 18 | parser.add_argument('--config', type=str, 19 | help='path to yaml configuration file') 20 | 21 | args = parser.parse_args() 22 | return args 23 | 24 | 25 | def main(): 26 | args = parse_args() 27 | reconstruct_batch(args.config) 28 | -------------------------------------------------------------------------------- /ReconstructOrder/compute/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {4/23/19} 2 | 3 | name = "compute" 4 | 5 | from . import (reconstruct, reconstruct_phase) 6 | 7 | __all__ = ["reconstruct", "reconstruct_phase"] -------------------------------------------------------------------------------- /ReconstructOrder/datastructures/__init__.py: -------------------------------------------------------------------------------- 1 | from .intensity_data import IntensityData 2 | from .stokes_data import StokesData 3 | from .physical_data import PhysicalData 4 | from .create_intensity_data import IntensityDataCreator -------------------------------------------------------------------------------- /ReconstructOrder/datastructures/create_intensity_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import warnings 3 | 4 | from ReconstructOrder.datastructures import IntensityData 5 | from ..utils.mManagerIO import mManagerReader 6 | from ..utils.imgProcessing import mean_pooling_2d 7 | 8 | 9 | _fluor_chan_names = ['405', '488', '568', '640', 'ex561em700'] 10 | 11 | class IntensityDataCreator(object): 12 | """Create IntensityData objects from images in mManager/Polacquisition data format 13 | Parameters 14 | ---------- 15 | ROI : list 16 | region of interest for reconstruction in format of [n_start_y, n_start_x, Ny, Nx] 17 | input_chan : list 18 | list of input channel names, subset of same as the img_io.input_chans 19 | int_obj_chans : list 20 | Channels in the output intensity data object 21 | binning : int 22 | binning (or pooling) size for the images 23 | """ 24 | 25 | def __init__(self, input_chans=None, int_obj_chans=None, ROI=None, binning=1): 26 | self.input_chans = input_chans 27 | self.roi = ROI 28 | self.binning = binning 29 | self.int_obj_chans = ['IExt', 'I90', 'I135', 'I45', 'I0', 'BF', 30 | '405', '488', '568', '640', 'ex561em700'] 31 | if int_obj_chans is not None: 32 | self.int_obj_chans = int_obj_chans 33 | 34 | def get_data_object(self, img_io: mManagerReader) -> IntensityData: 35 | """Parse tiff file name following mManager/Polacquisition output format 36 | return intensity data objects with images assigned to corresponding channels 37 | based on the file name 38 | Parameters 39 | ---------- 40 | img_io : obj 41 | mManagerReader instance 42 | Returns 43 | ------- 44 | imgs : IntensityData 45 | images from polarization, fluorescence, bright-field channels 46 | """ 47 | 48 | imgs = IntensityData(channel_names=self.int_obj_chans) 49 | 50 | if self.roi is None: 51 | self.roi = [0, 0, img_io.height, img_io.width] 52 | 53 | for chan_name in _fluor_chan_names: 54 | imgs.replace_image(np.zeros((self.roi[2], self.roi[3])), chan_name) 55 | 56 | assert self.roi[0] + self.roi[2] <= img_io.height and self.roi[1] + self.roi[3] <= img_io.width, \ 57 | "Region of interest is beyond the size of the actual image" 58 | 59 | if self.input_chans is None: 60 | self.input_chans = img_io.input_chans 61 | for chan_name in self.input_chans: 62 | img_io.chan_idx = img_io.input_chans.index(chan_name) 63 | img = img_io.read_img() 64 | if img is None: 65 | warnings.warn('image "{}" cannot be found. Skipped.'.format(chan_name)) 66 | else: 67 | img = img[self.roi[0]:self.roi[0] + self.roi[2], self.roi[1]:self.roi[1] + self.roi[3]] 68 | img -= img_io.blackLevel 69 | img = mean_pooling_2d(img, self.binning) 70 | imgs = IntensityDataCreator.chan_name_parser(imgs, img, chan_name) 71 | return imgs 72 | 73 | 74 | 75 | 76 | 77 | @staticmethod 78 | def chan_name_parser(imgs, img, chan_name): 79 | """Parse the image channel name and assign the image to 80 | the channel in the intensity data object 81 | 82 | Parameters 83 | ---------- 84 | imgs : IntensityData 85 | intensity data object 86 | img : image to assign 87 | chan_name : 88 | image channel name 89 | Returns 90 | ------- 91 | imgs : IntensityData 92 | images from polarization, fluorescence, bright-field channels 93 | """ 94 | if any(substring in chan_name for substring in ['State', 'state', 'Pol']): 95 | if '0' in chan_name: 96 | imgs.replace_image(img, 'IExt') 97 | elif '1' in chan_name: 98 | imgs.replace_image(img, 'I90') 99 | elif '2' in chan_name: 100 | imgs.replace_image(img, 'I135') 101 | elif '3' in chan_name: 102 | imgs.replace_image(img, 'I45') 103 | elif '4' in chan_name: 104 | imgs.replace_image(img, 'I0') 105 | elif any(substring in chan_name for substring in 106 | ['Confocal40', 'Confocal_40', 'Widefield', 'widefield', 'Fluor']): 107 | if any(substring in chan_name for substring in ['DAPI', '405', '405nm']): 108 | imgs.replace_image(img, '405') 109 | elif any(substring in chan_name for substring in ['GFP', '488', '488nm']): 110 | imgs.replace_image(img, '488') 111 | elif any(substring in chan_name for substring in 112 | ['TxR', 'TXR', 'TX', 'RHO', '568', '561', '560']): 113 | imgs.replace_image(img, '568') 114 | elif any(substring in chan_name for substring in ['Cy5', 'IFP', '640', '637']): 115 | imgs.replace_image(img, '640') 116 | elif any(substring in chan_name for substring in ['FM464', 'fm464']): 117 | imgs.replace_image(img, 'ex561em700') 118 | elif any(substring in chan_name for substring in ['BF', 'BrightField', 'Default']): 119 | imgs.replace_image(img, 'BF') 120 | 121 | return imgs -------------------------------------------------------------------------------- /ReconstructOrder/datastructures/intensity_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class IntensityData(object): 5 | """ 6 | Data Structure that contains all raw intensity images used for computing Stokes matrices 7 | only attributes with getters/setters can be assigned to this class 8 | """ 9 | 10 | __data = None 11 | __channel_names = None 12 | __axis_names = None 13 | __current_shape = None 14 | 15 | def __setattr__(self, name, value): 16 | """ 17 | Prevent attribute assignment other than those defined below 18 | 19 | Parameters 20 | ---------- 21 | name : str 22 | value : value 23 | 24 | Returns 25 | ------- 26 | 27 | """ 28 | if hasattr(self, name): 29 | object.__setattr__(self, name, value) 30 | else: 31 | raise TypeError('Cannot set name %r on object of type %s' % ( 32 | name, self.__class__.__name__)) 33 | 34 | def __init__(self, num_channels=None, channel_names: list = None, axis_names: list = None): 35 | """ 36 | Initialize instance variables 37 | 38 | Parameters 39 | ---------- 40 | num_channels : int 41 | channel_names : list of str 42 | axis_names : list of str 43 | """ 44 | 45 | super(IntensityData, self).__init__() 46 | if num_channels and type(num_channels) != int: 47 | raise ValueError("number of channels must be integer type") 48 | 49 | self.__data = [] 50 | if channel_names: 51 | self.channel_names = channel_names 52 | if axis_names: 53 | self.axis_names = axis_names 54 | if num_channels: 55 | for _ in range(num_channels): 56 | self.__data.append([]) 57 | self.__current_shape = () 58 | 59 | def check_shape(self, input_shape=None): 60 | """ 61 | compare the supplied input_shape with the shape of images already in the self.__data 62 | 63 | self.__current_shape is updated at "append_image" and "replace_image" 64 | 65 | Parameters 66 | ---------- 67 | input_shape : tuple 68 | supplied shape from image to add to self.__data 69 | 70 | Returns 71 | ------- 72 | 73 | """ 74 | 75 | 76 | # check for empty __data possibilities: 77 | if len(self.__data) == 0: 78 | return True 79 | if self.__current_shape == (): 80 | return True 81 | 82 | # check for shape consistency 83 | if input_shape and input_shape != self.__current_shape: 84 | return False 85 | 86 | return True 87 | 88 | def check_dtype(self, input_data): 89 | """ 90 | check that supplied images are either numpy arrays or memmaps 91 | 92 | Parameters 93 | ---------- 94 | input_data : np.ndarray, or np.memmap 95 | 96 | Returns 97 | ------- 98 | boolean 99 | 100 | """ 101 | 102 | if type(input_data) is not np.ndarray and \ 103 | type(input_data) is not np.memmap: 104 | return False 105 | else: 106 | return True 107 | 108 | @property 109 | def data(self): 110 | """ 111 | get the underlying np.array data that is built 112 | 113 | Returns 114 | ------- 115 | 116 | np.array of data object 117 | 118 | """ 119 | if not self.check_shape(): 120 | raise ValueError("Inconsistent data dimensions or data not assigned\n") 121 | return np.array(self.__data) 122 | 123 | @property 124 | def num_channels(self): 125 | """ 126 | Get the number of channels already assigned to the data 127 | 128 | Returns 129 | ------- 130 | number of images assigned to this data 131 | 132 | """ 133 | 134 | return len(self.__data) 135 | 136 | @property 137 | def channel_names(self): 138 | return self.__channel_names 139 | 140 | @channel_names.setter 141 | def channel_names(self, names: list): 142 | """ 143 | assign channel names to images in data 144 | 145 | Parameters 146 | ---------- 147 | names : list of str 148 | 149 | Returns 150 | ------- 151 | 152 | """ 153 | for chan in names: 154 | if type(chan) != str: 155 | raise ValueError("channel names must be a list of strings") 156 | 157 | self.__channel_names = names 158 | 159 | # check that data contains entries for each of the channel names 160 | # if it does not, then add a blank entry 161 | if len(self.__data) <= len(self.__channel_names): 162 | for i in range(len(self.__channel_names)-len(self.__data)): 163 | self.__data.append([]) 164 | 165 | @property 166 | def axis_names(self): 167 | return self.__channel_names 168 | 169 | @axis_names.setter 170 | def axis_names(self, ax_names: list): 171 | """ 172 | set names for axes 173 | 174 | Parameters 175 | ---------- 176 | value : list of str 177 | 178 | Returns 179 | ------- 180 | 181 | """ 182 | for axis in ax_names: 183 | if type(axis) != str: 184 | raise ValueError("axis names must be a list of strings") 185 | 186 | if len(set(ax_names)) != len(ax_names): 187 | raise ValueError("duplicate entries in axis_name list") 188 | else: 189 | self.__axis_names = ax_names 190 | 191 | def append_image(self, image): 192 | """ 193 | append image to end of data 194 | 195 | Parameters 196 | ---------- 197 | image : np.ndarray or np.memmap 198 | 199 | Returns 200 | ------- 201 | 202 | """ 203 | if not self.check_dtype(image): 204 | raise TypeError("image is not ndarray") 205 | if not self.check_shape(image.shape): 206 | raise ValueError("image does not conform to current data dimensions") 207 | 208 | if self.__channel_names: 209 | raise ValueError("channel names are already defined for this IntensityData object." 210 | "Append first, then assign channel names. Or use replace_image instead") 211 | 212 | self.__current_shape = image.shape 213 | self.__data.append(image) 214 | 215 | def replace_image(self, image, value): 216 | """ 217 | replace image in self.__data at supplied index. 218 | Index can be string (channel name), or int (position) 219 | 220 | Parameters 221 | ---------- 222 | image : np.ndarray 223 | value : str or int 224 | 225 | Returns 226 | ------- 227 | 228 | """ 229 | 230 | # data checks 231 | if not self.check_dtype(image): 232 | raise TypeError("image is not ndarray") 233 | 234 | if not self.check_shape(image.shape): 235 | raise ValueError("image does not conform to current data dimensions") 236 | self.__current_shape = image.shape 237 | 238 | # find the position in the array, check that it exists 239 | if type(value) == int: 240 | if len(self.__data) <= value: 241 | raise IndexError("replacing Intensity Data image at position that does not exist") 242 | position = value 243 | elif type(value) == str: 244 | if value not in self.__channel_names: 245 | raise IndexError("replacing Intensity Data image at channel name that is not defined") 246 | position = self.__channel_names.index(value) 247 | else: 248 | raise ValueError("index or channel name in data does not exist or not defined") 249 | 250 | # replace image 251 | self.__data[position] = image 252 | 253 | def get_image(self, position): 254 | """ 255 | enable image search by channel name or index 256 | 257 | Parameters 258 | ---------- 259 | position : int or str 260 | if str, search for matching str in supplied channel_names 261 | Returns 262 | ------- 263 | 264 | """ 265 | if type(position) is str: 266 | if position in self.__channel_names: 267 | try: 268 | dat = self.__data[self.__channel_names.index(position)] 269 | except TypeError: 270 | raise TypeError("channel %s does not exist in data" % position) 271 | return dat 272 | else: 273 | raise ValueError("Intensity Data with channel name %s is not found") 274 | elif type(position) is int: 275 | return self.__data[position] 276 | -------------------------------------------------------------------------------- /ReconstructOrder/datastructures/physical_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | 4 | class PhysicalData(object): 5 | """ 6 | Data Structure that contains all computed physical data 7 | only attributes with getters/setters can be assigned to this class 8 | """ 9 | 10 | __I_trans = None 11 | __retard = None 12 | __polarization = None 13 | __depolarization = None 14 | __azimuth = None 15 | __azimuth_vector = None 16 | __azimuth_degree = None 17 | __absorption_2D = None 18 | __phase_2D = None 19 | __absorption_semi3D = None 20 | __phase_semi3D = None 21 | __phase_3D = None 22 | 23 | 24 | def __setattr__(self, name, value): 25 | """ 26 | Prevent attribute assignment other than those defined below 27 | :param name: any attribute 28 | :param value: corresponding value 29 | :return: 30 | """ 31 | if hasattr(self, name): 32 | object.__setattr__(self, name, value) 33 | else: 34 | raise TypeError('Cannot set name %r on object of type %s' % ( 35 | name, self.__class__.__name__)) 36 | 37 | def __init__(self): 38 | """ 39 | Initialize instance variables. 40 | """ 41 | super(PhysicalData, self).__init__() 42 | self.__I_trans = None 43 | self.__retard = None 44 | self.__polarization = None 45 | self.__depolarization = None 46 | self.__azimuth = None 47 | self.__azimuth_vector = None 48 | self.__azimuth_degree = None 49 | self.__absorption_2D = None 50 | self.__phase_2D = None 51 | self.__absorption_semi3D = None 52 | self.__phase_semi3D = None 53 | self.__phase_3D = None 54 | 55 | 56 | 57 | @property 58 | def I_trans(self): 59 | return self.__I_trans 60 | 61 | @I_trans.setter 62 | def I_trans(self, data: np.ndarray): 63 | self.__I_trans = data 64 | 65 | @property 66 | def retard(self): 67 | return self.__retard 68 | 69 | @retard.setter 70 | def retard(self, data: np.ndarray): 71 | self.__retard = data 72 | 73 | @property 74 | def polarization(self): 75 | return self.__polarization 76 | 77 | @polarization.setter 78 | def polarization(self, data: np.ndarray): 79 | self.__polarization = data 80 | 81 | @property 82 | def azimuth(self): 83 | return self.__azimuth 84 | 85 | @azimuth.setter 86 | def azimuth(self, data: np.ndarray): 87 | self.__azimuth = data 88 | 89 | @property 90 | def azimuth_vector(self): 91 | return self.__azimuth_vector 92 | 93 | @azimuth_vector.setter 94 | def azimuth_vector(self, data: np.ndarray): 95 | self.__azimuth_vector = data 96 | 97 | @property 98 | def azimuth_degree(self): 99 | return self.__azimuth_degree 100 | 101 | @azimuth_degree.setter 102 | def azimuth_degree(self, data: np.ndarray): 103 | self.__azimuth_degree = data 104 | 105 | @property 106 | def depolarization(self): 107 | return self.__depolarization 108 | 109 | @depolarization.setter 110 | def depolarization(self, data: np.ndarray): 111 | self.__depolarization = data 112 | 113 | @property 114 | def absorption_2D(self): 115 | return self.__absorption_2D 116 | 117 | @absorption_2D.setter 118 | def absorption_2D(self, data: np.ndarray): 119 | self.__absorption_2D = data 120 | 121 | @property 122 | def phase_2D(self): 123 | return self.__phase_2D 124 | 125 | @phase_2D.setter 126 | def phase_2D(self, data: np.ndarray): 127 | self.__phase_2D = data 128 | 129 | @property 130 | def absorption_semi3D(self): 131 | return self.__absorption_semi3D 132 | 133 | @absorption_semi3D.setter 134 | def absorption_semi3D(self, data: np.ndarray): 135 | self.__absorption_semi3D = data 136 | 137 | @property 138 | def phase_semi3D(self): 139 | return self.__phase_semi3D 140 | 141 | @phase_semi3D.setter 142 | def phase_semi3D(self, data: np.ndarray): 143 | self.__phase_semi3D = data 144 | 145 | @property 146 | def phase_3D(self): 147 | return self.__phase_3D 148 | 149 | @phase_3D.setter 150 | def phase_3D(self, data: np.ndarray): 151 | self.__phase_3D = data 152 | 153 | 154 | 155 | 156 | 157 | 158 | -------------------------------------------------------------------------------- /ReconstructOrder/datastructures/stokes_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from ReconstructOrder.datastructures.intensity_data import IntensityData 3 | 4 | 5 | class StokesData(object): 6 | """ 7 | Data Structure that contains all stokes images 8 | only attributes with getters/setters can be assigned to this class 9 | """ 10 | 11 | __s1_norm = None 12 | __s2_norm = None 13 | __polarization = None 14 | __data = None 15 | 16 | # __current_shape = None 17 | 18 | def __setattr__(self, name, value): 19 | """ 20 | Prevent attribute assignment other than those defined below 21 | :param name: str 22 | attribute name 23 | :param value: value 24 | attribute value 25 | :return: 26 | """ 27 | if hasattr(self, name): 28 | object.__setattr__(self, name, value) 29 | else: 30 | raise TypeError('Cannot set name %r on object of type %s' % ( 31 | name, self.__class__.__name__)) 32 | 33 | def __init__(self, inv_inst_matrix=None, intensity_data=None): 34 | """ 35 | Initialize instance variables 36 | :param inv_inst_matrix: np.array 37 | inverse instrument matrix 38 | if provided with intensity data, automatically compute stokes matrices 39 | :param intensity_data: np.array 40 | if provided with inv_inst_matrix, automatically compute stokes matrices 41 | """ 42 | super(StokesData, self).__init__() 43 | 44 | # self.__current_shape = () 45 | if inv_inst_matrix is not None and intensity_data is not None: 46 | self.__data = [None, None, None, None] 47 | self.compute_stokes(inv_inst_matrix, intensity_data) 48 | else: 49 | self.__s1_norm = None 50 | self.__s2_norm = None 51 | 52 | self.__polarization = None 53 | self.__data = [None, None, None, None] 54 | 55 | def compute_stokes(self, inv_inst_matrix, intensity_data: IntensityData): 56 | """ 57 | compute and assign stokes matrices based on supplied inst matrix and intensity data 58 | :param inv_inst_matrix: inverse of instrument matrix 59 | :param intensity_data: IntensityData datastructure 60 | :return: 61 | """ 62 | img_shape = np.shape(intensity_data.data) 63 | img_raw_flat = np.reshape(intensity_data.data, (intensity_data.num_channels, -1)) 64 | img_stokes_flat = np.dot(inv_inst_matrix, img_raw_flat) 65 | img_stokes = np.reshape(img_stokes_flat, (4,) + img_shape[1:]) 66 | [self.s0, self.s1, self.s2, self.s3] = [img_stokes[i, ...] for i in range(4)] 67 | self.s1_norm = self.s1/self.s3 68 | self.s2_norm = self.s2/self.s3 69 | 70 | 71 | def check_shape(self, input_shape=None): 72 | # check for empty __data possibilities: 73 | if len(self.__data) == 0: 74 | return True 75 | # if self.__current_shape == (): 76 | # return True 77 | 78 | # check for shape consistency 79 | current_shape = (0, 0) 80 | for img in np.array(self.__data): 81 | if img is None: 82 | return False 83 | elif current_shape == (0, 0): 84 | current_shape = img.shape 85 | elif img.shape != current_shape: 86 | return False 87 | 88 | # Method used by Intensity Data 89 | # if input_shape and input_shape != self.__current_shape: 90 | # return False 91 | 92 | return True 93 | 94 | def check_dtype(self, input_data): 95 | if type(input_data) is not np.ndarray and \ 96 | type(input_data) is not np.memmap: 97 | return False 98 | else: 99 | return True 100 | 101 | @property 102 | def data(self): 103 | if not self.check_shape(): 104 | raise ValueError("Inconsistent data dimensions or data not assigned\n") 105 | 106 | # check for blank entries, None or array types exist 107 | # if True in [True for value in self.__data if value is None or value.any() is None]: 108 | # raise ValueError("None present: Not all stokes types are assigned") 109 | return np.array(self.__data) 110 | 111 | # normalized S1 112 | @property 113 | def s1_norm(self): 114 | return self.__s1_norm 115 | 116 | @s1_norm.setter 117 | def s1_norm(self, image): 118 | 119 | if not self.check_dtype(image): 120 | raise TypeError("image is not ndarray") 121 | # if not self.check_shape(image.shape): 122 | # raise ValueError("image does not conform to current data dimensions") 123 | # 124 | # self.__current_shape = image.shape 125 | self.__s1_norm = image 126 | 127 | # normalized S2 128 | @property 129 | def s2_norm(self): 130 | return self.__s2_norm 131 | 132 | @s2_norm.setter 133 | def s2_norm(self, image): 134 | 135 | if not self.check_dtype(image): 136 | raise TypeError("image is not ndarray") 137 | # if not self.check_shape(image.shape): 138 | # raise ValueError("image does not conform to current data dimensions") 139 | # 140 | # self.__current_shape = image.shape 141 | 142 | self.__s2_norm = image 143 | 144 | # polarization based on other stokes params 145 | @property 146 | def polarization(self): 147 | return self.__polarization 148 | 149 | @polarization.setter 150 | 151 | def polarization(self, image): 152 | if not self.check_dtype(image): 153 | raise TypeError("image is not ndarray") 154 | # if not self.check_shape(image.shape): 155 | # raise ValueError("image does not conform to current data dimensions") 156 | # 157 | # self.__current_shape = image.shape 158 | self.__polarization = image 159 | 160 | # Stokes matrices 161 | @property 162 | def s0(self): 163 | return self.__data[0] 164 | 165 | @s0.setter 166 | def s0(self, image: np.ndarray): 167 | 168 | if not self.check_dtype(image): 169 | raise TypeError("image is not ndarray") 170 | # if not self.check_shape(image.shape): 171 | # raise ValueError("image does not conform to current data dimensions") 172 | # 173 | # self.__current_shape = image.shape 174 | self.__data[0] = image 175 | 176 | @property 177 | def s1(self): 178 | return self.__data[1] 179 | 180 | @s1.setter 181 | def s1(self, image: np.ndarray): 182 | 183 | if not self.check_dtype(image): 184 | raise TypeError("image is not ndarray") 185 | # if not self.check_shape(image.shape): 186 | # raise ValueError("image does not conform to current data dimensions") 187 | # 188 | # self.__current_shape = image.shape 189 | self.__data[1] = image 190 | 191 | @property 192 | def s2(self): 193 | return self.__data[2] 194 | 195 | @s2.setter 196 | def s2(self, image: np.ndarray): 197 | 198 | if not self.check_dtype(image): 199 | raise TypeError("image is not ndarray") 200 | # if not self.check_shape(image.shape): 201 | # raise ValueError("image does not conform to current data dimensions") 202 | # 203 | # self.__current_shape = image.shape 204 | self.__data[2] = image 205 | 206 | @property 207 | def s3(self): 208 | return self.__data[3] 209 | 210 | @s3.setter 211 | def s3(self, image: np.ndarray): 212 | 213 | if not self.check_dtype(image): 214 | raise TypeError("image is not ndarray") 215 | # if not self.check_shape(image.shape): 216 | # raise ValueError("image does not conform to current data dimensions") 217 | # 218 | # self.__current_shape = image.shape 219 | self.__data[3] = image 220 | 221 | 222 | -------------------------------------------------------------------------------- /ReconstructOrder/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {4/29/19} 2 | 3 | from . import (imgIO, imgProcessing, mManagerIO, plotting, ConfigReader) -------------------------------------------------------------------------------- /ReconstructOrder/utils/aux_utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import functools 3 | 4 | def loop_pt(func): 5 | @functools.wraps(func) 6 | def wrapper_loop_pt(*args, **kwargs): 7 | img_io = kwargs['img_io'] 8 | for pos_idx, pos_name in enumerate(img_io.pos_list): 9 | img_io.img_in_pos_path = os.path.join(img_io.img_sm_path, pos_name) 10 | img_io.pos_idx = pos_idx 11 | for t_idx in img_io.t_list: 12 | img_io.t_idx = t_idx 13 | kwargs['img_io'] = img_io 14 | func(*args, **kwargs) 15 | return wrapper_loop_pt 16 | 17 | -------------------------------------------------------------------------------- /ReconstructOrder/utils/background_estimator.py: -------------------------------------------------------------------------------- 1 | """Estimate flat field images""" 2 | 3 | import numpy as np 4 | import itertools 5 | 6 | class BackgroundEstimator2D: 7 | """Estimates flat field image""" 8 | 9 | def __init__(self, 10 | block_size=32): 11 | """ 12 | Background images are estimated once per channel for 2D data 13 | :param int block_size: Size of blocks image will be divided into 14 | """ 15 | 16 | if block_size is None: 17 | block_size = 32 18 | self.block_size = block_size 19 | 20 | def sample_block_medians(self, im): 21 | """Subdivide a 2D image in smaller blocks of size block_size and 22 | compute the median intensity value for each block. Any incomplete 23 | blocks (remainders of modulo operation) will be ignored. 24 | 25 | :param np.array im: 2D image 26 | :return np.array(float) sample_coords: Image coordinates for block 27 | centers 28 | :return np.array(float) sample_values: Median intensity values for 29 | blocks 30 | """ 31 | 32 | im_shape = im.shape 33 | assert self.block_size < im_shape[0], "Block size larger than image height" 34 | assert self.block_size < im_shape[1], "Block size larger than image width" 35 | 36 | nbr_blocks_x = im_shape[0] // self.block_size 37 | nbr_blocks_y = im_shape[1] // self.block_size 38 | sample_coords = np.zeros((nbr_blocks_x * nbr_blocks_y, 2), 39 | dtype=np.float64) 40 | sample_values = np.zeros((nbr_blocks_x * nbr_blocks_y, ), 41 | dtype=np.float64) 42 | for x in range(nbr_blocks_x): 43 | for y in range(nbr_blocks_y): 44 | idx = y * nbr_blocks_x + x 45 | sample_coords[idx, :] = [x * self.block_size + (self.block_size - 1) / 2, 46 | y * self.block_size + (self.block_size - 1) / 2] 47 | sample_values[idx] = np.median( 48 | im[x * self.block_size:(x + 1) * self.block_size, 49 | y * self.block_size:(y + 1) * self.block_size] 50 | ) 51 | return sample_coords, sample_values 52 | 53 | @staticmethod 54 | def fit_polynomial_surface_2D(sample_coords, 55 | sample_values, 56 | im_shape, 57 | order=2, 58 | normalize=True): 59 | """ 60 | Given coordinates and corresponding values, this function will fit a 61 | 2D polynomial of given order, then create a surface of given shape. 62 | 63 | :param np.array sample_coords: 2D sample coords (nbr of points, 2) 64 | :param np.array sample_values: Corresponding intensity values (nbr points,) 65 | :param tuple im_shape: Shape of desired output surface (height, width) 66 | :param int order: Order of polynomial (default 2) 67 | :param bool normalize: Normalize surface by dividing by its mean 68 | for background correction (default True) 69 | 70 | :return np.array poly_surface: 2D surface of shape im_shape 71 | """ 72 | assert (order + 1)*(order + 2)/2 <= len(sample_values), \ 73 | "Can't fit a higher degree polynomial than there are sampled values" 74 | # Number of coefficients is determined by (order + 1)*(order + 2)/2 75 | orders = np.arange(order + 1) 76 | variable_matrix = np.zeros((sample_coords.shape[0], int((order + 1)*(order + 2)/2))) 77 | order_pairs = list(itertools.product(orders, orders)) 78 | # sum of orders of x,y <= order of the polynomial 79 | variable_iterator = itertools.filterfalse(lambda x: sum(x) > order, order_pairs) 80 | for idx, (m, n) in enumerate(variable_iterator): 81 | variable_matrix[:, idx] = sample_coords[:, 0] ** n * sample_coords[:, 1] ** m 82 | # Least squares fit of the points to the polynomial 83 | coeffs, _, _, _ = np.linalg.lstsq(variable_matrix, sample_values, rcond=-1) 84 | # Create a grid of image (x, y) coordinates 85 | x_mesh, y_mesh = np.meshgrid(np.linspace(0, im_shape[1] - 1, im_shape[1]), 86 | np.linspace(0, im_shape[0] - 1, im_shape[0])) 87 | # Reconstruct the surface from the coefficients 88 | poly_surface = np.zeros(im_shape, np.float) 89 | order_pairs = list(itertools.product(orders, orders)) 90 | # sum of orders of x,y <= order of the polynomial 91 | variable_iterator = itertools.filterfalse(lambda x: sum(x) > order, order_pairs) 92 | for coeff, (m, n) in zip(coeffs, variable_iterator): 93 | poly_surface += coeff * x_mesh ** m * y_mesh ** n 94 | 95 | if normalize: 96 | poly_surface /= np.mean(poly_surface) 97 | return poly_surface 98 | 99 | def get_background(self, im, order=2, normalize=True): 100 | """ 101 | Combine sampling and polynomial surface fit for background estimation. 102 | To background correct an image, divide it by background. 103 | 104 | :param np.array im: 2D image 105 | :param int order: Order of polynomial (default 2) 106 | :param bool normalize: Normalize surface by dividing by its mean 107 | for background correction (default True) 108 | 109 | :return np.array background: Background image 110 | """ 111 | 112 | coords, values = self.sample_block_medians(im=im) 113 | background = self.fit_polynomial_surface_2D( 114 | sample_coords=coords, 115 | sample_values=values, 116 | im_shape=im.shape, 117 | order=order, 118 | normalize=normalize, 119 | ) 120 | # Backgrounds can't contain zeros or negative values 121 | # if background.min() <= 0: 122 | # raise ValueError( 123 | # "The generated background was not strictly positive {}.".format( 124 | # background.min()), 125 | # ) 126 | return background 127 | 128 | -------------------------------------------------------------------------------- /ReconstructOrder/utils/flat_field.py: -------------------------------------------------------------------------------- 1 | from .mManagerIO import mManagerReader, PolAcquReader 2 | from ..datastructures import StokesData, IntensityDataCreator, IntensityData 3 | from .ConfigReader import ConfigReader 4 | from .imgProcessing import ImgMin 5 | from .aux_utils import loop_pt 6 | from typing import Union 7 | import numpy as np 8 | import cv2 9 | 10 | 11 | class FlatFieldCorrector(object): 12 | """Compute illumination function of fluorescence channels 13 | for flat-field correction 14 | 15 | Parameters 16 | ---------- 17 | img_io: object 18 | mManagerReader object that holds the image parameters 19 | config: object 20 | ConfigReader object that holds the user input config parameters 21 | img_reconstructor: ImgReconstructor 22 | ImgReconstructor object for image reconstruction 23 | background_data: StokesData 24 | object of type StokesData 25 | 26 | Attributes 27 | ---------- 28 | ff_method : str 29 | flat-field correction method. Using morphological opening if 'open' and empty image if 'empty' 30 | img_fluor_min : array 31 | array of empty flourescence images for each channel 32 | img_fluor_sum : array 33 | array of sum of morphologically opened flourescence images for each channel 34 | kernel : obj 35 | kernel for image opening operation 36 | img_fluor_bg : array 37 | Fluorescence channel background for flat-field correction 38 | binning : int 39 | binning (or pooling) size for the images 40 | """ 41 | 42 | def __init__(self, 43 | img_io: Union[mManagerReader, PolAcquReader], 44 | config: ConfigReader, 45 | method='open'): 46 | self.img_io = img_io 47 | self.binning = config.processing.binning 48 | if config.dataset.ROI is None: 49 | self.height, self.width = img_io.height, img_io.width 50 | else: 51 | self.height, self.width = config.dataset.ROI[2], config.dataset.ROI[3] 52 | self.img_fluor_min = np.full((5, img_io.height // img_io.binning, img_io.width // img_io.binning), 53 | np.inf) # set initial min array to to be Inf 54 | self.img_fluor_sum = np.zeros( 55 | (5, img_io.height // img_io.binning, 56 | img_io.width // img_io.binning)) # set the default background to be Ones (uniform field) 57 | assert method in ['open', 'empty'], "flat-field correction method must be 'open' or 'empty'" 58 | self.method = method 59 | self.img_int_creator = IntensityDataCreator(ROI=config.dataset.ROI, binning=self.binning) 60 | self._fluor_chan_names = ['405', '488', '568', '640', 'ex561em700'] 61 | 62 | self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, 63 | (100, 64 | 100)) # kernel for image opening operation, 100-200 is usually good 65 | self.img_fluor_bg = IntensityData(channel_names=self._fluor_chan_names) 66 | for chan_name in self.img_fluor_bg.channel_names: 67 | self.img_fluor_bg.replace_image( 68 | np.ones((self.height // self.binning, self.width // self.binning)), chan_name) 69 | 70 | def compute_flat_field(self): 71 | """ 72 | Compute illumination function of fluorescence channels 73 | for flat-field correction. Computes the illumination function 74 | of fluorescence channels using image opening or looking for empty images, 75 | currently only process the first Z for speed 76 | 77 | """ 78 | if self.method == 'open': 79 | for chan_name in self.img_fluor_bg.channel_names: 80 | self.img_fluor_bg.replace_image( 81 | np.zeros(self.height // self.binning, self.width // self.binning), chan_name) 82 | elif self.method == 'empty': 83 | for chan_name in self.img_fluor_bg.channel_names: 84 | self.img_fluor_bg.replace_image( 85 | np.full((self.height // self.binning, self.width // self.binning), np.inf), chan_name) 86 | 87 | print('Calculating illumination function for flatfield correction...') 88 | self._compute_ff_helper(img_io=self.img_io) 89 | 90 | for chan_name in self.img_fluor_bg.channel_names: 91 | img_bg_new = self.img_fluor_bg.get_image(chan_name) 92 | img_bg_new = img_bg_new - min(np.nanmin(img_bg_new), 0) + 1 #add 1 to avoid 0 93 | img_bg_new /= np.mean(img_bg_new) # normalize the background to have mean = 1 94 | self.img_fluor_bg.replace_image(img_bg_new, chan_name) 95 | 96 | @loop_pt 97 | def _compute_ff_helper(self, 98 | img_io: Union[mManagerReader, PolAcquReader]=None, 99 | ): 100 | 101 | for z_idx in range(0, 1): # only use the first z 102 | img_io.z_idx = z_idx 103 | img_int_raw = self.img_int_creator.get_data_object(img_io) 104 | for chan_idx, chan_name in enumerate(self._fluor_chan_names): 105 | img = img_int_raw.get_image(chan_name) 106 | if np.any(img): # if the flour channel exists 107 | img_bg_new = self.img_fluor_bg.get_image(chan_name) 108 | if self.method == 'open': 109 | img_bg_new += \ 110 | cv2.morphologyEx(img, cv2.MORPH_OPEN, self.kernel, borderType=cv2.BORDER_REPLICATE) 111 | elif self.method == 'empty': 112 | img_bg_new = \ 113 | ImgMin(img, img_bg_new) 114 | self.img_fluor_bg.replace_image(img_bg_new, chan_name) 115 | 116 | def correct_flat_field(self, img_int_sm: IntensityData) -> IntensityData: 117 | """ 118 | flat-field correction for fluorescence channels 119 | Parameters 120 | ---------- 121 | img_int_sm : IntensityData 122 | stack of fluorescence images with with shape (channel, y, x) 123 | 124 | Returns 125 | ------- 126 | img_int_sm : IntensityData 127 | flat-field corrected fluorescence images 128 | """ 129 | 130 | for chan_name in self._fluor_chan_names: 131 | img = img_int_sm.get_image(chan_name) 132 | if np.any(img): # if the flour channel exists 133 | img_int_sm.replace_image(img / self.img_fluor_bg.get_image(chan_name), 134 | chan_name) 135 | return img_int_sm 136 | -------------------------------------------------------------------------------- /ReconstructOrder/utils/imgIO.py: -------------------------------------------------------------------------------- 1 | """ 2 | Read and write Tiff in mManager format. Will be replaced by mManagerIO.py 3 | """ 4 | import os 5 | import numpy as np 6 | import glob 7 | import cv2 8 | from shutil import copy2 9 | import natsort 10 | 11 | 12 | 13 | def get_sorted_names(dir_name): 14 | """ 15 | Get image names in directory and sort them by their indices 16 | 17 | :param str dir_name: Image directory name 18 | :return list of strs im_names: Image names sorted according to indices 19 | """ 20 | im_names = [f for f in os.listdir(dir_name) if f.startswith('im')] 21 | # Sort image names according to indices 22 | return natsort.natsorted(im_names) 23 | 24 | def get_sub_dirs(ImgPath): 25 | """Return sub-directory names in a directory 26 | 27 | Parameters 28 | ---------- 29 | ImgPath: str 30 | Path to the input directory 31 | 32 | Returns 33 | ------- 34 | subDirName: list 35 | list of sub-directory names 36 | """ 37 | assert os.path.exists(ImgPath), 'Input folder does not exist!' 38 | subDirPath = glob.glob(os.path.join(ImgPath, '*/')) 39 | subDirName = [os.path.split(subdir[:-1])[1] for subdir in subDirPath] 40 | # assert subDirName, 'No sub directories found' 41 | return natsort.natsorted(subDirName) 42 | 43 | 44 | def FindDirContain_pos(ImgPath): 45 | """Recursively find the parent directory of "Pos#" directory 46 | Parameters 47 | ---------- 48 | ImgPath: str 49 | Path to the input directory 50 | 51 | Returns 52 | ------- 53 | ImgPath: str 54 | Path to the parent directory of "Pos#" directory 55 | 56 | """ 57 | subDirName = get_sub_dirs(ImgPath) 58 | assert subDirName, 'No "Pos" directories found. Check if the input folder contains "Pos"' 59 | subDir = subDirName[0] # get pos0 if it exists 60 | ImgSubPath = os.path.join(ImgPath, subDir) 61 | if 'Pos' not in subDir: 62 | ImgPath = FindDirContain_pos(ImgSubPath) 63 | return ImgPath 64 | else: 65 | return ImgPath 66 | 67 | 68 | 69 | 70 | def copy_files_in_sub_dirs(input_path, output_path): 71 | """copy files in each sub-directory in the input path to 72 | output path 73 | 74 | Parameters 75 | ---------- 76 | input_path: str 77 | input path 78 | output_path: 79 | output path 80 | 81 | """ 82 | assert os.path.exists(input_path), 'Input folder does not exist!' 83 | os.makedirs(output_path, exist_ok=True) 84 | sub_dir_paths = glob.glob(os.path.join(input_path, '*/')) 85 | for sub_dir_path in sub_dir_paths: 86 | src_file_paths = glob.glob(os.path.join(sub_dir_path, '*.*')) 87 | for src_file_path in src_file_paths: 88 | if os.path.isfile(src_file_path): 89 | copy2(src_file_path, output_path) 90 | 91 | 92 | def loadTiff(acquDirPath, acquFiles): 93 | """Load a single tiff file 94 | 95 | Parameters 96 | ---------- 97 | acquDirPath : str 98 | directory of the tiff file 99 | acquFiles 100 | name of the tiff file 101 | Returns 102 | ------- 103 | img : 2D float32 array 104 | image 105 | 106 | """ 107 | 108 | TiffFile = os.path.join(acquDirPath, acquFiles) 109 | img = cv2.imread(TiffFile,-1) # flag -1 to preserve the bit dept of the raw image 110 | img = img.astype(np.float32, copy=False) # convert to float32 without making a copy to save memory 111 | return img 112 | 113 | 114 | 115 | def sort_pol_channels(img_pol): 116 | """sort Polacquisition output images according to their polarization states 117 | 118 | Parameters 119 | ---------- 120 | img_pol : 3d float32 arrays 121 | images of polarization state channels output by Polacquisition plug-in 122 | 123 | Returns 124 | img_pol : 3d float32 arrays 125 | sorted polarization images in order of I_ext, I_0, I_45, I_90, I_135 126 | ------- 127 | 128 | """ 129 | I_ext = img_pol[0, :, :] # Sigma0 in Fig.2 130 | I_90 = img_pol[1, :, :] # Sigma2 in Fig.2 131 | I_135 = img_pol[2, :, :] # Sigma4 in Fig.2 132 | I_45 = img_pol[3, :, :] # Sigma3 in Fig.2 133 | if img_pol.shape[0] == 4: # if the images were taken using 4-frame scheme 134 | img_pol = np.stack((I_ext, I_45, I_90, I_135)) # order the channel following stokes calculus convention 135 | elif img_pol.shape[0] == 5: # if the images were taken using 5-frame scheme 136 | I_0 = img_pol[4, :, :] 137 | img_pol = np.stack((I_ext, I_0, I_45, I_90, I_135)) # order the channel following stokes calculus convention 138 | return img_pol 139 | 140 | 141 | def export_img(img_io, img_dict, separate_pos=False): 142 | """export images in tiff format 143 | 144 | Parameters 145 | ---------- 146 | img_io : obj 147 | mManagerReader instance 148 | img_dict: dict 149 | dictionary of images with (key, value) = (channel, image array) 150 | separate_pos: bool 151 | save images from different positions in separate folders if True 152 | ------- 153 | 154 | """ 155 | t_idx = img_io.t_idx 156 | z_idx = img_io.z_idx 157 | pos_idx = img_io.pos_idx 158 | if separate_pos: 159 | pos_name = img_io.pos_list[pos_idx] 160 | output_path = os.path.join(img_io.img_output_path, pos_name) 161 | os.makedirs(output_path, exist_ok=True) # create folder for processed images 162 | else: 163 | output_path = img_io.img_output_path 164 | 165 | for tiffName in img_dict: 166 | if tiffName in img_io.output_chans: 167 | fileName = 'img_'+tiffName+'_t%03d_p%03d_z%03d.tif'%(t_idx, pos_idx, z_idx) 168 | if len(img_dict[tiffName].shape)<3: 169 | cv2.imwrite(os.path.join(output_path, fileName), img_dict[tiffName]) 170 | else: 171 | cv2.imwrite(os.path.join(output_path, fileName), cv2.cvtColor(img_dict[tiffName], cv2.COLOR_RGB2BGR)) 172 | 173 | -------------------------------------------------------------------------------- /ReconstructOrder/workflow/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {4/29/19} 2 | 3 | from . import multiDimProcess 4 | from .reconstructBatch import reconstruct_batch -------------------------------------------------------------------------------- /ReconstructOrder/workflow/reconstructBatch.py: -------------------------------------------------------------------------------- 1 | from ..workflow.multiDimProcess import process_background, process_sample_imgs, read_metadata, parse_bg_options, phase_reconstructor_initializer 2 | from ..utils.ConfigReader import ConfigReader 3 | from ..utils.flat_field import FlatFieldCorrector 4 | from ..datastructures import IntensityDataCreator 5 | import os 6 | 7 | 8 | def process_position_list(img_obj_list, config): 9 | """Make sure all members of positions are part of io_obj. 10 | If positions = 'all', replace with actual list of positions 11 | 12 | Parameters 13 | ---------- 14 | img_obj_list: list 15 | list of mManagerReader instances 16 | config: obj 17 | ConfigReader instance 18 | 19 | Returns 20 | ------- 21 | img_obj_list: list 22 | list of modified mManagerReader instances 23 | 24 | """ 25 | for idx, io_obj in enumerate(img_obj_list): 26 | config_pos_list = config.dataset.positions[idx] 27 | 28 | if not config_pos_list[0] == 'all': 29 | try: 30 | img_obj_list[idx].pos_list = config_pos_list 31 | except Exception as e: 32 | print('Position list {} for sample in {} is invalid'.format(config_pos_list, io_obj.img_sm_path)) 33 | ValueError(e) 34 | return img_obj_list 35 | 36 | 37 | def process_z_slice_list(img_obj_list, config): 38 | """Make sure all members of z_slices are part of io_obj. 39 | If z_slices = 'all', replace with actual list of z_slices 40 | 41 | Parameters 42 | ---------- 43 | img_obj_list: list 44 | list of mManagerReader instances 45 | config: obj 46 | ConfigReader instance 47 | 48 | Returns 49 | ------- 50 | img_obj_list: list 51 | list of modified mManagerReader instances 52 | 53 | """ 54 | n_slice_local_bg = config.processing.n_slice_local_bg 55 | for idx, io_obj in enumerate(img_obj_list): 56 | config_z_list = config.dataset.z_slices[idx] 57 | if not config_z_list[0] == 'all': 58 | try: 59 | img_obj_list[idx].z_list = config_z_list 60 | except Exception as e: 61 | print('z_slice list {} for sample in {} is invalid'.format(config_z_list, io_obj.img_sm_path)) 62 | ValueError(e) 63 | if not n_slice_local_bg == 'all': 64 | # adjust slice number to be multiple of n_slice_local_bg 65 | img_obj_list[idx].z_list = \ 66 | img_obj_list[idx].z_list[0:len(img_obj_list[idx].z_list) // n_slice_local_bg * n_slice_local_bg] 67 | return img_obj_list 68 | 69 | 70 | def process_timepoint_list(img_obj_list, config): 71 | """Make sure all members of timepoints are part of io_obj. 72 | If timepoints = 'all', replace with actual list of timepoints 73 | 74 | Parameters 75 | ---------- 76 | img_obj_list: list 77 | list of mManagerReader instances 78 | config: obj 79 | ConfigReader instance 80 | 81 | Returns 82 | ------- 83 | img_obj_list: list 84 | list of modified mManagerReader instances 85 | """ 86 | for idx, io_obj in enumerate(img_obj_list): 87 | config_t_list = config.dataset.timepoints[idx] 88 | 89 | if not config_t_list[0] == 'all': 90 | try: 91 | img_obj_list[idx].t_list = config_t_list 92 | except Exception as e: 93 | print('Timepoint list {} for sample in {} is invalid'.format(config_t_list, io_obj.img_sm_path)) 94 | ValueError(e) 95 | return img_obj_list 96 | 97 | 98 | def _process_one_acqu(img_obj, bg_obj, config): 99 | """ 100 | 101 | Parameters 102 | ---------- 103 | img_obj : mManagerReader 104 | mManagerReader instance for sample images 105 | bg_obj : mManagerReader instance for background images 106 | config : obj 107 | ConfigReader instance 108 | 109 | Returns 110 | ------- 111 | 112 | """ 113 | ph_recon = None 114 | print('Processing ' + img_obj.name + ' ....') 115 | img_int_creator_bg = IntensityDataCreator(ROI=config.dataset.ROI, 116 | binning=config.processing.binning) 117 | 118 | # Write metadata in processed folder 119 | img_obj.writeMetaData() 120 | 121 | # Write config file in processed folder 122 | config.write_config(os.path.join(img_obj.img_output_path, 'config.yml')) # save the config file in the processed folder 123 | 124 | # img_obj, img_reconstructor = process_background(img_obj, bg_obj, config) 125 | stokes_bg_norm, int_bg, img_reconstructor = process_background(img_obj, bg_obj, config, img_int_creator_bg) 126 | 127 | ff_corrector = FlatFieldCorrector(img_obj, config, method='open') 128 | flatField = config.processing.flatfield_correction 129 | if flatField: # find background fluorescence for flatField correction 130 | ff_corrector.compute_flat_field() 131 | # determine if we will initiate phase reconstruction 132 | phase_names = ['Phase2D', 'Phase_semi3D', 'Phase3D'] 133 | save_phase = any(chan in phase_names for chan in img_obj.output_chans) 134 | if save_phase: 135 | ph_recon = phase_reconstructor_initializer(img_obj, config) 136 | 137 | # old int_creator object has bg-channels assigned. Need to create a new one 138 | img_int_creator_sm = IntensityDataCreator(ROI=config.dataset.ROI, 139 | binning=config.processing.binning) 140 | 141 | process_sample_imgs(img_io=img_obj, 142 | config=config, 143 | img_reconstructor=img_reconstructor, 144 | img_int_creator=img_int_creator_sm, 145 | ff_corrector=ff_corrector, 146 | int_bg=int_bg, 147 | stokes_bg=stokes_bg_norm, 148 | ph_recon=ph_recon) 149 | 150 | #TODO: Write log file and metadata at the end of reconstruction 151 | 152 | 153 | def reconstruct_batch(configfile): 154 | config = ConfigReader() 155 | config.read_config(configfile) 156 | 157 | 158 | # read meta data 159 | img_obj_list, bg_obj_list = read_metadata(config) 160 | img_obj_list = process_position_list(img_obj_list, config) 161 | img_obj_list = process_z_slice_list(img_obj_list, config) 162 | img_obj_list = process_timepoint_list(img_obj_list, config) 163 | # process background options 164 | img_obj_list = parse_bg_options(img_obj_list, config) 165 | 166 | for img_obj, bg_obj in zip(img_obj_list, bg_obj_list): 167 | _process_one_acqu(img_obj, bg_obj, config) 168 | -------------------------------------------------------------------------------- /docs/.nojekyll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/.nojekyll -------------------------------------------------------------------------------- /docs/doctrees/compute.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/doctrees/compute.doctree -------------------------------------------------------------------------------- /docs/doctrees/environment.pickle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/doctrees/environment.pickle -------------------------------------------------------------------------------- /docs/doctrees/index.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/doctrees/index.doctree -------------------------------------------------------------------------------- /docs/doctrees/installation.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/doctrees/installation.doctree -------------------------------------------------------------------------------- /docs/doctrees/introduction.doctree: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/doctrees/introduction.doctree -------------------------------------------------------------------------------- /docs/html/.buildinfo: -------------------------------------------------------------------------------- 1 | # Sphinx build info version 1 2 | # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 3 | config: 5df5eea1c79dd8668051a1de58318b19 4 | tags: 645f666f9bcd5a90fca523b33c5a78b7 5 | -------------------------------------------------------------------------------- /docs/html/_sources/compute.rst.txt: -------------------------------------------------------------------------------- 1 | compute 2 | ============== 3 | 4 | contains methods to calculate stokes and physical images from intensity images 5 | 6 | Subpackages 7 | ----------- 8 | 9 | .. automodule:: compute.reconstruct 10 | :members: ImgReconstructor 11 | :undoc-members: 12 | :show-inheritance: 13 | -------------------------------------------------------------------------------- /docs/html/_sources/index.rst.txt: -------------------------------------------------------------------------------- 1 | .. Reconstruct Order documentation master file, created by 2 | sphinx-quickstart on Wed Apr 17 17:25:41 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Reconstruct Order 7 | ============================================= 8 | Reconstruct birefringence, slow axis, transmission, and degree of polarization from polarization-resolved images. 9 | The data is acquired with Micro-Manager and OpenPolScope acquisition plugin. 10 | 11 | 12 | Quick start 13 | ----------- 14 | 15 | ReconstructOrder is available on Python Package Index using Pip. 16 | We highly recommend you install to a separate environment 17 | 18 | .. code-block:: bash 19 | 20 | # USING venv 21 | python3 -m venv /path/to/new/virtual/environment 22 | cd /path/to/new/virtual/environment 23 | activate 24 | 25 | .. code-block:: bash 26 | 27 | # USING anaconda 28 | # from anaconda command prompt, or terminal 29 | conda create -n name-of-new-environment 30 | source activate name-of-new-environment 31 | 32 | Then install ReconstructOrder to this environment 33 | 34 | .. code-block:: bash 35 | 36 | pip install ReconstructOrder 37 | 38 | If you wish to run ReconstructOrder from command line, and not from the python interpreter, 39 | you will need to clone this github repo, and run commands from within. 40 | 41 | .. code-block:: bash 42 | 43 | git clone https://github.com/czbiohub/ReconstructOrder.git 44 | 45 | 46 | Running Reconstruction on your data 47 | ----------------------------------- 48 | 49 | To reconstruct birefringence images, you will need to create a configuration file that reflects your experiment's 50 | parameters. You can see example configurations in this github repo under examples/example_configs. 51 | 52 | Modify paths to your data in there. See "config_example.yml" for detailed description of the fields. It's important 53 | that your data is organized in a hierarchy as described. 54 | 55 | finally, when the config file is ready, run the following: 56 | 57 | FROM PYTHON 58 | 59 | .. code-block:: python 60 | 61 | from ReconstructOrder import workflow as wf 62 | 63 | wf.runReconstruction('path_to_your_config_file') 64 | 65 | 66 | FROM COMMAND LINE 67 | 68 | .. code-block:: bash 69 | 70 | # first navigate to your cloned ReconstructOrder directory 71 | cd ReconstructOrder 72 | python runReconstruction.py --config path_to_your_config_file 73 | 74 | 75 | .. toctree:: 76 | :maxdepth: 2 77 | :caption: Contents: 78 | 79 | introduction 80 | installation 81 | compute 82 | 83 | 84 | Thanks 85 | ------ 86 | 87 | This work is made possible by the Chan-Zuckerberg Biohub 88 | 89 | 90 | 91 | Indices and tables 92 | ================== 93 | 94 | * :ref:`genindex` 95 | * :ref:`modindex` 96 | * :ref:`search` 97 | -------------------------------------------------------------------------------- /docs/html/_sources/installation.rst.txt: -------------------------------------------------------------------------------- 1 | Installation 2 | ============== 3 | 4 | using git clone 5 | --------------- 6 | 7 | install git: (links) 8 | run git clone 9 | 10 | 11 | using pip 12 | --------- 13 | 14 | Not implemented yet 15 | 16 | 17 | Other 18 | ----------------------- 19 | 20 | anything else here 21 | 22 | -------------------------------------------------------------------------------- /docs/html/_sources/introduction.rst.txt: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============== 3 | 4 | Reconstruct Order 5 | ----------------- 6 | 7 | This block is a placeholder for any intro information about this repo 8 | We should discuss this repo's purpose, it's audience, the kind of data etc... 9 | 10 | Requirements 11 | ------------ 12 | 13 | This is a placeholder for hardware/software requirements 14 | 15 | 16 | Other 17 | ----------------------- 18 | 19 | anything else here 20 | 21 | 22 | -------------------------------------------------------------------------------- /docs/html/_static/css/badge_only.css: -------------------------------------------------------------------------------- 1 | .fa:before{-webkit-font-smoothing:antialiased}.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;content:""}.clearfix:after{clear:both}@font-face{font-family:FontAwesome;font-weight:normal;font-style:normal;src:url("../fonts/fontawesome-webfont.eot");src:url("../fonts/fontawesome-webfont.eot?#iefix") format("embedded-opentype"),url("../fonts/fontawesome-webfont.woff") format("woff"),url("../fonts/fontawesome-webfont.ttf") format("truetype"),url("../fonts/fontawesome-webfont.svg#FontAwesome") format("svg")}.fa:before{display:inline-block;font-family:FontAwesome;font-style:normal;font-weight:normal;line-height:1;text-decoration:inherit}a .fa{display:inline-block;text-decoration:inherit}li .fa{display:inline-block}li .fa-large:before,li .fa-large:before{width:1.875em}ul.fas{list-style-type:none;margin-left:2em;text-indent:-0.8em}ul.fas li .fa{width:.8em}ul.fas li .fa-large:before,ul.fas li .fa-large:before{vertical-align:baseline}.fa-book:before{content:""}.icon-book:before{content:""}.fa-caret-down:before{content:""}.icon-caret-down:before{content:""}.fa-caret-up:before{content:""}.icon-caret-up:before{content:""}.fa-caret-left:before{content:""}.icon-caret-left:before{content:""}.fa-caret-right:before{content:""}.icon-caret-right:before{content:""}.rst-versions{position:fixed;bottom:0;left:0;width:300px;color:#fcfcfc;background:#1f1d1d;font-family:"Lato","proxima-nova","Helvetica Neue",Arial,sans-serif;z-index:400}.rst-versions a{color:#2980B9;text-decoration:none}.rst-versions .rst-badge-small{display:none}.rst-versions .rst-current-version{padding:12px;background-color:#272525;display:block;text-align:right;font-size:90%;cursor:pointer;color:#27AE60;*zoom:1}.rst-versions .rst-current-version:before,.rst-versions .rst-current-version:after{display:table;content:""}.rst-versions .rst-current-version:after{clear:both}.rst-versions .rst-current-version .fa{color:#fcfcfc}.rst-versions .rst-current-version .fa-book{float:left}.rst-versions .rst-current-version .icon-book{float:left}.rst-versions .rst-current-version.rst-out-of-date{background-color:#E74C3C;color:#fff}.rst-versions .rst-current-version.rst-active-old-version{background-color:#F1C40F;color:#000}.rst-versions.shift-up{height:auto;max-height:100%;overflow-y:scroll}.rst-versions.shift-up .rst-other-versions{display:block}.rst-versions .rst-other-versions{font-size:90%;padding:12px;color:gray;display:none}.rst-versions .rst-other-versions hr{display:block;height:1px;border:0;margin:20px 0;padding:0;border-top:solid 1px #413d3d}.rst-versions .rst-other-versions dd{display:inline-block;margin:0}.rst-versions .rst-other-versions dd a{display:inline-block;padding:6px;color:#fcfcfc}.rst-versions.rst-badge{width:auto;bottom:20px;right:20px;left:auto;border:none;max-width:300px;max-height:90%}.rst-versions.rst-badge .icon-book{float:none}.rst-versions.rst-badge .fa-book{float:none}.rst-versions.rst-badge.shift-up .rst-current-version{text-align:right}.rst-versions.rst-badge.shift-up .rst-current-version .fa-book{float:left}.rst-versions.rst-badge.shift-up .rst-current-version .icon-book{float:left}.rst-versions.rst-badge .rst-current-version{width:auto;height:30px;line-height:30px;padding:0 6px;display:block;text-align:center}@media screen and (max-width: 768px){.rst-versions{width:85%;display:none}.rst-versions.shift{display:block}} 2 | -------------------------------------------------------------------------------- /docs/html/_static/doctools.js: -------------------------------------------------------------------------------- 1 | /* 2 | * doctools.js 3 | * ~~~~~~~~~~~ 4 | * 5 | * Sphinx JavaScript utilities for all documentation. 6 | * 7 | * :copyright: Copyright 2007-2019 by the Sphinx team, see AUTHORS. 8 | * :license: BSD, see LICENSE for details. 9 | * 10 | */ 11 | 12 | /** 13 | * select a different prefix for underscore 14 | */ 15 | $u = _.noConflict(); 16 | 17 | /** 18 | * make the code below compatible with browsers without 19 | * an installed firebug like debugger 20 | if (!window.console || !console.firebug) { 21 | var names = ["log", "debug", "info", "warn", "error", "assert", "dir", 22 | "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace", 23 | "profile", "profileEnd"]; 24 | window.console = {}; 25 | for (var i = 0; i < names.length; ++i) 26 | window.console[names[i]] = function() {}; 27 | } 28 | */ 29 | 30 | /** 31 | * small helper function to urldecode strings 32 | */ 33 | jQuery.urldecode = function(x) { 34 | return decodeURIComponent(x).replace(/\+/g, ' '); 35 | }; 36 | 37 | /** 38 | * small helper function to urlencode strings 39 | */ 40 | jQuery.urlencode = encodeURIComponent; 41 | 42 | /** 43 | * This function returns the parsed url parameters of the 44 | * current request. Multiple values per key are supported, 45 | * it will always return arrays of strings for the value parts. 46 | */ 47 | jQuery.getQueryParameters = function(s) { 48 | if (typeof s === 'undefined') 49 | s = document.location.search; 50 | var parts = s.substr(s.indexOf('?') + 1).split('&'); 51 | var result = {}; 52 | for (var i = 0; i < parts.length; i++) { 53 | var tmp = parts[i].split('=', 2); 54 | var key = jQuery.urldecode(tmp[0]); 55 | var value = jQuery.urldecode(tmp[1]); 56 | if (key in result) 57 | result[key].push(value); 58 | else 59 | result[key] = [value]; 60 | } 61 | return result; 62 | }; 63 | 64 | /** 65 | * highlight a given string on a jquery object by wrapping it in 66 | * span elements with the given class name. 67 | */ 68 | jQuery.fn.highlightText = function(text, className) { 69 | function highlight(node, addItems) { 70 | if (node.nodeType === 3) { 71 | var val = node.nodeValue; 72 | var pos = val.toLowerCase().indexOf(text); 73 | if (pos >= 0 && 74 | !jQuery(node.parentNode).hasClass(className) && 75 | !jQuery(node.parentNode).hasClass("nohighlight")) { 76 | var span; 77 | var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg"); 78 | if (isInSVG) { 79 | span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); 80 | } else { 81 | span = document.createElement("span"); 82 | span.className = className; 83 | } 84 | span.appendChild(document.createTextNode(val.substr(pos, text.length))); 85 | node.parentNode.insertBefore(span, node.parentNode.insertBefore( 86 | document.createTextNode(val.substr(pos + text.length)), 87 | node.nextSibling)); 88 | node.nodeValue = val.substr(0, pos); 89 | if (isInSVG) { 90 | var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect"); 91 | var bbox = node.parentElement.getBBox(); 92 | rect.x.baseVal.value = bbox.x; 93 | rect.y.baseVal.value = bbox.y; 94 | rect.width.baseVal.value = bbox.width; 95 | rect.height.baseVal.value = bbox.height; 96 | rect.setAttribute('class', className); 97 | addItems.push({ 98 | "parent": node.parentNode, 99 | "target": rect}); 100 | } 101 | } 102 | } 103 | else if (!jQuery(node).is("button, select, textarea")) { 104 | jQuery.each(node.childNodes, function() { 105 | highlight(this, addItems); 106 | }); 107 | } 108 | } 109 | var addItems = []; 110 | var result = this.each(function() { 111 | highlight(this, addItems); 112 | }); 113 | for (var i = 0; i < addItems.length; ++i) { 114 | jQuery(addItems[i].parent).before(addItems[i].target); 115 | } 116 | return result; 117 | }; 118 | 119 | /* 120 | * backward compatibility for jQuery.browser 121 | * This will be supported until firefox bug is fixed. 122 | */ 123 | if (!jQuery.browser) { 124 | jQuery.uaMatch = function(ua) { 125 | ua = ua.toLowerCase(); 126 | 127 | var match = /(chrome)[ \/]([\w.]+)/.exec(ua) || 128 | /(webkit)[ \/]([\w.]+)/.exec(ua) || 129 | /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) || 130 | /(msie) ([\w.]+)/.exec(ua) || 131 | ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) || 132 | []; 133 | 134 | return { 135 | browser: match[ 1 ] || "", 136 | version: match[ 2 ] || "0" 137 | }; 138 | }; 139 | jQuery.browser = {}; 140 | jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true; 141 | } 142 | 143 | /** 144 | * Small JavaScript module for the documentation. 145 | */ 146 | var Documentation = { 147 | 148 | init : function() { 149 | this.fixFirefoxAnchorBug(); 150 | this.highlightSearchWords(); 151 | this.initIndexTable(); 152 | if (DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) { 153 | this.initOnKeyListeners(); 154 | } 155 | }, 156 | 157 | /** 158 | * i18n support 159 | */ 160 | TRANSLATIONS : {}, 161 | PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; }, 162 | LOCALE : 'unknown', 163 | 164 | // gettext and ngettext don't access this so that the functions 165 | // can safely bound to a different name (_ = Documentation.gettext) 166 | gettext : function(string) { 167 | var translated = Documentation.TRANSLATIONS[string]; 168 | if (typeof translated === 'undefined') 169 | return string; 170 | return (typeof translated === 'string') ? translated : translated[0]; 171 | }, 172 | 173 | ngettext : function(singular, plural, n) { 174 | var translated = Documentation.TRANSLATIONS[singular]; 175 | if (typeof translated === 'undefined') 176 | return (n == 1) ? singular : plural; 177 | return translated[Documentation.PLURALEXPR(n)]; 178 | }, 179 | 180 | addTranslations : function(catalog) { 181 | for (var key in catalog.messages) 182 | this.TRANSLATIONS[key] = catalog.messages[key]; 183 | this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')'); 184 | this.LOCALE = catalog.locale; 185 | }, 186 | 187 | /** 188 | * add context elements like header anchor links 189 | */ 190 | addContextElements : function() { 191 | $('div[id] > :header:first').each(function() { 192 | $('\u00B6'). 193 | attr('href', '#' + this.id). 194 | attr('title', _('Permalink to this headline')). 195 | appendTo(this); 196 | }); 197 | $('dt[id]').each(function() { 198 | $('\u00B6'). 199 | attr('href', '#' + this.id). 200 | attr('title', _('Permalink to this definition')). 201 | appendTo(this); 202 | }); 203 | }, 204 | 205 | /** 206 | * workaround a firefox stupidity 207 | * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075 208 | */ 209 | fixFirefoxAnchorBug : function() { 210 | if (document.location.hash && $.browser.mozilla) 211 | window.setTimeout(function() { 212 | document.location.href += ''; 213 | }, 10); 214 | }, 215 | 216 | /** 217 | * highlight the search words provided in the url in the text 218 | */ 219 | highlightSearchWords : function() { 220 | var params = $.getQueryParameters(); 221 | var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : []; 222 | if (terms.length) { 223 | var body = $('div.body'); 224 | if (!body.length) { 225 | body = $('body'); 226 | } 227 | window.setTimeout(function() { 228 | $.each(terms, function() { 229 | body.highlightText(this.toLowerCase(), 'highlighted'); 230 | }); 231 | }, 10); 232 | $('') 234 | .appendTo($('#searchbox')); 235 | } 236 | }, 237 | 238 | /** 239 | * init the domain index toggle buttons 240 | */ 241 | initIndexTable : function() { 242 | var togglers = $('img.toggler').click(function() { 243 | var src = $(this).attr('src'); 244 | var idnum = $(this).attr('id').substr(7); 245 | $('tr.cg-' + idnum).toggle(); 246 | if (src.substr(-9) === 'minus.png') 247 | $(this).attr('src', src.substr(0, src.length-9) + 'plus.png'); 248 | else 249 | $(this).attr('src', src.substr(0, src.length-8) + 'minus.png'); 250 | }).css('display', ''); 251 | if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) { 252 | togglers.click(); 253 | } 254 | }, 255 | 256 | /** 257 | * helper function to hide the search marks again 258 | */ 259 | hideSearchWords : function() { 260 | $('#searchbox .highlight-link').fadeOut(300); 261 | $('span.highlighted').removeClass('highlighted'); 262 | }, 263 | 264 | /** 265 | * make the url absolute 266 | */ 267 | makeURL : function(relativeURL) { 268 | return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL; 269 | }, 270 | 271 | /** 272 | * get the current relative url 273 | */ 274 | getCurrentURL : function() { 275 | var path = document.location.pathname; 276 | var parts = path.split(/\//); 277 | $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() { 278 | if (this === '..') 279 | parts.pop(); 280 | }); 281 | var url = parts.join('/'); 282 | return path.substring(url.lastIndexOf('/') + 1, path.length - 1); 283 | }, 284 | 285 | initOnKeyListeners: function() { 286 | $(document).keyup(function(event) { 287 | var activeElementType = document.activeElement.tagName; 288 | // don't navigate when in search box or textarea 289 | if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') { 290 | switch (event.keyCode) { 291 | case 37: // left 292 | var prevHref = $('link[rel="prev"]').prop('href'); 293 | if (prevHref) { 294 | window.location.href = prevHref; 295 | return false; 296 | } 297 | case 39: // right 298 | var nextHref = $('link[rel="next"]').prop('href'); 299 | if (nextHref) { 300 | window.location.href = nextHref; 301 | return false; 302 | } 303 | } 304 | } 305 | }); 306 | } 307 | }; 308 | 309 | // quick alias for translations 310 | _ = Documentation.gettext; 311 | 312 | $(document).ready(function() { 313 | Documentation.init(); 314 | }); 315 | -------------------------------------------------------------------------------- /docs/html/_static/documentation_options.js: -------------------------------------------------------------------------------- 1 | var DOCUMENTATION_OPTIONS = { 2 | URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'), 3 | VERSION: '0', 4 | LANGUAGE: 'en', 5 | COLLAPSE_INDEX: false, 6 | FILE_SUFFIX: '.html', 7 | HAS_SOURCE: true, 8 | SOURCELINK_SUFFIX: '.txt', 9 | NAVIGATION_WITH_KEYS: false 10 | }; -------------------------------------------------------------------------------- /docs/html/_static/fonts/Inconsolata-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Inconsolata-Bold.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Inconsolata-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Inconsolata-Regular.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Inconsolata.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Inconsolata.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato-Bold.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato-Regular.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bold.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bold.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bold.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bold.woff2 -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bolditalic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bolditalic.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bolditalic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bolditalic.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bolditalic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bolditalic.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-bolditalic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-bolditalic.woff2 -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-italic.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-italic.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-italic.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-italic.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-italic.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-italic.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-italic.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-italic.woff2 -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-regular.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-regular.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-regular.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/Lato/lato-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/Lato/lato-regular.woff2 -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab-Bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab-Bold.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab-Regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab-Regular.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-bold.woff2 -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/RobotoSlab/roboto-slab-v7-regular.woff2 -------------------------------------------------------------------------------- /docs/html/_static/fonts/fontawesome-webfont.eot: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/fontawesome-webfont.eot -------------------------------------------------------------------------------- /docs/html/_static/fonts/fontawesome-webfont.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/fontawesome-webfont.ttf -------------------------------------------------------------------------------- /docs/html/_static/fonts/fontawesome-webfont.woff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/fontawesome-webfont.woff -------------------------------------------------------------------------------- /docs/html/_static/fonts/fontawesome-webfont.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/_static/fonts/fontawesome-webfont.woff2 -------------------------------------------------------------------------------- /docs/html/_static/js/theme.js: -------------------------------------------------------------------------------- 1 | /* sphinx_rtd_theme version 0.4.3 | MIT license */ 2 | /* Built 20190212 16:02 */ 3 | require=function r(s,a,l){function c(e,n){if(!a[e]){if(!s[e]){var i="function"==typeof require&&require;if(!n&&i)return i(e,!0);if(u)return u(e,!0);var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}var o=a[e]={exports:{}};s[e][0].call(o.exports,function(n){return c(s[e][1][n]||n)},o,o.exports,r,s,a,l)}return a[e].exports}for(var u="function"==typeof require&&require,n=0;n"),i("table.docutils.footnote").wrap("
"),i("table.docutils.citation").wrap("
"),i(".wy-menu-vertical ul").not(".simple").siblings("a").each(function(){var e=i(this);expand=i(''),expand.on("click",function(n){return t.toggleCurrent(e),n.stopPropagation(),!1}),e.prepend(expand)})},reset:function(){var n=encodeURI(window.location.hash)||"#";try{var e=$(".wy-menu-vertical"),i=e.find('[href="'+n+'"]');if(0===i.length){var t=$('.document [id="'+n.substring(1)+'"]').closest("div.section");0===(i=e.find('[href="#'+t.attr("id")+'"]')).length&&(i=e.find('[href="#"]'))}0this.docHeight||(this.navBar.scrollTop(i),this.winPosition=n)},onResize:function(){this.winResize=!1,this.winHeight=this.win.height(),this.docHeight=$(document).height()},hashChange:function(){this.linkScroll=!0,this.win.one("hashchange",function(){this.linkScroll=!1})},toggleCurrent:function(n){var e=n.closest("li");e.siblings("li.current").removeClass("current"),e.siblings().find("li.current").removeClass("current"),e.find("> ul li.current").removeClass("current"),e.toggleClass("current")}},"undefined"!=typeof window&&(window.SphinxRtdTheme={Navigation:e.exports.ThemeNav,StickyNav:e.exports.ThemeNav}),function(){for(var r=0,n=["ms","moz","webkit","o"],e=0;e 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | compute — Reconstruct Order 0 documentation 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |
45 | 46 | 97 | 98 |
99 | 100 | 101 | 107 | 108 | 109 |
110 | 111 |
112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 |
130 | 131 |
    132 | 133 |
  • Docs »
  • 134 | 135 |
  • compute
  • 136 | 137 | 138 |
  • 139 | 140 | 141 | View page source 142 | 143 | 144 |
  • 145 | 146 |
147 | 148 | 149 |
150 |
151 |
152 |
153 | 154 |
155 |

compute

156 |

contains methods to calculate stokes and physical images from intensity images

157 |
158 |

Subpackages

159 |
160 |
161 | 162 | 163 |
164 | 165 |
166 |
167 | 168 | 174 | 175 | 176 |
177 | 178 |
179 |

180 | © Copyright 2019, Shalin Mehta 181 | 182 |

183 |
184 | Built with Sphinx using a theme provided by Read the Docs. 185 | 186 |
187 | 188 |
189 |
190 | 191 |
192 | 193 |
194 | 195 | 196 | 197 | 202 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | -------------------------------------------------------------------------------- /docs/html/genindex.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | Index — Reconstruct Order 0 documentation 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |
45 | 46 | 94 | 95 |
96 | 97 | 98 | 104 | 105 | 106 |
107 | 108 |
109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 |
127 | 128 |
    129 | 130 |
  • Docs »
  • 131 | 132 |
  • Index
  • 133 | 134 | 135 |
  • 136 | 137 | 138 | 139 |
  • 140 | 141 |
142 | 143 | 144 |
145 |
146 |
147 |
148 | 149 | 150 |

Index

151 | 152 |
153 | 154 |
155 | 156 | 157 |
158 | 159 |
160 |
161 | 162 | 163 |
164 | 165 |
166 |

167 | © Copyright 2019, Shalin Mehta 168 | 169 |

170 |
171 | Built with Sphinx using a theme provided by Read the Docs. 172 | 173 |
174 | 175 |
176 |
177 | 178 |
179 | 180 |
181 | 182 | 183 | 184 | 189 | 190 | 191 | 192 | 193 | 194 | 195 | 196 | -------------------------------------------------------------------------------- /docs/html/installation.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Installation — Reconstruct Order 0 documentation 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 |
46 | 47 | 100 | 101 |
102 | 103 | 104 | 110 | 111 | 112 |
113 | 114 |
115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 |
133 | 134 |
    135 | 136 |
  • Docs »
  • 137 | 138 |
  • Installation
  • 139 | 140 | 141 |
  • 142 | 143 | 144 | View page source 145 | 146 | 147 |
  • 148 | 149 |
150 | 151 | 152 |
153 |
154 |
155 |
156 | 157 |
158 |

Installation

159 |
160 |

using git clone

161 |

install git: (links) 162 | run git clone

163 |
164 |
165 |

using pip

166 |

Not implemented yet

167 |
168 |
169 |

Other

170 |

anything else here

171 |
172 |
173 | 174 | 175 |
176 | 177 |
178 |
179 | 180 | 188 | 189 | 190 |
191 | 192 |
193 |

194 | © Copyright 2019, Shalin Mehta 195 | 196 |

197 |
198 | Built with Sphinx using a theme provided by Read the Docs. 199 | 200 |
201 | 202 |
203 |
204 | 205 |
206 | 207 |
208 | 209 | 210 | 211 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | -------------------------------------------------------------------------------- /docs/html/introduction.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Introduction — Reconstruct Order 0 documentation 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 |
46 | 47 | 100 | 101 |
102 | 103 | 104 | 110 | 111 | 112 |
113 | 114 |
115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | 132 |
133 | 134 |
    135 | 136 |
  • Docs »
  • 137 | 138 |
  • Introduction
  • 139 | 140 | 141 |
  • 142 | 143 | 144 | View page source 145 | 146 | 147 |
  • 148 | 149 |
150 | 151 | 152 |
153 |
154 |
155 |
156 | 157 |
158 |

Introduction

159 |
160 |

Reconstruct Order

161 |

This block is a placeholder for any intro information about this repo 162 | We should discuss this repo’s purpose, it’s audience, the kind of data etc…

163 |
164 |
165 |

Requirements

166 |

This is a placeholder for hardware/software requirements

167 |
168 |
169 |

Other

170 |

anything else here

171 |
172 |
173 | 174 | 175 |
176 | 177 |
178 |
179 | 180 | 188 | 189 | 190 |
191 | 192 |
193 |

194 | © Copyright 2019, Shalin Mehta 195 | 196 |

197 |
198 | Built with Sphinx using a theme provided by Read the Docs. 199 | 200 |
201 | 202 |
203 |
204 | 205 |
206 | 207 |
208 | 209 | 210 | 211 | 216 | 217 | 218 | 219 | 220 | 221 | 222 | 223 | -------------------------------------------------------------------------------- /docs/html/objects.inv: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/mehta-lab/reconstruct-order/addd06131d1d28e8df9d86aa803c7ebdf9679d52/docs/html/objects.inv -------------------------------------------------------------------------------- /docs/html/search.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | Search — Reconstruct Order 0 documentation 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 |
45 | 46 | 94 | 95 |
96 | 97 | 98 | 104 | 105 | 106 |
107 | 108 |
109 | 110 | 111 | 112 | 113 | 114 | 115 | 116 | 117 | 118 | 119 | 120 | 121 | 122 | 123 | 124 | 125 | 126 |
127 | 128 |
    129 | 130 |
  • Docs »
  • 131 | 132 |
  • Search
  • 133 | 134 | 135 |
  • 136 | 137 | 138 | 139 |
  • 140 | 141 |
142 | 143 | 144 |
145 |
146 |
147 |
148 | 149 | 157 | 158 | 159 |
160 | 161 |
162 | 163 |
164 | 165 |
166 |
167 | 168 | 169 |
170 | 171 |
172 |

173 | © Copyright 2019, Shalin Mehta 174 | 175 |

176 |
177 | Built with Sphinx using a theme provided by Read the Docs. 178 | 179 |
180 | 181 |
182 |
183 | 184 |
185 | 186 |
187 | 188 | 189 | 190 | 195 | 196 | 197 | 198 | 199 | 200 | 203 | 204 | 205 | 206 | 207 | 208 | 209 | -------------------------------------------------------------------------------- /docs/html/searchindex.js: -------------------------------------------------------------------------------- 1 | Search.setIndex({docnames:["compute","index","installation","introduction"],envversion:{"sphinx.domains.c":1,"sphinx.domains.changeset":1,"sphinx.domains.cpp":1,"sphinx.domains.javascript":1,"sphinx.domains.math":2,"sphinx.domains.python":1,"sphinx.domains.rst":1,"sphinx.domains.std":1,sphinx:56},filenames:["compute.rst","index.rst","installation.rst","introduction.rst"],objects:{},objnames:{},objtypes:{},terms:{"final":1,"import":1,"new":1,Not:2,The:1,Then:1,USING:1,about:3,acquir:1,acquisit:1,activ:1,anaconda:1,ani:3,anyth:[2,3],audienc:3,avail:1,axi:1,biohub:1,birefring:1,block:3,calcul:0,can:1,chan:1,clone:1,com:1,command:1,comput:1,conda:1,config:1,config_exampl:1,configur:1,contain:0,content:1,creat:1,czbiohub:1,data:3,degre:1,describ:1,descript:1,detail:1,directori:1,discuss:3,els:[2,3],environ:1,etc:3,exampl:1,example_config:1,experi:1,field:1,file:1,first:1,follow:1,from:[0,1],git:1,github:1,hardwar:3,here:[2,3],hierarchi:1,highli:1,http:1,imag:[0,1],implement:2,index:1,inform:3,instal:1,intens:0,interpret:1,intro:3,introduct:1,kind:3,line:1,link:2,made:1,manag:1,method:0,micro:1,modifi:1,modul:1,name:1,navig:1,need:1,openpolscop:1,organ:1,other:1,packag:1,page:1,paramet:1,path:1,path_to_your_config_fil:1,physic:0,pip:1,placehold:3,plugin:1,polar:1,possibl:1,prompt:1,purpos:3,python3:1,python:1,readi:1,recommend:1,reconstructord:1,reflect:1,repo:[1,3],requir:1,resolv:1,run:2,runreconstruct:1,search:1,see:1,separ:1,should:3,slow:1,softwar:3,sourc:1,stoke:0,subpackag:1,termin:1,thi:[1,3],transmiss:1,under:1,using:1,venv:1,virtual:1,when:1,wish:1,within:1,work:1,workflow:1,yet:2,yml:1,you:1,zuckerberg:1},titles:["compute","Reconstruct Order","Installation","Introduction"],titleterms:{clone:2,comput:0,data:1,git:2,indic:1,instal:2,introduct:3,order:[1,3],other:[2,3],pip:2,quick:1,reconstruct:[1,3],requir:3,run:1,start:1,subpackag:0,tabl:1,thank:1,using:2,your:1}}) -------------------------------------------------------------------------------- /docs/index.html: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /examples/ReconstructExample.py: -------------------------------------------------------------------------------- 1 | import os 2 | import ReconstructOrder.workflow as wf 3 | from google_drive_downloader import GoogleDriveDownloader as gdd 4 | 5 | 6 | 7 | 8 | ### Please specify data to download and process! ### 9 | 10 | process_data = ['mouse_brain', 'mouse_kidney'] 11 | # (list) List specifying dataset to download and process 12 | ## ['mouse_brain', 'mouse_kidney'] 13 | 14 | data_path_parameter = {'mouse_brain' : {'gdd_id': '1pB25UcE2nL5ZOuOaoAxTFHf1D3rbnH3f', 15 | 'zip_path': '/mouse_brain_downloaded.zip', 16 | 'config_path': './data_downloaded/MouseBrain/config.yml'}, 17 | 18 | 'mouse_kidney': {'gdd_id': '1N7TxmohOJRi5kTkvf02RaEoCoAuaQ-X7', 19 | 'zip_path': '/mouse_kidney_downloaded.zip', 20 | 'config_path': './data_downloaded/MouseKidneyTissue/config.yml'}} 21 | 22 | 23 | 24 | 25 | if __name__ == '__main__': 26 | """ 27 | Reconstruct data shared on the google drive. 28 | 29 | Parameters 30 | ---------- 31 | process_data : list 32 | List specifying dataset to download and process 33 | 34 | Returns 35 | -------- 36 | Outputs data to disk. 37 | """ 38 | configfiles = [] 39 | 40 | working_folder = os.getcwd() + '/data_downloaded' 41 | recon_folder = working_folder + '/recon_result' 42 | 43 | if not os.path.isdir(working_folder): 44 | os.mkdir(working_folder) 45 | print("\nsetting up data folder "+working_folder) 46 | 47 | if not os.path.isdir(recon_folder): 48 | os.mkdir(recon_folder) 49 | print("\nsetting up recon folder "+recon_folder) 50 | 51 | 52 | for item in process_data: 53 | 54 | item_gdd_id = data_path_parameter[item]['gdd_id'] 55 | zipdir = working_folder + data_path_parameter[item]['zip_path'] 56 | configfiles.append(data_path_parameter[item]['config_path']) 57 | 58 | gdd.download_file_from_google_drive(file_id=item_gdd_id, 59 | dest_path=zipdir, 60 | unzip=True, 61 | showsize=True, 62 | overwrite=True) 63 | 64 | print('\n--------------') 65 | for configfile in configfiles: 66 | print(configfile + '\n--------------') 67 | wf.reconstruct_batch(configfile) 68 | print('\n--------------') 69 | 70 | 71 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements/default.txt -------------------------------------------------------------------------------- /requirements/default.txt: -------------------------------------------------------------------------------- 1 | numpy>=1.10.0 2 | opencv-python>=3.4.2.16 3 | pandas>=0.24.2 4 | pyyaml>=3.13 5 | matplotlib>=3.0.3 6 | scikit-image>=0.15 7 | scipy>=1.2.1 8 | tifffile>=0.15.1 9 | googledrivedownloader>=0.4 10 | natsort>=7 11 | -------------------------------------------------------------------------------- /requirements/test.txt: -------------------------------------------------------------------------------- 1 | pytest>=5.0.0 2 | googledrivedownloader>=0.4 3 | requests>=2.22.0 4 | -------------------------------------------------------------------------------- /runReconstruction.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | runReconstruction: 5 | Reconstruct birefringence, slow axis, transmission, and degree of polarization from polarization-resolved images. 6 | This script provides a convenient method to workflow process multi-dimensional images acquired with Micro-Manager and OpenPolScope acquisition plugin. 7 | 8 | Parameters 9 | ---------- 10 | --config: path to configuration file. 11 | Returns 12 | ------- 13 | None: the script writes data to disk. 14 | 15 | """ 16 | 17 | import argparse 18 | 19 | from ReconstructOrder.workflow import reconstruct_batch 20 | 21 | 22 | def parse_args(): 23 | """Parse command line arguments 24 | 25 | In python namespaces are implemented as dictionaries 26 | :return: namespace containing the arguments passed. 27 | """ 28 | 29 | parser = argparse.ArgumentParser() 30 | 31 | parser.add_argument('--config', type=str, 32 | help='path to yaml configuration file') 33 | 34 | args = parser.parse_args() 35 | return args 36 | 37 | 38 | if __name__ == '__main__': 39 | args = parse_args() 40 | reconstruct_batch(args.config) 41 | 42 | 43 | 44 | -------------------------------------------------------------------------------- /scripts/channel_registration_3D.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append(".") # Adds current directory to python search path. 3 | sys.path.append("..") # Adds parent directory to python search path. 4 | from scipy.ndimage import affine_transform, sobel 5 | from skimage.feature import register_translation 6 | import os 7 | import json 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | from ReconstructOrder.utils.mManagerIO import mManagerReader 11 | from ReconstructOrder.utils.plotting import CompositeImg, plot_sub_images 12 | 13 | def imshow_pair(images, chann_names, OutputPath, fig_name): 14 | image_pairs = [] 15 | titles = [] 16 | image_ref = images[0] 17 | chann_names_ref = chann_names[0] 18 | for image, chann_name in zip(images[1:], chann_names[1:]): 19 | image_pair = [image_ref, image] 20 | title = ['{}_{}'.format(chann_names_ref, chann_name)] 21 | # plot_sub_images(image_pairs, ['0', '1'], OutputPath, fig_name, colorbar=False) 22 | image_pair_rgb = CompositeImg(image_pair, norm=True) 23 | image_pairs += [image_pair_rgb] 24 | titles += title 25 | plot_sub_images(image_pairs, titles, OutputPath, fig_name, colorbar=False) 26 | 27 | def channel_register(target_images, channels): 28 | """ 29 | Returns a list of shift vectors for unregistered images. 30 | Requires scikit-image 0.15 or above. 31 | :param target_images: list of target images. 32 | :return registration_params: dictionary of channel (key) and its translational shifts [z, y, x] (value) 33 | """ 34 | channels = ['640' if channel == 'ex561em700' else channel for channel in channels] 35 | shifts = [[0,0,0]] 36 | for image in target_images[1:]: 37 | shift, error, phase = register_translation(target_images[0], image, 4) 38 | shifts.append(list(shift)) 39 | registration_params = dict(zip(channels, shifts)) 40 | return registration_params 41 | 42 | def translate_3D(images, 43 | channels, 44 | registration_params, 45 | size_z_um, 46 | binning): 47 | """ 48 | 49 | Parameters 50 | ---------- 51 | images : list 52 | list of images to translate 53 | channels : list 54 | list of channels corresponding to the images 55 | registration_params : dict 56 | dictionary of channel (key) and its translational shifts [z, y, x] (value) 57 | size_z_um : float 58 | z step size in um 59 | binning : int 60 | total xy binning that has been applied during processing compared to the raw images. 61 | 62 | Returns 63 | ------- 64 | registered_images : 65 | """""" 66 | 67 | 68 | @param images: list of images. 69 | @param registration_params: 70 | @return: reg_img - list of registered images. 71 | """ 72 | 73 | # applying transformations to all images except for the first one (first channel is source img). 74 | registered_images = [] 75 | for chan, image in zip(channels, images): 76 | # use shifts of retardance channel for all label-free channels 77 | if chan in ['Retardance', 'Orientation','Orientation_x', 78 | 'Orientation_y', 'Polarization', 'Scattering', 79 | 'Pol_State_0', 'Pol_State_1', 80 | 'Pol_State_2', 'Pol_State_3', 'Pol_State_4', 81 | 'Transmission', 'Brightfield', 'Brightfield_computed', 'phase']: 82 | chan = 'Retardance' 83 | elif chan == 'ex561em700': 84 | chan = '640' 85 | 86 | # Brightfield registration is not robust 87 | # elif chan in ['Transmission', 'Brightfield']: 88 | # chan = '568' 89 | # !!!!"[:]" is necessary to create a copy rather than a reference of the list in the dict!!!! 90 | shift = registration_params[chan][:] 91 | # only warp the image if shift is non-zero 92 | if any(shift): 93 | if size_z_um == 0: # 2D translation 94 | shift[0] = 0 95 | else: 96 | # 3D translation. Scale z-shift according to the z-step size. 97 | shift[0] = shift[0]*registration_params['size_z_um']/size_z_um 98 | if not binning == 1: 99 | shift[1] = shift[1] / binning 100 | shift[2] = shift[2] / binning 101 | image = affine_transform(image, np.ones(3), [-x for x in shift], order=1) 102 | registered_images.append(image) 103 | return registered_images 104 | 105 | def imshow_xy_xz_slice(img_stacks, img_io, y_crop_range, z_crop_range, 106 | y_plot_range, z_plot_range): 107 | for z_idx in range(z_plot_range[0], z_plot_range[1]): 108 | img_io.z_idx = z_idx 109 | output_chan = img_io.output_chans 110 | img_stack = [img[z_idx - z_crop_range[0], :, :] for img in img_stacks] 111 | fig_name = 'img_pair_z%03d.png' % (z_idx) 112 | imshow_pair(img_stack, output_chan, img_io.img_output_path, fig_name) 113 | fig_name = 'img_pair_z%03d_2.png' % (z_idx) 114 | imshow_pair(img_stack[1:] + [img_stack[0]], 115 | output_chan[1:] + [output_chan[0]], img_io.img_output_path, fig_name) 116 | plt.close("all") 117 | 118 | for yIdx in range(y_plot_range[0], y_plot_range[1]): 119 | img_io.yIdx = yIdx 120 | img_stack = [img[:, yIdx - y_crop_range[0], :] for img in img_stacks] 121 | fig_name = 'img_pair_y%03d.png' % (yIdx) 122 | imshow_pair(img_stack, output_chan, img_io.img_output_path, fig_name) 123 | fig_name = 'img_pair_y%03d_2.png' % (yIdx) 124 | imshow_pair(img_stack[1:] + [img_stack[0]], 125 | output_chan[1:] + [output_chan[0]], img_io.img_output_path, fig_name) 126 | plt.close("all") 127 | 128 | def edge_filter_2D(img): 129 | dx = sobel(img, 0) # horizontal derivative 130 | dy = sobel(img, 1) # vertical derivative 131 | img_edge = np.hypot(dx, dy) # magnitude 132 | 133 | return img_edge 134 | 135 | 136 | 137 | if __name__ == '__main__': 138 | RawDataPath = r'Y:/SpinningDisk/RawData/Dragonfly_Calibration' 139 | ProcessedPath = r'Y:/Projects/Dragonfly_Calibration' 140 | # ImgDir = '2019_05_20_Argolight_10X_widefield_zyla' 141 | # SmDir = 'SMS_052019_1842_1_SMS_052019_1842_1_fit_order2' 142 | ImgDir = 'BFalignment_20191114_CG' 143 | SmDir = 'BF_Confocal-DAPI-GFP-RFP-IFP_1' 144 | # input_chan = output_chan = ['640', 'Retardance', 'Brightfield_computed', '405', '488', '568'] # first channel is the reference channel 145 | input_chan = output_chan = ["EMCCD_Confocal40_RFP", 146 | "EMCCD_BF_Confocal", 147 | "EMCCD_Confocal40_DAPI", 148 | "EMCCD_Confocal40_GFP", 149 | "EMCCD_Confocal40_IFP"] # first channel is the reference channel 150 | 151 | z_crop_range = [0, 161] 152 | x_crop_range = [0, 460] 153 | y_crop_range = [0, 383] 154 | z_plot_range = [0, 161] 155 | y_plot_range = [0, 383] 156 | img_sm_path = os.path.join(RawDataPath, ImgDir, SmDir) # Sample image folder path, of form 'SM_yyyy_mmdd_hhmm_X' 157 | OutputPath = os.path.join(ProcessedPath, ImgDir, SmDir,'registration', 'raw') 158 | shift_file_path = os.path.join(ProcessedPath, ImgDir, SmDir, 'registration', 'registration_param_63X.json') 159 | img_io = mManagerReader(img_sm_path, OutputPath, input_chans=input_chan, output_chans=output_chan) 160 | img_io.pos_idx = 0 161 | img_io.t_idx = 0 162 | img_io.binning = 1 163 | target_images = img_io.read_multi_chan_img_stack(z_range=z_crop_range) 164 | os.makedirs(img_io.img_output_path, exist_ok=True) 165 | 166 | target_images_cropped = [target_image[:, y_crop_range[0]:y_crop_range[1], 167 | x_crop_range[0]:x_crop_range[1]] for target_image in target_images] 168 | # use edge filter to change BF image to positive contrast (doesn't work for noisy images) 169 | target_images_filtered = [] 170 | for chan, img in zip(input_chan, target_images_cropped): 171 | if any([name in chan for name in ['Transmission', 'Brightfield', 'BF']]): 172 | imgs_filtered = [] 173 | for z_idx in range(img.shape[2]): 174 | img_filtered = edge_filter_2D(img[:, :, z_idx]) 175 | imgs_filtered.append(img_filtered) 176 | img = np.stack(imgs_filtered, axis=2) 177 | target_images_filtered.append(img) 178 | 179 | imshow_xy_xz_slice(target_images_filtered, img_io, y_crop_range, z_crop_range, 180 | y_plot_range, z_plot_range) 181 | 182 | registration_params = channel_register(target_images_filtered, output_chan) 183 | registration_params['size_z_um'] = size_z_um = img_io.size_z_um 184 | 185 | with open(shift_file_path, 'w') as f: 186 | json.dump(registration_params, f, indent=4) 187 | 188 | target_images_warped = translate_3D(target_images_filtered, 189 | output_chan, 190 | registration_params, 191 | size_z_um, 192 | img_io.binning) 193 | 194 | img_io.img_output_path = os.path.join(ProcessedPath, ImgDir, SmDir,'registration', 'processed') 195 | os.makedirs(img_io.img_output_path, exist_ok=True) 196 | 197 | imshow_xy_xz_slice(target_images_warped, img_io, y_crop_range, z_crop_range, 198 | y_plot_range, z_plot_range) 199 | 200 | -------------------------------------------------------------------------------- /scripts/flatten_data_structure.py: -------------------------------------------------------------------------------- 1 | """Covert data structure from hierarchical to flat""" 2 | from ReconstructOrder.utils import copy_files_in_sub_dirs 3 | import argparse 4 | import os 5 | from shutil import copy2 6 | 7 | def parse_args(): 8 | """Parse command line arguments 9 | :return: namespace containing the arguments passed. 10 | """ 11 | 12 | parser = argparse.ArgumentParser() 13 | 14 | parser.add_argument('--input', type=str, 15 | help='path to input directory') 16 | 17 | args = parser.parse_args() 18 | return args 19 | 20 | if __name__ == '__main__': 21 | args = parse_args() 22 | input_path = args.input 23 | output_path = ''.join([input_path, '_flat']) 24 | copy_files_in_sub_dirs(input_path, output_path) 25 | copy2(os.path.join(input_path, 'meta'), output_path) 26 | -------------------------------------------------------------------------------- /scripts/run_image_registration.py: -------------------------------------------------------------------------------- 1 | import sys 2 | sys.path.append("..") # Adds parent directory to python search path. 3 | import os 4 | from ReconstructOrder.utils.mManagerIO import mManagerReader 5 | import argparse 6 | import yaml 7 | from scripts.channel_registration_3D import translate_3D 8 | import json 9 | from ReconstructOrder.utils.imgProcessing import im_bit_convert 10 | 11 | def parse_args(): 12 | """Parse command line arguments 13 | 14 | In python namespaces are implemented as dictionaries 15 | :return: namespace containing the arguments passed. 16 | """ 17 | 18 | parser = argparse.ArgumentParser() 19 | 20 | parser.add_argument('--config', type=str, 21 | help='path to yaml configuration file') 22 | 23 | args = parser.parse_args() 24 | return args 25 | 26 | def read_config(config_fname): 27 | """Read the config file in yml format 28 | :param str config_fname: fname of config yaml with its full path 29 | :return: dict dictionary of config parameters 30 | """ 31 | 32 | with open(config_fname, 'r') as f: 33 | config = yaml.load(f) 34 | 35 | return config 36 | 37 | def run_action(args): 38 | config = read_config(args.config) 39 | RawDataPath = config['dataset']['RawDataPath'] 40 | ProcessedPath = config['dataset']['ProcessedPath'] 41 | ImgDir = config['dataset']['ImgDir'] 42 | SmDir = config['dataset']['SmDir'] 43 | registration_params_path = config['processing']['registration_params'] 44 | input_chan = output_chan = config['processing']['output_chan'] 45 | binning = config['processing']['binning'] 46 | 47 | img_sm_path = os.path.join(RawDataPath, ImgDir, SmDir) 48 | 49 | OutputPath = os.path.join(ProcessedPath, ImgDir, SmDir+'_registered') 50 | img_io = mManagerReader(img_sm_path, OutputPath, input_chans=input_chan, output_chans=output_chan) 51 | size_z_um = img_io.size_z_um 52 | z_ids = list(range(0, img_io.n_z)) 53 | if 'z_ids' in config['processing']: 54 | z_ids = config['processing']['z_ids'] 55 | with open(registration_params_path, 'r') as f: 56 | registration_params = json.load(f) 57 | 58 | if not os.path.exists(img_io.img_sm_path): 59 | raise FileNotFoundError( 60 | "image file doesn't exist at:", img_io.img_sm_path 61 | ) 62 | os.makedirs(img_io.img_output_path, exist_ok=True) 63 | for t_idx in range(img_io.n_time): 64 | img_io.t_idx = t_idx 65 | for pos_idx in range(img_io.n_pos): # nXY 66 | img_io.pos_idx = pos_idx 67 | print('Processing position %03d, time %03d ...' % (pos_idx, t_idx)) 68 | images = img_io.read_multi_chan_img_stack(z_ids=z_ids) 69 | images_registered = translate_3D(images, 70 | output_chan, 71 | registration_params, 72 | size_z_um, 73 | binning) 74 | 75 | for chan_idx, images in enumerate(images_registered): 76 | img_io.chan_idx = chan_idx 77 | for idx, z_idx in enumerate(z_ids): 78 | img_io.z_idx = z_idx 79 | image = im_bit_convert(images[idx], bit=16, norm=False) 80 | img_io.write_img(image) 81 | 82 | if __name__ == '__main__': 83 | args = parse_args() 84 | run_action(args) -------------------------------------------------------------------------------- /scripts/split_orientaion_components.py: -------------------------------------------------------------------------------- 1 | import os 2 | import numpy as np 3 | from ReconstructOrder.utils import mManagerReader 4 | from ReconstructOrder.utils import imBitConvert 5 | 6 | if __name__ == '__main__': 7 | RawDataPath = '/flexo/ComputationalMicroscopy/Projects/brainarchitecture' 8 | ProcessedPath = RawDataPath 9 | ImgDir = '2019_01_04_david_594CTIP2_647SATB2_20X' 10 | SmDir = 'SMS_2019_0104_1257_2_SMS_2019_0104_1257_2' 11 | input_chan = ['Orientation'] 12 | output_chan = ['Orientation_x', 'Orientation_y'] 13 | img_sm_path = os.path.join(RawDataPath, ImgDir, SmDir) # Sample image folder path, of form 'SM_yyyy_mmdd_hhmm_X' 14 | OutputPath = img_sm_path 15 | img_io = mManagerReader(img_sm_path, OutputPath, input_chan=input_chan, output_chan=output_chan) 16 | 17 | for t_idx in range(img_io.n_time): 18 | img_io.t_idx = t_idx 19 | for pos_idx in range(img_io.n_pos): # nXY 20 | img_io.pos_idx = pos_idx 21 | for z_idx in range(img_io.n_z): 22 | print('Processing position %03d, time %03d z %03d ...' % (pos_idx, t_idx, z_idx)) 23 | img_io.z_idx = z_idx 24 | img_io.chan_idx = 0 25 | azimuth_degree = img_io.read_img() 26 | azimuth = azimuth_degree/18000*np.pi 27 | azimuth_imgs = [np.cos(2 * azimuth), np.sin(2 * azimuth)] 28 | azimuth_imgs = [imBitConvert((img + 1) * 1000, bit=16) for img in azimuth_imgs] # scale to [0, 1000] 29 | for chan_idx, image in enumerate(azimuth_imgs): 30 | img_io.chan_idx = chan_idx 31 | img_io.write_img(image) 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os.path as osp 2 | from setuptools import setup, find_packages 3 | 4 | MIN_PY_VER = '3.6' 5 | DISTNAME = 'ReconstructOrder' 6 | DESCRIPTION = 'Reconstruct birefringence, slow axis, bright-field, and degree of polarization from polarization-resolved images.' 7 | with open("README.md", "r") as fh: 8 | LONG_DESCRIPTION = fh.read() 9 | LONG_DESCRIPTION_content_type = "text/markdown" 10 | # LONG_DESCRIPTION = __doc__ 11 | LICENSE = 'Chan Zuckerberg Biohub Software License' 12 | DOWNLOAD_URL = 'https://github.com/czbiohub/ReconstructOrder' 13 | 14 | INSTALL_REQUIRES = [] 15 | REQUIRES = [] 16 | 17 | CLASSIFIERS = [ 18 | 'License :: OSI Approved :: BSD License', 19 | 'Programming Language :: Python', 20 | 'Programming Language :: Python :: 3 :: Only', 21 | 'Programming Language :: Python :: 3.6', 22 | 'Programming Language :: Python :: 3.7', 23 | 'Topic :: Scientific/Engineering', 24 | 'Topic :: Scientific/Engineering :: Visualization', 25 | 'Topic :: Scientific/Engineering :: Information Analysis', 26 | 'Topic :: Scientific/Engineering :: Bio-Informatics', 27 | 'Topic :: Utilities', 28 | 'Operating System :: Microsoft :: Windows', 29 | 'Operating System :: POSIX', 30 | 'Operating System :: Unix', 31 | 'Operating System :: MacOS' 32 | ] 33 | 34 | # populate packages 35 | PACKAGES = [package for package in find_packages()] 36 | 37 | # parse requirements 38 | with open(osp.join('requirements', 'default.txt')) as f: 39 | requirements = [line.strip() for line in f 40 | if line and not line.startswith('#')] 41 | 42 | # populate requirements 43 | for l in requirements: 44 | sep = l.split(' #') 45 | INSTALL_REQUIRES.append(sep[0].strip()) 46 | if len(sep) == 2: 47 | REQUIRES.append(sep[1].strip()) 48 | 49 | if __name__ == '__main__': 50 | setup( 51 | name=DISTNAME, 52 | description=DESCRIPTION, 53 | long_description=LONG_DESCRIPTION, 54 | long_description_content_type=LONG_DESCRIPTION_content_type, 55 | license=LICENSE, 56 | download_url=DOWNLOAD_URL, 57 | version="0.0.1", 58 | classifiers=CLASSIFIERS, 59 | install_requires=INSTALL_REQUIRES, 60 | requires=REQUIRES, 61 | python_requires=f'>={MIN_PY_VER}', 62 | packages=PACKAGES, 63 | include_package_data=True, 64 | entry_points={ 65 | 'console_scripts': ['runReconstruction=ReconstructOrder.cli_module:main'] 66 | } 67 | ) 68 | -------------------------------------------------------------------------------- /simulations/datastructures.py: -------------------------------------------------------------------------------- 1 | #%% 2 | from ReconstructOrder.compute.reconstruct import ImgReconstructor 3 | from ReconstructOrder.datastructures import PhysicalData 4 | from ReconstructOrder.datastructures.intensity_data import IntensityData 5 | import numpy as np 6 | 7 | #%% 8 | """ 9 | The typical reconstruction workflow is as follows: 10 | 1) load background data 11 | 2) compute stokes on background data then normalize it 12 | 3) load sample data 13 | 4) compute stokes on sample then normalize it 14 | 6) Correct Background 15 | 16 | """ 17 | 18 | #%% 19 | 20 | """ 21 | load data into an Intensity Data object 22 | """ 23 | import os 24 | import tifffile as tf 25 | 26 | int_dat = IntensityData() 27 | int_dat.channel_names = ['IExt','I90','I135','I45','I0'] 28 | 29 | raw_data_path = "/Users/bryant.chhun/Desktop/Data/ForDataStructuresTests/Raw/Sample/Pos0" 30 | int_dat.replace_image(tf.imread(os.path.join(raw_data_path, 'img_000000010_state0_002.tif')), 'IExt') 31 | int_dat.replace_image(tf.imread(os.path.join(raw_data_path, 'img_000000010_state1_002.tif')), 'I90') 32 | int_dat.replace_image(tf.imread(os.path.join(raw_data_path, 'img_000000010_state2_002.tif')), 'I135') 33 | int_dat.replace_image(tf.imread(os.path.join(raw_data_path, 'img_000000010_state3_002.tif')), 'I45') 34 | int_dat.replace_image(tf.imread(os.path.join(raw_data_path, 'img_000000010_state4_002.tif')), 'I0') 35 | 36 | bg_int = IntensityData() 37 | bg_int.channel_names = ['IExt','I90','I135','I45','I0'] 38 | 39 | bg_data_path = "/Users/bryant.chhun/Desktop/Data/ForDataStructuresTests/Raw/Background/Pos0" 40 | bg_int.replace_image(tf.imread(os.path.join(bg_data_path, 'img_000000000_State0 - Acquired Image_000.tif')), 'IExt') 41 | bg_int.replace_image(tf.imread(os.path.join(bg_data_path, 'img_000000000_State1 - Acquired Image_000.tif')), 'I90') 42 | bg_int.replace_image(tf.imread(os.path.join(bg_data_path, 'img_000000000_State2 - Acquired Image_000.tif')), 'I135') 43 | bg_int.replace_image(tf.imread(os.path.join(bg_data_path, 'img_000000000_State3 - Acquired Image_000.tif')), 'I45') 44 | bg_int.replace_image(tf.imread(os.path.join(bg_data_path, 'img_000000000_State4 - Acquired Image_000.tif')), 'I0') 45 | 46 | #%% 47 | """ 48 | reconstructor is initialized based on parameters from the experiment 49 | usually these params are found in the config file parsed from metadata or config.yml 50 | """ 51 | img_reconstructor = ImgReconstructor(bg_int.data.shape, 52 | bg_method="Local_fit", 53 | n_slice_local_bg=1, 54 | poly_fit_order=2, 55 | swing=0.03, 56 | wavelength=532, 57 | azimuth_offset=0, 58 | circularity='rcp') 59 | #%% 60 | 61 | print('computing background') 62 | background_stokes = img_reconstructor.compute_stokes(bg_int) 63 | background_normalized = img_reconstructor.stokes_normalization(background_stokes) 64 | 65 | #%% 66 | 67 | print('computing stokes') 68 | sample_stokes = img_reconstructor.compute_stokes(int_dat) 69 | sample_normalized = img_reconstructor.stokes_normalization(sample_stokes) 70 | 71 | #%% 72 | print('correcting background') 73 | # let's keep a few background correction outputs 74 | 75 | sample_bg_corrected_local_fit = img_reconstructor.correct_background(sample_normalized, background_normalized) 76 | 77 | img_reconstructor.bg_method = "Local_filter" 78 | sample_bg_corrected_local_filter = img_reconstructor.correct_background(sample_normalized, background_normalized) 79 | 80 | img_reconstructor.bg_method = None 81 | sample_bg_corrected_global = img_reconstructor.correct_background(sample_normalized, background_normalized) 82 | 83 | #%% 84 | print('computing physical') 85 | 86 | # let's compute physical using several background correction methods 87 | sample_physical_local_fit = img_reconstructor.reconstruct_birefringence(sample_bg_corrected_local_fit) 88 | 89 | sample_physical_local_filter = img_reconstructor.reconstruct_birefringence(sample_bg_corrected_local_filter) 90 | 91 | sample_physical_global = img_reconstructor.reconstruct_birefringence(sample_bg_corrected_global) 92 | 93 | #%% 94 | from ReconstructOrder.utils.plotting import im_bit_convert 95 | 96 | print('calculate mse') 97 | target_retardance = tf.imread( 98 | '/Users/bryant.chhun/Desktop/Data/ForDataStructuresTests/Processed/img_Retardance_t010_p000_z002.tif') 99 | target_orientation = tf.imread( 100 | '/Users/bryant.chhun/Desktop/Data/ForDataStructuresTests/Processed/img_Orientation_t010_p000_z002.tif') 101 | target_polarization = tf.imread( 102 | '/Users/bryant.chhun/Desktop/Data/ForDataStructuresTests/Processed/img_Polarization_t010_p000_z002.tif') 103 | 104 | 105 | def mse(x, Y): 106 | return np.square(Y-x).mean() 107 | 108 | 109 | print("MSE local fit retardance = "+str(mse( 110 | im_bit_convert(sample_physical_local_fit.retard * 1E4, bit=16, norm=False), 111 | target_retardance))) 112 | 113 | print("MSE local fit orientation = "+str(mse( 114 | im_bit_convert(sample_physical_local_fit.azimuth * 1E4, bit=16, norm=False), 115 | target_retardance))) 116 | 117 | print("MSE local fit polarization = "+str(mse( 118 | im_bit_convert(sample_physical_local_fit.polarization * 1E4, bit=16, norm=False), 119 | target_retardance))) 120 | 121 | 122 | #%% 123 | print('writing data to disk') 124 | from ReconstructOrder.utils.plotting import im_bit_convert 125 | 126 | 127 | def write_birefring(sample_data:PhysicalData, path): 128 | 129 | # 'Brightfield_computed' 130 | tf.imsave(path +"_bf_computed.tif", im_bit_convert(sample_data.I_trans * 1E4, bit=16, norm=False)) # AU, set norm to False for tiling images 131 | 132 | # 'Retardance' 133 | tf.imsave(path +"_retardance.tif", im_bit_convert(sample_data.retard * 1E3, bit=16)) # scale to pm 134 | 135 | # 'Orientation' 136 | tf.imsave(path +"_orientation.tif", im_bit_convert(sample_data.azimuth * 100, bit=16)) # scale to [0, 18000], 100*degree 137 | 138 | # 'Polarization': 139 | tf.imsave(path +"_polarization.tif", im_bit_convert(sample_data.polarization * 50000, bit=16)) 140 | 141 | 142 | TARGET_FILE_FOLDER = '/Users/bryant.chhun/Desktop/Data/ForDataStructuresTests/Raw/untitled folder' 143 | 144 | 145 | write_birefring(sample_physical_local_fit, TARGET_FILE_FOLDER+"/fit") 146 | write_birefring(sample_physical_local_filter, TARGET_FILE_FOLDER+"/filter") 147 | write_birefring(sample_physical_global, TARGET_FILE_FOLDER+"/global") 148 | -------------------------------------------------------------------------------- /simulations/mueller_matrices.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import sympy as sp 4 | import numpy as np 5 | 6 | 7 | def stokes2ellipse(S): 8 | """ 9 | Compute ellipticity and orientation of a polarization state specified by four Stokes parameters. 10 | 11 | :param S: Stokes vector 12 | :return: ellipticity: -1,...,0,...,1 for left circular, linear, and right circular states 13 | :return: orientation: Orientation of polarization ellipse in radian. 14 | """ 15 | s1Norm = S[1] / S[0] 16 | s2Norm = S[2] / S[0] 17 | s3Norm = S[3] / S[0] 18 | linearity = np.sqrt(s1Norm ** 2 + s2Norm ** 2) 19 | 20 | if linearity: 21 | ellipseAngle = np.arctan2(s3Norm, linearity) 22 | ellipt = np.sin(ellipseAngle) 23 | 24 | else: 25 | ellipt = np.sign(s3Norm) 26 | ellipt = np.asscalar(ellipt) 27 | 28 | orient = 0.5 * np.arctan2(s2Norm, s1Norm) 29 | orient = np.asscalar(orient) 30 | 31 | return (ellipt, orient) 32 | 33 | def M_LinearPolarizer(theta=0): 34 | """ 35 | Mueller matrix of ideal linear polarizer rotated at angle theta 36 | 37 | Parameters 38 | ---------- 39 | theta: symbol or float 40 | Angle of the linear polarizer, default value is zero 41 | 42 | """ 43 | M = 0.5 * sp.Matrix([[1, sp.cos(2*theta), sp.sin(2*theta), 0], 44 | [sp.cos(2*theta), sp.cos(2*theta)**2, sp.sin(2*theta)*sp.cos(2*theta), 0], 45 | [sp.sin(2*theta), sp.sin(2*theta)*sp.cos(2*theta), sp.sin(2*theta)**2, 0], 46 | [0, 0, 0, 0]]) 47 | return M 48 | 49 | def M_Diattenuator(theta=0, Tmax=1, Tmin=0): 50 | """ 51 | Mueller matrix of linear diattenuator 52 | 53 | Parameters 54 | ---------- 55 | theta: symbol or float 56 | Angle of the linear polarizer, default value is zero 57 | 58 | Tmax: symbol or float 59 | Maximum transmission of the diattenuator, defaults to one 60 | 61 | Tmin: symbol or float 62 | Minimum transmission of the diattenuator, defaults to zero 63 | 64 | """ 65 | t1 = Tmax + Tmin 66 | t2 = Tmax - Tmin 67 | t3 = 2*sp.sqrt(Tmax*Tmin) 68 | 69 | M = 0.5 * sp.Matrix([[t1, t2*sp.cos(2*theta), t2*sp.sin(2*theta), 0], 70 | [t2*sp.cos(2*theta), t1*sp.cos(2*theta)**2 + t3*sp.sin(2*theta)**2, (t1-t3)*sp.sin(2*theta)*sp.cos(2*theta), 0], 71 | [t2*sp.sin(2*theta), (t1-t3)*sp.sin(2*theta)*sp.cos(2*theta), t1*sp.sin(2*theta)**2 + t3*sp.cos(2*theta)**2, 0], 72 | [0, 0, 0, t3]]) 73 | return M 74 | 75 | def M_Retarder(theta=0, delta=np.pi): 76 | """ 77 | Mueller matrix of linear retarder 78 | 79 | Parameters 80 | ---------- 81 | theta: symbol or float 82 | Angle of fast axis of the retarder, defaults to zero 83 | 84 | delta: symbol or float 85 | Retardance of the retarder, defaults to pi for half-waveplate 86 | """ 87 | M = sp.Matrix([[1, 0, 0, 0], 88 | [0, sp.cos(2*theta)**2+sp.sin(2*theta)**2*sp.cos(delta), sp.sin(2*theta)*sp.cos(2*theta)*(1-sp.cos(delta)), -sp.sin(2*theta)*sp.sin(delta)], 89 | [0, sp.sin(2*theta)*sp.cos(2*theta)*(1-sp.cos(delta)), sp.sin(2*theta)**2+sp.cos(2*theta)**2*sp.cos(delta), sp.cos(2*theta)*sp.sin(delta)], 90 | [0, sp.sin(2*theta)*sp.sin(delta), -sp.cos(2*theta)*sp.sin(delta), sp.cos(delta)]]) 91 | return M 92 | 93 | def M_rotate(M, theta): 94 | """ 95 | Rotates Mueller matrix M by angle theta 96 | 97 | Parameters 98 | ---------- 99 | M: sympy or numpy array 100 | Input Mueller matrix 101 | 102 | theta: symbol or float 103 | Rotation angle 104 | """ 105 | rmTheta = sp.Matrix([[1, 0, 0, 0], 106 | [0, sp.cos(2*theta), sp.sin(2*theta), 0], 107 | [0, -sp.sin(2*theta), sp.cos(2*theta), 0], 108 | [0, 0, 0, 1]]) 109 | 110 | rmNegTheta = sp.Matrix([[1, 0, 0, 0], 111 | [0, sp.cos(2*theta), -sp.sin(2*theta), 0], 112 | [0, sp.sin(2*theta), sp.cos(2*theta), 0], 113 | [0, 0, 0, 1]]) 114 | 115 | M_out = rmNegTheta @ M @ rmTheta 116 | return M_out -------------------------------------------------------------------------------- /simulations/plotting.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import sympy as sp 4 | import numpy as np 5 | import matplotlib.pyplot as plt 6 | 7 | 8 | def plotEllipse(ellipt, orient, size=5, axes=None): 9 | def add_arrow(line, size=20, color='k'): 10 | """ 11 | add an arrow to a line. 12 | 13 | line: Line2D object 14 | position: x-position of the arrow. If None, mean of xdata is taken 15 | direction: 'left' or 'right' 16 | size: size of the arrow in fontsize points 17 | color: if None, line color is taken. 18 | 19 | adapted from: https://stackoverflow.com/questions/34017866/arrow-on-a-line-plot-with-matplotlib 20 | """ 21 | if color is None: 22 | color = line.get_color() 23 | 24 | xdata = line.get_xdata() 25 | ydata = line.get_ydata() 26 | 27 | position = xdata.mean() 28 | # find closest index 29 | start_ind = np.argmin(np.absolute(xdata - position)) 30 | end_ind = start_ind - 1 31 | 32 | line.axes.annotate('', 33 | xytext=(xdata[start_ind], ydata[start_ind]), 34 | xy=(xdata[end_ind], ydata[end_ind]), 35 | arrowprops=dict(arrowstyle="->", color=color), 36 | size=size) 37 | 38 | theta = np.linspace(0, 2 * np.pi, 360) 39 | rotmat = [[np.cos(orient), -np.sin(orient)], [np.sin(orient), np.cos(orient)]] 40 | 41 | x = np.cos(theta) 42 | y = ellipt * np.sin(theta) 43 | (xr, yr) = np.matmul(rotmat, [x.flatten(1), y.flatten(1)]) 44 | 45 | if axes == None: 46 | plt.figure(figsize=[size, size]) 47 | axes = plt.axes() 48 | else: 49 | plt.sca(axes) 50 | axes.axis('square') 51 | line = plt.plot(xr, yr)[0] 52 | add_arrow(line) 53 | plt.xlim(-1, 1) 54 | plt.ylim(-1, 1) -------------------------------------------------------------------------------- /sphinx-docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | SOURCEDIR = source 8 | BUILDDIR = ../docs 9 | 10 | # Put it first so that "make" without argument is like "make help". 11 | help: 12 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 13 | 14 | .PHONY: help Makefile 15 | 16 | # Catch-all target: route all unknown targets to Sphinx using the new 17 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 18 | %: Makefile 19 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) -------------------------------------------------------------------------------- /sphinx-docs/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {4/17/19} 2 | 3 | -------------------------------------------------------------------------------- /sphinx-docs/_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-slate -------------------------------------------------------------------------------- /sphinx-docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /sphinx-docs/source/compute.rst: -------------------------------------------------------------------------------- 1 | compute 2 | ============== 3 | 4 | contains methods to calculate stokes and physical images from intensity images 5 | 6 | Subpackages 7 | ----------- 8 | 9 | .. automodule:: compute.reconstruct 10 | :members: ImgReconstructor 11 | :undoc-members: 12 | :show-inheritance: 13 | -------------------------------------------------------------------------------- /sphinx-docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # http://www.sphinx-doc.org/en/master/config 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | 16 | sys.path.insert(0, os.path.abspath('../..')) 17 | # include mocks if you are having problems importing these 18 | # autodoc_mock_imports = ['numpy', 'tifffile'] 19 | 20 | 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'Reconstruct Order' 24 | copyright = '2019, MehtaLab' 25 | author = 'MehtaLab, CZ Biohub' 26 | 27 | # The full version, including alpha/beta/rc tags 28 | release = '0' 29 | 30 | 31 | # -- General configuration --------------------------------------------------- 32 | 33 | # Add any Sphinx extension module names here, as strings. They can be 34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 35 | # ones. 36 | extensions = ['sphinx.ext.autodoc', 'sphinx.ext.napoleon' 37 | ] 38 | 39 | # Add any paths that contain templates here, relative to this directory. 40 | templates_path = ['_templates'] 41 | 42 | # The suffix(es) of source filenames. 43 | # You can specify multiple suffix as a list of string: 44 | # 45 | # source_suffix = ['.rst', '.md'] 46 | source_suffix = '.rst' 47 | 48 | # The master toctree document. 49 | master_doc = 'index' 50 | 51 | # The language for content autogenerated by Sphinx. Refer to documentation 52 | # for a list of supported languages. 53 | # 54 | # This is also used if you do content translation via gettext catalogs. 55 | # Usually you set "language" from the command line for these cases. 56 | language = 'en' 57 | 58 | # List of patterns, relative to source directory, that match files and 59 | # directories to ignore when looking for source files. 60 | # This pattern also affects html_static_path and html_extra_path. 61 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 62 | 63 | # The name of the Pygments (syntax highlighting) style to use. 64 | pygments_style = None 65 | 66 | 67 | # -- Options for HTML output ------------------------------------------------- 68 | 69 | # The theme to use for HTML and HTML Help pages. See the documentation for 70 | # a list of builtin themes. 71 | # 72 | # html_theme = 'alabaster' 73 | 74 | html_theme = 'sphinx_rtd_theme' 75 | 76 | # Add any paths that contain custom static files (such as style sheets) here, 77 | # relative to this directory. They are copied after the builtin static files, 78 | # so a file named "default.css" will overwrite the builtin "default.css". 79 | html_static_path = ['_static'] 80 | 81 | # -- Options for HTMLHelp output --------------------------------------------- 82 | 83 | # Output file base name for HTML help builder. 84 | htmlhelp_basename = 'Reconstruct_Order' 85 | 86 | -------------------------------------------------------------------------------- /sphinx-docs/source/index.rst: -------------------------------------------------------------------------------- 1 | .. Reconstruct Order documentation master file, created by 2 | sphinx-quickstart on Wed Apr 17 17:25:41 2019. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Reconstruct Order 7 | ============================================= 8 | Reconstruct birefringence, slow axis, transmission, and degree of polarization from polarization-resolved images. 9 | The data is acquired with Micro-Manager and OpenPolScope acquisition plugin. 10 | 11 | 12 | Quick start 13 | ----------- 14 | 15 | ReconstructOrder is available on Python Package Index using Pip. 16 | We highly recommend you install to a separate environment 17 | 18 | .. code-block:: bash 19 | 20 | # USING venv 21 | python3 -m venv /path/to/new/virtual/environment 22 | cd /path/to/new/virtual/environment 23 | activate 24 | 25 | .. code-block:: bash 26 | 27 | # USING anaconda 28 | # from anaconda command prompt, or terminal 29 | conda create -n name-of-new-environment 30 | source activate name-of-new-environment 31 | 32 | Then install ReconstructOrder to this environment 33 | 34 | .. code-block:: bash 35 | 36 | pip install ReconstructOrder 37 | 38 | If you wish to run ReconstructOrder from command line, and not from the python interpreter, 39 | you will need to clone this github repo, and run commands from within. 40 | 41 | .. code-block:: bash 42 | 43 | git clone https://github.com/czbiohub/ReconstructOrder.git 44 | 45 | 46 | Running Reconstruction on your data 47 | ----------------------------------- 48 | 49 | To reconstruct birefringence images, you will need to create a configuration file that reflects your experiment's 50 | parameters. You can see example configurations in this github repo under examples/example_configs. 51 | 52 | Modify paths to your data in there. See "config_example.yml" for detailed description of the fields. It's important 53 | that your data is organized in a hierarchy as described. 54 | 55 | finally, when the config file is ready, run the following: 56 | 57 | FROM PYTHON 58 | 59 | .. code-block:: python 60 | 61 | from ReconstructOrder import workflow as wf 62 | 63 | wf.runReconstruction('path_to_your_config_file') 64 | 65 | 66 | FROM COMMAND LINE 67 | 68 | .. code-block:: bash 69 | 70 | # first navigate to your cloned ReconstructOrder directory 71 | cd ReconstructOrder 72 | python runReconstruction.py --config path_to_your_config_file 73 | 74 | 75 | .. toctree:: 76 | :maxdepth: 2 77 | :caption: Contents: 78 | 79 | introduction 80 | installation 81 | compute 82 | 83 | 84 | Thanks 85 | ------ 86 | 87 | This work is made possible by the Chan-Zuckerberg Biohub 88 | 89 | 90 | 91 | Indices and tables 92 | ================== 93 | 94 | * :ref:`genindex` 95 | * :ref:`modindex` 96 | * :ref:`search` 97 | -------------------------------------------------------------------------------- /sphinx-docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | Installation 2 | ============== 3 | 4 | using git clone 5 | --------------- 6 | 7 | install git: (links) 8 | run git clone 9 | 10 | 11 | using pip 12 | --------- 13 | 14 | Not implemented yet 15 | 16 | 17 | Other 18 | ----------------------- 19 | 20 | anything else here 21 | 22 | -------------------------------------------------------------------------------- /sphinx-docs/source/introduction.rst: -------------------------------------------------------------------------------- 1 | Introduction 2 | ============== 3 | 4 | Reconstruct Order 5 | ----------------- 6 | 7 | This block is a placeholder for any intro information about this repo 8 | We should discuss this repo's purpose, it's audience, the kind of data etc... 9 | 10 | Requirements 11 | ------------ 12 | 13 | This is a placeholder for hardware/software requirements 14 | 15 | 16 | Other 17 | ----------------------- 18 | 19 | anything else here 20 | 21 | 22 | -------------------------------------------------------------------------------- /sphinx-docs/source/usage.rst: -------------------------------------------------------------------------------- 1 | Usage 2 | ----- 3 | There are two ways to run reconstruction: 4 | 5 | **from command line** 6 | 7 | If you pip installed the library, from any folder, simply type: 8 | 9 | .. code-block:: bash 10 | 11 | runReconstruction --config path-and-name-to-your-config.yml 12 | 13 | If you cloned the developer repo, navigate to the repo and call the script: 14 | 15 | .. code-block:: bash 16 | 17 | (C:\ReconstructOrder\) python runReconstruction.py --config path-and-name-to-your-config.yml 18 | 19 | 20 | **from IPython** 21 | 22 | If you are writing your own code and want to use the ReconstructOrder library, you can reconstruct as follows: 23 | 24 | .. code-block:: python 25 | 26 | import ReconstructOrder.workflow as wf 27 | wf.reconstruct_batch('path-and-name-to-your-config.yml') 28 | 29 | -------------------------------------------------------------------------------- /sphinx-docs/source/utils.rst: -------------------------------------------------------------------------------- 1 | Utils 2 | ============== 3 | 4 | The Utils package contains methods useful for .yml configuration file reading, for image metadata parsing, image processing, and image plotting. 5 | 6 | Subpackages 7 | ----------- 8 | 9 | .. automodule:: ReconstructOrder.utils.ConfigReader 10 | :members: ConfigReader 11 | :undoc-members: 12 | :show-inheritance: 13 | 14 | .. automodule:: ReconstructOrder.utils.imgIO 15 | :members: get_sub_dirs, FindDirContain_pos 16 | :undoc-members: 17 | :show-inheritance: -------------------------------------------------------------------------------- /sphinx-docs/source/workflow.rst: -------------------------------------------------------------------------------- 1 | Workflow 2 | ============== 3 | 4 | The workflow package contains methods to iterate through your data and compute label-free images based on configuration file parameters 5 | 6 | Subpackages 7 | ----------- 8 | 9 | .. automodule:: ReconstructOrder.workflow.reconstruct_batch 10 | :members: reconstruct_batch 11 | :undoc-members: 12 | :show-inheritance: 13 | -------------------------------------------------------------------------------- /tests/TestData.txt: -------------------------------------------------------------------------------- 1 | Download the test data from https://bit.ly/2XHttWn and extract it in this folder as TestData/ 2 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {4/26/19} 2 | 3 | from . import testMetrics 4 | -------------------------------------------------------------------------------- /tests/datastructures_tests/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {2019-07-17} 2 | -------------------------------------------------------------------------------- /tests/datastructures_tests/conftest.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | import os 4 | from ReconstructOrder.datastructures.intensity_data import IntensityData 5 | from ReconstructOrder.datastructures.stokes_data import StokesData 6 | from ReconstructOrder.datastructures.physical_data import PhysicalData 7 | 8 | 9 | """ 10 | pytest fixtures are "setup" code that makes resources available for tests 11 | pass a keyword scope = "session", or scope = "module" if the fixture would not 12 | be re-created for each test. 13 | """ 14 | 15 | 16 | @pytest.fixture 17 | def setup_temp_data(): 18 | """ 19 | resource for memory mapped files 20 | 21 | :return: 22 | """ 23 | temp_folder = os.getcwd()+'/temp' 24 | if not os.path.isdir(temp_folder): 25 | os.mkdir(temp_folder) 26 | print("\nsetting up temp folder") 27 | 28 | # setup files, provide memmap type 29 | img = np.random.random((2048, 2048)) 30 | np.save(temp_folder+'/img.npy', img) 31 | print("setting up temp file") 32 | yield np.memmap(temp_folder+'/img.npy', shape=(2048, 2048), dtype=np.uint8) 33 | 34 | # breakdown files 35 | if os.path.isfile(temp_folder+'/img.npy'): 36 | os.remove(temp_folder+'/img.npy') 37 | print("\nbreaking down temp file") 38 | if os.path.isdir(temp_folder): 39 | os.rmdir(temp_folder) 40 | print("breaking down temp folder") 41 | 42 | 43 | @pytest.fixture 44 | def setup_temp_int_stk_phys(setup_temp_data): 45 | # reference data is a memory mapped file 46 | mm = setup_temp_data 47 | 48 | int_data = IntensityData() 49 | int_data.append_image(mm) 50 | int_data.append_image(2 * mm) 51 | int_data.append_image(3 * mm) 52 | int_data.append_image(4 * mm) 53 | int_data.append_image(5 * mm) 54 | 55 | stk_data = StokesData() 56 | stk_data.s0 = 10*mm 57 | stk_data.s1 = 20*mm 58 | stk_data.s2 = 30*mm 59 | stk_data.s3 = 40*mm 60 | 61 | phys_data = PhysicalData() 62 | phys_data.I_trans = 100*mm 63 | phys_data.polarization = 200*mm 64 | phys_data.retard = 300*mm 65 | phys_data.depolarization = 400*mm 66 | phys_data.azimuth = 500*mm 67 | phys_data.azimuth_degree = 600*mm 68 | phys_data.azimuth_vector = 700*mm 69 | 70 | yield int_data, stk_data, phys_data 71 | 72 | 73 | @pytest.fixture 74 | def setup_int_stk_phys(setup_intensity_data, setup_stokes_data, setup_physical_data): 75 | intensity, _, _, _, _, _ = setup_intensity_data 76 | stokes = setup_stokes_data 77 | physical = setup_physical_data 78 | 79 | yield intensity, stokes, physical 80 | 81 | 82 | @pytest.fixture 83 | def setup_intensity_data(): 84 | """ 85 | resource for IntensityData, no channel names 86 | 87 | :return: Intensity Object, component arrays 88 | """ 89 | int_data = IntensityData() 90 | 91 | a = np.ones((512, 512)) 92 | b = 2*np.ones((512, 512)) 93 | c = 3*np.ones((512, 512)) 94 | d = 4*np.ones((512, 512)) 95 | e = 5*np.ones((512, 512)) 96 | 97 | int_data.append_image(a) 98 | int_data.append_image(b) 99 | int_data.append_image(c) 100 | int_data.append_image(d) 101 | int_data.append_image(e) 102 | 103 | yield int_data, a, b, c, d, e 104 | 105 | 106 | @pytest.fixture 107 | def setup_stokes_data(): 108 | """ 109 | resource for Stokes Data 110 | 111 | :return: Stokes Data Object 112 | """ 113 | stk = StokesData() 114 | 115 | a = 10*np.ones((512, 512)) 116 | b = 20*np.ones((512, 512)) 117 | c = 30*np.ones((512, 512)) 118 | d = 40*np.ones((512, 512)) 119 | 120 | stk.s0 = a 121 | stk.s1 = b 122 | stk.s2 = c 123 | stk.s3 = d 124 | 125 | yield stk 126 | 127 | 128 | @pytest.fixture 129 | def setup_physical_data(): 130 | """ 131 | resource for PhysicalData 132 | 133 | :return: PhysicalData object 134 | """ 135 | phys = PhysicalData() 136 | 137 | phys.I_trans = 100*np.ones((512, 512)) 138 | phys.polarization = 200*np.ones((512, 512)) 139 | phys.retard = 300*np.ones((512, 512)) 140 | phys.depolarization = 400*np.ones((512, 512)) 141 | phys.azimuth = 500*np.ones((512, 512)) 142 | phys.azimuth_degree = 600*np.ones((512, 512)) 143 | phys.azimuth_vector = 700*np.ones((512, 512)) 144 | 145 | yield phys 146 | 147 | 148 | @pytest.fixture 149 | def setup_inst_matrix(): 150 | """ 151 | resource to create a 5-frame default instrument matrix 152 | 153 | :return: the INVERSE instrument matrix 154 | """ 155 | chi = 0.03*2*np.pi # if the images were taken using 5-frame scheme 156 | inst_mat = np.array([[1, 0, 0, -1], 157 | [1, np.sin(chi), 0, -np.cos(chi)], 158 | [1, 0, np.sin(chi), -np.cos(chi)], 159 | [1, -np.sin(chi), 0, -np.cos(chi)], 160 | [1, 0, -np.sin(chi), -np.cos(chi)]]) 161 | 162 | iim = np.linalg.pinv(inst_mat) 163 | yield iim 164 | 165 | 166 | @pytest.fixture 167 | def setup_ndarrays(): 168 | """ 169 | resource to create various numpy arrays of different dimensions 170 | 171 | :return: 172 | """ 173 | p = np.ones(shape=(512,512,1,2)) 174 | q = np.ones(shape=(32,32,5)) 175 | r = np.zeros(shape=(32,32,5,2,3)) 176 | 177 | yield p, q, r -------------------------------------------------------------------------------- /tests/datastructures_tests/intensity_data_test.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | from numpy.testing import assert_array_equal 4 | from ReconstructOrder.datastructures.intensity_data import IntensityData 5 | 6 | # ==== test basic construction ===== 7 | 8 | 9 | def test_basic_constructor_nparray(): 10 | """ 11 | test assignment using numpy arrays 12 | """ 13 | int_data = IntensityData() 14 | 15 | a = np.ones((512, 512)) 16 | b = 2*np.ones((512, 512)) 17 | c = 3*np.ones((512, 512)) 18 | d = 4*np.ones((512, 512)) 19 | e = 5*np.ones((512, 512)) 20 | 21 | int_data.append_image(a) 22 | int_data.append_image(b) 23 | int_data.append_image(c) 24 | int_data.append_image(d) 25 | int_data.append_image(e) 26 | 27 | assert_array_equal(int_data.get_image(0), a) 28 | assert_array_equal(int_data.get_image(1), b) 29 | assert_array_equal(int_data.get_image(2), c) 30 | assert_array_equal(int_data.get_image(3), d) 31 | assert_array_equal(int_data.get_image(4), e) 32 | 33 | assert_array_equal(int_data.data, np.array([a, b, c, d, e])) 34 | 35 | 36 | def test_basic_constructor_memap(setup_temp_data): 37 | """ 38 | test assignment using memory mapped files 39 | """ 40 | 41 | mm = setup_temp_data 42 | int_data = IntensityData() 43 | 44 | int_data.append_image(mm) 45 | int_data.append_image(2 * mm) 46 | int_data.append_image(3 * mm) 47 | int_data.append_image(4 * mm) 48 | int_data.append_image(5 * mm) 49 | 50 | assert_array_equal(int_data.get_image(0), mm) 51 | assert_array_equal(int_data.get_image(1), 2*mm) 52 | assert_array_equal(int_data.get_image(2), 3*mm) 53 | assert_array_equal(int_data.get_image(3), 4*mm) 54 | assert_array_equal(int_data.get_image(4), 5*mm) 55 | 56 | assert_array_equal(int_data.data, np.array([mm, 2*mm, 3*mm, 4*mm, 5*mm])) 57 | 58 | 59 | 60 | def test_basic_constructor_with_names(): 61 | """ 62 | test construction with channel names 63 | 64 | Returns 65 | ------- 66 | 67 | """ 68 | int_data = IntensityData() 69 | int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135'] 70 | 71 | a = np.ones((512, 512)) 72 | b = 2 * np.ones((512, 512)) 73 | c = 3 * np.ones((512, 512)) 74 | d = 4 * np.ones((512, 512)) 75 | e = 5 * np.ones((512, 512)) 76 | 77 | int_data.replace_image(a, 'IExt') 78 | int_data.replace_image(b, 'I0') 79 | int_data.replace_image(c, 'I45') 80 | int_data.replace_image(d, 'I90') 81 | int_data.replace_image(e, 'I135') 82 | 83 | assert_array_equal(int_data.get_image("IExt"), a) 84 | 85 | 86 | def test_basic_constructor_without_names(): 87 | """ 88 | test construction with channel names 89 | 90 | Returns 91 | ------- 92 | 93 | """ 94 | int_data = IntensityData() 95 | # int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135'] 96 | 97 | a = np.ones((512, 512)) 98 | b = 2 * np.ones((512, 512)) 99 | c = 3 * np.ones((512, 512)) 100 | d = 4 * np.ones((512, 512)) 101 | e = 5 * np.ones((512, 512)) 102 | 103 | int_data.append_image(a) 104 | int_data.append_image(b) 105 | int_data.append_image(c) 106 | int_data.append_image(d) 107 | int_data.append_image(e) 108 | 109 | assert_array_equal(int_data.get_image(0), a) 110 | 111 | 112 | # ==== test instances and private/public access ===== 113 | 114 | def test_instances(): 115 | """ 116 | test instance attributes 117 | """ 118 | I1 = IntensityData() 119 | I2 = IntensityData() 120 | 121 | with pytest.raises(AssertionError): 122 | assert(I1 == I2) 123 | 124 | with pytest.raises(AssertionError): 125 | I1.append_image(np.ones((32, 32))) 126 | I2.append_image(np.ones((64, 64))) 127 | assert_array_equal(I1.get_image(0),I2.get_image(0)) 128 | 129 | 130 | def test_private_access(setup_intensity_data): 131 | """ 132 | should not have access to private variables 133 | access is restricted to setters/getters 134 | """ 135 | int_data, a, b, c, d, e = setup_intensity_data 136 | with pytest.raises(AttributeError): 137 | print(int_data.__IExt) 138 | with pytest.raises(AttributeError): 139 | print(int_data.__I0) 140 | 141 | 142 | # ==== test methods ===== 143 | 144 | 145 | # replace_image method 146 | def test_replace_image_shape(setup_intensity_data): 147 | int_data, a, b, c, d, e = setup_intensity_data 148 | 149 | newim = np.ones((5,5)) 150 | with pytest.raises(ValueError): 151 | int_data.replace_image(newim, 0) 152 | 153 | 154 | def test_replace_image_dtype(setup_intensity_data): 155 | int_data, a, b, c, d, e = setup_intensity_data 156 | 157 | newim = 0 158 | with pytest.raises(TypeError): 159 | int_data.replace_image(newim, 0) 160 | 161 | 162 | 163 | def test_replace_image_by_index(setup_intensity_data): 164 | int_data, a, b, c, d, e = setup_intensity_data 165 | 166 | newim = np.ones((512, 512)) 167 | int_data.replace_image(newim, 0) 168 | assert_array_equal(int_data.data[0], newim) 169 | 170 | 171 | 172 | def test_replace_image_by_string(setup_intensity_data): 173 | int_data, a, b, c, d, e = setup_intensity_data 174 | 175 | int_data.channel_names = ['IExt', 'I0', 'I45', 'I90', 'I135'] 176 | 177 | newim = np.ones((512,512)) 178 | int_data.replace_image(newim, 'I90') 179 | assert_array_equal(int_data.get_image('I90'), newim) 180 | 181 | 182 | # channel_names property 183 | def test_channel_names(setup_intensity_data): 184 | int_data, a, b, c, d, e = setup_intensity_data 185 | 186 | names = ['a','b','c','d','e'] 187 | 188 | int_data.channel_names = names 189 | 190 | 191 | 192 | # get_image method 193 | def test_get_image_str(setup_intensity_data): 194 | """ 195 | test query by string channel name 196 | """ 197 | int_data, a, b, c, d, e = setup_intensity_data 198 | 199 | names = ['a','b','c','d','e'] 200 | int_data.channel_names = names 201 | 202 | dat = int_data.get_image('e') 203 | assert(dat.shape, (512,512)) 204 | assert(dat[0][0], 5) 205 | 206 | 207 | def test_get_img_str_undef(setup_intensity_data): 208 | """ 209 | test exception handling of query by string channel name 210 | """ 211 | int_data, a, b, c, d, e = setup_intensity_data 212 | 213 | names = ['a','b','c','d','e','f','g','h'] 214 | int_data.channel_names = names 215 | 216 | with pytest.raises(ValueError): 217 | dat = int_data.get_image('q') 218 | 219 | 220 | def test_get_image_int(setup_intensity_data): 221 | """ 222 | test query by int channel index 223 | """ 224 | int_data, a, b, c, d, e = setup_intensity_data 225 | names = ['a','b','c','d','e'] 226 | int_data.channel_names = names 227 | 228 | dat = int_data.get_image(4) 229 | assert(dat.shape, (512,512)) 230 | assert(dat[0][0], 5) 231 | 232 | 233 | 234 | # axis_names property 235 | def test_axis_names(setup_intensity_data): 236 | int_data, a, b, c, d, e = setup_intensity_data 237 | names = ['c', 'x', 'y', 'z', 't'] 238 | int_data.axis_names = names 239 | 240 | assert(int_data.axis_names, names) 241 | 242 | 243 | # ==== test data dimensions ===== 244 | 245 | def test_ndims_1(setup_ndarrays): 246 | """ 247 | test that shape is preserved 248 | """ 249 | p, q, r = setup_ndarrays 250 | int_data = IntensityData() 251 | 252 | int_data.append_image(p) 253 | int_data.append_image(p) 254 | int_data.append_image(p) 255 | 256 | assert(int_data.data[0].shape == p.shape) 257 | assert(int_data.data.shape == (3,)+p.shape) 258 | 259 | 260 | def test_ndims_2(setup_ndarrays): 261 | """ 262 | test exception handling for image data that is not \ 263 | numpy array or numpy memmap 264 | """ 265 | int_data = IntensityData() 266 | 267 | with pytest.raises(TypeError): 268 | int_data.append_image(1) 269 | with pytest.raises(TypeError): 270 | int_data.append_image([1, 2, 3]) 271 | with pytest.raises(TypeError): 272 | int_data.append_image({1, 2, 3}) 273 | with pytest.raises(TypeError): 274 | int_data.append_image((1, 2, 3)) 275 | 276 | 277 | def test_ndims_3(setup_ndarrays): 278 | """ 279 | test exception handling upon assignment of dim mismatch image 280 | """ 281 | p, q, r = setup_ndarrays 282 | int_data = IntensityData() 283 | 284 | int_data.append_image(p) 285 | 286 | with pytest.raises(ValueError): 287 | int_data.append_image(q) 288 | 289 | 290 | # ==== Attribute assignment ========== 291 | 292 | def test_assignment(setup_intensity_data): 293 | """ 294 | test exception handling of improper assignment 295 | """ 296 | int_data, a, b, c, d, e = setup_intensity_data 297 | with pytest.raises(TypeError): 298 | int_data.Iext = a 299 | with pytest.raises(TypeError): 300 | int_data.__IExt = a 301 | 302 | 303 | def test_set_data(setup_intensity_data): 304 | """ 305 | test that neither data nor frames are set-able attributes 306 | """ 307 | int_data, a, b, c, d, e = setup_intensity_data 308 | with pytest.raises(AttributeError): 309 | int_data.data = 0 310 | with pytest.raises(AttributeError): 311 | int_data.num_channels = 0 312 | -------------------------------------------------------------------------------- /tests/datastructures_tests/physical_data_tests.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest, os 3 | from numpy.testing import assert_array_equal 4 | from ReconstructOrder.datastructures.physical_data import PhysicalData 5 | 6 | 7 | def test_basic_constructor_nparray(): 8 | """ 9 | test assignment using numpy arrays 10 | """ 11 | 12 | phys = PhysicalData() 13 | 14 | phys.I_trans = np.ones((512, 512)) 15 | phys.polarization = 2 * np.ones((512, 512)) 16 | phys.retard = 3 * np.ones((512, 512)) 17 | phys.depolarization = 4 * np.ones((512, 512)) 18 | phys.azimuth = 5 * np.ones((512, 512)) 19 | phys.azimuth_degree = 6 * np.ones((512, 512)) 20 | phys.azimuth_vector = 7 * np.ones((512, 512)) 21 | 22 | assert_array_equal(phys.I_trans, np.ones((512, 512))) 23 | assert_array_equal(phys.polarization, 2*np.ones((512, 512))) 24 | assert_array_equal(phys.retard, 3*np.ones((512, 512))) 25 | assert_array_equal(phys.depolarization, 4*np.ones((512, 512))) 26 | assert_array_equal(phys.azimuth, 5*np.ones((512, 512))) 27 | assert_array_equal(phys.azimuth_degree, 6*np.ones((512, 512))) 28 | assert_array_equal(phys.azimuth_vector, 7*np.ones((512, 512))) 29 | 30 | 31 | def test_basic_constructor_memap(setup_temp_data): 32 | """ 33 | test assignment using memory mapped files 34 | """ 35 | 36 | mm = setup_temp_data 37 | phys = PhysicalData() 38 | 39 | phys.I_trans = mm 40 | phys.polarization = 2 * mm 41 | phys.retard = 3 * mm 42 | phys.depolarization = 4 * mm 43 | phys.azimuth = 5 * mm 44 | phys.azimuth_degree = 6 * mm 45 | phys.azimuth_vector = 7 * mm 46 | 47 | assert_array_equal(phys.I_trans, mm) 48 | assert_array_equal(phys.polarization, 2*mm) 49 | assert_array_equal(phys.retard, 3*mm) 50 | assert_array_equal(phys.depolarization, 4*mm) 51 | assert_array_equal(phys.azimuth, 5*mm) 52 | assert_array_equal(phys.azimuth_degree, 6*mm) 53 | assert_array_equal(phys.azimuth_vector, 7*mm) 54 | 55 | 56 | def test_instances(): 57 | """ 58 | test instance attributes 59 | """ 60 | phs1 = PhysicalData() 61 | phs2 = PhysicalData() 62 | 63 | with pytest.raises(AssertionError): 64 | assert(phs1 == phs2) 65 | 66 | with pytest.raises(AssertionError): 67 | phs1.retard = 1 68 | phs2.retard = 2 69 | assert(phs1.retard == phs2.retard) 70 | 71 | 72 | def test_private_access(setup_physical_data): 73 | """ 74 | test that private attributes are not accessible 75 | """ 76 | phys = setup_physical_data 77 | with pytest.raises(AttributeError): 78 | print(phys.__I_trans) 79 | print(phys.__retard) 80 | 81 | 82 | # ==== Attribute assignment ========== 83 | 84 | def test_assignment(setup_physical_data): 85 | """ 86 | test exception handling of improper assignment 87 | """ 88 | phys = setup_physical_data 89 | with pytest.raises(TypeError): 90 | phys.incorrect_attribute = 1 -------------------------------------------------------------------------------- /tests/datastructures_tests/stokes_data_tests.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import pytest 3 | from numpy.testing import assert_array_equal 4 | from ReconstructOrder.datastructures.stokes_data import StokesData 5 | 6 | # ===== test construction ===== 7 | 8 | def test_naked_constructor(): 9 | """ 10 | test simple construction 11 | """ 12 | stk = StokesData() 13 | 14 | assert(stk.s0 is None) 15 | assert(stk.s1 is None) 16 | assert(stk.s2 is None) 17 | assert(stk.s3 is None) 18 | 19 | 20 | def test_basic_constructor_nparray(): 21 | """ 22 | test assignment using numpy arrays 23 | """ 24 | stk = StokesData() 25 | 26 | a = np.ones((512, 512)) 27 | b = 2*np.ones((512, 512)) 28 | c = 3*np.ones((512, 512)) 29 | d = 4*np.ones((512, 512)) 30 | 31 | stk.s0 = a 32 | stk.s1 = b 33 | stk.s2 = c 34 | stk.s3 = d 35 | 36 | assert_array_equal(stk.s0, a) 37 | assert_array_equal(stk.s1, b) 38 | assert_array_equal(stk.s2, c) 39 | assert_array_equal(stk.s3, d) 40 | 41 | assert_array_equal(stk.data, np.array([a, b, c, d])) 42 | 43 | 44 | def test_stokes_constructor_nparray(setup_intensity_data, setup_inst_matrix): 45 | """ 46 | check that auto-construction assigns to instance, not class 47 | """ 48 | # this intensity data has frames = 5 49 | int_data, _, _, _, _, _, = setup_intensity_data 50 | 51 | # create default inv inst matrix 52 | iim = setup_inst_matrix 53 | 54 | stk_data_unassigned = StokesData() 55 | stk_data = StokesData(inv_inst_matrix=iim, intensity_data=int_data) 56 | 57 | assert(stk_data.s0 is not None) 58 | assert(stk_data.s1 is not None) 59 | assert(stk_data.s2 is not None) 60 | assert(stk_data.s3 is not None) 61 | assert(stk_data.s1_norm is not None) 62 | assert(stk_data.s2_norm is not None) 63 | assert(stk_data.data is not None) 64 | 65 | assert(stk_data_unassigned.s0 is None) 66 | assert(stk_data_unassigned.s1 is None) 67 | assert(stk_data_unassigned.s2 is None) 68 | assert(stk_data_unassigned.s3 is None) 69 | assert(stk_data_unassigned.s1_norm is None) 70 | assert(stk_data_unassigned.s2_norm is None) 71 | 72 | 73 | def test_stokes_assignment_after_construction(setup_intensity_data, setup_inst_matrix): 74 | """ 75 | test that one can compute stokes after class construction 76 | """ 77 | # this intensity data has frames = 5 78 | int_data, _, _, _, _, _, = setup_intensity_data 79 | iim = setup_inst_matrix 80 | stk_data = StokesData() 81 | 82 | # normal construction is fine 83 | assert (stk_data.s0 is None) 84 | assert (stk_data.s1 is None) 85 | assert (stk_data.s2 is None) 86 | assert (stk_data.s3 is None) 87 | assert (stk_data.s1_norm is None) 88 | assert (stk_data.s2_norm is None) 89 | 90 | # calculate stokes and reassign 91 | stk_data.compute_stokes(inv_inst_matrix=iim, intensity_data=int_data) 92 | 93 | assert(stk_data.s0 is not None) 94 | assert(stk_data.s1 is not None) 95 | assert(stk_data.s2 is not None) 96 | assert(stk_data.s3 is not None) 97 | assert(stk_data.s1_norm is not None) 98 | assert(stk_data.s2_norm is not None) 99 | assert(stk_data.data is not None) 100 | 101 | 102 | def test_stokes_constructor_exception(setup_intensity_data): 103 | """ 104 | test that instrument-matrix and intensity data shape mismatches are caught 105 | """ 106 | # this intensity data has frames = 5 107 | int_data, _, _, _, _, _, = setup_intensity_data 108 | 109 | # create incorrect inst matrix 110 | chi = 0.03*2*np.pi # if the images were taken using 5-frame scheme 111 | inst_mat = np.array([[1, 0, 0, -1], 112 | [1, 0, np.sin(chi), -np.cos(chi)], 113 | [1, -np.sin(chi), 0, -np.cos(chi)], 114 | [1, 0, -np.sin(chi), -np.cos(chi)]]) 115 | 116 | iim = np.linalg.pinv(inst_mat) 117 | with pytest.raises(ValueError): 118 | StokesData(inv_inst_matrix=iim, intensity_data=int_data) 119 | 120 | 121 | def test_basic_constructor_memap(setup_temp_data): 122 | """ 123 | test assignment using memory mapped files 124 | """ 125 | 126 | mm = setup_temp_data 127 | stk = StokesData() 128 | 129 | stk.s0 = mm 130 | stk.s1 = 2*mm 131 | stk.s2 = 3*mm 132 | stk.s3 = 4*mm 133 | 134 | assert_array_equal(stk.s0, mm) 135 | assert_array_equal(stk.s1, 2*mm) 136 | assert_array_equal(stk.s2, 3*mm) 137 | assert_array_equal(stk.s3, 4*mm) 138 | 139 | assert_array_equal(stk.data, np.array([mm, 2*mm, 3*mm, 4*mm])) 140 | 141 | # ===== test instances and access ===== 142 | 143 | 144 | def test_instances(): 145 | """ 146 | test instance attributes 147 | """ 148 | stk1 = StokesData() 149 | stk2 = StokesData() 150 | 151 | with pytest.raises(AssertionError): 152 | assert(stk1 == stk2) 153 | 154 | with pytest.raises(AssertionError): 155 | 156 | stk1.s0 = np.ones((512,512)) 157 | stk2.s0 = 2*np.ones((512,512)) 158 | assert_array_equal(stk1.s0, stk2.s0) 159 | 160 | 161 | def test_private_access(setup_stokes_data): 162 | """ 163 | test that private attributes are not accessible 164 | """ 165 | stk_data = setup_stokes_data 166 | with pytest.raises(AttributeError): 167 | print(stk_data.__s0) 168 | print(stk_data.__s1) 169 | 170 | 171 | # ===== test dimensionality ===== 172 | 173 | def test_data_dims(): 174 | """ 175 | test that data dimensionality mismatches are caught 176 | """ 177 | stk_data = StokesData() 178 | 179 | a = np.ones((512, 512)) 180 | b = np.ones((256, 256)) 181 | 182 | stk_data.s0 = a 183 | 184 | stk_data.s1 = a 185 | stk_data.s2 = a 186 | 187 | # None type check 188 | with pytest.raises(ValueError): 189 | dat = stk_data.data 190 | 191 | 192 | stk_data.s3 = a 193 | 194 | # data dim check 195 | with pytest.raises(ValueError): 196 | 197 | stk_data.s2 = b 198 | dat = stk_data.data 199 | 200 | 201 | # ==== Attribute assignment ========== 202 | 203 | def test_assignment(setup_stokes_data): 204 | """ 205 | test exception handling of improper assignment 206 | """ 207 | stk = setup_stokes_data 208 | with pytest.raises(TypeError): 209 | stk.s4 = 10*np.ones((512, 512)) -------------------------------------------------------------------------------- /tests/integration_tests/__init__.py: -------------------------------------------------------------------------------- 1 | # bchhun, {2019-07-17} 2 | -------------------------------------------------------------------------------- /tests/integration_tests/multidim_complete_pipeline_tests.py: -------------------------------------------------------------------------------- 1 | # bchhun, {2019-12-12} 2 | 3 | 4 | from ReconstructOrder.workflow.reconstructBatch import reconstruct_batch 5 | import os, glob 6 | import tifffile as tf 7 | import pytest 8 | import numpy as np 9 | from ..testMetrics import mse 10 | 11 | 12 | def test_reconstruct_source(setup_multidim_src): 13 | """ 14 | Runs a full multidim reconstruction based on supplied config files 15 | 16 | :param setup_multidim_src: 17 | :return: 18 | """ 19 | config = setup_multidim_src 20 | try: 21 | reconstruct_batch(config) 22 | except Exception as ex: 23 | pytest.fail("Exception thrown during reconstruction = "+str(ex)) 24 | 25 | 26 | def test_src_target_mse(setup_multidim_src, setup_multidim_target): 27 | """ 28 | 29 | Runs a comparison between reconstruction from config and target 30 | 31 | :param setup_multidim_src: fixture that returns PATH to config 32 | :param setup_multidim_target: fixture that returns PATH to .tif files 33 | :return: 34 | """ 35 | config = setup_multidim_src 36 | reconstruct_batch(config) 37 | 38 | processed_folder = os.getcwd() + '/temp/predict/src/SM_2019_0612_20x_1_BG_2019_0612_1515_1/B3-Site_1' 39 | processed_files = glob.glob(processed_folder+'/*.tif') 40 | print("PROCESSED FILES" + str(processed_files)) 41 | print("number of proc images ="+str(len(processed_files))) 42 | 43 | target_folder = setup_multidim_target 44 | target_files = glob.glob(target_folder+'/*.tif') 45 | print("TARGET FILES" + str(target_files)) 46 | print("number of target images ="+str(len(target_files))) 47 | 48 | p_sort = sorted(processed_files) 49 | s_sort = sorted(target_files) 50 | 51 | for idx, file in enumerate(p_sort): 52 | if os.path.basename(file) == os.path.basename(s_sort[idx]): 53 | predict = tf.imread(file) 54 | target = tf.imread(s_sort[idx]) 55 | 56 | try: 57 | assert mse(predict, target) <= np.finfo(np.float32).eps 58 | except AssertionError as ae: 59 | if 'img_Phase' in target: 60 | print(f" ==== KNOWN error in Phase Reconstruction ==== ") 61 | print(f"MSE relative = {mse(predict, target)}") 62 | print(f"MSE FAIL ON PREDICT = " + file) 63 | print(f"MSE FAIL ON TARGET = " + target + "\n") 64 | continue 65 | else: 66 | print(f"MSE relative = {mse(predict, target)}") 67 | print(f"MSE FAIL ON PREDICT = " + file) 68 | print(f"MSE FAIL ON TARGET = " + target + "\n") 69 | pytest.fail("Assertion Error = " + str(ae)) 70 | -------------------------------------------------------------------------------- /tests/integration_tests/singledim_reconstruct_tests.py: -------------------------------------------------------------------------------- 1 | # bchhun, {2019-09-16} 2 | 3 | 4 | from ReconstructOrder.compute.reconstruct import ImgReconstructor 5 | from ..testMetrics import mse 6 | import numpy as np 7 | 8 | 9 | def test_complete_reconstruction(setup_gdrive_src_data_small): 10 | """ 11 | tests compute sequence: background correction of birefringence from intensity data 12 | one z, one t, one p 13 | No config file parsing 14 | No plotting 15 | 16 | Parameters 17 | ---------- 18 | setup_gdrive_src_data_small : pytest fixture to load data from gdrive 19 | 20 | Returns 21 | ------- 22 | 23 | """ 24 | bg_dat, sm_dat = setup_gdrive_src_data_small 25 | 26 | # compute initial reconstructor using background data 27 | img_reconstructor = ImgReconstructor(bg_dat, 28 | swing=0.03, 29 | wavelength=532) 30 | bg_stokes = img_reconstructor.compute_stokes(bg_dat) 31 | bg_stokes_normalized = img_reconstructor.stokes_normalization(bg_stokes) 32 | 33 | # compute sample stokes and correct with background data 34 | sm_stokes = img_reconstructor.compute_stokes(sm_dat) 35 | sm_stokes_normalized = img_reconstructor.stokes_normalization(sm_stokes) 36 | sm_stokes_normalized = img_reconstructor.correct_background(sm_stokes_normalized, bg_stokes_normalized) 37 | 38 | reconstructed_birefring = img_reconstructor.reconstruct_birefringence(sm_stokes_normalized) 39 | 40 | assert reconstructed_birefring.I_trans is not None 41 | assert reconstructed_birefring.retard is not None 42 | assert reconstructed_birefring.azimuth is not None 43 | assert reconstructed_birefring.polarization is not None 44 | 45 | 46 | def test_recon_dims_shape(setup_reconstructed_data_small_npy, setup_gdrive_target_data_small_npy): 47 | """ 48 | test dims and dtype 49 | 50 | Parameters 51 | ---------- 52 | setup_reconstructed_data_small_npy 53 | setup_gdrive_target_data_small_npy 54 | 55 | Returns 56 | ------- 57 | 58 | """ 59 | recon_data = setup_reconstructed_data_small_npy 60 | target_dat = setup_gdrive_target_data_small_npy 61 | 62 | assert recon_data.I_trans.shape == target_dat.I_trans.shape 63 | assert recon_data.retard.shape == target_dat.I_trans.shape 64 | assert recon_data.azimuth.shape == target_dat.I_trans.shape 65 | assert recon_data.polarization.shape == target_dat.I_trans.shape 66 | 67 | assert recon_data.I_trans.dtype == target_dat.I_trans.dtype 68 | assert recon_data.retard.dtype == target_dat.I_trans.dtype 69 | assert recon_data.azimuth.dtype == target_dat.I_trans.dtype 70 | assert recon_data.polarization.dtype == target_dat.I_trans.dtype 71 | 72 | 73 | def test_recon_mse_small_npy(setup_reconstructed_data_small_npy, setup_gdrive_target_data_small_npy): 74 | """ 75 | test array by comparing MSE 76 | 77 | Parameters 78 | ---------- 79 | setup_reconstructed_data_small_npy 80 | setup_gdrive_target_data_small_npy 81 | 82 | Returns 83 | ------- 84 | 85 | """ 86 | recon_data = setup_reconstructed_data_small_npy 87 | target_dat = setup_gdrive_target_data_small_npy 88 | 89 | assert mse(recon_data.I_trans, target_dat.I_trans) < np.finfo(np.float32).eps 90 | assert mse(recon_data.retard, target_dat.retard) < np.finfo(np.float32).eps 91 | assert mse(recon_data.azimuth, target_dat.azimuth) < np.finfo(np.float32).eps 92 | assert mse(recon_data.polarization, target_dat.polarization) < np.finfo(np.float32).eps 93 | 94 | 95 | # def test_recon_mse_large_npy(setup_reconstructed_data_large_npy, setup_gdrive_target_data_large_npy): 96 | # """ 97 | # test array by comparing MSE 98 | # 99 | # Parameters 100 | # ---------- 101 | # setup_reconstructed_data_small_npy 102 | # setup_gdrive_target_data_small_npy 103 | # 104 | # Returns 105 | # ------- 106 | # 107 | # """ 108 | # recon_data = setup_reconstructed_data_large_npy 109 | # target_dat = setup_gdrive_target_data_large_npy 110 | # 111 | # assert mse(recon_data.I_trans, target_dat.I_trans) < np.finfo(np.float32).eps 112 | # assert mse(recon_data.retard, target_dat.retard) < np.finfo(np.float32).eps 113 | # assert mse(recon_data.azimuth, target_dat.azimuth) < np.finfo(np.float32).eps 114 | # assert mse(recon_data.polarization, target_dat.polarization) < np.finfo(np.float32).eps 115 | # 116 | # 117 | # def test_recon_mse_small_tif(setup_reconstructed_data_small_tif, setup_gdrive_target_data_small_tif): 118 | # """ 119 | # test array by comparing MSE 120 | # 121 | # Parameters 122 | # ---------- 123 | # setup_reconstructed_data_small_npy 124 | # setup_gdrive_target_data_small_npy 125 | # 126 | # Returns 127 | # ------- 128 | # 129 | # """ 130 | # recon_data = setup_reconstructed_data_small_tif 131 | # target_dat = setup_gdrive_target_data_small_tif 132 | # 133 | # assert mse(recon_data.I_trans, target_dat.I_trans) < np.finfo(np.float32).eps 134 | # assert mse(recon_data.retard, target_dat.retard) < np.finfo(np.float32).eps 135 | # assert mse(recon_data.azimuth, target_dat.azimuth) < np.finfo(np.float32).eps 136 | # assert mse(recon_data.polarization, target_dat.polarization) < np.finfo(np.float32).eps 137 | # 138 | # 139 | # def test_recon_mse_large_tif(setup_reconstructed_data_large_tif, setup_gdrive_target_data_large_tif): 140 | # """ 141 | # test array by comparing MSE 142 | # 143 | # Parameters 144 | # ---------- 145 | # setup_reconstructed_data_small_npy 146 | # setup_gdrive_target_data_small_npy 147 | # 148 | # Returns 149 | # ------- 150 | # 151 | # """ 152 | # recon_data = setup_reconstructed_data_large_tif 153 | # target_dat = setup_gdrive_target_data_large_tif 154 | # 155 | # assert mse(recon_data.I_trans, target_dat.I_trans) < np.finfo(np.float32).eps 156 | # assert mse(recon_data.retard, target_dat.retard) < np.finfo(np.float32).eps 157 | # assert mse(recon_data.azimuth, target_dat.azimuth) < np.finfo(np.float32).eps 158 | # assert mse(recon_data.polarization, target_dat.polarization) < np.finfo(np.float32).eps 159 | 160 | 161 | 162 | 163 | 164 | -------------------------------------------------------------------------------- /tests/testMetrics.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | 4 | def mse(y_pred, y_target): 5 | sqer = (y_target - y_pred)**2 6 | mse = (np.mean(sqer))**(1/2) / np.mean(np.abs(y_target)) 7 | return mse 8 | --------------------------------------------------------------------------------