├── .gitignore ├── .vscode └── settings.json ├── CONTRIBUTING.md ├── README.md ├── config ├── channeltype │ ├── 1ch.yaml │ ├── 2ch.yaml │ ├── 4pi.yaml │ └── MF.yaml ├── config_base.yaml ├── config_user.yaml ├── parameter description.md ├── path │ └── config_path.yaml ├── psftype │ ├── FD.yaml │ ├── insitu.yaml │ ├── insitu_FD.yaml │ ├── voxel.yaml │ └── zernike.yaml └── systemtype │ ├── 4pi.yaml │ ├── Ast_Li.yaml │ ├── DM_Li.yaml │ ├── LLS.yaml │ ├── M2.yaml │ ├── M4.yaml │ ├── MF.yaml │ └── TP.yaml ├── demo ├── Description of output parameters.md ├── datapath.yaml ├── demo_beadPSF_1ch.ipynb ├── demo_beadPSF_1ch_LLS.ipynb ├── demo_beadPSF_2ch.ipynb ├── demo_beadPSF_4pi.ipynb ├── demo_beadPSF_FD.ipynb ├── demo_eval_system.ipynb ├── demo_eval_system_FD.ipynb ├── demo_genPSF.ipynb ├── demo_insituPSF_1ch.ipynb ├── demo_insituPSF_2ch.ipynb ├── demo_insituPSF_4pi.ipynb ├── demo_insituPSF_FD.ipynb └── demo_insituPSF_TP.ipynb ├── environment.yml ├── psflearning ├── __init__.py ├── dataloader.py ├── io │ ├── __init__.py │ ├── h5.py │ └── param.py ├── learning │ ├── __init__.py │ ├── data_representation │ │ ├── PreprocessedImageDataInterface_file.py │ │ ├── PreprocessedImageDataMultiChannel_file.py │ │ ├── PreprocessedImageDataMultiChannel_smlm_file.py │ │ ├── PreprocessedImageDataSingleChannel_file.py │ │ ├── PreprocessedImageDataSingleChannel_smlm_file.py │ │ └── __init__.py │ ├── fitters │ │ ├── FitterInterface_file.py │ │ ├── Fitter_file.py │ │ └── __init__.py │ ├── imagetools.py │ ├── loclib.py │ ├── loss_functions.py │ ├── optimizers.py │ ├── psfs │ │ ├── PSFInterface_file.py │ │ ├── PSFMultiChannel4pi_file.py │ │ ├── PSFMultiChannel4pi_smlm_file.py │ │ ├── PSFMultiChannel_file.py │ │ ├── PSFMultiChannel_smlm_file.py │ │ ├── PSFPupilBased4pi_file.py │ │ ├── PSFPupilBased_file.py │ │ ├── PSFPupilBased_vector_smlm_file.py │ │ ├── PSFVolumeBased4pi_file.py │ │ ├── PSFVolumeBased_file.py │ │ ├── PSFZernikeBased4pi_file.py │ │ ├── PSFZernikeBased4pi_smlm_file.py │ │ ├── PSFZernikeBased_FD_file.py │ │ ├── PSFZernikeBased_FD_smlm_file.py │ │ ├── PSFZernikeBased_file.py │ │ ├── PSFZernikeBased_vector_smlm_file.py │ │ └── __init__.py │ └── utilities.py ├── makeplots.py └── psflearninglib.py ├── setup.py ├── source ├── mleFit_LM_DLL │ ├── CPUmleFit_LM.dll │ ├── CPUmleFit_LM_4Pi.dll │ ├── CPUmleFit_LM_MultiChannel.dll │ ├── GPUmleFit_LM.dll │ ├── GPUmleFit_LM_4Pi.dll │ └── GPUmleFit_LM_MultiChannel.dll ├── mleFit_LM_dylib │ ├── libCPUmleFit_LM.dylib │ ├── libCPUmleFit_LM_4Pi.dylib │ └── libCPUmleFit_LM_MultiChannel.dylib └── mleFit_LM_so │ ├── libCPUmleFit_LM.so │ ├── libCPUmleFit_LM_4Pi.so │ ├── libCPUmleFit_LM_MultiChannel.so │ ├── libGPUmleFit_LM.so │ ├── libGPUmleFit_LM_4Pi.so │ └── libGPUmleFit_LM_MultiChannel.so ├── test └── unit │ └── io │ └── test_param.py └── tutorial ├── Tutorial for FD_aberrations.pdf ├── Tutorial for fit_global_dualchannel.pdf ├── tutorial fit_4pi.pdf └── tutorial for fit_fastsimple.pdf /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | share/python-wheels/ 24 | *.egg-info/ 25 | .installed.cfg 26 | *.egg 27 | MANIFEST 28 | 29 | # PyInstaller 30 | # Usually these files are written by a python script from a template 31 | 
# before PyInstaller builds the exe, so as to inject date/other infos into it. 32 | *.manifest 33 | *.spec 34 | 35 | # Installer logs 36 | pip-log.txt 37 | pip-delete-this-directory.txt 38 | 39 | # Unit test / coverage reports 40 | htmlcov/ 41 | .tox/ 42 | .nox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | *.py,cover 50 | .hypothesis/ 51 | .pytest_cache/ 52 | cover/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | .pybuilder/ 76 | target/ 77 | 78 | # Jupyter Notebook 79 | .ipynb_checkpoints 80 | 81 | # IPython 82 | profile_default/ 83 | ipython_config.py 84 | 85 | # pyenv 86 | # For a library or package, you might want to ignore these files since the code is 87 | # intended to run in multiple environments; otherwise, check them in: 88 | # .python-version 89 | 90 | # pipenv 91 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 92 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 93 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 94 | # install all needed dependencies. 95 | #Pipfile.lock 96 | 97 | # poetry 98 | # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. 99 | # This is especially recommended for binary packages to ensure reproducibility, and is more 100 | # commonly ignored for libraries. 101 | # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control 102 | #poetry.lock 103 | 104 | # pdm 105 | # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. 106 | #pdm.lock 107 | # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it 108 | # in version control. 109 | # https://pdm.fming.dev/#use-with-ide 110 | .pdm.toml 111 | 112 | # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm 113 | __pypackages__/ 114 | 115 | # Celery stuff 116 | celerybeat-schedule 117 | celerybeat.pid 118 | 119 | # SageMath parsed files 120 | *.sage.py 121 | 122 | # Environments 123 | .env 124 | .venv 125 | env/ 126 | venv/ 127 | ENV/ 128 | env.bak/ 129 | venv.bak/ 130 | 131 | # Spyder project settings 132 | .spyderproject 133 | .spyproject 134 | 135 | # Rope project settings 136 | .ropeproject 137 | 138 | # mkdocs documentation 139 | /site 140 | 141 | # mypy 142 | .mypy_cache/ 143 | .dmypy.json 144 | dmypy.json 145 | 146 | # Pyre type checker 147 | .pyre/ 148 | 149 | # pytype static type analyzer 150 | .pytype/ 151 | 152 | # Cython debug symbols 153 | cython_debug/ 154 | 155 | # PyCharm 156 | # JetBrains specific template is maintained in a separate JetBrains.gitignore that can 157 | # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore 158 | # and can be added to the global gitignore or merged into this file. For a more nuclear 159 | # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
160 | #.idea/ 161 | 162 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | When contributing to this repository, please indicate the change you are making via issue or 4 | pull request. 5 | 6 | ## How to Work and Develop Together 7 | Please follow good git practices for developing code. In particular that means: 8 | 1. Use one branch per feature that you develop (check out from the main branch as a basis) 9 | 2. Merge the main branch frequently (e.g. once a day) if the feature takes longer to develop, 10 | otherwise you will get into merge hell. 11 | 3. After a feature is developed, please open a pull request from the feature branch to main and 12 | describe the changes and let someone take a look. 13 | 14 | ## Pull Requests 15 | 16 | 1. Make sure that the `environment.yml` is up-to-date, in case you have added other dependencies. 17 | 2. Update the README.md if necessary. 18 | 3. Remove all hard-coded values from your code, in particular paths. 19 | 20 | ## Things that should not be pushed 21 | - large files, binaries, images 22 | - compiled code 23 | - cache 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # uiPSF: Universal inverse modelling of point spread functions for SMLM localization and microscope characterization 2 | The point spread function (PSF) of a microscope describes the image of a point emitter. Knowing an accurate PSF model is essential for various imaging tasks, including single molecule localization, aberration correction and deconvolution. 3 | 4 | Here we present uiPSF (universal inverse modelling of Point Spread Functions), a toolbox to infer accurate PSF models either from image stacks of fluorescent beads or directly from images of single blinking fluorophores, the raw data in SMLM. It is a powerful tool to characterize and optimize a microscope, as it reports the aberration modes, including field-dependent aberrations. The resulting PSF model enables accurate 3D super-resolution imaging using single molecule localization microscopy. 5 | Our modular framework is applicable to a variety of microscope geometries, and the forward model can include system-specific characteristics such as the bead size, camera pixel size and transformations among channels. We demonstrate its application in single-objective systems with single or multiple channels, 4Pi-SMLM, and lattice light-sheet microscopes. 6 | 7 | **Reference**: 8 | [Liu, S., Chen, J., Hellgoth, J. et al. Universal inverse modeling of point spread functions for SMLM localization and microscope characterization. Nat Methods 21, 1082–1093 (2024).](https://doi.org/10.1038/s41592-024-02282-x) 9 | 10 | # System requirements 11 | ## Hardware 12 | uiPSF can run on both CPU and GPU; however, we recommend installing the GPU version for faster processing. To install the GPU version, a GPU card that supports CUDA 11.2 is required. Refer to [Systems tested](#Systems-tested) when selecting your GPU card. 13 | ## Software 14 | ### OS supported 15 | uiPSF is supported on Windows, Linux and macOS. Only the CPU version is supported on macOS.
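After installation (see [Installation](#Installation)), a quick way to confirm that the GPU version can actually use your graphics card is to ask TensorFlow which devices it sees. The snippet below is only an illustration and not part of the uiPSF API; it assumes the `psfinv` conda environment created during installation is active.

```python
# Minimal check that TensorFlow was built with CUDA support and detects a GPU.
import tensorflow as tf

print(tf.__version__)
print("Built with CUDA:", tf.test.is_built_with_cuda())
print("Visible GPUs:", tf.config.list_physical_devices("GPU"))  # empty list -> CPU-only setup
```

If the GPU list is empty on a machine with a supported card, check the graphics driver and the CUDA/cuDNN versions listed under [Package dependencies](#Package-dependencies).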
16 | ### Package dependencies 17 | ```bash 18 | cudatoolkit (GPU version only) 19 | cudnn (GPU version only) 20 | pip 21 | python 22 | numpy 23 | scipy 24 | matplotlib 25 | tensorflow 26 | tensorflow-probability 27 | scikit-image 28 | tqdm 29 | czifile 30 | hdfdict 31 | dotted_dict 32 | omegaconf 33 | ipykernel 34 | ``` 35 | ## Systems tested 36 | - Windows 11 with RTX 4090, RTX 3080, RTX 3090, RTX 2080 37 | - Windows 10 with RTX 4000, RTX 3090, RTX 1070 38 | - Rocky Linux 8.7 with RTX A6000 39 | - Ubuntu 20.04 with RTX 1070 40 | 41 | # Installation 42 | ## Windows 43 | Installation time for the GPU version is around 10 minutes. 44 | 1. Install [miniconda](https://docs.conda.io/en/latest/miniconda.html) for Windows. 45 | 2. Open the Anaconda PowerShell Prompt and clone the uiPSF package: 46 | ``` 47 | git clone https://github.com/ries-lab/uiPSF.git 48 | cd uiPSF 49 | ``` 50 | 3. Create a new conda environment for the uiPSF package: 51 | - for GPU: 52 | ``` 53 | conda env create --name psfinv --file=environment.yml 54 | ``` 55 | - for CPU: 56 | ``` 57 | conda create --name psfinv python=3.7.10 58 | ``` 59 | 4. Activate the installed environment and install the uiPSF package: 60 | ``` 61 | conda activate psfinv 62 | pip install -e . 63 | ``` 64 | 65 | ## Mac 66 | 1. Install [miniconda](https://docs.conda.io/en/latest/miniconda.html) for Mac. 67 | 2. Open Terminal and follow the [installation for Windows](#Windows) to install the uiPSF package. Only the CPU version is supported. 68 | 69 | ## Linux 70 | 1. Install [miniconda](https://docs.conda.io/en/latest/miniconda.html) for Linux. 71 | 2. Install the uiPSF package. 72 | - For TensorFlow 2.9 73 | 74 | Follow the [installation for Windows](#Windows) to install the uiPSF package. 75 | 76 | - For the latest TensorFlow (note that TensorFlow versions later than 2.10 are no longer supported on Windows) 77 | 78 | a. Modify the version numbers in the *environment.yml* file as follows: 79 | ``` 80 | - cudatoolkit=11.8 81 | - cudnn=8.4 82 | - python=3.9 83 | ``` 84 | b. Remove the version numbers in `install_requires` in the *setup.py* file as follows: 85 | ``` 86 | "tensorflow" 87 | "tensorflow-probability" 88 | ``` 89 | c. Follow the [installation for Windows](#Windows) to install the uiPSF package. 90 | 91 | d. If the GPU version is installed, run the following command: 92 | ``` 93 | pip install tensorflow[and-cuda] 94 | ``` 95 | We used the above procedure to install uiPSF on a Linux computer with an RTX A6000 to fully utilize the computing capability of the GPU. 96 | 3. If the GPU version is installed, add the cuDNN path: 97 | ``` 98 | mkdir -p $CONDA_PREFIX/etc/conda/activate.d 99 | echo 'export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$CONDA_PREFIX/lib/' >> $CONDA_PREFIX/etc/conda/activate.d/env_vars.sh 100 | ``` 101 | 102 | # Demo notebooks 103 | - For bead data 104 | - [Single-channel PSF modelling](demo/demo_beadPSF_1ch.ipynb). 105 | - [Multi-channel PSF modelling](demo/demo_beadPSF_2ch.ipynb). 106 | - [4Pi PSF modelling](demo/demo_beadPSF_4pi.ipynb). 107 | - [PSF modelling from a lattice light-sheet microscope](demo/demo_beadPSF_1ch_LLS.ipynb) 108 | - [Field-dependent PSF modelling](demo/demo_beadPSF_FD.ipynb). 109 | - For SMLM data 110 | - [Single-channel PSF modelling](demo/demo_insituPSF_1ch.ipynb). 111 | - [Tetrapod PSF modelling](demo/demo_insituPSF_TP.ipynb). 112 | - [Multi-channel PSF modelling](demo/demo_insituPSF_2ch.ipynb). 113 | - [4Pi PSF modelling](demo/demo_insituPSF_4pi.ipynb). 114 | - [Field-dependent PSF modelling](demo/demo_insituPSF_FD.ipynb).
115 | - Microscope characterization 116 | - [Evaluation of standard microscope systems](demo/demo_eval_system.ipynb). 117 | - [Evaluation of field-dependent aberration](demo/demo_eval_system_FD.ipynb). 118 | - [Generate PSF model at a given imaging depth](demo/demo_genPSF.ipynb). 119 | ## Run time of learning the PSF models listed in the demos 120 | The following run times were obtained from a desktop PC with Windows 11 and an RTX 3080. 121 | |**PSF type**| run time (min)| # of parameters| 122 | |:------------------|:----------------|:------------| 123 | |**1ch LLS voxel**|1.9 | 31,144| 124 | |**1ch zernike_vector**|0.5 | 992| 125 | |**2ch zernike_vector**|5.1 | 3,827| 126 | |**4pi zernike**|2.8 | 775| 127 | |**FD zernike_vector**|16.1 | 98,680| 128 | |**1ch *in situ***|4.7 | 10,433| 129 | |**2ch *in situ***|13.1 | 22,404| 130 | |**4pi *in situ***|35 | 35,189| 131 | |**FD *in situ***|49.7 | 143,023| 132 | # Example data 133 | - 40 nm bead data from single-channel, dual-color ratiometric and 4Pi systems. 134 | - bead data from a single-channel system with a FOV of 177 um x 177 um. 135 | - bead data from a lattice light-sheet microscope. 136 | - SMLM data of Nup96-AF647 from a single-channel system with a FOV of 177 um x 177 um. 137 | - SMLM data of tubulin-AF647 from a single-channel system with astigmatism aberration applied by a deformable mirror. 138 | - SMLM data of TOMM20-AF647 from a single-channel system with Tetrapod PSFs applied by a phase plate. 139 | - SMLM data of Nup96-AF647 and WGA-CF680 from a dual-color ratiometric system. 140 | - SMLM data of Nup96-mMaple from a 4Pi-SMLM system. 141 | 142 | Download the [example data](https://zenodo.org/doi/10.5281/zenodo.8267520). 143 | # How to run a demo notebook 144 | 1. Install uiPSF for your operating system. 145 | 2. Install [Visual Studio Code](https://code.visualstudio.com/Download). 146 | 3. Open Visual Studio Code (VScode), click *Extensions* in the sidebar menu, search for `Python` and install the `Python extension for VScode`. 147 | 4. Go to File->Open Folder and select the uiPSF folder created by git clone. 148 | 5. Open the file *demo/datapath.yaml* and change `main_data_dir` to the path of the downloaded example data. 149 | 6. Navigate to a demo notebook, e.g. *demo/demo_beadPSF_1ch.ipynb*. 150 | 7. Click the run button of the first cell. If running for the first time, a window will pop up asking to install the `ipykernel` package; click install. Then a drop-down menu will appear asking to select the kernel; select the conda environment `psfinv` created during the installation. 151 | - In case no window pops up, an alternative is to install `Jupyter` from *Extensions*, then click *Select Kernel* at the upper right corner of the demo notebook and select `psfinv` from the drop-down menu. 152 | 8. Run the subsequent cells sequentially. 153 | 154 | - For an explanation of the user-defined parameters and details of creating config files, please refer to [user defined parameters](config/parameter%20description.md). 155 | - For an explanation of the output parameters from PSF learning, please refer to [output parameters](demo/Description%20of%20output%20parameters.md). 156 | 157 | ## Input data format 158 | uiPSF normally assumes the input data are unprocessed raw camera frames. Except for the 4Pi bead data, all data are saved as a 3D array: `[x, y, z]` for bead data or `[x, y, frames]` for SMLM data. 159 | - `.mat` files should be saved as MATLAB version 7.3. 160 | - `.tif` or `.tiff` files should be multi-page TIFF files (see the sketch below for writing data in this format).
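If your acquisition software produces data in a different layout, a small conversion script is usually enough. The sketch below is illustrative only and not part of uiPSF: it assumes the `numpy`, `tifffile` and `h5py` packages, uses placeholder file and dataset names, and you should check the demo and config files for your system for the exact axis convention expected by the loader.

```python
# Illustrative sketch: writing a bead z-stack in formats that uiPSF can read.
# Assumes numpy, tifffile and h5py; file and dataset names are placeholders.
import numpy as np
import tifffile
import h5py

# A synthetic bead stack standing in for real camera frames;
# the documentation above describes bead data as a 3D array [x, y, z].
stack = np.random.poisson(100.0, size=(256, 256, 101)).astype(np.uint16)

# Multi-page TIFF: write one page per z slice by moving z to the first axis.
tifffile.imwrite("beads.tif", np.moveaxis(stack, -1, 0))

# MATLAB v7.3 files are HDF5 containers; the standard route is saving from
# MATLAB with the '-v7.3' flag. A plain HDF5 file written with h5py can serve
# as a stand-in for testing; the dataset name here is arbitrary (for simulated
# MATLAB data, the `varname` parameter tells uiPSF which variable to read).
with h5py.File("beads.mat", "w") as f:
    f.create_dataset("data", data=stack)
```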
161 | ### Single channel learning: 162 | - File format: .czi, .tif, .tiff, .mat 163 | - Data dimension: 3D 164 | ### Multi-channel learning: 165 | - File format: .tif, .tiff, .mat 166 | - Data dimension: 167 | - For .tif files: 3D, all channels are collected on the same camera. uiPSF will crop the channels from the raw data based on user-defined parameters (`mirrortype` and `channel_arrange`, see [user defined parameters](config/parameter%20description.md)). 168 | - For .mat files: each channel is saved as a 3D array in the same file. 169 | ### 4Pi-PSF learning: 170 | - File format: .mat 171 | - Data dimension: 172 | - Bead data: Each channel is saved as a 4D array, `[x, y, phase, z]`, in the same file. 173 | - SMLM data: Each channel is saved as a 3D array, `[x, y, frames]`, in the same file. 174 | ## Tips 175 | - Please ensure that the computer's current graphics card driver supports CUDA 11.2. 176 | - Don't run two notebooks at the same time; click `Restart` at the top of a notebook to release its memory. 177 | - Although uiPSF usually processes raw camera frames, from which it selects multiple candidate emitters, it can also take a small data stack containing only one emitter. In this case, the user needs to set `roi_size` to a value smaller than the FOV of the input data. 178 | - `roi_size` is normally a two-element vector, defining the crop size of one emitter in `[y, x]`. Only for agarose bead data is `roi_size` a three-element vector, defining the crop size of one bead in `[z, y, x]`. 179 | 180 | # Localization using SMAP and FD-DeepLoc 181 | Tutorials for using the PSF model generated by uiPSF for localization analysis. Use one of the [demo notebooks](#Demo-notebooks) to generate the corresponding PSF model (.h5 file) before using the following tutorials. 182 | - [Single channel SMLM imaging](tutorial/tutorial%20for%20fit_fastsimple.pdf). 183 | - [Ratiometric dual-color SMLM imaging](tutorial/Tutorial%20for%20fit_global_dualchannel.pdf). 184 | - [4Pi-SMLM imaging](tutorial/tutorial%20fit_4pi.pdf) 185 | - [Single channel SMLM imaging with large FOV](tutorial/Tutorial%20for%20FD_aberrations.pdf) 186 | # Need help?
187 | Open an issue here on github, or contact Jonas Ries (jonas.ries@univie.ac.at), Sheng Liu (shengliu@unm.edu) 188 | -------------------------------------------------------------------------------- /config/channeltype/1ch.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | channeltype: single # single, multi, 4pi 3 | option: 4 | model: 5 | bin: 1 -------------------------------------------------------------------------------- /config/channeltype/2ch.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | channeltype: multi # single, multi, 4pi 3 | option: 4 | model: 5 | bin: 1 -------------------------------------------------------------------------------- /config/channeltype/4pi.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | channeltype: 4pi # single, multi, 4pi 3 | dual: # parameters only for dual channel systems 4 | mirrortype: '' # up-down, left-right 5 | channel_arrange: '' # up-down, left-right 6 | option: 7 | model: 8 | bin: 1 -------------------------------------------------------------------------------- /config/channeltype/MF.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | channeltype: multi # single, multi, 4pi 3 | dual: # parameters only for dual channel systems 4 | mirrortype: '' # up-down, left-right 5 | channel_arrange: '' # up-down, left-right 6 | option: 7 | model: 8 | bin: 1 9 | -------------------------------------------------------------------------------- /config/config_base.yaml: -------------------------------------------------------------------------------- 1 | # here the configuration for the usage of this repo should be specified 2 | # if the paths are relative, they are relative to the current working direcotry (cwd) 3 | # that would be reasonable to set to the repo dir 4 | Params: 5 | # ------data specific parameters, should be updated for different data 6 | datapath: E:/EMBL files/data 2022/230202_Nup96SNAP_NC_M2/Nup96SNAP_BME_i50SM_ex200_zpos5um_NC_1 # full path to your data 7 | keyword: Default. 
# keyword for your data file, used for automatic finding files with the same keyword 8 | savename: E:/EMBL files/data 2022/230202_Nup96SNAP_NC_M2/Nup96SNAP_BME_i50SM_ex200_zpos5um_NC_1/psfmodel_iter # full path to your result foder/filename 9 | subfolder: "" # keyword for subfolder name if each bead data is saved in a individual folder 10 | format: .tif # .mat, .tif, .czi 11 | stage_mov_dir: normal # normal, reverse: normal direction is where stage move towards the objective when taking a z-stack bead data 12 | gain: 0.2 13 | ccd_offset: 398.6 14 | roi: 15 | roi_size: [21,21] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 16 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 17 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 18 | peak_height: 0.2 # relative to maximum bead intensity, bead intensity below which are rejected 19 | max_bead_number: 40 # ignored by insitu PSF learning 20 | bead_radius: 0.0 # unit: micron 21 | pixel_size: # unit: micron 22 | x: 0.127 23 | y: 0.116 24 | z: 0.05 25 | FOV: # define FOV for bead selection, if x_center, y_center, radius are 0, select the full FOV 26 | y_center: 0 27 | x_center: 0 28 | radius: 0 29 | z_start: 0 30 | z_end: -0 31 | z_step: 1 32 | 33 | 34 | #--------type specific paramters, might require update for different data--------------------------- 35 | dual: # parameters only for dual channel systems 36 | mirrortype: up-down # up-down, left-right 37 | channel_arrange: up-down # up-down, left-right 38 | multi: 39 | channel_size: [] 40 | fpi: # parameters only for 4pi systems 41 | modulation_period: 0.26 # unit: micron 42 | LLS: # parameters only for lattice light-sheet psfs 43 | skew_const: [0,0] # unit: pixel 44 | insitu: 45 | frame_range: [0,3000] # set the frames to be loaded 46 | 47 | option: # mainly for pupil and zernike, insitu learning 48 | imaging: 49 | emission_wavelength: 0.68 # unit: micron 50 | RI: 51 | imm: 1.516 52 | med: 1.335 53 | cov: 1.516 54 | NA: 1.43 55 | insitu: 56 | stage_pos: 1 # 0 at the coverslip, positive when move towards the coverslip 57 | min_photon: 0.4 # quantile for rejecting low photon emitters 58 | partition_data: true # select best emitters from each partition 59 | partition_size: [21,100] # number of z segments, maximum number of emitter per segment 60 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 61 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 62 | z_range: 2.0 # unit: micron, z range of insitu psf model 63 | zkorder_rank: L # L, H: searching range of zernike coefficient (4,21), (21,45) 64 | var_stagepos: true # estimate stage position 65 | repeat: 2 # iteration number for insitu learning, the previous psf model will be used for the next iteration 66 | backgroundROI: [] # define the region where the emitters are at the coverslip, only for imaging close to the coverslip 67 | fpi: 68 | link_zernikecoeff: true 69 | phase_dm: [2,0,-2] # unit: radian 70 | sampleheight: 2 71 | var_sampleheight: false 72 | phase_delay_dir: descend # phase delay between four channels, 'descend' or 'ascend' 73 | multi: 74 | defocus_offset: 0 # micron 75 | defocus_delay: -0.0 # micron 76 | model: 77 | pupilsize: 64 # unit: pixel 78 | n_max: 8 # maximum zernike order 79 | zernike_nl: [] # set the Zernike terms for PSF learning, e.g. 
[(2,2),(2,-2)], if empty, all zernike terms defined by n_max will be used 80 | blur_sigma: 0.5 # unit: pixel 81 | var_blur: true # estimate blurring sigma 82 | with_apoid: true # with theoretical apoidization term 83 | const_pupilmag: false # pupil magnitude is constant and equal to one 84 | symmetric_mag: false # pupil magnitude is circular symmetric 85 | with_IMM: false # only used for agarose bead 86 | init_pupil_file: "" # .h file from psf learning 87 | estimate_drift: false # estimate lateral drift between each z slice 88 | var_photon: false # estimate photon variation between each z slice 89 | bin: 2 # upsamplling pixel size equal to camera pixel size divided by bin 90 | division: 40 # number of divisions per lateral dimension for learning field-dependent aberration 91 | 92 | #--------following parameters usually don't need to be changed for the same type of psf learning, will be preset for each learning type 93 | PSFtype: insitu_zernike # voxel, zernike, pupil, zernike_vector, pupil_vector, insitu 94 | channeltype: single # single, multi, 4pi 95 | datatype: smlm # bead, smlm 96 | 97 | loss_weight: # weighting factor for each loss term 98 | mse1: 1 99 | mse2: 1 100 | smooth: 0 101 | edge: 0.01 102 | psf_min: 1 103 | bg_min: 1 104 | photon_min: 1 105 | Inorm: 0 106 | gxy_min: 10 107 | 108 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 109 | bias_z: 0.99 110 | mse: 0.8 111 | photon: 1.5 112 | 113 | usecuda: true # for spline localization 114 | plotall: false 115 | ref_channel: 0 # index of the reference channel, for multi-channel system 116 | batch_size: 1600 # maximum number of beads or emitters to be optimized at one time 117 | iteration: 200 # number of iterations for learning 118 | varname: "" # only for simulated data from matlab, the variable name for the data 119 | filelist: [] # user can also give a list of data files, instead of giving the keyword or subfolder, data loading will skip automatic populate filelist 120 | swapxy: false # permute x,y dimension 121 | 122 | 123 | -------------------------------------------------------------------------------- /config/config_user.yaml: -------------------------------------------------------------------------------- 1 | # here the configuration for the usage of this repo should be specified 2 | # if the paths are relative, they are relative to the current working direcotry (cwd) 3 | # that would be reasonable to set to the repo dir 4 | Params: 5 | # ------data specific parameters, should be updated for different data 6 | datapath: E:/EMBL files/data 2022/230202_Nup96SNAP_NC_M2/Nup96SNAP_BME_i50SM_ex200_zpos5um_NC_1 # full path to your data 7 | keyword: Default. 
# keyword for your data file, used for automatic finding files with the same keyword 8 | savename: E:/EMBL files/data 2022/230202_Nup96SNAP_NC_M2/Nup96SNAP_BME_i50SM_ex200_zpos5um_NC_1/psfmodel_iter # full path to your result foder/filename 9 | subfolder: "" # keyword for subfolder name if each bead data is saved in a individual folder 10 | format: .tif # .mat, .tif, .czi 11 | stage_mov_dir: normal # normal, reverse: normal direction is where stage move towards the objective when taking a z-stack bead data 12 | gain: 0.2 13 | ccd_offset: 398.6 14 | roi: 15 | roi_size: [21,21] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 16 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 17 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 18 | peak_height: 0.2 # relative to maximum bead intensity, bead intensity below which are rejected 19 | max_bead_number: 40 # ignored by insitu PSF learning 20 | bead_radius: 0.0 # unit: micron 21 | pixel_size: # unit: micron 22 | x: 0.127 23 | y: 0.116 24 | z: 0.05 25 | FOV: # define FOV for bead selection, if x_center, y_center, radius are 0, select the full FOV 26 | y_center: 0 27 | x_center: 0 28 | radius: 0 29 | z_start: 0 30 | z_end: -0 31 | z_step: 1 32 | 33 | 34 | #--------type specific paramters, might require update for different data--------------------------- 35 | dual: # parameters only for dual channel systems 36 | mirrortype: up-down # up-down, left-right 37 | channel_arrange: up-down # up-down, left-right 38 | multi: 39 | channel_size: [] 40 | fpi: # parameters only for 4pi systems 41 | modulation_period: 0.26 # unit: micron 42 | LLS: # parameters only for lattice light-sheet psfs 43 | skew_const: [0,0] # unit: pixel 44 | insitu: 45 | frame_range: [0,3000] # set the frames to be loaded 46 | 47 | option: # mainly for pupil and zernike, insitu learning 48 | imaging: 49 | emission_wavelength: 0.68 # unit: micron 50 | RI: 51 | imm: 1.516 52 | med: 1.335 53 | cov: 1.516 54 | NA: 1.43 55 | insitu: 56 | stage_pos: 1 # 0 at the coverslip, positive when move towards the coverslip 57 | min_photon: 0.4 # quantile for rejecting low photon emitters 58 | partition_data: true # select best emitters from each partition 59 | partition_size: [21,100] # number of z segments, maximum number of emitter per segment 60 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 61 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 62 | z_range: 2.0 # unit: micron, z range of insitu psf model 63 | zkorder_rank: L # L, H: searching range of zernike coefficient (4,21), (21,45) 64 | var_stagepos: true # estimate stage position 65 | repeat: 2 # iteration number for insitu learning, the previous psf model will be used for the next iteration 66 | backgroundROI: [] # define the region where the emitters are at the coverslip, only for imaging close to the coverslip 67 | fpi: 68 | link_zernikecoeff: true 69 | phase_dm: [2,0,-2] # unit: radian 70 | sampleheight: 2 71 | var_sampleheight: false 72 | phase_delay_dir: descend # phase delay between four channels, 'descend' or 'ascend' 73 | multi: 74 | defocus_offset: 0 # micron 75 | defocus_delay: -0.0 # micron 76 | model: 77 | pupilsize: 64 # unit: pixel 78 | n_max: 8 # maximum zernike order 79 | zernike_nl: [] # set the Zernike terms for PSF learning, e.g. 
[(2,2),(2,-2)], if empty, all zernike terms defined by n_max will be used 80 | blur_sigma: 0.5 # unit: pixel 81 | var_blur: true # estimate blurring sigma 82 | with_apoid: true # with theoretical apoidization term 83 | const_pupilmag: false # pupil magnitude is constant and equal to one 84 | symmetric_mag: false # pupil magnitude is circular symmetric 85 | with_IMM: false # only used for agarose bead 86 | init_pupil_file: "" # .h file from psf learning 87 | estimate_drift: false # estimate lateral drift between each z slice 88 | var_photon: false # estimate photon variation between each z slice 89 | bin: 2 # upsamplling pixel size equal to camera pixel size divided by bin 90 | division: 40 # number of divisions per lateral dimension for learning field-dependent aberration 91 | 92 | #--------following parameters usually don't need to be changed for the same type of psf learning, will be preset for each learning type 93 | PSFtype: insitu_zernike # voxel, zernike, pupil, zernike_vector, pupil_vector, insitu 94 | channeltype: single # single, multi, 4pi 95 | datatype: smlm # bead, smlm 96 | 97 | loss_weight: # weighting factor for each loss term 98 | mse1: 1 99 | mse2: 1 100 | smooth: 0 101 | edge: 0.01 102 | psf_min: 1 103 | bg_min: 1 104 | photon_min: 1 105 | Inorm: 0 106 | gxy_min: 10 107 | 108 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 109 | bias_z: 0.99 110 | mse: 0.8 111 | photon: 1.5 112 | 113 | usecuda: true # for spline localization 114 | plotall: false 115 | ref_channel: 0 # index of the reference channel, for multi-channel system 116 | batch_size: 1600 # maximum number of beads or emitters to be optimized at one time 117 | iteration: 200 # number of iterations for learning 118 | varname: "" # only for simulated data from matlab, the variable name for the data 119 | filelist: [] # user can also give a list of data files, instead of giving the keyword or subfolder, data loading will skip automatic populate filelist 120 | swapxy: false # permute x,y dimension 121 | 122 | 123 | -------------------------------------------------------------------------------- /config/parameter description.md: -------------------------------------------------------------------------------- 1 | # Description of user defined parameters 2 | List of all parameters defined in [config_base.yaml](config_base.yaml). Some parameters are system specific or for advanced settings. Users are not required to work with all parameters. We divided those parameters into [system specific](systemtype), [channel specific](channeltype) and [PSF specific](psftype) parameters. For most application, users only need to edit or add system type config file [e.g. 4pi.yaml](systemtype/4pi.yaml) and update the parameters in the demo notebook. 3 | ## Instructions on using the config files 4 | - To create a new system type config file, select one example file from the [systemtype](systemtype) folder and save it as a new *.yaml* file. 
Some example files are from the following systems: 5 | - [M2.yaml](systemtype/M2.yaml): Used for both single-channel astigmatism imaging and dual-channel ratiometric astigmatism imaging 6 | - [M4.yaml](systemtype/M4.yaml): Used for single-channel large FOV imaging 7 | - [DM_Li.yaml](systemtype/DM_Li.yaml): Used for a single-channel system with a deformable mirror 8 | - [LLS.yaml](systemtype/LLS.yaml): Used for a lattice light-sheet system 9 | - [TP.yaml](systemtype/TP.yaml): Used for a single-channel system with a phase plate generating the tetrapod PSFs 10 | - [4pi.yaml](systemtype/4pi.yaml): Used for a 4Pi-SMLM system 11 | - The [channeltype](channeltype) and [psftype](psftype) config files shouldn't be changed, unless a new PSF type or modelling method is added to the package. 12 | - The [config_base.yaml](config_base.yaml) file shouldn't be changed. For advanced settings, the user can create a new *.yaml* file from [config_user.yaml](config_user.yaml) and interacte with all parameters. To load this config file in a demo notebook, run the following command 13 | ``` 14 | L.param = io.param.combine('config_user') 15 | ``` 16 | 17 | ## List of user defined parameters 18 | |**Parameters**| Description| 19 | |:----------------------------|:---------------------------| 20 | |**datapath** | *string*, full path to the data| 21 | |**keyword** | *string*, keyword for the data file, used for automatic finding files with the same keyword| 22 | |**savename** | *string*, full path to the result folder/filename (no extension)| 23 | |**subfolder** | *string*, keyword for the subfolder name if each bead data is saved in a individual folder| 24 | |**format** | *string*, options are `{'.mat', '.tif', '.h5', '.czi'}`, data format| 25 | |**stage_mov_dir** | *string*, options are `{'normal', 'reverse'}`, normal direction is where stage move towards the objective when taking a z-stack bead data| 26 | |**gain**| *float*, camera gain that converts the raw pixel value to photon by multiplication 27 | |**ccd_offset**| *float*, camera offset, the average pixel value at no light 28 | |**roi**| | 29 | |   **roi_size**|*vector[int]*, crop size of each emitter in `[y,x]` or `[z,y,x]`| 30 | |   **gauss_sigma**|*vector[int]*, smooth kernel size of a Gaussian filter in `[y,x]` or `[z,y,x]`| 31 | |   **max_kernel**|*vector[int]*, kernel size of a maximum filter in `[y,x]` or `[z,y,x]`| 32 | |   **peak_height**|*float*, relative intensity above which the emitters are selected| 33 | |   **max_bead_number**|*int*, maximum number of beads to be selected| 34 | |   **bead_radius**|*float*, `unit: micron`, radius of the bead| 35 | |**pixel_size**| | 36 | |   **x**|*float*, `unit: micron`, pixel size in x at the sample plane| 37 | |   **y**|*float*, `unit: micron`, pixel size in y at the sample plane| 38 | |   **z**|*float*, `unit: micron`, pixel size in z at the sample plane| 39 | |**FOV**| | 40 | |   **y_center**|*int*, y coordinate of defined FOV, within which emitters are selected| 41 | |   **x_center**|*int*, x coordinate of defined FOV, within which emitters are selected| 42 | |   **radius**|*int*, radius of defined FOV, within which emitters are selected| 43 | |   **z_start**|*+int*, start slice in z dimension, e.g. `1` means ignore the first slice| 44 | |   **z_end**|*-int*, end slice in z dimension, e.g. `-1` means ignore the last slice| 45 | |   **z_step**|*int*, sampling step in z, e.g. 
`2` means sample at every 2 slices from the original data| 46 | |**dual**| | 47 | |   **mirrortype** | *string*, options are `{'up-down','left-right'}`, mirror arrangement between two channels| 48 | |   **channel_arrange** | *string*, options are `{'up-down','left-right'}`, channel arrangement between two channels| 49 | |**multi**| | 50 | |   **channel_size** | *vector[int]*, size of each channel in `[y,x]`, for MFM system| 51 | |**fpi**| | 52 | |   **modulation_period** | *float*, `unit: micron`, modulation period of a 4Pi-SMLM system| 53 | |**LLS**| | 54 | |   **skew_const** | *float*, `unit: pixel`, translation in `[y,x]` per z slice, relative to the detection objective in a LLS system| 55 | |**option**| | 56 | |   **imaging**| | 57 | |      **emission_wavelength** | *float*, `unit: micron`, central wavelength of the emission filter| 58 | |   **RI**| | 59 | |      **imm** | *float*, refractive index of the immersion medium| 60 | |      **med** | *float*, refractive index of the sample medium| 61 | |      **cov** | *float*, refractive index of the coverglass| 62 | |   **NA**| *float*, numerical aperture of the objective| 63 | |   **insitu**| | 64 | |      **stage_pos** | *float*, `unit: micron`, position of the sample stage, equal to 0 at the coverslip and positive when move the objective towards the coverslip| 65 | |      **min_photon** | *float*, quantile of the photon below which the emitters are rejected| 66 | |      **partition_data** | *bool*, options are `{true, false}`, `true` means partition the emitters| 67 | |      **partition_size** | *vector[int]*, define partition size, `[NO. z segments, NO. emitters per segment]` or `[NO. z segments, NO. y segments, NO. x segments, NO. emitters per segment]` for insitu-FD| 68 | |      **zernike_index** | *vector[int]*, indices of non-zero Zernike coefficients for an initial pupil, if `[]`, search from lower or higher order Zernike polynomials| 69 | |      **zernike_coeff** | *vector[float]*, values of Zernike coefficients defined in `zernike_index`| 70 | |      **z_range** | *float*, `unit: micron`, z range of the insitu PSF model| 71 | |      **zkorder_rank** | *string*, options are `{'L', 'H'}`, searching range of zernike coefficient, `'L'` means searching from 5 to 21 Zernike polynomials, `'H'` means searching from 22 to 45 Zernike polynomials| 72 | |      **var_stagepos** | *bool*, options are `{true, false}`, `true` means estimate the stage position| 73 | |      **repeat** | *int*, repeat number for insitu PSF modelling, the previous PSF model will be used for the next iteration| 74 | |   **fpi**| | 75 | |      **link_zernikecoeff** | *bool*, options are `{true, false}`, `true` means link the Zernike coefficients between the four channels of a 4Pi-SMLM system| 76 | |      **phase_dm** | *vector[float]*, `unit: radian`, a vector of three phase positions of a bead stack| 77 | |      **sampleheight** | *float*, `unit: micron`, height of the sample chamber between the two coverslips| 78 | |      **var_sampleheight** | *bool*, options are `{true, false}`, `true` means estimate the sample height| 79 | |      **phase_delay_dir** | *string*, options are `{'descend', 'ascend'}`, direction of the phase increment between the four channels| 80 | |   **multi**| | 81 | |      **defocus_offset** | *float*, `unit: micron`, defocus of the first channel in a MFM system| 82 | |      **defocus_delay** | *float*, `unit: micron`, defocus increment between the channels in a MFM system| 83 | |   **model**| | 84 | |      **pupilsize** | *int*, pixel size of 
the pupil image| 85 | |      **n_max** | *int*, maximum radial order of the Zernike polynomials used in modelling| 86 | |      **zernike_nl** | *vector([int,int])*, define the Zernike terms for PSF modelling in terms of `[n, l]` index, e.g. `[[2,2],[2,-2]]`, if `[]`, all zernike terms defined by `n_max` will be used| 87 | |      **blur_sigma** | *float*, `unit: pixel`, the standard deviation of a 2D Gaussian kernel used to account for the extra blur of the measured PSF| 88 | |      **var_blur** | *bool*, options are `{true, false}`, `true` means estimate the `blur_sigma`| 89 | |      **with_apoid** | *bool*, options are `{true, false}`, `true` means include the apodization term in the pupil| 90 | |      **const_pupilmag** | *bool*, options are `{true, false}`, `true` means set the pupil magnitude to be a unit circle 91 | |      **symmetric_mag** | *bool*, options are `{true, false}`, `true` means use only radial symmetric Zernike polynomials for the pupil magnitude| 92 | |      **with_IMM** | *bool*, options are `{true, false}`, `true` means include index mismatch aberration, only used for agarose bead sample| 93 | |      **init_pupil_file** | *string*, full path to the initial PSF file, the output file from uiPSF| 94 | |      **estimate_drift** | *bool*, options are `{true, false}`, `true` means estimate the lateral drifts in z from bead data| 95 | |      **var_photon** | *bool*, options are `{true, false}`, `true` means estimate the intensity fluctuation in z from bead data| 96 | |      **bin** | *int*, upsampling rate of the camera pixel size, e.g. `2` means the pixel size of the upsampled PSF model is half of the camera pixel size| 97 | |      **division** | *int*, number of divisions per lateral dimension of the FOV, used for modelling field-dependent aberration 98 | |**PSFtype**| *string*, type of PSF model, options are `{'voxel', 'zernike', 'pupil', 'zernike_vector', 'pupil_vector', 'zernike_FD', 'zernike_vector_FD', 'insitu_zernike', 'insitu_pupil', 'insitu_FD'}`| 99 | |**channeltype**| *string*, type of system channel, options are `{'single', 'multi', '4pi'}`| 100 | |**datatype**| *string*, type of data, options are `{'bead', 'smlm'}`| 101 | |**loss_weight**| | 102 | |   **mse1**| *float*, weight for MSE in the loss function| 103 | |   **mse2**| *float*, weight for modified MSE in the loss function| 104 | |   **smooth**| *float*, weight for smooth regularization in the loss function| 105 | |   **edge**| *float*, weight for reducing edge effect in the loss function| 106 | |   **psf_min**| *float*, weight for ensuring positive PSF model in the loss function| 107 | |   **bg_min**| *float*, weight for ensuring positive background in the loss function| 108 | |   **photon_min**| *float*, weight for ensuring positive photon in the loss function| 109 | |   **Inorm**| *float*, weight for ensuring sum of the PSF model in lateral dimension being constant in the loss function| 110 | |   **gxy_min**| *float*, weight for ensuring small lateral drifts in the loss function| 111 | |**rej_threshold**| | 112 | |   **bias_z**| *float*, relative z localization bias, above which the emitters will be rejected for re-learning step| 113 | |   **mse**| *float*, relative MSE error, above which the emitters will be rejected for re-learning step| 114 | |   **photon**| *float*, relative photon count, above which the emitters will be rejected for re-learning step| 115 | |**usecuda**| *bool*, options are `{true, false}`, `true` means use GPU for localization| 116 | |**plotall**| *bool*, options are `{true, 
false}`, `true` means show plots during the modelling process| 117 | |**ref_channel**| *int*, index of the reference channel, for multi-channel or 4Pi system| 118 | |**batch_size**| *int*, maximum number of emitters to be modeled at one time| 119 | |**iteration**| *int*, number of iterations for the optimization process| 120 | |**varname**| *string*, the variable name for the data, used for simulated matlab data| 121 | |**filelist**| *vector[string]*, a list of data files, if `[]`, the data files will be identified based on the `keyword`| 122 | |**swapxy**| *bool*, options are `{true, false}`, `true` means permute the x, y dimensions of the raw images| 123 | 124 | 125 | 126 | 127 | 128 | 129 | 130 | 131 | -------------------------------------------------------------------------------- /config/path/config_path.yaml: -------------------------------------------------------------------------------- 1 | # here the configuration for the usage of this repo should be specified 2 | # if the paths are relative, they are relative to the current working direcotry (cwd) 3 | # that would be reasonable to set to the repo dir 4 | Paths: 5 | spline: 6 | win: 7 | cpu: 8 | astM: /source/mleFit_LM_DLL/CPUmleFit_LM_MultiChannel.dll 9 | ast: /source/mleFit_LM_DLL/CPUmleFit_LM.dll 10 | fpi: /source/mleFit_LM_DLL/CPUmleFit_LM_4Pi.dll 11 | cuda: 12 | astM: /source/mleFit_LM_DLL/GPUmleFit_LM_MultiChannel.dll 13 | ast: /source/mleFit_LM_DLL/GPUmleFit_LM.dll 14 | fpi: /source/mleFit_LM_DLL/GPUmleFit_LM_4Pi.dll 15 | mac: 16 | cpu: 17 | astM: /source/mleFit_LM_dylib/libCPUmleFit_LM_MultiChannel.dylib 18 | ast: /source/mleFit_LM_dylib/libCPUmleFit_LM.dylib 19 | fpi: /source/mleFit_LM_dylib/libCPUmleFit_LM_4Pi.dylib 20 | linux: 21 | cpu: 22 | astM: /source/mleFit_LM_so/libCPUmleFit_LM_MultiChannel.so 23 | ast: /source/mleFit_LM_so/libCPUmleFit_LM.so 24 | fpi: /source/mleFit_LM_so/libCPUmleFit_LM_4Pi.so 25 | cuda: 26 | astM: /source/mleFit_LM_so/libGPUmleFit_LM_MultiChannel.so 27 | ast: /source/mleFit_LM_so/libGPUmleFit_LM.so 28 | fpi: /source/mleFit_LM_so/libGPUmleFit_LM_4Pi.so 29 | 30 | main_data_dir: 31 | E:/EMBL files/data for PSF learning/ # main folder to your data, it is only useful if all your data are saved in one location, see example file 32 | #/mnt/e/EMBL files/data for PSF learning/ 33 | -------------------------------------------------------------------------------- /config/psftype/FD.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | PSFtype: zernike_FD # voxel, zernike, pupil, zernike-vector, pupil-vector, insitu 3 | datatype: bead 4 | loss_weight: # weighting factor for each loss term 5 | smooth: 0.001 6 | gxy_min: 0.1 7 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 8 | bias_z: 10 9 | mse: 3 10 | photon: 3 11 | option: 12 | model: 13 | var_photon: true 14 | -------------------------------------------------------------------------------- /config/psftype/insitu.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | PSFtype: insitu_zernike # voxel, zernike, pupil, zernike-vector, pupil-vector, insitu 3 | datatype: smlm 4 | loss_weight: # weighting factor for each loss term 5 | smooth: 0 6 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 7 | bias_z: 0.99 8 | mse: 0.8 9 | photon: 1.5 10 | 
-------------------------------------------------------------------------------- /config/psftype/insitu_FD.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | PSFtype: insitu_FD # voxel, zernike, pupil, zernike-vector, pupil-vector, insitu 3 | datatype: smlm 4 | loss_weight: # weighting factor for each loss term 5 | smooth: 0.001 6 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 7 | bias_z: 0.99 8 | mse: 0.99 9 | photon: 1.5 10 | 11 | 12 | -------------------------------------------------------------------------------- /config/psftype/voxel.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | PSFtype: voxel # voxel, zernike, pupil, zernike-vector, pupil-vector, insitu 3 | datatype: bead 4 | loss_weight: # weighting factor for each loss term 5 | smooth: 1 6 | gxy_min: 10 7 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 8 | bias_z: 3 9 | mse: 3 10 | photon: 1.5 11 | option: 12 | model: 13 | var_photon: false 14 | -------------------------------------------------------------------------------- /config/psftype/zernike.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | PSFtype: zernike_vector # voxel, zernike, pupil, zernike-vector, pupil-vector, insitu 3 | datatype: bead 4 | loss_weight: # weighting factor for each loss term 5 | smooth: 0 6 | gxy_min: 0.1 7 | rej_threshold: # threshold for ourlier removal after initial learning, which are relative values or quantile for insitu PSF 8 | bias_z: 3 9 | mse: 3 10 | photon: 1.5 11 | option: 12 | model: 13 | var_photon: true 14 | -------------------------------------------------------------------------------- /config/systemtype/4pi.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .mat # .mat, .tif, .czi 3 | stage_mov_dir: reverse # normal, reverse: normal direction is where stage move towards the object when taking a z-stack bead data 4 | pixel_size: # unit: micron 5 | x: 0.129 6 | y: 0.129 7 | roi: 8 | roi_size: [21,21] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 9 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 10 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 11 | 12 | fpi: # parameters only for 4pi systems 13 | modulation_period: 0.26 # unit: micron 14 | insitu: 15 | frame_range: [] # set the frames to be loaded, only for .tif format 16 | 17 | option: 18 | imaging: 19 | emission_wavelength: 0.6 # unit: micron 20 | RI: 21 | imm: 1.406 22 | med: 1.406 23 | cov: 1.516 24 | NA: 1.35 25 | fpi: 26 | link_zernikecoeff: true 27 | phase_dm: [2,0,-2] # unit: radian 28 | sampleheight: 2 29 | var_sampleheight: false 30 | phase_delay_dir: descend # phase delay between four channels, 'descend' or 'ascend' 31 | insitu: 32 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 33 | zernike_coeff: [-0.5] # for initial pupil, value corresponding to the zernike index 34 | partition_size: [11,400] # number of z segments, maximum number of emitter per segment 35 | z_range: 1.2 # unit: micron, z range of insitu psf model 36 | -------------------------------------------------------------------------------- /config/systemtype/Ast_Li.yaml: 
-------------------------------------------------------------------------------- 1 | Params: 2 | format: .tif # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.11 5 | y: 0.11 6 | roi: 7 | roi_size: [31,31] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | swapxy: true # permute x,y dimension 11 | 12 | insitu: 13 | frame_range: [0,800] # set the frames to be loaded, only for .tif format 14 | 15 | option: 16 | imaging: 17 | emission_wavelength: 0.68 # unit: micron 18 | RI: 19 | imm: 1.515 20 | med: 1.406 21 | cov: 1.525 22 | NA: 1.5 23 | insitu: 24 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 25 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 26 | partition_size: [11,1000] # number of z segments, maximum number of emitter per segment 27 | z_range: 2.0 # unit: micron, z range of insitu psf model 28 | model: 29 | n_max: 6 30 | -------------------------------------------------------------------------------- /config/systemtype/DM_Li.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .tif # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.108 5 | y: 0.108 6 | roi: 7 | roi_size: [23,23] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | swapxy: false # permute x,y dimension 11 | 12 | insitu: 13 | frame_range: [1000,6000] # set the frames to be loaded, only for .tif format 14 | 15 | option: 16 | imaging: 17 | emission_wavelength: 0.67 # unit: micron 18 | RI: 19 | imm: 1.406 20 | med: 1.406 21 | cov: 1.524 22 | NA: 1.35 23 | insitu: 24 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 25 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 26 | partition_size: [21,100] # number of z segments, maximum number of emitter per segment 27 | z_range: 2.4 # unit: micron, z range of insitu psf model 28 | model: 29 | n_max: 8 30 | zernike_nl: [[2,-2],[2,2],[3,-1],[3,1],[4,0],[3,-3],[3,3], 31 | [4,-2],[4,2],[5,-1],[5,1],[6,0],[4,-4],[4,4], 32 | [5,-3],[5,3],[6,-2],[6,2],[7,1],[7,-1],[8,0]] 33 | -------------------------------------------------------------------------------- /config/systemtype/LLS.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .tiff # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.104 5 | y: 0.104 6 | roi: 7 | roi_size: [37,27,27] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [6,2,2] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [9,3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | 11 | LLS: # parameters only for lattice light-sheet psfs 12 | skew_const: [0,-0.7845] # unit: pixel 13 | insitu: 14 | frame_range: [] # set the frames to be loaded, only for .tif format 15 | 16 | option: 17 | imaging: 18 | emission_wavelength: 0.6 # unit: micron 19 | RI: 20 | imm: 1.334 21 | med: 1.334 22 | cov: 1.334 23 | NA: 1.1 24 | insitu: 25 | zernike_index: [5] # for initial 
pupil, a vector of any size, if [], search for optimal zernike term 26 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 27 | partition_size: [21,100] # number of z segments, maximum number of emitter per segment 28 | z_range: 2.0 # unit: micron, z range of insitu psf model 29 | model: 30 | estimate_drift: true 31 | -------------------------------------------------------------------------------- /config/systemtype/M2.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .tif # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.127 5 | y: 0.116 6 | roi: 7 | roi_size: [25,25] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | FOV: # define FOV for bead selection, if x_center, y_center, radius are 0, select the full FOV 11 | z_start: 1 12 | z_end: -1 13 | swapxy: true # permute x,y dimension 14 | 15 | insitu: 16 | frame_range: [8000,10000] # set the frames to be loaded, only for .tif format 17 | 18 | option: 19 | imaging: 20 | emission_wavelength: 0.6 # unit: micron 21 | RI: 22 | imm: 1.516 23 | med: 1.335 24 | cov: 1.516 25 | NA: 1.43 26 | insitu: 27 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 28 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 29 | partition_size: [21,100] # number of z segments, maximum number of emitter per segment 30 | z_range: 2.0 # unit: micron, z range of insitu psf model 31 | -------------------------------------------------------------------------------- /config/systemtype/M4.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .tif # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.123 5 | y: 0.123 6 | roi: 7 | roi_size: [31,31] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | swapxy: false # permute x,y dimension 11 | 12 | insitu: 13 | frame_range: [1000,3000] # set the frames to be loaded, only for .tif format 14 | 15 | option: 16 | imaging: 17 | emission_wavelength: 0.6 # unit: micron 18 | RI: 19 | imm: 1.406 20 | med: 1.35 21 | cov: 1.516 22 | NA: 1.35 23 | insitu: 24 | zernike_index: [5] # for initial pupil, a vector of any size, if [], search for optimal zernike term 25 | zernike_coeff: [0.5] # for initial pupil, value corresponding to the zernike index 26 | partition_size: [21,100] # number of z segments, maximum number of emitter per segment 27 | z_range: 2.0 # unit: micron, z range of insitu psf model 28 | -------------------------------------------------------------------------------- /config/systemtype/MF.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .tif # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.12 5 | y: 0.12 6 | roi: 7 | roi_size: [21,21] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [2,2] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [3,3] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | FOV: # define FOV for 
bead selection, if x_center, y_center, radius are 0, select the full FOV 11 | z_start: 10 12 | z_end: -3 13 | 14 | multi: 15 | channel_size: [170,170] 16 | 17 | option: 18 | imaging: 19 | emission_wavelength: 0.607 # unit: micron 20 | RI: 21 | imm: 1.515 22 | med: 1.56 23 | cov: 1.515 24 | NA: 1.4 25 | multi: 26 | defocus_offset: 1.4 # micron 27 | defocus_delay: -0.4 # micron 28 | model: 29 | n_max: 6 # maximum zernike order 30 | 31 | -------------------------------------------------------------------------------- /config/systemtype/TP.yaml: -------------------------------------------------------------------------------- 1 | Params: 2 | format: .mat # .mat, .tif, .czi 3 | pixel_size: # unit: micron 4 | x: 0.11 5 | y: 0.11 6 | roi: 7 | roi_size: [31,31] # vector of 2 or 3 element, roi size in [y, x] or [z, y, x] 8 | gauss_sigma: [12,12] # blur kernel size, corresponding to dimensions defined in roi_size 9 | max_kernel: [13,13] # maximum filter kernel size, corresponding to dimensions defined in roi_size 10 | 11 | insitu: 12 | frame_range: [] # set the frames to be loaded, only for .tif format 13 | 14 | option: 15 | imaging: 16 | emission_wavelength: 0.67 # unit: micron 17 | RI: 18 | imm: 1.518 19 | med: 1.33 20 | cov: 1.518 21 | NA: 1.45 22 | insitu: 23 | zernike_index: [12] # for initial pupil, a vector of any size, if [], search for optimal zernike term 24 | zernike_coeff: [-2] # for initial pupil, value corresponding to the zernike index 25 | partition_size: [31,100] # number of z segments, maximum number of emitter per segment 26 | z_range: 4.0 # unit: micron, z range of insitu psf model 27 | -------------------------------------------------------------------------------- /demo/Description of output parameters.md: -------------------------------------------------------------------------------- 1 | # Description of output parameters from PSF learning 2 | In the end of each demo notebook, it listed all the output parameters obtained from PSF learning. Here we provide the description of those parameters from different imaging modalities. 
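For orientation, a result file written by the demo notebooks can be opened with the package's HDF5 reader and browsed by the parameter names listed below. The snippet is a minimal sketch: the file name is a placeholder, and the exact nesting of the fields follows the tables in this document rather than being guaranteed by this example.

```python
# Minimal sketch: inspect a saved PSF learning result (file name is a placeholder).
from psflearning.io import h5

res, params = h5.load('psfmodel_result.h5')  # result dict (DottedDict) and learning parameters (OmegaConf)

print(list(res.keys()))          # typically includes 'locres', 'res' and 'rois' (see tables below)
print(res.res.I_model.shape)     # learned 3D PSF model
print(res.res.zernike_coeff)     # Zernike coefficients of the learned pupil
print(res.locres.CRLB)           # theoretical localization variance per emitter
```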
3 | ## List of output parameters 4 | ### Single channel 5 | |**Parameters**| Description| 6 | |:----------------------------|:---------------------------| 7 | |**locres**|localization results of the data used for learning | 8 | |   **CRLB**|CRLB, theoretical localization variance of each variable| 9 | |   **LL**|Loglikelihood ratio of each emitter| 10 | |   **loc**|Estimated positions, unit: pixel| 11 | |   **coeff**|Spline coefficients used for spline based localization algorithm| 12 | |   **coeff_reverse**|Same as `coeff` but with z dimension reversed| 13 | |   **coeff_bead**|Same as `coeff` but only for localizing bead data| 14 | |**res**|PSF learning results| 15 | |   **I_model**|Learned PSF model for modelling single molecules, a 3D matrix| 16 | |   **I_model_reverse**|Same as `I_model` but with z dimension reversed| 17 | |   **I_model_bead**|Learned PSF model for modelling bead data| 18 | |   **bg**|Learned background values of each emitter| 19 | |   **intensity**|Learned total photon count of each emitter| 20 | |   **pos**|Learned x,y,z positions of each emitter, unit: pixel| 21 | |   **pupil**|Learned pupil function, a 2D complex matrix| 22 | |   **zernike_coeff**|Learned Zernike coefficients of the pupil function, including both the coefficients for pupil magnitude and pupil phase| 23 | |   **sigma**|Learned widths in x,y of the Gaussian blurring kernel, unit: pixel| 24 | |   **drift_rate**|Learned x,y drift for each bead stack, unit: pixel per z slice| 25 | |   **cor**|Pixel coordinates of final emitters| 26 | |   **cor_all**|Pixel coordinates of all candidate emitters| 27 | |   **apodization**|The apodization term of the pupil, a 2D matrix| 28 | |   **zernike_polynomials**|The matrix representation of each Zernike polynomials used in learning, a set of 2D matrices| 29 | |   **offset**|The minimum value of `I_model`, ideally it should be greater than zero| 30 | |**rois**| | 31 | |   **cor**|Pixel coordinates of final emitters| 32 | |   **fileID**|Data file No. of final emitters| 33 | |   **image_size**|The image size of the raw data, unit: pixel| 34 | |   **psf_data**|The selected rois of final emitters| 35 | |   **psf_fit**|The PSF models of final emitters, same size as `psf_data`| 36 | ### Multi-channel 37 | Below list parameters that are different from [single channel](#Single%20channel) 38 | |**Parameters**| Description| 39 | |:----------------------------|:---------------------------| 40 | |**res**|PSF learning results| 41 | |   **T**|Affine transformation matrix between each target channel to the reference channel, a stack of 3x3 matrices| 42 | |   **channelN**|Learned results from Nth channel, see `res` in [single channel](#Single%20channel), `N` counts from 0.| 43 | |   **imgcenter**|The pixel coordinate of the image center from the raw data, it defines the rotation center of `T`| 44 | |   **xyshift**|The initial estimation of the lateral shift between the target channel to the reference channel, unit: pixel| 45 | ### 4Pi 46 | The first level output parameters are the same as the ones in [multi-channel](#Multi-channel), however the parameters in `channelN` are different from the ones in [single channel](#Single%20channel), below list the difference. 
47 | |**Parameters**| Description| 48 | |:----------------------------|:---------------------------| 49 | |**channelN**|Learned results from Nth channel | 50 | |   **I_model**|Learned model for matrix I in the IAB model, a 3D matrix| 51 | |   **A_model**|Learned model for matrix A and B in the IAB model, a complex 3D matrix| 52 | |   **I_model_reverse**|Same as `I_model` but with z dimension reversed| 53 | |   **A_model_reverse**|Same as `A_model` but with z dimension reversed| 54 | |   **intensity**|Learned total photon (`real(intensity)`) and interference phase (`angle(intensity)`) of each emitter, a complex vector| 55 | |   **phase_dm**|Learned relative phases of the three axial scans in one dataset, a vector of three values| 56 | |   **pupil1**|Learned pupil function of the top emission path, a 2D complex matrix| 57 | |   **pupil2**|Learned pupil function of the bottom emission path, a 2D complex matrix| 58 | |   **zernike_coeff_mag**|Learned Zernike coefficients of the magnitude parts of `pupil1` and `pupil2`| 59 | |   **zernike_coeff_phase**|Learned Zernike coefficients of the phase parts of `pupil1` and `pupil2`| 60 | |   **modulation_depth**|Learned modulation depth, defines the weight factor of the coherent part of the PSF model| 61 | |   **offset**|The minimum value of the PSF model, ideally it should be greater than zero. In IAB model, the PSF model at interference phase equal to zero is $PSF_{model}=I_{model}-2\left\|A_{model}\right\|$| 62 | |   **Zphase**|The stage position (in pixels) multiplied by $2\pi$| 63 | ### Field dependent 64 | Below list parameters that are different from [single channel](#Single%20channel) 65 | |**Parameters**| Description| 66 | |:----------------------------|:---------------------------| 67 | |**locres**|localization results of the data used for learning | 68 | |   *others*|Corresponding values from averaged PSF model| 69 | |   **loc_FD**|Estimated positions from the PSF model for each emitter| 70 | |**res**|PSF learning results| 71 | |   **I_model_all**|Learned PSF model for each emitter, a set of 3D matrices| 72 | |   **I_model_bead**|Learned averaged PSF model for modelling bead data, a 3D matrix| 73 | |   **I_model**|Learned averaged PSF model for modelling single molecules, a 3D matrix| 74 | |   **pupil**|Learned pupil function of each emitter, a set of 2D complex matrix| 75 | |   **zernike_coeff**|Learned Zernike coefficients of the pupil function of each emitter, including both the coefficients for pupil magnitude and pupil phase. 
A set of 2D arrays| 76 | |   **zernike_map**|Learned aberration maps of both the pupil magnitude and pupil phase for each Zernike polynomial| 77 | ### *In situ* PSF 78 | Below list additional parameters from *in situ* learning 79 | |**Parameters**| Description| 80 | |:----------------------------|:---------------------------| 81 | |**res** or **res/channelN**|PSF learning results| 82 | |   **stagepos**|Learned stage position, a positive scalar, unit: micron| 83 | |   **zoffset**|z position of the first slice in the learned PSF model, unit: pixel| 84 | |   **sampleheight**|Learned thickness of the sample chamber in a 4Pi system, unit: micron| 85 | 86 | 87 | 88 | -------------------------------------------------------------------------------- /demo/datapath.yaml: -------------------------------------------------------------------------------- 1 | main_data_dir: 2 | E:/EMBL files/data for PSF learning/example data for uiPSF/ # main folder to your data, it is only useful if all your data are saved in one location, see demo notebooks. 3 | 4 | -------------------------------------------------------------------------------- /environment.yml: -------------------------------------------------------------------------------- 1 | name: envname 2 | channels: 3 | - conda-forge 4 | - defaults 5 | dependencies: 6 | - cudatoolkit=11.2 7 | - cudnn=8.1 8 | - pip 9 | - python=3.7.10 -------------------------------------------------------------------------------- /psflearning/__init__.py: -------------------------------------------------------------------------------- 1 | from . import io 2 | 3 | -------------------------------------------------------------------------------- /psflearning/dataloader.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ries Lab, EMBL, Heidelberg, Germany 3 | All rights reserved 4 | 5 | @author: Sheng Liu 6 | """ 7 | #%% 8 | from pickle import FALSE 9 | import h5py as h5 10 | import czifile as czi 11 | import numpy as np 12 | from skimage import io 13 | # append the path of the parent directory as long as it's not a real package 14 | import glob 15 | import json 16 | from PIL import Image 17 | 18 | 19 | class dataloader: 20 | def __init__(self,param=None): 21 | self.param = param 22 | 23 | def getfilelist(self): 24 | param = self.param 25 | if not param.subfolder: 26 | filelist = glob.glob(param.datapath+'/*'+param.keyword+'*'+param.format) 27 | else: 28 | filelist = [] 29 | folderlist = glob.glob(param.datapath+'/*'+param.subfolder+'*/') 30 | for f in folderlist: 31 | filelist.append(glob.glob(f+'/*'+param.keyword+'*'+param.format)[0]) 32 | 33 | return filelist 34 | 35 | 36 | def loadtiff(self,filelist): 37 | param = self.param 38 | imageraw = [] 39 | for filename in filelist: 40 | print(filename) 41 | if param.datatype == 'smlm': 42 | dat = [] 43 | fID = Image.open(filename) 44 | 45 | for ii in range(param.insitu.frame_range[0],param.insitu.frame_range[1]): 46 | fID.seek(ii) 47 | dat.append(np.asarray(fID)) 48 | dat = np.stack(dat).astype(np.float32) 49 | else: 50 | dat = np.squeeze(io.imread(filename).astype(np.float32)) 51 | if param.channeltype == 'multi': 52 | dat = self.splitchannel(dat) 53 | 54 | dat = (dat-param.ccd_offset)*param.gain 55 | imageraw.append(dat) 56 | imagesall = np.stack(imageraw) 57 | 58 | return imagesall 59 | 60 | def loadmat(self,filelist): 61 | param = self.param 62 | imageraw = [] 63 | for filename in filelist: 64 | print(filename) 65 | fdata = h5.File(filename,'r') 66 | if 
param.varname: 67 | name = [param.varname] 68 | else: 69 | name = list(fdata.keys()) 70 | try: 71 | name.remove('metadata') 72 | except: 73 | pass 74 | try: 75 | name.remove('#refs#') 76 | except: 77 | pass 78 | 79 | if param.channeltype == 'single': 80 | dat = np.squeeze(np.array(fdata.get(name[0])).astype(np.float32)) 81 | else: 82 | if len(name)>1: 83 | dat = [] 84 | for ch in name: 85 | datai = np.squeeze(np.array(fdata.get(ch)).astype(np.float32)) 86 | dat.append(datai) 87 | dat = np.squeeze(np.stack(dat)) 88 | else: 89 | dat = np.squeeze(np.array(fdata.get(name[0])).astype(np.float32)) 90 | dat = self.splitchannel(dat) 91 | 92 | dat = (dat-param.ccd_offset)*param.gain 93 | imageraw.append(dat) 94 | imagesall = np.stack(imageraw) 95 | 96 | return imagesall 97 | 98 | def loadh5(self,filelist): 99 | # currently only for smlm data 100 | param = self.param 101 | imageraw = [] 102 | 103 | for filename in filelist: 104 | f = h5.File(filename,'r') 105 | k = list(f.keys()) 106 | gname = '' 107 | while len(k)==1: 108 | gname += k[0]+'/' 109 | k = list(f[gname].keys()) 110 | datalist = list(f[gname].keys()) 111 | try: 112 | dat = np.squeeze(np.array(f.get(gname+datalist[0])).astype(np.float32)) 113 | except: 114 | dat = np.squeeze(np.array(f.get(gname+datalist[0]+'/'+datalist[0])).astype(np.float32)) 115 | dat = dat[param.insitu.frame_range[0]:param.insitu.frame_range[1]] 116 | dat = (dat-param.ccd_offset)*param.gain 117 | imageraw.append(dat) 118 | imagesall = np.stack(imageraw) 119 | 120 | return imagesall 121 | 122 | 123 | def loadczi(self,filelist): 124 | param = self.param 125 | imageraw = [] 126 | for filename in filelist: 127 | dat = np.squeeze(czi.imread(filename).astype(np.float32)) 128 | dat = (dat-param.ccd_offset)*param.gain 129 | imageraw.append(dat) 130 | imagesall = np.stack(imageraw) 131 | 132 | return imagesall 133 | 134 | def splitchannel(self,dat): 135 | param = self.param 136 | if param.dual.channel_arrange: 137 | if param.dual.channel_arrange == 'up-down': 138 | cc = dat.shape[-2]//2 139 | if param.dual.mirrortype == 'up-down': 140 | dat = np.stack([dat[:,:-cc],np.flip(dat[:,cc:],axis=-2)]) 141 | elif param.dual.mirrortype == 'left-right': 142 | dat = np.stack([dat[:,:-cc],np.flip(dat[:,cc:],axis=-1)]) 143 | else: 144 | dat = np.stack([dat[:,:-cc],dat[:,cc:]]) 145 | else: 146 | cc = dat.shape[-1]//2 147 | if param.dual.mirrortype == 'up-down': 148 | dat = np.stack([dat[...,:-cc],np.flip(dat[...,cc:],axis=-2)]) 149 | elif param.dual.mirrortype == 'left-right': 150 | dat = np.stack([dat[...,:-cc],np.flip(dat[...,cc:],axis=-1)]) 151 | else: 152 | dat = np.stack([dat[...,:-cc],dat[...,cc:]]) 153 | if param.multi.channel_size: 154 | roisz = param.multi.channel_size 155 | xdiv = list(range(0,dat.shape[-1],roisz[-1])) 156 | ydiv = list(range(0,dat.shape[-2],roisz[-2])) 157 | im = [] 158 | for yd in ydiv[:-1]: 159 | for xd in xdiv[:-1]: 160 | im.append(dat[...,yd:yd+roisz[-2],xd:xd+roisz[-1]]) 161 | 162 | dat = np.stack(im) 163 | 164 | 165 | return dat 166 | -------------------------------------------------------------------------------- /psflearning/io/__init__.py: -------------------------------------------------------------------------------- 1 | from . import param 2 | from . 
import h5 -------------------------------------------------------------------------------- /psflearning/io/h5.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Union 3 | 4 | from omegaconf import OmegaConf, DictConfig 5 | 6 | import hdfdict 7 | from dotted_dict import DottedDict 8 | import h5py 9 | 10 | def load(path: Union[str, Path]) -> DictConfig: 11 | f = h5py.File(path, 'r') 12 | res = DottedDict(hdfdict.load(f,lazy=False)) 13 | params = OmegaConf.create(f.attrs['params']) 14 | return res, params 15 | -------------------------------------------------------------------------------- /psflearning/io/param.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from typing import Union 3 | import os 4 | from omegaconf import OmegaConf, DictConfig 5 | 6 | 7 | def load(path: Union[str, Path]) -> DictConfig: 8 | return OmegaConf.load(path) 9 | 10 | def combine(basefile,psftype=None,channeltype=None,sysfile=None): 11 | thispath = os.path.dirname(os.path.abspath(__file__)) 12 | pkgpath = os.path.dirname(os.path.dirname(thispath)) 13 | fparam = load(pkgpath+'/config/'+basefile+'.yaml').Params 14 | if psftype is not None: 15 | psfparam = load(pkgpath+'/config/psftype/'+psftype+'.yaml').Params 16 | fparam = redefine(fparam,psfparam) 17 | if channeltype is not None: 18 | chparam = load(pkgpath+'/config/channeltype/'+channeltype+'.yaml').Params 19 | fparam = redefine(fparam,chparam) 20 | if sysfile is not None: 21 | sysparam = load(pkgpath+'/config/systemtype/'+sysfile+'.yaml').Params 22 | fparam = redefine(fparam,sysparam) 23 | if psftype == 'zernike' and channeltype == '4pi': 24 | fparam.PSFtype = 'zernike' 25 | if 'insitu' in psftype: 26 | fparam.roi.gauss_sigma[-1] = max([4,fparam.roi.gauss_sigma[-1]]) 27 | fparam.roi.gauss_sigma[-2] = max([4,fparam.roi.gauss_sigma[-2]]) 28 | fparam.roi.max_kernel[-1] = max([5,fparam.roi.max_kernel[-1]]) 29 | fparam.roi.max_kernel[-2] = max([5,fparam.roi.max_kernel[-2]]) 30 | if 'FD' in psftype: 31 | fparam.option.model.bin = 1 32 | 33 | return fparam 34 | 35 | def redefine(baseparam,userparam): 36 | for k, v in userparam.items(): 37 | try: 38 | for k1, v1 in v.items(): 39 | try: 40 | for k2, v2 in v1.items(): 41 | try: 42 | for k3, v3 in v2.items(): 43 | baseparam[k][k1][k2][k3] = v3 44 | except: 45 | baseparam[k][k1][k2] = v2 46 | except: 47 | baseparam[k][k1] = v1 48 | except: 49 | baseparam[k]=v 50 | 51 | return baseparam 52 | 53 | 54 | -------------------------------------------------------------------------------- /psflearning/learning/__init__.py: -------------------------------------------------------------------------------- 1 | from .data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 2 | from .data_representation.PreprocessedImageDataMultiChannel_file import PreprocessedImageDataMultiChannel 3 | from .data_representation.PreprocessedImageDataSingleChannel_file import PreprocessedImageDataSingleChannel 4 | from .data_representation.PreprocessedImageDataSingleChannel_smlm_file import PreprocessedImageDataSingleChannel_smlm 5 | from .data_representation.PreprocessedImageDataMultiChannel_smlm_file import PreprocessedImageDataMultiChannel_smlm 6 | 7 | 8 | from .fitters.FitterInterface_file import FitterInterface 9 | from .fitters.Fitter_file import Fitter 10 | 11 | from .psfs.PSFInterface_file import PSFInterface 12 | from .psfs.PSFVolumeBased_file import 
PSFVolumeBased 13 | from .psfs.PSFPupilBased_file import PSFPupilBased 14 | from .psfs.PSFZernikeBased_file import PSFZernikeBased 15 | from .psfs.PSFZernikeBased_FD_file import PSFZernikeBased_FD 16 | from .psfs.PSFMultiChannel_file import PSFMultiChannel 17 | from .psfs.PSFVolumeBased4pi_file import PSFVolumeBased4pi 18 | from .psfs.PSFPupilBased4pi_file import PSFPupilBased4pi 19 | from .psfs.PSFZernikeBased4pi_file import PSFZernikeBased4pi 20 | from .psfs.PSFMultiChannel4pi_file import PSFMultiChannel4pi 21 | from .psfs.PSFZernikeBased_vector_smlm_file import PSFZernikeBased_vector_smlm 22 | from .psfs.PSFPupilBased_vector_smlm_file import PSFPupilBased_vector_smlm 23 | from .psfs.PSFMultiChannel_smlm_file import PSFMultiChannel_smlm 24 | from .psfs.PSFZernikeBased_FD_smlm_file import PSFZernikeBased_FD_smlm 25 | from .psfs.PSFZernikeBased4pi_smlm_file import PSFZernikeBased4pi_smlm 26 | from .psfs.PSFMultiChannel4pi_smlm_file import PSFMultiChannel4pi_smlm 27 | 28 | from .loclib import localizationlib 29 | 30 | from . import loss_functions 31 | from .loss_functions import * 32 | 33 | from . import optimizers 34 | from .optimizers import * 35 | from .utilities import * -------------------------------------------------------------------------------- /psflearning/learning/data_representation/PreprocessedImageDataInterface_file.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | import pickle 3 | 4 | 5 | class PreprocessedImageDataInterface: 6 | """ 7 | Interface that ensures consistency and compatability between all old and new implementations of data classes, fitters and psfs. 8 | Classes implementing this interafce should hold preprocessed image data and provide needed information to fitters and psfs. 9 | In general, we use the following convention for the dimensions of image data. This can be extended by adding dimensions on the left. 10 | channels, images/rois, z, y, x 11 | """ 12 | 13 | __metaclass__ = ABCMeta 14 | 15 | @abstractmethod 16 | def get_image_data(self) -> list: 17 | """ 18 | Is called from a fitter or a psf and returns a list containing the needed image information. 19 | In general, these are the cropped rois, the centers of the rois and in some 20 | cases a list of indices that indicate from which image the roi was cut. 21 | """ 22 | raise NotImplementedError("You need to implement a 'get_image_data' method in your data representation class.") 23 | 24 | def save(self, filename: str) -> None: 25 | """ 26 | Save object to file. 27 | """ 28 | with open(filename, "wb") as f: 29 | pickle.dump(self, f) 30 | 31 | @classmethod 32 | def load(filename: str): 33 | """ 34 | Load object from file. 35 | """ 36 | with open(filename, "rb") as f: 37 | self = pickle.load(f) 38 | return self -------------------------------------------------------------------------------- /psflearning/learning/data_representation/PreprocessedImageDataMultiChannel_file.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sp 3 | import matplotlib.pyplot as plt 4 | from typing import Type 5 | 6 | from .PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 7 | 8 | class PreprocessedImageDataMultiChannel(PreprocessedImageDataInterface): 9 | """ 10 | Class that handles preprocessed data for multi-channel case. 11 | Provides access to images data (rois, centers, etc.) for fitter and psf classes. 
12 | Is basically a wrapper around multiple instance of the provided single-channel class. 13 | """ 14 | def __init__(self, images, single_channel_dtype: Type[PreprocessedImageDataInterface], is4pi=None) -> None: 15 | if is4pi is None or is4pi is False: 16 | self.is4pi = False 17 | elif is4pi is True: 18 | self.is4pi = True 19 | else: 20 | raise ValueError("is4pi should be True or False.") 21 | 22 | self.single_channel_dtype = single_channel_dtype 23 | self.rois_available = False 24 | self.channels = [] # each element is an instance of single_channel_dtype 25 | 26 | for channel_data in images: 27 | new_single_channel_instance = self.single_channel_dtype(channel_data, self.is4pi) 28 | self.channels.append(new_single_channel_instance) 29 | 30 | self.min_border_dist = None # needed for get_min_border_dist() 31 | self.numofchannel = len(self.channels) 32 | self.shiftxy = None 33 | return 34 | 35 | def find_rois(self, roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV=None,min_center_dist=None,max_bead_number=None): 36 | """ 37 | Cuts out rois around local maxima in all channels seperately. 38 | Just calls the 'find_rois' function for each channel. 39 | """ 40 | self.min_border_dist = min_border_dist # needed for get_min_border_dist() 41 | 42 | for channel in self.channels: 43 | channel.find_rois(roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV, min_center_dist,max_bead_number) 44 | 45 | self.rois_available = True 46 | 47 | return 48 | 49 | 50 | def cut_new_rois(self, channel, centers, file_idxs, roi_shape=None, min_border_dist=None): 51 | """ 52 | Cuts new rois from images with specified centers in specified channel. 53 | Just calls 'cut_new_rois' function of specified channel. 54 | """ 55 | self.channels[channel].cut_new_rois(centers, file_idxs, roi_shape, min_border_dist) 56 | self.rois_available = True 57 | 58 | return 59 | 60 | def get_image_data(self): 61 | """ 62 | Provides the necessary image information (e.g., rois, centers, ...) for the psf class 63 | and the fitter class. Just calls 'get_image_data' function of each channel and appends 64 | the results to list. 65 | """ 66 | if self.rois_available: 67 | results = [] 68 | for channel in self.channels: 69 | results.append(channel.get_image_data()) 70 | 71 | return map(list, zip(*results)) # a way to tranpose a list of iterateables 72 | # needed to correct the order of the resuts without inferring how the results look like 73 | # see: https://stackoverflow.com/questions/6473679/transpose-list-of-lists 74 | else: 75 | raise RuntimeError("Can't call 'get_image_data()' since 'rois_available' flag is False.\nThis is probably due to the fact that you did not call 'find_rois()' before using this ImageData.") 76 | 77 | def get_channel(self, channel): 78 | """ 79 | Returns the object holding the data for the channel with index 'channel'. 80 | """ 81 | return self.channels[channel] 82 | 83 | def get_min_border_dist(self): 84 | """ 85 | Returns the min_border_dist parameter from the find_rois() function. 
86 | """ 87 | return self.min_border_dist 88 | 89 | 90 | 91 | def pair_coordinates(self,delete_id=None): 92 | _, rois, centers, file_idxs = self.get_image_data() 93 | mask = np.ones(centers[0].shape[0]) 94 | if delete_id is not None: 95 | mask[delete_id]=0 96 | mask = mask==1 97 | ref_pos = centers[0][mask,:] 98 | ref_fid = file_idxs[0][mask] 99 | pair_pos = [None]*self.numofchannel 100 | pair_file_id = [None]*self.numofchannel 101 | for i in range(0,self.numofchannel): 102 | tar_pos = centers[i] 103 | tar_fid = file_idxs[i] 104 | pairs_tar_pos_id = [] 105 | pairs_ref_pos_id = [] 106 | for ref_pos_idx in range(ref_pos.shape[0]): 107 | same_file_idxs = np.where(tar_fid == ref_fid[ref_pos_idx])[0] 108 | # only allow pairs when they are from same file and not already paired 109 | available = [i for i in same_file_idxs if i not in pairs_tar_pos_id] 110 | if not available: 111 | continue 112 | tar_posi = tar_pos[available] 113 | ref_posi = ref_pos[ref_pos_idx] 114 | if self.shiftxy is None: 115 | distances = np.sqrt(np.sum(np.square(tar_posi[:,-2:] - ref_posi[-2:]), axis=1)) 116 | else: 117 | distances = np.sqrt(np.sum(np.square(tar_posi[:,-2:]-self.shiftxy[i] - ref_posi[-2:]), axis=1)) 118 | 119 | min_idx = np.argmin(distances) 120 | # TODO: is it necessary to add an additional hyperparameter for this 121 | if distances[min_idx] <= 5.: 122 | pairs_tar_pos_id.append(available[min_idx]) 123 | pairs_ref_pos_id.append(ref_pos_idx) 124 | 125 | ref_fid = ref_fid[pairs_ref_pos_id] 126 | ref_pos = ref_pos[pairs_ref_pos_id] 127 | pair_pos[i] = tar_pos[pairs_tar_pos_id] 128 | pair_file_id[i] = tar_fid[pairs_tar_pos_id] 129 | for j in range(0,i): 130 | pair_pos[j] = pair_pos[j][pairs_ref_pos_id] 131 | pair_file_id[j] = pair_file_id[j][pairs_ref_pos_id] 132 | 133 | for i in range(0,self.numofchannel): 134 | self.cut_new_rois(i, pair_pos[i], pair_file_id[i],roi_shape=rois[0].shape[-centers[0].shape[1]:]) 135 | 136 | def process(self,roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel,pixelsize_x,pixelsize_z,bead_radius, 137 | min_center_dist=None,FOV=None, modulation_period=None, padPSF=True, plot=True,pixelsize_y=None, isVolume = True,skew_const=None, max_bead_number=None): 138 | 139 | self.find_rois(roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV,min_center_dist, max_bead_number) 140 | _, rois, _, _ = self.get_image_data() 141 | for i in range(len(rois)): 142 | print(f"rois shape channel {i}: {rois[i].shape}") 143 | 144 | # find channel shift 145 | if self.shiftxy is None: 146 | self.find_channel_shift_cor(plot=False) 147 | 148 | _, _, centers, _ = self.get_image_data() 149 | self.centers_all = centers 150 | # pair coordinates 151 | self.pair_coordinates() 152 | _, rois, centers, _ = self.get_image_data() 153 | cor0 = centers[0] 154 | pv = self.shiftxy[1:] 155 | if plot: 156 | for i, cor1 in enumerate(centers[1:]): 157 | plt.figure(figsize=[6,6]) 158 | plt.plot(cor0[:,-1],cor0[:,-2],'o',markersize = 8,markerfacecolor='none') 159 | plt.plot(cor1[:,-1]-pv[i,-1],cor1[:,-2]-pv[i,-2],'x') 160 | plt.show() 161 | 162 | for i in range(len(rois)): 163 | print(f"rois shape channel {i}: {rois[i].shape}") 164 | 165 | self.pixelsize_z = pixelsize_z 166 | self.pixelsize_x = pixelsize_x 167 | self.bead_radius = bead_radius 168 | offset = np.min((np.quantile(rois,1e-3),0)) 169 | for i in range(len(self.channels)): 170 | self.channels[i].rois = rois[i]-offset 171 | 172 | # pad rois along z dimension 173 | _, rois, _, _ = self.get_image_data() 174 | if padPSF: 175 | rois = np.stack(rois) 
176 | value = np.empty((), dtype=object) 177 | value[()] = (0, 0) 178 | padsize = np.full((len(rois.shape), ), value, dtype=object) 179 | padsize[-3] = (np.int32(bead_radius//pixelsize_z),np.int32(bead_radius//pixelsize_z)) 180 | roisL = np.pad(rois,tuple(padsize),mode='edge') 181 | for i in range(len(self.channels)): 182 | self.channels[i].rois = roisL[i] 183 | print(f"roisL shape channel {i}: {roisL.shape}") 184 | # generate bead kernel 185 | if pixelsize_y is None: 186 | pixelsize_y = pixelsize_x 187 | self.pixelsize_y = pixelsize_y 188 | 189 | if modulation_period is not None: 190 | for channel in self.channels: 191 | channel.zT = modulation_period/pixelsize_z 192 | 193 | for channel in self.channels: 194 | channel.pixelsize_x = pixelsize_x 195 | channel.pixelsize_y = pixelsize_y 196 | channel.pixelsize_z = pixelsize_z 197 | channel.bead_radius = bead_radius 198 | return 199 | 200 | def find_channel_shift_img(self): 201 | imgs, _, centers, _ = self.get_image_data() 202 | img0 = np.sum(np.max(imgs[0],axis = 1),axis=0) 203 | shiftxy = [] 204 | for img in imgs: 205 | 206 | img1 = np.sum(np.max(img,axis = 1),axis=0) 207 | cor_img_ft = np.fft.fft2(img0) * np.conj(np.fft.fft2(img1)) 208 | cor_img_ft = sp.ndimage.fourier_gaussian(cor_img_ft, sigma=2.) 209 | cor_img = np.real(np.fft.fftshift(np.fft.ifft2(cor_img_ft))) 210 | 211 | # find max and calculate dx, dy 212 | # TODO: is argmax okay or is there a better suited way to find maximum like some gaussian fitting? 213 | dy, dx = np.unravel_index(np.argmax(cor_img), shape=cor_img.shape) 214 | dy = (cor_img.shape[0]-1)/2 - dy 215 | dx = (cor_img.shape[1]-1)/2 - dx 216 | shiftxy.append([dy,dx]) 217 | 218 | self.shiftxy = np.float32(shiftxy) 219 | return 220 | 221 | def find_channel_shift_cor(self,plot=True): 222 | _, _, centers, _ = self.get_image_data() 223 | cor0 = centers[0][:,-2:] 224 | shiftxy = [] 225 | for cor1 in centers: 226 | cor1 = cor1[:,-2:] 227 | pv = (np.mean(cor1,axis=0)-np.mean(cor0,axis=0)) 228 | N1 = cor1.shape[0] 229 | for k in range(0,5): 230 | dist = np.sqrt(np.sum((cor1.reshape((N1,1,2))-cor0-pv)**2,axis=-1)) 231 | q = 1/(dist+1e-3) 232 | ind1,ind0 = np.where(q>=np.quantile(q.flatten(),0.985)) 233 | pv = np.mean(cor1[ind1],axis=0)-np.mean(cor0[ind0],axis=0) 234 | if plot: 235 | plt.figure(figsize=[6,6]) 236 | plt.plot(cor0[ind0,0],cor0[ind0,1],'o',markersize = 8,markerfacecolor='none') 237 | plt.plot(cor1[ind1,0]-pv[0],cor1[ind1,1]-pv[1],'x') 238 | plt.show() 239 | 240 | shiftxy.append(pv) 241 | 242 | self.shiftxy = np.float32(shiftxy) 243 | return -------------------------------------------------------------------------------- /psflearning/learning/data_representation/PreprocessedImageDataMultiChannel_smlm_file.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sp 3 | import matplotlib.pyplot as plt 4 | from typing import Type 5 | 6 | from .PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 7 | 8 | class PreprocessedImageDataMultiChannel_smlm(PreprocessedImageDataInterface): 9 | """ 10 | Class that handles preprocessed data for multi-channel case. 11 | Provides access to images data (rois, centers, etc.) for fitter and psf classes. 12 | Is basically a wrapper around multiple instance of the provided single-channel class. 
13 | """ 14 | def __init__(self, images, single_channel_dtype: Type[PreprocessedImageDataInterface], is4pi=None) -> None: 15 | if is4pi is None or is4pi is False: 16 | self.is4pi = False 17 | elif is4pi is True: 18 | self.is4pi = True 19 | else: 20 | raise ValueError("is4pi should be True or False.") 21 | 22 | self.single_channel_dtype = single_channel_dtype 23 | self.rois_available = False 24 | self.channels = [] # each element is an instance of single_channel_dtype 25 | 26 | for channel_data in images: 27 | new_single_channel_instance = self.single_channel_dtype(channel_data, self.is4pi) 28 | self.channels.append(new_single_channel_instance) 29 | 30 | self.min_border_dist = None # needed for get_min_border_dist() 31 | self.numofchannel = len(self.channels) 32 | self.shiftxy = None 33 | return 34 | 35 | def find_rois(self, roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV=None,min_center_dist=None,max_bead_number=None): 36 | """ 37 | Cuts out rois around local maxima in all channels seperately. 38 | Just calls the 'find_rois' function for each channel. 39 | """ 40 | self.min_border_dist = min_border_dist # needed for get_min_border_dist() 41 | ch = [2,3,0,1] 42 | for j,channel in enumerate(self.channels): 43 | if self.is4pi: 44 | channel.patterns = self.channels[ch[j]].images 45 | channel.find_rois(roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV, min_center_dist,max_bead_number) 46 | 47 | self.rois_available = True 48 | 49 | return 50 | 51 | def cut_new_rois(self, channel, centers, file_idxs, roi_shape=None, min_border_dist=None): 52 | """ 53 | Cuts new rois from images with specified centers in specified channel. 54 | Just calls 'cut_new_rois' function of specified channel. 55 | """ 56 | self.channels[channel].cut_new_rois(centers, file_idxs, roi_shape, min_border_dist) 57 | self.rois_available = True 58 | 59 | return 60 | 61 | def get_image_data(self): 62 | """ 63 | Provides the necessary image information (e.g., rois, centers, ...) for the psf class 64 | and the fitter class. Just calls 'get_image_data' function of each channel and appends 65 | the results to list. 66 | """ 67 | if self.rois_available: 68 | results = [] 69 | for channel in self.channels: 70 | results.append(channel.get_image_data()) 71 | 72 | return map(list, zip(*results)) # a way to tranpose a list of iterateables 73 | # needed to correct the order of the resuts without inferring how the results look like 74 | # see: https://stackoverflow.com/questions/6473679/transpose-list-of-lists 75 | else: 76 | raise RuntimeError("Can't call 'get_image_data()' since 'rois_available' flag is False.\nThis is probably due to the fact that you did not call 'find_rois()' before using this ImageData.") 77 | 78 | def get_channel(self, channel): 79 | """ 80 | Returns the object holding the data for the channel with index 'channel'. 81 | """ 82 | return self.channels[channel] 83 | 84 | def resetdata(self): 85 | for channel in self.channels: 86 | channel.resetdata() 87 | _ = self.pair_coordinates() 88 | 89 | 90 | def get_min_border_dist(self): 91 | """ 92 | Returns the min_border_dist parameter from the find_rois() function. 
93 | """ 94 | return self.min_border_dist 95 | 96 | 97 | 98 | def pair_coordinates(self,delete_id=None): 99 | _, _, centers, file_idxs = self.get_image_data() 100 | mask = np.ones(centers[0].shape[0]) 101 | if delete_id is not None: 102 | mask[delete_id]=0 103 | mask = mask==1 104 | ref_pos = centers[0][mask,:] 105 | ref_fid = file_idxs[0][mask] 106 | pair_pos = [None]*self.numofchannel 107 | pair_file_id = [None]*self.numofchannel 108 | pair_pos_id = [None]*self.numofchannel 109 | for i in range(0,self.numofchannel): 110 | tar_pos = centers[i] 111 | tar_fid = file_idxs[i] 112 | pairs_tar_pos_id = [] 113 | pairs_ref_pos_id = [] 114 | for ref_pos_idx in range(ref_pos.shape[0]): 115 | same_file_idxs = np.where(tar_fid == ref_fid[ref_pos_idx])[0] 116 | # only allow pairs when they are from same file and not already paired 117 | available = [i for i in same_file_idxs if i not in pairs_tar_pos_id] 118 | if not available: 119 | continue 120 | tar_posi = tar_pos[available] 121 | ref_posi = ref_pos[ref_pos_idx] 122 | if self.shiftxy is None: 123 | distances = np.sqrt(np.sum(np.square(tar_posi - ref_posi), axis=1)) 124 | else: 125 | distances = np.sqrt(np.sum(np.square(tar_posi-self.shiftxy[i] - ref_posi), axis=1)) 126 | 127 | min_idx = np.argmin(distances) 128 | # TODO: is it necessary to add an additional hyperparameter for this 129 | if distances[min_idx] <= 5.: 130 | pairs_tar_pos_id.append(available[min_idx]) 131 | pairs_ref_pos_id.append(ref_pos_idx) 132 | 133 | ref_fid = ref_fid[pairs_ref_pos_id] 134 | ref_pos = ref_pos[pairs_ref_pos_id] 135 | pair_pos[i] = tar_pos[pairs_tar_pos_id] 136 | pair_file_id[i] = tar_fid[pairs_tar_pos_id] 137 | pair_pos_id[i] = np.array(pairs_tar_pos_id) 138 | for j in range(0,i): 139 | pair_pos[j] = pair_pos[j][pairs_ref_pos_id] 140 | pair_file_id[j] = pair_file_id[j][pairs_ref_pos_id] 141 | pair_pos_id[j] = pair_pos_id[j][pairs_ref_pos_id] 142 | 143 | for i in range(0,self.numofchannel): 144 | self.cut_new_rois(i, pair_pos[i], pair_file_id[i]) 145 | 146 | return pair_pos_id 147 | 148 | def process(self,roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel,pixelsize_x,pixelsize_z,bead_radius, 149 | min_center_dist=None,FOV=None, modulation_period=None, padPSF=True, plot=True,pixelsize_y=None, isVolume = True,skew_const=None, max_bead_number=None): 150 | 151 | self.find_rois(roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV,min_center_dist, max_bead_number) 152 | _, rois, _, _ = self.get_image_data() 153 | for i in range(len(rois)): 154 | print(f"rois shape channel {i}: {rois[i].shape}") 155 | 156 | # find channel shift 157 | self.find_channel_shift_img() 158 | 159 | _, _, centers, _ = self.get_image_data() 160 | self.centers_all = centers 161 | # pair coordinates 162 | _ = self.pair_coordinates() 163 | _, rois, centers, _ = self.get_image_data() 164 | cor0 = centers[0] 165 | pv = self.shiftxy[1:] 166 | if plot: 167 | for i, cor1 in enumerate(centers[1:]): 168 | plt.figure(figsize=[6,6]) 169 | plt.plot(cor0[:,1],cor0[:,0],'o',markersize = 8,markerfacecolor='none') 170 | plt.plot(cor1[:,1]-pv[i,1],cor1[:,0]-pv[i,0],'x') 171 | plt.show() 172 | 173 | for i in range(len(rois)): 174 | print(f"rois shape channel {i}: {rois[i].shape}") 175 | 176 | self.pixelsize_z = pixelsize_z 177 | self.pixelsize_x = pixelsize_x 178 | self.bead_radius = bead_radius 179 | offset = np.min((np.quantile(rois,1e-3),0)) 180 | for i in range(len(self.channels)): 181 | self.channels[i].rois = rois[i]-offset 182 | self.channels[i].offset = offset 183 | # pad rois 
along z dimension 184 | _, rois, _, _ = self.get_image_data() 185 | if padPSF: 186 | rois = np.stack(rois) 187 | value = np.empty((), dtype=object) 188 | value[()] = (0, 0) 189 | padsize = np.full((len(rois.shape), ), value, dtype=object) 190 | padsize[-3] = (np.int32(bead_radius//pixelsize_z),np.int32(bead_radius//pixelsize_z)) 191 | roisL = np.pad(rois,tuple(padsize),mode='edge') 192 | for i in range(len(self.channels)): 193 | self.channels[i].rois = roisL[i] 194 | print(f"roisL shape channel {i}: {roisL.shape}") 195 | # generate bead kernel 196 | if pixelsize_y is None: 197 | pixelsize_y = pixelsize_x 198 | self.pixelsize_y = pixelsize_y 199 | 200 | if modulation_period is not None: 201 | for channel in self.channels: 202 | channel.zT = modulation_period/pixelsize_z 203 | 204 | for channel in self.channels: 205 | channel.pixelsize_x = pixelsize_x 206 | channel.pixelsize_y = pixelsize_y 207 | channel.pixelsize_z = pixelsize_z 208 | channel.bead_radius = bead_radius 209 | return 210 | 211 | def find_channel_shift_img(self): 212 | imgs, _, centers, _ = self.get_image_data() 213 | img0 = np.max(imgs[0],axis = 0) 214 | img0 /= np.max(img0) 215 | shiftxy = [] 216 | for img in imgs: 217 | 218 | img1 = np.max(img,axis = 0) 219 | img1 /= np.max(img1) 220 | cor_img_ft = np.fft.fft2(img0) * np.conj(np.fft.fft2(img1)) 221 | cor_img_ft = sp.ndimage.fourier_gaussian(cor_img_ft, sigma=2.) 222 | cor_img = np.real(np.fft.fftshift(np.fft.ifft2(cor_img_ft))) 223 | 224 | # find max and calculate dx, dy 225 | # TODO: is argmax okay or is there a better suited way to find maximum like some gaussian fitting? 226 | dy, dx = np.unravel_index(np.argmax(cor_img), shape=cor_img.shape) 227 | dy = (cor_img.shape[0]-1)/2 - dy 228 | dx = (cor_img.shape[1]-1)/2 - dx 229 | shiftxy.append([dy,dx]) 230 | 231 | self.shiftxy = np.float32(shiftxy) 232 | return 233 | 234 | def find_channel_shift_cor(self,plot=True): 235 | _, _, centers, _ = self.get_image_data() 236 | cor0 = centers[0] 237 | shiftxy = [] 238 | for cor1 in centers: 239 | pv = (np.mean(cor1,axis=0)-np.mean(cor0,axis=0)) 240 | N1 = cor1.shape[0] 241 | for k in range(0,5): 242 | dist = np.sqrt(np.sum((cor1.reshape((N1,1,2))-cor0-pv)**2,axis=-1)) 243 | q = 1/(dist+1e-3) 244 | ind1,ind0 = np.where(q>=np.quantile(q.flatten(),0.975)) 245 | pv = np.mean(cor1[ind1],axis=0)-np.mean(cor0[ind0],axis=0) 246 | if plot: 247 | plt.figure(figsize=[6,6]) 248 | plt.plot(cor0[ind0,0],cor0[ind0,1],'o',markersize = 8,markerfacecolor='none') 249 | plt.plot(cor1[ind1,0]-pv[0],cor1[ind1,1]-pv[1],'x') 250 | plt.show() 251 | 252 | shiftxy.append(pv) 253 | 254 | self.shiftxy = np.float32(shiftxy) 255 | return -------------------------------------------------------------------------------- /psflearning/learning/data_representation/PreprocessedImageDataSingleChannel_file.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import h5py 4 | import numpy as np 5 | import scipy as sp 6 | import scipy.special as spf 7 | 8 | import matplotlib.pyplot as plt 9 | from .. import imagetools as nip 10 | from tkinter import messagebox as mbox 11 | import sys 12 | 13 | from .PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 14 | 15 | class PreprocessedImageDataSingleChannel(PreprocessedImageDataInterface): 16 | """ 17 | Class that handles preprocessed data for single-channel case. 18 | Provides access to images data (rois, centers, etc.) for fitter and psf classes. 
19 | """ 20 | def __init__(self, images, is4pi=None) -> None: 21 | # TODO: instead of using a boolean flag one could think of using a string 22 | # this would allow for more options 23 | # the question is if this makes sense or if it makes more sense to create a new class 24 | # for other types of psfs 25 | # here we used the flag since almost everything is identical, only the check function 26 | # and the func_2Dimage are different 27 | if is4pi is None or is4pi is False: 28 | self.is4pi = False 29 | self.num_dims = 4 30 | self.dim_names = "images, z, y, x" 31 | self.func_2Dimage = lambda ims: np.max(ims, axis=-3) # used in find_rois() 32 | elif is4pi is True: 33 | self.is4pi = True 34 | self.num_dims = 5 35 | self.dim_names = "images, phi, z, y, x" 36 | self.func_2Dimage = lambda ims: np.max(ims[0], axis=0) # used in find_rois() 37 | else: 38 | raise ValueError("is4pi should be True or False.") 39 | 40 | self.images = None 41 | self.check_and_init_images(images) # check if input is valid 42 | self.rois = [] 43 | self.centers = [] 44 | self.file_idxs = [] 45 | self.rois_available = False 46 | self.min_border_dist = None # needed in cut_new_rois() 47 | self.skew_const = None 48 | self.zT = None 49 | return 50 | 51 | def check_and_init_images(self, images): 52 | """ 53 | Checks if input is valid and initializes image attribute. 54 | """ 55 | try: 56 | # check if everything has same shape 57 | # and cast to float32 --> allows correct loss calculation in fitter 58 | self.images = np.array(images, dtype=np.float32) 59 | except: 60 | raise ValueError("Was not able to convert input to numpy array.\nCheck that dimensions are the same for all channels and for all images.") 61 | 62 | if self.images.ndim != self.num_dims: 63 | raise ValueError(f"Input needs to have {self.num_dims} dimensions: {self.dim_names}.") 64 | 65 | return 66 | 67 | def find_rois(self, roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel,FOV=None, min_center_dist=None, max_bead_number=None): 68 | """ 69 | Cuts out rois around local maxima. 
70 | """ 71 | self.min_border_dist = min_border_dist # needed in cut_new_rois() 72 | 73 | all_rois = [] 74 | all_centers = [] 75 | file_idxs = [] 76 | 77 | for file_idx, image in enumerate(self.images): 78 | # TODO: try/except, since nip.extractMuliPeaks throws error if no roi is found 79 | if len(roi_size)>2: 80 | im2 = image 81 | else: 82 | im2 = self.func_2Dimage(image) 83 | rois, centers = nip.extractMultiPeaks(im2, ROIsize=roi_size, sigma=gaus_sigma, 84 | borderDist=min_border_dist, threshold_rel=max_threshold, 85 | alternateImg=image, kernel=max_kernel) 86 | 87 | # remove rois/centers that are to close together 88 | if rois is not None: 89 | if min_center_dist is None: 90 | min_center_dist = np.hypot(roi_size[-2], roi_size[-1]) 91 | rois, centers = self.remove_close_rois(rois, centers, min_center_dist) 92 | if FOV is not None: 93 | fov = np.array(FOV) 94 | #inFov = (coordinates[:,-1]>= fov[0]-fov[2]/2) & (coordinates[:,-1] <= fov[0]+fov[2]/2) & (coordinates[:,-2]>= fov[1]-fov[3]/2) & (coordinates[:,-2] <= fov[1]+fov[3]/2) 95 | coord_r = (centers[:,-1]-fov[1])**2+(centers[:,-2]-fov[0])**2 96 | inFov = coord_r<(fov[2]**2) 97 | rois = rois[inFov] 98 | centers = centers[inFov] 99 | 100 | all_rois.append(rois) 101 | all_centers.append(centers) 102 | file_idxs += [file_idx] * rois.shape[0] 103 | if max_bead_number: 104 | if len(file_idxs)>max_bead_number: 105 | break 106 | L = np.min((max_bead_number,len(file_idxs))) 107 | # convert to numpy arrays and make sure everything has correct dtypes 108 | if not all_rois: 109 | #mbox.showerror("segmentation error","no bead is found") 110 | raise RuntimeError('no bead is found') 111 | self.rois = np.concatenate(all_rois)[0:L].astype(np.float32) 112 | self.centers = np.concatenate(all_centers)[0:L].astype(np.int32) 113 | self.centers_all = np.concatenate(all_centers)[0:L].astype(np.int32) 114 | self.file_idxs = np.array(file_idxs)[0:L].astype(np.int32) 115 | self.rois_available = True 116 | self.image_size = self.images.shape 117 | return 118 | 119 | 120 | def remove_close_rois(self, rois, centers, min_dist): 121 | """ 122 | Calculates the distance between all rois/centers and removes the ones 123 | that are to close to each other in order to ensure that there is only 124 | one signal/bead per roi. 125 | """ 126 | # TODO: there is one corner case that is not handled here: 127 | # if two beads are close together and one (and only one) is to close to border 128 | # in this case only the rois that is not to close to the border is cut 129 | # but since the other one is not the first one is not filtered out here 130 | # so it could be possible that there are two beads visible in one roi... 131 | dist_matrix = sp.spatial.distance_matrix(centers, centers) 132 | keep_matrix_idxs = np.where((0 == dist_matrix) | (dist_matrix > min_dist)) 133 | unique, counts = np.unique(keep_matrix_idxs[0], return_counts=True) 134 | keep_idxs = unique[counts == centers.shape[0]] 135 | return rois[keep_idxs], centers[keep_idxs] 136 | 137 | def cut_new_rois(self, centers, file_idxs, roi_size=None, min_border_dist=None): 138 | """ 139 | Cuts new rois from images with specified centers. 
140 | """ 141 | # set default values 142 | if roi_size is None: 143 | roi_size = self.rois.shape[-2:] 144 | if min_border_dist is None: 145 | min_border_dist = self.min_border_dist 146 | 147 | if len(roi_size)==3: 148 | Nz = roi_size[0] 149 | else: 150 | Nz = self.images.shape[-3] 151 | 152 | if hasattr(self,'skew_const'): 153 | if self.skew_const: 154 | 155 | roisize_x = np.int32(roi_size[-1]+Nz*np.abs(self.skew_const[-1])+1) 156 | roisize_y = np.int32(roi_size[-2]+Nz*np.abs(self.skew_const[-2])+1) 157 | if len(roi_size)==3: 158 | roi_shape = [roi_size[0],roisize_y,roisize_x] 159 | else: 160 | roi_shape = [roisize_x,roisize_x] 161 | else: 162 | roi_shape = roi_size 163 | else: 164 | roi_shape = roi_size 165 | # checking border_dist not needed since we check this in psf class 166 | # nevertheless, I left it here just in case one does need it for another purpose 167 | ''' 168 | # make sure rois are not too close to border 169 | # adapted from NanoImaginPack --> coordinates.py --> extractMuliPeaks() 170 | border_dist = np.array(min_border_dist) 171 | valid_idxs = np.all(centers - border_dist >= 0, axis=1) & np.all(self.images.shape[-2:] - centers - border_dist >= 0, axis=1) 172 | centers = centers[valid_idxs, :] 173 | ''' 174 | 175 | # iterate over file_index to make sure that new roi is cut from correct file 176 | new_rois = [] 177 | for i, file_idx in enumerate(file_idxs): 178 | new_rois.append(nip.multiROIExtract(self.images[file_idx], [centers[i]], roi_shape)) 179 | 180 | # convert to numpy arrays and make sure everything has correct dtypes 181 | self.rois = np.concatenate(new_rois).astype(np.float32) 182 | self.centers = centers.astype(np.int32) 183 | self.file_idxs = file_idxs.astype(np.int32) 184 | self.rois_available = True 185 | 186 | return 187 | 188 | def get_image_data(self): 189 | """ 190 | Provides the necessary image information (e.g., rois, centers, ...) for the psf class 191 | and the fitter class. 
192 | """ 193 | if self.rois_available: 194 | return self.images, self.rois, self.centers, self.file_idxs 195 | else: 196 | raise RuntimeError("Can't call 'get_image_data()' since 'rois_available' flag is False.\nThis is probably due to the fact that you did not call 'find_rois()' before using this ImageData.") 197 | 198 | def process(self,roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel,pixelsize_x,pixelsize_z,bead_radius, 199 | min_center_dist=None,FOV=None, modulation_period=None,padPSF=True,plot=True, isVolume=True, pixelsize_y=None, skew_const=None,max_bead_number=None): 200 | 201 | if len(roi_size)==3: 202 | Nz = roi_size[0] 203 | else: 204 | Nz = self.images.shape[-3] 205 | if skew_const: 206 | 207 | roisize_x = np.int32(1+roi_size[-1]+Nz*np.abs(skew_const[-1])) 208 | roisize_y = np.int32(1+roi_size[-2]+Nz*np.abs(skew_const[-2])) 209 | if len(roi_size)==3: 210 | roiszL = [roi_size[0],roisize_y,roisize_x] 211 | else: 212 | roiszL = [roisize_x,roisize_x] 213 | min_border_dist=list(np.array(roiszL)//2+1) 214 | self.find_rois(roiszL, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV, min_center_dist,max_bead_number) 215 | else: 216 | self.find_rois(roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV, min_center_dist,max_bead_number) 217 | img, rois, cor, _ = self.get_image_data() 218 | self.centers_all = cor 219 | self.image_size = img.shape 220 | print(f"rois shape channel : {rois.shape}") 221 | 222 | self.pixelsize_z = pixelsize_z 223 | self.pixelsize_x = pixelsize_x 224 | self.bead_radius = bead_radius 225 | offset = np.min((np.quantile(rois,1e-3),0)) 226 | self.rois = rois-offset 227 | if plot: 228 | plt.figure(figsize=[6,6]) 229 | plt.plot(cor[:,-1],cor[:,-2],'o',markersize = 8,markerfacecolor='none') 230 | plt.show() 231 | # pad rois along z dimension 232 | if padPSF: 233 | _, rois, _, _ = self.get_image_data() 234 | value = np.empty((), dtype=object) 235 | value[()] = (0, 0) 236 | padsize = np.full((len(rois.shape), ), value, dtype=object) 237 | padsize[-3] = (np.int32(bead_radius//pixelsize_z),np.int32(bead_radius//pixelsize_z)) 238 | roisL = np.pad(rois,tuple(padsize),mode='edge') 239 | self.rois = roisL 240 | print(f"padded rois shape channel : {roisL.shape}") 241 | 242 | # generate bead kernel 243 | if pixelsize_y is None: 244 | pixelsize_y = pixelsize_x 245 | self.pixelsize_y = pixelsize_y 246 | 247 | 248 | 249 | if modulation_period is not None: 250 | self.zT = modulation_period/pixelsize_z 251 | 252 | self.skew_const = skew_const 253 | if skew_const: 254 | self.deskew_roi(roi_size) 255 | return 256 | 257 | def deskew_roi(self,roi_size): 258 | _, rois, cor, _ = self.get_image_data() 259 | skew_const = self.skew_const 260 | Nz = rois.shape[-3] 261 | roisize_x = rois.shape[-1] 262 | roisize_y = rois.shape[-2] 263 | bxsz = roi_size 264 | rois1 = np.zeros(rois.shape[0:-2]+(bxsz[-2],bxsz[-1]),dtype = np.float32) 265 | for i in range(0,Nz): 266 | ccx = np.int32(np.round(roisize_x//2-skew_const[-1]*Nz/2 + i*skew_const[-1])) 267 | ccy = np.int32(np.round(roisize_y//2-skew_const[-2]*Nz/2 + i*skew_const[-2])) 268 | tmp = rois[...,i,ccy-bxsz[-2]//2:ccy+bxsz[-2]//2+bxsz[-2]%2,ccx-bxsz[-1]//2:ccx+bxsz[-1]//2+bxsz[-1]%2] 269 | rois1[...,i,:,:] = tmp 270 | self.rois = rois1 271 | self.skew_const = skew_const 272 | #self.rawrois = rois 273 | print(f"deskewed rois shape channel : {rois1.shape}") -------------------------------------------------------------------------------- 
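The single-channel data class above is normally driven from the config files by the higher-level library code; the sketch below shows how it could be exercised directly. The input file and all numeric values are illustrative placeholders, not settings taken from this repository.

```python
# Minimal sketch (illustrative values): run the bead-segmentation pipeline of
# PreprocessedImageDataSingleChannel on a set of bead z-stacks.
import numpy as np
from psflearning.learning import PreprocessedImageDataSingleChannel

# Placeholder for real, offset/gain-corrected bead data of shape (files, z, y, x),
# e.g. as produced by psflearning.dataloader.
bead_stacks = np.load('bead_stacks.npy')

data = PreprocessedImageDataSingleChannel(bead_stacks, is4pi=False)
data.process(roi_size=[25, 25],            # [y, x] ROI size, cf. roi.roi_size in the config files
             gaus_sigma=[2, 2],            # blur kernel for peak detection
             min_border_dist=[13, 13],     # keep ROIs away from the image border
             max_threshold=0.2,            # relative threshold for local maxima
             max_kernel=[3, 3],            # maximum-filter kernel
             pixelsize_x=0.11,             # micron
             pixelsize_z=0.05,             # micron
             bead_radius=0.05)             # micron, used for padding the ROIs along z
_, rois, centers, file_idxs = data.get_image_data()
print(rois.shape, centers.shape, file_idxs.shape)
```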
/psflearning/learning/data_representation/PreprocessedImageDataSingleChannel_smlm_file.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import h5py 4 | import numpy as np 5 | import scipy as sp 6 | import scipy.special as spf 7 | 8 | import matplotlib.pyplot as plt 9 | from .. import imagetools as nip 10 | from tkinter import messagebox as mbox 11 | import sys 12 | 13 | from .PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 14 | 15 | class PreprocessedImageDataSingleChannel_smlm(PreprocessedImageDataInterface): 16 | """ 17 | Class that handles preprocessed data for single-channel case. 18 | Provides access to images data (rois, centers, etc.) for fitter and psf classes. 19 | """ 20 | def __init__(self, images, is4pi=None) -> None: 21 | # TODO: instead of using a boolean flag one could think of using a string 22 | # this would allow for more options 23 | # the question is if this makes sense or if it makes more sense to create a new class 24 | # for other types of psfs 25 | # here we used the flag since almost everything is identical, only the check function 26 | # and the func_2Dimage are different 27 | if is4pi is None or is4pi is False: 28 | self.is4pi = False 29 | self.num_dims = 3 30 | self.dim_names = "images, y, x" 31 | elif is4pi is True: 32 | self.is4pi = True 33 | self.num_dims = 3 34 | self.dim_names = "images, y, x" 35 | else: 36 | raise ValueError("is4pi should be True or False.") 37 | 38 | self.images = None 39 | self.check_and_init_images(images) # check if input is valid 40 | self.rois = [] 41 | self.centers = [] 42 | self.frames = [] 43 | self.rois_available = False 44 | self.min_border_dist = None # needed in cut_new_rois() 45 | self.zT = None 46 | self.patterns = None 47 | return 48 | 49 | def check_and_init_images(self, images): 50 | """ 51 | Checks if input is valid and initializes image attribute. 52 | """ 53 | try: 54 | # check if everything has same shape 55 | # and cast to float32 --> allows correct loss calculation in fitter 56 | self.images = np.array(images, dtype=np.float32) 57 | except: 58 | raise ValueError("Was not able to convert input to numpy array.\nCheck that dimensions are the same for all channels and for all images.") 59 | 60 | if self.images.ndim != self.num_dims: 61 | raise ValueError(f"Input needs to have {self.num_dims} dimensions: {self.dim_names}.") 62 | 63 | return 64 | 65 | def find_rois(self, roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel,FOV=None, min_center_dist=None,max_bead_number=None): 66 | """ 67 | Cuts out rois around local maxima. 
68 | """ 69 | self.min_border_dist = min_border_dist # needed in cut_new_rois() 70 | 71 | all_rois = [] 72 | all_centers = [] 73 | frames = [] 74 | 75 | for frame, image in enumerate(self.images): 76 | # TODO: try/except, since nip.extractMuliPeaks throws error if no roi is found 77 | if self.is4pi: 78 | im = image + self.patterns[frame] 79 | else: 80 | ed = np.min((frame+100,self.images.shape[0])) 81 | im = image-np.mean(self.images[frame:ed],axis=0) 82 | #im = image 83 | rois, centers = nip.extractMultiPeaks_smlm(im, ROIsize=roi_size, sigma=gaus_sigma, 84 | borderDist=min_border_dist, threshold_rel=max_threshold, 85 | alternateImg=image, kernel=max_kernel, min_dist = min_center_dist,FOV=FOV) 86 | 87 | # remove rois/centers that are too close together 88 | if rois is not None: 89 | if min_center_dist is None: 90 | min_center_dist = np.hypot(roi_size[-2], roi_size[-1]) 91 | rois, centers = self.remove_close_rois(rois, centers, min_center_dist) 92 | 93 | 94 | all_rois.append(rois) 95 | all_centers.append(centers) 96 | frames += [frame] * rois.shape[0] 97 | 98 | # convert to numpy arrays and make sure everything has correct dtypes 99 | if not all_rois: 100 | #mbox.showerror("segmentation error","no bead is found") 101 | raise RuntimeError('no bead is found') 102 | self.rois = np.concatenate(all_rois).astype(np.float32) 103 | self.centers = np.concatenate(all_centers).astype(np.int32) 104 | self.frames = np.array(frames).astype(np.int32) 105 | self.rois_available = True 106 | self.centers_all = np.concatenate(all_centers).astype(np.int32) 107 | self.alldata = dict(rois=self.rois,centers=self.centers,frames=self.frames) 108 | self.offset = np.min((np.quantile(self.rois,1e-3),0)) 109 | self.image_size = self.images.shape 110 | return 111 | 112 | def resetdata(self): 113 | self.rois = self.alldata['rois'] 114 | self.centers = self.alldata['centers'] 115 | self.frames = self.alldata['frames'] 116 | 117 | 118 | def remove_close_rois(self, rois, centers, min_dist): 119 | """ 120 | Calculates the distance between all rois/centers and removes the ones 121 | that are to close to each other in order to ensure that there is only 122 | one signal/bead per roi. 123 | """ 124 | # TODO: there is one corner case that is not handled here: 125 | # if two beads are close together and one (and only one) is to close to border 126 | # in this case only the rois that is not to close to the border is cut 127 | # but since the other one is not the first one is not filtered out here 128 | # so it could be possible that there are two beads visible in one roi... 129 | dist_matrix = sp.spatial.distance_matrix(centers, centers) 130 | keep_matrix_idxs = np.where((0 == dist_matrix) | (dist_matrix > min_dist)) 131 | unique, counts = np.unique(keep_matrix_idxs[0], return_counts=True) 132 | keep_idxs = unique[counts == centers.shape[0]] 133 | return rois[keep_idxs], centers[keep_idxs] 134 | 135 | def cut_new_rois(self, centers, frames, roi_size=None, min_border_dist=None): 136 | """ 137 | Cuts new rois from images with specified centers. 
138 | """ 139 | # set default values 140 | if roi_size is None: 141 | roi_size = self.rois.shape[-2:] 142 | if min_border_dist is None: 143 | min_border_dist = self.min_border_dist 144 | 145 | 146 | 147 | roi_shape = roi_size 148 | # checking border_dist not needed since we check this in psf class 149 | # nevertheless, I left it here just in case one does need it for another purpose 150 | ''' 151 | # make sure rois are not too close to border 152 | # adapted from NanoImaginPack --> coordinates.py --> extractMuliPeaks() 153 | border_dist = np.array(min_border_dist) 154 | valid_idxs = np.all(centers - border_dist >= 0, axis=1) & np.all(self.images.shape[-2:] - centers - border_dist >= 0, axis=1) 155 | centers = centers[valid_idxs, :] 156 | ''' 157 | 158 | # iterate over file_index to make sure that new roi is cut from correct file 159 | new_rois = [] 160 | for i, frame in enumerate(frames): 161 | new_rois.append(nip.multiROIExtract(self.images[frame], [centers[i]], roi_shape)) 162 | 163 | # convert to numpy arrays and make sure everything has correct dtypes 164 | self.rois = np.concatenate(new_rois).astype(np.float32)-self.offset 165 | self.centers = centers.astype(np.int32) 166 | self.frames = frames.astype(np.int32) 167 | self.rois_available = True 168 | 169 | return 170 | 171 | def get_image_data(self): 172 | """ 173 | Provides the necessary image information (e.g., rois, centers, ...) for the psf class 174 | and the fitter class. 175 | """ 176 | if self.rois_available: 177 | return self.images, self.rois, self.centers, self.frames 178 | else: 179 | raise RuntimeError("Can't call 'get_image_data()' since 'rois_available' flag is False.\nThis is probably due to the fact that you did not call 'find_rois()' before using this ImageData.") 180 | 181 | 182 | def process(self,roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel,pixelsize_x,pixelsize_z,bead_radius, 183 | min_center_dist=None,FOV=None, modulation_period=None, padPSF=False, plot=True,pixelsize_y=None, isVolume = False,skew_const=None, max_bead_number=None): 184 | 185 | self.find_rois(roi_size, gaus_sigma, min_border_dist, max_threshold, max_kernel, FOV, min_center_dist) 186 | img, rois, cor, _ = self.get_image_data() 187 | 188 | print(f"rois shape channel : {rois.shape}") 189 | 190 | self.pixelsize_z = pixelsize_z 191 | self.pixelsize_x = pixelsize_x 192 | self.bead_radius = bead_radius 193 | offset = np.min((np.quantile(rois,1e-3),0)) 194 | #offset = np.min(rois)-1e-6 195 | self.rois = rois-offset 196 | self.centers_all = cor 197 | self.offset = offset 198 | self.image_size = img.shape 199 | if plot: 200 | plt.figure(figsize=[6,6]) 201 | plt.plot(cor[:,-1],cor[:,-2],'o',markersize = 8,markerfacecolor='none') 202 | plt.show() 203 | 204 | 205 | 206 | # generate bead kernel 207 | if pixelsize_y is None: 208 | pixelsize_y = pixelsize_x 209 | self.pixelsize_y = pixelsize_y 210 | 211 | 212 | 213 | if modulation_period is not None: 214 | self.zT = modulation_period/pixelsize_z 215 | 216 | 217 | return 218 | 219 | -------------------------------------------------------------------------------- /psflearning/learning/data_representation/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/psflearning/learning/data_representation/__init__.py -------------------------------------------------------------------------------- /psflearning/learning/fitters/FitterInterface_file.py: 
-------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | import pickle 3 | 4 | import numpy as np 5 | 6 | class FitterInterface: 7 | """ 8 | Interface that ensures consistency and compatability between all old and new implementations of data classes, fitters and psfs. 9 | Classes implementing this interafce define the fitting procedure. They combine image data and a psf model to do this (and also optimizer???). 10 | For example, the procedure for fitting a psf for a single-channel experiment can be very different from one for a multi-channel experiment. 11 | """ 12 | 13 | __metaclass__ = ABCMeta 14 | 15 | @abstractmethod 16 | def objective(self, variables: list) -> list: 17 | """ 18 | Defines the objective that is optimized. In general, calculates the loss from forward images and real images 19 | and return the loss and its graient. 20 | """ 21 | raise NotImplementedError("You need to implement a 'objective' method in your fitter class.") 22 | 23 | @abstractmethod 24 | def learn_psf(self, variables: list=None) -> list: 25 | """ 26 | Is called by the user and defines the procedure of the psf learning. 27 | Returns a list containing the results, e.g., a psf object, the final postioins, intensities and backgrounds. 28 | """ 29 | raise NotImplementedError("You need to implement a 'learn_psf' method in your psf class.") 30 | 31 | def save(self, filename: str): 32 | """ 33 | Save object to file. 34 | """ 35 | with open(filename, "wb") as f: 36 | pickle.dump(self, f) 37 | 38 | @classmethod 39 | def load(filename: str): 40 | """ 41 | Load object from file. 42 | """ 43 | with open(filename, "rb") as f: 44 | self = pickle.load(f) 45 | return self -------------------------------------------------------------------------------- /psflearning/learning/fitters/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/psflearning/learning/fitters/__init__.py -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFInterface_file.py: -------------------------------------------------------------------------------- 1 | from abc import ABCMeta, abstractmethod 2 | import pickle 3 | 4 | import numpy as np 5 | import tensorflow as tf 6 | import scipy.special as spf 7 | 8 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 9 | from .. import utilities as im 10 | from .. import imagetools as nip 11 | 12 | class PSFInterface(): 13 | """ 14 | Interface that ensures consistency and compatability between all old and new implementations of data classes, fitters and psfs. 15 | Classes implementing this interafce define a psf model/parametrization. They describe how the parameters of the psf are used to calculate a forward image 16 | at a specific position. They also provide initial values and postprocessing of the variables for the fitter, 17 | since they depend on the nature of the psf model/parametrization. 18 | """ 19 | 20 | __metaclass__ = ABCMeta 21 | 22 | @abstractmethod 23 | def calc_initials(self, data: PreprocessedImageDataInterface) -> list: 24 | """ 25 | Calculates the initial values for the optimizable variables. 
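Implementations return a list of initial variable arrays (positions, backgrounds, intensities, model parameters, ...) together with the, possibly updated, start time, roughly (names are illustrative):

    variables, t0 = psf.calc_initials(image_data)
    forward_images = psf.calc_forward_images(variables)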
26 | """ 27 | raise NotImplementedError("You need to implement a 'calc_initials' method in your psf class.") 28 | 29 | @abstractmethod 30 | def calc_forward_images(self, variables: list) -> tf.Tensor: 31 | """ 32 | Calculates the forward images. 33 | """ 34 | raise NotImplementedError("You need to implement a 'calc_forward_images' method in your psf class.") 35 | 36 | @abstractmethod 37 | def postprocess(self, variables: list) -> list: 38 | """ 39 | Postprocesses the optimized variables. For example, normalizes the psf or calculates global positions. 40 | """ 41 | raise NotImplementedError("You need to implement a 'postprocess' method in your psf class.") 42 | 43 | def save(self, filename: str) -> None: 44 | """ 45 | Save object to file. 46 | """ 47 | with open(filename, "wb") as f: 48 | pickle.dump(self, f) 49 | 50 | @classmethod 51 | def load(filename: str): 52 | """ 53 | Load object from file. 54 | """ 55 | with open(filename, "rb") as f: 56 | self = pickle.load(f) 57 | return self 58 | 59 | def calpupilfield(self,fieldtype='vector',Nz=None,datatype='bead'): 60 | if Nz is None: 61 | Nz = self.bead_kernel.shape[0] 62 | bin = self.options.model.bin 63 | Lx = self.data.rois.shape[-1]*bin 64 | Ly = self.data.rois.shape[-2]*bin 65 | Lz = self.data.rois.shape[-3] 66 | xsz =self.options.model.pupilsize 67 | 68 | xrange = np.linspace(-Lx/2+0.5,Lx/2-0.5,Lx) 69 | [xx,yy] = np.meshgrid(xrange,xrange) 70 | pkx = xx/Lx 71 | pky = yy/Lx 72 | self.kspace = np.float32(pkx*pkx+pky*pky) 73 | self.kspace_x = np.float32(pkx*pkx) 74 | self.kspace_y = np.float32(pky*pky) 75 | 76 | pixelsize_x = self.data.pixelsize_x/bin 77 | pixelsize_y = self.data.pixelsize_y/bin 78 | NA = self.options.imaging.NA 79 | emission_wavelength = self.options.imaging.emission_wavelength 80 | nimm = self.options.imaging.RI.imm 81 | nmed = self.options.imaging.RI.med 82 | ncov = self.options.imaging.RI.cov 83 | n_max = self.options.model.n_max 84 | Zk = im.genZern1(n_max,xsz) 85 | 86 | n1 = np.array(range(-1,n_max,2)) 87 | self.spherical_terms = (n1+1)*(n1+2)//2 88 | 89 | pupilradius = 1 90 | krange = np.linspace(-pupilradius+pupilradius/xsz,pupilradius-pupilradius/xsz,xsz) 91 | [xx,yy] = np.meshgrid(krange,krange) 92 | kr = np.lib.scimath.sqrt(xx**2+yy**2) 93 | kz = np.lib.scimath.sqrt((nimm/emission_wavelength)**2-(kr*NA/emission_wavelength)**2) 94 | 95 | cos_imm = np.lib.scimath.sqrt(1-(kr*NA/nimm)**2) 96 | cos_med = np.lib.scimath.sqrt(1-(kr*NA/nmed)**2) 97 | cos_cov = np.lib.scimath.sqrt(1-(kr*NA/ncov)**2) 98 | kz_med = nmed/emission_wavelength*cos_med 99 | FresnelPmedcov = 2*nmed*cos_med/(nmed*cos_cov+ncov*cos_med) 100 | FresnelSmedcov = 2*nmed*cos_med/(nmed*cos_med+ncov*cos_cov) 101 | FresnelPcovimm = 2*ncov*cos_cov/(ncov*cos_imm+nimm*cos_cov) 102 | FresnelScovimm = 2*ncov*cos_cov/(ncov*cos_cov+nimm*cos_imm) 103 | Tp = FresnelPmedcov*FresnelPcovimm 104 | Ts = FresnelSmedcov*FresnelScovimm 105 | Tavg = (Tp+Ts)/2 106 | 107 | phi = np.arctan2(yy,xx) 108 | cos_phi = np.cos(phi) 109 | sin_phi = np.sin(phi) 110 | sin_med = kr*NA/nmed 111 | 112 | pvec = Tp*np.stack([cos_med*cos_phi,cos_med*sin_phi,-sin_med]) 113 | svec = Ts*np.stack([-sin_phi,cos_phi,np.zeros(cos_phi.shape)]) 114 | 115 | hx = cos_phi*pvec-sin_phi*svec 116 | hy = sin_phi*pvec+cos_phi*svec 117 | h = np.concatenate((hx,hy),axis=0) 118 | self.dipole_field = np.complex64(h) 119 | if self.options.model.with_apoid: 120 | #apoid = 1/np.lib.scimath.sqrt(cos_med) 121 | apoid = np.lib.scimath.sqrt(cos_imm)/cos_med 122 | #apoid = np.lib.scimath.sqrt(cos_med)/cos_imm 123 | if 
fieldtype=='scalar': 124 | apoid=apoid*Tavg 125 | else: 126 | apoid = 1 127 | 128 | kpixelsize = 2.0*NA/emission_wavelength/xsz 129 | self.paramxy = im.prechirpz1(kpixelsize,pixelsize_x,pixelsize_y,xsz,Lx) 130 | 131 | self.aperture = np.complex64(kr<1) 132 | pupil = self.aperture*apoid 133 | pupil = tf.cast(pupil,tf.complex64) 134 | if fieldtype=='scalar': 135 | psfA = im.cztfunc1(pupil,self.paramxy) 136 | self.normf = np.complex64(1/np.sum(psfA*np.conj(psfA))) 137 | else: 138 | I_res = 0.0 139 | for h in self.dipole_field: 140 | PupilFunction = pupil*h 141 | psfA = im.cztfunc1(PupilFunction,self.paramxy) 142 | I_res += psfA*tf.math.conj(psfA) 143 | self.normf = np.complex64(1/np.sum(I_res)) 144 | #if datatype == 'bead': 145 | # self.Zrange = -1*np.linspace(-Nz/2+0.5,Nz/2-0.5,Nz,dtype=np.complex64).reshape((Nz,1,1)) 146 | #elif datatype == 'insitu': 147 | self.Zrange = np.linspace(-Nz/2+0.5,Nz/2-0.5,Nz,dtype=np.complex64).reshape((Nz,1,1)) 148 | self.kx = np.complex64(xx*NA/emission_wavelength)*pixelsize_x 149 | self.ky = np.complex64(yy*NA/emission_wavelength)*pixelsize_y 150 | self.kz = np.complex64(kz)*self.data.pixelsize_z 151 | self.kz_med = np.complex64(kz_med)*self.data.pixelsize_z 152 | self.k = np.complex64(nmed/emission_wavelength)*self.data.pixelsize_z 153 | self.apoid = np.complex64(apoid) 154 | self.nimm = nimm 155 | self.nmed = nmed 156 | self.Zk = np.float32(Zk) 157 | 158 | # only for bead data, precompute phase ramp 159 | Lx = self.data.rois.shape[-1] 160 | Ly = self.data.rois.shape[-2] 161 | Lz = self.data.rois.shape[-3] 162 | 163 | self.zv = np.linspace(0,Lz-1,Lz,dtype=np.float32).reshape(Lz,1,1)-Lz/2 164 | self.kxv = np.linspace(-Lx/2+0.5,Lx/2-0.5,Lx,dtype=np.float32)/Lx 165 | self.kyv = (np.linspace(-Ly/2+0.5,Ly/2-0.5,Ly,dtype=np.float32).reshape(Ly,1))/Ly 166 | self.kzv = (np.linspace(-Lz/2+0.5,Lz/2-0.5,Lz,dtype=np.float32).reshape(Lz,1,1))/Lz 167 | 168 | 169 | def calnorm(self,pupil): 170 | psfA = im.cztfunc1(pupil,self.paramxy) 171 | normf = tf.math.real(tf.reduce_sum(psfA*tf.math.conj(psfA))) 172 | return normf 173 | 174 | def gen_bead_kernel(self,isVolume = False): 175 | pixelsize_z = self.data.pixelsize_z 176 | bead_radius = self.data.bead_radius 177 | if isVolume: 178 | Nz = self.data.rois.shape[-3] 179 | bin = 1 180 | else: 181 | Nz = self.data.rois.shape[-3]+np.int32(bead_radius//pixelsize_z)*2+4 182 | bin = self.options.model.bin 183 | 184 | Lx = self.data.rois.shape[-1]*bin 185 | pixelsize_x = self.data.pixelsize_x/bin 186 | pixelsize_y = self.data.pixelsize_y/bin 187 | 188 | xrange = np.linspace(-Lx/2+0.5,Lx/2-0.5,Lx)+1e-6 189 | zrange = np.linspace(-Nz/2+0.5,Nz/2-0.5,Nz) 190 | [xx,yy,zz] = np.meshgrid(xrange,xrange,zrange) 191 | xx = np.swapaxes(xx,0,2) 192 | yy = np.swapaxes(yy,0,2) 193 | zz = np.swapaxes(zz,0,2) 194 | 195 | pkx = 1/Lx/pixelsize_x 196 | pky = 1/Lx/pixelsize_y 197 | pkz = 1/Nz/pixelsize_z 198 | if bead_radius>0: 199 | Zk0 = np.sqrt((xx*pkx)**2+(yy*pky)**2+(zz*pkz)**2)*bead_radius 200 | mu = 1.5 201 | kernel = spf.jv(mu,2*np.pi*Zk0)/(Zk0**mu)*bead_radius**3 202 | kernel = kernel/np.max(kernel) 203 | kernel = np.float32(kernel) 204 | else: 205 | kernel = np.ones((Nz,Lx,Lx),dtype=np.float32) 206 | self.bead_kernel = tf.complex(kernel,0.0) 207 | 208 | return 209 | 210 | 211 | def applyPhaseRamp(self, img, shiftvec): 212 | """ 213 | Applies a frequency ramp as a phase factor according to the shiftvec to a Fourier transform to shift the image. 214 | Identical to implementation in InverseModelling. 
Just removed if-statement (0) that does not make sense for me and prevent my code to work correctly. 215 | img: input Fourier transform tensor 216 | shiftvec: real-space shifts 217 | """ 218 | # TODO: no im 219 | res = im.totensor(img) 220 | myshape = im.shapevec(res) 221 | ShiftDims = shiftvec.shape[-1] 222 | for d in range(1, ShiftDims+1): 223 | myshifts = shiftvec[..., -d] 224 | for ed in range(len(myshape) - len(myshifts.shape)): 225 | myshifts = tf.expand_dims(myshifts,-1) 226 | res = res * tf.exp(tf.complex(im.totensor(0.0), 2.0 * np.pi * myshifts * nip.ramp1D(myshape[-d], ramp_dim = -d, freq='ftfreq'))) 227 | return res 228 | 229 | def phaseRamp(self,pos): 230 | if pos.shape[1]==2: 231 | shiftphase = 1j*2*np.pi*(self.kxv*pos[:,1]+self.kyv*pos[:,0]) 232 | if pos.shape[1]==3: 233 | shiftphase = 1j*2*np.pi*(self.kxv*pos[:,2]+self.kyv*pos[:,1]+self.kzv*pos[:,0]) 234 | 235 | return tf.exp(shiftphase) 236 | 237 | def applyDrfit(self,psfin,gxy): 238 | otf2d = im.fft2d(tf.complex(psfin,0.0)) 239 | if self.data.skew_const: 240 | sk = np.array([self.data.skew_const],dtype=np.float32)+np.zeros(gxy.shape,dtype=np.float32) 241 | sk = np.reshape(sk,sk.shape+(1,1,1)) 242 | dxy = tf.complex(-sk*self.zv+tf.round(sk*self.zv),0.0) 243 | shiftphase = self.phaseRamp(dxy) 244 | 245 | else: 246 | gxy = tf.complex(tf.reshape(gxy,gxy.shape+(1,1,1)),0.0)*self.zv 247 | shiftphase = self.phaseRamp(gxy) 248 | psf_shift = tf.math.real(im.ifft2d(otf2d*shiftphase)) 249 | 250 | return psf_shift 251 | 252 | def psf2IAB(self, ROIs): 253 | G = np.zeros(ROIs.shape, dtype = np.complex64) 254 | G[:,0] = ROIs[:,0]*np.exp(-2*np.pi/3*1j)+ROIs[:,1]+ROIs[:,2]*np.exp(2*np.pi/3*1j) 255 | G[:,1] = np.sum(ROIs,axis=1) 256 | G[:,2] = ROIs[:,0]*np.exp(2*np.pi/3*1j)+ROIs[:,1]+ROIs[:,2]*np.exp(-2*np.pi/3*1j) # G[:,2] = np.conj(G[:,0]) 257 | # solving above equations for ROIs and redefine it as O 258 | O = np.zeros(ROIs.shape, dtype = np.complex64) 259 | O[:,0] = 1/3*(G[:,0]*np.exp(2*np.pi/3*1j)+G[:,1]+G[:,2]*np.exp(-2*np.pi/3*1j)) 260 | O[:,1] = 1/3*np.sum(G,axis=1) 261 | O[:,2] = 1/3*(G[:,0]*np.exp(-2*np.pi/3*1j)+G[:,1]+G[:,2]*np.exp(2*np.pi/3*1j)) # O[:,2] = np.conj(O[:,0]) 262 | # above derivation is purely based on the definition of FFT and the fact that cos(2pi/3) and cos(4pi/3) are all equal to -0.5. 
263 | # it is true for PSF at any 3 phases, however, if the 3 phases are exactly at [-2pi/3, 0, 2pi/3], then G can be used to represent the complex IAB model, where 264 | I = np.real(G[:,1])/3 265 | A = G[:,0]/3 266 | B = G[:,2]/3 # B = np.conj(A) 267 | 268 | a = np.squeeze(np.sum(np.real(A[0]),axis = (-1,-2))) 269 | b = np.squeeze(np.sum(np.imag(A[0]),axis = (-1,-2))) 270 | 271 | y1 = np.squeeze(np.sum((ROIs[:,2]-ROIs[:,0])/np.sqrt(3),axis = (-1,-2))) 272 | y2 = np.squeeze(np.sum(ROIs[:,1]-np.sum(ROIs,axis = 1)/3,axis = (-1,-2))) 273 | 274 | q = np.squeeze(1j*(a*y1-b*y2) + (a*y2+b*y1)) 275 | if len(q.shape)>1: 276 | phi = 1*np.median(np.angle(q),axis=1) 277 | else: 278 | phi = 1*np.median(np.angle(q)) 279 | 280 | 281 | return I, A, B, phi 282 | 283 | -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFMultiChannel4pi_file.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import Type 3 | 4 | import numpy as np 5 | from requests import options 6 | import scipy as sp 7 | import tensorflow as tf 8 | from psflearning.learning.psfs.PSFPupilBased4pi_file import PSFPupilBased4pi 9 | 10 | from psflearning.learning.psfs.PSFVolumeBased4pi_file import PSFVolumeBased4pi 11 | from psflearning.learning.psfs.PSFZernikeBased4pi_file import PSFZernikeBased4pi 12 | 13 | from .PSFInterface_file import PSFInterface 14 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 15 | from ..fitters.Fitter_file import Fitter 16 | from ..optimizers import OptimizerABC, L_BFGS_B 17 | 18 | class PSFMultiChannel4pi(PSFInterface): 19 | def __init__(self, psftype: Type[PSFInterface], init_optimizer: OptimizerABC=None,options = None, loss_weight=None) -> None: 20 | self.parameters = None 21 | self.updateflag = None 22 | self.psftype = psftype 23 | self.sub_psfs = [] # each element is an instance of psftype 24 | self.data = None 25 | self.weight = None 26 | self.loss_weight = loss_weight 27 | self.options = options 28 | if init_optimizer is None: 29 | self.init_optimizer = L_BFGS_B(100) 30 | else: 31 | self.init_optimizer = init_optimizer 32 | 33 | def calc_initials(self, data: PreprocessedImageDataInterface, start_time=None): 34 | """ 35 | Provides initial values for the optimizable varibales for the fitter class. 36 | Since this is a multi-channel PSF, it performs an initial fitting for each 37 | channel first and then calculates an initial guess for the transformations. 
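The first channel is used as reference: each channel is pre-fitted with its own Fitter, and the affine transformation from the reference channel to every other channel is estimated with np.linalg.lstsq from the fitted bead positions relative to the image center. The per-channel phase delay dphase is set to multiples of pi/2, with the sign given by options.fpi.phase_delay_dir.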
38 | """ 39 | 40 | self.data = data 41 | images, rois, centers, file_idxs = self.data.get_image_data() 42 | num_channels = len(images) 43 | self.sub_psfs = [None]*num_channels 44 | self.imgcenter = np.hstack((np.array(images[0].shape[-2:])/2,0)).astype(np.float32) 45 | # choose first channel as reference and run first round of optimization 46 | options = self.options.copy() 47 | ref_psf = self.psftype(options=self.options) 48 | ref_psf.dphase = 0.0 49 | self.sub_psfs[0] = ref_psf 50 | fitter_ref_channel = Fitter(self.data.get_channel(0), ref_psf,self.init_optimizer, ref_psf.default_loss_func,loss_weight=self.loss_weight) # TODO: redesign multiData 51 | res_ref, toc = fitter_ref_channel.learn_psf(start_time=start_time) 52 | ref_pos = res_ref[0] 53 | ref_pos_yx1 = np.concatenate((ref_pos[:, 1:], np.ones((ref_pos.shape[0], 1))), axis=1) 54 | self.ref_pos_yx = ref_pos_yx1 55 | 56 | # create empty initial guess lists 57 | # and fill in values for first channel 58 | 59 | init_trafos = [] 60 | #ref_zpos = np.transpose(np.expand_dims(-ref_pos[:,0]+self.data.channels[0].rois.shape[-3]//2,axis=0)) 61 | 62 | 63 | init_params = [res_ref[-1]] 64 | # do everything for the other channels and put initial values in corresponding array 65 | for i in range(1, num_channels): 66 | # run first round of optimization 67 | current_psf = self.psftype(options=self.options) 68 | self.sub_psfs[i] = current_psf 69 | if options.fpi.phase_delay_dir == 'ascend': 70 | current_psf.dphase = i*np.pi/2 71 | else: 72 | current_psf.dphase = -i*np.pi/2 73 | #self.sub_psfs[i].dphase = -i*np.pi/2 74 | fitter_current_channel = Fitter(self.data.get_channel(i), current_psf, self.init_optimizer,current_psf.default_loss_func,loss_weight=self.loss_weight) 75 | res_cur, toc = fitter_current_channel.learn_psf(start_time=toc) 76 | current_pos = res_cur[0] 77 | # calculate transformation 78 | current_pos_yx1 = np.concatenate((current_pos[:, 1:], np.ones((current_pos.shape[0], 1))), axis=1) 79 | current_trafo = np.linalg.lstsq(ref_pos_yx1-self.imgcenter, current_pos_yx1-self.imgcenter, rcond=None)[0] 80 | 81 | self.sub_psfs[i].weight = self.sub_psfs[0].weight 82 | # fill initial arrays 83 | init_params.append(res_cur[-1]) 84 | init_trafos.append(current_trafo) 85 | 86 | 87 | # get current status of image data 88 | images, _, centers, _ = self.data.get_image_data() 89 | num_channels = len(images) 90 | 91 | # stack centers of ref channel num_channels-1 times for easier calc in calc_forward_images 92 | cor_ref = np.concatenate((centers[0], np.ones((centers[0].shape[0], 1))), axis=1) 93 | self.cor_ref_channel = np.stack([cor_ref] * (num_channels-1)).astype(np.float32) 94 | # self.pos_ref_channel_yx1 = np.stack([ref_pos_yx1] * (num_channels-1)).astype(np.float32) 95 | # centers of other channels needed to calculate diffs in objective 96 | self.cor_other_channels = np.stack(centers[1:]).astype(np.float32) 97 | 98 | self.init_trafos = np.stack(init_trafos).astype(np.float32) 99 | 100 | param = map(list, zip(*init_params)) # a way to tranpose the first two dimensions of a list of iterateables 101 | param = [np.stack(var) for var in param] 102 | param[0] = param[0][0] 103 | #init_subpixel_pos_ref_channel = np.concatenate((param[0][:,0:1], centers[0]-ref_pos[:, 1:]), axis=1) 104 | 105 | #param.insert(0,init_subpixel_pos_ref_channel.astype(np.float32)) 106 | param.append(self.init_trafos) 107 | self.weight = np.ones((len(param))) 108 | self.weight[-1] = 1e-4 109 | param[-1] = param[-1]/self.weight[-1] 110 | self.varinfo = self.sub_psfs[0].varinfo 111 | 
for k, vinfo in enumerate(self.varinfo[1:]): 112 | if vinfo['type'] == 'Nfit': 113 | self.varinfo[k+1]['id'] += 1 114 | self.varinfo.append(dict(type='shared')) 115 | 116 | if self.psftype == PSFZernikeBased4pi: 117 | if self.options.fpi.link_zernikecoeff: 118 | param[4][0]=np.hstack((param[4][0][:,0:1],np.mean(param[4][:,:,1:],axis=0))) 119 | #param[5][0]=np.hstack((param[5][0][:,0:1],np.mean(param[5][:,:,1:3],axis=0),param[5][0][:,3:4],np.mean(param[5][:,:,4:],axis=0))) 120 | param[5][0]=np.hstack((param[5][0][:,0:4],np.mean(param[5][:,:,4:],axis=0))) 121 | 122 | 123 | return param, toc 124 | 125 | 126 | 127 | def calc_forward_images(self, variables): 128 | """ 129 | Calculate forward images from the current guess of the variables. 130 | """ 131 | 132 | init_pos_ref = variables[0] 133 | trafos = variables[-1]*self.weight[-1] 134 | 135 | # calc positions from pos in ref channel and trafos 136 | positions = self.calc_positions_from_trafos(init_pos_ref, trafos) 137 | 138 | # use calc_forward_images of every sub_psf and stack at the end 139 | forward_images = [None] * len(self.sub_psfs) # needed since tf.function does not support .append() 140 | for i, sub_psf in enumerate(self.sub_psfs): 141 | pos = positions[i] 142 | #link pos, intensity, phase 143 | sub_variables = [pos, variables[1][i], variables[2][0], variables[3][0]] 144 | if self.psftype == PSFVolumeBased4pi: 145 | for k in range(4,len(variables)-3): 146 | sub_variables.append(variables[k][i]) 147 | sub_variables.append(variables[-3][0]) 148 | sub_variables.append(variables[-2][0]) 149 | elif self.psftype == PSFZernikeBased4pi: 150 | if self.options.fpi.link_zernikecoeff: 151 | sub_variables.append(tf.concat((variables[4][i][:,0:1],variables[4][0][:,1:]),axis=1)) 152 | #sub_variables.append(tf.concat((variables[5][i][:,0:1],variables[5][0][:,1:3],variables[5][i][:,3:4],variables[5][0][:,4:]),axis=1)) 153 | sub_variables.append(tf.concat((variables[5][i][:,0:4],variables[5][0][:,4:]),axis=1)) 154 | 155 | for k in range(6,len(variables)-4): 156 | sub_variables.append(variables[k][i]) 157 | else: 158 | for k in range(4,len(variables)-4): 159 | sub_variables.append(variables[k][i]) 160 | sub_variables.append(variables[-4][0]) 161 | sub_variables.append(variables[-3][0]) 162 | sub_variables.append(variables[-2][0]) 163 | elif self.psftype == PSFPupilBased4pi: 164 | for k in range(4,len(variables)-4): 165 | sub_variables.append(variables[k][i]) 166 | sub_variables.append(variables[-4][0]) 167 | sub_variables.append(variables[-3][0]) 168 | sub_variables.append(variables[-2][0]) 169 | 170 | forward_images[i] = sub_psf.calc_forward_images(sub_variables) 171 | 172 | return tf.stack(forward_images) 173 | 174 | def calc_positions_from_trafos(self, init_subpixel_pos_ref_channel, trafos): 175 | # calculate positions from position in ref channel and transformation 176 | cor_target = tf.linalg.matmul(self.cor_ref_channel[:,self.ind[0]:self.ind[1]]-self.imgcenter, trafos)[..., :-1] 177 | 178 | diffs = tf.math.subtract(self.cor_other_channels[:,self.ind[0]:self.ind[1]]-self.imgcenter[:-1],cor_target) 179 | pos_other_channels = init_subpixel_pos_ref_channel + tf.concat((tf.zeros(diffs.shape[:-1] + (1,)), diffs), axis=2) 180 | positions = tf.concat((tf.expand_dims(init_subpixel_pos_ref_channel, axis=0), pos_other_channels), axis=0) 181 | 182 | return positions 183 | 184 | def postprocess(self, variables): 185 | """ 186 | Applies postprocessing to the optimized variables. 
In this case calculates 187 | real positions in the image from the positions in the roi. Also, normalizes 188 | psf and adapts intensities and background accordingly. 189 | """ 190 | #init_subpixel_pos_ref_channel, backgrounds, intensities, psf_params, trafos = variables 191 | res = variables.copy() 192 | res[-1] = variables[-1]*self.weight[-1] 193 | init_subpixel_pos_ref_channel = res[0] 194 | trafos = res[-1] 195 | 196 | # calc positions from pos in ref channel and trafos 197 | positions = self.calc_positions_from_trafos(init_subpixel_pos_ref_channel, trafos) 198 | 199 | # calc_positions_from_trafos is implemented using tf, 200 | # therefore convert to numpy here 201 | positions = positions.numpy() 202 | 203 | # just call postprocess of every sub_psf and stack results at the end 204 | results = [] 205 | for i, sub_psf in enumerate(self.sub_psfs): 206 | 207 | if self.psftype == PSFZernikeBased4pi: 208 | sub_variables = [positions[i],res[1][i], res[2][0], res[3][0]] 209 | if self.options.fpi.link_zernikecoeff: 210 | sub_variables.append(np.hstack((res[4][i][:,0:1],res[4][0][:,1:]))) 211 | #sub_variables.append(np.hstack((res[5][i][:,0:1],res[5][0][:,1:3],res[5][i][:,3:4],res[5][0][:,4:]))) 212 | sub_variables.append(np.hstack((res[5][i][:,0:4],res[5][0][:,4:]))) 213 | for k in range(6,len(variables)-4): 214 | sub_variables.append(res[k][i]) 215 | else: 216 | for k in range(4,len(variables)-4): 217 | sub_variables.append(res[k][i]) 218 | sub_variables.append(res[-4][0]) 219 | sub_variables.append(res[-3][0]) 220 | sub_variables.append(res[-2][0]) 221 | elif self.psftype == PSFPupilBased4pi: 222 | sub_variables = [positions[i],res[1][i], res[2][0], res[3][0]] 223 | for k in range(4,len(variables)-4): 224 | sub_variables.append(res[k][i]) 225 | sub_variables.append(res[-4][0]) 226 | sub_variables.append(res[-3][0]) 227 | sub_variables.append(res[-2][0]) 228 | else: 229 | sub_variables = [positions[i]] 230 | for k in range(1,len(variables)-1): 231 | sub_variables.append(res[k][i]) 232 | results.append(sub_psf.postprocess(sub_variables)[:-1]) 233 | 234 | results = map(list, zip(*results)) # a way to tranpose the first two dimensions of a list of iterateables 235 | results = [np.stack(variable) for variable in results] 236 | for k in range(-1,0): 237 | results.append(res[k]) 238 | results.append(variables) 239 | return results 240 | 241 | 242 | def res2dict(self,res): 243 | res_dict = dict() 244 | for i,sub_psf in enumerate(self.sub_psfs): 245 | sub_res = [] 246 | for k in range(0,len(res)-2): 247 | sub_res.append(res[k][i]) 248 | res_dict['channel'+str(i)]=sub_psf.res2dict(sub_res) 249 | res_dict['T'] = np.squeeze(res[-2]) 250 | res_dict['imgcenter'] = self.imgcenter 251 | res_dict['xyshift'] = self.data.shiftxy 252 | 253 | return res_dict 254 | -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFMultiChannel_file.py: -------------------------------------------------------------------------------- 1 | 2 | from typing import Type 3 | 4 | import numpy as np 5 | import scipy as sp 6 | import tensorflow as tf 7 | 8 | from .PSFInterface_file import PSFInterface 9 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 10 | from ..fitters.Fitter_file import Fitter 11 | from ..optimizers import OptimizerABC, L_BFGS_B 12 | 13 | class PSFMultiChannel(PSFInterface): 14 | def __init__(self, psftype: Type[PSFInterface], init_optimizer: OptimizerABC=None, options = None,loss_weight=None) -> None: 15 | 
self.parameters = None 16 | self.updateflag = None 17 | self.psftype = psftype 18 | self.PSFtype = 'scalar' 19 | self.sub_psfs = [] # each element is an instance of psftype 20 | self.data = None 21 | self.weight = None 22 | self.loss_weight = loss_weight 23 | self.options = options 24 | self.init_trafos = None 25 | if init_optimizer is None: 26 | self.init_optimizer = L_BFGS_B(100) 27 | else: 28 | self.init_optimizer = init_optimizer 29 | 30 | def calc_initials(self, data: PreprocessedImageDataInterface, start_time=None): 31 | """ 32 | Provides initial values for the optimizable varibales for the fitter class. 33 | Since this is a multi-channel PSF, it performs an initial fitting for each 34 | channel first and then calculates an initial guess for the transformations. 35 | """ 36 | 37 | self.data = data 38 | images, rois, centers, file_idxs = self.data.get_image_data() 39 | num_channels = len(images) 40 | self.sub_psfs = [None]*num_channels 41 | self.imgcenter = np.hstack((np.array(images[0].shape[-2:])/2,0)).astype(np.float32) 42 | # choose first channel as reference and run first round of optimization 43 | ref_psf = self.psftype(options = self.options) 44 | ref_psf.psftype = self.PSFtype 45 | if hasattr(self,'initpsf'): 46 | ref_psf.initpsf = self.initpsf[0] 47 | ref_psf.defocus = np.float32(self.options.multi.defocus[0]/self.data.pixelsize_z) 48 | self.sub_psfs[0] = ref_psf 49 | fitter_ref_channel = Fitter(self.data.get_channel(0), ref_psf,self.init_optimizer, ref_psf.default_loss_func,loss_weight=self.loss_weight) # TODO: redesign multiData 50 | res_ref, toc = fitter_ref_channel.learn_psf(start_time=start_time) 51 | ref_pos = res_ref[0] 52 | ref_pos_yx1 = np.concatenate((ref_pos[:, 1:], np.ones((ref_pos.shape[0], 1))), axis=1) 53 | self.ref_pos_yx = ref_pos_yx1 54 | 55 | # create empty initial guess lists 56 | # and fill in values for first channel 57 | 58 | 59 | init_trafos = [] 60 | #ref_zpos = np.transpose(np.expand_dims(-ref_pos[:,0]+self.data.channels[0].rois.shape[-3]//2,axis=0)) 61 | #init_subpixel_pos_ref_channel = np.concatenate((ref_zpos, centers[0]-ref_pos[:, 1:]), axis=1) 62 | 63 | init_params = [res_ref[-1]] 64 | # do everything for the other channels and put initial values in corresponding array 65 | for i in range(1, num_channels): 66 | # run first round of optimization 67 | current_psf = self.psftype(options = self.options) 68 | current_psf.psftype = self.PSFtype 69 | if hasattr(self,'initpsf'): 70 | current_psf.initpsf = self.initpsf[i] 71 | current_psf.defocus = np.float32(self.options.multi.defocus[i]/self.data.pixelsize_z) 72 | self.sub_psfs[i] = current_psf 73 | fitter_current_channel = Fitter(self.data.get_channel(i), current_psf, self.init_optimizer,current_psf.default_loss_func,loss_weight=self.loss_weight) 74 | res_cur,toc = fitter_current_channel.learn_psf(start_time=toc) 75 | current_pos = res_cur[0] 76 | # calculate transformation 77 | current_pos_yx1 = np.concatenate((current_pos[:, 1:], np.ones((current_pos.shape[0], 1))), axis=1) 78 | current_trafo = np.linalg.lstsq(ref_pos_yx1-self.imgcenter, current_pos_yx1-self.imgcenter, rcond=None)[0] 79 | 80 | #relative_shift = np.mean(centers[0],axis=0)-self.imgcenter[:-1] 81 | #current_trafo[-1][:-1] = self.data.shiftxy[i]-(np.matmul(relative_shift,current_trafo[:-1,:-1])-relative_shift) 82 | # fill initial arrays 83 | #self.sub_psfs[i].weight = self.sub_psfs[0].weight 84 | tmp = self.sub_psfs[i].weight.copy() 85 | self.sub_psfs[i].weight = self.sub_psfs[0].weight.copy() 86 | self.sub_psfs[i].weight[1] = tmp[1] 87 
| init_params.append(res_cur[-1]) 88 | init_trafos.append(current_trafo) 89 | 90 | 91 | # get current status of image data 92 | images, _, centers, _ = self.data.get_image_data() 93 | num_channels = len(images) 94 | 95 | # stack centers of ref channel num_channels-1 times for easier calc in calc_forward_images 96 | cor_ref = np.concatenate((centers[0][:,-2:], np.ones((centers[0].shape[0], 1))), axis=1) 97 | self.cor_ref_channel = np.stack([cor_ref] * (num_channels-1)).astype(np.float32) 98 | # self.pos_ref_channel_yx1 = np.stack([ref_pos_yx1] * (num_channels-1)).astype(np.float32) 99 | # centers of other channels needed to calculate diffs in objective 100 | self.cor_other_channels = (np.stack(centers[1:])[...,-2:]).astype(np.float32) 101 | 102 | self.init_trafos = np.stack(init_trafos).astype(np.float32) 103 | 104 | 105 | param = map(list, zip(*init_params)) # a way to tranpose the first two dimensions of a list of iterateables 106 | param = [np.stack(var) for var in param] 107 | param[0] = param[0][0] 108 | 109 | #param.insert(0,init_subpixel_pos_ref_channel.astype(np.float32)) 110 | param.append(self.init_trafos) 111 | self.weight = np.ones((len(param))) 112 | self.weight[-1] = 1e-3 113 | if hasattr(self.sub_psfs[0],'pos_weight'): 114 | self.weight[0] = self.sub_psfs[0].pos_weight 115 | param[-1] = param[-1]/self.weight[-1] 116 | self.varinfo = self.sub_psfs[0].varinfo 117 | for k, vinfo in enumerate(self.varinfo[1:]): 118 | if vinfo['type'] == 'Nfit': 119 | self.varinfo[k+1]['id'] += 1 120 | self.varinfo.append(dict(type='shared')) 121 | return param, toc 122 | 123 | 124 | def calc_forward_images(self, variables): 125 | """ 126 | Calculate forward images from the current guess of the variables. 127 | """ 128 | init_pos_ref = variables[0]*self.weight[0] 129 | trafos = variables[-1]*self.weight[-1] 130 | 131 | # calc positions from pos in ref channel and trafos 132 | positions = self.calc_positions_from_trafos(init_pos_ref, trafos) 133 | 134 | # use calc_forward_images of every sub_psf and stack at the end 135 | forward_images = [None] * len(self.sub_psfs) # needed since tf.function does not support .append() 136 | for i, sub_psf in enumerate(self.sub_psfs): 137 | pos = positions[i]/self.weight[0] 138 | #link pos, intensity, phase 139 | #sub_variables = [pos, variables[1][i], variables[2][0], variables[3][i],variables[4][0]] 140 | sub_variables = [pos, variables[1][i], variables[2][0]] 141 | for k in range(3,len(variables)-2): 142 | sub_variables.append(variables[k][i]) 143 | sub_variables.append(variables[-2][0]) 144 | forward_images[i] = sub_psf.calc_forward_images(sub_variables) 145 | 146 | return tf.stack(forward_images) 147 | 148 | def calc_positions_from_trafos(self, init_subpixel_pos_ref_channel, trafos): 149 | # calculate positions from position in ref channel and transformation 150 | 151 | cor_target = tf.linalg.matmul(self.cor_ref_channel[:,self.ind[0]:self.ind[1]]-self.imgcenter, trafos)[..., :-1] 152 | 153 | diffs = tf.math.subtract(self.cor_other_channels[:,self.ind[0]:self.ind[1]]-self.imgcenter[:-1],cor_target) 154 | pos_other_channels = init_subpixel_pos_ref_channel + tf.concat((tf.zeros(diffs.shape[:-1] + (1,)), diffs), axis=2) 155 | positions = tf.concat((tf.expand_dims(init_subpixel_pos_ref_channel, axis=0), pos_other_channels), axis=0) 156 | 157 | return positions 158 | 159 | def postprocess(self, variables): 160 | """ 161 | Applies postprocessing to the optimized variables. In this case calculates 162 | real positions in the image from the positions in the roi. 
Also, normalizes 163 | psf and adapts intensities and background accordingly. 164 | """ 165 | res = variables.copy() 166 | res[-1] = variables[-1]*self.weight[-1] 167 | res[2] = variables[2] 168 | init_subpixel_pos_ref_channel = res[0]*self.weight[0] 169 | trafos = res[-1] 170 | self.ind = [0,res[0].shape[0]] 171 | # calc positions from pos in ref channel and trafos 172 | positions = self.calc_positions_from_trafos(init_subpixel_pos_ref_channel, trafos) 173 | 174 | # calc_positions_from_trafos is implemented using tf, 175 | # therefore convert to numpy here 176 | positions = positions.numpy() 177 | 178 | # just call postprocess of every sub_psf and stack results at the end 179 | results = [] 180 | for i, sub_psf in enumerate(self.sub_psfs): 181 | sub_variables = [positions[i]/self.weight[0]] 182 | for k in range(1,len(variables)-1): 183 | sub_variables.append(res[k][i]) 184 | results.append(sub_psf.postprocess(sub_variables)[:-1]) 185 | 186 | results = map(list, zip(*results)) # a way to tranpose the first two dimensions of a list of iterateables 187 | results = [np.stack(variable) for variable in results] 188 | for k in range(-1,0): 189 | results.append(res[k]) 190 | results.append(variables) 191 | return results 192 | 193 | 194 | def res2dict(self,res): 195 | res_dict = dict() 196 | for i,sub_psf in enumerate(self.sub_psfs): 197 | sub_res = [] 198 | for k in range(0,len(res)-2): 199 | sub_res.append(res[k][i]) 200 | res_dict['channel'+str(i)]=sub_psf.res2dict(sub_res) 201 | res_dict['T'] = np.squeeze(res[-2]) 202 | res_dict['imgcenter'] = self.imgcenter 203 | res_dict['xyshift'] = self.data.shiftxy 204 | 205 | 206 | 207 | return res_dict 208 | -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFPupilBased_file.py: -------------------------------------------------------------------------------- 1 | """ 2 | Copyright (c) 2022 Ries Lab, EMBL, Heidelberg, Germany 3 | All rights reserved 4 | 5 | @author: Sheng Liu, Jonas Hellgoth 6 | """ 7 | 8 | import numpy as np 9 | import scipy as sp 10 | import tensorflow as tf 11 | from scipy.ndimage.filters import gaussian_filter 12 | from .PSFInterface_file import PSFInterface 13 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 14 | from ..loss_functions import mse_real_pupil 15 | from .. import utilities as im 16 | from .. import imagetools as nip 17 | 18 | class PSFPupilBased(PSFInterface): 19 | """ 20 | PSF class that uses a 3D volume to describe the PSF. 21 | Should only be used with single-channel data. 22 | """ 23 | def __init__(self,options=None) -> None: 24 | self.parameters = None 25 | self.data = None 26 | self.Zphase = None 27 | self.zT = None 28 | self.bead_kernel = None 29 | self.options = options 30 | self.initpupil = None 31 | self.defocus = np.float32(0) 32 | self.default_loss_func = mse_real_pupil 33 | self.psftype = 'scalar' 34 | return 35 | 36 | def calc_initials(self, data: PreprocessedImageDataInterface, start_time=None): 37 | """ 38 | Provides initial values for the optimizable varibales for the fitter class. 
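Positions start at zero, backgrounds at the minimum of the Gaussian-smoothed rois, intensities at the summed signal above background, and the pupil at unit magnitude with zero phase. The per-variable weights are derived from the median intensity and background so that all variables are optimized on a comparable scale.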
39 | """ 40 | self.data = data 41 | _, rois, _, _ = self.data.get_image_data() 42 | 43 | options = self.options 44 | if options.model.with_IMM: 45 | init_positions = np.zeros((rois.shape[0], len(rois.shape))) 46 | else: 47 | init_positions = np.zeros((rois.shape[0], len(rois.shape)-1)) 48 | 49 | init_backgrounds = np.array(np.min(gaussian_filter(rois, [0, 2, 2, 2]), axis=(-3, -2, -1), keepdims=True)) 50 | init_intensitiesL = np.sum(rois - init_backgrounds, axis=(-2, -1), keepdims=True) 51 | init_intensities = np.mean(init_intensitiesL,axis=1,keepdims=True) 52 | 53 | self.gen_bead_kernel() 54 | N = rois.shape[0] 55 | Nz = rois.shape[-3] 56 | Lx = rois.shape[-1] 57 | xsz =options.model.pupilsize 58 | 59 | if self.psftype == 'vector': 60 | self.calpupilfield('vector') 61 | else: 62 | self.calpupilfield('scalar') 63 | #self.sincfilter = np.sinc(np.sqrt(self.kspace_x))*np.sinc(np.sqrt(self.kspace_y)) 64 | self.const_mag = options.model.const_pupilmag 65 | #self.bead_kernel = tf.complex(self.data.bead_kernel,0.0) 66 | #self.weight = np.array([np.median(init_intensities), 10, 0.1, 10, 10],dtype=np.float32) 67 | #weight = [1e4,10] + list(np.array([0.1,5,2])/np.median(init_intensities)*2e4) 68 | init_backgrounds[init_backgrounds<0.1] = 0.1 69 | bgmean = np.median(init_backgrounds) 70 | wI = np.lib.scimath.sqrt(np.median(init_intensities)) 71 | weight = [wI*100,bgmean] + list(np.array([1,30,30])/wI*40) 72 | self.weight = np.array(weight,dtype=np.float32) 73 | sigma = np.ones((2,))*self.options.model.blur_sigma*np.pi 74 | self.init_sigma = sigma 75 | 76 | init_pupil = np.zeros((xsz,xsz))+(1+0.0*1j)/self.weight[4] 77 | 78 | init_backgrounds = np.ones((N,1,1,1),dtype = np.float32)*np.median(init_backgrounds,axis=0, keepdims=True) / self.weight[1] 79 | gxy = np.zeros((N,2),dtype=np.float32) 80 | gI = np.ones((N,Nz,1,1),dtype = np.float32)*init_intensities 81 | self.varinfo = [dict(type='Nfit',id=0), 82 | dict(type='Nfit',id=0), 83 | dict(type='Nfit',id=0), 84 | dict(type='shared'), 85 | dict(type='shared'), 86 | dict(type='shared'), 87 | dict(type='Nfit',id=0)] 88 | 89 | if options.model.var_photon: 90 | init_Intensity = gI/self.weight[0] 91 | else: 92 | init_Intensity = init_intensities / self.weight[0] 93 | return [init_positions.astype(np.float32), 94 | init_backgrounds.astype(np.float32), 95 | init_Intensity.astype(np.float32), 96 | np.real(init_pupil).astype(np.float32), 97 | np.imag(init_pupil).astype(np.float32), 98 | sigma.astype(np.float32), 99 | gxy], start_time 100 | 101 | def calc_forward_images(self, variables): 102 | """ 103 | Calculate forward images from the current guess of the variables. 104 | Shifting is done by Fourier transform and applying a phase ramp. 
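The pupil (magnitude, phase, aperture and apodization) is propagated to each z plane with a defocus phase, transformed to real space with a chirp-z transform and squared; for the vector model the six dipole field components are summed incoherently. The resulting intensity stack is blurred with the bead kernel and a Gaussian filter in Fourier space, binned by options.model.bin and scaled by the fitted intensities and backgrounds.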
105 | """ 106 | 107 | pos, backgrounds, intensities, pupilR, pupilI, sigma,gxy = variables 108 | 109 | if self.const_mag: 110 | pupil_mag = tf.complex(1.0,0.0) 111 | else: 112 | pupil_mag = tf.complex(pupilR*self.weight[4],0.0) 113 | #pupil = tf.complex(tf.math.cos(pupilI*self.weight[3]),tf.math.sin(pupilI*self.weight[3]))*pupil_mag*self.aperture*self.apoid 114 | 115 | if self.initpupil is not None: 116 | pupil = self.initpupil 117 | normp = tf.complex(1.0,0.0) 118 | else: 119 | pupil_phase = tf.complex(tf.math.cos(pupilI*self.weight[3]),tf.math.sin(pupilI*self.weight[3]))*self.aperture 120 | pupil_phase0 = tf.complex(tf.math.cos(pupilI*0.0),tf.math.sin(pupilI*0.0))*self.aperture 121 | normp = self.calnorm(pupil_phase)/self.calnorm(pupil_phase0) 122 | pupil = pupil_phase*pupil_mag*self.apoid 123 | 124 | self.psfnorm = normp 125 | 126 | Nz = self.Zrange.shape[0] 127 | pos = tf.complex(tf.reshape(pos,pos.shape+(1,1,1)),0.0) 128 | phiz = -1j*2*np.pi*self.kz*(pos[:,0]+self.Zrange+self.defocus) 129 | if pos.shape[1]>3: 130 | phixy = 1j*2*np.pi*self.ky*pos[:,2]+1j*2*np.pi*self.kx*pos[:,3] 131 | phiz = 1j*2*np.pi*(self.kz_med*pos[:,1]-self.kz*(pos[:,0]+self.Zrange)) 132 | else: 133 | phixy = 1j*2*np.pi*self.ky*pos[:,1]+1j*2*np.pi*self.kx*pos[:,2] 134 | 135 | 136 | if self.psftype == 'vector': 137 | I_res = 0.0 138 | for h in self.dipole_field: 139 | PupilFunction = pupil*tf.exp(phiz+phixy)*h 140 | psfA = im.cztfunc1(PupilFunction,self.paramxy) 141 | I_res += psfA*tf.math.conj(psfA)*self.normf 142 | else: 143 | PupilFunction = pupil*tf.exp(phiz+phixy) 144 | I_res = im.cztfunc1(PupilFunction,self.paramxy) 145 | I_res = I_res*tf.math.conj(I_res)*self.normf 146 | 147 | bin = self.options.model.bin 148 | if not self.options.model.var_blur: 149 | sigma = self.init_sigma 150 | filter2 = tf.exp(-2*sigma[1]*sigma[1]*self.kspace_x-2*sigma[0]*sigma[0]*self.kspace_y) 151 | filter2 = tf.complex(filter2/tf.reduce_max(filter2),0.0) 152 | I_blur = im.ifft3d(im.fft3d(I_res)*self.bead_kernel*filter2) 153 | #I_blur = im.ifft3d(im.fft3d(I_res)*filter2) 154 | I_blur = tf.expand_dims(tf.math.real(I_blur),axis=-1) 155 | 156 | kernel = np.ones((1,bin,bin,1,1),dtype=np.float32) 157 | I_blur_bin = tf.nn.convolution(I_blur,kernel,strides=(1,1,bin,bin,1),padding='SAME',data_format='NDHWC') 158 | 159 | psf_fit = I_blur_bin[...,0] 160 | 161 | st = (self.bead_kernel.shape[0]-self.data.rois[0].shape[-3])//2 162 | psf_fit = psf_fit[:,st:Nz-st] 163 | 164 | if self.options.model.estimate_drift: 165 | gxy = gxy*self.weight[2] 166 | psf_shift = self.applyDrfit(psf_fit,gxy) 167 | forward_images = psf_shift*intensities*self.weight[0] + backgrounds*self.weight[1] 168 | else: 169 | forward_images = psf_fit*intensities*self.weight[0] + backgrounds*self.weight[1] 170 | 171 | return forward_images 172 | 173 | def genpsfmodel(self,sigma,pupil,addbead=False): 174 | phiz = -1j*2*np.pi*self.kz*(self.Zrange+self.defocus) 175 | if self.psftype == 'vector': 176 | I_res = 0.0 177 | for h in self.dipole_field: 178 | PupilFunction = pupil*tf.exp(phiz)*h 179 | psfA = im.cztfunc1(PupilFunction,self.paramxy) 180 | I_res += psfA*tf.math.conj(psfA)*self.normf 181 | else: 182 | PupilFunction = pupil*tf.exp(phiz) 183 | I_res = im.cztfunc1(PupilFunction,self.paramxy) 184 | I_res = I_res*tf.math.conj(I_res)*self.normf 185 | 186 | bin = self.options.model.bin 187 | 188 | filter2 = tf.exp(-2*sigma[1]*sigma[1]*self.kspace_x-2*sigma[0]*sigma[0]*self.kspace_y) 189 | filter2 = tf.complex(filter2/tf.reduce_max(filter2),0.0) 190 | if addbead: 191 | I_blur = 
np.real(im.ifft3d(im.fft3d(I_res)*filter2*self.bead_kernel)) 192 | else: 193 | I_blur = np.real(im.ifft3d(im.fft3d(I_res)*filter2)) 194 | I_blur = tf.expand_dims(tf.math.real(I_blur),axis=-1) 195 | 196 | kernel = np.ones((bin,bin,1,1),dtype=np.float32) 197 | I_model = tf.nn.convolution(I_blur,kernel,strides=(1,bin,bin,1),padding='SAME',data_format='NHWC') 198 | I_model = I_model[...,0] 199 | 200 | return I_model 201 | 202 | def postprocess(self, variables): 203 | """ 204 | Applies postprocessing to the optimized variables. In this case calculates 205 | real positions in the image from the positions in the roi. Also, normalizes 206 | psf and adapts intensities and background accordingly. 207 | """ 208 | positions, backgrounds, intensities, pupilR,pupilI,sigma,gxy = variables 209 | z_center = (self.Zrange.shape[-3] - 1) // 2 210 | bin = self.options.model.bin 211 | positions[:,1:] = positions[:,1:]/bin 212 | 213 | pupil_mag = tf.complex(pupilR*self.weight[4],0.0) 214 | if self.initpupil is not None: 215 | pupil = self.initpupil 216 | else: 217 | pupil = tf.complex(tf.math.cos(pupilI*self.weight[3]),tf.math.sin(pupilI*self.weight[3]))*pupil_mag*self.aperture*self.apoid 218 | 219 | I_model = self.genpsfmodel(sigma,pupil) 220 | I_model_bead = self.genpsfmodel(sigma,pupil,addbead=True) 221 | #I_model_bead = np.real(im.ifft3d(im.fft3d(I_res)*self.bead_kernel*filter2)) 222 | 223 | images, _, centers, _ = self.data.get_image_data() 224 | if positions.shape[1]>3: 225 | global_positions = np.swapaxes(np.vstack((positions[:,0]+z_center,positions[:,1],centers[:,-2]-positions[:,-2],centers[:,-1]-positions[:,-1])),1,0) 226 | else: 227 | global_positions = np.swapaxes(np.vstack((positions[:,0]+z_center,centers[:,-2]-positions[:,-2],centers[:,-1]-positions[:,-1])),1,0) 228 | 229 | return [global_positions.astype(np.float32), 230 | backgrounds*self.weight[1], # already correct 231 | intensities*self.weight[0], # already correct 232 | I_model_bead, 233 | I_model, 234 | np.complex64(pupil), 235 | sigma, 236 | gxy*self.weight[2], 237 | np.flip(I_model,axis=-3), 238 | variables] # already correct 239 | 240 | 241 | def res2dict(self,res): 242 | res_dict = dict(pos=res[0], 243 | bg=np.squeeze(res[1]), 244 | intensity=np.squeeze(res[2]), 245 | I_model_bead = res[3], 246 | I_model = res[4], 247 | pupil = res[5], 248 | sigma = res[6]/np.pi, 249 | drift_rate=res[7], 250 | I_model_reverse = res[8], 251 | offset=np.min(res[4]), 252 | apodization = self.apoid, 253 | cor_all = self.data.centers_all, 254 | cor = self.data.centers) 255 | return res_dict -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFVolumeBased4pi_file.py: -------------------------------------------------------------------------------- 1 | 2 | import numpy as np 3 | import tensorflow as tf 4 | 5 | 6 | from scipy.ndimage.filters import gaussian_filter 7 | from .PSFInterface_file import PSFInterface 8 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 9 | from ..loss_functions import mse_real_4pi 10 | from .. import utilities as im 11 | from .. 
import imagetools as nip 12 | 13 | class PSFVolumeBased4pi(PSFInterface): 14 | def __init__(self, max_iter: int=None,options=None) -> None: 15 | 16 | self.parameters = None 17 | self.updateflag = None 18 | self.data = None 19 | self.Zphase = None 20 | self.zT = None 21 | self.bead_kernel = None 22 | self.default_loss_func = mse_real_4pi 23 | self.options = options 24 | if max_iter is None: 25 | self.max_iter = 10 26 | else: 27 | self.max_iter = max_iter 28 | 29 | 30 | def calc_initials(self, data: PreprocessedImageDataInterface,start_time=None): 31 | """ 32 | Provides initial values for the optimizable varibales for the fitter class. 33 | """ 34 | self.data = data 35 | _, rois, _, _ = self.data.get_image_data() # TODO: check if file_idx are returned at all 36 | N = rois.shape[0] 37 | Nz = rois.shape[-3] 38 | I_data, A_data, _, init_phi = self.psf2IAB(rois) 39 | #init_phi = np.reshape(init_phi,(I_data.shape[0],1,1,1)) 40 | init_phi = np.zeros((I_data.shape[0],1,1,1)) 41 | init_positions = np.zeros([I_data.shape[0],len(I_data.shape)-1]).astype(np.float32) 42 | init_backgrounds = np.min(gaussian_filter(I_data, [0, 2, 2, 2]), axis=(-3, -2, -1), keepdims=True) 43 | init_intensities = np.sum(I_data - init_backgrounds, axis=(-2, -1), keepdims=True) 44 | init_intensities = np.mean(init_intensities,axis=1,keepdims=True) 45 | self.gen_bead_kernel(isVolume=True) 46 | 47 | self.zT = self.data.zT 48 | #self.weight = np.array([np.quantile(init_intensities,0.1), 20, 0.1, 0.1],dtype=np.float32) 49 | #weight = [1e4,20] + list(np.array([0.3,0.2])/np.median(init_intensities)*2e4) 50 | init_backgrounds[init_backgrounds<0.1] = 0.1 51 | bgmean = np.median(init_backgrounds) 52 | wI = np.lib.scimath.sqrt(np.median(init_intensities)) 53 | weight = [100*wI,bgmean] + list(np.array([1,1])*40/wI) 54 | self.weight = np.array(weight,dtype=np.float32) 55 | I1 = np.zeros(I_data[0].shape,dtype=np.float32)+0.002 / self.weight[3] 56 | A1 = np.ones(I1.shape, dtype=np.float32)*(1+1j)*0.002/2/np.sqrt(2)/self.weight[3] 57 | phase_dm = self.options.fpi.phase_dm 58 | phase = np.reshape(np.array(phase_dm)*-1,(len(phase_dm),1,1,1,1)).astype(np.float32) 59 | 60 | self.calpupilfield('scalar',Nz) 61 | self.Zphase = (np.linspace(-Nz/2+0.5,Nz/2-0.5,Nz,dtype=np.float32).reshape(Nz,1,1))*2*np.pi 62 | 63 | init_backgrounds = np.ones((N,1,1,1),dtype = np.float32)*np.median(init_backgrounds,axis=0, keepdims=True) / self.weight[1] 64 | 65 | gxy = np.zeros((N,2),dtype=np.float32) 66 | gI = np.ones((N,Nz,1,1),dtype = np.float32)*init_intensities 67 | self.varinfo = [dict(type='Nfit',id=0), 68 | dict(type='Nfit',id=0), 69 | dict(type='Nfit',id=0), 70 | dict(type='Nfit',id=0), 71 | dict(type='shared'), 72 | dict(type='shared'), 73 | dict(type='shared'), 74 | dict(type='shared'), 75 | dict(type='Nfit',id=0)] 76 | 77 | if self.options.model.var_photon: 78 | init_Intensity = gI/self.weight[0] 79 | else: 80 | init_Intensity = init_intensities / self.weight[0] 81 | 82 | return [init_positions.astype(np.float32), 83 | init_backgrounds.astype(np.float32), 84 | init_Intensity.astype(np.float32), 85 | init_phi.astype(np.float32), 86 | I1.astype(np.float32), 87 | np.real(A1).astype(np.float32), 88 | np.imag(A1).astype(np.float32), 89 | phase, 90 | gxy], start_time 91 | 92 | 93 | def calc_forward_images(self, variables): 94 | """ 95 | Calculate forward images from the current guess of the variables. 
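The 4Pi PSF is assembled from the incoherent part I_model and the complex modulated part A_model as psf = I + 2*Re(A*exp(i*phase)), where the phase combines the deformable-mirror phase, the per-bead phase and the axial modulation Zphase/zT. Sub-pixel shifts are applied as Fourier phase ramps and the result is convolved with the bead kernel.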
96 | """ 97 | 98 | 99 | pos = variables[0] 100 | bg = variables[1] 101 | intensity_abs = variables[2]*self.weight[0] 102 | intensity_phase = tf.complex(tf.math.cos(variables[3]),tf.math.sin(variables[3])) 103 | I_model = variables[4]*self.weight[3] 104 | A_model = tf.complex(variables[5],variables[6])*self.weight[3] 105 | phase0 = tf.complex(tf.math.cos(variables[7]), tf.math.sin(variables[7])) 106 | Zphase = self.Zphase/self.zT 107 | zphase = tf.complex(tf.math.cos(Zphase),tf.math.sin(Zphase)) 108 | 109 | 110 | I_model = tf.complex(I_model,0.0) 111 | I_otfs = im.fft3d(I_model)*tf.complex(intensity_abs*0.0+1.0,0.0) 112 | pos = tf.complex(tf.reshape(pos,pos.shape+(1,1,1)),0.0) 113 | I_res = im.ifft3d(I_otfs*self.phaseRamp(pos)) 114 | I_res = tf.math.real(I_res) 115 | 116 | A_otfs = im.fft3d(A_model*zphase)*intensity_phase 117 | A_res = im.ifft3d(A_otfs*self.phaseRamp(pos)) 118 | 119 | psf0 = (I_res)*tf.math.abs(phase0) + tf.math.real(A_res*phase0)*2 120 | psf_otfs = im.fft3d(tf.complex(psf0,0.0))*tf.expand_dims(tf.expand_dims(self.bead_kernel,axis=0),axis=0) 121 | psfmodel = tf.math.real(im.ifft3d(psf_otfs)) 122 | 123 | if self.options.model.estimate_drift: 124 | gxy = variables[8]*self.weight[2] 125 | psf_shift = self.applyDrfit(psfmodel,gxy) 126 | psf_shift = psf_shift* intensity_abs + bg*self.weight[1] 127 | forward_images = tf.transpose(psf_shift, perm = [1,0,2,3,4]) 128 | else: 129 | psfmodel = psfmodel* intensity_abs + bg*self.weight[1] 130 | forward_images = tf.transpose(psfmodel, perm = [1,0,2,3,4]) 131 | return forward_images 132 | 133 | 134 | 135 | def postprocess(self, variables): 136 | """ 137 | Applies postprocessing to the optimized variables. In this case calculates 138 | real positions in the image from the positions in the roi. Also, normalizes 139 | psf and adapts intensities and background accordingly. 
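The returned intensities are complex (amplitude times exp(i*phi)), the model is returned as separate I_model and A_model (rescaled by their weight), and the global positions are obtained by subtracting the fitted roi positions from the roi centers extended with the z center of the model volume.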
140 | """ 141 | 142 | 143 | positions = variables[0] 144 | backgrounds = variables[1] * self.weight[1] 145 | intensities = variables[2] * np.exp(1j*variables[3]) * self.weight[0] 146 | I_model = variables[4]*self.weight[3] 147 | A_model = (variables[5] + 1j*variables[6])*self.weight[3] 148 | phase = variables[7] 149 | gxy = variables[8]*self.weight[2] 150 | z_center = I_model.shape[-3] // 2 151 | 152 | # calculate global positions in images since positions variable just represents the positions in the rois 153 | images, _, centers, _ = self.data.get_image_data() 154 | 155 | centers_with_z = np.concatenate((np.full((centers.shape[0], 1), z_center), centers), axis=1) 156 | global_positions = centers_with_z - positions 157 | return [global_positions, 158 | backgrounds, 159 | intensities, 160 | I_model, 161 | A_model, 162 | phase, 163 | gxy, 164 | np.flip(I_model,axis=-3), 165 | np.flip(A_model,axis=-3), 166 | variables] 167 | 168 | 169 | def res2dict(self,res): 170 | res_dict = dict(pos=res[0], 171 | bg=np.squeeze(res[1]), 172 | intensity=np.squeeze(res[2]), 173 | I_model=res[3], 174 | A_model=res[4], 175 | phase_dm = np.squeeze(res[5]), 176 | drift_rate=res[6], 177 | I_model_reverse=res[7], 178 | A_model_reverse=res[8], 179 | offset=np.min(res[3]-2*np.abs(res[4])), 180 | Zphase = self.Zphase, 181 | cor_all = self.data.centers_all, 182 | cor = self.data.centers) 183 | 184 | return res_dict -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFVolumeBased_file.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | from scipy.ndimage.filters import gaussian_filter 4 | from .PSFInterface_file import PSFInterface 5 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 6 | from ..loss_functions import mse_real 7 | from .. import utilities as im 8 | from .. import imagetools as nip 9 | 10 | class PSFVolumeBased(PSFInterface): 11 | """ 12 | PSF class that uses a 3D volume to describe the PSF. 13 | Should only be used with single-channel data. 14 | """ 15 | def __init__(self, options = None) -> None: 16 | self.parameters = None 17 | self.data = None 18 | self.bead_kernel = None 19 | self.options = options 20 | self.default_loss_func = mse_real 21 | return 22 | 23 | def calc_initials(self, data: PreprocessedImageDataInterface, start_time=None): 24 | """ 25 | Provides initial values for the optimizable varibales for the fitter class. 
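Here the PSF itself is a 3D voxel model (init_psf_model) of the same shape as a single roi, initialized to a small constant; backgrounds and intensities are estimated from the Gaussian-smoothed rois as in the other PSF classes.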
26 | """ 27 | self.data = data 28 | _, rois, _, _ = self.data.get_image_data() 29 | 30 | init_positions = np.zeros((rois.shape[0], len(rois.shape)-1)) 31 | init_backgrounds = np.array(np.min(gaussian_filter(rois, [0, 2, 2, 2]), axis=(-3, -2, -1), keepdims=True)) 32 | init_intensities = np.sum(rois - init_backgrounds, axis=(-2, -1), keepdims=True) 33 | init_intensities = np.mean(init_intensities,axis=1,keepdims=True) 34 | 35 | N = rois.shape[0] 36 | Nz = rois.shape[-3] 37 | self.calpupilfield('scalar',Nz) 38 | self.gen_bead_kernel(isVolume=True) 39 | 40 | #self.weight = np.array([np.median(init_intensities)*1, 10, 0.1, 0.1],dtype=np.float32) 41 | #weight = [5e4,20] + list(np.array([0.1,0.2])/np.median(init_intensities)*2e4) 42 | init_backgrounds[init_backgrounds<0.1] = 0.1 43 | bgmean = np.median(init_backgrounds) 44 | wI = np.lib.scimath.sqrt(np.median(init_intensities)) 45 | weight = [1000*wI,bgmean] + list(np.array([1,1])*40/wI) 46 | self.weight = np.array(weight,dtype=np.float32) 47 | init_psf_model = np.zeros(rois[0].shape)+0.002/self.weight[3] 48 | init_backgrounds = np.ones((N,1,1,1),dtype = np.float32)*np.median(init_backgrounds,axis=0, keepdims=True) / self.weight[1] 49 | gxy = np.zeros((N,2),dtype=np.float32) 50 | gI = np.ones((N,Nz,1,1),dtype = np.float32)*init_intensities 51 | #gI = np.ones((N,Nz,1,1),dtype = np.float32)*np.mean(init_intensities,keepdims=True) 52 | self.varinfo = [dict(type='Nfit',id=0), 53 | dict(type='Nfit',id=0), 54 | dict(type='Nfit',id=0), 55 | dict(type='shared'), 56 | dict(type='Nfit',id=0)] 57 | 58 | if self.options.model.var_photon: 59 | init_Intensity = gI/self.weight[0] 60 | else: 61 | init_Intensity = init_intensities / self.weight[0] 62 | return [init_positions.astype(np.float32), 63 | init_backgrounds.astype(np.float32), 64 | init_Intensity.astype(np.float32), 65 | init_psf_model.astype(np.float32), 66 | gxy],start_time 67 | 68 | def calc_forward_images(self, variables): 69 | """ 70 | Calculate forward images from the current guess of the variables. 71 | Shifting is done by Fourier transform and applying a phase ramp. 72 | """ 73 | 74 | pos, backgrounds, intensities, I_model, gxy = variables 75 | 76 | I_model = tf.complex(I_model,0.0) 77 | I_otfs = im.fft3d(I_model*self.weight[3])*self.bead_kernel #*tf.complex(intensities*self.weight[0],0.0) 78 | pos = tf.complex(tf.reshape(pos,pos.shape+(1,1,1)),0.0) 79 | I_res = im.ifft3d(I_otfs*self.phaseRamp(pos)) 80 | 81 | psf_fit = tf.math.real(I_res) 82 | if self.options.model.estimate_drift: 83 | gxy = gxy*self.weight[2] 84 | psf_shift = self.applyDrfit(psf_fit,gxy) 85 | forward_images = psf_shift*intensities*self.weight[0] + backgrounds*self.weight[1] 86 | else: 87 | forward_images = psf_fit*intensities*self.weight[0] + backgrounds*self.weight[1] 88 | 89 | return forward_images 90 | 91 | def postprocess(self, variables): 92 | """ 93 | Applies postprocessing to the optimized variables. In this case calculates 94 | real positions in the image from the positions in the roi. Also, normalizes 95 | psf and adapts intensities and background accordingly. 
96 | """ 97 | positions, backgrounds, intensities, I_model,gxy = variables 98 | 99 | I_model = I_model*self.weight[3] 100 | I_model = I_model.astype(np.complex64) 101 | I_model_bead = np.real(im.ifft3d(im.fft3d(I_model)*self.bead_kernel)) 102 | I_model = I_model.astype(np.float32) 103 | z_center = (I_model.shape[-3] - 1) // 2 104 | images, _, centers, _ = self.data.get_image_data() 105 | centers_with_z = np.concatenate((np.full((centers.shape[0], 1), z_center), centers[:,-2:]), axis=1) 106 | 107 | global_positions = centers_with_z - positions 108 | 109 | return [global_positions.astype(np.float32), 110 | backgrounds*self.weight[1], # already correct 111 | intensities*self.weight[0], # already correct 112 | I_model_bead, 113 | I_model, 114 | gxy*self.weight[2], 115 | np.flip(I_model,axis=-3), 116 | variables] # already correct 117 | 118 | def res2dict(self,res): 119 | res_dict = dict(pos=res[0], 120 | I_model_bead =res[3], 121 | I_model = res[4], 122 | bg=np.squeeze(res[1]), 123 | intensity=np.squeeze(res[2]), 124 | drift_rate=res[5], 125 | I_model_reverse = res[6], 126 | offset=np.min(res[4]), 127 | cor_all = self.data.centers_all, 128 | cor = self.data.centers) 129 | 130 | return res_dict -------------------------------------------------------------------------------- /psflearning/learning/psfs/PSFZernikeBased_file.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy as sp 3 | import tensorflow as tf 4 | from scipy.ndimage.filters import gaussian_filter 5 | from .PSFInterface_file import PSFInterface 6 | from ..data_representation.PreprocessedImageDataInterface_file import PreprocessedImageDataInterface 7 | from ..loss_functions import mse_real_zernike 8 | from .. import utilities as im 9 | from .. import imagetools as nip 10 | 11 | class PSFZernikeBased(PSFInterface): 12 | """ 13 | PSF class that uses a 3D volume to describe the PSF. 14 | Should only be used with single-channel data. 15 | """ 16 | def __init__(self,options=None) -> None: 17 | self.parameters = None 18 | self.data = None 19 | self.Zphase = None 20 | self.zT = None 21 | self.bead_kernel = None 22 | self.options = options 23 | self.initpupil = None 24 | self.defocus = np.float32(0) 25 | self.default_loss_func = mse_real_zernike 26 | self.psftype = 'scalar' 27 | return 28 | 29 | def calc_initials(self, data: PreprocessedImageDataInterface, start_time=None): 30 | """ 31 | Provides initial values for the optimizable varibales for the fitter class. 
32 | """ 33 | self.data = data 34 | _, rois, _, _ = self.data.get_image_data() 35 | 36 | options = self.options 37 | if options.model.with_IMM: 38 | init_positions = np.zeros((rois.shape[0], len(rois.shape))) 39 | else: 40 | init_positions = np.zeros((rois.shape[0], len(rois.shape)-1)) 41 | 42 | init_backgrounds = np.array(np.min(gaussian_filter(rois, [0, 2, 2, 2]), axis=(-3, -2, -1), keepdims=True)) 43 | init_intensitiesL = np.sum(rois - init_backgrounds, axis=(-2, -1), keepdims=True) 44 | init_intensities = np.mean(init_intensitiesL,axis=1,keepdims=True) 45 | 46 | self.gen_bead_kernel() 47 | N = rois.shape[0] 48 | Nz = rois.shape[-3] 49 | Lx = rois.shape[-1] 50 | 51 | if self.psftype=='vector': 52 | self.calpupilfield('vector') 53 | else: 54 | self.calpupilfield('scalar') 55 | if options.model.const_pupilmag: 56 | self.n_max_mag = 0 57 | else: 58 | self.n_max_mag = 100 59 | 60 | 61 | #self.weight = np.array([np.median(init_intensities)*10, 100, 0.1, 0.2, 0.2],dtype=np.float32) 62 | #weight = [1e5,10] + list(np.array([0.1,0.2,0.2])/np.median(init_intensities)*2e4) 63 | init_backgrounds[init_backgrounds<0.1] = 0.1 64 | bgmean = np.median(init_backgrounds) 65 | wI = np.lib.scimath.sqrt(np.median(init_intensities)) 66 | weight = [wI*100,bgmean] + list(np.array([1,0.5,0.5])/wI*40) 67 | self.weight = np.array(weight,dtype=np.float32) 68 | sigma = np.ones((2,))*self.options.model.blur_sigma*np.pi 69 | self.init_sigma = sigma 70 | init_Zcoeff = np.zeros((2,self.Zk.shape[0],1,1)) 71 | init_Zcoeff[:,0,0,0] = [1,0]/self.weight[4] 72 | init_backgrounds = np.ones((N,1,1,1),dtype = np.float32)*np.median(init_backgrounds,axis=0, keepdims=True) / self.weight[1] 73 | gxy = np.zeros((N,2),dtype=np.float32) 74 | gI = np.ones((N,Nz,1,1),dtype = np.float32)*init_intensities 75 | 76 | self.varinfo = [dict(type='Nfit',id=0), 77 | dict(type='Nfit',id=0), 78 | dict(type='Nfit',id=0), 79 | dict(type='shared'), 80 | dict(type='shared'), 81 | dict(type='Nfit',id=0)] 82 | 83 | if options.model.var_photon: 84 | init_Intensity = gI/self.weight[0] 85 | else: 86 | init_Intensity = init_intensities / self.weight[0] 87 | return [init_positions.astype(np.float32), 88 | init_backgrounds.astype(np.float32), 89 | init_Intensity.astype(np.float32), 90 | init_Zcoeff.astype(np.float32), 91 | sigma.astype(np.float32), 92 | gxy], start_time 93 | 94 | def calc_forward_images(self, variables): 95 | """ 96 | Calculate forward images from the current guess of the variables. 97 | Shifting is done by Fourier transform and applying a phase ramp. 
98 | """ 99 | 100 | pos, backgrounds, intensities, Zcoeff, sigma, gxy = variables 101 | c1 = self.spherical_terms 102 | n_max = self.n_max_mag 103 | Nk = np.min(((n_max+1)*(n_max+2)//2,self.Zk.shape[0])) 104 | mask = c13: 124 | phixy = 1j*2*np.pi*self.ky*pos[:,2]+1j*2*np.pi*self.kx*pos[:,3] 125 | phiz = 1j*2*np.pi*(self.kz_med*pos[:,1]-self.kz*(pos[:,0]+self.Zrange)) 126 | else: 127 | phixy = 1j*2*np.pi*self.ky*pos[:,1]+1j*2*np.pi*self.kx*pos[:,2] 128 | 129 | if self.psftype == 'vector': 130 | I_res = 0.0 131 | for h in self.dipole_field: 132 | PupilFunction = pupil*tf.exp(phiz+phixy)*h 133 | psfA = im.cztfunc1(PupilFunction,self.paramxy) 134 | I_res += psfA*tf.math.conj(psfA)*self.normf 135 | else: 136 | PupilFunction = pupil*tf.exp(phiz+phixy) 137 | I_res = im.cztfunc1(PupilFunction,self.paramxy) 138 | I_res = I_res*tf.math.conj(I_res)*self.normf 139 | bin = self.options.model.bin 140 | if not self.options.model.var_blur: 141 | sigma = self.init_sigma 142 | 143 | #sigma = sigma*self.weight[5] 144 | filter2 = tf.exp(-2*sigma[1]*sigma[1]*self.kspace_x-2*sigma[0]*sigma[0]*self.kspace_y) 145 | filter2 = tf.complex(filter2/tf.reduce_max(filter2),0.0) 146 | I_blur = im.ifft3d(im.fft3d(I_res)*self.bead_kernel*filter2) 147 | I_blur = tf.expand_dims(tf.math.real(I_blur),axis=-1) 148 | kernel = np.ones((1,bin,bin,1,1),dtype=np.float32) 149 | I_blur_bin = tf.nn.convolution(I_blur,kernel,strides=(1,1,bin,bin,1),padding='SAME',data_format='NDHWC') 150 | 151 | psf_fit = I_blur_bin[...,0] 152 | st = (self.bead_kernel.shape[0]-self.data.rois[0].shape[-3])//2 153 | psf_fit = psf_fit[:,st:Nz-st] 154 | 155 | if self.options.model.estimate_drift: 156 | gxy = gxy*self.weight[2] 157 | psf_shift = self.applyDrfit(psf_fit,gxy) 158 | forward_images = psf_shift*intensities*self.weight[0] + backgrounds*self.weight[1] 159 | else: 160 | forward_images = psf_fit*intensities*self.weight[0] + backgrounds*self.weight[1] 161 | 162 | return forward_images 163 | 164 | 165 | def genpsfmodel(self,sigma,Zcoeff=None,pupil=None, addbead=False): 166 | if pupil is None: 167 | pupil_mag = tf.reduce_sum(self.Zk*Zcoeff[0],axis=0) 168 | pupil_mag = tf.math.maximum(pupil_mag,0) 169 | pupil_phase = tf.reduce_sum(self.Zk*Zcoeff[1],axis=0) 170 | pupil = tf.complex(pupil_mag*tf.math.cos(pupil_phase),pupil_mag*tf.math.sin(pupil_phase))*self.aperture*self.apoid 171 | 172 | phiz = -1j*2*np.pi*self.kz*(self.Zrange+self.defocus) 173 | if self.psftype == 'vector': 174 | I_res = 0.0 175 | for h in self.dipole_field: 176 | PupilFunction = pupil*tf.exp(phiz)*h 177 | psfA = im.cztfunc1(PupilFunction,self.paramxy) 178 | I_res += psfA*tf.math.conj(psfA)*self.normf 179 | else: 180 | PupilFunction = pupil*tf.exp(phiz) 181 | I_res = im.cztfunc1(PupilFunction,self.paramxy) 182 | I_res = I_res*tf.math.conj(I_res)*self.normf 183 | 184 | bin = self.options.model.bin 185 | filter2 = tf.exp(-2*sigma[1]*sigma[1]*self.kspace_x-2*sigma[0]*sigma[0]*self.kspace_y) 186 | filter2 = tf.complex(filter2/tf.reduce_max(filter2),0.0) 187 | if addbead: 188 | I_blur = np.real(im.ifft3d(im.fft3d(I_res)*filter2*self.bead_kernel)) 189 | else: 190 | I_blur = np.real(im.ifft3d(im.fft3d(I_res)*filter2)) 191 | 192 | I_blur = tf.expand_dims(tf.math.real(I_blur),axis=-1) 193 | kernel = np.ones((bin,bin,1,1),dtype=np.float32) 194 | I_model = tf.nn.convolution(I_blur,kernel,strides=(1,bin,bin,1),padding='SAME',data_format='NHWC') 195 | I_model = I_model[...,0] 196 | 197 | return I_model, pupil 198 | 199 | def postprocess(self, variables): 200 | """ 201 | Applies postprocessing to the 
optimized variables. In this case calculates 202 | real positions in the image from the positions in the roi. Also, normalizes 203 | psf and adapts intensities and background accordingly. 204 | """ 205 | positions, backgrounds, intensities, Zcoeff,sigma,gxy = variables 206 | z_center = (self.Zrange.shape[-3] - 1) // 2 207 | Zcoeff[0]=Zcoeff[0]*self.weight[4] 208 | Zcoeff[1]=Zcoeff[1]*self.weight[3] 209 | #sigma = sigma*self.weight[5] 210 | bin = self.options.model.bin 211 | positions[:,1:] = positions[:,1:]/bin 212 | if self.initpupil is not None: 213 | pupil = self.initpupil 214 | I_model, _ = self.genpsfmodel(sigma,pupil=pupil) 215 | I_model_bead, _ = self.genpsfmodel(sigma,pupil=pupil,addbead=True) 216 | else: 217 | I_model, pupil = self.genpsfmodel(sigma,Zcoeff=Zcoeff) 218 | I_model_bead,_ = self.genpsfmodel(sigma,Zcoeff=Zcoeff,addbead=True) 219 | 220 | images, _, centers, _ = self.data.get_image_data() 221 | original_shape = images.shape[-3:] 222 | Nbead = centers.shape[0] 223 | if positions.shape[1]>3: 224 | global_positions = np.swapaxes(np.vstack((positions[:,0]+z_center,positions[:,1],centers[:,-2]-positions[:,-2],centers[:,-1]-positions[:,-1])),1,0) 225 | else: 226 | global_positions = np.swapaxes(np.vstack((positions[:,0]+z_center,centers[:,-2]-positions[:,-2],centers[:,-1]-positions[:,-1])),1,0) 227 | 228 | return [global_positions.astype(np.float32), 229 | backgrounds*self.weight[1], # already correct 230 | intensities*self.weight[0], # already correct 231 | I_model_bead, 232 | I_model, 233 | np.complex64(pupil), 234 | Zcoeff, 235 | sigma, 236 | gxy*self.weight[2], 237 | np.flip(I_model,axis=-3), 238 | variables] # already correct 239 | 240 | def res2dict(self,res): 241 | res_dict = dict(pos=res[0], 242 | bg=np.squeeze(res[1]), 243 | intensity=np.squeeze(res[2]), 244 | I_model_bead = res[3], 245 | I_model = res[4], 246 | pupil = res[5], 247 | zernike_coeff = np.squeeze(res[6]), 248 | sigma = np.squeeze(res[7])/np.pi, 249 | drift_rate=res[8], 250 | I_model_reverse = res[9], 251 | offset=np.min(res[4]), 252 | zernike_polynomial = self.Zk, 253 | apodization = self.apoid, 254 | cor_all = self.data.centers_all, 255 | cor = self.data.centers) 256 | 257 | return res_dict -------------------------------------------------------------------------------- /psflearning/learning/psfs/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/psflearning/learning/psfs/__init__.py -------------------------------------------------------------------------------- /psflearning/learning/utilities.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | Copyright (c) 2022 Ries Lab, EMBL, Heidelberg, Germany 4 | All rights reserved Heintzmann Lab, Friedrich-Schiller-University Jena, Germany 5 | 6 | @author: Rainer Heintzmann, Sheng Liu, Jonas Hellgoth 7 | """ 8 | 9 | import tensorflow as tf 10 | import numpy as np 11 | import scipy as sp 12 | from math import factorial 13 | from . 
import imagetools as nip 14 | import numbers 15 | from scipy import ndimage 16 | import scipy.fft as fft 17 | defaultTFDataType="float32" 18 | defaultTFCpxDataType="complex64" 19 | #%% 20 | # The functions below are Tensorflow now 21 | 22 | def fft3d(tfin): 23 | return tf.signal.fftshift(tf.signal.fft3d(tf.signal.fftshift(tfin,axes=[-1,-2,-3])),axes=[-1,-2,-3]) 24 | 25 | def ifft3d(tfin): 26 | return tf.signal.ifftshift(tf.signal.ifft3d(tf.signal.ifftshift(tfin,axes=[-1,-2,-3])),axes=[-1,-2,-3]) 27 | 28 | def fft2d(tfin): 29 | return tf.signal.fftshift(tf.signal.fft2d(tf.signal.fftshift(tfin,axes=[-1,-2])),axes=[-1,-2]) 30 | 31 | def ifft2d(tfin): 32 | return tf.signal.ifftshift(tf.signal.ifft2d(tf.signal.ifftshift(tfin,axes=[-1,-2])),axes=[-1,-2]) 33 | 34 | 35 | 36 | def psf2cspline_np(psf): 37 | # calculate A 38 | A = np.zeros((64, 64)) 39 | for i in range(1, 5): 40 | dx = (i-1)/3 41 | for j in range(1, 5): 42 | dy = (j-1)/3 43 | for k in range(1, 5): 44 | dz = (k-1)/3 45 | for l in range(1, 5): 46 | for m in range(1, 5): 47 | for n in range(1, 5): 48 | A[(i-1)*16+(j-1)*4+k - 1, (l-1)*16+(m-1)*4+n - 1] = dx**(l-1) * dy**(m-1) * dz**(n-1) 49 | 50 | # upsample psf with factor of 3 51 | psf_up = ndimage.zoom(psf, 3.0, mode='grid-constant', grid_mode=True)[1:-1, 1:-1, 1:-1] 52 | A = np.float32(A) 53 | coeff = calsplinecoeff(A,psf,psf_up) 54 | return coeff 55 | 56 | 57 | def calsplinecoeff(A,psf,psf_up): 58 | # calculate cspline coefficients 59 | coeff = np.zeros((64, psf.shape[0]-1, psf.shape[1]-1, psf.shape[2]-1)) 60 | for i in range(coeff.shape[1]): 61 | for j in range(coeff.shape[2]): 62 | for k in range(coeff.shape[3]): 63 | temp = psf_up[i*3 : 3*(i+1)+1, j*3 : 3*(j+1)+1, k*3 : 3*(k+1)+1] 64 | #x = sp.linalg.solve(A, temp.reshape(64)) 65 | x = sp.linalg.solve(A,temp.flatten()) 66 | coeff[:, i, j, k] = x 67 | 68 | return coeff 69 | 70 | def nl2noll(n,l): 71 | mm = abs(l) 72 | j = n * (n + 1) / 2 + 1 + max(0, mm - 1) 73 | if ((l > 0) & (np.mod(n, 4) >= 2)) | ((l < 0) & (np.mod(n, 4) <= 1)): 74 | j = j + 1 75 | 76 | return np.int32(j) 77 | 78 | def noll2nl(j): 79 | n = np.ceil((-3 + np.sqrt(1 + 8*j)) / 2) 80 | l = j - n * (n + 1) / 2 - 1 81 | if np.mod(n, 2) != np.mod(l, 2): 82 | l = l + 1 83 | 84 | if np.mod(j, 2) == 1: 85 | l= -l 86 | 87 | return np.int32(n),np.int32(l) 88 | 89 | def radialpoly(n,m,rho): 90 | if m==0: 91 | g = np.sqrt(n+1) 92 | else: 93 | g = np.sqrt(2*n+2) 94 | r = np.zeros(rho.shape) 95 | for k in range(0,(n-m)//2+1): 96 | coeff = g*((-1)**k)*factorial(n-k)/factorial(k)/factorial((n+m)//2-k)/factorial((n-m)//2-k) 97 | p = rho**(n-2*k) 98 | r += coeff*p 99 | 100 | return r 101 | 102 | def genZern1(n_max,xsz): 103 | Nk = (n_max+1)*(n_max+2)//2 104 | Z = np.ones((Nk,xsz,xsz)) 105 | pkx = 2/xsz 106 | xrange = np.linspace(-xsz/2+0.5,xsz/2-0.5,xsz) 107 | [xx,yy] = np.meshgrid(xrange,xrange) 108 | rho = np.lib.scimath.sqrt((xx*pkx)**2+(yy*pkx)**2) 109 | phi = np.arctan2(yy,xx) 110 | 111 | for j in range(0,Nk): 112 | [n,l] = noll2nl(j+1) 113 | m = np.abs(l) 114 | r = radialpoly(n,m,rho) 115 | if l<0: 116 | Z[j] = r*np.sin(phi*m) 117 | else: 118 | Z[j] = r*np.cos(phi*m) 119 | return Z 120 | 121 | 122 | 123 | 124 | def prechirpz1(kpixelsize,pixelsize_x,pixelsize_y,N,M): 125 | krange = np.linspace(-N/2+0.5,N/2-0.5,N,dtype=np.float32) 126 | [xxK,yyK] = np.meshgrid(krange,krange) 127 | xrange = np.linspace(-M/2+0.5,M/2-0.5,M,dtype=np.float32) 128 | [xxR,yyR] = np.meshgrid(xrange,xrange) 129 | a = 1j*np.pi*kpixelsize 130 | A = np.exp(a*(pixelsize_x*xxK*xxK+pixelsize_y*yyK*yyK)) 131 | 
C = np.exp(a*(pixelsize_x*xxR*xxR+pixelsize_y*yyR*yyR)) 132 | 133 | brange = np.linspace(-(N+M)/2+1,(N+M)/2-1,N+M-1,dtype=np.float32) 134 | [xxB,yyB] = np.meshgrid(brange,brange) 135 | B = np.exp(-a*(pixelsize_x*xxB*xxB+pixelsize_y*yyB*yyB)) 136 | Bh = tf.signal.fft2d(B) 137 | 138 | return A,Bh,C 139 | 140 | 141 | def cztfunc1(datain,param): 142 | A = param[0] 143 | Bh = param[1] 144 | C = param[2] 145 | N = A.shape[0] 146 | L = Bh.shape[0] 147 | M = C.shape[0] 148 | 149 | Apad = tf.concat((A*datain/N,tf.zeros(datain.shape[0:-1]+(L-N),tf.complex64)),axis=-1) 150 | Apad = tf.concat((Apad,tf.zeros(Apad.shape[0:-2]+(L-N,Apad.shape[-1]),tf.complex64)),axis=-2) 151 | Ah = tf.signal.fft2d(Apad) 152 | cztout = tf.signal.ifft2d(Ah*Bh/L) 153 | dataout = C*cztout[...,-M:,-M:] 154 | 155 | return dataout 156 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | 3 | with open("README.md", "r") as f: 4 | long_description = f.read() 5 | 6 | setup( 7 | name='psflearning', 8 | version='0.0.1', 9 | description='A versatile and modular toolbox that uses inverse modelling to extract accurate PSF models for most SMLM imaging modalities from bead and single-molecule data.', 10 | long_description=long_description, 11 | long_description_content_type="text/markdown", 12 | 13 | url='https://github.com/ries-lab/uiPSF.git', 14 | author='Sheng Liu, Jonas Hellgoth, Jianwei Chen', 15 | author_email='shengliu@unm.edu, jonas.hellgoth@embl.de, 12149038@mail.sustech.edu.cn', 16 | 17 | license='LICENSE.txt', # TODO: choose a license and put it in license.txt --> https://choosealicense.com/ 18 | classifiers=[ # available on https://pypi.org/classifiers/ 19 | "Development Status :: 2 - Pre-Alpha", 20 | "Environment :: GPU :: NVIDIA CUDA :: 11.2", 21 | "Intended Audience :: Developers", 22 | "Intended Audience :: Science/Research", 23 | # TODO: add license here 24 | "Natural Language :: English", 25 | "Operating System :: Microsoft :: Windows", 26 | "Operating System :: POSIX :: Linux", 27 | "Programming Language :: Python :: 3.7", 28 | "Topic :: Scientific/Engineering :: Bio-Informatics", 29 | "Topic :: Scientific/Engineering :: Image Processing", 30 | "Topic :: Scientific/Engineering :: Physics" 31 | ], 32 | 33 | 34 | packages=['psflearning'], 35 | python_requires='>=3.7', 36 | install_requires=[ 37 | "numpy", 38 | "scipy", 39 | "matplotlib", 40 | "tensorflow==2.9.1", 41 | "tensorflow-probability==0.17", 42 | "scikit-image", 43 | "tqdm", 44 | "czifile", 45 | "hdfdict", 46 | "dotted_dict", 47 | "omegaconf", 48 | "ipykernel" 49 | 50 | ] 51 | ) -------------------------------------------------------------------------------- /source/mleFit_LM_DLL/CPUmleFit_LM.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_DLL/CPUmleFit_LM.dll -------------------------------------------------------------------------------- /source/mleFit_LM_DLL/CPUmleFit_LM_4Pi.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_DLL/CPUmleFit_LM_4Pi.dll -------------------------------------------------------------------------------- /source/mleFit_LM_DLL/CPUmleFit_LM_MultiChannel.dll: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_DLL/CPUmleFit_LM_MultiChannel.dll -------------------------------------------------------------------------------- /source/mleFit_LM_DLL/GPUmleFit_LM.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_DLL/GPUmleFit_LM.dll -------------------------------------------------------------------------------- /source/mleFit_LM_DLL/GPUmleFit_LM_4Pi.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_DLL/GPUmleFit_LM_4Pi.dll -------------------------------------------------------------------------------- /source/mleFit_LM_DLL/GPUmleFit_LM_MultiChannel.dll: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_DLL/GPUmleFit_LM_MultiChannel.dll -------------------------------------------------------------------------------- /source/mleFit_LM_dylib/libCPUmleFit_LM.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_dylib/libCPUmleFit_LM.dylib -------------------------------------------------------------------------------- /source/mleFit_LM_dylib/libCPUmleFit_LM_4Pi.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_dylib/libCPUmleFit_LM_4Pi.dylib -------------------------------------------------------------------------------- /source/mleFit_LM_dylib/libCPUmleFit_LM_MultiChannel.dylib: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_dylib/libCPUmleFit_LM_MultiChannel.dylib -------------------------------------------------------------------------------- /source/mleFit_LM_so/libCPUmleFit_LM.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_so/libCPUmleFit_LM.so -------------------------------------------------------------------------------- /source/mleFit_LM_so/libCPUmleFit_LM_4Pi.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_so/libCPUmleFit_LM_4Pi.so -------------------------------------------------------------------------------- /source/mleFit_LM_so/libCPUmleFit_LM_MultiChannel.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_so/libCPUmleFit_LM_MultiChannel.so -------------------------------------------------------------------------------- /source/mleFit_LM_so/libGPUmleFit_LM.so: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_so/libGPUmleFit_LM.so -------------------------------------------------------------------------------- /source/mleFit_LM_so/libGPUmleFit_LM_4Pi.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_so/libGPUmleFit_LM_4Pi.so -------------------------------------------------------------------------------- /source/mleFit_LM_so/libGPUmleFit_LM_MultiChannel.so: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/source/mleFit_LM_so/libGPUmleFit_LM_MultiChannel.so -------------------------------------------------------------------------------- /test/unit/io/test_param.py: -------------------------------------------------------------------------------- 1 | from pathlib import Path 2 | from omegaconf import OmegaConf 3 | 4 | from psflearning import io 5 | 6 | 7 | def test_param(tmpdir): 8 | p = Path(tmpdir) / "dummy_conf.yaml" 9 | 10 | cfg_dict = {"a": 42} 11 | cfg = OmegaConf.create(cfg_dict) 12 | 13 | OmegaConf.save(cfg, p) 14 | 15 | cfg_reloaded = io.param.load(p) 16 | assert cfg_reloaded == cfg 17 | -------------------------------------------------------------------------------- /tutorial/Tutorial for FD_aberrations.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/tutorial/Tutorial for FD_aberrations.pdf -------------------------------------------------------------------------------- /tutorial/Tutorial for fit_global_dualchannel.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/tutorial/Tutorial for fit_global_dualchannel.pdf -------------------------------------------------------------------------------- /tutorial/tutorial fit_4pi.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/tutorial/tutorial fit_4pi.pdf -------------------------------------------------------------------------------- /tutorial/tutorial for fit_fastsimple.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ries-lab/uiPSF/55669d47d45a35a20996a4a9c1c87680b6198d0b/tutorial/tutorial for fit_fastsimple.pdf --------------------------------------------------------------------------------
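Usage sketch for the Zernike helpers in psflearning/learning/utilities.py (noll2nl, genZern1). This is an illustrative example rather than repository code: the pupil size, radial order and coefficient value are made up, and it assumes the psflearning package and its dependencies (TensorFlow, SciPy) are installed so the module can be imported.

import numpy as np
from psflearning.learning import utilities as util

# map the first few Noll indices to radial/azimuthal orders (n, l)
for j in range(1, 7):
    print(j, util.noll2nl(j))            # e.g. Noll index 4 -> (2, 0), defocus

# all Zernike polynomials up to radial order n_max on a 64x64 pupil grid
n_max, xsz = 4, 64
Zk = util.genZern1(n_max, xsz)           # shape ((n_max+1)*(n_max+2)//2, xsz, xsz) = (15, 64, 64)

# compose a pupil phase as a weighted sum of the basis images, the same pattern
# PSFZernikeBased uses via tf.reduce_sum(self.Zk*Zcoeff[1], axis=0)
zcoeff = np.zeros(Zk.shape[0])
zcoeff[5] = 0.5                          # hypothetical astigmatism coefficient (Noll index 6)
pupil_phase = np.sum(Zk * zcoeff[:, None, None], axis=0)
print(pupil_phase.shape)                 # (64, 64)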
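The chirp-z transform pair prechirpz1/cztfunc1 is what lets the pupil-based PSF models propagate an N x N pupil to an M x M focal-plane field with a freely chosen output pixel size (the models call im.cztfunc1(PupilFunction, self.paramxy); paramxy is presumably precomputed with prechirpz1 inside calpupilfield, which is not shown in this dump). A minimal sketch with made-up sampling values and a flat circular pupil:

import numpy as np
import tensorflow as tf
from psflearning.learning import utilities as util

N, M = 64, 128                      # pupil grid and output grid sizes, illustrative
kpixelsize = 2.0 / N                # pupil-plane sampling, illustrative
pixelsize = 0.25                    # focal-plane sampling in matching normalized units, illustrative

# precompute the three chirp-z factors once (A, Bh, C)
paramxy = util.prechirpz1(kpixelsize, pixelsize, pixelsize, N, M)

# flat circular aperture -> roughly an Airy-like intensity pattern
xr = np.linspace(-N/2 + 0.5, N/2 - 0.5, N, dtype=np.float32)
xx, yy = np.meshgrid(xr, xr)
aperture = ((xx**2 + yy**2) <= (N/4)**2).astype(np.float32)
pupil = tf.complex(aperture, tf.zeros_like(aperture))   # complex64, as cztfunc1 expects

field = util.cztfunc1(pupil, paramxy)    # (M, M) complex focal-plane field
psf = np.abs(np.asarray(field))**2       # unnormalized intensity PSF
print(psf.shape)                         # (128, 128)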
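psf2cspline_np turns a PSF volume into 64 cubic-spline coefficients per interpolation voxel by three-fold upsampling the stack and solving a 64x64 linear system for every voxel, so it is intended for modest stack sizes. A hedged sketch with a synthetic Gaussian blob standing in for a learned I_model:

import numpy as np
from psflearning.learning import utilities as util

# synthetic 3D stack, purely illustrative input (z, y, x)
zz, yy, xx = np.meshgrid(np.arange(13), np.arange(21), np.arange(21), indexing='ij')
psf = np.exp(-((xx - 10)**2 + (yy - 10)**2) / 8.0 - (zz - 6)**2 / 4.0).astype(np.float32)
psf /= psf.sum()

coeff = util.psf2cspline_np(psf)
print(coeff.shape)    # (64, 12, 20, 20): 64 coefficients per voxel of the (13, 21, 21) stack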