├── src ├── __init__.py └── cohere_core │ ├── __init__ │ ├── lib │ ├── __init__.py │ ├── check_libs.py │ ├── nplib.py │ ├── cplib.py │ └── torchlib.py │ ├── controller │ ├── __init__.py │ ├── AI_guess.py │ ├── op_flow.py │ └── features.py │ ├── data │ ├── __init__.py │ ├── standard_preprocess.py │ └── alien_tools.py │ └── utilities │ ├── __init__.py │ ├── ga_utils.py │ ├── config_errors_dict.py │ └── utils.py ├── docs ├── requirements.txt ├── source │ ├── api_standard_preprocess.rst │ ├── api_phasing.rst │ ├── configuration.rst │ ├── api_utils.rst │ ├── index.rst │ ├── config_prep.rst │ ├── api_cohere_ui.rst │ ├── about.rst │ ├── config_main.rst │ ├── conf.py │ ├── for_developers.rst │ ├── installation.rst │ ├── config_instr.rst │ ├── config_disp.rst │ ├── config_data.rst │ ├── config_mp.rst │ ├── define_alg_seq.rst │ ├── cohere_experiment.rst │ └── config_rec.rst ├── Makefile └── make.bat ├── .gitignore ├── install.sh ├── .gitmodules ├── install.bat ├── .readthedocs.yaml ├── pyproject.toml ├── LICENSE └── README.md /src/__init__.py: -------------------------------------------------------------------------------- 1 | 2 | -------------------------------------------------------------------------------- /src/cohere_core/__init__: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/cohere_core/lib/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/cohere_core/controller/__init__.py: -------------------------------------------------------------------------------- 1 | from .phasing import * -------------------------------------------------------------------------------- /src/cohere_core/data/__init__.py: -------------------------------------------------------------------------------- 1 | from .standard_preprocess import * 2 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | sphinx_pyproject 2 | sphinx 3 | sphinx-rtd-theme 4 | -------------------------------------------------------------------------------- /src/cohere_core/utilities/__init__.py: -------------------------------------------------------------------------------- 1 | from .utils import * 2 | from .config_verifier import * 3 | from .dvc_utils import * 4 | from .ga_utils import * -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build/* 2 | docs/build/* 3 | dist/* 4 | core 5 | *.so 6 | *.o 7 | *.swp 8 | __pycache__ 9 | .idea/ 10 | *.egg 11 | *.egg-info 12 | .DS_Store 13 | -------------------------------------------------------------------------------- /docs/source/api_standard_preprocess.rst: -------------------------------------------------------------------------------- 1 | ======================= 2 | Standard Preprocess API 3 | ======================= 4 | .. automodule:: cohere_core.data.standard_preprocess 5 | :members: 6 | -------------------------------------------------------------------------------- /docs/source/api_phasing.rst: -------------------------------------------------------------------------------- 1 | =========== 2 | Phasing API 3 | =========== 4 | .. 
automodule:: cohere_core.controller.phasing 5 | :members: 6 | :exclude-members: Rec 7 | 8 | .. autoclass:: Rec 9 | :members: 10 | :special-members: __init__ -------------------------------------------------------------------------------- /docs/source/configuration.rst: -------------------------------------------------------------------------------- 1 | Configuration 2 | ============= 3 | 4 | .. toctree:: 5 | :maxdepth: 3 6 | :caption: Contents: 7 | 8 | config_main 9 | config_instr 10 | config_prep 11 | config_data 12 | config_rec 13 | config_disp 14 | config_mp -------------------------------------------------------------------------------- /install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | pip install cohere_core 4 | pip install cohere_ui 5 | pip install cohere_beamlines 6 | 7 | if [ "$#" -ne 0 ]; then 8 | if [ "$1" = "cupy" ]; then 9 | conda install cupy=12.2.0 -c conda-forge 10 | fi 11 | if [ "$1" = "torch" ]; then 12 | pip install torch torchvision torchaudio 13 | fi 14 | fi 15 | -------------------------------------------------------------------------------- /docs/source/api_utils.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | Utils API 3 | ========= 4 | config_verifier 5 | --------------- 6 | .. automodule:: cohere_core.utilities.config_verifier 7 | :members: 8 | 9 | utils 10 | ----- 11 | .. automodule:: cohere_core.utilities.utils 12 | :members: 13 | 14 | dvc_utils 15 | --------- 16 | .. automodule:: cohere_core.utilities.dvc_utils 17 | :members: -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "cohere-ui"] 2 | path = cohere-ui 3 | url = https://github.com/AdvancedPhotonSource/cohere-ui.git 4 | [submodule "cohere_examples"] 5 | path = cohere_examples 6 | url = https://github.com/AdvancedPhotonSource/cohere_examples 7 | [submodule "cohere_beamlines"] 8 | path = cohere_beamlines 9 | url = https://github.com/AdvancedPhotonSource/cohere_beamlines 10 | -------------------------------------------------------------------------------- /install.bat: -------------------------------------------------------------------------------- 1 | @echo off 2 | 3 | :: Install Python packages 4 | pip install cohere_core 5 | pip install cohere_ui 6 | pip install cohere_beamlines 7 | 8 | :: Check for arguments 9 | IF NOT "%~1"=="" ( 10 | IF "%~1"=="cupy" ( 11 | :: Install cupy via conda 12 | conda install cupy=12.2.0 -c conda-forge 13 | ) 14 | IF "%~1"=="torch" ( 15 | :: Install torch via pip 16 | pip install torch torchvision torchaudio 17 | ) 18 | ) 19 | -------------------------------------------------------------------------------- /docs/source/index.rst: -------------------------------------------------------------------------------- 1 | Welcome to cohere documentation! 2 | ================================ 3 | 4 | .. 
toctree:: 5 | :maxdepth: 2 6 | :caption: Contents: 7 | 8 | about 9 | installation 10 | define_alg_seq 11 | api_standard_preprocess 12 | api_phasing 13 | api_utils 14 | api_cohere_ui 15 | configuration 16 | cohere_experiment 17 | for_developers 18 | 19 | Indices and tables 20 | ================== 21 | 22 | * :ref:`genindex` 23 | * :ref:`modindex` 24 | * :ref:`search` 25 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = source 9 | BUILDDIR = build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/make.bat: -------------------------------------------------------------------------------- 1 | @ECHO OFF 2 | 3 | pushd %~dp0 4 | 5 | REM Command file for Sphinx documentation 6 | 7 | if "%SPHINXBUILD%" == "" ( 8 | set SPHINXBUILD=sphinx-build 9 | ) 10 | set SOURCEDIR=source 11 | set BUILDDIR=build 12 | 13 | if "%1" == "" goto help 14 | 15 | %SPHINXBUILD% >NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 
23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.http://sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /.readthedocs.yaml: -------------------------------------------------------------------------------- 1 | # .readthedocs.yaml 2 | # Read the Docs configuration file 3 | # See https://docs.readthedocs.io/en/stable/config-file/v2.html for details 4 | 5 | # Required 6 | version: 2 7 | 8 | submodules: 9 | include: ['cohere-ui'] 10 | 11 | # Set the OS, Python version and other tools you might need 12 | build: 13 | os: "ubuntu-22.04" 14 | tools: 15 | python: "3.12" 16 | # You can also specify other tool versions: 17 | # nodejs: "19" 18 | # rust: "1.64" 19 | # golang: "1.19" 20 | 21 | # Build documentation in the "docs/" directory with Sphinx 22 | sphinx: 23 | configuration: docs/source/conf.py 24 | 25 | # Optionally build your docs in additional formats such as PDF and ePub 26 | # formats: 27 | # - pdf 28 | # - epub 29 | 30 | # Optional but recommended, declare the Python requirements required 31 | # to build your documentation 32 | # See https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html 33 | python: 34 | install: 35 | - requirements: docs/requirements.txt 36 | -------------------------------------------------------------------------------- /docs/source/config_prep.rst: -------------------------------------------------------------------------------- 1 | .. _config_prep: 2 | 3 | =========== 4 | config_prep 5 | =========== 6 | | The "config_prep" file defines parameters used during data preprocessing. 7 | 8 | Parameters 9 | ========== 10 | 11 | - min_frames 12 | 13 | | Optional, defines a minimum number of raw data files in the scan directory. If number of files is less than minimum, the directory is not processed. 14 | 15 | :: 16 | 17 | min_frames = 80 18 | 19 | - exclude_scans 20 | 21 | | A list containing scan indexes that will be excluded from preprocessing process. 22 | 23 | :: 24 | 25 | exclude_scans = [78,81] 26 | 27 | - remove_outliers 28 | 29 | | Optional, boolean parameter for auto removal of outlier scans in large data sets. 30 | 31 | :: 32 | 33 | remove_outliers = True 34 | 35 | - outliers_scans 36 | 37 | | This list is determined automatically when remove_outliers is set to True. 
38 | 39 | :: 40 | 41 | outliers_scans = [78,80] 42 | 43 | - max_crop 44 | 45 | | Size of frame cut out around maximum 46 | 47 | :: 48 | 49 | max_crop = [200,200] 50 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools >= 77.0.3"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "cohere_core" 7 | version = "4.4.0" 8 | authors = [ 9 | { name="Barbara Frosik", email="bfrosik@anl.gov" }, 10 | { name="Ross Harder", email="rharder@anl.gov" } 11 | ] 12 | description = "cohere reconstruction tools" 13 | readme = "README.md" 14 | requires-python = ">=3.11" 15 | classifiers = [ 16 | "Programming Language :: Python :: 3", 17 | "Operating System :: OS Independent", 18 | ] 19 | license = { file = "LICENSE" } 20 | 21 | dependencies = [ 22 | 'numpy<2', 23 | 'scikit-learn', 24 | 'tifffile', 25 | 'psutil', 26 | 'tqdm', 27 | 'matplotlib', 28 | 'scipy==1.14.1', 29 | 'sphinx', 30 | 'sphinx-pyproject' 31 | ] 32 | 33 | [tool.setuptools.packages.find] 34 | where = ["src"] 35 | 36 | [project.urls] 37 | Homepage = "https://github.com/AdvancedPhotonSource/cohere" 38 | Documentation = "https://readthedocs.org" 39 | Repository = "https://github.com//AdvancedPhotonSource/cohere.git" 40 | 41 | [tool.sphinx-pyproject] 42 | extensions = [ 43 | "sphinx.ext.autodoc", 44 | "sphinx.ext.napoleon", 45 | 'sphinx.ext.intersphinx', 46 | ] 47 | -------------------------------------------------------------------------------- /docs/source/api_cohere_ui.rst: -------------------------------------------------------------------------------- 1 | .. _api_cohere_ui: 2 | 3 | ========= 4 | cohere_ui 5 | ========= 6 | 7 | cohere_gui 8 | ---------- 9 | This script provides GUI interface to cohere tools. User can create new cohere experiment or load an existing one. The GUI offers user friendly interface that allows to define configuration, set parameters to defaults, and run the scripts: beamline_preprocessing, standard_preprocessing, run_rec, and beamline_visualization, with push buttons. 10 | 11 | To run this script from command line:: 12 | 13 | cohere_gui 14 | 15 | optional arguments may follow: --no_verify, --debug 16 | 17 | One can use --help to get explanation of command line parameters. 18 | 19 | beamline_preprocess 20 | ------------------- 21 | .. automodule:: cohere_ui.beamline_preprocess 22 | 23 | standard_preprocess 24 | ------------------- 25 | .. automodule:: cohere_ui.standard_preprocess 26 | 27 | run_reconstruction 28 | ------------------- 29 | .. automodule:: cohere_ui.run_reconstruction 30 | 31 | beamline_visualization 32 | ---------------------- 33 | .. automodule:: cohere_ui.beamline_visualization 34 | 35 | everything 36 | ---------- 37 | .. automodule:: cohere_ui.everything 38 | 39 | simple_phasing 40 | -------------- 41 | .. automodule:: cohere_ui.simple_phasing 42 | 43 | create_aps34idc_experiment 44 | -------------------------- 45 | .. automodule:: cohere_ui.create_aps34idc_experiment 46 | 47 | copy_setup 48 | ---------- 49 | .. 
automodule:: cohere_ui.copy_setup -------------------------------------------------------------------------------- /docs/source/about.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | About 3 | ===== 4 | 5 | The cohere package provides tools for reconstruction of image of a nanoscale structures from data obtained using Bragg Coherent Diffraction Imaging technique. 6 | 7 | The reconstruction has very good performance, in particular when utilizing GPU. User has a choice to run on cpu or GPU by choosing the processing library. 8 | The solution offers parallel processing for fast reconstruction of multiple starting points. 9 | 10 | A powerful feature that can deliver good reconstruction result offered by the cohere package is genetic algorithm (GA). 11 | 12 | The tools are supplemented by another package cohere-ui which contains users scripts ready to use and easy to modify. 13 | Refer to :ref:`api_cohere_ui` for instructions on how to use the supplemental software. Combined together both of the packages: cohere_core and cohere_ui offer a full solution from beamline preprocessing data followed by standard preprocessing data, reconstruction, and beamline postprocessing that enables visualization of the results. 14 | The cohere_core project handles standard preprocessing on beamline data that has appied instrument correction, and reconstruction. These processes apply to any beamline. 15 | The beamline preprocessing, where the raw data is corrected for beamline specific instrument, and beamline postprocessing (visualization) are handled in cohere_ui suplemental project. 16 | Currently cohere_ui supports the following beamlines: aps_1ide, aps_34idc, esrf_id01, Petra3_P10. 17 | Another supplemental package cohere_examples provides examples with configuration files and data for the supported beamlines. 18 | 19 | The cohere is written in Python as this choice offers simplicity of development, and thus the tool can be expanded by community. 20 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright © 2019, UChicago Argonne, LLC 2 | All Rights Reserved 3 | Software Name: CDI Reconstruction (cohere) 4 | By: Argonne National Laboratory 5 | OPEN SOURCE LICENSE 6 | 7 | Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 8 | 9 | 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 10 | 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 11 | 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 12 | 13 | 14 | ****************************************************************************************************** 15 | DISCLAIMER 16 | 17 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 18 | *************************************************************************************************** -------------------------------------------------------------------------------- /src/cohere_core/lib/check_libs.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | from pathlib import Path 8 | import re 9 | 10 | 11 | def main(): 12 | lib_dir = Path(__file__).parent.as_posix() 13 | cplib = Path(f"{lib_dir}/cplib.py").read_text() 14 | cohlib = Path(f"{lib_dir}/cohlib.py").read_text() 15 | nplib = Path(f"{lib_dir}/nplib.py").read_text() 16 | torchlib = Path(f"{lib_dir}/torchlib.py").read_text() 17 | 18 | p0 = re.compile(r" def (\S+)\(") 19 | 20 | needs_subclasshook = [] 21 | needs_abstractmethod = [] 22 | needs_npfunction = [] 23 | needs_torchfunction = [] 24 | for funk in re.findall(p0, cplib): 25 | p1 = re.compile(rf"callable\(subclass.{funk}\)") 26 | p2 = re.compile(rf"hasattr\(subclass, '{funk}'\)") 27 | p3 = re.compile(rf"def {funk}\(") 28 | if not (re.search(p1, cohlib) and re.search(p2, cohlib)): 29 | needs_subclasshook.append(funk) 30 | if not re.search(p3, cohlib): 31 | needs_abstractmethod.append(funk) 32 | if not re.search(p3, nplib): 33 | needs_npfunction.append(funk) 34 | if not re.search(p3, torchlib): 35 | needs_torchfunction.append(funk) 36 | 37 | names = ["cohlib", "cohlib.__subclasshook__", "nplib", "torchlib"] 38 | libs = [needs_subclasshook, needs_abstractmethod, needs_npfunction, needs_torchfunction] 39 | if not any([len(lib) for lib in libs]): 40 | print("All devlibs are up to date!") 41 | return 42 | for name, lib in zip(names, libs): 43 | if len(lib): 44 | print(f"\nMissing from {name}:") 45 | for s in lib: 46 | print(f"\t{s}") 47 | 48 | 49 | if __name__ == "__main__": 50 | main() -------------------------------------------------------------------------------- /docs/source/config_main.rst: -------------------------------------------------------------------------------- 1 | .. _config_main: 2 | 3 | ====== 4 | config 5 | ====== 6 | | The "config" file is a main configuration defining the experiment and is applicable to all parts of processing: beamline preprocessing, standard preprocessing, phasing, and beamline visualization. 7 | 8 | Parameters 9 | ========== 10 | - working_dir 11 | 12 | | Mandatory, root directory of the experiment files. 13 | 14 | :: 15 | 16 | working_dir = "/path/to/the/experiment/" 17 | 18 | - experiment_id 19 | 20 | | Mandatory, a string identifying the experiment. 21 | 22 | :: 23 | 24 | experiment_id = "ab" 25 | 26 | - scan 27 | 28 | | Optional (but typically needed), string type encapsulating a single number, a range, or combination of numbers and ranges separated by comma, defining scans that will be read and combined to create data file. 
This parameter should not be set when running a multi-peak case. 29 | 30 | :: 31 | 32 | scan = "54" 33 | scan = "2-7" 34 | scan = "2-7, 10, 15, 20-22" 35 | 36 | - beamline 37 | 38 | | Optional, beamline where the experiment was conducted. If not configured, the beamline preprocessing and beamline visualization scripts are not available. 39 | 40 | :: 41 | 42 | beamline = "aps_34idc" 43 | 44 | - multipeak 45 | 46 | | Optional, boolean parameter indicating whether it is a multi-peak case. Defaults to False. 47 | 48 | :: 49 | 50 | multipeak = True 51 | 52 | - separate_scans 53 | 54 | | Optional, defaults to False. In a typical scenario the data from all scans in the experiment are combined. If specified as separate scans, each scan will be processed separately and will have a sub-experiment name containing the scan index, ex. "scan_9", where 9 is the scan index. 55 | 56 | :: 57 | 58 | separate_scans = False 59 | 60 | - separate_scan_ranges 61 | 62 | | Optional, defaults to False. In a typical scenario the data from all scans in the experiment are combined. If specified as separate scan ranges, each scan or scan range in the experiment will be processed separately and will have a sub-experiment name containing the scan index or scan index range, ex. "scan_9", where 9 is the scan index, or "scan_10-15", where 10-15 is the scan range. The scans and scan ranges are defined in the main configuration "config" file as the scan parameter, and are part of the experiment name. 63 | 64 | :: 65 | 66 | separate_scan_ranges = True 67 | 68 | - converter_ver 69 | 70 | | Mandatory after cohere ver 3.0. If not configured, it will be auto-updated by scripts to match the latest converter version. 71 | 72 | :: 73 | 74 | converter_ver = 3 75 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Documentation Status](https://readthedocs.org/projects/cohere/badge/?version=latest)](http://cohere.readthedocs.io/en/latest/?badge=latest) 2 | 3 | Project home page: [https://cohere.readthedocs.io/](https://cohere.readthedocs.io/) 4 | 5 | The cohere package provides tools for reconstruction of images of nanoscale structures from data obtained using the Bragg Coherent Diffraction Imaging technique. 6 | 7 | The reconstruction has very good performance, in particular when utilizing a GPU. The user can choose to run on CPU or GPU by selecting the Python library. Supported libraries: numpy, cupy, torch. The cupy and torch libraries are installed by the user. The solution offers concurrent processing for fast reconstruction of multiple starting points. 8 | 9 | Important features: 10 | - Genetic Algorithm (GA) - a powerful feature that can deliver a good reconstruction result by using GA principles. Based on the research "Three-dimensional imaging of dislocation propagation during crystal growth and dissolution, Supplementary Information" by Jesse N. Clark et al. 11 | - Artificial Intelligence initial guess for reconstruction - uses AI to find a reconstructed object that is subsequently used as input to further reconstruction. The work is built on the research by Yudong Yao et al.: "AutoPhaseNN: Unsupervised Physics-aware Deep Learning of 3D Nanoscale Bragg Coherent Diffraction Imaging". 12 | A trained model must be provided when using this feature.
User can download trained model by clicking the following link 13 | https://g-29c18.fd635.8443.data.globus.org/cherukara/cohere-trained_model.hdf5 14 | - AutoAlien1 algorithm - a method to remove aliens by automatic means during standard data preprocessing. Based on work "Removal of spurious data in Bragg coherent diffraction imaging: an algorithm for automated data preprocessing" by Kenley Pelzer et. al. 15 | - Multipeak - support for an experiment where data is collected for adjacent peaks simultaneously and reconstructing this multipeak scenario. The research is in experimental stage. Implemented by Jason (Nick) Porter. 16 | - chrono CDI - allows the oversampling requirement at each time step to be reduced. The increased time resolution will allow imaging of faster dynamics and of radiation-dose-sensitive samples. Based on work "Coherent diffractive imaging of time-evolving samples with improved temporal resolution" by A. Ulvestat et. al. 17 | 18 | The tools offers a full solution for reading beamline specific experiment data, formatting the data, reconstruction, and visualization. Each of the components can be utilized independently. The project was implemented for the Advanced Photon Source beamline 34-ID-C. 19 | 20 | Author(s) 21 | 22 | Barbara Frosik - Principal Software Engineer at Argonne National Laboratory 23 | 24 | Ross Harder - Scientist at Argonne National Laboratory 25 | 26 | License 27 | 28 | Copyright (c) UChicago Argonne, LLC. All rights reserved. See LICENSE file. 29 | -------------------------------------------------------------------------------- /docs/source/conf.py: -------------------------------------------------------------------------------- 1 | # Configuration file for the Sphinx documentation builder. 2 | # 3 | # This file only contains a selection of the most common options. For a full 4 | # list see the documentation: 5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 6 | 7 | # -- Path setup -------------------------------------------------------------- 8 | 9 | # If extensions (or modules to document with autodoc) are in another directory, 10 | # add these directories to sys.path here. If the directory is relative to the 11 | # documentation root, use os.path.abspath to make it absolute, like shown here. 12 | # 13 | import os 14 | import sys 15 | from sphinx_pyproject import SphinxConfig 16 | 17 | config = SphinxConfig("../../pyproject.toml", globalns=globals()) 18 | 19 | sys.path.insert(0, os.path.abspath('../../cohere-ui/src')) 20 | sys.path.insert(0, os.path.abspath('../../src')) 21 | # -- Project information ----------------------------------------------------- 22 | 23 | project = 'cohere_core' 24 | copyright = '2020, Barbara Frosik, Ross Harder' 25 | author = 'Barbara Frosik, Ross Harder' 26 | 27 | # The full version, including alpha/beta/rc tags 28 | release = '4.4.0' 29 | 30 | # -- General configuration --------------------------------------------------- 31 | 32 | # Add any Sphinx extension module names here, as strings. They can be 33 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 34 | # ones. 35 | extensions = [ 36 | 'sphinx.ext.autodoc', 37 | 'sphinx.ext.napoleon', 38 | 'sphinx.ext.intersphinx', 39 | ] 40 | napoleon_google_docstring = False 41 | napoleon_numpy_docstring = True 42 | 43 | # Add any paths that contain templates here, relative to this directory. 
44 | # templates_path = ['_templates'] 45 | 46 | # List of patterns, relative to source directory, that match files and 47 | # directories to ignore when looking for source files. 48 | # This pattern also affects html_static_path and html_extra_path. 49 | 50 | # exclude_patterns = globals().get('exclude_patterns', []) 51 | # exclude_patterns.extend(['_build']) 52 | 53 | 54 | # -- Options for HTML output ------------------------------------------------- 55 | 56 | # The theme to use for HTML and HTML Help pages. See the documentation for 57 | # a list of builtin themes. 58 | # 59 | #html_theme = 'alabaster' 60 | html_theme = 'sphinxdoc' 61 | smartquotes = False 62 | 63 | # Add any paths that contain custom static files (such as style sheets) here, 64 | # relative to this directory. They are copied after the builtin static files, 65 | # so a file named "default.css" will overwrite the builtin "default.css". 66 | #html_static_path = ['_static'] 67 | 68 | autodoc_mock_imports = ['numpy', 69 | 'scikit-learn', 70 | 'tifffile', 71 | 'tensorflow', 72 | 'mpi4py', 73 | 'GPUtil', 74 | 'psutil', 75 | 'tqdm', 76 | 'scipy', 77 | 'matplotlib', 78 | 'pyqt5', 79 | 'PyQt5', 80 | 'scikit-image', 81 | 'skimage', 82 | 'xrayutilities', 83 | 'pyvista', 84 | 'notebook', 85 | 'gputil', 86 | ] 87 | 88 | master_doc = 'index' 89 | -------------------------------------------------------------------------------- /docs/source/for_developers.rst: -------------------------------------------------------------------------------- 1 | ======= 2 | develop 3 | ======= 4 | | This chapter has info for developers. 5 | 6 | Adding new trigger 7 | ================== 8 | The design allows to add a new feature in a standardized way. Typical feature is defined by a trigger and supporting parameters. The following modifications/additions need to be done to add a new feature: 9 | - In cohere_core/controller/phasing.py, Rec constructor, insert a new function name ending with '_operation' to the self.iter_functions list in the correct order. 10 | - Implement the new trigger function in cohere_core/controller/phasing.py, Rec class. 11 | - In cohere_core/controller/phasing.py add code to set any new defaults when creating Rec object. 12 | - In utilities/config_verifier.py add code to verify added parameters. 13 | 14 | Adding new sub-trigger 15 | ====================== 16 | If the new feature will be used in a context of sub-triggers, in addition to the above steps, the following modifications/additions need to be done: 17 | - In cohere_core/controller/op_flow.py add entry in the sub_triggers dictionary, where key is the arbitrary assigned mnemonics, and value is the trigger name. 18 | - In cohere_core/controller/phasing.py, Rec.init function, create_feat_objects sub-function, add the new feature object, created the same way as shrink_wrap_obj, and other features. 19 | - In cohere_core/controller/phasing.py, Rec class add the trigger function. The code inside should call the trigger on the feature object with args. 20 | - in cohere_core/controller/features.py add new feature class. 21 | 22 | | The constructor factory function create should have a new lines to construct the new object. 23 | | The feature class should be subclass of Feature and 24 | | should have implemented create_obj function that creates sub-object(s) and 25 | | should have defined the sub-object(s) class(es). The embedded class contains the apply_trigger function that has the trigger code. 
Some features can be configured to different types and therefore multiple classes can be defined. 26 | | 27 | | The easiest way to implement the feature is to copy one already implemented and modify. 28 | 29 | Adding new algorithm 30 | ==================== 31 | The algorithm sequence defines functions executed during modulus projection and during modulus. Adding new algorithm requires the following steps: 32 | - In cohere_core/controller/op_flow.py add entry in the algs dictionary, where key is the mnemonic used in algorithm_sequence, and value is the tuple defining functions, ex: 'ER': ('to_reciprocal_space', 'modulus', 'to_direct_space', 'er') 33 | - In cohere_core/controller/phasing.py, Rec constructor, insert a new function name to the self.iter_functions list in the correct order. 34 | - In cohere_core/controller/phasing.py, Rec class add the new algorithm function(s). 35 | 36 | Pypi Build 37 | ========== 38 | For a new build change version in and pyproject.toml files to the new version and run pypi build: 39 | 40 | :: 41 | 42 | python3 -m pip install twine 43 | python3 -m build --sdist 44 | python3 -m build --wheel 45 | twine check dist/* 46 | 47 | | Upload to the test server and test 48 | 49 | :: 50 | 51 | twine upload --repository testpypi dist/* 52 | 53 | | Test in a new environment 54 | 55 | :: 56 | 57 | pip install --index-url https://test.pypi.org/simple/ --extra-index-url https://pypi.org/simple/ cohere_core --user 58 | 59 | | Test Linux, Windows, and Mac 60 | 61 | | upload build to pypi cloud 62 | 63 | :: 64 | 65 | twine upload dist/* 66 | 67 | -------------------------------------------------------------------------------- /docs/source/installation.rst: -------------------------------------------------------------------------------- 1 | ============ 2 | Installation 3 | ============ 4 | The cohere project can be installed on Linux, Windows and Mac operating systems. 5 | 6 | Official Release Installation 7 | ============================= 8 | Create and activate conda environment:: 9 | 10 | conda create -n -c conda-forge python=3.11 mpi4py pyzmq 11 | conda activate 12 | 13 | The cohere project consists of three modules and each of them is a separate package in PyPi.:: 14 | 15 | pip install cohere_core cohere_ui cohere_beamlines 16 | 17 | Note: If the user's choice is own beamline dependent preprocessing and postprocessing and cohere_core package as a reconstruction engine, the user can access cohere_core APIs or use cohere_ui user's scripts. In the first case only the cohere_core package has to be installed, and in the second case, the cohere_ui has to be added. 18 | 19 | If using cupy library:: 20 | 21 | conda install cupy=12.2.0 -c conda-forge 22 | 23 | If using torch library:: 24 | 25 | pip install torch torchvision torchaudio 26 | 27 | After installation you may start using scripts as described in :ref:`api_cohere_ui`, for example:: 28 | 29 | cohere_gui 30 | beamline_preprocess /example_workspace/scan_54 31 | standard_preprocess /example_workspace/scan_54 32 | run_reconstruction /example_workspace/scan_54 33 | beamline_visualization /example_workspace/scan_54 34 | 35 | The cohere project provides examples in the cohere_examples submodule. The examples contain experiment data and configuration files in defined directory structure. To get the examples refer to :ref:`examples` 36 | 37 | .. _latest: 38 | 39 | Latest development installation 40 | =============================== 41 | This will install the latest development. 
This installation might be right for a user that is looking for the latest development or do some development. 42 | 43 | Create conda environment, activate it and clone cohere repository. Cohere project has cohere_core module and three submodules: cohere-ui, cohere_beamlines, and cohere_examples. 44 | Run the following commands:: 45 | 46 | conda create -n -c conda-forge python=3.11 mpi4py pyzmq 47 | conda activate 48 | git clone https://github.com/advancedPhotonSource/cohere --recurse-submodules 49 | cd cohere 50 | git checkout Dev 51 | pip install -e . # include the -e option if you intend to edit cohere_core 52 | cd cohere_beamlines 53 | git checkout Dev 54 | pip install -e . # include the -e option if you intend to edit cohere_beamlines 55 | cd ../cohere-ui 56 | git checkout Dev 57 | pip install -e . # include the -e option if you intend to edit cohere_ui 58 | 59 | If using cupy library:: 60 | 61 | conda install cupy=12.2.0 -c conda-forge 62 | 63 | If using torch library:: 64 | 65 | pip install torch torchvision torchaudio 66 | 67 | After installation you may start using scripts as described in :ref:`api_cohere_ui`, for example:: 68 | 69 | cohere_gui 70 | 71 | The examples are cloned as one of submodule to cohere project. The examples are in cohere_experiment subdirectory. 72 | 73 | Beamlines specific installation 74 | =============================== 75 | For Petra beamline install additional packages:: 76 | 77 | pip install hdf5plugin pandas 78 | 79 | Some of the modules can be installed with Pypi, as some can be cloned. A common scenario when developing on a beamline is installing release version of cohere_core and cohere_ui, and clone cohere_beamlines. Such arrangement offers reliability of the main modules and allows flexibility when dealing with changes on a beamline. 80 | 81 | .. _examples: 82 | 83 | Examples installation 84 | ===================== 85 | To obtain the examples from cohere_examples module, clone the repository and initialize it to align with your directory structure:: 86 | 87 | git clone https://github.com/advancedPhotonSource/cohere_examples 88 | cd cohere_examples 89 | python init_examples.py 90 | 91 | -------------------------------------------------------------------------------- /docs/source/config_instr.rst: -------------------------------------------------------------------------------- 1 | .. _config_instr: 2 | 3 | ============ 4 | config_instr 5 | ============ 6 | The "config_instr" file defines parameters related to experiment and instrument used during the experiment, such as detector and diffractometer. 7 | 8 | beamline aps_1ide 9 | ----------------- 10 | All the listed parameters are mandatory for the 1-IDE beamline. 11 | 12 | Parameters 13 | ========== 14 | - diffractometer 15 | 16 | | Name of diffractometer used in experiment. 
17 | 18 | :: 19 | 20 | diffractometer = "1ide" 21 | 22 | - energy 23 | 24 | :: 25 | 26 | energy = 7.2 27 | 28 | - vff_r_offset: 29 | 30 | :: 31 | 32 | vff_r_offset = -0.107 33 | 34 | - vff_eta_offset: 35 | 36 | :: 37 | 38 | vff_eta_offset = 0.232 39 | 40 | - aero: 41 | 42 | :: 43 | 44 | aero = -10.97994 45 | 46 | - vff_eta: 47 | 48 | :: 49 | 50 | vff_eta = -24.8 51 | 52 | - detdist: 53 | 54 | :: 55 | 56 | detdist = 6.22ro = -10.97994 57 | 58 | - vff_r: 59 | 60 | :: 61 | 62 | vff_r = 692.50052 63 | 64 | - scanmot: 65 | 66 | :: 67 | 68 | scanmot = "aero" 69 | 70 | - scanmot_del: 71 | 72 | :: 73 | 74 | scanmot_del = 0.004999999999999997 75 | 76 | - detector 77 | 78 | | detector name 79 | 80 | :: 81 | 82 | detector = "ASI" 83 | 84 | beamline aps_34idc 85 | ------------------ 86 | The diffractometer is mandatory parameter for the 34-IDC beamline. If specfile is defined, the other parameters are parsed from the specfile. They can be overridden when included in this configuration file. 87 | 88 | Parameters 89 | ========== 90 | - diffractometer 91 | 92 | | Mandatory, name of diffractometer used in experiment 93 | 94 | :: 95 | 96 | diffractometer = "34idc" 97 | 98 | - specfile 99 | 100 | | Optional (but necessary when the parsed parameters are not provided), specfile recorded when the experiment was conducted. 101 | 102 | :: 103 | 104 | specfile = "/path/to/specfile/Staff20.spec" 105 | 106 | Parsed parameters 107 | ================= 108 | | In a typical scenario at APS 34-idc beamline a spec file is generated during experiment and the parameters listed below are parsed from this file, so the user do not configure them. User may override the parameters. 109 | 110 | - energy 111 | 112 | :: 113 | 114 | energy = 7.2 115 | 116 | - delta 117 | 118 | | delta (degrees) 119 | 120 | :: 121 | 122 | delta = 30.1 123 | 124 | - gamma 125 | 126 | | gamma (degrees) 127 | 128 | :: 129 | 130 | gamma = 14.0 131 | 132 | - detdist 133 | 134 | | camera distance (mm) 135 | 136 | :: 137 | 138 | detdist = 500.0 139 | 140 | - theta 141 | 142 | | angular step size 143 | 144 | :: 145 | 146 | theta = 0.1999946 147 | 148 | - chi 149 | 150 | :: 151 | 152 | chi = 90.0 153 | 154 | - phi 155 | 156 | :: 157 | 158 | phi = -5.0 159 | 160 | - scanmot 161 | 162 | :: 163 | 164 | scanmot = "th" 165 | 166 | - scanmot_del 167 | 168 | :: 169 | 170 | scanmot_del = 0.005 171 | 172 | - detector 173 | 174 | | detector name 175 | 176 | :: 177 | 178 | detector = "34idcTIM2" 179 | 180 | beamline esrf_id01 181 | ------------------ 182 | 183 | Parameters 184 | ========== 185 | - diffractometer 186 | 187 | | Mandatory, name of diffractometer used in experiment. 188 | 189 | :: 190 | 191 | diffractometer = "id01" 192 | 193 | - detector 194 | 195 | | Detector name 196 | 197 | :: 198 | 199 | detector = "mpxgaas" 200 | 201 | - h5file 202 | 203 | | File of hd5 format containing data and metadata. 204 | 205 | :: 206 | 207 | h5file = "path/to/data_file.h5" 208 | 209 | 210 | beamline Petra3_P10 211 | ------------------- 212 | 213 | Parameters 214 | ========== 215 | - diffractometer 216 | 217 | | Mandatory, name of diffractometer used in experiment. 218 | | Must be defined for the beamline. 219 | 220 | :: 221 | 222 | diffractometer = "P10sixc" 223 | 224 | - sample 225 | 226 | | Defines sample name that is used in directory structure. 227 | 228 | :: 229 | 230 | sample = "MC7_insitu" 231 | 232 | - data_dir 233 | 234 | | Directory where experiment data is stored; contains subdirectories related to samples. 
235 | 236 | :: 237 | 238 | data_dir = "example_data" 239 | -------------------------------------------------------------------------------- /docs/source/config_disp.rst: -------------------------------------------------------------------------------- 1 | .. _config_disp: 2 | 3 | =========== 4 | config_disp 5 | =========== 6 | | The "config_disp" file defines parameters needed to run postprocessing that will allow visualization of the reconstructed image. By configuring parameters in the config_disp file user can obtain various images viewable with Paraview. 7 | | By default user will get the visualized image as amplitudes and phases (or real and imagnary if configured complex_mode = "ReIm"), support, and optionally unwrapped phases. User can add interpolation to the processing by configuring interpolation_mode. This option will create vts file containing interpolated image. Another optional return is reciprocal space visualization, i.e. phasing data and inverse fourier transfer. User can request it by setting write_recip = True. To view resolution in direct and reciprocal space user can set determine_resolution_type. This will produce vts file containing resolution. 8 | 9 | Parameters 10 | ========== 11 | - results_dir 12 | 13 | | Optional, defaults to /results_phasing. A directory that has a tree, or leaf with reconstruction results. The results will be used as input to the visualization processing. There could be several results in the given directory tree, and all will be processed in parallel. 14 | 15 | :: 16 | 17 | results_dir = "/path/to/results_phasing" 18 | 19 | - rampups 20 | 21 | | Optional, upsize when running ramp removal, default is 1. Expect long processing time for greater numbers. 22 | 23 | :: 24 | 25 | rampups = 2 26 | 27 | - unwrap 28 | 29 | | Defaults to False. If True the vts file will contain unwrapped phase in addition to phase and amplitude. 30 | 31 | :: 32 | 33 | unwrap = False 34 | 35 | - make_twin 36 | 37 | | A switch to whether visualize twin image. 38 | 39 | :: 40 | 41 | make_twin = False 42 | 43 | - complex_mode 44 | 45 | | This mode determines arrays that will be saved in the direct space images file. If mode is "AmpPhase" the "imAmp" and "imPh" arrays will be saved that hold image amplitudes and image phases. if mode is "ReIm" the "imRe" and "imImag" arrays will be saved that hold real values and imaginary values. Defaults to "AmpPhase". 46 | 47 | :: 48 | 49 | complex_mode = "AmpPhase" 50 | 51 | - crop_type 52 | 53 | | Optional, Defines how crop is determined. Supported values: "fraction' and "tight". If defined as fraction, the defined fraction of each dimension is cropped around maximum value. The "tight" defines crop being determined by imcrop_margin and imcrop_thresh parameters applied to the image. The extend subarray will be derived from image array by finding points greater than threshold multiplied by maximum value. A margin will be added to each side of the extend array. 54 | 55 | :: 56 | 57 | crop_type = "tight" 58 | crop_type = "fraction" 59 | 60 | - crop_fraction 61 | 62 | | Required parameter when crop_type is configured "fraction". Defines size of the cropped array relative to the full image array. The full array is cropped around maximum value. 63 | 64 | :: 65 | 66 | crop_fraction = [.5, .5, .5] 67 | 68 | - crop_margin 69 | 70 | | Required parameter when crop_type is configured "tight". The margin will be added to each side of the extend array. 
71 | 72 | :: 73 | 74 | crop_margin = 10 75 | 76 | - crop_thresh 77 | 78 | | Required parameter when crop_type is configured "tight". The threshold will determine the extend of the array. 79 | 80 | :: 81 | 82 | crop_thresh = 0.5 83 | 84 | - interpolation_mode 85 | 86 | | If present the reconstructed object will be interpolated. The parameter defines how the image is interpolated. Supported values: "AmpPhase" and "ReIm". If defined as "AmpPhase" the image amplitudes and image phases are interpolated. If defined as "ReIm" the image real values and imaginary are interpolated, and then the interpolated image amplitudes and image phases are calculated. 87 | 88 | :: 89 | 90 | interpolation_mode = "AmpPhase" 91 | 92 | - interpolation_resolution 93 | 94 | | Required parameter for interpolation. Supported values: "min_deconv_res", int value, float value, list. If set to "min_deconv_res" the resolution will be determined by including the deconvolution resolution. If defined as integer value the resolution will be set to this value in each dimension. If defined as list, the list will define resolution in corresponding dimension. If set to "min_deconv_res" the resolution capability must be configured by setting the "determine_resolution_type" parameter. 95 | 96 | :: 97 | 98 | interpolation_resolution = "min_deconv_res" 99 | 100 | - determine_resolution_type 101 | 102 | | If present, the resolution in direct and reciprocal spaces will be found. Supported value: "deconv". 103 | 104 | :: 105 | 106 | determine_resolution_type = "deconv" 107 | 108 | - resolution_deconv_contrast 109 | 110 | | A fraction less than 0, required when "determine_resolution_type" is set to "deconv". 111 | 112 | :: 113 | 114 | resolution_deconv_contrast = 0.25 115 | 116 | - write_recip 117 | 118 | | If True the reciprocal_space.vts file will be saved with arrays of phasing data and inverse fourier of that data. 119 | 120 | :: 121 | 122 | write_recip = True 123 | -------------------------------------------------------------------------------- /docs/source/config_data.rst: -------------------------------------------------------------------------------- 1 | .. _config_data: 2 | 3 | =========== 4 | config_data 5 | =========== 6 | | The "config_data" file defines parameters needed to format data for phasing. The input is an instrument corrected data file obtained in the beamline preprocessing phase. 7 | 8 | Parameters 9 | ========== 10 | - data_dir 11 | 12 | | Optional, defaults to /preprocessed_data in cohere experiment directory structure. 13 | | Defines directory that contains data.tif file with beamline preprocessed experiment data. 14 | 15 | :: 16 | 17 | data_dir = "/path/to/data_dir" 18 | 19 | - alien_alg 20 | 21 | | Optional, if not defined, no alien algorithm is used. 22 | | Name of method used to remove aliens. Possible options are: 'block_aliens', 'alien_file', and 'AutoAlien1'. The 'block_aliens' algorithm will zero out defined blocks, 'alien_file' method will use given file as a mask, and 'AutoAlien1' will use auto mechanism to remove aliens. Each of these algorithms require different parameters, explained below. 23 | 24 | :: 25 | 26 | alien_alg = "AutoAlien1" 27 | 28 | - aliens 29 | 30 | | Needed when the 'block_aliens' method is configured. Used when the data contains regions with intensity produced by interference. The regions are zeroed out. The aliens can be defined as regions, each region defined by coordinates of starting point, and ending point (i.e. 
[[xb0,yb0,zb0,xe0,ye0,ze0],[xb1,yb1,zb1,xe1,ye1,ze1],...[xbn,ybn,zbn,xen,yen,zen]] ). 31 | 32 | :: 33 | 34 | aliens = [[170,220,112,195,245,123], [50,96,10,60,110,20]] 35 | 36 | - alien_file 37 | 38 | | Needed when the 'alien_file' method is configured. User can produce a file in npy format that contains zeros and ones, where zero means to set the pixel to zero, and one to leave it. 39 | 40 | :: 41 | 42 | alien_file = "/path/to/mask_file/AlienImg.npy" 43 | 44 | - AA1_size_threshold 45 | 46 | | Used in the 'AutoAliens1' method. If not given it will default to 0.01. The AutoAlien1 algorithm will calculate relative sizes of all clusters with respect to the biggest cluster. The clusters with relative size smaller than the given threshold will be possibly deemed aliens. It also depends on asymmetry. 47 | 48 | :: 49 | 50 | AA1_size_threshold = 0.01 51 | 52 | - AA1_asym_threshold 53 | 54 | | Used in the 'AutoAliens1' method. If not given it will default to 1.75. The AutoAlien1 algorithm will calculate average asymmetry of all clusters. The clusters with average asymmetry greater than the given threshold will be possibly deemed aliens. It also depends on relative size. 55 | 56 | :: 57 | 58 | AA1_asym_threshold = 1.75 59 | 60 | - AA1_min_pts 61 | 62 | | used in the 'AutoAliens1' method. If not given it will default to 5. Defines minimum non zero points in neighborhood to count the area of data as cluster. 63 | 64 | :: 65 | 66 | AA1_min_pts = 5 67 | 68 | - AA1_eps 69 | 70 | | Used in the 'AutoAliens1' method. If not given it will default to 1.1. Defines neighborhood Used in the clustering algorithm. 71 | 72 | :: 73 | 74 | AA1_eps = 1.1 75 | 76 | - AA1_amp_threshold 77 | 78 | | Mandatory in the 'AutoAliens1' method. Used to zero data points below that threshold. 79 | 80 | :: 81 | 82 | AA1_amp_threshold = 6 83 | 84 | - AA1_save_arrs 85 | 86 | | Used in the 'AutoAliens1' method, optional. If given and set to True multiple results of alien analysis will be saved in files. 87 | 88 | :: 89 | 90 | AA1_save_arrs = True 91 | 92 | - AA1_expandcleanedsigma 93 | 94 | | Used in the 'AutoAliens1' method, optional. If given the algorithm will apply last step of cleaning the data using the configured sigma. 95 | 96 | :: 97 | 98 | AA1_expandcleanedsigma = 5.0 99 | 100 | - auto_intensity_threshold 101 | 102 | | Optional, defaults to False. 103 | | The intensity threshold is calculated programmatically if set to True, otherwise must be provided. 104 | 105 | :: 106 | 107 | auto_intensity_threshold = True 108 | 109 | - intensity_threshold 110 | 111 | | Mandatory, if auto_intensity_threshold is not set. Intensity values below this value are set to 0. The threshold is applied after removing aliens. 112 | | If auto_data is configured , this value is overridden by calculated value. 113 | 114 | :: 115 | 116 | intensity_threshold = 25.0 117 | 118 | - crop_pad 119 | 120 | | Optional, a list of numbers defining how to adjust the size at each side of 3D data. If number is positive, the array will be padded. If negative, cropped. The parameters correspond to [x left, x right, y left, y right, z left, z right]. The final dimensions will be adjusted up to the good number for the FFT such as product of powers of 2, 3 or 5. 121 | 122 | :: 123 | 124 | crop_pad = [13, 0, -65, -65, -65, -65] 125 | 126 | - center_shift 127 | 128 | | Optional, defines offset of max element from the array center. 129 | 130 | :: 131 | 132 | center_shift = [0,0,0] 133 | 134 | - no_center_max 135 | 136 | | Optional, defaults to False. 
If False the array maximum is centered, otherwise max is not moved. 137 | 138 | :: 139 | 140 | no_center_max = False 141 | 142 | - binning 143 | 144 | | Optional, a list that defines binning values in respective dimensions, [1,1,1] has no effect. 145 | 146 | :: 147 | 148 | binning = [1,1,1] 149 | 150 | -------------------------------------------------------------------------------- /docs/source/config_mp.rst: -------------------------------------------------------------------------------- 1 | ========= 2 | config_mp 3 | ========= 4 | 5 | **Data/geometry parameters** 6 | 7 | - scan 8 | 9 | | Mandatory, string type encapsulating scans or ranges of scans containing data for each peak. The scans/scan ranges should be arranged in ascending order. 10 | 11 | :: 12 | 13 | scan = "898-913,919-934,940-955,961-976" 14 | 15 | - orientations 16 | 17 | | Mandatory, a list of lists, each inner list defining the orientation of a peak. 18 | 19 | :: 20 | 21 | orientations = [[-1, -1, 1], [0, 0, 2], [1, 1, 1], [2, 0, 0]] 22 | 23 | - hkl_in 24 | 25 | | Mandatory, list with Miller indices representing the in-plane lattice vector. 26 | 27 | :: 28 | 29 | hkl_in = [3.031127677370978, 12.31353345906843, 8.75104158816168] 30 | 31 | - hkl_out 32 | 33 | | Mandatory, list with Miller indices representing the out-of-plane lattice vector. 34 | 35 | :: 36 | 37 | hkl_out = [9.77805918769193, -8.402719849515048, 8.436553021703112] 38 | 39 | - twin_plane 40 | 41 | | Mandatory, list with Miller indices of the twin plane. If there is not a twin plane, this should be the same as sample_axis. 42 | 43 | :: 44 | 45 | twin_plane = [1, -1, 1] 46 | 47 | - sample_axis 48 | 49 | | Mandatory, axis of the sample. The data will be rotated so that the twin plane vector in the lattice frame (hkl) corresponds to this vector in the array frame (xyz). 50 | 51 | :: 52 | 53 | sample_axis = [0, 1, 0] 54 | 55 | - final_size 56 | 57 | | Mandatory, a size in each dimension of the array holding reconstructed object. 58 | 59 | :: 60 | 61 | final_size = 180 62 | 63 | - lattice_size 64 | 65 | | Mandatory, lattice parameter of the reconstructing crystal. This is used to define the reciprocal lattice vectors, which are required for projecting to each peak. 66 | 67 | :: 68 | 69 | lattice_size = 0.4078 70 | 71 | - rs_voxel_size 72 | - ds_voxel_size 73 | 74 | | Autofilled, These are calculated from the experimental geometry and added to the config file automatically. Do not edit these unless you REALLY know what you're doing. 75 | 76 | **General reconstruction parameters** 77 | 78 | - switch_peak_trigger 79 | 80 | | Mandatory, a trigger defining at which iteration to switch the peak. 81 | 82 | :: 83 | 84 | switch_peak_trigger = [0, 50] 85 | 86 | - weight_init 87 | 88 | | Mandatory, the initial global weight to use when updating the shared object. A weight of 0 will not update the shared object at all, while a weight of 1 will replace the shared object (except the orthogonal displacement) after phasing each peak. In general, a high weight leads to rapid development, but only forces the object to agree with whichever peak it phased most recently. A low weight converges slowly, but favors information common to all of the peaks. 89 | 90 | :: 91 | 92 | weight_init = 1.0 93 | 94 | - weight_iters 95 | - weight_vals 96 | 97 | | Optional, list of iterations and corresponding list of values. After each iteration in weight_iters, the global weights will be updated to the corresponding value in weight_vals. A good rule of thumb is to start high and end low. 
98 | 99 | :: 100 | 101 | weight_iters = [200, 400, 600, 800] 102 | weight_vals = [0.75, 0.5, 0.25, 0.1] 103 | 104 | - calc_strain 105 | 106 | | Optional, boolean, toggles whether to calculate the strain after reconstruction. 107 | 108 | :: 109 | 110 | calc_strain = False 111 | 112 | **Adaptive reconstruction parameters** 113 | 114 | - adapt_trigger 115 | 116 | | Optional, determines when to update the weights assigned to individual peaks. Before each peak-switch, a confidence value is calculated for the current peak, based on how much it changed while phasing (i.e. how much this dataset disagrees with the current state of the reconstruction). These are then periodically compiled into a peak-specific weight, which determines how much each dataset is allowed to impact the reconstruction. 117 | 118 | :: 119 | 120 | adapt_trigger = [100, 100] 121 | 122 | - adapt_power 123 | 124 | | Optional, non-negative number that determines how harshly to punish bad datasets. 125 | 126 | :: 127 | 128 | adapt_power = 2 # 129 | 130 | - adapt_threshold_init 131 | - adapt_threshold_iters 132 | - adapt_threshold_vals 133 | 134 | | Optional, determines the relative confidence threshold required for shrinkwrap to prevent bad datasets from constantly derailing the reconstruction. 135 | 136 | :: 137 | 138 | adapt_threshold_init = 0.5 139 | adapt_threshold_iters = [200, 400, 600, 800] 140 | adapt_threshold_vals = [0.6, 0.7, 0.8, 0.9] 141 | 142 | - adapt_alien_start 143 | 144 | | Optional, determines when to begin adaptive alien removal. Alien removal occurs immediately after switching to a new peak, while the exit wave still agrees perfectly with the shared object. The diffraction amplitude calculated by forward propagating this object is compared with the actual measurement of the same reflection, and voxels where they strongly contradict each other are masked. A hybrid diffraction pattern, where the masked voxels have been replaced by the forward propagation, is then used for phasing. 145 | 146 | :: 147 | 148 | adapt_alien_start = 50 149 | 150 | - adapt_alien_threshold 151 | 152 | | Optional, determines the minimum amount of contradiction needed to mask a voxel. Under the hood, the actual value is this multiplied by the median of the normalized difference map. 153 | 154 | :: 155 | 156 | adapt_alien_threshold = 2 157 | -------------------------------------------------------------------------------- /docs/source/define_alg_seq.rst: -------------------------------------------------------------------------------- 1 | .. _formula: 2 | 3 | ================= 4 | Execution formula 5 | ================= 6 | | The "config_rec" file defines parameters used during reconstruction process. 7 | | The execution sequence is determined by algorithm_sequence parameter, triggers, and sub-triggers. 8 | | Algorithm sequence defines which projection algorithm is executed in each iteration. Cohere supports the following projection algorithms: ER (error reduction), HIO (hybrid input-output), SF (solvent flipping), and RAAR (relaxed averaged alternating reflectors). The algorithms are described in this publication: https://pubs.aip.org/aip/rsi/article/78/1/011301/349838/Invited-Article-A-unified-evaluation-of-iterative. 9 | | Triggers define iterations at which the corresponding triggered operations are active. 10 | | The triggered operations use applicable parameters that are grouped by the corresponding trigger. 11 | | Sub-trigger is a trigger applied for part of iterations, associated with the projection algorithm. 
12 | | The trigger, sub-trigger, and algorithm sequence are explained in the sections below. 13 | 14 | Trigger 15 | ======= 16 | | Triggers are defined in config_rec, for example: shrink_wrap_trigger = [10, 5]. Refer to :ref:`config_rec` for documentation on all available triggers. 17 | | A trigger defines at which iterations to apply the associated trigger action. A trigger is defined as a list and can be configured for a single iteration or for multiple iterations. 18 | | examples: 19 | | [3] trigger at iteration 3 20 | | [20, 5] trigger starts at iteration 20, repeats every 5 iterations for the rest of the run. 21 | | [20, 5, 40] trigger starts at iteration 20, repeats every 5 iterations until iteration 40. 22 | 23 | Sub-Trigger 24 | =========== 25 | | For sub-trigger configuration, the trigger is a list of triggers, i.e. a list of lists. Each internal list is a sub-trigger. 26 | | The following features: shrink wrap (SW), lowpass filter (LPF), and phase constraint (PHC) can be defined as sub-triggers. The literals listed in parentheses are used to define the sub-triggers in algorithm_sequence. 27 | | For example, shrink_wrap_trigger = [[5, 1, 100], [0, 3], [5]] defines the sub-triggers SW0, SW1, and SW2, respectively. 28 | | Since the sub-triggers are associated with the algorithm sequence, they must be attached to the projection algorithm in the algorithm_sequence parameter, for example: 20*ER.SW0 + 180*HIO.SW1 + 20*ER.SW2 + 180*HIO.SW1 + 20*ER.SW0. 29 | | In this case the first sub-trigger SW0 will be applied during the first 20 iterations, then during the next 180 iterations the sub-trigger SW1 is applied, and so on. 30 | 31 | Algorithm Sequence 32 | ================== 33 | | This parameter defines the sequence of algorithms applied in each iteration during modulus projection and during modulus. The "*" character means repeat, and the "+" means add to the sequence. The sequence may contain single brackets defining a group that will be repeated by the preceding multiplier. The alphabetic entries ER, ERpc, HIO, HIOpc, RAAR, and SF define the algorithms used in the iteration. The entries invoke functions as follows: ER invokes the 'er' and 'modulus' functions, ERpc invokes 'er' and 'pc_modulus', HIO invokes 'hio' and 'modulus', HIOpc invokes 'hio' and 'pc_modulus', RAAR invokes 'raar' and 'modulus', and SF invokes 'sf' and 'modulus'. The pc_modulus is an implementation of modulus with partial coherence correction. If ERpc or HIOpc is defined, the pcdi (partial coherence) feature must be activated by configuring the pc_interval and pcdi parameters. 34 | | The algorithm sequence can include sub-triggers attached to the algorithm, as described in the Sub-Trigger section. 35 | 36 | Formula examples 37 | ================ 38 | | Algorithm sequence and corresponding trigger configuration 39 | 40 | | example 1 41 | 42 | :: 43 | 44 | algorithm_sequence = "2* (20*ER + 180*HIO) + 2* (20*ERpc + 180*HIOpc) + 20*ERpc" 45 | 46 | config_rec: 47 | | shrink_wrap_trigger = [1, 3] 48 | | lowpass_trigger = [0, 2, 130] 49 | 50 | | In this example the program will run: 51 | | twice 20 iterations with the ER algorithm and modulus and 180 iterations with the HIO algorithm and modulus, 52 | | followed by twice 20 iterations with the ER algorithm and partial coherence modulus and 180 iterations with the HIO algorithm and partial coherence modulus, 53 | | followed by 20 iterations with the ER algorithm and partial coherence modulus. 
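| Altogether this sequence runs 2*(20 + 180) + 2*(20 + 180) + 20 = 820 iterations.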
54 | | In addition, based on the triggers configuration, every three iterations starting from the first, for the rest of the run, the shrink wrap operation will be applied, 55 | | and every two iterations, starting at the beginning, until iteration 130, the lowpass operation will be applied. 56 | 57 | | example 2 58 | 59 | :: 60 | 61 | algorithm_sequence = "20*ER.SW0.PHC0 + 180*HIO.SW1.PHC1 + 20*ER.SW2" 62 | 63 | config_rec: 64 | | shrink_wrap_trigger = [[1, 2], [0, 3], [0, 4]] 65 | | phc_trigger = [[0, 2, 100], [0, 3, 100]] 66 | 67 | | In this example the program will run: 68 | | 20 iterations with the ER algorithm and modulus. During these 20 iterations the first sub-trigger in shrink_wrap_trigger, defined as [1, 2], will be applied; it starts at iteration one and proceeds every second iteration. During these iterations the first phase constraint sub-trigger from phc_trigger, defined as [0, 2, 100], will be applied, starting at the beginning iteration and repeating every other iteration; it will continue only until the end of the 20 ER iterations, even though it was configured for more iterations. 69 | | The ER iterations will be followed by 180 iterations with the HIO algorithm and modulus and sub-triggers. During these 180 iterations the second sub-trigger in shrink_wrap_trigger, defined as [0, 3], will be applied; it starts at the first HIO iteration and proceeds every third iteration. During these iterations the second phase constraint sub-trigger from phc_trigger, defined as [0, 3, 100], will be applied; it starts at the beginning iteration, repeats every third iteration, and continues until HIO iteration 100. 70 | | This sequence will be followed by 20 iterations with the ER algorithm and modulus and the shrink wrap sub-trigger defined as [0, 4]. 71 | -------------------------------------------------------------------------------- /src/cohere_core/utilities/ga_utils.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | import os 8 | import cohere_core.utilities.utils as ut 9 | import numpy as np 10 | 11 | 12 | def read_results(read_dir): 13 | """ 14 | Reads results of reconstruction: image, support, and coherence if it exists, and returns the array representations. 
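If a result file is missing or cannot be loaded, None is returned in its place.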
15 | 16 | :param read_dir: directory to read the results from 17 | :return: image, support, and coherence arrays 18 | """ 19 | try: 20 | imagefile = ut.join(read_dir, 'image.npy') 21 | image = np.load(imagefile) 22 | except: 23 | image = None 24 | 25 | try: 26 | supportfile = ut.join(read_dir, 'support.npy') 27 | support = np.load(supportfile) 28 | except: 29 | support = None 30 | 31 | try: 32 | cohfile = ut.join(read_dir, 'coherence.npy') 33 | coh = np.load(cohfile) 34 | except: 35 | coh = None 36 | 37 | return image, support, coh 38 | 39 | class Tracing: 40 | def __init__(self, reconstructions, pars, dir): 41 | self.init_dirs = [] 42 | self.report_tracing = [] 43 | 44 | pars['init_guess'] = pars.get('init_guess', 'random') 45 | if pars['init_guess'] == 'continue': 46 | continue_dir = pars['continue_dir'] 47 | for sub in os.listdir(continue_dir): 48 | image, support, coh = read_results(ut.join(continue_dir, sub)) 49 | if image is not None: 50 | self.init_dirs.append(ut.join(continue_dir, sub)) 51 | self.report_tracing.append([ut.join(continue_dir, sub)]) 52 | if len(self.init_dirs) < reconstructions: 53 | for i in range(reconstructions - len(self.init_dirs)): 54 | self.report_tracing.append([f'random{str(i)}']) 55 | self.init_dirs = self.init_dirs + (reconstructions - len(self.init_dirs)) * [None] 56 | elif pars['init_guess'] == 'AI_guess': 57 | self.report_tracing.append(['AI_guess']) 58 | for i in range(reconstructions - 1): 59 | self.report_tracing.append([f'random{str(i)}']) 60 | self.init_dirs = [ut.join(dir, 'results_AI')] + (reconstructions - 1) * [None] 61 | else: 62 | for i in range(reconstructions): 63 | self.init_dirs.append(None) 64 | self.report_tracing.append([f'random{str(i)}']) 65 | 66 | 67 | def append_gen(self, gen_ranks): 68 | for key in gen_ranks: 69 | self.report_tracing[self.map[key]].append(gen_ranks[key]) 70 | 71 | 72 | def pretty_format_results(self): 73 | """ 74 | Takes in a list of report traces and formats them into human-readable tables. 75 | Performs data conversion in 1 pass and formatting in a second to determine 76 | padding and spacing schemes. 
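Each population member occupies one block of rows in the output: the first row holds the member's starting point and its entry for every generation, followed by one row per configured fitness metric.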
77 | 78 | Parameters 79 | ---------- 80 | none 81 | 82 | Returns 83 | ------- 84 | report_output : str 85 | a string containing the formatted report 86 | """ 87 | col_gap = 2 88 | 89 | num_gens = len(self.report_tracing[0]) - 1 90 | fitnesses = list(self.report_tracing[0][1][1].keys()) 91 | 92 | report_table = [] 93 | report_table.append(['start'] + [f'generation {i}' for i in range(num_gens)]) 94 | report_table.append([''] * len(report_table[0])) 95 | 96 | data_col_width = 15 97 | start_col_width = 15 98 | for pop_data in self.report_tracing: 99 | report_table.append([str(pop_data[0])] + [str(ind_data[0]) for ind_data in pop_data[1:]]) 100 | start_col_width = max(len(pop_data[0]), start_col_width) 101 | 102 | for fit in fitnesses: 103 | fit_row = [''] 104 | for ind_data in pop_data[1:]: 105 | data_out = f'{fit} : {ind_data[1][fit]}' 106 | data_col_width = max(len(data_out), data_col_width) 107 | fit_row.append(data_out) 108 | report_table.append(fit_row) 109 | report_table.append([''] * len(report_table[0])) 110 | 111 | report_str = '' 112 | linesep = os.linesep 113 | for row in report_table: 114 | report_str += row[0].ljust(start_col_width + col_gap) 115 | report_str += (' ' * col_gap).join([cell.ljust(data_col_width) for cell in row[1:]]) + linesep 116 | 117 | return report_str 118 | 119 | 120 | def save(self, save_dir): 121 | try: 122 | report_str = self.pretty_format_results() 123 | except Exception as e: 124 | print(f'WARNING: Report formatting failed due to {type(e)}: {e}! Falling back to raw formatting.') 125 | report_str = '\n'.join([str(l) for l in self.report_tracing]) 126 | 127 | with open(ut.join(save_dir, 'ranks.txt'), 'w+') as rank_file: 128 | rank_file.write(report_str) 129 | rank_file.flush() 130 | 131 | 132 | def set_map(self, map): 133 | self.map = map 134 | 135 | 136 | def set_ga_defaults(pars): 137 | pars['reconstructions'] = pars.get('reconstructions', 1) 138 | pars['ga_generations'] = pars.get('ga_generations', 1) 139 | pars['init_guess'] = pars.get('init_guess', 'random') 140 | 141 | # check if pc feature is on 142 | if 'pc' in pars['algorithm_sequence'] and 'pc_interval' in pars: 143 | if not 'ga_gen_pc_start' in pars: 144 | pars['ga_gen_pc_start'] = 0 145 | pars['ga_gen_pc_start'] = min(pars['ga_gen_pc_start'], pars['ga_generations']-1) 146 | 147 | pars['ga_fast'] = pars.get('ga_fast', False) 148 | 149 | if 'ga_metrics' not in pars: 150 | metrics = ['chi'] * pars['ga_generations'] 151 | else: 152 | metrics = pars['ga_metrics'] 153 | if len(metrics) == 1: 154 | metrics = metrics * pars['ga_generations'] 155 | elif len(metrics) < pars['ga_generations']: 156 | metrics = metrics + ['chi'] * (pars['ga_generations'] - len(metrics)) 157 | pars['ga_metrics'] = metrics 158 | 159 | ga_reconstructions = [] 160 | if 'ga_cullings' in pars: 161 | worst_remove_no = pars['ga_cullings'] 162 | if len(worst_remove_no) < pars['ga_generations']: 163 | worst_remove_no = worst_remove_no + [0] * (pars['ga_generations'] - len(worst_remove_no)) 164 | else: 165 | worst_remove_no = [0] * pars['ga_generations'] 166 | pars['worst_remove_no'] = worst_remove_no 167 | # calculate how many reconstructions should continue 168 | reconstructions = pars['reconstructions'] 169 | for culling in worst_remove_no: 170 | reconstructions = reconstructions - culling 171 | if reconstructions <= 0: 172 | return 'culled down to 0 reconstructions, check configuration' 173 | ga_reconstructions.append(reconstructions) 174 | pars['ga_reconstructions'] = ga_reconstructions 175 | 176 | sw_threshold = .1 177 | if 
'ga_sw_thresholds' in pars: 178 | ga_sw_thresholds = pars['ga_sw_thresholds'] 179 | if len(ga_sw_thresholds) == 1: 180 | ga_sw_thresholds = ga_sw_thresholds * pars['ga_generations'] 181 | elif len(ga_sw_thresholds) < pars['ga_generations']: 182 | ga_sw_thresholds = ga_sw_thresholds + [sw_threshold] * (pars['ga_generations'] - len(ga_sw_thresholds)) 183 | else: 184 | ga_sw_thresholds = [sw_threshold] * pars['ga_generations'] 185 | pars['ga_sw_thresholds'] = ga_sw_thresholds 186 | 187 | sw_gauss_sigma = 1.0 188 | if 'ga_sw_gauss_sigmas' in pars: 189 | ga_sw_gauss_sigmas = pars['ga_sw_gauss_sigmas'] 190 | if len(ga_sw_gauss_sigmas) == 1: 191 | ga_sw_gauss_sigmas = ga_sw_gauss_sigmas * pars['ga_generations'] 192 | elif len(pars['ga_sw_gauss_sigmas']) < pars['ga_generations']: 193 | ga_sw_gauss_sigmas = ga_sw_gauss_sigmas + [sw_gauss_sigma] * (pars['ga_generations'] - len(ga_sw_gauss_sigmas)) 194 | else: 195 | ga_sw_gauss_sigmas = [sw_gauss_sigma] * pars['ga_generations'] 196 | pars['ga_sw_gauss_sigmas'] = ga_sw_gauss_sigmas 197 | 198 | if 'ga_breed_modes' not in pars: 199 | ga_breed_modes = ['sqrt_ab'] * pars['ga_generations'] 200 | else: 201 | ga_breed_modes = pars['ga_breed_modes'] 202 | if len(ga_breed_modes) == 1: 203 | ga_breed_modes = ga_breed_modes * pars['ga_generations'] 204 | elif len(ga_breed_modes) < pars['ga_generations']: 205 | ga_breed_modes = ga_breed_modes + ['sqrt_ab'] * (pars['ga_generations'] - len(ga_breed_modes)) 206 | pars['ga_breed_modes'] = ga_breed_modes 207 | 208 | if 'ga_lpf_sigmas' in pars: 209 | pars['low_resolution_generations'] = len(pars['ga_lpf_sigmas']) 210 | else: 211 | pars['low_resolution_generations'] = 0 212 | 213 | if pars['low_resolution_generations'] > 0: 214 | pars['low_resolution_alg'] = pars.get('low_resolution_alg', 'GAUSS') 215 | 216 | return pars 217 | 218 | -------------------------------------------------------------------------------- /src/cohere_core/lib/nplib.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. 
# 5 | # ######################################################################### 6 | 7 | from cohere_core.lib.cohlib import cohlib 8 | import numpy as np 9 | import scipy.fft as fft 10 | import scipy.ndimage as ndi 11 | import scipy.signal as sig 12 | import scipy.stats as stats 13 | 14 | class nplib(cohlib): 15 | @staticmethod 16 | def array(obj): 17 | return np.array(obj) 18 | 19 | @staticmethod 20 | def dot(arr1, arr2): 21 | return np.dot(arr1, arr2) 22 | 23 | @staticmethod 24 | def cross(arr1, arr2): 25 | return np.cross(arr1, arr2) 26 | 27 | @staticmethod 28 | def set_device(dev_id): 29 | pass 30 | 31 | @staticmethod 32 | def to_numpy(arr): 33 | return arr 34 | 35 | @staticmethod 36 | def load(filename, **kwargs): 37 | return np.load(filename) 38 | 39 | @staticmethod 40 | def from_numpy(arr, **kwargs): 41 | return arr 42 | 43 | @staticmethod 44 | def save(filename, arr): 45 | np.save(filename, arr) 46 | 47 | @staticmethod 48 | def dtype(arr): 49 | return arr.dtype 50 | 51 | @staticmethod 52 | def astype(arr, dtype): 53 | return arr.astype(dtype=dtype) 54 | 55 | @staticmethod 56 | def reshape(arr, shape): 57 | return np.reshape(arr, shape) 58 | 59 | @staticmethod 60 | def size(arr): 61 | return arr.size 62 | 63 | @staticmethod 64 | def next_fast_len(target): 65 | return fft.next_fast_len(target) 66 | 67 | @staticmethod 68 | def nan_to_num(arr, **kwargs): 69 | return np.nan_to_num(arr, **kwargs) 70 | 71 | @staticmethod 72 | def hasnan(arr): 73 | return np.any(np.isnan(arr)) 74 | 75 | @staticmethod 76 | def copy(arr): 77 | return np.copy(arr) 78 | 79 | @staticmethod 80 | def random(shape, **kwargs): 81 | import time 82 | import os 83 | 84 | # return np.random.rand(*shape) 85 | #rng = np.random.default_rng((int(time.time()* 10000000) + os.getpid())) 86 | np.random.seed((int(time.time() * os.getpid()) + os.getpid()) % 2**31) 87 | r = np.random.rand(*shape) 88 | return r 89 | #return rng.random(*shape).astype(float) 90 | 91 | @staticmethod 92 | def roll(arr, sft, axis=None): 93 | if type(sft) != list: 94 | sft = [sft] 95 | if axis is None: 96 | axis = list(range(len(sft))) 97 | sft = [int(s) for s in sft] 98 | return np.roll(arr, sft, axis=axis) 99 | 100 | @staticmethod 101 | def shift(arr, sft): 102 | return ndi.shift(arr, sft) 103 | 104 | @staticmethod 105 | def fftshift(arr): 106 | return np.fft.fftshift(arr) 107 | 108 | @staticmethod 109 | def ifftshift(arr): 110 | return np.fft.ifftshift(arr) 111 | 112 | @staticmethod 113 | def fft(arr): 114 | return fft.fftn(arr, norm='forward') 115 | 116 | @staticmethod 117 | def ifft(arr): 118 | return fft.ifftn(arr, norm='forward') 119 | 120 | @staticmethod 121 | def fftconvolve(arr1, kernel): 122 | return sig.fftconvolve(arr1, kernel, mode='same') 123 | 124 | @staticmethod 125 | def correlate(arr1, arr2, mode='same', method='auto'): 126 | return sig.correlate(arr1, arr2, mode, method) 127 | 128 | @staticmethod 129 | def where(cond, x, y): 130 | return np.where(cond, x, y) 131 | 132 | @staticmethod 133 | def dims(arr): 134 | # get array dimensions 135 | return arr.shape 136 | 137 | @staticmethod 138 | def absolute(arr): 139 | return np.absolute(arr) 140 | 141 | @staticmethod 142 | def sqrt(arr): 143 | return np.sqrt(arr) 144 | 145 | @staticmethod 146 | def square(arr): 147 | return np.square(arr) 148 | 149 | @staticmethod 150 | def sum(arr, axis=None): 151 | return np.sum(arr, axis) 152 | 153 | @staticmethod 154 | def real(arr): 155 | return np.real(arr) 156 | 157 | @staticmethod 158 | def imag(arr): 159 | return np.imag(arr) 160 | 161 | 
@staticmethod 162 | def amax(arr): 163 | return np.amax(arr) 164 | 165 | @staticmethod 166 | def unravel_index(indices, shape): 167 | return np.unravel_index(indices, shape) 168 | 169 | @staticmethod 170 | def ravel(arr): 171 | return np.ravel(arr) 172 | 173 | @staticmethod 174 | def maximum(arr1, arr2): 175 | return np.maximum(arr1, arr2) 176 | 177 | @staticmethod 178 | def argmax(arr, axis=None): 179 | return np.argmax(arr, axis) 180 | 181 | @staticmethod 182 | def ceil(arr): 183 | return np.ceil(arr) 184 | 185 | @staticmethod 186 | def fix(arr): 187 | return np.fix(arr) 188 | 189 | @staticmethod 190 | def round(val): 191 | return np.round(val) 192 | 193 | @staticmethod 194 | def print(arr, **kwargs): 195 | print(arr) 196 | 197 | @staticmethod 198 | def angle(arr): 199 | return np.angle(arr) 200 | 201 | @staticmethod 202 | def flip(arr, axis=None): 203 | return np.flip(arr, axis) 204 | 205 | @staticmethod 206 | def tile(arr, rep): 207 | return np.tile(arr, rep) 208 | 209 | @staticmethod 210 | def full(shape, fill_value, **kwargs): 211 | return np.full(shape, fill_value) 212 | 213 | @staticmethod 214 | def expand_dims(arr, axis): 215 | return np.expand_dims(arr, axis) 216 | 217 | @staticmethod 218 | def squeeze(arr): 219 | return np.squeeze(arr) 220 | 221 | @staticmethod 222 | def entropy(arr): 223 | return stats.entropy(arr) 224 | 225 | @staticmethod 226 | def gaussian_filter(arr, sigma, **kwargs): 227 | return ndi.gaussian_filter(arr, sigma, **kwargs) 228 | 229 | @staticmethod 230 | def median_filter(arr, size, **kwargs): 231 | return ndi.median_filter(arr, size) 232 | 233 | @staticmethod 234 | def uniform_filter(arr, size, **kwargs): 235 | return ndi.uniform_filter(arr, size) 236 | 237 | @staticmethod 238 | def binary_erosion(arr, **kwargs): 239 | return ndi.binary_erosion(arr, iterations=1) 240 | 241 | @staticmethod 242 | def center_of_mass(inarr): 243 | return ndi.center_of_mass(np.absolute(inarr)) 244 | 245 | @staticmethod 246 | def meshgrid(*xi): 247 | return np.meshgrid(*xi) 248 | 249 | @staticmethod 250 | def exp(arr): 251 | return np.exp(arr) 252 | 253 | @staticmethod 254 | def conj(arr): 255 | return np.conj(arr) 256 | 257 | @staticmethod 258 | def array_equal(arr1, arr2): 259 | return np.array_equal(arr1, arr2) 260 | 261 | @staticmethod 262 | def cos(arr): 263 | return np.cos(arr) 264 | 265 | @staticmethod 266 | def linspace(start, stop, num): 267 | return np.linspace(start, stop, num) 268 | 269 | @staticmethod 270 | def geomspace(start, stop, num): 271 | return np.geomspace(start, stop, num) 272 | 273 | @staticmethod 274 | def clip(arr, min, max=None): 275 | return np.clip(arr, min, max) 276 | 277 | @staticmethod 278 | def diff(arr, axis=None, prepend=0): 279 | return np.diff(arr, axis=axis, prepend=prepend) 280 | 281 | @staticmethod 282 | def gradient(arr, dx=1): 283 | return np.gradient(arr, dx) 284 | 285 | @staticmethod 286 | def argmin(arr, axis=None): 287 | return np.argmin(arr, axis) 288 | 289 | @staticmethod 290 | def take_along_axis(a, indices, axis): 291 | return np.take_along_axis(a, indices, axis) 292 | 293 | @staticmethod 294 | def moveaxis(arr, source, dest): 295 | return np.moveaxis(arr, source, dest) 296 | 297 | @staticmethod 298 | def lstsq(A, B): 299 | return np.linalg.lstsq(A, B, rcond=None) 300 | 301 | @staticmethod 302 | def zeros(shape): 303 | return np.zeros(shape) 304 | 305 | @staticmethod 306 | def indices(dims): 307 | return np.indices(dims) 308 | 309 | @staticmethod 310 | def concatenate(tup, axis=0): 311 | return np.concatenate(tup, axis) 312 | 313 
| @staticmethod 314 | def stack(tup): 315 | return np.stack(tup) 316 | 317 | @staticmethod 318 | def amin(arr): 319 | return np.amin(arr) 320 | 321 | @staticmethod 322 | def affine_transform(arr, matrix, order=3, offset=0): 323 | return ndi.affine_transform(arr, matrix, order=order, offset=offset, prefilter=True) 324 | 325 | @staticmethod 326 | def pad(arr, padding): 327 | return np.pad(arr, padding) 328 | 329 | @staticmethod 330 | def histogram2d(arr1, arr2, bins): 331 | return np.histogram2d(np.ravel(arr1), np.ravel(arr2), bins)[0] 332 | 333 | @staticmethod 334 | def log(arr): 335 | return np.log(arr) 336 | 337 | @staticmethod 338 | def log10(arr): 339 | return np.log10(arr) 340 | 341 | @staticmethod 342 | def xlogy(x, y=None): 343 | if y is None: 344 | y = x 345 | return np.xlogy(x, y) 346 | 347 | @staticmethod 348 | def mean(arr): 349 | return np.mean(arr) 350 | 351 | @staticmethod 352 | def median(arr): 353 | return np.median(arr) 354 | 355 | @staticmethod 356 | def clean_default_mem(): 357 | pass 358 | -------------------------------------------------------------------------------- /src/cohere_core/controller/AI_guess.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | import numpy as np 8 | import os 9 | import cohere_core.utilities.utils as ut 10 | import math 11 | from typing import Union 12 | import tensorflow as tf 13 | from tensorflow.keras import backend as K 14 | from tensorflow.keras.models import Model, load_model 15 | from tensorflow.keras.activations import sigmoid, tanh 16 | 17 | 18 | class Mymodel: 19 | __model = None 20 | __amp_layer_model = None 21 | __ph_layer_model = None 22 | 23 | @staticmethod 24 | def get_model(model_file): 25 | """ Static access method. """ 26 | if Mymodel.__model == None: 27 | Mymodel(model_file) 28 | return Mymodel.__amp_layer_model, Mymodel.__ph_layer_model 29 | 30 | def __init__(self, model_file): 31 | """ Virtually private constructor. 
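Loads the trained network once and builds sub-models that expose the outputs of the 'amp' and 'phi' layers.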
""" 32 | if Mymodel.__model != None: 33 | raise Exception("This class is a singleton!") 34 | else: 35 | # load trained network 36 | Mymodel.__model = load_model( 37 | model_file, 38 | custom_objects={ 39 | 'tf': tf, 40 | 'loss_comb2_scale': loss_comb2_scale, 41 | 'sigmoid': sigmoid, 42 | 'tanh': tanh, 43 | 'math': math, 44 | 'combine_complex': combine_complex, 45 | 'get_mask': get_mask, 46 | 'ff_propagation': ff_propagation 47 | }) 48 | model = Mymodel.__model 49 | # get the outputs from amplitude and phase layers 50 | Mymodel.__amp_layer_model = Model(inputs=model.input, 51 | outputs=model.get_layer('amp').output) 52 | Mymodel.__ph_layer_model = Model(inputs=model.input, 53 | outputs=model.get_layer('phi').output) 54 | 55 | 56 | def match_oversample_diff( 57 | diff: np.ndarray, 58 | fr: Union[list, np.ndarray, None] = None, 59 | to: Union[list, np.ndarray, None] = None, 60 | shape: Union[list, np.ndarray, None] = [64, 64, 64], 61 | ): 62 | """ resize diff to match oversample ratios 63 | diff = diffraction pattern 64 | fr = from oversample ratio 65 | to = to oversample ratio 66 | shape = output shape 67 | """ 68 | # adjustment needed to match oversample ratio 69 | change = [np.round(f / t).astype('int32') for f, t in zip(fr, to)] 70 | change = [np.max([1, c]) for c in change] 71 | diff = ut.binning(diff, change) 72 | # crop diff to match output shape 73 | shape_arr = np.array(shape) 74 | diff_shape_arr = np.array(diff.shape) 75 | pad_value1 = shape_arr // 2 - diff_shape_arr // 2 76 | pad_value2 = shape_arr - diff_shape_arr -pad_value1 77 | pad = [[pad_value1[0], pad_value2[0]], [pad_value1[1], pad_value2[1]], 78 | [pad_value1[2], pad_value2[2]]] 79 | 80 | output = ut.adjust_dimensions(diff, pad, next_fast_len=False) 81 | return output, diff.shape 82 | 83 | 84 | def shift_com(amp, phi): 85 | from scipy.ndimage.measurements import center_of_mass as com 86 | from scipy.ndimage.interpolation import shift 87 | 88 | h, w, t = 64, 64, 64 89 | coms = com(amp) 90 | deltas = (int(round(h / 2 - coms[0])), int(round(w / 2 - coms[1])), 91 | int(round(t / 2 - coms[2]))) 92 | amp_shift = shift(amp, shift=deltas, mode='wrap') 93 | phi_shift = shift(phi, shift=deltas, mode='wrap') 94 | return amp_shift, phi_shift 95 | 96 | 97 | def post_process(amp, phi, th=0.1, uw=0): 98 | if uw == 1: 99 | phi = np.unwrap(np.unwrap(np.unwrap(phi,0),1),2) 100 | 101 | mask = np.where(amp > th, 1, 0) 102 | amp_out = mask * amp 103 | phi_out = mask * phi 104 | 105 | mean_phi = np.sum(phi_out) / np.sum(mask) 106 | phi_out = phi_out - mean_phi 107 | 108 | amp_out, phi_out = shift_com(amp_out, phi_out) 109 | 110 | mask = np.where(amp_out > th, 1, 0) 111 | amp_out = mask * amp_out 112 | phi_out = mask * phi_out 113 | return amp_out, phi_out 114 | 115 | 116 | # funcions needed in tensorflow model 117 | @tf.function 118 | def combine_complex(amp, phi): 119 | import tensorflow as tf 120 | output = tf.cast(amp, tf.complex64) * tf.exp( 121 | 1j * tf.cast(phi, tf.complex64)) 122 | return output 123 | 124 | 125 | @tf.function 126 | def get_mask(input): 127 | import tensorflow as tf 128 | 129 | mask = tf.where(input >= 0.1, tf.ones_like(input), tf.zeros_like(input)) 130 | return mask 131 | 132 | 133 | @tf.function 134 | def loss_comb2_scale(Y_true, Y_pred): 135 | Y_pred = Y_pred / ( 136 | tf.math.reduce_max(Y_pred, axis=(1, 2, 3), keepdims=True) + 137 | 1e-6) * tf.math.reduce_max(Y_true, axis=(1, 2, 3), keepdims=True) 138 | loss_1 = tf.math.sqrt(loss_sq(Y_true, Y_pred)) 139 | loss_2 = loss_pcc(Y_true, Y_pred) 140 | a1 = 1 141 | a2 = 1 
142 | loss_value = (a1 * loss_1 + a2 * loss_2) / (a1 + a2) 143 | return loss_value 144 | 145 | 146 | @tf.function 147 | def loss_sq(Y_true, Y_pred): 148 | top = tf.reduce_sum(tf.math.square(Y_pred - Y_true)) 149 | bottom = tf.reduce_sum(tf.math.square(Y_true)) 150 | loss_value = tf.sqrt(top / bottom) 151 | return loss_value 152 | 153 | 154 | @tf.function 155 | def loss_pcc(Y_true, Y_pred): 156 | pred = Y_pred - tf.reduce_mean(Y_pred) 157 | true = Y_true - tf.reduce_mean(Y_true) 158 | 159 | top = tf.reduce_sum(pred * true) 160 | bottom = tf.math.sqrt(tf.reduce_sum(pred**2) * tf.reduce_sum(true**2)) 161 | loss_value = 1 - top / bottom 162 | return loss_value 163 | 164 | 165 | @tf.function 166 | def ff_propagation(data): 167 | ''' 168 | diffraction. Assume same x and y lengthss and uniform sampling 169 | data: source plane field 170 | 171 | ''' 172 | diff = _fourier_transform(data) 173 | 174 | # far-field amplitude 175 | intensity = tf.math.abs(diff) 176 | intensity = tf.cast(intensity, tf.float32) 177 | return intensity 178 | 179 | 180 | @tf.function 181 | # 3D fourier transform 182 | def _fourier_transform(input): 183 | import tensorflow as tf 184 | # fft3d transform with channel unequal to 1 185 | perm_input = K.permute_dimensions(input, pattern=[4, 0, 1, 2, 3]) 186 | perm_Fr = tf.signal.fftshift(tf.signal.fft3d( 187 | tf.signal.ifftshift(tf.cast(perm_input, tf.complex64), 188 | axes=[-3, -2, -1])), 189 | axes=[-3, -2, -1]) 190 | Fr = K.permute_dimensions(perm_Fr, pattern=[1, 2, 3, 4, 0]) 191 | return Fr 192 | 193 | 194 | def run_AI(data, model_file, dir): 195 | """ 196 | Runs AI process. 197 | 198 | Parameters 199 | ---------- 200 | data : ndarray 201 | data array 202 | 203 | model_file : str 204 | file name containing training model 205 | 206 | dir : str 207 | a parent directory that holds the reconstructions. It can be experiment directory or scan directory. 208 | Result of AI will be saved in dir/results_AI. 209 | 210 | Returns 211 | ------- 212 | nothing 213 | """ 214 | print('AI guess') 215 | 216 | # prepare data to make the oversampling ratio ~3 217 | wos = 3.0 218 | orig_os = ut.get_oversample_ratio(data) 219 | # match oversampling to wos 220 | wanted_os = [wos, wos, wos] 221 | # match diff os 222 | new_data, inshape = match_oversample_diff(data, orig_os, wanted_os) 223 | new_data = new_data[np.newaxis] 224 | 225 | amp_layer_model, ph_layer_model = Mymodel.get_model(model_file) 226 | 227 | preds_amp = amp_layer_model.predict(new_data, verbose=1) 228 | 229 | preds_phi = ph_layer_model.predict(new_data, verbose=1) 230 | 231 | preds_amp, preds_phi = post_process(preds_amp[0, ..., 0], 232 | preds_phi[0, ..., 0], 233 | th=0.1, 234 | uw=0) 235 | 236 | pred_obj = preds_amp * np.exp(1j * preds_phi) 237 | 238 | # match object size with the input data 239 | pred_obj = ut.resample(pred_obj, inshape) 240 | 241 | pad_value = np.array(data.shape) // 2 - np.array(pred_obj.shape) // 2 242 | pad = [[pad_value[0], pad_value[0]], [pad_value[1], pad_value[1]], 243 | [pad_value[2], pad_value[2]]] 244 | # not passing in 'next_fst_len' switch, so the dimensions will not be adjusted for fast processing of 245 | # Fourier transforms 246 | guess = ut.adjust_dimensions(pred_obj, pad, next_fast_len=False) 247 | 248 | np.save(ut.join(dir, 'image.npy'), guess) 249 | 250 | 251 | def start_AI(pars, datafile, dir): 252 | """ 253 | Starts AI process if all conditionas are met. 
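The conditions are that the 'AI_trained_model' parameter is present in pars, the configured model file exists, and the data file (tif or npy) can be loaded.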
254 | 255 | Parameters 256 | ---------- 257 | pars : dict 258 | parameters for reconstruction 259 | 260 | datafile : str 261 | file name containing data for reconstruction 262 | 263 | dir : str 264 | a parent directory that holds the reconstructions. It can be experiment directory or scan directory. 265 | Result of AI will be saved in dir/results_AI. 266 | 267 | Returns 268 | ------- 269 | ai_dir : str 270 | directory where results were saved 271 | """ 272 | if 'AI_trained_model' not in pars: 273 | print ('no AI_trained_model in config') 274 | return None 275 | if not os.path.isfile(pars['AI_trained_model']): 276 | print(f'there is no file {pars["AI_trained_model"]}') 277 | return None 278 | 279 | if datafile.endswith('tif') or datafile.endswith('tiff'): 280 | try: 281 | data = ut.read_tif(datafile) 282 | except: 283 | print('could not load data file', datafile) 284 | return None 285 | elif datafile.endswith('npy'): 286 | try: 287 | data = np.load(datafile) 288 | except: 289 | print('could not load data file', datafile) 290 | return None 291 | else: 292 | print('no data file found') 293 | return None 294 | 295 | # The results will be stored in the directory /AI_guess 296 | ai_dir = ut.join(dir, 'results_AI') 297 | if os.path.exists(ai_dir): 298 | pass 299 | else: 300 | os.makedirs(ai_dir) 301 | 302 | run_AI(data, pars['AI_trained_model'], ai_dir) 303 | return ai_dir -------------------------------------------------------------------------------- /src/cohere_core/lib/cplib.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | from cohere_core.lib.cohlib import cohlib 8 | import cupy as cp 9 | import numpy as np 10 | import cupyx.scipy.stats as stats 11 | import cupyx.scipy.ndimage as sc 12 | import cupyx.scipy.special as sp 13 | import cupyx.scipy.signal as sig 14 | import cupyx.scipy.fft as fft 15 | 16 | 17 | class cplib(cohlib): 18 | @staticmethod 19 | def array(obj): 20 | return cp.array(obj) 21 | 22 | @staticmethod 23 | def dot(arr1, arr2): 24 | return cp.dot(arr1, arr2) 25 | 26 | @staticmethod 27 | def cross(arr1, arr2): 28 | return cp.cross(arr1, arr2) 29 | 30 | @staticmethod 31 | def set_device(dev_id): 32 | if dev_id != -1: 33 | cp.cuda.Device(dev_id).use() 34 | 35 | @staticmethod 36 | def to_numpy(arr): 37 | return cp.asnumpy(arr) 38 | 39 | @staticmethod 40 | def from_numpy(arr, **kwargs): 41 | return cp.array(arr) 42 | 43 | @staticmethod 44 | def save(filename, arr): 45 | cp.save(filename, arr) 46 | 47 | @staticmethod 48 | def load(filename, **kwargs): 49 | return cp.load(filename, allow_pickle=True) 50 | 51 | @staticmethod 52 | def dtype(arr): 53 | return arr.dtype 54 | 55 | @staticmethod 56 | def astype(arr, dtype): 57 | return arr.astype(dtype=dtype) 58 | 59 | @staticmethod 60 | def reshape(arr, shape): 61 | return cp.reshape(arr, shape) 62 | 63 | @staticmethod 64 | def size(arr): 65 | return arr.size 66 | 67 | @staticmethod 68 | def next_fast_len(target): 69 | return fft.next_fast_len(target) 70 | 71 | @staticmethod 72 | def hasnan(arr): 73 | return cp.any(cp.isnan(arr)) 74 | 75 | @staticmethod 76 | def nan_to_num(arr, **kwargs): 77 | return cp.nan_to_num(arr, **kwargs) 78 | 79 | @staticmethod 80 | def copy(arr): 81 | return cp.copy(arr) 82 | 83 | @staticmethod 84 | def 
random(shape, **kwargs): 85 | import time 86 | import os 87 | 88 | seed = np.array([time.time() * 10000 * os.getpid(), os.getpid()]) 89 | rs = cp.random.RandomState(seed=seed) 90 | return cp.random.random(shape, dtype=cp.float32) + 1j * cp.random.random(shape, dtype=cp.float32) 91 | 92 | @staticmethod 93 | def roll(arr, sft, axis=None): 94 | if axis is None: 95 | axis = list(range(len(sft))) 96 | if type(sft) != list: 97 | sft = [sft] 98 | sft = [int(s) for s in sft] 99 | return cp.roll(arr, sft, axis=axis) 100 | 101 | @staticmethod 102 | def shift(arr, sft): 103 | return sc.fourier_shift(arr, sft) 104 | 105 | @staticmethod 106 | def fftshift(arr): 107 | return fft.fftshift(arr) 108 | 109 | @staticmethod 110 | def ifftshift(arr): 111 | return fft.ifftshift(arr) 112 | 113 | @staticmethod 114 | def fft(arr, norm='forward'): 115 | return fft.fftn(arr, norm=norm) 116 | 117 | @staticmethod 118 | def ifft(arr, norm='forward'): 119 | return fft.ifftn(arr, norm=norm) 120 | 121 | @staticmethod 122 | def fftconvolve(arr1, kernel): 123 | return sig.fftconvolve(arr1, kernel, mode='same') 124 | 125 | @staticmethod 126 | def correlate(arr1, arr2, mode='same', method='fft'): 127 | return sig.correlate(arr1, arr2, mode, method) 128 | 129 | @staticmethod 130 | def where(cond, x, y): 131 | return cp.where(cond, x, y) 132 | 133 | @staticmethod 134 | def dims(arr): 135 | # get array dimensions 136 | return arr.shape 137 | 138 | @staticmethod 139 | def absolute(arr): 140 | return cp.absolute(arr) 141 | 142 | @staticmethod 143 | def sqrt(arr): 144 | return cp.sqrt(arr) 145 | 146 | @staticmethod 147 | def square(arr): 148 | return cp.square(arr) 149 | 150 | @staticmethod 151 | def sum(arr, axis=None): 152 | sm = cp.sum(arr, axis) 153 | # if axis is None: 154 | # return sm.tolist() 155 | return sm 156 | 157 | @staticmethod 158 | def real(arr): 159 | return cp.real(arr) 160 | 161 | @staticmethod 162 | def imag(arr): 163 | return cp.imag(arr) 164 | 165 | @staticmethod 166 | def amax(arr): 167 | return cp.amax(arr) 168 | 169 | @staticmethod 170 | def amin(arr): 171 | return cp.amin(arr) 172 | 173 | @staticmethod 174 | def argmax(arr, axis=None): 175 | return cp.argmax(arr, axis) 176 | 177 | @staticmethod 178 | def argmin(arr, axis=None): 179 | return cp.argmin(arr, axis) 180 | 181 | @staticmethod 182 | def unravel_index(indices, shape): 183 | return cp.unravel_index(indices, shape) 184 | 185 | @staticmethod 186 | def ravel(arr): 187 | return cp.ravel(arr) 188 | 189 | @staticmethod 190 | def maximum(arr1, arr2): 191 | return cp.maximum(arr1, arr2) 192 | 193 | @staticmethod 194 | def ceil(arr): 195 | return cp.ceil(arr) 196 | 197 | @staticmethod 198 | def fix(arr): 199 | return cp.fix(arr) 200 | 201 | @staticmethod 202 | def round(val): 203 | return cp.round(val) 204 | 205 | @staticmethod 206 | def print(arr, **kwargs): 207 | print(arr) 208 | 209 | @staticmethod 210 | def angle(arr): 211 | return cp.angle(arr) 212 | 213 | @staticmethod 214 | def flip(arr, axis=None): 215 | return cp.flip(arr, axis) 216 | 217 | @staticmethod 218 | def tile(arr, rep): 219 | return cp.tile(arr, rep) 220 | 221 | @staticmethod 222 | def full(shape, fill_value, **kwargs): 223 | return cp.full(shape, fill_value) 224 | 225 | @staticmethod 226 | def expand_dims(arr, axis): 227 | return cp.expand_dims(arr, axis) 228 | 229 | @staticmethod 230 | def squeeze(arr): 231 | return cp.squeeze(arr) 232 | 233 | @staticmethod 234 | def entropy(arr): 235 | return stats.entropy(arr) 236 | 237 | @staticmethod 238 | def gaussian_filter(arr, sigma, **kwargs): 
239 | return sc.gaussian_filter(arr, sigma, **kwargs) 240 | 241 | @staticmethod 242 | def median_filter(arr, size, **kwargs): 243 | return sc.median_filter(arr, size) 244 | 245 | @staticmethod 246 | def uniform_filter(arr, size, **kwargs): 247 | return sc.uniform_filter(arr, size) 248 | 249 | @staticmethod 250 | def binary_erosion(arr, **kwargs): 251 | return sc.binary_erosion(arr, iterations=1) 252 | 253 | @staticmethod 254 | def center_of_mass(inarr): 255 | t = sc.center_of_mass(cp.absolute(inarr)) 256 | return t 257 | 258 | @staticmethod 259 | def meshgrid(*xi): 260 | return cp.meshgrid(*xi) 261 | 262 | @staticmethod 263 | def exp(arr): 264 | return cp.exp(arr) 265 | 266 | @staticmethod 267 | def conj(arr): 268 | return cp.conj(arr) 269 | 270 | @staticmethod 271 | def array_equal(arr1, arr2): 272 | return cp.array_equal(arr1, arr2) 273 | 274 | @staticmethod 275 | def cos(arr): 276 | return cp.cos(arr) 277 | 278 | @staticmethod 279 | def linspace(start, stop, num): 280 | return cp.linspace(start, stop, num) 281 | 282 | @staticmethod 283 | def geomspace(start, stop, num): 284 | return cp.geomspace(start, stop, num) 285 | 286 | @staticmethod 287 | def clip(arr, min, max=None): 288 | return cp.clip(arr, min, max) 289 | 290 | @staticmethod 291 | def diff(arr, axis=None, prepend=0): 292 | return cp.diff(arr, axis=axis, prepend=prepend) 293 | 294 | @staticmethod 295 | def gradient(arr, dx=1): 296 | return cp.gradient(arr, dx) 297 | 298 | @staticmethod 299 | def take_along_axis(a, indices, axis): 300 | return cp.take_along_axis(a, indices, axis) 301 | 302 | @staticmethod 303 | def moveaxis(arr, source, dest): 304 | return cp.moveaxis(arr, source, dest) 305 | 306 | @staticmethod 307 | def lstsq(A, B): 308 | return cp.linalg.lstsq(A, B, rcond=None) 309 | 310 | @staticmethod 311 | def zeros(shape): 312 | return cp.zeros(shape) 313 | 314 | @staticmethod 315 | def indices(dims): 316 | return cp.indices(dims) 317 | 318 | @staticmethod 319 | def concatenate(tup, axis=0): 320 | return cp.concatenate(tup, axis) 321 | 322 | @staticmethod 323 | def stack(tup): 324 | return cp.stack(tup) 325 | 326 | @staticmethod 327 | def affine_transform(arr, matrix, order=3, offset=0): 328 | return sc.affine_transform(arr, matrix, order=order, offset=offset, prefilter=True) 329 | 330 | @staticmethod 331 | def pad(arr, padding): 332 | return cp.pad(arr, padding) 333 | 334 | @staticmethod 335 | def histogram2d(arr1, arr2, bins): 336 | return cp.histogram2d(cp.ravel(arr1), cp.ravel(arr2), bins)[0] 337 | 338 | # @staticmethod 339 | # def calc_nmi(hgram): 340 | # h0 = stats.entropy(cp.sum(hgram, axis=0)) 341 | # h1 = stats.entropy(cp.sum(hgram, axis=1)) 342 | # h01 = stats.entropy(cp.reshape(hgram, -1)) 343 | # return (h0 + h1) / h01 344 | # 345 | @staticmethod 346 | def log(arr): 347 | return cp.log(arr) 348 | 349 | @staticmethod 350 | def log10(arr): 351 | return cp.log10(arr) 352 | 353 | @staticmethod 354 | def xlogy(x, y=None): 355 | if y is None: 356 | y = x 357 | return sp.xlogy(x, y) 358 | 359 | @staticmethod 360 | def mean(arr): 361 | return cp.mean(arr) 362 | 363 | @staticmethod 364 | def median(arr): 365 | return cp.median(arr) 366 | 367 | # @staticmethod 368 | # def calc_ehd(hgram): 369 | # n = hgram.shape[0] * 1j 370 | # x, y = cp.mgrid[0:1:n, 0:1:n] 371 | # return cp.sum(hgram * cp.abs(x - y)) / cp.sum(hgram) 372 | # 373 | # @staticmethod 374 | # def integrate_jacobian(jacobian, dx=1): 375 | # nx, ny, nz, _, _ = jacobian.shape 376 | # u = cp.zeros((nx, ny, nz, 3)) 377 | # for ax in range(3): 378 | # u = u + dx * 
cp.cumsum(jacobian[:, :, :, ax, :], axis=ax) 379 | # return u 380 | # 381 | @staticmethod 382 | def clean_default_mem(): 383 | cp._default_memory_pool.free_all_blocks() 384 | cp._default_pinned_memory_pool.free_all_blocks() 385 | -------------------------------------------------------------------------------- /docs/source/cohere_experiment.rst: -------------------------------------------------------------------------------- 1 | ================= 2 | Cohere Experiment 3 | ================= 4 | | The experiment executed at the beamline concludes in a collection of data files and metadata for many scans. It is typical that a number of scans are selected and grouped together for analysis. The analysis of the selected scans constitutes a cohere experiment. 5 | | In order to group the files associated with a cohere experiment in a structured manner, the user scripts create a dedicated space. This space, the cohere experiment directory, is a sub-directory of the working directory. The name of the cohere experiment directory is descriptive, as it contains an ID and the scan ranges; for example, with an experiment ID of "ABC" and scans "56-78", the experiment directory is ABC_56-78. 6 | | A cohere experiment can be configured for different cases. The user can request to run the process separately for each scan or scan range. The user can also set the experiment to multipeak. Refer to :ref:`config_main` for a description of the configuration parameters that set up a cohere experiment. 7 | | Below we show the experiment directory structure for various use cases. 8 | 9 | Single reconstruction 10 | +++++++++++++++++++++ 11 | | The user starts with the experiment_dir containing a conf subdirectory with the configuration files. 12 | | The other subdirectories of the tree are created during execution of the user scripts. Refer to the :ref:`api_cohere_ui` page for a description of the scripts and how to use them, and to the :doc:`configuration` page for the configuration files and parameters. 13 | | 14 | | \| 15 | | \|--conf 16 | | \|--config 17 | | \|--config_prep 18 | | \|--config_data 19 | | \|--config_rec 20 | | \|--config_disp 21 | | \|--preprocessed_data 22 | | \|--phasing_data 23 | | \|--results_phasing 24 | | \|--results_viz 25 | | 26 | | Below is the sequence of events that produces the cohere experiment directory tree. 27 | 28 | - The experiment should be set up with a "conf" subdirectory containing the configuration files. The user can create it manually or use cohere_gui to create the initial cohere experiment space. 29 | - The script "beamline_preprocess.py" creates the "prep_data.tif" file in the "preprocessed_data" subdirectory. This is a file ready to be formatted. 30 | - The script "standard_preprocess.py" reads the "prep_data.tif" file, formats it, and saves the result in the "phasing_data" subdirectory as the "data.tif" file. This file is ready for reconstruction. 31 | - The "run_reconstruction.py" script reads the "data.tif" file and runs phasing. The results are stored in the "results_phasing" subdirectory in the "image.npy" file, along with "support.npy" and "coherence.npy" if the partial coherence feature is configured. 32 | - The "beamline_visualization.py" script loads the arrays from the "results_phasing" directory, processes the image, and saves it in the "results_viz" directory. 
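| For orientation, a typical end-to-end session that produces this tree could look like the sketch below. This is illustrative only: it assumes each script accepts the cohere experiment directory as its argument; refer to :ref:`api_cohere_ui` for the actual script interfaces and options.

::

    python beamline_preprocess.py <experiment_dir>
    python standard_preprocess.py <experiment_dir>
    python run_reconstruction.py <experiment_dir>
    python beamline_visualization.py <experiment_dir>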
33 | 34 | | After running all the scripts the experiment will have the following files: 35 | | 36 | | \| 37 | | \|--conf 38 | | \|--config 39 | | \|--config_prep 40 | | \|--config_data 41 | | \|--config_rec 42 | | \|--config_disp 43 | | \|--preprocessed_data 44 | | \|--prep_data.tif 45 | | \|--phasing_data 46 | | \|--data.tif 47 | | \|--results_phasing 48 | | \|--image.npy 49 | | \|--support.npy 50 | | \|--image.tif 51 | | \|--errors.npy 52 | | \|--errors.txt 53 | | \|--metrics.txt 54 | | \|--results_viz 55 | | \|--image.vts 56 | | \|--support.viz 57 | 58 | Multiple reconstruction 59 | +++++++++++++++++++++++ 60 | | If running multiple reconstructions which is driven by configuration (i.e. the "config_rec" file contains "reconstructions" parameter set to a number greater than 1) the "results" directory will have subdirectories reflecting the runs. The subdirectories are named by the number. The results_phasing will contain the numbered subdirectories, each with image.npy and support.npy files, and results_viz subdirectories, each with image.vts and support.vts files. 61 | | Below is an example of "results" directory structure when running three reconstructions: 62 | | 63 | | \| 64 | | \|--results_phasing 65 | | \|--0 66 | | \|--image.npy 67 | | \|--support.npy 68 | | \|--image.tif 69 | | \|--errors.npy 70 | | \|--errors.txt 71 | | \|--metrics.txt 72 | | \|--1 73 | | \|--image.npy 74 | | \|--support.npy 75 | | \|--image.tif 76 | | \|--errors.npy 77 | | \|--errors.txt 78 | | \|--metrics.txt 79 | | \|--2 80 | | \|--image.npy 81 | | \|--support.npy 82 | | \|--image.tif 83 | | \|--errors.npy 84 | | \|--errors.txt 85 | | \|--metrics.txt 86 | | \|--results_viz 87 | | \|--0 88 | | \|--image.vts 89 | | \|--support.vts 90 | | \|--1 91 | | \|--image.vts 92 | | \|--support.vts 93 | | \|--2 94 | | \|--image.vts 95 | | \|--support.vts 96 | 97 | Genetic Algorithm 98 | +++++++++++++++++ 99 | | When running GA, only the best results are saved. Phasing results are saved in results_phasing directory, and visualization results are saved in results_viz. 100 | 101 | Separate scans 102 | ++++++++++++++ 103 | | When the cohere experiment is configured as separate reconstruction for each scan, the experiment directory will contain a subdirectory for each scan. This use case is configured in "config_prep" file by setting parameter "separate_scans" to True. Each scan directory is a concatenation of "scan", underscore, and the scan number. Each of the scan subdirectories will have preprocessed_data, phasing_data, results_phasing, and results_viz subdirectories. The configuration is common for all scans. If running multiple reconstructions or GA, the directory structure in each scan directory will reflect it, as described in above sections. 104 | | This is also mechanism when the cohere experiment is configured as separate scan ranges. The postscript after "scan" literal will end with scan range instead of scan. 105 | | Below is an example of directory structure for separate scans. 
106 | | 107 | | \| 108 | | \|--conf 109 | | \|--config 110 | | \|--config_prep 111 | | \|--config_data 112 | | \|--config_rec 113 | | \|--config_disp 114 | | \|--scan_54 115 | | \|--preprocessed_data 116 | | \|--prep_data.tif 117 | | \|--phasing_data 118 | | \|--data.tif 119 | | \|--results_phasing 120 | | \|--image.npy 121 | | \|--support.npy 122 | | \|--image.tif 123 | | \|--errors.npy 124 | | \|--errors.txt 125 | | \|--metrics.txt 126 | | \|--results_viz 127 | | \|--image.vts 128 | | \|--support.vts 129 | | \|--scan_57 130 | | \|--preprocessed_data 131 | | \|--prep_data.tif 132 | | \|--phasing_data 133 | | \|--data.tif 134 | | \|--results_phasing 135 | | \|--image.npy 136 | | \|--support.npy 137 | | \|--image.tif 138 | | \|--errors.npy 139 | | \|--errors.txt 140 | | \|--metrics.txt 141 | | \|--results_viz 142 | | \|--image.vts 143 | | \|--support.vts 144 | 145 | Alternate configuration 146 | +++++++++++++++++++++++ 147 | | The "run_rec.py" script supports feature of running reconstruction with alternate configuration(s). Each alternate configuration must be named with arbitrary postfix (rec_id), preceded by "confic_rec" and underscore. This file should be created in the conf subdirectory. 148 | | After running the "run_rec" script with this option, the results will be saved in the results_phasing_ directory. 149 | | Below is an example of directory structure with alternate configuration. 150 | | 151 | | \| 152 | | \|--conf 153 | | \|--config 154 | | \|--config_prep 155 | | \|--config_data 156 | | \|--config_rec 157 | | \|--config_rec_aa 158 | | \|--config_rec_bb 159 | | \|--config_disp 160 | | \|--prepprocessed_data 161 | | \|--prep_data.tif 162 | | \|--phasing_data 163 | | \|--data.tif 164 | | \|--results_phasing 165 | | \|--image.npy 166 | | \|--support.npy 167 | | \|--image.tif 168 | | \|--errors.npy 169 | | \|--errors.txt 170 | | \|--metrics.txt 171 | | \|--results_viz 172 | | \|--image.vts 173 | | \|--support.viz 174 | | \|--results_phasing_aa 175 | | \|--image.npy 176 | | \|--support.npy 177 | | \|--image.tif 178 | | \|--errors.npy 179 | | \|--errors.txt 180 | | \|--metrics.txt 181 | | \|--results_viz_aa 182 | | \|--image.vts 183 | | \|--support.viz 184 | | \|--results_phasing_bb 185 | | \|--image.npy 186 | | \|--support.npy 187 | | \|--image.tif 188 | | \|--errors.npy 189 | | \|--errors.txt 190 | | \|--metrics.txt 191 | | \|--results_viz_bb 192 | | \|--image.vts 193 | | \|--support.viz 194 | 195 | -------------------------------------------------------------------------------- /src/cohere_core/data/standard_preprocess.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | 8 | """ 9 | Formats data for reconstruction according to configuration. 10 | """ 11 | 12 | import os 13 | import numpy as np 14 | import cohere_core.data.alien_tools as at 15 | import cohere_core.utilities.utils as ut 16 | import cohere_core.utilities.config_verifier as ver 17 | 18 | 19 | __author__ = "Barbara Frosik" 20 | __copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC." 21 | __docformat__ = 'restructuredtext en' 22 | __all__ = ['prep', 23 | ] 24 | 25 | 26 | def prep(beamline_full_datafile_name, **kwargs): 27 | """ 28 | This function formats data for reconstruction and saves it in data.tif file. 
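The formatted array is saved as data.tif in the data directory (by default the 'phasing_data' directory derived from the preprocessed data location), and the possibly updated parameter dictionary is returned.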
The preparation consists of the following steps: 29 | - removing the aliens which is effect of interference. The removal can be done by setting regions or mask file that requires manual inspection of the data file. The removal can be automatic with the AutoAlien1 algorithm. 30 | - clearing the noise, where values below an amplitude threshold are set to zero. The threshold can be set as a parameter or auto determined. 31 | - amplitudes are set to sqrt 32 | - cropping and padding. If the crop-pad is negative in any dimension, the array is cropped in this dimension. The cropping is followed by padding in the dimensions with positive values. After adjusting, the dimensions are adjusted further to find the smallest dimension that is supported by opencl library (multiplier of 2, 3, and 5). 33 | - centering - finding the greatest amplitude and locating it at a center of array. If shift center is defined, the center will be shifted accordingly. 34 | - binning - adding amplitudes of several consecutive points. Binning can be done in any dimension. 35 | 36 | :param beamline_full_datafile_name: full path of tif file containing beamline preprocessed data 37 | :param kwargs: 38 | data_dir : str 39 | directory where prepared data will be saved, default /phasing_data 40 | alien_alg : str 41 | Acronym of method used to remove aliens. Possible options are: ‘block_aliens’, ‘alien_file’, and ‘AutoAlien1’. The ‘block_aliens’ algorithm will zero out defined blocks, ‘alien_file’ method will use given file as a mask, and ‘AutoAlien1’ will use auto mechanism to remove aliens. Each of these algorithms require different parameters 42 | aliens : list 43 | Needed when the ‘block_aliens’ method is configured. Used when the data contains regions with intensity produced by interference. The regions needs to be zeroed out. The aliens can be defined as regions each defined by coordinates of starting point, and ending point (i.e. [[xb0,yb0,zb0,xe0,ye0,ze0],[xb1,yb1,zb1,xe1,ye1,ze1],…[xbn,ybn,zbn,xen,yen,zen]] ). 44 | alien_file : str 45 | Needed when the ‘alien_file’ method is configured. User can produce a file in npy format that contains table of zeros and ones, where zero means to set the pixel to zero, and one to leave it. 46 | AA1_size_threshold : float 47 | Used in the ‘AutoAliens1’ method. If not given it will default to 0.01. The AutoAlien1 algorithm will calculate relative sizes of all clusters with respect to the biggest cluster. The clusters with relative size smaller than the given threshold will be possibly deemed aliens. It also depends on asymmetry. 48 | AA1_asym_threshold : float 49 | Used in the ‘AutoAliens1’ method. If not given it will default to 1.75. The AutoAlien1 algorithm will calculate average asymmetry of all clusters. The clusters with average asymmetry greater than the given threshold will be possibly deemed aliens. It also depends on relative size. 50 | AA1_min_pts : int 51 | Used in the ‘AutoAliens1’ method. If not given it will default to 5. Defines minimum non zero points in neighborhood to count the area of data as cluster. 52 | AA1_eps : float 53 | Used in the ‘AutoAliens1’ method. If not given it will default to 1.1. Used in the clustering algorithm. 54 | AA1_amp_threshold : float 55 | Mandatory in the ‘AutoAliens1’ method. Used to zero data points below that threshold. 56 | AA1_save_arrs : boolean 57 | Used in the ‘AutoAliens1’ method, optional. If given and set to True multiple results of alien analysis will be saved in files. 
58 | AA1_expandcleanedsigma : float 59 | Used in the ‘AutoAliens1’ method, optional. If given the algorithm will apply last step of cleaning the data using the configured sigma. 60 | intensity_threshold : float 61 | Mandatory, min data threshold. Intensity values below this are set to 0. The threshold is applied after removing aliens. 62 | crop_pad : list 63 | Optional, a list of number to adjust the size at each side of 3D data. If number is positive, the array will be padded. If negative, cropped. The parameters correspond to [x left, x right, y left, y right, z left, z right] The final dimensions will be adjusted up to the good number for the FFT which also is compatible with opencl supported dimensions powers of 2 multipled by powers of 3 multiplied by powers of5 64 | shift : list 65 | Optional, enter center shift list the array maximum is centered before binning, and moved according to shift, [0,0,0] has no effect 66 | binning : list 67 | Optional, a list that defines binning values in respective dimensions, [1,1,1] has no effect. 68 | no_center_max : boolean, defaults to False 69 | True if the max is not centered 70 | next_fast_len : boolean, defaults to False 71 | Typically True, changes dimensions to numbers that allow fast fourier transform; depends on library 72 | pkg : string 73 | 'cp' for cupy, 'torch' for torch, 'np' for numpy 74 | """ 75 | beamline_full_datafile_name = beamline_full_datafile_name.replace(os.sep, '/') 76 | # The data has been transposed when saved in tif format for the ImageJ to show the right orientation 77 | beam_data = ut.read_tif(beamline_full_datafile_name) 78 | 79 | prep_data_dir, beamline_datafile_name = os.path.split(beamline_full_datafile_name) 80 | if 'data_dir' in kwargs: 81 | data_dir = kwargs['data_dir'].replace(os.sep, '/') 82 | else: 83 | # assuming the directory structure and naming follows cohere-ui experiment directory structure 84 | data_dir = prep_data_dir.replace(os.sep, '/').replace('preprocessed_data', 'phasing_data') 85 | 86 | if 'alien_alg' in kwargs: 87 | data = at.remove_aliens(beam_data, kwargs, data_dir) 88 | else: 89 | data = beam_data 90 | 91 | auto_intensity_threshold = kwargs.get('auto_intensity_threshold', False) 92 | if auto_intensity_threshold: 93 | # the formula for auto threshold was found empirically, may be 94 | # modified in the future if more tests are done 95 | auto_threshold_value = 0.141 * beam_data[np.nonzero(beam_data)].mean().item() - 3.062 96 | intensity_threshold = max(2.0, auto_threshold_value) 97 | print(f'auto intensity threshold: {intensity_threshold}') 98 | else: 99 | intensity_threshold = kwargs.get('intensity_threshold', None) 100 | if intensity_threshold is None: 101 | print('define intensity threshold or set to auto, exiting.') 102 | return 103 | # zero out the noise 104 | data = np.where(data <= intensity_threshold, 0.0, data) 105 | 106 | # square root data 107 | data = np.sqrt(data) 108 | 109 | if 'crop_pad' in kwargs: 110 | crops_pads = kwargs['crop_pad'] 111 | # the adjust_dimension parameter list holds adjustment in each direction. 
Append 0s, if shorter 112 | if len(crops_pads) < 6: 113 | for _ in range(6 - len(crops_pads)): 114 | crops_pads.append(0) 115 | else: 116 | # the size still has to be adjusted to the opencl supported dimension 117 | crops_pads = (0, 0, 0, 0, 0, 0) 118 | # adjust the size, either pad with 0s or crop array 119 | pairs = [] 120 | for i in range(int(len(crops_pads) / 2)): 121 | pair = crops_pads[2 * i:2 * i + 2] 122 | pairs.append(pair) 123 | 124 | # next_fast_len parameter should be always True. But it needs to be included 125 | # when calling adjust_dimensions functions, as depending on the pkg, the result 126 | # could be different. So for the sake of flexibility, the parameter next_fast_len 127 | # was added as option in kwargs. 128 | next_fast_len = kwargs.get('next_fast_len', True) 129 | pkg = kwargs.get('pkg', 'np') 130 | data = ut.adjust_dimensions(data, pairs, next_fast_len, pkg) 131 | if data is None: 132 | print('check "crop_pad" configuration') 133 | return 134 | 135 | no_center_max = kwargs.get('no_center_max', False) 136 | shift = [0, 0, 0] 137 | if not no_center_max: 138 | data, shift = ut.center_max(data) 139 | 140 | if 'shift' in kwargs: 141 | conf_shift = kwargs['shift'] 142 | data = np.roll(data, conf_shift, tuple(range(data.ndim))) 143 | shift = [s + cs for s, cs in zip(shift, conf_shift)] 144 | 145 | try: 146 | # assuming the mask file is in directory of preprocessed data 147 | mask = ut.read_tif(beamline_full_datafile_name.replace(beamline_datafile_name, 'mask.tif')) 148 | mask = np.roll(mask, shift, tuple(range(mask.ndim))) 149 | ut.save_tif(mask, ut.join(data_dir, 'mask.tif')) 150 | except FileNotFoundError: 151 | pass 152 | 153 | # auto_binning: 154 | # # prepare data to make the oversampling ratio ~3 155 | # wos = 3.0 156 | # orig_os = ut.get_oversample_ratio(data) 157 | # # match oversampling to wos 158 | # wanted_os = [wos, wos, wos] 159 | # change = [np.round(f / t).astype('int32') for f, t in zip(orig_os, wanted_os)] 160 | # bins = [int(max([1, c])) for c in change] 161 | # print(f'auto binning size: {bins}') 162 | # data = ut.binning(data, bins) 163 | if 'binning' in kwargs: 164 | binsizes = kwargs['binning'] 165 | try: 166 | bins = [] 167 | for binsize in binsizes: 168 | bins.append(binsize) 169 | filler = len(data.shape) - len(bins) 170 | for _ in range(filler): 171 | bins.append(1) 172 | data = ut.binning(data, bins) 173 | kwargs['binning'] = bins 174 | except: 175 | print('check "binning" configuration') 176 | raise 177 | 178 | # save data 179 | data_file = ut.join(data_dir, 'data.tif') 180 | ut.save_tif(data, data_file) 181 | print(f'data ready for reconstruction, data dims: {data.shape}') 182 | 183 | # if auto save new config 184 | if auto_intensity_threshold: 185 | kwargs['intensity_threshold'] = intensity_threshold 186 | 187 | return kwargs 188 | -------------------------------------------------------------------------------- /src/cohere_core/utilities/config_errors_dict.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | """ 8 | Error dictionaries and lookup map name dictionary 9 | """ 10 | __author__ = "David Cyl" 11 | __copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC." 
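# A minimal lookup sketch (illustrative only, not part of this module): config_map_names,
# defined at the bottom of this file, maps a map-file name to one of the error dictionaries,
# and each dictionary maps a configuration section key to a list of messages indexed by
# error number, e.g.:
#
#     from cohere_core.utilities.config_errors_dict import config_map_names
#     msg = config_map_names['config_data_error_map_file']['Binning'][0]
#     # msg == 'binning should be a list of int'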
12 | __docformat__ = 'restructuredtext en' 13 | 14 | config_error = {'File':['File does not exist','Cannot Read File', 15 | 'Parsing error, check parenthesis,quotation syntax'], 16 | 'Workingdir':['working_dir parameter should be a string', 17 | 'missing mandatory working_dir parameter'], 18 | 'ExperimentID':['experiment_id parameter should be string', 19 | 'missing mandatory experiment_id parameter'], 20 | 'Scan':['scan parameter should be a string', 21 | 'scan parameter parsing error'], 22 | 'Separatescans':['separate_scans parameter should be True or False'], 23 | 'Separatescanranges':['separate_scan_ranges parameter should be True or False'], 24 | 'Multipeak': ['multipeak parameter should be True or False'] 25 | } 26 | config_prep_error = {'File':['No configuration file', 27 | 'cannot read configuration file', 28 | 'Parsing error, check parenthesis,quotation syntax'], 29 | 'Excludescans':['exclude_scans should be a list'], 30 | 'MinFiles':['min_frames should be int', 31 | 'min_frames parameter parsing error'], 32 | 'Maxcrop':['max_crop should be a list', 33 | 'max_crop parameter must have two values to crop 2D frame'], 34 | } 35 | config_data_error = {'File':['No configuration file', 36 | 'Cannot read configuration file', 37 | 'Parsing error, check parenthesis,quotation syntax'], 38 | 'Datadir':['data_dir parameter should be string', 39 | 'data_dir parameter parsing error'], 40 | 'CropPad':['crop_pad should be a list of int'], 41 | 'Shift':['shift should be a list of int'], 42 | 'Binning':['binning should be a list of int'], 43 | 'Intensitythreshold':['intensity_threshold should be float or int', 44 | 'missing mandatory parameter intensity_threshold'], 45 | 'Alienalg':['alien_alg can only be one the following strings: "block_aliens", "alien_file", "AutoAlien1", "none"'], 46 | 'Aliens':['aliens should be a list of alien blocks (lists)', 47 | 'aliens is not a list of ints', 48 | 'misconfigured alien, each alien is defined by list of six int for 3D data', 49 | '"aliens" parameter must be configured if "block_aliens" selected'], 50 | 'AlienFile':['alien_file should be a string (mask file name)', 51 | '"alien_file" parameter must be configured if "alien_file" selected'], 52 | 'Aa1sizethreshold':['AA1_size_threshold should be float', 53 | 'AA1_size_threshold parameter parsing error'], 54 | 'Aa1asymthreshold':['AA1_asym_threshold should be float', 55 | 'AA1_asym_threshold parameter parsing error'], 56 | 'Aa1minpts':['AA1_min_pts should be int', 57 | 'AA1_min_pts parameter parsing error'], 58 | 'Aa1eps':['AA1_eps should be float', 59 | 'AA1_eps parameter parsing error'], 60 | 'Aa1ampthreshold':['AA1_amp_threshold should be float', 61 | 'AA1_amp_threshold parameter parsing error'], 62 | 'Aa1savearrs':['AA1_save_arrs parameter should be True or False', 63 | 'AA1_save_arrs parameter parsing error'], 64 | 'Aa1expandcleanedsigma':['AA1_expandcleanedsigma should be float', 65 | 'AA1_expandcleanedsigma parameter parsing error']} 66 | config_rec_error = {'File':['No configuration file', 67 | 'Cannot read configuration file', 68 | 'Parsing error, check parenthesis,quotation syntax'], 69 | 'Datadir':['data_dir parameter should be string', 70 | 'configured data_dir is not a directory', 71 | 'no data file found in data_dir'], 72 | 'Savedir':['save_dir parameter should be string', 73 | 'save_dir parameter parsing error'], 74 | 'Initguess':['init_guess parameter should be one of the following strings: "random", "continue", "AI_guess"'], 75 | 'Continuedir':['continue_dir parameter should be string', 76 | 
'continue_dir parameter is mandatory when init_guess is "continue"'], 77 | 'Aitrainedmodel':['AI_trained_model parameter should be string', 78 | 'AI_trained_model parameter is mandatory when init_guess is "AI algorithm"'], 79 | 'Reconstruction':['reconstructions parameter should be int', 80 | 'reconstructions parameter parsing error'], 81 | 'Device':['device should be a list of ints, or string "all", or dictionary for cluster configuration', 82 | 'device parameter parsing error'], 83 | 'Algorithmsequence':['algorithm_sequence should be string', 84 | 'algorithm_sequence configuration error, only numerical, digital, and the following characters: *, +, (, ), space, are allowed ', 85 | 'algorithm_sequence configuration error, check brackets, nested brackets not supported', 86 | 'missing mandatory algorithm_sequence parameter'], 87 | 'Hiobeta':['hio_beta parameter should be float'], 88 | 'Initialsupportarea':['initial_support_area should be list', 89 | 'initial_support_area should be a list of int or float'], 90 | 'Generations':['ga_generations parameter should be int', 91 | 'when running GA "reconstructions" parameter should be configured and greater than 1'], 92 | 'Gametrics':['ga_metrics is not a list', 93 | 'warning: ga_metrics list can include only following strings: "chi", "sharpness", "summed_phase", "area"'], 94 | 'Gabreedmodes':['ga_breed_mode is not a list', 95 | 'warning: ga_breed_modes list can include only following strings: “none”, “sqrt_ab”, “pixel_switch”, “b_pa”, “2ab_a_b”, “2a_b_pa”,' 96 | ' “sqrt_ab_pa”, “sqrt_ab_pa_recip”, “sqrt_ab_recip”,“max_ab”, “max_ab_pa”, ' 97 | '“min_ab_pa”, “avg_ab”, “avg_ab_pa"'], 98 | 'Gacullings':['ga_cullings parameter should be a ist of int', 99 | 'sum of all cullings should be smaller than number of reconstructions'], 100 | 'Gashrinkwrapthresholds':['ga_sw_thresholds parameter should be a list of floats'], 101 | 'Gashrinkwrapgausssigmas':['ga_sw_gauss_sigmas parameter should be a list of floats'], 102 | 'Galowpassfiltersigmas':['List Float Error', 103 | 'ga_sw_gauss_sigmas parameter parsing error'], 104 | 'Gagenpcstart':['ga_gen_pc_start parameter should be int', 105 | 'ga_gen_pc_start parameter parsing error'], 106 | 'Twintrigger':['Trigger should be a list of int'], 107 | 'Twinhalves':['twin_halves should be a list of int'], 108 | 'Shrinkwraptrigger':['Trigger should be a list of int', 109 | 'Each sub-trigger should be a list of int if multiple shrink wraps'], 110 | 'Shrinkwraptype':['sw_type parameter should be string', 111 | 'supporting sw_type "GAUSS"'], 112 | 'Shrinkwrapthreshold':['sw_threshold should be float', 113 | 'sw_threshold should be a list of floats if multiple shrink wraps'], 114 | 'Shrinkwrapgausssigma':['sw_gauss_sigma should be float', 115 | 'sw_gauss_sigma should be a list of floats if multiple shrink wraps'], 116 | 'Phasesupporttrigger':['Trigger should be a list of int', 117 | 'Each sub-trigger should be a list of int if multiple phase constrain'], 118 | 'Phcphasemin':['phc_phase_min should be float', 119 | 'phc_phase_min should be a list of floats if multiple phase constrain'], 120 | 'Phcphasemax':['phc_phase_max should be float', 121 | 'phc_phase_max should be a list of floats if multiple phase constrain'], 122 | 'Pcinterval':['pc_interval should be int'], 123 | 'Pctype':['pc_type parameter should be string', 124 | 'pc_type parameter can be configured "LUCY"', 125 | 'pc_type parameter parsing error'], 126 | 'Pclucyiterations':['pc_LUCY_iterations should be int', 127 | 'pc_LUCY_iterations parameter parsing error'], 128 | 
'Pcnormalize':['pc_normalize parameter should be True or False', 129 | 'pc_normalize parameter parsing error'], 130 | 'Pclucykernel':['pc_LUCY_kernel parameter should be a list of int', 131 | 'pc_LUCY_kernel parameter must be configured when partial coherence feature in active'], 132 | 'Lpftrigger':['Trigger should be a list of int', 133 | 'lawpass_filter_trigger should have upper limit'], 134 | 'Lowpassfilterrange':['lowpass_filter_range parameter should be list of flots', 135 | 'missing lowpass_filter_range parameter'], 136 | 'Averagetrigger':['Trigger should be a list of int'], 137 | 'Progresstrigger':['Trigger should be a list of int'], 138 | } 139 | 140 | config_disp_error = {'File':['No configuration file', 141 | 'Cannot read configuration file', 142 | 'Parsing error, check parenthesis,quotation syntax'], 143 | 'Resultsdir':['results_dir parameter should be string'], 144 | 'Crop':['crop should be list', 145 | 'crop should be a list of int or float'], 146 | 'Rampups':['rampups should be int']} 147 | 148 | config_map_names = {'config_error_map_file':config_error, 149 | 'config_prep_error_map_file':config_prep_error, 150 | 'config_data_error_map_file':config_data_error, 151 | 'config_rec_error_map_file':config_rec_error, 152 | 'config_disp_error_map_file':config_disp_error} 153 | -------------------------------------------------------------------------------- /src/cohere_core/lib/torchlib.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | from cohere_core.lib.cohlib import cohlib 8 | import numpy as np 9 | import torch 10 | 11 | import sys 12 | import math 13 | 14 | 15 | class torchlib(cohlib): 16 | device = "cpu" 17 | # Interface 18 | @staticmethod 19 | def array(obj): 20 | return torch.Tensor(obj, device=torchlib.device) 21 | 22 | @staticmethod 23 | def dot(arr1, arr2): 24 | return torch.dot(arr1, arr2) 25 | 26 | @staticmethod 27 | def cross(arr1, arr2): 28 | raise NotImplementedError 29 | 30 | @staticmethod 31 | def set_device(dev_id): 32 | if sys.platform == 'darwin': 33 | # Check that MPS is available 34 | if not torch.backends.mps.is_available(): 35 | print("MPS not available because the current PyTorch install was not built with MPS enabled. 
Using cpu.") 36 | torch.device("cpu") 37 | else: 38 | torch.device("mps") 39 | elif torch.backends.cuda.is_built() and dev_id != -1: 40 | torch_dev = f"cuda:{str(dev_id)}" 41 | torchlib.device = torch_dev 42 | else: 43 | torch.device("cpu") 44 | 45 | @staticmethod 46 | def to_numpy(arr): 47 | try: 48 | out = arr.detach().cpu().numpy() 49 | except: 50 | out = arr 51 | return out 52 | 53 | @staticmethod 54 | def save(filename, arr): 55 | try: 56 | arr = arr.detach().cpu().numpy() 57 | except: 58 | pass 59 | np.save(filename, arr) 60 | 61 | @staticmethod 62 | def load(filename, **kwargs): 63 | arr = np.load(filename) 64 | return torch.as_tensor(arr, device=torchlib.device) 65 | 66 | @staticmethod 67 | def from_numpy(arr, **kwargs): 68 | return torch.as_tensor(arr, device=torchlib.device) 69 | 70 | @staticmethod 71 | def dtype(arr): 72 | return arr.dtype 73 | 74 | @staticmethod 75 | def astype(arr, dtype): 76 | # this is kind of nasty, it does not understand 'int', so need to convert for the cases 77 | # that will be used 78 | if dtype == 'int32': 79 | dtype = torch.int32 80 | return arr.type(dtype=dtype) 81 | 82 | @staticmethod 83 | def reshape(arr, shape): 84 | raise NotImplementedError 85 | 86 | @staticmethod 87 | def size(arr): 88 | return torch.numel(arr) 89 | 90 | @staticmethod 91 | def next_fast_len(target): 92 | import scipy 93 | return scipy.fft.next_fast_len(target) 94 | 95 | @staticmethod 96 | def hasnan(arr): 97 | return torch.any(torch.isnan(arr)) 98 | 99 | @staticmethod 100 | def nan_to_num(arr): 101 | raise NotImplementedError 102 | 103 | @staticmethod 104 | def copy(arr): 105 | return arr.clone() 106 | 107 | @staticmethod 108 | def random(shape, **kwargs): 109 | arr = torch.rand(shape, device=torchlib.device) 110 | # return torch.rand(shape, device=torchlib.device) 111 | return arr 112 | 113 | @staticmethod 114 | def roll(arr, sft, axis): 115 | sft = [int(s) for s in sft] 116 | dims = tuple([i for i in range(len(sft))]) 117 | try: 118 | return torch.roll(arr, sft, dims) 119 | except Exception as e: 120 | print('not supported error: ' + repr(e)) 121 | 122 | @staticmethod 123 | def shift(arr, sft): 124 | sft = [int(s) for s in sft] 125 | dims = tuple([i for i in range(len(sft))]) 126 | try: 127 | return torch.roll(arr, sft, dims) 128 | except Exception as e: 129 | print('not supported error: ' + repr(e)) 130 | 131 | @staticmethod 132 | def fftshift(arr): 133 | try: 134 | return torch.fft.fftshift(arr) 135 | except Exception as e: 136 | print('not supported error: ' + repr(e)) 137 | 138 | @staticmethod 139 | def ifftshift(arr): 140 | try: 141 | return torch.fft.ifftshift(arr) 142 | except Exception as e: 143 | print('not supported error: ' + repr(e)) 144 | 145 | @staticmethod 146 | def fft(arr): 147 | try: 148 | return torch.fft.fftn(arr, norm='forward') 149 | except Exception as e: 150 | print('not supported error: ' + repr(e)) 151 | 152 | @staticmethod 153 | def ifft(arr): 154 | try: 155 | return torch.fft.ifftn(arr, norm='forward') 156 | except Exception as e: 157 | print('not supported error: ' + repr(e)) 158 | 159 | @staticmethod 160 | def fftconvolve(arr1, kernel): 161 | print('not supported yet in torch, use different library') 162 | raise 163 | # # kernel shape can be smaller than arr1 shape in each dim 164 | # sh1 = list(arr1.size()) 165 | # sh2 = list(kernel.size()) 166 | # if sh1 != sh2: 167 | # # the pad is added from last dim to first 168 | # sh1.reverse() 169 | # sh2.reverse() 170 | # pad = [((sh1[i]-sh2[i])//2, sh1[i] - sh2[i] - (sh1[i]-sh2[i])//2) for i in 
range(len(sh1))] 171 | # pad = tuple(sum(pad, ())) 172 | # kernel = torch.nn.functional.pad(kernel, pad) 173 | # conv = torch.fft.ifftn(torch.fft.fftn(arr1) * torch.fft.fftn(kernel)) 174 | # return conv 175 | 176 | @staticmethod 177 | def correlate(arr1, arr2, mode='same', method='fft'): 178 | raise NotImplementedError 179 | 180 | @staticmethod 181 | def where(cond, x, y): 182 | return torch.where(cond, x, y) 183 | 184 | @staticmethod 185 | def dims(arr): 186 | # get array dimensions 187 | return arr.size() 188 | 189 | @staticmethod 190 | def absolute(arr): 191 | return torch.abs(arr) 192 | 193 | @staticmethod 194 | def square(arr): 195 | return torch.square(arr) 196 | 197 | @staticmethod 198 | def sqrt(arr): 199 | return torch.sqrt(arr) 200 | 201 | @staticmethod 202 | def sum(arr, axis=None): 203 | return torch.sum(arr, dim=axis) 204 | 205 | @staticmethod 206 | def real(arr): 207 | return arr.real 208 | 209 | @staticmethod 210 | def imag(arr): 211 | return arr.imag 212 | 213 | @staticmethod 214 | def amax(arr): 215 | return torch.amax(arr) 216 | 217 | @staticmethod 218 | def argmax(arr, axis=None): 219 | return torch.argmax(arr, axis) 220 | 221 | @staticmethod 222 | def unravel_index(indices, shape): 223 | return np.unravel_index(indices.detach().cpu().numpy(), shape) 224 | 225 | @staticmethod 226 | def ravel(arr): 227 | raise NotImplementedError 228 | 229 | @staticmethod 230 | def maximum(arr1, arr2): 231 | return torch.maximum(arr1, arr2) 232 | 233 | @staticmethod 234 | def ceil(arr): 235 | return torch.ceil(arr) 236 | 237 | @staticmethod 238 | def fix(arr): 239 | return torch.fix(arr) 240 | 241 | @staticmethod 242 | def round(val): 243 | return torch.round(val) 244 | 245 | @staticmethod 246 | def full(shape, fill_value, **kwargs): 247 | return torch.full(shape, fill_value, device=torchlib.device) 248 | 249 | @staticmethod 250 | def print(arr, **kwargs): 251 | print(arr) 252 | 253 | @staticmethod 254 | def angle(arr): 255 | return torch.angle(arr) 256 | 257 | @staticmethod 258 | def flip(arr, axis=None): 259 | if axis is None: 260 | axis = [i for i in range(len(arr.size()))] 261 | return torch.flip(arr, axis) 262 | 263 | @staticmethod 264 | def tile(arr, rep): 265 | return torch.tile(arr, rep) 266 | 267 | @staticmethod 268 | def expand_dims(arr, axis): 269 | return arr.unsqeeze(axis) 270 | 271 | @staticmethod 272 | def squeeze(arr): 273 | return torch.squeeze(arr) 274 | 275 | @staticmethod 276 | # this method is only in torchlib, not part of cohlib 277 | def gaussian(sigma, size=5, **kwargs): #sigma, size=5, n=None): 278 | """ 279 | Creates a nD Gaussian kernel with the specified shape and sigma. 280 | It is assumed that all dimensions in shape are equal. 281 | """ 282 | dims = kwargs.get('dims', None) 283 | ranges = [torch.arange(size)] * dims 284 | grid = torch.meshgrid(*ranges) 285 | grid = torch.stack(grid, dim=-1).float() 286 | center = (size - 1) / 2 287 | kernel = torch.exp(-torch.sum((grid - center) ** 2, dim=-1) / (2 * sigma ** 2)) 288 | kernel /= kernel.sum() 289 | return kernel 290 | 291 | @staticmethod 292 | def entropy(arr): 293 | raise NotImplementedError 294 | 295 | @staticmethod 296 | def gaussian_filter(arr, sigma, **kwargs): 297 | """ 298 | Convolves a nD input tensor with a nD Gaussian kernel. 
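        A minimal usage sketch (assuming arr is a 3D tensor already on the target device)::

            blurred = torchlib.gaussian_filter(arr, 1.0)

        When no kernel is passed in kwargs, a cubic Gaussian kernel of size 5 is built with
        torchlib.gaussian and applied with convolution padding of kernel size // 2.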
299 | """ 300 | if arr.ndim == 3: 301 | from torch.nn.functional import conv3d as convn 302 | elif arr.ndim == 2: 303 | from torch.nn.functional import conv2d as convn 304 | elif arr.ndim == 1: 305 | from torch.nn.functional import conv1d as convn 306 | 307 | if 'kernel' in kwargs: 308 | # kernel should be on device the arr is 309 | kernel = kwargs.get('kernel') 310 | gauss_kernel_size = kernel.size()[0] 311 | else: 312 | gauss_kernel_size = 5 313 | kernel = torchlib.gaussian(sigma, gauss_kernel_size, dims=arr.ndim) 314 | kernel = kernel.to(arr.device) 315 | 316 | padding = gauss_kernel_size // 2 317 | 318 | blurred = convn(arr.unsqueeze(0).unsqueeze(0), kernel.unsqueeze(0).unsqueeze(0), padding=padding) 319 | return blurred.squeeze() 320 | 321 | @staticmethod 322 | def median_filter(arr, size, **kwargs): 323 | raise NotImplementedError 324 | 325 | @staticmethod 326 | def uniform_filter(arr, size, **kwargs): 327 | raise NotImplementedError 328 | 329 | @staticmethod 330 | def binary_erosion(arr, **kwargs): 331 | raise NotImplementedError 332 | 333 | @staticmethod 334 | def center_of_mass(arr): 335 | normalizer = torch.sum(arr) 336 | shape = arr.shape 337 | ranges = [torch.arange(shape[i], device=torchlib.device) for i in range(arr.ndim)] 338 | grids = torch.meshgrid(*ranges) 339 | com = [(torch.sum(arr * grids[i]) / normalizer).tolist() for i in range(arr.ndim)] 340 | return com 341 | 342 | @staticmethod 343 | def meshgrid(*xi): 344 | # check if need to move to device 345 | return torch.meshgrid(*xi) 346 | 347 | @staticmethod 348 | def exp(arr): 349 | return torch.exp(arr) 350 | 351 | @staticmethod 352 | def conj(arr): 353 | return torch.conj(arr) 354 | 355 | @staticmethod 356 | def cos(arr): 357 | return torch.cos(arr) 358 | 359 | @staticmethod 360 | def array_equal(arr1, arr2): 361 | return torch.equal(arr1, arr2) 362 | 363 | @staticmethod 364 | def linspace(start, stop, num): 365 | return torch.linspace(start, stop, num) 366 | 367 | @staticmethod 368 | def geomspace(start, stop, num): 369 | raise NotImplementedError 370 | 371 | @staticmethod 372 | def clip(arr, min, max=None): 373 | return torch.clip(arr, min, max) 374 | 375 | @staticmethod 376 | def diff(arr, axis=None, prepend=0): 377 | raise NotImplementedError 378 | 379 | @staticmethod 380 | def gradient(arr, dx=1): 381 | raise NotImplementedError 382 | 383 | @staticmethod 384 | def argmin(arr, axis=None): 385 | raise NotImplementedError 386 | 387 | @staticmethod 388 | def take_along_axis(a, indices, axis): 389 | raise NotImplementedError 390 | 391 | @staticmethod 392 | def moveaxis(arr, source, dest): 393 | raise NotImplementedError 394 | 395 | @staticmethod 396 | def lstsq(A, B): 397 | raise NotImplementedError 398 | 399 | @staticmethod 400 | def zeros(shape): 401 | raise NotImplementedError 402 | 403 | @staticmethod 404 | def indices(dims): 405 | raise NotImplementedError 406 | 407 | @staticmethod 408 | def concatenate(tup, axis=0): 409 | raise NotImplementedError 410 | 411 | @staticmethod 412 | def stack(tup): 413 | raise NotImplementedError 414 | 415 | @staticmethod 416 | def amin(arr): 417 | raise NotImplementedError 418 | 419 | @staticmethod 420 | def affine_transform(arr, matrix, order=3, offset=0): 421 | raise NotImplementedError 422 | 423 | @staticmethod 424 | def pad(arr, padding): 425 | raise NotImplementedError 426 | 427 | @staticmethod 428 | def histogram2d(meas, rec, n_bins=100, log=False): 429 | raise NotImplementedError 430 | 431 | @staticmethod 432 | def log(arr): 433 | raise NotImplementedError 434 | 435 | 
@staticmethod 436 | def log10(arr): 437 | raise NotImplementedError 438 | 439 | @staticmethod 440 | def xlogy(arr, y=None): 441 | raise NotImplementedError 442 | 443 | @staticmethod 444 | def mean(arr): 445 | raise NotImplementedError 446 | 447 | @staticmethod 448 | def median(arr): 449 | raise NotImplementedError 450 | 451 | @staticmethod 452 | def clean_default_mem(): 453 | pass 454 | 455 | 456 | # a1 = torch.Tensor([0.1, 0.2, 0.3, 1.0, 1.2, 1.3]) 457 | # a2 = torch.Tensor([10.1, 10.2, 10.3, 11.0]) 458 | # conv = torchlib.fftconvolve(a1,a2) 459 | # print('torch conv', conv) 460 | # print(conv.real) 461 | # print(torch.abs(conv)) 462 | # print(torch.nn.functional.conv1d(a1,a2)) 463 | -------------------------------------------------------------------------------- /src/cohere_core/controller/op_flow.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | import numpy as np 8 | import re 9 | 10 | # This dict maps the mnemonic used when defining algorithm sequence parameter to the four steps of 11 | # phase retrieval algorithm functions. 12 | 13 | algs = {'ER': ('to_reciprocal_space', 'modulus', 'to_direct_space', 'er'), 14 | 'HIO': ('to_reciprocal_space', 'modulus', 'to_direct_space', 'hio'), 15 | 'ERpc': ('to_reciprocal_space', 'pc_modulus', 'to_direct_space', 'er'), 16 | 'HIOpc': ('to_reciprocal_space', 'pc_modulus', 'to_direct_space', 'hio'), 17 | 'SF': ('to_reciprocal_space', 'modulus', 'to_direct_space', 'sf'), 18 | 'RAAR': ('to_reciprocal_space', 'modulus', 'to_direct_space', 'raar') 19 | } 20 | 21 | # This map keeps the names of triggers that can be configured as sub-trigger, i.e. be a trigger for the iteration span 22 | # defined by preceding algorithm. The key is the trigger name and value is the mnemonic. The mnemonic is used in the 23 | # configuration. 24 | sub_triggers = {'SW' : 'shrink_wrap_trigger', 25 | 'PHC' : 'phc_trigger'} 26 | 27 | # This list contains triggers that will be active at the last iteration defined by trigger, despite 28 | # not being a trigger calculated by the step formula. 29 | # It applies to sub-triggers, setting the last iteration to that of sub-trigger. 30 | last_iter_op_triggers = ['progress_trigger', 31 | 'switch_peaks_trigger', 32 | 'switch_resampling_trigger'] 33 | 34 | def get_alg_rows(s, pc_conf_start): 35 | """ 36 | Parses algorithm sequence string into structures being: algorithm rows, and sub-trigger operations info. 
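    An illustrative sequence (using the mnemonics defined in the algs and sub_triggers dicts above)::

        "2* (20*ER + 180*HIO) + 20*ER.SW0"

    repeats the bracketed group twice and finishes with 20 ER iterations tied to the first
    configured shrink wrap sub-trigger, giving iter_no = 420.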
37 | 38 | :param s: str 39 | algorithm sequence 40 | :param pc_conf_start: boolean or None 41 | if None, no partial coherence is scheduled 42 | if True, the configured partial coherence will be scheduled 43 | if False, the partial coherence started ago (in GA case) and will continue here 44 | :return: tuple 45 | rows : ndarry 46 | ndarray that depicts algorithms (modulus, pc_modulus, hio, er) operations 47 | sub_rows : dict 48 | dictionary with entries of k : v, where 49 | k is the trigger name that is being configured as sub-triggers 50 | v is a list of sub-trigger operations 51 | iter_no : int 52 | number of iterations 53 | pc_start : None or int 54 | starting iteration of partial coherence if any 55 | """ 56 | seq = [] 57 | accum_iter = 0 58 | 59 | def parse_entry(ent, accum_iter): 60 | # parses elementary part of the algorithm sequence 61 | r_e = ent.split('*') 62 | seq.append([int(r_e[0]), r_e[1], accum_iter]) 63 | accum_iter += int(r_e[0]) 64 | return accum_iter 65 | 66 | s = s.replace(' ', '') 67 | entries = s.split('+') 68 | i = 0 69 | while i < len(entries): 70 | entry = entries[i] 71 | if '(' in entry: 72 | group = [] 73 | rep_entry = entry.split('(') 74 | repeat = int(rep_entry[0][:-1]) 75 | group.append(rep_entry[1]) 76 | i += 1 77 | group_entry = entries[i] 78 | while ')' not in group_entry: 79 | group.append(group_entry) 80 | i += 1 81 | group_entry = entries[i] 82 | group.append(group_entry[:-1]) 83 | for _ in range(repeat): 84 | for group_entry in group: 85 | accum_iter = parse_entry(group_entry, accum_iter) 86 | i += 1 87 | else: 88 | accum_iter = parse_entry(entry, accum_iter) 89 | i += 1 90 | iter_no = sum([e[0] for e in seq]) 91 | alg_rows = {} 92 | sub_rows = {} 93 | row = np.zeros(iter_no, dtype=int) 94 | fs = set([i for sub in algs.values() for i in sub]) 95 | for f in fs: 96 | alg_rows[f] = row.copy() 97 | i = 0 98 | pc_start = None 99 | for entry in seq: 100 | repeat = entry[0] 101 | funs = entry[1].split('.') 102 | if funs[0] not in algs: 103 | msg = f'algorithm {funs[0]} is not defined in op_flow.py file, algs dict.' 104 | raise NameError(msg) 105 | # the pc will not be executed if pc_conf_start is None 106 | # this code will be revised after each generation has separate config 107 | if pc_conf_start is None: 108 | if funs[0].endswith('pc'): 109 | funs[0] = funs[0][:-2] 110 | elif not pc_conf_start: 111 | if not funs[0].endswith('pc'): 112 | funs[0] = funs[0] + 'pc' 113 | 114 | row_keys = algs[funs[0]] 115 | for row_key in row_keys: 116 | alg_rows[row_key][i:i+repeat] = 1 117 | if 'pc' in row_key and pc_start == None: 118 | if pc_conf_start == True: 119 | pc_start = i 120 | else: 121 | pc_start = 1 122 | # find sub-triggers 123 | for row_key in funs[1:]: 124 | match = re.match(r"([A-Z]+)([0-9]+)", row_key, re.I) 125 | if match: 126 | (trig_op, idx) = match.groups(0) 127 | sub_t = sub_triggers[trig_op] 128 | if trig_op not in sub_triggers.keys(): 129 | msg = f'the sub-trigger {trig_op} must be defined in op_flow.py file, sub_triggers dict.' 130 | raise NameError(msg) 131 | if sub_t not in sub_rows: 132 | sub_rows[sub_t] = [] 133 | sub_rows[sub_t].append((entry[2], entry[0] + entry[2], idx)) 134 | i += repeat 135 | 136 | return alg_rows, sub_rows, iter_no, pc_start 137 | 138 | 139 | def fill_trigger_row(trig, iter_no, last_trig, row=None): 140 | """ 141 | This functions creates ndarray that depicts triggered operations for a given trigger. 
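    For example (illustrative values): with iter_no=100 and trig=[5, 20], the returned row marks
    iterations 5, 25, 45, 65 and 85; trig=[-1] marks only the last iteration (99).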
142 | 143 | :param trig: list 144 | a list with 1, 2, or 3 elements defining trigger 145 | :param iter_no: int 146 | total number of iterations 147 | :param row: ndarray 148 | if given, the row will be used to fill the trigger 149 | :return: 150 | """ 151 | if row is None: 152 | row = np.zeros(iter_no, dtype=int) 153 | if len(trig) ==1: 154 | trig_iter = trig[0] 155 | if trig_iter < 0: 156 | trig_iter += iter_no 157 | row[trig_iter] = 1 158 | else: # trig is list 159 | trig_start = trig[0] 160 | if trig_start < 0: 161 | trig_start += iter_no 162 | step = trig[1] 163 | if len(trig) == 3: 164 | trig_stop = trig[2] 165 | if trig_stop < 0: 166 | trig_stop += iter_no 167 | else: 168 | trig_stop = iter_no 169 | for it in range(trig_start, trig_stop, step): 170 | row[it] = 1 171 | if last_trig: 172 | row[trig_stop - 1] = 1 173 | return row 174 | 175 | 176 | def fill_sub_trigger_row(sub_iters, sub_trigs, iter_no, last_trig): 177 | """ 178 | Based on iterations allocated to sub-triggers and sub-triggers definitions this functions 179 | creates ndarray that depicts triggered operations. 180 | 181 | :param sub_iters: list 182 | contains entry for each sub-trigger 183 | the entry consisting of starting and ending iterations where the sub-trigger 184 | is active and index value (+1) that specifies sub-trigger. 185 | :param sub_trigs: list 186 | list of sub-trigger, defined in configuration 187 | :param iter_no: int 188 | total number of iterations 189 | :return: ndarray 190 | array of int, value of zero meaning no trigger operation in this iteration 191 | value greater than zero meaning the sub-trigger operation related to the value 192 | will be triggered in this iteration 193 | """ 194 | # create array indicating triggered operation (1) or no action (0) along iterations 195 | sub_trig_row = np.zeros(iter_no, dtype=int) 196 | # create array indicating with index which sub-triggered operation may happen in the iterations 197 | sub_trig_idx_row = np.zeros(iter_no, dtype=int) 198 | # for each defined sub iteration chunk apply corresponding sub-trigger 199 | for i, sub_iter in enumerate(sub_iters): 200 | (b, e, idx) = sub_iter 201 | index = int(idx) 202 | sub_trig_idx_row[b:e] = index + 1 203 | if len(sub_trigs) - 1 < index: 204 | msg = 'not enough entries in sub-trigger' 205 | raise RuntimeError(msg) 206 | trigger = sub_trigs[index].copy() 207 | trigger[0] += b 208 | if len(trigger) == 2: 209 | trigger.append(e) 210 | elif len(trigger) == 3: 211 | trigger[2] = min(e, trigger[0] + trigger[2]) 212 | # update the sub_iters 213 | sub_iters[i] = (b, trigger[2], idx) 214 | sub_trig_row = fill_trigger_row(trigger, iter_no, last_trig, sub_trig_row) 215 | 216 | return sub_trig_row * sub_trig_idx_row 217 | 218 | 219 | def get_flow_arr(params, flow_items_list, curr_gen=None): 220 | # get information about GA and partial coherence from config_map 221 | # pc_conf_start is None if partial coherence is inactive in this reconstruction 222 | # it is True if the partial coherence starts in this generation 223 | # and False if partial coherence started in previous generation and is continued 224 | if 'pc_interval' in params: 225 | if curr_gen is None: 226 | pc_conf_start = True 227 | else: 228 | if curr_gen < params['ga_gen_pc_start']: 229 | pc_conf_start = None 230 | elif curr_gen == params['ga_gen_pc_start']: 231 | pc_conf_start = True 232 | else: 233 | pc_conf_start = False 234 | else: 235 | pc_conf_start = None 236 | if pc_conf_start is None: 237 | params.pop('pc_interval', None) 238 | 239 | # parse algorithm sequence 
to get the algorithm rows and sub-triggers rows, number iterations, 240 | # and partial coherence starting iteration 241 | try: 242 | (alg_rows, sub_iters, iter_no, pc_start) = get_alg_rows(params['algorithm_sequence'], pc_conf_start) 243 | except: 244 | return False, None, None 245 | 246 | # do some checks to find if the sequence and configuration are runnable 247 | # and special cases 248 | 249 | last_lpf = None 250 | if 'lowpass_filter_trigger' in params: 251 | if len(params['lowpass_filter_trigger']) < 2: 252 | print('Low pass trigger misconfiguration error. This trigger should have upper bound.') 253 | raise 254 | elif params['lowpass_filter_trigger'][2] >= iter_no: 255 | print('Low pass trigger misconfiguration error. The upper bound should be less than total iterations.') 256 | raise 257 | else: 258 | last_lpf = params['lowpass_filter_trigger'][2] 259 | 260 | if pc_start is not None: 261 | if pc_start == 0: 262 | raise ValueError('partial coherence is configured in first iteration, allow several ER before.') 263 | else: 264 | pc_interval = params['pc_interval'] 265 | params['pc_trigger'] = [pc_start, pc_interval] 266 | 267 | # initialize 268 | sub_trig_op = {} 269 | 270 | # create empty array with the size of number of all functions by number of all iterations 271 | flow_arr = np.zeros((len(flow_items_list), iter_no), dtype=int) 272 | 273 | # fill the flow array with ones if function should execute in iteration 274 | for i, flow_item in enumerate(flow_items_list): 275 | if flow_item == 'next': 276 | # these functions are executed in each iteration 277 | flow_arr[i, :] = 1 278 | elif flow_item in alg_rows.keys(): 279 | # fill out the algorithm rows 280 | flow_arr[i] = alg_rows[flow_item] 281 | elif flow_item.endswith('operation'): 282 | # fill out trigger/sub-trigger operations rows 283 | # The function name and associated trigger differ in prefix. 284 | # the function name ends with 'operation', and trigger ends with 'trigger' 285 | trigger_name = flow_item.replace('operation', 'trigger') 286 | if trigger_name in params: 287 | # set the switch last_trig if the trigger should end with operation 288 | last_trig = trigger_name in last_iter_op_triggers 289 | 290 | # determined in algorithm sequence parsing if the triggered operation is configured 291 | # with sub-triggers or trigger 292 | if trigger_name in sub_iters.keys(): 293 | # may throw exception 294 | flow_arr[i] = fill_sub_trigger_row(sub_iters[trigger_name], params[trigger_name], iter_no, last_trig) 295 | # special case 296 | if flow_item == 'phc_operation': 297 | reset = [l[1] for l in list(sub_iters[trigger_name])] 298 | flow_arr[i-1][reset] = 1 299 | 300 | # add entry to sub trigger operation dict with key of the trigger mnemonic 301 | # and the value of a list with the row and sub triggers iterations chunks 302 | sub_trig_op[trigger_name] = (flow_arr[i], sub_iters[trigger_name]) 303 | else: 304 | flow_arr[i] = fill_trigger_row(params[trigger_name], iter_no, last_trig) 305 | # special case 306 | if flow_item == 'phc_operation': 307 | # Assuming phc trigger is configured with upper limit 308 | reset_iter = min(iter_no - 1, params[trigger_name][2] + 1) 309 | flow_arr[i-1][reset_iter] = 1 310 | elif flow_item == 'set_prev_pc' and pc_start is not None: 311 | # set_prev_pc is executed one iteration before pc_trigger 312 | pc_row = flow_items_list.index('pc_operation') 313 | flow_arr[i, : -1] = flow_arr[pc_row, 1:] 314 | elif flow_item == 'reset_resolution' and last_lpf is not None: 315 | # reset low pass filter (i.e. 
data set to original) after the last LPF operation 316 | flow_arr[i][last_lpf] = 1 317 | 318 | return pc_start is not None, flow_arr, sub_trig_op 319 | -------------------------------------------------------------------------------- /src/cohere_core/controller/features.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | import cohere_core.utilities.utils as ut 8 | import cohere_core.utilities.dvc_utils as dvut 9 | from abc import ABC, abstractmethod 10 | 11 | __author__ = "Barbara Frosik" 12 | __copyright__ = "Copyright (c) 2016, UChicago Argonne, LLC." 13 | __docformat__ = 'restructuredtext en' 14 | __all__ = ['Pcdi', 15 | 'TriggeredOp', 16 | 'ShrinkWrap', 17 | 'PhaseConstrain', 18 | 'LowPassFilter'] 19 | 20 | 21 | def set_lib(dlib): 22 | global devlib 23 | devlib = dlib 24 | 25 | 26 | class Pcdi: 27 | def __init__(self, params, data, dir=None): 28 | self.type = params.get('pc_type', 'LUCY') 29 | if 'pc_LUCY_iterations' in params: 30 | self.iterations = params['pc_LUCY_iterations'] 31 | else: 32 | msg = 'pc_LUCY_iterations parameter not defined' 33 | raise ValueError(msg) 34 | self.normalize = params.get('pc_normalize', True) 35 | if 'pc_LUCY_kernel' in params: 36 | self.kernel_area = params['pc_LUCY_kernel'] 37 | else: 38 | msg = 'pc_LUCY_kernel parameter not defined' 39 | raise ValueError(msg) 40 | if dir is None: 41 | self.kernel = None 42 | else: 43 | try: 44 | self.kernel = devlib.load(ut.join(dir, 'coherence.npy')) 45 | except: 46 | self.kernel = None 47 | 48 | self.dims = devlib.dims(data) 49 | self.roi_data = ut.crop_center(devlib.fftshift(data), self.kernel_area) 50 | if self.normalize: 51 | self.sum_roi_data = devlib.sum(devlib.square(self.roi_data)) 52 | if self.kernel is None: 53 | self.kernel = devlib.full(self.kernel_area, 0.5, dtype=devlib.dtype(data)) 54 | 55 | def set_previous(self, abs_amplitudes): 56 | self.roi_amplitudes_prev = ut.crop_center(devlib.fftshift(abs_amplitudes), self.kernel_area) 57 | 58 | def apply_partial_coherence(self, abs_amplitudes): 59 | abs_amplitudes_2 = devlib.square(abs_amplitudes) 60 | converged_2 = devlib.fftconvolve(abs_amplitudes_2, self.kernel) 61 | converged = devlib.sqrt(converged_2) 62 | return converged 63 | 64 | def update_partial_coherence(self, abs_amplitudes): 65 | roi_amplitudes = ut.crop_center(devlib.fftshift(abs_amplitudes), self.kernel_area) 66 | roi_combined_amp = 2 * roi_amplitudes - self.roi_amplitudes_prev 67 | if self.normalize: 68 | amplitudes_2 = devlib.square(roi_combined_amp) 69 | sum_ampl = devlib.sum(amplitudes_2) 70 | ratio = self.sum_roi_data / sum_ampl 71 | amplitudes = devlib.sqrt(amplitudes_2 * ratio) 72 | else: 73 | amplitudes = roi_combined_amp 74 | 75 | if self.type == "LUCY": 76 | self.kernel = dvut.lucy_deconvolution(devlib.square(amplitudes), devlib.square(self.roi_data), 77 | self.kernel, self.iterations) 78 | 79 | 80 | class LowPassFilter(): 81 | def __init__(self, params): 82 | iter_range = [params['lowpass_filter_trigger'][0], params['lowpass_filter_trigger'][2]] 83 | iter_diff = params['lowpass_filter_trigger'][2] - params['lowpass_filter_trigger'][0] 84 | if len(params['lowpass_filter_range']) == 1: 85 | end_sigma = 1.0 86 | else: 87 | end_sigma = params['lowpass_filter_range'][1] 88 | sigma_diff = 
end_sigma - params['lowpass_filter_range'][0] 89 | self.filter_sigmas = [sigma_diff / iter_diff * iter + params['lowpass_filter_range'][0] 90 | for iter in range(iter_range[0], iter_range[1])] 91 | 92 | 93 | def apply_trigger(self, *args): 94 | # The filter is applied on data 95 | data = args[0] 96 | iter = args[1] 97 | 98 | filter_sigma = self.filter_sigmas[iter] 99 | return devlib.gaussian_filter(data, filter_sigma) 100 | 101 | 102 | class TriggeredOp(ABC): 103 | """ 104 | Base class for Triggered operation. This class creates feature objects and manages the trigger or subtrigger depending on 105 | configuration. 106 | """ 107 | def __init__(self, trig_op_name): 108 | # the self.objs will hold either one concrete feature object if configuration is for general trigger, 109 | # i.e. spanning across all iterations, 110 | # or it will hold a list of objects, each being a concrete sub-feature, appearing in order that will be 111 | # executed during each trigger 112 | self.objs = None 113 | 114 | # the self.f points to either self.apply_trigger_obj function if configuration is for general trigger, 115 | # or to self.apply_trigger_seq if sub-triggers are configured 116 | self.f = None 117 | 118 | self.trig_op_name = trig_op_name # ex: 'shrink_wrap_trigger' 119 | 120 | 121 | def apply_trigger_obj(self, *args): 122 | """ 123 | This function is called when general trigger is used. In this case there is only one instance of the concrete 124 | feature. The trigger is then executed on this instance. 125 | :param args: variable parameters depending on the concrete feature 126 | :return: depends on the concrete feature 127 | """ 128 | return self.objs.apply_trigger(*args) 129 | 130 | def apply_trigger_seq(self, *args): 131 | """ 132 | This function is called when sub-triggers are used. the self.objs is a list for this case. Every time a trigger 133 | is applied the first object is removed from this list and the trigger is executed on this instance. 134 | :param args: variable parameters depending on the concrete feature 135 | :return: depends on the concrete feature 136 | """ 137 | sub_obj = self.objs.pop(0) 138 | return sub_obj.apply_trigger(*args) 139 | 140 | def apply_trigger(self, *args): 141 | """ 142 | Thhis function is called by a trigger. It then directs the trigger by calling the self.f function. 143 | :param args: variable parameters depending on the concrete feature 144 | :return: depends on the concrete feature 145 | """ 146 | # the f is either update_amp_seq function or update_amp_seq 147 | # The f is set in constructor depending on whether the trigger 148 | # was defined for the entire span of iterations or multiple 149 | # subtriggers were defined in algorithm sequence 150 | return self.f(*args) 151 | 152 | @abstractmethod 153 | def create_obj(self, *args): 154 | """ 155 | This class must be overriden by subclass. It creates a feature object. 156 | :param args: variable parameters depending on the concrete feature 157 | :return: an instance of the subclass 158 | """ 159 | pass 160 | 161 | def create_objs(self, params, sub_rows_trigs): 162 | """ 163 | The params map contains value of the feature trigger. If the trigger is configured as a general one, 164 | i.e. a single trigger, the self.objs is set to the created instance of the feature, and the self.f 165 | pointer is set accordingly, i.e. to the self.apply_trigger_obj function. 166 | If the trigger contains several sub-triggers, the sub_row_trigs contain info about the sequence the 167 | subtrigger should be executed. 
Based on this info the unique instances are created and are added to the 168 | in the order the triggers should be called. The self.f function is set to the self.apply_trigger_seq 169 | function. 170 | :param sub_rows_trigs: sub-triggers info if sub-triggers are configured 171 | :param params: configuration parameters 172 | :return: 173 | """ 174 | trigger_name = f'{self.trig_op_name}_trigger' 175 | 176 | if trigger_name in sub_rows_trigs.keys(): 177 | row = sub_rows_trigs[trigger_name][0] 178 | sub_trigs = sub_rows_trigs[trigger_name][1] 179 | sub_objs = {} 180 | for sub_t in sub_trigs: 181 | (beg, end, idx) = sub_t 182 | index = int(idx) 183 | if index not in sub_objs.keys(): 184 | sub_objs[index] = self.create_obj(params, index=index, beg=beg, end=end) 185 | trigs = [i-1 for i in row.tolist() if i != 0] 186 | # the operation of creating object might fail 187 | # if conditions are not met, ex: the sw_type is not supported 188 | # This should be already verified by a verifier, so no checking here 189 | self.objs = [sub_objs[idx] for idx in trigs] 190 | self.f = self.apply_trigger_seq 191 | else: 192 | if trigger_name in params: 193 | self.objs = self.create_obj(params) 194 | self.f = self.apply_trigger_obj 195 | 196 | 197 | class ShrinkWrap(TriggeredOp): 198 | def __init__(self, trig_op): 199 | super().__init__(trig_op) 200 | 201 | class GaussSW: 202 | def __init__(self, gauss_sigma, threshold): 203 | self.gauss_sigma = gauss_sigma 204 | self.threshold = threshold 205 | 206 | def apply_trigger(self, *args): 207 | ds_image = args[0] 208 | return dvut.shrink_wrap(ds_image, self.threshold, self.gauss_sigma) 209 | 210 | 211 | class Gauss1SW: 212 | def __init__(self, gauss_sigma, threshold): 213 | self.gauss_sigma = gauss_sigma 214 | self.threshold = threshold 215 | 216 | def apply_trigger(self, *args): 217 | ds_image = args[0] 218 | return dvut.shrink_wrap(ds_image, self.threshold + .01, self.gauss_sigma) 219 | 220 | 221 | def create_obj(self, params, index=None, beg=None, end=None): 222 | def check_Gauss_type(): 223 | # for now cohere supports only Gauss type, so the following parameters are mandatory 224 | if 'shrink_wrap_gauss_sigma' not in params: 225 | msg = 'shrink_wrap_gauss_sigma parameter not defined' 226 | raise ValueError(msg) 227 | if 'shrink_wrap_threshold' not in params: 228 | msg = 'shrink_wrap_threshold parameter not defined' 229 | raise ValueError(msg) 230 | 231 | def check_Gauss1_type(): 232 | # for now cohere supports only Gauss type, so the following parameters are mandatory 233 | if 'shrink_wrap_gauss_sigma' not in params: 234 | msg = 'shrink_wrap_gauss_sigma parameter not defined' 235 | raise ValueError(msg) 236 | if 'shrink_wrap_threshold' not in params: 237 | msg = 'shrink_wrap_threshold parameter not defined' 238 | raise ValueError(msg) 239 | 240 | if 'shrink_wrap_type' not in params: 241 | msg = 'shrink_wrap_type parameter not defined' 242 | raise ValueError(msg) 243 | 244 | if index is None: 245 | sw_type = params['shrink_wrap_type'] 246 | if sw_type == 'GAUSS': 247 | check_Gauss_type() 248 | sigma = params['shrink_wrap_gauss_sigma'] 249 | threshold = params['shrink_wrap_threshold'] 250 | return self.GaussSW(sigma, threshold) 251 | elif sw_type == 'GAUSS1': 252 | check_Gauss1_type() 253 | sigma = params['shrink_wrap_gauss_sigma'] 254 | threshold = params['shrink_wrap_threshold'] 255 | return self.Gauss1SW(sigma, threshold) 256 | else: 257 | msg = f'{sw_type} shrink wrap type is not supported' 258 | raise ValueError(msg) 259 | else: 260 | if 
len(params['shrink_wrap_type']) - 1 < index: 261 | msg = f'shrink_wrap_type not defined for sub-trigger {index}' 262 | raise ValueError(msg) 263 | sw_type = params['shrink_wrap_type'][index] 264 | if sw_type == 'GAUSS': 265 | check_Gauss_type() 266 | if len(params['shrink_wrap_gauss_sigma']) - 1 < index: 267 | msg = f'shrink_wrap_gauss_sigma not defined for sub-trigger {index}' 268 | raise ValueError(msg) 269 | sigma = params['shrink_wrap_gauss_sigma'][index] 270 | if len(params['shrink_wrap_threshold']) - 1 < index: 271 | msg = f'shrink_wrap_threshold not defined for sub-trigger {index}' 272 | raise ValueError(msg) 273 | threshold = params['shrink_wrap_threshold'][index] 274 | return self.GaussSW(sigma, threshold) 275 | elif sw_type == 'GAUSS1': 276 | check_Gauss1_type() 277 | if len(params['shrink_wrap_gauss_sigma']) - 1 < index: 278 | msg = f'shrink_wrap_gauss_sigma not defined for sub-trigger {index}' 279 | raise ValueError(msg) 280 | sigma = params['shrink_wrap_gauss_sigma'][index] 281 | if len(params['shrink_wrap_threshold']) - 1 < index: 282 | msg = f'shrink_wrap_threshold not defined for sub-trigger {index}' 283 | raise ValueError(msg) 284 | threshold = params['shrink_wrap_threshold'][index] 285 | return self.Gauss1SW(sigma, threshold) 286 | else: 287 | msg = f'{sw_type} shrink wrap type is not supported' 288 | raise ValueError(msg) 289 | 290 | 291 | class PhaseConstrain(TriggeredOp): 292 | def __init__(self, trig_op): 293 | super().__init__(trig_op) 294 | 295 | class PhasePHC: 296 | def __init__(self, phc_phase_min, phc_phase_max): 297 | self.phc_phase_min = phc_phase_min 298 | self.phc_phase_max = phc_phase_max 299 | 300 | def apply_trigger(self, *args): 301 | ds_image = args[0] 302 | phase = devlib.angle(ds_image) 303 | return (phase > self.phc_phase_min) & (phase < self.phc_phase_max) 304 | 305 | 306 | def create_obj(self, params, index=None, beg=None, end=None): 307 | if 'phc_phase_min' not in params: 308 | msg = 'phc_phase_min parameter not defined' 309 | raise ValueError(msg) 310 | if 'phc_phase_max' not in params: 311 | msg = 'phc_phase_max parameter not defined' 312 | raise ValueError(msg) 313 | if index is None: 314 | phase_min = params['phc_phase_min'] 315 | phase_max = params['phc_phase_max'] 316 | else: 317 | if len(params['phc_phase_min']) - 1 < index: 318 | msg = f'phc_phase_min not defined for sub-trigger {index}' 319 | raise ValueError(msg) 320 | phase_min = params['phc_phase_min'][index] 321 | if len(params['phc_phase_max']) - 1 < index: 322 | msg = f'phc_phase_max not defined for sub-trigger {index}' 323 | raise ValueError(msg) 324 | phase_max = params['phc_phase_max'][index] 325 | return self.PhasePHC(phase_min, phase_max) 326 | 327 | 328 | def create(trig_op, params, trig_op_info): 329 | if trig_op == 'shrink_wrap': 330 | to = ShrinkWrap(trig_op) 331 | if trig_op == 'phc': 332 | to = PhaseConstrain(trig_op) 333 | 334 | # this function sets self.objs and self.f and creates all objects 335 | # It may throw exception 336 | to.create_objs(params, trig_op_info) 337 | return to 338 | 339 | 340 | 341 | 342 | -------------------------------------------------------------------------------- /docs/source/config_rec.rst: -------------------------------------------------------------------------------- 1 | .. _config_rec: 2 | 3 | ========== 4 | config_rec 5 | ========== 6 | | The "config_rec" file defines parameters used during reconstruction process. 7 | | 8 | | Parameters are grouped by trigger and corresponding parameters. 
There is a group of parameters that are not associated with any trigger, and thus, we call them 'general'. 9 | | If a trigger is defined, the feature is active and the related parameters are used when processing the feature. Refer to the :ref:`formula` page for an explanation of triggers, sub-triggers, and algorithm sequence. 10 | 11 | Parameters 12 | ========== 13 | 14 | General 15 | +++++++ 16 | - data_dir 17 | 18 | | Optional, defines a directory with a file named "data.tif" that contains the experiment data ready for reconstruction. Default is /phasing_data. 19 | 20 | :: 21 | 22 | data_dir = "/path/to/phasing_data" 23 | 24 | - save_dir 25 | 26 | | Optional, used for specific cases, default is /results_phasing. Driven by the scripts, the experiment directory contains the following directories: conf, preprocessed_data, phasing_data, results_phasing, and results_viz. The reconstruction results are saved in the /results_phasing directory. If the save_dir parameter is configured, the reconstruction result is saved in this directory. 27 | 28 | :: 29 | 30 | save_dir = "/path/to/results_phasing" 31 | 32 | - init_guess 33 | 34 | | Optional, defines how the initial guess is set. Possible options are: 'random', 'continue', and 'AI_guess'. The choice "random" will generate a random guess, "continue" will start from previously saved results, and "AI_guess" will run an AI reconstruction whose result, saved at /results_AI, is used as the initial guess. Each of these options requires different parameters, explained below. The default is 'random'. 35 | 36 | :: 37 | 38 | init_guess = "random" 39 | 40 | - continue_dir 41 | 42 | | Must be defined if the init_guess parameter is set to 'continue'. Directory from which the initial guess and initial support are read for reconstruction continuation. If the directory contains multiple subdirectories with initial guesses, a thread will start for each subdirectory. 43 | 44 | :: 45 | 46 | continue_dir = "/path/to/some_phasing_results_dir" 47 | 48 | - AI_trained_model 49 | 50 | | Must be defined if init_guess is "AI_guess". Defines the file in hdf5 format that holds the trained model. 51 | 52 | :: 53 | 54 | AI_trained_model = "/path/to/trained/model/trained_model.hdf5" 55 | 56 | - reconstructions 57 | 58 | | Optional, default is 1. Number of reconstructions to start with. Typically used when running the genetic algorithm. 59 | 60 | :: 61 | 62 | reconstructions = 5 63 | 64 | - processing 65 | 66 | | Optional, the library used when running reconstruction. When the auto option is selected, the program will use the best performing library that is available, in the following order: cupy, torch, numpy. The cp option will utilize cupy, torch will utilize torch, and np will utilize numpy. Default is auto. 67 | 68 | :: 69 | 70 | processing = "auto" 71 | 72 | - device 73 | 74 | | Optional, GPU IDs of the target devices for reconstruction(s), or 'all' if all available GPUs should be used. If not defined, the reconstruction process will run on CPU. For a cluster configuration it is defined as a dict with host names as keys and per-host device lists (or 'all') as values, as in the last example below. 75 | 76 | :: 77 | 78 | device = [0,1,2,7] 79 | device = 'all' 80 | device = {'host1':'all', 'host2':[0,1,2,3,4]} 81 | 82 | - algorithm_sequence 83 | 84 | | Mandatory, defines the sequence of algorithms applied in each iteration during modulus projection and during modulus. The "*" character means repeat, and the "+" means add to the sequence. The sequence may contain single brackets defining a group that will be repeated by the preceding multiplier. 
The alphabetic entries: ER, ERpc, HIO, HIOpc define algorithms used in this iteration. The entries will invoke functions as follows: ER definition will invoke 'er' and 'modulus' functions, the ERpc will invoke 'er' and 'pc_modulus', HIO will invoke 'hio' and 'modulus', and HIOpc will invoke 'hio' and 'pc_modulus', 'RAAR' will invoke 'raar' and 'modulus', 'SF' will invoke 'sf' and 'modulus'. The pc_modulus is implementation of modulus with partial coherence correction. In second example the sequence contains sub-triggers, explained in :ref:`formula` page. 85 | 86 | :: 87 | 88 | algorithm_sequence = "2* (20*SF + 180*HIO) + 2* (20*ERpc + 180*HIOpc) + 20*ERpc" 89 | algorithm_sequence = "20*ER.PHC0 + 180*HIO.PHC1 + 2* (20*ER.SW0 + 180*HIO.SW1) + 20*ER.SW2" 90 | 91 | - hio_beta 92 | 93 | | Optional, default is .9. A parameter used in hio algorithm. 94 | 95 | :: 96 | 97 | hio_beta = .9 98 | 99 | - raar_beta 100 | 101 | | Optional, default is .45. A parameter used in raar algorithm. 102 | 103 | :: 104 | 105 | raar_beta = .45 106 | 107 | - initial_support_area 108 | 109 | | Optional, defaults to [.5,.5,.5]. The list define dimensions of initial support area. The support area is calculated by multiplying the fractions in this parameter by the corresponding data array dimensions. The support array is centered. 110 | 111 | :: 112 | 113 | initial_support_area = [.5,.5,.5] 114 | 115 | Twin 116 | ++++ 117 | 118 | - twin_trigger 119 | 120 | | Defines at which iteration to eliminate "twin", i.e. the image under reconstruction is trimmed by preserving a quadrant across x and y dimensions and zeroing the rest of the array. 121 | 122 | :: 123 | 124 | twin_trigger = [2] 125 | 126 | - twin_halves 127 | 128 | | Optional, defines which quadrant of the array is preserved in x and y dimensions, defaults to (0, 0). 129 | | Possible choices: [0, 0], [0, 1], [1, 0], [1,1] 130 | 131 | :: 132 | 133 | twin_halves = [0, 0] 134 | 135 | Shrink wrap 136 | +++++++++++ 137 | | Support area is an array that defines region in which the image is meaningful. This area is recalculated at the shrink wrap trigger iteration, shrinking along when the image develops. The calculations employ an algorithm defined here as shrink_wrap_type. 138 | 139 | - shrink_wrap_trigger 140 | 141 | | Defines when to update support array using the parameters below. 142 | | Alternatively can be defined as list of sub-triggers. If sub-triggers are used, the parameters must be lists as well. 143 | 144 | :: 145 | 146 | shrink_wrap_trigger = [10, 1] 147 | shrink_wrap_trigger = [[10, 1],[0,5,100],[0,2]] # sub-triggers 148 | 149 | - shrink_wrap_type 150 | 151 | | Mandatory, defines type of shrink wrap. Currently only the "GAUSS" type is supported that applies gaussian filter to find support area. 152 | 153 | :: 154 | 155 | shrink_wrap_type = "GAUSS" 156 | shrink_wrap_type = [GAUSS, GAUSS, GAUSS] # sub-triggers 157 | 158 | - shrink_wrap_threshold 159 | 160 | | Mandatory, defines a threshold value used in the gaussian filter algorithm. 161 | 162 | :: 163 | 164 | shrink_wrap_threshold = 0.1 165 | shrink_wrap_threshold = [0.1, 0.11, .12] # sub-triggers 166 | 167 | - shrink_wrap_gauss_sigma 168 | 169 | | Mandatory, defines a sigma value used in the gaussian filter algorithm. 170 | 171 | :: 172 | 173 | shrink_wrap_gauss_sigma = 1.0 174 | shrink_wrap_gauss_sigma = [1.0, 1.1, 1.2] # sub-triggers 175 | 176 | Phase constrain 177 | +++++++++++++++ 178 | | At the beginning iterations the support area is modified in respect to the phase. 
176 | Phase constraint 177 | ++++++++++++++++ 178 | | During the initial iterations the support area is modified with respect to the phase. The support area will include only points whose calculated phase is within the defined bounds. 179 | | Alternatively the trigger can be defined as a list of sub-triggers. If sub-triggers are used, the parameters must be lists as well. 180 | 181 | - phc_trigger 182 | 183 | | Defines when to update the support array, using the parameters below, by applying the phase constraint. 184 | 185 | :: 186 | 187 | phc_trigger = [0, 1, 310] 188 | phc_trigger = [[0, 1, 310], [0,2]] # sub-triggers 189 | 190 | - phc_phase_min 191 | 192 | | Mandatory, defines the lower phase bound. 193 | 194 | :: 195 | 196 | phc_phase_min = -1.57 197 | phc_phase_min = [-1.5, -1.57] # sub-triggers 198 | 199 | - phc_phase_max 200 | 201 | | Mandatory, defines the upper phase bound. 202 | 203 | :: 204 | 205 | phc_phase_max = 1.57 206 | phc_phase_max = [1.5, 1.57] # sub-triggers 207 | 208 | Partial coherence 209 | +++++++++++++++++ 210 | | The partial coherence operation initiates recalculation of the coherence of the data with respect to the amplitudes in reciprocal space. After the coherence array is determined, it is used in a convolution operation during modulus in subsequent iterations. The coherence array is updated periodically, as defined by pc_interval. The partial coherence operation is active if the interval is defined and the algorithm sequence contains "pc" entries. 211 | 212 | - pc_interval 213 | 214 | | Defines the iteration interval between coherence updates. 215 | 216 | :: 217 | 218 | pc_interval = 50 219 | 220 | - pc_type 221 | 222 | | Partial coherence algorithm. Currently "LUCY" is supported. 223 | 224 | :: 225 | 226 | pc_type = "LUCY" 227 | 228 | - pc_LUCY_iterations 229 | 230 | | Optional, defaults to 20. Defines the number of iterations inside the LUCY algorithm. 231 | 232 | :: 233 | 234 | pc_LUCY_iterations = 20 235 | 236 | - pc_normalize 237 | 238 | | Optional, defaults to True. Internal. 239 | 240 | :: 241 | 242 | pc_normalize = True 243 | 244 | - pc_LUCY_kernel 245 | 246 | | Mandatory, the coherence array area. 247 | 248 | :: 249 | 250 | pc_LUCY_kernel = [16, 16, 16] 251 | 252 | Lowpass Filter 253 | ++++++++++++++ 254 | | When active, a lowpass Gaussian filter is applied to the data, with an iteration dependent sigma calculated by linearly spacing the lowpass_filter_range parameter over the trigger span iterations. The low resolution trigger is typically configured to be active during the first part of the iterations. 255 | 256 | - lowpass_filter_trigger 257 | 258 | | Defines when to apply the lowpass filter operation using the parameters below. Typically the last trigger iteration is configured at half of the total iterations. 259 | 260 | :: 261 | 262 | lowpass_filter_trigger = [0, 1, 320] 263 | 264 | - lowpass_filter_range 265 | 266 | | The range is linearly spaced over the trigger iterations to form a list of iteration dependent sigmas. If only one number is given, the last sigma defaults to 1. 267 | 268 | :: 269 | 270 | lowpass_filter_range = [.7, 1.0] 271 | 272 | averaging 273 | +++++++++ 274 | | When this feature is activated, the amplitudes of the last several iterations are averaged. 275 | 276 | - average_trigger 277 | 278 | | Defines when to apply averaging. A negative start means it is an offset from the last iteration. 279 | 280 | :: 281 | 282 | average_trigger = [-65, 1] 283 | 284 | progress 285 | ++++++++ 286 | - progress_trigger 287 | 288 | | Defines when to print progress info on the console. The info includes the current iteration and error.
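Every trigger in this file follows the same [start, step] or [start, step, stop] convention. The sketch below is an illustration only, assuming the total number of iterations is known; it is not the exact cohere_core logic, but shows how such a trigger expands into the iterations at which its feature runs. ::

    def trigger_iterations(trigger, num_iter):
        # a negative start is an offset from the last iteration
        start = trigger[0] if trigger[0] >= 0 else num_iter + trigger[0]
        if len(trigger) == 1:
            return [start]              # single-element triggers fire once
        stop = trigger[2] if len(trigger) > 2 else num_iter
        return list(range(start, min(stop, num_iter), trigger[1]))

    # e.g. trigger_iterations([0, 20], 100) -> [0, 20, 40, 60, 80]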
289 | 290 | :: 291 | 292 | progress_trigger = [0, 20] 293 | 294 | live viewing 295 | ++++++++++++ 296 | | This feature allows for a live view of the amplitude, phase, support, and error as the reconstruction develops. With adaptive multipeak phasing, this will instead show the amplitude, phase, measured diffraction pattern, and adapted diffraction pattern. These are shown using a central slice cropped to half the full array size. 297 | 298 | - live_trigger 299 | 300 | | Defines when to update the live view. 301 | 302 | :: 303 | 304 | live_trigger = [0, 10] 305 | 306 | GA 307 | ++ 308 | - ga_generations 309 | 310 | | Defines number of generations. When defined, and the number is greater than 1, the genetic algorithm (GA) is activated 311 | 312 | :: 313 | 314 | ga_generations = 3 315 | 316 | - ga_metrics 317 | 318 | | Optional, a list of metrics that should be used to rank the reconstruction results for subsequent generations. If not defined, or shorter than number of generations, the metric defaults to "chi". 319 | | If the list contains only one element, it will be used by all generations. 320 | | Supported metrics: 321 | | - 'chi': The last error calculated as norm(rs_amplitudes - data)/norm(data). 322 | | The smallest 'chi' value is the best. 323 | | - 'sharpness': sum(power(abs(image), 4)) 324 | | The smallest 'sharpness' value is the best. 325 | | - 'summed_phase': angle(image) - sum(angle(image) * support) / sum(support) 326 | | where support is calculated with shrink wrap using hardcoded threshold=.2 and sigma=.5 327 | | The greatest 'summed_phase' value is the best. 328 | | - 'area': sum(support) 329 | | where support is calculated with shrink wrap using hardcoded threshold=.2 and sigma=.5 330 | | The greatest 'area' value is the best. 331 | 332 | :: 333 | 334 | ga_metrics = ["chi", "sharpness", "area"] 335 | ga_metrics = ["chi"] 336 | 337 | - ga_breed_modes 338 | 339 | | Optional, a list of breeding modes applied to breed consecutive generation. If not defined, or shorter that number of generations, the mode defaults to "sqrt_ab". 340 | | If the list contains only one element, it will be used by all generations. 341 | | Breeding starts with choosing alpha image. The rest of the images are crossed with alpha. 
Before the crossing, the image, called beta, is aligned with alpha, and the phases in both arrays are normalized to derive ph_alpha = angle(alpha) and ph_beta = angle(beta). 342 | | Supported modes: 343 | | - 'sqrt_ab': sqrt(abs(alpha) * abs(beta)) * exp(0.5j * (ph_beta + ph_alpha)) 344 | | - 'pixel_switch': where((cond > 0.5), beta, alpha); cond = random(shape(beta)) 345 | | - 'b_pa': abs(beta) * exp(1j * (ph_alpha)) 346 | | - '2ab_a_b': 2 * (beta * alpha) / (beta + alpha) 347 | | - '2a_b_pa': (2 * abs(alpha) - abs(beta)) * exp(1j * ph_alpha) 348 | | - 'sqrt_ab_pa': sqrt(abs(alpha) * abs(beta)) * exp(1j * ph_alpha) 349 | | - 'sqrt_ab_recip': fftshift(ifft(fftshift(temp))), where temp is calculated below 350 | | t1 = fftshift(fft(fftshift(beta))) 351 | | t2 = fftshift(fft(fftshift(alpha))) 352 | | temp = sqrt(abs(t1)*abs(t2))*exp(.5j*angle(t1))*exp(.5j*angle(t2)) 353 | | - 'max_ab': max(abs(alpha), abs(beta)) * exp(.5j * (ph_beta + ph_alpha)) 354 | | - 'max_ab_pa': max(abs(alpha), abs(beta)) * exp(1j * ph_alpha) 355 | | - 'avg_ab': 0.5 * (alpha + beta) 356 | | - 'avg_ab_pa': 0.5 * (abs(alpha) + abs(beta)) * exp(1j * (ph_alpha)) 357 | 358 | :: 359 | 360 | ga_breed_modes = ["sqrt_ab", "pixel_switch", "none"] 361 | ga_breed_modes = ["sqrt_ab"] 362 | 363 | - ga_cullings 364 | 365 | | Optional, defines how many of the worst samples to remove in the breeding phase for each generation. If not defined for a generation, the culling defaults to 0. 366 | 367 | :: 368 | 369 | ga_cullings = [2,1] 370 | 371 | - ga_sw_thresholds 372 | 373 | | Optional, a list of threshold values for each generation. The support is recalculated with this threshold after the breeding phase. Defaults to the configured value of support_threshold. 374 | | If the list contains only one element, it will be used by all generations. 375 | 376 | :: 377 | 378 | ga_sw_thresholds = [.15, .1] 379 | 380 | - ga_sw_gauss_sigmas 381 | 382 | | Optional, a list of sigma values for each generation. The support is recalculated with this sigma after the breeding phase. Defaults to the configured value of support_sigma. 383 | | If the list contains only one element, it will be used by all generations. 384 | 385 | :: 386 | 387 | ga_sw_gauss_sigmas = [1.1, 1.0] 388 | 389 | - ga_lpf_sigmas 390 | 391 | | Optional, a list of sigmas that will be used in subsequent generations to calculate a Gaussian low-pass filter applied to the data. In the example given below this feature will be used in the first two generations. 392 | 393 | :: 394 | 395 | ga_lpf_sigmas = [2.0, 1.5] 396 | 397 | - ga_gen_pc_start 398 | 399 | | Optional, a number indicating at which generation partial coherence will start to be active. If not defined, and the pc feature is active, it starts at the first generation. 400 | 401 | :: 402 | 403 | ga_gen_pc_start = 3 404 | 405 | - ga_fast 406 | 407 | | Defines which GA algorithm to use. If present and True, the number of reconstructions is limited to the available resources and the reconstruction is very fast. Otherwise the number of reconstructions is unlimited, but the performance is worse, as the intermediate results must be stored. 408 | 409 | :: 410 | 411 | ga_fast = True -------------------------------------------------------------------------------- /src/cohere_core/data/alien_tools.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. All rights reserved. # 3 | # # 4 | # See LICENSE file.
# 5 | # ######################################################################### 6 | 7 | 8 | """ 9 | This tools auto eliminate aliens from CDI experiment data. It is configuration driven. 10 | 11 | """ 12 | 13 | import numpy as np 14 | import os 15 | import tifffile as tif 16 | from scipy.ndimage import gaussian_filter 17 | import cohere_core.utilities.utils as ut 18 | 19 | __author__ = "Kenly Pelzer, Ross Harder" 20 | __copyright__ = "Copyright (c) 2021, UChicago Argonne, LLC." 21 | __docformat__ = 'restructuredtext en' 22 | __all__ = ['get_asymmetry', 23 | 'analyze_clusters', 24 | 'crop_center', 25 | 'save_arr', 26 | 'save_arrays', 27 | 'auto_alien1', 28 | 'filter_aliens', 29 | 'remove_blocks', 30 | 'remove_aliens'] 31 | 32 | 33 | def get_asymmetry(arr): 34 | """ 35 | Returns asymmetry of an array. 36 | 37 | Parameters 38 | ---------- 39 | arr : ndarray 40 | an array to find asymmetry 41 | 42 | Returns 43 | ------- 44 | ndarray 45 | an array capturing asymmetry of original array 46 | """ 47 | 48 | arr_rev = arr[::-1, ::-1, ::-1] 49 | denom = (arr + arr_rev) / 2.0 50 | denom_nz = np.where(denom == 0, 1.0, denom) 51 | asym = np.where(denom > 0.0, abs(arr - arr_rev) / denom_nz, 0.0) 52 | # asym only assigned to non-zero intensity points in the passed array 53 | return np.where(arr > 0, asym, 0) 54 | 55 | 56 | # add output of absolute cluster size. 57 | def analyze_clusters(arr, labels, nz): 58 | """ 59 | Analyzes clusters and returns characteristics in arrays. 60 | 61 | Parameters 62 | ---------- 63 | arr : ndarray 64 | the analyzed array 65 | labels: arr 66 | cluster labels for each point in the dataset given to fit(). Noisy samples are given the label -1. 67 | nz : tuple 68 | tuple of arrays, each array containing indices of elements in arr that are non-zero along one axis. 69 | 70 | Returns 71 | ------- 72 | tuple 73 | tuple containing the following arrays: 74 | nlabels # number of labels, i.e clusters 75 | labels_arr # array with label for each non zero point 76 | rel_cluster_size # array with cluster size divided by max cluster size for each 77 | # non zero point 78 | cluster_avg # array with cluster average for each non zero point 79 | noise_arr # array with points that are non zero but not in cluster 80 | no_noise # array with noise poits set to 0 81 | label_counts # tuple of two arrays: First is label number, second is number of 82 | # occurances of that label (size of cluster) 83 | cluster_avg_asym # array with average asymmetry of a points in cluster 84 | asymmetry # array of asymmetry with regard to entire array 85 | cluster_size # array with cluster size for each non zero point 86 | """ 87 | 88 | labels_arr = np.zeros_like(arr) 89 | noise_arr = np.zeros_like(arr) 90 | cluster_size = np.zeros_like(arr) 91 | cluster_avg = np.zeros_like(arr).astype(np.float32) 92 | cluster_avg_asym = np.zeros_like(arr).astype(np.float32) 93 | asymmetry = get_asymmetry(arr) 94 | 95 | # label_counts is tuple of two arrays. First is label number, second is number of occurances of that label (size of cluster). 96 | label_counts = np.unique(labels, return_counts=True) 97 | 98 | # nz and labels are the same length. so the indicies given by nz will be set 99 | # to their corresponding cluster number (includes noise pts). 
100 | labels_arr[nz] = labels 101 | 102 | # this selects the nz indicies where labels=-1 (noise) 103 | noise_pts = tuple([nz[n][labels == -1] for n in range(3)]) 104 | no_noise = arr 105 | 106 | # move the points labeled noise into their own array 107 | # remove the noise out of arr (no_noise is copy of arr) 108 | noise_arr[noise_pts] = arr[noise_pts] 109 | no_noise[noise_pts] = 0 110 | nlabels = len(label_counts[0]) 111 | # print("processing labels") 112 | # loop over the labels (clusters). label_counts[0] is the unique labels 113 | for n in range(1, nlabels): 114 | # the nth label from the first array of the label_counts tuple 115 | n_lab = label_counts[0][n] 116 | # the indicies of the points belonging to label n 117 | cluspts = tuple([nz[d][labels == n_lab] for d in range(3)]) 118 | # the second array of the label_counts tuple is the number of points 119 | # with that label. So put those into an array. 120 | cluster_size[cluspts] = label_counts[1][n] 121 | # compute the average intensity of each cluster and write into an array. 122 | cluster_avg[cluspts] = np.sum(arr[cluspts]) / cluspts[0].size 123 | # compute average asym of each cluster and store in array. 124 | cluster_avg_asym[cluspts] = np.sum(asymmetry[cluspts]) / cluspts[0].size 125 | # compute relative cluster sizes to largest (main) cluster. 126 | rel_cluster_size = cluster_size / cluster_size.max() 127 | 128 | # return all of these arrays 129 | return ( 130 | nlabels, labels_arr, rel_cluster_size, cluster_avg, noise_arr, no_noise, label_counts, cluster_avg_asym, 131 | asymmetry, 132 | cluster_size) 133 | 134 | 135 | def crop_center(arr): 136 | """ 137 | Finds max element in array and crops the array to be symetrical with regard to this point in each direction. 138 | 139 | Parameters 140 | ---------- 141 | arr : ndarray 142 | an array 143 | 144 | Returns 145 | ------- 146 | centered : ndarray 147 | an array symetrical in all dimensions around the max element of input array 148 | """ 149 | 150 | shape = arr.shape 151 | # This tells us the point of highest intensity, which we will use as the center for inversion operations 152 | center = np.unravel_index(np.argmax(arr, axis=None), shape) 153 | 154 | # clip the largest possible cuboid putting the point of highest intensity at the center 155 | principium = [] 156 | finis = [] 157 | for i in range(len(shape)): 158 | half_shape = min(center[i], shape[i] - center[i] - 1) 159 | principium.append(center[i] - half_shape) 160 | finis.append(center[i] + half_shape + 1) 161 | centered = arr[principium[0]:finis[0], principium[1]:finis[1], principium[2]:finis[2]] 162 | 163 | return centered 164 | 165 | 166 | def save_arr(arr, dir, fname): 167 | """ 168 | Saves an array in 'tif' format file. 169 | 170 | Parameters 171 | ---------- 172 | arr : ndarray 173 | an array to save 174 | dir : str 175 | directory to save the file to 176 | fname : str 177 | file name 178 | 179 | Returns 180 | ------- 181 | nothing 182 | """ 183 | 184 | if dir is not None: 185 | full_name = ut.join(dir, fname) 186 | else: 187 | full_name = fname # save in the current dir 188 | tif.imwrite(full_name, arr.transpose().astype(np.float32)) 189 | 190 | 191 | def save_arrays(arrs, iter, thresh, eps, dir): 192 | """ 193 | Saves multiple arrays in 'tif' format files. Determines file name from given parameters: iteration, threshold, and eps. 
194 | 195 | Parameters 196 | ---------- 197 | arr : tuple 198 | a tuple of arrays to save 199 | iter, thresh, eps : str, str, str 200 | parameters: iteration, threshold, and eps, to deliver file name from 201 | dir : str 202 | directory to save the file to 203 | 204 | Returns 205 | ------- 206 | nothing 207 | """ 208 | 209 | save_arr(arrs[1], dir, "db%d_%3.2f_labels_arr%3.2f.tif" % (iter, thresh, eps)) 210 | save_arr(arrs[2], dir, "db%d_%3.2f_rel_clustersizes%3.2f.tif" % (iter, thresh, eps)) 211 | save_arr(arrs[3], dir, "db%d_%3.2f_clusteravg%3.2f.tif" % (iter, thresh, eps)) 212 | save_arr(arrs[4], dir, "db%d_%3.2f_noise%3.2f.tif" % (iter, thresh, eps)) 213 | save_arr(arrs[5], dir, "db%d_%3.2f_no_noise%3.2f.tif" % (iter, thresh, eps)) 214 | save_arr(arrs[7], dir, "db%d_%3.2f_clusteravgasym%3.2f.tif" % (iter, thresh, eps)) 215 | save_arr(arrs[8], dir, "db%d_%3.2f_asym%3.2f.tif" % (iter, thresh, eps)) 216 | save_arr(arrs[9], dir, "db%d_%3.2f_abs_clustersizes%3.2f.tif" % (iter, thresh, eps)) 217 | 218 | 219 | def auto_alien1(data, config, data_dir=None): 220 | from sklearn.cluster import DBSCAN 221 | """ 222 | Removes aliens from experimental CDI data using iterative algorithm and returns the result. 223 | 224 | The algorithm follows the steps: 225 | 1. Initialization: 226 | - initialize variables with the configuration parameters 227 | - crop the data array around maximum element to it's biggest size 228 | - sets points below threshold value to 0 229 | - finds non-zero elements of the data array and keeps them as tuples of indices 230 | 2. Iteration loop, runs until number of clasters remains unchanged 231 | - runs DBSCAN algorithm on the non-zero and returns clasters labels 232 | - analyzes the results to find relative clusters sizes, and clusters average asymmetry, and other characteristics 233 | - removes alien clusters, i.e. the ones with relative cluster size below configured size threshold and with average asymmetry over configured asymmetry threshold 234 | - go back to the loop using the non-zero elements of alien removed array to the DBSCAN 235 | 3. 
If configured, add final step to apply gaussian convolusion to the result and use it as a filter with configured sigma as threshold 236 | 237 | Parameters 238 | ---------- 239 | data : ndarray 240 | an array with experiment data 241 | config : Object 242 | configuration object providing access to configuration parameters 243 | data_dir : str 244 | a directory where 'alien_analysis' subdirectory will be created to save results of analysis if configured 245 | 246 | Returns 247 | ------- 248 | cuboid : ndarray 249 | data array with removed aliens 250 | """ 251 | data_dir = data_dir.replace(os.sep, '/') 252 | size_threshold = config.get('AA1_size_threshold', 0.01) 253 | asym_threshold = config.get('AA1_asym_threshold', 1.75) 254 | min_pts = config.get('AA1_min_pts', 5) 255 | eps = config.get('AA1_eps', 1.1) 256 | threshold = config.get('AA1_amp_threshold', 6) 257 | if 'AA1_save_arrs' in config: 258 | save_arrs = config['AA1_save_arrs'] 259 | if save_arrs: 260 | save_dir = ut.join(data_dir, 'alien_analysis') 261 | if not os.path.exists(save_dir): 262 | os.makedirs(save_dir) 263 | else: 264 | save_arrs = False 265 | 266 | expandcleanedsig = config.get('AA1_expandcleanedsigma', 0.0) 267 | 268 | cuboid = crop_center(data) 269 | cuboid = np.where(cuboid >= threshold, cuboid, 0) 270 | if (save_arrs): 271 | save_arr(cuboid, save_dir, "db%3.2f_cuboid%3.2f.tif" % (threshold, eps)) 272 | save_arr(cuboid[::-1, ::-1, ::-1], save_dir, "db%3.2f_cuboidrev%3.2f.tif" % (threshold, eps)) 273 | 274 | # the non_zero is a tuple of arrays, each array containing indices of elements that are non-zero along one axis. 275 | non_zero = cuboid.nonzero() 276 | 277 | # https://scikit-learn.org/stable/modules/clustering.html#dbscan 278 | # labels is same size as input arr with a cluster label per point 279 | iter = 0 280 | nclusters = 0 281 | while (True): 282 | non_zero = cuboid.nonzero() 283 | # print("running db", iter) 284 | labels = DBSCAN(eps=eps, metric='euclidean', min_samples=min_pts, n_jobs=-1).fit_predict( 285 | np.array(non_zero).transpose().astype(np.float32)) 286 | # print("running analyze_clusters", iter) 287 | arrs = analyze_clusters(cuboid, labels, non_zero) 288 | if (save_arrs): 289 | save_arrays(arrs, iter, threshold, eps, save_dir) 290 | if nclusters == arrs[0]: 291 | break 292 | nclusters = arrs[0] 293 | if iter == 0: # keep values for all iterations 294 | rel_cluster_size = arrs[2] 295 | cluster_avg_asym = arrs[7] 296 | # print("cleaning cuboid", iter) 297 | cuboid = np.where(np.logical_and(rel_cluster_size < size_threshold, cluster_avg_asym > asym_threshold), 0.0, 298 | cuboid) 299 | # print("iter", iter, nclusters) 300 | iter += 1 301 | 302 | if (expandcleanedsig > 0): 303 | cuboid = np.where(cuboid > 0, 1.0, 0.0) 304 | sig = [expandcleanedsig, expandcleanedsig, 1.0] 305 | cuboid = gaussian_filter(cuboid, sig) 306 | no_thresh_cuboid = crop_center(data) 307 | cuboid = np.where(cuboid > 0.1, no_thresh_cuboid, 0.0) 308 | return cuboid 309 | 310 | 311 | def remove_blocks(data, config_map): 312 | """ 313 | Sets to zero given alien blocks in the data array. 
314 | 315 | Parameters 316 | ---------- 317 | data : ndarray 318 | an array with experiment data 319 | config : Object 320 | configuration object providing access to configuration parameters 321 | 322 | Returns 323 | ------- 324 | data : ndarray 325 | data array with zeroed out aliens 326 | """ 327 | import ast 328 | 329 | if 'aliens' in config_map: 330 | aliens = ast.literal_eval(config_map['aliens']) 331 | for alien in aliens: 332 | # The ImageJ swaps the x and y axis, so the aliens coordinates needs to be swapped, since ImageJ is used 333 | # to find aliens 334 | data[alien[0]:alien[3], alien[1]:alien[4], alien[2]:alien[5]] = 0 335 | return data 336 | 337 | 338 | def filter_aliens(data, config_map): 339 | """ 340 | Sets to zero points in the data array defined by a file. 341 | 342 | Parameters 343 | ---------- 344 | data : ndarray 345 | an array with experiment data 346 | config : Object 347 | configuration object providing access to configuration parameters 348 | 349 | Returns 350 | ------- 351 | data : ndarray 352 | data array with zeroed out aliens 353 | """ 354 | if 'alien_file' in config_map: 355 | alien_file = config_map['alien_file'] 356 | if os.path.isfile(alien_file): 357 | mask = np.load(alien_file) 358 | for i in range(len(mask.shape)): 359 | if mask.shape[i] != data.shape[i]: 360 | print('exiting, mask must be of the same shape as data:', data.shape) 361 | return 362 | data = np.where((mask == 1), data, 0.0) 363 | else: 364 | print(f'alien file does not exist {alien_file}') 365 | else: 366 | print('alien_file parameter not configured') 367 | return data 368 | 369 | 370 | def remove_aliens(data, config_map, data_dir=None): 371 | """ 372 | Finds which algorithm is cofigured to remove the aliens and applies it to clean the data. 373 | 374 | Parameters 375 | ---------- 376 | data : ndarray 377 | an array with experiment data 378 | config : Object 379 | configuration object providing access to configuration parameters 380 | data_dir : str 381 | a directory where 'alien_analysis' subdirectory will be created to save results of analysis if configured 382 | Returns 383 | ------- 384 | data : ndarray 385 | data array without aliens 386 | """ 387 | 388 | if 'alien_alg' in config_map: 389 | algorithm = config_map['alien_alg'] 390 | if algorithm == 'block_aliens': 391 | data = remove_blocks(data, config_map) 392 | elif algorithm == 'alien_file': 393 | data = filter_aliens(data, config_map) 394 | elif algorithm == 'AutoAlien1': 395 | data = auto_alien1(data, config_map, data_dir) 396 | elif algorithm != 'none': 397 | print('not supported alien removal algorithm', algorithm) 398 | else: 399 | print('alien_alg not configured') 400 | 401 | return data 402 | 403 | ## https://stackoverflow.com/questions/51503672/decorator-for-timeit-timeit-method/51503837#51503837 404 | # from functools import wraps 405 | # from time import time 406 | # 407 | # def measure(func): 408 | # @wraps(func) 409 | # def _time_it(*args, **kwargs): 410 | # start = int(round(time() * 1000)) 411 | # try: 412 | # return func(*args, **kwargs) 413 | # finally: 414 | # end_ = int(round(time() * 1000)) - start 415 | # print(f"Total execution time: {end_ if end_ > 0 else 0} ms") 416 | # 417 | 418 | -------------------------------------------------------------------------------- /src/cohere_core/utilities/utils.py: -------------------------------------------------------------------------------- 1 | # ######################################################################### 2 | # Copyright (c) , UChicago Argonne, LLC. 
All rights reserved. # 3 | # # 4 | # See LICENSE file. # 5 | # ######################################################################### 6 | 7 | """ 8 | cohere_core.utils 9 | ================= 10 | 11 | This module is a suite of utility functions. 12 | """ 13 | 14 | import tifffile as tf 15 | import numpy as np 16 | import os 17 | import logging 18 | import stat 19 | import scipy.ndimage as ndi 20 | import ast 21 | import importlib 22 | 23 | 24 | __author__ = "Barbara Frosik" 25 | __copyright__ = "Copyright (c), UChicago Argonne, LLC." 26 | __docformat__ = 'restructuredtext en' 27 | __all__ = [ 28 | 'adjust_dimensions', 29 | 'binning', 30 | 'center_max', 31 | 'crop_center', 32 | 'get_central_object_extent', 33 | 'get_good_dim', 34 | 'get_lib', 35 | 'get_logger', 36 | 'get_oversample_ratio', 37 | 'join', 38 | 'normalize', 39 | 'pad_center', 40 | 'read_config', 41 | 'read_tif', 42 | 'resample', 43 | 'select_central_object', 44 | 'save_tif', 45 | 'save_metrics', 46 | 'threshold_by_edge', 47 | 'write_config', 48 | 'write_plot_errors', 49 | ] 50 | 51 | 52 | def adjust_dimensions(arr, pads, next_fast_len=True, pkg='np'): 53 | """ 54 | This function adds to or subtracts from each dimension of the array elements defined by pad. If the pad is positive, the array is padded in this dimension. If the pad is negative, the array is cropped. 55 | 56 | :param arr: ndarray, the array to pad/crop 57 | :param pad: list of pad values, a tuple of two int for each dimension. The values in each tuple will be added/subtracted to the sides of array in corresponding dimension. 58 | :param next_fast_len: bool, whether or not to find the next fast length for each dimension 59 | :param pkg: package acronym: 'cp' for cupy, 'torch' for torch, 'np' for numpy 60 | :return: the padded/cropped and adjusted to opencl compatible format array 61 | """ 62 | # up the dimensions to 3D 63 | for _ in range(len(arr.shape), 3): 64 | arr = np.expand_dims(arr,axis=0) 65 | pads = [(0,0)] + pads 66 | 67 | old_dims = arr.shape 68 | start = [max(0, -pad[0]) for pad in pads] 69 | stop = [arr.shape[i] - max(0, -pads[i][1]) for i in range(3)] 70 | cropped = arr[start[0]:stop[0], start[1]:stop[1], start[2]:stop[2]] #for 1D and 2D it was upped to 3D 71 | 72 | dims = cropped.shape 73 | c_vals = [] 74 | new_pad = [] 75 | for i in range(len(dims)): 76 | pad = pads[i] 77 | # find a good dimension and find padding 78 | temp_dim = old_dims[i] + pad[0] + pad[1] 79 | if next_fast_len and temp_dim > 1: 80 | new_dim = get_good_dim(temp_dim, pkg) 81 | else: 82 | new_dim = temp_dim 83 | added = new_dim - temp_dim 84 | # if the pad is positive 85 | pad_front = max(0, pad[0]) + int(added / 2) 86 | pad_end = new_dim - dims[i] - pad_front 87 | new_pad.append((pad_front, pad_end)) 88 | c_vals.append((0.0, 0.0)) 89 | adjusted = np.pad(cropped, new_pad, 'constant', constant_values=c_vals) 90 | 91 | return np.squeeze(adjusted) 92 | 93 | 94 | def binning(array, binsizes): 95 | """ 96 | This function does the binning of the array. The array is binned in each dimension by the corresponding binsizes elements. 97 | If binsizes list is shorter than the array dimensions, the remaining dimensions are not binned. 
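    Illustrative example (values chosen for illustration only; assumes numpy imported as np, as at the top of this module; element [0, 0] sums 0 + 1 + 4 + 5)::

        binned = binning(np.arange(16).reshape(4, 4), [2, 2])
        # binned.shape == (2, 2); binned[0, 0] == 10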
98 | 99 | :param array: ndarray the original array to be binned 100 | :param binsizes: a list defining binning factors for corresponding dimensions 101 | :return: binned array 102 | """ 103 | data_dims = array.shape 104 | # trim array 105 | for ax in range(len(binsizes)): 106 | cut_slices = range(data_dims[ax] - data_dims[ax] % binsizes[ax], data_dims[ax]) 107 | array = np.delete(array, cut_slices, ax) 108 | 109 | binned_array = array 110 | new_shape = list(array.shape) 111 | 112 | for ax in range(len(binsizes)): 113 | if binsizes[ax] > 1: 114 | new_shape[ax] = binsizes[ax] 115 | new_shape.insert(ax, int(array.shape[ax] / binsizes[ax])) 116 | binned_array = np.reshape(binned_array, tuple(new_shape)) 117 | binned_array = np.sum(binned_array, axis=ax + 1) 118 | new_shape = list(binned_array.shape) 119 | return binned_array 120 | 121 | 122 | def center_max(arr): 123 | """ 124 | Finds maximum value in the array, and shifts in each dimension to put the max in a center. 125 | 126 | :param arr: ndarray, array to be centered 127 | :return: centered array 128 | """ 129 | shift = (np.array(arr.shape)/2) - np.unravel_index(np.argmax(arr), arr.shape) 130 | return np.roll(arr, shift.astype(int), tuple(range(arr.ndim))), shift.astype(int) 131 | 132 | 133 | def crop_center(arr, new_shape): 134 | """ 135 | This function crops the array to the new size, keeping the center of the array. 136 | The new_size must be smaller or equal to the original size in each dimension. 137 | 138 | :param arr: ndarray, the array to crop 139 | :param new_shape: tuple, new size 140 | :return: cropped array 141 | """ 142 | shape = arr.shape 143 | principio = [] 144 | finem = [] 145 | for i in range(3): 146 | principio.append(int((shape[i] - new_shape[i]) / 2)) 147 | finem.append(principio[i] + new_shape[i]) 148 | if len(shape) == 1: 149 | cropped = arr[principio[0]: finem[0]] 150 | elif len(shape) == 2: 151 | cropped = arr[principio[0]: finem[0], principio[1]: finem[1]] 152 | elif len(shape) == 3: 153 | cropped = arr[principio[0]: finem[0], principio[1]: finem[1], principio[2]: finem[2]] 154 | else: 155 | raise NotImplementedError 156 | return cropped 157 | 158 | 159 | def get_central_object_extent(fp: np.ndarray) -> list: 160 | """ 161 | Calculates extent of central cluster. 162 | 163 | :param fp: ndarray tofind extend for 164 | :return: list, an extend 165 | """ 166 | fp_cut = threshold_by_edge(np.abs(fp)) 167 | need = select_central_object(fp_cut) 168 | 169 | # get extend of cluster 170 | extent = [np.max(s) + 1 - np.min(s) for s in np.nonzero(need)] 171 | return extent 172 | 173 | 174 | def get_good_dim(dim, pkg): 175 | """ 176 | Returns the even dimension that the given package found to be good for fast Fourier transform and not smaller than given dimension. . 177 | 178 | :param dim: int, initial dimension 179 | :param pck: python package that will be used for reconstruction: 'np' for numpy, 'cp' for cupy, 'torch' for torch. 180 | :return: a new dimension 181 | """ 182 | devlib = get_lib(pkg) 183 | new_dim = devlib.next_fast_len(dim) 184 | while new_dim % 2 == 1: 185 | new_dim += 1 186 | new_dim = devlib.next_fast_len(new_dim) 187 | return new_dim 188 | 189 | 190 | def get_lib(pkg): 191 | """ 192 | Dynamically imports library module specified in input. 
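    Illustrative usage (the returned object is the wrapper for the requested backend)::

        devlib = get_lib('np')   # cohere_core.lib.nplib.nplib, the numpy backend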
193 | 194 | :param pkg: package acronym 195 | :return: object, library module 196 | """ 197 | if pkg == 'cp': 198 | devlib = importlib.import_module('cohere_core.lib.cplib').cplib 199 | elif pkg == 'np': 200 | devlib = importlib.import_module('cohere_core.lib.nplib').nplib 201 | elif pkg == 'torch': 202 | devlib = importlib.import_module('cohere_core.lib.torchlib').torchlib 203 | else: 204 | devlib = None 205 | 206 | return devlib 207 | 208 | 209 | def get_logger(name, ldir=''): 210 | """ 211 | Creates looger instance that will write to default.log file in a given directory. 212 | 213 | :param name: str, logger name 214 | :param ldir: str, directory where to create log file 215 | :return: logger object from logging module 216 | """ 217 | logger = logging.getLogger(name) 218 | logger.setLevel(logging.DEBUG) 219 | log_file = join(ldir, 'default.log') 220 | fh = logging.FileHandler(log_file) 221 | fh.setLevel(logging.DEBUG) 222 | formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') 223 | fh.setFormatter(formatter) 224 | logger.addHandler(fh) 225 | return logger 226 | 227 | 228 | def get_oversample_ratio(fp: np.ndarray) -> np.ndarray: 229 | """ 230 | Author: Yudong Yao 231 | 232 | Calculates oversampling ratio. 233 | 234 | :param fp: ndarray to calculate oversampling ratio 235 | :return: oversample ratio in each dimension 236 | """ 237 | # autocorrelation 238 | acp = np.fft.fftshift(np.fft.ifftn(np.abs(fp)**2.)) 239 | aacp = np.abs(acp) 240 | 241 | # get extent 242 | blob = get_central_object_extent(aacp) 243 | 244 | # correct for underestimation due to thresholding 245 | correction = [0.025, 0.025, 0.0729][:fp.ndim] 246 | 247 | extent = [ 248 | min(m, s + int(round(f * aacp.shape[i], 1))) 249 | for i, (s, f, m) in enumerate(zip(blob, correction, aacp.shape)) 250 | ] 251 | 252 | # oversample ratio 253 | oversample = [ 254 | 2. * s / (e + (1 - s % 2)) for s, e in zip(aacp.shape, extent) 255 | ] 256 | return np.round(oversample, 3) 257 | 258 | 259 | def join(*args): 260 | """ 261 | Operation on path. Joins arguments in Path string and replaces OS separators with Linux type separators. 262 | 263 | :param args: variable number of arguments 264 | :return: path 265 | """ 266 | return os.path.join(*args).replace(os.sep, '/') 267 | 268 | 269 | def normalize(vec): 270 | """ 271 | Normalizes vector. 272 | 273 | :param vec: vector 274 | :return: normalized vector 275 | """ 276 | return vec / np.linalg.norm(vec) 277 | 278 | 279 | def pad_center(arr, new_shape): 280 | """ 281 | This function pads the array with zeros to the new shape with the array in the center. 282 | 283 | :param arr: ndarray, the original array to be padded 284 | :param new_shape: tuple, new dimensions 285 | :return: the zero padded centered array 286 | """ 287 | shape = arr.shape 288 | centered = np.zeros(new_shape, arr.dtype) 289 | if len(shape) == 1: 290 | centered[: shape[0]] = arr 291 | elif len(shape) == 2: 292 | centered[: shape[0], : shape[1]] = arr 293 | elif len(shape) == 3: 294 | centered[: shape[0], : shape[1], : shape[2]] = arr 295 | 296 | for i in range(len(new_shape)): 297 | centered = np.roll(centered, (new_shape[i] - shape[i] + 1) // 2, i) 298 | 299 | return centered 300 | 301 | 302 | def read_config(config): 303 | """ 304 | Checks if the file exists and parses it into a dictionary. 
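    Illustrative usage (the path and parameter values are hypothetical; parameter names follow the config_rec documentation)::

        # config_rec contains, for example:
        #   reconstructions = 2
        #   algorithm_sequence = "3* (20*ER + 180*HIO) + 20*ER"
        conf = read_config('/path/to/conf/config_rec')
        # conf == {'reconstructions': 2,
        #          'algorithm_sequence': '3* (20*ER + 180*HIO) + 20*ER'}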
305 | 306 | :param config: str, configuration file name 307 | :return: dictionary containing parsed configuration, None if the given file does not exist 308 | """ 309 | config = config.replace(os.sep, '/') 310 | if not os.path.isfile(config): 311 | print(config, 'is not a file') 312 | return None 313 | 314 | param_dict = {} 315 | input = open(config, 'r') 316 | line = input.readline() 317 | while line: 318 | # Ignore comment lines and move along 319 | line = line.strip() 320 | if line.startswith('/') or line.startswith('#'): 321 | line = input.readline() 322 | continue 323 | elif "=" in line: 324 | param, value = line.split('=') 325 | # do not replace in strings 326 | value = value.strip() 327 | if value.startswith('('): 328 | value = value.strip().replace('(','[').replace(')',']') 329 | try: 330 | param_dict[param.strip()] = ast.literal_eval(value) 331 | except: 332 | print(f'{param}: {value}\n string value should be surrounded by "" ') 333 | raise 334 | 335 | line = input.readline() 336 | input.close() 337 | return param_dict 338 | 339 | 340 | def read_tif(filename): 341 | """ 342 | This method reads tif type file and returns the data as ndarray. 343 | 344 | :param filename: file name 345 | :return: ndarray with the tif file data 346 | """ 347 | return tf.imread(filename.replace(os.sep, '/')).transpose() 348 | 349 | 350 | def resample(IN, dim): 351 | """ 352 | Author: Yudong Yao 353 | 354 | Resamples to new dimensions. 355 | 356 | :param IN: ndarray 357 | :param dim: new dim 358 | :return: resampled array 359 | """ 360 | ft = np.fft.fftshift(np.fft.fftn(IN)) / np.prod(IN.shape) 361 | 362 | pad_value = np.array(dim) // 2 - np.array(ft.shape) // 2 363 | pad = [[pad_value[0], pad_value[0]], [pad_value[1], pad_value[1]], 364 | [pad_value[2], pad_value[2]]] 365 | ft_resize = adjust_dimensions(ft, pad) 366 | output = np.fft.ifftn(np.fft.ifftshift(ft_resize)) * np.prod(dim) 367 | return output 368 | 369 | 370 | def select_central_object(fp: np.ndarray) -> np.ndarray: 371 | """ 372 | Author: Yudong Yao 373 | 374 | Returns array with central object from input array. 375 | 376 | :param fp: array 377 | :return: central object array 378 | """ 379 | # import scipy.ndimage as ndimage 380 | zero = 1e-6 381 | binary = np.abs(fp) 382 | binary[binary > zero] = 1 383 | binary[binary <= zero] = 0 384 | 385 | # cluster by connectivity 386 | struct = ndi.morphology.generate_binary_structure(fp.ndim, 1).astype("uint8") 387 | label, nlabel = ndi.label(binary, structure=struct) 388 | 389 | # select largest cluster 390 | select = np.argmax(np.bincount(np.ravel(label))[1:]) + 1 391 | 392 | binary[label != select] = 0 393 | 394 | fp[binary == 0] = 0 395 | return fp 396 | 397 | 398 | def save_tif(arr, filename): 399 | """ 400 | Saves array in tif format file. 401 | 402 | :param arr: ndarray, array to save 403 | :param filename: file name 404 | """ 405 | tf.imwrite(filename.replace(os.sep, '/'), np.abs(arr).transpose().astype(np.float32)) 406 | 407 | 408 | def save_metrics(errs, dir, metrics=None): 409 | """ 410 | Saves arrays metrics and errors by iterations in text file. 
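    Illustrative usage (the directory and values are hypothetical)::

        save_metrics([0.31, 0.12, 0.05], '/path/to/results_phasing', metrics={'chi': 0.05})
        # writes /path/to/results_phasing/summary with the metric and the per-iteration errors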
411 | 412 | :param errs: list of "chi" errors by iteration 413 | :param dir: directory to write the file containing array metrics 414 | :param metrics: dictionary with metric type keys, and metric values 415 | """ 416 | metric_file = join(dir, 'summary') 417 | linesep = os.linesep 418 | with open(metric_file, 'w+') as mf: 419 | if metrics is not None: 420 | mf.write(f'metric result{linesep}') 421 | for key in metrics: 422 | value = metrics[key] 423 | mf.write(f'{key} = {str(value)}{linesep}') 424 | mf.write(f'{linesep}errors by iteration{linesep}') 425 | for er in errs: 426 | mf.write(f'{er} ') 427 | mf.close() 428 | 429 | 430 | def threshold_by_edge(fp: np.ndarray) -> np.ndarray: 431 | """ 432 | Author: Yudong Yao 433 | 434 | :param fp: ndarray to threshold 435 | :return: binary ndarray, 1 where fp exceeds the edge-based cut, 0 elsewhere 436 | """ 437 | # threshold by left edge value 438 | mask = np.ones_like(fp, dtype=bool) 439 | mask[tuple([slice(1, None)] * fp.ndim)] = 0 440 | zero = 1e-6 441 | cut = np.max(fp[mask]) 442 | binary = np.zeros_like(fp) 443 | binary[(np.abs(fp) > zero) & (fp > cut)] = 1 444 | return binary 445 | 446 | 447 | def write_config(param_dict, config): 448 | """ 449 | Writes configuration to a file. 450 | 451 | :param param_dict: dictionary containing configuration parameters 452 | :param config: configuration file name the parameters will be written into 453 | """ 454 | with open(config.replace(os.sep, '/'), 'w+') as cf: 455 | cf.truncate(0) 456 | linesep = os.linesep 457 | for key, value in param_dict.items(): 458 | if type(value) == str: 459 | value = f'"{value}"' 460 | cf.write(f'{key} = {str(value)}{linesep}') 461 | 462 | 463 | def write_plot_errors(save_dir): 464 | """ 465 | Creates a python executable that draws a plot of errors by iteration. It assumes that the given directory 466 | contains an "errors.npy" file. 467 | 468 | :param save_dir: directory containing errors.npy file 469 | """ 470 | plot_file = join(save_dir, 'plot_errors.py') 471 | f = open(plot_file, 'w+') 472 | f.write("#! /usr/bin/env python\n") 473 | f.write("import matplotlib.pyplot as plt\n") 474 | f.write("import numpy as np\n") 475 | f.write("import sys\n") 476 | f.write("import os\n") 477 | f.write("current_dir = sys.path[0]\n") 478 | f.write("errs = np.load(current_dir + '/errors.npy').tolist()\n") 479 | f.write("errs.pop(0)\n") 480 | f.write("plt.plot(errs)\n") 481 | f.write("plt.ylabel('errors')\n") 482 | f.write("plt.show()") 483 | f.close() 484 | st = os.stat(plot_file) 485 | os.chmod(plot_file, st.st_mode | stat.S_IEXEC) 486 | 487 | 488 | # https://stackoverflow.com/questions/51503672/decorator-for-timeit-timeit-method/51503837#51503837 489 | from functools import wraps 490 | from time import time 491 | 492 | def measure(func): 493 | @wraps(func) 494 | def _time_it(*args, **kwargs): 495 | start = int(round(time() * 1000)) 496 | try: 497 | return func(*args, **kwargs) 498 | finally: 499 | end_ = int(round(time() * 1000)) - start 500 | print(f"Total execution time: {end_ if end_ > 0 else 0} ms") 501 | 502 | return _time_it 503 | --------------------------------------------------------------------------------
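A short usage sketch of the measure decorator defined at the end of utils.py above; the decorated function here is hypothetical and shown only to illustrate how the timing wrapper is applied:

    from cohere_core.utilities.utils import measure

    @measure                      # prints the total execution time in ms when the call returns
    def run_step():
        return sum(range(1_000_000))

    run_step()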