├── .github └── workflows │ ├── deploy.yml │ ├── test.yml │ ├── test_cellpose.yml │ └── test_stardist.yml ├── .gitignore ├── .travis.yml ├── CITATION.cff ├── README.md ├── active_plugins ├── addnoise.py ├── calculatemoments.py ├── callbarcodes.py ├── compensatecolors.py ├── cpij │ ├── __init__.py │ ├── bridge.py │ └── server.py ├── declumpobjects.py ├── distancetransform.py ├── enforceobjectsonetoone.py ├── enhancedmeasuretexture.py ├── filterobjects_stringmatch.py ├── histogramequalization.py ├── histogrammatching.py ├── pixelshuffle.py ├── runcellpose.py ├── runilastik.py ├── runimagejscript.py ├── runomnipose.py ├── runstardist.py ├── runvista2d.py └── variancetransform.py ├── dockerfiles └── RunCellpose │ ├── Dockerfile │ └── download_cellpose_models.py ├── documentation └── CP-plugins-documentation │ ├── _config.yml │ ├── _toc.yml │ ├── citing.md │ ├── contributing_plugins.md │ ├── images │ ├── Install_environment_instructions │ │ ├── 2022-05-26T15-39-07.png │ │ └── file-import-solid.svg │ ├── Install_environment_instructions_windows │ │ ├── 2022-06-02T20-56-38.png │ │ ├── 2022-06-02T21-00-53.png │ │ ├── 2022-06-02T21-05-30.png │ │ ├── 2022-06-02T21-11-49.png │ │ ├── 2022-06-02T21-16-31.png │ │ ├── 2022-06-02T21-17-09.png │ │ ├── 2022-06-02T21-30-10.png │ │ ├── 2022-06-02T21-39-05.png │ │ ├── 2022-06-02T21-43-56.png │ │ ├── Control_panel.png │ │ ├── Control_panel_ev.png │ │ ├── Java-download.png │ │ ├── build-tools-download.png │ │ └── windows-file-explorer.png │ └── citation.png │ ├── overview.md │ ├── runcellpose.md │ ├── supported_plugins.md │ ├── troubleshooting.md │ ├── unsupported_plugins.md │ ├── using_plugins.md │ └── versions.md ├── setup.py ├── tests ├── __init__.py ├── conftest.py ├── headless_test │ ├── 4.2.5_plugins_test_pipeline_BASIC.cppipe │ ├── 4.2.5_plugins_test_pipeline_CELLPOSE.cppipe │ ├── 4.2.5_plugins_test_pipeline_STARDIST.cppipe │ ├── test_pipeline_img │ │ └── skimage-mitosis-img.tiff │ └── test_run.sh ├── resources │ ├── callbarcodes_Barcodes.csv │ └── runimagejscript_dummyscript.py ├── test_histogramequalization.py ├── test_runimagejscript.py └── test_runvista2d.py ├── unmaintained_plugins ├── CellProfiler2 │ ├── calculatehistogram.py │ ├── calculatemoments.py │ ├── enhancedmeasuretexture.py │ ├── identifylinearobjects.py │ └── transform.py ├── CellProfiler3 │ ├── DoGNet.py │ ├── DoGNetWeights │ │ └── Simple_Anisotropic_4_11_2_prism17.t7 │ ├── activecontourmodel.py │ ├── blobdetection.py │ ├── calculatehistogram.py │ ├── calculatemoments.py │ ├── callbarcodes.py │ ├── cellstar │ │ ├── __init__.py │ │ ├── core │ │ │ ├── __init__.py │ │ │ ├── config.py │ │ │ ├── image_repo.py │ │ │ ├── parallel │ │ │ │ ├── __init__.py │ │ │ │ └── snake_grow.py │ │ │ ├── point.py │ │ │ ├── polar_transform.py │ │ │ ├── seed.py │ │ │ ├── seeder.py │ │ │ ├── snake.py │ │ │ └── snake_filter.py │ │ ├── parameter_fitting │ │ │ ├── __init__.py │ │ │ ├── pf_auto_params.py │ │ │ ├── pf_mutator.py │ │ │ ├── pf_process.py │ │ │ ├── pf_rank_process.py │ │ │ ├── pf_rank_snake.py │ │ │ ├── pf_runner.py │ │ │ └── pf_snake.py │ │ ├── segmentation.py │ │ └── utils │ │ │ ├── __init__.py │ │ │ ├── calc_util.py │ │ │ ├── debug_util.py │ │ │ ├── image_util.py │ │ │ ├── index.py │ │ │ └── params_util.py │ ├── classifypixelsunet.py │ ├── compensatecolors.py │ ├── constrainobjects.py │ ├── convertoutlinestoobjects.py │ ├── declump.py │ ├── edgedetection.py │ ├── enhancedmeasuretexture.py │ ├── exporttocellh5.py │ ├── gammacorrection.py │ ├── histogramequalization.py │ ├── identifylinearobjects.py │ 
├── identifyyeastcells.py │ ├── imagegradient.py │ ├── laplacianofgaussian.py │ ├── measure_population_context.py │ ├── measureimagefocus.py │ ├── measureradialentropy.py │ ├── measuretrackquality.py │ ├── mergeobjects.py │ ├── nucleaizer.py │ ├── predict.py │ ├── randomwalkeralgorithm.py │ ├── rescale_mean_sd.py │ ├── rescale_mode_percentile.py │ ├── runimagej.py │ ├── save_16bit_pngs.py │ ├── seedobjects.py │ ├── shollanalysis.py │ ├── spectralanalysis.py │ ├── tests │ │ └── conftest.py │ ├── thresh_all.py │ ├── tophattransform.py │ ├── transform.py │ └── transformfilters.py ├── CellProfiler4 │ └── declumpobjects.py └── CellProfiler4_autoconverted │ ├── DoGNet.py │ ├── README.md │ ├── activecontourmodel.py │ ├── blobdetection.py │ ├── calculatehistogram.py │ ├── calculatemoments.py │ ├── classifypixelsunet.py │ ├── constrainobjects.py │ ├── convertoutlinestoobjects.py │ ├── edgedetection.py │ ├── exporttocellh5.py │ ├── gammacorrection.py │ ├── histogramequalization.py │ ├── identifylinearobjects.py │ ├── identifyyeastcells.py │ ├── imagegradient.py │ ├── laplacianofgaussian.py │ ├── measureimagefocus.py │ ├── measurepopulationcontext.py │ ├── measureradialentropy.py │ ├── measuretrackquality.py │ ├── mergeobjects.py │ ├── nucleaizer.py │ ├── predict.py │ ├── randomwalkeralgorithm.py │ ├── rescalemeansd.py │ ├── rescalemodepercentile.py │ ├── runimagej.py │ ├── save16bitpngs.py │ ├── seedobjects.py │ ├── shollanalysis.py │ ├── spectralanalysis.py │ ├── testallthresholds.py │ ├── tophattransform.py │ ├── transformfilters.py │ └── transforms.py └── unmaintained_tests ├── test_blobdetection.py ├── test_constrainobjects.py ├── test_edgedetection.py ├── test_gammacorrection.py ├── test_identifyyeastcells.py ├── test_imagegradient.py ├── test_laplacianofgaussian.py ├── test_measuretrackquality.py ├── test_mergeobjects.py ├── test_randomwalkeralgorithm.py ├── test_seedobjects.py ├── test_shollanalysis.py └── test_tophattransform.py /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: deploy-documentation 2 | 3 | # Only run this when the master branch changes 4 | on: 5 | push: 6 | branches: 7 | - master 8 | # Only run if edits in CP-plugins-documentation 9 | paths: 10 | - documentation/CP-plugins-documentation/** 11 | - .github/workflows/deploy.yml 12 | 13 | # This job installs dependencies, builds the book, and pushes it to `gh-pages` 14 | jobs: 15 | deploy-book: 16 | runs-on: ubuntu-latest 17 | steps: 18 | - uses: actions/checkout@v3 19 | 20 | # Install dependencies 21 | - name: Set up Python 3.8 22 | uses: actions/setup-python@v4 23 | with: 24 | python-version: 3.8 25 | 26 | - name: Install dependencies 27 | run: | 28 | pip install jupyter-book 29 | 30 | # Build the book 31 | - name: Build the book 32 | run: | 33 | jupyter-book build documentation/CP-plugins-documentation/ 34 | 35 | # Push the book's HTML to github-pages 36 | - name: GitHub Pages action 37 | uses: peaceiris/actions-gh-pages@v3.6.1 38 | with: 39 | github_token: ${{ secrets.GITHUB_TOKEN }} 40 | publish_dir: ./documentation/CP-plugins-documentation/_build/html 41 | cname: plugins.cellprofiler.org 42 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: -------------------------------------------------------------------------------- 1 | name: Basic plugin testing 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | types: [opened, reopened, synchronize, ready_for_review, 
review_requested] 9 | 10 | jobs: 11 | build_cellprofiler: 12 | name: Build CellProfiler 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions/setup-python@v2 17 | with: 18 | architecture: x64 19 | python-version: ${{ matrix.python-version }} 20 | - name: Get pip cache dir 21 | id: pip-cache-dir 22 | run: | 23 | echo "::set-output name=dir::$(pip cache dir)" 24 | - name: Cache pip 25 | uses: actions/cache@v3 26 | id: cache-pip 27 | with: 28 | path: ${{ steps.pip-cache-dir.outputs.dir }} 29 | key: ${{ runner.os }}-${{ env.pythonLocation }}-${{ hashFiles('**/setup.py') }} 30 | - name: Set up Python 31 | uses: actions/setup-python@v2 32 | with: 33 | python-version: 3.8 34 | 35 | - name: Install CellProfiler dependencies 36 | run: | 37 | sudo apt-get update 38 | sudo apt install -y make gcc build-essential libgtk-3-dev 39 | sudo apt-get install -y python3-pip openjdk-11-jdk-headless default-libmysqlclient-dev libnotify-dev libsdl2-dev 40 | export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64 41 | export PATH=$PATH:/home/ubuntu/.local/bin 42 | 43 | - name: Install CellProfiler 44 | run: | 45 | wget https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-20.04/wxPython-4.1.0-cp38-cp38-linux_x86_64.whl 46 | pip install wxPython-4.1.0-cp38-cp38-linux_x86_64.whl 47 | pip install --upgrade pip setuptools wheel 48 | pip install numpy 49 | pip install git+https://github.com/CellProfiler/centrosome.git@cython3 50 | 51 | - name: Install basic CellProfiler plugins require 52 | run: | 53 | pip install -e . 54 | - name: Run basic plugin pipeline 55 | run: | 56 | python -m cellprofiler -c -r -p ./tests/headless_test/4.2.5_plugins_test_pipeline_BASIC.cppipe -i ./tests/headless_test/test_pipeline_img -o . --plugins-directory=./active_plugins --log-level=DEBUG 2>&1 | tee logfile 57 | sh ./tests/headless_test/test_run.sh ", module compensatecolors" logfile 58 | sh ./tests/headless_test/test_run.sh "module distancetransform" logfile 59 | sh ./tests/headless_test/test_run.sh "module enhancedmeasuretexture" logfile 60 | sh ./tests/headless_test/test_run.sh "module histogramequalization" logfile 61 | sh ./tests/headless_test/test_run.sh "module histogrammatching" logfile 62 | sh ./tests/headless_test/test_run.sh "module pixelshuffle" logfile 63 | 64 | -------------------------------------------------------------------------------- /.github/workflows/test_cellpose.yml: -------------------------------------------------------------------------------- 1 | name: CellProfiler-Cellpose 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | types: [opened, reopened, synchronize, ready_for_review, review_requested] 9 | 10 | jobs: 11 | build_cellprofiler: 12 | name: Test CellProfiler-Cellpose 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions/setup-python@v2 17 | with: 18 | architecture: x64 19 | python-version: ${{ matrix.python-version }} 20 | - name: Get pip cache dir 21 | id: pip-cache-dir 22 | run: | 23 | echo "::set-output name=dir::$(pip cache dir)" 24 | - name: Cache pip 25 | uses: actions/cache@v3 26 | id: cache-pip 27 | with: 28 | path: ${{ steps.pip-cache-dir.outputs.dir }} 29 | key: ${{ runner.os }}-${{ env.pythonLocation }}-${{ hashFiles('**/setup.py') }} 30 | - name: Set up Python 31 | uses: actions/setup-python@v2 32 | with: 33 | python-version: 3.8 34 | 35 | - name: Install CellProfiler dependencies 36 | run: | 37 | sudo apt-get update 38 | sudo apt install -y make gcc build-essential libgtk-3-dev 39 | sudo 
apt-get install -y python3-pip openjdk-11-jdk-headless default-libmysqlclient-dev libnotify-dev libsdl2-dev 40 | export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64 41 | export PATH=$PATH:/home/ubuntu/.local/bin 42 | 43 | - name: Install CellProfiler 44 | run: | 45 | wget https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-20.04/wxPython-4.1.0-cp38-cp38-linux_x86_64.whl 46 | pip install wxPython-4.1.0-cp38-cp38-linux_x86_64.whl 47 | pip install --upgrade pip setuptools wheel 48 | pip install numpy 49 | pip install git+https://github.com/CellProfiler/centrosome.git@cython3 50 | 51 | - name: Run cellpose plugin pipeline 52 | run: | 53 | pip install -e .[cellpose] 54 | python -m cellprofiler -c -r -p ./tests/headless_test/4.2.5_plugins_test_pipeline_CELLPOSE.cppipe -i ./tests/headless_test/test_pipeline_img -o . --plugins-directory=./active_plugins --log-level=DEBUG 2>&1 | tee logfile 55 | sh ./tests/headless_test/test_run.sh "module runcellpose" logfile 56 | 57 | -------------------------------------------------------------------------------- /.github/workflows/test_stardist.yml: -------------------------------------------------------------------------------- 1 | name: CellProfiler-Stardist 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | pull_request: 8 | types: [opened, reopened, synchronize, ready_for_review, review_requested] 9 | 10 | jobs: 11 | build_cellprofiler: 12 | name: Test CellProfiler-Stardist 13 | runs-on: ubuntu-latest 14 | steps: 15 | - uses: actions/checkout@v2 16 | - uses: actions/setup-python@v2 17 | with: 18 | architecture: x64 19 | python-version: ${{ matrix.python-version }} 20 | - name: Get pip cache dir 21 | id: pip-cache-dir 22 | run: | 23 | echo "::set-output name=dir::$(pip cache dir)" 24 | - name: Cache pip 25 | uses: actions/cache@v3 26 | id: cache-pip 27 | with: 28 | path: ${{ steps.pip-cache-dir.outputs.dir }} 29 | key: ${{ runner.os }}-${{ env.pythonLocation }}-${{ hashFiles('**/setup.py') }} 30 | - name: Set up Python 31 | uses: actions/setup-python@v2 32 | with: 33 | python-version: 3.8 34 | 35 | - name: Install CellProfiler dependencies 36 | run: | 37 | sudo apt-get update 38 | sudo apt install -y make gcc build-essential libgtk-3-dev 39 | sudo apt-get install -y python3-pip openjdk-11-jdk-headless default-libmysqlclient-dev libnotify-dev libsdl2-dev 40 | export JAVA_HOME=/usr/lib/jvm/java-11-openjdk-amd64 41 | export PATH=$PATH:/home/ubuntu/.local/bin 42 | 43 | - name: Install CellProfiler 44 | run: | 45 | wget https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-20.04/wxPython-4.1.0-cp38-cp38-linux_x86_64.whl 46 | pip install wxPython-4.1.0-cp38-cp38-linux_x86_64.whl 47 | pip install --upgrade pip setuptools wheel 48 | pip install numpy 49 | pip install git+https://github.com/CellProfiler/centrosome.git@cython3 50 | 51 | - name: Run stardist plugin pipeline 52 | run: | 53 | pip install -e .[stardist] 54 | python -m cellprofiler -c -r -p ./tests/headless_test/4.2.5_plugins_test_pipeline_STARDIST.cppipe -i ./tests/headless_test/test_pipeline_img -o . 
--plugins-directory=./active_plugins --log-level=DEBUG 2>&1 | tee logfile 55 | sh ./tests/headless_test/test_run.sh "module runstardist" logfile 56 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | 21 | # Installer logs 22 | pip-log.txt 23 | 24 | # Unit test / coverage reports 25 | .coverage 26 | .tox 27 | nosetests.xml 28 | 29 | # Translations 30 | *.mo 31 | 32 | # Mr Developer 33 | .mr.developer.cfg 34 | .project 35 | .pydevproject 36 | .idea 37 | 38 | *.egg-info/ 39 | *.iml 40 | *.pyc 41 | .cache/ 42 | .eggs/ 43 | .idea/ 44 | .python-version 45 | build/ 46 | dist/ 47 | frozen_version.py 48 | plugins/ 49 | src/ 50 | 51 | #mac 52 | .DS_Store 53 | **/.DS_Store 54 | 55 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | addons: 2 | apt: 3 | packages: 4 | - libhdf5-serial-dev 5 | - python-pip 6 | cache: 7 | apt: true 8 | directories: $HOME/.cache/pip 9 | dist: trusty 10 | env: 11 | - LC_ALL="en_US.UTF-8" CP_MYSQL_TEST_HOST="127.0.0.1" CP_MYSQL_TEST_USER="root" CP_MYSQL_TEST_PASSWORD="" 12 | install: 13 | - pip install --upgrade pip 14 | - pip install --upgrade cython 15 | - pip install --upgrade joblib 16 | - pip install --upgrade numpy 17 | - pip install --upgrade scipy 18 | - pip install --editable git+https://github.com/CellProfiler/CellProfiler.git#egg=CellProfiler 19 | - pip freeze 20 | language: python 21 | notifications: 22 | email: false 23 | python: 24 | - 2.7 25 | - 3.6 26 | matrix: 27 | allow_failures: 28 | - python: 3.6 29 | before_script: 30 | - pip install flake8 31 | # stop the build if there are Python syntax errors or undefined names 32 | - flake8 . --count --select=E901,E999,F821,F822,F823 --show-source --statistics 33 | # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide 34 | - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics 35 | script: pytest 36 | sudo: false 37 | -------------------------------------------------------------------------------- /CITATION.cff: -------------------------------------------------------------------------------- 1 | cff-version: 1.2.0 2 | message: "If using CellProfiler Plugins in a pipeline used in a publication, please cite our CellProfiler-Plugins paper. Additionally, some plugins require citation themselves if used. You can determine which plugins require citation by reading the Help information for each plugin, accessible through CellProfiler, or by using the Citation generator built into CellProfiler (since CellProfiler 5)." 
3 | type: software 4 | authors: 5 | - name: 'Imaging Platform, Broad Institute of Harvard and MIT' 6 | city: Cambridge 7 | country: US 8 | repository: https://github.com/CellProfiler/CellProfiler-plugins 9 | title: "CellProfiler plugins" 10 | doi: https://doi.org/10.1111/jmi.13223 11 | date-released: 2023 12 | preferred-citation: 13 | type: article 14 | authors: 15 | - family-names: "Weisbart" 16 | given-names: "Erin" 17 | orcid: "https://orcid.org/0000-0002-6437-2458" 18 | - family-names: "Tromans-Coia" 19 | given-names: "Callum" 20 | orcid: "https://orcid.org/0000-0002-5518-8915" 21 | - family-names: "Diaz-Rohrer" 22 | given-names: "Barbara" 23 | orcid: "https://orcid.org/0000-0002-4748-7077" 24 | - family-names: "Stirling" 25 | given-names: "David R." 26 | orcid: "https://orcid.org/0000-0001-6802-4103" 27 | - family-names: "Garcia-Fossa" 28 | given-names: "Fernanda" 29 | orcid: "https://orcid.org/0000-0003-2308-0149" 30 | - family-names: "Senft" 31 | given-names: "Rebecca A." 32 | orcid: "https://orcid.org/0000-0003-0081-4170" 33 | - family-names: "Hiner" 34 | given-names: "Mark C." 35 | orcid: "https://orcid.org/0000-0001-9404-7579" 36 | - family-names: "de Jesus" 37 | given-names: "Marcelo B." 38 | orcid: "https://orcid.org/0000-0003-0812-1491" 39 | - family-names: "Eliceiri" 40 | given-names: "Kevin W." 41 | orcid: "https://orcid.org/0000-0001-8678-670X" 42 | - family-names: "Cimini" 43 | given-names: "Beth A." 44 | orcid: "https://orcid.org/0000-0001-9640-9318" 45 | doi: "https://doi.org/10.1111/jmi.13223" 46 | journal: "Journal of Microscopy" 47 | title: "CellProfiler plugins – An easy image analysis platform integration for containers and Python tools." 48 | year: 2023 49 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CellProfiler-plugins 2 | 3 | A home for community-contributed, experimental, and dependency-heavy CellProfiler modules. 4 | 5 | Plugins advance the capabilities of CellProfiler but are not officially supported in the same way as modules. 6 | A module may be in CellProfiler-plugins instead of CellProfiler itself because: 7 | - it is under active development 8 | - it has a niche audience 9 | - it is not documented to CellProfiler's standards 10 | - it only works with certain version of CellProfiler 11 | - it requires extra libraries or other dependencies we are unable or unwilling to require for CellProfiler 12 | - it has been contributed by a community member 13 | 14 | Please see our [CellProfiler-plugins documentation](https://plugins.cellprofiler.org) for more information about installation, currently supported plugins, and how to contribute. 15 | 16 | ## Troubleshooting 17 | 18 | If CellProfiler won't open after setting the CellProfiler Plugins folder (and it returns `error: no commands supplied` in the terminal), it is likely because you have set the CellProfiler Plugins folder to the parent folder of the plugins repository (`CellProfiler-plugins`), not the folder that contains plugins (`CellProfiler-plugins/active_plugins`). 19 | 20 | In order to get CellProfiler to open, remove `setup.py` from the `CellProfiler-plugins` folder. 21 | Open CellProfiler. 22 | Change the CellProfiler Plugins path to the correct path and close CellProfiler. 23 | Return setup.py to the parent folder. 24 | OR 25 | Alternatively, you can edit the `PluginDirectory` line of your config to the correct path and then reload CellProfiler. 
26 | On Mac, you can find the config at `/Users/{username}/Library/Preferences/CellProfilerLocal.cfg` 27 | 28 | For other troubleshooting information, please see the [Troubleshooting](https://plugins.cellprofiler.org/troubleshooting.html) page of our documentation. 29 | -------------------------------------------------------------------------------- /active_plugins/cpij/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | __author__ = "Mark Hiner, Alice Lucas, Beth Cimini" 4 | __all__ = ["bridge", "server"] 5 | -------------------------------------------------------------------------------- /active_plugins/cpij/bridge.py: -------------------------------------------------------------------------------- 1 | from multiprocessing.managers import SyncManager 2 | import multiprocessing as mp 3 | import atexit, cpij.server as ijserver 4 | from queue import Queue 5 | from threading import Lock 6 | 7 | 8 | class QueueManager(SyncManager): 9 | pass 10 | 11 | 12 | QueueManager.register("input_queue") 13 | QueueManager.register("output_queue") 14 | QueueManager.register("get_lock") 15 | 16 | _init_method = None 17 | 18 | 19 | def init_method(): 20 | global _init_method 21 | if not _init_method: 22 | if ijserver.is_server_running(): 23 | l = lock() 24 | l.acquire() 25 | to_imagej().put( 26 | {ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_GET_INIT_METHOD} 27 | ) 28 | _init_method = from_imagej().get()[ijserver.PYIMAGEJ_KEY_OUTPUT] 29 | l.release() 30 | 31 | return _init_method 32 | 33 | 34 | def lock() -> Lock: 35 | """ 36 | Helper method to synchronzie requests with the ImageJ server. 37 | 38 | A lock should be acquired before sending data to the server, and released after 39 | receiving the result. 40 | 41 | Returns 42 | --------- 43 | A Lock connected to the ImageJ server. 44 | """ 45 | return _manager().get_lock() 46 | 47 | 48 | def to_imagej() -> Queue: 49 | """ 50 | Helper method to send data to the ImageJ server 51 | 52 | Returns 53 | --------- 54 | A Queue connected to the ImageJ server. Only its put method should be called. 55 | """ 56 | return _manager().input_queue() 57 | 58 | 59 | def from_imagej() -> Queue: 60 | """ 61 | Helper method to retrieve data from the ImageJ server 62 | 63 | Returns 64 | --------- 65 | A Queue connected to the ImageJ server. Only its get method should be called. 66 | """ 67 | return _manager().output_queue() 68 | 69 | 70 | def init_pyimagej(init_string): 71 | """ 72 | Start the pyimagej daemon thread if it isn't already running. 73 | 74 | Parameters 75 | ---------- 76 | init_string : str, optional 77 | This can be a path to a local ImageJ installation, or an initialization string per imagej.init(), 78 | e.g. 
sc.fiji:fiji:2.1.0 79 | """ 80 | to_imagej().put( 81 | { 82 | ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_START, 83 | ijserver.PYIMAGEJ_KEY_INPUT: init_string, 84 | } 85 | ) 86 | result = from_imagej().get() 87 | if result == ijserver.PYIMAGEJ_STATUS_STARTUP_FAILED: 88 | _shutdown_imagej() 89 | # Wait for the server to shut down 90 | while ijserver.is_server_running(): 91 | pass 92 | return False 93 | 94 | global _init_method 95 | _init_method = init_string 96 | return True 97 | 98 | 99 | def _manager() -> QueueManager: 100 | """ 101 | Helper method to return a QueueManager connected to the ImageJ server 102 | """ 103 | if not ijserver.is_server_running(): 104 | raise RuntimeError("No ImageJ server instance available") 105 | 106 | manager = QueueManager( 107 | address=("127.0.0.1", ijserver.SERVER_PORT), authkey=ijserver._SERVER_KEY 108 | ) 109 | manager.connect() 110 | return manager 111 | 112 | 113 | def _shutdown_imagej(): 114 | """ 115 | Helper method to send the shutdown signal to ImageJ. Intended to be called 116 | at process exit. 117 | """ 118 | if ijserver.is_server_running(): 119 | to_imagej().put({ijserver.PYIMAGEJ_KEY_COMMAND: ijserver.PYIMAGEJ_CMD_EXIT}) 120 | 121 | 122 | def start_imagej_server(): 123 | """ 124 | If the ImageJ server is not already running, spawns the server in a new 125 | Process. Blocks until the server is up and running. 126 | """ 127 | if ijserver.is_server_running(): 128 | return 129 | 130 | ctx = mp.get_context("spawn") 131 | p = ctx.Process(target=ijserver.main) 132 | p.start() 133 | 134 | # wait for the server to start up 135 | ijserver.wait_for_server_startup() 136 | 137 | # Ensure server shuts down when main app closes 138 | atexit.register(_shutdown_imagej) 139 | -------------------------------------------------------------------------------- /active_plugins/distancetransform.py: -------------------------------------------------------------------------------- 1 | ################################# 2 | # 3 | # Imports from useful Python libraries 4 | # 5 | ################################# 6 | 7 | import logging 8 | import scipy.ndimage 9 | import numpy 10 | 11 | ################################# 12 | # 13 | # Imports from CellProfiler 14 | # 15 | ################################## 16 | 17 | import cellprofiler_core.image 18 | import cellprofiler_core.module 19 | import cellprofiler_core.setting 20 | from cellprofiler_core.setting import Binary 21 | 22 | __doc__ = """\ 23 | DistanceTransform 24 | ================= 25 | 26 | **DistanceTransform** computes the distance transform of a binary image. 27 | The distance of each foreground pixel is computed to the nearest background pixel. 28 | The resulting image is then scaled so that the largest distance is 1. 29 | 30 | | 31 | 32 | ============ ============ =============== 33 | Supports 2D? Supports 3D? Respects masks? 34 | ============ ============ =============== 35 | YES YES YES 36 | ============ ============ =============== 37 | 38 | """ 39 | 40 | 41 | class DistanceTransform(cellprofiler_core.module.ImageProcessing): 42 | module_name = "DistanceTransform" 43 | 44 | variable_revision_number = 1 45 | 46 | def create_settings(self): 47 | super(DistanceTransform, self).create_settings() 48 | 49 | self.rescale_values = Binary( 50 | "Rescale values from 0 to 1?", 51 | True, 52 | doc="""\ 53 | Select "*Yes*" to rescale the transformed values to lie between 0 and 54 | 1. 
This is the option to use if the distance transformed image is to be 55 | used for thresholding by an **Identify** module or the like, which 56 | assumes a 0-1 scaling. 57 | 58 | Select "*No*" to leave the values in absolute pixel units. This is useful 59 | in cases where the actual pixel distances are to be used downstream as 60 | input for a measurement module.""", 61 | ) 62 | 63 | def settings(self): 64 | __settings__ = super(DistanceTransform, self).settings() 65 | __settings__ += [ 66 | self.rescale_values, 67 | ] 68 | return __settings__ 69 | 70 | def visible_settings(self): 71 | """Return the settings as displayed to the user""" 72 | __settings__ = super(DistanceTransform, self).settings() 73 | __settings__ += [self.rescale_values] 74 | return __settings__ 75 | 76 | def run(self, workspace): 77 | x_name = self.x_name.value 78 | 79 | y_name = self.y_name.value 80 | 81 | images = workspace.image_set 82 | 83 | x = images.get_image(x_name) 84 | 85 | dimensions = x.dimensions 86 | 87 | x_data = x.pixel_data 88 | 89 | y_data = scipy.ndimage.distance_transform_edt(x_data, sampling=x.spacing) 90 | 91 | if self.rescale_values.value: 92 | y_data = y_data / numpy.max(y_data) 93 | 94 | y = cellprofiler_core.image.Image( 95 | dimensions=dimensions, image=y_data, parent_image=x 96 | ) 97 | 98 | images.add(y_name, y) 99 | 100 | if self.show_window: 101 | workspace.display_data.x_data = x_data 102 | workspace.display_data.y_data = y_data 103 | workspace.display_data.dimensions = dimensions 104 | 105 | def volumetric(self): 106 | return True 107 | -------------------------------------------------------------------------------- /active_plugins/histogrammatching.py: -------------------------------------------------------------------------------- 1 | ################################# 2 | # 3 | # Imports from useful Python libraries 4 | # 5 | ################################# 6 | import numpy 7 | import skimage.exposure 8 | 9 | ################################# 10 | # 11 | # Imports from CellProfiler 12 | # 13 | ################################## 14 | 15 | import cellprofiler_core.image 16 | import cellprofiler_core.module 17 | import cellprofiler_core.setting 18 | import cellprofiler_core.setting.text 19 | from cellprofiler_core.setting.subscriber import ImageSubscriber 20 | 21 | __doc__ = """\ 22 | HistogramMatching 23 | ================= 24 | **HistogramMatching** manipulates the pixel intensity values of an input image and matches 25 | them to the histogram of a reference image. It can be used as a way to normalize intensities 26 | across different images or different frames of the same image. It allows you to choose 27 | which frame to use as the reference. 28 | 29 | | 30 | 31 | ============ ============ =============== 32 | Supports 2D? Supports 3D? Respects masks? 
33 | ============ ============ =============== 34 | YES YES NO 35 | ============ ============ =============== 36 | 37 | References 38 | ^^^^^^^^^^ 39 | (`link `__) 40 | (`link `__) 41 | """ 42 | 43 | 44 | class HistogramMatching(cellprofiler_core.module.ImageProcessing): 45 | module_name = "HistogramMatching" 46 | 47 | variable_revision_number = 1 48 | 49 | def create_settings(self): 50 | super(HistogramMatching, self).create_settings() 51 | 52 | self.reference_image = ImageSubscriber( 53 | "Image to use as reference ", 54 | doc="Select the image you want to use as the reference.", 55 | ) 56 | 57 | self.do_3D = cellprofiler_core.setting.Binary( 58 | text="Is your image 3D?", 59 | value=False, 60 | doc=""" 61 | If enabled, 3D specific settings are available.""", 62 | ) 63 | 64 | self.do_self_reference = cellprofiler_core.setting.Binary( 65 | text="Use a frame within image as reference?", 66 | value=False, 67 | doc=""" 68 | If enabled, a frame within the 3D image is used as the reference image.""", 69 | ) 70 | 71 | self.frame_number = cellprofiler_core.setting.text.Integer( 72 | "Frame number", 73 | value=5, 74 | minval=1, 75 | doc="""For 3D images, you have the option of performing histogram matching within the image using one of the frames in the image 76 | """, 77 | ) 78 | 79 | def settings(self): 80 | __settings__ = super(HistogramMatching, self).settings() 81 | 82 | return __settings__ + [ 83 | self.do_3D, 84 | self.do_self_reference, 85 | self.reference_image, 86 | self.frame_number, 87 | ] 88 | 89 | def visible_settings(self): 90 | __settings__ = super(HistogramMatching, self).settings() 91 | 92 | __settings__ += [self.do_3D, self.reference_image] 93 | 94 | if self.do_3D.value: 95 | __settings__ += [self.do_self_reference] 96 | 97 | if self.do_self_reference.value: 98 | __settings__.remove(self.reference_image) 99 | __settings__ += [self.frame_number] 100 | 101 | return __settings__ 102 | 103 | def run(self, workspace): 104 | x_name = self.x_name.value 105 | 106 | y_name = self.y_name.value 107 | 108 | images = workspace.image_set 109 | 110 | x = images.get_image(x_name) 111 | 112 | dimensions = x.dimensions 113 | 114 | x_data = x.pixel_data 115 | 116 | if x.volumetric: 117 | y_data = numpy.zeros_like(x_data, dtype=float) 118 | 119 | if self.do_self_reference.value: 120 | reference_image = x_data[self.frame_number.value] 121 | for index, plane in enumerate(x_data): 122 | y_data[index] = skimage.exposure.match_histograms( 123 | plane, reference_image 124 | ) 125 | else: 126 | # Match each plane against the reference image's pixel data, 127 | # filling the output volume plane by plane as in the branch above. 128 | reference_image = images.get_image(self.reference_image).pixel_data 129 | for index, plane in enumerate(x_data): 130 | y_data[index] = skimage.exposure.match_histograms(plane, reference_image) 131 | else: 132 | reference_image = images.get_image(self.reference_image).pixel_data 133 | y_data = skimage.exposure.match_histograms(x_data, reference_image) 134 | 135 | y = cellprofiler_core.image.Image( 136 | dimensions=dimensions, image=y_data, parent_image=x 137 | ) 138 | 139 | images.add(y_name, y) 140 | 141 | if self.show_window: 142 | workspace.display_data.x_data = x_data 143 | 144 | workspace.display_data.y_data = y_data 145 | 146 | workspace.display_data.dimensions = dimensions 147 | -------------------------------------------------------------------------------- /active_plugins/pixelshuffle.py: -------------------------------------------------------------------------------- 1 | ################################# 2 | # 3 | # Imports from useful Python libraries 4 | # 5 | ################################# 6 | 7 | import logging 8 | 
import scipy.ndimage 9 | import numpy 10 | import random 11 | 12 | ################################# 13 | # 14 | # Imports from CellProfiler 15 | # 16 | ################################## 17 | 18 | import cellprofiler_core.image 19 | import cellprofiler_core.module 20 | import cellprofiler_core.setting 21 | 22 | __doc__ = """\ 23 | PixelShuffle 24 | ============ 25 | 26 | **PixelShuffle** takes the intensity of each pixel in an image and it randomly shuffles its position. 27 | 28 | | 29 | 30 | ============ ============ =============== 31 | Supports 2D? Supports 3D? Respects masks? 32 | ============ ============ =============== 33 | YES NO NO 34 | ============ ============ =============== 35 | 36 | """ 37 | 38 | 39 | class PixelShuffle(cellprofiler_core.module.ImageProcessing): 40 | module_name = "PixelShuffle" 41 | 42 | variable_revision_number = 1 43 | 44 | def settings(self): 45 | __settings__ = super(PixelShuffle, self).settings() 46 | return __settings__ 47 | 48 | def visible_settings(self): 49 | """Return the settings as displayed to the user""" 50 | __settings__ = super(PixelShuffle, self).settings() 51 | return __settings__ 52 | 53 | def run(self, workspace): 54 | x_name = self.x_name.value 55 | 56 | y_name = self.y_name.value 57 | 58 | images = workspace.image_set 59 | 60 | x = images.get_image(x_name) 61 | 62 | dimensions = x.dimensions 63 | 64 | x_data = x.pixel_data 65 | 66 | shape = numpy.array(x_data.shape).astype(int) 67 | 68 | pxs = [] 69 | width, height = shape[:2] 70 | for w in range(width): 71 | for h in range(height): 72 | pxs.append(x_data[w, h]) 73 | idx = list(range(len(pxs))) 74 | random.shuffle(idx) 75 | seq = [] 76 | for i in idx: 77 | seq.append(pxs[i]) 78 | out = numpy.asarray(seq) 79 | out = out.reshape(width, height) 80 | 81 | y_data = out 82 | 83 | y = cellprofiler_core.image.Image( 84 | dimensions=dimensions, image=y_data, parent_image=x 85 | ) 86 | 87 | images.add(y_name, y) 88 | 89 | if self.show_window: 90 | workspace.display_data.x_data = x_data 91 | workspace.display_data.y_data = y_data 92 | workspace.display_data.dimensions = dimensions 93 | -------------------------------------------------------------------------------- /dockerfiles/RunCellpose/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM pytorch/pytorch:1.13.0-cuda11.6-cudnn8-runtime 2 | 3 | RUN pip install numpy==1.26.4 cellpose==2.3.2 4 | 5 | # Include if you wish the image to contain Cellpose pretrained models 6 | COPY download_cellpose_models.py / 7 | RUN python /download_cellpose_models.py 8 | -------------------------------------------------------------------------------- /dockerfiles/RunCellpose/download_cellpose_models.py: -------------------------------------------------------------------------------- 1 | import cellpose 2 | from cellpose.models import MODEL_NAMES 3 | 4 | for model in MODEL_NAMES: 5 | for model_index in range(4): 6 | model_name = cellpose.models.model_path(model, model_index) 7 | if model in ("cyto", "nuclei", "cyto2"): 8 | size_model_name = cellpose.models.size_model_path(model) -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/_config.yml: -------------------------------------------------------------------------------- 1 | # Book settings 2 | 3 | # Learn more at https://jupyterbook.org/customize/config.html 4 | title: CP Plugins Documentation 5 | author: Broad Institute 6 | copyright: "2023" 7 | #logo: img/logo.svg 8 | 9 | # Only build files that 
are in the ToC 10 | only_build_toc_files: true 11 | 12 | # Force re-execution of notebooks on each build. 13 | # See https://jupyterbook.org/content/execute.html 14 | execute: 15 | execute_notebooks: force 16 | 17 | # Information about where the book exists on the web 18 | repository: 19 | url: https://github.com/cellprofiler/cellprofiler-plugins 20 | branch: master # Which branch of the repository should be used when creating links (optional) 21 | path_to_book: documentation/CP-plugins-documentation 22 | 23 | html: 24 | baseurl: cellprofiler.github.io 25 | use_repository_button: true 26 | use_issues_button: true 27 | use_edit_page_button: true 28 | comments: 29 | hypothesis: true 30 | 31 | parse: 32 | myst_enable_extensions: 33 | # Only required if you use html 34 | - html_image 35 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/_toc.yml: -------------------------------------------------------------------------------- 1 | # Table of contents 2 | 3 | format: jb-book 4 | root: overview 5 | parts: 6 | - caption: Overview 7 | chapters: 8 | - file: using_plugins 9 | - file: supported_plugins 10 | - file: unsupported_plugins 11 | - file: contributing_plugins 12 | - file: troubleshooting 13 | - file: citing 14 | - file: versions 15 | 16 | - caption: Extra information about specific plugins 17 | chapters: 18 | - file: runcellpose 19 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/citing.md: -------------------------------------------------------------------------------- 1 | # Citing CellProfiler Plugins 2 | 3 | If using CellProfiler Plugins in a pipeline used in a publication, please cite our CellProfiler Plugins paper: 4 | 5 | Weisbart, E., Tromans-Coia, C., Diaz-Rohrer, B., Stirling, D. R., Garcia-Fossa, F., Senft, R. A., Hiner, M. C., de Jesus, M. B., Eliceiri, K. W., & Cimini, B. A. (2023). CellProfiler plugins - an easy image analysis platform integration for containers and Python tools. Journal of Microscopy. https://doi.org/10.1111/jmi.13223 6 | 7 | Additionally, some plugins require citation themselves if used. 8 | You can determine which plugins require citation by reading the Help information for each plugin, accessible through CellProfiler, or by using the Citation generator built into CellProfiler (since CellProfiler 5). 9 | 10 | ![alt text](images/citation.png) 11 | Figure 1: How to use the citation tool. To use CellProfiler's citation generator, load your pipeline into CellProfiler (A). Navigate to File => Export => Citation (B). If any module in your pipeline is currently disabled, a citation for that module will still be generated but it will include a `disabled` tag. 12 | 13 | Please note that many CellProfiler modules (including plugins) provide references that offer further information/background about the processing happening within the module. 14 | You may wish to read the references and cite any that upon which your analyses are particularly dependent. 15 | However, please do note that `References` and `Citations` are separate and citing module references is not required. 
16 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/contributing_plugins.md: -------------------------------------------------------------------------------- 1 | # Contributing New Plugins 2 | 3 | Within our CellProfiler wiki, you can find an [orientation to CellProfiler code](https://github.com/CellProfiler/CellProfiler/wiki/Orientation-to-CellProfiler-code). 4 | 5 | In the CellProfiler repository, within [cellprofiler/modules/plugins](https://github.com/CellProfiler/CellProfiler/tree/master/cellprofiler/modules/plugins) you can find two different templates to use for creating your own plugin. 6 | `imagetemplate.py` provides a template that takes one image as an input and produces a second image for downstream processing. 7 | `measurementtemplate.py` provides a template that measures a property of an image both for the image as a whole and for every object in the image. 8 | 9 | In your plugin, we appreciate if you also include: 10 | - display functionality 11 | - extensive module documentation 12 | - references and citation information in your module documentation 13 | 14 | Please create a Pull Request to the CellProfiler-plugins repository to submit your plugin for inclusion in the repository. 15 | 16 | In your PR, you must: 17 | - add your plugin to the [supported_plugins](supported_plugins.md) documentation page 18 | 19 | In your PR, we appreciate if you also include: 20 | - unit tests for your plugin 21 | - customized installation in setup.py, if your plugin has dependencies 22 | 23 | ## Contributing bug fixes or updating deprecated plugins 24 | 25 | Please create a Pull Request to the CellProfiler-plugins repository to submit a bug fix or plugin un-deprecation. 26 | If you would like to update a currently deprecated plugin, we encourage you to read closed Issues in GitHub relevant to the plugin as they may contain helpful information about bugs present at the time of deprecation. 27 | 28 | ## Having your plugin cited 29 | 30 | While we cannot guarantee that users will cite your plugin, we have introduced a Citation generator into CellProfiler (currently available in CellProfiler from source and will be in CellProfiler 5) that scans all modules in a user's pipeline and generates a citation file for them that includes citation information for any modules (including plugins) that have specific citation information in them. 31 | 32 | See [Citing Plugins](citing.md) for information on citing CellProfiler Plugins. 
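As a concrete illustration of the template structure described above, below is a minimal sketch of an image-processing plugin, following the pattern used by the modules in `active_plugins` (for example, `pixelshuffle.py` and `distancetransform.py`). The module name `InvertExample` and its behavior are hypothetical examples only and are not part of this repository.

```python
import numpy

import cellprofiler_core.image
import cellprofiler_core.module

__doc__ = """\
InvertExample
=============

**InvertExample** is a hypothetical example module that inverts the
intensities of an input image.
"""


class InvertExample(cellprofiler_core.module.ImageProcessing):
    # The ImageProcessing base class supplies the input/output image settings
    # (x_name and y_name) plus default settings(), visible_settings(), and display().
    module_name = "InvertExample"

    variable_revision_number = 1

    def run(self, workspace):
        x_name = self.x_name.value
        y_name = self.y_name.value

        images = workspace.image_set
        x = images.get_image(x_name)
        x_data = x.pixel_data

        # The actual processing step: invert the pixel intensities.
        y_data = numpy.max(x_data) - x_data

        y = cellprofiler_core.image.Image(
            dimensions=x.dimensions, image=y_data, parent_image=x
        )
        images.add(y_name, y)

        if self.show_window:
            workspace.display_data.x_data = x_data
            workspace.display_data.y_data = y_data
            workspace.display_data.dimensions = x.dimensions
```

Once a file like this is saved into the plugins directory configured in CellProfiler's Preferences, the module should appear in the "Add Modules" panel, and the additions listed above (display functionality, documentation, citations, and tests) can be layered on from there.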
-------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions/2022-05-26T15-39-07.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions/2022-05-26T15-39-07.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions/file-import-solid.svg: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T20-56-38.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T20-56-38.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-00-53.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-00-53.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-05-30.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-05-30.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-11-49.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-11-49.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-16-31.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-16-31.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-17-09.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-17-09.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-30-10.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-30-10.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-39-05.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-39-05.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-43-56.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/2022-06-02T21-43-56.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/Control_panel.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/Control_panel.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/Control_panel_ev.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/Control_panel_ev.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/Java-download.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/Java-download.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/build-tools-download.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/build-tools-download.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/windows-file-explorer.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/Install_environment_instructions_windows/windows-file-explorer.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/images/citation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/documentation/CP-plugins-documentation/images/citation.png -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/overview.md: -------------------------------------------------------------------------------- 1 | # What are CellProfiler Plugins? 2 | 3 | Plugins advance the capabilities of CellProfiler but are not officially supported in the same way as modules. 4 | A module may be in CellProfiler-plugins instead of CellProfiler itself because: 5 | - it is under active development 6 | - it has a niche audience 7 | - it is not documented to CellProfiler's standards 8 | - it only works with certain versions of CellProfiler 9 | - it requires extra libraries or other dependencies we are unable or unwilling to require for CellProfiler 10 | - it has been contributed by a community member 11 | 12 | ## How do I use CellProfiler Plugins? 13 | 14 | See our [Using Plugins](using_plugins.md) page for information on how to install and use plugins. 15 | 16 | ## What plugins are available? 17 | 18 | See our [Supported Plugins](supported_plugins.md) page for information on all currently supported plugins. 19 | 20 | ## How do I contribute a plugin? 21 | 22 | See our [Contributing Plugins](contributing_plugins.md) page for information on contributing a plugin to CellProfiler-plugins. 23 | 24 | ## Who made this? 25 | 26 | CellProfiler and CellProfiler-plugins are maintained and developed in the [Cimini Lab](https://cimini-lab.broadinstitute.org) in the Imaging Platform at the Broad Institute in Cambridge, MA, USA. 27 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/runcellpose.md: -------------------------------------------------------------------------------- 1 | # RunCellpose 2 | 3 | RunCellpose is one of the modules that has additional dependencies that are not packaged with the built CellProfiler. 4 | Therefore, you must additionally download RunCellpose's dependencies. 5 | See [Using Plugins](using_plugins.md) for more information. 6 | 7 | ## Using RunCellpose with a GPU 8 | 9 | If you want to use a GPU to run the model (this is recommended for speed), you'll need a compatible version of PyTorch and a supported GPU. 10 | General instructions are available at this [link](https://pytorch.org/get-started/locally/). 11 | 12 | 1. Your GPU should be visible in Device Manager under Display Adaptors. 
13 | If your GPU isn't there, you likely need to install drivers. 14 | [Here](https://www.nvidia.com/Download/Find.aspx) is where you can find NVIDIA GPU drivers if you need to install them. 15 | 16 | 17 | 2. To test whether the GPU is configured correctly: 18 | * Run `python` on the command line (i.e., in Command Prompt or Terminal) to start an interactive session 19 | * Then run the following 20 | ``` 21 | import torch 22 | torch.cuda.is_available() 23 | ``` 24 | * If this returns `True`, you're all set 25 | * If this returns `False`, you likely need to install/reinstall torch. See [here](https://pytorch.org/get-started/locally/) for your exact command. 26 | * Exit the session with `exit()` then install torch if necessary 27 | ``` 28 | pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113 29 | ``` 30 | If you have a previous version of torch installed, make sure to run `pip uninstall torch` first. 31 | 32 | 33 | **NOTE**: You might get a warning like this: 34 | ``` 35 | W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'cudart64_110.dll'; dlerror: cudart64_110.dll not found 36 | 2022-05-26 20:24:21.906286: I tensorflow/stream_executor/cuda/cudart_stub.cc:29] Ignore above cudart dlerror if you do not have a GPU set up on your machine. 37 | ``` 38 | If you don't have a GPU, this is not a problem. If you do, your configuration is incorrect and you need to try reinstalling drivers and the correct version of CUDA for your system. -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/supported_plugins.md: -------------------------------------------------------------------------------- 1 | # Supported Plugins 2 | 3 | Below is a brief overview of our currently supported plugins. 4 | For details about using any particular plugin, please read the module documentation inside the plugin in CellProfiler. 5 | 6 | Most plugins will run without any special installation of either CellProfiler or the plugins. 7 | See [using plugins](using_plugins.md) for how to set up CellProfiler for plugin use as well as for installation information for those plugins that do require installation of dependencies. 8 | 9 | Most plugin documentation can be found within the plugin itself and can be accessed through CellProfiler help. 10 | Those plugins that do have extra documentation contain links below. 11 | 12 | | Plugin | Description | Requires installation of dependencies? | Install flag | Docker version currently available? | 13 | |--------|-------------|----------------------------------------|--------------|-------------------------------------| 14 | | AddNoise | AddNoise adds Gaussian, Poisson, or Salt and Pepper noise to images. Of particular use for data augmentation in deep learning. | No | | N/A | 15 | | CalculateMoments | CalculateMoments extracts moments statistics from a given distribution of pixel values. | No | | N/A | 16 | | CallBarcodes | CallBarcodes is used for assigning a barcode to an object based on the channel with the strongest intensity for a given number of cycles. It is used for optical sequencing by synthesis (SBS). | No | | N/A | 17 | | CompensateColors | CompensateColors determines how much signal in any given channel is because of bleed-through from another channel and removes the bleed-through. 
It can be performed across an image or masked to objects and provides a number of preprocessing and rescaling options to allow for troubleshooting if input image intensities are not well matched. | No | | N/A | 18 | | DistanceTransform | DistanceTransform computes the distance transform of a binary image. The distance of each foreground pixel is computed to the nearest background pixel and the resulting image is then scaled so that the largest distance is 1. | No | | N/A | 19 | | EnforceObjectsOneToOne| EnforceObjectsOneToOne generates Primary and Secondary object relationships for any pair of objects in a similar manner to the relationships established by IdentifyPrimaryObjects and IdentifySecondaryObjects. It is particularly useful for relating objects identified using Deep Learning. | No | | N/A | 20 | | EnhancedMeasureTexture| EnhancedMeasureTexture measures the degree and nature of textures within an image or objects in a more comprehensive/tuneable manner than the MeasureTexture module native to CellProfiler. | No | | N/A | 21 | | FilterObjects_StringMatch| FilterObjects_StringMatch allows filtering of objects using exact or partial string matching in a manner similar to FilterObjects. | No | | N/A | 22 | | HistogramEqualization | HistogramEqualization increases the global contrast of a low-contrast image or volume. Histogram equalization redistributes intensities to utilize the full range of intensities, such that the most common frequencies are more distinct. This module can perform either global or local histogram equalization. | No | | N/A | 23 | | HistogramMatching | HistogramMatching manipulates the pixel intensity values of an input image and matches them to the histogram of a reference image. It can be used as a way to normalize intensities across different 2D or 3D images or different frames of the same 3D image. It allows you to choose which frame to use as the reference. | No | | N/A | 24 | | PixelShuffle | PixelShuffle takes the intensity of each pixel in an image and randomly shuffles its position. | No | | N/A | 25 | | [RunCellpose](runcellpose.md) | RunCellpose allows you to run Cellpose within CellProfiler. Cellpose is a generalist machine-learning algorithm for cellular segmentation and is a great starting point for segmenting non-round cells. You can use pre-trained Cellpose models or your custom model with this plugin. You can use a GPU with this module to dramatically increase your speed/efficiency. | Yes | `cellpose` | Yes | 26 | | Runilastik | Runilastik allows you to run ilastik within CellProfiler. You can use pre-trained ilastik projects/models to predict pixel probabilities for your input images. The plugin supports two types of ilastik projects: Pixel Classification and Autocontext (2-stage). | Yes | | Yes | 27 | | RunImageJScript | RunImageJScript allows you to run any supported ImageJ script directly within CellProfiler. It is significantly more performant than RunImageJMacro, and is also less likely to leave behind temporary files. | Yes | `imagejscript`, though note that conda installation may be preferred, see [this link](https://py.imagej.net/en/latest/Install.html#installing-via-pip) for more information | No | 28 | | RunOmnipose | RunOmnipose allows you to run Omnipose within CellProfiler. Omnipose is a general image segmentation tool that builds on Cellpose. | Yes | `omnipose` | No | 29 | | RunStarDist | RunStarDist allows you to run StarDist within CellProfiler. 
StarDist is a machine-learning algorithm for object detection with star-convex shapes, making it best suited for nuclei or round-ish cells. You can use pre-trained StarDist models or your custom model with this plugin. You can use a GPU with this module to dramatically increase your speed/efficiency. RunStarDist is generally faster than RunCellpose. | Yes | `stardist` | No | 30 | | VarianceTransform | VarianceTransform calculates the variance of an image using a window of a set size. It can also search a predetermined range of window sizes to find the one that maximizes the variance of the image. | No | | N/A | 31 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/troubleshooting.md: -------------------------------------------------------------------------------- 1 | # Troubleshooting 2 | 3 | | Problem | Solution | 4 | |---|-----| 5 | | After setting the CellProfiler Plugins folder, CellProfiler won't open and returns an `error: no commands supplied` in the terminal. | You have set the CellProfiler Plugins folder to the parent folder of the plugins repository (`CellProfiler-plugins`), not the folder that contains plugins (`CellProfiler-plugins/active_plugins`). In order to get it to open, remove `setup.py` from the folder. Change the CellProfiler Plugins path to the correct path and close CellProfiler. Return `setup.py` to the parent folder. | 6 | | No plugins are visible in the "Add Modules" panel in CellProfiler. | You have not properly set the plugins path. Go to `CellProfiler` => `Preferences` and set the path in the `CellProfiler plugins directory` to the `active_plugins` folder in the GitHub repository that you just cloned. Select `Save` at the bottom of the Preferences window. | 7 | | Some but not all plugins are visible in the "Add Modules" panel in CellProfiler. | Plugins that are not visible have unmet dependencies. Follow [installation instructions](using_plugins.md) to install dependencies for plugins. | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/unsupported_plugins.md: -------------------------------------------------------------------------------- 1 | # Currently Unsupported Plugins 2 | 3 | Unsupported plugins are primarily unsupported for one of two reasons: 4 | - They were made for a previous major version of CellProfiler and have not been updated to be compatible with the current version 5 | - Their functions have been integrated into the current version of CellProfiler and therefore a plugin is no longer necessary 6 | 7 | We welcome requests for updating particular unsupported plugins, but please note that we have limited bandwidth for working on plugins and may be unable to complete the update. 8 | Additionally, we cannot commit to maintaining any given plugin, whether contributed by the CellProfiler team or by the community. 9 | We welcome community-contributed plugin updates. 10 | 11 | ## Where are unsupported plugins? 12 | 13 | Unsupported plugins can be found in the `unmaintained_plugins` folder in the CellProfiler-plugins repository. 14 | Those plugins in the `CellProfiler2`, `CellProfiler3`, and `CellProfiler4` folders were, at one point, supported for those versions of CellProfiler.
15 | Those plugins in the `CellProfiler4_autoconverted` folder were automatically converted from Python2 to Python3 (to support the transition from Python2 in CellProfiler3 to Python3 in CellProfiler4) but were never fully supported and may or may not run. 16 | 17 | ## What plugins are unsupported? 18 | 19 | We cannot provide comprehensive information about why we are not supporting a given plugin. 20 | Information about select plugins is as follows: 21 | 22 | **ClassifyPixelsUNET**: ClassifyPixelsUNET is a pixel classifier for background/object edge/object body. As far as we are aware, other deep learning based plugins that we do currently support (such as RunCellpose) work better. 23 | **DeclumpObjects**: DeclumpObjects will split objects based on a seeded watershed method. Functionality from this module was [added into CellProfiler](https://github.com/CellProfiler/CellProfiler/pull/4397) in the Watershed module as of CellProfiler 4.2.0. 24 | **Predict**: The Predict module is no longer supported; you can use the **Runilastik** module to run an ilastik pixel classifier in CellProfiler. 25 | -------------------------------------------------------------------------------- /documentation/CP-plugins-documentation/versions.md: -------------------------------------------------------------------------------- 1 | # Versions 2 | 3 | Current version is 4 | 5 | --- 6 | 7 | # Version History 8 | 9 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | from setuptools import setup 2 | import setuptools 3 | 4 | if __name__!="__main__": 5 | print("Please change your plugins folder to the 'active_plugins' subfolder") 6 | 7 | else: 8 | install_deps = [ 9 | "cellprofiler", 10 | "cellprofiler-core", 11 | ] 12 | 13 | cellpose_deps = [ 14 | "cellpose>=1.0.2,<3.0" 15 | ] 16 | 17 | omnipose_deps = [ 18 | "omnipose", 19 | "ncolor" 20 | ] 21 | 22 | stardist_deps = [ 23 | "tensorflow", 24 | "stardist" 25 | ] 26 | 27 | imagejscript_deps = [ 28 | "pyimagej" 29 | ] 30 | 31 | setup( 32 | name="cellprofiler_plugins", 33 | packages=setuptools.find_packages(), 34 | install_requires = install_deps, 35 | extras_require = { 36 | "cellpose": cellpose_deps, 37 | "omnipose": omnipose_deps, 38 | "stardist": stardist_deps, 39 | "imagejscript": imagejscript_deps, 40 | } 41 | ) 42 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/tests/__init__.py -------------------------------------------------------------------------------- /tests/conftest.py: -------------------------------------------------------------------------------- 1 | # Something in CellProfiler is importing wx before we can set 2 | # headless mode. Setting headless here before importing anything 3 | # else from CellProfiler.
4 | import cellprofiler_core.preferences 5 | 6 | cellprofiler_core.preferences.set_headless() 7 | 8 | import cellprofiler_core.image 9 | import cellprofiler_core.measurement 10 | import cellprofiler_core.object 11 | import cellprofiler_core.pipeline 12 | import cellprofiler_core.workspace 13 | import numpy 14 | import skimage.data 15 | import skimage.color 16 | import skimage.filters 17 | import skimage.measure 18 | import pytest 19 | 20 | 21 | @pytest.fixture( 22 | scope="module", 23 | params=[ 24 | (skimage.data.camera()[0:128, 0:128], 2), 25 | (skimage.data.astronaut()[0:128, 0:128, :], 2), 26 | (numpy.tile(skimage.data.camera()[0:32, 0:32], (2, 1)).reshape(2, 32, 32), 3) 27 | ], 28 | ids=[ 29 | "grayscale_image", 30 | "multichannel_image", 31 | "grayscale_volume" 32 | ] 33 | ) 34 | def image(request): 35 | data, dimensions = request.param 36 | 37 | return cellprofiler_core.image.Image(image=data, dimensions=dimensions) 38 | 39 | 40 | @pytest.fixture(scope="function") 41 | def image_empty(): 42 | image = cellprofiler_core.image.Image() 43 | 44 | return image 45 | 46 | 47 | @pytest.fixture(scope="function") 48 | def image_set(image, image_set_list): 49 | image_set = image_set_list.get_image_set(0) 50 | 51 | image_set.add("example", image) 52 | 53 | return image_set 54 | 55 | 56 | @pytest.fixture(scope="function") 57 | def image_set_empty(image_empty, image_set_list): 58 | image_set = image_set_list.get_image_set(0) 59 | image_set.add("example", image_empty) 60 | 61 | return image_set 62 | 63 | 64 | @pytest.fixture(scope="function") 65 | def image_set_list(): 66 | return cellprofiler_core.image.ImageSetList() 67 | 68 | 69 | @pytest.fixture(scope="function") 70 | def measurements(): 71 | return cellprofiler_core.measurement.Measurements() 72 | 73 | 74 | @pytest.fixture(scope="function") 75 | def module(request): 76 | instance = getattr(request.module, "instance") 77 | 78 | return instance() 79 | 80 | 81 | @pytest.fixture(scope="function") 82 | def objects(image): 83 | obj = cellprofiler_core.object.Objects() 84 | obj.parent_image = image 85 | 86 | return obj 87 | 88 | 89 | @pytest.fixture(scope="function") 90 | def objects_empty(): 91 | obj = cellprofiler_core.object.Objects() 92 | 93 | return obj 94 | 95 | 96 | @pytest.fixture(scope="function") 97 | def object_set(objects): 98 | objects_set = cellprofiler_core.object.ObjectSet() 99 | objects_set.add_objects(objects, "InputObjects") 100 | 101 | return objects_set 102 | 103 | 104 | @pytest.fixture(scope="function") 105 | def object_set_empty(objects_empty): 106 | objects_set = cellprofiler_core.object.ObjectSet() 107 | objects_set.add_objects(objects_empty, "InputObjects") 108 | 109 | return objects_set 110 | 111 | 112 | @pytest.fixture(scope="function") 113 | def object_with_data(image): 114 | data = image.pixel_data 115 | 116 | if image.multichannel: 117 | data = skimage.color.rgb2gray(data) 118 | 119 | binary = data > skimage.filters.threshold_li(data) 120 | 121 | labels = skimage.measure.label(binary) 122 | 123 | objects = cellprofiler_core.object.Objects() 124 | 125 | objects.segmented = labels 126 | objects.parent_image = image 127 | 128 | return objects 129 | 130 | 131 | @pytest.fixture(scope="function") 132 | def object_set_with_data(object_with_data): 133 | objects_set = cellprofiler_core.object.ObjectSet() 134 | objects_set.add_objects(object_with_data, "InputObjects") 135 | 136 | return objects_set 137 | 138 | 139 | @pytest.fixture(scope="function") 140 | def pipeline(): 141 | return cellprofiler_core.pipeline.Pipeline() 142 | 
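# --- Editorial sketch (hedged; not part of the original conftest.py) ---------
# The fixtures above define the contract the plugin tests in this folder rely
# on: each test module sets a module-level `instance` attribute to the plugin
# class, the `module` fixture retrieves it via getattr(request.module,
# "instance"), and the `image_set` fixture registers the test image under the
# name "example". A minimal test module would look roughly like the commented
# example below (pattern taken from tests/test_histogramequalization.py); the
# output name "Equalized" is illustrative only.
#
#     import histogramequalization
#
#     instance = histogramequalization.HistogramEqualization
#
#     def test_run(image, image_set, module, workspace):
#         module.x_name.value = "example"   # input registered by the image_set fixture
#         module.y_name.value = "Equalized"
#         module.run(workspace)
#         assert image_set.get_image("Equalized") is not None
# ------------------------------------------------------------------------------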
143 | 144 | @pytest.fixture(scope="function") 145 | def workspace(pipeline, module, image_set, object_set, measurements, image_set_list): 146 | return cellprofiler_core.workspace.Workspace(pipeline, module, image_set, object_set, measurements, image_set_list) 147 | 148 | 149 | @pytest.fixture(scope="function") 150 | def workspace_empty(pipeline, module, image_set_empty, object_set_empty, measurements, image_set_list): 151 | return cellprofiler_core.workspace.Workspace(pipeline, module, image_set_empty, object_set_empty, measurements, image_set_list) 152 | 153 | 154 | @pytest.fixture(scope="function") 155 | def workspace_with_data(pipeline, module, image_set, object_set_with_data, measurements, image_set_list): 156 | return cellprofiler_core.workspace.Workspace(pipeline, module, image_set, object_set_with_data, 157 | measurements, image_set_list) 158 | -------------------------------------------------------------------------------- /tests/headless_test/4.2.5_plugins_test_pipeline_CELLPOSE.cppipe: -------------------------------------------------------------------------------- 1 | CellProfiler Pipeline: http://www.cellprofiler.org 2 | Version:5 3 | DateRevision:425 4 | GitHash: 5 | ModuleCount:11 6 | HasImagePlaneDetails:False 7 | 8 | Images:[module_num:1|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['To begin creating your project, use the Images module to compile a list of files and/or folders that you want to analyze. You can also specify a set of rules to include only the desired files in your selected folders.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 9 | : 10 | Filter images?:Images only 11 | Select the rule criteria:and (extension does isimage) (directory doesnot containregexp "[\\\\/]\\.") 12 | 13 | Metadata:[module_num:2|svn_version:'Unknown'|variable_revision_number:6|show_window:True|notes:['The Metadata module optionally allows you to extract information describing your images (i.e, metadata) which will be stored along with your measurements. 
This information can be contained in the file name and/or location, or in an external file.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 14 | Extract metadata?:No 15 | Metadata data type:Text 16 | Metadata types:{} 17 | Extraction method count:1 18 | Metadata extraction method:Extract from file/folder names 19 | Metadata source:File name 20 | Regular expression to extract from file name:^(?P.*)_(?P[A-P][0-9]{2})_s(?P[0-9])_w(?P[0-9]) 21 | Regular expression to extract from folder name:(?P[0-9]{4}_[0-9]{2}_[0-9]{2})$ 22 | Extract metadata from:All images 23 | Select the filtering criteria:and (file does contain "") 24 | Metadata file location:Elsewhere...| 25 | Match file and image metadata:[] 26 | Use case insensitive matching?:No 27 | Metadata file name:None 28 | Does cached metadata exist?:No 29 | 30 | NamesAndTypes:[module_num:3|svn_version:'Unknown'|variable_revision_number:8|show_window:True|notes:['The NamesAndTypes module allows you to assign a meaningful name to each image by which other modules will refer to it.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 31 | Assign a name to:All images 32 | Select the image type:Grayscale image 33 | Name to assign these images:DNA 34 | Match metadata:[] 35 | Image set matching method:Order 36 | Set intensity range from:Image metadata 37 | Assignments count:1 38 | Single images count:0 39 | Maximum intensity:255.0 40 | Process as 3D?:No 41 | Relative pixel spacing in X:1.0 42 | Relative pixel spacing in Y:1.0 43 | Relative pixel spacing in Z:1.0 44 | Select the rule criteria:and (file does contain "") 45 | Name to assign these images:DNA 46 | Name to assign these objects:Cell 47 | Select the image type:Grayscale image 48 | Set intensity range from:Image metadata 49 | Maximum intensity:255.0 50 | 51 | Groups:[module_num:4|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['The Groups module optionally allows you to split your list of images into image subsets (groups) which will be processed independently of each other. 
Examples of groupings include screening batches, microtiter plates, time-lapse movies, etc.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 52 | Do you want to group your images?:No 53 | grouping metadata count:1 54 | Metadata category:None 55 | 56 | RunCellpose:[module_num:5|svn_version:'Unknown'|variable_revision_number:3|show_window:False|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 57 | Select the input image:DNA 58 | Expected object diameter:12 59 | Detection mode:nuclei 60 | Name the output object:Nuclei_pose 61 | Use GPU:No 62 | Use averaging:Yes 63 | Supply nuclei image as well?:No 64 | Select the nuclei image:None 65 | Save probability image?:No 66 | Name the probability image:Probabilities 67 | Location of the pre-trained model file:Elsewhere...| 68 | Pre-trained model file name:cyto_0 69 | Flow threshold:0.4 70 | Cell probability threshold:0.0 71 | GPU memory share for each worker:0.1 72 | Stitch Threshold:0.0 73 | Use 3D:No 74 | Minimum size:10 75 | Z rescaling factor (anisotropy):1.0 76 | Use Omnipose for mask reconstruction:No 77 | Invert images:No -------------------------------------------------------------------------------- /tests/headless_test/4.2.5_plugins_test_pipeline_STARDIST.cppipe: -------------------------------------------------------------------------------- 1 | CellProfiler Pipeline: http://www.cellprofiler.org 2 | Version:5 3 | DateRevision:425 4 | GitHash: 5 | ModuleCount:11 6 | HasImagePlaneDetails:False 7 | 8 | Images:[module_num:1|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['To begin creating your project, use the Images module to compile a list of files and/or folders that you want to analyze. You can also specify a set of rules to include only the desired files in your selected folders.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 9 | : 10 | Filter images?:Images only 11 | Select the rule criteria:and (extension does isimage) (directory doesnot containregexp "[\\\\/]\\.") 12 | 13 | Metadata:[module_num:2|svn_version:'Unknown'|variable_revision_number:6|show_window:True|notes:['The Metadata module optionally allows you to extract information describing your images (i.e, metadata) which will be stored along with your measurements. 
This information can be contained in the file name and/or location, or in an external file.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 14 | Extract metadata?:No 15 | Metadata data type:Text 16 | Metadata types:{} 17 | Extraction method count:1 18 | Metadata extraction method:Extract from file/folder names 19 | Metadata source:File name 20 | Regular expression to extract from file name:^(?P.*)_(?P[A-P][0-9]{2})_s(?P[0-9])_w(?P[0-9]) 21 | Regular expression to extract from folder name:(?P[0-9]{4}_[0-9]{2}_[0-9]{2})$ 22 | Extract metadata from:All images 23 | Select the filtering criteria:and (file does contain "") 24 | Metadata file location:Elsewhere...| 25 | Match file and image metadata:[] 26 | Use case insensitive matching?:No 27 | Metadata file name:None 28 | Does cached metadata exist?:No 29 | 30 | NamesAndTypes:[module_num:3|svn_version:'Unknown'|variable_revision_number:8|show_window:True|notes:['The NamesAndTypes module allows you to assign a meaningful name to each image by which other modules will refer to it.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 31 | Assign a name to:All images 32 | Select the image type:Grayscale image 33 | Name to assign these images:DNA 34 | Match metadata:[] 35 | Image set matching method:Order 36 | Set intensity range from:Image metadata 37 | Assignments count:1 38 | Single images count:0 39 | Maximum intensity:255.0 40 | Process as 3D?:No 41 | Relative pixel spacing in X:1.0 42 | Relative pixel spacing in Y:1.0 43 | Relative pixel spacing in Z:1.0 44 | Select the rule criteria:and (file does contain "") 45 | Name to assign these images:DNA 46 | Name to assign these objects:Cell 47 | Select the image type:Grayscale image 48 | Set intensity range from:Image metadata 49 | Maximum intensity:255.0 50 | 51 | Groups:[module_num:4|svn_version:'Unknown'|variable_revision_number:2|show_window:True|notes:['The Groups module optionally allows you to split your list of images into image subsets (groups) which will be processed independently of each other. 
Examples of groupings include screening batches, microtiter plates, time-lapse movies, etc.']|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 52 | Do you want to group your images?:No 53 | grouping metadata count:1 54 | Metadata category:None 55 | 56 | RunStarDist:[module_num:5|svn_version:'Unknown'|variable_revision_number:1|show_window:True|notes:[]|batch_state:array([], dtype=uint8)|enabled:True|wants_pause:False] 57 | Select the input image:DNA 58 | Model:2D 59 | Name the output object:RunStarDist 60 | Tile input image?:No 61 | Horizontal tiles:1 62 | Vertical tiles:1 63 | Save probability image?:Yes 64 | Name the probability image:Probabilities 65 | Model folder:Elsewhere...| -------------------------------------------------------------------------------- /tests/headless_test/test_pipeline_img/skimage-mitosis-img.tiff: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/tests/headless_test/test_pipeline_img/skimage-mitosis-img.tiff -------------------------------------------------------------------------------- /tests/headless_test/test_run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # if grep -q "$1" "$2" 4 | if grep -qiE "$1" "$2" 5 | then 6 | echo "Pipeline ran successfully" 7 | `exit 0` 8 | else 9 | echo "Failed to run pipeline ($1 failed)" 10 | `exit 1` 11 | fi 12 | 13 | -------------------------------------------------------------------------------- /tests/resources/callbarcodes_Barcodes.csv: -------------------------------------------------------------------------------- 1 | gene_symbol,sgRNA 2 | ABCF1,GCAACACATCAATGTTGGGA 3 | ADAR,TTCTTGTAGGGTGAACACCG 4 | SLC25A6,CGAAGTTGAGGGCTTGAGTG 5 | ANXA2,GGTCCTTCTCTGGTAGGCGA 6 | nontargeting,CGCAATCCCTTAGGATAGCC -------------------------------------------------------------------------------- /tests/resources/runimagejscript_dummyscript.py: -------------------------------------------------------------------------------- 1 | #@ ImgPlus image 2 | #@output ImgPlus copy 3 | 4 | copy = image 5 | -------------------------------------------------------------------------------- /tests/test_histogramequalization.py: -------------------------------------------------------------------------------- 1 | import cellprofiler.image 2 | import numpy 3 | import numpy.testing 4 | import skimage.exposure 5 | 6 | import histogramequalization 7 | 8 | instance = histogramequalization.HistogramEqualization 9 | 10 | 11 | def test_run(image, image_set, module, workspace): 12 | module.x_name.value = "example" 13 | 14 | module.y_name.value = "HistogramEqualization" 15 | 16 | module.nbins.value = 256 17 | 18 | module.mask.value = "Leave blank" 19 | 20 | module.local.value = False 21 | 22 | module.run(workspace) 23 | 24 | actual = image_set.get_image("HistogramEqualization") 25 | 26 | data = image.pixel_data 27 | 28 | expected_data = skimage.exposure.equalize_hist(data) 29 | 30 | expected = cellprofiler.image.Image( 31 | image=expected_data, 32 | parent_image=image, 33 | dimensions=image.dimensions 34 | ) 35 | 36 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 37 | 38 | 39 | def test_run_local(image, image_set, module, workspace): 40 | module.x_name.value = "example" 41 | 42 | module.y_name.value = "HistogramEqualization" 43 | 44 | module.nbins.value = 256 45 | 46 | module.local.value = True 47 | 48 | module.run(workspace) 49 
| 50 | actual = image_set.get_image("HistogramEqualization") 51 | 52 | data = image.pixel_data 53 | 54 | if image.volumetric: 55 | expected_data = numpy.zeros_like(data) 56 | 57 | for index, plane in enumerate(data): 58 | expected_data[index] = skimage.exposure.equalize_adapthist(plane, kernel_size=256) 59 | else: 60 | expected_data = skimage.exposure.equalize_adapthist(data, kernel_size=256) 61 | 62 | expected = cellprofiler.image.Image( 63 | image=expected_data, 64 | parent_image=image, 65 | dimensions=image.dimensions 66 | ) 67 | 68 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 69 | 70 | 71 | def test_run_nbins(image, image_set, module, workspace): 72 | module.x_name.value = "example" 73 | 74 | module.y_name.value = "HistogramEqualization" 75 | 76 | module.nbins.value = 128 77 | 78 | module.local.value = False 79 | 80 | module.mask.value = "Leave blank" 81 | 82 | module.run(workspace) 83 | 84 | actual = image_set.get_image("HistogramEqualization") 85 | 86 | data = image.pixel_data 87 | 88 | expected_data = skimage.exposure.equalize_hist(data, nbins=128) 89 | 90 | expected = cellprofiler.image.Image( 91 | image=expected_data, 92 | parent_image=image, 93 | dimensions=image.dimensions 94 | ) 95 | 96 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 97 | 98 | 99 | def test_run_mask(image, image_set, module, workspace): 100 | data = image.pixel_data 101 | 102 | mask_data = numpy.zeros_like(data, dtype="bool") 103 | 104 | if image.multichannel: 105 | mask_data[5:-5, 5:-5, :] = True 106 | elif image.dimensions == 3: 107 | mask_data[:, 5:-5, 5:-5] = True 108 | else: 109 | mask_data[5:-5, 5:-5] = True 110 | 111 | mask = cellprofiler.image.Image( 112 | image=mask_data, 113 | dimensions=image.dimensions 114 | ) 115 | 116 | image_set.add("Mask", mask) 117 | 118 | module.x_name.value = "example" 119 | 120 | module.y_name.value = "HistogramEqualization" 121 | 122 | module.nbins.value = 256 123 | 124 | module.local.value = False 125 | 126 | module.mask.value = "Mask" 127 | 128 | module.run(workspace) 129 | 130 | actual = image_set.get_image("HistogramEqualization") 131 | 132 | expected_data = skimage.exposure.equalize_hist(data, mask=mask_data) 133 | 134 | expected = cellprofiler.image.Image( 135 | image=expected_data, 136 | parent_image=image, 137 | dimensions=image.dimensions 138 | ) 139 | 140 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 141 | -------------------------------------------------------------------------------- /tests/test_runimagejscript.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | 3 | import cellprofiler_core.image 4 | import cellprofiler_core.measurement 5 | 6 | import cellprofiler_core.setting.subscriber 7 | import cellprofiler_core.setting.text.alphanumeric 8 | 9 | from cellprofiler_core.setting.text import Directory, Filename 10 | 11 | 12 | import cellprofiler.modules.crop 13 | import cellprofiler_core.object 14 | import cellprofiler_core.pipeline 15 | import cellprofiler_core.workspace 16 | 17 | import cellprofiler.modules.runimagejscript 18 | 19 | INPUT_IMAGE = "input_image" 20 | CROP_IMAGE = "crop_image" 21 | CROP_OBJECTS = "crop_objects" 22 | CROPPING = "cropping" 23 | OUTPUT_IMAGE = "output_image" 24 | 25 | 26 | def make_workspace(): 27 | """Return a workspace with the given image and the runimagejscript module""" 28 | pipeline = cellprofiler_core.pipeline.Pipeline() 29 | 30 | module = 
cellprofiler.modules.runimagejscript.RunImageJScript() 31 | module.set_module_num(1) 32 | image_set_list = cellprofiler_core.image.ImageSetList() 33 | image_set = image_set_list.get_image_set(0) 34 | 35 | object_set = cellprofiler_core.object.ObjectSet() 36 | 37 | def callback(caller, event): 38 | assert not isinstance(event, cellprofiler_core.pipeline.event.RunException) 39 | 40 | pipeline.add_listener(callback) 41 | pipeline.add_module(module) 42 | m = cellprofiler_core.measurement.Measurements() 43 | 44 | workspace = cellprofiler_core.workspace.Workspace( 45 | pipeline, module, image_set, object_set, m, image_set_list 46 | ) 47 | 48 | return module, workspace 49 | 50 | def test_start_image_j(): 51 | module, workspace = make_workspace() 52 | module.init_pyimagej() 53 | module.close_pyimagej() 54 | 55 | 56 | def test_parse_parameters(): 57 | module, workspace = make_workspace() 58 | 59 | module.script_directory = Directory( 60 | "Script directory") 61 | module.script_file = Filename( 62 | "ImageJ Script", "./../resources/modules/runimagejscript/dummyscript.py") 63 | module.get_parameters_from_script() 64 | 65 | assert len(module.script_parameter_list) > 0 66 | 67 | assert module.script_parameter_list[0].name.value == "image" 68 | assert module.script_parameter_list[1].name.value == "copy" 69 | 70 | assert isinstance(module.script_parameter_list[0].setting, cellprofiler_core.setting.subscriber.ImageSubscriber) 71 | assert isinstance(module.script_parameter_list[1].setting, cellprofiler_core.setting.text.alphanumeric.name.image_name._image_name.ImageName) 72 | 73 | def test_copy_image(): 74 | x, y = numpy.mgrid[0:10, 0:10] 75 | input_image = (x / 100.0 + y / 10.0).astype(numpy.float32) 76 | 77 | module, workspace = make_workspace() 78 | 79 | module.script_directory = Directory( 80 | "Script directory") 81 | module.script_file = Filename( 82 | "ImageJ Script", "./../resources/modules/runimagejscript/dummyscript.py") 83 | module.get_parameters_from_script() 84 | 85 | workspace.image_set.add("None", cellprofiler_core.image.Image(input_image)) 86 | 87 | module.run(workspace) 88 | 89 | output_image = workspace.image_set.get_image("copy") 90 | 91 | assert numpy.all(output_image.pixel_data == input_image) -------------------------------------------------------------------------------- /tests/test_runvista2d.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | import cellprofiler_core.image 4 | import cellprofiler_core.measurement 5 | import cellprofiler_core.object 6 | import cellprofiler_core.pipeline 7 | import cellprofiler_core.setting 8 | import cellprofiler_core.workspace 9 | import numpy 10 | import pytest 11 | from active_plugins.runvista2d import MONAILabelClient, MONAILabelClientException, MONAILabelUtils, RunVISTA2D 12 | 13 | IMAGE_NAME = "my_image" 14 | OBJECTS_NAME = "my_objects" 15 | MODEL_NAME = "vista2d" 16 | SERVER_ADDRESS = "http://127.0.0.1:8000" 17 | 18 | 19 | class MockResponse: 20 | @staticmethod 21 | def infer(*args, **kwargs): 22 | filepath = os.path.abspath(__file__) 23 | dir = os.path.dirname(filepath) 24 | image = os.path.join(dir, "resources", "vista2d_test.tiff") 25 | return image, {} 26 | 27 | 28 | class MockErrResponse: 29 | @staticmethod 30 | def http_multipart(*args, **kwargs): 31 | return 400, {}, {}, {} 32 | 33 | 34 | def test_mock_failed(): 35 | x = RunVISTA2D() 36 | x.y_name.value = OBJECTS_NAME 37 | x.x_name.value = IMAGE_NAME 38 | x.server_address.value = SERVER_ADDRESS 39 | x.model_name.value = MODEL_NAME 
40 | 41 | img = numpy.zeros((128, 128, 3)) 42 | image = cellprofiler_core.image.Image(img) 43 | image_set_list = cellprofiler_core.image.ImageSetList() 44 | image_set = image_set_list.get_image_set(0) 45 | image_set.providers.append(cellprofiler_core.image.VanillaImage(IMAGE_NAME, image)) 46 | object_set = cellprofiler_core.object.ObjectSet() 47 | measurements = cellprofiler_core.measurement.Measurements() 48 | pipeline = cellprofiler_core.pipeline.Pipeline() 49 | 50 | pytest.MonkeyPatch().setattr(MONAILabelUtils, "http_multipart", MockErrResponse.http_multipart) 51 | with pytest.raises(MONAILabelClientException): 52 | x.run(cellprofiler_core.workspace.Workspace(pipeline, x, image_set, object_set, measurements, None)) 53 | 54 | 55 | def test_mock_successful(): 56 | x = RunVISTA2D() 57 | x.y_name.value = OBJECTS_NAME 58 | x.x_name.value = IMAGE_NAME 59 | x.server_address.value = SERVER_ADDRESS 60 | x.model_name.value = MODEL_NAME 61 | 62 | img = numpy.zeros((128, 128, 3)) 63 | image = cellprofiler_core.image.Image(img) 64 | image_set_list = cellprofiler_core.image.ImageSetList() 65 | image_set = image_set_list.get_image_set(0) 66 | image_set.providers.append(cellprofiler_core.image.VanillaImage(IMAGE_NAME, image)) 67 | object_set = cellprofiler_core.object.ObjectSet() 68 | measurements = cellprofiler_core.measurement.Measurements() 69 | pipeline = cellprofiler_core.pipeline.Pipeline() 70 | 71 | pytest.MonkeyPatch().setattr(MONAILabelClient, "infer", MockResponse.infer) 72 | x.run(cellprofiler_core.workspace.Workspace(pipeline, x, image_set, object_set, measurements, None)) 73 | assert len(object_set.object_names) == 1 74 | assert OBJECTS_NAME in object_set.object_names 75 | objects = object_set.get_objects(OBJECTS_NAME) 76 | segmented = objects.segmented 77 | assert numpy.all(segmented == 0) 78 | assert "Image" in measurements.get_object_names() 79 | assert OBJECTS_NAME in measurements.get_object_names() 80 | 81 | assert f"Count_{OBJECTS_NAME}" in measurements.get_feature_names("Image") 82 | count = measurements.get_current_measurement("Image", f"Count_{OBJECTS_NAME}") 83 | assert count == 0 84 | assert "Location_Center_X" in measurements.get_feature_names(OBJECTS_NAME) 85 | location_center_x = measurements.get_current_measurement(OBJECTS_NAME, "Location_Center_X") 86 | assert isinstance(location_center_x, numpy.ndarray) 87 | assert numpy.product(location_center_x.shape) == 0 88 | assert "Location_Center_Y" in measurements.get_feature_names(OBJECTS_NAME) 89 | location_center_y = measurements.get_current_measurement(OBJECTS_NAME, "Location_Center_Y") 90 | assert isinstance(location_center_y, numpy.ndarray) 91 | assert numpy.product(location_center_y.shape) == 0 92 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/DoGNet.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | 4 | import numpy as np 5 | 6 | import dognet 7 | import torch 8 | from torch.autograd import Variable 9 | import skimage.draw 10 | 11 | ################################# 12 | # 13 | # Imports from CellProfiler 14 | # 15 | ################################## 16 | 17 | import cellprofiler.image 18 | import cellprofiler.module 19 | import cellprofiler.setting 20 | 21 | __doc__ = """\ 22 | DoGNet 23 | ====== 24 | 25 | **DoGNet** takes input synapsin1, PSD95, vGlut, and predicts the location of synapses. 26 | 27 | | 28 | 29 | ============ ============ =============== 30 | Supports 2D? Supports 3D? 
Respects masks? 31 | ============ ============ =============== 32 | YES NO YES 33 | ============ ============ =============== 34 | 35 | 36 | What do I get as output? 37 | ^^^^^^^^^^^^^^^^^^^^^^^^ 38 | 39 | A synapse prediction map. 40 | 41 | 42 | References 43 | ^^^^^^^^^^ 44 | Kulikov V, Guo SM, Stone M, Goodman A, Carpenter A, et al. (2019) 45 | DoGNet: A deep architecture for synapse detection in multiplexed fluorescence images. 46 | PLOS Computational Biology 15(5): e1007012. https://doi.org/10.1371/journal.pcbi.1007012 47 | """ 48 | 49 | class DoGNet(cellprofiler.module.Module): 50 | category = "Advanced" 51 | module_name = "DoGNet" 52 | variable_revision_number = 1 53 | 54 | def create_settings(self): 55 | self.synapsin_image = cellprofiler.setting.ImageNameSubscriber( 56 | "Select the synapsin image", cellprofiler.setting.NONE, doc="""\ 57 | Select the image of the synapsin-1 channel.""") 58 | 59 | self.PSD95_image = cellprofiler.setting.ImageNameSubscriber( 60 | "Select the PSD95 image", cellprofiler.setting.NONE, doc="""\ 61 | Select the image of the PSD95 channel.""") 62 | 63 | self.vGlut_image = cellprofiler.setting.ImageNameSubscriber( 64 | "Select the vGlut image", cellprofiler.setting.NONE, doc="""\ 65 | Select the image of the vGlut channel.""") 66 | 67 | self.prediction_image_name = cellprofiler.setting.ImageNameProvider( 68 | "Output image name", 69 | "SynapsePrediction", 70 | doc="""\ 71 | Enter the name to give the output prediction image created by this module. 72 | """) 73 | self.t7_name = cellprofiler.setting.Pathname( 74 | "Trained network location", 75 | doc="Specify the location of the trained network." 76 | ) 77 | 78 | def settings(self): 79 | 80 | settings = [ 81 | self.synapsin_image, 82 | self.PSD95_image, 83 | self.vGlut_image, 84 | self.prediction_image_name, 85 | self.t7_name 86 | ] 87 | 88 | return settings 89 | 90 | def run(self, workspace): 91 | net = dognet.SimpleAnisotropic(3,15,5,learn_amplitude=False) 92 | net.to('cpu') 93 | net.load_state_dict(torch.load(self.t7_name.value)) 94 | 95 | syn_normed=np.expand_dims( 96 | self.normalize( 97 | workspace.image_set.get_image(self.synapsin_image.value, must_be_grayscale=True) 98 | ) 99 | ,0) 100 | psd_normed=np.expand_dims( 101 | self.normalize( 102 | workspace.image_set.get_image(self.PSD95_image.value, must_be_grayscale=True) 103 | ) 104 | ,0) 105 | vglut_normed=np.expand_dims( 106 | self.normalize( 107 | workspace.image_set.get_image(self.vGlut_image.value, must_be_grayscale=True) 108 | ) 109 | ,0) 110 | 111 | data = np.concatenate([syn_normed,psd_normed,vglut_normed]) 112 | print(data.shape) 113 | y = self.inference(net,data) 114 | 115 | output_image = cellprofiler.image.Image(y[0,0]) 116 | 117 | workspace.image_set.add(self.prediction_image_name.value, output_image) 118 | 119 | if self.show_window: 120 | workspace.display_data.syn_pixels = workspace.image_set.get_image(self.synapsin_image.value).pixel_data 121 | 122 | workspace.display_data.psd_pixels = workspace.image_set.get_image(self.PSD95_image.value).pixel_data 123 | 124 | workspace.display_data.vglut_pixels = workspace.image_set.get_image(self.vGlut_image.value).pixel_data 125 | 126 | workspace.display_data.output_pixels = y[0,0] 127 | 128 | def display(self, workspace, figure): 129 | dimensions = (2, 2) 130 | 131 | figure.set_subplots(dimensions) 132 | 133 | figure.subplot_imshow_grayscale(0, 0, workspace.display_data.syn_pixels, "Synapsin") 134 | 135 | figure.subplot_imshow_grayscale( 136 | 1, 137 | 0, 138 | workspace.display_data.psd_pixels, 139 
| "PSD-95", 140 | sharexy=figure.subplot(0, 0), 141 | ) 142 | 143 | figure.subplot_imshow_grayscale( 144 | 0, 145 | 1, 146 | workspace.display_data.vglut_pixels, 147 | "vGlut", 148 | sharexy=figure.subplot(0, 0), 149 | ) 150 | 151 | figure.subplot_imshow_grayscale( 152 | 1, 153 | 1, 154 | workspace.display_data.output_pixels, 155 | "Synapse prediction", 156 | sharexy=figure.subplot(0, 0), 157 | ) 158 | 159 | def normalize(self, im): 160 | meanx = im.pixel_data.mean() 161 | minx = im.pixel_data.min() 162 | maxx = im.pixel_data.max() 163 | x = np.copy(im.pixel_data.astype(np.float32)) 164 | x = (x - meanx - minx)/(maxx - minx).astype(np.float32) 165 | return x 166 | 167 | def inference(self, net,image,get_intermediate=False): 168 | x = np.expand_dims(image,0) 169 | vx = Variable(torch.from_numpy(x).float()).to('cpu') 170 | 171 | res,inter = net(vx) 172 | if get_intermediate: 173 | return res.data.cpu().numpy(),inter.data.cpu().numpy() 174 | return res.data.cpu().numpy() 175 | 176 | def volumetric(self): 177 | return False 178 | 179 | 180 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/DoGNetWeights/Simple_Anisotropic_4_11_2_prism17.t7: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CellProfiler/CellProfiler-plugins/4581206f45d37911471a3a8cdfece077ee44f992/unmaintained_plugins/CellProfiler3/DoGNetWeights/Simple_Anisotropic_4_11_2_prism17.t7 -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | CellStar package providing CellStar algorithm for segmentation of yeast cells in brightfield imagery. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | __author__ = 'Adam Kaczmarek, Filip Mróz, Szymon Stoma' 8 | __all__ = ["segmentation"] 9 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Core package including main components used in CellStar segmentation. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | __all__ = ["image_repo", "point", "seed", "seeder", "snake", "snake_filter"] 8 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/config.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Config is a storage for default CellStar configuration. 
4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | 9 | def default_config(): 10 | return { 11 | 'segmentation': { 12 | 'foreground': { 13 | 'FillHolesWithAreaSmallerThan': 2.26, 14 | 'MaskDilation': 0.136, 15 | 'MaskMinRadius': 0.34, 16 | 'MaskThreshold': 0.03, 17 | 'pickyDetection': False, 18 | 'blur': 1, 19 | 'MinCellClusterArea': 0.85 20 | }, 21 | 'avgCellDiameter': 35, 22 | 'background': { 23 | 'blurSteps': 50, 24 | 'computeByBlurring': 0.5, 25 | 'blur': 0.3 26 | }, 27 | 'ranking': { 28 | 'avgInnerBrightnessWeight': 10, 29 | 'avgBorderBrightnessWeight': 300, 30 | 'stickingWeight': 60, 31 | 'shift': 0.68, 32 | 'maxInnerBrightnessWeight': 10, 33 | 'logAreaBonus': 18, 34 | 'maxRank': 100, 35 | 'avgInnerDarknessWeight': 0 36 | }, 37 | 'minArea': 0.07, 38 | 'cellBorder': { 39 | 'medianFilter': 0.1 40 | }, 41 | 'maxFreeBorder': 0.4, 42 | 'cellContent': { 43 | 'MaskThreshold': 0.0, 44 | 'medianFilter': 0.17, 45 | 'blur': 0.6 46 | }, 47 | 'steps': 2, 48 | 'maxArea': 2.83, 49 | 'stars': { 50 | 'cumBrightnessWeight': 304.45, 51 | 'maxSize': 1.67, 52 | 'gradientWeight': 15.482, 53 | 'sizeWeight': [189.4082], 54 | 'brightnessWeight': 0.0442, 55 | 'step': 0.0335, 56 | 'points': 28, 57 | 'borderThickness': 0.1, 58 | 'unstick': 0.3, 59 | 'backgroundWeight': 0.0, 60 | 'smoothness': 7.0, 61 | 'gradientBlur': 0.0 62 | }, 63 | 'minAvgInnerDarkness': 0.1, 64 | 'maxOverlap': 0.3, 65 | 'seeding': { 66 | 'from': { 67 | 'cellContentRandom': 0, 68 | 'cellBorderRemovingCurrSegmentsRandom': 0, 69 | 'cellContentRemovingCurrSegments': 1, 70 | 'snakesCentroids': 0, 71 | 'cellContent': 0, 72 | 'cellContentRemovingCurrSegmentsRandom': 0, 73 | 'cellBorderRemovingCurrSegments': 0, 74 | 'cellBorder': 1, 75 | 'snakesCentroidsRandom': 0, 76 | 'cellBorderRandom': 0 77 | }, 78 | 'ContentBlur': 2, 79 | 'randomDiskRadius': 0.33, 80 | 'minDistance': 0.27, 81 | 'BorderBlur': 2 82 | } 83 | } 84 | } 85 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/parallel/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Parallel is an attempt to use multiprocessing in snake growing thus improving the speed of the segmentation. 4 | Currently not tested and on hold. 5 | Date: 2015-2016 6 | Website: http://cellstar-algorithm.org/ 7 | """ 8 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/parallel/snake_grow.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Snake grow is an attempt to run time expensive snake in parallel using multiprocessing package. 4 | Currently not tested and on hold. Tried only once for parameter fitting. 
5 | Date: 2015-2016 6 | Website: http://cellstar-algorithm.org/ 7 | """ 8 | 9 | import ctypes 10 | from copy import copy 11 | from multiprocessing import Pool, Array, Manager 12 | 13 | import numpy as np 14 | 15 | from cellstar.core.image_repo import ImageRepo 16 | from cellstar.core.polar_transform import PolarTransform 17 | from cellstar.core.snake import Snake 18 | from cellstar.parameter_fitting.pf_snake import PFSnake 19 | 20 | 21 | def conv_single_image(image): 22 | shared_array_base = Array(ctypes.c_double, image.size) 23 | shared_array = np.ctypeslib.as_array(shared_array_base.get_obj()) 24 | shared_array = shared_array.reshape(image.shape) 25 | shared_array[:] = image 26 | 27 | return shared_array 28 | 29 | 30 | def conv_image_repo(images): 31 | return map(conv_single_image, [ 32 | images.foreground_mask, 33 | images.brighter, 34 | images.darker, 35 | images.image_back_difference_blurred, 36 | images.image_back_difference, 37 | images.cell_content_mask, 38 | images.cell_border_mask, 39 | images.background, 40 | images.background_mask 41 | ]) 42 | 43 | 44 | def grow_single_snake(frame, images, parameters, seed): 45 | # 46 | # 47 | # RECONSTRUCT INPUT 48 | # 49 | # 50 | 51 | ir = ImageRepo(frame, parameters) 52 | ir._foreground_mask, ir._brighter, ir._darker, ir._clean, ir._clean_original, \ 53 | ir._cell_content_mask, ir._cell_border_mask, ir._background, ir._background_mask = images 54 | 55 | # 56 | # 57 | # CREATE AND GROW SNAKE 58 | # 59 | # 60 | 61 | polar_transform = PolarTransform.instance(parameters["segmentation"]["avgCellDiameter"], 62 | parameters["segmentation"]["stars"]["points"], 63 | parameters["segmentation"]["stars"]["step"], 64 | parameters["segmentation"]["stars"]["maxSize"]) 65 | 66 | s = Snake.create_from_seed(parameters, seed, parameters["segmentation"]["stars"]["points"], ir) 67 | 68 | size_weight_list = parameters["segmentation"]["stars"]["sizeWeight"] 69 | snakes_to_grow = [(copy(s), w) for w in size_weight_list] 70 | 71 | for snake, weight in snakes_to_grow: 72 | snake.grow(size_weight=weight, polar_transform=polar_transform) 73 | snake.evaluate(polar_transform) 74 | 75 | best_snake = sorted(snakes_to_grow, key=lambda (sn, _): sn.rank)[0][0] 76 | 77 | pf_s = PFSnake(None, None, None, best_snake=best_snake) 78 | pf_s.best_snake = best_snake 79 | 80 | return pf_s 81 | 82 | 83 | def grow_fun((seed, frame, images, parameters)): 84 | return grow_single_snake(frame, images, parameters, seed) 85 | 86 | 87 | def add_snake(snakes, snake): 88 | snakes.append(snake) 89 | 90 | 91 | def mp_snake_grow(images, parameters, seeds): 92 | snakes = [] 93 | manager = Manager() 94 | shared_parameters = manager.dict(parameters) 95 | shared_images = conv_image_repo(images) 96 | shared_frame = conv_single_image(images.image) 97 | 98 | snakes = [] 99 | 100 | pool = Pool(processes=8) 101 | snakes = pool.map_async(grow_fun, [(seed, shared_frame, shared_images, shared_parameters) for seed in seeds]).get() 102 | pool.close() 103 | pool.join() 104 | 105 | return snakes 106 | 107 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/point.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Integer point wrapper. 
4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import math 9 | 10 | 11 | class Point(object): 12 | """ 13 | @ivar x: x coordinate of point 14 | @ivar y: y coordinate of point 15 | """ 16 | 17 | def __init__(self, x, y): 18 | """ 19 | @type x: int 20 | @type y: int 21 | """ 22 | self.x = x 23 | self.y = y 24 | 25 | def __repr__(self): 26 | return "Seed(x={0},y={1})".format(self.x, self.y) 27 | 28 | def __eq__(self, other): 29 | return self.x == other.x and self.y == other.y 30 | 31 | def polar_coords(self, origin): 32 | """ 33 | @type origin : Point 34 | @return: radius, angle 35 | @rtype: (int,int) 36 | """ 37 | r_x = self.x - origin.x 38 | r_y = self.y - origin.y 39 | 40 | radius = math.sqrt(r_x ** 2 + r_y ** 2) 41 | angle = math.atan2(r_y, r_x) 42 | 43 | return radius, angle 44 | 45 | def as_xy(self): 46 | return self.x, self.y 47 | 48 | def euclidean_distance_to(self, other_point): 49 | return math.sqrt((self.x - other_point.x) ** 2 + (self.y - other_point.y) ** 2) 50 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/seed.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Contour seed. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | from cellstar.core.point import Point 9 | 10 | 11 | class Seed(Point): 12 | """ 13 | Object of a seed. 14 | @ivar x: x coordinate of seed 15 | @ivar y: y coordinate of seed 16 | @ivar origin: where seed comes from ('content' or 'background' or 'snakes') 17 | """ 18 | 19 | def __init__(self, x, y, origin): 20 | """ 21 | @type x: int 22 | @type y: int 23 | """ 24 | super(Seed, self).__init__(x, y) 25 | self.origin = origin 26 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/core/snake_filter.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | SnakeFilter is responsible for ranking and filtering out contours that as incorrect or overlap better ones. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import logging 9 | import math 10 | 11 | import numpy as np 12 | 13 | from cellstar.core.snake import Snake 14 | 15 | 16 | class SnakeFilter(object): 17 | """ 18 | Order snakes based on their ranking and checks them for violating other constraints. 19 | Discard snakes that overlap with already approved ones. 
20 | """ 21 | 22 | def __init__(self, images, parameters): 23 | """ 24 | @type parameters: dict 25 | @type images: core.image_repo.ImageRepo 26 | """ 27 | self.parameters = parameters 28 | self.images = images 29 | 30 | def is_single_snake_discarded(self, snake): 31 | """ 32 | @type snake: Snake 33 | @rtype: bool 34 | """ 35 | if snake.rank >= Snake.max_rank: 36 | return True 37 | 38 | if snake.avg_inner_darkness < self.parameters["segmentation"]["minAvgInnerDarkness"]: 39 | return True 40 | 41 | max_area = self.parameters["segmentation"]["maxArea"] * self.parameters["segmentation"]["avgCellDiameter"]**2 * math.pi / 4 42 | if snake.area > max_area: 43 | return True 44 | 45 | min_area = self.parameters["segmentation"]["minArea"] * self.parameters["segmentation"]["avgCellDiameter"]**2 * math.pi / 4 46 | if snake.area < min_area: 47 | return True 48 | 49 | max_free_border = self.parameters["segmentation"]["stars"]["points"] * self.parameters["segmentation"]["maxFreeBorder"] 50 | if snake.max_contiguous_free_border > max_free_border: 51 | return True 52 | 53 | return False 54 | 55 | def filter(self, snakes): 56 | """ 57 | @type snakes: list[Snake] 58 | @rtype: list[Snake] 59 | """ 60 | logging.basicConfig(format='%(asctime)-15s %(message)s', level=logging.DEBUG) 61 | logger = logging.getLogger(__package__) 62 | log_message = "Discarding snake {0} for {1}: {2}" 63 | 64 | original = self.images.image 65 | filtered_snakes = [] 66 | segments = np.zeros(original.shape, dtype=int) 67 | 68 | # do not allow cells on masked areas 69 | segments[self.images.mask == 0] = -1 70 | 71 | if len(snakes) > 0: 72 | snakes_sorted = sorted(enumerate(snakes), key=lambda x: x[1].rank) 73 | current_accepted_snake_index = 1 74 | for i in xrange(len(snakes_sorted)): 75 | curr_snake = snakes_sorted[i][1] 76 | snake_index = snakes_sorted[i][0] 77 | 78 | if curr_snake.rank >= Snake.max_rank: 79 | logger.debug(log_message.format(snake_index, 'too high rank', curr_snake.rank)) 80 | break 81 | 82 | local_snake = curr_snake.in_polygon 83 | sxy = curr_snake.in_polygon_slice 84 | local_segments = segments[sxy] 85 | 86 | overlap_area = np.count_nonzero(np.logical_and(local_segments, local_snake)) 87 | overlap = float(overlap_area) / curr_snake.area 88 | 89 | if overlap > self.parameters["segmentation"]["maxOverlap"]: 90 | logger.debug(log_message.format(snake_index, 'too much overlapping', overlap)) 91 | else: 92 | vacant_snake = np.logical_and(local_snake, local_segments == 0) 93 | vacant_cell_content = vacant_snake[self.images.cell_content_mask[sxy]] 94 | curr_snake.area = np.count_nonzero(vacant_snake) + Snake.epsilon 95 | avg_inner_darkness = float(np.count_nonzero(vacant_cell_content)) / float(curr_snake.area) 96 | if avg_inner_darkness < self.parameters["segmentation"]["minAvgInnerDarkness"]: 97 | logger.debug(log_message.format(snake_index, 'too low inner darkness', '...')) 98 | else: 99 | if curr_snake.area > (self.parameters["segmentation"]["maxArea"] * self.parameters["segmentation"]["avgCellDiameter"]**2 * math.pi / 4): 100 | logger.debug(log_message.format(snake_index, 'too big area', str(curr_snake.area))) 101 | else: 102 | if curr_snake.area < (self.parameters["segmentation"]["minArea"] * self.parameters["segmentation"]["avgCellDiameter"]**2 * math.pi / 4): 103 | logger.debug(log_message.format(snake_index, 'too small area:', str(curr_snake.area))) 104 | else: 105 | max_free_border = self.parameters["segmentation"]["stars"]["points"] * self.parameters["segmentation"]["maxFreeBorder"] 106 | if 
curr_snake.max_contiguous_free_border > max_free_border: 107 | logger.debug(log_message.format(snake_index, 108 | 'too long contiguous free border', 109 | str(curr_snake.max_contiguous_free_border) + 110 | ' over ' + str(max_free_border))) 111 | else: 112 | local_segments[[vacant_snake]] = current_accepted_snake_index 113 | filtered_snakes.append(curr_snake) 114 | current_accepted_snake_index += 1 115 | 116 | segments *= self.images.mask # clear mask 117 | self.images._segmentation = segments 118 | return filtered_snakes 119 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/parameter_fitting/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Parameter fitting package includes all components for automated parameter fitting based on a provided ground truth/ 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | __all__ = ["pf_process", "pf_snake"] -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/parameter_fitting/pf_auto_params.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Pf auto params contains parameters that are optimised as well as encode / decode functions. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import numpy as np 9 | 10 | parameters_range = {#"borderThickness": (0.001, 1.0), 11 | "brightnessWeight": (-0.4, 0.4), 12 | "cumBrightnessWeight": (0, 500), 13 | "gradientWeight": (-30, 30), 14 | "sizeWeight": (10, 300), 15 | #"smoothness": (3, 8) 16 | } 17 | 18 | rank_parameters_range = {"avgBorderBrightnessWeight": (0, 600), 19 | "avgInnerBrightnessWeight": (-100, 100), 20 | "avgInnerDarknessWeight": (-100, 100), 21 | "logAreaBonus": (5, 50), 22 | "maxInnerBrightnessWeight": (-10, 50), 23 | # "maxRank": (5, 300), 24 | # "stickingWeight": (0, 120) # this is set to 60 rest of parameters should adapt to it 25 | } 26 | 27 | 28 | class OptimisationBounds(object): 29 | def __init__(self, size=None, xmax=1, xmin=0): 30 | self.xmax = xmax 31 | self.xmin = xmin 32 | if size is not None: 33 | self.xmax = [xmax] * size 34 | self.xmin = [xmin] * size 35 | 36 | @staticmethod 37 | def from_ranges(ranges_dict): 38 | bounds = OptimisationBounds() 39 | bounds.xmin = [] 40 | bounds.xmax = [] 41 | # bound only two parameters 42 | for k, v in list(sorted(ranges_dict.iteritems())): 43 | if k == "borderThickness": 44 | bounds.xmin.append(0.001) 45 | bounds.xmax.append(2) 46 | elif k == "smoothness": 47 | bounds.xmin.append(4.0) 48 | bounds.xmax.append(10.0) 49 | else: 50 | bounds.xmin.append(-1000000) 51 | bounds.xmax.append(1000000) 52 | 53 | # bounds.xmin, bounds.xmax = zip(*zip(*list(sorted(ranges_dict.iteritems())))[1]) 54 | return bounds 55 | 56 | def __call__(self, **kwargs): 57 | x = kwargs["x_new"] 58 | tmax = bool(np.all(x <= self.xmax)) 59 | tmin = bool(np.all(x >= self.xmin)) 60 | return tmax and tmin 61 | 62 | 63 | ContourBounds = OptimisationBounds.from_ranges(parameters_range) 64 | RankBounds = OptimisationBounds(size=len(rank_parameters_range)) 65 | 66 | 67 | # 68 | # 69 | # PARAMETERS ENCODE DECODE 70 | # 71 | # 72 | 73 | def pf_parameters_encode(parameters): 74 | """ 75 | brightnessWeight: 0.0442 +brightness on cell edges 76 | cumBrightnessWeight: 304.45 -brightness in the cell center 77 | gradientWeight: 15.482 +gradent on 
the cell edges 78 | sizeWeight: 189.4082 (if list -> avg. will be comp.) +big cells 79 | @param parameters: dictionary segmentation.stars 80 | """ 81 | parameters = parameters["segmentation"]["stars"] 82 | point = [] 83 | for name, (_, _) in sorted(parameters_range.iteritems()): 84 | val = parameters[name] 85 | if name == "sizeWeight": 86 | if not isinstance(val, float): 87 | val = np.mean(val) 88 | point.append(val) # no scaling 89 | return point 90 | 91 | 92 | def pf_parameters_decode(param_vector, org_size_weights_list): 93 | """ 94 | sizeWeight is one number (mean of the future list) 95 | @type param_vector: numpy.ndarray 96 | @return: 97 | """ 98 | parameters = {} 99 | for (name, (_, _)), val in zip(sorted(parameters_range.iteritems()), param_vector): 100 | if name == "sizeWeight": 101 | val = list(np.array(org_size_weights_list) * (val / np.mean(org_size_weights_list))) 102 | elif name == "borderThickness": 103 | val = min(max(0.001, val), 3) 104 | parameters[name] = val 105 | 106 | # set from default 107 | parameters["borderThickness"] = 0.1 108 | parameters["smoothness"] = 6 109 | return parameters 110 | 111 | 112 | def pf_rank_parameters_encode(parameters, complete_params_given=True): 113 | """ 114 | avgBorderBrightnessWeight: 300 115 | avgInnerBrightnessWeight: 10 116 | avgInnerDarknessWeight: 0 117 | logAreaBonus: 18 118 | maxInnerBrightnessWeight: 10 119 | @param parameters: dictionary all ranking params or a complete 120 | @param complete_params_given: is parameters a complete dictionary 121 | """ 122 | if complete_params_given: 123 | parameters = parameters["segmentation"]["ranking"] 124 | 125 | point = [] 126 | for name, (vmin, vmax) in sorted(rank_parameters_range.iteritems()): 127 | val = parameters[name] 128 | if vmax - vmin == 0: 129 | point.append(0) 130 | else: 131 | point.append((val - vmin) / float(vmax - vmin)) # scaling to [0,1] 132 | return point 133 | 134 | 135 | def pf_rank_parameters_decode(param_vector): 136 | """ 137 | @type param_vector: numpy.ndarray 138 | @return: only ranking parameters as a dict 139 | """ 140 | parameters = {} 141 | for (name, (vmin, vmax)), val in zip(sorted(rank_parameters_range.iteritems()), param_vector): 142 | rescaled = vmin + val * (vmax - vmin) 143 | parameters[name] = rescaled 144 | 145 | # set from default 146 | parameters["stickingWeight"] = 60 147 | 148 | return parameters 149 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/parameter_fitting/pf_mutator.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Mutator can be used to change (ie mutate) existing snakes to provide higher variability in ground_truth pool. 
4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import copy 9 | import random 10 | 11 | import numpy as np 12 | 13 | from cellstar.core.point import * 14 | from cellstar.utils.calc_util import polar_to_cartesian 15 | 16 | 17 | def add_mutations(gt_and_grown, avg_cell_diameter): 18 | mutants = [] 19 | mutation_radiuses = 0.2 * avg_cell_diameter 20 | for (gt, grown) in gt_and_grown: 21 | mutants += [ 22 | (gt, grown.create_mutation(mutation_radiuses * 2, random_poly=True)), 23 | (gt, grown.create_mutation(-mutation_radiuses * 2, random_poly=True)), 24 | (gt, grown.create_mutation(mutation_radiuses)), (gt, grown.create_mutation(-mutation_radiuses)), 25 | ] 26 | return gt_and_grown + mutants 27 | 28 | 29 | def create_mutant_from_change(org_snake, polar_transform, boundary_change): 30 | mutant_snake = copy.copy(org_snake) 31 | # zero rank so it recalculates 32 | mutant_snake.rank = None 33 | 34 | # constrain change 35 | new_boundary = mutant_snake.polar_coordinate_boundary + boundary_change 36 | while (new_boundary <= 3).all() and abs(boundary_change).max() > 3: 37 | new_boundary = np.maximum(np.minimum( 38 | mutant_snake.polar_coordinate_boundary + boundary_change, 39 | len(polar_transform.R) - 1), 3) 40 | boundary_change /= 1.3 41 | 42 | px, py = polar_to_cartesian(new_boundary, mutant_snake.seed.x, mutant_snake.seed.y, polar_transform) 43 | 44 | mutant_snake.polar_coordinate_boundary = new_boundary 45 | mutant_snake.points = [Point(x, y) for x, y in zip(px, py)] 46 | 47 | # need to update self.final_edgepoints to calculate properties (for now we ignore this property) 48 | mutant_snake.evaluate(polar_transform) 49 | 50 | return mutant_snake 51 | 52 | 53 | def create_poly_mutation(org_snake, polar_transform, max_diff): 54 | # change to pixels 55 | length = org_snake.polar_coordinate_boundary.size 56 | max_diff /= polar_transform.step 57 | 58 | def polynomial(x1, x2): 59 | def eval(x): 60 | return x * (x - length) * (x - x1) * (x * 0.4 - x2) 61 | 62 | return eval 63 | 64 | poly = polynomial(random.uniform(0.001, length), random.uniform(0.001, length)) 65 | boundary_change = np.array([poly(x) for x in range(length)]) 66 | 67 | M = abs(boundary_change).max() 68 | boundary_change = boundary_change / M * max_diff 69 | 70 | mutant_snake = create_mutant_from_change(org_snake, polar_transform, boundary_change) 71 | return mutant_snake 72 | 73 | 74 | def create_mutation(org_snake, polar_transform, dilation): 75 | # change to pixels 76 | dilation /= polar_transform.step 77 | boundary_change = np.array([dilation for _ in range(org_snake.polar_coordinate_boundary.size)]) 78 | 79 | mutant_snake = create_mutant_from_change(org_snake, polar_transform, boundary_change) 80 | return mutant_snake 81 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/parameter_fitting/pf_rank_snake.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | PFRankSnake represents one ground_truth contour for ranking parameters fitting. 
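A rough, self-contained sketch of the boundary perturbation idea used by create_poly_mutation in pf_mutator.py above: a random quartic is evaluated along the contour and rescaled so its largest excursion equals max_diff. The array size and step values here are hypothetical:

```python
import random
import numpy as np

length = 48      # number of polar boundary points (hypothetical)
max_diff = 4.0   # largest allowed boundary change, in polar steps (hypothetical)

x1, x2 = random.uniform(0.001, length), random.uniform(0.001, length)

def poly(x):
    # a random quartic that vanishes at both ends of the contour
    return x * (x - length) * (x - x1) * (x * 0.4 - x2)

boundary_change = np.array([poly(x) for x in range(length)])
# rescale so the largest absolute change equals max_diff
boundary_change = boundary_change / abs(boundary_change).max() * max_diff
```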
4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import copy 9 | import random 10 | 11 | random.seed(1) # make it deterministic 12 | 13 | from cellstar.core.polar_transform import PolarTransform 14 | from cellstar.parameter_fitting.pf_snake import PFSnake 15 | import pf_mutator 16 | 17 | 18 | class PFRankSnake(object): 19 | def __init__(self, gt_snake, grown_snake, avg_cell_diameter, params): 20 | self.gt_snake = gt_snake 21 | self.grown_snake = grown_snake 22 | self.avg_cell_diameter = avg_cell_diameter 23 | self.initial_parameters = params 24 | self.fitness = PFSnake.fitness_with_gt(grown_snake, gt_snake) 25 | self.rank_vector = grown_snake.properties_vector(avg_cell_diameter) 26 | self.polar_transform = PolarTransform.instance(params["segmentation"]["avgCellDiameter"], 27 | params["segmentation"]["stars"]["points"], 28 | params["segmentation"]["stars"]["step"], 29 | params["segmentation"]["stars"]["maxSize"]) 30 | 31 | @staticmethod 32 | def create_all(gt_snake, grown_pf_snake, params): 33 | return [(gt_snake, PFRankSnake(gt_snake, snake, grown_pf_snake.avg_cell_diameter, params)) for snake in 34 | grown_pf_snake.snakes] 35 | 36 | def create_mutation(self, dilation, random_poly=False): 37 | if random_poly: 38 | mutant = pf_mutator.create_poly_mutation(self.grown_snake, self.polar_transform, dilation) 39 | else: 40 | mutant = pf_mutator.create_mutation(self.grown_snake, self.polar_transform, dilation) 41 | return PFRankSnake(self.gt_snake, mutant, self.avg_cell_diameter, self.initial_parameters) 42 | 43 | @staticmethod 44 | def merge_rank_parameters(initial_parameters, new_params): 45 | params = copy.deepcopy(initial_parameters) 46 | for k, v in new_params.iteritems(): 47 | params["segmentation"]["ranking"][k] = v 48 | 49 | return params 50 | 51 | def merge_parameters_with_me(self, new_params): 52 | return PFRankSnake.merge_rank_parameters(self.initial_parameters, new_params) 53 | 54 | def calculate_ranking(self, ranking_params): 55 | return self.grown_snake.star_rank(ranking_params, self.avg_cell_diameter) 56 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/parameter_fitting/pf_runner.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Entry point for running fitting process both parameter sets: contour and ranking. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | import logging 8 | 9 | import sys 10 | import numpy as np 11 | import scipy as sp 12 | 13 | import cellstar.parameter_fitting.pf_process as pf_process 14 | import cellstar.parameter_fitting.pf_rank_process as pf_rank 15 | from cellstar.parameter_fitting.pf_snake import GTSnake 16 | 17 | try: 18 | from cellprofiler.preferences import get_max_workers 19 | except: 20 | get_max_workers = lambda: 1 21 | 22 | logger = logging.getLogger(__name__) 23 | 24 | 25 | def single_mask_to_snake(bool_mask, seed=None): 26 | return GTSnake(bool_mask, seed) 27 | 28 | 29 | def gt_label_to_snakes(components): 30 | num_components = components.max() 31 | return [single_mask_to_snake(components == label) for label in range(1, num_components + 1)] 32 | 33 | 34 | def image_to_label(image): 35 | values = np.unique(image) 36 | if len(values) == 2: # it is a mask 37 | components, num_components = sp.ndimage.label(image, np.ones((3, 3))) 38 | return components 39 | else: # remap labels to [1..] 
values 40 | curr = 1 41 | label_image = image.copy() 42 | for v in values[1:]: # zero is ignored 43 | label_image[image == v] = curr 44 | curr += 1 45 | return label_image 46 | 47 | 48 | def run_pf(input_image, background_image, ignore_mask_image, gt_label, parameters, precision, avg_cell_diameter, 49 | callback_progress=None): 50 | """ 51 | :param input_image: 52 | :param gt_label: 53 | :param parameters: 54 | :return: Best complete parameters settings, best distance 55 | """ 56 | 57 | gt_mask = image_to_label(gt_label) 58 | pf_process.callback_progress = callback_progress 59 | 60 | gt_snakes = gt_label_to_snakes(gt_mask) 61 | if get_max_workers() > 1: 62 | best_complete_params, _, best_score = pf_process.run(input_image, gt_snakes, precision=precision, 63 | avg_cell_diameter=avg_cell_diameter, initial_params=parameters, 64 | method='mp', background_image=background_image, 65 | ignore_mask=ignore_mask_image) 66 | else: 67 | best_complete_params, _, best_score = pf_process.run(input_image, gt_snakes, precision=precision, 68 | avg_cell_diameter=avg_cell_diameter, initial_params=parameters, 69 | method='brutemaxbasin', background_image=background_image, 70 | ignore_mask=ignore_mask_image) 71 | 72 | return best_complete_params, best_score 73 | 74 | 75 | def run_rank_pf(input_image, background_image, ignore_mask_image, gt_mask, parameters, callback_progress=None): 76 | """ 77 | :return: Best complete parameters settings, best distance 78 | """ 79 | 80 | gt_mask = image_to_label(gt_mask) 81 | pf_rank.callback_progress = callback_progress 82 | 83 | gt_snakes = gt_label_to_snakes(gt_mask) 84 | if get_max_workers() > 1 and not (getattr(sys, "frozen", False) and sys.platform == 'win32'): 85 | # multiprocessing do not work if frozen on win32 86 | best_complete_params, _, best_score = pf_rank.run_multiprocess(input_image, gt_snakes, 87 | initial_params=parameters, 88 | method='brutemaxbasin', 89 | background_image=background_image, 90 | ignore_mask=ignore_mask_image) 91 | else: 92 | best_complete_params, _, best_score = pf_rank.run_singleprocess(input_image, gt_snakes, 93 | initial_params=parameters, 94 | method='brutemaxbasin', 95 | background_image=background_image, 96 | ignore_mask=ignore_mask_image) 97 | 98 | return best_complete_params, best_score -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/utils/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Utilities package contain a number of general functions used in Cell Star segmentation process. 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | __all__ = ["calc_util", "cluster_util", "image_util", "params_util", "python_util"] 8 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/utils/index.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Index is a convenient structure for processing calculate all angle x radius point in one numpy operation. 
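A small numpy-only sketch of what the Index helper defined just below does: it packs (row, column) coordinates into a single integer array so a whole set of points can be read from an image with one fancy-indexing operation. The example image and coordinates are made up for illustration:

```python
import numpy as np

image = np.arange(25.0).reshape(5, 5)
px = np.array([1, 2, 3])   # x (column) coordinates
py = np.array([0, 2, 4])   # y (row) coordinates

# what Index.create(px, py) builds: one (row, column) pair per point
index = np.column_stack((py.flat, px.flat)).astype(np.int64)

# what Index.to_numpy(index) returns for a 2D index array
rows, cols = index[:, 0], index[:, 1]

values = image[rows, cols]   # -> array([ 1., 12., 23.])
```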
4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import numpy as np 9 | 10 | 11 | class Index(object): 12 | @classmethod 13 | def create(cls, px, py): 14 | return np.column_stack((py.flat, px.flat)).astype(np.int64) 15 | 16 | @staticmethod 17 | def to_numpy(index): 18 | if len(index.shape) == 2: 19 | return index[:, 0], index[:, 1] 20 | elif len(index.shape) == 3: 21 | return index[:, :, 0], index[:, :, 1] 22 | else: 23 | return index 24 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/cellstar/utils/params_util.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Params util module contains methods for manipulating parameters and precision to parameters mapping 4 | Date: 2013-2016 5 | Website: http://cellstar-algorithm.org/ 6 | """ 7 | 8 | import numpy as np 9 | 10 | from cellstar.core.config import default_config 11 | 12 | 13 | def default_parameters(segmentation_precision=-1, avg_cell_diameter=-1): 14 | parameters = default_config() 15 | if avg_cell_diameter != -1: 16 | parameters["segmentation"]["avgCellDiameter"] = avg_cell_diameter 17 | 18 | if segmentation_precision is None: 19 | return parameters 20 | else: 21 | return parameters_from_segmentation_precision(parameters, segmentation_precision) 22 | 23 | 24 | def create_size_weights(size_weight_average, length): 25 | if length == 1: 26 | size_weight_multiplier = np.array([1]) 27 | elif length == 2: 28 | size_weight_multiplier = np.array([0.8, 1.25]) 29 | elif length == 3: 30 | size_weight_multiplier = np.array([0.6, 1, 1.6]) 31 | elif length == 4: 32 | size_weight_multiplier = np.array([0.5, 0.8, 1.3, 2]) 33 | elif length == 5: 34 | size_weight_multiplier = np.array([0.5, 0.8, 1, 1.3, 2]) 35 | elif length == 6: 36 | size_weight_multiplier = np.array([0.35, 0.5, 0.8, 1.3, 2, 3]) 37 | else: 38 | size_weight_multiplier = np.array([0.25, 0.35, 0.5, 0.8, 1.3, 2, 3, 5, 8]) 39 | 40 | return size_weight_average * size_weight_multiplier / np.average(size_weight_multiplier) 41 | 42 | 43 | def parameters_from_segmentation_precision(parameters, segmentation_precision): 44 | sfrom = lambda x: max(0, segmentation_precision - x) 45 | segmentation_precision = min(20, segmentation_precision) 46 | if segmentation_precision <= 0: 47 | parameters["segmentation"]["steps"] = 0 48 | elif segmentation_precision <= 6: 49 | parameters["segmentation"]["steps"] = 1 50 | else: 51 | parameters["segmentation"]["steps"] = min(10, segmentation_precision - 5) 52 | 53 | parameters["segmentation"]["stars"]["points"] = 8 + max(segmentation_precision - 2, 0) * 4 54 | 55 | parameters["segmentation"]["maxFreeBorder"] = \ 56 | max(0.4, 0.7 * 16 / max(16, parameters["segmentation"]["stars"]["points"])) 57 | 58 | parameters["segmentation"]["seeding"]["from"]["cellBorder"] = int(segmentation_precision >= 2) 59 | parameters["segmentation"]["seeding"]["from"]["cellBorderRandom"] = sfrom(14) 60 | parameters["segmentation"]["seeding"]["from"]["cellContent"] = int(segmentation_precision >= 11) 61 | parameters["segmentation"]["seeding"]["from"]["cellContentRandom"] = min(4, sfrom(12)) 62 | parameters["segmentation"]["seeding"]["from"]["cellBorderRemovingCurrSegments"] = \ 63 | int(segmentation_precision >= 11) 64 | parameters["segmentation"]["seeding"]["from"]["cellBorderRemovingCurrSegmentsRandom"] = max(0, min(4, sfrom(16))) 65 | parameters["segmentation"]["seeding"]["from"]["cellContentRemovingCurrSegments"] = 
\ 66 | int(segmentation_precision >= 7) 67 | parameters["segmentation"]["seeding"]["from"]["cellContentRemovingCurrSegmentsRandom"] = max(0, min(4, sfrom(12))) 68 | parameters["segmentation"]["seeding"]["from"]["snakesCentroids"] = int(segmentation_precision >= 9) 69 | parameters["segmentation"]["seeding"]["from"]["snakesCentroidsRandom"] = max(0, min(4, sfrom(14))) 70 | 71 | parameters["segmentation"]["stars"]["step"] = 0.0067 * max(1, (1 + (15 - segmentation_precision) / 2.0)) 72 | 73 | if segmentation_precision <= 9: 74 | weight_length = 1 75 | elif segmentation_precision <= 11: 76 | weight_length = 2 77 | elif segmentation_precision <= 13: 78 | weight_length = 3 79 | elif segmentation_precision <= 15: 80 | weight_length = 4 81 | elif segmentation_precision <= 17: 82 | weight_length = 6 83 | else: 84 | weight_length = 9 85 | 86 | parameters["segmentation"]["stars"]["sizeWeight"] = list( 87 | create_size_weights(np.average(parameters["segmentation"]["stars"]["sizeWeight"]), weight_length) 88 | ) 89 | 90 | parameters["segmentation"]["foreground"]["pickyDetection"] = segmentation_precision > 8 91 | 92 | return parameters 93 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/convertoutlinestoobjects.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | ConvertOutlinesToObjects 5 | ===================== 6 | 7 | **ConvertOutlinesToObjects** converts a binary image of outlines to objects. Contiguous outlined regions are converted 8 | to unique objects. 9 | 10 | | 11 | 12 | ============ ============ =============== 13 | Supports 2D? Supports 3D? Respects masks? 14 | ============ ============ =============== 15 | YES YES NO 16 | ============ ============ =============== 17 | """ 18 | 19 | import numpy 20 | import scipy.ndimage 21 | import skimage 22 | import skimage.measure 23 | 24 | import cellprofiler.module 25 | import cellprofiler.setting 26 | 27 | 28 | class ConvertOutlinesToObjects(cellprofiler.module.ImageSegmentation): 29 | category = "Advanced" 30 | 31 | module_name = "ConvertOutlinesToObjects" 32 | 33 | variable_revision_number = 1 34 | 35 | def create_settings(self): 36 | super(ConvertOutlinesToObjects, self).create_settings() 37 | 38 | self.diameter = cellprofiler.setting.FloatRange( 39 | text="Typical diameter of objects", 40 | value=(0.0, numpy.inf), 41 | doc="Typical diameter of objects, in pixels (min, max). Objects outside this range will be discarded." 
42 | ) 43 | 44 | def settings(self): 45 | settings = super(ConvertOutlinesToObjects, self).settings() 46 | 47 | settings += [ 48 | self.diameter 49 | ] 50 | 51 | return settings 52 | 53 | def visible_settings(self): 54 | visible_settings = super(ConvertOutlinesToObjects, self).visible_settings() 55 | 56 | visible_settings += [ 57 | self.diameter 58 | ] 59 | 60 | return visible_settings 61 | 62 | def run(self, workspace): 63 | self.function = convert_outlines_to_objects 64 | 65 | super(ConvertOutlinesToObjects, self).run(workspace) 66 | 67 | 68 | def convert_outlines_to_objects(outlines, diameter): 69 | labels = skimage.measure.label( 70 | outlines > 0, 71 | background=True, 72 | connectivity=1 73 | ) 74 | 75 | indexes = numpy.unique(labels) 76 | 77 | radius = numpy.divide(diameter, 2.0) 78 | 79 | if labels.ndim == 2: 80 | factor = radius ** 2 81 | else: 82 | factor = (4.0 / 3.0) * (radius ** 3) 83 | 84 | min_area, max_area = numpy.pi * factor 85 | 86 | areas = scipy.ndimage.sum( 87 | numpy.ones_like(labels), 88 | labels, 89 | index=indexes 90 | ) 91 | 92 | is_background = numpy.logical_or( 93 | areas < min_area, 94 | areas > max_area 95 | ) 96 | 97 | background_indexes = numpy.unique(labels)[is_background] 98 | 99 | labels[numpy.isin(labels, background_indexes)] = 0 100 | 101 | return labels 102 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/edgedetection.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Detect edges in an image or volume using the Sobel transform. 5 | Multi-channel images are converted to grayscale before the transform is 6 | applied. An edge is a region in which intensity changes dramatically. 7 | For example, an edge is the line between a dark background and a bright 8 | foreground. 9 | """ 10 | 11 | import cellprofiler.image 12 | import cellprofiler.module 13 | import cellprofiler.setting 14 | import numpy 15 | import skimage.color 16 | import skimage.filters 17 | 18 | 19 | class EdgeDetection(cellprofiler.module.ImageProcessing): 20 | category = "Feature Detection" 21 | 22 | module_name = "EdgeDetection" 23 | 24 | variable_revision_number = 1 25 | 26 | def create_settings(self): 27 | super(EdgeDetection, self).create_settings() 28 | 29 | self.mask = cellprofiler.setting.ImageNameSubscriber( 30 | u"Mask", 31 | can_be_blank=True, 32 | doc=""" 33 | Optional. A binary image the same shape as "Input". Limit application of the edge filter to unmasked 34 | regions of "Input". 
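For reference, a minimal sketch of the Sobel call this module wraps (2D case, with an optional boolean mask), using skimage directly; the sample image and mask region are arbitrary:

```python
import numpy as np
import skimage.data
import skimage.filters
import skimage.util

image = skimage.util.img_as_float(skimage.data.camera())

mask = np.zeros(image.shape, dtype=bool)
mask[64:-64, 64:-64] = True   # restrict the filter to a central region

edges = skimage.filters.sobel(image, mask=mask)   # response is zero outside the mask
```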
35 | """ 36 | ) 37 | 38 | def settings(self): 39 | __settings__ = super(EdgeDetection, self).settings() 40 | 41 | return __settings__ + [ 42 | self.mask 43 | ] 44 | 45 | def visible_settings(self): 46 | __settings__ = super(EdgeDetection, self).visible_settings() 47 | 48 | return __settings__ + [ 49 | self.mask 50 | ] 51 | 52 | def run(self, workspace): 53 | x_name = self.x_name.value 54 | 55 | images = workspace.image_set 56 | 57 | x = images.get_image(x_name) 58 | 59 | x_data = x.pixel_data 60 | 61 | if x.multichannel: 62 | x_data = skimage.color.rgb2gray(x_data) 63 | 64 | mask_data = None 65 | 66 | if not self.mask.is_blank: 67 | mask_name = self.mask.value 68 | 69 | mask = images.get_image(mask_name) 70 | 71 | mask_data = mask.pixel_data 72 | 73 | dimensions = x.dimensions 74 | 75 | if dimensions == 2: 76 | y_data = skimage.filters.sobel(x_data, mask=mask_data) 77 | else: 78 | y_data = numpy.zeros_like(x_data) 79 | 80 | for plane, image in enumerate(x_data): 81 | plane_mask = None if mask_data is None else mask_data[plane] 82 | 83 | y_data[plane] = skimage.filters.sobel(image, mask=plane_mask) 84 | 85 | y = cellprofiler.image.Image( 86 | image=y_data, 87 | parent_image=x, 88 | dimensions=dimensions 89 | ) 90 | 91 | y_name = self.y_name.value 92 | 93 | images.add(y_name, y) 94 | 95 | if self.show_window: 96 | workspace.display_data.x_data = x_data 97 | 98 | workspace.display_data.y_data = y_data 99 | 100 | workspace.display_data.dimensions = dimensions 101 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/gammacorrection.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Gamma correction is a non-linear operation used to encode and decode luminance 5 | values in images. 6 | """ 7 | 8 | import cellprofiler.module 9 | import cellprofiler.setting 10 | import skimage.exposure 11 | 12 | 13 | class GammaCorrection(cellprofiler.module.ImageProcessing): 14 | module_name = "GammaCorrection" 15 | 16 | variable_revision_number = 1 17 | 18 | def create_settings(self): 19 | super(GammaCorrection, self).create_settings() 20 | 21 | self.gamma = cellprofiler.setting.Float( 22 | doc=""" 23 | A gamma value < 1 is an encoding gamma, and the process of 24 | encoding with this compressive power-law non-linearity, gamma 25 | compression, darkens images; conversely a gamma value > 1 is a 26 | decoding gamma and the application of the expansive power-law 27 | non-linearity, gamma expansion, brightens images. 28 | """, 29 | maxval=100.0, 30 | minval=0.0, 31 | text="Gamma", 32 | value=1.0 33 | ) 34 | 35 | def settings(self): 36 | __settings__ = super(GammaCorrection, self).settings() 37 | 38 | return __settings__ + [ 39 | self.gamma 40 | ] 41 | 42 | def visible_settings(self): 43 | __settings__ = super(GammaCorrection, self).visible_settings() 44 | 45 | return __settings__ + [ 46 | self.gamma 47 | ] 48 | 49 | def run(self, workspace): 50 | self.function = skimage.exposure.adjust_gamma 51 | 52 | super(GammaCorrection, self).run(workspace) 53 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/histogramequalization.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Increase the global contrast of a low-contrast image or volume. A low 5 | contrast image has a background and foreground that are both dark, or 6 | both light. 
Histogram equalization redistributes intensities such that 7 | the most common frequencies are more distinct, increasing contrast. 8 | """ 9 | 10 | import cellprofiler.image 11 | import cellprofiler.module 12 | import cellprofiler.setting 13 | import numpy 14 | import skimage.exposure 15 | 16 | 17 | class HistogramEqualization(cellprofiler.module.ImageProcessing): 18 | module_name = "HistogramEqualization" 19 | 20 | variable_revision_number = 1 21 | 22 | def create_settings(self): 23 | super(HistogramEqualization, self).create_settings() 24 | 25 | self.nbins = cellprofiler.setting.Integer( 26 | u"Bins", 27 | value=256, 28 | minval=0, 29 | doc="Number of bins for image histogram." 30 | ) 31 | 32 | self.kernel_size = cellprofiler.setting.Integer( 33 | u"Kernel Size", 34 | value=256, 35 | minval=1, 36 | doc="""The image is partitioned into tiles with dimensions specified by the kernel size. Choose a kernel 37 | size that will fit at least one object of interest. 38 | """ 39 | ) 40 | 41 | self.mask = cellprofiler.setting.ImageNameSubscriber( 42 | u"Mask", 43 | can_be_blank=True, 44 | doc=""" 45 | Optional. Mask image must be the same size as "Input". Only unmasked points of the "Input" image are used 46 | to compute the equalization, which is applied to the entire "Input" image. 47 | """ 48 | ) 49 | 50 | self.local = cellprofiler.setting.Binary( 51 | u"Local", 52 | False 53 | ) 54 | 55 | def settings(self): 56 | __settings__ = super(HistogramEqualization, self).settings() 57 | 58 | return __settings__ + [ 59 | self.nbins, 60 | self.mask, 61 | self.local, 62 | self.kernel_size 63 | ] 64 | 65 | def visible_settings(self): 66 | __settings__ = super(HistogramEqualization, self).settings() 67 | 68 | __settings__ += [self.local, self.nbins] 69 | 70 | if not self.local.value: 71 | __settings__ += [self.mask] 72 | else: 73 | __settings__ += [self.kernel_size] 74 | 75 | return __settings__ 76 | 77 | def run(self, workspace): 78 | x_name = self.x_name.value 79 | 80 | y_name = self.y_name.value 81 | 82 | images = workspace.image_set 83 | 84 | x = images.get_image(x_name) 85 | 86 | dimensions = x.dimensions 87 | 88 | x_data = x.pixel_data 89 | 90 | mask_data = None 91 | 92 | if not self.mask.is_blank: 93 | mask_name = self.mask.value 94 | 95 | mask = images.get_image(mask_name) 96 | 97 | mask_data = mask.pixel_data 98 | 99 | nbins = self.nbins.value 100 | 101 | if self.local.value: 102 | 103 | kernel_size = self.kernel_size.value 104 | 105 | if x.volumetric: 106 | y_data = numpy.zeros_like(x_data, dtype=numpy.float) 107 | 108 | for index, plane in enumerate(x_data): 109 | y_data[index] = skimage.exposure.equalize_adapthist(plane, kernel_size=kernel_size, nbins=nbins) 110 | else: 111 | y_data = skimage.exposure.equalize_adapthist(x_data, kernel_size=kernel_size, nbins=nbins) 112 | else: 113 | y_data = skimage.exposure.equalize_hist(x_data, nbins=nbins, mask=mask_data) 114 | 115 | y = cellprofiler.image.Image( 116 | dimensions=dimensions, 117 | image=y_data, 118 | parent_image=x 119 | ) 120 | 121 | images.add(y_name, y) 122 | 123 | if self.show_window: 124 | workspace.display_data.x_data = x_data 125 | 126 | workspace.display_data.y_data = y_data 127 | 128 | workspace.display_data.dimensions = dimensions 129 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/imagegradient.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | **Image gradient** computes the local gradient of 
the image. The image 5 | gradient is useful for finding boundaries of objects. In a gradient 6 | image, pixels at the edges of bright regions of interest have the 7 | brightest intensities. Pixels in the background or in the centers of 8 | regions of interest have zero or dimmer intensity. 9 | """ 10 | 11 | import cellprofiler.image 12 | import cellprofiler.module 13 | import cellprofiler.setting 14 | import numpy 15 | import skimage 16 | import skimage.filters 17 | import skimage.morphology 18 | 19 | 20 | class ImageGradient(cellprofiler.module.ImageProcessing): 21 | module_name = "ImageGradient" 22 | 23 | variable_revision_number = 1 24 | 25 | def create_settings(self): 26 | super(ImageGradient, self).create_settings() 27 | 28 | self.structuring_element = cellprofiler.setting.StructuringElement( 29 | doc="""Neighborhood in which to compute the local gradient. Select a two-dimensional shape such as "disk" 30 | for images, and a three-dimensional shape such as "ball" for volumes. A larger size will compute the gradient 31 | over larger patches of the image and can obscure smaller features.""" 32 | ) 33 | 34 | def settings(self): 35 | __settings__ = super(ImageGradient, self).settings() 36 | 37 | return __settings__ + [ 38 | self.structuring_element 39 | ] 40 | 41 | def visible_settings(self): 42 | __settings__ = super(ImageGradient, self).visible_settings() 43 | 44 | return __settings__ + [ 45 | self.structuring_element 46 | ] 47 | 48 | def run(self, workspace): 49 | x_name = self.x_name.value 50 | 51 | y_name = self.y_name.value 52 | 53 | images = workspace.image_set 54 | 55 | x = images.get_image(x_name) 56 | 57 | x_data = x.pixel_data 58 | 59 | x_data = skimage.img_as_uint(x_data) 60 | 61 | if x.dimensions == 3 or x.multichannel: 62 | y_data = numpy.zeros_like(x_data) 63 | 64 | for z, image in enumerate(x_data): 65 | y_data[z] = skimage.filters.rank.gradient(image, self.__structuring_element()) 66 | else: 67 | y_data = skimage.filters.rank.gradient(x_data, self.structuring_element.value) 68 | 69 | y = cellprofiler.image.Image( 70 | image=y_data, 71 | dimensions=x.dimensions, 72 | parent_image=x, 73 | ) 74 | 75 | images.add(y_name, y) 76 | 77 | if self.show_window: 78 | workspace.display_data.x_data = x_data 79 | 80 | workspace.display_data.y_data = y_data 81 | 82 | workspace.display_data.dimensions = x.dimensions 83 | 84 | def __structuring_element(self): 85 | shape = self.structuring_element.shape 86 | 87 | size = self.structuring_element.size 88 | 89 | if shape == "ball": 90 | return skimage.morphology.disk(size) 91 | 92 | if shape == "cube": 93 | return skimage.morphology.square(size) 94 | 95 | if shape == "octahedron": 96 | return skimage.morphology.diamond(size) 97 | 98 | return self.structuring_element.value 99 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/laplacianofgaussian.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Laplacian of Gaussian filter. 
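A short sketch of the underlying filter call this module delegates to (scipy's Laplacian of Gaussian with per-axis sigmas, 2D case); the sigma values are illustrative only:

```python
import scipy.ndimage
import skimage.data
import skimage.util

image = skimage.util.img_as_float(skimage.data.camera())

# sigma is given per array axis, so anisotropic smoothing is possible
log_response = scipy.ndimage.gaussian_laplace(image, sigma=(1.0, 2.0))
```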
5 | """ 6 | 7 | import cellprofiler.image 8 | import cellprofiler.module 9 | import cellprofiler.setting 10 | import scipy.ndimage.filters 11 | import skimage.color 12 | 13 | 14 | class LaplacianOfGaussian(cellprofiler.module.ImageProcessing): 15 | module_name = "LaplacianOfGaussian" 16 | 17 | variable_revision_number = 1 18 | 19 | def create_settings(self): 20 | super(LaplacianOfGaussian, self).create_settings() 21 | 22 | self.x = cellprofiler.setting.Float( 23 | "Sigma x", 24 | value=1.0, 25 | minval=0.0, 26 | doc="Sigma for x axis." 27 | ) 28 | 29 | self.y = cellprofiler.setting.Float( 30 | "Sigma y", 31 | value=1.0, 32 | minval=0.0, 33 | doc="Sigma for y axis." 34 | ) 35 | 36 | self.z = cellprofiler.setting.Float( 37 | "Sigma z", 38 | value=1.0, 39 | minval=0.0, 40 | doc="Sigma for z axis. Ignored when input is a two-dimensional image." 41 | ) 42 | 43 | def settings(self): 44 | __settings__ = super(LaplacianOfGaussian, self).settings() 45 | 46 | return __settings__ + [ 47 | self.x, 48 | self.y, 49 | self.z 50 | ] 51 | 52 | def visible_settings(self): 53 | __settings__ = super(LaplacianOfGaussian, self).visible_settings() 54 | 55 | return __settings__ + [ 56 | self.x, 57 | self.y, 58 | self.z 59 | ] 60 | 61 | def run(self, workspace): 62 | x_name = self.x_name.value 63 | 64 | y_name = self.y_name.value 65 | 66 | images = workspace.image_set 67 | 68 | x = images.get_image(x_name) 69 | 70 | x_data = x.pixel_data 71 | 72 | if x.multichannel: 73 | x_data = skimage.color.rgb2gray(x_data) 74 | 75 | x_data = skimage.img_as_float(x_data) 76 | 77 | dimensions = x.dimensions 78 | 79 | if dimensions == 2: 80 | sigma = (self.x.value, self.y.value) 81 | else: 82 | sigma = (self.z.value, self.x.value, self.y.value) 83 | 84 | y_data = scipy.ndimage.filters.gaussian_laplace(x_data, sigma) 85 | 86 | y = cellprofiler.image.Image( 87 | dimensions=dimensions, 88 | image=y_data, 89 | parent_image=x 90 | ) 91 | 92 | images.add(y_name, y) 93 | 94 | if self.show_window: 95 | workspace.display_data.x_data = x_data 96 | 97 | workspace.display_data.y_data = y_data 98 | 99 | workspace.display_data.dimensions = dimensions 100 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/measureimagefocus.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import logging 3 | 4 | import cellprofiler.measurement 5 | import cellprofiler.module 6 | import cellprofiler.preferences 7 | import cellprofiler.setting 8 | import microscopeimagequality.miq 9 | import microscopeimagequality.prediction 10 | import matplotlib.cm 11 | import matplotlib.pyplot 12 | import matplotlib.patches 13 | 14 | __doc__ = """ 15 | For installation instructions and platform support notes, please see the `wiki `_. 16 | 17 | This module can collect measurements indicating possible image aberrations, 18 | e.g. blur (poor focus), intensity, saturation (i.e., the percentage 19 | of pixels in the image that are minimal and maximal). 20 | It outputs an image focus score, an integer from 0 (in focus) to 10 (out of focus). 21 | There is also a certainty output indicating how certain the score is. 
22 | """ 23 | 24 | C_IMAGE_FOCUS = "ImageFocus" 25 | F_SCORE = "Score" 26 | F_CERTAINTY = "Certainty" 27 | 28 | class MeasureImageFocus(cellprofiler.module.Module): 29 | category = "Measurement" 30 | 31 | module_name = "MeasureImageFocus" 32 | 33 | variable_revision_number = 1 34 | 35 | def create_settings(self): 36 | self.image_name = cellprofiler.setting.ImageNameSubscriber( 37 | "Image", 38 | doc=""" 39 | The name of an image. 40 | """ 41 | ) 42 | 43 | def settings(self): 44 | return [ 45 | self.image_name 46 | ] 47 | 48 | def display(self, workspace, figure): 49 | 50 | figure.set_subplots((2, 1)) 51 | 52 | patches= workspace.display_data.patches 53 | 54 | figure.subplot_table(0, 0, workspace.display_data.statistics) 55 | image = workspace.display_data.image 56 | 57 | ax = figure.subplot_imshow_grayscale(1, 0, image, 58 | title="Focus Score" 59 | ) 60 | # show patches 61 | cmap = matplotlib.cm.jet 62 | for patch in patches: 63 | rect = matplotlib.patches.Rectangle(xy=(patch[1], patch[0]), width=patch[3], height=patch[2]) 64 | rect.set_color(cmap(int(float(patch[4][0]) * 255 / 10))) 65 | rect.set_alpha(float(patch[4][1]['aggregate']) * 0.9) 66 | rect.set_linewidth(0) 67 | rect.set_fill(True) 68 | ax.add_patch(rect) 69 | 70 | # colorbar 71 | sm = matplotlib.pyplot.cm.ScalarMappable(cmap=cmap, norm=matplotlib.pyplot.Normalize(vmin=0, vmax=10)) 72 | sm.set_array([]) 73 | cbar = matplotlib.pyplot.colorbar(sm, ax=ax, ticks=[0, 10], shrink=.6) 74 | cbar.ax.set_yticklabels(['Focused', 'Unfocused']) 75 | 76 | def get_categories(self, pipeline, object_name): 77 | if object_name == cellprofiler.measurement.IMAGE: 78 | return [ 79 | C_IMAGE_FOCUS 80 | ] 81 | 82 | return [] 83 | 84 | def get_feature_name(self, name): 85 | image = self.image_name.value 86 | 87 | return C_IMAGE_FOCUS + "_{}_{}".format(name, image) 88 | 89 | def get_measurements(self, pipeline, object_name, category): 90 | name = self.image_name.value 91 | 92 | if object_name == cellprofiler.measurement.IMAGE and category == C_IMAGE_FOCUS: 93 | return [ 94 | F_SCORE + "_{}".format(name), 95 | F_CERTAINTY + "_{}".format(name) 96 | ] 97 | 98 | return [] 99 | 100 | def get_measurement_columns(self, pipeline): 101 | image = cellprofiler.measurement.IMAGE 102 | 103 | features = [ 104 | self.get_feature_name(F_SCORE), 105 | self.get_feature_name(F_CERTAINTY) 106 | ] 107 | 108 | column_type = cellprofiler.measurement.COLTYPE_FLOAT 109 | 110 | return [(image, feature, column_type) for feature in features] 111 | 112 | def get_measurement_images(self, pipeline, object_name, category, measurement): 113 | if measurement in self.get_measurements(pipeline, object_name, category): 114 | return [self.image_name.value] 115 | 116 | return [] 117 | 118 | def run(self, workspace): 119 | default_weights_index_file = microscopeimagequality.miq.DEFAULT_MODEL_PATH + '.index' 120 | if not os.path.exists(default_weights_index_file): 121 | logging.warning('weights index file not found at {}'.format(default_weights_index_file)) 122 | microscopeimagequality.miq.download_model() 123 | 124 | m = microscopeimagequality.prediction.ImageQualityClassifier(microscopeimagequality.miq.DEFAULT_MODEL_PATH, 84, 125 | 11) 126 | 127 | image_set = workspace.image_set 128 | image = image_set.get_image(self.image_name.value, must_be_grayscale=True) 129 | 130 | data = image.pixel_data 131 | 132 | measurements = workspace.measurements 133 | 134 | statistics = [] 135 | 136 | pred = m.predict(data) 137 | patches = m.get_patch_predictions(data) 138 | 139 | feature_score = 
self.get_feature_name(F_SCORE) 140 | score = str(pred[0]) 141 | feature_certainty = self.get_feature_name(F_CERTAINTY) 142 | certainty = str(pred[1]['aggregate']) 143 | 144 | statistics.append([feature_score, score]) 145 | statistics.append([feature_certainty, certainty]) 146 | 147 | measurements.add_image_measurement(feature_score, score) 148 | measurements.add_image_measurement(feature_certainty, certainty) 149 | 150 | # if self.show_window: 151 | workspace.display_data.statistics = statistics 152 | workspace.display_data.patches = patches 153 | workspace.display_data.image= data 154 | 155 | def volumetric(self): 156 | return False 157 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/nucleaizer.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | NucleAIzer 5 | ========== 6 | 7 | **NucleAIzer** identifies nuclei. 8 | 9 | Instructions: 10 | 11 | Warning: For correct usage, this module requires some experience with 12 | Python and Python dependencies! 13 | 14 | In addition to copying this plugin to your plugins directory, you'll 15 | need to clone the following Git repository and follow the 16 | `Prerequisites` instructions in the README: 17 | 18 | https://github.com/spreka/biomagdsb 19 | 20 | This includes installing a specific commit of Matterport's Mask R-CNN 21 | repository. This plugin _will not_ work with the latest commit! The 22 | model will not load if the Mask R-CNN modules are not available on your 23 | Python path since they use custom Keras layers! 24 | 25 | You'll also need to make sure you're running versions of Keras, NumPy, 26 | SciPy, and TensorFlow that work with `biomagdsb`, `Mask-RCNN`, 27 | and `CellProfiler`. I had success with the following versions: 28 | 29 | numpy==1.15.4 30 | scipy==1.1.0 31 | tensorflow==1.15.0 32 | 33 | Finally, you'll need to download the model configuration and weights: 34 | 35 | https://drive.google.com/drive/folders/1lbJ_LanxSO-n5rMjmhWAHtLcE9znHyJO?usp=sharing 36 | | 37 | 38 | ============ ============ =============== 39 | Supports 2D? Supports 3D? Respects masks? 
40 | ============ ============ =============== 41 | YES NO YES 42 | ============ ============ =============== 43 | """ 44 | 45 | import os.path 46 | 47 | import numpy 48 | import skimage.measure 49 | import skimage.transform 50 | import tensorflow 51 | 52 | import cellprofiler.image 53 | import cellprofiler.module 54 | import cellprofiler.object 55 | import cellprofiler.setting 56 | 57 | 58 | class IdentifyNucleus(cellprofiler.module.ImageSegmentation): 59 | category = "Advanced" 60 | 61 | module_name = "IdentifyNucleus" 62 | 63 | variable_revision_number = 1 64 | 65 | def create_settings(self): 66 | super(IdentifyNucleus, self).create_settings() 67 | 68 | self.mask_name = cellprofiler.setting.ImageNameSubscriber( 69 | "Mask", 70 | can_be_blank=True, 71 | doc="" 72 | ) 73 | 74 | self.model_pathname = cellprofiler.setting.Pathname( 75 | "Model", 76 | doc="" 77 | ) 78 | 79 | self.weights_pathname = cellprofiler.setting.Pathname( 80 | "Weights", 81 | doc="" 82 | ) 83 | 84 | def settings(self): 85 | __settings__ = super(IdentifyNucleus, self).settings() 86 | 87 | return __settings__ + [ 88 | self.mask_name, 89 | self.model_pathname, 90 | self.weights_pathname 91 | ] 92 | 93 | def visible_settings(self): 94 | __settings__ = super(IdentifyNucleus, self).settings() 95 | 96 | __settings__ = __settings__ + [ 97 | self.mask_name, 98 | self.model_pathname, 99 | self.weights_pathname 100 | ] 101 | 102 | return __settings__ 103 | 104 | def run(self, workspace): 105 | model_pathname = os.path.abspath(self.model_pathname.value) 106 | 107 | model = tensorflow.keras.models.model_from_json(model_pathname) 108 | 109 | weights_pathname = os.path.abspath(self.weights_pathname.value) 110 | 111 | model.load_weights(weights_pathname, by_name=True) 112 | 113 | x_name = self.x_name.value 114 | y_name = self.y_name.value 115 | 116 | images = workspace.image_set 117 | 118 | x = images.get_image(x_name) 119 | 120 | dimensions = x.dimensions 121 | 122 | x_data = x.pixel_data 123 | 124 | x_data = skimage.transform.resize(x_data, (2048, 2048)) 125 | 126 | x_data = numpy.expand_dims(x_data, axis=0) 127 | 128 | mask_data = None 129 | 130 | if not self.mask_name.is_blank: 131 | mask_name = self.mask_name.value 132 | 133 | mask = images.get_image(mask_name) 134 | 135 | mask_data = mask.pixel_data 136 | 137 | prediction = model.predict(x_data) 138 | 139 | _, _, _, predicted_masks, _, _, _ = prediction 140 | 141 | count = predicted_masks.shape[0] 142 | 143 | for index in range(0, count): 144 | predicted_mask = predicted_masks[index] 145 | 146 | if mask_data: 147 | predicted_mask *= mask_data 148 | 149 | y_data = skimage.measure.label(predicted_mask) 150 | 151 | objects = cellprofiler.object.Objects() 152 | 153 | objects.segmented = y_data 154 | 155 | objects.parent_image = x 156 | 157 | workspace.object_set.add_objects(objects, y_name) 158 | 159 | self.add_measurements(workspace) 160 | 161 | if self.show_window: 162 | workspace.display_data.x_data = x.pixel_data 163 | 164 | workspace.display_data.dimensions = dimensions 165 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/randomwalkeralgorithm.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | 5 | Random walker algorithm 6 | 7 | Single-channel images can be two-or-three-dimensional. 
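A compact sketch of the seeding-and-diffusion scheme this module uses, written against skimage directly; the threshold values below stand in for the module's "first phase" and "second phase" settings and are hypothetical:

```python
import numpy as np
import skimage.data
import skimage.segmentation
import skimage.util

image = skimage.util.img_as_float(skimage.data.camera()[::4, ::4])

labels = np.zeros(image.shape, dtype=np.uint8)
labels[image > 0.7] = 1   # confident "first phase" seeds
labels[image < 0.3] = 2   # confident "second phase" seeds

# unlabeled (0) pixels are assigned to whichever seeded phase the random walker favors
segmented = skimage.segmentation.random_walker(image, labels, beta=130.0)
```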
8 | 9 | """ 10 | 11 | import numpy 12 | import skimage.color 13 | import skimage.measure 14 | import skimage.segmentation 15 | 16 | import cellprofiler.module 17 | import cellprofiler.object 18 | import cellprofiler.setting 19 | 20 | 21 | class RandomWalkerAlgorithm(cellprofiler.module.ImageSegmentation): 22 | module_name = "Random walker algorithm" 23 | 24 | variable_revision_number = 1 25 | 26 | def create_settings(self): 27 | super(RandomWalkerAlgorithm, self).create_settings() 28 | 29 | self.first_phase = cellprofiler.setting.Float( 30 | doc="First phase demarcates an image’s first phase.", 31 | text="First phase", 32 | value=0.5 33 | ) 34 | 35 | self.second_phase = cellprofiler.setting.Float( 36 | doc="Second phase demarcates an image’s second phase.", 37 | text="Second phase", 38 | value=0.5 39 | ) 40 | 41 | self.beta = cellprofiler.setting.Float( 42 | doc=""" 43 | Beta is the penalization coefficient for the random walker motion. Increasing the penalization 44 | coefficient increases the difficulty of the diffusion. Likewise, decreasing the penalization coefficient 45 | decreases the difficulty of the diffusion. 46 | """, 47 | text="Beta", 48 | value=130.0 49 | ) 50 | 51 | def settings(self): 52 | __settings__ = super(RandomWalkerAlgorithm, self).settings() 53 | 54 | return __settings__ + [ 55 | self.first_phase, 56 | self.second_phase, 57 | self.beta 58 | ] 59 | 60 | def visible_settings(self): 61 | __settings__ = super(RandomWalkerAlgorithm, self).settings() 62 | 63 | return __settings__ + [ 64 | self.first_phase, 65 | self.second_phase, 66 | self.beta 67 | ] 68 | 69 | def run(self, workspace): 70 | x_name = self.x_name.value 71 | 72 | y_name = self.y_name.value 73 | 74 | images = workspace.image_set 75 | 76 | x = images.get_image(x_name) 77 | 78 | x_data = x.pixel_data 79 | 80 | if x.multichannel: 81 | x_data = skimage.color.rgb2gray(x_data) 82 | 83 | labels_data = numpy.zeros_like(x_data, numpy.uint8) 84 | 85 | labels_data[x_data > self.first_phase.value] = 1 86 | 87 | labels_data[x_data < self.second_phase.value] = 2 88 | 89 | y_data = skimage.segmentation.random_walker( 90 | beta=self.beta.value, 91 | data=x_data, 92 | labels=labels_data, 93 | multichannel=False, 94 | spacing=x.spacing 95 | ) 96 | 97 | y_data = skimage.measure.label(y_data) 98 | 99 | objects = cellprofiler.object.Objects() 100 | 101 | objects.segmented = y_data 102 | 103 | objects.parent_image = x 104 | 105 | workspace.object_set.add_objects(objects, y_name) 106 | 107 | self.add_measurements(workspace) 108 | 109 | if self.show_window: 110 | workspace.display_data.x_data = x_data 111 | 112 | workspace.display_data.y_data = y_data 113 | 114 | workspace.display_data.dimensions = x.dimensions 115 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/tests/conftest.py: -------------------------------------------------------------------------------- 1 | # Something in CellProfiler is importing wx before we can set 2 | # headless mode. Setting headless here efore importing anything 3 | # else from CellProfiler. 
4 | import cellprofiler.preferences 5 | 6 | cellprofiler.preferences.set_headless() 7 | 8 | import cellprofiler.image 9 | import cellprofiler.measurement 10 | import cellprofiler.object 11 | import cellprofiler.pipeline 12 | import cellprofiler.workspace 13 | import numpy 14 | import skimage.data 15 | import skimage.color 16 | import skimage.filters 17 | import skimage.measure 18 | import pytest 19 | 20 | 21 | @pytest.fixture( 22 | scope="module", 23 | params=[ 24 | (skimage.data.camera()[0:128, 0:128], 2), 25 | (skimage.data.astronaut()[0:128, 0:128, :], 2), 26 | (numpy.tile(skimage.data.camera()[0:32, 0:32], (2, 1)).reshape(2, 32, 32), 3) 27 | ], 28 | ids=[ 29 | "grayscale_image", 30 | "multichannel_image", 31 | "grayscale_volume" 32 | ] 33 | ) 34 | def image(request): 35 | data, dimensions = request.param 36 | 37 | return cellprofiler.image.Image(image=data, dimensions=dimensions) 38 | 39 | 40 | @pytest.fixture(scope="function") 41 | def image_empty(): 42 | image = cellprofiler.image.Image() 43 | 44 | return image 45 | 46 | 47 | @pytest.fixture(scope="function") 48 | def image_set(image, image_set_list): 49 | image_set = image_set_list.get_image_set(0) 50 | 51 | image_set.add("example", image) 52 | 53 | return image_set 54 | 55 | 56 | @pytest.fixture(scope="function") 57 | def image_set_empty(image_empty, image_set_list): 58 | image_set = image_set_list.get_image_set(0) 59 | image_set.add("example", image_empty) 60 | 61 | return image_set 62 | 63 | 64 | @pytest.fixture(scope="function") 65 | def image_set_list(): 66 | return cellprofiler.image.ImageSetList() 67 | 68 | 69 | @pytest.fixture(scope="function") 70 | def measurements(): 71 | return cellprofiler.measurement.Measurements() 72 | 73 | 74 | @pytest.fixture(scope="function") 75 | def module(request): 76 | instance = getattr(request.module, "instance") 77 | 78 | return instance() 79 | 80 | 81 | @pytest.fixture(scope="function") 82 | def objects(image): 83 | obj = cellprofiler.object.Objects() 84 | obj.parent_image = image 85 | 86 | return obj 87 | 88 | 89 | @pytest.fixture(scope="function") 90 | def objects_empty(): 91 | obj = cellprofiler.object.Objects() 92 | 93 | return obj 94 | 95 | 96 | @pytest.fixture(scope="function") 97 | def object_set(objects): 98 | objects_set = cellprofiler.object.ObjectSet() 99 | objects_set.add_objects(objects, "InputObjects") 100 | 101 | return objects_set 102 | 103 | 104 | @pytest.fixture(scope="function") 105 | def object_set_empty(objects_empty): 106 | objects_set = cellprofiler.object.ObjectSet() 107 | objects_set.add_objects(objects_empty, "InputObjects") 108 | 109 | return objects_set 110 | 111 | 112 | @pytest.fixture(scope="function") 113 | def object_with_data(image): 114 | data = image.pixel_data 115 | 116 | if image.multichannel: 117 | data = skimage.color.rgb2gray(data) 118 | 119 | binary = data > skimage.filters.threshold_li(data) 120 | 121 | labels = skimage.measure.label(binary) 122 | 123 | objects = cellprofiler.object.Objects() 124 | 125 | objects.segmented = labels 126 | objects.parent_image = image 127 | 128 | return objects 129 | 130 | 131 | @pytest.fixture(scope="function") 132 | def object_set_with_data(object_with_data): 133 | objects_set = cellprofiler.object.ObjectSet() 134 | objects_set.add_objects(object_with_data, "InputObjects") 135 | 136 | return objects_set 137 | 138 | 139 | @pytest.fixture(scope="function") 140 | def pipeline(): 141 | return cellprofiler.pipeline.Pipeline() 142 | 143 | 144 | @pytest.fixture(scope="function") 145 | def workspace(pipeline, module, 
image_set, object_set, measurements, image_set_list): 146 | return cellprofiler.workspace.Workspace(pipeline, module, image_set, object_set, measurements, image_set_list) 147 | 148 | 149 | @pytest.fixture(scope="function") 150 | def workspace_empty(pipeline, module, image_set_empty, object_set_empty, measurements, image_set_list): 151 | return cellprofiler.workspace.Workspace(pipeline, module, image_set_empty, object_set_empty, measurements, image_set_list) 152 | 153 | 154 | @pytest.fixture(scope="function") 155 | def workspace_with_data(pipeline, module, image_set, object_set_with_data, measurements, image_set_list): 156 | return cellprofiler.workspace.Workspace(pipeline, module, image_set, object_set_with_data, 157 | measurements, image_set_list) 158 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler3/tophattransform.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Top-hat transform 5 | ================= 6 | 7 | Perform a black or white top-hat transform on grayscale pixel data. 8 | 9 | Top-hat transforms are useful for extracting small elements and details 10 | from images and volumes. 11 | """ 12 | 13 | import skimage.morphology 14 | 15 | import cellprofiler.module 16 | import cellprofiler.setting 17 | 18 | 19 | class TopHatTransform(cellprofiler.module.ImageProcessing): 20 | module_name = "TopHatTransform" 21 | 22 | variable_revision_number = 1 23 | 24 | def create_settings(self): 25 | super(TopHatTransform, self).create_settings() 26 | 27 | self.operation_name = cellprofiler.setting.Choice( 28 | choices=[ 29 | "Black top-hat transform", 30 | "White top-hat transform" 31 | ], 32 | text="Operation", 33 | value="Black top-hat transform", 34 | doc=""" 35 | Select the top-hat transformation: 36 |
<ul> 37 |
            <li>Black top-hat transform: This operation returns the dark spots of the image that are smaller 38 | than the structuring element. Note that dark spots in the original image are bright spots after the 39 | black top hat.</li>
            40 |
            <li>White top-hat transform: This operation returns the bright spots of the image that are 41 | smaller than the structuring element.</li>
            42 | </ul>
43 | """ 44 | ) 45 | 46 | self.structuring_element = cellprofiler.setting.StructuringElement() 47 | 48 | def settings(self): 49 | __settings__ = super(TopHatTransform, self).settings() 50 | 51 | return __settings__ + [ 52 | self.structuring_element, 53 | self.operation_name 54 | ] 55 | 56 | def visible_settings(self): 57 | __settings__ = super(TopHatTransform, self).visible_settings() 58 | 59 | return __settings__ + [ 60 | self.operation_name, 61 | self.structuring_element 62 | ] 63 | 64 | def run(self, workspace): 65 | self.function = tophat_transform 66 | 67 | super(TopHatTransform, self).run(workspace) 68 | 69 | 70 | def tophat_transform(image, structuring_element, operation): 71 | if operation == "Black top-hat transform": 72 | return skimage.morphology.black_tophat(image, selem=structuring_element) 73 | 74 | return skimage.morphology.white_tophat(image, selem=structuring_element) 75 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/DoGNet.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | 4 | import numpy as np 5 | 6 | import dognet 7 | import torch 8 | from cellprofiler_core.setting.subscriber import ImageSubscriber 9 | from cellprofiler_core.setting.text import ImageName, Pathname 10 | from torch.autograd import Variable 11 | 12 | ################################# 13 | # 14 | # Imports from CellProfiler 15 | # 16 | ################################## 17 | 18 | import cellprofiler_core.image 19 | import cellprofiler_core.module 20 | import cellprofiler_core.setting 21 | 22 | __doc__ = """\ 23 | DoGNet 24 | ====== 25 | 26 | **DoGNet** takes input synapsin1, PSD95, vGlut, and predicts the location of synapses. 27 | 28 | | 29 | 30 | ============ ============ =============== 31 | Supports 2D? Supports 3D? Respects masks? 32 | ============ ============ =============== 33 | YES NO YES 34 | ============ ============ =============== 35 | 36 | 37 | What do I get as output? 38 | ^^^^^^^^^^^^^^^^^^^^^^^^ 39 | 40 | A synapse prediction map. 41 | 42 | 43 | References 44 | ^^^^^^^^^^ 45 | Kulikov V, Guo SM, Stone M, Goodman A, Carpenter A, et al. (2019) 46 | DoGNet: A deep architecture for synapse detection in multiplexed fluorescence images. 47 | PLOS Computational Biology 15(5): e1007012. https://doi.org/10.1371/journal.pcbi.1007012 48 | """ 49 | 50 | class DoGNet(cellprofiler_core.module.Module): 51 | category = "Advanced" 52 | module_name = "DoGNet" 53 | variable_revision_number = 1 54 | 55 | def create_settings(self): 56 | self.synapsin_image = ImageSubscriber( 57 | "Select the synapsin image", "None", doc="""\ 58 | Select the image of the synapsin-1 channel.""") 59 | 60 | self.PSD95_image = ImageSubscriber( 61 | "Select the PSD95 image", "None", doc="""\ 62 | Select the image of the PSD95 channel.""") 63 | 64 | self.vGlut_image = ImageSubscriber( 65 | "Select the vGlut image", "None", doc="""\ 66 | Select the image of the vGlut channel.""") 67 | 68 | self.prediction_image_name = ImageName( 69 | "Output image name", 70 | "SynapsePrediction", 71 | doc="""\ 72 | Enter the name to give the output prediction image created by this module. 73 | """) 74 | self.t7_name = Pathname( 75 | "Trained network location", 76 | doc="Specify the location of the trained network." 
77 | ) 78 | 79 | def settings(self): 80 | 81 | settings = [ 82 | self.synapsin_image, 83 | self.PSD95_image, 84 | self.vGlut_image, 85 | self.prediction_image_name, 86 | self.t7_name 87 | ] 88 | 89 | return settings 90 | 91 | def run(self, workspace): 92 | net = dognet.SimpleAnisotropic(3,15,5,learn_amplitude=False) 93 | net.to('cpu') 94 | net.load_state_dict(torch.load(self.t7_name.value)) 95 | 96 | syn_normed=np.expand_dims( 97 | self.normalize( 98 | workspace.image_set.get_image(self.synapsin_image.value, must_be_grayscale=True) 99 | ) 100 | ,0) 101 | psd_normed=np.expand_dims( 102 | self.normalize( 103 | workspace.image_set.get_image(self.PSD95_image.value, must_be_grayscale=True) 104 | ) 105 | ,0) 106 | vglut_normed=np.expand_dims( 107 | self.normalize( 108 | workspace.image_set.get_image(self.vGlut_image.value, must_be_grayscale=True) 109 | ) 110 | ,0) 111 | 112 | data = np.concatenate([syn_normed,psd_normed,vglut_normed]) 113 | print(data.shape) 114 | y = self.inference(net,data) 115 | 116 | output_image = cellprofiler_core.image.Image(y[0,0]) 117 | 118 | workspace.image_set.add(self.prediction_image_name.value, output_image) 119 | 120 | if self.show_window: 121 | workspace.display_data.syn_pixels = workspace.image_set.get_image(self.synapsin_image.value).pixel_data 122 | 123 | workspace.display_data.psd_pixels = workspace.image_set.get_image(self.PSD95_image.value).pixel_data 124 | 125 | workspace.display_data.vglut_pixels = workspace.image_set.get_image(self.vGlut_image.value).pixel_data 126 | 127 | workspace.display_data.output_pixels = y[0,0] 128 | 129 | def display(self, workspace, figure): 130 | dimensions = (2, 2) 131 | 132 | figure.set_subplots(dimensions) 133 | 134 | figure.subplot_imshow_grayscale(0, 0, workspace.display_data.syn_pixels, "Synapsin") 135 | 136 | figure.subplot_imshow_grayscale( 137 | 1, 138 | 0, 139 | workspace.display_data.psd_pixels, 140 | "PSD-95", 141 | sharexy=figure.subplot(0, 0), 142 | ) 143 | 144 | figure.subplot_imshow_grayscale( 145 | 0, 146 | 1, 147 | workspace.display_data.vglut_pixels, 148 | "vGlut", 149 | sharexy=figure.subplot(0, 0), 150 | ) 151 | 152 | figure.subplot_imshow_grayscale( 153 | 1, 154 | 1, 155 | workspace.display_data.output_pixels, 156 | "Synapse prediction", 157 | sharexy=figure.subplot(0, 0), 158 | ) 159 | 160 | def normalize(self, im): 161 | meanx = im.pixel_data.mean() 162 | minx = im.pixel_data.min() 163 | maxx = im.pixel_data.max() 164 | x = np.copy(im.pixel_data.astype(np.float32)) 165 | x = (x - meanx - minx)/(maxx - minx).astype(np.float32) 166 | return x 167 | 168 | def inference(self, net,image,get_intermediate=False): 169 | x = np.expand_dims(image,0) 170 | vx = Variable(torch.from_numpy(x).float()).to('cpu') 171 | 172 | res,inter = net(vx) 173 | if get_intermediate: 174 | return res.data.cpu().numpy(),inter.data.cpu().numpy() 175 | return res.data.cpu().numpy() 176 | 177 | def volumetric(self): 178 | return False 179 | 180 | 181 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/README.md: -------------------------------------------------------------------------------- 1 | Plugins in the CellProfiler4_autoconverted folder were automatically converted from Python2 to Python3 in preparation for compatability with CellProfiler4, but 2 | they have not been tested for functionality. 
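As a hypothetical illustration of the kind of change the automatic Python 2 to Python 3 conversion makes (this exact snippet is not taken from any plugin in this folder; the Python 2 idiom shown does appear in the CellProfiler3 copies above):

```python
parameters = {"gamma": 1.0, "nbins": 256}

# Python 2 idiom found in the CellProfiler3 copies of these plugins:
#     for name, value in sorted(parameters.iteritems()):
#         print name, value

# Python 3 equivalent produced by the automatic conversion:
for name, value in sorted(parameters.items()):
    print(name, value)
```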
3 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/convertoutlinestoobjects.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | ConvertOutlinesToObjects 5 | ===================== 6 | 7 | **ConvertOutlinesToObjects** converts a binary image of outlines to objects. Contiguous outlined regions are converted 8 | to unique objects. 9 | 10 | | 11 | 12 | ============ ============ =============== 13 | Supports 2D? Supports 3D? Respects masks? 14 | ============ ============ =============== 15 | YES YES NO 16 | ============ ============ =============== 17 | """ 18 | 19 | import numpy 20 | import scipy.ndimage 21 | import skimage 22 | import skimage.measure 23 | 24 | from cellprofiler_core.module.image_segmentation import ImageSegmentation 25 | from cellprofiler_core.setting.range import FloatRange 26 | 27 | 28 | class ConvertOutlinesToObjects(ImageSegmentation): 29 | category = "Advanced" 30 | 31 | module_name = "ConvertOutlinesToObjects" 32 | 33 | variable_revision_number = 1 34 | 35 | def create_settings(self): 36 | super(ConvertOutlinesToObjects, self).create_settings() 37 | 38 | self.diameter = FloatRange( 39 | text="Typical diameter of objects", 40 | value=(0.0, numpy.inf), 41 | doc="Typical diameter of objects, in pixels (min, max). Objects outside this range will be discarded." 42 | ) 43 | 44 | def settings(self): 45 | settings = super(ConvertOutlinesToObjects, self).settings() 46 | 47 | settings += [ 48 | self.diameter 49 | ] 50 | 51 | return settings 52 | 53 | def visible_settings(self): 54 | visible_settings = super(ConvertOutlinesToObjects, self).visible_settings() 55 | 56 | visible_settings += [ 57 | self.diameter 58 | ] 59 | 60 | return visible_settings 61 | 62 | def run(self, workspace): 63 | self.function = convert_outlines_to_objects 64 | 65 | super(ConvertOutlinesToObjects, self).run(workspace) 66 | 67 | 68 | def convert_outlines_to_objects(outlines, diameter): 69 | labels = skimage.measure.label( 70 | outlines > 0, 71 | background=True, 72 | connectivity=1 73 | ) 74 | 75 | indexes = numpy.unique(labels) 76 | 77 | radius = numpy.divide(diameter, 2.0) 78 | 79 | if labels.ndim == 2: 80 | factor = radius ** 2 81 | else: 82 | factor = (4.0 / 3.0) * (radius ** 3) 83 | 84 | min_area, max_area = numpy.pi * factor 85 | 86 | areas = scipy.ndimage.sum( 87 | numpy.ones_like(labels), 88 | labels, 89 | index=indexes 90 | ) 91 | 92 | is_background = numpy.logical_or( 93 | areas < min_area, 94 | areas > max_area 95 | ) 96 | 97 | background_indexes = numpy.unique(labels)[is_background] 98 | 99 | labels[numpy.isin(labels, background_indexes)] = 0 100 | 101 | return labels 102 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/edgedetection.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Detect edges in an image or volume using the Sobel transform. 5 | Multi-channel images are converted to grayscale before the transform is 6 | applied. An edge is a region in which intensity changes dramatically. 7 | For example, an edge is the line between a dark background and a bright 8 | foreground. 
9 | """ 10 | 11 | import cellprofiler_core.image 12 | import cellprofiler_core.module 13 | import cellprofiler_core.setting 14 | import numpy 15 | import skimage.color 16 | import skimage.filters 17 | from cellprofiler_core.setting.subscriber import ImageSubscriber 18 | 19 | 20 | class EdgeDetection(cellprofiler_core.module.ImageProcessing): 21 | category = "Feature Detection" 22 | 23 | module_name = "EdgeDetection" 24 | 25 | variable_revision_number = 1 26 | 27 | def create_settings(self): 28 | super(EdgeDetection, self).create_settings() 29 | 30 | self.mask = ImageSubscriber( 31 | u"Mask", 32 | can_be_blank=True, 33 | doc=""" 34 | Optional. A binary image the same shape as "Input". Limit application of the edge filter to unmasked 35 | regions of "Input". 36 | """ 37 | ) 38 | 39 | def settings(self): 40 | __settings__ = super(EdgeDetection, self).settings() 41 | 42 | return __settings__ + [ 43 | self.mask 44 | ] 45 | 46 | def visible_settings(self): 47 | __settings__ = super(EdgeDetection, self).visible_settings() 48 | 49 | return __settings__ + [ 50 | self.mask 51 | ] 52 | 53 | def run(self, workspace): 54 | x_name = self.x_name.value 55 | 56 | images = workspace.image_set 57 | 58 | x = images.get_image(x_name) 59 | 60 | x_data = x.pixel_data 61 | 62 | if x.multichannel: 63 | x_data = skimage.color.rgb2gray(x_data) 64 | 65 | mask_data = None 66 | 67 | if not self.mask.is_blank: 68 | mask_name = self.mask.value 69 | 70 | mask = images.get_image(mask_name) 71 | 72 | mask_data = mask.pixel_data 73 | 74 | dimensions = x.dimensions 75 | 76 | if dimensions == 2: 77 | y_data = skimage.filters.sobel(x_data, mask=mask_data) 78 | else: 79 | y_data = numpy.zeros_like(x_data) 80 | 81 | for plane, image in enumerate(x_data): 82 | plane_mask = None if mask_data is None else mask_data[plane] 83 | 84 | y_data[plane] = skimage.filters.sobel(image, mask=plane_mask) 85 | 86 | y = cellprofiler_core.image.Image( 87 | image=y_data, 88 | parent_image=x, 89 | dimensions=dimensions 90 | ) 91 | 92 | y_name = self.y_name.value 93 | 94 | images.add(y_name, y) 95 | 96 | if self.show_window: 97 | workspace.display_data.x_data = x_data 98 | 99 | workspace.display_data.y_data = y_data 100 | 101 | workspace.display_data.dimensions = dimensions 102 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/gammacorrection.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Gamma correction is a non-linear operation used to encode and decode luminance 5 | values in images. 6 | """ 7 | 8 | import cellprofiler_core.module 9 | import cellprofiler_core.setting 10 | import skimage.exposure 11 | from cellprofiler_core.setting.text import Float 12 | 13 | 14 | class GammaCorrection(cellprofiler_core.module.ImageProcessing): 15 | module_name = "GammaCorrection" 16 | 17 | variable_revision_number = 1 18 | 19 | def create_settings(self): 20 | super(GammaCorrection, self).create_settings() 21 | 22 | self.gamma = Float( 23 | doc=""" 24 | A gamma value < 1 is an encoding gamma, and the process of 25 | encoding with this compressive power-law non-linearity, gamma 26 | compression, darkens images; conversely a gamma value > 1 is a 27 | decoding gamma and the application of the expansive power-law 28 | non-linearity, gamma expansion, brightens images. 
29 | """, 30 | maxval=100.0, 31 | minval=0.0, 32 | text="Gamma", 33 | value=1.0 34 | ) 35 | 36 | def settings(self): 37 | __settings__ = super(GammaCorrection, self).settings() 38 | 39 | return __settings__ + [ 40 | self.gamma 41 | ] 42 | 43 | def visible_settings(self): 44 | __settings__ = super(GammaCorrection, self).visible_settings() 45 | 46 | return __settings__ + [ 47 | self.gamma 48 | ] 49 | 50 | def run(self, workspace): 51 | self.function = skimage.exposure.adjust_gamma 52 | 53 | super(GammaCorrection, self).run(workspace) 54 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/histogramequalization.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Increase the global contrast of a low-contrast image or volume. A low 5 | contrast image has a background and foreground that are both dark, or 6 | both light. Histogram equalization redistributes intensities such that 7 | the most common frequencies are more distinct, increasing contrast. 8 | """ 9 | 10 | import cellprofiler_core.image 11 | import cellprofiler_core.module 12 | import cellprofiler_core.setting 13 | import cellprofiler_core.setting.text 14 | import numpy 15 | import skimage.exposure 16 | from cellprofiler_core.setting.subscriber import ImageSubscriber 17 | 18 | 19 | class HistogramEqualization(cellprofiler_core.module.ImageProcessing): 20 | module_name = "HistogramEqualization" 21 | 22 | variable_revision_number = 1 23 | 24 | def create_settings(self): 25 | super(HistogramEqualization, self).create_settings() 26 | 27 | self.nbins = cellprofiler_core.setting.text.Integer( 28 | u"Bins", 29 | value=256, 30 | minval=0, 31 | doc="Number of bins for image histogram." 32 | ) 33 | 34 | self.kernel_size = cellprofiler_core.setting.text.Integer( 35 | u"Kernel Size", 36 | value=256, 37 | minval=1, 38 | doc="""The image is partitioned into tiles with dimensions specified by the kernel size. Choose a kernel 39 | size that will fit at least one object of interest. 40 | """ 41 | ) 42 | 43 | self.mask = ImageSubscriber( 44 | u"Mask", 45 | can_be_blank=True, 46 | doc=""" 47 | Optional. Mask image must be the same size as "Input". Only unmasked points of the "Input" image are used 48 | to compute the equalization, which is applied to the entire "Input" image. 
49 | """ 50 | ) 51 | 52 | self.local = cellprofiler_core.setting.Binary( 53 | u"Local", 54 | False 55 | ) 56 | 57 | def settings(self): 58 | __settings__ = super(HistogramEqualization, self).settings() 59 | 60 | return __settings__ + [ 61 | self.nbins, 62 | self.mask, 63 | self.local, 64 | self.kernel_size 65 | ] 66 | 67 | def visible_settings(self): 68 | __settings__ = super(HistogramEqualization, self).settings() 69 | 70 | __settings__ += [self.local, self.nbins] 71 | 72 | if not self.local.value: 73 | __settings__ += [self.mask] 74 | else: 75 | __settings__ += [self.kernel_size] 76 | 77 | return __settings__ 78 | 79 | def run(self, workspace): 80 | x_name = self.x_name.value 81 | 82 | y_name = self.y_name.value 83 | 84 | images = workspace.image_set 85 | 86 | x = images.get_image(x_name) 87 | 88 | dimensions = x.dimensions 89 | 90 | x_data = x.pixel_data 91 | 92 | mask_data = None 93 | 94 | if not self.mask.is_blank: 95 | mask_name = self.mask.value 96 | 97 | mask = images.get_image(mask_name) 98 | 99 | mask_data = mask.pixel_data 100 | 101 | nbins = self.nbins.value 102 | 103 | if self.local.value: 104 | 105 | kernel_size = self.kernel_size.value 106 | 107 | if x.volumetric: 108 | y_data = numpy.zeros_like(x_data, dtype=numpy.float) 109 | 110 | for index, plane in enumerate(x_data): 111 | y_data[index] = skimage.exposure.equalize_adapthist(plane, kernel_size=kernel_size, nbins=nbins) 112 | else: 113 | y_data = skimage.exposure.equalize_adapthist(x_data, kernel_size=kernel_size, nbins=nbins) 114 | else: 115 | y_data = skimage.exposure.equalize_hist(x_data, nbins=nbins, mask=mask_data) 116 | 117 | y = cellprofiler_core.image.Image( 118 | dimensions=dimensions, 119 | image=y_data, 120 | parent_image=x 121 | ) 122 | 123 | images.add(y_name, y) 124 | 125 | if self.show_window: 126 | workspace.display_data.x_data = x_data 127 | 128 | workspace.display_data.y_data = y_data 129 | 130 | workspace.display_data.dimensions = dimensions 131 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/imagegradient.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | **Image gradient** computes the local gradient of the image. The image 5 | gradient is useful for finding boundaries of objects. In a gradient 6 | image, pixels at the edges of bright regions of interest have the 7 | brightest intensities. Pixels in the background or in the centers of 8 | regions of interest have zero or dimmer intensity. 9 | """ 10 | 11 | import cellprofiler_core.image 12 | import cellprofiler_core.module 13 | import cellprofiler_core.setting 14 | import numpy 15 | import skimage 16 | import skimage.filters 17 | import skimage.morphology 18 | 19 | 20 | class ImageGradient(cellprofiler_core.module.ImageProcessing): 21 | module_name = "ImageGradient" 22 | 23 | variable_revision_number = 1 24 | 25 | def create_settings(self): 26 | super(ImageGradient, self).create_settings() 27 | 28 | self.structuring_element = cellprofiler_core.setting.StructuringElement( 29 | doc="""Neighborhood in which to compute the local gradient. Select a two-dimensional shape such as "disk" 30 | for images, and a three-dimensional shape such as "ball" for volumes. 
A larger size will compute the gradient 31 | over larger patches of the image and can obscure smaller features.""" 32 | ) 33 | 34 | def settings(self): 35 | __settings__ = super(ImageGradient, self).settings() 36 | 37 | return __settings__ + [ 38 | self.structuring_element 39 | ] 40 | 41 | def visible_settings(self): 42 | __settings__ = super(ImageGradient, self).visible_settings() 43 | 44 | return __settings__ + [ 45 | self.structuring_element 46 | ] 47 | 48 | def run(self, workspace): 49 | x_name = self.x_name.value 50 | 51 | y_name = self.y_name.value 52 | 53 | images = workspace.image_set 54 | 55 | x = images.get_image(x_name) 56 | 57 | x_data = x.pixel_data 58 | 59 | x_data = skimage.img_as_uint(x_data) 60 | 61 | if x.dimensions == 3 or x.multichannel: 62 | y_data = numpy.zeros_like(x_data) 63 | 64 | for z, image in enumerate(x_data): 65 | y_data[z] = skimage.filters.rank.gradient(image, self.__structuring_element()) 66 | else: 67 | y_data = skimage.filters.rank.gradient(x_data, self.structuring_element.value) 68 | 69 | y = cellprofiler_core.image.Image( 70 | image=y_data, 71 | dimensions=x.dimensions, 72 | parent_image=x, 73 | ) 74 | 75 | images.add(y_name, y) 76 | 77 | if self.show_window: 78 | workspace.display_data.x_data = x_data 79 | 80 | workspace.display_data.y_data = y_data 81 | 82 | workspace.display_data.dimensions = x.dimensions 83 | 84 | def __structuring_element(self): 85 | shape = self.structuring_element.shape 86 | 87 | size = self.structuring_element.size 88 | 89 | if shape == "ball": 90 | return skimage.morphology.disk(size) 91 | 92 | if shape == "cube": 93 | return skimage.morphology.square(size) 94 | 95 | if shape == "octahedron": 96 | return skimage.morphology.diamond(size) 97 | 98 | return self.structuring_element.value 99 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/laplacianofgaussian.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Laplacian of Gaussian filter. 5 | """ 6 | 7 | import cellprofiler_core.image 8 | import cellprofiler_core.module 9 | import cellprofiler_core.setting 10 | import cellprofiler_core.setting.text 11 | import scipy.ndimage.filters 12 | import skimage.color 13 | 14 | 15 | class LaplacianOfGaussian(cellprofiler_core.module.ImageProcessing): 16 | module_name = "LaplacianOfGaussian" 17 | 18 | variable_revision_number = 1 19 | 20 | def create_settings(self): 21 | super(LaplacianOfGaussian, self).create_settings() 22 | 23 | self.x = cellprofiler_core.setting.text.Float( 24 | "Sigma x", 25 | value=1.0, 26 | minval=0.0, 27 | doc="Sigma for x axis." 28 | ) 29 | 30 | self.y = cellprofiler_core.setting.text.Float( 31 | "Sigma y", 32 | value=1.0, 33 | minval=0.0, 34 | doc="Sigma for y axis." 35 | ) 36 | 37 | self.z = cellprofiler_core.setting.text.Float( 38 | "Sigma z", 39 | value=1.0, 40 | minval=0.0, 41 | doc="Sigma for z axis. Ignored when input is a two-dimensional image." 
42 | ) 43 | 44 | def settings(self): 45 | __settings__ = super(LaplacianOfGaussian, self).settings() 46 | 47 | return __settings__ + [ 48 | self.x, 49 | self.y, 50 | self.z 51 | ] 52 | 53 | def visible_settings(self): 54 | __settings__ = super(LaplacianOfGaussian, self).visible_settings() 55 | 56 | return __settings__ + [ 57 | self.x, 58 | self.y, 59 | self.z 60 | ] 61 | 62 | def run(self, workspace): 63 | x_name = self.x_name.value 64 | 65 | y_name = self.y_name.value 66 | 67 | images = workspace.image_set 68 | 69 | x = images.get_image(x_name) 70 | 71 | x_data = x.pixel_data 72 | 73 | if x.multichannel: 74 | x_data = skimage.color.rgb2gray(x_data) 75 | 76 | x_data = skimage.img_as_float(x_data) 77 | 78 | dimensions = x.dimensions 79 | 80 | if dimensions == 2: 81 | sigma = (self.x.value, self.y.value) 82 | else: 83 | sigma = (self.z.value, self.x.value, self.y.value) 84 | 85 | y_data = scipy.ndimage.filters.gaussian_laplace(x_data, sigma) 86 | 87 | y = cellprofiler_core.image.Image( 88 | dimensions=dimensions, 89 | image=y_data, 90 | parent_image=x 91 | ) 92 | 93 | images.add(y_name, y) 94 | 95 | if self.show_window: 96 | workspace.display_data.x_data = x_data 97 | 98 | workspace.display_data.y_data = y_data 99 | 100 | workspace.display_data.dimensions = dimensions 101 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/measureimagefocus.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import logging 3 | 4 | import cellprofiler_core.measurement 5 | import cellprofiler_core.module 6 | import cellprofiler_core.preferences 7 | import cellprofiler_core.setting 8 | import microscopeimagequality.miq 9 | import microscopeimagequality.prediction 10 | import matplotlib.cm 11 | import matplotlib.pyplot 12 | import matplotlib.patches 13 | 14 | __doc__ = """ 15 | For installation instructions and platform support notes, please see the `wiki `_. 16 | 17 | This module can collect measurements indicating possible image aberrations, 18 | e.g. blur (poor focus), intensity, saturation (i.e., the percentage 19 | of pixels in the image that are minimal and maximal). 20 | It outputs an image focus score, an integer from 0 (in focus) to 10 (out of focus). 21 | There is also a certainty output indicating how certain the score is. 22 | """ 23 | 24 | from cellprofiler_core.constants.measurement import COLTYPE_FLOAT 25 | 26 | from cellprofiler_core.setting.subscriber import ImageSubscriber 27 | 28 | C_IMAGE_FOCUS = "ImageFocus" 29 | F_SCORE = "Score" 30 | F_CERTAINTY = "Certainty" 31 | 32 | class MeasureImageFocus(cellprofiler_core.module.Module): 33 | category = "Measurement" 34 | 35 | module_name = "MeasureImageFocus" 36 | 37 | variable_revision_number = 1 38 | 39 | def create_settings(self): 40 | self.image_name = ImageSubscriber( 41 | "Image", 42 | doc=""" 43 | The name of an image. 
44 | """ 45 | ) 46 | 47 | def settings(self): 48 | return [ 49 | self.image_name 50 | ] 51 | 52 | def display(self, workspace, figure): 53 | 54 | figure.set_subplots((2, 1)) 55 | 56 | patches= workspace.display_data.patches 57 | 58 | figure.subplot_table(0, 0, workspace.display_data.statistics) 59 | image = workspace.display_data.image 60 | 61 | ax = figure.subplot_imshow_grayscale(1, 0, image, 62 | title="Focus Score" 63 | ) 64 | # show patches 65 | cmap = matplotlib.cm.get_cmap("jet") 66 | for patch in patches: 67 | rect = matplotlib.patches.Rectangle(xy=(patch[1], patch[0]), width=patch[3], height=patch[2]) 68 | rect.set_color(cmap(int(float(patch[4][0]) * 255 / 10))) 69 | rect.set_alpha(float(patch[4][1]['aggregate']) * 0.9) 70 | rect.set_linewidth(0) 71 | rect.set_fill(True) 72 | ax.add_patch(rect) 73 | 74 | # colorbar 75 | sm = matplotlib.pyplot.cm.ScalarMappable(cmap=cmap, norm=matplotlib.pyplot.Normalize(vmin=0, vmax=10)) 76 | sm.set_array([]) 77 | cbar = matplotlib.pyplot.colorbar(sm, ax=ax, ticks=[0, 10], shrink=.6) 78 | cbar.ax.set_yticklabels(['Focused', 'Unfocused']) 79 | 80 | def get_categories(self, pipeline, object_name): 81 | if object_name == "Image": 82 | return [ 83 | C_IMAGE_FOCUS 84 | ] 85 | 86 | return [] 87 | 88 | def get_feature_name(self, name): 89 | image = self.image_name.value 90 | 91 | return C_IMAGE_FOCUS + "_{}_{}".format(name, image) 92 | 93 | def get_measurements(self, pipeline, object_name, category): 94 | name = self.image_name.value 95 | 96 | if object_name == "Image" and category == C_IMAGE_FOCUS: 97 | return [ 98 | F_SCORE + "_{}".format(name), 99 | F_CERTAINTY + "_{}".format(name) 100 | ] 101 | 102 | return [] 103 | 104 | def get_measurement_columns(self, pipeline): 105 | image = "Image" 106 | 107 | features = [ 108 | self.get_feature_name(F_SCORE), 109 | self.get_feature_name(F_CERTAINTY) 110 | ] 111 | 112 | column_type = COLTYPE_FLOAT 113 | 114 | return [(image, feature, column_type) for feature in features] 115 | 116 | def get_measurement_images(self, pipeline, object_name, category, measurement): 117 | if measurement in self.get_measurements(pipeline, object_name, category): 118 | return [self.image_name.value] 119 | 120 | return [] 121 | 122 | def run(self, workspace): 123 | default_weights_index_file = microscopeimagequality.miq.DEFAULT_MODEL_PATH + '.index' 124 | if not os.path.exists(default_weights_index_file): 125 | logging.warning('weights index file not found at {}'.format(default_weights_index_file)) 126 | microscopeimagequality.miq.download_model() 127 | 128 | m = microscopeimagequality.prediction.ImageQualityClassifier(microscopeimagequality.miq.DEFAULT_MODEL_PATH, 84, 129 | 11) 130 | 131 | image_set = workspace.image_set 132 | image = image_set.get_image(self.image_name.value, must_be_grayscale=True) 133 | 134 | data = image.pixel_data 135 | 136 | measurements = workspace.measurements 137 | 138 | statistics = [] 139 | 140 | pred = m.predict(data) 141 | patches = m.get_patch_predictions(data) 142 | 143 | feature_score = self.get_feature_name(F_SCORE) 144 | score = str(pred[0]) 145 | feature_certainty = self.get_feature_name(F_CERTAINTY) 146 | certainty = str(pred[1]['aggregate']) 147 | 148 | statistics.append([feature_score, score]) 149 | statistics.append([feature_certainty, certainty]) 150 | 151 | measurements.add_image_measurement(feature_score, score) 152 | measurements.add_image_measurement(feature_certainty, certainty) 153 | 154 | # if self.show_window: 155 | workspace.display_data.statistics = statistics 156 | 
workspace.display_data.patches = patches 157 | workspace.display_data.image= data 158 | 159 | def volumetric(self): 160 | return False 161 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/nucleaizer.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | NucleAIzer 5 | ========== 6 | 7 | **NucleAIzer** identifies nuclei. 8 | 9 | Instructions: 10 | 11 | Warning: For correct usage, this module requires some experience with 12 | Python and Python dependencies! 13 | 14 | In addition to copying this plugin to your plugins directory, you'll 15 | need to clone the following Git repository and follow the 16 | `Prerequisites` instructions in the README: 17 | 18 | https://github.com/spreka/biomagdsb 19 | 20 | This includes installing a specific commit of Matterport's Mask R-CNN 21 | repository. This plugin _will not_ work with the latest commit! The 22 | model will not load if the Mask R-CNN modules are not available on your 23 | Python path since they use custom Keras layers! 24 | 25 | You'll also need to make sure you're running versions of Keras, NumPy, 26 | SciPy, and TensorFlow that work with `biomagdsb`, `Mask-RCNN`, 27 | and `CellProfiler`. I had success with the following versions: 28 | 29 | numpy==1.15.4 30 | scipy==1.1.0 31 | tensorflow==1.15.0 32 | 33 | Finally, you'll need to download the model configuration and weights: 34 | 35 | https://drive.google.com/drive/folders/1lbJ_LanxSO-n5rMjmhWAHtLcE9znHyJO?usp=sharing 36 | | 37 | 38 | ============ ============ =============== 39 | Supports 2D? Supports 3D? Respects masks? 40 | ============ ============ =============== 41 | YES NO YES 42 | ============ ============ =============== 43 | """ 44 | 45 | import os.path 46 | 47 | import numpy 48 | import skimage.measure 49 | import skimage.transform 50 | import tensorflow 51 | 52 | import cellprofiler_core.image 53 | import cellprofiler_core.module 54 | import cellprofiler_core.object 55 | import cellprofiler_core.setting 56 | from cellprofiler_core.module.image_segmentation import ImageSegmentation 57 | from cellprofiler_core.setting.subscriber import ImageSubscriber 58 | from cellprofiler_core.setting.text import Pathname 59 | 60 | 61 | class IdentifyNucleus(ImageSegmentation): 62 | category = "Advanced" 63 | 64 | module_name = "IdentifyNucleus" 65 | 66 | variable_revision_number = 1 67 | 68 | def create_settings(self): 69 | super(IdentifyNucleus, self).create_settings() 70 | 71 | self.mask_name = ImageSubscriber( 72 | "Mask", 73 | can_be_blank=True, 74 | doc="" 75 | ) 76 | 77 | self.model_pathname = Pathname( 78 | "Model", 79 | doc="" 80 | ) 81 | 82 | self.weights_pathname = Pathname( 83 | "Weights", 84 | doc="" 85 | ) 86 | 87 | def settings(self): 88 | __settings__ = super(IdentifyNucleus, self).settings() 89 | 90 | return __settings__ + [ 91 | self.mask_name, 92 | self.model_pathname, 93 | self.weights_pathname 94 | ] 95 | 96 | def visible_settings(self): 97 | __settings__ = super(IdentifyNucleus, self).settings() 98 | 99 | __settings__ = __settings__ + [ 100 | self.mask_name, 101 | self.model_pathname, 102 | self.weights_pathname 103 | ] 104 | 105 | return __settings__ 106 | 107 | def run(self, workspace): 108 | model_pathname = os.path.abspath(self.model_pathname.value) 109 | 110 | model = tensorflow.keras.models.model_from_json(model_pathname) 111 | 112 | weights_pathname = os.path.abspath(self.weights_pathname.value) 113 | 114 | 
model.load_weights(weights_pathname, by_name=True) 115 | 116 | x_name = self.x_name.value 117 | y_name = self.y_name.value 118 | 119 | images = workspace.image_set 120 | 121 | x = images.get_image(x_name) 122 | 123 | dimensions = x.dimensions 124 | 125 | x_data = x.pixel_data 126 | 127 | x_data = skimage.transform.resize(x_data, (2048, 2048)) 128 | 129 | x_data = numpy.expand_dims(x_data, axis=0) 130 | 131 | mask_data = None 132 | 133 | if not self.mask_name.is_blank: 134 | mask_name = self.mask_name.value 135 | 136 | mask = images.get_image(mask_name) 137 | 138 | mask_data = mask.pixel_data 139 | 140 | prediction = model.predict(x_data) 141 | 142 | _, _, _, predicted_masks, _, _, _ = prediction 143 | 144 | count = predicted_masks.shape[0] 145 | 146 | for index in range(0, count): 147 | predicted_mask = predicted_masks[index] 148 | 149 | if mask_data: 150 | predicted_mask *= mask_data 151 | 152 | y_data = skimage.measure.label(predicted_mask) 153 | 154 | objects = cellprofiler_core.object.Objects() 155 | 156 | objects.segmented = y_data 157 | 158 | objects.parent_image = x 159 | 160 | workspace.object_set.add_objects(objects, y_name) 161 | 162 | self.add_measurements(workspace) 163 | 164 | if self.show_window: 165 | workspace.display_data.x_data = x.pixel_data 166 | 167 | workspace.display_data.dimensions = dimensions 168 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/randomwalkeralgorithm.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | 5 | Random walker algorithm 6 | 7 | Single-channel images can be two-or-three-dimensional. 8 | 9 | """ 10 | 11 | import numpy 12 | import skimage.color 13 | import skimage.measure 14 | import skimage.segmentation 15 | 16 | import cellprofiler_core.module 17 | import cellprofiler_core.object 18 | import cellprofiler_core.setting 19 | from cellprofiler_core.module.image_segmentation import ImageSegmentation 20 | from cellprofiler_core.setting.text import Float 21 | 22 | 23 | class RandomWalkerAlgorithm(ImageSegmentation): 24 | module_name = "Random walker algorithm" 25 | 26 | variable_revision_number = 1 27 | 28 | def create_settings(self): 29 | super(RandomWalkerAlgorithm, self).create_settings() 30 | 31 | self.first_phase = Float( 32 | doc="First phase demarcates an image’s first phase.", 33 | text="First phase", 34 | value=0.5 35 | ) 36 | 37 | self.second_phase = Float( 38 | doc="Second phase demarcates an image’s second phase.", 39 | text="Second phase", 40 | value=0.5 41 | ) 42 | 43 | self.beta = Float( 44 | doc=""" 45 | Beta is the penalization coefficient for the random walker motion. Increasing the penalization 46 | coefficient increases the difficulty of the diffusion. Likewise, decreasing the penalization coefficient 47 | decreases the difficulty of the diffusion. 
48 | """, 49 | text="Beta", 50 | value=130.0 51 | ) 52 | 53 | def settings(self): 54 | __settings__ = super(RandomWalkerAlgorithm, self).settings() 55 | 56 | return __settings__ + [ 57 | self.first_phase, 58 | self.second_phase, 59 | self.beta 60 | ] 61 | 62 | def visible_settings(self): 63 | __settings__ = super(RandomWalkerAlgorithm, self).settings() 64 | 65 | return __settings__ + [ 66 | self.first_phase, 67 | self.second_phase, 68 | self.beta 69 | ] 70 | 71 | def run(self, workspace): 72 | x_name = self.x_name.value 73 | 74 | y_name = self.y_name.value 75 | 76 | images = workspace.image_set 77 | 78 | x = images.get_image(x_name) 79 | 80 | x_data = x.pixel_data 81 | 82 | if x.multichannel: 83 | x_data = skimage.color.rgb2gray(x_data) 84 | 85 | labels_data = numpy.zeros_like(x_data, numpy.uint8) 86 | 87 | labels_data[x_data > self.first_phase.value] = 1 88 | 89 | labels_data[x_data < self.second_phase.value] = 2 90 | 91 | y_data = skimage.segmentation.random_walker( 92 | beta=self.beta.value, 93 | data=x_data, 94 | labels=labels_data, 95 | multichannel=False, 96 | spacing=x.spacing 97 | ) 98 | 99 | y_data = skimage.measure.label(y_data) 100 | 101 | objects = cellprofiler_core.object.Objects() 102 | 103 | objects.segmented = y_data 104 | 105 | objects.parent_image = x 106 | 107 | workspace.object_set.add_objects(objects, y_name) 108 | 109 | self.add_measurements(workspace) 110 | 111 | if self.show_window: 112 | workspace.display_data.x_data = x_data 113 | 114 | workspace.display_data.y_data = y_data 115 | 116 | workspace.display_data.dimensions = x.dimensions 117 | -------------------------------------------------------------------------------- /unmaintained_plugins/CellProfiler4_autoconverted/tophattransform.py: -------------------------------------------------------------------------------- 1 | # coding=utf-8 2 | 3 | """ 4 | Top-hat transform 5 | ================= 6 | 7 | Perform a black or white top-hat transform on grayscale pixel data. 8 | 9 | Top-hat transforms are useful for extracting small elements and details 10 | from images and volumes. 11 | """ 12 | 13 | import skimage.morphology 14 | 15 | import cellprofiler_core.module 16 | import cellprofiler_core.setting 17 | from cellprofiler_core.setting.choice import Choice 18 | 19 | 20 | class TopHatTransform(cellprofiler_core.module.ImageProcessing): 21 | module_name = "TopHatTransform" 22 | 23 | variable_revision_number = 1 24 | 25 | def create_settings(self): 26 | super(TopHatTransform, self).create_settings() 27 | 28 | self.operation_name = Choice( 29 | choices=[ 30 | "Black top-hat transform", 31 | "White top-hat transform" 32 | ], 33 | text="Operation", 34 | value="Black top-hat transform", 35 | doc=""" 36 | Select the top-hat transformation: 37 |
38 |   • Black top-hat transform: This operation returns the dark spots of the image that are smaller
39 | than the structuring element. Note that dark spots in the original image are bright spots after the
40 | black top hat.
41 |   • White top-hat transform: This operation returns the bright spots of the image that are
42 | smaller than the structuring element.
43 |
44 | """ 45 | ) 46 | 47 | self.structuring_element = cellprofiler_core.setting.StructuringElement() 48 | 49 | def settings(self): 50 | __settings__ = super(TopHatTransform, self).settings() 51 | 52 | return __settings__ + [ 53 | self.structuring_element, 54 | self.operation_name 55 | ] 56 | 57 | def visible_settings(self): 58 | __settings__ = super(TopHatTransform, self).visible_settings() 59 | 60 | return __settings__ + [ 61 | self.operation_name, 62 | self.structuring_element 63 | ] 64 | 65 | def run(self, workspace): 66 | self.function = tophat_transform 67 | 68 | super(TopHatTransform, self).run(workspace) 69 | 70 | 71 | def tophat_transform(image, structuring_element, operation): 72 | if operation == "Black top-hat transform": 73 | return skimage.morphology.black_tophat(image, selem=structuring_element) 74 | 75 | return skimage.morphology.white_tophat(image, selem=structuring_element) 76 | -------------------------------------------------------------------------------- /unmaintained_tests/test_edgedetection.py: -------------------------------------------------------------------------------- 1 | import cellprofiler.image 2 | import numpy 3 | import numpy.random 4 | import numpy.testing 5 | import skimage.color 6 | import skimage.filters 7 | 8 | import edgedetection 9 | 10 | instance = edgedetection.EdgeDetection 11 | 12 | 13 | def test_run_without_mask(image, image_set, module, workspace): 14 | module.x_name.value = "example" 15 | 16 | module.y_name.value = "EdgeDetection" 17 | 18 | module.mask.value = "Leave blank" 19 | 20 | module.run(workspace) 21 | 22 | actual = image_set.get_image("EdgeDetection") 23 | 24 | data = image.pixel_data 25 | 26 | if image.multichannel: 27 | data = skimage.color.rgb2gray(data) 28 | 29 | if image.dimensions == 2: 30 | expected_data = skimage.filters.sobel(data) 31 | else: 32 | expected_data = numpy.zeros_like(data) 33 | 34 | for idx, img in enumerate(data): 35 | expected_data[idx] = skimage.filters.sobel(img) 36 | 37 | expected = cellprofiler.image.Image( 38 | image=expected_data, 39 | parent_image=image, 40 | dimensions=image.dimensions 41 | ) 42 | 43 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 44 | 45 | 46 | def test_run_with_mask(image, image_set, module, workspace): 47 | module.x_name.value = "example" 48 | 49 | module.y_name.value = "EdgeDetection" 50 | 51 | module.mask.value = "mask" 52 | 53 | mask_shape = image.pixel_data.shape 54 | 55 | if image.dimensions == 2: 56 | mask_data = numpy.random.rand(mask_shape[0], mask_shape[1]) 57 | 58 | mask_data[:5] = 0 59 | 60 | mask_data[-5:] = 0 61 | 62 | mask_data[:, :5] = 0 63 | 64 | mask_data[:, -5:] = 0 65 | else: 66 | mask_data = numpy.random.rand(*mask_shape) 67 | 68 | mask_data[:, :5] = 0 69 | 70 | mask_data[:, -5:] = 0 71 | 72 | mask_data[:, :, :5] = 0 73 | 74 | mask_data[:, :, -5:] = 0 75 | 76 | mask_data = mask_data != 0 77 | 78 | mask = cellprofiler.image.Image( 79 | image=mask_data 80 | ) 81 | 82 | image_set.add("mask", mask) 83 | 84 | module.run(workspace) 85 | 86 | actual = image_set.get_image("EdgeDetection") 87 | 88 | data = image.pixel_data 89 | 90 | if image.multichannel: 91 | data = skimage.color.rgb2gray(data) 92 | 93 | if image.dimensions == 2: 94 | expected_data = skimage.filters.sobel(data, mask=mask_data) 95 | else: 96 | expected_data = numpy.zeros_like(data) 97 | 98 | for idx, img in enumerate(data): 99 | expected_data[idx] = skimage.filters.sobel(img, mask=mask_data[idx]) 100 | 101 | expected = cellprofiler.image.Image( 102 | image=expected_data, 103 | 
parent_image=image, 104 | dimensions=image.dimensions 105 | ) 106 | 107 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 108 | -------------------------------------------------------------------------------- /unmaintained_tests/test_gammacorrection.py: -------------------------------------------------------------------------------- 1 | import numpy.testing 2 | import skimage.exposure 3 | 4 | import gammacorrection 5 | 6 | instance = gammacorrection.GammaCorrection 7 | 8 | 9 | def test_run(image, module, image_set, workspace): 10 | module.x_name.value = "example" 11 | 12 | module.y_name.value = "GammaCorrection" 13 | 14 | module.run(workspace) 15 | 16 | actual = image_set.get_image("GammaCorrection") 17 | 18 | desired = skimage.exposure.adjust_gamma(image.pixel_data) 19 | 20 | numpy.testing.assert_array_equal(actual.pixel_data, desired) 21 | -------------------------------------------------------------------------------- /unmaintained_tests/test_imagegradient.py: -------------------------------------------------------------------------------- 1 | import cellprofiler.image 2 | import numpy 3 | import numpy.testing 4 | import skimage.filters.rank 5 | import skimage.morphology 6 | 7 | import imagegradient 8 | 9 | instance = imagegradient.ImageGradient 10 | 11 | 12 | def test_run(image, module, image_set, workspace): 13 | module.x_name.value = "example" 14 | 15 | module.y_name.value = "ImageGradient" 16 | 17 | if image.dimensions == 3: 18 | module.structuring_element.shape = "ball" 19 | 20 | module.run(workspace) 21 | 22 | actual = image_set.get_image("ImageGradient") 23 | 24 | data = image.pixel_data 25 | 26 | data = skimage.img_as_uint(data) 27 | 28 | disk = skimage.morphology.disk(1) 29 | 30 | if image.dimensions == 3 or image.multichannel: 31 | expected_data = numpy.zeros_like(data) 32 | 33 | for z, img in enumerate(data): 34 | expected_data[z] = skimage.filters.rank.gradient(img, disk) 35 | else: 36 | expected_data = skimage.filters.rank.gradient(data, disk) 37 | 38 | # CellProfiler converts Image data according to MatLab standards. Remove this and test against 39 | # expected_data once MatLab support is removed. Until then, use the Image constructor to convert the data 40 | # a la MatLab before comparison.
41 | expected = cellprofiler.image.Image( 42 | image=expected_data, 43 | dimensions=3 44 | ) 45 | 46 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 47 | -------------------------------------------------------------------------------- /unmaintained_tests/test_laplacianofgaussian.py: -------------------------------------------------------------------------------- 1 | import cellprofiler.image 2 | import numpy.testing 3 | import scipy.ndimage.filters 4 | import skimage.color 5 | 6 | import laplacianofgaussian 7 | 8 | instance = laplacianofgaussian.LaplacianOfGaussian 9 | 10 | 11 | def test_run(image, image_set, module, workspace): 12 | module.x_name.value = "example" 13 | 14 | module.y_name.value = "LaplacianOfGaussian" 15 | 16 | module.x.value = 1.1 17 | 18 | module.y.value = 1.2 19 | 20 | module.z.value = 1.3 21 | 22 | module.run(workspace) 23 | 24 | actual = image_set.get_image("LaplacianOfGaussian") 25 | 26 | data = image.pixel_data 27 | 28 | if image.multichannel: 29 | data = skimage.color.rgb2gray(data) 30 | 31 | data = skimage.img_as_float(data) 32 | 33 | if image.dimensions == 2: 34 | sigma = (1.1, 1.2) 35 | else: 36 | sigma = (1.3, 1.1, 1.2) 37 | 38 | expected_data = scipy.ndimage.filters.gaussian_laplace(data, sigma) 39 | 40 | expected = cellprofiler.image.Image( 41 | dimensions=image.dimensions, 42 | image=expected_data, 43 | parent_image=image 44 | ) 45 | 46 | numpy.testing.assert_array_equal(expected.pixel_data, actual.pixel_data) 47 | -------------------------------------------------------------------------------- /unmaintained_tests/test_measuretrackquality.py: -------------------------------------------------------------------------------- 1 | """test_measuretrackquality.py: test the MeasureTrackQuality module 2 | 3 | Copyright (c) 2017 University of Southern California 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated 6 | documentation files (the "Software"), to deal in the Software without restriction, including without limitation 7 | the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, 8 | and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 9 | 10 | The above copyright notice and this permission notice shall be included in all copies or substantial portions 11 | of the Software. 12 | 13 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED 14 | TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 15 | THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF 16 | CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 17 | DEALINGS IN THE SOFTWARE. 
18 | 19 | Author: Dan Ruderman (ruderman@usc.edu) 20 | """ 21 | 22 | import unittest 23 | import numpy 24 | 25 | import measuretrackquality 26 | 27 | # run in headless mode so wx is not required 28 | import cellprofiler.preferences 29 | cellprofiler.preferences.set_headless() 30 | 31 | class test_MeasureTrackQuality(unittest.TestCase): 32 | def test_compute_typical_deviations(self): 33 | num_measurements = 10 # number of tests to run 34 | num_timepoints = 20 35 | num_cells = 10 36 | num_deviations_per_cell = num_timepoints-1 37 | num_deviations_per_measurement = num_cells * num_deviations_per_cell 38 | 39 | numpy.random.seed(17) 40 | target_medians = numpy.random.uniform(low=0.5, high=5, size=num_measurements) # results we expect 41 | 42 | def create_data_for_measurement(target_median): 43 | vals = numpy.abs(numpy.random.normal(size=num_deviations_per_measurement)) 44 | abs_deviations = vals - numpy.median(vals) + target_median # enforce the desired median 45 | # make the deviations randomly positive or negative 46 | deviations = numpy.multiply(abs_deviations, 47 | numpy.random.choice([-1, 1], size=len(abs_deviations), replace=True)) 48 | lists = [list(numpy.cumsum(numpy.insert(deviations[(cell*num_deviations_per_cell):((cell+1)*num_deviations_per_cell)], 49 | 0, numpy.random.normal()))) for cell in range(num_cells)] 50 | measurement_vals = sum(lists, []) # flatten 51 | return measurement_vals 52 | 53 | # create data set 54 | values_dict = {i : create_data_for_measurement(target_medians[i]) for i in range(num_measurements)} 55 | 56 | cell_ids = numpy.repeat(range(num_cells), num_timepoints) 57 | timepoints = numpy.tile(range(num_timepoints), num_cells) 58 | result_medians = measuretrackquality.MeasureTrackQuality.compute_typical_deviations(values_dict, cell_ids, timepoints) 59 | 60 | max_fractional_error = numpy.max(numpy.divide(numpy.abs(numpy.subtract(result_medians.values(), target_medians)), target_medians)) 61 | 62 | self.assertLess(max_fractional_error, 1e-5, "Excessive error in compute_typical_deviations") 63 | 64 | def test_compute_tram(self): 65 | numpy.random.seed(17) 66 | num_timepoints = 50 67 | num_features = 5 # number of measurements to combine 68 | feature_names = [str(i) for i in range(num_features)] 69 | num_knots = num_timepoints / 5 70 | tram_exponent = 0.5 71 | 72 | # Make data with big DC offsets (which should be ignored by TrAM) and small variations. TrAM should be small. 
73 | fluctuation_scale = 1 74 | offset_scale = 100*fluctuation_scale # huge offset which should be ignored by TrAM because of smoothing 75 | error_scale = 10*fluctuation_scale # big error relative to fluctuations which should be detected by TrAM 76 | 77 | # start with constant random constant data values 78 | base_data_array = numpy.repeat(numpy.random.normal(0, offset_scale, (1, num_features)), num_timepoints, 0) 79 | 80 | # add uncorrelated noise 81 | noise_array = numpy.random.normal(0, fluctuation_scale, (num_timepoints, num_features)) 82 | data_1 = numpy.add(base_data_array, noise_array) 83 | 84 | tram_1 = measuretrackquality.MeasureTrackQuality.compute_TrAM(feature_names, data_1, range(num_timepoints), 85 | range(num_timepoints), num_knots, tram_exponent, 86 | []) 87 | 88 | # should be on the scale of the fluctuations 89 | self.assertLess(tram_1, 3*fluctuation_scale) 90 | 91 | # now add in a large sudden fluctuation which we should detect 92 | index = num_timepoints/2 # in the middle 93 | offset_array = numpy.zeros(data_1.shape) 94 | offset_array[index,:] = error_scale 95 | data_2 = numpy.add(data_1, offset_array) 96 | 97 | tram_2 = measuretrackquality.MeasureTrackQuality.compute_TrAM(feature_names, data_2, range(num_timepoints), 98 | range(num_timepoints), num_knots, tram_exponent, 99 | []) 100 | self.assertGreater(tram_2, error_scale/2) # should reflect the scale of the error 101 | 102 | -------------------------------------------------------------------------------- /unmaintained_tests/test_randomwalkeralgorithm.py: -------------------------------------------------------------------------------- 1 | import numpy 2 | import numpy.testing 3 | import skimage.color 4 | import skimage.measure 5 | import skimage.segmentation 6 | 7 | import randomwalkeralgorithm 8 | 9 | 10 | instance = randomwalkeralgorithm.RandomWalkerAlgorithm 11 | 12 | 13 | def test_run(image, module, workspace): 14 | module.x_name.value = "example" 15 | 16 | module.y_name.value = "RandomWalkerAlgorithm" 17 | 18 | module.first_phase.value = 0.5 19 | 20 | module.second_phase.value = 0.5 21 | 22 | module.beta.value = 130.0 23 | 24 | module.run(workspace) 25 | 26 | x_data = image.pixel_data 27 | 28 | if image.multichannel: 29 | x_data = skimage.color.rgb2gray(x_data) 30 | 31 | labels_data = numpy.zeros_like(x_data, numpy.uint) 32 | 33 | labels_data[x_data < 0.5] = 1 34 | 35 | labels_data[x_data > 0.5] = 2 36 | 37 | expected = skimage.segmentation.random_walker( 38 | beta=130.0, 39 | data=x_data, 40 | labels=labels_data, 41 | multichannel=False, 42 | spacing=image.spacing 43 | ) 44 | 45 | expected = skimage.measure.label(expected) 46 | 47 | actual = workspace.get_objects("RandomWalkerAlgorithm") 48 | 49 | numpy.testing.assert_array_equal(expected, actual.segmented) 50 | -------------------------------------------------------------------------------- /unmaintained_tests/test_shollanalysis.py: -------------------------------------------------------------------------------- 1 | import shollanalysis 2 | 3 | instance = shollanalysis.ShollAnalysis() 4 | -------------------------------------------------------------------------------- /unmaintained_tests/test_tophattransform.py: -------------------------------------------------------------------------------- 1 | import cellprofiler.image 2 | import numpy.testing 3 | import pytest 4 | import skimage.data 5 | import skimage.morphology 6 | 7 | import tophattransform 8 | 9 | instance = tophattransform.TopHatTransform 10 | 11 | 12 | @pytest.fixture( 13 | scope="module", 14 | params=[ 
15 | (skimage.data.camera()[0:128, 0:128], 2), 16 | (numpy.tile(skimage.data.camera()[0:32, 0:32], (2, 1)).reshape(2, 32, 32), 3) 17 | ], 18 | ids=[ 19 | "grayscale_image", 20 | "grayscale_volume" 21 | ] 22 | ) 23 | def image(request): 24 | data, dimensions = request.param 25 | 26 | return cellprofiler.image.Image(image=data, dimensions=dimensions) 27 | 28 | 29 | def test_run_black_tophat(image, module, image_set, workspace): 30 | module.x_name.value = "example" 31 | 32 | module.y_name.value = "TopHatTransform" 33 | 34 | module.operation_name.value = "Black top-hat transform" 35 | 36 | if image.volumetric: 37 | module.structuring_element.value = "ball,1" 38 | 39 | structure = skimage.morphology.ball(1) 40 | else: 41 | module.structuring_element.value = "disk,1" 42 | 43 | structure = skimage.morphology.disk(1) 44 | 45 | module.run(workspace) 46 | 47 | actual = image_set.get_image("TopHatTransform") 48 | 49 | desired = skimage.morphology.black_tophat(image.pixel_data, structure) 50 | 51 | numpy.testing.assert_array_equal(actual.pixel_data, desired) 52 | 53 | 54 | def test_run_white_tophat(image, module, image_set, workspace): 55 | module.x_name.value = "example" 56 | 57 | module.y_name.value = "TopHatTransform" 58 | 59 | module.operation_name.value = "White top-hat transform" 60 | 61 | if image.volumetric: 62 | module.structuring_element.value = "ball,1" 63 | 64 | structure = skimage.morphology.ball(1) 65 | else: 66 | module.structuring_element.value = "disk,1" 67 | 68 | structure = skimage.morphology.disk(1) 69 | 70 | module.run(workspace) 71 | 72 | actual = image_set.get_image("TopHatTransform") 73 | 74 | desired = skimage.morphology.white_tophat(image.pixel_data, structure) 75 | 76 | numpy.testing.assert_array_equal(actual.pixel_data, desired) 77 | --------------------------------------------------------------------------------